From 468dbc355794dec156c02804abd593fd56b9442f Mon Sep 17 00:00:00 2001 From: GinFung Date: Mon, 30 Mar 2020 09:54:05 +0800 Subject: [PATCH 001/367] Add matmul biasadd fusion pass --- .../ascend/ascend_backend_optimization.cc | 2 + .../ascend/ir_fusion/matmul_biasadd_fusion.cc | 51 +++++++++++++++++ .../ascend/ir_fusion/matmul_biasadd_fusion.h | 34 +++++++++++ mindspore/ccsrc/pre_activate/common/helper.h | 1 + mindspore/ccsrc/utils/utils.h | 2 + .../ir_fusion/matmul_biasadd_fusion_test.cc | 56 +++++++++++++++++++ .../matmul_biasadd_fusion_test.py | 46 +++++++++++++++ 7 files changed, 192 insertions(+) create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.cc create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.h create mode 100644 tests/ut/cpp/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion_test.cc create mode 100644 tests/ut/cpp/python_input/gtest_input/pre_activate/matmul_biasadd_fusion_test.py diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc index 9883443910..b9b324e5dd 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc @@ -43,6 +43,7 @@ #include "pre_activate/ascend/ir_fusion/momentum_lossscale_fusion.h" #include "pre_activate/ascend/ir_fusion/mul_add_fusion.h" #include "pre_activate/ascend/ir_fusion/mul_addn_fusion.h" +#include "pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.h" #include "pre_activate/ascend/format_type/insert_trans_op.h" #include "pre_activate/pass/getitem_tuple.h" #include "pre_activate/pass/optimize_dependence.h" @@ -173,6 +174,7 @@ void AscendBackendIRFusionOptimization(const std::shared_ptrAddPass(std::make_shared()); ir_fusion_pm->AddPass(std::make_shared()); ir_fusion_pm->AddPass(std::make_shared()); + ir_fusion_pm->AddPass(std::make_shared()); 
ir_fusion_pm->AddPass(std::make_shared()); ir_fusion_pm->AddPass(std::make_shared()); } diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.cc new file mode 100644 index 0000000000..e81c804b71 --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.cc @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.h" +#include +#include "pre_activate/common/helper.h" +#include "session/anf_runtime_algorithm.h" +#include "utils/utils.h" + +namespace mindspore { +namespace opt { +namespace { +constexpr size_t kMatMulInputIndex = 1; +constexpr size_t kBiasInputIndex = 2; +} // namespace + +const BaseRef MatmulBiasaddFusion::DefinePattern() const { + VarPtr X0 = std::make_shared(); + VarPtr X1 = std::make_shared(); + VarPtr X2 = std::make_shared(); + const auto prim_bias_add = std::make_shared(kBiasAddOpName); + return VectorRef({prim_bias_add, VectorRef({prim::kPrimMatMul, X0, X1}), X2}); +} + +const AnfNodePtr MatmulBiasaddFusion::Process(const FuncGraphPtr &, const AnfNodePtr &node, const EquivPtr &) const { + MS_EXCEPTION_IF_NULL(node); + auto cnode = node->cast(); + MS_EXCEPTION_IF_NULL(cnode); + CheckCNodeInputSize(cnode, kBiasAddInputNum); + AnfNodePtr matmul = cnode->input(kMatMulInputIndex); + MS_EXCEPTION_IF_NULL(matmul); + auto matmul_cnode = matmul->cast(); + MS_EXCEPTION_IF_NULL(matmul_cnode); + matmul_cnode->add_input(cnode->input(kBiasInputIndex)); + AnfAlgo::SetNodeAttr(kAttrHasBias, MakeValue(true), matmul); + return matmul; +} +} // namespace opt +} // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.h b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.h new file mode 100644 index 0000000000..56675243de --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_MATMUL_BIASADD_FUSION_H_ +#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_MATMUL_BIASADD_FUSION_H_ + +#include "pre_activate/common/optimizer.h" + +namespace mindspore { +namespace opt { +class MatmulBiasaddFusion : public PatternProcessPass { + public: + explicit MatmulBiasaddFusion(bool multigraph = true) : PatternProcessPass("matmul_biasadd_fusion", multigraph) {} + + ~MatmulBiasaddFusion() override = default; + const BaseRef DefinePattern() const override; + const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; +}; +} // namespace opt +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_MATMUL_BIASADD_FUSION_H_ diff --git a/mindspore/ccsrc/pre_activate/common/helper.h b/mindspore/ccsrc/pre_activate/common/helper.h index ba8db8bd29..4f30a935af 100644 --- a/mindspore/ccsrc/pre_activate/common/helper.h +++ b/mindspore/ccsrc/pre_activate/common/helper.h @@ -84,6 +84,7 @@ constexpr size_t kLayerNormGradInputNum = 6; constexpr size_t kAdamApplyOneOutputNum = 3; constexpr size_t kBackendTransDataInputNum = 2; constexpr size_t kApplyMomentumInputNum = 6; +constexpr size_t kBiasAddInputNum = 3; enum FusedBatchNormInput { kX = 1, diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index 3328480d00..229c0547cc 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -110,6 +110,7 @@ constexpr auto kResizeNearestNeighborGrad = "ResizeNearestNeighborGrad"; constexpr 
auto kFusedMulAddOpName = "FusedMulAdd"; constexpr auto kFusedMulAddNOpName = "FusedMulAddN"; constexpr auto kFusedMulApplyMomentumOpName = "FusedMulApplyMomentum"; +constexpr auto kBiasAddOpName = "BiasAdd"; // attr key name constexpr auto kAttrInputNames = "input_names"; @@ -140,6 +141,7 @@ constexpr auto kAttrDynInput = "dynamic"; constexpr auto kAttrDynInputSizes = "dyn_input_sizes"; constexpr auto kAttrSrcFormat = "src_format"; constexpr auto kAttrOutputUsedNum = "output_used_num"; +constexpr auto kAttrHasBias = "has_bias"; // attr value constexpr auto kValueTargetSwitch = "target_switch"; diff --git a/tests/ut/cpp/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion_test.cc b/tests/ut/cpp/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion_test.cc new file mode 100644 index 0000000000..c8f97be290 --- /dev/null +++ b/tests/ut/cpp/pre_activate/ascend/ir_fusion/matmul_biasadd_fusion_test.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "pre_activate/ascend/ir_fusion/matmul_biasadd_fusion.h" +#include "common/backend_common_test.h" +#include "common/py_func_graph_fetcher.h" + +namespace mindspore { +namespace opt { +class TestHWMatmulBiasaddFusion : public BackendCommon { + public: + TestHWMatmulBiasaddFusion() : get_py_fun_("gtest_input.pre_activate.matmul_biasadd_fusion_test", true) {} + ~TestHWMatmulBiasaddFusion() override = default; + + UT::PyFuncGraphFetcher get_py_fun_; +}; + +TEST_F(TestHWMatmulBiasaddFusion, test_matmul_biasadd_fusion) { + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_matmul_biasadd_fusion", "before"); + EXPECT_NE(g, nullptr); + std::vector shpx{1, 3}; + auto x_abstract = std::make_shared(kFloat32, shpx); + std::vector shpy{3, 4}; + auto y_abstract = std::make_shared(kFloat32, shpy); + std::vector shp_bias{4}; + auto bias_abstract = std::make_shared(kFloat32, shp_bias); + AbstractBasePtrList args_spec_list; + args_spec_list.push_back(x_abstract); + args_spec_list.push_back(y_abstract); + args_spec_list.push_back(bias_abstract); + auto kg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(kg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_matmul_biasadd_fusion", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} +} // namespace opt +} // namespace mindspore \ No newline at end of file diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/matmul_biasadd_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/matmul_biasadd_fusion_test.py new file mode 100644 index 0000000000..5d55e854f7 --- /dev/null +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/matmul_biasadd_fusion_test.py @@ -0,0 +1,46 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not 
use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +from mindspore.ops import operations as P +from mindspore.ops import Primitive + +MatMul = P.MatMul() +BiasAdd = P.BiasAdd() +make_tuple = Primitive('make_tuple') + +class FnDict: + def __init__(self): + self.fnDict = {} + + def __call__(self, fn): + self.fnDict[fn.__name__] = fn + + def __getitem__(self, name): + return self.fnDict[name] + + +def test_matmul_biasadd_fusion(tag): + fns = FnDict() + + @fns + def before(input0, input1, input2): + matmul = MatMul(input0, input1) + biasadd = BiasAdd(matmul, input2) + return biasadd + + @fns + def after(input0, input1, input2): + return make_tuple(MatMul(input0, input1, input2)) + + return fns[tag] From 7b7a6a45a043f5e32d0daa4b82ef214ff70592f6 Mon Sep 17 00:00:00 2001 From: seatea Date: Mon, 30 Mar 2020 12:10:35 +0800 Subject: [PATCH 002/367] Check if the shape of the input of NMSWithMask is (N, 5). 
--- mindspore/ops/operations/math_ops.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 213e04e351..ad928d792f 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1745,11 +1745,14 @@ class NMSWithMask(PrimitiveWithInfer): self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask']) def infer_shape(self, bboxes_shape): + validator.check_integer("bboxes rank", len(bboxes_shape), 2, Rel.EQ) + validator.check_integer("bboxes.shape()[0]", bboxes_shape[0], 0, Rel.GT) + validator.check_integer("bboxes.shape()[1]", bboxes_shape[1], 5, Rel.EQ) num = bboxes_shape[0] - validator.check_integer("bboxes_shape[0]", num, 0, Rel.GT) return (bboxes_shape, (num,), (num,)) def infer_dtype(self, bboxes_dtype): + validator.check_subclass("bboxes_dtype", bboxes_dtype, mstype.tensor) validator.check_typename("bboxes_dtype", bboxes_dtype, [mstype.float16, mstype.float32]) return (bboxes_dtype, mstype.int32, mstype.bool_) From c6d261b27787bf605d473b2d41466950ee5b3c69 Mon Sep 17 00:00:00 2001 From: yoonlee666 Date: Mon, 30 Mar 2020 16:21:57 +0800 Subject: [PATCH 003/367] add bert script to master --- example/Bert_NEZHA/config.py | 55 ---------------- example/Bert_NEZHA_cnwiki/config.py | 57 ++++++++++++++++ .../main.py => Bert_NEZHA_cnwiki/train.py} | 66 +++++++------------ 3 files changed, 82 insertions(+), 96 deletions(-) delete mode 100644 example/Bert_NEZHA/config.py create mode 100644 example/Bert_NEZHA_cnwiki/config.py rename example/{Bert_NEZHA/main.py => Bert_NEZHA_cnwiki/train.py} (57%) diff --git a/example/Bert_NEZHA/config.py b/example/Bert_NEZHA/config.py deleted file mode 100644 index 2f3b22fe50..0000000000 --- a/example/Bert_NEZHA/config.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# 
you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -""" -network config setting, will be used in main.py -""" - -from easydict import EasyDict as edict -import mindspore.common.dtype as mstype -from mindspore.model_zoo.Bert_NEZHA import BertConfig -bert_cfg = edict({ - 'epoch_size': 10, - 'num_warmup_steps': 0, - 'start_learning_rate': 1e-4, - 'end_learning_rate': 1, - 'decay_steps': 1000, - 'power': 10.0, - 'save_checkpoint_steps': 2000, - 'keep_checkpoint_max': 10, - 'checkpoint_prefix': "checkpoint_bert", - 'DATA_DIR' = "/your/path/examples.tfrecord" - 'SCHEMA_DIR' = "/your/path/datasetSchema.json" - 'bert_config': BertConfig( - batch_size=16, - seq_length=128, - vocab_size=21136, - hidden_size=1024, - num_hidden_layers=24, - num_attention_heads=16, - intermediate_size=4096, - hidden_act="gelu", - hidden_dropout_prob=0.0, - attention_probs_dropout_prob=0.0, - max_position_embeddings=512, - type_vocab_size=2, - initializer_range=0.02, - use_relative_positions=True, - input_mask_from_dataset=True, - token_type_ids_from_dataset=True, - dtype=mstype.float32, - compute_type=mstype.float16, - ) -}) diff --git a/example/Bert_NEZHA_cnwiki/config.py b/example/Bert_NEZHA_cnwiki/config.py new file mode 100644 index 0000000000..a704d9a264 --- /dev/null +++ b/example/Bert_NEZHA_cnwiki/config.py @@ -0,0 +1,57 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +""" +network config setting, will be used in train.py +""" + +from easydict import EasyDict as edict +import mindspore.common.dtype as mstype +from mindspore.model_zoo.Bert_NEZHA import BertConfig +bert_train_cfg = edict({ + 'epoch_size': 10, + 'num_warmup_steps': 0, + 'start_learning_rate': 1e-4, + 'end_learning_rate': 0.0, + 'decay_steps': 1000, + 'power': 10.0, + 'save_checkpoint_steps': 2000, + 'keep_checkpoint_max': 10, + 'checkpoint_prefix': "checkpoint_bert", + # please add your own dataset path + 'DATA_DIR': "/your/path/examples.tfrecord", + # please add your own dataset schema path + 'SCHEMA_DIR': "/your/path/datasetSchema.json" +}) +bert_net_cfg = BertConfig( + batch_size=16, + seq_length=128, + vocab_size=21136, + hidden_size=1024, + num_hidden_layers=24, + num_attention_heads=16, + intermediate_size=4096, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + use_relative_positions=True, + input_mask_from_dataset=True, + token_type_ids_from_dataset=True, + dtype=mstype.float32, + compute_type=mstype.float16, +) diff --git a/example/Bert_NEZHA/main.py b/example/Bert_NEZHA_cnwiki/train.py similarity index 57% rename from example/Bert_NEZHA/main.py rename to example/Bert_NEZHA_cnwiki/train.py index a5500f25a9..87f425e21c 100644 --- a/example/Bert_NEZHA/main.py +++ b/example/Bert_NEZHA_cnwiki/train.py @@ -14,7 +14,8 @@ # 
============================================================================ """ -NEZHA (NEural contextualiZed representation for CHinese lAnguage understanding) is the Chinese pretrained language model currently based on BERT developed by Huawei. +NEZHA (NEural contextualiZed representation for CHinese lAnguage understanding) is the Chinese pretrained language +model currently based on BERT developed by Huawei. 1. Prepare data Following the data preparation as in BERT, run command as below to get dataset for training: python ./create_pretraining_data.py \ @@ -28,35 +29,29 @@ Following the data preparation as in BERT, run command as below to get dataset f --random_seed=12345 \ --dupe_factor=5 2. Pretrain -First, prepare the distributed training environment, then adjust configurations in config.py, finally run main.py. +First, prepare the distributed training environment, then adjust configurations in config.py, finally run train.py. """ import os -import pytest import numpy as np -from numpy import allclose -from config import bert_cfg as cfg -import mindspore.common.dtype as mstype +from config import bert_train_cfg, bert_net_cfg import mindspore.dataset.engine.datasets as de import mindspore._c_dataengine as deMap from mindspore import context from mindspore.common.tensor import Tensor from mindspore.train.model import Model -from mindspore.train.callback import Callback -from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepCell +from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor +from mindspore.model_zoo.Bert_NEZHA import BertNetworkWithLoss, BertTrainOneStepCell from mindspore.nn.optim import Lamb -from mindspore import log as logger _current_dir = os.path.dirname(os.path.realpath(__file__)) -DATA_DIR = [cfg.DATA_DIR] -SCHEMA_DIR = cfg.SCHEMA_DIR -def me_de_train_dataset(batch_size): - """test me de train dataset""" +def create_train_dataset(batch_size): + """create train dataset""" # apply 
repeat operations - repeat_count = cfg.epoch_size - ds = de.StorageDataset(DATA_DIR, SCHEMA_DIR, columns_list=["input_ids", "input_mask", "segment_ids", - "next_sentence_labels", "masked_lm_positions", - "masked_lm_ids", "masked_lm_weights"]) + repeat_count = bert_train_cfg.epoch_size + ds = de.StorageDataset([bert_train_cfg.DATA_DIR], bert_train_cfg.SCHEMA_DIR, + columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", + "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"]) type_cast_op = deMap.TypeCastOp("int32") ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op) ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op) @@ -69,43 +64,32 @@ def me_de_train_dataset(batch_size): ds = ds.repeat(repeat_count) return ds - def weight_variable(shape): """weight variable""" np.random.seed(1) ones = np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32) return Tensor(ones) - -class ModelCallback(Callback): - def __init__(self): - super(ModelCallback, self).__init__() - self.loss_list = [] - - def step_end(self, run_context): - cb_params = run_context.original_args() - self.loss_list.append(cb_params.net_outputs.asnumpy()[0]) - logger.info("epoch: {}, outputs are {}".format(cb_params.cur_epoch_num, str(cb_params.net_outputs))) - -def test_bert_tdt(): - """test bert tdt""" +def train_bert(): + """train bert""" context.set_context(mode=context.GRAPH_MODE) context.set_context(device_target="Ascend") context.set_context(enable_task_sink=True) context.set_context(enable_loop_sink=True) context.set_context(enable_mem_reuse=True) - parallel_callback = ModelCallback() - ds = me_de_train_dataset(cfg.bert_config.batch_size) - config = cfg.bert_config - netwithloss = BertNetworkWithLoss(config, True) - optimizer = Lamb(netwithloss.trainable_params(), decay_steps=cfg.decay_steps, start_learning_rate=cfg.start_learning_rate, - end_learning_rate=cfg.end_learning_rate, power=cfg.power, warmup_steps=cfg.num_warmup_steps, 
decay_filter=lambda x: False) + ds = create_train_dataset(bert_net_cfg.batch_size) + netwithloss = BertNetworkWithLoss(bert_net_cfg, True) + optimizer = Lamb(netwithloss.trainable_params(), decay_steps=bert_train_cfg.decay_steps, + start_learning_rate=bert_train_cfg.start_learning_rate, + end_learning_rate=bert_train_cfg.end_learning_rate, power=bert_train_cfg.power, + warmup_steps=bert_train_cfg.num_warmup_steps, decay_filter=lambda x: False) netwithgrads = BertTrainOneStepCell(netwithloss, optimizer=optimizer) netwithgrads.set_train(True) model = Model(netwithgrads) - config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps, keep_checkpoint_max=cfg.keep_checkpoint_max) - ckpoint_cb = ModelCheckpoint(prefix=cfg.checkpoint_prefix, config=config_ck) - model.train(ds.get_repeat_count(), ds, callbacks=[parallel_callback, ckpoint_cb], dataset_sink_mode=False) + config_ck = CheckpointConfig(save_checkpoint_steps=bert_train_cfg.save_checkpoint_steps, + keep_checkpoint_max=bert_train_cfg.keep_checkpoint_max) + ckpoint_cb = ModelCheckpoint(prefix=bert_train_cfg.checkpoint_prefix, config=config_ck) + model.train(ds.get_repeat_count(), ds, callbacks=[LossMonitor(), ckpoint_cb], dataset_sink_mode=False) if __name__ == '__main__': - test_bert_tdt() + train_bert() From 6c03542eec0bf591b48a9993e530e0ce1e67fc67 Mon Sep 17 00:00:00 2001 From: seatea Date: Mon, 30 Mar 2020 11:46:53 +0800 Subject: [PATCH 004/367] Fix dtype bug for loss_scale and weight_decay. 1.Change dtype of scale to dtype of grad in loss_scale.py; 2.Change dtype of weight_decay to dtype of weight in optimizer.py. 
--- mindspore/nn/optim/optimizer.py | 2 +- mindspore/nn/wrap/loss_scale.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py index e2b0cddb71..cd0ed93a10 100755 --- a/mindspore/nn/optim/optimizer.py +++ b/mindspore/nn/optim/optimizer.py @@ -84,7 +84,7 @@ apply_decay = C.MultitypeFuncGraph("apply_decay") def _tensor_apply_decay(weight_decay, if_apply, weight, gradient): """Get grad with weight_decay.""" if if_apply: - return op_add((gradient, weight * F.scalar_to_array(weight_decay))) + return op_add((gradient, weight * weight_decay)) return gradient diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index f7c686f535..a11c753eda 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -32,7 +32,7 @@ reciprocal = P.Reciprocal() @_grad_scale.register("Tensor", "Tensor") def tensor_grad_scale(scale, grad): - return grad * reciprocal(scale) + return grad * F.cast(reciprocal(scale), F.dtype(grad)) class DynamicLossScaleUpdateCell(Cell): From 34e42bd6f924407608cda54eeffcfbbab1675b4c Mon Sep 17 00:00:00 2001 From: jonyguo Date: Mon, 30 Mar 2020 17:23:44 +0800 Subject: [PATCH 005/367] 1. add more log info for dataset & mindrecord, 2. add two new testcase for MindDataset --- mindspore/dataset/engine/iterators.py | 20 ++++----- mindspore/mindrecord/filewriter.py | 7 ++++ tests/ut/python/dataset/test_minddataset.py | 46 +++++++++++++++++++++ 3 files changed, 63 insertions(+), 10 deletions(-) diff --git a/mindspore/dataset/engine/iterators.py b/mindspore/dataset/engine/iterators.py index 806df64fdc..268a66c0cf 100644 --- a/mindspore/dataset/engine/iterators.py +++ b/mindspore/dataset/engine/iterators.py @@ -66,11 +66,10 @@ def _alter_node(node): class Iterator: """ - General Iterator over a dataset. - - Attributes: - dataset: Dataset to be iterated over + General Iterator over a dataset. 
+ Attributes: + dataset: Dataset to be iterated over """ def __init__(self, dataset): @@ -86,6 +85,7 @@ class Iterator: root = self.__convert_node_postorder(self.dataset) self.depipeline.AssignRootNode(root) self.depipeline.LaunchTreeExec() + self._index = 0 def __is_tree_node(self, node): """Check if a node is tree node.""" @@ -185,10 +185,7 @@ class Iterator: Iterator.__print_local(input_op, level + 1) def print(self): - """ - Print the dataset tree - - """ + """Print the dataset tree""" self.__print_local(self.dataset, 0) def release(self): @@ -202,7 +199,10 @@ class Iterator: def __next__(self): data = self.get_next() if not data: + if self._index == 0: + logger.warning("No records available.") raise StopIteration + self._index += 1 return data def get_output_shapes(self): @@ -234,7 +234,7 @@ class DictIterator(Iterator): def get_next(self): """ - Returns the next record in the dataset as dictionary + Returns the next record in the dataset as dictionary Returns: Dict, the next record in the dataset. @@ -260,7 +260,7 @@ class TupleIterator(Iterator): def get_next(self): """ - Returns the next record in the dataset as a list + Returns the next record in the dataset as a list Returns: List, the next record in the dataset. 
diff --git a/mindspore/mindrecord/filewriter.py b/mindspore/mindrecord/filewriter.py index 0e44b61857..d1471f47cb 100644 --- a/mindspore/mindrecord/filewriter.py +++ b/mindspore/mindrecord/filewriter.py @@ -328,13 +328,20 @@ class FileWriter: self._generator.build() self._generator.write_to_db() + mindrecord_files = [] + index_files = [] # change the file mode to 600 for item in self._paths: if os.path.exists(item): os.chmod(item, stat.S_IRUSR | stat.S_IWUSR) + mindrecord_files.append(item) index_file = item + ".db" if os.path.exists(index_file): os.chmod(index_file, stat.S_IRUSR | stat.S_IWUSR) + index_files.append(index_file) + + logger.info("The list of mindrecord files created are: {}, and the list of index files are: {}".format( + mindrecord_files, index_files)) return ret diff --git a/tests/ut/python/dataset/test_minddataset.py b/tests/ut/python/dataset/test_minddataset.py index 99b31b64ec..8b8cbc807a 100644 --- a/tests/ut/python/dataset/test_minddataset.py +++ b/tests/ut/python/dataset/test_minddataset.py @@ -25,6 +25,7 @@ import mindspore.dataset.transforms.vision.c_transforms as vision import numpy as np import pytest from mindspore._c_dataengine import InterpolationMode +from mindspore.dataset.transforms.vision import Inter from mindspore import log as logger import mindspore.dataset as ds @@ -151,6 +152,51 @@ def test_cv_minddataset_dataset_size(add_and_remove_cv_file): assert data_set.get_dataset_size() == 3 +def test_cv_minddataset_repeat_reshuffle(add_and_remove_cv_file): + """tutorial for cv minddataset.""" + columns_list = ["data", "label"] + num_readers = 4 + data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers) + decode_op = vision.Decode() + data_set = data_set.map(input_columns=["data"], operations=decode_op, num_parallel_workers=2) + resize_op = vision.Resize((32, 32), interpolation=Inter.LINEAR) + data_set = data_set.map(input_columns="data", operations=resize_op, num_parallel_workers=2) + data_set = data_set.batch(2) + 
data_set = data_set.repeat(2) + num_iter = 0 + labels = [] + for item in data_set.create_dict_iterator(): + logger.info("-------------- get dataset size {} -----------------".format(num_iter)) + logger.info("-------------- item[label]: {} ---------------------".format(item["label"])) + logger.info("-------------- item[data]: {} ----------------------".format(item["data"])) + num_iter += 1 + labels.append(item["label"]) + assert num_iter == 10 + logger.info("repeat shuffle: {}".format(labels)) + assert len(labels) == 10 + assert labels[0:5] == labels[0:5] + assert labels[0:5] != labels[5:5] + + +def test_cv_minddataset_batch_size_larger_than_records(add_and_remove_cv_file): + """tutorial for cv minddataset.""" + columns_list = ["data", "label"] + num_readers = 4 + data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers) + decode_op = vision.Decode() + data_set = data_set.map(input_columns=["data"], operations=decode_op, num_parallel_workers=2) + resize_op = vision.Resize((32, 32), interpolation=Inter.LINEAR) + data_set = data_set.map(input_columns="data", operations=resize_op, num_parallel_workers=2) + data_set = data_set.batch(32, drop_remainder=True) + num_iter = 0 + for item in data_set.create_dict_iterator(): + logger.info("-------------- get dataset size {} -----------------".format(num_iter)) + logger.info("-------------- item[label]: {} ---------------------".format(item["label"])) + logger.info("-------------- item[data]: {} ----------------------".format(item["data"])) + num_iter += 1 + assert num_iter == 0 + + def test_cv_minddataset_issue_888(add_and_remove_cv_file): """issue 888 test.""" columns_list = ["data", "label"] From 468e257a146dca74251ad7470e5515c624ffc614 Mon Sep 17 00:00:00 2001 From: shibeiji Date: Mon, 30 Mar 2020 17:55:28 +0800 Subject: [PATCH 006/367] add log for kernel runtime in order to trace performance --- mindspore/ccsrc/device/kernel_runtime.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git 
a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index f320aa9cf6..99f5d491ac 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -50,12 +50,19 @@ bool KernelRuntime::Run(session::KernelGraph *graph) { bool ret = false; auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); + struct timeval start_time, end_time; + (void)gettimeofday(&start_time, nullptr); bool is_task_sink = context_ptr->enable_task_sink(); if (is_task_sink) { ret = RunTask(graph); } else { ret = LaunchKernel(graph); } + (void)gettimeofday(&end_time, nullptr); + const uint64_t kUSecondInSecond = 1000000; + uint64_t cost = kUSecondInSecond * static_cast(end_time.tv_sec - start_time.tv_sec); + cost += static_cast(end_time.tv_usec - start_time.tv_usec); + MS_LOG(INFO) << "Call MS Run Success in " << cost << " us"; return ret; } From 9862dea3cffea33119987882471db9cf35dda458 Mon Sep 17 00:00:00 2001 From: zhaozhenlong Date: Mon, 30 Mar 2020 09:34:34 +0800 Subject: [PATCH 007/367] adapt relu6grad and graphengine modified --- graphengine | 2 +- mindspore/ccsrc/transform/op_declare.cc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/graphengine b/graphengine index 5f763679fa..49351fb73a 160000 --- a/graphengine +++ b/graphengine @@ -1 +1 @@ -Subproject commit 5f763679fa33de1608d07f7651c6f16012b953ea +Subproject commit 49351fb73ac7786b9ed9e807568a1a8e06183693 diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index 78b949c525..82dcd62572 100755 --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -506,9 +506,9 @@ ATTR_MAP(Relu6) = EMPTY_ATTR_MAP; OUTPUT_MAP(Relu6) = {{0, OUTPUT_DESC(activations)}}; // Relu6Grad -INPUT_MAP(Relu6Grad) = {{1, INPUT_DESC(dy)}, {2, INPUT_DESC(y)}}; +INPUT_MAP(Relu6Grad) = {{1, INPUT_DESC(gradients)}, {2, INPUT_DESC(features)}}; ATTR_MAP(Relu6Grad) = 
EMPTY_ATTR_MAP; -OUTPUT_MAP(Relu6Grad) = {{0, OUTPUT_DESC(z)}}; +OUTPUT_MAP(Relu6Grad) = {{0, OUTPUT_DESC(backprops)}}; // ResizeBilinearGrad INPUT_MAP(ResizeBilinearGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(original_image)}}; From 4cbcd8e90736158f54e0fceb87ec9262a0f85acc Mon Sep 17 00:00:00 2001 From: Ziyan Date: Mon, 30 Mar 2020 15:17:05 +0800 Subject: [PATCH 008/367] enable use float type learning rate in lars optimizer --- mindspore/nn/optim/lars.py | 9 ++++++--- tests/ut/python/nn/optim/test_lars.py | 19 ++++++++++++++++++- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/mindspore/nn/optim/lars.py b/mindspore/nn/optim/lars.py index cdfe45de62..a69057215d 100755 --- a/mindspore/nn/optim/lars.py +++ b/mindspore/nn/optim/lars.py @@ -13,12 +13,14 @@ # limitations under the License. # ============================================================================ """lars optimizer""" +from typing import Iterable from mindspore.common import dtype as mstype +from mindspore.common import Tensor from mindspore.common.initializer import initializer +from mindspore.common.parameter import Parameter from mindspore.ops import operations as P from mindspore.ops import composite as C from mindspore.ops import functional as F -from mindspore.common.parameter import Parameter from mindspore.nn.cell import Cell from .optimizer import grad_scale @@ -111,7 +113,8 @@ class LARS(Cell): self.gather = None self.global_step = None self.axis = None - if not isinstance(self.learning_rate, float): + if isinstance(self.learning_rate.default_input, Iterable) or \ + (isinstance(self.learning_rate.default_input, Tensor) and self.learning_rate.default_input.dim() == 1): self.dynamic_lr = True self.assignadd = P.AssignAdd() self.gather = P.GatherV2() @@ -124,7 +127,7 @@ class LARS(Cell): lr = self.gather(self.learning_rate, self.global_step, self.axis) F.control_depend(lr, self.assignadd(self.global_step, 1)) else: - lr = F.scalar_to_array(self.learning_rate) + lr = 
self.learning_rate if self.reciprocal_scale != 1.0: gradients = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), gradients) diff --git a/tests/ut/python/nn/optim/test_lars.py b/tests/ut/python/nn/optim/test_lars.py index 92d218a32b..17bbe69fe6 100644 --- a/tests/ut/python/nn/optim/test_lars.py +++ b/tests/ut/python/nn/optim/test_lars.py @@ -46,7 +46,7 @@ class Net(nn.Cell): return x -def test_lars(): +def test_lars_multi_step_lr(): inputs = Tensor(np.ones([1, 64]).astype(np.float32)) label = Tensor(np.zeros([1, 10]).astype(np.float32)) net = Net() @@ -61,3 +61,20 @@ def test_lars(): net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) _executor.compile(train_network, inputs, label) + + +def test_lars_float_lr(): + inputs = Tensor(np.ones([1, 64]).astype(np.float32)) + label = Tensor(np.zeros([1, 10]).astype(np.float32)) + net = Net() + net.set_train() + loss = nn.SoftmaxCrossEntropyWithLogits() + + lr = 0.1 + SGD = Momentum(net.trainable_params(), lr, 0.9) + optimizer = LARS(SGD, epsilon=1e-08, hyperpara=0.02, decay_filter=lambda x: 'bn' not in x.name, + lars_filter=lambda x: 'bn' not in x.name) + + net_with_loss = WithLossCell(net, loss) + train_network = TrainOneStepCell(net_with_loss, optimizer) + _executor.compile(train_network, inputs, label) \ No newline at end of file From 0da0bdcf40efaa45e8cac01b1730e8b8ec9b934e Mon Sep 17 00:00:00 2001 From: buxue Date: Wed, 25 Mar 2020 20:04:12 +0800 Subject: [PATCH 009/367] Fix bug structure output when there is depend whose first input is constant in outputs --- mindspore/ccsrc/pipeline/pipeline.cc | 81 +++++++++++++++------ tests/ut/python/nn/test_structure_output.py | 32 ++++++-- 2 files changed, 84 insertions(+), 29 deletions(-) diff --git a/mindspore/ccsrc/pipeline/pipeline.cc b/mindspore/ccsrc/pipeline/pipeline.cc index 35336e975b..0c2edfc9c3 100644 --- a/mindspore/ccsrc/pipeline/pipeline.cc +++ b/mindspore/ccsrc/pipeline/pipeline.cc @@ -725,23 +725,15 @@ 
py::object ExecutorPy::Run(const py::tuple& args, const py::object& phase) { return BaseRefToPyData(value); } -py::object StructureOutput(const AbstractBasePtr& output, const py::tuple& data, size_t* count) { - MS_EXCEPTION_IF_NULL(output); +py::object ExtractGeneralCnodeRet(const AbstractBasePtr& cnode_data, const py::tuple& data, size_t* count) { + MS_EXCEPTION_IF_NULL(cnode_data); + if (*count >= data.size()) { + MS_LOG(EXCEPTION) << "The number of elements in the outputs : " << data.size() + << " less than the number of elements required. "; + } - if (!output->isa()) { - ValuePtr value = output->BuildValue(); - if (value != kAnyValue) { - return ValuePtrToPyData(value); - } - if (!output->isa()) { - MS_LOG(EXCEPTION) << "Output can only be tensor except for constants, but got " - << output->BuildValue()->ToString() << "."; - } - if (*count >= data.size()) { - MS_LOG(EXCEPTION) << "The number of elements in the outputs : " << data.size() - << " less than the number of elements required. 
"; - } - auto shape = output->BuildShape(); + if (cnode_data->isa()) { + BaseShapePtr shape = cnode_data->BuildShape(); auto shape_act = shape->cast()->shape(); Tensor tensor_exp = py::cast(data[*count]); if (shape_act != tensor_exp.shape()) { @@ -751,16 +743,58 @@ py::object StructureOutput(const AbstractBasePtr& output, const py::tuple& data, return data[(*count)++]; } - auto tuple_output = output->cast(); - AbstractBasePtrList elements = tuple_output->elements(); - size_t size = elements.size(); + if (!cnode_data->isa()) { + MS_LOG(EXCEPTION) << "The output of operator in the final anf graph could " + << "only be a tensor or a tuple of tensor, but got " << cnode_data->BuildValue()->ToString() + << "."; + } + auto data_tp = cnode_data->cast(); + auto elements = data_tp->elements(); + size_t size = data_tp->size(); py::tuple tp = py::tuple(size); for (size_t i = 0; i < size; i++) { - tp[i] = StructureOutput(elements[i], data, count); + tp[i] = ExtractGeneralCnodeRet(elements[i], data, count); } return std::move(tp); } +py::object StructureOutput(const AnfNodePtr& output_node, const py::tuple& data, size_t* count) { + MS_EXCEPTION_IF_NULL(output_node); + + if (output_node->isa()) { + return ValuePtrToPyData(GetValueNode(output_node)); + } + + if (*count >= data.size()) { + MS_LOG(EXCEPTION) << "The number of elements in the outputs : " << data.size() + << " less than the number of elements required. 
"; + } + if (output_node->isa()) { + return data[(*count)++]; + } + + auto output_c = output_node->cast(); + if (output_c == nullptr) { + MS_LOG(EXCEPTION) << "The final anf graph could only have constant, parameter, and operator, but got " + << output_node->ToString(); + } + + if (output_c->IsApply(prim::kPrimMakeTuple)) { + auto input_list = output_c->inputs(); + size_t size = input_list.size(); + py::tuple tp = py::tuple(size - 1); + for (size_t i = 1; i < size; i++) { + tp[i - 1] = StructureOutput(input_list[i], data, count); + } + return std::move(tp); + } + if (output_c->IsApply(prim::kPrimDepend)) { + return StructureOutput(output_c->input(1), data, count); + } + + return ExtractGeneralCnodeRet(output_c->abstract(), data, count); +} + std::shared_ptr DoExecGraph(const FuncGraphPtr& graph, const std::vector& inputs, const std::string& phase) { std::vector ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW); @@ -806,11 +840,10 @@ std::shared_ptr DoExecGraph(const FuncGraphPtr& graph, const std::ve std::shared_ptr ret = nullptr; #ifdef ENABLE_GE - AnfNodePtr root = graph->get_return(); - MS_EXCEPTION_IF_NULL(root); - AbstractBasePtr output = root->abstract(); + AnfNodePtr output_node = graph->get_return()->input(1); + MS_EXCEPTION_IF_NULL(output_node); size_t count = 0; - py::object oj = StructureOutput(output, outputs, &count); + py::object oj = StructureOutput(output_node, outputs, &count); ret = std::make_shared(oj); #else if (outputs.size() == 1) { diff --git a/tests/ut/python/nn/test_structure_output.py b/tests/ut/python/nn/test_structure_output.py index eb2722878a..f5f6d77a67 100644 --- a/tests/ut/python/nn/test_structure_output.py +++ b/tests/ut/python/nn/test_structure_output.py @@ -236,7 +236,7 @@ def test_soft(): def __init__(self): super(SoftmaxCrossEntropyWithLogitsNet, self).__init__() self.soft = P.SoftmaxCrossEntropyWithLogits() - self.value = (Tensor(np.zeros((2,)).astype(np.float32)), 
Tensor(np.ones((2,)).astype(np.float32))) + self.value = (Tensor(np.zeros((2, 2)).astype(np.float32)), Tensor(np.ones((2, 2)).astype(np.float32))) def construct(self, x, y, z): xx = x + y @@ -246,8 +246,30 @@ def test_soft(): ret = (ret, self.value) return ret - input1 = Tensor(np.zeros((2,)).astype(np.float32)) - input2 = Tensor(np.ones((2,)).astype(np.float32)) - input3 = Tensor((np.ones((2,)) + np.ones((2,))).astype(np.float32)) + input1 = Tensor(np.zeros((2, 2)).astype(np.float32)) + input2 = Tensor(np.ones((2, 2)).astype(np.float32)) + input3 = Tensor((np.ones((2, 2)) + np.ones((2, 2))).astype(np.float32)) net = SoftmaxCrossEntropyWithLogitsNet() - print(net(input1, input2, input3)) + net(input1, input2, input3) + + +def test_const_depend(): + class ConstDepend(Cell): + def __init__(self): + super(ConstDepend, self).__init__() + self.value = (Tensor(np.zeros((2, 3)).astype(np.float32)), Tensor(np.ones((2, 3)).astype(np.float32))) + self.soft = P.SoftmaxCrossEntropyWithLogits() + self.depend = depend + + def construct(self, x, y, z): + ret = x + y + ret = ret * z + ret = self.depend(self.value, ret) + ret = (ret, self.soft(x, y)) + return ret + + input1 = Tensor(np.zeros((2, 2)).astype(np.float32)) + input2 = Tensor(np.ones((2, 2)).astype(np.float32)) + input3 = Tensor((np.ones((2, 2)) + np.ones((2, 2))).astype(np.float32)) + net = ConstDepend() + net(input1, input2, input3) From cab5503280c594b48475c0f9d8bd3d7216933295 Mon Sep 17 00:00:00 2001 From: chenhaozhe Date: Mon, 30 Mar 2020 20:10:56 +0800 Subject: [PATCH 010/367] use find instead of equal to distinguish training graph --- mindspore/ccsrc/pipeline/pipeline.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ccsrc/pipeline/pipeline.cc b/mindspore/ccsrc/pipeline/pipeline.cc index 35336e975b..70ef9a5407 100644 --- a/mindspore/ccsrc/pipeline/pipeline.cc +++ b/mindspore/ccsrc/pipeline/pipeline.cc @@ -1071,7 +1071,7 @@ bool ExecutorPy::AddDFGraph(const py::dict& init_params, const 
std::string& phas } std::string init_graph = "init_subgraph." + net_id; std::string checkpoint_name = "save." + net_id; - if (phase == "train") { + if (phase.find("train") != std::string::npos) { (void)DfGraphManager::GetInstance().AddGraph(phase, convertor.GetComputeGraph(), {{"ge.exec.variable_acc", "1"}}); } else { (void)DfGraphManager::GetInstance().AddGraph(phase, convertor.GetComputeGraph()); From 66714da29f7beb91c725a58afe44a2afc7eb089e Mon Sep 17 00:00:00 2001 From: helloway Date: Mon, 30 Mar 2020 20:17:13 +0800 Subject: [PATCH 011/367] Add issue and PR templates for gitee and github --- .gitee/PULL_REQUEST_TEMPLATE.md | 26 +++++++++++++++ .github/ISSUE_TEMPLATE/RFC.md | 19 +++++++++++ .github/ISSUE_TEMPLATE/bug-report.md | 43 +++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/task-tracking.md | 19 +++++++++++ .github/PULL_REQUEST_TEMPLATE.md | 24 ++++++++++++++ 5 files changed, 131 insertions(+) create mode 100644 .gitee/PULL_REQUEST_TEMPLATE.md create mode 100644 .github/ISSUE_TEMPLATE/RFC.md create mode 100644 .github/ISSUE_TEMPLATE/bug-report.md create mode 100644 .github/ISSUE_TEMPLATE/task-tracking.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/.gitee/PULL_REQUEST_TEMPLATE.md b/.gitee/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..b58f1abbf3 --- /dev/null +++ b/.gitee/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,26 @@ + + +**What type of PR is this?** +> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line: +> +> /kind bug +> /kind task +> /kind feature + + +**What does this PR do / why do we need it**: + + +**Which issue(s) this PR fixes**: + +Fixes # + +**Special notes for your reviewers**: + + diff --git a/.github/ISSUE_TEMPLATE/RFC.md b/.github/ISSUE_TEMPLATE/RFC.md new file mode 100644 index 0000000000..7dd17f56b6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/RFC.md @@ -0,0 +1,19 @@ +--- +name: RFC +about: Use this template for the new feature or 
enhancement +labels: kind/feature or kind/enhancement + +--- + +## Background +- Describe the status of the problem you wish to solve +- Attach the relevant issue if have + +## Introduction +- Describe the general solution, design and/or pseudo-code + +## Trail +| No. | Task Description | Related Issue(URL) | +| --- | ---------------- | ------------------ | +| 1 | | | +| 2 | | | diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000000..2c0260ae2b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,43 @@ +--- +name: Bug Report +about: Use this template for reporting a bug +labels: kind/bug + +--- + + + +## Environment +### Hardware Environment(`Ascend`/`GPU`/`CPU`): +> Uncomment only one ` /device <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line: +> +> `/device ascend`
+> `/device gpu`
+> `/device cpu`
+ +### Software Environment: +- **MindSpore version (source or binary)**: +- **Python version (e.g., Python 3.7.5)**: +- **OS platform and distribution (e.g., Linux Ubuntu 16.04)**: +- **GCC/Compiler version (if compiled from source)**: + +## Describe the current behavior + + +## Describe the expected behavior + + +## Steps to reproduce the issue +1. +2. +3. + +## Related log / screenshot + + +## Special notes for this issue + + diff --git a/.github/ISSUE_TEMPLATE/task-tracking.md b/.github/ISSUE_TEMPLATE/task-tracking.md new file mode 100644 index 0000000000..f2d3d23ae7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/task-tracking.md @@ -0,0 +1,19 @@ +--- +name: Task +about: Use this template for task tracking +labels: kind/task + +--- + +## Task Description + + +## Task Goal + + +## Sub Task +| No. | Task Description | Issue ID | +| --- | ---------------- | -------- | +| 1 | | | +| 2 | | | + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..96800b55f7 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,24 @@ + + +**What type of PR is this?** +> Uncomment only one ` /kind <>` line, hit enter to put that in a new line, and remove leading whitespaces from that line: +> +> `/kind bug`
+> `/kind task`
+> `/kind feature`
+ +**What does this PR do / why do we need it**: + + +**Which issue(s) this PR fixes**: + +Fixes # + +**Special notes for your reviewers**: + From b4d34973bc172df475c199aa6d30e3b42aaacfdd Mon Sep 17 00:00:00 2001 From: lichenever Date: Mon, 30 Mar 2020 11:31:45 +0800 Subject: [PATCH 012/367] fix_cast_bug --- .../ccsrc/parallel/step_auto_parallel.cc | 6 ++-- mindspore/ccsrc/parallel/step_parallel.cc | 21 ++++++++++---- .../parallel/test_element_wise_function.py | 29 +++++++++++++++++++ 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index c3e3f5893e..aee4247755 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -346,6 +346,8 @@ bool IsAutoParallelCareNode(const CNodePtr &cnode) { } OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode) { + MS_EXCEPTION_IF_NULL(prim); + MS_EXCEPTION_IF_NULL(cnode); auto attrs = prim->attrs(); std::vector shape_list = ExtractShape(cnode); if (shape_list.empty()) { @@ -381,8 +383,8 @@ OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr & operator_info->set_outputs_dtype(cnode->Type()); operator_info->set_cnode(cnode); // If no strategy has been configured for this operator, then candidate strategies are generated for - // auto-strategy searching - if (!StrategyFound(attrs)) { + // auto-strategy searching, if this primitive is Cast, we ignore the user-specified strategy + if (!StrategyFound(attrs) || prim->name() == CAST) { // Compute split_flag_list_, indicating which input has batch dimension. 
This is ONLY used for preparation for // BatchParallelInfo operator operator_info->ComputeBatchSplitFlagList(); diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index 927acea705..2d948679d7 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -370,15 +370,12 @@ bool IsParallelCareNode(const CNodePtr& cnode) { if (prim == nullptr) { return false; } - auto attrs = prim->attrs(); if (IsInBlackList(prim)) { MS_LOG(INFO) << "Parallel don't care node: " << prim->name(); return false; } - if ((prim->name() == CAST)) { - if ((!attrs.count(STRATEGY)) && (cnode->operator_info() == nullptr)) { - return false; - } + if ((prim->name() == CAST) && (cnode->operator_info() == nullptr)) { + return false; } return cnode->in_forward_flag(); @@ -648,6 +645,13 @@ LossNodeInfo GetLossNodeInfo(const AnfNodePtr& loss_node) { MS_EXCEPTION_IF_NULL(pre_node); LossNodeInfo node_info; + // return -> cast + auto pre_cnode = pre_node->cast(); + MS_EXCEPTION_IF_NULL(pre_cnode); + auto pre_prim = GetValueNode(pre_cnode->input(0)); + if (pre_prim->name() == CAST && pre_cnode->operator_info() == nullptr) { + pre_node = pre_cnode->input(1); + } // return -> loss if (pre_node == loss_node) { @@ -1943,6 +1947,13 @@ CNodePtr FindLossCNode(const FuncGraphPtr& func_graph) { MS_EXCEPTION_IF_NULL(current_value); PrimitivePtr current_prim = current_value->value()->cast(); MS_EXCEPTION_IF_NULL(current_prim); + // return -> cast + if (current_prim->name() == CAST && pre_cnode->operator_info() == nullptr) { + pre_cnode = pre_cnode->input(1)->cast(); + MS_EXCEPTION_IF_NULL(pre_cnode); + current_prim = GetValueNode(pre_cnode->input(0)); + } + // notice: the GetNext op has not input if (INVALID_LOSS_OPS.find(current_prim->name()) != INVALID_LOSS_OPS.end()) { MS_LOG(INFO) << "The loss is: " << current_prim->name(); diff --git a/tests/ut/python/parallel/test_element_wise_function.py 
b/tests/ut/python/parallel/test_element_wise_function.py index dfcebdc5ab..a917dce9b6 100644 --- a/tests/ut/python/parallel/test_element_wise_function.py +++ b/tests/ut/python/parallel/test_element_wise_function.py @@ -272,3 +272,32 @@ def test_cast_before_mirror3(): y = Tensor(np.ones([32, 64]), dtype=ms.float16) b = Tensor(np.ones([64, 64]), dtype=ms.float32) _executor.compile(net, x, y, b) + + +def test_mul_two_cast(): + class Net(nn.Cell): + def __init__(self, strategy1, strategy2, strategy3): + super().__init__() + self.mul = P.Mul().set_strategy(strategy1) + self.mul2 = P.Mul().set_strategy(strategy2) + self.cast = P.Cast().set_strategy(strategy3) + self.cast2 = P.Cast().set_strategy(strategy3) + + def construct(self, x, y, b): + out = self.mul(x, y) + out = self.mul2(out, b) + out = self.cast(out, ms.int32) + out = self.cast2(out, ms.bool_) + return out + + context.set_auto_parallel_context(device_num=8, global_rank=0) + strategy1 = ((2, 2), (2, 2)) + strategy2 = ((8, 1), (8, 1)) + strategy3 = ((8, 1), ) + net = GradWrap(Net(strategy1, strategy2, strategy3)) + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") + + x = Tensor(np.ones([128, 32]), dtype=ms.float32) + y = Tensor(np.ones([128, 32]), dtype=ms.float32) + b = Tensor(np.ones([128, 32]), dtype=ms.float32) + _executor.compile(net, x, y, b) From 840280e7843ecaf4014bda8cbe4fdf76aa75b614 Mon Sep 17 00:00:00 2001 From: seatea Date: Mon, 30 Mar 2020 12:18:02 +0800 Subject: [PATCH 013/367] Correct the comments for `RandomChoiceWithMask` op. 
--- mindspore/ops/operations/random_ops.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/mindspore/ops/operations/random_ops.py b/mindspore/ops/operations/random_ops.py index c8f59e898d..9ef5b301f9 100644 --- a/mindspore/ops/operations/random_ops.py +++ b/mindspore/ops/operations/random_ops.py @@ -25,20 +25,23 @@ class RandomChoiceWithMask(PrimitiveWithInfer): """ Generates a random samply as index tensor with a mask tensor from a given tensor. - The input must be a tensor of rank >= 2, the first dimension specify the number of sample. - The index tensor and the mask tensor have the same and fixed shape. The index tensor denotes the index - of the nonzero sample, while the mask tensor denotes which element in the index tensor are valid. + The input must be a tensor of rank >= 1. If its rank >= 2, the first dimension specify the number of sample. + The index tensor and the mask tensor have the fixed shapes. The index tensor denotes the index of the nonzero + sample, while the mask tensor denotes which elements in the index tensor are valid. Args: - count (int): Number of items expected to get. Default: 256. - seed (int): Random seed. - seed2 (int): Random seed2. + count (int): Number of items expected to get and the number should be greater than 0. Default: 256. + seed (int): Random seed. Default: 0. + seed2 (int): Random seed2. Default: 0. Inputs: - - **input_x** (Tensor) - The input tensor. + - **input_x** (Tensor[bool]) - The input tensor. Outputs: - Tuple, two tensors, the first one is the index tensor and the other one is the mask tensor. + Two tensors, the first one is the index tensor and the other one is the mask tensor. + + - **index** (Tensor) - The output has shape between 2-D and 5-D. + - **mask** (Tensor) - The output has shape 1-D. 
Examples: >>> rnd_choice_mask = RandomChoiceWithMask() From 080bcda0214c7120d6f06b34b9421587c4e52276 Mon Sep 17 00:00:00 2001 From: Jonathan Yan Date: Mon, 30 Mar 2020 11:02:08 -0400 Subject: [PATCH 014/367] Replace std::cout with MS_LOG in dataset unit test Signed-off-by: Jonathan Yan --- tests/ut/cpp/dataset/arena_test.cc | 2 +- tests/ut/cpp/dataset/btree_test.cc | 2 +- tests/ut/cpp/dataset/cifar_op_test.cc | 12 ++++-- tests/ut/cpp/dataset/circular_pool_test.cc | 6 +-- tests/ut/cpp/dataset/image_folder_op_test.cc | 33 ++++++++++----- tests/ut/cpp/dataset/manifest_op_test.cc | 12 ++++-- tests/ut/cpp/dataset/map_op_test.cc | 4 +- tests/ut/cpp/dataset/memory_pool_test.cc | 4 +- tests/ut/cpp/dataset/mind_record_op_test.cc | 12 +++--- tests/ut/cpp/dataset/mnist_op_test.cc | 6 ++- tests/ut/cpp/dataset/normalize_op_test.cc | 2 +- tests/ut/cpp/dataset/one_hot_op_test.cc | 4 +- tests/ut/cpp/dataset/path_test.cc | 4 +- tests/ut/cpp/dataset/queue_test.cc | 2 +- .../random_crop_decode_resizeOp_test.cc | 4 +- .../cpp/dataset/stand_alone_samplers_test.cc | 2 +- tests/ut/cpp/dataset/status_test.cc | 8 ++-- tests/ut/cpp/dataset/task_manager_test.cc | 2 +- tests/ut/cpp/dataset/tensor_test.cc | 2 +- tests/ut/cpp/dataset/type_cast_op_test.cc | 2 +- .../cpp/mindrecord/ut_shard_operator_test.cc | 42 ++++++++++++------- tests/ut/cpp/runtest.sh | 1 + 22 files changed, 102 insertions(+), 66 deletions(-) diff --git a/tests/ut/cpp/dataset/arena_test.cc b/tests/ut/cpp/dataset/arena_test.cc index 4b5f50c47f..0809c40ee3 100644 --- a/tests/ut/cpp/dataset/arena_test.cc +++ b/tests/ut/cpp/dataset/arena_test.cc @@ -45,5 +45,5 @@ TEST_F(MindDataTestArena, TestALLFunction) { for (int i = 0; i < 1000; i++) { mp->Deallocate(v.at(i)); } - std::cout << *mp; + MS_LOG(DEBUG) << *mp; } diff --git a/tests/ut/cpp/dataset/btree_test.cc b/tests/ut/cpp/dataset/btree_test.cc index 993f8f465f..3e0a867fba 100644 --- a/tests/ut/cpp/dataset/btree_test.cc +++ b/tests/ut/cpp/dataset/btree_test.cc @@ -196,6 +196,6 @@ 
TEST_F(MindDataTestBPlusTree, Test3) { EXPECT_EQ(it.value(), "b"); MS_LOG(INFO) << "Dump all the values using [] operator."; for (uint64_t i = min; i <= max; i++) { - std::cout << ai[i] << std::endl; + MS_LOG(DEBUG) << ai[i] << std::endl; } } diff --git a/tests/ut/cpp/dataset/cifar_op_test.cc b/tests/ut/cpp/dataset/cifar_op_test.cc index 48c332bc13..0cd1db65b5 100644 --- a/tests/ut/cpp/dataset/cifar_op_test.cc +++ b/tests/ut/cpp/dataset/cifar_op_test.cc @@ -81,7 +81,8 @@ TEST_F(MindDataTestCifarOp, TestSequentialSamplerCifar10) { uint32_t label = 0; while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 100); @@ -108,7 +109,8 @@ TEST_F(MindDataTestCifarOp, TestRandomSamplerCifar10) { uint32_t label = 0; while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 12); @@ -133,7 +135,8 @@ TEST_F(MindDataTestCifarOp, TestCifar10NumSample) { uint32_t label = 0; while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 100); @@ -159,8 +162,9 @@ TEST_F(MindDataTestCifarOp, TestSequentialSamplerCifar100) { while (tensor_map.size() != 0) { tensor_map["coarse_label"]->GetItemAt(&coarse, {}); tensor_map["fine_label"]->GetItemAt(&fine, {}); - std::cout << 
"row: " << i++ << "\t" << tensor_map["image"]->shape() << " coarse:" + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << " coarse:" << coarse << " fine:" << fine << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 100); diff --git a/tests/ut/cpp/dataset/circular_pool_test.cc b/tests/ut/cpp/dataset/circular_pool_test.cc index 47ceae5930..c42b08ddcd 100644 --- a/tests/ut/cpp/dataset/circular_pool_test.cc +++ b/tests/ut/cpp/dataset/circular_pool_test.cc @@ -52,7 +52,7 @@ Status TestMem(MindDataTestCircularPool *tp, int32_t num_iterations) { uint64_t new_sz = dist(gen); std::string str = "Allocate " + std::to_string(old_sz) + " bytes of memory and then resize to " + std::to_string(new_sz); - std::cout << str << std::endl; + MS_LOG(DEBUG) << str << std::endl; std::string id = Services::GetUniqueID(); void *p; RETURN_IF_NOT_OK(tp->mp_->Allocate(old_sz, &p)); @@ -76,9 +76,9 @@ TEST_F(MindDataTestCircularPool, TestALLFunction) { vg_.CreateAsyncTask("TestMem", f); } vg_.join_all(); - std::cout << vg_.GetTaskErrorIfAny() << std::endl; + MS_LOG(DEBUG) << vg_.GetTaskErrorIfAny() << std::endl; ASSERT_TRUE(vg_.GetTaskErrorIfAny().IsOk()); CircularPool *cp = dynamic_cast(mp_.get()); - std::cout << *cp << std::endl; + MS_LOG(DEBUG) << *cp << std::endl; } diff --git a/tests/ut/cpp/dataset/image_folder_op_test.cc b/tests/ut/cpp/dataset/image_folder_op_test.cc index 82513a29a5..5b118a629a 100644 --- a/tests/ut/cpp/dataset/image_folder_op_test.cc +++ b/tests/ut/cpp/dataset/image_folder_op_test.cc @@ -102,7 +102,8 @@ TEST_F(MindDataTestImageFolderSampler, TestSequentialImageFolderWithRepeat) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(res[(i % 44) / 11] == label); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } 
EXPECT_TRUE(i == 88); @@ -126,7 +127,8 @@ TEST_F(MindDataTestImageFolderSampler, TestRandomImageFolder) { int32_t label = 0; while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 44); @@ -155,7 +157,8 @@ TEST_F(MindDataTestImageFolderSampler, TestRandomSamplerImageFolder) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(res[i] == label); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 12); @@ -185,8 +188,9 @@ TEST_F(MindDataTestImageFolderSampler, TestSequentialImageFolderWithRepeatBatch) std::shared_ptr label; Create1DTensor(&label, 11, reinterpret_cast(res[i % 4]), DataType::DE_INT32); EXPECT_TRUE((*label) == (*tensor_map["label"])); - std::cout << "row: " << i++ << " " << tensor_map["image"]->shape() << " (*label):" << (*label) + MS_LOG(DEBUG) << "row: " << i << " " << tensor_map["image"]->shape() << " (*label):" << (*label) << " *tensor_map[label]: " << *tensor_map["label"] << std::endl; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 8); @@ -282,7 +286,8 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderClassIndex) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(label == res[i / 11]); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 22); @@ -308,7 +313,8 @@ 
TEST_F(MindDataTestImageFolderSampler, TestDistributedSampler) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_EQ(i % 4, label); - std::cout << "row:" << i++ << "\tlabel:" << label << "\n"; + MS_LOG(DEBUG) << "row:" << i << "\tlabel:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 16); @@ -335,7 +341,8 @@ TEST_F(MindDataTestImageFolderSampler, TestPKSamplerImageFolder) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(res[i] == label); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 12); @@ -360,7 +367,8 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderNumSamples) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(0 == label); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 22); @@ -392,7 +400,8 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderDecode) { EXPECT_TRUE(label == res[i / 11]); EXPECT_TRUE( tensor_map["image"]->shape() == TensorShape({2268, 4032, 3})); // verify shapes are correct after decode - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 20); @@ -442,7 +451,8 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderSharding1) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_EQ(labels[i], label); - std::cout << "row:" << i++ << "\tlabel:" 
<< label << "\n"; + MS_LOG(DEBUG) << "row:" << i << "\tlabel:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 5); @@ -470,7 +480,8 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderSharding2) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_EQ(labels[i], label); - std::cout << "row:" << i++ << "\tlabel:" << label << "\n"; + MS_LOG(DEBUG) << "row:" << i << "\tlabel:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 11); diff --git a/tests/ut/cpp/dataset/manifest_op_test.cc b/tests/ut/cpp/dataset/manifest_op_test.cc index e062335fa6..9e36f8c747 100644 --- a/tests/ut/cpp/dataset/manifest_op_test.cc +++ b/tests/ut/cpp/dataset/manifest_op_test.cc @@ -76,7 +76,8 @@ TEST_F(MindDataTestManifest, TestSequentialManifestWithRepeat) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(res[i] == label); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 4); @@ -134,7 +135,8 @@ TEST_F(MindDataTestManifest, MindDataTestManifestClassIndex) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(label == res[i]); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 2); @@ -159,7 +161,8 @@ TEST_F(MindDataTestManifest, MindDataTestManifestNumSamples) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(0 == label); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << 
tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 4); @@ -184,7 +187,8 @@ TEST_F(MindDataTestManifest, MindDataTestManifestEval) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(0 == label); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 1); diff --git a/tests/ut/cpp/dataset/map_op_test.cc b/tests/ut/cpp/dataset/map_op_test.cc index 746c1e8d5f..271cbbe190 100644 --- a/tests/ut/cpp/dataset/map_op_test.cc +++ b/tests/ut/cpp/dataset/map_op_test.cc @@ -697,7 +697,7 @@ TEST_F(MindDataTestMapOp, ImageFolder_Decode_Repeat_Resize) { std::string result; while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); - std::cout << "row:" << i << "\tlabel:" << label << "\n"; + MS_LOG(DEBUG) << "row:" << i << "\tlabel:" << label << "\n"; EXPECT_TRUE(img_class[(i % 44) / 11] == label); // Dump all the image into string, to be used as a comparison later. 
result.append((char *) tensor_map["image"]->StartAddr(), (int64_t) tensor_map["image"]->Size()); @@ -743,7 +743,7 @@ TEST_F(MindDataTestMapOp, ImageFolder_Decode_Repeat_Resize) { std::string result2; while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); - std::cout << "row:" << i << "\tlabel:" << label << "\n"; + MS_LOG(DEBUG) << "row:" << i << "\tlabel:" << label << "\n"; EXPECT_TRUE(img_class[(i % 44) / 11] == label); result2.append((char *) tensor_map["image"]->StartAddr(), (int64_t) tensor_map["image"]->Size()); di2.GetNextAsMap(&tensor_map); diff --git a/tests/ut/cpp/dataset/memory_pool_test.cc b/tests/ut/cpp/dataset/memory_pool_test.cc index c988a09e89..136f3fe1b8 100644 --- a/tests/ut/cpp/dataset/memory_pool_test.cc +++ b/tests/ut/cpp/dataset/memory_pool_test.cc @@ -35,7 +35,7 @@ class MindDataTestMemoryPool : public UT::Common { }; TEST_F(MindDataTestMemoryPool, DumpPoolInfo) { - std::cout << *(std::dynamic_pointer_cast(mp_)) << std::endl; + MS_LOG(DEBUG) << *(std::dynamic_pointer_cast(mp_)) << std::endl; } TEST_F(MindDataTestMemoryPool, TestOperator1) { @@ -72,5 +72,5 @@ TEST_F(MindDataTestMemoryPool, TestAllocator) { std::shared_ptr obj_a = std::allocate_shared(alloc, 3); int v = obj_a->val_a(); ASSERT_EQ(v, 3); - std::cout << *(std::dynamic_pointer_cast(mp_)) << std::endl; + MS_LOG(DEBUG) << *(std::dynamic_pointer_cast(mp_)) << std::endl; } diff --git a/tests/ut/cpp/dataset/mind_record_op_test.cc b/tests/ut/cpp/dataset/mind_record_op_test.cc index 9664b66ecb..abe7faef14 100644 --- a/tests/ut/cpp/dataset/mind_record_op_test.cc +++ b/tests/ut/cpp/dataset/mind_record_op_test.cc @@ -69,7 +69,7 @@ TEST_F(MindDataTestMindRecordOp, TestMindRecordBasic) { rc = builder.Build(&my_mindrecord_op); ASSERT_TRUE(rc.IsOk()); - std::cout << (*my_mindrecord_op); + MS_LOG(DEBUG) << (*my_mindrecord_op); my_tree->AssociateNode(my_mindrecord_op); @@ -140,7 +140,7 @@ TEST_F(MindDataTestMindRecordOp, TestMindRecordSample) { rc = 
builder.Build(&my_mindrecord_op); ASSERT_TRUE(rc.IsOk()); - std::cout << (*my_mindrecord_op); + MS_LOG(DEBUG) << (*my_mindrecord_op); my_tree->AssociateNode(my_mindrecord_op); @@ -211,7 +211,7 @@ TEST_F(MindDataTestMindRecordOp, TestMindRecordShuffle) { rc = builder.Build(&my_mindrecord_op); ASSERT_TRUE(rc.IsOk()); - std::cout << (*my_mindrecord_op); + MS_LOG(DEBUG) << (*my_mindrecord_op); my_tree->AssociateNode(my_mindrecord_op); @@ -285,7 +285,7 @@ TEST_F(MindDataTestMindRecordOp, TestMindRecordCategory) { rc = builder.Build(&my_mindrecord_op); ASSERT_TRUE(rc.IsOk()); - std::cout << (*my_mindrecord_op); + MS_LOG(DEBUG) << (*my_mindrecord_op); my_tree->AssociateNode(my_mindrecord_op); @@ -352,7 +352,7 @@ TEST_F(MindDataTestMindRecordOp, TestMindRecordRepeat) { rc = builder.Build(&my_mindrecord_op); ASSERT_TRUE(rc.IsOk()); - std::cout << (*my_mindrecord_op); + MS_LOG(DEBUG) << (*my_mindrecord_op); rc = my_tree->AssociateNode(my_mindrecord_op); EXPECT_TRUE(rc.IsOk()); @@ -434,7 +434,7 @@ TEST_F(MindDataTestMindRecordOp, TestMindRecordBlockReaderRepeat) { rc = builder.Build(&my_mindrecord_op); ASSERT_TRUE(rc.IsOk()); - std::cout << (*my_mindrecord_op); + MS_LOG(DEBUG) << (*my_mindrecord_op); rc = my_tree->AssociateNode(my_mindrecord_op); EXPECT_TRUE(rc.IsOk()); diff --git a/tests/ut/cpp/dataset/mnist_op_test.cc b/tests/ut/cpp/dataset/mnist_op_test.cc index 3d5c4b05ca..2733597b35 100644 --- a/tests/ut/cpp/dataset/mnist_op_test.cc +++ b/tests/ut/cpp/dataset/mnist_op_test.cc @@ -91,7 +91,8 @@ TEST_F(MindDataTestMnistSampler, TestSequentialMnistWithRepeat) { while (tensor_map.size() != 0) { tensor_map["label"]->GetItemAt(&label, {}); EXPECT_TRUE(res[i % 10] == label); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << label << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 20); @@ -120,7 +121,8 @@ 
TEST_F(MindDataTestMnistSampler, TestSequentialImageFolderWithRepeatBatch) { std::shared_ptr label; Create1DTensor(&label, 5, reinterpret_cast(res[i % 4])); EXPECT_TRUE((*label) == (*tensor_map["label"])); - std::cout << "row: " << i++ << "\t" << tensor_map["image"]->shape() << "label:" << *tensor_map["label"] << "\n"; + MS_LOG(DEBUG) << "row: " << i << "\t" << tensor_map["image"]->shape() << "label:" << *tensor_map["label"] << "\n"; + i++; di.GetNextAsMap(&tensor_map); } EXPECT_TRUE(i == 4); diff --git a/tests/ut/cpp/dataset/normalize_op_test.cc b/tests/ut/cpp/dataset/normalize_op_test.cc index 373669a473..05ac3f6289 100644 --- a/tests/ut/cpp/dataset/normalize_op_test.cc +++ b/tests/ut/cpp/dataset/normalize_op_test.cc @@ -51,7 +51,7 @@ TEST_F(MindDataTestNormalizeOP, TestOp) { cv::Mat cv_output_image; cv_output_image = p->mat(); - std::cout << "Storing output file to : " << output_filename << std::endl; + MS_LOG(DEBUG) << "Storing output file to : " << output_filename << std::endl; cv::FileStorage file(output_filename, cv::FileStorage::WRITE); file << "imageData" << cv_output_image; } diff --git a/tests/ut/cpp/dataset/one_hot_op_test.cc b/tests/ut/cpp/dataset/one_hot_op_test.cc index 3928aeffd4..4b8bbc1bdd 100644 --- a/tests/ut/cpp/dataset/one_hot_op_test.cc +++ b/tests/ut/cpp/dataset/one_hot_op_test.cc @@ -47,8 +47,8 @@ TEST_F(MindDataTestOneHotOp, TestOp) { EXPECT_TRUE(s.IsOk()); ASSERT_TRUE(output->shape() == expected->shape()); ASSERT_TRUE(output->type() == expected->type()); - std::cout << *output << std::endl; - std::cout << *expected << std::endl; + MS_LOG(DEBUG) << *output << std::endl; + MS_LOG(DEBUG) << *expected << std::endl; ASSERT_TRUE(*output == *expected); MS_LOG(INFO) << "MindDataTestOneHotOp end."; diff --git a/tests/ut/cpp/dataset/path_test.cc b/tests/ut/cpp/dataset/path_test.cc index 7bd2abe7e5..3839d659aa 100644 --- a/tests/ut/cpp/dataset/path_test.cc +++ b/tests/ut/cpp/dataset/path_test.cc @@ -38,7 +38,7 @@ TEST_F(MindDataTestPath, Test1) { 
int i = 0; while (dir_it->hasNext()) { Path v = dir_it->next(); - std::cout << v.toString() << "\n"; + MS_LOG(DEBUG) << v.toString() << "\n"; i++; if (i == 10) { break; @@ -46,7 +46,7 @@ TEST_F(MindDataTestPath, Test1) { } // Test extension. Path g("file.jpeg"); - std::cout << g.Extension() << "\n"; + MS_LOG(DEBUG) << g.Extension() << "\n"; ASSERT_EQ(g.Extension(), ".jpeg"); } diff --git a/tests/ut/cpp/dataset/queue_test.cc b/tests/ut/cpp/dataset/queue_test.cc index 37560e5dfb..00366fcafd 100644 --- a/tests/ut/cpp/dataset/queue_test.cc +++ b/tests/ut/cpp/dataset/queue_test.cc @@ -44,7 +44,7 @@ class RefCount { explicit RefCount(int x) : v_(std::make_shared(x)) {} explicit RefCount(const RefCount &o) : v_(o.v_) {} ~RefCount() { - std::cout << "Destructor of RefCount called" << std::endl; + MS_LOG(DEBUG) << "Destructor of RefCount called" << std::endl; gRefCountDestructorCalled++; } RefCount& operator=(const RefCount &o) { diff --git a/tests/ut/cpp/dataset/random_crop_decode_resizeOp_test.cc b/tests/ut/cpp/dataset/random_crop_decode_resizeOp_test.cc index a4c4fbfdff..facd35c4f7 100644 --- a/tests/ut/cpp/dataset/random_crop_decode_resizeOp_test.cc +++ b/tests/ut/cpp/dataset/random_crop_decode_resizeOp_test.cc @@ -78,7 +78,7 @@ TEST_F(MindDataTestRandomCropDecodeResizeOp, TestOp2) { } else { mse = mse_sum; } - std::cout << "mse: " << mse << std::endl; + MS_LOG(DEBUG) << "mse: " << mse << std::endl; } MS_LOG(INFO) << "MindDataTestRandomCropDecodeResizeOp end!"; } @@ -150,7 +150,7 @@ TEST_F(MindDataTestRandomCropDecodeResizeOp, TestOp1) { } mse = (count == 0) ? 
mse_sum : static_cast(mse_sum) / count; - std::cout << "mse: " << mse << std::endl; + MS_LOG(DEBUG) << "mse: " << mse << std::endl; } MS_LOG(INFO) << "MindDataTestRandomCropDecodeResizeOp end!"; } diff --git a/tests/ut/cpp/dataset/stand_alone_samplers_test.cc b/tests/ut/cpp/dataset/stand_alone_samplers_test.cc index 2626add22e..c686a9486b 100644 --- a/tests/ut/cpp/dataset/stand_alone_samplers_test.cc +++ b/tests/ut/cpp/dataset/stand_alone_samplers_test.cc @@ -78,7 +78,7 @@ TEST_F(MindDataTestStandAloneSampler, TestDistributedSampler) { sampler->Init(&mock); sampler->GetNextBuffer(&db); db->GetTensor(&tensor, 0, 0); - std::cout << (*tensor); + MS_LOG(DEBUG) << (*tensor); if(i < 3) { // This is added due to std::shuffle() EXPECT_TRUE((*tensor) == (*row[i])); } diff --git a/tests/ut/cpp/dataset/status_test.cc b/tests/ut/cpp/dataset/status_test.cc index 835e6303b0..30bbd1e909 100644 --- a/tests/ut/cpp/dataset/status_test.cc +++ b/tests/ut/cpp/dataset/status_test.cc @@ -43,19 +43,19 @@ TEST_F(MindDataTestStatus, Test1) { Status rc; ASSERT_TRUE(rc.IsOk()); Status err1(StatusCode::kOutOfMemory, __LINE__, __FILE__); - std::cout << err1; + MS_LOG(DEBUG) << err1; ASSERT_TRUE(err1.IsOutofMemory()); ASSERT_TRUE(err1.IsError()); Status err2(StatusCode::kUnexpectedError, __LINE__, __FILE__, "Oops"); - std::cout << err2; + MS_LOG(DEBUG) << err2; } TEST_F(MindDataTestStatus, Test2) { Status rc = f1(); - std::cout << rc; + MS_LOG(DEBUG) << rc; } TEST_F(MindDataTestStatus, Test3) { Status rc = f3(); - std::cout << rc; + MS_LOG(DEBUG) << rc; } diff --git a/tests/ut/cpp/dataset/task_manager_test.cc b/tests/ut/cpp/dataset/task_manager_test.cc index 182eee1882..c6fccd51cb 100644 --- a/tests/ut/cpp/dataset/task_manager_test.cc +++ b/tests/ut/cpp/dataset/task_manager_test.cc @@ -36,7 +36,7 @@ Status f(TaskGroup &vg){ RETURN_IF_NOT_OK(vg.CreateAsyncTask("Infinity", [&]() -> Status { TaskManager::FindMe()->Post(); int a = v.fetch_add(1); - std::cout << a << std::endl; + MS_LOG(DEBUG) << a 
<< std::endl; return f(vg); })); } diff --git a/tests/ut/cpp/dataset/tensor_test.cc b/tests/ut/cpp/dataset/tensor_test.cc index 6686be15be..7437b3d942 100644 --- a/tests/ut/cpp/dataset/tensor_test.cc +++ b/tests/ut/cpp/dataset/tensor_test.cc @@ -311,7 +311,7 @@ TEST_F(MindDataTestTensorDE, CVTensorAs) { m = 2 * m; ASSERT_EQ(ctv->StartAddr(), addr); ASSERT_TRUE(*t2 == *ctv); - std::cout << *t2 << std::endl << *ctv; + MS_LOG(DEBUG) << *t2 << std::endl << *ctv; } TEST_F(MindDataTestTensorDE, CVTensorMatSlice) { diff --git a/tests/ut/cpp/dataset/type_cast_op_test.cc b/tests/ut/cpp/dataset/type_cast_op_test.cc index 0bd1e90eae..f5fa035579 100644 --- a/tests/ut/cpp/dataset/type_cast_op_test.cc +++ b/tests/ut/cpp/dataset/type_cast_op_test.cc @@ -54,7 +54,7 @@ void testCast(std::vector values, const DataType &from, const DataType &to EXPECT_TRUE(op->Compute(t, &output)); ASSERT_TRUE(t->shape() == output->shape()); ASSERT_TRUE(DataType(to)==output->type()); - std::cout << *output << std::endl; + MS_LOG(DEBUG) << *output << std::endl; auto out = output->begin(); auto v = values.begin(); for (; out != output->end(); out++, v++) { diff --git a/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc b/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc index acea1cefae..46ea1712b2 100644 --- a/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc +++ b/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc @@ -58,7 +58,8 @@ TEST_F(TestShardOperator, TestShardSampleBasic) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); + i++; } dataset.Finish(); ASSERT_TRUE(i <= kSampleCount); @@ -83,7 +84,8 @@ TEST_F(TestShardOperator, TestShardSampleWrongNumber) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " 
<< common::SafeCStr((std::get<1>(x[0]))["file_name"]); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); + i++; } dataset.Finish(); ASSERT_TRUE(i <= 5); @@ -108,7 +110,8 @@ TEST_F(TestShardOperator, TestShardSampleRatio) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); + i++; } dataset.Finish(); ASSERT_TRUE(i <= 10); @@ -137,7 +140,8 @@ TEST_F(TestShardOperator, TestShardSamplePartition) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); + i++; } dataset.Finish(); ASSERT_TRUE(i <= 10); @@ -166,8 +170,9 @@ TEST_F(TestShardOperator, TestShardCategory) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); @@ -194,8 +199,9 @@ TEST_F(TestShardOperator, TestShardShuffle) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; } dataset.Finish(); } @@ -218,8 +224,9 @@ TEST_F(TestShardOperator, 
TestShardSampleShuffle) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; } dataset.Finish(); ASSERT_LE(i, 35); @@ -244,8 +251,9 @@ TEST_F(TestShardOperator, TestShardShuffleSample) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; } dataset.Finish(); ASSERT_TRUE(i <= kSampleSize); @@ -270,8 +278,9 @@ TEST_F(TestShardOperator, TestShardSampleShuffleSample) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; } dataset.Finish(); ASSERT_LE(i, 35); @@ -298,8 +307,9 @@ TEST_F(TestShardOperator, TestShardShuffleCompare) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; auto y = compare_dataset.GetNext(); if ((std::get<1>(x[0]))["file_name"] != (std::get<1>(y[0]))["file_name"]) different = true; @@ -332,8 +342,9 @@ TEST_F(TestShardOperator, 
TestShardCategoryShuffle1) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); category_no++; @@ -365,8 +376,9 @@ TEST_F(TestShardOperator, TestShardCategoryShuffle2) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); category_no++; category_no %= static_cast(categories.size()); @@ -398,8 +410,9 @@ TEST_F(TestShardOperator, TestShardCategorySample) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); category_no++; @@ -435,8 +448,9 @@ TEST_F(TestShardOperator, TestShardCategorySampleShuffle) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i++ << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << ", label: " << 
common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); category_no++; diff --git a/tests/ut/cpp/runtest.sh b/tests/ut/cpp/runtest.sh index a50e1d4933..283d6bc639 100755 --- a/tests/ut/cpp/runtest.sh +++ b/tests/ut/cpp/runtest.sh @@ -28,6 +28,7 @@ cd ${BUILD_PATH}/mindspore/tests/ut/cpp export LD_LIBRARY_PATH=${BUILD_PATH}/mindspore/googletest/googlemock/gtest:${PROJECT_PATH}/mindspore:${PROJECT_PATH}/mindspore/lib:$LD_LIBRARY_PATH export PYTHONPATH=${PROJECT_PATH}/tests/ut/cpp/python_input:$PYTHONPATH:${PROJECT_PATH} +export GLOG_v=2 ## prepare data for dataset & mindrecord cp -fr $PROJECT_PATH/tests/ut/data ${PROJECT_PATH}/build/mindspore/tests/ut/cpp/ From 9b2dabccfe11d43496c9760fbc0ea32382518160 Mon Sep 17 00:00:00 2001 From: cathwong Date: Tue, 31 Mar 2020 04:37:59 +0800 Subject: [PATCH 015/367] update CONTRIBUTING.md. typo fix. --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 591beaef82..85fee704c2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -78,7 +78,7 @@ Please follow this style to make MindSpore easy to review, maintain and develop. * Pull a request to MindSpore repository - In the last step, your need to pull a compare request between your new branch and MindSpore `master` branch. After finishing the pull request, the Jekins CI will be automatically set up for building test. + In the last step, your need to pull a compare request between your new branch and MindSpore `master` branch. After finishing the pull request, the Jenkins CI will be automatically set up for building test. 
### Report issues From 8c88b39da1328fe9054209891003ea7916adc6bc Mon Sep 17 00:00:00 2001 From: qianlong Date: Mon, 30 Mar 2020 20:16:45 +0800 Subject: [PATCH 016/367] Optimize the execution time of test case test_rgb_hsv.py --- tests/ut/python/dataset/test_rgb_hsv.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/ut/python/dataset/test_rgb_hsv.py b/tests/ut/python/dataset/test_rgb_hsv.py index e887acb01d..14ab9ddc2d 100644 --- a/tests/ut/python/dataset/test_rgb_hsv.py +++ b/tests/ut/python/dataset/test_rgb_hsv.py @@ -133,6 +133,7 @@ def test_rgb_hsv_pipeline(): # First dataset transforms1 = [ vision.Decode(), + vision.Resize([64, 64]), vision.ToTensor() ] transforms1 = vision.ComposeOp(transforms1) @@ -142,6 +143,7 @@ def test_rgb_hsv_pipeline(): # Second dataset transforms2 = [ vision.Decode(), + vision.Resize([64, 64]), vision.ToTensor(), vision.RgbToHsv(), vision.HsvToRgb() From b12e6ff780385759258c070674251ea0f0b99867 Mon Sep 17 00:00:00 2001 From: zhaozhenlong Date: Tue, 31 Mar 2020 09:34:09 +0800 Subject: [PATCH 017/367] add operator diag and diag_part --- mindspore/ccsrc/transform/convert.cc | 6 +- mindspore/ccsrc/transform/op_declare.cc | 10 +++ mindspore/ccsrc/transform/op_declare.h | 4 ++ mindspore/ops/_grad/grad_array_ops.py | 22 +++++++ mindspore/ops/operations/__init__.py | 3 +- mindspore/ops/operations/array_ops.py | 85 +++++++++++++++++++++---- tests/ut/python/ops/test_ops.py | 10 +++ 7 files changed, 125 insertions(+), 15 deletions(-) diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index 74b0695cff..c8c6abea4c 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -178,6 +178,8 @@ const char kNameLARSUpdate[] = "LARSUpdate"; const char kNameRound[] = "Round"; const char kNamePrint[] = "Print"; const char kNameApplyFtrl[] = "ApplyFtrl"; +const char kNameDiag[] = "Diag"; +const char kNameDiagPart[] = "DiagPart"; // -----------------OpAdapter 
initialization-------------- std::unordered_map &DfGraphConvertor::get_adpt_map() { @@ -357,7 +359,9 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameDepthToSpace), ADPT_DESC(DepthToSpace)}, {string(kNameSign), ADPT_DESC(Sign)}, {string(kNameRound), ADPT_DESC(Round)}, - {string(kNameApplyFtrl), ADPT_DESC(ApplyFtrl)}}; + {string(kNameApplyFtrl), ADPT_DESC(ApplyFtrl)}, + {string(kNameDiag), ADPT_DESC(Diag)}, + {string(kNameDiagPart), ADPT_DESC(DiagPart)}}; #ifdef ENABLE_GE adpt_map[string(kNamePrint)] = ADPT_DESC(Print); #endif diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index 82dcd62572..af0ce29f85 100755 --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -1173,6 +1173,16 @@ INPUT_MAP(ApplyFtrl) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(accum)}, {3, INP ATTR_MAP(ApplyFtrl) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; OUTPUT_MAP(ApplyFtrl) = {{0, OUTPUT_DESC(var)}}; +// Diag +INPUT_MAP(Diag) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(Diag) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Diag) = {{0, OUTPUT_DESC(y)}}; + +// DiagPart +INPUT_MAP(DiagPart) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(DiagPart) = EMPTY_ATTR_MAP; +OUTPUT_MAP(DiagPart) = {{0, OUTPUT_DESC(y)}}; + #ifdef ENABLE_GE // Print INPUT_MAP(Print) = EMPTY_INPUT_MAP; diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 03463b978f..59014b8b7d 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -435,6 +435,10 @@ DECLARE_OP_ADAPTER(Round) DECLARE_OP_USE_OUTPUT(Round) DECLARE_OP_ADAPTER(ApplyFtrl) DECLARE_OP_USE_OUTPUT(ApplyFtrl) +DECLARE_OP_ADAPTER(Diag) +DECLARE_OP_USE_OUTPUT(Diag) +DECLARE_OP_ADAPTER(DiagPart) +DECLARE_OP_USE_OUTPUT(DiagPart) #ifdef ENABLE_GE DECLARE_OP_ADAPTER(Print) DECLARE_OP_USE_DYN_INPUT(Print) diff --git a/mindspore/ops/_grad/grad_array_ops.py b/mindspore/ops/_grad/grad_array_ops.py index 
cf6247023e..79841cf27a 100644 --- a/mindspore/ops/_grad/grad_array_ops.py +++ b/mindspore/ops/_grad/grad_array_ops.py @@ -408,3 +408,25 @@ def get_bprop_depth_to_space(self): return (op(dout),) return bprop + + +@bprop_getters.register(P.Diag) +def get_bprop_diag(self): + """Generate bprop for Diag""" + op = P.DiagPart() + + def bprop(x, out, dout): + return (op(dout),) + + return bprop + + +@bprop_getters.register(P.DiagPart) +def get_bprop_diag_part(self): + """Generate bprop for DiagPart""" + op = P.Diag() + + def bprop(x, out, dout): + return (op(dout),) + + return bprop diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index a75b078df8..295e2edaf8 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -20,7 +20,7 @@ A collection of operators to build nerual networks or computing functions. """ from .array_ops import (Argmax, Argmin, Cast, ConcatOffset, Concat, - Diag, DType, ExpandDims, Eye, + Diag, DiagPart, DType, ExpandDims, Eye, Fill, GatherNd, GatherV2, InvertPermutation, IsInstance, IsSubClass, ArgMaxWithValue, OnesLike, ZerosLike, Rank, Reshape, ResizeNearestNeighbor, ArgMinWithValue, @@ -208,6 +208,7 @@ __all__ = [ "Cos", "ACos", "Diag", + "DiagPart", 'Eye', 'Assign', 'AssignAdd', diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index d0d3d5006c..6740f172b4 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -1615,37 +1615,96 @@ class StridedSlice(PrimitiveWithInfer): class Diag(PrimitiveWithInfer): r""" - Extract or construct a diagonal array. + Construct a diagonal tensor with a given diagonal values. - If input is a 2-D tensor, returns the diagonal of the input with the given offset. If - input is a 1-D tensor, returns the array of diagonals. 
If you use this function - to extract the diagonal and want to write to the result array, see the more - detailed documentation for "numpy.diagonal", whether you return a copy or a - view depends on the version of numpy you are using. + Assume `input_x` has dimensions :math:`[D_1,... D_k]`, the output is a tensor of + rank 2k with dimensions :math:`[D_1,..., D_k, D_1,..., D_k]` where: + :math:`output[i_1,..., i_k, i_1,..., i_k] = input_x[i_1,..., i_k]` and 0 everywhere else. Inputs: - - **input_x** (Tensor) - 1-D tensor or 2-D tensor. + - **input_x** (Tensor) - The input tensor. Outputs: Tensor. Examples: + >>> input_x = Tensor([1, 2, 3, 4]) >>> diag = P.Diag() >>> diag(x) + [[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]] """ @prim_attr_register def __init__(self): """init Diag""" - def infer_type(self, x): - args = {"x_dtype": x} - validator.check_subclass('input_x', x, mstype.tensor) - validator.check_type_same(args, mstype.number_type) - return x + def infer_dtype(self, x_type): + validator.check_subclass('input_x', x_type, mstype.tensor) + return x_type + + def infer_shape(self, x_shape): + validator.check("x rank", len(x_shape), "", 1, Rel.GE) + ret_shape = copy.deepcopy(x_shape) + ret_shape = ret_shape + ret_shape + return ret_shape + + def infer_value(self, x): + if x is None: + return None + validator.check("input x rank", len(x.shape()), "", 1) + ret = np.diag(x.asnumpy()) + return Tensor(ret) + + +class DiagPart(PrimitiveWithInfer): + r""" + + Extract the diagonal part from given tensor. + + Assume input has dimensions :math:`[D_1,..., D_k, D_1,..., D_k]`, the output is a tensor + of rank k with dimensions :math:`[D_1,..., D_k]` where: + :math:`output[i_1,..., i_k] = input[i_1,..., i_k, i_1,..., i_k]`. + + Inputs: + - **input_x** (Tensor) - The input Tensor. + + Outputs: + Tensor. 
+ + Examples + >>> input_x = Tensor([[1, 0, 0, 0], + >>> [0, 2, 0, 0], + >>> [0, 0, 3, 0], + >>> [0, 0, 0, 4]]) + >>> diag_part = P.DiagPart() + >>> diag_part(x) + [1, 2, 3, 4] + """ + + @prim_attr_register + def __init__(self): + """init DiagPart""" + + def infer_dtype(self, x_type): + validator.check_subclass('input_x', x_type, mstype.tensor) + return x_type + + def infer_shape(self, x_shape): + if len(x_shape)%2 != 0 or \ + not x_shape: + raise ValueError(f"DiagPart input rank must be non-zero and even, but got rank {len(x_shape)}, " + f"with shapes {x_shape}") + length = len(x_shape) // 2 + ret_shape = x_shape[0:length] + return ret_shape def infer_value(self, x): - validator.check("shape_length", len(x.shape()), "length", [1, 2], Rel.IN) + if x is None: + return None + validator.check("x rank", len(x.shape()), "", 2) ret = np.diag(x.asnumpy()) return Tensor(ret) diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index bfe8075972..e917c12748 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -942,6 +942,16 @@ test_case_array_ops = [ Tensor(np.array([1], np.float32)), Tensor(np.array([1], np.float32)))], 'desc_bprop': [[3,]]}), + ('Diag', { + 'block': P.Diag(), + 'desc_inputs': [[4]], + 'desc_bprop': [[4, 4]], + }), + ('DiagPart', { + 'block': P.DiagPart(), + 'desc_inputs': [[4, 4]], + 'desc_bprop': [[4]], + }), ] test_case_other_ops = [ From 9d5890d9b99da399b618a6f78556673c3fcc26c9 Mon Sep 17 00:00:00 2001 From: lianliguang Date: Tue, 31 Mar 2020 09:35:44 +0800 Subject: [PATCH 018/367] fix bug of got a error transdata's dest format --- mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc index 6fff8ccd2a..7f11c8f2c7 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc +++ 
b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc @@ -187,10 +187,10 @@ AnfNodePtr InsertTransOpForSingleOutput(const FuncGraphPtr &func_graph, const An return node; } -void GetTransDataInputFormat(const AnfNodePtr &node, std::string *input_format) { +void GetTransDataInputFormat(const AnfNodePtr &node, size_t idx, std::string *input_format) { MS_EXCEPTION_IF_NULL(input_format); if (AnfAlgo::IsRealKernel(node)) { - *input_format = AnfAlgo::GetOutputFormat(node, 0); + *input_format = AnfAlgo::GetOutputFormat(node, idx); } else { *input_format = AnfAlgo::GetPrevNodeOutputFormat(node, 0); } @@ -206,7 +206,7 @@ AnfNodePtr InsertTransOpForMultipleOutput(const FuncGraphPtr &func_graph, const bool padding_flag = false; std::string output_format; - GetTransDataInputFormat(node, &output_format); + GetTransDataInputFormat(node, output_idx, &output_format); if (output_format == kOpFormat_NC1KHKWHWC0) { MS_LOG(EXCEPTION) << "got the hw format" << output_format << " when insert the transdata node " << node->DebugString(); From 976af212e94ba25ad2c339c3923c76c240aad8f5 Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Tue, 31 Mar 2020 10:27:40 +0800 Subject: [PATCH 019/367] =?UTF-8?q?=E5=9B=9E=E9=80=80=20'Pull=20Request=20?= =?UTF-8?q?!17=20:=20[AutoParallel]Fix=20bug=20in=20the=20case=20of=20two?= =?UTF-8?q?=20cast'?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ccsrc/parallel/step_auto_parallel.cc | 6 ++-- mindspore/ccsrc/parallel/step_parallel.cc | 21 ++++---------- .../parallel/test_element_wise_function.py | 29 ------------------- 3 files changed, 7 insertions(+), 49 deletions(-) diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index aee4247755..c3e3f5893e 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -346,8 +346,6 @@ bool IsAutoParallelCareNode(const CNodePtr &cnode) { } OperatorInfoPtr 
CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode) { - MS_EXCEPTION_IF_NULL(prim); - MS_EXCEPTION_IF_NULL(cnode); auto attrs = prim->attrs(); std::vector shape_list = ExtractShape(cnode); if (shape_list.empty()) { @@ -383,8 +381,8 @@ OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr & operator_info->set_outputs_dtype(cnode->Type()); operator_info->set_cnode(cnode); // If no strategy has been configured for this operator, then candidate strategies are generated for - // auto-strategy searching, if this primitive is Cast, we ignore the user-specified strategy - if (!StrategyFound(attrs) || prim->name() == CAST) { + // auto-strategy searching + if (!StrategyFound(attrs)) { // Compute split_flag_list_, indicating which input has batch dimension. This is ONLY used for preparation for // BatchParallelInfo operator operator_info->ComputeBatchSplitFlagList(); diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index 2d948679d7..927acea705 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -370,12 +370,15 @@ bool IsParallelCareNode(const CNodePtr& cnode) { if (prim == nullptr) { return false; } + auto attrs = prim->attrs(); if (IsInBlackList(prim)) { MS_LOG(INFO) << "Parallel don't care node: " << prim->name(); return false; } - if ((prim->name() == CAST) && (cnode->operator_info() == nullptr)) { - return false; + if ((prim->name() == CAST)) { + if ((!attrs.count(STRATEGY)) && (cnode->operator_info() == nullptr)) { + return false; + } } return cnode->in_forward_flag(); @@ -645,13 +648,6 @@ LossNodeInfo GetLossNodeInfo(const AnfNodePtr& loss_node) { MS_EXCEPTION_IF_NULL(pre_node); LossNodeInfo node_info; - // return -> cast - auto pre_cnode = pre_node->cast(); - MS_EXCEPTION_IF_NULL(pre_cnode); - auto pre_prim = GetValueNode(pre_cnode->input(0)); - if (pre_prim->name() == CAST && pre_cnode->operator_info() == nullptr) { - 
pre_node = pre_cnode->input(1); - } // return -> loss if (pre_node == loss_node) { @@ -1947,13 +1943,6 @@ CNodePtr FindLossCNode(const FuncGraphPtr& func_graph) { MS_EXCEPTION_IF_NULL(current_value); PrimitivePtr current_prim = current_value->value()->cast(); MS_EXCEPTION_IF_NULL(current_prim); - // return -> cast - if (current_prim->name() == CAST && pre_cnode->operator_info() == nullptr) { - pre_cnode = pre_cnode->input(1)->cast(); - MS_EXCEPTION_IF_NULL(pre_cnode); - current_prim = GetValueNode(pre_cnode->input(0)); - } - // notice: the GetNext op has not input if (INVALID_LOSS_OPS.find(current_prim->name()) != INVALID_LOSS_OPS.end()) { MS_LOG(INFO) << "The loss is: " << current_prim->name(); diff --git a/tests/ut/python/parallel/test_element_wise_function.py b/tests/ut/python/parallel/test_element_wise_function.py index a917dce9b6..dfcebdc5ab 100644 --- a/tests/ut/python/parallel/test_element_wise_function.py +++ b/tests/ut/python/parallel/test_element_wise_function.py @@ -272,32 +272,3 @@ def test_cast_before_mirror3(): y = Tensor(np.ones([32, 64]), dtype=ms.float16) b = Tensor(np.ones([64, 64]), dtype=ms.float32) _executor.compile(net, x, y, b) - - -def test_mul_two_cast(): - class Net(nn.Cell): - def __init__(self, strategy1, strategy2, strategy3): - super().__init__() - self.mul = P.Mul().set_strategy(strategy1) - self.mul2 = P.Mul().set_strategy(strategy2) - self.cast = P.Cast().set_strategy(strategy3) - self.cast2 = P.Cast().set_strategy(strategy3) - - def construct(self, x, y, b): - out = self.mul(x, y) - out = self.mul2(out, b) - out = self.cast(out, ms.int32) - out = self.cast2(out, ms.bool_) - return out - - context.set_auto_parallel_context(device_num=8, global_rank=0) - strategy1 = ((2, 2), (2, 2)) - strategy2 = ((8, 1), (8, 1)) - strategy3 = ((8, 1), ) - net = GradWrap(Net(strategy1, strategy2, strategy3)) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - - x = Tensor(np.ones([128, 32]), dtype=ms.float32) - y = 
Tensor(np.ones([128, 32]), dtype=ms.float32) - b = Tensor(np.ones([128, 32]), dtype=ms.float32) - _executor.compile(net, x, y, b) From b3a306489d35982fc1d2781a0a0aac7f6b7ce3dc Mon Sep 17 00:00:00 2001 From: lvliang Date: Mon, 30 Mar 2020 22:01:24 +0800 Subject: [PATCH 020/367] auto enbale dynamic mem pool --- .../device/ascend/ascend_kernel_runtime.cc | 44 ++++++------------- .../device/ascend/ascend_memory_allocator.cc | 2 +- .../device/ascend/ascend_memory_allocator.h | 2 +- mindspore/ccsrc/session/session_basic.cc | 2 +- 4 files changed, 17 insertions(+), 33 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc index 7be480e5e3..a7dfc96b2f 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc @@ -239,22 +239,11 @@ DeviceAddressPtr AscendKernelRuntime::CreateDeviceAddress(void *device_ptr, size return std::make_shared(device_ptr, device_size, format, type_id); } -void AscendKernelRuntime::MallocOpMemory(const DeviceAddressPtr address, size_t size, int flag) { - MS_EXCEPTION_IF_NULL(MsContext::GetInstance()); - if (MsContext::GetInstance()->enable_dynamic_mem_pool()) { - auto device_ptr = AscendMemoryAllocator::GetInstance().AllocTensorMem(size); - MS_EXCEPTION_IF_NULL(device_ptr); - address->ptr_ = device_ptr; - address->mem_dynamic_alloc_ = true; - return; - } - if (flag == kStaticMem) { - address->ptr_ = MallocStaticMem(size, false); - } else if (flag == kDynamicMem) { - address->ptr_ = MallocDynamicMem(size, false); - } else { - MS_LOG(EXCEPTION) << "Unknown memory type!"; - } +void AscendKernelRuntime::MallocOpMemory(const DeviceAddressPtr address, size_t size, int) { + auto device_ptr = AscendMemoryAllocator::GetInstance().AllocTensorMem(size); + MS_EXCEPTION_IF_NULL(device_ptr); + address->ptr_ = device_ptr; + address->mem_dynamic_alloc_ = true; } bool AscendKernelRuntime::GenTask(const 
session::KernelGraph *graph) { @@ -488,23 +477,18 @@ bool AscendKernelRuntime::DestroyHccl() { bool AscendKernelRuntime::MallocDeviceMemory() { device_mem_size_ = ASCEND_MEM_SIZE_BYTE; - MS_EXCEPTION_IF_NULL(MsContext::GetInstance()); - if (MsContext::GetInstance()->enable_dynamic_mem_pool()) { - static_mem_offset_ = FloatToSize(device_mem_size_ * GRAPH_INIT_DAVINCI_MEM_RATIO); - device_mem_pool_size_ = FloatToSize(device_mem_size_ * (1 - GRAPH_INIT_DAVINCI_MEM_RATIO)); - auto ret = rtMalloc(reinterpret_cast(&device_mem_pool_base_), device_mem_pool_size_, RT_MEMORY_HBM); - if (ret != RT_ERROR_NONE) { - MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << device_mem_pool_size_ << "] fail, ret[" << ret << "]"; - } - AscendMemoryAllocator::GetInstance().set_device_mem_pool_base(device_mem_pool_base_); - AscendMemoryAllocator::GetInstance().set_device_mem_pool_size(device_mem_pool_size_); - } else { - static_mem_offset_ = device_mem_size_; + static_mem_offset_ = FloatToSize(device_mem_size_ * GRAPH_INIT_ASCEND_MEM_RATIO); + auto ret = rtMalloc(reinterpret_cast(&device_mem_base_), static_mem_offset_, RT_MEMORY_HBM); + if (ret != RT_ERROR_NONE) { + MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << static_mem_offset_ << "] fail, ret[" << ret << "]"; } - auto ret = rtMalloc(reinterpret_cast(&device_mem_base_), device_mem_size_, RT_MEMORY_HBM); + device_mem_pool_size_ = FloatToSize(device_mem_size_ * (1 - GRAPH_INIT_ASCEND_MEM_RATIO)); + ret = rtMalloc(reinterpret_cast(&device_mem_pool_base_), device_mem_pool_size_, RT_MEMORY_HBM); if (ret != RT_ERROR_NONE) { - MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << device_mem_size_ << "] fail, ret[" << ret << "]"; + MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << device_mem_pool_size_ << "] fail, ret[" << ret << "]"; } + AscendMemoryAllocator::GetInstance().set_device_mem_pool_base(device_mem_pool_base_); + 
AscendMemoryAllocator::GetInstance().set_device_mem_pool_size(device_mem_pool_size_); return true; } diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_allocator.cc b/mindspore/ccsrc/device/ascend/ascend_memory_allocator.cc index 06b921f509..08a30a28b7 100644 --- a/mindspore/ccsrc/device/ascend/ascend_memory_allocator.cc +++ b/mindspore/ccsrc/device/ascend/ascend_memory_allocator.cc @@ -26,7 +26,7 @@ const uint64_t MEM_SIZE_BYTE = (MEM_SIZE << 30); AscendMemoryAllocator::AscendMemoryAllocator() { hasMalloc_ = false; - free_mem_size_ = FloatToSize(MEM_SIZE_BYTE * (1 - GRAPH_INIT_DAVINCI_MEM_RATIO)); + free_mem_size_ = FloatToSize(MEM_SIZE_BYTE * (1 - GRAPH_INIT_ASCEND_MEM_RATIO)); total_mem_size_ = free_mem_size_; } diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_allocator.h b/mindspore/ccsrc/device/ascend/ascend_memory_allocator.h index 74cc50834f..8b0f89a9b8 100644 --- a/mindspore/ccsrc/device/ascend/ascend_memory_allocator.h +++ b/mindspore/ccsrc/device/ascend/ascend_memory_allocator.h @@ -24,7 +24,7 @@ namespace mindspore { namespace device { namespace ascend { // The fraction of total ascend memory used to compute the graph. 
-static const float GRAPH_INIT_DAVINCI_MEM_RATIO = 0.8; +static const float GRAPH_INIT_ASCEND_MEM_RATIO = 0.8; class AscendMemoryAllocator : public DynamicMemPoolBestFit { public: diff --git a/mindspore/ccsrc/session/session_basic.cc b/mindspore/ccsrc/session/session_basic.cc index 1d786708a2..be03f54f3c 100644 --- a/mindspore/ccsrc/session/session_basic.cc +++ b/mindspore/ccsrc/session/session_basic.cc @@ -497,7 +497,7 @@ void SessionBasic::LoadInputData(const std::shared_ptr &kernel_grap auto device_address = AnfAlgo::GetMutableOutputAddr(pk_node, 0); bool need_sync = false; if (ms_context->enable_pynative_infer()) { - if (tensor->device_address().get() == nullptr) { + if (tensor->device_address().get() == nullptr || tensor->device_address() != device_address) { need_sync = true; } } else { From 30231eab2f5ae6d7892d5806f5dbd61bcbcbac85 Mon Sep 17 00:00:00 2001 From: lichenever Date: Mon, 30 Mar 2020 21:41:59 +0800 Subject: [PATCH 021/367] fix auto parallel st --- .../st/auto_parallel/onehot_model_parallel.py | 9 +---- .../run_auto_parallel_loss_expand.sh | 3 +- .../run_auto_parallel_resnet50_expand_loss.sh | 2 +- .../run_onehot_model_parallel.sh | 2 +- .../soft_entropy_loss_expand_parallel.py | 40 +++++++------------ tests/st/auto_parallel/test_expand_loss.py | 4 +- .../test_model_parallel_onehot.py | 3 -- 7 files changed, 22 insertions(+), 41 deletions(-) diff --git a/tests/st/auto_parallel/onehot_model_parallel.py b/tests/st/auto_parallel/onehot_model_parallel.py index b0e6f1eb91..14b351c0ee 100644 --- a/tests/st/auto_parallel/onehot_model_parallel.py +++ b/tests/st/auto_parallel/onehot_model_parallel.py @@ -130,9 +130,7 @@ class OneHotFactory: context.reset_auto_parallel_context() assert np.allclose(out_mindspore_single, out_mindspore_parallel, 0.0001, 0.0001) -@pytest.mark.level0 -@pytest.mark.platform_x86_ascend_training -@pytest.mark.env_single + def test_reid_onehot_forward_int32_128_depth1024_model_parallel(): fact = OneHotFactory(batch_size=128, 
classes=1024, @@ -142,9 +140,7 @@ def test_reid_onehot_forward_int32_128_depth1024_model_parallel(): strategy=((1,device_num),(),())) fact.forward_cmp() -@pytest.mark.level0 -@pytest.mark.platform_x86_ascend_training -@pytest.mark.env_single + def test_reid_onehot_forward_int32_1024_depth128_model_parallel(): fact = OneHotFactory(batch_size=1024, classes=128, @@ -153,4 +149,3 @@ def test_reid_onehot_forward_int32_1024_depth128_model_parallel(): axis=-1, strategy=((1,device_num),(),())) fact.forward_cmp() - diff --git a/tests/st/auto_parallel/run_auto_parallel_loss_expand.sh b/tests/st/auto_parallel/run_auto_parallel_loss_expand.sh index 83acc6ad54..71dee820cb 100644 --- a/tests/st/auto_parallel/run_auto_parallel_loss_expand.sh +++ b/tests/st/auto_parallel/run_auto_parallel_loss_expand.sh @@ -18,7 +18,6 @@ BASE_PATH=$(cd "$(dirname $0)"; pwd) CONFIG_PATH=/home/workspace/mindspore_config export DEVICE_NUM=8 export RANK_SIZE=$DEVICE_NUM -ulimit -n 65535 source ${BASE_PATH}/env.sh unset SLOG_PRINT_TO_STDOUT export MINDSPORE_HCCL_CONFIG_PATH=$CONFIG_PATH/hccl/rank_table_${DEVICE_NUM}p.json @@ -27,7 +26,7 @@ process_pid=() for((i=0; i<$DEVICE_NUM; i++)); do rm -rf ${BASE_PATH}/loss_expand${i} mkdir ${BASE_PATH}/loss_expand${i} - cp -r soft_entropy_loss_expand_parallel.py ${BASE_PATH}/loss_expand${i}/ + cp -r ${BASE_PATH}/soft_entropy_loss_expand_parallel.py ${BASE_PATH}/loss_expand${i}/ cd ${BASE_PATH}/loss_expand${i} export RANK_ID=${i} export DEVICE_ID=${i} diff --git a/tests/st/auto_parallel/run_auto_parallel_resnet50_expand_loss.sh b/tests/st/auto_parallel/run_auto_parallel_resnet50_expand_loss.sh index efc1e0c0de..094668ba5c 100644 --- a/tests/st/auto_parallel/run_auto_parallel_resnet50_expand_loss.sh +++ b/tests/st/auto_parallel/run_auto_parallel_resnet50_expand_loss.sh @@ -27,7 +27,7 @@ process_pid=() for((i=0; i<$DEVICE_NUM; i++)); do rm -rf ${BASE_PATH}/resnet50_expand_loss${i} mkdir ${BASE_PATH}/resnet50_expand_loss${i} - cp -r resnet50_expand_loss.py 
${BASE_PATH}/resnet50_expand_loss${i}/ + cp -r ${BASE_PATH}/resnet50_expand_loss.py ${BASE_PATH}/resnet50_expand_loss${i}/ cd ${BASE_PATH}/resnet50_expand_loss${i} export RANK_ID=${i} export DEVICE_ID=${i} diff --git a/tests/st/auto_parallel/run_onehot_model_parallel.sh b/tests/st/auto_parallel/run_onehot_model_parallel.sh index 93229aab4e..8e931c2cdd 100644 --- a/tests/st/auto_parallel/run_onehot_model_parallel.sh +++ b/tests/st/auto_parallel/run_onehot_model_parallel.sh @@ -27,7 +27,7 @@ process_pid=() for((i=0; i<$DEVICE_NUM; i++)); do rm -rf ${BASE_PATH}/onehot_model_parallel${i} mkdir ${BASE_PATH}/onehot_model_parallel${i} - cp -r onehot_model_parallel.py ${BASE_PATH}/onehot_model_parallel${i}/ + cp -r ${BASE_PATH}/onehot_model_parallel.py ${BASE_PATH}/onehot_model_parallel${i}/ cd ${BASE_PATH}/onehot_model_parallel${i} export RANK_ID=${i} export DEVICE_ID=${i} diff --git a/tests/st/auto_parallel/soft_entropy_loss_expand_parallel.py b/tests/st/auto_parallel/soft_entropy_loss_expand_parallel.py index 081c6bd647..17dbe8f304 100644 --- a/tests/st/auto_parallel/soft_entropy_loss_expand_parallel.py +++ b/tests/st/auto_parallel/soft_entropy_loss_expand_parallel.py @@ -118,6 +118,9 @@ class Dataset(): def get_dataset_size(self): return self.length + def get_repeat_count(self): + return self.length + class ModelCallback(Callback): def __init__(self): super(ModelCallback, self).__init__() @@ -177,7 +180,6 @@ class LossFactory(): dataGen = DataGenerator() self.input_full, self.input_part = dataGen.input_data((batch_size, embed)) self.label_full, self.label_part = dataGen.label_data((batch_size,),embed) - self.expect_out = np.array([0.9205861 , 0.9205861 , 0.9205861 , 0.9201946 , 0.91951686, 0.919343]) def single_matmul_trains(self): single_callback = ModelCallback() @@ -187,7 +189,8 @@ class LossFactory(): epoch_size = 6 dataset = Dataset(self.input_full, self.label_full) model.train(epoch_size, dataset, callbacks=single_callback, dataset_sink_mode=False) - 
print("---loss---",single_callback.loss_list) + loss_value = np.array(single_callback.loss_list) + return loss_value def data_parallel_matmul_trains(self): parallel_callback = ModelCallback() @@ -199,7 +202,7 @@ class LossFactory(): dataset = Dataset(self.input_part, self.label_part) model.train(epoch_size, dataset, callbacks=parallel_callback, dataset_sink_mode=False) loss_value = np.array(parallel_callback.loss_list) - assert allclose(loss_value, self.expect_out, 0.00001, 0.00001) + return loss_value def model_parallel_matmul_trains(self): parallel_callback = ModelCallback() @@ -224,7 +227,7 @@ class LossFactory(): dataset = Dataset(self.input_part, self.label_part) model.train(epoch_size, dataset, callbacks=parallel_callback, dataset_sink_mode=False) loss_value = np.array(parallel_callback.loss_list) - assert allclose(loss_value, self.expect_out, 0.00001, 0.00001) + return loss_value def mix_parallel_matmul_trains(self): parallel_callback = ModelCallback() @@ -249,28 +252,13 @@ class LossFactory(): dataset = Dataset(self.input_part, self.label_part) model.train(epoch_size, dataset, callbacks=parallel_callback, dataset_sink_mode=False) loss_value = np.array(parallel_callback.loss_list) - assert allclose(loss_value, self.expect_out, 0.00001, 0.00001) - -@pytest.mark.level0 -@pytest.mark.platform_x86_ascend_training -@pytest.mark.env_single -def test_matmul_loss_data_parallel_trains(): - loss_factory = LossFactory() - context.reset_auto_parallel_context() - loss_factory.data_parallel_matmul_trains() - -@pytest.mark.level0 -@pytest.mark.platform_x86_ascend_training -@pytest.mark.env_single -def test_matmul_loss_model_parallel_trains(): - loss_factory = LossFactory() - context.reset_auto_parallel_context() - loss_factory.model_parallel_matmul_trains() + return loss_value -@pytest.mark.level0 -@pytest.mark.platform_x86_ascend_training -@pytest.mark.env_single -def test_matmul_loss_mix_parallel_trains(): +def test_all_trains(): loss_factory = LossFactory() 
context.reset_auto_parallel_context() - loss_factory.mix_parallel_matmul_trains() + single_loss = loss_factory.single_matmul_trains() + model_parallel_loss = loss_factory.model_parallel_matmul_trains() + mix_parallel_loss = loss_factory.mix_parallel_matmul_trains() + assert allclose(single_loss, model_parallel_loss) + assert allclose(single_loss, mix_parallel_loss) diff --git a/tests/st/auto_parallel/test_expand_loss.py b/tests/st/auto_parallel/test_expand_loss.py index c3ab64395a..786cbff980 100644 --- a/tests/st/auto_parallel/test_expand_loss.py +++ b/tests/st/auto_parallel/test_expand_loss.py @@ -18,7 +18,9 @@ import pytest @pytest.mark.level0 @pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_arm_ascend_training @pytest.mark.env_single def test_expand_loss(): - ret = os.system("sh run_auto_parallel_loss_expand.sh") + sh_path = os.path.split(os.path.realpath(__file__))[0] + ret = os.system(f"sh {sh_path}/run_auto_parallel_loss_expand.sh") assert(ret==0) diff --git a/tests/st/auto_parallel/test_model_parallel_onehot.py b/tests/st/auto_parallel/test_model_parallel_onehot.py index aeebf1b78b..1df7ad1e99 100644 --- a/tests/st/auto_parallel/test_model_parallel_onehot.py +++ b/tests/st/auto_parallel/test_model_parallel_onehot.py @@ -16,9 +16,6 @@ import os import pytest -@pytest.mark.level0 -@pytest.mark.platform_x86_ascend_training -@pytest.mark.env_single def test_expand_loss(): ret = os.system("sh run_onehot_model_parallel.sh") assert(ret==0) From cb29f60bf18ad3ce9c714749c534baa16e64cbcf Mon Sep 17 00:00:00 2001 From: Ting Wang Date: Tue, 31 Mar 2020 12:01:58 +0800 Subject: [PATCH 022/367] fix typo Signed-off-by: Ting Wang --- docs/Automatic-parallel.png | Bin 124010 -> 36199 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/docs/Automatic-parallel.png b/docs/Automatic-parallel.png index abf572d568d35bbb61690b30d5c1d1c66be76f31..f52d8b757d11a07ce99aebf5458d10059ea6dd3b 100644 GIT binary patch literal 36199 
zcmcG$2UJtf6AcQ1y;(zYU{nolOcV^A~SSw^Dr=Rn_&$FMs_mdc7Lv02+PPz*hE->io z+&8^&fgA_Cl4z)aZ~AtgzXsmOd`z|PT__*oMgt!xT{R6fFI=dCUOIQY2z;jX(s}B0 z;R53i(u?e)7$et(3-RZ=_cb2}*lpvP2Cia5Cvi*`9vhjpIv!hn&HwPxHNU8@^~e%= zVAqrz-Bf8Jeissb&CBg_{e!4Fiy?_iQ~%sC)L(h~%P+_}Oz#Hul+0I_rAd%Dg?H6O zo2+E_vzLG7=RDr>e*Z`Or0$pe!TjbX>Jct@{%~Mz5f_qicyf0waA+W~J%8lxA!KME zD`WUX8J_VJa~6Lu`G&6P=DGh^(GA_?{8X9AG~nH;&%qw}-c2+jR5$rXaREK%CTZdA z|MP{#E)~OH+KkFv`cutOq&@$V)1UOHRF7~V&bJ1eX98czJx!IeS9@IP$q5BMjha&7 zNT;I5v84yROSn`@0f(;r(AoB1vrr;9^x<)+n}l`U*9QMxyD=eMx8#-nGz(OIr#Pk& z73z0e{wCEkB8Si`g-N)~uTBE*@T*A${C2zTSKQLIQpuG52+QVxE2EXEPN@CIg_;uG zq>T>biog3$;(O(CE}l-K&NE%4a}-c7G#u0|42N(_)o2P^&Ec~CX0S_#S3 z{&RVi2)xW0q{3Fr%*T$9#9Q1nt8J%_C$#Sd+hehV#BGRJ^I^8oFb=wy*N9~zI*z%v z`Le@8=2&~`;PL}R||^z{RZ#lhmBy- z_hY&z+ch0^{=4&m_!sF1czuDSTSG{MXdi=Q213S8& zeb=XOvf9fqA>XL`FTGIn|&b5 z{K`kxvmK%BNvO>TG)~t!Ja2FFw14VgnSF87$fnU$(#Y;5oQVr1dO^SW%XM9*_^i{r z@4bIFg7Ie*)TZHzjIVNj6tc|Zq&V72qo7Ck`%ernK9b-e^uL;zsrR(7eNK^FW1PxM zjR2$D-d|fskW=D6U*SKKo%6NB)=+WqV9(8DLXP@RT5Ifi2Kcv)ST_IJAllzGeo*n;L6zAl$5=f?pOJ&4LNO&m$@v!ovgoF)t0!4#a6oxKSA;Q2xP3vE+6`F z@%-BO*QaK66H~P>ExfmvL@RzlW)`GhW?fU9ra4#O@h zKPms(HrMNgGBkXJjAfxGn;_tErIOXcV8%pl-wnomVWS*zrdKdKRz@Zs70X%%Pn3RV zYq>^o_XCH{H__Aj)3iFDjUSe+rb<}%f%*6h#AuxC7Xn@r&ar{Z?z0Wv^B9*VZmsp< zs&^f$tAm+7U2C;@5%fm6ABW&B0}*dh<~UKIU5Hr!(Cu~2ud*16A@i?-zGeXjsov@->5jF9Kcn(ql-kp(%3*bMIdenXe2&nht=cPh&IQKyFgIS2`pC0 z-(&7ziH4GgsS)~OudWoaMWsv?8f2E#QLj*&GCIYx)bhg&SsCw6VM@9Yf1Gd+7+zC$ zT;5~$y7G#OoHa`LXRL-;8DF%OA%clr;l6{GJBW`tt~8uGcpb7eS@lgd{EwU^=d*xB zb5OZ_uV|tL`UtU-hX@a+_%fRNT5z?y2E=4Sd&M%fm~NGJmjBqjXvGXh^LenVRl*i z#obmkZuPV9R4PrM(c6c-!&CqxVh|tB){EGc5@I2kBPnm)Tg#i;y96|bE~6}kCj>K7x|dutHkY(tx+ z%BPKKcIvi8DMt-tAA}{ktUqbqeZ!m6Nf5}Tf^#`EdZ)dGFQ~MVUGoFU<2OE(MCk0t z1XIg#a(`GtUl`R106mSi`syM4RVlE{sy@U@BC#pl8dk_+FskHV>ODl^=VpLBM|;QR zio2gl=)bvCdBDoQD*NRqheo=+slzn;!EeJLOK$CkpcMN~TJMVmp}~{Wy04w!GzHqu(y`91^t2c>$V0* zt&H103|cpss5|YT*w0iqCGWoW68M>-aYl{n;ghd7DShaitIn~cem#u!;+dg^yTF1` zM0Uf-NB7JpRMM(+RB?U-d=kPtyzHY<8Hd!p^ 
z3hy&U9p~_9eI_fm{5o-@mZ0eAcgyQFI1j?gdPe(gDis?_F6wrY@w|_WcjdCv+yVb-#UiBvi?vG#_Y(Ku{R+1vyy=Y?UQUN|RC3(^t45LF1K- z@n)a*tyQBGs`a@_sl&{?p7iOtqk!I;#z&WtgAw0F_(m!uBO~@R?Ba(*ETQ8I7%6gp zw{D-W*5qDwf}3*VvTKs%-+ga+-B$T%m*hI!%5Kw%j&lE1r#GD0*l>W{E_q^e^CxnU z4d-S#Bc6G8U3}ie^DwFm@Z^GJKJ4h&fm=`+5*6&Jp&3!6c@$D*g=6StIe|Jng zw>D#<`!`Es%ww|?UPBMcf3OKLU$qR=P0Wnmek=<5%_BV8lsNdQ}x#H_IJmH@cHCg11Yin7m%n zw@2NaQe<2fvCDFlxWaw$D|SKM22Jh^M>lm%l`pR|1@HFWONy;|eCToMCB`+r`my%B zN`r3QcH`lAdF?xSJI+*GBJ6vpdF6BaOKegz2SQH)z$S6$e*+m4I($wVi&Kq`djWXa z6YYB6$LVu%aNL*2!sM`)Un6B6?Msd8b3Jp-{+?3_!%9`CHyah*$lW$+!>ab*M>wkI z28&s11eeLnxd z=0qgmSG;4daWlBB4Q0Djrdw4%H{kdU2f)O}?Kq9ZolFJK@}?Jv(d|ot1!g?&i$3y! zI-$#~bBKK;LBVrAYhp-7E-`N)0$pe!eORgqQiq*qrDHblc-UFC1|4tMsHbXX1K{|h z`%K+OI6CYLva^b}H}SgIB^Em>xQcgpY^idV@q74(ds*}xQfDi|*D%oaUf;Xk4;Pb$ zeTlj!VlZF0bAzv%Ly)+&5>t-~mD6b=WJeJFi3{Furg!b!`>p-revA^Tx0x z_ku1~k(vfXXxQ`Ro&+C@%jh=e-o!@yjUg=)w5H2ru!mEoHsn5pE*7}n&491DycfgM zr7Yr>0)7Zv#5grQZjs*Ys1j`@@4otv;qndu`^$L^b5!n5KwWfjAhn3l;@3b9ye*Pf zcL~qU9XwS7G7Drz{BdmlYS-E{TQ{2Q{W+S2jUmjWtyMYOMt~U#WWh5R>A2#FTa9L|_%*5Kc{F5n28RjG0Skv(Oynj<6-8sV7V>D;JSMFPwDrN7L)WM9v%K#v`*Y&O!8PhQg`*;o{6KYw7k`KSl2s z3QsDM^&DWTcMJP-R~vlR43Ep|zsR^i1onnc*4|TDQU=z%JXpcqqg)>O(|f~>kjIN^ zE>`w`utK)>%fo`Q_`YsIq#?{^8tQ0P`lPI+?fBu!yA6)5mnD}qy!Vz6!kxyPUXJf0 zoU=*L5H(7f)uEK_yYb^!<)!fVj^GyBoxj#e_M{SiANzJ&&7 zmEkS+K^Oa`&qkyt)1G@R@b9hhrE>mRuzaVN!TnYqdoY@Ibl|P7Slkl;GIYysk?tti zGO@pCwcb|xOH102U(RijT2t*!kyuL7f#xmt-cC)rD^{G?Hs*_UE#GgAW}{w%N_Z`( zmg$&8W6?5kfrTez-siEn9QH(fd})kB`{rZs?}2IGAH`=bZ@4K*c0FjypmMI1GG7wP zSNwA9!8>Te6(W2fJdU^=vvobm+Sk~H3i zgieQ??Pt?LEhRC{MPk2!-*WOS+dOm6@_%#w=bRSTY6U+4bcTV0D^7w zwG4m2*fX9iPcf7E{pqWAyV}EIIsCg6ub@2Z#FQJt4eUAi{F$5kj`Y>LZr{9Sqs4B6 zQa0u2aZh32$uQOHJ!)5Qw2A~753BbMlTDdHwGC%;*POTt4VqM^QbFw@Gq3$}%k9Xt7N?@hAbkDEQqgmMQFwApmfG+dwWpftI?p(7)Ey%JzO-hRR z)~&7J)Tn`26Y)3n`>|P67bgi{cm3Tbig0VVCAUEr*FBog?gdoDuzxWJU#H-XgM=5B z&Eq`d0ib_nAoI?@3w0A*1_|^pG1^_6ahS>;y)LVgP;JgAT*+??EHnboL50g()ck5o zN?bffH$g`LJnZ;OOQgJ-bUl+;2qO@5oP*X#m5 
zxn;g;d;F4&9;59%nQfeW10*UZN_A9V1ytq9tN&HB$bU+xAd(wGu&=9LO?_L!uE8ss zjxg(aP&S}yCY^07Fi?_%g3i!I{T(WtmZF@XlB{ve_Drc{p5OWDKB<)J{*@wJD#`R2a@IVQt$d=< zA*E~ZoV2}&TQ~H&@)K~>%ZYMxl#rSDNoNFg{{=!%0+&m$RGOqM=ZgScm@mNCp=l9+p0>iGb&VA;nOU<4>R}IE|uwWW5x3$h9B|;=z*?;FT>PGPt ziBh`$AeMp5r|2zwhjO*Z2%e+&j=y1;0*r9G<39ZugKCFB#{Al}-Z2guET0{3x=1Kr zjL@u9iSKRsOCxlIQLLQUd7#>C!R3EbaDJieq-O>|>PS5mEY7BB(d;lXhARghy~x1j ztj?#XX;tR1F|)T@fC*F8uBDz20^t{stzL`w#tPI;;&>0ID;>44Q=7%=;+u)rmHWiV z2dtB)Nh5C5Y_ulgt$OJB3ifcVA9keE`e~}c+j&aL;N8?W7ro=M>V`+m49M%FF)YG; zZ&mN#GMY`lAQ z1pk6s$m!nf>F5?y_;fJcO>ujx7ttSbj~-DR6zv~19q{QI0HI5lQ-|_L63Wm+trmCb zK@($Slp*#UpNq*M*W&Cz{xFZx^c?+2x(R7F{O-F46MCk9hT6A-L$MrKEZS={m+A0E zcFpgO7HZvP#~k`$hQ`L9MB#4jP${Qr?q{7+}j|B|QtcU0unlbyNJX_!0qU}a^^^gO4&WksU9 zz8$|^VgO9|Z=q}cVAuUVv5Lp;DGBVJ>6LR15P-pXLEp^s2!OVcLfvn>dY%^U>i{v~ zznq}_cf9Au3^0h}0LXcDSe+O$=JL8KOFNOfc$(+cZloiJb~0=L7?qssq}j;FIY=V> zjz>(Rw?{zA_xVLV*Lj>c4McQw-2kwx>&715S^IK=4VbgH>>&8#Z+8O^o>c+S&ng9Y{&|rT1g6mS&^Z+ZSzb!gs9P_hmZy)cX5~wc0E$;0;o?1R>1?h85pFS z|2}_CDPiK0sDzDs^z6xt1MfqZ03eu^s#QBF$sIvg(DHWbPOy=q!{*oVS_uRajRvrE zS9_tn6=z39aCcGNE`Wm0kK)f&@!zqTSV(;v003YgQ9;T)fL=Y_kb>{{L803Dgu5tv*$l)y<0t#wFg_bW$C*j3k_DJN2BxL`%RAcDsGts0M9dh ze4;g|0y2L&{xw6-bHo2`IRG;gg>{#mDl43T5hDn?A;C!gh}56+n=efRkKRqyd)i}lHF(HGYR4Laj@__#MG%h#6sb|P@xQ#8T!(YY+6E*8B zJn2S4Y-ii*EKrDPCggW(8{8!mto=oqQGE1EYUEER;ogH!g6>qBO_#_NB2(GnC`5nd z@`}@J4ac8{YZ25;sc_JzuzPG99xVp-zrNH*-WUV&XFgl>^s}#|K+P?8-GzJOHNH5^ zzBR}Xn_a_&h(kPvgXxv+qD!wutgaQYr}-J)dTeEx>o~OpzNl;U2*@;}()pc#G;cwd z<6-WIdo*``%^1DQyQ6|IWV;j=sx;hCtjK`Sb)^+|ZbvJ2CBFMPEL2LC}ItbIJcYJo04E%r4hyK%WjtCbAq#DK7e{{w$4#7y3v-P}gAcvOLbwZfB zZYSO_AO~WSl>jL~apd2@J#G)n8TZ3}7K_*_R4CMH7Px*HN(bWbAFsc~FT`#l14WW zJxc1ZhS^Hy05mIbGn>g$^!_~A66nLRS6zmyf)EK|QE75>z}N!C00nxPA`qZ55|cjT zPxqI*+d^?Al7}nB*F|3s=`wu6C|nmRV@Px~l?hR@$>(~IvRduWg1#Bp7FL^FJBu12-mpm z=FP&+Zwx7M&asB{`X*y@{_m~y>d5#c#(Q#q#)E1ihF~dk|Lj#bBWZbw%>0S zU@m-^wXbW7F5E%OQLchE=upU&60xn(x!vEx0ogxx7ZeJR3kq{Bfz?|R(1zQ|<&VHl_m7lzqBTDZhAzf 
zKepBA|8}20tcGicI$Gr%%%E&%S?gwweQJT5m+El|&A<5Ng=L*ewq7#ZchXtYbn=Ma zZGAmW%O+S5xlkpUP?Yww$jGxUVS42;Y?ueT6cODLP;Mjbp?;A%D()WB#=$5pfuakO znJBvz5Pg1r{&1}URL^VTV_(m+)D{c&c$=CJ21*tLWsF0ewPCP|tE#C6&F6Qz%lz=1 zeVRSszW64whRJk#9j*PkGvIGLx+;5}P8Pp#l0?CKSWuS<+x;@NRzlSYInq`q1@*M0 z5-k{5OUueoy;zr(=2RkkTx)ij8KeftTmS^ksjxs5u~Ban{QSiEuAw>jJxX>!(Jwu* zIVE3-vXQciRj6ssH$LE>bw$L78=|$WquaKNy702ZheDN$fe}p7*CtR&Pn>BNA zi@hNM@Km~pNw1i!qw6^;&Sh=DH$E8G^lHx`K$wQ9t^QT3-Nb>l{+9ssKaF+&Zc8gb zlHb6MTGhRJ>b*PPT2eLzh~#!)#)IG}j-nxFsOpp1*5LYo#cCkki`fM%MwYmH({uUD z%u72qcM|9$@FSF@9JAq36%mRI0IJvjE8+g%PBx@?oZha4l!*+!Ievcw)Z=ZM6AJ4j z9887E6$8Ep-2leo|MA}cd+{AOMEIW<(c?VVQfg9!g}mb78$R7y+v3_9 zW|x~JszdM-aTP=)#Hw~MD4;O3TZ-T`R_6=&5P07Rf1nyU`ri0&TXEdk;l!Lfat^;G z1U!GhvaasE@@PLomP-`o2^5I4*aG{KDBz|fr{>7xWX~kVLbp8$g9nH1M67xmJmp0? zVyW`xK$d)}75zByL?3p;LI*#M6K3p*K~)%8cyZIwE)hFD0ThL(Q<58?)ueLoQ5dj4 zVCJ+N{gts>(4NHYrFO{7Oo=3LB*Zh>$z2Nh!_kd)^!42@O{AJ3Wf+)df72#j=C9zM zQDk(u6vg0Dzd5WTJ0anLVNe;8Jsa`5xk%Ff1LqpP^8q*dxW8zBV13Q-RrvO92JN`T zFy%^wfR6CO+Uor78A;ZEqwTmrk_E%ybLs=v=ey>w<{XL&TG6WkSz7KE@ZYE5?uk@X zz!fOl4p20#z^lpwk4CghKlQBrOcL}tYy>EWyY{KYyy%8(lGM+1F4?rQ(ZmqDvg-s;=(?}zA+geqT#FV+hk$%O*T13t5$z9}0=uzH^F#4_)C#lrX zs{tW5^JUSBn%h8}pZ9D6Bx_^rzPylOa zdQ0iM0_oY@+I6DbHq$@;5%8h(ObB#^ed{Df+XGC}{+~9(*JdK_y&dkh4*~LwI6z%Y zOiphA#mEQY%~6>A%RY~zlp9-f&Gqz{;T%TSJ^z)iiQP3o7@c-zwcx9NHDy|> z3!|jMQJlV%CBrk#H(P}CmY5%UWEBLyz@H^zk6BsSD^MY!d#Ur9hsR^eH=|N+3U{wx zOUpnmf~Xp;-Wx)n|JIur9bHijGxMQJjU*r3U+!I#G-EnbMp%WqZ98Qf>y&V*b+>$m z zT%H5ynYh0h^kn65;F2)5YMoRkz;SFrUq2Rp>?AOchwjI@?B(2HrK~ws#F~^oa95-T zBkXk|V)SMKwL`SIKoRtiNTPmJ98FSq=5($JG#Fl@&GtIDOOiVp1~OBgsV^@9evOru zr9bA^ZukhSDI8dTzVKEPOpOOo_cw&%FpVrYG_q)|$5FK~j0{y=5Jv!Y{zFcSKm~1# z7B_D5gX9Fh*pg_5r^5JQZ*g7d#dGK~WRRHS{20c@A$nIoVwqx{m$+}JNH~Bq>1TpS z`4^BxR)*Jp?Te8Unw&?%D5(3uj-Pt1KroK2QW?ccZDVc*;mU@ugS1g88bOE|KpQyT zRtsNc6@Gt||IlHU$O(>#gKCC2nO!swi34p$}-) zySL;O$Mbt{kVAS_6#D|W^$^aN!yxS4RZn2gsc>%bHBUrfauE6O5JURQ(%NNebKf_{ zN=rSfom3=Uio!5u*7beEhJbg(3|vU2#2{}9VhB-ZR|B)Ovw#?avm*(KieZKQJj=6f 
zq0KGvJdy924{0bd-ru0#HS4FfbtC#;_BA&K9zNf$iXh(ZB`0)I&YJ`xHWiVVG=o?L zelDU&?2BsmzJ$76x95^#-_@@3$#1V$`Zi`>Kx+|lw!wv^h&TjznhYH!nErh>ZH ztHli`%XQ#^ya66a!ugHf82*qbQ8#7oHhOJCbh2XwM;J5Grvlj3qwM zd^;OVh(kj@dY?*}(O&sT4_e@kjCAyhUsCM3ZLDehPWuK@z18*Y@^kKG9sDoo19ZgI zzw$;9w?buG!s?BCxAgv0AiQKbgeyF1=fhZ}g+H;UMe!-OrsZ-) zITHXPe$=->q3fZC3V!40P+1_LNEhCs879&bQ^X-7E}~-M%!_|2nUAfJ5kGW-CHF*F z1rVI~A^D398Yic`;TT1`-+StbvCL6|tum@l_IRNLeK_$pev|IRPf^X=MmLTBfqyxV zS4d3Y>)Ci`o4Km!<)?0cr=3LRT@m`!+}V5Fayul#Uw#JglJyU3f`_G-M;(=R47Ym< z-WYCVH6U4(}QTy3*q;Wq%LJ^dX8<4L+zMyKR$iamdW8maseFdq;00n-;Rb zChr|`Y#u~=*S(s$tsup%4sNk|c$Q>$A9rKc2*3iVIh}2zT|lMQuL0}09`bO3mj)mt zvVi((<2H_J8sO(xiwv`G&75q{C{(D|dCXabHhTi{>{$I2S40M{#pUk+b=M~(&5(LI z|Jn!`K#<>T0fa}oTBFe#?QNx6O!NmBpb)0PO=zN{tS}8%H24@jHWCP|%m-o@#h!fq z(ss4GJMP*`BzyU*83k`-SWV3IIn4nRx1#h! zMLCK71w96CgXB`-RFew6h6)UjSBK<8B}yzn{Yyrizeg=(S_6F0upStkuKimFYAK&`6=ujZmTh9;r zyGa15#GtWByB9I4Y7)B>p>nlkq7r>%nb;J_==RB|?DK-Hzfl70UFkQpn(8`Xros@J zuY5RA!B_-+Hgwz^zdy6|%d7RM*&gbQf1v_gs+w^ZPatL_-{>bT<#~40Cye^GHa}ls zCZ?i!@y>OrDC$GKOWa>J3+HK$Q1~7HYpBy6g%ZhO(auo9 z2icvf&-V4fRS#nx{{}SKOV>?76j zNOKE@SWbxP*gcrj`y!ouQF4<~BMGC@tTbL=Ir?ujmp=C|^;kEp2@k~5^og*!E(gD{XG zYYv~frx65Y?@t<1Fu=tSjheoS8!3tRf3WY?fxK)Q2r1gr)Qm>IH~^@0m{mpFncMz= z>@chPv6;Gg$X*2R*Csdi%scKIr+-AqKL%X6%Fua_Qt-0=h`NEg;CjsBCVAEQ;0PrT zzR>3V#>2PTWH(bYR&>{X$pKJR(V}A^g`9Q{J)GyCQ3OkHH+(;5um!=YeOK#OyP_!< zCPfx-w3hRg=I17+aW;y9s}C>s9?`Wo3Glfb3)FOE`{WN-hhJA3^|a9LDPLR~QH^?_ zWPd+cN&npSDVTsfR|BUB!AUpwX!>QvM`69!cIxMgOcu7a>}QP?|11=8h3EeC5n>nI z147vSDK!ytK{_T-nVY_B`I9}}U`p^Gb#1;vOkC)V)}U@eubRfEYQ~dieso)#_YFf? 
zd34a9m>KBEDNmybGAf6zD1Dy;EJQ}7Kl^j`4w_hjgve|U5oO@1Fo3;4jPn165OFB` z7VMy>q#C9QYHlkpNhvTItNlnr{V&&C+N&&L8d$c@_ZQ@tT66U4Xbpx^zNFN_-d+LR z3+NA~DZr_Ow$mZ}KXI!BREjhIAfyxy9>q;V4)!~1mZ)ZH_%t9XW@^k)EX_x!2 zFHtR=yFI1nJ$OTJ>NWBVnE!HqIiZ-TgOvU~Kax@0OLqH9qD0q^XM3i8v6UnQaL?3>(dp zGmvygOoI)ifSTQGrEx7sjL;9kN_7{h9TzV#cm`;V9k(^HJg<0IUHhK$)*N#~WgE>I zh%K2-qa!7@DCMn!)moFPkhI&&=CL1FUaTO0rfCjp-LkiBx-`iVT{A;H>7)#>%;cM;94FLUu+wwH~r{xYMb(p(e>_gU5C)cRwn?T|WD zJu_G%PTe>$`lImF)>~HaB1hrn!#^o5#?>Z=gLT3pMidTpQaB1cd!YxfBhiVMFO+&q z%9}+jol4W!cCngewd-4DMFE-Wiz2=bIV2}ehxgHmfBC~vTXToAa|`5(etT&)Q0W^) z8ig#etPJ|ia=YcKtAx{qN7pRI2xjN>X?k^<-9u!Bl4l&n(rB{csRQGAPsr}WC_68M z=+O+#HARO5W_2|4%k^)=`T8$+A@kegK(Yzx!f^2j7}R}Otq;sPle68AG4wPYJh_aK z>WS;foIXDycFG|LShWdVl}=%X^~>9!HnsJ6w0aMO{~FGh{t`I20{6_+#?OJr&q7`< zFm-T?-VDM3%fz5aJMcm1${i&6VLsPN+xHqaWkAmS z%QXzP2{DGLDOVc=!_w@6il>|6cKhuKuQ}FZh&B81q~}8KQ~_B*_R!ad4`q-pm6gR=d;6joBc%pmb{Hyy2u31 z^Ye|;=|KQNiM8%0c#5lGe;Ki9LwRfwasmJ#NCtw&c+kNw;W%i>AHi-Ez8Ny%`dKfz zkhI_lu8J2|wR8=Fk5BE}{mQS))O_p}WCxaBK!#bRIk8&xSf*RVXcgwC|MN z;3x&t{eCI-;2LED9dZbwJO_wP7PgR5}0Fcy6=gUbx%=?yNzghAzdf-|5qc>DU=Bvo_C!iOgvu zo&$clG3Gh%t;T?TCqVPMQV#CDPKA>)noA;$wgjfob*=6h6ajMpVy$S!SdIBLJ>ZY- z5bRbGSYN3MSuQcLccav58Ts&5qMJ)MP`NxTIsXn|HM*5`iqxwHAt}X+(q0AQ_TH)6 zoPTUTu;|k$ms}XG4^=lXyZnP?iw6ix8@G8ir7n`?yr6W$7H8|5fnN-p#La>5f0ldI zGNCLLi$*Vx0}CKc&e&134I&`>c@iI~` zO>-N#kuHe-YPr!p@8A7KTPaa4#|+V}t&0qvB5M0FMy!96*;e)N6Wg`rtE&-r4157l zcaW0XmO=tJGbgl`FQ7zf3rfU=)CYXSDfAqrD?~z1Z3mQXdFU?7Fm6UWq9K^A zsc)In!YJs}d`-l*VP^(jpgoptk{9p`*e3S387kufm0AI%)?90t4UC06fV3T=1aj9P z_mtOri6V4E=Wr+PC?g7RKGCxlP%}p<V+A5J$c4;Br1!cuACflm!dVe^NSuYt)ar0} zCPh1pF9ww^DR^jD5%hiyy_;bRkEf`G!R4$^ve2e! 
z{!`vUBkf;R`gpeFV#fp*>i*$V7*Cck0-yGQ0~6+b^<3a5boA4gnJmf^fwMT3GInracdMuMKzxwHXk$Ff?~KKOii)Yx zr+y-o)zG5;PQ?M`LC2k&JHpXK`-OU+o8n;QNbe}UilO~rM7S}5E3eXlSzh&&sQ~vZ zi>oJz&CKFLLeUeyE1-u}p5fk0#8M0(R-C;bw-X?zV}$vMsLmK{eEBQt84VncI&08ev^}+`%c#fP>eXOtPkt1&6iT`f@iyvlmRtb84xdBxg&T0D&4k>@6`gjYa1#%tH_^WR zs6guKZpfN)Y264{H}P39LC_CZkUC_#O}&h$L*?y*5O)iP{*A>b;GyyQlJM)L`p>Q+ z<<2)1+BpSF8yYbCj6&*o@<=K=Nm0fZAXwN&g6!|K8=xwn;NS>fs);F@zAq6HtydjU zxF;>%saSg;!&FW$eLW}SHr*&e7JB6W!<+(Wd{ot6&d}tmnN0KQaY@(H>asKJOAt^c zdj&1rO3fe77gZ@y=yRVwB5sNhgbmK1FG8m4cpL0X?G4n60HKE=m;hMrIZahSog{L_ z;8sK^h-P#>C92Dyk|Z(!y_=lmMuLl`u-Y@A=uQSs&a_MAe6PzTr+Cm;0jf?2sMQ7> zoV5n+1~VAE_aW6B$Cjc&+oF{GfB|pUyht)vcfvb|;0Eiuevx_*l30;<2G>bu^AH^p z?dq}Z%2EG1^>0n0Wt~WGkxvuy`$~tPADLlKseuEYv2T_71#k&LE22_AY?mJoKDsPWSofXX;1pb!z zu_yDYrU-%R+>J#E=_bRv9qd&vQ<8-75lte=+=pV;Ii3R4`;7V5Kyo`!0IWWBA-Yds zXDL+Yl)tU(US(`TNXqkYsEum0oB+a=UH$qyupH${7zSWGhPa~maDe3NqTK)Epy&k@ z%dHmD@TwiqHfH7C4+-`HN7sk@T?ju*S76vg&o=tFr~?hI;#9Z|YqM~0IOfa8#Hbau zH(QqSKfVHH0drLB5}@681K4P-^+w#=G`=)02#BT}3HpPiKw1KA1Y>uU^_95smTfjl z^hk$5XQj=F3Mq9#c4{*45sGA6Yy?!U0~S_k5e?t(r~KeUKsD$yq7x@|!nHdCK&!T2 z9PqlrJHdKDWGLMg8a>YYWf1h6SAHI3b}hX~^KQGSyLJr_TbltIp^t*hQ%yz5K+qsA z26nFbKzhES#_PQ`ARPl7JXkoA^RJ7C9%S_%(6SgK@ToyFGxOvAEE{JdzR6U0py|Z5 zr2Nr?_wd4QOFcZ*y_;xsr6u&(L8zd)%e!SrJ5=sh;e=&^RphLTgS2zEoO#o{ZJX{ zdXz%-!yI~KQjlEOTiOO*AoB^r?U-Ykm=GgXC8_eX7SOs@ zR>XA0MD>bI*A)FilHD+%BKIT55HTo%^?tV$xQhM{uG6cXh-+6rd2(!FA6mO12tu4@ z;W;P;)m$O3bE>8MWVeYHpvN@7{dZ=c{v}?J{xq}HS1m3QPNBqx(`>+(we;y7Psy3i z#*a19Evq`~QD`$c|6@dWwvR^<@!h1dlIiMt#6DrH{mh~p;GkmL5B&*--arH{f#&N1 z1fVX0m7fo?ri^;}YQClMbh20NiJxsc6nvD|t$s>P%ng`mo;RNb0>s8GlH#hDEO*lm zj9t0SN9>?hGY&cJ;w^7Vq(ZCUFxz4zZh1%>@eHWAZh+{R-?roCb|o2se%rRI{@h>Y z*1D|+-nky@Sq07KDtB@DwrW}oE;2MB7HC>fhdC!78qc!nea=^jle7w(;0-hQEwiW_ zbM=!7>zrG|(vu347Cyl7^AvoU7l|gElTOq?gWCVrb^B64_28%AN`n2J?H4%#OEGT! 
zkCOAd{~!aNGsmCG<@^fbWSaS5Tdn%AQMCw9yp<4Pw4B5SHCi4~Y5Q;=$eMt$6z-+I z&!X-KnBbH1Ke2BaHISZE!1G=su07xv2|x~1IEKA-6#ga!7|`Sy1{~VS+Hk={?f>zS zpZ@fw?(&ZmKPvYd25J#_DwzeqNkRd=z+IOl2l(B&dft-MfBfx9Pd*C52+8Na?Sg(G zG)FS=Yrm9z5V;zV<9$N7aFOz>c z{sBd?%&Qg>AeqiTsqVGVVO&AG6B)<=^FK|3hUA~!u^U*+1}0z0wkB}`j@R|xW&&58 z4xmxuIUxtXN;Vy$Lfw5lfPgyTm>Cb6$kG}lH3qyqY3EY#U&}P%kJt1M0#}hGxH-VVtG-#)%OeIfp;^}gq{;OEX!@|`j=R1Ey_vA~ z=U0jg-~9#64qw0*STY`8(1%?Fy$jcA z+5jKH?Fsnz4ZdQHbEZ3<^EQckZ-;L474|}IO1z>>P5RzhY*thvSuLWo3s@P>dHOk! zOa6PfQ(G$)%CjnlSERqxwm|GkTB)FKaDvLD@P*G9FTkJdqAU*`{ptS=P~g8gbV$dS zF0$`2L37ilmueOtnFZCNNx!nNmouJaRyy1^1lIelUzsz0{U65i^<#x{cl=1%im9bo+Xu@j1#V zy`d@UuXKd|hBQ{(BMR!8HUk%=4v20x@56Lf_)piPYQn*YKmn1s=T)y0GrzxuIDWxD zhm`5H^Ct+Qh%XS+1QRDzVxTe2q8fYnkzujI`L3j0i=w}uJHU4U{D0{+^lj~waSRXu zF&v;uqcQ<9W%(!&gTS1?+@};Bv1s3%CM(n9USP)}>8lrw!Xe~nLlS45i2U_eq(5OW zEj2VY^fLTCZ2pfD<;nR;LsJh?n2uk}qFg*}&?-bXxo}t~A~Za|iSq|dxYFDgE}b>U z^9C7{F4+ZMJG(tfqdxxkhGrlUdgF8#cEpQ=&@{(9bwKUv;0ipN^D=V}qL5y`&4?^v zu^6%R*!f87TlD^Y(XXmkgb4sG)hAuVU5?Jze8_h_%prmmZYLrwo$XPZzX2s9?k%ZZ z33p|dPH|%^mXo6#V|_08F}?-t@iT!y-w)P@&{44RrSsq$78MNqOSRn9knw04$>RG- zaC>0_1e?&NA6LpEBf;sRXy_>dD52Z3-`2Lwv~&htDHZOz-K7N!SjKSnH)Smb6cX~) zUE62+%Y}3+*7l4xeK55FKwNx>Z8wZHA!y&wy;RS-_Q7$v+;N!SIXJ>2rABqKw@jT- zwj|Yt3E6Ka?%IpZ16tJ+TUUl={pAMv^|(+mZ!hvj!WR-&A!&XPyG zcfyCqhgS@A^MIwMq@_Gbm}G#oxNAYKV-^G6KWOz0fN7Sqp1<=~%KrSN z!qlBjUL~yCKC-)yw@lJ@kDNN3JwcM_EvodhqsNl^Nn{EDeqxCdufVmm6%d?A!UIq| znx}2W)Y-nlT6_ZE+6A^5x#fJmcKgzAeaP6MXR68-9!f)EmRDW9FUMzQ>AMFxo=_(N zq(Ce#cTks4_^%g2@};>CDEhe>(5Wog@_<@}S7Gjke;=(!Oq}RD!w`*}YaB(gj1MS~ z^;dnsGO@r@zx^|q!Uav<$DUzglji`I8-4Fz?BQDOn1HZ;iAixb;*&|$^|-R}k4fAB zSBkh%Fu1`L=+j)~K`X2?MoBM4PZMp{vmHNn?94JKqU9jcZ?3RIo|mN|cs1~%v>!^M zYI{(Z{~kIl06Q&(pDp=Hb=}bDLfmVb`6#t|jZ0yyf22P231rhvqMg$@Yr)c%_>U{hBX}O0i?p*Hb+Yp=v;%7$mmGk!M8*iEV1nTo5 zU*8@@M|t*lW3M*-SWD5#H}`mjKx1}K8*mZF>UQ|(sG295VNp0QBiSy$(qTW5z$pzU zrTVDqd5;9Pw|5`fP~Ng6h!ll7%laj$)Scv6pC0G=6w;Z0udJx z_RHMv!1N}+KsLncDA7EBMZ|tF>ik4I`axMO_M{V8apiWe0=pA8GjDaDU~hflxOUp3 
zGGN#O3en_WA=chYIG?KW<1e<6wb;q2-CV_PX*^P(Gi%;1a*EVdHLr>r+2QUkks?A zBOSM8(VGGAr4F%o!QshPMsBBO^hpm2NyN=QSNFS^@|@XTmPlfj#10<|<89YRgjk@h z7;(EtrtXxPeliFz6OFb4`dtcsEg7H5v)K_Q7}k4MGUxpG%Akn7ObK$}YEj#0Gxo`F zk#Un$6Z4ZUL{3N0q#Z^HF0Xo`>t)T6+L9b);NIrz)uv#B9oX%E|BE%#CXMP_-)6Y4 zh0PQqtvyRJC@Vfpzx0O_y7Tk>#Yx%_ACJt*S=m#kGm_j|OQRqZV1Mlw_@!Xu-6T8P ziq9%`SkHG@m$(G3w2?%@&P}LtY!vRKwp2QoeZ+d6@ULT}C_65-X{<`WUK^QkkGq!5 zx{Qp}_5Sx0VU|xZ`yVHw^oKf+UNTK(F=8KQA^yd(|pX9{w?#ih-6;e1;>|vBD-XMNNs~yPPDS3+Djy1L{TSn=f37CZ)bkp^6qSOxP-%KDlsCri7xtu~G^*PGjG(^dPEJ~yQ*^^M0P{F~ETyFcnIHNkBikBp=ExzYT%`G=_cSN_p zm_T&dpG?}Io0HBjuU#2hGyYLz^Lz*A_7F!m6o4zUS#~$Bg7L(R7kLs-GEzEqfnNS& zrrQ64f!%-b;32)my~#@F44*IBPhEfv&@T505M%PP`R?`Q7gQ`E{pwC03#ORztIPK& z3Cp96NaHewzw2I)lb%GtW4wX9F$}bGj~#0Fjo2}JmeP!NU&j4?0~QRIu|kd|pWR?} zYu~Jf-0Hmwnq0jyjLaw|9W!JYp99%JL?<)^@g%v$n(QnF+Ptp^QW3wiwBmnPh^Q%_ zf{tq`rSfod%FLwtb%EV#rkZ7}f zgOpS^Rqn|@?@QzG*qUz4caSjw^xZ$NWqnUvOJ1*!c`6C6Cb&&TA%g~54y45TRK&xM%VE7z$Y z%KIK(AeG8GBDGc7+p8t`b!M5jYmUuFL?(rh{3NN|E@lSMns>7kht!6%QdKGd1$wM^ zFH}XQ7A2Nx$enaVar?o7GIjP%4-*Mc@Ze97AjrU!&#fY7p|7WP=52P zcUrYnC0k`*PAfh^!zv7I;&`*wV|oPp71Axt>`;(#%IdXAwr{J^jwDHjIc}@b7U-GF z+(@?W&(fljl(5kC1e|(0>uxvh@K=SFyh7%AV!ax40uSxM^=|>h-u&`YsSg-^;1oMRroUj4N5K_|utMsP@afU;oAWDBre= zM$(56v2Pd$s-j28*Y6!(LStM})rPl*BrmFON|mHk-fBQ7)+^%s?`u~W4oeSNYsc(= zCOJ!adEp%Ke8u^470^1!_Ti1sMm?gb+Da}#wSHib#Fpt$l!uJ!P@RpPtUpO z5LKhZYP#dqz;#A&nkzuAX6cyp;&cz!51Yi*IaU|*15Y@d`adUVM5u6*p3ycRehmir#AXL!w@%N$0*mO_3F{l=lWd@W?3 z1^*cj{I=9;tlQPH+Mx7k4aI~FGl-j199681!CnejDm9-}5t<}BMHJy~61}6vy{)tq z%FR%ZiX#eftCQg=$i9)T+iI+%oA|)_ zw3?&c3VNM+ciN7~fNsaysOpvVenARp^k}@5?>t7_?W79zv8YYQ;P6CADf}xL=5+@C z=}$m(WH-z{V@`uSOt=QGlbQe!d{BZNpPA@^oypvrKe`Pi3Av9tDZ0hruvxKpEc+aWRg20gr^eQ9YwnfLbSuiV@tGjTia3!ZKxN=3eT@szwfoE% z#^)+Y%1M^Zo1tU61UCut&M@2#O*#fwhhXt|zgDT6OyV@6uw^guA-9 zE2>#jnR=Ww>Wqis@#{H-i$nx9}8^v;!25@SUIKcW@5efms#sg_co|Kmak=&_1{dtd( ziZZDKbX3RtboX05Ty-y$n$E(d#kQZNHpr1NXDEm#l)feX1HWErex@Ou-tNa#7An?< zWSPwlo4N@5)}})wY`2ScYdFb>W?H?;Hm+H3D!vr}=g5A)7xJ{OO$?qp18d>r!KmN= 
zRUVPk)hvbCOt(1Fu>JyYo0#Hm%Tf4I!CAIW*~!^3=>gjLsOy1L_pn9%5l!=6rAXXR zO&d=3gtJ_H2j@Hay4}wJ%f0}9fW{I((yOOlD-ew|dOYbVoz)afSacI|l+#>__dAXc zW5><_aF%lMFLd66J5~^^+X@zZ_;f(!Y#4R*RaM?64uySy7?&JDP;ROaJK|{OJ8K(1 z&16^aW;qxXa95b^QlbQy5hZg-(E6J$Me9%=*z&i8q;i#@AE1UjeAcYk`4DGqfI9_% z_pi5uJTY1Ep;PCRQv&v0gC+wZxK^h#wZQ|9eh-MZm+w#~w(K|mL7kGH<)?YcDA=$? zm&?sPg3fu18CbnOHtX=fcj)$FR_4A`U#4VC?phy~3v#3g0q&~=S|wk2$jTyEia=Mc z)k1UDK+~CJK^a=Jjyqz6m1vxm!hw}_b%3hNUUk7lc1-(! z{rW*_h;sDAczkuVL2fBYJy(EhBu6cQ$I7)y%jM7MpF>6`k0#dq+eutO#HXIo?3DaT z8B-1m5-Hk_^^2EXjfH&1gM8U*c?Iy4!~;f#>#_T@ob1OMiFUhJr?#)sh!L((cw&X; zL?Te|`lH#fg&N$RF0Rys@xu8c=dY!Z7Y8|V&|sMsjCTw}^-{)`4*hV49XShI%?zdl zq8n++U&-a7&J7R?2B8pqzu*!pKI%32drOQh_5CDYgITAMO6rpydVf7Ro@%tqI1_6R zV(GV0&e8|LGDO8-WjXID{U(z}R!<*0VUJGKwg=tCRe4_omc!!%)9L-3cwbm(%O6BZ ztp~yv`+5|SH4gtW(tR9r5;E!MWl`lxjk`NM2VL2M(fEOQ_uAE`Ezs|e5%7ToXolo& zyO?MkPTNfWrNih_6Q8Va#Z}Z;r`zp#!f68JdE%dYRM7=!oD6kj-7QY68&KaZiI`Qj z#7J7a;+aLi}_xb6$oHKJA z#iY`$BHex+d{J1H7{d(b;Wa*LGc2O)DCaQGOmR>NxRzpO{=`a-g`78>-E-!AL)JCN zZ;b-hbP{&>r`d@;bNa3k!<;^6j~8A50`>IxnXGHFquf)+Xq;LdmJF-pE%t8XMYUK9 zf1K7F7>-5INn+$r*M3;r(eTv%rAC^49uN#U=fTY*KS`cn)bIuKuLHO&OSNg#M=H7( zZ$X`9FlBRr=EOxW=S#UTjEp0iLr7Eq5~*Il;zxL<=j0yeuhlnrAy{~Eo7|Jg(l{xC ze2I<<7!Rg=v9-ex_f2_dfg_dyQ=T5(dJyg}EbqN;{6d+a?WV>-^=9$6Ai|uOkh`?? 
zqP{gE^&jT2cVGVblEye5t6T0;(QpE49N_0MLJe3FuoL9(VI1%nSac9ARv;|DS})jY zQDvwT@wTz9lr(oVM-5v9W`agF>kdwQG@om0wtiArJ7CSQ3ubYWSo!1mm`Chzk@*03 zLWXJCbFwSfJ$?ld1)@r`C9ksgrDl`THopq;x{6vVJ2IEh{GqdgH7olkO|!-ZA%ce_ya6z}*z03EMUR;CoqD=(>q}EYUFnFOIJ$ zOLt*a0b9-R$1?dr!h26W;b+HJM( zu8xxE4qL;wLlL86%9JMN-$E!AjUbK0gQBFe!G##n~|%@^Y} z{$&qI2{c#JhNP)HggfidVgu9k3F_wBO(zXfP6G=HpSxoMRT=P@^nShLK>joa5QB|b zSckcp(K9)b7$d}OeSH^PS&?tSifD56@An&k9&G2CwjX@Y=$47zrfU#ez~$iH8b2fb zU+#fSc4Y9Q)Pv0%i%J4cnXdjA*SjzanHRZVCtt{x!%R<8LiZvcny#7}oXXrL+{<)R zDfD8tnS!%K?w_e8l?o|0$ymsy7n%?8354z^O&zg}Ct|k!G?&_r-)?kPps7yIIez6# zw~8sBOns}io2Np}0TrGAK3g9cyCc4Lw(uM+Ef})nj@mjdtm~c+=*CK8A)dt7FIg?> z-LD5uvPiYB3w79=&n?Ps-Fa0GKk6ovZz)AP>(Py5;y<6r9VP3~ZUC%6XTv{o?_S6b zc}_e`9`0?GfNeV9NxmYsvw0GKEHu|3VTsdl!8Qq<>Y!;K?y{~19{-w&I`b&EH9}AE zaYW_ZZUM@J_v;gU3aJiCFH_BGtz=4YuBW;S!Ib5fPW7kVAG~`%)$+=%I!ciOI)N{T zb%(Vr^{4K*!lN}?1>-ZQggBl!o2+&>@Xg9nO(HCF`8<(vt;|2>^;2`T;)_k!U_Yk4 zQ$-XT9!RD%)R1Xg9svSud~kRrUBWRPSBW;=(SsNbTDm#f;(@^f-JqLi#RyB^-eS|$ zA+lA8koS9G4OV`MjORN)o9+q;ysCm*{^@I$xC-tBd7ueGHfrpfvc=m5Y12)cq@37t~D|? 
zGASi&*#A60`#|-N*&1`S3qEnmX7LiO{XrI)Vzg5lm`Fp*GQe8wc)>)QzvadiO>|BH zTyEvte4S;sO91xb9=@f1L7&bnWA z8i4$On8rGmDD#8n_w)-tY|)FrGmeuYwQUAW&uRE~fiYGn1kNZ^zMb-c5Y2*{IaNUI zw<8+>O|&~UN9ugNrOg1Yr2}N9rMV^CX*bZb!UzV?bxTcf&ssu6IocHhmBHR^YiWY7OOZ z1vXs^whE+gc(2?ig=XWI36TnZh}#h%A&OEo=I(1F!oz+5kFkzUPVaTb^k;onK$g+~ zqSCRDFYPuuFhw))twJf%~Bo$_LM)^j1&V3FKf0($-U zE4RFQ;}T#N8va!%PBP`m_R-$Y*?AqJp82EqzJ3Z5-j3%2{>DEe;C*ucRmWb~g>}Pn zjT`A-vXR?*e;rc=v>9|P>jDl+Z4@@6Aanr|HgbVGYo*%j3A*nc7iPNo70a#Z@&)Ff`)F2r=0Fda5CF1jr2TJGGtHZnk9>IG0R@jB-JEvsxE*MzJM2(ir z{Yk3Aj88c|FE3esj#|i6RPxgE>bd(Z3c%x!o&t+Kz@hrO-3^=$KFj-}C^s^$G56T7 ztwwX|;$5A4TKV$JXU5<+V!hmWkro~&ut}G5^$2Uu-pJ!um##Z9Y#d~=lXGt;zjEFc zf+6Na4C5boq`&tjgeabQiE&+o%KEqy?Z6~poAAhiWeDQvPNQI?`R$34(<$0uU5MOK z_~~@GC7`)W<@LfObeD9AfKp}(OpYNRjsIekH$Wu4VIm?L44k9HKS1QF|Cady{w&{f zyx5}Zus>LYC_4evO|FQZUqQZqcG^X?GT<(~@{|PUz)Ap_3wd{DZbK9R^Lii>*umWB zgKUHO@t#Zc;___&7@tky4!=HP(u`^4a{|H;@Xj8-6J4bI)3UBHU@TbzE?&NamugzQ zY!@CIszMoJBnh0y6Cz5uVWkiSz5arf*C2~S$j1t(#vCI_K%F+!`trI zPg081B50rg=$yOgL>m>dE4#E<%G&hY^=34r=Cg}Yvzmv?5iVT8lj3+e&AH6p3S2kx zoem#duLqN>L5Weyy{z^_AY{B$?Y?;%EHnl{ngo1G>{#eZU-djG@_+g<$!5M0A?1DW z;_{x=7+XC&O;Ef^;nWuQjq(VXzWCLyq{A6$V&09N%dH0;hy_ES@rf(oX(A@VT8~`k zvY$EBySf>tsDi%K!SE8wXopB(E^0|lk5=X89Vw(#EYo#%o6QHuQi2Bg29 z^Jrv5)9R*c=^IPgLK2g;)<>H%7%>`v#}jXy6MA+A4R99oKe&HQgC+M!*fMa)wE zK{RJ=h(nSi67oN(zZ9;E&kv)sO57nZ=E?MIk#DGUyG-3L1jKRoe++I?GB2MEm5Ge|rcCg^wE(E?E^-BMb_-nIu}e=^nG_g#JQ2}c)) z#*I(Uny4y1j(6&=vH{Edf^^s}edCoSzQA+R$E)+_a}#mYNSz||tCv_#k7rnZ3Vbu8 z`x@1%FWZklI%*@Q?{sGc!1b050cI=hgVHX!f;ldy4}2WblQU&sr!5&pq*!ruj@j&X zGdes*-L0d{b3GrPsaU{&E$qM^_lGg)3qL-g^hH{ zMJ_GT;B`UYydx&utQi9ikUhsq2z>A39W8*)Du6|2z)mtyHt@R?&%VBK$A7}sW2+XM zFe|z4*pl$Rm)#3?M7+vCGuyZ>aTN%31h(HxG zo)}yDEMD!PD?2HXqf7QD2`M%aX2bVN^`#v@1Qs|8Ii(D0Nv%!Px{9e1ZsJ66KXL*F zy#9VEKjwR_u3B$X9tLG^(5?xpa>A-1u>Y3s5a9_Sxf|l>t)IuV+PYG&5!0tY7y6~^ zcDb589L$;AuRj+1M0`2!Fo?Z94-IPZL zn7T@7(V}*$f$1gju7R~7qL)DA25)9rR><%HnD%bhpOg9 z(Vk>XQO~0&=16DAD-c0~(dw_fyhhXh&oODG^-%oq8XU3nqJ2YWuBI$|XzDtqPI`-M 
ztl#-pQB<<9l^p7jbvm#?I6Ho-w?i%u9*JcVH2v%zWlXp!Mt4&bk;&XQUms>3$9>`= zx^r_c$w`oK1Kur9B#2Ut{wyfDdvEwreS(cPrvvF)#x(8Z5fx<=P}s#Gr^(577}&~5 z>JzP_z*d+kx%P^P>!~i>g2sgRo%OTC%m>V)Xpf|Dg+UH?6-n{lC0@n3nz@Ob?dK)? zC!0HzzTkpFG~HeXwVF#4p1vt|0GC7mWbw_r3>*z` z*h3~K#BOpM%d%F|?fF%g;1)$Q81RzByWpGW`DZ7t0{B3~J+@2P`Xl-WuE6^v1V)zB zSg;H!uevMkQ?PDT5!-L?ojQwBPwoJ=;25;@&JlKDOx#HAA%Y5YDgpZMY!$*qz%zY#RC*e!FAzhHJ{#m#S4`KtOkBNcWX^V`6FXM~ zkLja25G(b)q2W-2XcjJ4<9XXRc0ae+C#Y^D?Y-{Oc9xM{vIVw!x^fqSJdc_b&YEUn zPG0(hZj9*puEd~|e-`}CcstF04^RqTIgtBK8+NW&@PYWWrZ9P_=k>?#TQmns4y?zg zlx@F}^XBX@iX*CnK?w?Jja?}n(*WFppkI}bwk`8gz!Yd;TkXNGN-F9&h5vQ2LcbWB zW9JQADww5##ID$L@5wj7-wK!mG}Mu4V?HE>nfpO}vrDRf%K0?)SR?pHqF7x#Cu}0m zeEVO4(1|B*nDDVP$EelMV$kJrw%%r)I#S#ItEdHCb^M2{?Mz-Nx9cr$jSzu2lvt`_ zmpnc$6I>i(O$@{_dpO0&w0m_a>lWIhbVO}?%6EUzAj}<*-WT+}NKe{4HHXF6G+Bz2 zq9W$x7Tv#(z~unNrQ*F>*%zP?XixFms9MS|ZxQc4W?8c@iCC!z_4MTI(@igfR~K)QAlNLznPZ%7^q@QKxF7St9w0i=f}a92u&JqvxU&Ol!cj;L-SU_)h)l zPKRK;_!$r<_(_!Ga=_;!;=$)zcZet9Hq^MEbjv?P$4hhIKefJjU>G;aJeR+`g`DZc zk&Rep<}IzRy$cFk7Z!F80lkF+pbFuu7j<$zv7tRsqIbUdwwA%-XLnZbhKoeBxk>_B zdKx?f9o=eGtYGz-6?J=Ba%8jHyxtcaZ3INB5SRcNG6k?Xi@EK}o-CVtP@-Sl?~y>? 
z!1dA@=`-|ie+a$MvX}?#Zw%pK4Q-efRo~W`ooS|cLI2->Gol0}9;6@kct@ZL1gjyv zcO9k+s^&aS9b!}(a4A!mjC1h+6hnY_<<&w}bqO#%&DXMZCYlCK$%3uXYR{nt?n@PL zejQxGdVn2o6u1|QH~Y9oS#Zz%CcwvSVmEA|W}OS}qsF*K@1vhl4>m9mum>6QWFM;( zC^Q~Jn^M0Yq-}s~Uf1H&jVYUaKwJ#AfvU>HdIY%I)y&9TB4?z>ua6@>)fn$@PjC3l z7z%62my}s&*rN_)4ydhnxg(<#SQp@gx$UN4-Ts zlsEX*0Km?DsWhD6RLTZ_ff{P82pFOws5y7GGW>fBqB$ZcB$L!FL|IS)fzPqbqri5l z-!5HKzs-_eck@P`@?ipxWuhKG$!<5=V`rg9e;6fz{QD3Lt;O6^PCYAz*i(pu-GNlO zf~DD(uolo5e3gJp5&krR&63||kkZ73@@Z$L0FjyFAFTSx2%y||Y=+l6&$Z5imcnID z^o6Nnn(@+T{s^ zr*T2zlanE1mR{F;bDH*?WY0*SSB@GOy}QKs_!IuiB<@z&pKFEYs*Rs~KQ+h%IvyyI z*TTJ64!_J35pJkrLOexurcLR|@thaJkM%3pd32Fe^oNCe)jj|QyQ-G@15&b!?1zjs zi9O;-t)CK*y2f!W?Y*<-tWJ&?TVPg#1QCJVtzFwwt)S5Jmipt9iWu9MBZ^2%_=#6`0w<@bd?2~AdK%045}qCn$c2mFxsf(N3U4hL z7h$bZ1Y#7M2@kh_jatO%#$X_L$3UaB1iYR+mfLQoAAF$KXl{0I%VTJd*jexSilaO4a}#skH>B=~XY1a2km7MsS4L0|F$-79Fp(E1 zFfjGBHoex@3SNu*v(RQ1`H`IX6lAKjs#7VB(T=DWc)8AD=|a>eK85ANd-Od>e^>dv zk;5er2NO^$dfqY;q|~(t90Q2%`N@uGEnG^~K^}NPnRc>g9+-cEeofp{-xFib26#&7 zxBhbP(8Z8>Dscow%R$24`} zDXa%L!+9?8oILm%1r&~qe}Rft5A{Z2l@<_DGM&N+Ea5gA5Qf=`~8*Ofm+XP9{d0VFgZV4t{C1d(IJrNm zT1l|X&igKYppX3S%{*xQI?+kK;CVen!hY|Q_~8IZlhHu$%W{|Sc%V>yO#3Os{p;5Q zyOc~Ngy=X7ffoDsn5*4rRbt$P_s*j7{Ct#hr{tw|NCKh&?j*1)&?A8Y1t@Yo^Qn%v zA#4VQYmu|5iQS}Mb2Gp-hnYIXjoa!#gn=(%cz1SMD&q&qQuSuN%A~BB| zoLLwMP$?vtS<|epwzN{m#FN4HXja!J#DO37Fp?~P{&_&^l-wWZ;F!v?8A8y3ZuI>7 zuLSqEL-8aDUf(z8TrV-OR*^S#%=0ry85!E1|8XY#o56I1g!HKGQSU={@JL<$1Cet} zNi{U*?8XvBZS!wWnsK)s$_3vJ;ko%$YwpXP6w_O}JeLO_5yuIqI2v(kQ%S*Ov`zW` zI9wgT?8{!ibYs1TXJrUJ>81U5%p89FlIY{tl-m2Usq(j_)bG;=$fIqlEKgfxe*0;m zHu`<9%}*wn*3~`Vv<8dY-<`JnNy$^lr38`Pk3nayN)0)^AW8D^U8;}a9&$HN>WkW^ zMN^)C<3p+Xm4R_77u@ot;61sYad=#QiC4<9=}77LKb|luG2Xl>q%&E|A0A&v8Ko$i zeW|Py4$_`XSZJv@15vK2{AQ@miLIW^PzxXiA1Vk!F|7ojxo>H1T~%VoXBS6r|7=)h zUm43kOc0Pp-Xd%r*xD3 z@Z77w$Aaa`GB>8bL@dH1@yg-POq^<Vwg!0U`HSs*C3{77)GziK z;2d#{bHxcfN>Al=HzsE)h!Dy$lpS=t>xaQ!*X%mx+_&2IfT>H1|2OnaO4T&B^ixoY zbO3wxA>#@r!tYn$0bh{uWNOh#VMYtjcN*?Ir6I&(obVg@R}0a5tC^?F==2nasSY4i 
zTr@2uJ*Y)ZbsyZLP{8bD_gjp-MNc(I^IjDc;1V{eLuO4iqAX=|(B{moT1!n%dZ&;U zmHVO+X@D^s-u2WF)%y$>2D;v1A`>X0ENI1quq9kQ!cm6Z?yj)LbV8`LHU(|kofI3B zr=%Ykn}o#N<}!f3@htLe!E4*YG+T8#F(+}pDW}oVgttBZT5$*#zxvce|b1 z@GHx7`Ap5bt2Oc?-DP0emkdnP-(xYTod@0P&w%hYiP=*qDO({EHqd-=gns(sFTIX? z{v*86Yd2ud8p#$p!wl>b!D&Wvdz?V&$qB0_PpI69l38v%jv8Ss zbH{F-OdE}L!?yKC9uOz+XH6M? zEoeb^cjbvfm8cpsu*l~x^Y>ywd)%tv`MDC<%^bPC97l|zo2H+4Q>K!E-#so(ph25s3B&r37J0Cz zq;@WOk0RhZ#+Io8iV?1f%pnbOL&}pf=3XhO7DycRkiE>EAy)BCPd|Bm=L^p(7}%Qk z`q$fcO8yv9$5k5(wZqcGd(NzUSjw%77-CSKqNZLhx5lG6Bu|E)9$S1iWcLy2a@>Wf zrcJ`{^da|?Hw+l0m~)07eQl7iK4M0?wa5olvRpb8ZJ^Edf^XBJ!sOh z;DmvW;d<^17a@(tH;Q)zo|O}|k&>G{U`}QHr*@jvDihnsp7r2rCK4US(BYiD@nTZs zOoTuH=dqgv-`UTDl@Gb^JP_hE@6vY_atNpc(S(q`{CZ39v`~^*7Y~6tIo|P)G4aHk z(GP;m;?p@B#ZoVy32V>9%Cj2*mq+bJC8UD+Z2{eiS2cqN! z{$FYF*%kt3bu};D`z`Ip{Mdk2`4XBJAw9!v3zONmF)!nP8-RfcIus`N zFd$q>`;^jaYkgRZ1O%o9i;f^Kf^0kCe8rIU8OPDvb7R?1kXmp;$p2QV9eDU5UI3^F zTOlEN-Sdn>s0kJySnH5V1^sls!%RQs&iNi1icjg-1aEEvWM&!QxBl`t8&&#c9O~|_ zg?=9glp^4bBLikK&L9E$uPe(-K&-<%02%{Cd&}2CdiFdy!edBN4%$1>_%V7SqJH1#3P5JB8UZe8;==^026glqm^a9ng$|wSArwnyn z&d4T!r>ZIO;`s;&dD<_y38rh}1{&r*fZnX`#q*Rn6^@@GoP`}o`-DU1Mz^Q^-kszhuXFZyWX3X?3BOa zW{f2uq}?|SgG_R()ZO&>AUB$w)7%1+s_)+xDj31N$O5;5?z)qFYPa$<><Kj1FIlvnd$^{p}?ejVY4;7(`e4o`!_6%H3I!@fri`sMN zn(q$vO}tFO1p@J%4`F7KwIFi@>VY}cRuCw~M$Iq%h->Hv}K z(fF+4TFy4}TF-OK2BG(33~Mx;RNWB9YJ)3tUhA!7kW48)MSAS5UEi_x9FJU9)$f zQfK@L>Q}LC?s`o?iJEMoD1)2_>b51+&3lBPz%5j18z^Pzan*l7VnvJ<%L2V9XDfEf;c47)NfbZv==IQ+df&oXP7NL2fGA;Y_Dd06U zii3W!vI&b2-my6YauHfm?|6p-hUkAD*6$9|AD(Q3psQmMtC|n@Ndn~`Y6o~KgyundB+aU6m`QYzR z2tdB_P&@ON;Lw#GCvVqIwxRUzE!4Xi#?a$Mbm+lf>He?y%Wyx?(>{fa&{U#z*S#{k z1_q%!A#ejMF+qSg^k~WkVikOY<;0~V!`o|loa9w>uu{!F`h9f}KUDuC7cX$TbOXi` z0agAV0am#>ty%7^`qn&RV$A!s#HqVs!x>1$PdP4)i_AEOJgu z=YYv(rdv+znvk22-2ui0HLe}OX%|CslAXn4bJ89-$2=y;OGKrplkUu6yv=YY!(R(2 zmRMbHXd0z?G@8`#ak3Of4nBQiOwQj1g=?0l*XT)hr^%YmyD#^C@mzhw_i;`~)r6d3 zMeXu(4vhuJwC&{Om5$<9^ zgtfJ;S<@d5qF7w*_IX{KePzI^I=dP>m(>FD&==@}xcZfXZNbXILW4zvTmi`z? 
zppTls?6{4ymGJwh;e4>iM?q_EjS>)nKDWr#2Kk{}aJ{cv2_~tJ2Vt3Jv_2okl#8{R z-Y0w|I43kZt`q~JaFu8c2d_O6rK=d3`QI*Rgewg{>v=x|d$eC2z{_`J(Hc{`N*huE z&vO>)fN7|7k)ve_ZKjC8kg7H^P?KdVXdw){EffSL(5;6j%lA>1wn=@XA47k-9w&%Y z7u4n)QtlL``mWE~d7(H!xxW`*oh`OAT|z++!XNuIKB)&Sby!w8PS_m{@T5 z!|c)R9ZL^Twaw_N>m!`cxeO;jWW#90bn>(iZS?mEPkakz5IZf=&&PSDvzSPX#GDfy zXZX*>uo{6jqB1nPaA{9q;oLImfriBxgUrew7M*MF5b!iMNz=utMzIWu`uGtx49Yh7 z$BS_>Q)Dz%2_A&D@Mp7|UkY*Xv9aI+z0Zh=@OfJ&ZE@vLwNBE-WhKr+N;n9@IMfu13J; zo7BhjS!xb5Zf|Ya^s?HmA`Gy1RbELx3_2s`3h4%=^k4cwJ&Mr)G|dEj=;z}50_ z;1XN9LNMMU({AKfWuq$)gJ2tC)1Iv{?D@@5m@#|)Fock^g1gbNLtabQB@qmyz1R1z zJE#RDnUPbbEqZ_qn1#dpl2ba3B1FuE46Z*<-hGOLGXO#IJ+xC7Hn-WQ>bhcM#DcJs z-!|?1A^n~t#4E~sq~Ugrw5Ve;TP-!s9XP4DlvR1Ii;6eZ64Wvu{`E?~8iM(h-A98U z>0l|V`9RGe{ww7bdbPPSB~YE5KC{oCWvfQ5T7P}Na-hq$_bXvuP28OpiP^)L{7=;M z2G6}Pu7g2thVJ5DnEKyeUtS7*!)U-7gaG;9pWm`0JT~;QVX{e($?zDe|A_8c46xOJ zvCz^F&^70H?or17A#L9p$X~1)a@%oMTK6FIm|vv-mV+-L{_C9B|BN~=cH!CcUN!m$lP3{Qq zPCfx`%2%-&j^4!^hHuYryG$Y)kKEw*jIMb0PCWx?E7&5I{N&xG(b-qK!UXFK9iGCS zRbIv~4$${rOmm5OVJ?O0i?U!&O_L(OFgZWh&tdnLjNl0|!P>HJlEz+C@=MGg47p4EGa+6e5q43pKGUwHB2hGN?^Nia>{8Mb&VGB{LAH6MnfZY!=#FvlU+(?FQPjd8o z;#FU<_F$@3WInuT0;$qe*ZPFzG1$+sx?J#AVi-A*wd1Cwe^{>nUmz^l?(;yc^gl;v z|2O~fzeUQ5eQRU${#NOI=s3t70XT~Pij=k|#_@o{CdrlUNu7_Uu=a>!u>#bs13M%r z!y5E6{}&GK-;~S$hDzIwu8QVw>e~a7(V!$$0*O%3m$Uw>#A1Qwb^iz2rvFj0fo~}L fSFv|bbsISQvv+WJcL)S`cY;H(0KtMgK^JFncPGImgakrx3A)(g4k0)!?y|7h;+N-n z-+TZ1s=lh3I%lTlobKtF>6u@5PuzP=WgJXO%vY~o;i#%8=)8J`2z|-1=%_C>-5$gZ zFAoGy9c8&!)f3bQF9l?KSq<4&uj*2;9xPE_$`}9@BhOc_NZYz5QioHtwWYoK~DGm&mY25(Mfb5T%4Tm8|*Y3930-W z5@SZm6~AZQ;n%CsSFF&lB1SdlXa5e;$`!Fz z5w5uE6P%&lj7|JWY4tkj-?w+FCDrO3!#|BtM?&blmw!n{{mW3tW!kXK(B!1Dr6t3U z@b6#0zFJ&dR8dpYZ69A#@bL7Ey8UHL8}9{fw$#_3Gv<(Z_?nlMRpw*F`Z+6#t{sxu zavt z{PWYJJN(RYqr23#J*h51V|A zqKlaN1_!NAS6cV~mS?>8ndKsj`bKG{ztrr87Zel}85PCA!lLX#!7+Lk1fHEGDN#z2 zOCaTnd{kVm*M?=&U-`ysMg&~w`X`E$p$2>^$nKs;XdnxjSrHNyAiz#9zdm5#;^KlOF}{6AMnl6I^awb}`US2h9@ zbqK`b=~zX_8nu62ZD;aZRo-8RT4#$eHTUxflj%TubmGcAI>k9x;GkB2kO&BT32_yD 
z_&SU=vo)9!H5qxiku{D@DX0QBUiIM_jfG7Z-;a-v|I58uhd(brKjDA42kjJ$2F(7_ zbH6?|vMfZ_)^T0ye4N_}T5fWIA2&=!su9M)kg|LQ1OztR{!h+V+frE5SRvt)-|Jf1 zdGN~#ELCtZ<>2(lr>4Cs2p_& z9WU^iAUIdxyjv|;N{X(Kr6E>B_f{OcTu}#HlpE7RZBqD!H8eBf9J8iW-F_SdRlpum}Bg0 zaqztM`>g>vkjFw9c7In;c{hJrq1*$}A zso&R1JY1yYGKS@ev=^(rBF&zE z*Hh6m{-1@D;381*=gpa#mGx$ueT-F@s=X z4{hb43B~n_I_&Mtm7hZZ*{7Tv0^w8pl>$Dr8*VAN%a2IN<2iBn0(ZPp0S3O-FK7VMb4KU+Ox5UE?YErmBNvTKzz0l+>e!RufXpP~`Q52^C$ola_~^;HT8 z6zip#KDJS1;_61#-;(LSxmaB%hN+K!#~uz)#pt%pn0BZ+m;NOLjp6dORUu4fFZKfj zQ7Jj8_oV@X!$a`0cJnMY$&^}#?&4!u=rM~=kN9*-g#8}{5QyxP2SdA1H*e~cT4*Pt ztyxxd0%X=*HUCPQ{1)XRWD}}3r7q5-hMNYe7H}6S1^pEx` zk}t$B{7{XRHl9a#3DkavWe$)#i-(xIr|VMZwAi~~K&J%1v$YX#+ipx#Fdld9V`iP% zdw+5V`Q4g_?6Aj!$ZUVmyWZ67>vbV?KG;(wx?R|bp|Azl8daspb}UrdcwLqp2Z7snCxqGXDsf`|G9<9BC5b%n&CrNx5xaodBP~aee_Ew zJ|9H6wo==>k%}LIu~dEVvKWcae~R@ILJ*1%SsK!0sOF|hzy-NJ{N@q9x1-ly_+aU` z-H2o@$h;0vp{$^y+u9vb-5-mt_|r%>7f?oTM$V1MK-<0m*aGm1#ucbU_5yDx_i^%K zocKcv7(cTyK?%36j3GIoqa((3XLgQ2l%b)~7|tuE@aF^UnmI;7giVa;;?s3Uxqn#{AN`cCRTK}E|TwK(w&)xF)8HqA*ZP9`Ssd{dp zJuM9uNT*ki6wSH9(OAn4+^Yi&-N_mp2?f|TKNy6zG363tHm|jcz$KQ9eu9p#=6|_t zNIBvZA}cVw;h>>F!6OhqRbDxmrD)!B>+O&^1YcxDx0IjrN#1GpBKf4g%Q(-#`r*wb zD@vja$?>yq13#qrXA6d4X`UCch>Gpi+2k?0F3^S^6^g+($#hp#M)W?=@=N+GU}vDN zS%+DZ$`u;re1FmEY)>uDZPX=#!972xi;B12g@|G>6Ak0O5BRF9iZ_==QCW$+aY*R7 zKW~Z|Fn43bXNksogER$nW1fI9vN5g(V+i9_M=%Jt#**NM|B-u{kvI~ob%u2Stcg*H zdd(x=%1N&WTfej`64kdq*SJK@JHsmsYBrays9H#v<=gx^&9W0fhIB1rtD%oOqb-~M zXtc#v%eMTl5hXXk1HbpS%-X|3;ej@o>FE(G`Ao=bN%CV6d1pRdiE0Lt>!%dSiC~A7 zix!N*gscreiC1_!3fda-CN_kHaGAI84vv=KN6KoGj+ChdawaC^4bnWkqQRvbwmN5j-v|N>CHHbK2P>gF_^$5OF1mYf zPQ#?t3kSEEXY1QwuzHX=+C5FjMge(Zd)C5@#|dQ~R8P2C;xxLCkxc{_uf}cQZ;{hh zk(g8M(m5aEH@{*#7UYAp*mvs6-_~P}IfX$uPug3f!Ov@pbv^Fh3b=or21_JsG| zpY#-~A}#6Xa+Bfwt*b;!B$df;$XXfAXOW($&@XwS8Y} z>{|o|@vduM{sE$AuX*>^t%8Qh%=T6}XcpH0nFYXy9!|o!3IJuRH2^xdh)9sS%{KCDFXy0^=7Wgkyobf)_pBy_ z8ICeI2vr|9?Rwv!PY%oZ_gE{8IipREKsR=I+Wamft9YomEq!_5Ue4&vT`8}nC3LyZ z4(Hml0qlvOy?XZjdR)p&Q_R)gr*~Pxu?lq=rFv6Ub0l_!6tTpPgBI{YNIZtn?q;C> 
z+)U=1a(S4U&I7<_`s3KGEY@DlBv`pXqgE3xC62)GCjCZ*8G65iRzJm5>Zk4$n6my2 zDB}g84*%(OAk?9z1GyToxLwh!CFG0@+0$H@vL#Jqp^hShe&|zYxO_!=?HFHUXj*n& zi2AD*D4ZXJVL&&`^^N{XIxw@j&-b~4`ELy6)zjOH+2BgNjK(2(!#_0?o!hHM><#`c z&v)&BEi_d9n!d;d3mq)a%EB`ealxJ5heW93!B*rXCAQ0hl(gm6hvy%3(X_FOh3io6 zg)T3;%VoU5Z+y;1zdl%PT}B`*hculQ#+omjK^p>VTu_`sqIQVCf1?KwHRe8rP7;r5 zKQ_2cd9;eq+OI{P49I7%_zS8JIIc%HLa`q65_gw7f0l6MlKPrin9qN~br#EEbp%OH>Miu30E4AXLg0Uf@& zc6~BraQ-@9Vl^ajkrKN#RVj(1A68b^KZLF1Vt{^?$cy&uoh{V=s%zhHLpvDP1~}sV zj<$XVR96yUpKTn_E&nrHC-qApp>D;2#eM5Nc-W~jMo7QuB+T1a2cMbx#v>b41YXiK zty|)tCNEtG?zG8wRn zbV4{R;RY;%VPDTr_2a@cz0ROk4xo-@|vCtBM#$} z%7li!XG@r#cYwtls6>N)9vQ=AmlR8!d+?bOuGJVd3Oclx@ayUy*sU zLf6R?dP2PNX~c;ke*lW0h9-l84=r<1(_p2BfxB2ThIcngzJPPwA>~%x*oL&+g74=q z*_af|S++TqUdkUWFVgkMWmYl6_;3wp%V|iDC1X6(ZilSa+_ha%zZsfJto%h?Mv_n4 z{*C1+eHyK}N86nMJ@qz|ZzNn$ zCyA@>fmgw^;_^O7%}tu&18zqE#=?2P4Pdd4dWJH0%OjY-(n}_4W1^uYyD&mj%N7_C zOJzsO`?Ea}CkxXlf`>O`86#g~T>jgn0FZ1qZ71S>FRNHSg{Flfhgn99jZx(vxZ4ck zaSfG__Xh_|An0q+$it@@(Hkxl0#y+IT;zZ*=DGI1GX(3AtxWTULeH6ON|hO17w6Bi z8*%X_l-~~g%DaREqCiKRr}||2s^^qHgd^*=F40aGF#LDKN{4SRT}kwl}@8pVknv}gCi zmms~n`2!}b(0O2pQs%2=6*IhQ9=bo>!O`aPDbCHPW8S65$WTn8*K-umINcWZ7WkXB zj29QBne?lwxj?1Fjly(V24;Im=OE_{Qmy8YqT`UIy6exll0sFOIh1yJ^Us6%dtdMy zKlG+c^iyMfi%S+y_oh_yh|{RvhFz=E?31(ipVPK`J9u65rIlRpvV6P6$b-w0-W(%u zFc`aClbCJDWpBLs&J;=kgc19_qFwS>-Ueul zECx>+J1kOfhq#Ol>14u%+Qz3#9^a2Gcw`(19c7(Hbp&-OEtg)Mc{b=fEbqBp@-_F0 z-6*TzQ{p`U27QF?9_mrK*^DZ=x3(GPCJ9Naeib8BH0M4nAy&m4SYKuy0nSqta%&x* zS{$sR%W=3O#d&6eFiX`T$s}ba1e9JM6d?w0pY3%`HhO`F*1t|kIe&X7_GN5o?U=T( ziSxX|J25D>zQ+NeYXw!gm{I==)@oj?KhiU$jA!qv0ra~kjXB^9Tf>m2NU|r8j}h)4 zQ?l+5IICasYP8|AQ_GE-6x>B3WhmC!W_O2yiyQm=NW8l$V*ysp1bz4DAo#6ruUy+J9BxDW)|=*`?HK3 z{PVyWRT)=^D=1Mbg-bpkr%*i?aq^Bi9Mm<+1+`eSzuS;?u(nFTIfY?Co_91+id8NI zl^b||*DdzqmCaFEUUhfXNo8W>AS+ggszB5*-GCzff0gW7m;-}jSMRp?=0DJJRN`6Q z-TRB4gEMc)BLiG)VG#63rQ|`@lep~mQ&Hz|l8!gAm)laLS$o*sk;&uJE+B4ZE!t@2 z)oAPGFZ8V{WPPvaClMcmPZDJAk|HMrrI;Di6-xJI*WS_@*F=M7AeaVY!OCSNaVk11 z9E=~(4lG$k`AGHb3;H`H;`1npSA_1#qn(ktajs)Ly7 
zb}+#}%?~TY<2EOxOX;E%tZ-2KR`=#AuLy|U#IavhOq#ku-z|DW$W_zCsoLrw<<7A6 zPfmFVCm{`Kaqnc8^-%12wljTogfzC@_y9qduHjQN3aQuf$Y;~fTN1T%4c%aIM)xsS zenC6a2;*2k#`oeL_0`~?hs4yaffQs~0&G&$2W`CU!-IC%SLSUJnJdT-asL&2+nkOvWS^v*~Nlgoa54$0*H5 zpbyLvzkFNUv5ZAV}~@Nj;{$+p-(3(KX9#7~YJbW=Ob3j%F~)s$-oN4B#Y~?YEpW&|)O5;%(Gh)09en z@x`MyY$O@AFC;rH+hn4liT^Y@Z?TIKtbtrpQElN)BV}kP%I6X<|7~9U^2O4!M^|wl zH$^6#JCHqXDV4v#0u%$`sCAI=I!Y4E8Q5<`AfN%*R6rFm1=#Sa-qf42^(j@NeaDFb zI+~k}12-%q;RR>sa9Q2T^K6XyIoAS9cbG7UUgXzkN|}o^nObGNj2#oFAKO8eFb3k3Eu6_H zVQ9B{HOYh48?Z#s6k+?Ci;aRpEI!lsH>?_`W_LH1)n+uD%I`8jswHrSsBn~eTV#^? z$LsSmg_Xx69>4v3C96(s3X~N5(%IPIcIM%Lx--WuX5l6{W(Xet0K1zH{-~;wW-<*a zXMKP93`}0vOzCz0UT|cTwZujqAJue_M3;S+mF&N`Khs>ecl)vY!}e7F#u)?kiKWU=5HiX~$C^ zoNmsq(>UI6B1k)gFm zOvq?J49?`}y^Sr<9PR-Wv4nMkA5b}hT8CmqlZ}MLt$KQv0edX*MikC8{EOmeV*Z=^ z-=nh2*`XO4d^IQP`M3+=5Iq))aYgecCiT^Dw{A;I;|~FDgmVRE5m85TmY^n*a4e6- zb6z@k79l>5vo!}h03`zuVPUo%E0eA**(tulwXg}4w`eig!J-0Ki!axcus;8-f(%2u z{Jt8wTz$L$z89Zm^LAP1bYlc;|C)8=UXdFr)71-mTF2)U(d<>ekU|k7jcfmnbJRB5 z-9nTDSd}7Z_i^p!Z?e4bmIYziLewD=J!2y@r-83?@X}n%V(^x^ zdr8TH+a0OHPkgQ4k(e3|+M?ea4(~h?&-omO&lalU)8b|TNVu1oOFRaz0Dk`C63cd1 znNrKrK<--OOSIrWKe3a3p)QcIS?@gdc*3~-LOQ9qIjLgXD53IOK~l$7-X&*Px=lRc z+_}Zp^HqRk6{71e_2K{E_(D)axD#9m#KI@r6kG=?vnP8kaK;NR^@^1LqKRIcqF|*% zY6GUyU2M!ox3ekiel!n84#D0|qFe59>6Fr$LBbb@QNa_ZdxH#o^zT1F@U}MA&s%0K z_`p~mshK$g?B)TH>PL8Z!kwN;c+(Nm~mG)c2&uFAcZ}Js^ zld^V|Z)j5r(NmyF_?h>v<`>*C1ujvveM@O4kxsG|LGOw5IGVwIL68>4C#(FyWNtE+ z;vEA@cw-Hp{ylq}0sV_cAEC)FbU{4%2rD}6&!o@U^Kqsy$vPp+sFMgEy(bbiPE_>O zFJ+(o8Fnc2t2Q{qqVd2dzrz=qNRWABKSSYGtCvvu#lVbWJYH-l=pvSkZU=6c8sHlz z$ab^9btCvEXfBm+WAQYx8p6dGQ1 z4v@ibMO-RdGq7&u6$33r-1r(h&A)raCGt#g86sA>B}SR6Lll8{zw;xLP^44gK-;(qU4X)bgN?n@MFA~ z;b0FNX8PeRLaSBDLJc1IfzliDINE0XplLGIKq<(<9w>o{ar0Eeq)8IjDKDe8sWYs7 zn&LP>|9xLFGaKHzO$g~ci1uw@ilSopcE|Vc4BV{b2+2Jv7Da`$G7VPvWc%S4WJvgF zrrkCiSZrE>>%oV}ngya^L@br=57~?>4|I&o0+cUyY({ntX!(O0#iwBj7PPu2X+J+G z!M8%;9TIxBGhA8}>9_EiB*KIkR|EH_mN%7J3$-qOgqee@>w|}0#h633LhkFIOe8%K 
z=PFM{gQqDB!ijxD>|_RLvearD8zYmFxHR)Ul-_>~vkI_h%*#TUXQ|YjBpP|It%OZT z2kgFz!>%NfkKV3jX>kgnbAMO~FIVr2crS^a7{IOwOlu}MJL5y#YB_- zT73NRm6b4fp1XVE&pSi3PR_YGGr!LSNrPcMEt8pHMP29(Z`r?{qB2ZzsqgFKZ{ZHA zmAOCc^W``Bc8{kLN4BpiR&vsbEl$Utw<9}oTz426_vX^*QcKdk=AXsVxQ@K| zK_u!iO_H2)OQD9~0bX%u5yjL(=CEE<_{PIsxHD?q-&xks;ump31j>4~JV{&oIJQyz zCpa@19@BHyRlIUCo7x7lSAf$qtuxcsS`ADb6sC1>sSY;*n>_t z)_9gDdg9eSV_xVI9G*t6?-L7GPQ__(5KW(oob&P<`2~8_>t76(q_cX-*zMG@AnHeQ{syun;D{* z@gcj_i$y@|kP6Y@q|F7ppKxa?;*babj>sftFYmd%cT{3hMX*1x=ID$A40z|i2*}SB@6=J&zwZ%MaQirum+px zm;Jo{pc@?jfu%+=wrxX=cic)F`<}f7#QywIvcIdp*wtvn>XwNMIvIq}|M+P2^hooI zEhYbNKx{ot)b2BNL{;-2#<0R~)zlnMdP=K(vnS$*(Hxi&Gf!99VpgQw4K?+Z%VYnT z@Qz0Av3iRHGtwh%);MB4jUxgg+Sh%QH6&ry7b>=dkMUK@J7W3q^DxbqAij@^x?FE%#2o$D~lxab!b(fLczp&41auN?aejo1H}KgJtD0;*bS zijFqC=@AjB#H2}|zZ-NFnqa3jIfDbWNAhTy84Hg z!D!gg^3@i}?|TU%tMU7XYph`j1`-tr20^-S-1)%vBMnB(cE_eHOcTWY@`n#E)D>Pt zfjayW68Hc5*IyIw?4fV%BruYayq00X zG78L%|AaMcJHvgnyt#&`^X29nIB=WI#e8R|{$ka)xdPa+s{aE)5X#W_7|F=>#ZEaoO6%g#u3JQqc#`ur zX>9Mg`^92QWq^M5ds@A;`7BUK;by-E2^QOM`(Yi2?m&~154Q*Gx#VNytUXFA%yz?# zYQkm|9KUNEJzowy4qywq{U<#H!24gZQN{ZNZ)e7eRv^6*;H zy!eS@OjGf155v@oA8kzFwww2`YFC7HNw8dk+pm&$-oht`dfHnJZrXa!{GgJZ8Y}Jl zhZT=_!5t|)ZL@oexm~G1{RY%uArB}|H(k#g7rC=%J%i@oJYr^RFH-_eg*M&3*l4Y+ z-r_mV!BfAqYx9ax#iA?L#3K&}7`=~8bU-_Pyx$^OE(; z{mG7@f?3xOa@zwh@x1e!si5ZG+-MOj6S(_Z4LUY<`{P>>raH}$VP2QwZHv!VV(66YE8e{I7nfJ$v-XzEeLjkA{xSBB#p5ul7!xTIj z8=yH7WkH#BWfDAjwK?kHZ6=L3+I(qHD4ldo*v*13JgDn{7xWZ};eR0+IXdBzJ-;I7 zCjj_FYpMF;`F%MCk|ld>OHpXsa4hWM;PcoxOf&WES1v^Od7`b$4qRFl{1P4b`vu$3oG0MW85Wczw7eQspvwm z(!RjapR$sBcKcM1`5a<`Y-%#7 ztb5Ul-_H9e5@T6wEBgn@ME)YmmTJgX<)UKvo&tR4-JxQ8?E*Ba_w#pLmGh_@KBC5B zF;csGybXAc?n0Xl*BsAs+;Y5Z{MMJF?`1!!1dvauIC&2mZd<r}R`vL7`%Z6YdzEUr-Mcpz>&9J6nw$~i<_7m|``Uo~;24H`e6`NobXfIA z1M|P&KSC820KM~^jC*|P8|da=V8F^!1F@Qqc#q5u25n0|Shn{Td~F6=KvudC*;tr; z8-;zYLx9RNI0%pkB1~o>wb=O9Z`cChOKy*0P}k0cAM2V5TdcK3WxIj!6G*H3_-7K% zM%m?@r^KjpA%q>dvkEMaCx#w;)a6D5u@{nYTo*|e;9VzYn?!kT2EW(TGXmP#$4N7^ zH|x&7X@rkMyG|QJ2HD1O^}G4$?u#h)C_H8OM97)up08g9Z2 
z#a&pa!h0MHw>y20)>^D3O>X2tzi47oCP(M&em(wMk8F;Al2+y&OAB)Lx>Q_ZyHfZv7)VBQo-SuP$SjMeSDF zO^~jFi)I~O3E%}2_+6n3*IHB81gMj8_my(s0eGTrMwQGU)+Wt3a=2>jSRDQl#KhBZv(XYvGRMt30Patybx32&Xe6%2d*S5c0koGHo6iNu&Tbhen}c2Y&)dGg?6=tIH-DlS z*?=NNC=D0_?)0kD7|9D#!L-cYtB4}k<*9VItGHIZkzo$^)>49wE9gm7vtT!C5vykF zM1TtCCalx60OT9pI?m-Md9%FANPW0SGc#D--Gi*ig7Kcf)lj33003cMZp7F=A9`%Go0=VDOxS)K)$C1u__Gzrs}Aoqq?Wx#>MWsx(h!@ zd!E7AW6*PH>B>7=>FQZ>^U20Oqa&``Tw#0-nVO`wtHaH~CAzPpkz5N0L(m^83c>4` zm*lRA!(Gii`!Y{*cqJL=9O4n5Xm+jULa)gsK%||IzX&@0xh+`}}2zd|Ykwy!94WxV3;GNm>I&pHu zy@yTx*$6LToZ8Tw8A>2LRpA_ZqUOI<^xLVp0lG+B=uDju#G=9L9U){FD2QaA4eI)a z_a%Vo4V1LOQrw6Ay6@E_w`Uf^_95xm{euXn*J!$kuw>KV?Hi)$5;G)0+xx&?|F!k3 zCtbdysi1xy7NemSZ)D@&PYM#jiPl%9ThTIKZ})#M_-H&OsO#qnH7=?aMm+)4DVJ&2 zLT-c(g&uG<20m_prF_QJx^O0uUTl-r;yz|Ah|NBc^j0`@%&C4a2>~0(Fd? z&rimGoz8G5lf>g2!iI;EI#Bk6+V*E^34%Y3WkHPvPlPZsZdw?*kHm$&prNNSbBZ0M zNk&1l#2QCe1KSZ#=&2+O{U`WMIOPeGhjWSrAHB~?CRDr*YGNpga3j-h=^y6W*n;7e zgt=D~DF^m_89C|yVBCK<*GBHVoVXopuR5;JE>PztrvT1~(mBq?oPeZqshS{e>vOG) zy1pS?UpT9x$%MSctyT!98HlFo;oD@sjlyR`2twCvm(^nZC&P`ZeR>4;O3>(Wt@mH^ zeTWzPH4A$ioA&$e4+Ufq+e8ZvAmor_vEml)RTN{1ze;<%TgAQ_?%V3puQ~`uEnDU? 
zXykg+K9xmL=IYm8>V#yHy12#5&ro)Ehh>o)I74sAyilupa>|s+-HuNmS(u+KaVx${ z5<|>mWSp8}=0=Xr@bC7%{#>U{?D~~V&=%t!UKCo2)?;RXMCXVdemWhaUTsC-J4{_5 z)CdrSA!jBVh5{d(wwS3JzlIh?_HDqz%MHa`@iVDw-zq018~QO&+qUkdgR)Tihuf(q z``3CLW>JqZxI#G_{eUvkAiB(4Xg{0&T*9AkX(8HlIQKlg>ZA1f%eM4FU`qclrR#lB zLfMvWlo?)HY$E}@xJibwy)438L$gRzReTxS9oS$tV<1|3pw{|#R~(0;39IEbyrzt# zuWKG}svCk^!8muS05c&pwU|#_7VqsDxa*~1397mq9c$R5_fJt*m9Nu%Inw8Q zTkD28no&F2Yx=b-ycZh!@TG$Jkt*b(N`%Z^Q(TC6zmGKU*Go~mgKLsBMH~9DJMK?< zk*-xi65F=_r&?)%S`mAh=j8VhsDi(OV)0~thTTNF_T@Uk=zb)?Pq?Q8cz=I#3GA6G zJ7g(Z-%61g7%V!Y!WNp!j$6WgqOyck47WeG*%@}OyxQeyC$FW}w%n1rViG3LwO9}x z-HQ^MC24NPQM_^#df%`+2;WB1SL(c@aVa&`K~H0xYx>g@c)}QtK8i;(UsvDD$@xbn z;?*`I&-p9nkS7Z1nV%764V*=$zrNDLVEo5LbNF}Hm3=MQ7VDR@H`QCP7*rXUn#StP6m_3vQvGn13?mhBNz*Iq{ePSYOHZ|0li$VxnG+eYzpF=AT- z&~qupYp)jK7fU2ImGylpSVv$iMZrv;ckZ}C`Xlw0r;BGXO#)Lt{G#gk@DgZ?WyT3@ z&I&>*nLVnzA5PeoLN5EA`)!7-lC#U4gf94s(eZG(hfhVaRbAnoXqGPxbt_F27Ry!L zCnfXj)&?_ftZ9!C+H484o)We8;uny}yz%)rh~>T@^5pN~lw?6tcJ`5|cq_*tD@~X5 zZDT1w{=gt9z$#gwqV*eYckS#XT&LCSJ@pnPu2G_kR`ywG0r=9fq;6mrH=Ou_ZA`V> zwkm};sDeflGiFS3`@7O>?l7+9r9$$hGpxT*X`C22ZrPdv!%C@bgr0i}#oO}Q`A0%E z{ew;DhV^v*c*PP6T}i3cEqlXcQSdWI>6scel5mg3dtHZ@>s55~l|3*7!CbDG@^h6+ z6B#u5obNLCyW#6C5NdJr$xscuw9Wdi0Kh^c<&q31nB=nVa?B}M9Wac7HJid{ZD}VyQulqb2p2xa2P&lnPc-yvd>- z6e_i2-lBMlh^MzTvdZh93*TVV1i52eQwT;vY=y5|%l92XMAll;Gh7oH9j0|Y!z$zw z#9#M&{4b#$r~%wkABMc6>7fnxZUe&?vS8VARzC8cJn@oAakBiaarIbbuXVnCNA-am zD$=QZX{-n|p1xoSD*fag+uN2TF6wHU9+GA6{RJRawTveF?!e-F#C8WKt0n}f>K_^8 zl4rfU5+PbpkdSD_lZO#@weLDr@D;09URbnc8ChQ9cVF8m13F15S;{tmW51&;DadR% z7H0{uLTKsd?-i;W^{&1%@9Ww`bou>77bd7JvF74PfUsQ6N%tmQu2m-%hro1Mop&{Y z|8;ImM_HBoicZ~RL&&7qiX6RLZg4$G2C7C6tA;1F|0%gI^>uSNL1A9pF|o0Fg*Z`6 zjZEMja!EKS(2v5Y+hP=hT375ZS>HHLbI1&AsPp5Bkq%Nq zR`T1Mw9)DIW%XmpPyNX~&Ygj`mCk^tdKys2gawd{OWm}VlC(@~>u;TD?aH^LOetZD z{?F2eZ9rV5d|l~`ju@(=xwp{v{+0ph;iF-CQ5T!l&ioXt;|$Q7=biskY%ifWdp$aJ z&x6$`k>^DAmfL)t++vmZ)X_dpL7a4N6T-6 zuZXf(l`CB&keD;Lo*yHl$nEoa;Y1;SKOr*Ci$>OIVi;tDv|L_wa2yA2oC`Z7WyfI* 
zTvvXGSLjx9dR2NvzV+U#dI|>V+Ix50cJ|i&Rr=_CzRXulZEG%)&HjM7pI#&%adT1W~+s*7XPXT+U<< zU51-wP?*upwhK?08LHc9_SNgt#3;l6nvdSWPHO`xGn4=Hazp%k zBot};OkAWIX}Yw|KW9sbO8bf0C7CDBW}SBTj-zmMi)?I@hoTXbgL66ny@#2}U8@CF-{BX#4{F!RL3jl~ngqA14-*s|qLS^hYH~#i{gS*E3Q{I zXmp`?HWFGFE430kGeYU)~(Ol?H)_Woi`n>_ni@!vk=8vIOUDKTZ6|jqJe>arW@*pb|?{I zKIc@^xVQ;W1!nfNo)Qm=zEaP;6fxZV+Uw#MusU#cA#BJ8a}pG20>`~IyJ2FGhT}I1 zp;}BAX4GD_qWJDDpIzcwVqTNSL|^yoBt(dgM%AP~PKLZ$@}2Je>{6{B%AZCWOf62C zQM=6cIcP#2{-ys|;Vlv+9ao=l5mQr$!g}=;v-&URSIb^65Hk9r4R-2WoO1h!YB7`Z6po!HD%Ii_s=knOr(e@_r%PsoUzyc?ap_6af3WsxryXK(j#EZN+5od|!>kwhAw z$_>ssMr#}Xg$&xgswX*c%g8Dj@dc5hUbm8WuVixCE|np%$`mj^PC9?>SUiJgx3EsJFuUY9_Hz~lN6O=x7Q^%Tu;rc*mk_3ho}E{PMh*uE-+Vne{e1X?=}+W|O(sAIe&@>U=#!{_djHy@M9(yt%GXa!WM! zM#2L=R}w26A4U;yXjwOt$%MkP1c4jfUnjMn8KpU?Vv_F7u2PEmAfzJl?>+{7rehBW zl@;bjDsqf{4bfMH=xSLu3}Yrve`)@SH7N1q)=fY?^Qg@__;B|=i6P=_*<7u*XFY!t zRhq?lpBHV^K0o~B^66;1fUs8~(#&EOpq(E1iL~l$c2FbvcM{UC4fYy>%2WbX%zm^&bhZG{q;Qw&<)Le~ zTX2Wq790Y>-8D!EPH+hB?!kh)lVFWD?gS5(#$B2OZD0=M-upan%~ZWLRlk|3n(8Y4 zpy8aeZLPJ}`tHxUgO8j^X;O^~9Vx+1JNG9;ZxVS@{(`dgJqmG~(u9vQ1uVD^Q1CAt zeY(5fTC;sF%vjd|0W=21V6Rjj+UfXI_;xlFR|OOnn5*(8?=qF>(z`ks@xIYj+r>Ob zRO}Qg%iTN;>?!rDf^EZI$ZiM91z1S1?|08YAur}gwS^LYv<|nW+1{7q$ttGfVt*U* zTa0zZ0t#(<`BOw!y+FANFSL)K^;_=GQR1=7h6kWCk}V%Q#9L9Dd^M%3CJm92aFvwP zCN`MO31;lrn55Ti8zZ?=nw!t``&J}a!o{Up{^@r0!TjKj&u@6feWm^q#d1K)ftxEP zQ@hWmncAi$x6j96Z{vCra8@LJ8lUH=M?eP*~)RbLuIF6Ju4> zlOjiYta>DeLh)ZJR|q}Wb>JTev$UYoxaxgl%KI%^XK+A1QcB$%t~`^3mj@TWop5ej!WdRVD?+V?s>w3d$-VKT~7fezi7Hfp(Xo1 zm|?!=o)P4Pjk7L|ygpPt!}eNCk#ViZ@d~7q9vw3pcVCy471T2?`%MUhzPTxkJLtXO zyZ}0@k4rCjW#}zrx~3L~m%&<*0ZOYEP)hN~`6kr;E69!wzyO#6oyAHrIMCYCrv0`N z|LDNRP*f{{_j|H&oRgGPoSSRODtdL3>ziN5-P07v|g^-%da%rug2`nl!F%*bROUm zi+=c8O)V|p(PfnO3m?d#D+(DfT0cxQ@(JeNavU9M5ngmehK7bJyZK!27Hij<2G>MS ze`tkbK5|Zg9Iz3(!~QHwE)7K3*BO;b9cC2a0&b)nZ?K+P8S zco-O~A7SrW-e&N$CD(!VR`T=mlskNV7zLBGBD4+LueOMnTHTb~pVhW-YNkN6b###E zGWJr!p{tv>7pwi*!d^ii3hWk>L;pZgQdtQB_JyKiA2<{v7@3`&UHLMVYWd`Y`~95{ 
z0NFq`1;(!6gg@S~}j+3K4G$ZsWGT_Q4J&zB<^L+FJCSi{Jo9}5c# z5G~{k9;qQzA|i%O`ZBH_^!Fbwc_68G?W1NZ?>2c}!`S}9~ zN*A~6sX(|I%TCG#_;>O{i;RN$EhTrkAUpR358|~k@CA}@`j?i?FR%-bjNh~S59p5n zc?3)$N(Wbl=!cMwpMTSKY);%}C#K%L1S*`n-Mzw;luuSxR=P7ZtBpDy_$Pl8W@M>g zZ_8oJv@n(IJ^kWGtO(FX7YNId{u1#kt4~A8Oenz-9VYRAh&6>mGTz|5R3qQ;e3{sk z_xKx+xEf&jC@Ly;mj_>Uz(ymKvW5P^*~}rKph#F*(YN28>uGCiBP!;PxeiJEVb}om z%D})t<+Qx~{NV9F{2x_FD7o*a$8{kv$AIyNh4Z*JMSWq~mkN)Z9!lIMP)2$>l3%%V z)_7rKr8@k{at`3dL$T8YW!2)qx{1$5-mGs7?MR?0p;SriSPHaP7(1tf9A6EO?We4FPG72mJDPSKC%2b-6LJ2}e;lcWrRk{6QO0rxB2KkjiD0{{ zTrwbJU0QQ-Pb&V+2xUd)yVt^T*NDi-o(G}|iWO3zhozJ5=;j!(67bi2~qIX1x)(?7Y1AcRo`(jdr9(3h4oFk{Ka`iFDl; zvY0`?unIM@emY5BX(XGjbXPIodK0+PJpr}Wfy(n3=c=iW%Z7VX<$hoLxK3fWGGH+I zVt~lSFmbVg>7$~uk!5CSNoPCF)_(bP)#ri~SObf^WFk2L3kWWmNlV(ws-%VlPE4J| zJ&Q_OCoD2ix3(4WUN5Tve=dvqo9|helYy#-R+eZe8}D%usABDxdcJ^d) zFx4B*OLn$Iw^|E*&{*b~0JO@pt%b8}SRPn)kJ6YDfVfjFJU%?ECaA9yDxs7?8RqtE zg^G3PCQ?#|P-J9y!ytg6ZP6Spk<^OMN!vDB&ilIM7LPF}c{NqGQm0Lf4 zNc;R5`F}OG>w9|bkCAc9y8r_{b!pFN8ARF*dKi1yRmuucU*#02( zTw=|C1XPdwxc@io?*DinKX@W_61x=;!@)SftW$MixZl~`6?S{QK73c^now9+$c}F0 z7eKMX@?TyitWF9?j>C0-R^yk)8DQ;!B>}kt%}}xi+FZI1{W@H{sI)T*ptwW(Z>#>X zB!WH^#lkT`7RC!5E6l*CZmE=9iD!z0Dkb{OmEkm^ z4y`)@K6kC-VSfPHhh}GGu|(++f!f+;$O;ex++8o*f7RW}>9|N;T3R4&tCswDt7F)^ zTdo-Cww+@nwUXSw9d@Q^h@jD>k!=!9tmYf0*mNvT`J$Cm33Mh28IZVgSXua^@`17@a-6{Wc8h^lN@Z>g>{ zP$?CIDeqdx^~ai}h5B550hG0>p6^2p)J0M~)t~y~`q>L0iU_+WdyoyX4tpsoqN9@u zg#oNL#9&DMxCj`KFI>NYtgBZW8l>6(?KYCgRN6g~Xta}LES_wHElXXg(*^e;F_J3^ zsVFD^z*kwSbGZD~V_ily#1l*3NRw>2c`UVI0j&c04$HGm6TjRn_fDVAb)fmE^29q)t6P2`)bl1uU(d5 zrPjWLRvon}$3UCPhLuV7zF~`D+CO+8F=#u6TCrj;uvH0mbm#RY^_Bx;VN|gR%3{Ht zdWJa-U2r)1dBvMMP0K;{2W}$WC+p~y)Rya#)K`*=si6YUu}}{zU}POz_I?hRfHDE^ zK=}|uJRiIf+1cMW8%|*XTpO@22>D?Wi4vO4BY|m;r-8NMcV@>Xo`uuS;GspTnfW}2 zd|T=%Yn7Vga9?X_1aA)BF)NhQWBb;Kb^y4zNc5WGQ}ylA>t^7P>EF{d4g|OqNe}WJeg#pDd;Ag~N|6T{U#M<(o==#CS0{G6;qd%VZ;gWzEb$9z?ZXP^0V9!CV z|G7K>16}z~Qy+}|F(8@+g6@Y~0bho>{h1_+f8zX})<4nf!{h(gZBw&AGBPqKIgfXv 
zC=jpI{c3Du!w9SVuWbk0aZF51iF)aa%&aUda`M=T3FSYu&LpR-urS1+(9rb<_94Kb zgez9r7TBcxL-;IAyvWO=YHDo-aFRsg{+`Ee7k@)JUt%Az34T1l)PIw;pY+e(w0(1V za1iOZ<6cBDTL=kYBQ`ZR|LY}}T7Ydf8QCSkxU6;BeA8t;-K_YCMtm~pwP0{YhL2C@ zP5X#PjYD=BeN0dYdK4Fr{@MR(M94AFJ zvRr2TegTaCgF^ey_zw}OAOdNX@}0cw3|{*btEPLRgbVz$vh^_VncocMxmGmbGfffs zK}&!i)H)3Z=H|cCP*MjR92beZ%GUG+9|50DgkuiuaI*mQy3uC+Xa4Bu@f`T`rW z{;-I5Ay7LzJ60GA5Kh~?*)#Yv@(&UBED+z`Z>k`y8eRHQqw8g>BoEGEEkTDCQ`}DH z@EqIs6pw(N%U)dqaFvA*H{{ z-aIFljw%C1R#up*-$cIXFD@4%Nfp6zTq#S{4V%(T=}R3|(x6Qijr1!|R9?wqD;G#g zRnjNtC|gbCt8hxfN)_`E?y&~hF86pumQs8lj;sa&S z8pWMq5yVp`C z;kx{$rE2KJ_u(H@NQ?P;jtwOcNf&J%O4{gBK(Gy=Q}Mjj+suKSFNO59~rY<3HPrQ}{Y zpL%y0UA8nu^vdcr%W?IyKR$rFOng8zGJ)nzIU;acT|x4IEyPcgS9o)rYKT)SlcYT2 zquf?8WAUDOvftJ+|C*3t5TB7Ff07EGt_X%mCV|Ld0y9SO%l-2@*Ui;ZzIK})3ue2! zp%T_eo1}i0!C$}>g;U}A*DTY&R}YRmCSDWiwul@~Mt^Y zq9+Qt6w*h(|6tNBqRTLyn#{wOlP_>JO4o0brDDMS8rj(`yUs}#G2?+4U@2unM}3vg z@|>a?>FRW;c~Oyc2yEpTJrE6GN!XnhNMwD9*gwhJgq+w_Ujv;Rd7-r;viES;le@Bg zMrfS{J^JIQ{>PtkmvQ!C6>4UMF%DDS>j8o5MpA2OhKP*Dmb#P%&O0)dmB+g=E3#fS zsHf)gO|ziLWQK$_p|62@j#Zi$BU3`%@EHc(N(ET%*zPM@6up12Wpsq?Rg;#5tp>Yy zL+f_KlYX_*BChrRSC2*{zPAv=R!Me4Cs#y0t`=xpE2>u9N-FNXDaDrCG?{)_@{x)1 zwXJ&sWfD5tR~@G69VqC>wCCIV%HKC3Ve6+!8?DTCx@)g`=r8;xVf<^*4#X8B{fpGM zN|MXxz};s4<7Jc3^1@Q#W@g_39`qjh-|m)N=+yJ~kpC$6fZ@&qy?#uQJveE4VbgLi zx&0>(zJ3VhUjrOACzEupRr%~8D4=Kiw=oZn`G3G`|L?uTe}aXJy2Oj1AUFuuBOE+P zy~LoEN8#<;;EtJQMIGR{O%kv*GF%~jNTfG7Z!79IJHdba`0*cFx4`yz4t~%jfFuSe z(ZP@V(+WWI^#%}l(j5c8Ie|hSVg}_(U2gSKRd~QP75>?!#!HIaU7C^bIlOjYaII3C z1=JNZz?qgrHxX|jzR1hV!^FZ8`;+ygDg}Z}Dh7t#WYQhie=jcNow>B6|2Gy~E<{jS zSNF5-IKw{hxG#V@`0&4kaPvQx{ue23&jj2j=phXHlY!m*5d7H6#DAdRpi1Bx3AqxX zd4BhI5lY{;etdl&0HUL#V`>4BFflQWWD67DTpVk;P(DT!6f3WyiFre6Yirl0hOt)5 z&mK_dIqW7++b`EpM$$N*0bV!(&FIem8A6_F;hQu*M=DfQ)M@nM%u=;t zMC~xq+h5o#UdO@@Hgz5?6m+zH#*1hT)%f~fzn(rv+{Q5{P_jMRYxXSmEh z1j)WQ8y7{1Zm1RyPP#AwhveF?ws)_>!^0<_ZLjr4c3W_9aBy^6x`G)(oi_%s6mmpJ zTR$GY5IN&5DkDz$2f2QYgNutaHdCTPJ(9tz1n{CAh9L8ci)zDyGFKrQzV$`wV1DPV 
zD2iiBN=kr;{`h?INS9xaVA~l$DTa!PNxmsue7WWyIUh|;}G1BZ{8rIlL`a@$yRYK zEv-XORUpdUJnZf6ekvj&Vt?2KNYLNBd3kJ2I503MBdyD<3ah4ykaOB#(38xr5i4lp zG9@qO+D(=@ytJpxycb`&ihM34Bow3tgQlV-ZH!jo4DE-cRuggC7$5%lDxR*TMLX<# zIcO&?9&m!Uq5IWF?rwDKfsBvU9m@oi!Mxx11E(2PRjEA2Wi6hvNz`Sab^qG$D%I8C z(=0=kIfobEJwiV|d?G6+D-oiQb=j_14l9oZU$Jdut^fQ?Dj3~JM>;}gO7pr8`wfjae~_~gi_16yFG`S5@! zv>XJTZ&A0Eb_Bm6usP`1wmhai5$o~Ve!)!gAe2O9Mf7@ynnduUf;Pg#9;PFBxHUaJ z9jFPmv_4uTL;B-7X4=(Y*@nGX!N8XF*&WibQ)C%kR%|HDdjL5yE}q0c>C=9gPg2CR z`5m%eZzW9S9o9#kqomJZg6(w+?{WOs%L+4<3!8=w7cHz?P49U?Ncov+@Ody~`( z-e*MTq(>JR+&Z|$P~JVvP^Le}}PA*qCrNr32(YLrzX-z@>;2KKK&EJmv?d z)H~G5OiajtHQ-?MSM}%RpImd#Cl}mj>kMtvV4T+D04uoS3nGJzG==f0>SfLwNj&XF zkiNM&C#9d_uk=^-fh|#N5+`Uq6+3XT$%bJ*34?tM#<%mo%-?{^fwPub?)ZOREAekr zwkW76aR|SCcBtkj5!p(`3>7cbyjV*JXz1Z9kd~&Tp+!epL|8vs@;F%}4{B(#Dv*?> z;*Lj4EP6Ru8ZH|hE^XtDoM68|>-Bb3EH@OL?XNK}*K~gZMU0-- zzaGn2Dk*>yj>0$ONhI+{Z45rSZb0zf(xqlRqe&e-q3bP>rdU`djPGa0GVyszIVHF@ zc7k=1?e$kYT?M8ij4#ipJ1!}%(7nN&@%#ESOs~lpH2cUC7Wq>L7m225o$#NB{g~E1 zIPv&ZR6MV+t*yPu7|+y$)J`|r!fv%V-w5cJs${854sXMjFz<S1cd_-e>{xN&>MmJ16ZSyAD&hsA=VZ-jSZctg1QCkmf%0_#psU1Nys=8HE(M z0+|J>lT_W4&o#)g$Ilj|bjCv<)svfZjoT7;VFRdlx(2kJ&G1UAou6%lk^PIdPDJS> zzUAq?V@)*|Va`EMv`B|}UNGb7G@+QzxpY;2b;H6p2qQ7c6zB1AbpH)eA4s_fkPT|e z>HH>zVr*+gFSMFriOzI@l2X}c*ugw7e{tVOKV$|ECbn$M1X~nizO1iIBHoRlW*S@9 zlzp+W+9^Na^=a{As>#9N4ipAo}|vg24oljY!|q|MOxH*B3pQKEUc28D=&f~R(rnmDBc7!5t*n&;ai=_ud@ zF823>copB!#Hvig<{X#(y7oa0i#2AhE4alnvdS)a185=R3(MNAPQ;q4Vd92&^@LkA zp%eplND6P2zz{i3PMl?kF4y1~ltGNGA=%kTku|~VFU=3?mQpymEc{avI7DfJQrN0q4j%6}c zWc8zf;q$Aq;JyxSF{ZbIO+tgriDoIf$P2rg^PZ@;?r^lDZ9xE)dbqf&_OJ7M2_ zf*78AQF0Pj+`p$1xCf!0A5aUvVx$*0oE3)C=L}I?#< z!JUGn$s6LRg1(Egc+b7Mu#7}C$cplwvIzJJ3lH$ga5!lvAy!iA!05A{R18*hs z_OVFPXE9b^eF*T3CSFgaMN=*p6MVHJjBo?Wj~|GUq1g4Wu99Z!^5g<1 z3;z3uS_^G#^~sXx*dhZl@v-`p#LK^!D)x)IV&MI{cT_Q{9!KEvfFM}sumjvPT=B!x z^PMVcFvq|tx0FmY7Q29hywxv?Fs+Bg1P?$F6L&#Xu-qE4lPnBr9wOzy zNHDr~m*dDpW;kxB8JX+RR`pB zqiLwQP@0u8TF&A#GMFkXc|0JrGz62@am|Z-WW4^WjfQZ3nhuho 
z+JQce`d$(uM>lmUD}-LODl@UASaMg+F)JWl_Z8ZEnB_ci?n8Qj79qt(SJN!JlqExbQAn^PTHLt-loyjr7Qt2T`E3HYO_^zbPGhc{cM4xLA!CMC39hY&wx zdoc;4`WoBkiIW8()Kx8MpX8hrdd!;RWADLlduO^&a38bamO6^Q$A%N6`e2>j57?y{ zn~bMip7i53n6=Os`uI?`(?=!7~STEs8E5|t!3DQQ1gKcgn{O-`*EgARZf_`St z=|)`_VhdPBfu%KhLr|9a*-D7v-P=a@P}`Y-I7;e;exp*5<0@^MC#fv9`G9IGlChRGWgMG_a@Us1~4xds#Q!OzL58pqj$-JNtYR3r1!Bz}TzUZx= z{9O*;G$10ayIH^@zc1c@d-1?jy~wa+l?q$7t+4IO;yODPpQ4%Y@}#JIEM zcoHpY>KQA?ismlvN~l-oMSpEfoX_3FdK$BnSJ~%vS>x$y0*ZOQ=qMDS#8nRm9h21j zNG9)h`NEa)G^P044*Mmhnq4pjKHV{VBjl@_$PcaWfv&1C-)Z<52BJg}M=u0TC%f9T z+dQ9#)L48sPN>f1c=Eo2*KBH#g<#ql_1!t_FLF&HB}E3VCMVO0+8mZ>@6z;xYvfk`dT_+S zqPBg}{&qKn^S_kaWkjg4k;GE8e~3TB))wCB96#t%Y*<$`?%F0_ZyoaXI}TctofCVQHZmH$|fiCnH z`K%14d+v8x>5aq6iE35C98C{$qDq;3iB!!rjyxLF>>)2Wp>M>-1bRhfGw^NA8zBO4{M8Gy}lKTuQZ<1ksVl>HZu_eko7jY z1R2c#wNC5vr4{lLJ*x%rW*xpR1$<`1SMX-w^~9|+QQzCOLwA!TVmmvl0K0rmGw_{r z4fjlc!zuN|-id^{quO{anCC0Yak#tF2hxU)I~>1rLLD6jo^kwcm3Op4S*$j-LzzM!1&i=dtHrsoER(DLXvW2~I3 z5ICc-5)5 zWgZr`yQT^BGyP@=Ps%vSr#sJ36Stw~UVR^>Z{S-^wnmR}*9UB0y>BM< zt$X^BT_`lmW-}`}=d#sdfrm6it;lpOihm+U6hFjSp~fK`_lQ0OGK}r=9o2wwypPbg z!nBxUR_%H2`USW~$I5!W2XYsjctBwl*c!nKD?Z40Cx3d8D&5p6iZ^epBJ<43Iy<5I zy)WjuYZd%kwfFPksRwXu;&b1IYj-!EL5#Cr*v7g>`-ZGo`v&yLy@qmgx!e{}9*EqKkGfnqP%{A18%ziiqfm$IEYG#&M*@w zXLr`layBug+j+~`^ZaxhwDC3V)jYxV@KXHfEN3i$4gwC!vvRy)KHe0{j9@rsZK`v^ z`E2DK^dp)$-z1UNs{5|e=)$sL_mobPOD$R0?+Fc*Z1q#?IvSrRS>LROGY9PKif@<= zHYO52^9+@}VVl*x4iYq(9g`8;&^g@Xcq@=;e;nj!By8on9FgJu$#gSI)Q1*N?Bg(p zZaz)XQ9I&(c~#S4))>Hi~W29#U!szQg*KZq#=Ixb@$x zP2=dFIkE*$+2dGZM=_jPTLh!C9XK8XdXYwy*%ir65y zL3q!3fy%?FBBY+gY^$;>js-fkjlce^xDxNOxQ_}_$9HvXS$iT{OH?m_nWbb|2f4gF-H~CT-31n5XXwKjCW3_SHmBgK(T55xgUTt6G=4J z?{%puB1WB$Rct>u{55vBKk>B=Z`Qy!>LpK3C-(kHV+bXyc|Kt}aJGbz6vqH$M%hpVI|Ue#?4@#m}7A{D)lH8L7J6BeodN?-}+AsWp6 z%;i)omc_vYJXB0`?^lt^eSTNzPc*Zl6*Y&-f`z~Rz3wA0%3Cmadmb=NZuDT0_95n2Hqwx+Zk3s zj0oFtze{nh)-yE-t>G1S4fMAxatl%h647NDtuzkFpc@t{8pd$LM#K?`feE^68z#>ACl|CX0H)8K!yY>F) z4Y;WBsT6<_h-q-)Xp3+&ZaF=&RKGMf~v#0&nad7MUM{jKgG!jsZbA;wZhw1TX 
zPn80j+(F4VKP*V5IXlTmHIaGOB3#Ypb*K-re-K;C~SScs5J8Ex8{JoqL1PV%0e@kaHOgfKJs^}LpcR$ zc&%gDTUjY-U4gowaA~TwF_EabG=JGda)#vA7sE_K==+3L@pCjf3W-iu-@CjfIiohv zK8MF@M7b2_{O;|O{omB=ABW`>EY~vk+upDqiw}F@1RQ)b_AXrD%gC;byC4t(eZx{K zckxg6dQRMjvAxaQn$uvCyf$$CbatF;OuDFCVw{qZx942KENINF`P3%@q zy*OHXaiVfuK35{-QxMVJ!LU9vp?AIcbvWTe@WCB5beTFh)>56F40upnc4PW;97#@>pM?c+oS(-R@ zXcCMw$taHD9!!0RIrg=oT&npTO~N<26SM2dWlUB@Ks!hjIf7waJ1W5w4G78erXS0V z7f!6l{O2`U#r<$@v}5cZZ0Alj%spyzE+o`XEMRtRWZ;8}7_lQ@Ioon53VxN8Rs}H|T7|$moA_Ry3Jn zf9q07mwIRk%?_MCLE$T%!_I#`+BIF;2Y|Gm;4uRz`>UxhqBhtSc72q|SyVy#hQV0e z#JL?uCi-WKp}$+McVLH9`Y(#Q>Ag|M4VXCE_9pNdabH)*;|XEM7(xwD!>`Wmem*t%H0nh}l(Y=?qx{u>5~|osg8~1w@u)3H-}HW3|_hHN_}j+XREtawjx~5YtK$D->39x)}1=) zTQl4JZ|0RJ8EE}^&atZ;7L!y2wbm>|BG0Ld?R)ax=PIP6t6J*!)t!@0mKWk}>0Kem z`ryEXmywwe6eMGf=DOoC-0T?yW_JRzfABN~zA|rO!6i~28G|1n&(9$GOLX6GXLL{c zaPX`~QXZux-_mpkOH^`e(C@s6)q3FWDuuv@)@0!GJ6??;rU(ubJ*Mi9~!8pI(v+t zg$!qpPLS6;vr|}^G2+R$!g~YlZ@UFg2&fSkn-9mXm!_U_a%qKSAq0g{VZcY%p^hv{>kGv1`0%)W>YE#u|v#e+{t z5eoNU_8r?NOw|s#uHlV7_#3SzGO0W}x{c`VmCJ;;bawvF>TOKO7RG7}+R>e9$*{92 zs8^C`gucGjKG&OHXk%%)g>m`XsgwWSzY8(a+cPyHU`q7Hbk(+Qd_C;xT(jP7 zaQ4M4Sij^VhLb4M`JKIJJPlYa*K%KrWb zDY1SU2nB3TDLjHD@5R=+Iia2V`}nQ+!$xC(3Hck7G{yRm{ww?Ly~b;_i4wyPu<4I{ z*Pol1SmjI*aTU>>zS|j_Wf)E7`hM`f1&b1ht{R>yBJ=N(5!Y(9^7vLQVn!2jcVc`+ zHGYR(;_k(jV}uOiiS0~Vy*7!X#dq#4s3$RLvtmJ|%lSl3&U!_niD48R-3r1Sv?;IX zq)wZ)fRFJC5~(CcL0PJ;CoW&zyZcoA{x&!_as20Qym+lm-9QGPNFA9jcU2_$^RX_g zN~HNZmlw13*IKmd4swV!oZw#qUzIyluwmM6aDrlp35{F$G}OnZjw44VF}LSp-RoXR zYZ`yN)u#T+FmXiSX!!|IK1kTG&Os^|_O406ydIh~96H((X}s9GqwXVsz7m^gM4NOF z{^@=B#NbcHY)M={6Ez)x8;V&7ffu|T-h0;DVWV<*m0;b5ymmF*p)!?^x-Ab9d^C)J zI5UqAIgaQfp`PdT>&kHemKlwq{JV|qL}J;mW{JfB^Wy0^cMZW3CbhRG9g&a%= zf(8r-Q1(m$L=1m4cJwh+8y0;O^N^QgKe9iKA`xrr<@8=Li6G)v^S=SbzD{N0^MUYm zJLed@`1RifL*OZ)ia20-Ki@lh-y9%60f-D9%Wscg$fEpL@$cb?I~y%|vn}1Q`Ko+K z6b&_Y-H1Nqs^^{sMkC)mkg`eDQL>ZOA`|&2`J|WsTdC73u%W~|Am%%(ljeORI$f%Y zoBUo~I{9KBa^s2Dq^teXul?H3Vif*Klb;jKb8lX9x_njKSa&a8_qolsl1mx;2la?r 
zl7^4$-!hk(!~n!DFKray+1!}|Nn%gd8S`V_Rk``8`s`uof2QcgJe564LMWE-*SaN-cq57cse0@HI)*1YSIk-N?$7lV@2 zn-hzJEqf7zrntdv?*qQ|fL0w4xnF)Rd6khfcu>O2xZZ;hkF6Ck2UTR*yJxxm(|U-r zF=znpEZS}Tdz;G!gfbw8P09=HS%t_qM7%}qj%Jh^r~M~<{dKhke#~rm32w#*^R*8$ zn_s_veJL!I@<0GL3a1kIo{W#D-t7vCo;&#OE!5K`zkgFZIXUTHR75Ls`7@BS?N{8R zF--)O7fvA92}YgX(B+#za3koYEX5kmmTU9DaJGV^VXws!s4+Lqq9%=Qqm0_igY8AG z(JEV6dQyV)uOg$g^b*i+ayV_s_R_5x1gGW#?_}3R^iYzQ$Br@5s?t|+ zp$uEQ0#5ga0wc*JlKddqf0*c1*-s@`e;`er&nP#~I34^(m10ahu?1c$R+lZROna5p zH#wO+VJ-0&tofDbe56n0^DlcF<+5>(tr$Ve`aV0aX2DNZNZ1?*Z)gd>*khyU16gAX zQ_cGOT(e{Tc^qcc&aiIl%D#!5g4h`h+$_6#>ym}67Lx; z$jwG9fBCt7i0O%GoPFDUf_%lPEMFxvar|rPvAH~IVN=MA{G#Tu1}Y$MXp8xtjHBWx`AMsePigsG)NLr|@Kw&6)hc`y_n`Gs$P_4_BF#=l@i%ls4 zMr8l(Iq}AKycNHgOasQld%(ks9FTR7rnt|+sIxSp-0-DHgrI1Hu4np&#v!Fjn?7te zUax2s_|cVe{hq|R!`1te2Fm>ig|NDqZ4^^;aO3kcWSuPXyPS%)1>arq$>M6ijeysFAh-Dr=S`w3I7vV)$V90ku5@Fw`bg_0|F|H4=Njp{^ zqv)q|TQaHm>*UdUB{%QA8V)AlmpuV5>01$6{mM7A)JPz&2W1rn46#^w+Scm&y47y< zUz2vHAU9GXSI%~Uk@msX1p)HmHG@a#79ig;5SF`ZI`(3;VJN<;9lcHIfCMH__w1BX%+TCg-qx_ZM4biI^phSJuoN|dt{Qa0baZ8V=g0LNA$?o)L+!Xy-z{SLTFaRpU(o`ZkH~I_1*JGb z2sZO?)QRG7Le{fbM_3)9g@>hCQFf5Za0k1bGcSodum8)SmFU;g!d^{!EO;BY^n+gM zwEX-!O&*E{9p6JXvK|b7IPls!z_S6L3)3*^FUvwZ>h}ceF|-sE?SCD}OZlq2S_7k- z?I8%)TzMRk)|CFWFDo%E-Tf{I`OG=3rtbE%IlTkfwjJKP zZQgq!AjMW^rfXJ|4Z)+i`F?O+2f*6K4(!R51)=th?3V;fr_K|kk+9YmBV}t<+i=;OK7XVwhRB$*sH9P zr>C;W3j-lHII9|EBkhrF${o?H%rF!Rp|Y%aPiKU0Q6=!vd)&(FfqNxCYzTB=ni%WX zU%RLGD>#u?{Rc&(`v~63yAR8#{ETpx56u_6@fE!+p)Ip|)wRk*>c}*N?-n~I;f(f{ z1XD^lCrg23Ynj%>NcM%gAA1Qs2+3o6GduM@6oVX@EesM{zHWb{#%pU$#qYru_8L^q zA2NGQv^sr>$t?nt)jr2~U*n{ID-kt+kLcEO`$f6$W4ggaMRlkL-W+DZ4onqeN zyn}ggu$pfm^19ulEa)6$zCr=F*ml1?kC*RmV`;0gt3>*CQolV%xk~QO!^E)@3SSfb zmaUgKA`w#4K(2sVWLlY%H>DQ@04nA9P?++`D(^OivALF+- z(!GUpF+T}Cd;rrVSLSTgXbnT+E`0CAOoKG?us=CbxjD?JmcoBd^HKiiwr z9-?@mm!i$1gHN9dcQ&cUvR>TaB5dCexi*VpiCaMEj0Cr2zwEJ=0plRX+^NVh$~i7h8nK? 
z)kiyJa>`b&JoR)yvHNbBaUkWiN3~%lEo5o|9&;sGQ~z1f_x-v1TiSDlrL<*|Yw&$Y zEsR6O9*`Mwe)=%wox{z3<;-^t<3~VI$_d&$UFUOzzG8N1G5PuF2|?ZNQ9U*&Aql&; zV-HE9?B-3wTw2sE#*3WU3l9cJlNpTVPTvnLWi8!a^Mz{cd2iEd#+Tf4GUksOF5CK_ z;g*AG2^!c`Cn4}Sg;X>Lly(Lv;CHDc zWu&G3>?ilb8G@#BSq|A=55*UNjJTyNqf7ZtD5A4oF=9W{0vo2WJ8_^YIeazIq;AEa zdyl^ajZPX?#M)2U`6RJ8XWaFanpwBu$$Oqwi7Mh!#8%LA`PQ%9ZpngXX%Hkn(RMUm zKl@BR&ljoZEWIzV^_oO}`EOS)HaYTq%6c<=sJWRuEWZV61`GvNJ6s34g z0_sMu2V|p<>BKvU!mr?cJnGdcXi?)*Qnb5GueJ7@J+b$_p&-oK0dAX&4u!6@S%dob(ZVR-9LW`GD+)61>+}#SrwMcOf?he7VP@uR& zffg_B?oMzhPH>mtLBrke_nvd^cYoZSUp!CPvi4jnbB#IX7>(WyPV0lrAv^dVleuw- zCmWociqRAnL~4x&=*jq;QBUT`op^tgSzg-jJ#yfI5b2S}DJdZKr2#-S9y>#KvfD3} z+z)qFM|d)4s$!3C5J}KM1Z;w(tU6lxIdPLgZnv_POiw*%6v8!q~NChBv(tzPOBA%#TozEcV3vBWR( zo9JUWE7T#yg5T&9~JWcPu>=jsEeR;s!Y$gpWyzDfgbEja?g1PBw@gnJSw@b8nU41zW7G<;E z=GP-I7#xuOe8*00d2JUOV?EbDUPrRga)6xU!SXt0p5m?7&|3lSlH6}nis}{o6c;fo z#)UYT?m^#sgNarbUNE|QqA{OW^GTOla=v}BvLFc ztYE=chWZ-uI|;e!jP)b5%yEA$v7Ow@rdA!H;w3TVNo$Eqf5y1i6W@k0ggXWAE}j(q z_9cl{ZghkOF7bd|5soZ?hc*5pXnfCb@{bbXNK3vY_Xz)wvfRtbjw>BD|2LTomLS2) z!x?nNLcu|YStb6E;vcl1ycdaYIJt+e4MGjRav3}AlL3@@hGuT zkm*K6Clz+lva2=P(w;28FQtT&H^Y7;G|UQ;nk-(P-GC=+4vmp`TnD0tklFi{s37eS z_N_URo6-aVjUgD9f5|_+8QOg4W7g|6Cp%j1?2Qr*`RyH`*E=?D1C95E+~#`{QXFA zmrB+HSoNwadzKvs$$`q8xKowkJbLvb(gP%%P8`g-9|3`PR4w*jRCi=@$%f?w&Eyy_ z-TUbjG2iKZ$8&pDlktS(rfXx|%%)bq3m%cY_xnavkKiGt!@7NbI9d3IN8=}!`a}A) zJ5W#f_wzwYdr;89X*VWV$m{Z>-|aRb&{CmaqyXk+c}=IB^|oLf{yko5#YY`r1`Fe2 zLoAGL=hNm1%NSq_GB~2)vH+NIIRg?>iRy|%J1f>6+|E*4d!8~XwWki^vE{@FcyO5) z5y$Ya6ZRTNQ-@mQo??;JMy`Md_-ZudQyEk~eD;t2J!?r&9Bb&CjCe@8&*6!K@<^ zy16xW*{=gfO+)_!H-0CGA{L7}Db)UEO21=Ril7^6MQ zsFYX`zN@v;KvzcGU;wMVa1vb4J^oFUd<}uIMv3H_2Ye8=8WFfj@_FP*o>Z`BVW3EO z!l8Fo9^SZtwOSe_2@BX^T}IZ-MT9?J|DZTnMRyH9x?xKE~q3EH8t$*?p}W? 
zGgzWo@mn^5HmLFb9zvu0H@<~$O&E0 zFEIAty1!9}9&amW3}H&GooG)SEnpn$)=J(-KeDf7*Es@JUe5QcSYsJG?8T{}YQzfI zKzVxU6*8u63oc75FQt_EJuA2aB!a2SThY%>v)bHHg))*FP;ogZmIaI4dVHP$fA@ky zXK>4xAzISneacU3K5YdF{~s}@E=wnS^G%Fi*N<3kHnWnC6QTlw^FE*NN z2BV)kx=a?ju=A75PH!b-dL%hb+x5stl!QJrgM_495C4ZjMhnRnh5Xxvh!Bj(9Q z1>h1fS;rGqzIZ;rC&})T#a_|B>O8hX2NRaJbp+_Uq<}6RZBFZw4Ns|q=jv&+`joOf zfPFk^GnQM9AVkTJKkoEJdfS9e5t?0a%_}3D1nLvD!iSLkRzMj<_CFuA?bD2%%5lUT9dJefp@2I|8~svU2n$ECjM=O)`>`J}K;BrNVB9CCud~b! z&*TogTwLG(MTyf`IWOeL6Mqy^(%%N7oAnc^0X1-&) zWT@-z6#Zrrl0{3akQ(xRk(1p6>XmsaFXlB%zWioem^p@d6aVOE9oZ%-IkKoVWLp@o z!GozU-9KwckkIWXasBJ8AYho>c?-B^gRW~`8l}$-00l9i6%c?t#B~y-px~00mKH%* zH@C?~Cy*x?{HyYd-`6s0I4G))Lx{qEi;?M7sP(bVk%pIQ=} zB5G%j?ta@lSi$Nqhg}`5)O#9NGhtUJdY{;{<;&RbGqAOC`DfzXAoY~!L~Q>-Ljz@f zP_Pi~$5V|5fzekQETVol_?-USU$^5rec&!yT2zisut_A}wqUeWwdi(z>O;k^lF2`O zv>0o9*SPm5=l58{c!TF|uP|I8(_=?rD|=uqbMDz{$*s>OT zGKVOZQLuE*4TXnNdKz2lJe%w1r2O}<&fx0^7yhU4hgs56Ta(_sXwLaxb}7wcay<{V zc2G1Bw-L8e{Ou;B$2&HBVS0o?E4h4yuyW?M?4HOgA(-<;K+0fGCCHfq5UM_2j)@x` zS@;v?&2PEXEbG0f&TA3o^TBJi=Pa3y!XW}*MfzBJaf*7cP44=O|Hw45+DEzz^(YaT z*Gk&ZGfu|yS;{bd*foa0A%dKn`j6AFCq+dCI zRLgn&mpv3-Na&<*B{MZVt-bx>C5+$MeD)&)E$N`w^GV~b>ybESSiVUh{LS?1hdkrU zvVEH3rB>R$LVd!yk7GvyDHOL}Q{ICVDps$kZ~pRpqhlR-l|h~Pb_Aeb24WQC-;U;x zQ@cZZbg#jN+*#O+WegG#e2qN$MCJ@$#G{-(oY#Zdj?4P;LP!$HlPa9PF#6s zJMg;Hd#B)lNmJ;dmv=d}t5RTFR^^>@=N}5_%XSnP7>nDpgNgx5OtZ8&p%w$^ZE+2D z>4<&X%IK+inia67Kh{uo`N+_|0ashoFR;aBe& z4G&ql6HL*Bn+C%^{^~gP&38LZUI)5lk50loKfBT}Z)jv$cTO^fPosT{^*i-4zJC5Z zHAk8Tor(Q(J5tM*7Qv$XwyV*OXZWwps6z)rX-WdcO7QsAz!d~N?4`G}h4t5AM1pFf zqG&>ujL4r1(&>pD;rfi9Tt?@PTHSj`s2^^NkwNgr(>ER`lq8az!G*{V_nafJry;G`;L;(o)X&W)xyaq+Vo@EjBbp)t>ifOr@$o|oYi1`WKvLN;C5L!I&$;5 zQntVlASl-dhOeJWs83ujZ3h}Fk2|}**7kEPUm4cyrYeW{gMtK9k#q7~?NQTy_@6%;w7sdWL-o`N6KdbLIRS zWOXlgQxbyUp6aQBF%@J+edkHdTO%a4|3p~>(1YOc<>$0ClMJIolBoUYCo z^(ECRI=%1ts$BAAE=nXs76MjuUQ(cbJGf7dtU$kBTQ63$)pkqe9AkA=OOT?+gb7+m zQp}G>XF%CWgvEVVJohS-MpLraX)G)v;+J>vS){KkE&803E8gsT9THQ7hm`AUhR{*K zD_oWlB+BC5P*+go=}s4P1mJ2Hrd>H@zUog}3M`+(#BT^?zn} 
z%aq0JT#$#A_Ml;#*H>MIVvRDLGoA)-SKo_Mgr#VUh7PCk##=y4CuDTNi}M-l<4#2u z-scl1w^bGQQjZyepzqmmx=FtznD&w8s7ZIoP-;36$lAOsaXZEFdS-kG*O2$!5w3^w zPa{Zm&0;|3O{X0SEL+=H*|DuL7+-@x3%xSIv9M;4uAg87hHf`3Re>W*-POR~@%XCc z*x^*6zyua)`?D$`*a9J(tI{gBl@0NhKrF0<>&4!9dsZ|2_hW>3q-B{k12P@61>cWm z%SZ=eq?sYatas+?!tXP`j4zR8L(`@3o#Qz`8r5c_60@cLUu-6HG_o(cc1Nd*43z^j zs;m}&GJr?wB-6P(V`4U_-Y2eEC@rV6GPvVo#4~-SbvA{F&)V|eFe?jws}Sfb3-DwL z6NlIMeW8>?h{OwTfMQv=lvGaWYTE0j&?tshMb&i_D)XK{Nmq{ij8{$i#&I6oLBP<{Be95% z7fVD!@Ft13yLOhW{HA~T6oxpfOeSHDf+c96n&bKl4{ES(ueLEH^cRO zA;d1jV20VgkLpZiY@qxnqe3R#0a2Cz%#7Pcg-~`fWl&#T_~CaN!w}()pv}aM58Qj5 zue>d?4tvZp8hb)&x9Fkav})s087%_hTUCRrv>j@hoc%F`lqQMP0Nl3N=RisN4J+q- z6|_L#ox7746a2Q--yDV9^rDDL{n7jV%iaO_s@aOm2L>$24IvJW{zK$bM=eNol!B{_ zI}Go=#HR0AW~A55cmKf$e3mJGgu?}t26JGm(>1*ei60I-?NM_A8k8@d0 ze3E}U`vpKLzQLy_p#zV0QIBoZJYkvNwUf}OHg+(-X&!%u5Rf*Ohmu=JZCi~Tul$(Y zjELJx5xsqk@lxQw{;+s{!jYh7uDvpqsAL-;7XPrt#HDX@JbmKYDamq_F5C0%Gtu-r z8vm!+0G3%oN9x$qrTW^T#N^8k1nqET_Mlpsu*RbE4+|}54xgmNt%*@=n(G2>WuJ7x z$*x}NLW;(B%$wtwnFm%YL-U{BrMe2Vm&AxMjJ^53#6T<;HGX+2^uJo2vQ_(|kuPv4 zvUGdHP4kJX5!jlmwEMA;(!I>E@?&o!up;VUrhkT=)KxK6JLj2$wrrB0&{8W>ElM^Q zig)M!`rGlOI70*+@994Oh2$TmjouXv=?>gx)~T_AzE0;U`Sd%|7GA9 zr)3g5*8b%!o#E)1mJK6iwz<{H@3&L!ycqi~3MPk)X&6~fW6gxj5;};h7_JYub(Aiw z%~lw!xaaB*iMOtMw!=B!p1!cV7)Q()4ANK8|H|X|zv&$Mmp)qIkRH8};ZnCY zQn!hug}X+JuR*Zve74(jQ`lZgIG-B-47Uu|q4Q~_gGq;RWPss_Kjvq-%XPRtzT9y$ z6kN`#izfT(l!X~XzR59x>6GSSILT*~qLV=skj>#V=?%xJegXHnJ&)tGP+}hU^?kgw;Ag>k0#s5&$Z6UUDt zZrM&o{e_D#ST<0LCOAgrFxo&rDo`@Ats!}9u+`~?MuJPo&(k1`5^t9GH#Jlt+na1a zT1ii^gRJH^_r3@itscTtX|?WHaCxFomR`b#cNwDVUC37CDS2*jGTd9H34T|8v43`= zaIzZPtiMxj-lsZ?z4xgu5QANqYH3{f)FV~lFPBv@)cPaH4qoN`q?i@Kta5vGB|FAU z{;A-Iy2=B`3wdpF3w-=cb1Zv1{lRIYQ+mnT1VH}CQJKv*Hx`%B^sn(m$|Eb}-MUgI1@Jau7 z)iJslG*%xsDepRLxr%BL)G*QS3jbB2QEB-ISXkM~F1hO$wGL~4$fj$wB@^2f+NRI+ ze@)ObcD4m|CCT3#{FLx{8QyF+!}C-3|Vu(hia6AFT&X zJImwF#&?6uwEX{m?yO~viDv1|(_(H`*6uFd=6J#csgASEl!FEqjN<&=m4ExI#Q9gd zgQl)+8V4U-N=fnVxC;z4)BKYw%|Chn!PblFD<<5KGdR85_`p!8^!W!*53xl{w(!f$ 
zKGvvY|Mu#Cw}e2>j|0msN;M&Sc_rqs)j6AtVmaf&3}Fq$J9r9MdTH)7Li5()c<3Gz zE*NdQ;)#Y+!AK_H7lYT*;NrXlqza?#>aOek_lGl4i99O0ViV#IaoCuj9JYoh)5;IsZnFyIt&+;9^l9zp2;9-U+`sYDs7^Um zArsb;8*I4QzH5q996+SMSZ2(u-WT6Dl;gNf@71SwSRPxdDO$o*l4^T(wiVZgg@xr+ zIi{P89Hs@eBT|Hn{bhZ(IaH4Qm1x2Psm9bK<-9r{oTgMuRxKYeVS_|ITJ)1oaK&7I ztJ*JxCjm$ znkx}6=UT^R5s{CQyt9*IO=+~PFNkYQ#?sLc9*kG|Y-M09620h- z<6d7T@$_6R6|d2-_0uka*?;{ z#^i{f|FXTnA&}H`MZ?6_i8LO?RmeOZ{BiHS@RyN(bk1|_1Ng%!+kl2ro=hEhxs5-` zT~TVfQryg|_AWAt_=#4H133b7afG3)mSEd(k24GQQyhlgH+W_tg%~>}T;O<={5h1u zUnYF{+0)qynfA7Z&9fAjh9QNf*>1QsfE?)jA6qQ7#qM7VL~$4Q0|`5XW!*p56OV>hV~e?ma+|N8YMfySTlH6@q%gJbE5 z?c$k-eBNpIl?n5r+mx)M?cVdQ2m4wa-c`8!x7X*iy%t!)h&h}k?vy$f?Dw0I4#E-I z{mxNrqVJ4^-e&9Cq{SB!^z5FPLFlE)$jc+o-|Gydb|Tx7@TdOqkFlt@A=0&Z8j$-> ze51J&H++ccdXk|Yo>0)PiT(O88c#3mzsn?-Y%b4#C(*yfS?-YDzbCFpT(d3|Hm0{0 z-(yG7D18{ST|=I%hiAjG+$1itfI$;cc{QX6U zJ7mL6YSKO#Sq!N-*oTMnrXeCq-%)He;wC7kWOhbw8Rf!m&WVr;s;Ob3!GMcOel+hS zoji}zSZ5H>+Bks9tWov3zSAWmq1YGw%0sQvdPdfG8bh7awfSjOpwwd3} zmw{_x2f?n+`wO*Yo#~8hucNJM8PTU?LTS1=bIS|VsJ4$PzM{1J)@+s&I@gkiR<6b- zJpJjq9EPTkITjG&ST3|;Qvt=qaEbGZ01D*bsmrB@0O{wN{Jbt7@ck%#!*sgpE)PmuYEww$?&wk0e^f0l^=b5%5v@B9GlgIxzBA%s^j5WhPu{2ds4n)g%0S#qG)`ZVV5!l04bw~&IUayN`azN;1UH)eg+WUtt^+RVOi z5zNo2qo;qNDMF^v z7wU$rvAQ%$-x?f0lD+#hk)uWC4( zE6L{^XS#*X+;tUKd$)-!Jbpk(CL$j;Z&N=bC&cYm3_c`eiB3hEClskmcX?~;K8L** zYhlfi#{PU4H6M5~nFA5kZLaI}+z@y7l=WgY7Ud#Co!mNz)z+Gvb`t}N*kAW9b`v5xM2wqKq^Kl+qqpcCeF77s8l!v7VPWPQDLhtT@WG z?9N>leD)VSt)jbQRGi<;homa<=lItj-elkF&hr&h()g41C(mtsdJek))waUS<)C5; zWVN5?9zWTDJ33pPuZZT2S(a*(|C!2hg$|VIQo6y-BTneAHL36);V)fg^RN?M;`B-R z$c8+j-VKXY0o4WmMH;U#NitKpMQ2RjmC^O0{(Gb6nj0RXuDc6OZbx-T;=}zA{yPO| zK(6iAQ-spcL!9IK<0BAJd1RtlEkd2!ae@?Kg&^TpLhTm&%`7Iz$63&uic`M?-Hm$+ z3NB;IS+YwVzu<&1Rsd@0Z9em*Px1-z%-UaX`+AuS*H;+k4wvom*k9Efs&&cY&p+g- zyg)kgecRfhZ|{%I=oypyPbTRv+6oCeW{4b~GnZJEoR73{94x_0O|Gky z0ShO(BoeKz^AT>L)+4P)x!Iz8sfC&z95t%=<^rDkJM6p6 zJvdz{XFN4{@l&qO{wsIb@{daaGtSUotxYe)e5T`?zB-yA zUv-0-c0@{c_}{Za5~Rbe?ktwRB7@(AE9OAYGgSjw=X(#0b4{YY*ZQMxDaH!ylMF7i 
zT@*@OQH4E>R6?VO%A1Q|7c3l*vahwI4GDNLX499UT5FF;bhh_jhI2$Bh3lUEx?k4k z4w@3JGlsSqi9gh{7rKcaBxMx>Hq+_IE5nh_HXk$u;tAXWin%_{{n(+tM$>{*t##-? zLk(6|#{2u5hEzx5Q8Z0hX&QW)oIC$B7UNpYQFI@36s>;5@%9?Z;68V6$67X|5 z8-}grTb=Qfad*=E?uu8^W=&&OaX(0G;Z`pt?r}I;_N({w?@1kUu8zOEkGbokk1GTf9{)5YGLqHQM zMptmuR5toHHyA#w(6;vdqjzYKV@KVXpA%~?|9~6Q*yc+S zEYICHF_rjzy1;}J5@jqkT9ihM@d&>^dw)tIDCPRgpkLrJ(>tE@H_KVK(?~)W>5NFim)YkooqKMYdfvjhI_G6vaWtOxINXP}sJ&V>1 zJLUzhf3i~2+w4+3q;_PS(vaG-D6C~+*LIe(_8dI@cse z2N-NVbgeLgm=mL*ESBH7UCOoG{J9Q8xE~NkJXeT0+|3_Ki!@Y^pmrvHSiffwLfgj?Po0%pA} zUuh+WYfaxcCGEp(FBi|F1a$-w;`vQP`@!jx9oE8 !(37fVe?rbrIelykpgn~5}b zHEVDer3ui`Pj44~N)s!^!+&PyXr21(bqld8y~XHyKrFA-1fb-c%v&%ax-b|^i)96= zFYsA{YF*LyG(J|;U9=o$_cqF*3?{55k}p0}Q!ZyBlWkC^AB?4^vzm%A8rm+rgMr05 zN4GDT*yHl0Px=kOKM?nnBfku5rt%&*$q)k|Fph-m2eZni)KGi=ef;tIb1{pyQL2P7EWJCFwt`9v=yn9`4Xe zC$pC}!|1(-24w#U!yt10I?s1kHD*-OJ0qsiI|}}hweJz`x?n=OG)+BtwapiuYKtMJ z;a1m&v})pG>0w^6IP{I3J26ZK{h;?WQTz{%+{o6-#yi!KI*0BP5J>6aR2mK4iHkhM z``3-P!DJ|TOJq&*gb(=2hu6GMfZcn}ZsiWMeQeoaF&qg{kC0_NS8n1_w_U<3Y2POD z?=;NtJvz%eiz?O%Xhj)WD84ej=yZazHAg%oh)j#-jj~0UX-ej6X0aaeBY2aI)qUPT z`syT<_7Y?NE#Ltm=;cW%7P|VfW?{~-!7<{R>9P}|xSVb1_q%Nf4O@bxi-^jo-~!K3 zAHL<$tH$aHVLmAUvfv4i;)@-e6I0iy*z7xy_+v|WmCbB++BC1D2C6I|*&x|cd%QR0 zA6f3-5(1~I*^g}}|9Nk_@dwlUc#QiZJ#jRHB=|!|{CgJ)RZW;eO(tq>{d^ybipyzx z_4AtFG;I9$@%JP}FVDB8Xh$O74s2awoLCv#FAMVgxJK2Z0Fih_wjY`#J9I$d8A*)XB@Ly8? 
zd-22N_Z~BdifKQotL&c4uPLq-qtxB+)+h+w_1C8Gc^}oIOrAdB*54x$l2iBemMcfs zI2C^v`>1vsZ`M*M&=G_QTXy^y3vl~FoC&^FZze=XbL2%w^R?mvBB?NX$^?!k?vPzxT zm9=V~%$a^?3t*Hh8~9y^9)^PQ1ufUv1|yBC(ynEhOd ztZr%GM4z{o*s47d$k&~Gy6%As?$1}{&bk!EN3fC-6R+>>6)6ilr3Aqp6o5xU_JkXRfgepCXv(QV^;VQjyA{$n64>oj zIq*wleG?!;7@p&|?RwOga|wE@D>j_wic*}Wry48oRSjj(p{snr(7VwCt&ply^zFl+ zQDCd7Ko0ow^rQy+U%-WG_fkxtX`|AM<6Fn9@#58M1Ir7wFq<;<^>t&weIXJy={~c@ zBc$+czq&rNTusqQe26oeM}^?VW9Y;WB3l<~RudeG&&P58qKOvsGspGY|SdtROg z9B=)BzkUFKDI5j1dnG0iy6!yjW}2gzeGxw*Z0Whi_)pm-DLZ04POM_p%bx2gPNZIc z`vLQbC-|yN1_RA^fBW&ueC+0O8kLmw{-NSF%PSX?_`>^)w^!1WxhnFuDN8QPhxmcK zgj2;O{5|PA1*-LL60K`hHRE1Y540N7J3qoS<7t$Y?n=Q#K5VfA&XaOMo(HbsG|W0x zX4@YbzA|*Piqg=!?DxB_9AKr4>>D%$G&a01l^_?eyV$6!cl|@Dq?=Mx3nD#A#-Wyu z9{GTOmQ~XCLIJ0#r>g7i=+E!HqTeg~gNeOWXLWjU&K4rfBAqZPt4J-*GJ0UFxfT^= z3Zz;wVxxXELlyM5jJ^bS_U^mM1a%c>ezk#T`)Ub_%Nb8e_Mby(A5(d9L?QZJaAd+m zm47EwVq)T(EZ_};fI+nPO!+R49THzw9vB$N9{rKaJpCRSGJn?e>60;Eh zvt>L3Ymai++1;(XxhsowS84v9d2>1#=nC#3G+0g_8vc%7JTVvq#vlOkHj>)i=}h@f zGY8HIvW=$J#-o)N|Vx;{0vV1FNobiEGA!oyIzi7ezV{RF|^6->K z4_T=)N6OcWMA7`{=x=GJ!*FJmVzG`rLCIL#1i?>#KPwb9bQ&tn6#A&ts`#r5DUM$JAuHTvv>n%oT z#16=u&8ACrcH~G2UZ7w6*9U`lJLXiUJ16L;?2V-!2OU*W8!)79f%vmerSP{YxCpWbYwBrs-f#1xn+UD(;tKalLHPk z<$o8P7;@-#N4BW{7+}1?=e+eQhEjBPj6UW&_Km6`C6w1>A&=OshPz;uOt(N=mvg0` zKzDNt({eSlc{Ii43r}vx;@IUk{35+tI*c~gF!5Iwie7AgRMe!bLm1ySClCE>QK|k` z)=u+8bIlBy@Q6io$+CIA)*fX~!!YeR!GL|+PH<|kfE8ToYUruBnk^JnIcK3YYm9!4 z?I%&zbI=gzj(S|Rco33tv$tP~e!;Q-(b9`V`q(|<-fuo6dB~4$=_Wz%Z4w4h>xS#7 zjOWnTICyPJer!1Y##t#&CCB1Oqjk~^Qqrv}JTQFvVp$%k!5(mUL@65bt@yrL${=8u zK`QA)Y(R;$@jb({^|5KIYj2G|SGo|5WAEOya)oy=(crY&Lx5?!V4_h@)He3rGbN*` zQ)ILJ?R_CXCoE#AeC;u}Y)xNQR+fGpnDo2=E3m4^+tHED<7C-*zS5Y>dgc{SVX2O#7NI;8NbU@rz(*>@lJKQbGNB0w`-kjcra-D;!;(E;-`xVy+#MUE=qr zwMPsDGETIa`KfvK*Mw7-t}oKGE8%JiDB zcD1RF3H_IxfaFHA#)bwDq#zI9Y`6dhl%(=5*@xF?GKW{OWOOI-rs(Xru*(O9PZw?=$;L)0XXx@ z&jOm2cHW<9T|4HGvqFs=Y#6e{vEL~y)LXwx$cy1w(LEDs(D-ezE!cCT68{iADb1@Z z;ib+z-U)fOELva$ 
zgcZPWq1i&<-@eJu5G)W0YM}dq0Tq)dr%avJ0yoZgdE1?S;j5Im>W5Z3rOebsdbN07 z2Lr4FR)f}}V~^`2Z9DI7QefOnf<2;ZYFD)Gk~2ulxDf=`kImB+GL!_C%rHTs6bB-6 zJe-4fnl>8#W6}1^m23bTyQ-^&GO)tkScQ#LSZb+iV;N%^MmFFCc{ThnLtY}>0Hzvu zrh49ePbplbC7J@3w*VzB=(T80>Ow7MzB0qIt^a%B@pU+5p>{i2Zn9l&RM8c<_ciG7 zYnweVC{!YE_JEZ$KfDhVw_mC>4tCG#|3R9-Wu=EX7|^ z@TH|Sd*L>Yp%#d0R7_ZK+J)%`)G8I{WiI^nSchk4pN@^O*bKgLX(gSNF@1&#v zfH5hczK+>~GuB;gB8I7qdU!V>ooPN7a=kS=DGqS!B&54r^kk=?P=kA{f0kD{LroxT zyLx*i?mcYHJbZM!yveTGI$J{Y7knjYLnws3EX(%ajI3HT#pszgI<9xCm+2%S9`C0s z3_FVKR+`;%CBDB|Rb(8*}Y;eEV>0L!R=h=ZvvVxiCSSAdyR#n z#q7v0^>OV8jUG*xi6zpG8nur$6BHLKyq+I%!cVEzu6bxREEQvLH;DxOd1@?lIo%G7 z4)SDVUmoUWzBW1mTaoYd$s2o#yqQ6EYH7s1Fri@=KJ4;kK4SO4R?ZI=lcgoUnz(Bf zWX|f%OINJ=j&!A|ry~?P^q16_`ru1NZ6)bZ##D^^P=>LKq2AO*J|_K4TymMl=DYRB zZ#3MPi|oz_s~3@xq#INL&$kMFxd8Hrl?Ck)#y zTh2e*kGbS%Fu+!Q?tHX66xbkuzEuOqxn?EVIIqi&LfKqUXlNgyclk#Egl;GFO!?vn zKhbBVQUVOlgPHJb!?4L~30#|&D-{OgmbSpO%2^74446#(2U$!b<;0IF>hLtcugdz~ z_ir+Mt|iTZD@{!5u!%{e{X(Xrjp=|h?YaKkJHoHCHyTR|VPSN;Y#*+&O~+!B4gF!= zdr2u^;yV2UlwAv)U-A*6GTDr*#L|l#MGE@nT8mZVr`Qen+q@yQh|zEl|Z*cPLBr*nSclw(VTk*mC1I`h6`EcGUO+qk7p zc%pp}ii~#~Kw?j0adgKfLYSnjk z)`%mv@I~@(#=0%sE|OT1iuku@$!g_Zo}LpqV#pk(gP+p5tYlJHe*XIgT*or`>!7a; z51M|j2}j=*1Z6}c|>k-zkxi$mA+X~S{q6!-wM-s$FH8O!9dAm@JKCPi$$ zBsLM(y)RhvW+wEidEwVOLz9zn0JD94b2E3P0Y(blb`vsw8nouRqaz+ti_`(^xAK%U zGO=d|_zJKrA1Uxhf{|wxJ|-pB?oVSBZjXOhpkK0l@51`BBdq~}oVWJ4ntZFEk(`u- zS8XPcG>+2rB|CHC-8nJtQv2}mYBPST|7YOt9zjeki>)?)WSSJ~TY+>ei&in6&1pm8 z`edc#d9#9}bBltHs-f?n!EpvKxnE<}{9t{4{YW}z3iyH`{BXHnn^y%bVsY17jYy&RnP&up7VI#YwKIluumpXfnoIbzP`HpJM7h@wc74`Un`5b)nV2Xy-cvg zK0G{3X^sMON(_Q~R8$_EoQ!1&dvl3;7uc924@iiH&A_o>Pc*(M@h}Bg$0H>?5}-T+ zr%N`XDkO(in?>@}%PLvEv!M3AIsG;JhVuV&!ufT59fY8>foBu*T~%E2UDdk&eg}X< zY2PkY14`PZGQdJN-6PZYp`=0&PQqW`#RVO*u2O?Wg~@8EPD$iw_T@t*QbY zkQTk+xal|)LPgK}m*P3cGKIkLXn3>}h02tfnVGd|mRs#rKQ)2-1r+-!r0G4CwN6Cv#5hliEZ`tO*;K>BfM-za z0Dz_85wt=Uu(p~KJ6Iva3>)(Q4A!Q-_N@H8yPI1+008Ll5;fgG2jrCK_`NPI7whdx zzRsFugHcY0hhYu>duJYCr3Lg4lt{y?VP-yZGU5N;M~0a;YfF<}n^zKGzh<@F5ptuP 
z^x_7(~Q$*-eDuKO>)QuYCgPc!MP1eiU9- zW|!G1tO~g30^Y3w{|{+z84zU`g^Q{PNJ}>eh%`t@hp32zAl=>FFfpU%u}<_uPBV`FH;CV}N(wz4zK{?Y-9XJPT+fXRajO>~*8)A5+_+2Rxz8 zGc|Wr#GnSk!gL4Ji~!?=n9Q+jb!@H6TwPshswDs^i|#~puF_~iXCT>TqriZ6QYoQ3 zEQh~v$;k51`|q*=5cmrlDPWn-@-?vE#%+WEGHuujR=T{ z@hb&uymM4BX0^pcAfVx3zde}r_q-qad&OmMoF3BD zBnF=YrbFa^n+`h-@IYI^pFe*%SrNqp5~s#8_15nm)Gv3(J_^aPAHPNnws`y8q*drd z0t2@GUk2P0Y2wa7^8wsDz|NlW4C?rE3OGS+00}5~4Y1~!{~9&moV>MwL}D?8m@4q| z*5xq>D4wL3uj;T(UU;*JuB-jC71bYCdZq%fV&*^XhG6g$iR1%yu7^mSAT(1 zsFf2Qi*KXts1`9(MY9m?HN-c|~ zy-S~Q0xxxWFjI3b0y=q5opn)C1gP#Qm+9&}Fxmc}9za|Py1%Ulf6zH6V$*1JO`DeI z2YYYq2cSdwY!^p|8S*)l;G(Y+q=77D{JqlRqYD=BDSmXs2al^C_ib%yb~dr);XVau zdRb+-C4B@JB`SUx0DQt@a*&0ELHPeOYGn`~C+8crF|wRNbw5oP^2+&PxmMRql*Q92 zR9(RM6M#yW$yUJ57m%3mJp^S9xb%*lP4#>!?Poq(UCn&_TBJwkE?s_NE1VQv;~AZ6 z&Ec3}Aroi$uI9`6)z!FaaVU=842Fzz#GtH~$qA6NB=)!aK6E(oy!YY>dYRAugH@HnfTVR?$OLNmMMA%lr&L2II)c=hA{XY*C zgb>?urU+(J6pVW^B%P~FJ@Emg4oEoKKRm3*$YxZk^ylsK&5H-fa*j-1v=d}`q(wdVsJ^@0?^;=xf=rfn;;H%UK z0wzq-#(idvt!6a$wwaTJ=Rza--Vro|v&Wru-u542|CwtWjr#!?iB0ZZBi6{^NWM0} z%}3)?L3*AEajJgu<-qK!cUH&gmMH023np&Qj(It3F9qG>gH^h zPfSudM2%C%M-?f{SWXs~U>U|C=Lej~Tije5tce^x9f=IfjTPr4gOPRB)q@Ai!QCEk z=e&j4w|}H;$ebxhGId}tMybz+d5ATZHMgGEG9 zmg#5v)dCzZCxBJ-CKB`BA6{y7BM~wskSMJ7MS^~mw&QGfO?a=>C4>ci~3I)H^3LZr?RU zB397>o3i)qn|XhqUxzdzz?CM$*4u-6!(5GYi)Z+4Yx4EJ+s`CnitiK;k@JrOD!6R{Ev{?h zZREmUyPY))d;*U94%I985s?R9QzNN10*O@ag+=Rw(wxjV-P)w%OYK(`>=TdqyiA!u zGcGc}k-^28K|w)4?_OXpzYO~M(*pA%x51D8JqTyDpyS!+&3CPtg>0_GHN&VLQvL43 z_iOY8tj)JG)@dZ3qEE_pFJ9@tHnu|bVdsrF6@prbFT3#?>9+cb`N(v0_~yL1%xH)I+Ph^V2Bjwy#{1`>M5)IR2O}@yn zmC%w4jT8ilwI+XeVKO?w_q|Pu(8D6*GVf8bwOq)#MLPp`KpZ*INndZ(8e4pMP*d?Z z^5{P&)=FGx5Q=-?T+5v0{&aj?o1{^KRgl=BjM3pu)uiS2$7>lf$q^lXkkfa!(&x6u(ld^gLg3CXld|-2IJD&7 z3kzHQ2H={=lzUWtdTN1kBH-HaQ{tQ(kxLYW9&LQ}hYLkVfoANeI6RumuQaTIjs zGG%xg`2PHRuK8! 
z?Iw-uN6c%fc0E$3sCRaxTs!PH8mW63hPO&>zbkcAl{`CtlAM&f&yp5iEay^pYWiax z%=lisQbmS}BsyLFDcurEzCCIjuT)?`zg)mazM5gh7|(8@@peW2Sk4f+txV$yBMqCo zQ(mCj^-Py|=i6<+uM4gZ_EVFCXwSsGCd{k4>eaSJVCITf6i^VR@PJ^oX5tAxlT2J+ zF*a_{0|~xL)RQ44xvHS_HbodI`MkyVDBjAtCZO8vnUJVqG&lbM3qPjq4Ap0_+P|Mq zD`^^w zKTA6dvGkhhK&&c;?T>}&p7V~je3K^QfDD_+FHu2!P$3EO0c*^}>^J7(r+JY-VFxp5 zUtZg4H#aE#owP#T!kY0VQafz^-rwY==sl`X(t>DN(;(tB|ZC5``khq*pC+_K8 zLZy-$%Ad3_7LjKgOg*454xb1x&%GW}-mKnEDOiUBHXmN(CW7W(5mm1PSK=T7m64-C zIpW8~^qd5*)N^aD4$k@w2G961-9nKYym{Q$uNM6oL>nME{xI9bg_Z4oEFo}Ysmxss z$7!sdqbtS4*PsdG@7#aZdfpD{hWwepcKR$7`E3`)!;G@~PzAde-n|0^CLLV?=iQer zw`UlhSBGocLn-s^yuf|4zhAEhP(B`84#<;9zy>^0NoqvNZG;^IKghRwDLGd}a}=un zL_S{ikH&)q<}BVExF857-bK1t#eg5#uqK?3LXRXq%@0qZ zGP@~d#3Bg04k@4Moj6Z&wu;@+==pbAqlGTy$zV>Sm<=ZdotNmoM99!LDaR+TH+is& z;0nbXRL6nWYdjM#&vA|3HqrZh+*R$)8Ir5_?N=%=kpwEXpATH)`mDbtmd2y-zEjTVtx@9mqW|W_txm^=@^MHJO;@rbDRu z_4jzg%_HvKV?RP}WHs1Ubs`kP|M(pZ9*iakcI7!}drhv2*b|2~U}gI7fS?IwLQ4qS z52hGQI{iMqHA8vMW$GHOb~ct+{+8!0EXi+v*$FOR~|Hz1Q*PX+bNkh}K~p zpjDf6Z(I80koI>xn*;~ORVLg zcQj5?%~~r+5San-;;qJ#87#vR?HHX#SG1SD53i=FiI=HAUAkwuz@Jva>hj9p--2uM z%qyiQNgASJG&2J>CuCWskz%A8M;o%O&^KJ(zwnvw{pRLHX*#-^V48Rg`F_m)tD0&* z6yYu3-UNP|ZzV=X#Jlr*HY-=X>~hd@#=lUa1lV)y*M7vpK5%r7@T^kE7I3(9AvVywSH&6D^k6#J~| zd5{?enQmYRp{1hnxb3Xbr+H>5NqM1**$a9eWj_6Pr9j$Ir;4I;-i$cuTx~*g)y1D< zSp+zxBeYIR4QenJ8jKlybK@t~b5f4J^tbU-2i}G6*6R772jKfND%w#j5$R6PZ~Hfm zaMg~>lgNqnvX^A-N3?YCSl*Av-0UF#l_&i$k}9Ik3hqnSGYCS_SFWTWm7cdOIeqv6 zD}Ti=5F7Hfd(;u=<>7U{-pk$bch;OgJWuzf8ZOrIjKDrhpyy(HU)FOA-*>MhFvv~Q zr<%lny};O>92K(=U8Fca=h`I2_4J4VSE6+{LT{}iuCL=C;e(G>hNdFM2$9*X@9K7y zV4{<=Jn306?n_M?)Tp$0X`uf*!3%4~L1SUY-`fL}k=53yi=)O>B`I>VKVE zETD?IOjW)QpF%BuV-1Sr{Z*q*XY=x4oL;7S?{J6lX=iHa{i!SJYJ4O;Bc-Jr9#;p`GoKNbd3r{#_|oDoEv_^_jB3}cT4fE zIx|@VL|KMphh0<~$vn)r_)J{Jv`%@&IS4%?4|dUE(vq$Rfi^YS0{zkC3Kot`m3JS~ z*WEn$yRf(v>?V1y_OiY({&?Xll=qHFa_ke+mjqF64aNAZp~|9E*f{j;(K-HGclHdzP+kxB$u4_Jm94Kx!{zwSuM%Yg;3}%H0Q8-f2Ix7pe_n%Tsz=l^R&TB`7C) z;%#ML$j8T9reEJ@95!T~C_0cn;L{MncfGuI9Bd+6Q{LJ>7}mugSLwM!Pr3m 
z2LB+!@uJgPp~*WtQ%H#4+AJiv06CskEGZGXwH`Cm;1r0!-*!(*!-%v$J(gK|rTU}O zO~-6y{U{Xz4A!FqKtF-! z$%=_eUs67j%MeG?CA#PjEty8c*dzJ=L2vexZ2?r&?QFFL*RX^bHOYh#LiQoMFW^ro zB*vs+TQo~&oVvpY70jmZ5nXQllrf*zi=jBnF4HI#;72#6Sb{C&dbkPX3CTJo@6)OA z0mTsVSG1;I0NDrHWr1qi`5Qx_Zxcc|(|bo7v}HRb2O%^SM|xUD82B_%22fEQ>z=N% zf8I8vU?gp)8PK@QbbA1+gT3~{e;r_j9h^$9#QA&y;ow~DEN*JYlIwSOB0c0;m2o>I z^3}aa`b@oH(yKI_#Ai4@x;DGN-5L64##T~&N*F`HDuw%@x|FYS#9xHfB0YR9MSEos zm0BLIyQb4H%qZFKrq#1jKcLWTNC@PL^6mnASI#@YKr+hVAE?+xeEZ$0_i2D+)?%!Q zSq^lmDRdxa2QrD}d=rLF&pH~EUrGf*N2g2G|KZgHg7;$hDoi{?zCooCZ!su4u|IE} zV0q6Ms+vRl40NBBs6!I!Mf^gZta! z@I!#ZY3K(p_xn*KC;$_$O^}xMxGM)Rne7tdFNvr;Q9@p>8T<*@=ldKG`h)gX2NCk& z<1zW2HoW{qU5jWV4@$ah0!!*|0>>12F>sj-bO9|%YT{vE-v5ux43P5BI-3trIbY8q zHc4ZJW(m1hm^#mA9lU_ey+%Ez=F8t75lM2IPMhI(^yp?}eh~O^blXp zG%lis1CLzJ)yw$6(c)cvr~$|jY_E?*<)2`K{QJwG)CNinNYP}~Mu*D_!ec?ckY{xb zh1B>5o7oREC--Fvur&@emvr$Bf~6$L`iE+b+Sphheb9q-h@Gad%!*xY(tz@A^LGjp z!Du>pT(IrJu#%fwl>TySJpcye05Y^ddTTj)hPhkS`eWLek-F(kOe4Rgtbl*%pzz(NRye(!9Hn$pj{h7G@}NTX0d4@f zsekcjddFd~f+pff-=Q|pn3qvLnC764l0a%YUAPYHCo1tklOayddbRjz01XOHH(#r8 zK{Qr@IgE;ovuD2J1i;8A{+_oChiQ4p@wMtnNpU>F;F^1$7R{$Z0}px?vnacy)lB!I zy8s10ToMwJ4VahM$^Dw0v8ZzkR7LLq2JBTJaTfm^5?L^tf?YD>s6(BjL;?eUHIzN3 zz!WC>-pIRffPYM_)w^ptWHAF~8%n8Z1$B_JTj^MJKIl@J-1O_PR+0p7`B6C^G$zSt z`o;>SYMQqDb$_*(%EPjmcQu|a*b$xt_jT>d78sAFX_y%gG$Q5a{Y$M4wRFd6Kfe-< z2U|$>fJ-bEz22~3i;iMV&S)<0Ot3Z5cz-?y6>WJ7g?e1%pNxcCu#}oD8Hs&AQk7Af=|Xn+4bdo;}e^6388iy#8vFOP7trN#Z>h6 zhUsXTmUlPLko=1Ov*jAZW>3e4RpL}u@O8s6`+RrLTe0;l>>R z?-6_i3Lqh(zZIJqKxOX^!%j4q$!ut>3a(M8Q*NTiTJ{`kp4v|= znZ91H_F!y6H_m-nd!f4?*|X+8xWAYfLSLwq{Ta(LR#vhrFD4N(CTV@?-#B5e*uYE} z5|`;9^0eeA!GU)%+{)?=Cs<0PKLM{trR-Cu&7`-fi-TyngBu&RWaf+4Fnh-+DKP{CsYh`Ow{{ z*mt}A7fUl;Dld@5HrMuas|Qu$`{|h#Ys_u-r>OdRt0LWT^Pt{Px9AC1n*#RIT#wws zN$ar)>D-4u(z)%LwWNA;+&v2sDjQ^qoN0Bs4jIyUrJuM{(hf%}GxC95<|f_d`9pzG zybN**y8WR>tJGq3GUSE?b&POZWA=6huPvXRt2*!R0gbT}ChSB$`cQs$ zsStwK@=4LLOp0fcl^IfTr*(}wjMZjGYZyL~%Ny0+f14eso)@uD zu%XH~AzU=UrW}8hZCd-&`d9Mg+KYIJWNw3gUH09NC$-sD`w^I_UcZDOtt_e_>b$l_ 
zR`O%^z4idGga!Dc8$U71M2)4A* z7u!$wKF=P4b3d#ft3x5BY|$_{+*Xz6=4Ud7oui76L#G8h`@VSAGgAr>Gc&^n9KP_j zN)v1{Yobv)BnU5enH5!Z9!IBNhhwEhouz8J&yv=-lGcsf?t4#W7cv>djaw%wP(PD~ z0>#|t*^Q~`Vi8mpnp_9ze}A|5FSihbA}Lan8$Xx0$ZP8D=QSeHHN!RAgfQWlj=lLx9H>dvuF(=C%+C5r z?jrNyZYYG3G{DV)VrFi7;WyP!M8$>4=76;%?^Rl7W+gZ^_N(Eu z<;E6_Wg)CTQf>E#b=;w6-fj+CW8Q$+Ngo8eX>Om#zrV%VY(8Qk3w^E4|6}OxWs9ga z2rk_+++}o3Rq@9vt57}l3fI7mUwAI|-u(?7as9N59jwE_ougZMqJ7wtutgF>jB{5?eyz*38Bh%4^=cn9t$l@t2I0b^jeHi(lkyH0dAAcvIZpcIk4C{YhJ}x((J7> zH&Na>$t;*gq-c91{vYf`;PVozL@H(2c|^kxCTjq%D_zB>@#_MZ&VLYgbB%^Gz5sl#4E9?PBl$ z^RH!7VP*5_@VM^E!5cDScJ0-yllm&NdeKDYz_Uaudie`<))l)#Dr2pAE0-LW)E7-BB85pd?zt_{$#;tiSuuu<<|z}J zIGnXqMG45LRz92)OJR*i7mdD+A3(-m0=8t2&}vi1d$PFX&gE5^FlbY(l`Uen+KFQ# zh@+#UFPW@`#IXK=Dt)otqU$*`JymaaLfPrG+{;${pdt0RzrS)B8AWb!s!E}+_P029 z{?3xQTTvY)^{%Ja?>c=$g%%~p=jODHydH^7E6yq&NQWRZ&35Wx#~&GtQwFPc6Lx-EV=NZ z6=DdrQRL|h=KQ1_!ecvv&$CN$w3IoW_I)m;A}#$%?niwo!sI2Wat4!iSci>=1KeV$ z?CeEfrV8&!xW)-m?wfXJTu8zbbhb6>*iWA?>vVcl6~>kc$1-(q4<;`9&;JZjD*r%D zBuXSCoGKaVh|Hvsg)P#}ZVV4YS{L_7(=mc`L8OQq!jc1j&JnN%O=jw{;fE!xjG;LE zgzFfJ=kh39&Ig-rm>JM9(b{s;|M=ya``EfbZlhsoZ+RdwDL$KIx`2w* zhowDpiG49T|Mtm$u5(#H|I(&@qpmwp;Ld})Z5)rA+o`zcUG|6W?n#@9&#!9#iZ^}% zLA&1;bv*l-yQy@A9mMw8NW|*}esP|mfgon3#@Z8e zBgrYflnGXlp+|_Z^00TP&m+o?Y2W<3&eNEz#Q_wk%AsX8Uc*{76c&sEcrXmpE3vhs~eb3?(v^Zt2Lh`eH%`?l)%A zQme@mN(_Ca!8-59h`866x&mat=@Rv&R)6Oo-WNZiHnK1Dd$+VYSr7n}F@tHr2fhn{ zm1Y>XNKi}D7j*p29nh$o$8J=y(x{q^6);Lxtmxz?O1*05xZq;Q4?$eEds!lJSL^+d-;}-{^~NQ6!wKxErUJ`qRLvC@i#{7{v-6pNN$9sp z<=(Olr?=yFrmd$IuOxjPOEFX+9l1*GlNGK|Yn|}=_Ze@>h;^mJ47vf!{M_Gv*m%`k zJWm`&yeH`CUGS~)J_ouNXMWB49H!%_E0h4e#FAx;iKfWLu~!uH8$8G%9g4nf*F&Py z8g2sl7?ZhRCUYJ%;FQrtRG9R>Dl|#SYVzqeqm1b%&QT9umW9wD1*8w=7~S1Zk~XIm zQSE4a6C^x3DqYXqYGMG*;xO2SoQ2J9^iv{3sjZm+pP_J8X=F`LH_xNh@9zEWaDHS` zy6bOD3vp<6e_Yd``DEIgciZ+ng$uIIAQ#V$lI;Bm{gQk8=fA{U-CL#1s86A<6eu8`t^280-WK2H<2%-6`SL;> zi)`F2!?b>$N~wKdmj`4Uu+0>f1J`^!ur|2WIwse0r%`o9TkMWVXjI9yGtm_aTc*Db*9O)Betl^UpUefqB4|Zs7gp}^EG>43;rZHahv`88{ 
zjRQzov(%S^wG7~w6vNtTI=hX8;^i5{%^Vr4l&}3Z#y+0D^&ECu(XWt5WjQ3|pb9NV z#i!Fk0^`8{a>O12BggT}YZ})Ux%mL2Q zr$0^^sJ0^xzL(oN4?V4o&yCE?%(#rd$X_k~WKxIFB8S9B>JhWZ-)C)ApFb#p=}ZJm za;|=1aIT5Ow-q?;40=FUCueWH+YlNNvUW0}4%(Gs_pjVT2mtaJY770>58c{bqz;PZ zWv@?u;b*l{OLY+#%~F!l?pVBR==-TMv3K2~a7gG-JXHDW)n&sS5BxzD3!_Kht82r> zyG-hWg)cJHu;})HPTdHq2)$85$^*uDNc z=MCbmoU$o>LePzliNU3%?2>)px#l<~-Rs@FC-8msGO?e$oX^W%U{{-cI&%z5bk+8p z_0(HwyEs!RcR6X7Xv}=6ySUhM*iU;H4NPabnJ|PuQWs?_(XrQm$ERmbkVg;zGds_T zIv`f57iU*jyx6Ho%v-Ou;H=s=uG5h*iAZmpqeKeQ&-{ge5$ z>yz1RIukKpGVVjHEt!!krCj8DXYb7-ggd&r;u31(Il-S*EY+ym6#1gepbO)IF2sSK zuH12m=@&Y3F9s+4#qnpHu4{roa)RgASy(pQgt?ckH54ajojU<&gqWBWB6K_M#3!~Q zsg8Z0d|)`!WW2&xIA-0g?aJw1Pj@^cPZk7Xu_@u-%zJ5c`=$N;V5J(3{u_M zdstqiP4(2lrm~y!D zi-9rywkirT9Oq}byMh|&?-S%mPo>5?v4=jR^ZT{H3+O+FtaKswhu(5aI-pSEnxvti zi!Uw;V^^V&?z!2EA#t0$sr6}z)xfYwn*Esa;bSY}M;{4&31e$DA1PPf_!Vcbg1;A0 zC9El;@DXyc*x$SfK<}&;KskE@CFB*z_Z&B2#RvSuo9oAAc+86Dhk>1?Ux^r>#R7b9HT#~b`+fS*n?36 zci)R_5=a)CgeqO!gd>6Moe7m#W;)>wezyq=iSOnU!6LO*cN;DRJt-5s*_<{DS~yUQ zy_-myrl6J@{ov8ij=V&bggqWyl;F*k!fJ|IJu7;Q^G4d$Ca%09d3$M3xjvth8Fgi) zfD*xesiR9jKeq=P%&)rNaVO->313h`)H?SMosKR1s5{RO8q1>$@-rONFg*J^&YkuQ zzTw8ur9*xpFZaa0%lfzwH!o15x6PE9HU6Z5k7~F<1{}*F7L#4N4Cga#1Bvht4xVjQ zygc#w*oHvW0%U*{Vgv zZ_%YHw{H92*0TUjAPZRqt3khcNlI*_k>lyI70J~Mb{J`~Pl{}caDv<{^z`(wRsjm#y~g3*9uw7Jsch=oh%6PMZ*@1;`=9(_yZ%Z}n@p^J z4;?7^-P_bAt(KU`lI@6czLN<2op|}&BaZAjYN9;5aNq8sG+xa6Z1io-0ffc$MY=3h zI<$@!yzkM5YVUCMyf|BPc_q+o?!r6nQpb|J{OFbJp`GL2q7*xnOrj$!?JY%vlILi* zpBvWX<%#abK$|C`cCG1ySc6OQV0>ed`S}3jAvGvOvQ(Kw)HYvTxn5ZHku<>yWJ;K% z4bp|{?|!4gL@rE*JqZ}oac}?y?F&+y_XkN?rcezL(EV0Q4A_=r>>*9qC}lf zdZFhlkj_1&d;!nxpc&&2gs^uXyfXs2tb1TTO6T=IZy#Xm<%v*Q33fX}>^+ZT}pgI~Ek1iL3ZS zCERfO?Qn-hy5Md5U4}!?Se%*YcX*|yXr$hn&kbILnViGf#k*3$17fw1yc2!B^cDVr zE+;d7mrM4Uv@{1#`xr@D&%HMrXk8CoACJ>w>Yd%ey@Ru(wfI*jKrrjWHf@Q|15xF0BzlZ{zP0t!dI_zHdT20q^ecnWSh|Ci>1* zgE%HzbaJIg#QI`;O-Yh1j zsmoV8&bhyS^QALbnpaMq;4nOoQCV z&!2g1kSX=WBe!FV`2BJPa{s`{~c1SLYnqbQnt)e&vTn&YKKX+%WZ886qG zh|?O`O%BO|t1<}Q;B`UCG+eGKBkF(1B^ 
zuf@q02xzl{E9nR^NyCRIf}7Z5+mSp09jr_Y9zZI$~#UyJO0$3H#$z)Sz6f zoI3t9B#PaEI(`v`XBM)06=)z^kQ8pPuAX7e=3ACdX)2+DBskbO^z)S)I|rEzOu0n; z@Xgi4nZ?UmH+FrGC6vKBKoiI{;|Vllq7qpL+?qlnpO;9rOkX|lv{1ociDk0Y6kZ)k zmP(e=!T}O%FAYXwygYabMgz@=yR)K;cKb|-$6lMf(s~X#@cI6QR6fGVP}P8$m$SQl zI>IKNj&#BE6=7c5wnJhm*8ZgmxvCtiTZM5bhfq=g z7Y+JscSnW5U;x`m8{?dQ){YwqFG0V2OF<_CW~EiZs*5+F6qU)Y$Fg8V-ytWu1t!{# zIMC1%B#$-Dn%=p4k%DsZ$7}v{o*W&Lz+G%^rui8w8K1PviS(n)uk!5!iLG^dxq%SH zvaJ_^^15x{ORy~ULNLTP-ei+X4Y+6+JYF;&Yt`~%yM8LcZ3!9*`JCSSk!BH~tR64= z%jXFbGJuNOkcS9lMSCjp^qr%nlCZnPF@;w+d6CdLpv%a}xkZbud0v1K2@cP$e)3@A zCel@byST)kdaA-TW}x+gBdfVLV`oN{vtrhcWNxWPO0KNtu_C?vpI8n9YoMF_BxLad9YC~;Kt&(d>$Qop|TEK@RKVsNw*AmTp1HD~zXB{FV z>G>Qo$b%5~Qn8J-q^)Pmo|VVmC-)bxc6OY*GH+S2 zMWH?`!3Gw0-STGJMOF1c9R1}IH9~$R&$Vju(sgkn(Y-f8;y@LpXu@m0YYT` zwq@J>;cB(X{6IqasFA_aem|Z~i>Eb$rB22d5LO>o2B|aBv^gqwA}-SJXxq^UVV{;0 zT3hvhd~A0fya%__ITsPZZH!D0mU$Qrx^Pbw(mAzL`FNBQz*z+B?H@V?E_bl;5CP9o zE$rlI)xKfkC-KWF_bzM4h_bSJBtn{Bevn8!F1NpPu+v48yk(JXhGS1e;0FcEuY0X~ ze*DpKu|v3n88t(ER~vFCq5kDd^$PBdrO;#&qA|~$Tbb3_kyrrH@&5;}Ja6vYfWMhe z0_Qs?7jO0^hg;7%{bqBLt|Vrb`1!}I9?H`A>bZlv6klMR+}8w=sH%=H?rW|FG$Q8@ za;2$BeZK`*atjl3OgpxX`2b#@5S1ohP@`54XT>H5&+%fy;tfXhJFU;-4+?%>_3*cs z(d4$896<{dDWbooQQlQRkkiQuuGgyDlBG-v_4im`Nd|aHXTV1Gc29IGT{-RxM1Q1i zHAZpU8Q8d{BxLaOzvJ}<#LViqdM;gnguN)6hSL^}Tl%80yw2~2CDV#dn$;4%uPdRn zpv&tQGFqFN#@|zo@kdj+m8e1pZ90Ap0cZkWrIPDM;?K#WC`RnJOQa4@WAd~oNkNk8 z0w(CD-QBXBxas`30|EoH&J2k!v5GrDdtR|sT59&6ggbsX>x8^RZt>|2za*MfC*gKz z|6~m`&%Hdxw@!0;ZbmJDg9I?B%Re8f|&zRo4+#e|OCL1e1Uwv!P0a+mjj7UYeT(oVi}mmq7Lzmm6? 
zsJp$Twd%<|C;E252K%8|gwZ$wKAX>*u#BE=d^h*Pl?{Xv|94S0f!ak9t16~-H+j?X zPK(*Wl>wJ1Gck%KLWs;GFYOT+0fJX(EhB46HktZHE(`P`K4QQ5eLx<>S>d7TK^etQ zm13;+^iC|B30zJ@%{hWduzw99CzoG?4hr2JpsyxY5(o=LQr~Q!KXxJcrnk^an8e8dVdgw}{{#O70sGS-|lk0^FROxS3T8 z+fxP3*G>Kz*1z0||G+MBKVT#5PoW1vZ<`f88lX4#t2)oBQ+K=pNla}BND2W1#BwYE zkF6*DPja1!iHTkrkf%9BXJ|d(l(*ICnj*HDL)0<;X=|+3?wT4J!U8hGASg5b(*IzH z{09&L_^1 zh%}EnF$}_>7Ec&^Uo`#1a1x^eEtYF+i_QRltixQc46!$6&sX400l|^jM+Kip?Hzd# zz|$LOYx}O04;&~rrMB!A0Sf)<#ok0+UEQt`zb$-hO=SHp6q}L$TLB2+le>Pk@Sn~N zz?>`kaDq_v>MXN#pGkWp2s-ai1_PSn%na{=AteC}N<%BFqM=t;G?i6VMy-BNnRW4h znF#k@t2cSnhz2eL0C-yiYwKc4h83=6Z}u&YjmY4t-G8-YJln0@)f6Auj{blC&=e@< zW&wScl#A6B1fDiHZuJweX~s?#DEXdf<>S#y|7O$TB`ibhI<4%aN#ZP-9+BkfntrS$4`Hl4yQ`NFZM1omf6s5@JKp3HtHO66jSLN z3xO^-au0ie54|G?M4EtA{fFNMdQjcm-Mv;gVgLTI&9i+xjFkJ)CZY-10huJ=Z-uHJ z$MBCNm@f-3sw0^;L@fg{loPP8|Lfnia6x>da!K&UJ#aF7*wDz9ed|ECJl=dK_cRKp z0@E5d=u+?7JY|6$G7ONY_OyU$+~9DICYD! zH5wbAoAo9+xYsfm=7%N*DRk{oTwuR zJk%KPp`gK}pnI(hM^3HJ5q>?8%!#BT6lQ!%cju!>gPp2lD6wD|Pl1UkzS$3B zp?#ri-Bp*wH1Wx+C)ute;|#w4L2!$*BYFb{qWSSmXV`ucLc1;LUH8DmCrV2Ian)aH z!^A)L_p-!$8T=$8C`e-=oApiOQr&Sj&OK04iG1AqNgL$LBErJe00K8d$^C3|OqDn; zgzf7aWf^)B5KiMzhfW8hUp z2R9_$PoG%BJ=E&JmK0rB@Cr75ZPXN~VS#x`*n_RTSJ!=W{oY(5+lKMbI{xJxK%&C7 zMST7y+ikStgZYan65m)@)oNt(#;gwSmD_aB#t&}_ksbK=Q_y$rDoKTL{kM%>`Li3R zqH3;Gh6npH3a7ho1%$G@v04uUMPEr%lSR(dRk%Wa^@v)djjG*?GEv>}^S>1nkvg0XRIq#W&tTwXu>i(uIn6Wt;Rbjp8V zO1)ugUKcj>@Y~OPMZ)>ZL|n5G%t4KsOsOsF-Yq?x7g4{vUeHw*If2dV%#+z+Dn|87 z2@-X@-CfrO$9X`OK24`E{#@;=hGOrD;H7b?tLC zR&(VR_$hMKMz;CFm^}O6gZ+IHwnSQL{f?hr*`SYsyI#Mc0_-K> zcm<4t*Q z!oi5sLBTIcV%*4?^wlCt)P#}{BhEY|x=+HkxTkeKcc0}}VBOrZwo^TGwz!cax)NPR ziA1i1#86`4#;RZy5whrme2}!4` zcdd*MiVL)TQwFXhs+d8z>^)l6sQg>sj;Fk3T0;>Z^X+}uTD~#f zlH!!{I~>pIY?@fK@E(#zmM*W}1}_NX_y(Nsf9F9o;t?{->o3;_X@)irwjA$7Iv5{W zPM6l24P226V_?ismOX{sW)Qom*63GJslvxB0 zC;@p2h-tm)B0?>9SL8r5r>Xc~01Z@IE{TgCPS|x5eZc5VGZie^D&Qagl zsW2Ed?0Xya?S+h8{9#7_m#VecK9xY;T&1&v2ak#qKf;d-uv}YQmWU9geo0uIMQ+7m 
zY-E2;O4PM)%kS4wPYDB|rYTp$54fUc;Y@%YOjX*<*s=b#k!~8_pHy)M>j*p3o2+G? z>1LSP+HfL8v>kf7z1hl_WYXJD{d&H*YbJmB%td;(P89@4Y-3cF@qnHuw>bA|4Rk7@ zx8gmb-kffe&@(hR*9kj|!RJ9Y(4dmHee4IYk$^odrKdHan|x>X0A@11on!`@=NMTR zQU-qA@-Ibq?%fSee2Z~x{pQ9DzjZANvXzki`8!#;tPIZzbExuXE$sXYF%t`|H$Q*% zqz^7wb^RK?`x~>8O1B=%MO8K*+as;)Q#fhSHUbB+@=c}>Y3fn(>FWJdsb6qQ{{H~r}x_9w$dw0ofaTxNZ z6DAsIf;|RR-w7TH#8U1)j;h1AKl5U;a;zWw-p0!=7lrU-R42&LUj5K5HBz1@$eB1c z5@6;_>`f<+k7b`rSIt#adSzqLKbhQ`UKAh8v+MV>S4qB;>CTSQ*h&p4tihGm2)wwi zj>NUi5jnQ~R@kKEm3hkF(aTK7D<+DU)HUeY54_uMhaac3JLX86LpcS;qf`*f&j^YI zBaI9b>|Hq>C-ByQibY9?QHtr($w|U3J;*_2 zR!hv!N}&cT*Ojli=7$|-UOD*6JB>QNHHpE}KXUT@fCPK-g+=z%dW+QYg|L@|aP#uu zAlSP=Z(OtJWgiIgPD%N9KOv3XNZKOCDgEX$X2bTeL`GClC(HxizAuDK%&o&w(wKgb z(pQoN;HF@mQ`Ua+aBg?_{`d7S^o~kTj%q$Z4xE&?Wp1j>8bar@LYOOT-fzkDzxY@& zxTAb^Bc3r;&wy8J->1G3)8iKn^tg%sU08{$RJQ8|9PzL~Gkr*9K`eVWN)m5u>w;uf zP2}VO`QXitS)!NBS54hu(HIg{2QmcOhRU}WBWmD36$)a^gYii3o^@hXp+5J-Xr2=4BdMuI00+}+*X-4fj0 zH3Sdt) zbH(RJHR4S+V`XE>POg2p^5e#w2yZ4c{6xI^xbzSEYv{?ew1*Hm*)l2gA=1)Z0i&#k zwLyI^0}kr^T!~awU)1bOl*^tWMK4w(J*~5kX2xkNojr}UM9wXJXKyHm{2IDr_O0td zH4==1HyfDuP%jXqS7-jkQcRu=>%*S-1~qXB0PHgUez6JLIYoj)ws$t(L11{aXVdl( zL73*>3Uc(n$F&{1q^oWJ(>T=c!T?GgS>DSmx0#HdS~G8N;i@rp99XTlnj;3S$>kPQ zp+hHlI66oZmr)E$+ejdw_Z|Qblfm%yZ7WM*dzg5nuY6yQ*BikP=eL;p6+**r+mB?Bri$!G-~^8?d)RZZmH~5SD5tJbar!%A zsd2Fzr{45A*;Y{NNEG)14TH0E+jhR{1q+_P2s4y7e0Z*IYw;sv;7|b`-g;3HnX_dmyNS z`wCuT&C^wo2K>MWHLS~f^?0zrQw22#d+5*yTb(vaiuwFo~T~#OH z21KAL_1Ytvnwlc=?PrzN(ZE7RH{VaAehWoWvu`}nVT$yCi9d7vCKS+e&9_AoX(dE8 zzD;6Y#r$6UT4GQVO@1a&?C0Bekp3vMCrc$u7W18Rr`(0i8bI6+nBweeMgy5^*YOer zL+XcOYnI*?Wutqrg0)XtVvD~7mr3_4ochCSyih27?_)-j+ZPrIELQ=hAVu;aC`6Ei z!=(`gUJ?D2A-UxrNBGLkq)%=3F;mdRsbBocA#R3FaHQ4uejfg#5OT4o-=Hv2;CbV? 
zkR_U{C_}{wji@$jqH$tH@VP9pxZlKEI(~H0Qt1{``g@aSbB+~HSew1Xj5JxYYBL@- zwk+tUUE_-kAs#z4wzeMnZGpSZUswG-ze^(r3;S#Y8hFF!5$4g>VZ^lKcRH|K{7gY& z@7%i>Cx)n5vRK2qc6qLGkz1Q2HIqo~M3b8E&jCo>eqOC0Z-TYfZsw_X1jm+;8udWNwywvz?~hAQ77#%BPR$c?$g^Hb@$k6 z`|q#UxV*Mt`($O_&q&}-o#CI|Yoh#scWs3ggAatO;45SE-Wx&Qz7#G*yo!ziO-=*V6>(tF?_cxIUk(WYq1yY# zj)24BkG}ttXp7Shop$3fQk!t_dy}z@WWW}z&g0xj5A2bb4Qhlc$t|YbyKVTg&MdnW zX-1bGUtoppVlLV4##K(V73GhfE(d9eMkp}(vk&7|%D2rmwUoN{3-rV{ZTOni3360^ zPE0g9YWv$x@4P!|psNC6%gRm*W#1BGfBGf#1NV&&)#-SB`*`a-&yxD`R7M5?cs=qB z#NOGX76TP!TDe4uM=BM2Qm+UOn}Mr%r8@XHrGB$6;%~6`OxXa&hMhTOI_TpzM0(nmq1fkbJZmbTzQc3XYNm``_m?>BY^C=<#Jd)OQ>c?ZoiwNucPLcvaVDQ740DSW-@6lv12t?h~#Y8ZRYBaj{u-ey>{C#VM}>3Sv@hf0D8fjSzLvj;{JSO*?K z=!pEId8t^k&Gj1mqCh%0wjaunGrh12#F0c~I^tiKArq}ZTzsdf{8oOD*PpPZ)nZM+ zwA}JNK8Lyi)7j{UMQ6x9d?e6~HqrikeCeve>j~+sdVI;db!xT<4df>_k4`I11(oJ| z5c}`gAmk&y-F*2f$I*8DZ)Xjbvv(~ZoE1uKNbcET7JIV)K}abgbesU3`qa`{GB~sp zcN!^`Mj!ZI)~M61Lqarm007+Pa%-}2t>LL$olK?IDK0G*(&^u|u8jJeM5~~*<12;C}v4 z3z`o7)AlkZU~_PuSQ6IeEStYiMv|L7v2El(X`P8~$BI^0q%L&0!@cu!@ps|tb!Mjh zgqP!2S{|*5oBak0c`08--dlXrea=qx)~je?cGaR zkg--)7$}{8bJ6sRz4*Nn)fgM#-u`dI@CuM+YM!HIrsxMUhiZzFH4`s!u(a(C56B8r$VeF+fFOTscx9-;|PJd}fje zoNAoZ4lfqii2U{~`Pq(^*Ne&zM`_lh*5+Hitqe{21UwgmOF?xX(fwr6eEN$KgGTWC zggLa;rYHle0is!*gTl;|vnh(v5cJL5O6^Nu>%SSQJoi^NUvW>2f-7b66o2STDmP4U zvl*z$VJ~I&$+5buQU7vze*~ttSYA!SCn$|Hna7&?KC*LIe0YCBLyxLQp&R)u(iUvtzU5zFnU#TD`?9I0lA?@Q!B@%2{q zX<9G=BfkssM*+R=Sw;9Eh-G3W1?{$pZuo04+5H04`VXyE!gbx9=jzmy<(S*w=Um|+ z50kayMvAf#Ea-N2B*INE)WvJ93#*A(YvNg6GuX@$j~#?+DEmB72|cl3&EDaq1PC0& z8UFtpGZ`xifn9L-z` ze++?_P#+7h3?;F!?0@O9@fyS@bKTPSl1+Yx; zJDjCwb_PPSk@-shjKxsCGTC8g_(78L^vo|bv<*>dal^l0EG_!@4J=W}UJ$l>C;pkU z;hx!ijpw{N^+D{m`^j?lMc>*>p6BP4Cwj9lLR}F(y$&0)=EW|JdJ*Qwk@Z}9TvC_< zB`>Sp2}l1~6;;6)PY5S%1^)DCm{u@T!vO-Phkpak1#fmKrftMm@vT)9>-fY?RMeqF zM(!3+O3_u&+Hp%UXB8&SSsZfDutJf6T!Q&+s8R#Nw97Ob17W>c>yzy2n~gTorb7GH zT**vJvKhI~KgzG3cB0oxpE_l>arZ{=%+ZD$xVnyi9mnLQu6v@59*C_3SPo^g3bj}_ z?y|^VqZWnzXr92YO<)$|e-<4Rl-6+W7w4`p2$BdyFSksq=M)%# 
z&Om-5KmxAgF*g%>W+Zg8>S-(6V*zdr|mCIx2yW=FzUIjP@R09^rG1?%os=U=U zXhBP(p~SD9qzqBFBxywjIq}?MiAji3%LMCuQ8#ix2VZ*1>R8TgZEEfO?9YG>kzx!i z*vaBtqt?tbLvE^XtA5OBxLYA7J4fW@l7i&rGP6fX_ z8oVwuT%-Bmv>F~UouZ!lpXf{`L!)1lZ=$vDUh-2eC*KEmUW9#~NDU?OpDo%cX|TCU zS~Kf){ib6mIRpMEtCpe_GV}S^KuR%?D-GI_2e2xxKjh9N(vf#Zon+So$i+7m6?IIHhBJ~Bti`5v`0%ZHtJprpJzVe#>f+4ZVwbq|PUh~KwUg^_Gxne7-ECRQYi9L;x8BjeVaJEP+5q>R{giuFkM#!YR4#=U!=4bCT*2%} zTw~gS&pqBj+*P_U)yuX18-vqsQF`D7vSPR$cNXcIB|~QCv|Slx8;$Q zLRK1VtZ0Ow8JCYeVkqXf4vaDt@?sU$^gB1RKW3z9K!&qs93vhC%wu_1i))D8)cV+dkKjE6)ovGN!CJK0-|{=T9O?sAkhq z=}heo(eNqW4y}864POwwAsgKhLox(4F;C+pwR@~TCRl5{kLhS)NQQpq&1lx_X+>El z!`i*G;AfCPPNSbgCf0}Uu|Yf@9zy2N0%RM3{~(oaB%y`;@|sU~>ijVToGA_JdKVO4 z{P(;DLf+h=awFl~!QCRBp1mKn>jpjQdrg+ZOK#&}-frn2X*y_Z3m_TQi7pb1W?NQb*uCJzQQ(7mb?C zcf+rGd4Ao#6VP!upKX|re6Jh#pgA8=?R?{E&mqjO+g!Jjg@xqCHa8sb6`mI4JJn@j ztQ+KG=lC$m)Ia3H<@0&w=EQufA4HLz)g@{Z6PLU{!>9qM8JE_{SswE#VrrHW@0u-#kqz9Zmln_O-~FUArTj@~C{#ca88Sjjsccjkez5lpmAs4 zIB}7+kyz-JoRI*F(tR|$+f+Bidoonl*(UXRe8kte|3(iQ>g!y74?fHJvUUE1+E@5{bP$b(~WFp%mYjNi`YKZ_1%IVwB79q^5SaOa(AL@gd9074ou&4J_CUI z|9-Z$HtzZPd9cr2!rnx#b>s%!K&*g(Kutr#+-Xe~%O|H8r*uc5Uf5TM{YmnFda4b- zhb(CU6w~O5WhclMA6hF72r&WAA8u~&Evh*sxfbBZE4Db9Ryf4e2X!9KmDRZc$A}|C zDWI?0SFxje5V>F11Q0FUeCJc#3nwW41B3&1Qpx$6b!J-95WpOuetr#rBj^VbqoboO zrwhWlPV$48rKP1u$Hv$}pa(MfY~EzOb}vh1F|{%^S?>ylE&Bg15YO!_EK0go2jeNz zxoowfnj33tr*|(W;wi;Z08_a?FAva83d*ScrWsxO(B_q;v(yFAKHYW2<&@KGxCIN#KI z^QCcP#JT@kclg~(?NK6=J=G{aK0dqEYzdwtb1^)iyhRVl?%&~p#hYZ%F)>YYRHYXG z4;6q&;ckD5l0~Qa41yZ=Qj#rL)QYDD+zJsrx>zqeEZe9WF!&{BqX$QNHPO zza^#ySacyZVD=)^bcQDh69W}LA+Z&?Z-rgsx0HQiG&T38< zkNtLuRF9$YNaguHh1c(~84>EV;b@bPY_ZfSfVm8)gh>L5c=c*-z<=RfD>-px0HSTy zFVE%g&v&k}DHkK_8%;;lxFR{gF9no0zgST&II%G#N+#f0j-5BVA{Xv8xiDioc){Mh zjyu~dTz1R97!w<9wjT4ptj)%g{tkO33_f+%TQ$aIb?iLvI)b>yK%4s$KdhD0lCr*T z{=uRnxF5DPM{8dJimSfd(T3=6`CSn#$V0X*7av~V>%aF7iQGaZ8gHc#ALvc55?S?d zxGV8+vz;%_Wwzmc_}-?syq$c`^!f;+%CXJmRdJiV2gP3F3t-pvk0~%bY_Ewk>%=gy zUr{LZ@(B75I>%%v+nqw>aZjHRNN5^qed#Rr%m?Fpd47PFg!L;8fHdiv460qCIza@Eo_tX0e0Hx`( 
z6@tGa$(&@nIIGi=`O134zV(bA`7Kw^lr2?IoGS_I)W2PyK%7l;iPLqz;`0sb);;6r z3I2pgobGlR|H_t@Wt>p{s#C0Xj9Z~o=0#jePW#RV|0(ji2s(Mv8k~Vy(!meTHB)_< zuV-D1U6MzL^+);93Hh!Xcsk3#C97@9B^O3a{@c{N9Dk0#Q$i?QB7^Fkr06|69bZrv zVZ+?YrY0Ojq^ho<^>ubXlj+hx!Bo?KgPDwsou+rY!i~R%)!YHeY)+MD?Z$es@pRVW z+r$Ua#ty&o?VgR3mbISl?s^t^r&rd1k6pbm;Lb7Ph%$AfEcf}pa+7L9cw%0c$$7nI z$E^^xKLd#H`f!jQRxMD4Jh9Te?w>cTq~NEWZdSQ0o#{jzq2m1I)wb1(=eM;U3zd#u zFV9Iot!GNp7vE;WH#w$$eQ|7cw~HAu1&in-Lnt5h&P+6!YgO^StrzZj(6EiKL#dyC ztfVFJ5o23L8Fe3&6h=Rb#Cp0qGR$FKnT%Vi=ZugkJ_8e@Pr2=NugW(U;VgdE7fMMZ z`!FcvPr`{tw6(Q~ME&eqd-s7K5ZCs0cuT+}^*!jzD%Pgg^JoXI1epZ6gu(brlcBk} zE@WvbUDGzT{n1XE>gy;a`QN+Rhgs!qRFx*rEtsG2`<4E;tn7^i=U)DsSJyf7DT47h zZIPN4UNg;)Iww661oCU{_nnsUAK+8Ply*{6RG=+CE;cnp1>aKhp5hQjzu?^D#9Z=U z)N3HI~s9KpEAL#begV{AGT*RM{x z&E2Rs^^SOCi;qV8YLq499&=)@HD^h*I+uL!8hK%SD#Zb94+w>#)XTmbID#?se;gE# z4|p^9piN3+Q@`NPBrzDU&Qc_+JFc*$mbSnsTJs6SQ!)pw-b+*EKHR=bhV#dz@_+6; zXgH^VMZSDJW(QBOvn`Ozas;HoEtA#?P)lj*}}3x9u83F^Gs*u4>qf!?shs zQ=X-+Xog`v7O%g23H9#crhA)pF?|Ua?j0rKy7J6^A|s(bq0boZIzGs0$m%*XEciq z-FO=Pl+l%Cx=@4e-0L++n#6L=eFww+H7NAvu2_zcL8e_oAt>pu0ui_TFQ~mIy+ZX{ z|83HqVQIbAL!tAo-nF&fxWCeP_DYG@*ky_hOH$-n1%mea3EZ?$7{$9yoAvq8%(+0j zNN%G=ug=e`R~L~@Tf_ZtF3Ul=Aog>M2C2=HmPSv&k7ayQ=NGCppK77cZtP%E%ZFgb ziA{x|Rrsz|4c-8f&gaowe_UGqAC5My_$3dYrtkE$)Yt}_;o%9Nq~rZ*>lhW}wz}`AeJ$c{)Sp>2;(YF4#*Ld4xTNz- z2_x4V`)riF+jrsX(yB$(e?Q-=4Mv6Uc+Jy$`cqIzL9XaL)#FLXrRS;L+}xI9?ExQ# z28f6W@$O~PEuZ_T1vkx-hbxc)5b=8`V7V>XeGPwTBTm>GdWeKA?+8dd;&C5{&opY2 z_;D)#dMHJw9kzuxTYu^M$vnPEyNp{bn{R0p7yC&zW7=us)xEow@IdQH3D+4_%;sb2 zOkYct+0nL^3X|>V_aA`=Y8{_k@5fA{66>WgZuX{YWE7=vs^s7K4nN;43>GG}nswJ5BF1m0X!LJmyJrJ=C)r=ZfU+VqDk{ zC&F07(i#p#E?l{rS@r!yq2^0}5I+to#<{hJ8ryZmh{$zR6+*7G<8M67`D3Gt46gs$ zNGeZfEl%g7W;3SON?3b2hP~@l&l4V3Qeg-6yL=HsIiKD}!lwxVqj;je zS85!x;mXz-xW0p@x0;NO&YS9upkCBUpRO33NIC?C`vr!w|Mqr0oGXTN%;12b@VooC}%Q~0#RU*0Ke=(*lN`>M(Y}5 zpgYQzeN#(wGTY4)cA-4pkEf;OEOOlTB{WaN4fml^i9001m%Bhu`Y?K}+e!SSh6hzR z+-2>%<+w14rhj;ume%maKsF)Q-eT=q{D+p%cHEf?Ey)Pn-&B<=k|?At(o8or1LkOk 
z-}1TYk9niI0ujSPVYRcB)?I1XO>dLxFxu$<6oJ~cI1Bx<^`h{&0C+}AiLL4NT!^hf zaMIJ>NJvqwIflLcN$|5YW!cyuiLFT|w07OYGx7ZV ze2N5RTb8WCRq@;|n5t*6ZA>A95dGKeB1nlohwRg2%2hPV{->Ky#T4BRD{GNvn@Brl zAuFE?RRcPP4Va?ZXUPC>{k?WVr>#k4Y7>g_oYU-OIKqW~FTomje+y z(alp==Q^Yh6#u+U)LUuJ8~M41wx=dzB;zg}6FPDgxn+`@-~!?@ZT4BCr~wtrjbPx7 zCH38+hc)C1Kgrk47_G987hD+n7{f?#r5+2(IS#>DqO_YquG+>RE_Bt@`;7u(F~S&G z!f$s~NYx+)IaVlFRu=tq2f?41x9JW+eJA%b;Ax}Ek+0_lrnWjLbr9i=MD!kCQXP(0;kS70O--OCBtE@@i@)=6$aT zgZUXERD{Uq^~2Hn3S0dOT>Wfg_T^@$F)JKpXP(QM*DoQF1PSYGGQw-iQK%t>hm#R> z=S7q!JmKbYB2(-`GwK@NyTYo#|1rzNGxW#htvkbft;vKnxmc%5;Q||dR4kUbO^ML+ zYe9nT0b$)7Ea5HgRAHEbS1LoBI z_X=dohM4ApB$um5+fa`{hSTq_5rfV)R;d`wx%v;2kPL+fSjO=agdvp8CY)K46>YWM zq(+>cet-Hr^V6r7gLeErW+kB2F4i5b)i zsi8<>2o6PkLS?H2<*43lzI}g5&Hwx=F~Z&0p%_&dfikQqS1!A}NM_N6bB&O>lipMQ z?Zx(Pl7A_u7v;rx1Zd$2k2{6XXwnQsyShzzDH>Bu4-IsMkdA5x*>0eSugbBhX3!(N7)M^jVYqf^&b_U`Tu8|!^~+=bCeSQ>+hD0e&z+3Q zb>m`h*Xt+|5lf}=SOn;kZ|#vvC|Gc?zcccKk(V2&%Xr!N&z{}>g;#-dH1ykSnn*^VSHSHQ z`|ZAQg?Le{P`m)O>DIz@pe8+ImN!%PEjOhX%335qq(pzm%Y!jO@W~&i)+bByu|n*q zhPF9T34-2?Nmc*SU;8)dp@2IRzWWMB5}kVDR+L)StFqz{&|Lt&`8BVP72NoKX%+@1 zfoKgqcp0O#0#namaYsKb_z*7Xl$(4FoDwls5cwf}>s}pkZmwb?Q<^_>Rv7X`;*Ft< zh3TkKm5C?%&SMsajE&(Sg*-VlV`90++kiXjna#9@X#*Dqp)3TCKx6~vC)?IE5Vc5V zI0fe!Iu@$|!PI9-&r=_!iQudxtIjZA!>L|Anc`FPkf?Qs;O*L%%gwPtZ_N1D=9+3- zlunilqBs@iKV=YeBm6TBU75e|n=Cusi|({P>X1W?1i(1qNJf-iXlt}2*;Gzzr*#d= zAR5g|^W+oLMt2Ufl|9n%ovyCsw z6es1MFa@=tt^GQp3*mk|*AOCLAvGw_XNgyr69G$hh`8pXVNn;KHuL#SzA_ zBkGi`{%?k_Tuip>!6~Mpb-L1xW}CUL_!b>Mw$R%_0r_UC)oIn=$8z$5a{bvo^)J)F z@*iLwXYV|r_i140B&of9OvbFy#%#7tdp4K?M)Y~OewxOmOfaDkm{R+e!#6MvxXh^P zoQ8hD!ICIfgRy*b@D)pTXN9w8%KNHlT*R_62$>n+aeMse-DGcO6kUZ6?s@0!&o?eh z`emsKsg9OLZb5_loWgt;r5Kj_l&b{m$nNUAG`pM>**w>zkcL#%HtX0Ty6Fyz<$Ay! 
zqdQyox;6ds3Ln&b$~u>XUIlM6C4Gp$|!7H$Z|w6_av4a61$rfjv%5&d*BW49uiV^idcTukX)>=VCzy<+tQ9Z1=v}*GCY|rBy%04u zx#eAdFIzm&!#xNmbI|+BAXjVFp2*Ctj#@k}o~^-}Bh|1*RGnz+OAybuLe?nl8N1XU zB_?2@U|6G9(7rTZz~Fl?XXpv@5!sspr^W62t>k+2B+Cew)3RW_SFXvx@0ZSUZ6D^} zCL-*Gcl`@pW*3Lx6@YQ&=%JfreRe;gRWH*8&rC5EmI<~fj7uRY%&CrADAd%eCYnpa zmuaop?X7Ba4(8;{&$nR&@bfj2oEpCWsRhrAgt>T=G5rApy9$En$HPOwZq32XZT_%v zgck-Ak;@m4o$s{)*eD2Z4*Wb+In$#^6QU@ga5uI##LEeocdcv|dn{ZhLP1wko}>6v z8D@7!GuFAJ9a@}`4~w?MGR}cEK5IsBAnxNHv%g1L?x|>@ zFGCxEno6337+$#Kup)2dA#d3K1`!{LjQD3vmJFzr$7nhmK)J3@NaYrcOf8mdOFOgz z%Fq3i4n$$+4-=ep+*YXbZQRZV9`Ns7(9e7#L_F-wH0|li65DG{)=L zVMC)$VZXmgaLH=OR`rf?H0d!wJgSr07KNr)T;E*(0-UuAES%%+o%dh5jO>6gdbF`RH;l;hrM3fb; z)|n5-Y#XxmbsfFGEEw26AzG=6A;!`^+uB3?$gMwOMh%q@`Zm-?LJehrxKOG4YJxr3 z0V}+|#YGLQ`(R{Tr6xPwikIt^CVOj7lLAh3Yg=R!JR1Os;G1^hAG#NrLmY}O#3(jnj6?0Sx zvp;mNf!$^qs-26W5Z0!`+p#Rlvi98Q`xg<@+AWbX>vl zS*-U%suWs+L{u!f(kQJ;X6o(h$e0-`PHUWaJn7j$x?jJxt}MMa^TQ+F&M@m&ihF#F z|7%F0&r!4NMfCxfzA`<|fxXis`zLaEI2tmxB{@Rm4u9ArZ~kojM|m3+4vfF-TBsWf zL)xkCHS+nCBw@Z}H%%-Qy>i}{L%!Ky?d>W_`x)xvw?S`f)`fdJqGF_Hq0&F%hMOf? 
zgeb$JaJorwDPf{VvG$nnYNmR>i5CLBcye6vE*V@qiM}8Rmbo=xKHnPJ%o2=W5c=4&MR&-_4_=GQsA{ zoDixlpvmbF8p#+sJ^T|6>m<>I+RO4>MO(tSH!@vxncsX_J^jjm$9Cl5qDgh0lK0P9 zRw3IdI7)!^f?!kBecyWN+;Ws7JN3d0*u_|uwhz`z&;WI<`A8F=J8X4W4gqj%sT4R% zNT&LBOK&5knj{mb9zOepY(FL4d;p)w*LY8Rjo(p!IBzu}hlkBeLvkaT;9R#Z!3{*K z{@L19531h~sYHg_g#Xi~$P@094%zlix>-0goW2Qh*N2db6ORrk!g9_P{^xcj3g-#r zoFmsaYu}2u3OLzAsbkS)s^PuOpIN5%z4Dw=J-h5BvbX|MlqjF>2jE3JD4Q z18@NshQ(Ibvg;*(Yz94Zo2A-d-8MHzz|gc}Vk>I0p@^z|yIrk93z&zH5-RXek6(9; z6RKxnfmtg}&;n+y83+szVgUQR5vHJv)s7+JxfF?L8Um=(4b}&pKeq(}^R}Pngi}5( zE-%AISUcl5tj;de?0peNmQ2vESv$Ir+-7-ASoR^6%4!PxQ@R9St>Z<5{143Fv#2ko@!Rm zhI*o{etg26p6Y-aO3ddrYqtS2)a+{`R9f8;DFU+b=RtwYfH%m$x9($t`ALA6**s}p zah=^7uj?@+K5igSAy4?8^1|qU@0Ru1QJ62CUqGP2FnCjztjuyq=@wlPVh;fibp;|d z5Z(dq(kkFU^4Ku|ZRhd5ufI0i7j5>rtG#Y8b=6sqjvM$&f_~?A=Q((o4-h0Q$Hn_H zBVU|Gm4J%jzt=Q@30hfMxm+!LPO}L11m*f(FCo!Fe>-~ge;NUexP< zzrtVJy60R9uu28Gj}ez&hP)9)h91g-w{??m7S=-O=8k6^HCegK3SPL@w$g!j3?nrZ zxL2M8IuOxpN#IT`3CM47==$bn`WDe=hd2==Q6hoh25CI@clYO2i9xUUv+WH1`vMi% zZ*`tzBdEz&i681{c^q%qLeYsgs~;IRC%Z(lT+e%M`n-~)GYj5QJe@jS%`*G+p-E7@ zB{er^$#tVvh>5-FyNW;_AAb&_>=$o#zA6!D5)cWWHH)kHzfWE<^Z+m4-9(^#ws358 zmwro?_&U8~JpAj5>tb`r0jOQ9yNRtm{2k|WdiOi_+ts~b1bYgW*t~w)+_Ta}eZ{)GBI=f5X_CcKp?-m?2xRW*!Y0AN@b|_gU+MUm{ENt(rc?3<~JUt)C zl+6=1OQD2wo6m5x(HpDlQBkUSDE*&Btr+xPL_MuK5Nl*2#&>>1l$(nG7V_Y7gm3zHf{8}nt453J?@IbyE#Z~4reVIk*Ea%?#7A_(#>CQ;LgM^|?7vz% z2zjr+A|T$w$gFU23Nxq5dMK{N4=#j~i&f)Wf>&Z3lO|>}$6m)hBp;YV&zk_mx=m+U)z-5TosvKI6aK!_J2}M8^_`{IH{UQ#FT{;D+ol=gV5Hya5n6Aa zixlE1l71^>zuWtD9)p{YX}4w9_5<>?%U&ifnxV!j7Ej?11cZ){VO-?PlVDnKXj3&X zmd@l=z9}_rxx2tD7eA_bcU#9WYr#!Q2o;F3;!BO5XSuZ)Nd&nWIcMH;&|lK}KdvQ( z5H~s~7g$-5xZ-C9dgD*}!#|iO{gRvewLPLAx9eM6y(VJjQRg!Vs}y{n5A3Of(1aV5 zkc1m`BN6!9y-P zS=uf9Q7vD%JqvBOjQS#JWaDN0n?m`86c<~~A6Crn4rgvrX{UnMD@+zvak&v=1XIoG z?xR|>#kLiVvqWw<;s-744;s^jZ-P4+z$9RGeRm>bwPotsh`T^?T7aW{^$`c3e?kccHAZB`X9(t zS!z2tWXQY;w_9&9pDcL7XFT1^$2_mQm9k3hD89r#6ed)s z{!=z~-UavKbfKVRv8tlPXh&?2@OlWnef#P9WMwpiGx7&K(kFvIuTHA|y;gk&$wfUz z)7X!f8>Ib|1#!!W0P^@^zJ*ZpiS2%1b 
zQWS$yEem}{+J&f@IWOm{wi$^#WgM9u^i*EDMmKj3^4&FV)H(Y?S$62kl2#3&Hy&t* zm=X^w?H6aH5zUyvePzTqoz51@EZ=OzoBdrzmAFs6y_S%nc{JsEY&4(4*XMYYPs9Vx zO*|U4&@6(d5kWIzBoV><74xYu2_y2lb*nHpYqEg@_lVmsUjWj)MNvd=8<%+ckbgG& zsd=l#9TOXLV_dA}`uuUF7hbesoyOrC8jY=~N(&?{?RaXa^+JZg&vJ0k6t|BO?OSV( zndcmH|6=>Lgo!ql6={9{dn zuMMq?&FOUC)TL#}1i2EXr)>>{X!5^$g!#6iXI1@ zfOMHueuelp%WlKT*C!)o9u`F9A)8<580s_%LMymFG87h$1?FY>B1MXLFKZ7%dGf z8rjKRtuwj6NNO{>sAcyj9qT+$!YLF7z2V8B`6>DsFw&^l6Q@?ZOM{v0S1t{HaA$5B zauYIz_|AWjsw4=}qW0yJNyV%$W_bI<1;%sYDVPTouHSk3C$`V~?85|o?_KTPZxqdij2Z`GCO zUQCJiSqSXlHO4<9WLJNH*>!`FPg`5p=(nH71ElI1sJ`(k;|Shy5T>K>vR{F5(a(A; zR}fHyN{6@0rf&D!65h4eWLMDWPrABO3FLeHNbAKyhsP@dNOrX@MjHc(TlTp#ecU&1vKYBK5dQ_ou- zf(t7X%PT-T~o5Ay#2IZ zJoGRF6d6<*%^HMiTvdHPzbdEI3}pnisa5y=Mh^buYWR4nZDei#H%kxJ@yMWOd9#9udn9jX2NtcI1-6I8;q_Fd(Hy#e;`4N zSctSiF4%s;_^+fsftwZD9e<)$H*@LkXIq^nD_p6(z^pHtX&*V$yh1|`{b4!ShV%^V zfVmCad;6QY1^QU{J$#A^d>n7xbMo)neB#qA%7VX{I)d98U#VBx*8Y?VC$YK0)FNpK zy|dL5h6r&P4eKFQA zrZWbW_9c}#-Jd;eKdA1QuyXs!R0POAfeoMsBp%B7+q!*N+ z5^d+!nl&#R#d0|tiyhfSge&n0*vp3X+i)z35CkGZY0JRzQ9*ZgT4*=~L0?NaRiE#Z z%K;)+j?hTD@yb$*580g1NaVpZ;UdQH+Eb5(cPneMb_cpCdsX7$dW4%Q|~YPlbqa0?x2|!#m@D z=rpfa|MzlgjcAc0p;dpcaHd{a=dIJu{C(b|wZZV=L)U!*Y-M#B6yK_O^vUBy)b8%o zAlTEM;e{ha#K~n=kIB0Jmt{judGp`zofjNde$(-PrN_zn=49TtMxT`HmqLaT!bf5Z z+gwQ)5%JG6@9bC*M!ZA@mIO<US!jZYvGqRDttv`l(i-M$&wGPfF*?zsR@u$KOn(RrbF zD`Htd!M=iz`oh+_e&_7`8kza)j>_faoVi+~*4`9n#ja+Ow6C~<(`h#)!}6XTn0HPJ z)%_NYK~&swYhwZ9PZo-%Xy$!CX%6GPHBDx7cd^Z#X?U2^zp+wOYEEsekZ2IQY-;M} z(^B_h)HYx@yf>YhXPs&C9MmeLEmEzWc0I*FTWgtEBi<^_sUXed7jZ>ZlcZX7#dZGe zGn|TOi{Iu(Gea9)XlW>IV*XjuC{`y+DfNEIpWMDH&xqv0^43_MrlO;KsI5^b_@_K` zTBy*rAp33Ndc*;Q!6JL2H_KomEpPV8I7=;|s_eoq!FMZEY44_n2nSYK*iH^qO#ij{ z{+06ox&}FKhgK5ZzbbK{2f&@-!tL$t(RS!Bugj!AN13Z4^oiie+6UT*8g-P= zh9BxAys(p9c7{XZ>p#&2_FW*KGlN+=~=f=IV? 
zgObt;(k0y;!;mW7-7Teb3_X-ILrC{fL+Ah_L-%jV95yKt)Lxea{=ueL^FbJusuZ^*w0E)fg_@@;i!OM$o zFw6qrMxfj|^Z~wi=A15?CS<8ri+ZIgu72pbYbPB%8LCVXkI626{Xkp0Uf5;rufs!E z+wIY}|5nmPO;F~}XMpnZ1wTLksCYHue_qWRfwu|2;L8^CA-1=-A2WtV+V@5YUM6K+ zFPNWVGO1?qBEE8+s2(yQjGlbv7=4QM$9f=kA2D|Guuwk6rxNC;`TuSLv;WXRV$c~N z0gPpTDJm*D>c>s>pSLPS;B9}tqr^a*r!Jd?x|OWn>1F0(_L8rpndSy1pZ$e*dPQ~= z`<%j8F72|!8)$6eYsANYkqN_3x+-0T`11qaV{@$|LL|U7Bb(m%47Wk$Ep6LkVsNEy z&*_1&|^3X*}D5Ybev#2uV`9Mxjcr>)g>_6i=ga8##aX>ovwg z3W@^=i6e`HUZmlYNNVK8Rn3(#HP5BRSD6;y-%pHrYGsj0G>5yI2cn4how@iK`)(VDu7yc` zz586m@@FN;Qj9&-u`M=C@}iR{E^h@-5Z|tQ&z;9RYGIf^Ff50|Rwp>pQd#q>2V>dv zH?`44t#9x24|)iR9UO*6fcGE5mizLX{UeE{a53!@8fDWk zK2!fTNPM$mz;m2Vbrp)=Bi;L{TPD42ArrZ-Uo!Ut8GnZ8|BhZ&Cgsfc2+{Yu18ELt zIh3Bpvy=Os5)~@osLfe+RNfzRpt#0b@GVa^=UiY`MtWwZ20-p2EL z^@{r?+f-%r3;vzA&+S(FbUad{G>oF3N#27T8mhV@0dZ6$x^c^GSBmSG6fhaqK$lt) zf^%E48R$?Cly@(5#4-hq`ycze>5KbIn$diJYo2juc{6Ef4G{Dwx;im#2uef7Dm9!s zH!}YHz4iQQ%ozxV=4o24d2R2ztXF}&e`gsYxJW_#B_37lacV!lt#BaCRKBCaY;6@c zLcKgbiumwzD{ns2J|0Yqdl|UB)3k>{{Ou=cq31IwSD?BxSluc6HwrZ#yT5c^?xN^M zYNsZ$S(({9N9l(hR3UYL3^h*BNdSR!hX0_^U)A#@KUC_ch`4Xt2x*Fpa~d?-z~Y7G z$*@JLA*o5c01~e@}A7Fz1U7-cN?E?ZChVe3lOwp%LUy)A{eMfj(9AlG)+ z%%jpOmz9N}wcU>gE60rd+U&zW&W;UdGptIlF2(jlymF@oDqCy}(K!PU)@{RiZe8xYx;$AuWZ(Uv)w)JkA&r(YSe0EP0Z%DjV zc#_UyC`b;hLWw*zdeLE)CoI;w>5hD4t(Ld8JF#il0;au7-lj!`%~i4kR86&P z@xDW%raw3_6C~FX44HrJAPr|4M?*EOQG^VnUpbk1zI=N(9(HxJtY}cx_GtGsC;6;P z3x~r}I?T(yCdF2rkzBo$J(~>aYIxnBBkqZ@^GSFgSH>)zETD;dgDH1zqX@s7z^$+<9)$;^W=GlceMHd5- zFOFyBQz-;X*U0wMUFIL8eQRc>p&(Yj->)afp0nmQpJkxkW7T_clr=iXN%w69^~DMw zzrWI(sT2~pg>>oT*v;7-KbSq{FTF%dzE;5S@8cjzg@y7Mqy=I1sAA)z!NcwB(kCJAy-)ppnvHDo6P8|N)40Byy6x#xbNi~N|DyAIbZ zhSPGpDWndSQ#`_FjM_9=$H@ySF268`6_4y4$9Z+avPGiKmp;Lf&TDTTJrJ@oe>vGn zZ2IwCij}+jeop*FPjB9F zdn)D#m(sBr7_6^bHXvwCeMS;vqv1=p)0@xqiO!xvYau!{hPg(_LPA1v=tt9{Ta;$O zC&q2VA{-N$P`KN7wohP|Gq?{Ts39U}#$j zBWiuA)@~sO!_vx4AEzXM>X~%umLm*og%*fIeT^5?U zzY)9hNa5$?a)TJZOTHy%BbcXHt78aJ6 zR9M5t=WH=YrQydQQ?1*XlAA~sjGPR&3rS3)Jk5^3Os!b`097)f#^3me%3y2P8*RJHgG{ 
zlND)4c15L7W+64B&HzrSt(zbzw#@f%4@?2f?5W;%t0!PuA)oX{71rm@Y-Z->4JXR> zD<*>fHd@kBKnstH^73e@1Rd6Z7@T9!^?JsdfWD20rvMXmx@|ovRm<_Q8l2Pt-vUhK zla^u>aL#c~*cobj=Q77v54br4Jkel6LU6RIgzx)((9>9(P43#(2?BX9-NDc4nBKn2 zJpoE2kywf-sMak18QtzQxm^G}Qc}CLg_U7{EY~=BEHR`+(?NKb10uHfWUOuAU4K`y zr4JZitTVi2lsBpPSXSG_m3iKmul(xdE1H(i^ReDDDhi#xWYQBdcViqoG)-%6?M&Mw z6i%3M`l%+0$Odi1-io*SJ;GNdoy=Ons?h4Lk3*b9c5n+AcdDDqjX}E=Sz6^FJmQKe zcIhwky+A=^`^8w`l_v+v_)!pdN8#q_+vO>rl^Yx|ey8Io{K4uwuiXcD3t{@ zJ7{tBH+zhxQvG^`XCznD+pJrqHms_7#1lRLOZB)NwrsSRk&V0a>g>xjNXM|zqZpw; zJLT8U7%MDKNJ3G`y&muk97ni-$Gg1~dA!$@$$2ml>@Bk4>4SqCus1aCIp!Ubvp!bF zm2BXh*XAbGQvGqj?Wx^Jdo||(c+AA(_e>HEQXK2dqZichtWzkX|634Ryd`ct;V_x+ zmDO#r??%NlhUM;^dGBEu|I*d@K+Z?hJ3gg=nVb~R!{p{W1>iunUAt^MJ>}zqIakh} zE?D(2C4jB({NV-$xkq%2#ML{#JBHXOj!fu)L47w=szGz`YQ3Y6!p5N&lEvFYZ+I47 ziq2e}0;e^jIzqLnfYv4Y_1n+sQXo<}W$lg9z$JUnW&7i_NiIbDo!)+R@|jBB-RmRy zhWQ3}cI0e@pLs3N)#Wm3O|H7pQU0eq;-*KvfB$}QHHJz^1JEgl;!q_1hT=YbTKKX1 z3t+9D0L+}21f0sPL!p7moCezVdu(&-dVh1@r&4egRAIy4k)7yis`+FK!8I+lwR<&u z`rQ1>saVMQ<1CFLRx4amvqIdbgwAG2kEct%=rm58gR$ZXof2IKkCz1M2hOz8dO?}G z#yHYvH*d(6y<(r)8tH9UB%Q7C)$7e}v6rcIMwQ>5)Gbxn>P~b1z%t!R#!D$dJ70``oZ3>%^LKLnF3RQM=c>n!k(Ngdv zki4(4yhHjGeM^HcolKx(7ews+qR(I${=lfe&4slf4oG(6G7ljUyfk}6e(e4}^a=f} z){<)vxSG)ut+t`kJjfdCtyenxVPq~yPE?bOqKR*y2(uVAB7YWArKIXq}qzfW5?B+b=0tw<1;=gT3_YqLJ zdzZfxf{=KFINI*7N)nFPEF&qhr#64*WzIAvGGGM7Zmvxy9c?sY&h@=Mi?i;D?v8d( zzNo6tm-P=-o&PH3+|Nt3Rwpf{)h@xMf!=h_=u; zR@DLGxv=zwGK#=Ek@taan67@W74>JGRmzPPk7{FgryQOy2bW#g^U5uMmZl{cDy6Ds zL!k#mdC~7Tr!|IRG-H}H+}A?FHmX9j(E$n6X1=#<`Ux1Es^6!k|BTA=nMd9jV3cR@ z8T%MaJpszezNNKJpxV^l8r4vn%YXBbQpoALSA>04tK>^?SBnG?xu^AbdcpOjSG5Y7{nMUGzAq%gtZ8nxYAv0Av?Lv?0wIZ-Yel?jc4gWB_n!D$$5H69vZjW+tRk)>d)~(eEZf+ z-|8TCMo`RuT$HDm*|>`>P)~kDqOZ?I$gnB8#9>>~;TVU>5>uL+0maIAar-8glcp;^ zL54y=&3i;)pQ0U+s0;J{g4(x!`;5q|RixGqZ9$5c6%*s|ff7ns=-g_~gRJN<3K9P{ zr}v?Ztw-K@(e;L+t$r(6pmkplm$|nQ>r$uD1(WHu(f1#Ia zGO&7w_IRF@`c%ordaEC}%0#{1WbtkU&~cyNoUIBR0(d>%ks%ngmD<4QDdpH(p==tg 
z@Z3C!3Dqjp+bVsSWoS4NCfX9Xb|!^Bgy{snw?5Yvv=8HZn{JAp}D9Vv$*6sYegd<6jC=w&Lk9FJ;uf`FHXT|!qknL1_L7t9O3eLRvvngrv zzJ|4qY^MzO!#Q>E(3OtlEder<`>ylapoC9T{qh;PZ%Vzk%y;w_z)+V+?DfE^tGC6`--m<3ZWkxn!)lRdC(Px5zC_+ zToT`n=Ft;bU}|BBPBDQy=F>ye#rX;H%QS6M*z;Vs?066}`)_}TFI%a|OZe}Nh`4(5 z_kEGkK-*dwzX%#fjE3h3fP#XJiq=Au;Ffy6EFEgG)b+pL#}EfT~!x z*&RKdg%0wpRogdI7KCyDCl6lRAC|7Mve@f+>c~ANoF>sqRHR0#DBR3*ML#;HHOIWb z?2a~N#!4jwx;i)H`<1L)mm zXLH53!)mP!Nk%ufe!{oaOef#pH8MNi<^13gF z5R~L3SRDe?!!1Xao6`?5@e+>$rH+&-WGc&|XN6C&T?2_V*6i`b@8k=|YLFJUC4aTI z0%$Un7WxgU?3y-XYBA31iJUD~%MsmM?KF`})3KhfF_r#s3s7^al9b?0=-$Gm_V(;_ zc*KZ6s++vN$K{;w5xqC3xn25XMSwn~b=$>{GL=<)&e)HFEUykFGyi%0x3w&X06n6N&bw4C`d5*q%}N0$KaQGn%^^1{1VP zMnaqJ&qG_Zq+ZflbCLH3&FTOd#=FMbzbMrtxf@l)AIa{U|D2N_S#+TwBJlmO*U_cD|H} z$<*-qgV&!MKjVYg3==2}6tqMi7tRm)Et;=NhVFciy~tn5jzKg3X?{$iIRl;((B&*u zf=@jLnjVT`ATx37XSe6xzU#k!MU;kNN(MX#w$&-g4JJ4Has4N8Q|=sl9Tx|Sp5cYz z+!qr=I1m08EPOOGgRrg;?K2vh&yPYR;Bp@>(RFAp<-@OEU+Oh^HJoluOR;$sN=Np8 zw^h6hEhjosg_b-jis5+CN+W>-H6$*kCloc=hMA_TBQl!36%j&n9#GrBI z}i&9xA&-uygb0!NjqV-?nyC>A(*7@OzTrmI(s=KmpSO}gw?R9_QRrB4JJ^9 zoA0sTF`eO}@5}Nw8h5*9J&pf)%csYSZ*aaONt|!z6+|y_!#0eXTMie{lKDjxIbzQK zO5Go^BAQAxcQ&d_|(8m}TwiEM5=c_EZ_K|%PmUK_#- zNX$iC@|cj*V3y!pGui-7fls;Ydt1KGz8I$doXCGkbn3fXcKaxA^T%MKh2Sf*y2^^9 zfX|ymeKN4Qo`yy2Oy=T>u|jXAdq8>BO$zaNta-;090Op_gKf*dG_bZuYbPm7Z*`&V zOa~gtnlr9})<5Kt=Egu*!G8k`1k4g68k&r&2}}Yio3Tz~Jp2^_@Fcz@gYx90;ZNhm zj_|LwiU~=E(@Om=o+NYTqz&UPO@LfMWU$@ zhtzgI1GQE;Gk_;55Y?xO!!lLEZCN&5q|zPueZ~gMyvh4_un7x#Me8BC;e`+|mQ5Yd zK-t)vs-(~^{j6R5du)(nLU5$wSWK?7S}n@8G&s4R(9fNGMLDHl#JP{v03uBCWG;V# z5OYuZc=-AL=cNqS^f@;9nMp<#gC+{g;TxHZRlX@V%|}hN!!}Q79J=b%nAvUw6n*V; z%YwznV%n^Ie{#g>8n0_4-;}>1Ve`u0)W-L-?B8|N71)=)FzL6GE*tXg6|`ysWKg-a z`e}*5D;6`KD5tqj=tEAfnpNN>9(AH!0a8YMx~yyO&zO0>1~4k!{lT|X4&OYt#%$g1 z==Q91n)2N1y}ew@CsH;#Sy5o3-;($!@=*Ck#wo~ZAM$Esl+(1$KGErKs7{jjC7e7E~o%BHvEXNh;YW>JpD{ zY8liaECwZnTLHEq6Rk(txKsPJ^Z8WNzYN38?o!w00*5uK{rwkleydRg364mh=*FTj zn;ocB!yFdQ+lwBTXWuknO7unD^LM#fQ7FMXId~FsRFXg=UqFr1L=!l`P^t5)`Ikxq 
zA$GF~gqUxttGj#8@UW8o2T{OeDa=T^gc+cNVku%11Z3Pi}cSWXp5uLL@I2pET+>78-?(nTeVof3fak|XBP`>x___hFc>z~*1fX?Z(Usc#11r#vdJsQQ{k1TXoW$JA)L zznwbTtQ0V}V^kxc!nmHuGUf@|rG*u8i^){&ht{wBwh2!;Y5T0M<{@oXv9+7iza2TU z{FRU(Bj3RUo5v2=42&%`oq_6r+l4~8*9|$3BmU`7_1NZK?LtisBVVk$0*>8KEnfC& za}eWHtgY62gu_<{vN}^~-^GTstNGy5=Q^hQlwXtl9YxSmpAC&fLlpP*CmNGPN+duV z@$p3ld5^gLs%eO?{HfA{=uae88R$!5pphrM%Apjcl9)JRjm7)1|hnfZctm%gDBb(l2}Cu#p3Cp5IQiSZyLps+D7uj*GPcb+yBPB z(;Ce(Km^V^0blY$J@uzk!Z#lnUIou+w{g7LT)56u3Tj%M+Da37?)S zxh#~y6Sp_ejP|t?u$C3`ge?IZwLGLnHxRinY4ZUtnJ}+ z2HQW~U0|7MV)}w=DYz?cwhEz~GhI{rPwzETcj z0J_Kp=QVGe>+>GoJWaYNRKgKQe9l7u20a1xcCgohbq>@vRypTfHg(4cXPoNp3p5-G z%i&O7nX54F8}CHi#Q~QO`R~iaklNZwdL4c(r|XOh+--h;sXO13qG`zwCuGAeH`Xa$ z>#F>)-~S+UxO(3K-a*9n+Ja3&acJeLZ{U{@3qtQTaO(a;%E|mz3hgjwq*PV>k1h$k z8vj1&Ru!po)FK=8z$j_7vq8jg3Y(3Ic!h2iaUW%z)iG0;2U7Hx9KtBFJA0v?)$l8! z6tSafUsYQ$B+oYt*(^l^5N%Pk<4GV2jc@lZMKiwK8bw&;+Le<|nZTX_3fYi(nWKb9 zQjL56-0}yqu97BJ35O<#i&pZ-OEf?Ay`@C^HV^AfVCUBu*N2V)0YZiY%j|JCpF~GK zsx-w~IM!&}W6@I;QU5ae!W8J&$K5cpI|&%}>Pq)G(djgl81@ZQ5lQpU1yODzh$Iiq1WU_fC*0DP`OPiBJ;NqHx9|0bXG zwFTXmbP^ylYGDAa`wJ-XtMRP1F6$H-B2oNZfiJ2MNgD_#W-F}JcAES`F3M+Lv3V_( zmW_u0SJx0&)L>RovN!7pFe{+sO%MjPsK?v;6E$Km;U5O`#`8D#h)1RvD;Hthh1M_e zD}oyZe5vZZ*YhkA}Kr_{+ByBW$flJIPUAqsW=&xU?KJn0=5*=XiTJh4uW*{9R z6_m;awF#Qp-L=_&8Ust?+`K}$AAeYD`^+8QLzQ$p!TJvTDC?W?~xx#6}1E2<9B?rzOg{QHl# zg3pYo?|X0>BgoC#*T=^OcMW};nwO`X@vd^*)ecWQFZ5NXasj~azdbbBl4yA{3b18u zR$3cQO;guCUSwn|yzLee6f6M@5BY)N_APWMTA;m$sP4R-bn5;0J?`J@m~D4rPv~wZ zJtEU~yOV=J;XiV+eET)H?8Qv`#&rToYOfsCDKa{TO;Pq2-E}jdhjbQO&2B93E^Be_&zA~~W)w|f| zbahT?+58iJx8=a!(KLjFKN=mEAij|q85CzIq~s?qK>M{7cy~n$DDgioLTq()b;bD* zqsFPw@eDqNtR}zLZ|z&|dy)W~edFYxXV_Llw}-+;%n*W$qg?H6axDku0qc1@3jt(e z`}H5O&zWFXi&uszMt!{tu9n}Q$w_j(Ev#(%)6x1#uBSDLJp`IZu9wm%=k`877CT4G z^Q+8~_Hi!gQ&5wpfAU4jIX9Y1=2jTS#xE%eSE+^M>wagu{xFRM^WK-#}wH&wSE3D^3<^ZhtZA;1hhThD5v6V_gZqZa?fL0E9Wqf1w~^x5vVUb zd?lQ8NIa@aQ$<;7Ngu~(y1oXn3n3Q8l6jB7f^ofnrA7HV-f(n%cHFQ z&2Wd?-*q%i;RTR+Hrh8VwYduEB%@*&XDwP6;#hi1=K#37RE1?r}D_q 
zrpDS$iY?I)>HpyJCCNK+8>*}4SEf&pW$>B42uKEg84Lm0IDmLkyY>?k&cb?l+#rQR zo-{nEGbxc+GcYfOlw=sK-a~f{KLnP#zZ!#4ZX0qv0nh`?`Xaad?)=qM*OHXi zrV>vP-{2_znX6nMce+&O&PQ3SOL_Mx6_=YGHAJU8TRqZX>_dx)g?Qyhxg<$6V-IUGC zPOaT;?k!)meTcC$LqPSiI`yB5<6WyujipP-f2A=kAdMk)0(uosA8`BTC(P9*BLS!f zQUbmwJ^arq6Rx_kpO!#ILf*`H?%EF&btUbe;GP#xyu!$N!q9IiKpIH()9pfnt7Ml> zgZ`MsT_3Tr6z87QK9d?oYrD8AA;lC!#k2b*^^d+c2{?@o{}T=X>{Hbai~bRL()86% zKSsB&(;l~Vu&MJyzDsQ#;);oz zhBRR=CMKWV0;%BgA7-9EYy(!OrZ-yz2U3UNX@&#ZT-et`(r9Dru&R%eiCJQ$_5LSp zc;dftUao+(z-b9}&yXKr*wSn4NUGZ3m!H+0dfD!b?0+)iNWxUFv}1(98!Wycl(%`U z{f?T`{DSR?$JAnM8n&WODpdJ<0xtHJT=H^OSY$NC(13cXRa;BTuQ->>1c2h>{MGFe z@DR0~u|~8&%?I!tDQvd_|JTPT8q1MqU5rXAXDlAenhhQwjh8G6gEEJ4bSM2SWj#B> zPgScFF^9OBq9vE|3T{*Z|ABw#9T>33r>N^T)<@p)(kZHg)1*)P$V_Y)^V#5W=hsK$ zdca8BTdY!Apv}XJcv!i|ghmV{En=pfqE@Rp=_{e!REfu_XS9LW@1=?Wa_5sFDEJ6y zF|BoOjlBR`nXsP%W+zr#!z6xr^kk3i@Ct@|oET0RQF?(NkHfovHI6hz54>xv+_PL-(mTRI>OkP za!Wu!z+zZ-?ws>gAg-d%@KqwCG6czG(CBb4Cn%8a6p{XU^-loFRTM7M4tF(lGz_6%Q0o}o6j zy)e~L`_V#KJr3pdmmNzcLZ}j;QEqmEMo;<_BpA`*QG6{MEec6IJHuy0wf$C0 z!FyKl>#Kc@e^cny9;h7iRv{_C z7&neo1#$>D-(rx^Tkkhl=Ne0!w=%o4{1e5{q_eY=?zQJDT>tAfDsca1e>1@nIc0NC zfrP*O?G_ml(+Ps7av8Hb{bTq_0o?fqPKiVFa61d@^NwYS&LBDS0drg1h`!z-hqm3= zuvy1a;`5K$XPlc~qByO`mTDBRYPj+k4aVBs)M2H78B&WqPs#X)ZQA8NhNn+CO2fCP zTZbz1g)>-5oB`w-c)9KVSAsM&G>hHJ>?ZP0hQzp0iW|*RHJgLO=GhG=1aZkIY{dkI za}$dr$W6BAvHxE8#mbwZHm?6VimYt*B%b}@GhBWZ*~wms@X<}KIu>V*{PqGw&1H<3 z>{B*`BEFRC2y0{!nN`()`}%;>Cu^e4oqO@xFnv|Odfm*1p;i)eo*(kq((Psh)p<8o zy@12A=t@&RHk9$?MBU}1Uc(4v4<52@6gJ7KqXs0Bp13@UcQq#}B{>N#Uj@Ll7w&RC z0ygsQQ&!jUN4{vpaO9ha1QiJvoh%(CcoC4w_974b2|yHyAY(XDpHHi5>AH30D=P3` zzW(UIO(TvOJf<;AiqOYluRzrgX)dQhK!x%jp`Z+xICJ$Yp~FZabrEEVl+7STeVW=n zxMsJyt;Kp6r^g+9i1=sh#`;seLnBSO`>o@JPVqb|uBjz+YAx zXE{fq90mL7I*%7CrHy~P(fk0Y;b$_b9ii>koO7$>C(AF%61>tYGGHyF({UE9uHWC*<#k3+h zIKjhsvAwisamSma+s*rdZ|U|Od`|E(^Hjo`_bgH{BG-5CRdu)?a!kdGG)Nk*u*DVR zx*D^Gm$2EFP4ryXuuZhk%3;eTSj9K^A35R`J!+$X$M0!R`Atu?o+Kl3CokcuNg78u zqe!6nbV)2cr4VWAswhqfs3XJ&d0~3TyMcdQq=$P`ul)SVOHxPvXJY5^U{WCOZ0%v| 
zDb2)Otr%~ZylUvHiuJSWOV8(v(GA-rf=0Xv%er&4u*Mk*-!~PJd%diOIAZdr1)d?@ z4u3*eGDbZGv^V?Tl`Od}8|wW0zDBUU@4D7OO@_p`#&~xeM|;k1Pbzs+M9gT%IaeiB z3Db6Ll6*1fDIwE&6`I#E7TK+UYa_IWrQVUIM0=k&|2>4lB_wM^Q9{kFp2C)$dxN59 z3^Ye(-HfCkD7(IKQ6+S6Zu!gYSETDXnZkNN*FV0z(=^FP?32S zj;UALOo8d=WVu*T<;XxqXV{6u;iT$!`iQ7a7;ljyJ;o55An>AZtJ#9&QCrEU2}~EX zLbF1ZeZe9?J3@>B6$Vuxpm0eClh~Orx)Yj{Ia%B!VvkV4vxhCWYx@{e^UJ3eL|Skg zga!yIbF{T}_00v`!Uy+)y_GZL1nRV(*r~2B3HeWV^sIBt30PedQ1UdpmZ;t zK3GJpB=U`~d}TWmLL&E7=?w>my=-U8)0#y{0;5dpM7{0vCu1#NbegM16^fABr>g97 zA^k+@?JfdORk9|Ai||MBeC#);sQy<>tH8~%YLC~_%r_|Gz{I*F){Z+ zAmc|7td)cEvsn6vPU09!{Esck0Bx53Hm$Dwt*u^@-pV&p()tu=Dd;U6AB@k;d(Ht9 zQ~B7bd0luU{854vXVKJi*(m(k2;_7X+i)yuRn zq*G6qqK!e2oBrwQE0+qnSZwXiHGfO>RR-Orkm#E7Uk9aD6C?Q|NXr{OT+l&YM$`nF z>d|f_y4Hj*JAvehsoR^hhh8<4Ke7o=1YhPwO}UQS|F50-Bbp1VJ?iRGoA|1QK{d3} zEGCA?OQ+h*-WOYJAOxq3KYZdLo{O@DYIK9SUJQo^MSZO25IUCyakI#v58)|wx=S(i z044^YOy0Z&tg>TlRE8uDaVETITgebP`nEA)A?ovUi_2+8a9!y z%8LAPMvZBX3dCvxSWr#6dYzQ!m5na%hJ0HAuZ#Gz4^dCsg4%*Ob@t-ihkY;{c`535 zgT+2?1PZBFmO6_(<&q7lw@=4bCaYLa{<*T-)!;FH%xy*A+v0ygIyP1+4Du4($I0XY zr$xn^DO7RNsI{nL*3t7ze{0vZN*9vkB5U?r(9VXXHM_9K*Ss^JJ{!-sEBKxr=hw9! 
z5j4MC%@wpAvo&Q!)F zXharB^HgTDYhq{{GNhlhj_KrbiNYf-F)HIaKOt) zA}^$~zY~0v_#82llt%wG@$u%k@ryqjlp~UYmUBTXIF*u_6!JZ~ErGlwQon~Ae;VOb z^90v2)p5QRK0KZJUO03`0@+zF`bg+BmT!6Be6Ep&(m`;2?A;S1s4@x#)nqs;8Qu6u zwrbZWTyfKHN6MVP2DOw>DZxGesdVPTueF2krP<%!KF7wMyfMzk{{Ux9c;vgfb5Lop z+~FQ3_R=D7`|R&t>7+It^+?9$xtve&ST7;7?n4aEh&F<%uQ;6|Y!MKe+gtCIr!QDu zM0)c`zMlmAPSTMPg1f~8xEt3Rqd06?4p!YvRM**Fd?g?0ksCbz_&|X>{G0Wl)ot8pz1ivu)g8 z-PQk5)IPzSooHKIBFO*kq$a$wvXa%Pm47whigVTPDCSfr1bFR|i1Xj&)q`0=-0i++ zv#YEo(fGRXywN1CQxk-W*90@7y*n*E+PLo$A_`H{Jk8B3xi@u5e2-`!y={{7Nc z2zK0ix8XfZm25ilMhYTWQn%f9#9u}}ZH*9ADB>al6XmUhPG5W{9JMw=E;+0KIcM9l^8SGVAfR&Xa7sNx>fQkEj&c$d2J4lMTc2x!hb*ifW=y#%V zoLCLv-xF@K=49d?Z-_)eZ1n?5?wf{ZkGG#RB}t$yQH$ z^cJl2-?G!>X`P9^LaLTf{^nW;M+(XA6gEH}Ax*@gLxC!7QVMZM*gT3**Lo^>z|bFF zCe@GEcJ`-m7sl;vJoyy+*opXOI2jT|<$6iAHM@$V)pZlDaIl(f-xi&0vr`{~FD}!5 z%V7<%6c1OV@73?g0x}uWC59lp1&+f43-5+b1Zv`fg z!sw7=d_?EyY`%Zh3p8nN6kFuYHD?27oa=jbnlrlXeg-xQh$R9;%~Yxi&}W3`;MQs1*Uaf@*UL<%tk z+$n(k2ZYa-#%^A<3R~L-xxKfik27GSlKR-I*g>MjOvRlVFu zDqeS__E0Pxs)st(Y(ccMpDP;c-`Eu=zB2qm!VaNYd8Z`l6p)`K2{`T7uY z_AYpdlCPFVZ6W9O;#V>VYn4j9Id`l|w(-phYFbDJPeT~5!!nQ4(I@h&7RY&b zc82fsw*$HFSD*Z!D}VwVx{*Gnv_z>WhRT66X4ih#Ei5I<^m+x0w=c-YqjZEF{T1e{ zeUI&0Z=Al^qWqT=dgz2i>Th_*atj7g^^#dTm2*q}1c2Ah)BjK?-p>XB1gtj+XOEuc z8m@SyP$}TKA1t$#sAUX6C=7&mo*kyku6A1;xsmFtSq)Fyju_V@6B!e*>WEF?2cFsQ zE&GS^Em_rjbaPm#{!-3M)f&2bZ@Cx)@B4KnpNtmqhKsvX_U|ov}f-+Q`T5i9hy#7Z0yf#XUw1*X~AQtK|q}H(-xYovjfhae9n@2Bt z5@_&$h7kI8&^J4LQ#a*2!BCG)!>ZTzOL&Op_VtlJJ7qMaUked|UXn$r5A&GyX;s-x zfzfM|7G+j~7MUkT(k|kIg}AUQZP#v1CYmN+^=5O@ndK^C>cU%?8=_P; zfgvb0*|~vK=99kMY5;4h;rsOEbbQ2;Q+Nvuzb)4y%#?{uH+QSZFC0rGa@0@;XLx)| z_LDBEXvzVplkm66wNgsRS%P9SmxWs(pIgJ?pb^h`<0F9P^WQ+uKRu;?y&28`b0Xr) z(8U=Hpjb{*n)KWjvKg-7VHaV5X7#@EbB4=^HsjS+@%a8aN#J>k*Y+&@p$;it{vLCV z-Y=3$c8Z-Rjmlw9!Kj|upWZw}-P33q$QR?ltdDjbaaqAcSMT7@4;lYf&k{JkYQ>E* z`n#{D7nljhG=(S>Rgt2V*PV84JU&p`f1rzL+OHRFnOiUUh9JO|1qN0cH*kpRJ z+zp=cE~CIYJo9}WKBCS5pKIb}4WwXN3|_liOOHS6m>v|0NHVKn`ppxz@DU!C2_A5H 
zi#H5u`t(1?a-xscyiYrSrnQvpEmylw?%w^E4Ge9MxR4IchgrC%CG!REIgXmnCp{PvQ@L)GS+1F|A^6 z@WqszZ1#fpp!tQ8c*E_einZ!b0=TaVuC^&mkH|HS!asqW>yktY9q=DfML(ZBy` z8Z_Oz`%YS%+Ld_VB@2^e&X{E%4Dh4-*TFFHtg72?Vi_?wmHiuL!PE9CvQXlpZ5At9 zM>vv}K}vdDPh*ST+=|+>TShH)?ag2a_6L|orZH^4g=A~+gwZw)ns)D%|2JR@e%qAL zfsoi6!YBu+)lM-GZ82wWG^=2V|HxPfwJ77y)BIf5{rlPM*$4D3R~RBF^-S1cjB~GR zb;CG8FBodgFi>g|s18V{n}2&vl_%`V^`@|XxSJ<{8F@i3bJZ02$He4+Ud>G-?_Tj* z_P@9zu%hlPFK<0rZmv+T=d<4gy zX8dgY_}gFCl|kmz7BmZrTN~A#y@(m_;OeU;ZM>9y{-zO@p`+szgstth@jxxF%KHcq z1OM07=Fbk!)2NWFyt_;rj)^*QYy{j}i(icHEV^i{e)sVkG~$Y>l8)$fI~>w6Ia)#8 z{gH`@<-Z#Jl&Wnl44#WXKD~T5-<5pw`Pro%*ZQ)USXhS?<(~_kmGrhiwjNa%ucZ%0yY>R5&<3;P=X;yd*P3=Xuxok` z&)H&Jd!d38%avSKUYo zlX6k10KK%#P0%`HqQ}w6#_x#;k8{q>W)xNJ;F!T(=&|i?iH}*1#-UA4UM>nuyQ?gZ z@YR?1ztr40+F*u>-IHqmJ=?*BzOL-dBJ-ADptsyFIY z+bp+f;|#|(mjP^J#abBTB4V9=^9M{IKe*3Mtqa0Fn?Db$-Zz&va$RmpW^}J?`E4~Y z1Je=i7%(=`Ck?whk=Qe+{fukC<-$D@A%k?N=_KQTn7j6}zV6@-XVvSBpNtYPq8_xeSs&}cFjU;&@CH@rW>_|2oaXg*ov6#STzX}}zr zK0x#fz?k^=p~(xFNw7FeJZ`mEq{1p?$~XNsK1ZCU^%n|^hTDHqjdVHNs*Gq`+vLR0 z)DG5bW@NogM7kll$eNCIxp+afZ5Ok0Q>xbKGWjja=dYeaLZdbk=PCyt8sc+Xx{a1i zSFVOhwHw4I9^kxC!;@8uGm-BQ={v3 zG7&b@+!rB&IWKA_d3AbH`2ObdL%Gw~o($LGQlr8nSH}fe`Tvi+x9o~*>$Y&?gkS*@ zTtcwm?v~&L3+^t3yA_4(1b24}?(Ux8ZiP$XE`=A)UD^AUbMCqS;C`SrXboGn#+o(f z7`^vLxm+f!iJ@@DjVIqH+gZhUEk+t9t8rDaArRisH&|l36zk$#9I}aS+%N^ctF?b- zO_+6Dz0kkd#9^*n&Ny|e&f+l1?qWONqfq|ZDxt>Uo(6PKgH-sW15RSR_Q{^q3CcSv zJDj_U=W-wgYnySZhN%XTYDV-rwU*72F<==kHL^1^)oRL_47E+_MTPKiB$D}yD<6Ut zT@QriD-90i@*)U2YDk97CY@L$e2f)D*%9INM!^q>D&-s@hnEjcKSF@y34RRLB_|rb z(Wc!xf!(J;x=2Z66LlFbxV>c2-wIV@PhvCwC4`z~Je(n|i*ve6JeS}ekNKqL z5|U|clEXlae$-=l&K{3y^Vw%!Ngcv*ebWy_>;8M8ypKk7hCVd%>x8LV3e@C?$2+ui zxd&N;x3>E1M~NO=a`QMIa;ML0|D3|2F?3-d^t`)-`#DUq@B~4PfY=D)^PaTQCz_Mxnh8bT9C_<3u6HCvpz6p*PPBZ7FET~N`<@}KF}?b_tu%D# z8j4*%Io9iRXH-&<>$iI@8S&z*zjhMHnk!ZkITzRbgngu5^m{^i#bk0Fd82%nOAGWT z@&@GJo3lG&@I7;WlRzc4X3uT+{$4tHEc0GdQ7!9jHtDRLy^?g%cB!yW6Pmep)^|7V zq@l#Yn}BCl>e}Vx7lI6OOJx@Y6FERi+dJGfjxc+qmUE0l;cA^;wl)z4+B=OjDzucb 
z97(!)dch#)pDi6afdAC<#EmJiA$%#FbMm2-_QGO1-`>1(QPl|FVI+q6UWbgujt^qW znY9+7VzyYoGR#+#r`%?!Y&#nPW;D0uOc*kur!>r#Zk!OMNUVgM7k>Bi@jMH(*`~p^Ug)vuJuVcwQ z_4alaLT2h*q`>cutXa1w6QVlEjZc*-XvS0QKb`H7Uc`=hTV}sXRg>6|c7fRRYdt@}mqN3EgqmA>Ln;R;upNO}z$Rd~5{D9aD;2w-%!j?-PD^D}=0|nf6^>diHkvFYkWJ>y)i^nLXUP$*+Z;3c z1mge~%|tRrT>1T(`9uSWY|-7<&FMjP2YyLJ1s18)7@Fgd@1h!mVu-1==Gf<<;uWsz z-8NV}UDXEF^veYDG&AMTskl>w--;#QTTF81w~KtLbU+Br{$6Yw{&d{#Z3eBQyx1Sn zhdKj|89E@8_v7E;z;LxR#2I}NsLOYJf#;-dFR2Ce&w=a~x}ig`2sXXf!gkDld4Tm0 z_wHyUe2aAa^Xoa)VSM;nTb-cZq}GTR7t@hda^Fy$b>a%@_^Hck^|Rv2_!@StZqIyt z<=d{n{^nHQF{(9BcSfJbt56aN!v-c$oLR_v-s*;RU3Bp{ODeHmd^*+PZm#8bbZS0@ z$|XtO;p!RcoZpTcOP-??d0aO^uNFF;a;p`zG&nna4>q?QR3OBzakAErnLocg^@5vc zLqfLrNTJcPG^(!Xpbh3IB97}IbRR0_VKMX&6J5ezq}@Zil|yp;FWlBrfZ<Y%d$9E>t=8~RJ)AnPY_NM-tROc0^z&c?le#w;CdQJpK!>*dhw z@v(CV_BJT|=KwWe;9`=S-w_zHIOckJI-TiwT6vr(TYo7)V^JgW_-gAN+hDEH-Ia*; zxc|$O6z1WnC|wa3OWUk9A9zJ+e?EJ1RSNNZ?V9vKTlyW7B~7TB-VBL;_qVKBXS~tvQ!v@H*kiZ2RFJ0N>Af z_zg763`Jb1rs_#aK#G9@Zz{)pFp2&K#!@2hneXhIAbJJvuAPkH3DB60oWu>L9lHk1 zvsX@xkMYIb@$5SZpyW0;oX4s;mQD4 zxcOr5x?c;5d6}j?E#U}EdJ0Ek%NtMgM=ZkRoZdLpU}o4bLdvNZWD2(TXe!yRil?ey zqdzO+%+$)px55Y_Q()SYPe4d|l*@H?eZW4%g{@}QwHqO~c=~E=4ggFX!I7%BLX?WI z;`2P|G%qU$ZPCUuq+|D$fx;6<_33hqeR=t*4bOh(lW(~hq9O}a*BH1XOes<`wn&xo z1I=zfQ?z?SO*wGw|2%V|C*sDwAX&|5pb!Hl(O_`V?3rjB6|*8Q4Jbs2;Fa{iqK zf2=E#jaBVW?W9KS*WvE7ufzgcMrQ#E8gg(DWHx{KQGU_eA#y)}soTLpp{embpVQVo z1>l9|wyZ-vkhsX`F%uBjeR1*hGR2ft;dS1W^<2SX{FH36zM}bSI;BZ*d)>uYd>>8b zy%xL%zJzIj%WE}lt}}%pt8H7|^o5~kc=mVD*t=7&VZEIl%5Zri(M?$}Evw+fyhVIDS<57>S!4DY9tDB>&gOdzHs)m;NOPdbZa z*9_H59>_%H6+wb6#Xg#;sQrvUVcDeTg-$vtQ zJ)40FI1)D{MHo!XcMoOMM_x3ByTC&JV9fsK|sJ#y@IWqwM_2t*LgaX3@-S! 
zmh6nJJF|UVnB3=Di3qwOI?W`q>^evKp@?6`;PyRg_NDLD)jiOSvnZnXJkBa=&+Hd@ zVlstPKvik<;Cj=_6ZIj2QVB+3*Zi=Jj!-fg_^eJ0LbQ^omxe)uyNK+N9v5o1BKB;( zW^F!zqT3g%s+H2uA~XlC>A9|qFd8ybinc%ti7A%h+e@+||)wa(hP8lIWPQGd9m@QmZy}Qt6}?@D#dI+jP#+5^GEC z42-I`P%Ze9!k&&(%**GFY*U45fd+LLq(4a2Z;xL|llPQFo<2OupbPPBQ8Vhn_tqi# z{X8PX%H|l`2hm_y+!`bWq!S10m@m6kzN@!tS~E0r+qlT0^Gf~uXVyZ z_7@3o>TEj$%^fR~+kH2y-6v{Q$=^KKvfpmnYM*CEyArZMtkwAG!85u2oKGag5r$b{ z@17xzfgg=*>fB{KkH*$Mb2-p8O=Pkm6U^Z@!u~C3lBTkt?$oT#Lk*SIXECmlV8r?~ ztgwcyK7az8 zT-%$9?W3c@c>400>c z8$RvM4z>8UWHyzHFu@4SmllThY}ZeB-%#UTW_Ka-O4>=8YWO49pF@}n#MMlJdoI<~ zx2Jn(u3OJVcXz&K%9s4o|9#=4-HW{5Z1B*M^J$w%AEZ>_TUje)GctEOsIFWz7o#+920Q7v-VxbRP48=0%nxE1Rk0Fa9_sa!0S| z4Fo)C>odigRkS68$$lG=F$kIW6qu-;V+ei`CE1m$EFs2g*y9CO!4CKykqoL=c6uB` zLJf7=hr3ax9Q!C2`nJ97)+9+kDtKOvb!}925E*x9x%f<8TmkRNlfiNx>;fU25Zf1l zY(r7lG4o3OKk=}aE}-kahX>zdAB&I(MY>%?txUcy0a?`_NBXV9G(+Zag`im>Zu*hy zlqZ%f!wavV-yt>im@MD(lcMV>e7JNcGXmBO`V3wkp2nH1^bi^r2Dy>w{0{V}O=7$( zJo-izxwG9j4MjL@OP$+n)vKK{O6@?of$M@5Y>y~_m zWAltlIa4;(Je*ERcm9GNRC!Px2?-^Y7_=S`iCuT_6iIX<<974v^m>AkzH$-?hwvm^sK`%z9)SFM zbA!W{Wt}p0(=-$Dy0IxX%;!w83GNlHX2RUEzoS-xdkU+ zP>sti%yH0j<+{U-%fnC}kU-2LeK$;iN|s9j5xHW!L|WYTs0f~wDw_-Pv=oS!+Z;UA zntLdbi$`>=(_78N`#)Y97&S;jUEJQ(Sg`ToY4yY&3E)(XL(@`btnIa?0^C&|R@Ylp z3a5=|aDCqik^RsVf>)bq)nVl(N0>e|HggX$P4PC{c`vBna258FUi6T)=n{?Op5HX1 zq=j+Wt8mr!+?NjH!J5msO>w(en!n$&vK6@`|FudhQJ70Xk*dF!vAHX#&oWqkYqx*; z1zo;G6x|2sT-Q@1l``r|FD0n*cdv0F2gzXT404sma5#(gRn|4t$W^ZWhR+OhO)3jR zatWq{e5jDg$Y_%EH`h=O^q{sYA|mo|KNgh3yY5-S)Fi=R@%E(ZuKx7FdK<@413tP} z^|@;Z6+CZ-sGZMbaAz6w)G9B?tTuDHU+!WmC2K$f>=T7IDdAZPsyU|D7)Xo{`>w@fcVx=830}> zXdm*Ikt+Spvi~Sy`MPF~y4)hkR{f!PX-5nX<0;@~X#G!}D*zrYQ0CvGpo-0<_%jqf z#czvr6E__e0Sp*+vPZ=7B72=QTdEH!?KK`5CmMC`>a+gqG1}kKcEqkWr6#W$*adJ> zfvtG@G_GoA5c2ZC7}4BS24}g|opudU2){4F4BBAl;104dUuO-u3b{NRao}-&c|B=H z38&lD`*;X5-5`8Idds6}OQmOa#vq0kGfea1{285Q${3|9vZgaVZOciETc@`lMx@UEk%tR7!SU|;_SoFRL=m-HlzQ&wSsU5_R{FF_>i zb{0a?a=*cVUaWYfFpp@qpq0;U`W2$`L@sxWI(G<5E+wb*Ig+$eljp>| 
zhXk?y_Ap$aadzhfoxCWq{Zje^xTbslQF-NxWZ7PNDtc{4l?}*W@Y>u&yx3fcfH!vv zD}+&SHiw7tB%ZbWgWuU{7q310A^WD5T_@pw3)N7@%N9HH{_{ae;_-5 zMs!YrTsCsy+&tW7l*6nChljOTYVf|A7cB8(kg|sPrC%d3Z6Ie*z(g5EJO-pQ#2?<&vYe1x^E`w*pV(Ngp%^g>U(=yRxErbHo|}xdOw)Qvv^70QLdR);5`C-@cT`=Sa!3mW+=K z1Gu(`lzA6!Ze9cUmQOz3CGu+u?(U$3cLX`I``ZxWd?jchIf+>wk%K5=kbatr=GkM4 zAz`iM3ROM)20OhaGicSWcICYr;B*tMm z;A1Q`@aqH#SkyTG4#59H2B!fQSCx(wwx(&2zaZV^LJ$yHW@7avF+U6 zc@oV0RL!6qf;xYFR8FsJz+Fv(=GBtXOr*D^F*S+nHt5x4rF}8*C%xP@s67WoT8u<6 zZ3V63>R<@m9E@s#DUwT;OoVJ;0g%ZA?b{ZsotudaNyf^TIy&a%kpg*HFnQ0nk<|7>lNCb zQntG*eZ9^U$3Z9>`O1o2QynMhoc4Z3YFSzsxe1c`K8?C!K@e}brl{ZeHC7Pd?W*GO zAKnWz(oEp3)fz8aXGiy=LO7ip+}F|mdxAhA0AT!ramD(ZqYzv)_v9Pf(TnGM3@}Use^c>F;n}?nO zNTtf^YUrPT$R6N6m+7_3jimBaR#i=quAZ-pB>V~3|CWRCAG#m#v^#DMjT_6KJRGk_ z2myco-QWA`Kgg7Kf0|wYAejERD*;O=J6J7DZJ9KuMLva7Rwx#DdjUMe|8LUY-{!)9 zdu0B@$NC@LvcHw^|6!K>DckBlBl~}8>k$77Cit&5#~)E497weI*XvIj-6B+}T5iNa zngM#{9S(>N|L;$Og#i0WSE9bh;*wo6AOfdZyFBDu#0aKf)y-hy#VhCY@)hk4$~^yU zW!drknZWbcfUAl;Dkt%6i}wBN@IcvolNN#hsvTj#=l_NgqJZU(KlGWh*R~A}Y#VBA zT;_!8L2L}5vPt7UD~|EX{Z>Om*^F)$*zF&sDp#6A)BR+3=Rr(z{2#XFl`T9iy1J~n z>}EiN1^+|42Kg@zkMz_l;V-52Pqt%`4rF9)a-X?qkeGH=PfYa~u)nrQVZ?RkPCh?>YcN4Vf{JBF8 z=Q|3An&z_=WPjiZ)(dri+ke*>#BMJS&1cHeT=e7~u-r5e@N;zF=@0zJnMMgWrEl1f z4Fm?T0x0`ixiLwUgE}3775K9DKGQP%ZULV?8XGNXhta-{WjQ+2vX@FVc61l&W)dIb z@~ji2{7ezMvX`T`m90&sEYPhbpnOydpKsl&9@Db{Es}%bl*PloSk`>$tglM(8t0I` zqt+_GA6`0eW=i)?u$!xfS%`UO3eRE1);C2!d|@#1QF{X=h{Z!W#T1=d?ncCBfjKuC zo$ZO;G0va62F`Y|v->HV!l_)S==U{_R-w_s!qqERa#;(vgNY@E9fL+!+SP5@ohQ3K-M0 zYX7newrXbrxkR1C@J4Y|W76Gy48)>wow+dfD86H#%`7)_w8m?7Pf>I`e#!FT#j-8U z5(jwMXT6gb?k}^rEBwS}QQJ}m*=O{VKzBMJi(zzEh_;@gc%FBjT^E;j%H};WdD6t4 zgoSPRq{_B^@~^S731+X~=-XH{ zQ$#~J2@|!;2%zO+!9jK@ z5!xHkjCflq^W~1?U^?vQkc5E7Hl`eXS!(JSMmFgJ>$1qAZ;c7Jp*2s^QG0l|CaTo7 zHzBYbW(xl?MUVgnW;K8?0zgjhBVApejjDgU&Za`uuJQ2<=W1}@#Yzy>ZgC4*csefJ z|5EDcMSiqv<}`%LFfkVu2lwO2BrXtyePEQbn&7njzQ^;(&ceg8D8%#p0;B!dF}>yP zrWfI18tJ3w1-?T%ZzzMm|nrE@$U5n!*i)Fi&cMLtAYBWUa0w?Q{V 
zTD(jX+W06qA&=~oF-npGOUhjo#vr<}RAqMhfL4qzdEzMv1NK=F-JE*FdAeUkAq`X; zcU19{j~D_j%jDm7uP$MXciKPE)AE{FJSHW@L(~82&ABdtxkpP!(R_u0)~~u_%33VJ zu7>8HDOTN|_HQ@*+-O}43E!fFtIhJY;0kRQe}`^VHiVB4+JQQ{z$J@zZ{d=XSl|TU zzX_sPAV0Ms`v{L0i>cpOiVs36$JS@`@|D;Sh_y`dAHVT4WY=(sA?&#?{H{I6`kg9Q zv-;SbsPgME2Kv4PoHkA4O20a*Vf~iymeD_^$X2~I#ietvx)1X1r06^+fUZ@#-e|<< zR>|l!M#D(DqigOk1YO$D@FNWk4OWoo$Nr~Ma&nwUQqi37#urhpgIWEAr(Qmh&pWTD zi-@t&`K4ga)k&DjHB| zN>KDDeiY(dR6>GK51q>v1|&`(0a29e$n1Zv3j8qMtDjb^Rt_+btR$UHdyh)O`I7JL_UT>(Z}=wo2b!h5E`byO%#1A9kP$ z1$yfbJ!mg+^tsW7*^87+H7nPobh~GJhI@_N${Yw;hJ>xZVx)a9&&`%x#8mg&IZO)1 zGy)efXV|MJpJIKl3B$cmF+rz4Wn1nll6&qUADWk&++LV4>-y^yc!L-j-7mb zUw?nad^uvHrHgIxWjqF;QlqysYK$t5a?Gz!H+x*A|JPNdN4ipu^^bqKbMHx5*`dC$ zNS6E>ntW8khrDV#N>4u2Kh5QawKPK9h(cH4>s6}Qo*f*rP#eZ;5%-K z@bv8>Jv^2kZVm&7PhJTgCl?VE>F!)~2iqkh${oqTTyMrWMlmLW& zmkr*9vMN<4Do0}rp9p|cm3kuzO;XoShu_LzL=Opy!l2nRN_tB)>J=Y=_kc|x$NCRG z`Xg{TbCJ+|dB?p;noho)m^^LYzGjp04K%QKpis*dVXBruVn*oz^MOoop_DWgzn7y7 zxfj>XsyQis+vCpOZk5CvsM|r{_K!Y@`^1J$IUmMJCsew=c!A^fPwug+l zO1|R%JWbS__ZZkemJ6L_ta*K%+q<#jb8$xRYAy0s*Y5gVrUsgk3)W;hxLBa?jGkXs$tsx+j}7|FeRelI6bpNrkzrH!lC#Z zKrk7U9PrBih&+1>9yR$>%wc^HaT3?KdwN|q_iu{>-{UF^bBb1%*7I0@nKR&?@APTZ zK-wuw4`%E&hRH7Kr_LYkpABLxuauqKP@iqI#mRxD$*{xo1c6Y2F0RQ*zF^4<_yr;a?jt4` z_r;e!aA3UTcM4@kd z=F)4zkKir+%{?t&zh!yo)#;{*UT_iFX}LbtGs>*|ddbCdb^oIUB!l4b8N{C_R}o?J%K@7AF+ z1K)Z~2nfRu*Ebq^-Z9PlT|!5&WUx*$3Gr?e#E6jSjgOw%ju2~hmua9YGqNXY^!|_n z0IWQvSEp0$P~%B-Re1AXqfw1RyHml2*U79-AsA0HixU@ng7(BDDngj=T+n~c1=JQU z>A%^x(Oo-gWKjlZa+J{lX^7Lb9AhCO*|6-aL*=|O!QXLcN{I{20;Ie)E80%9IR^&d zQB99acY`7{o&Gu>>c*;BWa`_?ce^gX%-my$Aj-$4gGl9LOxzQ9>g!K&aDi$cuHD^dV4$l)aD-@a0 z-hNfT&9jy-B~*G7z-G@!B~$iP%475}{?1jGAZmuM08sDHmFS`M3v<6JXYYs%^O`|yda&u!lkcLp@F2xV6w5v_D> z`O)jLgn%906$S_Vr64w#BQIO>5ZQN@t?HzJSScY|&fd^eZ=iK;D7fOqQNE3d4I#CK z=dNiqVugqMePwqYRNP~d4y&f6y=cR8*yKlCncf0>dPYf8r67BGJJW1%QvaoT z=I1)Rn<<9JUP8Lwdhv~aoGgUdpOVhx| zv5Y?40L3Dl)sRZ~5u~DO_L>hmQ1$fxquvo4t>Ul1Go$Xv`m)9D4d`i|;wtfYb)h8X?ZVaH7v_GJ 
zcTww*@BE~T)2%UCJkt6)UHuKFrngo$fk=UX42`COrd+dLL!piVDH3@r$HFhQF+|@I z02MfQ%a#v>JidKFwhgSlgKxq00Ci+~%kwUQ^wge(FWGiHB3$8)H+VpWZ`@{$r6W#K zBHbysMTJ=W0F^n!iC>#!(3MF!yEl4p@$1Rz!BD`4z%E{I-euP0VC$@*)jaxEy-QXe z8N6s^g#AFUvbU|*=5EqGN6_rd>q{E%=gIEUp|bQ0Qe55Dtdj#tCPqewmAHu8YM2^> zkTdRJD)7Jq^_9UTzq@qhz2bTXcFWW7Yyo8Ok3yGMsl8WATPB-co^Gmhr^HnJbl%Fa zS|wXwRfh+a)>Bw1rzQnsjvS7y&c!D$)@+YVZsdi= zerhOrS%G}4C6F{->++|ym`*}^6~bT9f-}~HS&A>|k;BM35?!CDaYkfa?~^KKp0L-Y zru$nGBGG!X552Wi3}?=@c}a_F@z>!_ZYqz^Gt1%({EgjiSpmVzS2M2cnO=qN>{ppD z5BE3#J2+#FhBv;q=X0wGs;&8&-kS{1xuRES3K0u^cb7#a`lNLQU(C4_0! zi2Fu}{zq%gx;)vGx*GEfl;N}`{KiYwCR>3EyX=ufBe@Eg=SyjU(L(uZ-rwl+qX)2b zE1^w^0&o#%%ZtdDNMBb-o|E)MiTch!G*xf(xbAEYj%(1iD}W-^*mGgMZZ=**37ouh zOXsPFo8&+fE}n%Sn6hbkZ)zPY;dzCa|&Y3yCi_mY*b)!uD`};Ci zO5o_0{Gd82cJ|4@+-FSx-kr}R4~No^2g<1su5*w1SDDZ%)(L}5GhTm%@3M3Fb&)@} zw@^(})Jw>*D$VnNAN<~c)h1SS2cWn|K3=Kl*gB$N<#+`6ykM!}xJ)<_>7s_I)^JM2(z2O)E zzONS^y-J-$_)GbzqRTwkU_%xi3mN^vVM)UwC4pbjokklb9`Owmi0wTO^|EL0Cj|?0 zE~Qr2;tFRCna@W3iZ%sI07J(eNT5dOrV4beB9`VWc4(}+jT}l+%hw7c;mf9GAQwrK z<^A4XRQ*W>q~n}+k7Lnhc7`{^=6QPa2-l+K=S%8`Se4wQQd1K?B2G>W)_cNA^xD(^ zZ1Hh%5&BJ_|2`Zst}~-;`7xb_F8oz*Mjx>6u(>26AZu6oOdQoT=?_KQK7X(9KuQAL z{CJTlSg?e3l)F%te6+1TY{dU>S56+(4%y2TA zO3~zm)os^+$Ubf?Ecjb{v)8qHw!*yzDLi1IxY$XGwaI?(V9or#Pn2f}EH;lkc#IoU z^iJR>!!T1Psje4Dm>c==``!V+ewwYDUGA`ah~qmWP~47zf0}lqR2$As-tqLr*M77< zRU_)n45V1UaKDN5HS3saf^kl(Ntruk*;7Xby;~zE4-7*kr)7QT%@HOSId2;jgiIk3 zP;kMLP2^unq%FtE7&_!H)ec4U&_d!n z*NDhpl52{}I{1!XKk|qqJq)ujQpJybqtwNGc=2k-`I7t-J#B0L?rlFs59M3Zj8p@@ z=pYmfve{Yh`bJMaUr3O}LYO^G4wX1&*4|JJ#$=u7r|V!PYOeqQO^l`j65+D{D3V55 zh%F3lQ}b3@mk}^3M);QNF_W#@=Y?OeTWFuRs&{lM>-ZO67n|%{BT4iW9J0uh^W(A( zG`AI%ZSu;rN?afYGerV`Zusgj*K&?IG!H=S9eFv(Y*GDEvN_)EPJX1Tb~2_JF$oxk<&8yr!LHEP}l1|w|l@I>&}0xQb^X(L&oqUQV+CxHG$G%AajQK-ujlU z*OG=<*W)`^SXJ<(IY+E>ufWw2Q<{41Xju3?)ykr`tN#|yDv{k`hgm=yJ-bZ{&#x}U z$J`2V`r{4UuCCZ<2aaLj&Tz@nWT(Q*Gh~vsn^cV-xH%t=iCV+>*I|=-8DRk8L)j6Y zxHQMIn%?ADncr~3^F`*onZ5ndCsyjxvS4F&1bKQG1&;pfOQYBu^lZY8T=B+dEeVI&4j^58{ 
z9uF2zIvzu)3yp45-wOZWf`bh)z0%EJdo_%s*t>{7~Crq({o9yhu+v)Mor zOVx3Osk!ApFPjZd>;*Md0k5MW%($`pH+prR@6mxqLDvg<{t3zK?OCr)A|u`$iYjrCR2IZ^X9s0*9$Z-<^rWaypb; zB0R7K#P#>K+;u~Ti7ogq6txCtHs8SWLDnBH16%SXHtu<%qJMf=F>@62o(xtjy+{sW zi%x#~rD9Z}>?Jh;A`tKw`sLmW=9k>R;XD)!9&Hh9;J(PY>&y1tH4>TiXc7>I+3sHZ zX|Faccwe>Mzo4QQ8maUn$D1pA^_aBMeJBa7RekI{8mgiBT8tSmS2Kf>Attb6uv7QQlnQ*znQbG89p|MIMjTBF!r|YnR=b}3kN-)wb zwz8=Z$m>Fm-*$}Gn*doEFM9)TChnx>?zG;M8km;9%&l<-rRI@R&uXVeZOrQb@CbKg zV4rH>C9*eWhC<>uw0dg~VBRm_yNoq1c%Ki0@`q)?Hr3;GpWf4Y$j=#QaXULPQLFMm zbCm62bW_3pMqGBMeS&gjE_u54AZ>qA#qw{1L4v~vMK3=oH$S(|JKS8WL_0`_`oDR+ zPDNNEvKCTxw7Hwo0j21tGcQsq_@!FDctS(NPmJ6WB6Sa~dgd86-a)@^Pn9GA6x-e+ zXV_>_dhPYW#b_a?8&3*?czZ%vg6MEwDd(1qa?%3sSL}Y*#300_(`PP#@oaXXoO^`P z2fu0-sl3&4Do8;9R(AMXwA?3ET_D-U&tlwpm#mJMzxz7PXtK@qr+a@oz2=e3HP`;w z;+!%_tq$Y2=G5Fd&<82tq1M&u>>(pn>T$tL*NU(=-(~i1bjnj-Ztd~<;EraL62A&X z44lO*yt*Xi?eNz*vvTYkHLcYR^>Q)=ZC?Dvji_=X(u8WZfKqRp1H>k$iP~U)v`NV)!6Q! z2)o&LhN%0+6AzGA-)^ca4=!d1T*6ub+s%yv1Aiado3vHcW7`9dhGGKcFwI& zX8tIT-f;tsvN~7IK+BfFBrR&Yw!U&5>_3Oa_M0q8TuN~RXY4%JFx5~_>qx`r>(NXURP<%^p`LEGZ$i%HL%lD2Da}Mcmg>@)P&NCyg7VwpGp1PE7SE-@p_M{rf)e27@ zH7AOh7h_e%2|juTRLPwsg0$!5RZOJK&kWISMg+5OW48v@9%3PLLAg(X)-*ai_xUG8 zGSqbMtTew9XH1bWpA+I+O2fH6oXy=U8Q}|W7-`m@oLgcLDbP3R z9ddM?}09dOym_XA{KPithF#1 z5*e-5s<-LkHU8g)v#Hbf=HEpVr1oPWp-4{2PUA+xhFYL&pzOK%gMvRva9j?tJzNCQ zA#0}=P&e6(vKv>}vY$UH>abmdR9YH5X7LM^pP#G3d|xIn!a1`^MW81`ML4Z0p!Oc3 zP`D#1C#Wt?uxGrUj3m@*E|OuMUvr6FVNww7%4ez_`X=ym@V3@+u8e`vMtVOlRms?E->j}$>xCIm~0FT>Uzz+=)-hq%H~l0P`W|} z@X3!%O{-xw)#(-`<%P(X3@+z;lJygHGH8oLXUoNHvT|m6`WUp@f1y z4B|;`{@$+ z{OkG*Ql&&IY|?j^5mz-dbGvy8|1jpO#}#eQ^sZ=(rXzQD4}r?t>_H+kW%G|nQ$O#X z+wL1^Jl2^S!xgR{KIIQyBVBKVLAiz$C#5W0T6QF};!B!+<>C4fr$ z@X>*kkl$Y+8zx*<*qCC>9oE;|*DS!+DiXBK4q#34Y)X;csCA4T?r%a*XMeG9B!p9bsNJhT_x#r&|H$$`fcYD+isv2tjpMGXBkx%d*A=*$Z3)ykvl#=^!i zo~ItSw6opmi`W!E#pEotMvoL zpW1&v8==+D%gvcG&_ltYGQ?x*r53e1i0;@$e-%`@our`>#~}F2pwnq;o6?j)2Sxek zOc!=)gwkmng$_*C8a8TgpLFCoA4O??YIE*t;QpfgWc~A3YBABi~eg*o0Ke8iPQ|{5l;` 
z=_u(+`8?po$0wB@Wkp+CCq|V+_0ZV(E1lhl6^R1<&C6sPmb3zYtSc_!61pO2FHIhCxYOB=yVv{CbJi8q?D=UljexwgC ztgjjWPg@IUpQii%a~8tY#6z&wISEjPD$S<$J1UBJs4_PW=vADdZt<$kvdzY!`)P-n z5>tWO2aZ;CDk0@OF*bs-F!I;@9JpGP3n3nNCmhD?;SE<5iqR_P0Nbvj2&6{qt-mKZ zY&VTmV6?lwaqkhy8z#9ibHRqMk^2UUkju8HRggJ7S2zp&cyNPtP%_W9y;FXdr#xD- zC<=8T<-ZWkR&gPGE>spC5<=bV?S1*PA>uKLCbV4h?d`?Mp6@ojqrCrg>p&g^Eh{S0ce$rlcV0D^P!;fBV}-Yj1+8nL=-mq5_=d;PKGq#aQU`M=THk8 z^jlVr0q=*-cizzE)UUz~eGoaab+lqvIE;e=rqOEqa8d4kF9%EEvK<-EmSAWzxX=Jz zX)a(ap~0>}1HS=6^kLv$eu|Bk=kLQ<=E31|Oe=Q^YV+(t{Kcmn~}MZ$LHP{)|5%d0ZvwJ@R4U z*UqGl#qWQPuMKXs5#GICV2fAeOebnCM&n3TV&h#^FJW$FN{+>0v^TtbcQAml^6NcU zQQP?UOcbUEhs zjEC4`H?nPFZJHBxxH?Ir+ttg$OGOSloIfoa1nlNXvC)7Q@n+V@-^3sN?)i(t=-wlb zmc?58J13G1^esn98|NX7qHS5DXDiml^yn8DHIY&l@la`6wwakq3e8uKFuJ+lT65F2 z&#q&~Mo|Klq#sE)TCWwAl;RS}Lmy6M55!9hrRSw|CkdM3i3jdXY#Pw^MZ52aHEc;< z=Ff3uVQ->L8eQsgwK0rE8K8KcJNmx%8r#yz82Oz>9AjU8&4@NFkqC@EZH<;b@xeZ^ z1a~&w7^<=q)d4qiWi@|zhi-MITn{vwE`W;9>I;Av@YA6HtreYAkp47%>F+V3>cDL% zK3gbwZhqbn1OnZ~ZH(;PST47N1LpsHhbL2GvR z1Y1b0CfAyd@4%~YlnJeAjop~KeUFf{ol!iuoEV?m8yU$JJ*z{xmJsTt*b3PO%JN+#l}$v7k0{v**w1)S z!V*lvEXK-bUZ~<_m8n2+NAqfTW{il^dw4Ct%VGQZ@6B(s`7BYv2t&o)a=Z$CAtY1< zwNnyDf2nCbHvJ=#OF|O`3eobPJ}St(5%aw)Dp=muC}+i9;m)n`la}W4h^N+-wby=z zs+>b?>73PRetR2Pp^f?GM*HiWEbXnt(=TuF1!S=Wk+we$n}73W%!62EPfC5q_R%VA zD$NYnIMg&+Sds9usEl-`9&zwELw)6e5a8gOozR3A>n>e2+PwF=I(W`ozTkhqt#{&C z7>+vTTsNo<()G%_Jj3ke+0=+A5+fXINkY4W;m&5s3I<=98Op!;R_;RN700?7fF|G!%ydN76p@~O(ciVc(55!h+E`3tH zk!~x4z7N1p7XMMFtdBY&jeCkeut(G-Brrfj8fxk z7nOu{BX@lrZ{_4I#8bZ3abIK%{-(3sJ?S)dSwv+kTiYZ2(#b@h56AFSci&@tDxM^6 z+wIIb`zUi(F8%EwWpC1oKl*Uh6g(W-<(R6sHpHbD$LgsXC+V$X`$QnY^vbhimb&sq zn$}1nD`lbjNOBgX`)b`*)r3yi^uSy6=7?X8yEx$l2^|+9!rZI|6zL&c_FbQ#``g0= z3BP3=7dnt@QxvgnDoXX3R1XlV(c|JYejC0iMI~H=2=Ka5Y}Oa0#-PpSpLnD}_n5xqve>!_Q=OL^?YaN#i_t^>=ZTg6-S0Md%5M+8 z;<@kL7SHYF`%7O>yZAfn*vonDCQ9$qz!RpcpTyqU`8;>it~ys>VKsT<+3&#>cUqo( zfBW9e-sY`*ZN7ckrsaF$&i=os`R{>|{6o#(Du+)T&kI|oJTKte?m&M3=|8unhJDkq z`52m6`uj%byxf^=yUxjd|Fq?))c;wA{3&5-e}3Bd-7|LU-ZQQ2uleSW^<{9&wSiOU 
zhvdOCHI65^{sBi^%3*SDPhwB-fhXZw);PfC+?C!tl_`MczZFtgVKZ~UCd?*g(EK`! zqBdeueD?%d@a!r_WFunIz4eU)Xi^Ah6>Q2CIGLWp4xYbv3R8g26al;2iq-<4d1Zl3 zOo&N+?wgIE6#z{WL}8Q7^l>8aUS;#VI|gm64=XnmJv|kp`~LJaUE};tkW=QuoTBD? zkC}<7C(*G&ZcX&|zUy(-y5Xm)|NZ&dJDDF8GQWR5Xyz{{Ej0x%kWhC4HbsCvf=wBG zvQ{a3W&)Roxd4YFG^4hxaQU=+nMhz7iGu!+@OE~K5 zxumIQ28kZcw3*^d~xt9q?h;i+aK?jR|hUjx}03@*8%Oa zbar(awYUgu>VEO!h0p!jmZnv;z?s>WIj%`biyoTHl-*wJ1W7Zhpk+Tm7g}0E227yb zNpoZ%tzC$$kf&cC#AL8y;NmNFcW`LLg7gCQOq#qH+*SiG6azWzBydH?Y(dCMEufw! zPauV`H%Kv1Pf-yKtiQY=(K+kts#G~4U@8QnmO01o?X7mbXQ~^u<-zLp`;5v?JUcrZ z=Hp2+Q)_>JGc0=IF|phicw=3!tRloOAlL5sdM!Gw`ET+QPjHe38MdbboRWblNa2YP zIFEsBQJfCRTA<}HpoL&yi-DO%6_SyFqE32_;QRw}Q4uFN+W Date: Tue, 31 Mar 2020 14:28:46 +0800 Subject: [PATCH 023/367] update the supported version of EulerOS Signed-off-by: Ting Wang --- RELEASE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/RELEASE.md b/RELEASE.md index 76d86c8bff..8920095bb5 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -3,7 +3,7 @@ ## Main Features ### Ascend 910 Training and Inference Framework -* Recommended OS: Ubuntu 16.04 (or later) or EulerOS 2.0 +* Recommended OS: Ubuntu 16.04 (or later) or EulerOS 2.5 or EulerOS 2.8 * Python version: 3.7.5 * Preset models * ResNet-50: residual structure-based convolutional neural network (CNN) for image classification, which is widely used. 
From 3bb48ffee17d3d2c5bcbfc545ae0a97ba425a3f6 Mon Sep 17 00:00:00 2001 From: c00425699 Date: Tue, 31 Mar 2020 12:29:14 +0800 Subject: [PATCH 024/367] use std::vector instead of std::list to promote performance for parallel module --- mindspore/ccsrc/parallel/context.cc | 1 - mindspore/ccsrc/parallel/context.h | 1 - mindspore/ccsrc/parallel/device.h | 1 - mindspore/ccsrc/parallel/device_manager.cc | 19 ++++++++-------- mindspore/ccsrc/parallel/device_manager.h | 20 ++++++++--------- mindspore/ccsrc/parallel/device_matrix.cc | 6 ++--- mindspore/ccsrc/parallel/device_matrix.h | 9 ++++---- .../parallel/graph_util/generate_graph.cc | 1 - .../parallel/graph_util/generate_graph.h | 1 - mindspore/ccsrc/parallel/group_manager.cc | 6 ++--- mindspore/ccsrc/parallel/group_manager.h | 10 ++++----- .../ccsrc/parallel/ops_info/activation_info.h | 1 - .../ccsrc/parallel/ops_info/arithmetic_info.h | 1 - .../parallel/ops_info/batch_parallel_info.h | 1 - .../ccsrc/parallel/ops_info/bias_add_info.h | 2 +- .../ops_info/comparison_function_info.h | 1 - .../parallel/ops_info/dropout_do_mask_info.h | 1 - .../ops_info/elementary_function_info.h | 1 - .../ccsrc/parallel/ops_info/generator_info.h | 1 - .../ccsrc/parallel/ops_info/get_next_info.h | 1 - .../parallel/ops_info/l2_normalize_info.h | 1 - mindspore/ccsrc/parallel/ops_info/loss_info.h | 2 +- .../ccsrc/parallel/ops_info/matmul_info.cc | 2 +- .../ccsrc/parallel/ops_info/matmul_info.h | 2 +- .../ccsrc/parallel/ops_info/onehot_info.h | 2 +- .../ccsrc/parallel/ops_info/operator_info.h | 1 - .../ccsrc/parallel/ops_info/prelu_info.h | 2 +- .../parallel/ops_info/reduce_method_info.cc | 2 +- .../parallel/ops_info/reduce_method_info.h | 1 - .../ccsrc/parallel/ops_info/reshape_info.h | 1 - .../parallel/ops_info/tmp_identity_info.h | 1 + .../ccsrc/parallel/ops_info/transpose_info.h | 2 +- .../parallel/ops_info/virtual_dataset_info.h | 2 +- mindspore/ccsrc/parallel/status.h | 1 - mindspore/ccsrc/parallel/step_parallel.cc | 2 +- 
mindspore/ccsrc/parallel/step_parallel.h | 2 +- mindspore/ccsrc/parallel/strategy.h | 1 - .../redistribution_operator_infer.h | 1 - .../parallel/auto_parallel/dp_algo_test.cc | 4 ++-- .../auto_parallel/edge_costmodel_test.cc | 4 ++-- .../auto_parallel/graph_costmodel_test.cc | 4 ++-- .../auto_parallel/operator_costmodel_test.cc | 12 +++++----- tests/ut/cpp/parallel/device_manager_test.cc | 16 +++++++------- tests/ut/cpp/parallel/device_matrix_test.cc | 16 +++++++------- tests/ut/cpp/parallel/group_manager_test.cc | 22 +++++++++---------- .../parallel/ops_info/activation_info_test.cc | 4 ++-- .../cpp/parallel/ops_info/activation_test.cc | 4 ++-- .../ops_info/dropout_do_mask_info_test.cc | 4 ++-- .../cpp/parallel/ops_info/gelu_info_test.cc | 4 ++-- .../ops_info/generate_strategy_test.cc | 4 ++-- .../parallel/ops_info/generator_info_test.cc | 4 ++-- .../parallel/ops_info/get_next_info_test.cc | 4 ++-- .../ops_info/l2_normalize_info_test.cc | 4 ++-- .../ops_info/log_softmax_info_test.cc | 4 ++-- .../cpp/parallel/ops_info/matmul_info_test.cc | 4 ++-- .../cpp/parallel/ops_info/onehot_info_test.cc | 4 ++-- .../ops_info/onehot_info_test_axis_0.cc | 4 ++-- .../ut/cpp/parallel/ops_info/pow_info_test.cc | 4 ++-- tests/ut/cpp/parallel/ops_info/prelu_test.cc | 4 ++-- .../parallel/ops_info/reduce_method_test.cc | 4 ++-- .../ut/cpp/parallel/ops_info/reshape_test.cc | 4 ++-- .../softmax_entropy_loss_info_test.cc | 4 ++-- .../parallel/ops_info/softmax_info_test.cc | 4 ++-- .../cpp/parallel/ops_info/tanh_info_test.cc | 4 ++-- .../parallel/ops_info/tensor_add_info_test.cc | 4 ++-- .../cpp/parallel/ops_info/tmpidentity_test.cc | 4 ++-- .../cpp/parallel/ops_info/transpose_test.cc | 4 ++-- .../cpp/parallel/step_auto_parallel_test.cc | 4 ++-- tests/ut/cpp/parallel/step_parallel_test.cc | 4 ++-- .../tensor_layout/construct_operator_test.cc | 4 ++-- .../redistribution_operator_infer_test.cc | 4 ++-- .../tensor_redistribution_test.cc | 2 +- tests/ut/cpp/parallel/virtual_dataset_test.cc | 4 
++-- 73 files changed, 141 insertions(+), 160 deletions(-) diff --git a/mindspore/ccsrc/parallel/context.cc b/mindspore/ccsrc/parallel/context.cc index 74ec948c49..64cecf1669 100644 --- a/mindspore/ccsrc/parallel/context.cc +++ b/mindspore/ccsrc/parallel/context.cc @@ -21,7 +21,6 @@ #include #include #include -#include #include #include "parallel/device_manager.h" diff --git a/mindspore/ccsrc/parallel/context.h b/mindspore/ccsrc/parallel/context.h index 22a5f89e38..866609fbfe 100644 --- a/mindspore/ccsrc/parallel/context.h +++ b/mindspore/ccsrc/parallel/context.h @@ -20,7 +20,6 @@ #include #include #include -#include #include #include "parallel/status.h" diff --git a/mindspore/ccsrc/parallel/device.h b/mindspore/ccsrc/parallel/device.h index 0373617ad5..8c3174ae55 100644 --- a/mindspore/ccsrc/parallel/device.h +++ b/mindspore/ccsrc/parallel/device.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_DEVICE_H_ #include -#include #include #include diff --git a/mindspore/ccsrc/parallel/device_manager.cc b/mindspore/ccsrc/parallel/device_manager.cc index 999d6b4432..3a553e08ec 100644 --- a/mindspore/ccsrc/parallel/device_manager.cc +++ b/mindspore/ccsrc/parallel/device_manager.cc @@ -30,7 +30,7 @@ namespace mindspore { namespace parallel { DeviceManagerPtr g_device_manager = nullptr; -Stage::Stage(const std::list& devices, int num, int rank) +Stage::Stage(const std::vector& devices, int num, int rank) : devices_(devices), number_(num), rank_(rank) { gm_ = GroupManager(); } @@ -104,7 +104,7 @@ int32_t GetListMemberByIndex(size_t index, const RankList& devices) { return result; } -std::shared_ptr GetListMemberByIndex(size_t index, const std::list>& device_list) { +std::shared_ptr GetListMemberByIndex(size_t index, const std::vector>& device_list) { size_t i = 0; std::shared_ptr result; if ((device_list.empty()) || (index >= device_list.size())) { @@ -178,7 +178,7 @@ Status DeviceManager::Init(const RankList& devices, int32_t global_device_rank, MS_LOG(ERROR) << "The 
number of 'devices' in a stage must be positive"; return Status::FAILED; } - std::list curr_dev_list; + std::vector curr_dev_list; for (int i = 0; i < num_device; ++i) { curr_dev_list.push_back(*GetListMemberByIndex(global_index, devices_)); global_index++; @@ -278,8 +278,8 @@ RankList DeviceManager::global_device_list(int32_t stage_id, int32_t rank, int32 Device DeviceManager::CreateNewDeviceByRank(int32_t rank) const { return Device(rank); } -std::list DeviceManager::CreateDeviceListByRankList(RankList ranks) { - std::list dev_list; +std::vector DeviceManager::CreateDeviceListByRankList(RankList ranks) { + std::vector dev_list; for (auto& rank : ranks) { Device one = CreateNewDeviceByRank(rank); dev_list.push_back(one); @@ -312,8 +312,8 @@ std::string HashName(const std::string& origin_name) { return std::to_string(std // is '0-1-3-5-7'. std::string DeviceManager::GenerateGroupNameByRanks(RankList ranks) { std::string rank_list_name; - std::list::iterator it; - ranks.sort(); // sorted in increasing order + std::vector::iterator it; + std::sort(ranks.begin(), ranks.end()); // sorted in increasing order for (it = ranks.begin(); it != ranks.end(); ++it) { if (it == ranks.begin()) { rank_list_name = std::to_string(*it); @@ -343,7 +343,8 @@ std::string DeviceManager::GenerateGroupNameByRanks(RankList ranks) { // Create the group with the given devices and the given name. The GroupManager // gm_ will create a new group only if there does not exit a group with the same // name. Otherwise, let the pointer g point to that group. 
-Group DeviceManager::CreateGroup(const std::string& group_name, const std::list& devices) { +Group DeviceManager::CreateGroup(const std::string& group_name, + const std::vector& devices) { if ((world_group() == NCCL_WORLD_GROUP) && (devices.size() != devices_.size())) { MS_LOG(EXCEPTION) << "Do not support sub group for nccl"; } @@ -360,7 +361,7 @@ Group DeviceManager::CreateGroup(const RankList& dev_ranks) { } std::string group_name = GenerateGroupNameByRanks(dev_ranks); - std::list dev_list = CreateDeviceListByRankList(dev_ranks); + auto dev_list = CreateDeviceListByRankList(dev_ranks); return CreateGroup(group_name, dev_list); } diff --git a/mindspore/ccsrc/parallel/device_manager.h b/mindspore/ccsrc/parallel/device_manager.h index 80beb15f86..798d99383d 100644 --- a/mindspore/ccsrc/parallel/device_manager.h +++ b/mindspore/ccsrc/parallel/device_manager.h @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include @@ -50,19 +50,19 @@ class Stage { // This class is used in pipeline-parallelization. Available devices are partitioned into multiple stages. // Currently, the function of pipeline-parallelization and this class are NOT implemented. 
public: - explicit Stage(std::list devices) : devices_(std::move(devices)), number_(0), rank_(0) { + explicit Stage(std::vector devices) : devices_(std::move(devices)), number_(0), rank_(0) { gm_ = GroupManager(); } - Stage(const std::list& devices, int num, int rank); + Stage(const std::vector& devices, int num, int rank); ~Stage() = default; int GetStageNum() const { return number_; } size_t GetDevicesNum() const { return devices_.size(); } - std::list GetDevicesList() { return devices_; } + std::vector GetDevicesList() { return devices_; } int global_rank(Group* g) const; private: - std::list devices_; + std::vector devices_; int number_; int32_t rank_; GroupManager gm_; @@ -89,10 +89,10 @@ class DeviceManager { RankList global_device_list(int32_t stage_id, int32_t rank, int32_t split_num) const; Device CreateNewDeviceByRank(int32_t rank) const; - std::list CreateDeviceListByRankList(RankList ranks); + std::vector CreateDeviceListByRankList(RankList ranks); std::string GenerateGroupNameByRanks(RankList dev_ranks); - Group CreateGroup(const std::string& group_name, const std::list& devices); + Group CreateGroup(const std::string& group_name, const std::vector& devices); Group CreateGroup(const RankList& dev_ranks); std::shared_ptr GetStageById(int32_t stage_id); @@ -108,11 +108,11 @@ class DeviceManager { std::string FindRankListNameByHashName(const std::string& hash_name); private: - std::list> devices_; + std::vector> devices_; // each stage has a list of devices - std::list> stage_devices_; + std::vector> stage_devices_; std::shared_ptr device_; - std::list> stages_; + std::vector> stages_; GroupManager gm_; std::string backend_; diff --git a/mindspore/ccsrc/parallel/device_matrix.cc b/mindspore/ccsrc/parallel/device_matrix.cc index 5a7aa36c8e..f9f314d5a3 100644 --- a/mindspore/ccsrc/parallel/device_matrix.cc +++ b/mindspore/ccsrc/parallel/device_matrix.cc @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include "parallel/status.h" #include 
"parallel/ops_info/operator_info.h" @@ -64,7 +64,7 @@ Status DeviceMatrix::GetDevicesAlongDim(const uint32_t& dim, RankList* devices) } RankList group; - std::list local_group_list; + std::vector local_group_list; // lower than dim int32_t step = 1; @@ -160,7 +160,7 @@ std::string ShapeToString(const Shape& shape) { return str + "]"; } -std::string ListToString(const std::list& list) { +std::string ListToString(const std::vector& list) { std::string str = "["; for (auto& element : list) { str += std::to_string(element) + ", "; diff --git a/mindspore/ccsrc/parallel/device_matrix.h b/mindspore/ccsrc/parallel/device_matrix.h index d3b0fdcb49..a912000604 100644 --- a/mindspore/ccsrc/parallel/device_matrix.h +++ b/mindspore/ccsrc/parallel/device_matrix.h @@ -20,7 +20,6 @@ #include #include #include -#include #include "parallel/status.h" #include "utils/convert_utils.h" @@ -28,7 +27,7 @@ namespace mindspore { namespace parallel { -using RankList = std::list; +using RankList = std::vector; using Shape = std::vector; class DeviceMatrix { @@ -36,7 +35,7 @@ class DeviceMatrix { DeviceMatrix(int32_t rank, RankList devices, Shape dev_shape); DeviceMatrix() = default; ~DeviceMatrix() = default; - std::list group_list() const { return group_list_; } + std::vector group_list() const { return group_list_; } Status CreateGroupList(); Status GetDevicesByTensorMap(const Shape& tensor_map, RankList* rank_list); Status GetDevicesAlongDim(const uint32_t& dim, RankList* devices); @@ -46,11 +45,11 @@ class DeviceMatrix { RankList dev_list_; // From low dim to high dim. 
eg: [D0 D1 D2 D3] Shape dev_shape_; - std::list group_list_; + std::vector group_list_; }; std::string ShapeToString(const Shape& shape); -std::string ListToString(const std::list& list); +std::string ListToString(const std::vector& list); } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/graph_util/generate_graph.cc b/mindspore/ccsrc/parallel/graph_util/generate_graph.cc index d0548ec647..43df9fe802 100644 --- a/mindspore/ccsrc/parallel/graph_util/generate_graph.cc +++ b/mindspore/ccsrc/parallel/graph_util/generate_graph.cc @@ -17,7 +17,6 @@ #include "parallel/graph_util/generate_graph.h" #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/graph_util/generate_graph.h b/mindspore/ccsrc/parallel/graph_util/generate_graph.h index 713c13dfb5..bb1f811f2f 100644 --- a/mindspore/ccsrc/parallel/graph_util/generate_graph.h +++ b/mindspore/ccsrc/parallel/graph_util/generate_graph.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_GRAPH_UTIL_GENERATE_GRAPH_H_ #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/group_manager.cc b/mindspore/ccsrc/parallel/group_manager.cc index 35e81a7d83..cff5877f0a 100644 --- a/mindspore/ccsrc/parallel/group_manager.cc +++ b/mindspore/ccsrc/parallel/group_manager.cc @@ -30,13 +30,13 @@ Group::Group() { devices_.clear(); } -Status Group::Init(const std::string &name, const std::list &devices) { +Status Group::Init(const std::string &name, const std::vector &devices) { this->name_ = name; this->devices_ = devices; return Status::SUCCESS; } -std::list Group::GetDevicesList() const { return devices_; } +std::vector Group::GetDevicesList() const { return devices_; } bool Group::IsInThisGroup(int32_t device_rank) { for (auto &device : devices_) { @@ -66,7 +66,7 @@ Status Group::GetIndex(size_t *index) { GroupManager::GroupManager() { groups_.clear(); } -Status GroupManager::CreateGroup(const std::string &group_name, const std::list &devices, 
+Status GroupManager::CreateGroup(const std::string &group_name, const std::vector &devices, mindspore::parallel::Group *const group) { // it is simple to use size to determine whether it is a world group uint32_t world_size = 0; diff --git a/mindspore/ccsrc/parallel/group_manager.h b/mindspore/ccsrc/parallel/group_manager.h index c3b95b99e0..9e23569b15 100644 --- a/mindspore/ccsrc/parallel/group_manager.h +++ b/mindspore/ccsrc/parallel/group_manager.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_PARALLEL_GROUP_MANAGER_H_ #include -#include +#include #include #include @@ -37,8 +37,8 @@ class Group { public: Group(); ~Group() = default; - Status Init(const std::string& name, const std::list& devices); - std::list GetDevicesList() const; + Status Init(const std::string& name, const std::vector& devices); + std::vector GetDevicesList() const; std::string name() const { return name_; } bool IsInThisGroup(int32_t device_rank); Status GetIndex(size_t* index); @@ -46,7 +46,7 @@ class Group { private: std::string name_; - std::list devices_; + std::vector devices_; }; class GroupManager { @@ -54,7 +54,7 @@ class GroupManager { GroupManager(); ~GroupManager() = default; - Status CreateGroup(const std::string& name, const std::list& devices, Group* group); + Status CreateGroup(const std::string& name, const std::vector& devices, Group* group); Status DestroyGroup(Group* group); Status DestroyAllGroups(); Status GetRankID(const std::string& name, unsigned int* rank_id); diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.h b/mindspore/ccsrc/parallel/ops_info/activation_info.h index e988ccf54c..d8de19b328 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.h +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.h @@ -19,7 +19,6 @@ #include #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h index 1b641152aa..734368a533 100644 --- 
a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h +++ b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ARITHMETIC_INFO_H_ #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h index fb08fbe9fc..0ffdea97f3 100644 --- a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h +++ b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h @@ -17,7 +17,6 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BATCH_PARALLEL_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BATCH_PARALLEL_INFO_H_ -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/bias_add_info.h b/mindspore/ccsrc/parallel/ops_info/bias_add_info.h index ac263ec91f..e5001fc0d3 100644 --- a/mindspore/ccsrc/parallel/ops_info/bias_add_info.h +++ b/mindspore/ccsrc/parallel/ops_info/bias_add_info.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BIAS_ADD_INFO_H_ #include -#include + #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h index 2762f94eb9..8760ce5666 100644 --- a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h +++ b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_COMPARISON_FUNCTION_INFO_H_ #include -#include #include #include #include "ir/value.h" diff --git a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h index 2fc18b521d..45d4c28d8e 100644 --- a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h +++ b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_DROPOUT_DO_MASK_INFO_H_ #include -#include #include #include #include diff --git 
a/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h b/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h index cf85d28fb0..33ad04023b 100644 --- a/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h +++ b/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ELEMENTARY_FUNCTION_INFO_H_ #include -#include #include #include #include "ir/value.h" diff --git a/mindspore/ccsrc/parallel/ops_info/generator_info.h b/mindspore/ccsrc/parallel/ops_info/generator_info.h index 0f1d7209c8..1473fead67 100644 --- a/mindspore/ccsrc/parallel/ops_info/generator_info.h +++ b/mindspore/ccsrc/parallel/ops_info/generator_info.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GENERATOR_INFO_H_ #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/get_next_info.h b/mindspore/ccsrc/parallel/ops_info/get_next_info.h index 1280d3b191..3dd639ba57 100644 --- a/mindspore/ccsrc/parallel/ops_info/get_next_info.h +++ b/mindspore/ccsrc/parallel/ops_info/get_next_info.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GETNEXT_INFO_H_ #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h b/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h index 10bf46d8ff..1a67073065 100644 --- a/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h +++ b/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_L2_NORMALIZE_INFO_H_ #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/loss_info.h b/mindspore/ccsrc/parallel/ops_info/loss_info.h index 70de84fda3..585545302f 100644 --- a/mindspore/ccsrc/parallel/ops_info/loss_info.h +++ b/mindspore/ccsrc/parallel/ops_info/loss_info.h @@ -18,10 +18,10 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_LOSS_INFO_H_ #include -#include #include #include #include + 
#include "ir/value.h" #include "parallel/ops_info/operator_info.h" #include "parallel/ops_info/activation_info.h" diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc index 2cd8e0df8f..6103087a1d 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc @@ -397,7 +397,7 @@ Status MatMulBase::GenerateStrategies(int32_t stage_id) { return FAILED; } CheckGlobalDeviceManager(); - std::list dev_list = g_device_manager->GetDeviceListByStageId(stage_id); + std::vector dev_list = g_device_manager->GetDeviceListByStageId(stage_id); size_t dev_num = dev_list.size(); Shape input0_shape = inputs_shape_[0], input1_shape = inputs_shape_[1]; if (transpose_a_) { diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.h b/mindspore/ccsrc/parallel/ops_info/matmul_info.h index daf422e6d6..c9feae55b6 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.h +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.h @@ -18,10 +18,10 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_MATMUL_INFO_H_ #include -#include #include #include #include + #include "ir/value.h" #include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/ops_info/onehot_info.h b/mindspore/ccsrc/parallel/ops_info/onehot_info.h index fd5d6be8f6..62c66695fa 100644 --- a/mindspore/ccsrc/parallel/ops_info/onehot_info.h +++ b/mindspore/ccsrc/parallel/ops_info/onehot_info.h @@ -18,10 +18,10 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ONEHOT_INFO_H_ #include -#include #include #include #include + #include "ir/value.h" #include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.h b/mindspore/ccsrc/parallel/ops_info/operator_info.h index 9902c69ce7..89fd73564f 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.h +++ 
b/mindspore/ccsrc/parallel/ops_info/operator_info.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_OPERATOR_INFO_H_ #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/prelu_info.h b/mindspore/ccsrc/parallel/ops_info/prelu_info.h index d7dac46291..2e74118f80 100644 --- a/mindspore/ccsrc/parallel/ops_info/prelu_info.h +++ b/mindspore/ccsrc/parallel/ops_info/prelu_info.h @@ -17,11 +17,11 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_PRELU_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_PRELU_INFO_H_ -#include #include #include #include #include + #include "ir/value.h" #include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc index 216357d5ab..5f9a67c22d 100644 --- a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc @@ -198,7 +198,7 @@ ForwardOp CreatReduceMeanForwardOp(const std::vector &forward_group, cons // Creat RealDiv op OperatorName operator1_name = REAL_DIV; - std::list device_list = forward_group[0].GetDevicesList(); + std::vector device_list = forward_group[0].GetDevicesList(); auto divisor = static_cast(device_list.size()); py::tuple tuple = py::make_tuple(divisor); mindspore::tensor::TensorPtr tensor_ptr = std::make_shared(tuple, dtype); diff --git a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h index e9f03bcdee..6f26b99ffb 100644 --- a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h +++ b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_REDUCE_SUM_INFO_H_ #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/reshape_info.h b/mindspore/ccsrc/parallel/ops_info/reshape_info.h index a8a9323540..982894a8e0 100644 --- 
a/mindspore/ccsrc/parallel/ops_info/reshape_info.h +++ b/mindspore/ccsrc/parallel/ops_info/reshape_info.h @@ -19,7 +19,6 @@ #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h index 1ef7341f3b..304e336adf 100644 --- a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h +++ b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h @@ -20,6 +20,7 @@ #include #include #include + #include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/ops_info/transpose_info.h b/mindspore/ccsrc/parallel/ops_info/transpose_info.h index 6c5e98e3b9..c7c1c96675 100644 --- a/mindspore/ccsrc/parallel/ops_info/transpose_info.h +++ b/mindspore/ccsrc/parallel/ops_info/transpose_info.h @@ -17,11 +17,11 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TRANSPOSE_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TRANSPOSE_INFO_H_ -#include #include #include #include #include + #include "ir/value.h" #include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h index d22565d305..c4fdfcef04 100644 --- a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h +++ b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h @@ -17,11 +17,11 @@ #ifndef PARALLEL_OPS_INFO_DATASET_INFO_H_ #define PARALLEL_OPS_INFO_DATASET_INFO_H_ -#include #include #include #include #include + #include "ir/value.h" #include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/status.h b/mindspore/ccsrc/parallel/status.h index 4c018c7c17..9d773f0d9b 100644 --- a/mindspore/ccsrc/parallel/status.h +++ b/mindspore/ccsrc/parallel/status.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_STATUS_H_ #include -#include 
namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index 927acea705..379a8a473e 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -19,7 +19,7 @@ #include #include #include -#include + #include #include #include diff --git a/mindspore/ccsrc/parallel/step_parallel.h b/mindspore/ccsrc/parallel/step_parallel.h index ac7376a09d..2d1982dc9c 100644 --- a/mindspore/ccsrc/parallel/step_parallel.h +++ b/mindspore/ccsrc/parallel/step_parallel.h @@ -18,7 +18,7 @@ #define MINDSPORE_CCSRC_PARALLEL_STEP_PARALLEL_H_ #include -#include + #include #include #include diff --git a/mindspore/ccsrc/parallel/strategy.h b/mindspore/ccsrc/parallel/strategy.h index 24879c12c1..68ba4962d7 100644 --- a/mindspore/ccsrc/parallel/strategy.h +++ b/mindspore/ccsrc/parallel/strategy.h @@ -18,7 +18,6 @@ #define MINDSPORE_CCSRC_PARALLEL_STRATEGY_H_ #include -#include #include #include #include diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h index 77955b5d1a..3515c7383a 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h +++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h @@ -22,7 +22,6 @@ #include #include #include -#include #include "parallel/tensor_layout/redistribution_layout_transfer.h" #include "parallel/tensor_layout/construct_operator.h" diff --git a/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc b/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc index 73eff99304..d0243d5327 100644 --- a/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc @@ -154,13 +154,13 @@ class TestDPAlgo : public UT::Common { void TestDPAlgo::SetUp() { cost_graph = std::make_shared(); cost_graph->SetDeviceMemoryAndCostParameter(); - std::list 
dev_list; + std::vector dev_list; for (int32_t i = 0; i < 10; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(8); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc b/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc index adf06d8fcc..467f4976e8 100644 --- a/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc @@ -42,13 +42,13 @@ class TestEdgeCostModel : public UT::Common { }; void TestEdgeCostModel::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 10; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(8); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc b/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc index 1428193481..83a9eceacc 100644 --- a/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc @@ -53,13 +53,13 @@ class TestCostGraph : public UT::Common { void TestCostGraph::SetUp() { cost_graph.SetDeviceMemoryAndCostParameter(); - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 10; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(8); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc b/tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc index 3b71c80c4b..3bd65c049c 100644 --- a/tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc @@ -33,13 +33,13 @@ class TestMatMulCost : public UT::Common { void TestMatMulCost::SetUp() { mmcost_ = MatMulCost(); - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list 
stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); @@ -90,13 +90,13 @@ class TestActivationCost : public UT::Common { void TestActivationCost::SetUp() { ac_cost_ = ActivationCost(); - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); @@ -142,13 +142,13 @@ class TestPReLUCost : public UT::Common { void TestPReLUCost::SetUp() { prelu_cost_ = PReLUCost(); - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); diff --git a/tests/ut/cpp/parallel/device_manager_test.cc b/tests/ut/cpp/parallel/device_manager_test.cc index 0814ca64d0..056896f514 100644 --- a/tests/ut/cpp/parallel/device_manager_test.cc +++ b/tests/ut/cpp/parallel/device_manager_test.cc @@ -69,8 +69,8 @@ void TestDeviceManager::TearDown() { } TEST_F(TestDeviceManager, test_dm_init_AND_get_device_list) { - std::list dev_list; - std::list stage_map; + std::vector dev_list; + std::vector stage_map; int32_t local_dev = 0; dev_list.push_back(5); @@ -85,12 +85,12 @@ TEST_F(TestDeviceManager, test_dm_init_AND_get_device_list) { ASSERT_EQ(dm_.DeviceNum(), 4); ASSERT_EQ(dm_.GetStageNum(), (int32_t)(2)); - std::list dev_list_0 = dm_.GetDeviceListByStageId(0); - std::list dev_list_1 = dm_.GetDeviceListByStageId(1); + std::vector dev_list_0 = dm_.GetDeviceListByStageId(0); + std::vector dev_list_1 = dm_.GetDeviceListByStageId(1); ASSERT_EQ(dev_list_0.size(), 2); ASSERT_EQ(dev_list_1.size(), 2); - std::list::iterator it = dev_list_0.begin(); + std::vector::iterator it = dev_list_0.begin(); ASSERT_EQ((*it), int32_t(5)); it++; ASSERT_EQ((*it), int32_t(3)); @@ -111,13 +111,13 @@ TEST_F(TestDeviceManager, test_CreateNewDeviceByRank) { } TEST_F(TestDeviceManager, 
test_CreateDeviceListByRankList) { - std::list dev_list; - std::list rlist; + std::vector dev_list; + std::vector rlist; rlist.push_back(int32_t(2)); rlist.push_back(int32_t(1)); dev_list = dm_.CreateDeviceListByRankList(rlist); - std::list::iterator it = dev_list.begin(); + std::vector::iterator it = dev_list.begin(); ASSERT_EQ(it->rank(), int32_t(2)); it++; ASSERT_EQ(it->rank(), int32_t(1)); diff --git a/tests/ut/cpp/parallel/device_matrix_test.cc b/tests/ut/cpp/parallel/device_matrix_test.cc index 9c3fa9a861..877a211df8 100644 --- a/tests/ut/cpp/parallel/device_matrix_test.cc +++ b/tests/ut/cpp/parallel/device_matrix_test.cc @@ -35,9 +35,9 @@ TEST_F(TestDeviceMatrix, Test2Dgroup_list) { Shape shape = {2, 3}; DeviceMatrix arr(0, dev_list, shape); - std::list group_list; + std::vector group_list; if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list(); - std::list group_list_expect = {{0, 3}, {0, 1, 2}}; + std::vector group_list_expect = {{0, 3}, {0, 1, 2}}; ASSERT_EQ(group_list, group_list_expect); } @@ -46,9 +46,9 @@ TEST_F(TestDeviceMatrix, Test3Dgroup_list) { Shape shape = {2, 2, 3}; DeviceMatrix arr(5, dev_list, shape); - std::list group_list; + std::vector group_list; if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list(); - std::list group_list_expect = {{5, 11}, {2, 5}, {3, 4, 5}}; + std::vector group_list_expect = {{5, 11}, {2, 5}, {3, 4, 5}}; ASSERT_EQ(group_list, group_list_expect); } @@ -57,9 +57,9 @@ TEST_F(TestDeviceMatrix, Test4DGetAlongDim) { Shape shape = {2, 1, 4, 2}; DeviceMatrix arr(5, dev_list, shape); - std::list group_list; + std::vector group_list; if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list(); - std::list group_list_expect = {{5, 13}, {5}, {1, 3, 5, 7}, {4, 5}}; + std::vector group_list_expect = {{5, 13}, {5}, {1, 3, 5, 7}, {4, 5}}; ASSERT_EQ(group_list, group_list_expect); } @@ -69,9 +69,9 @@ TEST_F(TestDeviceMatrix, Test5DGetAlongDim) { Shape shape = {3, 4, 2, 3, 2}; 
DeviceMatrix arr(5, dev_list, shape); - std::list group_list; + std::vector group_list; if (arr.CreateGroupList() == Status::SUCCESS) group_list = arr.group_list(); - std::list group_list_expect = {{5, 53, 101}, {5, 17, 29, 41}, {5, 11}, {1, 3, 5}, {4, 5}}; + std::vector group_list_expect = {{5, 53, 101}, {5, 17, 29, 41}, {5, 11}, {1, 3, 5}, {4, 5}}; ASSERT_EQ(group_list, group_list_expect); } diff --git a/tests/ut/cpp/parallel/group_manager_test.cc b/tests/ut/cpp/parallel/group_manager_test.cc index 1e9ec9c060..e3d2b3a364 100644 --- a/tests/ut/cpp/parallel/group_manager_test.cc +++ b/tests/ut/cpp/parallel/group_manager_test.cc @@ -42,7 +42,7 @@ void TestGroup::TearDown() { Status TestGroup::Init() { std::string gname = "1-2"; - std::list dev_list; + std::vector dev_list; Device one = Device(int32_t(1)); dev_list.push_back(one); Device two = Device(int32_t(2)); @@ -55,8 +55,8 @@ TEST_F(TestGroup, test_Init) { ASSERT_EQ(Init(), Status::SUCCESS); } TEST_F(TestGroup, test_GetDevicesList) { Init(); - std::list res_dev_list = gp.GetDevicesList(); - std::list::iterator it = res_dev_list.begin(); + std::vector res_dev_list = gp.GetDevicesList(); + std::vector::iterator it = res_dev_list.begin(); ASSERT_EQ(it->rank(), int32_t(1)); it++; ASSERT_EQ(it->rank(), int32_t(2)); @@ -88,7 +88,7 @@ void TestGroupManager::TearDown() { Status TestGroupManager::Init(Group** gp_ptr) { std::string gname = "1-2"; - std::list dev_list; + std::vector dev_list; Device one = Device(int32_t(1)); dev_list.push_back(one); Device two = Device(int32_t(2)); @@ -102,15 +102,15 @@ TEST_F(TestGroupManager, test_CreateGroup) { Group* gp_ptr = new Group(); ASSERT_EQ(Init(&gp_ptr), Status::SUCCESS); - std::list res_dev_list = gp_ptr->GetDevicesList(); - std::list::iterator it = res_dev_list.begin(); + std::vector res_dev_list = gp_ptr->GetDevicesList(); + std::vector::iterator it = res_dev_list.begin(); ASSERT_EQ(it->rank(), int32_t(1)); it++; ASSERT_EQ(it->rank(), int32_t(2)); delete gp_ptr; // testing 
for creating a group with an existing group name - std::list dev_list2; + std::vector dev_list2; Device three = Device(int32_t(3)); dev_list2.push_back(three); Device four = Device(int32_t(4)); @@ -119,8 +119,8 @@ TEST_F(TestGroupManager, test_CreateGroup) { ASSERT_EQ(gm.CreateGroup("1-2", dev_list2, gp_ptr), Status::SUCCESS); ASSERT_STREQ(gp_ptr->name().data(), "1-2"); - std::list res_dev_list2 = gp_ptr->GetDevicesList(); - std::list::iterator it2 = res_dev_list2.begin(); + std::vector res_dev_list2 = gp_ptr->GetDevicesList(); + std::vector::iterator it2 = res_dev_list2.begin(); ASSERT_EQ(it2->rank(), int32_t(1)); it2++; ASSERT_EQ(it2->rank(), int32_t(2)); @@ -136,8 +136,8 @@ TEST_F(TestGroupManager, test_FindGroup) { ASSERT_EQ(gm.FindGroup(gname, &gp_ptr2), Status::SUCCESS); - std::list res_dev_list = gp_ptr2->GetDevicesList(); - std::list::iterator it = res_dev_list.begin(); + std::vector res_dev_list = gp_ptr2->GetDevicesList(); + std::vector::iterator it = res_dev_list.begin(); ASSERT_EQ(it->rank(), int32_t(1)); it++; ASSERT_EQ(it->rank(), int32_t(2)); diff --git a/tests/ut/cpp/parallel/ops_info/activation_info_test.cc b/tests/ut/cpp/parallel/ops_info/activation_info_test.cc index a725cb0bf7..a9fe9b4c48 100644 --- a/tests/ut/cpp/parallel/ops_info/activation_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/activation_info_test.cc @@ -38,13 +38,13 @@ class TestActivationInfo : public UT::Common { }; void TestActivationInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); diff --git a/tests/ut/cpp/parallel/ops_info/activation_test.cc b/tests/ut/cpp/parallel/ops_info/activation_test.cc index 200b96eb2e..149aa9d5af 100644 --- a/tests/ut/cpp/parallel/ops_info/activation_test.cc +++ b/tests/ut/cpp/parallel/ops_info/activation_test.cc @@ -40,13 +40,13 @@ class TestActivation : public UT::Common { }; 
void TestActivation::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); diff --git a/tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc b/tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc index 5ae79e2851..2f17fb4450 100644 --- a/tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc @@ -38,13 +38,13 @@ class TestDropoutDoMaskInfo : public UT::Common { }; void TestDropoutDoMaskInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 34; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(32); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/gelu_info_test.cc b/tests/ut/cpp/parallel/ops_info/gelu_info_test.cc index d07794f63d..e54d1f2423 100644 --- a/tests/ut/cpp/parallel/ops_info/gelu_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/gelu_info_test.cc @@ -38,13 +38,13 @@ class TestGeluInfo : public UT::Common { }; void TestGeluInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 130; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(128); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/generate_strategy_test.cc b/tests/ut/cpp/parallel/ops_info/generate_strategy_test.cc index 81b1d3a1ec..947ad60cca 100644 --- a/tests/ut/cpp/parallel/ops_info/generate_strategy_test.cc +++ b/tests/ut/cpp/parallel/ops_info/generate_strategy_test.cc @@ -34,13 +34,13 @@ class TestGenerateStrategy : public UT::Common { }; void TestGenerateStrategy::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 10; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(8); 
stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/generator_info_test.cc b/tests/ut/cpp/parallel/ops_info/generator_info_test.cc index bfd14ced30..eb463066a6 100644 --- a/tests/ut/cpp/parallel/ops_info/generator_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/generator_info_test.cc @@ -38,13 +38,13 @@ class TestDropoutGenMaskInfo : public UT::Common { }; void TestDropoutGenMaskInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 10; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(8); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/get_next_info_test.cc b/tests/ut/cpp/parallel/ops_info/get_next_info_test.cc index 1276a30225..503edf2eda 100644 --- a/tests/ut/cpp/parallel/ops_info/get_next_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/get_next_info_test.cc @@ -38,13 +38,13 @@ class TestGetNextInfo : public UT::Common { }; void TestGetNextInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 8; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(8); int32_t local_dev = 0; // create a new g_device_manager diff --git a/tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc b/tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc index 07a160ec99..b59481e1f6 100644 --- a/tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/l2_normalize_info_test.cc @@ -38,13 +38,13 @@ class TestL2NormalizeInfo : public UT::Common { }; void TestL2NormalizeInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 34; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(32); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc b/tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc index 1c31463c88..cf5a4239a2 100644 --- 
a/tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/log_softmax_info_test.cc @@ -38,13 +38,13 @@ class TestLogSoftmaxInfo : public UT::Common { }; void TestLogSoftmaxInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 130; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(128); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc index 02f4488744..978b792a0c 100644 --- a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc @@ -42,13 +42,13 @@ class TestMatmulInfo : public UT::Common { }; void TestMatmulInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); diff --git a/tests/ut/cpp/parallel/ops_info/onehot_info_test.cc b/tests/ut/cpp/parallel/ops_info/onehot_info_test.cc index df67175ae6..07d150a294 100644 --- a/tests/ut/cpp/parallel/ops_info/onehot_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/onehot_info_test.cc @@ -38,13 +38,13 @@ class TestOneHotInfo : public UT::Common { }; void TestOneHotInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 10; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(8); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc b/tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc index 17fe365176..c89bf97fb3 100644 --- a/tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc +++ b/tests/ut/cpp/parallel/ops_info/onehot_info_test_axis_0.cc @@ -38,13 +38,13 @@ class TestOneHotInfo2 : public UT::Common { }; void TestOneHotInfo2::SetUp() { - std::list dev_list; + std::vector 
dev_list; for (int32_t i = 0; i < 10; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(8); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/pow_info_test.cc b/tests/ut/cpp/parallel/ops_info/pow_info_test.cc index 86d62190a7..f6ea2c3d3c 100644 --- a/tests/ut/cpp/parallel/ops_info/pow_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/pow_info_test.cc @@ -38,13 +38,13 @@ class TestPowInfo : public UT::Common { }; void TestPowInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 66; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(64); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/prelu_test.cc b/tests/ut/cpp/parallel/ops_info/prelu_test.cc index bb815921e0..5ff261234f 100644 --- a/tests/ut/cpp/parallel/ops_info/prelu_test.cc +++ b/tests/ut/cpp/parallel/ops_info/prelu_test.cc @@ -39,13 +39,13 @@ class TestPReLUInfo : public UT::Common { }; void TestPReLUInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); int32_t local_dev = 0; diff --git a/tests/ut/cpp/parallel/ops_info/reduce_method_test.cc b/tests/ut/cpp/parallel/ops_info/reduce_method_test.cc index 0ce81b0cd0..a1fe46ca33 100644 --- a/tests/ut/cpp/parallel/ops_info/reduce_method_test.cc +++ b/tests/ut/cpp/parallel/ops_info/reduce_method_test.cc @@ -39,13 +39,13 @@ class TestReduceSumInfo : public UT::Common { void TestReduceSumInfo::SetUp() { UT::InitPythonPath(); - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 34; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(32); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/reshape_test.cc b/tests/ut/cpp/parallel/ops_info/reshape_test.cc index 
73a0d68be5..7ff94e9af5 100644 --- a/tests/ut/cpp/parallel/ops_info/reshape_test.cc +++ b/tests/ut/cpp/parallel/ops_info/reshape_test.cc @@ -38,13 +38,13 @@ class TestReshapeInfo : public UT::Common { }; void TestReshapeInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 34; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(32); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc b/tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc index cba9b7ecd6..03634b9a6f 100644 --- a/tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/softmax_entropy_loss_info_test.cc @@ -38,13 +38,13 @@ class TestSoftmaxLoss : public UT::Common { }; void TestSoftmaxLoss::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 65; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(64); stage_map.push_back(1); diff --git a/tests/ut/cpp/parallel/ops_info/softmax_info_test.cc b/tests/ut/cpp/parallel/ops_info/softmax_info_test.cc index 65fd46fb9c..bba6e89626 100644 --- a/tests/ut/cpp/parallel/ops_info/softmax_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/softmax_info_test.cc @@ -39,13 +39,13 @@ class TestSoftmaxInfo : public UT::Common { }; void TestSoftmaxInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 130; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(128); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/tanh_info_test.cc b/tests/ut/cpp/parallel/ops_info/tanh_info_test.cc index 3ee99d093f..a892c5c84a 100644 --- a/tests/ut/cpp/parallel/ops_info/tanh_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tanh_info_test.cc @@ -38,13 +38,13 @@ class TestTanhInfo : public UT::Common { }; void TestTanhInfo::SetUp() { - 
std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 130; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(128); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc index 612b344514..e7736a4b3e 100644 --- a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc @@ -38,13 +38,13 @@ class TestTensorAddInfo : public UT::Common { }; void TestTensorAddInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 34; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(32); stage_map.push_back(2); diff --git a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc index 7edeb5a553..ce1238baeb 100644 --- a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc @@ -38,13 +38,13 @@ class TestTmpIdentityInfo : public UT::Common { }; void TestTmpIdentityInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); diff --git a/tests/ut/cpp/parallel/ops_info/transpose_test.cc b/tests/ut/cpp/parallel/ops_info/transpose_test.cc index d54258915d..991ec47820 100644 --- a/tests/ut/cpp/parallel/ops_info/transpose_test.cc +++ b/tests/ut/cpp/parallel/ops_info/transpose_test.cc @@ -38,13 +38,13 @@ class TestTransposeInfo : public UT::Common { }; void TestTransposeInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 34; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(32); stage_map.push_back(2); diff --git 
a/tests/ut/cpp/parallel/step_auto_parallel_test.cc b/tests/ut/cpp/parallel/step_auto_parallel_test.cc index 5503377ee2..a1474ca244 100644 --- a/tests/ut/cpp/parallel/step_auto_parallel_test.cc +++ b/tests/ut/cpp/parallel/step_auto_parallel_test.cc @@ -32,13 +32,13 @@ class TestStepAutoParallel : public UT::Common { }; void TestStepAutoParallel::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 20; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(16); stage_map.push_back(4); diff --git a/tests/ut/cpp/parallel/step_parallel_test.cc b/tests/ut/cpp/parallel/step_parallel_test.cc index 23debf11ac..afc898907b 100644 --- a/tests/ut/cpp/parallel/step_parallel_test.cc +++ b/tests/ut/cpp/parallel/step_parallel_test.cc @@ -34,13 +34,13 @@ class TestStepParallel : public UT::Common { void TestStepParallel::SetUp() { UT::InitPythonPath(); } void Init_Device_Manager() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 20; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(16); stage_map.push_back(4); diff --git a/tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc b/tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc index bebe55250b..2ba8cc9dfc 100644 --- a/tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc +++ b/tests/ut/cpp/parallel/tensor_layout/construct_operator_test.cc @@ -39,12 +39,12 @@ class TestConstructOperator : public UT::Common { }; void TestConstructOperator::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); diff --git a/tests/ut/cpp/parallel/tensor_layout/redistribution_operator_infer_test.cc b/tests/ut/cpp/parallel/tensor_layout/redistribution_operator_infer_test.cc index 5ffd9b22ce..1b1dd4af04 100644 --- 
a/tests/ut/cpp/parallel/tensor_layout/redistribution_operator_infer_test.cc +++ b/tests/ut/cpp/parallel/tensor_layout/redistribution_operator_infer_test.cc @@ -28,13 +28,13 @@ class TestRedistributionOperatorInfer : public UT::Common { TestRedistributionOperatorInfer() {} void SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 1050; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(1024); stage_map.push_back(26); diff --git a/tests/ut/cpp/parallel/tensor_layout/tensor_redistribution_test.cc b/tests/ut/cpp/parallel/tensor_layout/tensor_redistribution_test.cc index ad20793417..572763faa3 100644 --- a/tests/ut/cpp/parallel/tensor_layout/tensor_redistribution_test.cc +++ b/tests/ut/cpp/parallel/tensor_layout/tensor_redistribution_test.cc @@ -33,7 +33,7 @@ class TestTensorRedistribution : public UT::Common { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(16); stage_map.push_back(4); diff --git a/tests/ut/cpp/parallel/virtual_dataset_test.cc b/tests/ut/cpp/parallel/virtual_dataset_test.cc index 602a7370f1..1d3ff081c7 100644 --- a/tests/ut/cpp/parallel/virtual_dataset_test.cc +++ b/tests/ut/cpp/parallel/virtual_dataset_test.cc @@ -37,13 +37,13 @@ class TestVirtualDatasetInfo : public UT::Common { }; void TestVirtualDatasetInfo::SetUp() { - std::list dev_list; + std::vector dev_list; for (int32_t i = 0; i < 130; i++) { dev_list.push_back(i); } - std::list stage_map; + std::vector stage_map; stage_map.push_back(16); stage_map.push_back(114); From 5a00d8cb585a8b8e26fa131d4262d9a612e27d5b Mon Sep 17 00:00:00 2001 From: wenchunjiang Date: Mon, 30 Mar 2020 17:35:19 +0800 Subject: [PATCH 025/367] This fixes an issue about mindspore process cannot exit when calling python api op_select_format failed in select kernel steps. 
Previously, the functions op_select_format and check_supported raised an exception directly on the tbe_process python side, but we did not handle the exception, and raised an exception on the c++ side to the frontend ME, which would cause conflicts when recycling resources on ME and the tbe_process python interpreter. This change adds try...catch in the functions op_select_format and check_supported on the python side, and returns the Exception string to the c++ side, so that we can raise an exception to the frontend ME and ME will deal with resource cleaning and exit. --- .../parallel_compile/tbe_compiler/helper.py | 114 ++++++++++++++++++ .../tbe_compiler/tbe_process.py | 108 +++++------------ mindspore/ccsrc/kernel/tbe/tbe_adapter.cc | 2 +- .../ccsrc/kernel/tbe/tbe_kernel_select.cc | 6 +- .../ccsrc/kernel/tbe/tbe_python_funcs.cc | 18 ++- 5 files changed, 167 insertions(+), 81 deletions(-) create mode 100644 mindspore/_extends/parallel_compile/tbe_compiler/helper.py diff --git a/mindspore/_extends/parallel_compile/tbe_compiler/helper.py b/mindspore/_extends/parallel_compile/tbe_compiler/helper.py new file mode 100644 index 0000000000..b5f7750187 --- /dev/null +++ b/mindspore/_extends/parallel_compile/tbe_compiler/helper.py @@ -0,0 +1,114 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# ============================================================================ +"""tbe process""" +import sys +import os +from .common import get_args, get_build_in_impl_path, TBEException + +build_in_impl_path = get_build_in_impl_path() + + +def _op_select_format(kernel_info): + """ + call op's op_select_format to get op supported format + + Args: + kernel_info (dict): kernel info load by json string + + Returns: + op supported format + """ + try: + # import module + op_name = kernel_info['op_info']['name'] + impl_path = build_in_impl_path + custom_flag = False + if 'impl_path' in kernel_info and kernel_info['impl_path'] is not None: + op_impl_path = os.path.realpath(kernel_info['impl_path']) + if os.path.isfile(op_impl_path): + path, file_name = os.path.split(op_impl_path) + op_name, _ = os.path.splitext(file_name) + impl_path = path + custom_flag = True + if impl_path not in sys.path: + sys.path.insert(0, impl_path) + + if custom_flag: + op_module = __import__(op_name) + else: + op_module = __import__("impl." 
+ op_name, globals(), locals(), [op_name], 0) + # get function + if not hasattr(op_module, "op_select_format"): + return "" + op_func = getattr(op_module, "op_select_format", None) + + # call function + inputs_args = get_args(kernel_info['op_info'], 'inputs') + outputs_args = get_args(kernel_info['op_info'], 'outputs') + attrs_args = get_args(kernel_info['op_info'], 'attrs') + kernel_name = kernel_info['op_info']['kernel_name'] + ret = op_func(*inputs_args, *outputs_args, *attrs_args, kernel_name=kernel_name) + + except Exception as e: + raise TBEException(str(e)) + + return ret + + +def _check_supported(kernel_info): + """ + call op's check_supported to check supported or not + + Args: + kernel_info (dict): kernel info load by json string + + Returns: + bool: check result, true or false + """ + try: + # import module + op_name = kernel_info['op_info']['name'] + impl_path = build_in_impl_path + custom_flag = False + if 'impl_path' in kernel_info and kernel_info['impl_path'] is not None: + op_impl_path = os.path.realpath(kernel_info['impl_path']) + if os.path.isfile(op_impl_path): + path, file_name = os.path.split(op_impl_path) + op_name, _ = os.path.splitext(file_name) + impl_path = path + custom_flag = True + if impl_path not in sys.path: + sys.path.insert(0, impl_path) + + if custom_flag: + op_module = __import__(op_name) + else: + op_module = __import__("impl." 
+ op_name, globals(), locals(), [op_name], 0) + # get function + if not hasattr(op_module, "check_supported"): + return "" + op_func = getattr(op_module, "check_supported", None) + + # call function + inputs_args = get_args(kernel_info['op_info'], 'inputs') + outputs_args = get_args(kernel_info['op_info'], 'outputs') + attrs_args = get_args(kernel_info['op_info'], 'attrs') + kernel_name = kernel_info['op_info']['kernel_name'] + ret = op_func(*inputs_args, *outputs_args, *attrs_args, kernel_name=kernel_name) + + except Exception as e: + raise TBEException(str(e)) + + return ret diff --git a/mindspore/_extends/parallel_compile/tbe_compiler/tbe_process.py b/mindspore/_extends/parallel_compile/tbe_compiler/tbe_process.py index 9b970b7242..2f73ced061 100644 --- a/mindspore/_extends/parallel_compile/tbe_compiler/tbe_process.py +++ b/mindspore/_extends/parallel_compile/tbe_compiler/tbe_process.py @@ -19,10 +19,8 @@ import subprocess import sys import os import json -from .common import check_kernel_info, get_args, get_build_in_impl_path - -build_in_impl_path = get_build_in_impl_path() - +from .common import check_kernel_info, TBEException +from .helper import _op_select_format, _check_supported def create_tbe_parallel_compiler(): """ @@ -41,40 +39,17 @@ def op_select_format(op_json: str): op_json (str): json string of the op Returns: - op supported format + op supported format or exception message """ ret = "" - kernel_info = json.loads(op_json) - check_kernel_info(kernel_info) - - # import module - op_name = kernel_info['op_info']['name'] - impl_path = build_in_impl_path - custom_flag = False - if 'impl_path' in kernel_info and kernel_info['impl_path'] is not None: - op_impl_path = os.path.realpath(kernel_info['impl_path']) - if os.path.isfile(op_impl_path): - path, file_name = os.path.split(op_impl_path) - op_name, _ = os.path.splitext(file_name) - impl_path = path - custom_flag = True - sys.path.insert(0, impl_path) - - if custom_flag: - op_module = __import__(op_name) 
- else: - op_module = __import__("impl." + op_name, globals(), locals(), [op_name], 0) - # get function - if not hasattr(op_module, "op_select_format"): - return "" - op_func = getattr(op_module, "op_select_format", None) - - # call function - inputs_args = get_args(kernel_info['op_info'], 'inputs') - outputs_args = get_args(kernel_info['op_info'], 'outputs') - attrs_args = get_args(kernel_info['op_info'], 'attrs') - kernel_name = kernel_info['op_info']['kernel_name'] - ret = op_func(*inputs_args, *outputs_args, *attrs_args, kernel_name=kernel_name) + try: + kernel_info = json.loads(op_json) + check_kernel_info(kernel_info) + ret = _op_select_format(kernel_info) + + except TBEException as e: + return "TBEException: " + str(e) + return ret @@ -86,40 +61,18 @@ def check_supported(op_json: str): op_json (str): json string of the op Returns: - true or false + bool: check result, true or false + str: exception message when catch an Exception """ ret = "" - kernel_info = json.loads(op_json) - check_kernel_info(kernel_info) - - # import module - op_name = kernel_info['op_info']['name'] - impl_path = build_in_impl_path - custom_flag = False - if 'impl_path' in kernel_info and kernel_info['impl_path'] is not None: - op_impl_path = os.path.realpath(kernel_info['impl_path']) - if os.path.isfile(op_impl_path): - path, file_name = os.path.split(op_impl_path) - op_name, _ = os.path.splitext(file_name) - impl_path = path - custom_flag = True - sys.path.insert(0, impl_path) - - if custom_flag: - op_module = __import__(op_name) - else: - op_module = __import__("impl." 
+ op_name, globals(), locals(), [op_name], 0) - # get function - if not hasattr(op_module, "check_supported"): - return "" - op_func = getattr(op_module, "check_supported", None) - - # call function - inputs_args = get_args(kernel_info['op_info'], 'inputs') - outputs_args = get_args(kernel_info['op_info'], 'outputs') - attrs_args = get_args(kernel_info['op_info'], 'attrs') - kernel_name = kernel_info['op_info']['kernel_name'] - ret = op_func(*inputs_args, *outputs_args, *attrs_args, kernel_name=kernel_name) + try: + kernel_info = json.loads(op_json) + check_kernel_info(kernel_info) + ret = _check_supported(kernel_info) + + except TBEException as e: + return "TBEException: " + str(e) + return ret @@ -149,12 +102,12 @@ class CompilerPool: """compiler pool""" def __init__(self): - processes = multiprocessing.cpu_count() + self.__processe_num = multiprocessing.cpu_count() # max_processes_num: Set the maximum number of concurrent processes for compiler max_processes_num = 16 - if processes > max_processes_num: - processes = max_processes_num - self.__pool = multiprocessing.Pool(processes=processes) + if self.__processe_num > max_processes_num: + self.__processe_num = max_processes_num + self.__pool = None self.__next_task_id = 1 self.__running_tasks = [] @@ -165,11 +118,10 @@ class CompilerPool: del self.__pool def exit(self): - return - # self.__pool.terminate() - # self.__pool.join() - # if self.__pool is not None: - # del self.__pool + if self.__pool is not None: + self.__pool.terminate() + self.__pool.join() + del self.__pool def start_compile_op(self, op_json): """ @@ -183,6 +135,8 @@ class CompilerPool: """ task_id = self.__next_task_id self.__next_task_id = self.__next_task_id + 1 + if self.__pool is None: + self.__pool = multiprocessing.Pool(processes=self.__processe_num) task_future = self.__pool.apply_async(func=run_compiler, args=(op_json,)) self.__running_tasks.append((task_id, task_future)) return task_id diff --git 
a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc index ddb78a08df..50fed77a9a 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc @@ -98,7 +98,7 @@ void TbeAdapter::NormalizeFuncName(std::string *func_name) { *func_name = name_tmp; auto iter = tbe_func_adapter_map.find(*func_name); if (iter != tbe_func_adapter_map.end()) { - MS_LOG(INFO) << "map actual op fron me " << func_name << "to tbe op" << iter->second; + MS_LOG(INFO) << "map actual op from me " << func_name << "to tbe op" << iter->second; *func_name = iter->second; } } diff --git a/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc b/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc index 25495d1e68..1953fd0c72 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc @@ -35,6 +35,8 @@ namespace kernel { constexpr auto kName = "name"; constexpr auto kDtype = "dtype"; constexpr auto kFormat = "format"; +constexpr auto kPrefixInput = "input"; +constexpr auto kPrefixOutput = "output"; const std::map DYNAMIC_FORMAT_MAP = {{"NCHW", "DefaultFormat"}, {"NHWC", "DefaultFormat"}, {"ND", "DefaultFormat"}, @@ -146,13 +148,13 @@ bool ParseDynamicFormatJson(const std::string &jsonStr, std::vector input = std::make_shared(); MS_EXCEPTION_IF_NULL(input); input->set_name(json_obj[key_name].at(kName)); ConvertFormatDtype(json_obj[key_name].at(kFormat), json_obj[key_name].at(kDtype), input); inputs->emplace_back(input); - } else if (key_name.find("output", 0) != std::string::npos) { + } else if (key_name.compare(0, strlen(kPrefixOutput), kPrefixOutput) == 0) { std::shared_ptr output = std::make_shared(); MS_EXCEPTION_IF_NULL(output); output->set_name(json_obj[key_name].at(kName)); diff --git a/mindspore/ccsrc/kernel/tbe/tbe_python_funcs.cc b/mindspore/ccsrc/kernel/tbe/tbe_python_funcs.cc index 1e60742fc4..7204fb7f96 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_python_funcs.cc +++ 
b/mindspore/ccsrc/kernel/tbe/tbe_python_funcs.cc @@ -26,6 +26,7 @@ constexpr auto kTbeProcessModule = "mindspore._extends.parallel_compile.tbe_comp constexpr auto kCreateTbeParallelCompilerFunc = "create_tbe_parallel_compiler"; constexpr auto kOpSelectFormatFunc = "op_select_format"; constexpr auto kCheckSupportedFunc = "check_supported"; +constexpr auto kTBEException = "TBEException"; PyObject *TbePythonFuncs::pCreateTbeParallelCompilerFunc_ = nullptr; PyObject *TbePythonFuncs::pTbeCompiler_ = nullptr; @@ -133,6 +134,10 @@ std::string TbePythonFuncs::OpSelectFormat(const nlohmann::json &kernel_json) { char *pstr = nullptr; (void)PyArg_Parse(pRet, "s", &pstr); res_json_str = pstr; + if (res_json_str.compare(0, strlen(kTBEException), kTBEException) == 0) { + MS_EXCEPTION(ArgumentError) << "Failed to call function [" << kOpSelectFormatFunc << "], " << res_json_str + << " ,function args:" << PyObjectToStr(pArg); + } return res_json_str; } @@ -167,7 +172,18 @@ bool TbePythonFuncs::CheckSupported(const nlohmann::json &kernel_json) { MS_EXCEPTION(ArgumentError) << "Failed to call function [" << kCheckSupportedFunc << "], function args: " << PyObjectToStr(pArg); } - ret = PyObject_IsTrue(pRes) != 0; + if (PyBool_Check(pRes)) { + ret = PyObject_IsTrue(pRes) != 0; + } else { + char *pstr = nullptr; + (void)PyArg_Parse(pRes, "s", &pstr); + std::string res_str = pstr; + if (res_str.compare(0, strlen(kTBEException), kTBEException) == 0) { + MS_EXCEPTION(ArgumentError) << "Failed to call function [" << kCheckSupportedFunc << "], " << res_str + << ", function args: " << PyObjectToStr(pArg); + } + } + return ret; } From 31b165a57efb33fe36006905a29b48be1fc172bf Mon Sep 17 00:00:00 2001 From: wangjun260 Date: Tue, 31 Mar 2020 10:43:24 +0800 Subject: [PATCH 026/367] add vgg scripts --- example/vgg16_cifar10/config.py | 31 +++++++++++++ example/vgg16_cifar10/dataset.py | 65 ++++++++++++++++++++++++++ example/vgg16_cifar10/eval.py | 53 ++++++++++++++++++++++ 
example/vgg16_cifar10/train.py | 78 ++++++++++++++++++++++++++++++++ mindspore/model_zoo/vgg.py | 30 +++++++----- 5 files changed, 246 insertions(+), 11 deletions(-) create mode 100644 example/vgg16_cifar10/config.py create mode 100644 example/vgg16_cifar10/dataset.py create mode 100644 example/vgg16_cifar10/eval.py create mode 100644 example/vgg16_cifar10/train.py diff --git a/example/vgg16_cifar10/config.py b/example/vgg16_cifar10/config.py new file mode 100644 index 0000000000..8c6ffee98b --- /dev/null +++ b/example/vgg16_cifar10/config.py @@ -0,0 +1,31 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +network config setting, will be used in main.py +""" +from easydict import EasyDict as edict + +cifar_cfg = edict({ + 'num_classes': 10, + 'lr_init': 0.05, + 'batch_size': 64, + 'epoch_size': 70, + 'momentum': 0.9, + 'weight_decay': 5e-4, + 'buffer_size': 10, + 'image_height': 224, + 'image_width': 224, + 'keep_checkpoint_max': 10 +}) diff --git a/example/vgg16_cifar10/dataset.py b/example/vgg16_cifar10/dataset.py new file mode 100644 index 0000000000..4e82beb2e3 --- /dev/null +++ b/example/vgg16_cifar10/dataset.py @@ -0,0 +1,65 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Data operations, will be used in train.py and eval.py +""" +import os +import mindspore.dataset as ds +import mindspore.dataset.transforms.c_transforms as C +import mindspore.dataset.transforms.vision.c_transforms as vision +import mindspore.common.dtype as mstype +from config import cifar_cfg as cfg + +def create_dataset(data_home, repeat_num=1, training=True): + """Data operations.""" + ds.config.set_seed(1) + data_dir = os.path.join(data_home, "cifar-10-batches-bin") + if not training: + data_dir = os.path.join(data_home, "cifar-10-verify-bin") + data_set = ds.Cifar10Dataset(data_dir) + resize_height = cfg.image_height + resize_width = cfg.image_width + rescale = 1.0 / 255.0 + shift = 0.0 + + # define map operations + random_crop_op = vision.RandomCrop((32, 32), (4, 4, 4, 4)) # padding_mode default CONSTANT + random_horizontal_op = vision.RandomHorizontalFlip() + resize_op = vision.Resize((resize_height, resize_width)) # interpolation default BILINEAR + rescale_op = vision.Rescale(rescale, shift) + normalize_op = vision.Normalize((0.4465, 0.4822, 0.4914), (0.2010, 0.1994, 0.2023)) + changeswap_op = vision.HWC2CHW() + type_cast_op = C.TypeCast(mstype.int32) + + c_trans = [] + if training: + c_trans = [random_crop_op, random_horizontal_op] + c_trans += [resize_op, rescale_op, normalize_op, + changeswap_op] + + # apply map operations on images + data_set = data_set.map(input_columns="label", operations=type_cast_op) + data_set = data_set.map(input_columns="image", 
operations=c_trans) + + # apply repeat operations + data_set = data_set.repeat(repeat_num) + + # apply shuffle operations + data_set = data_set.shuffle(buffer_size=10) + + # apply batch operations + data_set = data_set.batch(batch_size=cfg.batch_size, drop_remainder=True) + + return data_set diff --git a/example/vgg16_cifar10/eval.py b/example/vgg16_cifar10/eval.py new file mode 100644 index 0000000000..b034183373 --- /dev/null +++ b/example/vgg16_cifar10/eval.py @@ -0,0 +1,53 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +##############test vgg16 example on cifar10################# +python eval.py --data_path=$DATA_HOME --device_id=$DEVICE_ID +""" +import argparse +import mindspore.nn as nn +from mindspore.nn.optim.momentum import Momentum +from mindspore.train.model import Model +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.model_zoo.vgg import vgg16 +from config import cifar_cfg as cfg +import dataset + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Cifar10 classification') + parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'], + help='device where the code will be implemented. 
(Default: Ascend)') + parser.add_argument('--data_path', type=str, default='./cifar', help='path where the dataset is saved') + parser.add_argument('--checkpoint_path', type=str, default=None, help='checkpoint file path.') + parser.add_argument('--device_id', type=int, default=None, help='device id of GPU or Ascend. (Default: None)') + args_opt = parser.parse_args() + + context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target) + context.set_context(device_id=args_opt.device_id) + context.set_context(enable_mem_reuse=True, enable_hccl=False) + + net = vgg16(batch_size=cfg.batch_size, num_classes=cfg.num_classes) + opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, cfg.momentum, + weight_decay=cfg.weight_decay) + loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean', is_grad=False) + model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'}) + + param_dict = load_checkpoint(args_opt.checkpoint_path) + load_param_into_net(net, param_dict) + net.set_train(False) + dataset = dataset.create_dataset(args_opt.data_path, 1, False) + res = model.eval(dataset) + print("result: ", res) diff --git a/example/vgg16_cifar10/train.py b/example/vgg16_cifar10/train.py new file mode 100644 index 0000000000..32cd344d50 --- /dev/null +++ b/example/vgg16_cifar10/train.py @@ -0,0 +1,78 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +#################train vgg16 example on cifar10######################## +python train.py --data_path=$DATA_HOME --device_id=$DEVICE_ID +""" +import argparse +import random +import numpy as np +import mindspore.nn as nn +from mindspore import Tensor +from mindspore.nn.optim.momentum import Momentum +from mindspore.train.model import Model +from mindspore import context +from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor +from mindspore.model_zoo.vgg import vgg16 +import dataset +from config import cifar_cfg as cfg +random.seed(1) +np.random.seed(1) + +def lr_steps(global_step, lr_max=None, total_epochs=None, steps_per_epoch=None): + """Set learning rate.""" + lr_each_step = [] + total_steps = steps_per_epoch * total_epochs + decay_epoch_index = [0.3 * total_steps, 0.6 * total_steps, 0.8 * total_steps] + for i in range(total_steps): + if i < decay_epoch_index[0]: + lr_each_step.append(lr_max) + elif i < decay_epoch_index[1]: + lr_each_step.append(lr_max * 0.1) + elif i < decay_epoch_index[2]: + lr_each_step.append(lr_max * 0.01) + else: + lr_each_step.append(lr_max * 0.001) + current_step = global_step + lr_each_step = np.array(lr_each_step).astype(np.float32) + learning_rate = lr_each_step[current_step:] + + return learning_rate + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Cifar10 classification') + parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'], + help='device where the code will be implemented. (Default: Ascend)') + parser.add_argument('--data_path', type=str, default='./cifar', help='path where the dataset is saved') + parser.add_argument('--device_id', type=int, default=None, help='device id of GPU or Ascend. 
(Default: None)') + args_opt = parser.parse_args() + + context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target) + context.set_context(device_id=args_opt.device_id) + context.set_context(enable_mem_reuse=True, enable_hccl=False) + + net = vgg16(batch_size=cfg.batch_size, num_classes=cfg.num_classes) + lr = lr_steps(0, lr_max=cfg.lr_init, total_epochs=cfg.epoch_size, steps_per_epoch=50000 // cfg.batch_size) + opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), Tensor(lr), cfg.momentum, weight_decay=cfg.weight_decay) + loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean', is_grad=False) + model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'}) + + dataset = dataset.create_dataset(args_opt.data_path, cfg.epoch_size) + batch_num = dataset.get_dataset_size() + config_ck = CheckpointConfig(save_checkpoint_steps=batch_num * 5, keep_checkpoint_max=cfg.keep_checkpoint_max) + ckpoint_cb = ModelCheckpoint(prefix="train_vgg_cifar10", directory="./", config=config_ck) + loss_cb = LossMonitor() + model.train(cfg.epoch_size, dataset, callbacks=[ckpoint_cb, loss_cb]) diff --git a/mindspore/model_zoo/vgg.py b/mindspore/model_zoo/vgg.py index 76f265c326..6fcd075cc8 100644 --- a/mindspore/model_zoo/vgg.py +++ b/mindspore/model_zoo/vgg.py @@ -15,7 +15,8 @@ """VGG.""" import mindspore.nn as nn from mindspore.ops import operations as P - +from mindspore.common.initializer import initializer +import mindspore.common.dtype as mstype def _make_layer(base, batch_norm): """Make stage network of VGG.""" @@ -25,11 +26,14 @@ def _make_layer(base, batch_norm): if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: + weight_shape = (v, in_channels, 3, 3) + weight = initializer('XavierUniform', shape=weight_shape, dtype=mstype.float32) conv2d = nn.Conv2d(in_channels=in_channels, out_channels=v, kernel_size=3, - padding=1, - pad_mode='pad') + padding=0, + pad_mode='same', + weight_init=weight) if batch_norm: layers 
+= [conv2d, nn.BatchNorm2d(v), nn.ReLU()] else: @@ -52,13 +56,13 @@ class Vgg(nn.Cell): Tensor, infer output tensor. Examples: - >>> VGG([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + >>> Vgg([64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], >>> num_classes=1000, batch_norm=False, batch_size=1) """ + def __init__(self, base, num_classes=1000, batch_norm=False, batch_size=1): super(Vgg, self).__init__() self.layers = _make_layer(base, batch_norm=batch_norm) - self.avgpool = nn.AvgPool2d(7) self.reshape = P.Reshape() self.shp = (batch_size, -1) self.classifier = nn.SequentialCell([ @@ -70,7 +74,6 @@ class Vgg(nn.Cell): def construct(self, x): x = self.layers(x) - x = self.avgpool(x) x = self.reshape(x, self.shp) x = self.classifier(x) return x @@ -84,15 +87,20 @@ cfg = { } -def vgg16(): +def vgg16(batch_size=1, num_classes=1000): """ - Get VGG16 neural network. + Get Vgg16 neural network with batch normalization. + + Args: + batch_size (int): Batch size. Default: 1. + num_classes (int): Class numbers. Default: 1000. Returns: - Cell, cell instance of VGG16 neural network. + Cell, cell instance of Vgg16 neural network with batch normalization. 
Examples: - >>> vgg16() + >>> vgg16(batch_size=1, num_classes=1000) """ - net = Vgg(cfg['16'], num_classes=1000, batch_norm=False, batch_size=1) + + net = Vgg(cfg['16'], num_classes=num_classes, batch_norm=True, batch_size=batch_size) return net From f946aea10d1832c39ed0696937b470c3c0b3d5e0 Mon Sep 17 00:00:00 2001 From: lichenever Date: Tue, 31 Mar 2020 11:14:28 +0800 Subject: [PATCH 027/367] fix grpah mode loop sink bug in auto parallel --- .../ccsrc/parallel/auto_parallel/graph_costmodel.h | 4 ++-- mindspore/ccsrc/parallel/step_parallel.cc | 4 ++++ mindspore/train/dataset_helper.py | 2 +- .../parallel/test_auto_parallel_parameter_cast.py | 12 ++++++------ .../python/parallel/test_auto_parallel_two_matmul.py | 7 ++++--- 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h index 4efb77055f..fde9514540 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h @@ -34,8 +34,8 @@ namespace parallel { #define OPERATOR_TO_OPERATOR_CONNECTOR "-" #define DEFAULT_DEVICE_MEMORY_CAPACITY (1024.0 * 1024.0 * 1024.0 * 16.0) #define DEFAULT_COST_MODEL_ALPHA 1.0 -#define DEFAULT_COST_MODEL_BETA 65.0 -#define DEFAULT_COST_MODEL_GAMMA 0.02 +#define DEFAULT_COST_MODEL_BETA 260.0 +#define DEFAULT_COST_MODEL_GAMMA 0.001 #define DEFAULT_COST_MODEL_SIMPLIFY_CALCULATION true #define DEFAULT_COST_MODEL_COMMUNI_THRESHOLD 2048.0 #define DEFAULT_COST_MODEL_COMMUNI_CONST 3072.0 diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index 927acea705..21da11ec21 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -375,6 +375,10 @@ bool IsParallelCareNode(const CNodePtr& cnode) { MS_LOG(INFO) << "Parallel don't care node: " << prim->name(); return false; } + // get_next is not in the forward graph, we 
need mark the get_next as the forward node + if (prim->name() == GET_NEXT) { + return true; + } if ((prim->name() == CAST)) { if ((!attrs.count(STRATEGY)) && (cnode->operator_info() == nullptr)) { return false; diff --git a/mindspore/train/dataset_helper.py b/mindspore/train/dataset_helper.py index de26d72108..6252116efe 100644 --- a/mindspore/train/dataset_helper.py +++ b/mindspore/train/dataset_helper.py @@ -88,7 +88,7 @@ class _DatasetIter: # times the batch dimension of tensors for run if _get_parallel_mode() in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL): device_num = _get_device_num() - dataset_shapes = _to_full_shapes(dataset_shapes, device_num) + self.dataset_shapes = _to_full_shapes(dataset_shapes, device_num) def __iter__(self): self.ind = 0 diff --git a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py index 67b8f98faf..b7a3255f7c 100644 --- a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py +++ b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py @@ -80,9 +80,9 @@ def test_common_parameter(): _executor.compile(net, x, y, z, w, phase='train') strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-Net/MatMul-op8': [[1, 1], [1, 8]], - 'Default/network-Net/MatMul-op9': [[1, 1], [1, 8]], - 'Default/network-Net/Cast-op10': [[1, 8]], - 'Default/network-Net/MatMul-op0': [[1, 1], [1, 8]], - 'Default/network-Net/Cast-op11': [[1, 8]]} - assert strategies == expected_strategies \ No newline at end of file + expected_strategies = {'Default/network-Net/MatMul-op6': [[8, 1], [1, 1]], + 'Default/network-Net/MatMul-op8': [[8, 1], [1, 1]], + 'Default/network-Net/Cast-op7': [[1, 1]], + 'Default/network-Net/MatMul-op0': [[8, 1], [1, 1]], + 'Default/network-Net/Cast-op9': [[1, 1]]} + assert strategies == expected_strategies diff --git a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py 
b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py index 5155db41f6..e7beed384e 100644 --- a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py +++ b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py @@ -86,9 +86,9 @@ def test_two_matmul(): costmodel_alpha = cost_model_context.get_cost_model_context("costmodel_alpha") assert costmodel_alpha == 1.0 costmodel_beta = cost_model_context.get_cost_model_context("costmodel_beta") - assert costmodel_beta == 65.0 + assert costmodel_beta == 260.0 costmodel_gamma = cost_model_context.get_cost_model_context("costmodel_gamma") - assert costmodel_gamma == 0.02 + assert costmodel_gamma == 0.001 costmodel_communi_threshold = cost_model_context.get_cost_model_context("costmodel_communi_threshold") assert costmodel_communi_threshold == 2048.0 costmodel_communi_const = cost_model_context.get_cost_model_context("costmodel_communi_const") @@ -137,4 +137,5 @@ def test_two_matmul(): strategies = _executor._get_strategy(net) expected_strategies = {'Default/network-Net/MatMul-op2': [[16, 1], [1, 1]], 'Default/network-Net/MatMul-op3': [[16, 1], [1, 1]]} - assert strategies == expected_strategies \ No newline at end of file + assert strategies == expected_strategies + From eaf7146d4689168c23f2601c1f47c8675848fccc Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Tue, 31 Mar 2020 16:18:04 +0800 Subject: [PATCH 028/367] modify longtime python ut --- mindspore/nn/layer/conv.py | 2 +- tests/ut/python/parallel/test_auto_parallel_reshape.py | 6 +++--- tests/ut/python/parallel/test_one_hot_net.py | 8 ++++---- tests/ut/python/parallel/test_reshape.py | 8 ++++---- tests/ut/python/train/summary/test_summary_performance.py | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/mindspore/nn/layer/conv.py b/mindspore/nn/layer/conv.py index 6c78e1a715..666be93451 100644 --- a/mindspore/nn/layer/conv.py +++ b/mindspore/nn/layer/conv.py @@ -159,7 +159,7 @@ class Conv2d(_Conv): >>> net = 
nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal') >>> input = mindspore.Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32) >>> net(input).shape() - (1, 240, 1024, 637) + (1, 240, 1024, 640) """ @cell_attr_register def __init__(self, diff --git a/tests/ut/python/parallel/test_auto_parallel_reshape.py b/tests/ut/python/parallel/test_auto_parallel_reshape.py index 26e7e95a9f..ed9f24cc0b 100644 --- a/tests/ut/python/parallel/test_auto_parallel_reshape.py +++ b/tests/ut/python/parallel/test_auto_parallel_reshape.py @@ -49,16 +49,16 @@ def test_reshape_matmul(): super().__init__() self.reshape = P.Reshape() self.matmul = P.MatMul() - self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight") + self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight") def construct(self, x): - out = self.reshape(x, (256, 25088)) + out = self.reshape(x, (64, 28)) out = self.matmul(out, self.matmul_weight) return out size = 8 context.set_auto_parallel_context(device_num=size, global_rank=0) - x = Tensor(np.ones([32*size, 512, 7, 7]), dtype=ms.float32) + x = Tensor(np.ones([8*size, 28, 1, 1]), dtype=ms.float32) net = GradWrap(NetWithLoss(Net())) context.set_auto_parallel_context(parallel_mode="auto_parallel") diff --git a/tests/ut/python/parallel/test_one_hot_net.py b/tests/ut/python/parallel/test_one_hot_net.py index bf2677056e..87b4acfe37 100644 --- a/tests/ut/python/parallel/test_one_hot_net.py +++ b/tests/ut/python/parallel/test_one_hot_net.py @@ -247,15 +247,15 @@ def fc_with_initialize(input_channels, out_channels): class BNReshapeDenseBNNet(nn.Cell): def __init__(self): super(BNReshapeDenseBNNet, self).__init__() - self.batch_norm = bn_with_initialize(512) + self.batch_norm = bn_with_initialize(2) self.reshape = P.Reshape() self.batch_norm2 = nn.BatchNorm1d(512, affine=False) - self.fc = fc_with_initialize(512 * 32 * 32, 512) + self.fc = fc_with_initialize(2 * 32 * 32, 512) self.loss = 
SemiAutoOneHotNet(args=Args(), strategy=StrategyBatch()) def construct(self, x, label): x = self.batch_norm(x) - x = self.reshape(x, (16, 512*32*32)) + x = self.reshape(x, (16, 2*32*32)) x = self.fc(x) x = self.batch_norm2(x) loss = self.loss(x, label) @@ -266,7 +266,7 @@ def test_bn_reshape_dense_bn_train_loss(): batch_size = 16 device_num = 16 context.set_auto_parallel_context(device_num=device_num, global_rank=0) - input = Tensor(np.ones([batch_size, 512, 32, 32]).astype(np.float32) * 0.01) + input = Tensor(np.ones([batch_size, 2, 32, 32]).astype(np.float32) * 0.01) label = Tensor(np.ones([batch_size]), dtype=ms.int32) net = GradWrap(NetWithLoss(BNReshapeDenseBNNet())) diff --git a/tests/ut/python/parallel/test_reshape.py b/tests/ut/python/parallel/test_reshape.py index 11ca435e5b..43906aec23 100644 --- a/tests/ut/python/parallel/test_reshape.py +++ b/tests/ut/python/parallel/test_reshape.py @@ -490,15 +490,15 @@ def fc_with_initialize(input_channels, out_channels): class BNReshapeDenseBNNet(nn.Cell): def __init__(self): super(BNReshapeDenseBNNet, self).__init__() - self.batch_norm = bn_with_initialize(512) + self.batch_norm = bn_with_initialize(2) self.reshape = P.Reshape() self.cast = P.Cast() self.batch_norm2 = nn.BatchNorm1d(512, affine=False) - self.fc = fc_with_initialize(512 * 32 * 32, 512) + self.fc = fc_with_initialize(2 * 32 * 32, 512) def construct(self, x): x = self.batch_norm(x) - x = self.reshape(x, (16, 512*32*32)) + x = self.reshape(x, (16, 2*32*32)) x = self.fc(x) x = self.batch_norm2(x) return x @@ -508,7 +508,7 @@ def test_bn_reshape_dense_bn_train(): batch_size = 16 device_num = 16 context.set_auto_parallel_context(device_num=device_num, global_rank=0) - input = Tensor(np.ones([batch_size, 512, 32, 32]).astype(np.float32) * 0.01) + input = Tensor(np.ones([batch_size, 2, 32, 32]).astype(np.float32) * 0.01) net = GradWrap(NetWithLoss(BNReshapeDenseBNNet())) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") diff --git 
a/tests/ut/python/train/summary/test_summary_performance.py b/tests/ut/python/train/summary/test_summary_performance.py index f004d4e584..9ee9725d13 100644 --- a/tests/ut/python/train/summary/test_summary_performance.py +++ b/tests/ut/python/train/summary/test_summary_performance.py @@ -43,9 +43,9 @@ def get_test_data(step): tag1 = "xt1[:Tensor]" tag2 = "xt2[:Tensor]" tag3 = "xt3[:Tensor]" - np1 = np.random.random((50, 40, 30, 50)) - np2 = np.random.random((50, 50, 30, 50)) - np3 = np.random.random((40, 55, 30, 50)) + np1 = np.random.random((5, 4, 3, 5)) + np2 = np.random.random((5, 5, 3, 5)) + np3 = np.random.random((4, 5, 3, 5)) dict1 = {} dict1["name"] = tag1 From e6c15b82c8f7555ab1bb91685e7392507874f49a Mon Sep 17 00:00:00 2001 From: jojobugfree Date: Tue, 31 Mar 2020 17:02:25 +0800 Subject: [PATCH 029/367] remove cce dependency --- mindspore/ccsrc/CMakeLists.txt | 3 --- mindspore/ccsrc/device/ascend/tasksink/task_generator.h | 3 --- mindspore/ccsrc/kernel/CMakeLists.txt | 3 +-- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 72ad4d7b09..1c684b6736 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -185,7 +185,6 @@ if(ENABLE_D) "device/ascend/*.cc" "device/ascend/profiling/*.cc" "device/ascend/tasksink/*.cc" - "kernel/akg/cce/*.cc" "device/kernel_adjust.cc" "kernel/kernel_fusion.cc" "kernel/tbe/*.cc" @@ -361,7 +360,6 @@ if(ENABLE_GPU) ${TVM_DIR}/src/node/*.cc ${TVM_DIR}/src/schedule/*.cc ${TVM_DIR}/src/runtime/*.cc - ${TVM_DIR}/src/runtime/cce/*.cc ${TVM_DIR}/src/runtime/vm/*.cc ${TVM_DIR}/src/runtime/vm/profiler/*.cc ${TVM_DIR}/src/codegen/stackvm/*.cc) @@ -379,7 +377,6 @@ if(ENABLE_GPU) file(GLOB RUNTIME_SRCS ${TVM_DIR}/src/runtime/*.cc - ${TVM_DIR}/src/runtime/cce/*.cc ${TVM_DIR}/src/runtime/vm/*.cc ${TVM_DIR}/src/runtime/stub/*.cc ${TVM_DIR}/src/runtime/stackvm/*.cc) diff --git a/mindspore/ccsrc/device/ascend/tasksink/task_generator.h 
b/mindspore/ccsrc/device/ascend/tasksink/task_generator.h index 4d1fba9db4..0c56fcc744 100644 --- a/mindspore/ccsrc/device/ascend/tasksink/task_generator.h +++ b/mindspore/ccsrc/device/ascend/tasksink/task_generator.h @@ -22,9 +22,6 @@ #include #include #include -#include "cce/aicpu_engine_struct.h" -#include "cce/taskdown_api.h" -#include "cce/fwk_adpt_struct.h" #include "device/kernel_runtime.h" #include "ir/anf.h" #include "kernel/kernel.h" diff --git a/mindspore/ccsrc/kernel/CMakeLists.txt b/mindspore/ccsrc/kernel/CMakeLists.txt index 4cd60b591c..9c5e2c1890 100644 --- a/mindspore/ccsrc/kernel/CMakeLists.txt +++ b/mindspore/ccsrc/kernel/CMakeLists.txt @@ -25,11 +25,10 @@ endif() if(ENABLE_D) file(GLOB_RECURSE _D_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} - "akg/cce/*.cc" "tbe/*.cc" "aicpu/*.cc" "mng/*.cc" "hccl/*.cc" ) target_sources(_mindspore_kernel_obj PRIVATE ${_D_SRC_LIST}) -endif() \ No newline at end of file +endif() From 0173c401240b3158d2cbf7fc9c38ecc63c1b978f Mon Sep 17 00:00:00 2001 From: wukesong Date: Sat, 28 Mar 2020 18:26:56 +0800 Subject: [PATCH 030/367] add lenet & alexnet in master branch --- example/alexnet_cifar10/config.py | 32 ++++++ example/alexnet_cifar10/dataset.py | 54 ++++++++++ example/alexnet_cifar10/eval.py | 58 +++++++++++ example/alexnet_cifar10/train.py | 58 +++++++++++ example/lenet/main.py | 125 ----------------------- example/{lenet => lenet_mnist}/config.py | 4 +- example/lenet_mnist/dataset.py | 60 +++++++++++ example/lenet_mnist/eval.py | 64 ++++++++++++ example/lenet_mnist/train.py | 58 +++++++++++ mindspore/model_zoo/alexnet.py | 73 +++++++++++++ mindspore/model_zoo/lenet.py | 5 +- 11 files changed, 461 insertions(+), 130 deletions(-) create mode 100644 example/alexnet_cifar10/config.py create mode 100644 example/alexnet_cifar10/dataset.py create mode 100644 example/alexnet_cifar10/eval.py create mode 100644 example/alexnet_cifar10/train.py delete mode 100644 example/lenet/main.py rename example/{lenet => 
lenet_mnist}/config.py (93%) create mode 100644 example/lenet_mnist/dataset.py create mode 100644 example/lenet_mnist/eval.py create mode 100644 example/lenet_mnist/train.py create mode 100644 mindspore/model_zoo/alexnet.py diff --git a/example/alexnet_cifar10/config.py b/example/alexnet_cifar10/config.py new file mode 100644 index 0000000000..9edfec2b60 --- /dev/null +++ b/example/alexnet_cifar10/config.py @@ -0,0 +1,32 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +network config setting, will be used in train.py +""" + +from easydict import EasyDict as edict + +alexnet_cfg = edict({ + 'num_classes': 10, + 'learning_rate': 0.002, + 'momentum': 0.9, + 'epoch_size': 1, + 'batch_size': 32, + 'buffer_size': 1000, + 'image_height': 227, + 'image_width': 227, + 'save_checkpoint_steps': 1562, + 'keep_checkpoint_max': 10, +}) diff --git a/example/alexnet_cifar10/dataset.py b/example/alexnet_cifar10/dataset.py new file mode 100644 index 0000000000..d62ed2852d --- /dev/null +++ b/example/alexnet_cifar10/dataset.py @@ -0,0 +1,54 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Produce the dataset +""" + +from config import alexnet_cfg as cfg +import mindspore.dataset as ds +import mindspore.dataset.transforms.c_transforms as C +import mindspore.dataset.transforms.vision.c_transforms as CV +from mindspore.common import dtype as mstype + + +def create_dataset(data_path, batch_size=32, repeat_size=1, status="train"): + """ + create dataset for train or test + """ + cifar_ds = ds.Cifar10Dataset(data_path) + rescale = 1.0 / 255.0 + shift = 0.0 + + resize_op = CV.Resize((cfg.image_height, cfg.image_width)) + rescale_op = CV.Rescale(rescale, shift) + normalize_op = CV.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)) + if status == "train": + random_crop_op = CV.RandomCrop([32, 32], [4, 4, 4, 4]) + random_horizontal_op = CV.RandomHorizontalFlip() + channel_swap_op = CV.HWC2CHW() + typecast_op = C.TypeCast(mstype.int32) + cifar_ds = cifar_ds.map(input_columns="label", operations=typecast_op) + if status == "train": + cifar_ds = cifar_ds.map(input_columns="image", operations=random_crop_op) + cifar_ds = cifar_ds.map(input_columns="image", operations=random_horizontal_op) + cifar_ds = cifar_ds.map(input_columns="image", operations=resize_op) + cifar_ds = cifar_ds.map(input_columns="image", operations=rescale_op) + cifar_ds = cifar_ds.map(input_columns="image", operations=normalize_op) + cifar_ds = cifar_ds.map(input_columns="image", operations=channel_swap_op) + + cifar_ds = cifar_ds.shuffle(buffer_size=cfg.buffer_size) + cifar_ds = 
cifar_ds.batch(batch_size, drop_remainder=True) + cifar_ds = cifar_ds.repeat(repeat_size) + return cifar_ds diff --git a/example/alexnet_cifar10/eval.py b/example/alexnet_cifar10/eval.py new file mode 100644 index 0000000000..be71e33995 --- /dev/null +++ b/example/alexnet_cifar10/eval.py @@ -0,0 +1,58 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +######################## eval alexnet example ######################## +eval alexnet according to model file: +python eval.py --data_path /YourDataPath --ckpt_path Your.ckpt +""" + +import argparse +from config import alexnet_cfg as cfg +from dataset import create_dataset +import mindspore.nn as nn +from mindspore import context +from mindspore.model_zoo.alexnet import AlexNet +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.train import Model +from mindspore.nn.metrics import Accuracy + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='MindSpore AlexNet Example') + parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'], + help='device where the code will be implemented (default: Ascend)') + parser.add_argument('--data_path', type=str, default="./", help='path where the dataset is saved') + parser.add_argument('--ckpt_path', type=str, default="./ckpt", help='if is test, must 
provide\ + path where the trained ckpt file') + parser.add_argument('--dataset_sink_mode', type=bool, default=False, help='dataset_sink_mode is False or True') + args = parser.parse_args() + + context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, enable_mem_reuse=False) + + network = AlexNet(cfg.num_classes) + loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean") + repeat_size = cfg.epoch_size + opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum) + model = Model(network, loss, opt, metrics={"Accuracy": Accuracy()}) # test + + print("============== Starting Testing ==============") + param_dict = load_checkpoint(args.ckpt_path) + load_param_into_net(network, param_dict) + ds_eval = create_dataset(args.data_path, + cfg.batch_size, + 1, + "test") + acc = model.eval(ds_eval, dataset_sink_mode=args.dataset_sink_mode) + print("============== Accuracy:{} ==============".format(acc)) diff --git a/example/alexnet_cifar10/train.py b/example/alexnet_cifar10/train.py new file mode 100644 index 0000000000..b97843902d --- /dev/null +++ b/example/alexnet_cifar10/train.py @@ -0,0 +1,58 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +######################## train alexnet example ######################## +train alexnet and get network model files(.ckpt) : +python train.py --data_path /YourDataPath +""" + +import argparse +from config import alexnet_cfg as cfg +from dataset import create_dataset +import mindspore.nn as nn +from mindspore import context +from mindspore.train import Model +from mindspore.nn.metrics import Accuracy +from mindspore.model_zoo.alexnet import AlexNet +from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='MindSpore AlexNet Example') + parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'], + help='device where the code will be implemented (default: Ascend)') + parser.add_argument('--data_path', type=str, default="./", help='path where the dataset is saved') + parser.add_argument('--ckpt_path', type=str, default="./ckpt", help='if is test, must provide\ + path where the trained ckpt file') + parser.add_argument('--dataset_sink_mode', type=bool, default=False, help='dataset_sink_mode is False or True') + args = parser.parse_args() + + context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, enable_mem_reuse=False) + + network = AlexNet(cfg.num_classes) + loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean") + opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum) + model = Model(network, loss, opt, metrics={"Accuracy": Accuracy()}) # test + + print("============== Starting Training ==============") + ds_train = create_dataset(args.data_path, + cfg.batch_size, + cfg.epoch_size, + "train") + config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps, + keep_checkpoint_max=cfg.keep_checkpoint_max) + ckpoint_cb = 
ModelCheckpoint(prefix="checkpoint_alexnet", directory=args.ckpt_path, config=config_ck) + model.train(cfg.epoch_size, ds_train, callbacks=[ckpoint_cb, LossMonitor()], + dataset_sink_mode=args.dataset_sink_mode) diff --git a/example/lenet/main.py b/example/lenet/main.py deleted file mode 100644 index fe20264d95..0000000000 --- a/example/lenet/main.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -""" -######################## train and test lenet example ######################## -1. train lenet and get network model files(.ckpt) : -python main.py --data_path /home/workspace/mindspore_dataset/Tutorial_Network/Lenet/MNIST_Data - -2. 
test lenet according to model file: -python main.py --data_path /home/workspace/mindspore_dataset/Tutorial_Network/Lenet/MNIST_Data - --mode test --ckpt_path checkpoint_lenet_1-1_1875.ckpt -""" -import os -import argparse -from config import mnist_cfg as cfg - -import mindspore.dataengine as de -import mindspore.nn as nn -from mindspore.model_zoo.lenet import LeNet5 -from mindspore import context, Tensor -from mindspore.train.serialization import load_checkpoint, load_param_into_net -from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor -from mindspore.train import Model -import mindspore.ops.operations as P -import mindspore.transforms.c_transforms as C -from mindspore.transforms import Inter -from mindspore.nn.metrics import Accuracy -from mindspore.ops import functional as F -from mindspore.common import dtype as mstype - - -class CrossEntropyLoss(nn.Cell): - """ - Define loss for network - """ - def __init__(self): - super(CrossEntropyLoss, self).__init__() - self.cross_entropy = P.SoftmaxCrossEntropyWithLogits() - self.mean = P.ReduceMean() - self.one_hot = P.OneHot() - self.on_value = Tensor(1.0, mstype.float32) - self.off_value = Tensor(0.0, mstype.float32) - - def construct(self, logits, label): - label = self.one_hot(label, F.shape(logits)[1], self.on_value, self.off_value) - loss = self.cross_entropy(logits, label)[0] - loss = self.mean(loss, (-1,)) - return loss - -def create_dataset(data_path, batch_size=32, repeat_size=1, - num_parallel_workers=1): - """ - create dataset for train or test - """ - # define dataset - ds1 = de.MnistDataset(data_path) - - # apply map operations on images - ds1 = ds1.map(input_columns="label", operations=C.TypeCast(mstype.int32)) - ds1 = ds1.map(input_columns="image", operations=C.Resize((cfg.image_height, cfg.image_width), - interpolation=Inter.LINEAR), - num_parallel_workers=num_parallel_workers) - ds1 = ds1.map(input_columns="image", operations=C.Rescale(1 / 0.3081, -1 * 0.1307 / 0.3081), - 
num_parallel_workers=num_parallel_workers) - ds1 = ds1.map(input_columns="image", operations=C.Rescale(1.0 / 255.0, 0.0), - num_parallel_workers=num_parallel_workers) - ds1 = ds1.map(input_columns="image", operations=C.HWC2CHW(), num_parallel_workers=num_parallel_workers) - - # apply DatasetOps - ds1 = ds1.shuffle(buffer_size=cfg.buffer_size) # 10000 as in LeNet train script - ds1 = ds1.batch(batch_size, drop_remainder=True) - ds1 = ds1.repeat(repeat_size) - - return ds1 - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description='MindSpore MNIST Example') - parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'], - help='device where the code will be implemented (default: Ascend)') - parser.add_argument('--mode', type=str, default="train", choices=['train', 'test'], - help='implement phase, set to train or test') - parser.add_argument('--data_path', type=str, default="./MNIST_Data", - help='path where the dataset is saved') - parser.add_argument('--ckpt_path', type=str, default="", help='if mode is test, must provide\ - path where the trained ckpt file') - - args = parser.parse_args() - - context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target) - - network = LeNet5(cfg.num_classes) - network.set_train() - # net_loss = nn.SoftmaxCrossEntropyWithLogits() # support this loss soon - net_loss = CrossEntropyLoss() - net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum) - config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps, - keep_checkpoint_max=cfg.keep_checkpoint_max) - ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck) - model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}) - - if args.mode == 'train': # train - ds = create_dataset(os.path.join(args.data_path, args.mode), batch_size=cfg.batch_size, - repeat_size=cfg.epoch_size) - print("============== Starting Training ==============") - 
model.train(cfg['epoch_size'], ds, callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False) - elif args.mode == 'test': # test - print("============== Starting Testing ==============") - param_dict = load_checkpoint(args.ckpt_path) - load_param_into_net(network, param_dict) - ds_eval = create_dataset(os.path.join(args.data_path, "test"), 32, 1) - acc = model.eval(ds_eval, dataset_sink_mode=False) - print("============== Accuracy:{} ==============".format(acc)) - else: - raise RuntimeError('mode should be train or test, rather than {}'.format(args.mode)) diff --git a/example/lenet/config.py b/example/lenet_mnist/config.py similarity index 93% rename from example/lenet/config.py rename to example/lenet_mnist/config.py index 3ad78f3bde..9a13ae535f 100644 --- a/example/lenet/config.py +++ b/example/lenet_mnist/config.py @@ -13,8 +13,9 @@ # limitations under the License. # ============================================================================ """ -network config setting, will be used in main.py +network config setting, will be used in train.py """ + from easydict import EasyDict as edict mnist_cfg = edict({ @@ -23,7 +24,6 @@ mnist_cfg = edict({ 'momentum': 0.9, 'epoch_size': 1, 'batch_size': 32, - 'repeat_size': 1, 'buffer_size': 1000, 'image_height': 32, 'image_width': 32, diff --git a/example/lenet_mnist/dataset.py b/example/lenet_mnist/dataset.py new file mode 100644 index 0000000000..cef6973483 --- /dev/null +++ b/example/lenet_mnist/dataset.py @@ -0,0 +1,60 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +Produce the dataset +""" + +import mindspore.dataset as ds +import mindspore.dataset.transforms.vision.c_transforms as CV +import mindspore.dataset.transforms.c_transforms as C +from mindspore.dataset.transforms.vision import Inter +from mindspore.common import dtype as mstype + + +def create_dataset(data_path, batch_size=32, repeat_size=1, + num_parallel_workers=1): + """ + create dataset for train or test + """ + # define dataset + mnist_ds = ds.MnistDataset(data_path) + + resize_height, resize_width = 32, 32 + rescale = 1.0 / 255.0 + shift = 0.0 + rescale_nml = 1 / 0.3081 + shift_nml = -1 * 0.1307 / 0.3081 + + # define map operations + resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode + rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) + rescale_op = CV.Rescale(rescale, shift) + hwc2chw_op = CV.HWC2CHW() + type_cast_op = C.TypeCast(mstype.int32) + + # apply map operations on images + mnist_ds = mnist_ds.map(input_columns="label", operations=type_cast_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=resize_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=rescale_nml_op, num_parallel_workers=num_parallel_workers) + mnist_ds = mnist_ds.map(input_columns="image", operations=hwc2chw_op, num_parallel_workers=num_parallel_workers) + + # apply DatasetOps + buffer_size = 10000 + mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script + mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True) + mnist_ds = mnist_ds.repeat(repeat_size) + + return mnist_ds diff --git 
a/example/lenet_mnist/eval.py b/example/lenet_mnist/eval.py new file mode 100644 index 0000000000..3473a99532 --- /dev/null +++ b/example/lenet_mnist/eval.py @@ -0,0 +1,64 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +######################## eval lenet example ######################## +eval lenet according to model file: +python eval.py --data_path /YourDataPath --ckpt_path Your.ckpt +""" + +import os +import argparse +from dataset import create_dataset +from config import mnist_cfg as cfg +import mindspore.nn as nn +from mindspore.model_zoo.lenet import LeNet5 +from mindspore import context +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.train.callback import ModelCheckpoint, CheckpointConfig +from mindspore.train import Model +from mindspore.nn.metrics import Accuracy + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='MindSpore MNIST Example') + parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'], + help='device where the code will be implemented (default: Ascend)') + parser.add_argument('--data_path', type=str, default="./MNIST_Data", + help='path where the dataset is saved') + parser.add_argument('--ckpt_path', type=str, default="", help='if mode is test, must provide\ + path where the trained ckpt file') 
+ parser.add_argument('--dataset_sink_mode', type=bool, default=False, help='dataset_sink_mode is False or True') + + args = parser.parse_args() + + context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, enable_mem_reuse=False) + + network = LeNet5(cfg.num_classes) + net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean") + repeat_size = cfg.epoch_size + net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum) + config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps, + keep_checkpoint_max=cfg.keep_checkpoint_max) + ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck) + model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}) + + print("============== Starting Testing ==============") + param_dict = load_checkpoint(args.ckpt_path) + load_param_into_net(network, param_dict) + ds_eval = create_dataset(os.path.join(args.data_path, "test"), + cfg.batch_size, + 1) + acc = model.eval(ds_eval, dataset_sink_mode=args.dataset_sink_mode) + print("============== Accuracy:{} ==============".format(acc)) diff --git a/example/lenet_mnist/train.py b/example/lenet_mnist/train.py new file mode 100644 index 0000000000..2fa8d3c27f --- /dev/null +++ b/example/lenet_mnist/train.py @@ -0,0 +1,58 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +######################## train lenet example ######################## +train lenet and get network model files(.ckpt) : +python train.py --data_path /YourDataPath +""" + +import os +import argparse +from config import mnist_cfg as cfg +from dataset import create_dataset +import mindspore.nn as nn +from mindspore.model_zoo.lenet import LeNet5 +from mindspore import context +from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor +from mindspore.train import Model +from mindspore.nn.metrics import Accuracy + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='MindSpore MNIST Example') + parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'], + help='device where the code will be implemented (default: Ascend)') + parser.add_argument('--data_path', type=str, default="./MNIST_Data", + help='path where the dataset is saved') + parser.add_argument('--dataset_sink_mode', type=bool, default=False, help='dataset_sink_mode is False or True') + + args = parser.parse_args() + + context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, enable_mem_reuse=False) + + network = LeNet5(cfg.num_classes) + net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean") + net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum) + config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps, + keep_checkpoint_max=cfg.keep_checkpoint_max) + ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck) + model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}) + + ds_train = create_dataset(os.path.join(args.data_path, "train"), + cfg.batch_size, + cfg.epoch_size) + print("============== Starting Training ==============") + model.train(cfg['epoch_size'], ds_train, callbacks=[ckpoint_cb, LossMonitor()], 
+ dataset_sink_mode=args.dataset_sink_mode) diff --git a/mindspore/model_zoo/alexnet.py b/mindspore/model_zoo/alexnet.py new file mode 100644 index 0000000000..8cd316229c --- /dev/null +++ b/mindspore/model_zoo/alexnet.py @@ -0,0 +1,73 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""Alexnet.""" +import mindspore.nn as nn +from mindspore.common.initializer import TruncatedNormal + +def conv(in_channels, out_channels, kernel_size, stride=1, padding=0, pad_mode="valid"): + weight = weight_variable() + return nn.Conv2d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, padding=padding, + weight_init=weight, has_bias=False, pad_mode=pad_mode) + +def fc_with_initialize(input_channels, out_channels): + weight = weight_variable() + bias = weight_variable() + return nn.Dense(input_channels, out_channels, weight, bias) + +def weight_variable(): + return TruncatedNormal(0.02) # 0.02 + + +class AlexNet(nn.Cell): + """ + Alexnet + """ + def __init__(self, num_classes=10): + super(AlexNet, self).__init__() + self.batch_size = 32 + self.conv1 = conv(3, 96, 11, stride=4) + self.conv2 = conv(96, 256, 5, pad_mode="same") + self.conv3 = conv(256, 384, 3, pad_mode="same") + self.conv4 = conv(384, 384, 3, pad_mode="same") + self.conv5 = conv(384, 256, 3, pad_mode="same") + self.relu = nn.ReLU() + self.max_pool2d = 
nn.MaxPool2d(kernel_size=3, stride=2) + self.flatten = nn.Flatten() + self.fc1 = fc_with_initialize(6*6*256, 4096) + self.fc2 = fc_with_initialize(4096, 4096) + self.fc3 = fc_with_initialize(4096, num_classes) + + def construct(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.conv2(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.conv3(x) + x = self.relu(x) + x = self.conv4(x) + x = self.relu(x) + x = self.conv5(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.flatten(x) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.relu(x) + x = self.fc3(x) + return x diff --git a/mindspore/model_zoo/lenet.py b/mindspore/model_zoo/lenet.py index a22eef1a96..6e39c439bf 100644 --- a/mindspore/model_zoo/lenet.py +++ b/mindspore/model_zoo/lenet.py @@ -13,7 +13,6 @@ # limitations under the License. # ============================================================================ """LeNet.""" -import mindspore.ops.operations as P import mindspore.nn as nn from mindspore.common.initializer import TruncatedNormal @@ -62,7 +61,7 @@ class LeNet5(nn.Cell): self.fc3 = fc_with_initialize(84, self.num_class) self.relu = nn.ReLU() self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) - self.reshape = P.Reshape() + self.flatten = nn.Flatten() def construct(self, x): x = self.conv1(x) @@ -71,7 +70,7 @@ class LeNet5(nn.Cell): x = self.conv2(x) x = self.relu(x) x = self.max_pool2d(x) - x = self.reshape(x, (self.batch_size, -1)) + x = self.flatten(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) From 2da38ad4016f69799dfdc16b4be873962e6a2ce9 Mon Sep 17 00:00:00 2001 From: lichenever Date: Tue, 31 Mar 2020 18:43:42 +0800 Subject: [PATCH 031/367] fix two cast bug in auto parallel --- .../ccsrc/parallel/step_auto_parallel.cc | 6 ++-- mindspore/ccsrc/parallel/step_parallel.cc | 23 ++++++++++--- .../parallel/test_element_wise_function.py | 33 ++++++++++++++++--- 3 files changed, 51 insertions(+), 11 deletions(-) diff --git 
a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index c3e3f5893e..400e985f18 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -346,6 +346,8 @@ bool IsAutoParallelCareNode(const CNodePtr &cnode) { } OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr &cnode) { + MS_EXCEPTION_IF_NULL(prim); + MS_EXCEPTION_IF_NULL(cnode); auto attrs = prim->attrs(); std::vector shape_list = ExtractShape(cnode); if (shape_list.empty()) { @@ -381,8 +383,8 @@ OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr & operator_info->set_outputs_dtype(cnode->Type()); operator_info->set_cnode(cnode); // If no strategy has been configured for this operator, then candidate strategies are generated for - // auto-strategy searching - if (!StrategyFound(attrs)) { + // auto-strategy searchingm if this primitive is Cast, we ignore the user-specified strategy + if (!StrategyFound(attrs) || prim->name() == CAST) { // Compute split_flag_list_, indicating which input has batch dimension. 
This is ONLY used for preparation for // BatchParallelInfo operator operator_info->ComputeBatchSplitFlagList(); diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index 21da11ec21..54ad438430 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -370,7 +370,6 @@ bool IsParallelCareNode(const CNodePtr& cnode) { if (prim == nullptr) { return false; } - auto attrs = prim->attrs(); if (IsInBlackList(prim)) { MS_LOG(INFO) << "Parallel don't care node: " << prim->name(); return false; @@ -379,10 +378,8 @@ bool IsParallelCareNode(const CNodePtr& cnode) { if (prim->name() == GET_NEXT) { return true; } - if ((prim->name() == CAST)) { - if ((!attrs.count(STRATEGY)) && (cnode->operator_info() == nullptr)) { - return false; - } + if ((prim->name() == CAST) && (cnode->operator_info() == nullptr)) { + return false; } return cnode->in_forward_flag(); @@ -653,6 +650,14 @@ LossNodeInfo GetLossNodeInfo(const AnfNodePtr& loss_node) { LossNodeInfo node_info; + // return -> cast + auto pre_cnode = pre_node->cast(); + MS_EXCEPTION_IF_NULL(pre_cnode); + auto pre_prim = GetValueNode(pre_cnode->input(0)); + if (pre_prim->name() == CAST && pre_cnode->operator_info() == nullptr) { + pre_node = pre_cnode->input(1); + } + // return -> loss if (pre_node == loss_node) { node_info.has_tuple_getitem = false; @@ -1947,6 +1952,14 @@ CNodePtr FindLossCNode(const FuncGraphPtr& func_graph) { MS_EXCEPTION_IF_NULL(current_value); PrimitivePtr current_prim = current_value->value()->cast(); MS_EXCEPTION_IF_NULL(current_prim); + + // return -> cast + if (current_prim->name() == CAST && pre_cnode->operator_info() == nullptr) { + pre_cnode = pre_cnode->input(1)->cast(); + MS_EXCEPTION_IF_NULL(pre_cnode); + current_prim = GetValueNode(pre_cnode->input(0)); + } + // notice: the GetNext op has not input if (INVALID_LOSS_OPS.find(current_prim->name()) != INVALID_LOSS_OPS.end()) { MS_LOG(INFO) << "The loss is: " << 
current_prim->name(); diff --git a/tests/ut/python/parallel/test_element_wise_function.py b/tests/ut/python/parallel/test_element_wise_function.py index dfcebdc5ab..2eb3a22ed2 100644 --- a/tests/ut/python/parallel/test_element_wise_function.py +++ b/tests/ut/python/parallel/test_element_wise_function.py @@ -192,7 +192,6 @@ def test_cast_before_mirror(): net = GradWrap(NetWithLoss(Net(strategy1))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) b = Tensor(np.ones([64, 64]), dtype=ms.float16) @@ -217,7 +216,6 @@ def test_cast_before_mirror1(): net = GradWrap(NetWithLoss(Net(strategy1))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - x = Tensor(np.ones([128, 32]), dtype=ms.float16) y = Tensor(np.ones([32, 64]), dtype=ms.float16) b = Tensor(np.ones([64, 64]), dtype=ms.float32) @@ -242,7 +240,6 @@ def test_cast_before_mirror2(): net = GradWrap(NetWithLoss(Net(strategy1))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - x = Tensor(np.ones([128, 32]), dtype=ms.float16) y = Tensor(np.ones([32, 64]), dtype=ms.float16) b = Tensor(np.ones([64, 64]), dtype=ms.float32) @@ -267,8 +264,36 @@ def test_cast_before_mirror3(): net = GradWrap(NetWithLoss(Net(strategy1))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") - x = Tensor(np.ones([128, 32]), dtype=ms.float16) y = Tensor(np.ones([32, 64]), dtype=ms.float16) b = Tensor(np.ones([64, 64]), dtype=ms.float32) _executor.compile(net, x, y, b) + + +def test_mul_two_cast(): + class Net(nn.Cell): + def __init__(self, strategy1, strategy2, strategy3): + super().__init__() + self.mul = P.Mul().set_strategy(strategy1) + self.mul2 = P.Mul().set_strategy(strategy2) + self.cast = P.Cast().set_strategy(strategy3) + self.cast2 = P.Cast().set_strategy(strategy3) + + def construct(self, x, y, b): + out = self.mul(x, y) + out = self.mul2(out, b) + out = 
self.cast(out, ms.int32) + out = self.cast2(out, ms.bool_) + return out + + context.set_auto_parallel_context(device_num=8, global_rank=0) + strategy1 = ((2, 2), (2, 2)) + strategy2 = ((8, 1), (8, 1)) + strategy3 = ((8, 1), ) + net = GradWrap(Net(strategy1, strategy2, strategy3)) + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") + + x = Tensor(np.ones([128, 32]), dtype=ms.float32) + y = Tensor(np.ones([128, 32]), dtype=ms.float32) + b = Tensor(np.ones([128, 32]), dtype=ms.float32) + _executor.compile(net, x, y, b) From 7b5640da4e6d4c8d27f34e1289de65f5b96d0737 Mon Sep 17 00:00:00 2001 From: ms_yan Date: Tue, 31 Mar 2020 18:46:59 +0800 Subject: [PATCH 032/367] Repair parameter check problem in TFRecordDataset --- mindspore/dataset/engine/validators.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index 4dc1867808..adfe54a02e 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -398,6 +398,7 @@ def check_tfrecorddataset(method): nreq_param_int = ['num_samples', 'num_parallel_workers', 'num_shards', 'shard_id'] nreq_param_list = ['columns_list'] + nreq_param_bool = ['shard_equal_rows'] # check dataset_files; required argument dataset_files = param_dict.get('dataset_files') @@ -410,6 +411,10 @@ def check_tfrecorddataset(method): check_param_type(nreq_param_list, param_dict, list) + check_param_type(nreq_param_bool, param_dict, bool) + + check_sampler_shuffle_shard_options(param_dict) + return method(*args, **kwargs) return new_method From feb1c36811ce59a41f8cabf359468f76ad21fbe2 Mon Sep 17 00:00:00 2001 From: panyifeng Date: Tue, 31 Mar 2020 14:55:29 +0800 Subject: [PATCH 033/367] fix parallel related valuenode merging error --- mindspore/ccsrc/parallel/step_parallel.cc | 1 + mindspore/ccsrc/pipeline/action.cc | 28 +++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git 
a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index 927acea705..65e5cb976a 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -112,6 +112,7 @@ void InsertNode(const Operator& op, const CNodePtr& node, size_t index, const An MS_EXCEPTION_IF_NULL(new_node_value); PrimitivePtr new_node_prim = new_node_value->value()->cast(); new_node_prim->set_instance_name(instance_name); + new_node_prim->set_attr("keep_value_node_input", MakeValue(true)); new_node->set_scope(scope); node_input[0]->set_scope(scope); manager->SetEdge(node, SizeToInt(index), new_node); diff --git a/mindspore/ccsrc/pipeline/action.cc b/mindspore/ccsrc/pipeline/action.cc index f3742ab654..392602f419 100644 --- a/mindspore/ccsrc/pipeline/action.cc +++ b/mindspore/ccsrc/pipeline/action.cc @@ -276,6 +276,31 @@ bool ExecuteAction(const ResourcePtr& res) { return true; } +// The parallel primitive related valuenode might be partitioned so that its value changes by device, +// that will result in a syncronization error due to different executing order. +// Here we temporarily avoid the problem by skipping valuenode merging used by parallel related primitive, +// the final solution will be proposed later as a parallel feature. 
+bool KeepValueNodeDuplication(const AnfNodePtr& value_node, const ResourcePtr& res) { + auto& node_users = res->manager()->node_users(); + auto& users = node_users[value_node]; + auto used_by_keep_value_prim = + std::any_of(users.begin(), users.end(), [](const std::pair& user) -> bool { + MS_EXCEPTION_IF_NULL(user.first); + auto cnode = user.first->cast(); + if (cnode == nullptr) { + return false; + } + auto prim_node = cnode->input(0); + if (IsValueNode(prim_node)) { + auto prim = GetValue(prim_node->cast()->value()); + // value_node is referenced by some parallel primitive + return prim->HasAttr("keep_value_node_input"); + } + return false; + }); + return used_by_keep_value_prim; +} + bool RemoveValueNodeDuplicationsAction(const ResourcePtr& res) { if (res->func_graph() == nullptr) { MS_LOG(EXCEPTION) << "Remove value node duplications error."; @@ -287,6 +312,9 @@ bool RemoveValueNodeDuplicationsAction(const ResourcePtr& res) { HashCache hash_cache; HashValue hashes; for (const auto& value_pair : value_nodes) { + if (KeepValueNodeDuplication(value_pair.first, res)) { + continue; + } TryToDoReplace(manager.get(), value_pair.first, &hash_cache, &hashes); } return true; From 624ab97de6f28444b7bff27f743deddcc0a80a46 Mon Sep 17 00:00:00 2001 From: ms_yan <6576637+ms_yan@user.noreply.gitee.com> Date: Tue, 31 Mar 2020 20:13:49 +0800 Subject: [PATCH 034/367] repair parameter check problem in random_resize_crop and random_crop --- mindspore/dataset/transforms/vision/validators.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mindspore/dataset/transforms/vision/validators.py b/mindspore/dataset/transforms/vision/validators.py index 17f50e068d..443232d780 100644 --- a/mindspore/dataset/transforms/vision/validators.py +++ b/mindspore/dataset/transforms/vision/validators.py @@ -41,7 +41,7 @@ def check_crop_size(size): else: raise TypeError("Size should be a single integer or a list/tuple (h, w) of length 2.") for value in size: - check_value(value, (1, 
INT32_MAX)) + check_pos_int32(value) return size @@ -239,6 +239,7 @@ def check_random_resize_crop(method): kwargs["scale"] = scale if ratio is not None: check_range(ratio, [0, FLOAT_MAX_INTEGER]) + check_positive(ratio[0]) kwargs["ratio"] = ratio if interpolation is not None: check_inter_mode(interpolation) From cf40305bf05bd5a1e4a37857e325b25b77730940 Mon Sep 17 00:00:00 2001 From: zhaozhenlong Date: Tue, 31 Mar 2020 15:15:08 +0800 Subject: [PATCH 035/367] add operator SpaceToBatch and BatchToSpace for ge --- mindspore/ccsrc/transform/convert.cc | 6 +- mindspore/ccsrc/transform/op_adapter.h | 22 ++++ mindspore/ccsrc/transform/op_declare.cc | 13 +++ mindspore/ccsrc/transform/op_declare.h | 4 + mindspore/ops/_grad/grad_array_ops.py | 20 ++++ mindspore/ops/operations/__init__.py | 4 +- mindspore/ops/operations/array_ops.py | 141 ++++++++++++++++++++++++ tests/ut/python/ops/test_ops.py | 20 ++++ 8 files changed, 228 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index c8c6abea4c..c975b18812 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -180,6 +180,8 @@ const char kNamePrint[] = "Print"; const char kNameApplyFtrl[] = "ApplyFtrl"; const char kNameDiag[] = "Diag"; const char kNameDiagPart[] = "DiagPart"; +const char kNameSpaceToBatch[] = "SpaceToBatch"; +const char kNameBatchToSpace[] = "BatchToSpace"; // -----------------OpAdapter initialization-------------- std::unordered_map &DfGraphConvertor::get_adpt_map() { @@ -361,7 +363,9 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameRound), ADPT_DESC(Round)}, {string(kNameApplyFtrl), ADPT_DESC(ApplyFtrl)}, {string(kNameDiag), ADPT_DESC(Diag)}, - {string(kNameDiagPart), ADPT_DESC(DiagPart)}}; + {string(kNameDiagPart), ADPT_DESC(DiagPart)}, + {string(kNameSpaceToBatch), ADPT_DESC(SpaceToBatchD)}, + {string(kNameBatchToSpace), ADPT_DESC(BatchToSpaceD)}}; #ifdef ENABLE_GE 
adpt_map[string(kNamePrint)] = ADPT_DESC(Print); #endif diff --git a/mindspore/ccsrc/transform/op_adapter.h b/mindspore/ccsrc/transform/op_adapter.h index aa466adbb8..3dd299f83d 100644 --- a/mindspore/ccsrc/transform/op_adapter.h +++ b/mindspore/ccsrc/transform/op_adapter.h @@ -744,6 +744,28 @@ class OpAdapter : public BaseOpAdapter { return list; } + static std::vector ConvertAny(const ValuePtr& value, const AnyTraits>>, + const AnyTraits>) { + MS_EXCEPTION_IF_NULL(value); + MS_LOG(DEBUG) << "Value: " << value->type_name(); + if (!value->isa()) { + MS_LOG(EXCEPTION) << "Value should be ValueList, but got " << value->type_name(); + } + auto vec = value->cast(); + std::vector list; + for (auto& it : vec->value()) { + MS_EXCEPTION_IF_NULL(it); + if (!it->isa()) { + MS_LOG(EXCEPTION) << "It should be ValueList, but got " << it->type_name(); + } + auto sub_vector = it->cast(); + for (auto& item : sub_vector->value()) { + list.push_back(static_cast(GetValue(item))); + } + } + return list; + } + static std::vector ConvertAny(const ValuePtr& value, const AnyTraits>, const AnyTraits>) { MS_EXCEPTION_IF_NULL(value); diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index af0ce29f85..cc63bacd8a 100755 --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -1183,6 +1183,19 @@ INPUT_MAP(DiagPart) = {{1, INPUT_DESC(x)}}; ATTR_MAP(DiagPart) = EMPTY_ATTR_MAP; OUTPUT_MAP(DiagPart) = {{0, OUTPUT_DESC(y)}}; +// SpaceToBatchD +INPUT_MAP(SpaceToBatchD) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(SpaceToBatchD) = { + {"block_size", ATTR_DESC(block_size, AnyTraits())}, + {"paddings", ATTR_DESC(paddings, AnyTraits>>(), AnyTraits>())}}; +OUTPUT_MAP(SpaceToBatchD) = {{0, OUTPUT_DESC(y)}}; + +// BatchToSpaceD +INPUT_MAP(BatchToSpaceD) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(BatchToSpaceD) = { + {"block_size", ATTR_DESC(block_size, AnyTraits())}, + {"crops", ATTR_DESC(crops, AnyTraits>>(), AnyTraits>())}}; 
+OUTPUT_MAP(BatchToSpaceD) = {{0, OUTPUT_DESC(y)}}; #ifdef ENABLE_GE // Print INPUT_MAP(Print) = EMPTY_INPUT_MAP; diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 59014b8b7d..978828e16d 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -439,6 +439,10 @@ DECLARE_OP_ADAPTER(Diag) DECLARE_OP_USE_OUTPUT(Diag) DECLARE_OP_ADAPTER(DiagPart) DECLARE_OP_USE_OUTPUT(DiagPart) +DECLARE_OP_ADAPTER(SpaceToBatchD) +DECLARE_OP_USE_OUTPUT(SpaceToBatchD) +DECLARE_OP_ADAPTER(BatchToSpaceD) +DECLARE_OP_USE_OUTPUT(BatchToSpaceD) #ifdef ENABLE_GE DECLARE_OP_ADAPTER(Print) DECLARE_OP_USE_DYN_INPUT(Print) diff --git a/mindspore/ops/_grad/grad_array_ops.py b/mindspore/ops/_grad/grad_array_ops.py index 79841cf27a..81d38a1e1e 100644 --- a/mindspore/ops/_grad/grad_array_ops.py +++ b/mindspore/ops/_grad/grad_array_ops.py @@ -430,3 +430,23 @@ def get_bprop_diag_part(self): return (op(dout),) return bprop + + +@bprop_getters.register(P.SpaceToBatch) +def get_bprop_space_to_batch(self): + """Generate bprop for SpaceToBatch""" + space_to_batch_grad = P.BatchToSpace(self.block_size, self.paddings) + def bprop(x, out, dout): + dx = space_to_batch_grad(dout) + return (dx,) + return bprop + + +@bprop_getters.register(P.BatchToSpace) +def get_bprop_batch_to_space(self): + """Generate bprop for BatchToSpace""" + batch_to_space_grad = P.SpaceToBatch(self.block_size, self.crops) + def bprop(x, out, dout): + dx = batch_to_space_grad(dout) + return (dx,) + return bprop diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 295e2edaf8..899b2f8d0f 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -29,7 +29,7 @@ from .array_ops import (Argmax, Argmin, Cast, ConcatOffset, Concat, Shape, Size, Slice, Split, Squeeze, StridedSlice, Tile, Transpose, TruncatedNormal, TupleToArray, - UnsortedSegmentSum, SpaceToDepth, DepthToSpace) + 
UnsortedSegmentSum, SpaceToDepth, DepthToSpace, SpaceToBatch, BatchToSpace) from .comm_ops import (AllGather, AllReduce, _AlltoAll, ReduceScatter, Broadcast, _MirrorOperator, ReduceOp, _VirtualDataset, _VirtualDiv, _GetTensorSlice) @@ -225,6 +225,8 @@ __all__ = [ "LARSUpdate", "Round", "ApplyFtrl", + "SpaceToBatch", + "BatchToSpace" ] __all__.sort() diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 6740f172b4..8585f873e9 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -20,6 +20,7 @@ import copy import functools +import itertools import numbers import numpy as np @@ -2020,3 +2021,143 @@ class DepthToSpace(PrimitiveWithInfer): def infer_dtype(self, x_dtype): validator.check_subclass("x_dtype", x_dtype, mstype.tensor) return x_dtype + + +class SpaceToBatch(PrimitiveWithInfer): + r""" + Divide spatial dimensions into blocks and combine the block size with the original batch. + + This operation will divide spatial dimensions (H, W) into blocks with block_size, the output tensor's H and W + dimension is the corresponding number of blocks after division. The output tensor's batch dimension is the + product of the original batch and the square of block_size. Prior to division into blocks, the spatial dimensions + of the input are zero padded according to paddings if necessary. + + Args: + block_size (int): The block size of dividing block with value >= 1. + paddings (list): The padding value for H and W dimension, containing 2 sub list, each containing 2 int value. + All values must be >= 0. paddings[i] specifies the paddings for spatial dimension i, which corresponds to + input dimension i+2. It is required that input_shape[i+2]+paddings[i][0]+paddings[i][1] is divisible + by block_size. + + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, the output tensor with the same type as input. 
Assume input shape is :math:`(n, c, h, w)` with + :math:`block\_size` and :math:`padddings`. The output tensor shape will be :math:`(n', c', h', w')`, where + + :math:`n' = n*(block\_size*block\_size)` + + :math:`c' = c` + + :math:`h' = (h+paddings[0][0]+paddings[0][1])//block\_size` + + :math:`w' = (w+paddings[1][0]+paddings[1][1])//block\_size` + + Examples: + >>> block_size = 2 + >>> paddings = [[0, 0], [0, 0]] + >>> space_to_batch = P.SpaceToBatch(block_size, paddings) + >>> x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mstype.float32) + >>> space_to_batch(x) + [[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]] + + """ + @prim_attr_register + def __init__(self, block_size, paddings): + """Init SpaceToBatch""" + validator.check_type('block_size', block_size, [int]) + validator.check('block_size', block_size, '', 1, Rel.GT) + self.block_size = block_size + validator.check('paddings shape', np.array(paddings).shape, '', (2, 2)) + for elem in itertools.chain(*paddings): + validator.check_type('paddings element', elem, [int]) + self.paddings = paddings + + def infer_dtype(self, x_dtype): + validator.check_subclass("input_x", x_dtype, mstype.tensor) + validator.check_typename('input_x', x_dtype, mstype.number_type) + return x_dtype + + def infer_shape(self, x_shape): + validator.check('rank of input_x', len(x_shape), '', 4) + out_shape = copy.deepcopy(x_shape) + for i in range(2): + padded = out_shape[i+2] + self.paddings[i][0] + \ + self.paddings[i][1] + if padded % self.block_size != 0: + raise ValueError(f'padded[{i}] {padded} should be divisible by ' + f'block_size {self.block_size}') + out_shape[i+2] = padded // self.block_size + out_shape[0] *= self.block_size * self.block_size + return out_shape + + +class BatchToSpace(PrimitiveWithInfer): + r""" + Divide batch dimension with blocks and interleaves these blocks back into spatial dimensions. 
+ + This operation will divide batch dimension N into blocks with block_size, the output tensor's N dimension + is the corresponding number of blocks after division. The output tensor's H, W dimension is product of original H, W + dimension and block_size with given amount to crop from dimension, respectively. + + Args: + block_size (int): The block size of dividing block with value >= 1. + crops (list): The crop value for H and W dimension, containing 2 sub list, each containing 2 int value. + All values must be >= 0. crops[i] specifies the crop values for spatial dimension i, which corresponds to + input dimension i+2. It is required that input_shape[i+2]*block_size >= crops[i][0]+crops[i][1]. + + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, the output tensor with the same type as input. Assume input shape is (n, c, h, w) with block_size + and crops. The output shape will be (n', c', h', w'), where + + :math:`n' = n//(block\_size*block\_size)` + + :math:`c' = c` + + :math:`h' = h*block\_size-crops[0][0]-crops[0][1]` + + :math:`w' = w*block\_size-crops[1][0]-crops[1][1]` + + Examples: + >>> block_size = 2 + >>> crops = [[0, 0], [0, 0]] + >>> op = P.BatchToSpace(block_size, crops) + >>> x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mstype.float32) + >>> output = op(x) + [[[[1., 2.], [3., 4.]]]] + + """ + @prim_attr_register + def __init__(self, block_size, crops): + """Init BatchToSpace""" + validator.check_type('block_size', block_size, [int]) + validator.check('block_size', block_size, '', 1, Rel.GT) + self.block_size = block_size + validator.check('crops shape', np.array(crops).shape, '', (2, 2)) + for elem in itertools.chain(*crops): + validator.check_type('crops element', elem, [int]) + self.crops = crops + + def infer_dtype(self, x_dtype): + validator.check_subclass("input_x", x_dtype, mstype.tensor) + validator.check_typename('input_x', x_dtype, mstype.number_type) + return x_dtype + + def infer_shape(self, 
x_shape): + validator.check('rank of input_x', len(x_shape), '', 4) + out_shape = copy.deepcopy(x_shape) + for i in range(2): + x_block_prod = out_shape[i+2] * self.block_size + crops_sum = self.crops[i][0] + self.crops[i][1] + validator.check("x block shape prod", x_block_prod, 'crops sum', crops_sum, Rel.GT) + out_shape[i+2] = x_block_prod - crops_sum + block_size_prod = self.block_size * self.block_size + if out_shape[0] % block_size_prod != 0: + raise ValueError(f'input_x dimension 0 {out_shape[0]} should be divisible by ' + f'block_size_prod {block_size_prod}') + out_shape[0] = out_shape[0] // block_size_prod + return out_shape diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index e917c12748..f1d365e9cf 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -952,6 +952,26 @@ test_case_array_ops = [ 'desc_inputs': [[4, 4]], 'desc_bprop': [[4]], }), + ('SpaceToBatch_1', { + 'block': P.SpaceToBatch(2, [[0, 0], [0, 0]]), + 'desc_inputs': [[1, 3, 2, 2]], + 'desc_bprop': [[4, 3, 1, 1]], + }), + ('SpaceToBatch_2', { + 'block': P.SpaceToBatch(2, [[1, 1], [0, 4]]), + 'desc_inputs': [[1, 3, 2, 2]], + 'desc_bprop': [[4, 3, 2, 4]], + }), + ('BatchToSpace_1', { + 'block': P.BatchToSpace(2, [[0, 0], [0, 0]]), + 'desc_inputs': [[4, 3, 1, 1]], + 'desc_bprop': [[1, 3, 2, 2]], + }), + ('BatchToSpace_2', { + 'block': P.BatchToSpace(2, [[0, 0], [0, 1]]), + 'desc_inputs': [[4, 3, 1, 1]], + 'desc_bprop': [[1, 3, 2, 1]], + }), ] test_case_other_ops = [ From 2f031e154033f3dc82c03fbf084c1ba3ce7bc127 Mon Sep 17 00:00:00 2001 From: mxm <83028974@qq.com> Date: Tue, 31 Mar 2020 12:08:02 +0800 Subject: [PATCH 036/367] fixed: PrimitiveToInferImplMap map is global, and key of the map PrimitivePtr also a global variable. If key is initialized later than the map initialized during compilation, will cause the primitive map initialize failed. Variable initialization order is not guaranteed during compilation. 
--- .../ccsrc/pipeline/static_analysis/prim.cc | 248 +++++++++--------- .../static_analysis/static_analysis.cc | 49 ++-- 2 files changed, 148 insertions(+), 149 deletions(-) diff --git a/mindspore/ccsrc/pipeline/static_analysis/prim.cc b/mindspore/ccsrc/pipeline/static_analysis/prim.cc index 98d82de5d5..e06d58466d 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/prim.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/prim.cc @@ -42,92 +42,95 @@ namespace mindspore { namespace abstract { -PrimitiveEvalImplMap PrimitiveToInferImplMap = { - // Statements - {prim::kPrimReturn, {InferImplReturn, true}}, - {prim::kPrimTypeOf, {InferImplTypeof, false}}, - {prim::kPrimHasType, {InferImplHasType, false}}, - {prim::kPrimDot, {InferImplDot, true}}, - {prim::kPrimSwitch, {InferImplSwitch, true}}, - {prim::kPrimIs_, {InferImplIs_, true}}, - {prim::kPrimIsNot, {InferImplIsNot, true}}, - // Maths - {prim::kPrimMaximumGrad, {InferImplMinOrMaxGrad, true}}, - {prim::kPrimMinimumGrad, {InferImplMinOrMaxGrad, true}}, - // Array - {prim::kPrimScalarToArray, {InferImplScalarToArray, true}}, - {prim::kPrimArrayToScalar, {InferImplArrayToScalar, true}}, - {prim::kPrimBroadcastShape, {InferImplBroadCastShape, true}}, - {prim::kPrimShape, {InferImplShape, true}}, - {prim::kPrimPack, {InferImplPack, true}}, - // Structure - {prim::kPrimMakeTuple, {InferImplMakeTuple, true}}, - {prim::kPrimMakeList, {InferImplMakeList, true}}, - {prim::kPrimMakeDict, {InferImplMakeDict, true}}, - {prim::kPrimMakeSlice, {InferImplMakeSlice, true}}, - {prim::kPrimMakeKeywordArg, {InferImplMakeKwarg, true}}, - {prim::kPrimExtractKeywordArg, {InferImplExtractKwarg, true}}, - {prim::kPrimMakeRecord, {InferImplMakeRecord, false}}, - {prim::kPrimTupleGetItem, {InferImplTupleGetItem, true}}, - {prim::kPrimListGetItem, {InferImplListGetItem, true}}, - {prim::kPrimTupleSetItem, {InferImplTupleSetItem, true}}, - {prim::kPrimListSetItem, {InferImplListSetItem, true}}, - {prim::kPrimDictGetItem, {InferImplDictGetItem, 
true}}, - {prim::kPrimDictSetItem, {InferImplDictSetItem, true}}, - {prim::kPrimListAppend, {InferImplListAppend, true}}, - {prim::kPrimTupleLen, {InferImplTupleLen, true}}, - {prim::kPrimListLen, {InferImplListLen, true}}, - {prim::kPrimArrayLen, {InferImplArrayLen, true}}, - {prim::kPrimListMap, {InferImplListMap, false}}, - {prim::kPrimListReduce, {InferImplListReduce, false}}, - {prim::kPrimTupleReversed, {InferImplTupleReversed, false}}, - {prim::kPrimReducedShape, {InferImplReduceShape, false}}, - {prim::kPrimTupleDiv, {InferImplTupleDiv, false}}, - {prim::kPrimTupleToArray, {InferImplTuple2Array, false}}, - {prim::kPrimShapeMul, {InferImplShapeMul, false}}, - {prim::kPrimTupleEqual, {InferImplTupleEqual, false}}, - {prim::kPrimListEqual, {InferImplListEqual, false}}, - {prim::kPrimMakeRange, {InferImplMakeRange, false}}, - {prim::kPrimStopGradient, {InferImplStopGradient, false}}, - {prim::kPrimStringEqual, {InferImplStringEqual, false}}, - {prim::kPrimDictLen, {InferImplDictLen, false}}, - // NN - {prim::kPrimPooling, {InferImplPooling, true}}, - {prim::kPrimPoolingGrad, {InferImplPoolingGrad, true}}, - {prim::kPrimFusedBatchNorm, {InferImplFusedBatchNorm, true}}, - {prim::kPrimFusedBatchNormGrad, {InferImplFusedBatchNormGrad, true}}, - {prim::kPrimReluGrad, {InferImplReluGrad, true}}, - {prim::kPrimConv2DBackpropInput, {InferImplConv2DBackpropInput, true}}, - {prim::kPrimConv2DBackpropFilter, {InferImplConv2DBackpropFilter, true}}, - {prim::kPrimBiasAddGrad, {InferImplBiasAddGrad, true}}, - {prim::kPrimRelu, {InferImplRelu, true}}, - {prim::kPrimZerosLikeTensor, {InferImplZerosLikeTensor, true}}, - {prim::kPrimFakeBprop, {InferImplFakeBprop, false}}, - {prim::kPrimLayerNorm, {InferImplLayerNorm, true}}, - {prim::kPrimLayerNormGrad, {InferImplLayerNormGrad, true}}, - {prim::kPrimDropoutGenMask, {InferImplDropoutGenMask, true}}, - // Others - {prim::kPrimIdentity, {InferImplIdentity, true}}, - // Set impl to null as it will use PartialEvaluator; - 
{prim::kPrimPartial, {nullptr, true}}, - {prim::kPrimJ, {InferImplJ, false}}, - {prim::kPrimEnvGetItem, {InferImplEnvGetItem, true}}, - {prim::kPrimEnvSetItem, {InferImplEnvSetItem, true}}, - {prim::kPrimEnvAdd, {InferImplEnvAdd, true}}, - {prim::kPrimMakeRefKey, {InferImplMakeRefKey, true}}, - {prim::kPrimMakeRef, {InferImplMakeRef, true}}, - {prim::kPrimGetRefKey, {InferImplGetRefKey, true}}, - {prim::kPrimGetRefValue, {InferImplGetRefValue, true}}, - {prim::kPrimGetRefOrigin, {InferImplGetRefOrigin, true}}, - {prim::kPrimStateSetItem, {InferImplStateSetItem, true}}, - {prim::kPrimDepend, {InferImplDepend, true}}, - {prim::kPrimBroadcastGradientArgs, {InferImplBroadcastGradientArgs, false}}, - {prim::kPrimControlDepend, {InferImplControlDepend, true}}, - // Debug - {prim::kPrimScalarSummary, {InferImplScalarSummary, true}}, - {prim::kPrimImageSummary, {InferImplTensorSummary, true}}, - {prim::kPrimTensorSummary, {InferImplTensorSummary, true}}, -}; +PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() { + static PrimitiveEvalImplMap prim_eval_implement_map = { + // Statements + {prim::kPrimReturn, {InferImplReturn, true}}, + {prim::kPrimTypeOf, {InferImplTypeof, false}}, + {prim::kPrimHasType, {InferImplHasType, false}}, + {prim::kPrimDot, {InferImplDot, true}}, + {prim::kPrimSwitch, {InferImplSwitch, true}}, + {prim::kPrimIs_, {InferImplIs_, true}}, + {prim::kPrimIsNot, {InferImplIsNot, true}}, + // Maths + {prim::kPrimMaximumGrad, {InferImplMinOrMaxGrad, true}}, + {prim::kPrimMinimumGrad, {InferImplMinOrMaxGrad, true}}, + // Array + {prim::kPrimScalarToArray, {InferImplScalarToArray, true}}, + {prim::kPrimArrayToScalar, {InferImplArrayToScalar, true}}, + {prim::kPrimBroadcastShape, {InferImplBroadCastShape, true}}, + {prim::kPrimShape, {InferImplShape, true}}, + {prim::kPrimPack, {InferImplPack, true}}, + // Structure + {prim::kPrimMakeTuple, {InferImplMakeTuple, true}}, + {prim::kPrimMakeList, {InferImplMakeList, true}}, + {prim::kPrimMakeDict, 
{InferImplMakeDict, true}}, + {prim::kPrimMakeSlice, {InferImplMakeSlice, true}}, + {prim::kPrimMakeKeywordArg, {InferImplMakeKwarg, true}}, + {prim::kPrimExtractKeywordArg, {InferImplExtractKwarg, true}}, + {prim::kPrimMakeRecord, {InferImplMakeRecord, false}}, + {prim::kPrimTupleGetItem, {InferImplTupleGetItem, true}}, + {prim::kPrimListGetItem, {InferImplListGetItem, true}}, + {prim::kPrimTupleSetItem, {InferImplTupleSetItem, true}}, + {prim::kPrimListSetItem, {InferImplListSetItem, true}}, + {prim::kPrimDictGetItem, {InferImplDictGetItem, true}}, + {prim::kPrimDictSetItem, {InferImplDictSetItem, true}}, + {prim::kPrimListAppend, {InferImplListAppend, true}}, + {prim::kPrimTupleLen, {InferImplTupleLen, true}}, + {prim::kPrimListLen, {InferImplListLen, true}}, + {prim::kPrimArrayLen, {InferImplArrayLen, true}}, + {prim::kPrimListMap, {InferImplListMap, false}}, + {prim::kPrimListReduce, {InferImplListReduce, false}}, + {prim::kPrimTupleReversed, {InferImplTupleReversed, false}}, + {prim::kPrimReducedShape, {InferImplReduceShape, false}}, + {prim::kPrimTupleDiv, {InferImplTupleDiv, false}}, + {prim::kPrimTupleToArray, {InferImplTuple2Array, false}}, + {prim::kPrimShapeMul, {InferImplShapeMul, false}}, + {prim::kPrimTupleEqual, {InferImplTupleEqual, false}}, + {prim::kPrimListEqual, {InferImplListEqual, false}}, + {prim::kPrimMakeRange, {InferImplMakeRange, false}}, + {prim::kPrimStopGradient, {InferImplStopGradient, false}}, + {prim::kPrimStringEqual, {InferImplStringEqual, false}}, + {prim::kPrimDictLen, {InferImplDictLen, false}}, + // NN + {prim::kPrimPooling, {InferImplPooling, true}}, + {prim::kPrimPoolingGrad, {InferImplPoolingGrad, true}}, + {prim::kPrimFusedBatchNorm, {InferImplFusedBatchNorm, true}}, + {prim::kPrimFusedBatchNormGrad, {InferImplFusedBatchNormGrad, true}}, + {prim::kPrimReluGrad, {InferImplReluGrad, true}}, + {prim::kPrimConv2DBackpropInput, {InferImplConv2DBackpropInput, true}}, + {prim::kPrimConv2DBackpropFilter, 
{InferImplConv2DBackpropFilter, true}}, + {prim::kPrimBiasAddGrad, {InferImplBiasAddGrad, true}}, + {prim::kPrimRelu, {InferImplRelu, true}}, + {prim::kPrimZerosLikeTensor, {InferImplZerosLikeTensor, true}}, + {prim::kPrimFakeBprop, {InferImplFakeBprop, false}}, + {prim::kPrimLayerNorm, {InferImplLayerNorm, true}}, + {prim::kPrimLayerNormGrad, {InferImplLayerNormGrad, true}}, + {prim::kPrimDropoutGenMask, {InferImplDropoutGenMask, true}}, + // Others + {prim::kPrimIdentity, {InferImplIdentity, true}}, + // Set impl to null as it will use PartialEvaluator; + {prim::kPrimPartial, {nullptr, true}}, + {prim::kPrimJ, {InferImplJ, false}}, + {prim::kPrimEnvGetItem, {InferImplEnvGetItem, true}}, + {prim::kPrimEnvSetItem, {InferImplEnvSetItem, true}}, + {prim::kPrimEnvAdd, {InferImplEnvAdd, true}}, + {prim::kPrimMakeRefKey, {InferImplMakeRefKey, true}}, + {prim::kPrimMakeRef, {InferImplMakeRef, true}}, + {prim::kPrimGetRefKey, {InferImplGetRefKey, true}}, + {prim::kPrimGetRefValue, {InferImplGetRefValue, true}}, + {prim::kPrimGetRefOrigin, {InferImplGetRefOrigin, true}}, + {prim::kPrimStateSetItem, {InferImplStateSetItem, true}}, + {prim::kPrimDepend, {InferImplDepend, true}}, + {prim::kPrimBroadcastGradientArgs, {InferImplBroadcastGradientArgs, false}}, + {prim::kPrimControlDepend, {InferImplControlDepend, true}}, + // Debug + {prim::kPrimScalarSummary, {InferImplScalarSummary, true}}, + {prim::kPrimImageSummary, {InferImplTensorSummary, true}}, + {prim::kPrimTensorSummary, {InferImplTensorSummary, true}}, + }; + return prim_eval_implement_map; +} using mindspore::parse::PyObjectWrapper; @@ -907,10 +910,7 @@ class PartialEvaluator : public Evaluator { new_nodes_inputs[1] = NewValueNode(new_signature_value); FuncGraphPtr func_graph = cnode->func_graph(); - ScopePtr scope = kDefaultScope; - if (out_conf != nullptr) { - scope = out_conf->node()->scope(); - } + ScopePtr scope = out_conf->node()->scope(); ScopeGuard scope_guard(scope); CNodePtr new_cnode = 
func_graph->NewCNode(new_nodes_inputs); @@ -927,39 +927,41 @@ struct PrimitiveImplInferValue { }; using PrimitiveToImplMap = std::unordered_map; - -PrimitiveToImplMap UniformPrimitiveToImplMapValue = { - {prim::kPrimScalarAdd, {prim::ScalarAdd, true, nullptr, true}}, - {prim::kPrimScalarSub, {prim::ScalarSub, true, nullptr, true}}, - {prim::kPrimScalarMul, {prim::ScalarMul, true, nullptr, true}}, - {prim::kPrimScalarDiv, {prim::ScalarDiv, true, nullptr, true}}, - {prim::kPrimScalarMod, {prim::ScalarMod, true, nullptr, true}}, - {prim::kPrimScalarUadd, {prim::ScalarUAdd, true, nullptr, true}}, - {prim::kPrimScalarUsub, {prim::ScalarUSub, true, nullptr, true}}, - {prim::kPrimScalarLog, {prim::ScalarLog, true, nullptr, true}}, - {prim::kPrimScalarEq, {prim::ScalarEq, true, std::make_shared(), true}}, - {prim::kPrimScalarLt, {prim::ScalarLt, true, std::make_shared(), true}}, - {prim::kPrimScalarGt, {prim::ScalarGt, true, std::make_shared(), true}}, - {prim::kPrimScalarNe, {prim::ScalarNe, true, std::make_shared(), true}}, - {prim::kPrimScalarLe, {prim::ScalarLe, true, std::make_shared(), true}}, - {prim::kPrimScalarGe, {prim::ScalarGe, true, std::make_shared(), true}}, - {prim::kPrimBoolNot, {prim::BoolNot, true, std::make_shared(), true}}, - {prim::kPrimBoolAnd, {prim::BoolAnd, true, std::make_shared(), true}}, - {prim::kPrimBoolEq, {prim::BoolEq, true, std::make_shared(), true}}, - {prim::kPrimBoolOr, {prim::BoolOr, true, std::make_shared(), true}}, -}; +PrimitiveToImplMap &GetUniformPrimitiveToImplMap() { + static PrimitiveToImplMap uniform_prim_implement_map = { + {prim::kPrimScalarAdd, {prim::ScalarAdd, true, nullptr, true}}, + {prim::kPrimScalarSub, {prim::ScalarSub, true, nullptr, true}}, + {prim::kPrimScalarMul, {prim::ScalarMul, true, nullptr, true}}, + {prim::kPrimScalarDiv, {prim::ScalarDiv, true, nullptr, true}}, + {prim::kPrimScalarMod, {prim::ScalarMod, true, nullptr, true}}, + {prim::kPrimScalarUadd, {prim::ScalarUAdd, true, nullptr, true}}, + 
{prim::kPrimScalarUsub, {prim::ScalarUSub, true, nullptr, true}}, + {prim::kPrimScalarLog, {prim::ScalarLog, true, nullptr, true}}, + {prim::kPrimScalarEq, {prim::ScalarEq, true, std::make_shared(), true}}, + {prim::kPrimScalarLt, {prim::ScalarLt, true, std::make_shared(), true}}, + {prim::kPrimScalarGt, {prim::ScalarGt, true, std::make_shared(), true}}, + {prim::kPrimScalarNe, {prim::ScalarNe, true, std::make_shared(), true}}, + {prim::kPrimScalarLe, {prim::ScalarLe, true, std::make_shared(), true}}, + {prim::kPrimScalarGe, {prim::ScalarGe, true, std::make_shared(), true}}, + {prim::kPrimBoolNot, {prim::BoolNot, true, std::make_shared(), true}}, + {prim::kPrimBoolAnd, {prim::BoolAnd, true, std::make_shared(), true}}, + {prim::kPrimBoolEq, {prim::BoolEq, true, std::make_shared(), true}}, + {prim::kPrimBoolOr, {prim::BoolOr, true, std::make_shared(), true}}, + }; + return uniform_prim_implement_map; +} PrimEvaluatorMap PrimEvaluatorConstructors = PrimEvaluatorMap(); std::mutex PrimEvaluatorConstructorMutex; -void InitPrimEvaluatorConstructors(const PrimitiveEvalImplMap &prim_eval_impl_map) { +void InitPrimEvaluatorConstructors() { PrimEvaluatorMap &constructor = PrimEvaluatorConstructors; - for (const auto &iter : prim_eval_impl_map) { + for (const auto &iter : GetPrimitiveToEvalImplMap()) { constructor[iter.first] = InitStandardPrimEvaluator(iter.first, iter.second.impl_); } - for (const auto &iter : UniformPrimitiveToImplMapValue) { + for (const auto &iter : GetUniformPrimitiveToImplMap()) { constructor[iter.first] = InitUniformPrimEvaluator(iter.first, iter.second.impl_, iter.second.eval_value_, iter.second.specify_out_type_); } @@ -974,20 +976,20 @@ void InitPrimEvaluatorConstructors(const PrimitiveEvalImplMap &prim_eval_impl_ma void ClearPrimEvaluatorMap() { PrimEvaluatorConstructors.clear(); - PrimitiveToInferImplMap.clear(); - UniformPrimitiveToImplMapValue.clear(); + GetPrimitiveToEvalImplMap().clear(); + GetUniformPrimitiveToImplMap().clear(); } bool 
IsInWhiteList(const PrimitivePtr primitive) { MS_EXCEPTION_IF_NULL(primitive); - auto iter = PrimitiveToInferImplMap.find(primitive); - if (iter != PrimitiveToInferImplMap.end()) { + auto iter = GetPrimitiveToEvalImplMap().find(primitive); + if (iter != GetPrimitiveToEvalImplMap().end()) { return iter->second.in_white_list_; } - auto uni_iter = UniformPrimitiveToImplMapValue.find(primitive); - if (uni_iter != UniformPrimitiveToImplMapValue.end()) { + auto uni_iter = GetUniformPrimitiveToImplMap().find(primitive); + if (uni_iter != GetUniformPrimitiveToImplMap().end()) { return uni_iter->second.in_white_list_; } @@ -996,8 +998,8 @@ bool IsInWhiteList(const PrimitivePtr primitive) { StandardPrimitiveEvalImpl GetPrimitiveInferImpl(const PrimitivePtr &primitive) { MS_EXCEPTION_IF_NULL(primitive); - auto iter = PrimitiveToInferImplMap.find(primitive); - if (iter == PrimitiveToInferImplMap.end()) { + auto iter = GetPrimitiveToEvalImplMap().find(primitive); + if (iter == GetPrimitiveToEvalImplMap().end()) { return nullptr; } return iter->second.impl_; @@ -1010,7 +1012,7 @@ PrimEvaluatorMap &GetPrimEvaluatorConstructors() { } std::lock_guard initLock(PrimEvaluatorConstructorMutex); if (constructor.empty()) { - InitPrimEvaluatorConstructors(PrimitiveToInferImplMap); + InitPrimEvaluatorConstructors(); } return constructor; diff --git a/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc b/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc index 0bfba265db..1ac43abdd5 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc @@ -292,38 +292,35 @@ EvaluatorPtr GetPrimEvaluator(const PrimitivePtr &prim, const AnalysisEnginePtr if (prim->HasPyEvaluator()) { auto prim_py = dyn_cast(prim); if (prim_py != nullptr) { - evaluator = std::make_shared(prim_py); - } else { - MS_LOG(EXCEPTION) << "The primitive with python evaluator should be a python primitive."; + return 
std::make_shared(prim_py); + } + MS_LOG(EXCEPTION) << "The primitive with python evaluator should be a python primitive."; + } + + if (prim->isa() || prim->HasAttr()) { + if (engine == nullptr) { + (void)GetPrimEvaluatorConstructors(); } - } else if (prim->isa() || prim->HasAttr()) { // If a primitive may have attr, try to create a new evaluator. StandardPrimitiveEvalImpl eval_impl = GetPrimitiveInferImpl(prim); if (eval_impl != nullptr) { - std::shared_ptr standard_evaluator = - std::make_shared(prim, eval_impl); - evaluator = standard_evaluator; + return std::make_shared(prim, eval_impl); } } - if (evaluator == nullptr) { - if (engine == nullptr) { - // If engine is nullptr, get constructor from default. - const PrimEvaluatorMap &prim_evaluator_map = GetPrimEvaluatorConstructors(); - auto iter = prim_evaluator_map.find(prim); - if (iter == prim_evaluator_map.end()) { - evaluator = nullptr; - } else { - evaluator = iter->second; - } - } else { - // If engine is given, get constructor from engine resource. - const PrimEvaluatorMap &prim_evaluator_map = engine->PrimConstructors(); - auto iter = prim_evaluator_map.find(prim); - if (iter == prim_evaluator_map.end()) { - evaluator = nullptr; - } else { - evaluator = iter->second; - } + + if (engine == nullptr) { + // If engine is nullptr, get constructor from default. + const PrimEvaluatorMap &prim_evaluator_map = GetPrimEvaluatorConstructors(); + auto iter = prim_evaluator_map.find(prim); + if (iter != prim_evaluator_map.end()) { + evaluator = iter->second; + } + } else { + // If engine is given, get constructor from engine resource. 
+ const PrimEvaluatorMap &prim_evaluator_map = engine->PrimConstructors(); + auto iter = prim_evaluator_map.find(prim); + if (iter != prim_evaluator_map.end()) { + evaluator = iter->second; } } if (evaluator == nullptr) { From 816b60491d6e03062cf8e41986e29ee15eb2b6e3 Mon Sep 17 00:00:00 2001 From: fary86 Date: Sat, 28 Mar 2020 17:53:22 +0800 Subject: [PATCH 037/367] Dump graph with type info when static analysis failed --- mindspore/ccsrc/debug/anf_ir_utils.cc | 25 +++--- mindspore/ccsrc/debug/anf_ir_utils.h | 14 +-- mindspore/ccsrc/debug/trace.cc | 108 +++++++++++++++++++++--- mindspore/ccsrc/ir/primitive.cc | 21 +++++ mindspore/ccsrc/ir/primitive.h | 1 + tests/ut/cpp/operator/composite_test.cc | 2 + 6 files changed, 139 insertions(+), 32 deletions(-) diff --git a/mindspore/ccsrc/debug/anf_ir_utils.cc b/mindspore/ccsrc/debug/anf_ir_utils.cc index 0891e211a0..98cd2f4b2f 100644 --- a/mindspore/ccsrc/debug/anf_ir_utils.cc +++ b/mindspore/ccsrc/debug/anf_ir_utils.cc @@ -34,6 +34,7 @@ #include "utils/utils.h" #include "debug/trace.h" #include "utils/context/ms_context.h" +#include "operator/ops.h" namespace mindspore { // max number of elements in sequence @@ -69,7 +70,7 @@ py::object load_obj(const std::string& path) { // ============================================= MindSpore IR Exporter ============================================= -std::string GetNodeType(const AnfNodePtr& nd) { +std::string AnfExporter::GetNodeType(const AnfNodePtr& nd) { abstract::ShapePtr shape = nd->Shape() == nullptr ? nullptr : dyn_cast(nd->Shape()); TypePtr type = dyn_cast(nd->Type()); std::ostringstream oss; @@ -102,7 +103,7 @@ int AnfExporter::GetParamIndex(const FuncGraphPtr& func_graph, const AnfNodePtr& FuncGraphPtr fg = func_graph; while (fg != nullptr) { if (exported.find(fg) == exported.end()) { - if (!export_used_) { + if (!check_integrity_) { break; } MS_LOG(EXCEPTION) << "Can not find func graph '" << fg->DumpText() << "." 
<< fg->debug_info()->get_id() << "'"; @@ -255,15 +256,15 @@ std::string AnfExporter::GetPrimitiveText(const PrimitivePtr& prim) { } // output primitive attributes - auto attrs = prim->attrs(); - if (attrs.size() > 0) { - oss << "["; - int i = 0; - for (auto& attr : attrs) { - oss << (i > 0 ? ", " : "") << attr.first << "=" << attr.second->DumpText(); - i++; + oss << prim->GetAttrsText(); + + if (prim->isa()) { + auto do_signature = dyn_cast(prim); + auto& func = do_signature->function(); + if (func->isa()) { + auto sig_prim = dyn_cast(func); + oss << sig_prim->GetAttrsText(); } - oss << "]"; } return oss.str(); @@ -351,7 +352,7 @@ std::string AnfExporter::GetDictText(const FuncGraphPtr& func_graph, const Value std::string AnfExporter::GetOtherValueText(const FuncGraphPtr&, const ValuePtr& value) { std::ostringstream oss; - if (export_used_) { + if (check_integrity_) { MS_LOG(EXCEPTION) << "Need to process type: " << value->type_name() << ", dump text: " << value->DumpText(); } oss << value->type_name() << "[" << value->DumpText() << "]"; @@ -420,7 +421,7 @@ std::string AnfExporter::GetAnfNodeText(const FuncGraphPtr& func_graph, const An } oss << "%" << iter->second; } else if (node->isa()) { - oss << "%para" << GetParamIndex(func_graph, node, export_used_); + oss << "%para" << GetParamIndex(func_graph, node, check_integrity_); } else if (IsValueNode(node)) { FuncGraphPtr fg = GetValueNode(node); oss << fg->type_name() << "::fg_" << fg->debug_info()->get_id(); diff --git a/mindspore/ccsrc/debug/anf_ir_utils.h b/mindspore/ccsrc/debug/anf_ir_utils.h index 5b9ac9d2f0..5342c1ab96 100644 --- a/mindspore/ccsrc/debug/anf_ir_utils.h +++ b/mindspore/ccsrc/debug/anf_ir_utils.h @@ -64,17 +64,18 @@ struct ParamPtrHasher { class AnfExporter { public: - explicit AnfExporter(const std::string& id, bool export_used = true) - : param_index(-1), id_(id), export_used_(export_used) { + explicit AnfExporter(const std::string& id, bool export_used = true, bool check_integrity = false) + 
: param_index(-1), id_(id), export_used_(export_used), check_integrity_(check_integrity) { func_graph_set.clear(); exported.clear(); } - ~AnfExporter() {} + virtual ~AnfExporter() {} void ExportFuncGraph(const std::string& filename, const FuncGraphPtr& func_graph); void ExportFuncGraph(const std::string& filename, const std::vector& graphs); - private: + protected: + virtual std::string GetNodeType(const AnfNodePtr& nd); int GetParamIndex(const FuncGraphPtr& func_graph, const AnfNodePtr& param, bool throw_excp = true); int GetParamIndexFromExported(const AnfNodePtr& param); std::string DumpObject(const py::object& obj, const std::string& category) const; @@ -101,8 +102,10 @@ class AnfExporter { OrderedSet func_graph_set{}; OrderedMap> exported; std::string id_; - bool export_used_ = true; // whether export function graphs used in current exporting function graph + bool export_used_ = true; // whether export function graphs used in current exporting function graph + bool check_integrity_ = false; // whether check integrity or not, when dumping ir for loading, must set it to true TaggedNodeMap tagged_cnodes_; + abstract::AnfNodeConfigPtr node_cfg_ = nullptr; }; void ExportIR(const std::string& filename, const std::string& id, const FuncGraphPtr& func_graph); @@ -115,7 +118,6 @@ std::string GetFuncGraphProtoString(const FuncGraphPtr& func_graph); void DumpIRProto(const FuncGraphPtr& func_graph, const std::string& suffix); std::string GetOnnxProtoString(const FuncGraphPtr& func_graph); -std::string GetNodeType(const AnfNodePtr& nd); } // namespace mindspore #endif // MINDSPORE_CCSRC_DEBUG_ANF_IR_UTILS_H_ diff --git a/mindspore/ccsrc/debug/trace.cc b/mindspore/ccsrc/debug/trace.cc index 4b0f4c4fb3..7ce13052c5 100644 --- a/mindspore/ccsrc/debug/trace.cc +++ b/mindspore/ccsrc/debug/trace.cc @@ -17,6 +17,7 @@ #include "debug/trace.h" #include +#include #include #include #include @@ -194,37 +195,116 @@ void TraceGraphInfer() { MS_LOG(INFO) << 
"\n*************************************************************************************"; } -void OutputAnalysisGraphInfo() { - MS_LOG(INFO) << "Output analysis graph begin"; - std::unordered_map index_map; - std::vector tagged_graphs; +class AnalyzedFuncGraphExporter : public AnfExporter { + public: + AnalyzedFuncGraphExporter() : AnfExporter("", true, false) {} + ~AnalyzedFuncGraphExporter() override = default; + void ExportFuncGraph(const std::string& filename, const std::vector& node_cfgs); + + private: + std::string GetNodeType(const AnfNodePtr& nd) override; +}; + +std::unordered_map CalcTaggedFuncGraphs() { + std::unordered_map tagged_func_graphs; auto& list = GetCNodeDebugStack(); for (size_t i = 0; i < list.size(); ++i) { - auto& node_cfg = list[i]; + auto node_cfg = list[i]; auto fg = node_cfg->context()->func_graph(); auto node = node_cfg->node(); - auto idx = tagged_graphs.size(); - std::pair item(fg, idx); - if (index_map.insert(item).second) { - tagged_graphs.emplace_back(TaggedGraph(fg, TaggedNodeMap())); + tagged_func_graphs[fg][node] = i; + } + return tagged_func_graphs; +} + +void OutputAnalyzedGraphWithType() { + AnalyzedFuncGraphExporter exporter; + exporter.ExportFuncGraph("analyze_fail.dat", GetCNodeDebugStack()); +} + +std::string AnalyzedFuncGraphExporter::GetNodeType(const AnfNodePtr& node) { + if (node_cfg_ == nullptr) { + return AnfExporter::GetNodeType(node); + } + auto ctx = node_cfg_->context(); + auto engine = node_cfg_->engine(); + auto cfg = engine->MakeConfig(node, ctx); + auto abs = engine->cache().GetValue(cfg); + + if (abs == nullptr) { + return "Undefined"; + } + auto dtype = abs->BuildType(); + auto shape = abs->BuildShape(); + std::ostringstream oss; + if (dtype != nullptr && abs->isa() && shape != nullptr) { + oss << dtype->DumpText() << shape->DumpText(); + } else if (dtype != nullptr) { + oss << dtype->DumpText(); + } else { + oss << "Undefined"; + } + return oss.str(); +} + +void 
AnalyzedFuncGraphExporter::ExportFuncGraph(const std::string& filename, + const std::vector& node_cfgs) { + if (node_cfgs.empty()) { + MS_LOG(DEBUG) << "Node configs is empty"; + return; + } + + std::ofstream ofs(filename); + if (!ofs.is_open()) { + MS_LOG(ERROR) << "Open file '" << filename << "' failed!"; + return; + } + + param_index = 1; + auto tagged_func_graphs = CalcTaggedFuncGraphs(); + + // first output grapn on the analysis stack + for (const auto& node_cfg : node_cfgs) { + auto fg = node_cfg->context()->func_graph(); + // the graph is already output, skip it + if (exported.find(fg) != exported.end()) { + continue; } - tagged_graphs[index_map[fg]].second[node] = i; + // set node_cfg info for getting type + node_cfg_ = node_cfg; + tagged_cnodes_ = tagged_func_graphs[fg]; + ExportOneFuncGraph(ofs, fg); + ofs << "\n\n"; + } + + node_cfg_ = nullptr; + tagged_cnodes_.clear(); + + // print seperator between function graphs on analyzed graph call stack and others + ofs << "#===============================================================================\n\n\n"; + + // second output other graphs + while (!func_graph_set.empty()) { + FuncGraphPtr fg = *func_graph_set.begin(); + ExportOneFuncGraph(ofs, fg); + ofs << "\n\n"; + (void)func_graph_set.erase(fg); } + ofs << "# num of total funcgraphs: " << exported.size(); - ExportIR("analyze_fail.dat", tagged_graphs); - MS_LOG(INFO) << "Output analysis graph *end*"; + ofs.close(); } void GetInferStackInfo(std::ostringstream& oss) { MS_LOG(INFO) << "Get graph analysis information begin"; - auto& stack = GetCNodeDebugStack(); + auto stack = GetCNodeDebugStack(); if (stack.empty()) { MS_LOG(INFO) << "Length of analysis information stack is empty."; return; } - OutputAnalysisGraphInfo(); + OutputAnalyzedGraphWithType(); oss << "\nThe function call stack:\n"; int index = 0; diff --git a/mindspore/ccsrc/ir/primitive.cc b/mindspore/ccsrc/ir/primitive.cc index d62553ef60..a576c1e76b 100644 --- a/mindspore/ccsrc/ir/primitive.cc 
+++ b/mindspore/ccsrc/ir/primitive.cc @@ -106,6 +106,27 @@ void Primitive::set_signatures( } } +std::string Primitive::GetAttrsText() const { + if (attrs_.empty()) { + return ""; + } + + std::ostringstream oss; + oss << "["; + bool is_first = true; + for (auto& attr : attrs_) { + if (is_first) { + is_first = false; + } else { + oss << ", "; + } + oss << attr.first << "=" << attr.second->DumpText(); + } + oss << "]"; + + return oss.str(); +} + py::function PrimitivePy::GetBpropFunction() { static const char* const get_bprop_func_name = "get_bprop"; if (py::hasattr(python_obj_, get_bprop_func_name)) { diff --git a/mindspore/ccsrc/ir/primitive.h b/mindspore/ccsrc/ir/primitive.h index 8a60412e44..7dd37eb15f 100644 --- a/mindspore/ccsrc/ir/primitive.h +++ b/mindspore/ccsrc/ir/primitive.h @@ -102,6 +102,7 @@ class Primitive : public Named { PrimType prim_type() const { return prim_type_; } std::string instance_name() const { return instance_name_; } + std::string GetAttrsText() const; bool operator==(const Value& other) const override; bool operator==(const Primitive& other) const; ~Primitive() override = default; diff --git a/tests/ut/cpp/operator/composite_test.cc b/tests/ut/cpp/operator/composite_test.cc index 984f24b326..d9dd9e5e99 100644 --- a/tests/ut/cpp/operator/composite_test.cc +++ b/tests/ut/cpp/operator/composite_test.cc @@ -22,6 +22,7 @@ #include "operator/ops.h" #include "pipeline/static_analysis/prim.h" #include "pipeline/static_analysis/abstract_function.h" +#include "debug/trace.h" namespace mindspore { using Shape = abstract::Shape; @@ -124,6 +125,7 @@ TEST_F(TestComposite, test_TupleSlice_arg_one_number) { AbstractBasePtrList args_spec_list = {tuple_tensor, start_index}; try { + trace::ClearTraceStack(); engine_->Run(tupleSliceGraphPtr, args_spec_list); FAIL() << "Excepted exception :Args type is wrong"; } catch (std::runtime_error const &err) { From 0727c2e76aa7a7663f3e96efc7913bf3b4591718 Mon Sep 17 00:00:00 2001 From: chenhaozhe Date: Tue, 31 Mar 
2020 19:37:57 +0800 Subject: [PATCH 038/367] modify log level in DfGraphManager --- mindspore/ccsrc/pipeline/pipeline.cc | 7 ++++--- mindspore/ccsrc/transform/convert.cc | 4 ++-- mindspore/ccsrc/transform/df_graph_manager.cc | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/mindspore/ccsrc/pipeline/pipeline.cc b/mindspore/ccsrc/pipeline/pipeline.cc index 7b3cf06b5e..3c3478e89d 100644 --- a/mindspore/ccsrc/pipeline/pipeline.cc +++ b/mindspore/ccsrc/pipeline/pipeline.cc @@ -1110,10 +1110,11 @@ bool ExecutorPy::AddDFGraph(const py::dict& init_params, const std::string& phas (void)DfGraphManager::GetInstance().AddGraph(phase, convertor.GetComputeGraph()); } (void)DfGraphManager::GetInstance().AddGraph(init_graph, convertor.GetInitGraph()); - (void)DfGraphManager::GetInstance().AddGraph(checkpoint_name, convertor.GetSaveCheckpointGraph()); (void)DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, convertor.GetBroadcastGraph()); - - DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph); + Status ret = DfGraphManager::GetInstance().AddGraph(checkpoint_name, convertor.GetSaveCheckpointGraph()); + if (ret == Status::SUCCESS) { + DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph); + } return true; } diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index c8c6abea4c..6ef1025513 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -1100,12 +1100,12 @@ void DfGraphConvertor::UpdateDataOpDesc(const AnfNodePtr &it, const OperatorPtr auto normal_shape_ptr = dyn_cast(node->Shape()); vector shape; if (normal_shape_ptr == nullptr) { - MS_LOG(ERROR) << "Update data op descriptor failed! Invalid shape."; + MS_LOG(INFO) << "Invalid shape to update data op descriptor."; return; } shape = normal_shape_ptr->shape(); if (node->Type() == nullptr) { - MS_LOG(ERROR) << "Update data op descriptor failed! 
Invalid type."; + MS_LOG(INFO) << "Invalid type to update data op descriptor."; return; } TypeId me_type = node->Type()->type_id(); diff --git a/mindspore/ccsrc/transform/df_graph_manager.cc b/mindspore/ccsrc/transform/df_graph_manager.cc index 3339f43145..bfe4d9f5d2 100644 --- a/mindspore/ccsrc/transform/df_graph_manager.cc +++ b/mindspore/ccsrc/transform/df_graph_manager.cc @@ -112,7 +112,7 @@ DfGraphWrapperPtr DfGraphManager::GetGraphByName(const std::string& name) { auto it = graphs_.find(name); if (it == graphs_.end()) { - MS_LOG(ERROR) << "Can't found graph name: " << name; + MS_LOG(INFO) << "Can't found graph name: " << name; return nullptr; } MS_LOG(INFO) << "Return graph: " << name; From 2dc9f632c17992535fa7e775d9e60f2051dea6f8 Mon Sep 17 00:00:00 2001 From: kswang Date: Tue, 31 Mar 2020 21:25:48 +0800 Subject: [PATCH 039/367] add cpu st lenet --- ...{test_cpu_network.py => test_cpu_lenet.py} | 67 ++++++++++--------- 1 file changed, 34 insertions(+), 33 deletions(-) rename tests/st/networks/{test_cpu_network.py => test_cpu_lenet.py} (59%) diff --git a/tests/st/networks/test_cpu_network.py b/tests/st/networks/test_cpu_lenet.py similarity index 59% rename from tests/st/networks/test_cpu_network.py rename to tests/st/networks/test_cpu_lenet.py index 6745a7626b..a3105721d3 100644 --- a/tests/st/networks/test_cpu_network.py +++ b/tests/st/networks/test_cpu_lenet.py @@ -12,25 +12,44 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================ -""" -Function: - test network -Usage: - python test_network_main.py --net lenet --target Davinci -""" -import os -import time import pytest -import numpy as np -import argparse -import mindspore.nn as nn -from mindspore.common.tensor import Tensor from mindspore.nn import TrainOneStepCell, WithLossCell import mindspore.context as context from mindspore.nn.optim import Momentum -from models.lenet import LeNet -from models.resnetv1_5 import resnet50 -from models.alexnet import AlexNet +import numpy as np +import mindspore.nn as nn +from mindspore.ops import operations as P +from mindspore import Tensor + +class LeNet(nn.Cell): + def __init__(self): + super(LeNet, self).__init__() + self.relu = P.ReLU() + self.batch_size = 32 + + self.conv1 = nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0, has_bias=False, pad_mode='valid') + self.conv2 = nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0, has_bias=False, pad_mode='valid') + self.pool = nn.MaxPool2d(kernel_size=2, stride=2) + self.reshape = P.Reshape() + self.fc1 = nn.Dense(400, 120) + self.fc2 = nn.Dense(120, 84) + self.fc3 = nn.Dense(84, 10) + + def construct(self, input_x): + output = self.conv1(input_x) + output = self.relu(output) + output = self.pool(output) + output = self.conv2(output) + output = self.relu(output) + output = self.pool(output) + output = self.reshape(output, (self.batch_size, -1)) + output = self.fc1(output) + output = self.relu(output) + output = self.fc2(output) + output = self.relu(output) + output = self.fc3(output) + return output + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") def train(net, data, label): @@ -48,15 +67,6 @@ def train(net, data, label): print("+++++++++++++++++++++++++++") assert res -@pytest.mark.level0 -@pytest.mark.platform_x86_cpu -@pytest.mark.env_onecard -def test_resnet50(): - data = Tensor(np.ones([32, 3 ,224, 224]).astype(np.float32) * 0.01) - label = 
Tensor(np.ones([32]).astype(np.int32)) - net = resnet50(32, 10) - train(net, data, label) - @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard @@ -65,12 +75,3 @@ def test_lenet(): label = Tensor(np.ones([32]).astype(np.int32)) net = LeNet() train(net, data, label) - -@pytest.mark.level0 -@pytest.mark.platform_x86_cpu -@pytest.mark.env_onecard -def test_alexnet(): - data = Tensor(np.ones([32, 3 ,227, 227]).astype(np.float32) * 0.01) - label = Tensor(np.ones([32]).astype(np.int32)) - net = AlexNet() - train(net, data, label) From 0ae77bb0db6708cd9b5ea19bca8edac6348ddc0c Mon Sep 17 00:00:00 2001 From: Peilin Wang Date: Mon, 30 Mar 2020 18:25:14 -0400 Subject: [PATCH 040/367] TFReaderOp fix, threads will exit after reading necessary amount of rows changes from yanpanhui 524009: added set_dataset_size and changed get_dataest_size according to ME requirements CI fixes --- .../engine/datasetops/source/tf_reader_op.cc | 41 ++++++++++++++++--- .../engine/datasetops/source/tf_reader_op.h | 1 + mindspore/dataset/engine/datasets.py | 20 ++++++--- 3 files changed, 51 insertions(+), 11 deletions(-) diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc index 16a7700012..c872c02015 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc @@ -105,6 +105,7 @@ TFReaderOp::TFReaderOp(int32_t num_workers, int32_t worker_connector_size, int64 data_schema_(std::move(data_schema)), filename_index_(make_unique()), load_io_block_queue_(true), + load_jagged_connector_(true), num_rows_(0), num_rows_per_shard_(0), equal_rows_per_shard_(equal_rows_per_shard) { @@ -203,6 +204,25 @@ Status TFReaderOp::operator()() { buffer_id++; RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(fetched_buffer))); } else { + // user specified number of rows they want, and we read enough rows + // + // 
IOBlockQueue thread needs to: + // -stop pushing stuff to IOBlockQueue + // -call PostEndOfEpoch (will send EOE) + // -wait for reset + // + // Worker threads need to: + // -stop reading the file they are currently reading and throw it away + // -keep pulling, but dont read other files (eventually skips all IOBlocks and will get EOE) + // + // Master thread needs to: + // -tell IOBlockQueue thread to stop pushing + // -tell worker threads to stop reading the file tey are currently reading + // -keep pulling until EOE + + // don't think we need a lock for now + load_jagged_connector_ = false; + std::unique_lock lock(load_io_block_queue_mutex_); load_io_block_queue_ = false; } @@ -245,12 +265,14 @@ Status TFReaderOp::WorkerEntry(int32_t worker_id) { while (!io_block->eof()) { if (!io_block->eoe()) { - std::string filename; - RETURN_IF_NOT_OK(io_block->GetFilename(&filename, *filename_index_)); - int64_t start_offset = io_block->GetStartOffset(); - int64_t end_offset = io_block->GetEndOffset(); - RETURN_IF_NOT_OK(LoadFile(filename, start_offset, end_offset, worker_id)); - MS_LOG(INFO) << "TFReader operator worker " << worker_id << " loaded file " << common::SafeCStr(filename) << "."; + if (load_jagged_connector_) { + std::string filename; + RETURN_IF_NOT_OK(io_block->GetFilename(&filename, *filename_index_)); + int64_t start_offset = io_block->GetStartOffset(); + int64_t end_offset = io_block->GetEndOffset(); + RETURN_IF_NOT_OK(LoadFile(filename, start_offset, end_offset, worker_id)); + MS_LOG(INFO) << "TFReader operator worker " << worker_id << " loaded file " << filename << "."; + } } else { std::unique_ptr eoe_buffer = mindspore::make_unique(1, DataBuffer::kDeBFlagEOE); RETURN_IF_NOT_OK(jagged_buffer_connector_->Add(worker_id, std::move(eoe_buffer))); @@ -478,6 +500,10 @@ Status TFReaderOp::LoadFile(const std::string &filename, const int64_t start_off std::unique_ptr new_tensor_table = make_unique(); while (reader.peek() != EOF) { + if (!load_jagged_connector_) { + 
break; + } + // read length int64_t record_length = 0; (void)reader.read(reinterpret_cast(&record_length), static_cast(sizeof(int64_t))); @@ -599,6 +625,9 @@ Status TFReaderOp::LoadFeature(const std::unique_ptr *tensor_table // Overrides base class reset method. Cleans up any state info from it's previous execution and // reinitializes itself so that it can be executed again, as if it was just created. Status TFReaderOp::Reset() { + // start workers first, otherwise IOBlokcs will fall through if workers see it before this is set to true + load_jagged_connector_ = true; + { std::unique_lock lock(load_io_block_queue_mutex_); load_io_block_queue_ = true; diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.h b/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.h index 69de068f9b..560cff114f 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.h @@ -369,6 +369,7 @@ class TFReaderOp : public ParallelOp { std::unique_ptr data_schema_; std::unique_ptr filename_index_; bool load_io_block_queue_; + bool load_jagged_connector_; std::unique_ptr jagged_buffer_connector_; QueueList> io_block_queues_; diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 8aaa6212b6..ad3e7d8255 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -1906,11 +1906,21 @@ class TFRecordDataset(SourceDataset): Return: Number, number of batches. 
""" - num_rows = TFReaderOp.get_num_rows(self.dataset_files, 8, estimate) - num_rows = get_num_rows(num_rows, self.num_shards) - if self.num_samples is None: - return num_rows - return min(self.num_samples, num_rows) + if self._dataset_size is None: + num_rows = TFReaderOp.get_num_rows(self.dataset_files, 8, estimate) + num_rows = get_num_rows(num_rows, self.num_shards) + if self.num_samples is None: + return num_rows + return min(self.num_samples, num_rows) + return self._dataset_size + + # manually set dataset_size as a tempoary solution. + def set_dataset_size(self, value): + logger.warning("WARN_DEPRECATED: This method is deprecated. Please use get_dataset_size directly.") + if value >= 0: + self._dataset_size = value + else: + raise ValueError('set dataset_size with negative value {}'.format(value)) class ManifestDataset(SourceDataset): From 2e881276ab2c5266140598a9f2024845814e8b5c Mon Sep 17 00:00:00 2001 From: Cathy Wong Date: Tue, 31 Mar 2020 16:46:44 -0400 Subject: [PATCH 041/367] Enable skipped dataset zip python unit tests Signed-off-by: Cathy Wong --- .../ut/data/dataset/golden/zip_02_result.npz | Bin 428510 -> 1065228 bytes .../ut/data/dataset/golden/zip_03_result.npz | Bin 428553 -> 1065271 bytes .../ut/data/dataset/golden/zip_04_result.npz | Bin 430298 -> 1067016 bytes tests/ut/python/dataset/test_zip.py | 6 +++--- 4 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/ut/data/dataset/golden/zip_02_result.npz b/tests/ut/data/dataset/golden/zip_02_result.npz index 1e9c7a985b10c5ecfae012e1efa04ca9e78a8f81..00a59e585dcf86e6a3a1304555a8fbcf7663196b 100644 GIT binary patch literal 1065228 zcmcG%3z${am8iR`D5#=ns6sJxqrjGvSS4+TlRiczAZ!F;bYG!}*h!~b!B>ll3aAKz zx+yUjJV{72$+}|M2wgqmPix@$^HL1_M!^3AE&?X z*7{bhwdP~YF~=OQIp$nzw-kHS49Labp}Eoj^26UPE;Qymd*=q^PM7Oo|bIF|3FPwJKoU_h0 z`Ln0by!dR=FP?Gwth0G;KJtkC-yVKM{)+s6|F6E($y;Pbm7I9%(?W;OX_FmqE!8bXGQMps$%Y%+12B#&Y0a}$?TfX&+ge= ztSZ(NtG_(ESEcA#m&~7acKz(yFILmd$;ZsDE9Qz-OJ?`}{OtPXVvjFYk-BhppGlJ@ 
zo%m<|(!b)^OJ@JY8DBYdcHibN_9Xw@3r;`h?AiTJo!!uU>g>km;-8BfIXeKSF~y^ch2rOm|5%*FbwcqA#fhXGQ*0?-P@KZkwBi}X zbBZ%K#}rR0&MwX`&MKZ&oJaYyNjq~dCRMa~0LNbz58@m_yMy?DX7M;+9mnAzk-9s;Jq6G4EOIRjjSlJPOLj zcvN#WSFY%h;Q5rJq_A!CP|`nD90ygV6iuA_=&cyKgydKD@>&i#AE zFH(9wX9KANz)zZ;2+WnKtqJgFHAk8A*5W$S^OQKXIH%ZDyb-*93K!IZWtrAGXw&<5 z;q;)xVrVYxHO1dk?h}+4$M@z~Ax3uwt+oU2cg4%Vus3r4TJa}fy^LN?rT=}4yNcJ+o1OIG zA~>Wcseez6eDRRtxjdZ=^{-8|%oSTXnrW#I_|KvBbBc?h=px#^tT;VEw;y|oPZhsK ziBfSc?JVMXEqE8mX{L{hxr*uiiE^8{SAAtiEqG=3F|N5v+ww$paXBMV1+0Et$AaU$ z)Q5>st&}jU=Q^}lgf0ar^DM9qO7N=aX;0F6LYF)#3pu5wvedCRr=7Nzr1E917ecA4 z!MK$2bMQnCeP)e~+&y31!QE!gmQ*8G+|K=QW4gsSwXAT&*lG=gP^@qyJU^S_Zay1x;(9#a~0wm!YXW=}+!daGcAXyriC&bMK-T_mkd>p7tUw z@M%Fy$*D&H>!DPmA>myGMb$Koi7G>|Y+EZ-mrC`V1?ufWZ&wuuGQx6L4Z5gQ9G)Oo z74J0bi_SOrN69yRfXa@OG~FTPU*?%d(9=-wK7cCT;WrI^#yh z?Z@bedbnpn;=`@f`5NQ)4J^`_q^}Q2BlsS8FQoNZJT=mO3ET51ZEmOh#p!M<&^G~F z?Qt2@mv>uIe@~{3dbB|S{O*GCwLtz^%4>v|ZUED%;PE)nHbd#qDywLr2csZ8hSIhY zyRdi@*9Lm549?{LDERzs#w`c^oNcZMwJe8& z^&`}*MLTYx#xS7w0NQQH?3JW9Q1(Tx+tRp%4t<9fIozid_ytt0CY9?j4>*$FVuBD7A+DQLTP1pYT zbUu6Z2iky?7czHr>TPJw9<-B7^d1bHkboZKtChBs6EL!I%YjXvRIS17;Iamc8t8us zPAMf_KPB6OOhw&Lq>Nk25J*lV4Z(!!GB_4I2hW1B-G{3!MIKH&9F zWnV*93W;wH=K2==rp8=G-<*M{V;tr8!Fdm`@@U4@==l!(#z*myhd?Q1$2v-58{>9l zYP&aO#w9+e#nx1V`&@jw$IutjyaywcFMb&fx`-CG@AZ7DNWYa@>w#e}^PJu3AqBJq z1@KWO9Q%nJj#mrU93IboN!pc!rHo}6zP=ZIH4nTSs5g#sonX2Qp1+zoR1Vrsrqvv> zy$c#WO^qXyRGfkaTnm01Y3VX*odj3TNMksS+SNcAfe*Kae%j~3)E2V=Q0D>s?+?Iz zDB46lS55n8aw^vc(XO7!)HFs)lfKs*iHfzf*a4IYVABUqZ{xm!{5o2aUTWp-w5Bc` zlsHlwkt?20nNF^1^Gm3q{c0nwKQ+xtVqQB6@ISBj~QC4r`pv0@yjI~;>P|-_0;kz09;H5Ayr+007QGR8 zCjt8uw51kVnoTL5N6k3YfN|H9ILP~&y|N-@E{8WvUCbzKPkOMP-dIZ;?^XY%g|Xa| z%1M=m#K$Y?v!1USB%ktXlgA+a^|^C_>sdqAC6N)mg>;z=J zg}KOBA<3lWmDAp6>^_XNGnFc?8<4Lx;FhO!35mO<<lK9fO4!Tk*i`tD35bmW@5k zf(ipt`O9hJ34GV7*l8&_i9XiDRV8$18?XzsI0Syn;fZ|{Jy1vx4`aOStr4>6jM2mB z?eWm(Rj}AanZ971r^HJ1PCKw2{XWp3KhV~YZ(f13Cp1)}PNm;U?-A&gx6yG2(~qH0 z&X~dy@K^(s!0q7XP!>0X)%oCF3oi|WS~HN#9cai~ 
z;Qlu7&O_CAkPB(;s2dxYkLG-g6t%H7gVL(pC(4)S3nvFwu>26=SC4}*ar5?C}XTNM<2ZVs6(#N z&sFbra%%0C(Z>3u#q7CWs?t=)sCo`dahQ3eRnt>FE5~UxPpxQ!^XZdzZ3q9u_tn}r zfT{B?sjv_ZyoRyVXBdyYDgZ(6p_9H`LW!mHLK(J~&Ty|yR44;msXmwg!87WnPEwrN zs~xiI3gmbxn64yE>31H`2$$$-i$UBi1Y5DWjnPoM9Gdt>J1YLU#5?opSvB}B0-G!8 z!&-9Jp|KX@YnR}MR&rLR(bk67!R`9cWoi$lmC#FXBNuvcTNxSQh6dAz(cWtrf-myW zJ)~4lmWPFNGd|@wus^K=H)y#&(PB6)H&SvcW1yd;miE8ntp{{1RLBwKkOJDEMbLPB z>TNY%$7E1tzC-m=(GSq?3a`8`_>OhlZrwCXR6Q$ZSYJF>ZxN! zF-C`{5g*I{f%rX#!D;G}JL&87=nVaa#n8zyJdLM~Xs?*j6oC8Bc=tc0_e-!n_e0Ys zDg8B`N0V|Uv%J-bV}`;9RcPy0;HoQ(bBtgv(T;UG28s@W-{qoJaOW86I`{2S{3mQv z4LT~9Qu?9kl>O!Kvv~Jl%=BX(rp`8SYDn<@4Q^~i%hbCvzYq{TK8~JrPpn06o<0Y};pmlX(Hy7H-b8F=c(ILKbE#iB`tE&p z7>~-Oc}lb+2NqIp#5BUg@%~6s^NC+V`i|iFz{F*xL|ezrnllncHH@IPR?DC@F4Lzn z*LjS!*yzXVX~j%P4^7SCndiP#c~(Z+NxQNFN8D-{?QhZIalp`vQ(xqgL{(#p1+K}zJaSVdNFc-@)Y(#n-=k+o|Nu^t{IUm!l)F@b2NuC&nRJMYmhqi z}(Mq^7YXaeT|-$?TgdpDZ4W7Nt}y8()nh{g4U z8|!#>ES8`Fs*yP7oUah!lSlI4)d5DW*wJmYwSYdj>hKQtV=31UsKco9G(8>wr`cb7 zI0&jacg%FLeu#dbhgwn2)=SEPxzx;aO8a)Mj*TVMkv)O85Q%T1&(~7wE_lOwqiB0= zrIwV7$W(#y)|Fqh5)u0e&qo`#8mYUQHpD7CK6yZIzD#Rv;3S^nWJG-!XCvclWM}~8 z<(euax1Fol`J{I+465&>To1~(r259lI=Iu*&kiGFxwNWfUFY_hM7eHo5;%PaN*v1= z9*_Rnj;8q%_@0KJ^&>`RW74F3(b)Fj99q?)-9$O9sk4Yf(PK)_Ysv3viPZ;MW7){V1FxMV)6lPpg7L#-OBbiTcXS zbEJo7-bjyEU@^3P+Q9TVl^D;Pxz{Jz$_VKphy4n#xmU%5z8W~C z6t^ie`kR0$2i2nOe+G9x0K^V-a~bTK`9A|#fB5&y#u%t zz*|^qeK}TNT8^zEr3&uKQ%5OjC%rdV3(qwqz4~$*EhR<>WoUdu>a#P<7S2x60)w8Z zC4I%Oai_-YpqHU()j0#e{2=&F-y#R>pJCHV(Dz2@wvK0cVjE*+wDP`WIqqa^mQ(8m z+E57HzW)E?P>BM=7@(2>tL`UQF~>7v2q|G0Af} zfKo;sB_Dl7qLJ6bS8eJ^0#~KNS%Pb58~t+Z)ZkjM0;To-I=C0&3{K;A#;M!VoiY0YIF5xfQLYV`!p$*@ybS7Xfgd`d z(G0QYqZ?MIR}yMD4mipOv7Lpz)#Hxw>IXE%4nUuKIvYaA^~F%0;v~1i1B#wCit{DJA_nno-Du;|_G9 zBQzpuyqgk@ak|g z(5e(AG8(Xy93$QVNjq#?J{ez*S!kBGBzbN=y-%Ke9$Kge@@1@1u2ILxfAWd(L*sY% zQ9A6t++mK|aTPjm8h1vpTR7#LLo2oq0nG>lpK~_2@GxYrIywF4SSg53uNU&}<^3ULakIJd9SRlm7@jTIR{s?}7040i>z@ zTwkfBb57E8;Ma9X6MNj&e!A4 zrC3O`RR@jdCEMq|B>k~9v@jm{RY+Tblrc%C1(!_cS(+ROm)W}Gz8U$Q$z4R&M?wMh 
zu8+}l~V&$R2wWO+L4rDH;)VPk}ilct} zDq0nTf57W@4lFP4d${|euwR-b9D92uo&FxbL<#oLSG}BGzla5X5~$BIj*qimqivfB z{?0)T!>dtVA59i;C4S>MjGS^jg0XQI)WoC#XOa?HLH|Mz@}}gQ^+RiY4ga)*Qu^3K z(Ie`iRp|H4#HI_CUe!V&b!#(Y{Q$n(bfE2EMRqRf<4E_An>X<^l2(s_k6oD<0{nK$ zhE|H%gER6nkfqRp$-9_~KI{t(7lQSLXs2t)c^xWdCp_@ z-p=(Zbg;4UX=!%5r^ zPY%@3FET3yTX+@VuQC08U(rmj=x-Hhhm!F6S# zMu`%};EP&*PirW`du6fI21_^;F3Llb1xRBD&sB+E zz1M5hW7$dl8sxPPPih`HLG9`s)4A(?+`kW(F9(YW;}kdDMIb>TgSY zF_9W-o<|Ko%Do^(G>4bxv zbWozaQJzd2%TfC2%KM#316MH;!iYn8s{`J2m~`KGXrrfN8&Xv3_#C>X zBgwpRl{)%6o!*FH8zr0>JC=uV8nf3fl;MVJlf`sCFpRM>Dy4qObK2YPM_)8;rh|+a zgiJ(<%;Siskam64c+S(3eo-wlJPWvM;F(*Xsx2A)djQT^3_h+}cY^6c?4#?I_O6kB zzW|LEQd8~U51Gvo6?V2S2e?mZB-d|NsH&W5Yj&i)6b;DV9CU6(yWBHmlvJB(slfBO zq>GLDEW}$~!2Y@))BnNH^D(T)bnxq->~1i;nfpUi-I|K_y)4qtL zz<;%q_`4E&5-rPHd(z(CJf4zkXY-+wP{M=lg@xEdADqvf%*=iTdUOgUHl*3bDkxXN zxrEIhC^HkRbY zof*7`4JvaNTHcv?eMK7eCH2)}x7A&6WQiwr(9w)i6;yR*y)vzF8-s2IR#k#}2k`U2 z`7$GDWYO_^F|9MG8Jigs{RVs77mKbobcHeI^>V6tdO#1;Vk7$-OW^npqh%auDYnx{ zu5-k5p}APpF&`n32Ae6^3H7Bg!b*G9gLstukn@$~$6g`x z^viqTgGa1L{aZ?Pb7}u!0bakDQHXerqgLQvJ-d>&%RJBI*}dHQ?C+6gq)$IJbV$Ir zoev+%gC4)p5+UiU=D9jAzCT?T@ZZCg+XA%w61eR^2hC*VTPwOAPku9_cX%4*btyLZ zkCb;0Yl#xGxHsbMI*(pZ3EC_t&Dc!H%{KZn6`M8^%!jhx?H-oJl-Lc0HUpswu2Ms} z;xZNfGh!h%ALn>E*|FWEEr4q7oxPWK<^kzo_@;&0hfvS;r4ICmBk1@Zo%W~*S-Y;k zw~kV~!R{L@b%U3pD|GB zs4Oc5&dFVGeT}i!=bV9da|Yp>bl9Q!$v>_pU5JOkEhnKBCv$xVzAI4r%L&KWukNmz z<0D?amF74-BrRW`feo7mEgh2^NGmtHPmv|rIurJ!k{0|TSz@MjY%VIj-lS#Tq>*oIu&>ufmfoZjogR+k#3H?dnmkjyrrbE zf-WB`JQ8PLYb)iPYpdTjg3oHW##$am7rgr4eOXGfbFU8m%O|CD6<%NZ<=&rGd~Rc&C-?aCmHZ8VjlMB%@Sa{3B!RuBv>3C+x06j_85sZC)35WOl%E>JC>a zy52Pzmb?=EggEn1GUDWQNR)eEpQabiMQYI%At$3#lyVd)r36nIyD8a}%NQ$nRGgW1 z@{CUUuK`@zDXaB-h|_4Bs}WMU9w|Ks=~2!`K@GLyGE%kO-XEJQXJQ_+mI&)#~KT$je=t6)EgvVGyNHuBFTqB#m0_M(iVxjE_Ho#;Gmr@q4|?tE)&JL@?e^yTC< zBC%Jw4M^?~D{wu4d6e;@$@KDm>b9Z>C#5&SMw8oh3`;bqgS(ZogZN(sPgkWU>E~yblo$)gVE{a}j?~O9SXw`FJKW|~jz5rj z=NWM4G9X5zYYKYRmek9RpO(dVMFDPh2BqXLqqdwhfWBKc{5)q=a+`cRi(bk1gTU)z zy!A=&yY@y$HTo#u)iNpKCwv 
z#k?Uwa7E`fWZE|we?Y0r!DA>n?y-HDIt}=x&K68@kB8VEQ1K6Ke=seSDRnWrqY0~% z!{!)Kbp6^$xMMsAF1iIj)7?M2X#M(x!MD(1A0#Val*{as47Z0J+P#!)nTE+qGumq7}?q z&05;mn>1CAV*}T^bRXryE_T)VkM6p2a-KHbJwE~2a&4hR4}Qhn5TwvKixTRr*Oj7v z% zdFr|T>wIuJC2QecBQReO2^i5P!7M5@-OC>OyKudWvhVfM2?QoUV zH%DKGv$a}U>xsaeLdmK8*KX(uZLa8MJ>+Q;|lfqGE6uT9oTZJQkj zBe@-<&*Nx9Q%+^x^)@;|+KvVjSB;&4KhE=rRC{!q!TNT*7+sB~D!~Pl!Ke=wr_8EX zkF?r+AJ=TB@=!cG=V*_U|8SzfI@)w*;ViHT&5)zrC;6X)B6BHOAYZC!5w|k#F;_H7 z+61RQPK%wXcKFNg-9H=;Y#mZv4Yo%n-J!?01y~Q@w+%rXtJT)9<3hfDk)BD9rTqU6 zJ7l+T-N`sU&$u~HFghOZ{@B0pLz|L*KNQ{NDzUSBXU)DdHJuVyqet6Ge+>?PlYYi- zcjdykxiR&=NzyXPXEJ;ddp4hE6ZMw;5w)q8y<_vY9$IY)fmW!MLgXgsY;xziF)j!*n> zF=_hj&bg(Xv<~k^E)mb+6?!^ijYQz*)*M42;#p zSzIJ=+h?Rx)IDe$xvm6d`!*saXb=`g4X?kt1*|`do^}4V5csuFEg0 zcj!|!W&bU`XXkEH-*h;S2&_`Lg;6ttwH-+}UiahVPnF?@hj=zB6})SUZRGb$G}~G6 z`yBPygAzK%UDHB60eEV^k&N_Yu72I2goo#Bz&L|*UcJU$jQwMbw4TT+S~?JTdzf=| z(%Nr$7WO83r8hl@8sWFOrXoK+nDpkX#FOT22KFrC&h4~d6l-k4So%qAV-mjkr!*au zQtFeIo0anAwmZpl1-}FNT2%1>ttXcZqpXnLAa{3)qQ|)SzfiH4tEtxtt*<3*HQb?m z7@x?&g`rKncbtrPdnbphb@H5L&N!4(&+Iu*?Gn(k=eQcDb+u8Mn~0~Or>sPjd5-$| zRHL4{o*AD;ZU1+*>>In8p6ssI`T@DCeAk`)-_k9|_Z2?u3uZANcl{_IYKi zP+=jdYBVu%7nn0J-?1=$e+~WViGMeT+#ApqS?hf$(;siOmKevbMEN+&VWMd7iTD9El$dzpz1c{c&SM;h z-blss%pE51Bo(BKKEkRL%hQ)C@f1EzNR(cMako9-b{@IDy=Lrgc!KDP_!4Ae6jV8e z+AGPqg_T^P*lTwa`Ub60(Nf|(=g00dx-_-#zLj$+89M@v$+RWRd~1C=cl#cL6Wp$} zBfI6hKkc}Lf7gxy^=NhSu{O}!i+H7@*xT)UL{q8xa^ji}XkZVW-TRh>95a;D*#5zk zIU~)v+v(LR@_f6-dD~CXZ<*|Ol5z!J))-bEU0+pNc0(Qaa~zEB=!czLl6qrFy}|}` z$eUcl5A+R~A0^+?n7%8M%c%1h_Es#)$?C-(HFeLiVBkBN?sB}2r)9L}{G>k~nmd`d z60chay#BQ4+Z;`B#m>~`I&|Yn^zE~h+ez=9K~uX2zkrpR!vDVDUI3@9XAmy7D{E7Pz|SjkKK1hJ&aX_b?w4n z-@DkuEEHD@TYzB9z6`BA)oFDVjavs7oJ~q6e4D|L)8an6%VtJ;1$7BRXyqJFmJCVL zC-lxVv5up(_b$^Urn|bFOBUS-Q48a*_H%vr#N?L-#6z)>@x`IhaBN2^;X7`nB#(uJ zXD?_UvEGOAo7+>Fxe1da(ufGxJ$f~~XC)sUUi>>o(4A%r80lu}mQo#C4<7aESo#K+ z@m(YTehs2T%1*GIiq`QPS*~WZ10mMQqkd+g!hceF3P^uDbw8Qn*x5{2PEl9I5fqE| z;{H0gzA=qoV51~whmpQvKy)`%tbRw1oEmRBWhLdM=*v8=S{lDTVBGmpYAPqeZSI%3 
ztkUOl(kHezoU>2jo-%hH`iL1HM#{eb!yhvC;MoF}!Nsnv8Ba5kwmI!2^PM5TSyn@o z=y~>3s7Z#Qar{EUp2PvGkl@F7?uRzt0|m!Wdp;PShX49166)NtI<2;kVUFpn@HnU~ z#a&Z!U9-&Hi}cDj-CezROnR_Va5dK}(H>H86p;T+>FH=U$?-a<8BPdSpKo#CB2V9&;K>O>YKc^9MR!7;H(uu)c5E|xe}%P7K9P@cfdnw`bqNC z`XP75>1F}U>(WMOPTJQu?CV{pfTT^UV+UBJ^igcK@sWD$vKm zUTB#=7CDx8)l*HjZ(V0QD^qG7`W>D)SIuq=tdw-CYn|>wD=-#UqaW5n^E!ItK2Kxc z%As@UL*XY^^=G8E{AQK_PCFVa zspp%Aw{k$rxTS%)+LlyT{T}@ho~7cjhUZXtJ5qc{V=7SN$*Y{xu{ZaSv}4aNK^B>N z_=STD(%5{79+oJTPbJkJ9!8PvcZvVQkYX*8J5`QF`W?Fg)X%I!4ZW^H$`?Z;_xh=^ z{W6qMN9O_V$J2B0ZF}Pl*OGonlK&cX!sDdh0}Y0xIcj)_`o+PofsL}@w;X-a?(ciV z+%dTB4O;RIvr;PKP6};~Z^Iam(^qkS#@qZq68Ut$r0ve4%sen|Lc12oQHqE0-_f4M zRZZ{;{o9UTSOb(bP_ck?|AJmGgDZx>E!QP@kJ468a4dnxG5A~V%GF1>4Cq^u{`dx( ztCNQ&r@MQ^1CQ!6@k2&3#8 z_V6^y@_~ET9|dbMJ`hb>PpuNIxxQiqZ4Bl0uj6g{I~i}sA>)xBQQjEXE9k$Q!AieW z%NEhlQYzPDuU(bryWaH3ypth}hnh@{W~@lxVMpRy?TItch^Z+jw$RGy8oH72E{+jK zcK98=8tjkn(X8XY-bQ#`Q{lfXuBVin6Seg8Jk)>T>zOhxae8omf#>cTFv@rT-yNUs z=^u43B=^0p>n5Rj(mwi_#%OfUQ9C-qIjE7%v8$Qu6iA6ZT!FJaQP<2%)X*NC#B;1I z%L$XPnBSw1y^w-a()j-hd`eK-uc~Z7*5@Tl_g12#Z#`<4oYl%NCHOyD96*|HT{wSu zK3U;0DJM@!4>fVz`9IHLtTG}7k*-&l9a@Crs9{{B9!u{kq+bIuGPHrb)!fMoBa+T8 zQKppY_JMEwf`n2kuXX83+jNG01*xk!2O%r@R6;xMyZ3ptL@y%IUAE5maPzeCdHC&C zdKDJ2M9MYv)ftUzHNlO(KVpvWgSG%^7Nw`szUyCEuWG zpo{w91jD*Wk)N4LPBulJDC5xa6x_ zPrWq|%)%?G;;obt_m&tI@ERQUsoog zbj+LTNS6zZb$OpjTHHmHFklYv2D^Lu{w@CZpS1)fT3MP}lA{8aGmISLQD#K11;Ztb zm|sY8N0mE#ltn3~#6}IRNHe29CH37EyaHUHO??W!G>YAk#`WOjDcypmdj_f4kZ2>n zcffJJ3*zjvdw)#XL5lde>rgH|KJirt_xY6C8{DLoT1X!%R(ZYm(7zj*?zE$NzD?}l z`+3qF>TUM0V9~=uby7}w^yuzE+gE3|L(w2*0PT9Q)aJ@NC}T=67pCEqLGA)ps?S9YvO((xfd}bb%&fd3`#rA z-c4SR`(ON;?bYNOL7N5r^wxx`oplGCJ33py#+h?FTv9?4s!OiIFWW;I-?92Mbk^D( z2KV#=)3KyE7c7zH%7zqD(-z3_3rT(vv0qvv3Y)4oHk4WV8mynQbgh(YxqIN-bx_oq z_!rQZRcRlwYi8~vo=l!2-Hxo+0NVU-MK4lTAI_OYe}27) z@!hvn=!Qs(c~}*;Sq#O(^*(2RC&G<+(mVFn&L?bJu`kCZDpjZWm2cwIGvZ~+Wqz%1 z^!S~KOSo32RjJu0hBIbjm_G*y~)4p(|o_9H&@`q(zZ&a>@ljpR} z?5-~Pp`mB#j3X^}i$`DN8doF4 
zDRfiZIXde4d?m-!5~tR1HvFdd+(8MgbGF~O1=_~$xT~ov}SE?MCw}W?P}>{nR$z*@L*(V(#6IE5)3z7N}>w z)@noYn;aLlonK^G3GCUFT1WlOaE-Jahu7`5Fa4^IGqy5v5x(?lN)=LVt$8Ez;JSdi z+V<6HLN?Cf7uVGZ?%h>$%WHm5^fhpC3}aTP)J1e&KS&Fwb&Xi8?fC5xbG#RRMA3R6 zp~3C$Y>sa*6vz)9BnH_LHc!oCbSfl4Z1RcTsT3An-zM@xM>(rr1{K6b9oCrO>CdPm zdU7p*nHWWh0{5Bbid_;y_FY{u$`0p*|Ir*(&T+l^qmIy9*;+&Q65^fp%=EhSTuM;v z#R2rz`BThk>M1)Eu3t_M9Ko4zfKe0o3jPdRSX+5Ne0O{5(@n`9PJqA1F&B89o*5zbttQ`LC=s;Xa9``MftGBQ&Zp6j+R$HDckO57>14+1J0|Fb7}fXe7jM| z-Mi4~3Utmcw1siG3t-RxZ_&iYZ$3|RcA74C9ZRjYJJXI z&|%w?cjIpB@B#DS;U28hz+gEV=|S#}M57of*ofwGpWY1k&TlqNL60v&XKEE1)4X>A z_k+Q4Px5A5)0|Arh+`UWU6Z9p)X!1tl`1vVl5(C{Drf7^Gk%F) z9V{okKzqZ1bT+m0*gP^F;M-Z{N?PP}|G%&IwKSDebJq8*41X*IakT0_SD$+%^jDKx0JDZ zl{}+nzAvd)(o)Img5#Jw_P?T~68Nm7jPvV@ok8(JH`1zJQ}jW->RKk6Gg3y|c01s{9U zkJ0pN9qSv(#8P zZ$>7JK@{LAN8dd-wfq8K2H9PoY8CZe&_hkoMPYeub)|HkYmB?!BQK=XVa!onz5Z6> zzOZ9@ypikoU`jkWD^i!W^WSJk9?H2=;+r_~QJ?eV;%G2?$Ojof9_b~bl@;%V0+ zuch35oLkYpv4hU9Zx>Q8j^XP2A4{;9N=qJ$ZC^j7L{GG(vq<@p!cq9*_k6M9O z;`wvnclX;HDf1RHKPlKrY6E*0HZltC!t-edSiA>(eLU&p+x1t%eP0FFPf*`4 z^f}9R-|CNgPrNbICtIotWPd}`yW}fcmH&sosF%eEj4E=_zz@c{JMkVcyoeQ$8%_^ zs9)mwqmLQ;qzg0TqkHMlz0UK@-}ktZ9-0!`Pd%i5TAI>Y!Nq$$C|5p9+?B~Oy0I^m z{h#;E{ivtFqz*XF+sg^A<-)5|XYXIDKBf3xh^zAIn(rq(^f)&`!v~O&LW-P5Z!*tJ zy&GDv4Cc91rwiZM@7?y8=3SFHeNCRt^(pB5++ds=`eG7hqo{ztp>iTsPJDE;ECVk8DPV$_6h}o;;tfhXe z=NeshU*(46t9*yP>ht*RoOb3uZ=(;4OV5EiKcU9w$eWkAe+VPtn(-L4ncudxz00vO zexG79S|x|pdkNattqI@Y|?WFC) zp+*N!?i-lO^>g$m<{Iv1TMul%alS6`TOWMCkEgQEpVUUN;vCesKQY$onDEw}<(vMW zAJ&uxS;=frwLHDkZ7g^X0ykv*8W$cOZ%r~&&sFwLW{f1?wT575IK5pjzKnG z(LdAI@pl~-!2_<1L?qFg9^tXsqbw!6s-ZSl(ql|RTjUstGF)DU`i@N|i?N1~ae^z? zLh{Qc#;IJ@`5Rh$2b<>CPTJ8GvnqYoN@uMK&0lcMA;sgt)_r{E@xM%pG&+_ZSvH^k zi-mIr^;Tv#)D=c0_CFjw%MAhtzwGi9_ao37d1emB0agFW);~ZRw8DPNcNEs6l{CLT zG&Mntud((+0*51)L$K6-=l4c^+fy)8AqhV|CkfW3x0`q2Og5UOZ{C4S1M|Ve7wj1Q=yHe zU!fI$ePJCwnQs$FTgOZ-5V7h9!K9rY%wm*W9f@~e>Y=W@axmk+7Fu2d?oxRc|AiZ? 
zg4Y0}jS+DtTq`+Sp`{V9@aemI_Wb5|su?yw|8w72k6)|$@DZ`T52vd&^gxZzt64fz z-H0zae=vS9fwWVQf`}w2EqYcThV_4!61@+ZEA0bK4`pe$lzn}kVTo$C<(+ca%zbwV zU7o@I7_Ht5wM)b9U#9ChXyLPZ_7MKdDLyBxlSOy0-AY<-<*b9Nv)Xb*td-=TtFudO zeh2v@kebroa+Z&Xc~BxWYwX_%eHf8Ibz9Kf@8NXmK;IN=gR>3+PBl9>$6@CjAAi+D zFHk>EUqkIRns(hqyd~w$AjSRBuT#T$n$QQpOZHm+boW5)5X<`H?^|W}-K~{9?BH8o zU3sA?v=3T@h1f1zmcyJ~xKCsp{V{Ik)ftlGU4llwjh+YJVaVYm{Dk#T_a-7WZRB5! zthm-U2VJv)XXUUNJoA)(f*hsR$j4gljoM0^W9Y+zZp6bmUXcL;`x)cfamq}hCdg^+o`#IxFZ@zWn-rh54*}3Z_$hvVlz9NV|SGU~> zjB#ije=Fu*Fn2$%zxe7r$#(~&@KRv9XZ6iAL!Xg2!>`Tg@%W87-|){dr#P0`tzRqi z*AU#7CXM~hzrQ|fe5VC{+K6uzT3ZidVWQFq>bsMu+}$tqv^2oU ze*fG2hzWS7)MwFP@2}+yIoINb)$M9UpS$a+>#pu2Q@irxPU-lsAC2H;q`FJbIJZ(} zR(69D-oZyh&3Yoo7P{4m(&1Nyw1?a~uGWzb_9*nTzwNUa4tx}9#*B8~e#Cw(y^nfo zYANH~}Dn1HFs^ue|q#Z27b)gyd^}V`Bb$hWJZ6PJ~h5Xuq->+Q4csS?q zS7(gRit}bTNDQ3a=!xv+z7A`iy~})X%8l_^eb>v8Y4_4N7kHZ< z{1z#8PB84!+D(i3B+33p@QEp_e~MMr*az+lYN5`7-^`qBsi1mt!~a5H$4$sC_n`P>%9TfwL;<+ixVC9Km_?Ja*l? za$>aSCba8Ruvt&pOTpf6yt~ULU*TDGlr~Ck^uLnU{ix?@HKilXr|xBfYpN-619;~d z-HyE@tR_~nR)Y!i(qz$Nh7+*jzbX5+DMyA3`(~)zBNQbwjxNoRrF``D!!nrz$I@On zA6=_^>M%;rO4d_K01K6=| zLRS0@s&?+Xydvjbu0eHg-}!CVQL5Z~)#DQT>a?<5;6BrXa;LV^cTIjutKlKq&sFgC zL~^=jqFNaFG&_uT`WA#=3=w9Da~4>|FUe=Kz(!K_6s)Dq>+bor;1di>IT_5jx~p7E zYm`{(6oCB`R#Do~4!4pbOm$R&^>V*m;0!?jCHt1CU#S%v^^c|f6=ioC%tOyVnoW`r=nnjKy`JEoLzmf0y*mz37+@Xtx&1=v~!75B)LM#U95$ zIgJ`SY0qD|ycJLCVfH{x2hI<`#a|eIk?TpE{!Y!mQl=?UV^7-cx(rQfBxHP=9W6kY zy2iH~8lOckjG#8*x4Db5nOy@X)&OLlQ2|B8NjKSNGKtW%{{@_CHVRW$3@KgcIobDDZPW>l~{( zQQNUoM=K%PFGsGs?ajtGi&gvFRZ|;ev_TI#E9uopfZycv9rm!!u5*gP6nfUhk46A{ z((CSZy}tsC+@bbe>`j!GI@;@)l{r6;c%m~jwW!ZxA?HM%{FD_kJW%a`E^r?#$YVj;TuIifnerb_Y-Y>N-dAH^n zb@N-&-gk$s-yD=NrvR3PZsT04^iPj7PD`nu^=6#g`Rviv`*`!r(KX$EdgjxU`pF%Z zC7zs#$tCSTY2&VeF?ZMHJn%8c6lV}o-|wxN7T;NqeEnka@Tm{nZ9QlGwe0_UD%Di4 zxQk5gvUffO-Q{I>Udx{miPHa?3O?sS5q%te$M{CqOr+^6wB)z=cOW~xk-ep~;wsO2 z`ZARn&H2cxF^U72Sq!JP-%4P?WTGhV@?N&@iYz9|d=7eO9P*{#VB~i@y>>ln z8BdoZweEFvj$}mI2=P$jOv8u?I|q6rr5JO*6Tjjmbb*?~jg;Qbn$wHSJjS7w{N|IZq|2HA 
zeUUt8J$mpqOg|(|LBc|D*I>tyaKClpmZaX?tff4$4CCz@4P% zo%Tm2RhTN}e00qv}CDrFORHa|)`CVIur4ow-FJc)YB z6?l3L&JB9UZUjp-BW2nCD4!Q0H8Ic2;(kUF^m$x8^&OOZDjwG{r0H=K7`sQ)y?hv+ zle(>uTF#{X#h)CO=4W8hk~p+Ls&%s?eCT*`cIdq0(Y+Li^#16DJbGa%8o^kxIH{Gy zbs{|R@Ln7Sq6>Vt`U(D*$+hn6Ow zzDs*Wv`QJg&!hESV7LP6pT&&qHK^p?0rx=m0tep`TZXsfFQ4q9#i8KP$nzXpnhva4 zv|5MebVT38v%Z~ZbO~G*rMEitB=ti^qhEdR-0_x=^ba-w-RO<>##I9C*qvbQzLx93 z&^II7$T1!hqkm$mXYK5ed&BmGln2H0^kp4TR_`rm`QU5k8|r~FDfQG2&Eq^lk3{PD zw%C>6)g?z6H=Fm-daA%UYB?-n#LA;1jegwgGdbVQv_q!9mO`H_7HY|C%_eHu+D1~< zaN`)SpkyhfWF?#fl%SdMnk;SK-2w7@d2e1;Cge1GvggiVS}B=H^wRWfgfU`b+J^$1 z65OBFjH{9yBeie({!cA?@#vdo=?8%QbhK1o;t?gTRkVH$|NY8h8{;vJarAAGDoR~U zUH7>;F80Y|bLHw-4o;&Px;4f#Jc}srp0DtaLLY~A_KmTLK(yrvz*?H3L-t0W#BY-2 z6BqfVe(kf-qtL_p|5`xb@zi7I#G>Dlz9u4eQb`&&a-Iy<9kkx75)00A_cSw$64Z9r ze)PpX)!Lmnq96LKYN~Pg7Aq<5O6&OKtvv|dMz3CnFO;0&XbaZ^jmb+VWwex9*Z&qq zW`F&*zyQ{rXOMn4`szi>`!(M@zfiUfZPkJ-zYAW@!zQ!;@2}B$MufIPffbBH6LRdY z=7l_uWfnM*sFkbnBcQ@NcsG6@elu`mZfeB6nLNKN>^H>p7yVMeLf-9g--mBF858%* zPww?}FZIW1?IAGg4ZLa8nauq-;@Ym49RNqR&~x`>8!?e@2jS0tKs_bp9%!VuVFb)? 
z6T2(O_e)&2*MkU8O_^v(w3OZv--?~8(r-I>l{rdaehT=>5iu_MO#Wg>N98&5L8PB0 z)c9R@rudv`jSb`vPidz6{Q^g>#9_S|i5FezSWlii62v5vRAtnLM9J{FoWD6A(Ss4&E}inx@b*m&YGznO|I{Y| zMT=2@_paoB5jiQt?Tg5B<~AFib)`PA(OZg0V7W5h+347w`bR9Kcb+qjc>6YcaxSlA zI`=RhUB=&fhPM2*h%%Z$Id?6kko>U2DN#yr9Jx_wkVVYHocXMw_Q(pC_$B3gpxFr4 ztyf};2P7Vv3g6cu1*O!>Ta)GyFL(Vb=eO^jiTloG;FodAcqHC_hQ;!Gz|P~Omwcgz zK0NhMP3BkUhr!dUfD%?YPY=|8pW~`8AG}gn(Zgx?jc=5lnAW$ppb>p% z^+#v}-<P8dMWlgTT8twue-M5o~dkJ(nzXD1AgrXsowKY zCQq-lJz5V}8={Q!e0vkeY0&&J=#l67kkm81nr}cqTgyuM^&<7CJ0vmTGZvXRK8VOR7bbKV;O1XV!+-7O?5yif59d?%ne~>R#GtcOsV3 zRYDCRoyz?8jYMOF{zBGDxb#)h_HA|Fr+sIHT`;|~@3t2E|1wXgnSOPUb6gsASJw57 z+~Ls-cUsS#s!}MRWmJo&fMSGm*{X z-|zYqz`;1Xy^lSpCGH+VBRoP|Q-Rr>c)F{Wure*w2-hrOz1aN?J%J-m{_mImi(LU( z{{~|g@4Mxar!*|_vRr&u>VKYcccyo@?Ln;1HZ!N{dvIooJ|_HResew zgDVj)fAQ~e;=NOtt-Z>+y6@&Ym;5yIzR5`KaNtnq&M1F!dC33CGuXcKyShOPldfcR=~+ShI)GZAQ=hJtaL3zd&5( zzF*=Tqm;2Cb_}^4^xrr3JGgHrX9j*)9b>VL7Tw)GiIm`jnLHm3-p=Sc&?HOJ4k_1h z2XH)1%i;5u>9bz8>#jYKkKo>IJjqS!FY$Ne%4izf%sj20)aud-a&&Np|Mk@5R&8qz zT9?!PQvPd4^t9^srdVpc(un%3DDsXB}ZQ9OUAlj<39<6Vzpiy4Rh>taT=3VVDLrSzwx2fk$#I?-Na9;v)8etxmcU##n( zMl0}-!Bfa1Pp>1Ptw8t=m=-9Z#L1o4?zMYDb7m3L%=5IGGox+wUQUnS8g@JrAy*#? 
z_8Ut3J%(C()lO?#z2)2ur@X)S^AypQ0(86vYx4|g`ZA9&wtnYn45=g0|MxR0ZxUVg zJ7pzcIP>tAM|Lx}bPnGWj`3I6UG*A@R2NHI&Gw_Wi|$*3<{}afv+p^HE2v6q^&Vo)$1w4r45Ct<>pE zS0%*J(PJBieu%lQHI=E4g8tEI;gy=_o!1zB;d{zi#;e=O_uCTc3iZ|T?5S=`~Ka&O4v#^Ps}15))5CO828Z^ElLf?LBJQUeOF<%VLj|3AbX16#{2O%!u=X`5sR`0Y~E)Ms4wNa^v?P>F}rlc+zUQ3mEDa#T~1nmG-iQ1 zTaX9emtO)bz43dHI)CwTI`eDy6%4|6Yp3lu61H!n6<ydIt)LFfHe*<`yD5Zb$;Ul70;j>Mpwr|8b>e(8N zKAq4TNsD+vwr53{0WVLU`^}rvv(a7kj(k2Ud0%Rg5_DY#ELT-?l)XN2uf5us;@!`Y zru{ayz7V_X3dehRNQ0p2EznRte4DZZ7=sfjyPcWH0Jz~Aa{Sf7QsOz|gL)1AHjDSp zR7}wuSPni$HT<5eZy5UAHPe%h;ysUZ9OGV%R#S)j8*=8v_j4_wX70<5ePfkJrvEvv zGq`&e>v9=*=IVSRr^Gq$cyi+D6 zN;odcpSG0>`+8uh= zldG1YTS9m1ca>AwQsS5u*ly!b%dqHvdsiqz_Zx`?uFoPTt^m8M$UHTgU;fw%hv-*{ z`{?9vO$G;j$6m;uzxr@C6qX0oKT6N^RNHxt(3hpF@4atGcSH8}NxwjOah{?7=&w@W zN*lr7+BkjYdY-RCyZDP@bCc9K#*UNL!CjsQSH{t|R6ox7PQqgmGWj!l+DXY(yph@) z*q=CE9#9)31zsIhP7JB2`sh6WsE(H7KTt9(bUkz99z>cpc_eLl%ni`Ao`;?9` zU^2PV>$r2%=)J2ZYW4!>J`X|!&bDl&JA7n&SpHq@NL>U{s+f68wzPtx9f?kQyuTf*9X5g)w&|t zy4vFZOiya>u=wi0h^357TxO)F8sGUI#(5Pf!|_i$7$?8Tegjzk6Oo52@bCP!2Y(~E z1}a{Wa2$m1dv~H^Kjg(<`#v?n8U+1sgdTU(E9czVjzhhWmV}MH(z>^S{gvFscU)GW zIsH5)`ps>~>YI2P+en=Tw_ZGr;ew&&H!>L!&R&g)PH<7zX7}?vf^zYO5-+<*WfqD@Z|8k^fEI95=btb1e zYmk~r^xa7L*YTeJMDPE=TO(sr?i{3j54)Uh^!TCJD2L?fdJ;1vom3|rPdMS8*Aw9j1xB57v@fGHT&!Js^ z5AHvwFaHZkwS}F~{w1Lq*Z?O3_T)Ufy%~0@Yqm9?; z#b#(b0y+N!SpE-M8q1tz3+1K8BlP7}Y7GJEWccuT;Qs@7oCqCG!FSpT{*S|fqu`S> zk=;LH(aOZxPDR#^L!Yb$#{Ng_ztS_09BryAhq2=-OU-uUy1pK{@jjN`!Z=DdQhVxN z*s*7E|CYKx3)ec~ZDcvypmE&0Lf|ahcxPxotxkK&9Y%U9=hCEw@}z0YT*I)gcG>5E zqeXC)atqG|_~B$EJaS@3wQCppNP0!1m}jrwo9p+vo=6sX4SeRiN_r`J49?E|dMqpH zSoDa(7>gArXG&_uIy|(j9rp`neTd%(GvZ^eUxfD>vyBNnt>nqQlV#xh3*osGCDj8k zg6H~`@vPx!vfZRP``(xrr*S95COF#o zQfrz~IA0k_E3Wd1?<6Fv4E6kl)q&`wcKCcus*!{H+px0@$u_R0#wsv#&Biz4)IO~x<>SE9Q!yH1G-&}jLiQK5 za(ERx7!UntM^u|U03BN*r-qdI^s0<>77~TC@e{WB`bBXcqoRMMU2TSv)m-yDDSPg# zD5a8_JOxj?t|^>pl)R392r+{!-`~$uA*Gn=F0Su0YUhCa2&gm?E9Do|M)7|h9MQ@3 zCwOcx0A)0~%e_@=dEQ3eE9_Dm!WjF@R_&B-CKB~9T5&Qqt{vaN_gb!GbUL8G(`c0k 
zX#F&_$nSwE?nae6xN8ST{ROdmG4VwQ+HyL4{oPc`d~pq_b?=EW65l3wo$v@?33=CU1a}w z>OBDDi{PzCz+^eKJ`JAo{qfkCL8LT+$LtXR_FsP&=~GsL0=zB8z%sL1?BFe z-N&G)U(EX%+Fpx!Gkv?6(qF?)ufcit(w^c4SJVZ6qm)1Kj}H?Su#+TX=0KTa<%qi_EV2Y(8m>mf$?e%d>X z{w>6Nyqlb_Qu_N?j&I}JyaUD}@mX1Aj&zD3J_K!(ptn&A{MU4ei6 zYb3W1kat1%2N=y?Q~Ec_r+Ws7Prz-b(z4w26JUH7oWF#w_zb=73)L^dj{1h^_n^wR zvDah3=M7r^6mULC3rB0(0^KH4 zatTy^m;BH0^ekHDZxcVg1iZ(<;Y{?<521>+&Y{lN@#DTmyJtg%ui=C2p~ug_TjzlB z@1g6zpx-_N_1-71YFnEU%+|EeoiG80@dzO@WQ1b&c;^#SUfF{yFK3)dJynY+J zub_=l^yG5rbtksqtt3guBR3zw$)BOdaya!JB;)Vs-AZse2~5xDx|CYik@6^3{79f> z$FPciDc{NaJ!uk@49qS$5Dx0hGS@DArObsdM%Q% zg!?g^1HeMMmeYSJriFVT_0RT7JWGAY#{b!&ACl)RlN&y@s}IKBmQqJw&uEl!G2<8h zCTy&KW%VP%5PpH)r*Gj*LAz*T%Z~;hSv!R*Y|e_?x?%v6Xf7&Rs*&$rfiq zb^G=z<2D*xCc_cF)8<#qK8KFI>28 z>dX&XZ6<>szEJx6y;YBJXa>JdEtzMQQDMJ+b3g z!SV00THBIbZRXy-U4(6!03FT;uXE`8?c6^M>qM|4Z2Ol3)+f=EXOB2Dbe_0W^ETT zs#B2jZSdnRu)7Ir@4z~Z0*Aiv$@k!xuk!y}(BRup^)1@{8zB2#v|-TX)1?0Zu00}Y zlh^3+mEim-p#2h#(3^V!?wSBsJkD7BEts7Rf1SW+{R7WG=cyC!S`Q^}Vw7H_-lrI~ zHBjPTNPU74Iw3_1-lX^MkoRlG_Y3s;+i>2GlXv6Y3Gmi^l=U}Jk41_GCwn`YlD*-z z<@9SVkd>d!jLr4vtM7r&D{zGK{MleVC((Qo6q<}gzfUi(qU_~RXE%HyFY6I}4_!Kj z{yfNN>|z|Rqwd|%U<6*tYv81xFdM1(Zt^mI2_N~}PUq9tH{hz>P~m3a4nmLo3E1}o zX%imEFMxF+^80^~?U%{_6_Cb2rHJ%=0&M;_ZLjBQOh}*WDxglHUni$N-N_vEJ3x8> z&isACVJ=j-5#Q}~BzhJ6{cI|C2)Q4?&p)6qFXH3g{r~ZGA5dEr*Z%ksO^n^d78`Pn ziiHv-ibyqyxz`3NMnqaHNE8GC=>o4BV{B+dK&r$7Dgp|T4iXEfD5!K1P!W-iO0j_J z|M{LruDSPj*MB|heV^x?GiT54vu9?{4DKh^Xzh1e;8LWJS+_(Dd;H`7{w_rli7uYe zqL=1A{-;OI8$Zrc{_pqy-|MXO%~*Ldq5JSy(dMj`EystJv8r>>aO;1^6)zN+Wn`qj zUx_gGjHHxitV;36QQ)MeQfq#SXsS0YPcJ_G&(fSi@(fGZ{0zna!CHQTs~Lm&TR~T5 zOiN=g>Ct5dE2YzC{F(3g-~acN<#^%Jr?kIU`m>=e_tIilEO;A!ub15$|JtC#P`_{0ej=5yr9n&DToRWi7w(R>p}h*Dk!pKD)uNz8W$o zF&ylpXjS%{If7zZ#%Lqh96Mo8n zeh1rXv^_k?PBUNdUHh@3L^)Y0mQm%bl=z4rNzPd5%<#ks5Am@b^(3z>MvrrjN6sX; z0Pp9b=Xky|yZ&Um&c~!|T%BQmfstz#+4((Ua_cx|>CJ!|j*x zJT+9)j-P|>1K8_+B%Tu}N8nUOY(~<_@O(Eqci=|)Y8hqt2ipE-d@}LF>wbr06_m`_ z)kbw?7BZ1ma?(rK^>5QN#iRdAEg1F9Cb1va3Z8b74~&i{3#4+ml&hlMY4}-f^{B4WH18 
zDd>8fR#n$V&J;V;QdUaf`O;VjlWIwNHf6y*$Z4 z?Ot<#$GB>w7y4%9&UjrMmyJO|E0Q2dBD&Je9X zq1LpbOJR2*+^{Ccfe&YWwi+)P+5Utg711S4m+5YpstrbHzWCa6e-nx8crdZ2Sh?f6-MNHJ(|tKp%Rm z3C`w}yOHSGLN+}`I4>40Wli@U!=E*HFilICNGz@7D0LoB^Via}Zr*4M-tlmntEFzC z-!dM*x47PlM|JSF74Pwg>kaVgRMguJJ--og)uto0J#EM5Re;@Yln=mx>3Dw}7^7KM z_8^~1%f6=zU!!Un=#%kqA^NJ~L*JsE?Mo`B(1jXtzdl&tYV3ut7$rF zZv^u#{&6PAZY!|(9oci| zM&*CwelTw$u9^trX0SS!cOQekzp{p|t{36w9HWsPai%+-u0Z)B?@U$tTN3@A#WWEu zjZtP2jc83vPWHW>t+W*V+(f&Ef!P9P_0aSlY6g+zOnOruua}|yFx0P5KI1-fhW0Y$wGFNqjV%A5dFs@Eg;N>^JlxuQS1$ z6ry+FF`$Y?bdj-{QVv@n@4;k$Ww*M{oe|oezx$P}?tS^M4?#~4JS)I+I#i;F ziugGTA6|e%X6E$M`&&lppQGm6qVDV-oU@xk$6o9) zJ@4f1eS{bH`PJX2o*J^&<943mdQX1hD%q4hLD~cE(^G_cso2dE(b)Cl2)0dj0{%FH2 z&td_MyqOWIcIpe)H`8hyI4Hdht+4EY&Va2m-Z+-SO0{EPETPh-jnH|UnTEnH=X(R{_6Y}$P4kU0o+!S?PsW-LLR5n(}mj8Ls40s)n;Ag-FojkQIyR5 z9z`qrEEt1PdLue|Dl-x8!_<4V_Fy}1b|c%&5j|oIY@(Vb;_=ORwU8~25k20b?dy;F zztM!VNPR3UZqqK#h4DdPp3Sz_D*YiI{odUwAZ8{fqXAFBusKSncw@ZhOL610A{JQ$ z&S20EV3mvbm-m&g$m;e*$-l^F9(v0Bp90oZTDsT3xm{fk(TBgepP8N8;Wv#WFS+;@ zE&2?Y{{x2o=;ejJXQ*uhN&JCzUxM=2!~$oK#UpTiPrEpW9Z$pIjzv3v9G++1Fgxt@ z(C#h7y94P{9c}M0(p!Wh+xUlMw^R_porlu<6Yn4ia0`@}@d>h?WPS_7ax# z02n=Sc(M{*$)LGUQ&zbccV>!{)`&m{;_-dj=Am@I1#Oy7<|D;^b=lu2HMa(_mH!h_ z6dc=m>t)|9RS~BX;U|p?KaA?TJ4$5+)&u!OQ;?R*V=fPvC8OqW%UiD z#mUu5?DDKyw~%pD8gQvn193Ow%twROfvsfBKKhoJWE&jD;ne|3{Y})EXnCREm08@c z@OL(={4+i4@2{yEBJVTU(parSE41y_4sRmUQ84TZw=uZbSE-udwnoEBz94gaTX_0< z;Qog8&4$nUYW)LSXwQzOlFBC{vzq2aJ*UIDr&uv-3ep ztl;gQ6JKs2vqo%Y9P4jPpQ_UD(KO{{bRCcC+OE$~=0+tN!FYrJS@GMLM_Gq=MJbwv4hO@uzEGi>@$&XmYPcEQa+fec)9VyKyexXJG z5N94zL*mi2u_IB^g$x_h#YC2C#3jkCdVyAKb5(=Sd$fRcQj6#q+m zAdjHS$!HCPif~9Ak>BN%&HwDPT;KQJWL&zx=YKM-&-Yi#4nCydMdHlN2WD@B?|CtUAqp zs)D|_IoV#St9%CJX@lrS*$#{)9Fi`R#1~! 
z&D6JlA-VmOeZy6HB@h45+d32mFMF#bkNWD)s9j>}Wc(y@3|`UPXu$}dC;U#{Z|o%B zr}>qqf%+-Al=?b3?X%3iXh8{fIalTQ(OGIt1YgQq+l#$N(+|hV>}1j%ud`3 z=%GDrX}~YmQ%6r2#KWDV^m=XLGT5(#dD@vx^f#k2M=5b6{k@)cwW0yDc%Y$drHx3l z3rc$Pn)UEDbCg#U@n$=HnU{FNcZ1M$2VTW2Tl`z52xeH zVLKU*PQ-x)+J)EYQY|g#%i8ZNwHSjy9D}~}wQ4DSIZa-u%`Fexfheg-W8W9)57T0- z77u0gsw(|@5%q78*Fvp8=G#Xr(N@bm5?x1&9+zn~zVqhwFioz|Iy}F`(=)_?Z^7kr z)UD*d2a@pxMa;38W+bC*F@OKHRxWuZdyz;jyy~aT>xJ&B?u}LIJsf!xZpnN~22xu3 z2I7zj`1m(`%;;(zZzk^kQk(TDywg6;CEITP&!JJR@pdMBuI6n{@>!wQo4t1u8NNXV z-{9V_$!eM!Hu-B!N{h858Qa|8&4uWB3TBt9;}h-p*&y7n4V%h(*26iYX79jy8q97Y z*@fhEu-fK&w<#UZh+0Q~HPPQj9R0bL^*(Rx)?U?6<7{|MC#Q_Kw9qCm^*4Y$JOIP* zNU60}@ho-3XB|j_?QwLmw>o)#i57gBx6Xv$rQi;r!&6aL6MdOyYfq|MiirGbxL)Y0 z8IP1v&q3f;!pV1ikD+VNf!9py`n|fYC({-9dWDvJHhvDEO`Z78W;ip=ol=JVGpykR zRJ=$kW7XJ%Ei_YmOIk9Q=1uZObG+Wy|MU%Jli`_YJq1q|(%B(uYYxlIY1UIm2fXXW zax$7eg5)+TUk%*hyzcv`$Xs<}8a7RI($%l*BtDt{%uMQX{7dUO1pQy(S>pWT$zd6} zccMqXhS36arVllrHtsaO-G*fhq~WKL=((=4-_@oK$av_IUc$QT?}(YCrEh z3T}y>UF6*ZCwrjcMH0$P!y5IiM@w6r_*T?Bl^$M;rz_p>K=v7VuUyD7cJ?a&xZd*) zq;N2+x!U`?v{#EoPqopWdLAj_#qI3rado8UF;=ZUeu|c*YQBb-YK^8_)%E~xwo_XL+@8bBj6lz0{zlT6**>4)%@|((IFj59(kW{C z6Dnr%stfTXXA(cjPtD<5+u^}AO0D$PHn7j(FZ(Jn1Xr5FW`i2PL{EIx8OmimxTWvt z#ylE%Cjaw3kK4p&jwg-PT3zj{c#5tlSVlV^RYxP(wWTAkinbTwcCrND0)3OetR!Dk za6CFbNPK&r=gCXyr+oU2vuXO7yjTwmZ?&?SQOayb_C@*@tc%^h8GkClC2+q~`Wf1|f?Rgf%nv-fM@yS$ zC!=>gPK^a)8;X0PaWM&=tlV_c+Ag~4facVd{zOC2lk0IJ*cp@h@sl-@`Mba5L{BNoJcp*1vCVQMn*97Led-r*OL0On z+1@X%6K@A@#UifRQ$xm@N}n<|n%T-ZMdVQ5@5Cyl72SdVe|<{pP!lPY{oC_g-}%Ge z-{@`X&4}O#e}7YddU_fC4u*-X%Xw0Q{zu;>t^|`(KOiHlq3{mgDv@v^xccOOGs`%H zjT}T~?O=VkIJdX9d%UQo5=#j5O1Sj77AI?@&Vk1a9xXdc9O>OFK+OnnIGTQXUwT%` z0?WB`$@mXWjrgUkO#7OLOji4C>ih#g`!wHjGRw(KYhB~#2hiSo=}2<%E7I@wFxv)` zig-B7-8Vt{6r7#(YqHj$FUu}-y~|s#u(8BxnVHU-{LG}UbSHf4!#0w?Z~zP%vhi8u z`wec?*J{40{^Y*ZbN?OhXWjm6o}dCf+>WOQJ;!=ix z`b63Jvoxmr2HzCgvm*Ia5q{S7-NjFgL(9HDa45R`@1N2fcScRq-^yMsk!ms}6X{0E ziI?*^6_!KcTIywHCL?zf`)2=>tXeNa_cKamJogEDbZSu|rShrx-&cF_ZW-A+0B7gJ 
zFZTF?-c4fuj1i}mo#Gd`c)PLs`=y%Up}SVOsF=?%q=M);a}kIFb(hK7q!+K&#E zPLJ8;Noln~HJXuKA&>H`3|0T3FaIYvU#b2!u(=oxH}DgWkY8OK9M6YjRYAs(n(()6 z_|+D?YdxNIsHkNvFFBiJGFICdcJGM9hQnk!SdFz4Z_|$T{L>nqdMGb;6MuUy`S+#G zi5z#+;2k{pIzD(D4rYbbvD(=oU&l9Yfmw0@kJhp+qHT|9xgQc`eI`~pAGf=Lw1SQv==#rO*M#)z!|}j^ zd(&Z(o#(%T%hhV0>3Uxf8qww()N{WU=zKiNYQ876O)cQ?5(`};u9-%FDo_h|8( z;NAqkjxg!&om%Re0#1592WzPo!+DL*vnY5-?TL>D@)*bCb4J24wwe~KvNrZrJa4M@ z`n=T>YDunsW)X(?ZleEkwpymslI(o@GOVhj^LjDETi|!b?dNG{G67b>r#t#asp%Uf zdh@oOY2Og|ZPrW3iq1>5@t2U%LNzAG=s+6LmBf!xS1lY%d%auvtCc&ghzT~q_9=QW z4^HQy@hhbdqJ_yj?Bm`Iu$zLWM^SJ&OGtzk%8nMJBr{+;N|PCKF1ayT046<8iivyX|ReA9y{1n|%veuR+~X{=5fVd*ac2+CPTeSCK$0b{pE`i)*0! zN%C5bmJ?{;uVA$p4IA+HWHRrmMXwM0M2GKy-Ahel*;qB-FN1l!`bW`$(Vk@d_+(z8 z4f!o7{7@$xxJG2Z35E^vrmETei|`@408R#VlK$>OlAl0JH;~kEsJI+uQ%I#7J3kO- zG7mJ6)TV(v34hL1dH{GA(~}2L(AcjdaCs1}-O4{TM{nqEq-=LMzNdyo;D`Ik^!+uP zecO}l1Ut^%#^@goW1eC@8o%J7&cp3C zD9cFs6=e2>^5@Z$wK(t$%D16n5!|i^;h*@k4wjeVWM(=3Q@tyFdgJ)V?(E`OvO452 zvBYAMX#tP-SlYL=VFw*eZ=fSSe1iAa`~@nI{q;*a5E#){kUqvLU5GhF()KaGD*p2dskdJW9h>V6dleRu*G0k0kH+ipTw@h{Z72gDe7ATm-q0w9~!HYWef53S`vMoq>iSAS^YkY?q5gi zA7QH->HKzdF5$sD!L=G4$$FH}aBB$&!&&BF7Q32e4Cg;jLCd%F>SprjjBCjlF0Fk% z6wHijf2pn|aOjV|-msjeolkC8;2q=H*YtEmp_Rv}zY9!nVc}I^aw5%n34Y1Dz7cmv z)0HN0PPCnj@||L`tQl&pTqHZy=P|mQS+fgK^&J~&4yRgW^U%zDC7x6CDic#}nxF0@LqeB_Ru^m%E?&oStZ^ zhsw|4^`bY2@?~G+QoQF{I`g8>Xny4py3OP)puAh}ZdN@hhu#10nHU`ZRlMF)J3ob5$A(O`jv#kV}8K zAH41^erMOx(mo+6U0>XkS@2TL+0i4T@u@xIu*nwPgXPvA%9NtXjH6d9$|T?N5D;e; z&r7@eWNbHkO#B`vw&6)>2aucxH;T_m^qlp!CxDP#-15eHvZMC9`UJPL<78jvjGim$ zXnJLP)2MzTnLr6WTX^W4Og+n!7hGo-zfLqTJIkg=*;R>i{px{|D{($^Jzdq^iS7-f zmDOis(mLIvdv!FF& zoQKf$rQWPmaH69+4pLi2l6#}_L9I_B8#p7 zdKi?9;|JCV|Er0+9#LuJ>Z=O& zx!|56Vz>(I^c_3Fwki*JySt4@H|y4#f^!^?xeL7~^Qp~wi0rh`pZ{&ka}A<@gZc5{ zTE^wPaBa1((BeJ_@6ycG;ac7Uw3q)9&kVw+8s5m-lhtT`i!7QI5%XG6u6 zR6Qjs=<2;2{eB&<9w*P_;kNPJj)t|u%Q>X}y5}R=dR3*5CFuk3I&JtpTGU&`5*gz< z22RKcM#4^(P|CyPGy*1uT6OZW^Ku+7j8Yvl5Qztwq~GhS8H8$ 
zx5mxPVq~AJVK|?R-|=Y8eiwDr`?RziyqoA}37B+Jp{|!wf1v;_ht|2r+0(jEg;l_|6KNTt@^G3^*mH`fN6XB(-J0= z$S)bG*~Rxv<ei%v5q|c7UnSf-3a^u;ki3~Ml{<@8HNwG+>nGY<<5@!- zoJga0(1`T1CKnzd>zA_QW&Fix_4i}VT?=0R0#$2Z{34j$*krO;yVH!X+^+Z#Btm(w@)p{AtqaBi8(mjgG;IIwG0} zP@KNymp+5({>L==NN?SU)(2p>kUu(-7LMe}vPa5BSRD+%iEycc7a7yPLA>+}vF8?e z^uo&xcsxZ*mh7?0EOVn$JxO4#yPa`ruJ;%6 ztP;P?J3IC4CXqm8_|H~jW|lIOIGZ2*8h={R!YA;3toP^R01d*RB%3|Jx8<0Ep+4#5QmX|W&pav za4oGEtamjXes973k8EWDoI0aEnQ-aPWZ#VJ4YEVK-H`=m=B64Rd`_3%VYkWoZbTZ> zaqoM0K84cs5l*EKSyj4$z78ajcDS`p=??tE+caq+XbVxi6y)q_^dZ;9QY04MKFx+o zcqS4n<*X+XOfM%9S;l!2ttE2JC-s#6O0xpPMFfeq|Gi(br;m}l^=V7^kzPbbXiHEM zk$vj=4*GYOI4yUx7kaWkd$EG_8cO>JX1#fymT41`6?2T3vLU{2gwZ&3CEw(I@b9P3 z8$p|8wCN-ME&Jx(t?u-Y63Ne?Y01|)#GRIYEv0SY{zAWgitqn|Pxw*hUVjuO`pk9C zFZhz4_9CM{@j|s(&rG!(Opnj;yc-N&U=8toIq%|FIFxx~f0}Uz3!Y5(va8F%MW13U zy{=!tea1T##So|PAG6f9f;Ns&GG`X_6X8C41Oy>tUxB&5$|7Y>3Z->87%3q zG(matwA0Tk?OmF4ymu(kN$uft@-vd{GfSNxX=R=REAu?j)~=pa;(4>H-XVBa+L7~I z_1KH|vtBuc zIx|tzv>)HF>d<(wdlh)UoP&0IVRc91RC2k?;l!nC`v-1L0js7vmw9g~s)K7?diu8S zWbfppy*uedW}q^M6B|2~Un=$NvjVj(2bsVEZ}X!|*nbu8H>WA>yx*ROyG#o(Qw(sG ztG|GC3H-W=$+9*qEo(DZiQC`dVYBaATXY#En7 zP1}^XdkKs;!RH7uP7il3)DAVq<883Y_)&7k=i*f@lr8}wr;>fZ)~YI-%z$IG82yx4 zO|!Dr!-u%DupSwnZm>S5gA`FinOM|^5Q zr>@eHO(DN$i#|X{lr(@x@?>72GXrq%Tohjm^NjnB!@r&MbhEy4b$(<$8B8l;;4i?q zPR)zl%So$efKUT9>0x~gkBm=850cxy9kflP^>>t}{3vbeGMfHV@Y|_(3=9@&SvukB zvqkw;+n^xo4?X-Ok4L%#R7O?C9*Splb zp5&5qJC0E4%3B|j&5)wjSRSuB(J@wHm?f$TK9?!gf?e8!l|1hIc$Pef>`xH=>fzTJ=|9N#qS0DU~)y0XOjNKx3q3w-f61u`DjlM<2V$Z zhvHp)#xx!$QP>&qok+({r61WHbAzh}-pg3QTIKuF_GDun$F3)k@=SF6)m1B2dKIn7 z+)54Y!4fucBl+#1+uca12D(?NJw2-IJeAWg9{{m+VO@VhT`Q21ZGR*BuP*d=qq}Qh zdz(9b_~m(YW;uTU4*yrO%?G_x17=O}H`Hflrv)glky};*kH*6(W^y<9-x8#=acnY~ zcf^;zc=e>-Lw4W%7cI#QL0^2Cu54D#G~qWY_|6KFmr$79eV+i~LHHctt>2K;CjKY$ zOr8B5ujY(+WQHqy_oNq+J-qtS?ptYKLuHd+-yC*l;dW+{+q0#!+*!s}uTlSK^n8j- z$AA)UB^U7~C6fD69$uMK-i9Zc0EG@&1jaqClDnukkMaP@ZjRsj!3vhFh8J(}4M z<%7v?lBchu;wyZ(7bUCJycjJ9@sk-H%Nno+_&u2iO}6I-m}H$qJoqA<+}qQvmEK0v 
zw!!dYyq$xVT5Kb;_XF_%WL}^>d)q|LKSg!swNJv4$U9!BB`nr~y9z~V6~dYH3|G^Q z1}yF>u*RaG9qOl{e=E=T2hwU!ZW&FR4b$47P2>SKtK}Z!bQvFb9tW?6dA#A>zO#b+ zBepnJeb3=RJDAVLrR%+Yf#`Xg@=ek8mHMaC$jL0=8d%-t*D{|DYR+2WyOsTnRt&|_ z?VzXcx80K_U@p+UK8(kY!mXWBpNWXp;mAkyB$kmm$!l?VJ*b(}nya3F@ILLy_~ioY z?4UDT%=ZmFyM*?fg^md{dABxq7~l8~n2o{gLF=FA`Mzg&nKOQgE{*p8Auu!c^(u&I z+utqXEIyWId#b{}k2f}x_yS(JRQ^reIFE)5qB94wkgPt=da2Wj z??L4I<4WHXUyPW4^pZgO{WZ_jFr?EIPQ z5>M)a7#Q2!*~6Oy3LL(2C+EIf9B%QyZCmGZkt3QX&EA7Pgxsu(KScq$h@IU9rlIfQ92es5aLHS$V|C8(N zu(>x2YNW2E{?9_=K44`u^G`VU3VcpgE_*JtW~(33nL}_ry_p$G=OoF7?0p1E_vLqA z*E7lPC1uLY2jNQh?!}4Fz8J1quX{eLe-3Y!@jzKM^S*nRvdnUw<=!`}cpd(%P&TXJk_GoKwvr6CpYcAQs3$w09*>^*nCptZS+em)D4Wc# zj{c_~SNddM)6$+IrR$Q8l=QbWA9)#S4h1Xoewj~+e(mpnY^IEcX1({GK3saxIobV3 z=(GGTo#K#8@$6vtGCcP}o@3S8(#ejvKUCejuL}B zNfNz_d+&pqy-6mJ_0u%4HXk*HH~hO&S&i`^*hkWU0owLh?uj^bB~SN~7~^Tt?G=1o z3lhA9*0kiiGop8+7N7zAPgG;(N@^7T{diC&@bHas@U9{bY{rUjQ?@yOT7x&pT9357 z)96lF5wCs)^WMDP2Pn$=hplvY8V(-7Zn7)JW?Ion+tk{pJv}I8Pq*e@+L6_ZU{r=p zqND6G@qp{WFq+Np&hXX>T)UFZq_uCamy(`YUv11ZUUjOw57O&{X?0?&Wc_@PqkZUj z4SJuo!C6N+mj*n<5TuZ)H~XkPc}pj2WuBtJZ@bRSU3TqS4`wAiI}ha}y)gvL zt3j)V!mLZaMvImm+|DD1Pqnr+@uwTO*&k^YPm5zV>w5Qt#0PwHj65&(}!l@H-hzWb$7t4df->bo$P^|dAqE_ zdKop7)v*}Q+i3AR`A&}MSoKWfeb%tM`*Cg=E}x=3oK9DUqkIl(R-z;Q&*THHr%TV$ zw{3;CR{$%0hR#K6TAi+??^Ho7atS&YdviP8epUNhOPyEI;+Z72%>Bl=b~?`Igs-k- zc`}Wd>Rz%Fvm(3-2;J!9R%YxAFizjNH%j-U?c>PW*SW_$m3GHz?PbC1q!X^i7|K&0sch2>8=c z)0ULm@evQO*Kf#V80fbuF`GYbL5D_?`X>c9UI6z^vKRweOB&OWTr&o=8cn-NZ=E+^ z#HFl}c!cj7S8!wjUw<_`d&4yAQkH>w68`Vx@rIE{9g)??;GXAcc0%Y|MDpR@e(a@z zR;81cD?79dhkY~O=}(?S8AB(_0`lg66Q5=wmT{HBF~H%Ede8Y3uI>KVB8$SB7YBJ@&&W1 zs584fM2QV#y#bwH^3z;B$x{ws!>eiC zyR_hS8Z<#oneXg|?oll0ozZ-%kc?9dqcugEi4)ND0Zvc;}IC3Cee#`fZ>e+@L74h;a8h9N|tD&`Nt*+$u4M2A{ z@Xyw-n}{d9;5ZLv**9n=9i0o0ooafFb}olae?BpLSGQ7I*64hqhJjki=`8F9v<@$H z;Tg4#MMn)b)(T~lK-;Q?%c_%%0c2f$FTCHv7aT!irQH`Ya(BCu1J&^muaW+5#^E<< zqu-%BYuNlSZveZ-O2BUb_-CF+((7YU#!wP+WpLbqE zYgVB?xv9roz z_V5eevCOe#wg9B@xYkh%QjPv?rcayI+J=NrL*rZC>Wb1ebfBHMV;&8@2<8vDx(-%d 
z@GHIadGJ_Bv*w8e2Ggy})iBZ3xoUWv&bFf&8P&@9*v-+kH<@Mh=`;56m}glzbTUpS zR`?~JWsD>{JY^iD1?@_UzY(UJ(fvFL|JC#H>bwPyv#-T!^-af{eU!`$bW@&eIXt`L z*}3Y;D!Spgb1P4f6GSGFVo&;2xu_?(IpdU_0M0dJF$b(<35@f`Rcx;-N>O6xkE%MF^v^)kvvMZj&!OSl|SZK*{{KgWW_juu!-hahk z*8Id9CAYUW_;Z(K;;VQM;D z*)o4ESZ}VAftPu&cX1%+1(rU0aLEZ`zj5zVpUh|G1iL#} zepfx2QZFwniR$}1Rr&orOMHJ2uao(}%r0i7co}ZjW~+DOR0VG?6&qzO_i^6I$iU&G z-H@c)s-w3zKi8sX&b23>zSSG`Vf>Q67PPIdlI!T!3vkX3tU1MOraRe}Kc~NC?l^H| za+>PmL^Ylw9IM469jnJ?CR{8g7RV&=dE?MbO&+>nVzkl#P86#+q=gH$d#GRhr&q*AS$??3y*?#9_ zxbXW8nAOI)^jZ%?Z9?gx-n@xUW$*K6P`^LCMzG)eyp#U(A;y4}&#&@qwLYBB_dgbrUFx$l)+er>DN0A% zQ?m5=jql9bm-+rm^pZ2dBDZZMn;jaaqVj3~U!p_h`N}#<9#P0-7a9D5KIR0*FYvP& zyUr-a1-KC|9YAJ>f?5Xpw=665b@eOXbNwi}Uie-Cf2S$A*!_!9^;_QaX6?n#d7%~{z7Njx{BlkodB8i5_{%Ddx$191 z#+CWB9;E!dzh>UK5{yMSbS4isM@0Fge$Q3B=W6`OimMC3ud6kFhYlTKoIiVY*C4Ai z^~0;G@g%gQ*L98faH6N#+k6{un{zR?@J>6?(3l-`gvX~m!gd&BUC2zl&P?s2-g$__K|kxASb6 zwmvgTXZ!4?t;vA>(EqdWJ}vw!qJVQr??PTSy`b!a{;(Eowi>R4SrtK z(TEdzCOhx?Cq)pY@YHgy|%t+Sw-j8)4b($C%wMx z%5)7lZ~HZxrMLGi*-HnL$WXR03J-IZ_W)R~g3$^-cZqktRW>8sz2I^rElPxxvHAu$ zoK+q3`H`DZ{36)t8zqalGEGbms4Z+hfOBU1pFqj0qKt-x)l`P@a3wQ(zuEs2;F$F{ z*_|$#t7F}-ic5*tE3nFI)DTI048lkpxf$Pk(4gb^_3vRm7mk~~oi$vc^*KD7>Um|- z$&R0c!FV3Of?H+ zRyl_5cC@2sVI?&SuhALLZ^p&sa85^iZ2lYY`hy|IT8myC=GjMgi#E3xjBC-P%q3=C zJXyc%a3%igV$Yr-ryi_mu=*pVG1`He>}fH2K7&at>l~cDq$7>_#%$o>)oB6CjVhAd`A0j?7FcUFIO)6AUvXl{|4tXN1Xh+?Cg=#9FsMjy$(Oe zpPX@V0d8MIx5Kfm{%3uD4NzY8&UW%!L?g1-!kavP4K=+E^Q?|aRB|AhmCo~d0MEC1 zqXt;p@%ACD-C?XRJIZAyyenzF4Q4<5u1VXHOPgJ0v%0G`j@|CpZ8+IbOPL&liR76* zJ}y)$bCIpozAq_#iHd7TIa!bIt8tkcGImxC=Qb5OojJw6tiK^F=d*?yYDx~uv+7N& zP!D(B!`+r_p&^ddK*tqu8w>WQYKq-t@3QQya=Er~Hf$e4!wy&T>1}r9K9j~Z^Tzjl z{;NDjXIy?4{53QwCxB(=jHhXS{O9W^Pi{=+LwmBLIA%X0va|a%EX4B8B`7`$ar5YN7+6T|xSLO`T$x7#BBTPVZ7no;- zYtEIZ;(1G)y~On()_V*5r<46*;EpYLl%05PLGx^*%(H3TM%wzk@A5o!FTVOsc6A)d z45f|99a=*VZ>C3;@%C_VvJRk?H={L?+5}}fq3AAll6;@62Rn>hGIM@6zD-gp)CRip%<60WCgw-CWmO8FmlGs!nxd_$gxYL#eZ9#oT zYX8V`H@Tbn-DFfR!Mm*O%j)Ljx{p%-4Je;WTap8w-AWEb&#UyVdqH_-=VMV_iwLAA 
zY_FgJ*)wGZP7P3YBN)}ypKPnltk>b&7J&FQPS?fJRj%UQJ_U1zrvSg8{UDDt5=LD>>yEbX^+Yxlt7w6=#4J7tF3(DSr?eP41y1UYMdpueU>KPz>2B%~!TnWnF`Yq}A zCjQ>57z?ZD*ZxKPo>R+9Cy$n*^OQ)IWafgO@jstafB4StNB{fxkA3diqQ%Ub)h{5W zhg`qtX{4W$tk9g$ULV}#^=8M5J^h~a8!GzyspsjLr*B+IG*sGA;R#&W$hNXiar!dJ zygbzZk5IRexB8QNKYo%`ob#m8-zrn%2!A<8A}b3sXLUO+o}~U{J*P*KK5_OP3xw>0 zoSt)fCRr_+b(6Kg$?SB_xcm;RoM;tG&PvPlfWw;;z|9=#7;SnvvVN8Rm66yzJn*5k zF|+H9=tKpwsSUe3;9eVqtUh~#57`V#MhHd~T&>L}%Fvusr~ZVSWiU9_vlsAa2w7)m zi|E9|H1amu|2KS_Qbd_KRpalTe@;7F!gMz;f39DL(lV!sXwmbmHf;#CIhS*9T)mZN zc@95XdQ#C_rL5`XvG1cLx<@?+gESEANa-b9s|dTJ3SMm%(PW>J|4>6YbaX8&|_*{G6*Wr zi@n&*4nAO-67|Vu7v4mcIq&oLB(i5!;E(!{U+IaL`po~HqmU>2^V6jg$#5v0*b(hK zR87g+_)Q!%*FB28E6>+7>q-ivB^%)P1!&bt=l(#*QGU6$zY6#XRYXAwzjP<%G$fP8^3pI zRi+t#IS+T!FCB#b0r3C5Xlost$?Wqm_@9p-iK};$T?>}egGHU8CC&_T^6wg=GUqL1 zRoD!gGnOpUt2qxXLwrA7L~E0NXybA&#Em>g6_H#MHGb|odnJEMTAAz1+)Qs2oP&bQ zkyYadj)M1Cy3@ovzx3uD*tEvS%qKn1`%Z%AYPC$|RWi$$(e28lF_6_Ht1&Az+qs(z z-0OLW{`7H!tDFsYL?QV^HJSVUg3Q-@VQUhtZz(u&zx1Ybw7|i3hydorWhXGrgt#Y4;p;zky?s=O#Lob!lC_(}_oX zg;w-0;=yDrX1D0EtgN+~wkm%Qu5QG&#A-u8uA|OgJ~?&qpYSg2X!uK1-K&O5G~zv2 zXNISax@OSkZzsxvF@(sVBXW6ej*%ZmxtBdYj)u4-u@j5xHPt(}?EaGWswxkAol@y_XP3IHE4-e)od=Vw z={=X6j>G*PU<`)Wi$x^gl}5GTT{4%p8YesB=t^%^Mtyc9O++>ptZ5*&)e2?@rR+KM zJTJ6{h5eExW$oD2%A7%SR=P8%=uggrMNg&926LHOd*D_VerB;6GV&A8a|$f7Q_f@f z)0j?-f<<$j{v~d%qL1U$G{tqs#QJ;dMzsCW`%B>V2ANORzTOP)btuY=;aR*?XYXd` z$VMPGb3b$E^YOGj+srN`-RMA0S^QWGKR{1A^S~#=Y8r`TH^WwDcfG5dlpY0-%t6*5%^~EO-A|78m+`7H6z5#Sdh{zj z-{qvYm{wnpvYh3f{ec2+F8JX_`oY0D<86&d_hi(r#Leu7-kC(R_9DH7oGZS7R_`iG zH^89^YH0*sFY=j=!cV+&Jjsoq@yU_OeD+!qL{?zWR!{GkL8_nrWJ6NWlKc1!Bpq`xTb_iJSp?nG2N~7?nllUmm+mYlw-dhXe zWY}gOn4Cg!7KuFz)92_$d-nGWUhlt@`>nrsU1cP_73_QCVm#3i-dsT=GBTQ-wYtJ< zE7~&(m&~KeN^JuF0g}pUh}*?k=N0e2L!Wb^wpo%A#E zc_%JEhL+N)k*htsj#tgN_j@S6l*VOLI4wmdu)fpRJqRwF;Fi7o&Y`&%p)_v-3^Jl~y?1_!2g#mEYnm7=vd>9I87Iyd)hETB8|n5C zQfQ(bK9*fP$=j?&)gMVMxzj!HG5fG(jqwT)k7OyGaW*5k3thkNJ6`2mb<`q-)RLHM 
zESuPla|h$m2g(l-ku?6nPh@Ap)63kmw`Y3XM<|!v*~9QO>kG5WZR#p!cd@@@^mp?+b9z;j&#vm3%dD-Q%{X)iZyIO^xj(ePG5!0jN=)wc zaMD{SX8FCkYl%O0peA`q<c+QzjZ#J`pPx$U$j368#BB)PF&FE|9zq3O1nxfaZ z11+g-7wmf#dNBgzx*(>1lZ@;1*!Kdx3;(x|>&y!#%e_4f&Kb3wgs^52x|F+13#(qBegB9W7L{z!4(LK8B2bEqfTK`1(r{E3wM z_s^b_nzEku3HI_Z*BEi`@}&8fk!-OcM|hnqU+O2+U{c*LE`U&5ZI;NEKZ zWe4yHIIs^Mt>D>i12ws?$zSaVYHvO)y|L^4zul9p(ar4B@mj+~?3r2mg?6tK@k>7aES!7Wy;5&(7ke8{cS~#3R-khpESACN3tCeNUz+nl6M5kHs-|!_jGkva zcN&fy?!EM8Ki30o=iTIy4)W%+IDI4-4-`J&bP%%>+5}RW4bNWeyfvR)*P9)@*@1;$ zh$o}z=Pe*~z@0PcbjE@^7d)E)^JEyNe{d;W+T(S9&?n((&K%0#itXw7^IF*KdC?uF zm5RvmZ|t}ec+=Qj)-DfF(|qq_bxyL*Pw}faC_U-d)!=;Y?iWg@r%;Dna^iPA_ufQl z_Szh${y8kDgA#MtTXwHZZ{uwg^>i=06SmQgW%t&V%H^br>|Wg)U%R@S$fm1$tAd&g z@vPmfsZ90@S)rYpEe3qqugG(`R%tu9Rn$-)b;H5GmSjhwHaT-SZG9+fU5fX+No?}_>r~KIxQk;YKsbD2H^l7*!3w)y57t+LJ(4Byr$*-7- z)@^KIA`YCRO^W_rgW8wKWVAYJvb^mmpMjq7%4846_WZ^M)b>KjI8y(WUx_e|q@`2f zxC4x3Y+yFomqz%{=eM%@_htyu*oV&sfDn@XCDAvtX|k0d`hP z#=NqYy%P>zOT)6}?+vsikqvh77eFDW0b`rIcYM%&5k2-=y<)9f_*F>HUYw(O~t zdHu}K=iJ)#Jo}=z6=@{*ehvM3jNEei?Iivydn#Q_XR|UP5nxVy`U;2piV%N}$~TJ< zzUlPx8MReW^Hg;Wz}3vkZxn}T{Gl?-Z-nydxVc(1k!+89jUA=;HAsKza+;YHl8JM3 zO8q##s50EQ;p7eQ$m*iUz|DE>Y3n2HUG5|UVFnIlN3&6Qm~5EVvuR?u zx|kMUrQPiWqe1Fk#+I|5W0V#>Cktd(zcYP*%g^7>HltxRXzoMa+Kg|#=-0vM9|~G? z_(!tO<9bGOavt&{IPnL3KSgA5iTA5(_mXRo6O?E2_!~fY)BT*JmU+s1NM`|`vkad` z)3!ByW~sMz1?^bCA8mnkG6P1T>_If|=3%2_kFueo@p&~_jU(IcV044mW6C5;W3(rk zBTt{CpZg|IZ}tBI%gbQOa__huY|%2{f)+{_9CU6fqOToi+R8KMSpJ{ zo{Se^T`!hAnT|ArX)o_28|{7H?O}W&O#0EFi_}xyd&#%WxJhZ&vlSUWtlTcNPT)r) zk(Wqh6fClTMJH5eM~fkJpcQ%K#OK%8Q&rwEYnw)UcP6g94sv#TYes%^#gez;N;IYt z`UcVOoY*|h+ZUsM3>c+6mv7XN)x^ir(|_Q?%OKBIvKl!}pqJU1Y#RD9a(_O|#)Fmf zZ<^Dnj8Qhmjm@BMqBj}icnQ9fV4U3Ft?=82M3=FZtkd2Ow`j*wzlLhe7=+y z{toxD>ik|>Gnf8lrZOu4hoa~JxTXi+4_AhRHMy{(!{PT^Z)Y@Q5^Fw-7Jcbg;+|vx zw4!@^v4RJ@lN^fC1xFt$@Xv{v$#Gt+TvjKiznR?pv=K+q);g%2iOV(E@DNnpL~_|f z;(UHF7M)RtY3i9R!d*hwj-{P{VW-*U^(uOE1KAwOhH|!ade+I8j%PcJ?WK>n4CE_t z=q!~0o%<|`q0xy

uEPSq%Yg&zZ-VB0`cl4W?9OykzGTxiTh1)xt?k)C zJsI9b*`ck-&k5P4l_|kTJ|Ev{^;Ej)Nu`bvrHgVVhq;7uBBrb1>MY7#W7(X-oaCzx zpuBD$XkSck0}4mtTuNLN%2oF?0%3K?-36EyQKx*P3*&YpDa&~4E^2ATnYwi5jKsNe zhV>9Z6V0!u2 zwBb>pSp%Hw;RbD>@YR9hwKTcztL1tJ?VF3PXRm8NNWwkZ!lUXKqpRQ*TBZuYcuzjA z9&Lb=enOcYDA`rfd*Cu^;Ce}3M}BS9dypueWVIg(bvM%=$kC0y7)uHIVeMcPUW~j^ zA9GgMiTt03l9J25KSvAhfw#<|=F`F3Kx%#-cpU{>;O_bbyg}K zJX?>>{ULJ1T}HdW`dVsv8v1z%>ga%!dXv@<0hUH`>l^3OTAphcjN0c4>TySiz6Q>M zJ!MR-b|zBgUTC&ckgCoMeu)&{M9Zp?=&rhWYPB=Nf#7r(E!_-8R#BHWt+&CTb12vN zJTGAsSbvGu7J>I3U^5BSS}(P6xB_jCMU-Kzs6F(FCt!KPvY3OnScY1fM$4fOwBMtdH$iG3~a+Pcbsw}_G(!w8AqPL}UVx!fu4x$`U+ zqaqp?%XxvC=Q#Q-^4KGk`U&rqVA)SCcf!7yt9rD&%`*_xiG7+TcQ);bYnEH4DKY*1 z`x@$$!zwxS!Hc|DZW5!t{|wsvF*w&}RXLR5t2SeVO4p&>73Qfu_E?g?&Xl|gf$}6u z8s&gez^EPiYIz#j;(!~|=7V9ceq6_I2zPRATPatw*OxMOALNaY=5XiCDYjFOu>`G; zB`6i;!W;RBne2?iw;TCJ8DcLOAHk<@VY^dy8vRb#`qL)qaZG^9Se9zg%lO-|2{v=d zNws8dWMoF~R4p>@^6tuSbx7CKT^l(ZxVQYh1ZbR1JH}j}w^#jcL_6-o8)eC2`c!FW zkIsgAUWPvl=cBwo1cx%_snBcjmga=z7QUsVb|2be>h`_9!S0Z8$AjA1+i<*&aL@Me zjflHeT$|CVD&DD`xR-7zciJ`z88@dx)yg$xMFseIit8q*v4D~24ri@RKLbknMIot< zkZKiU=j>q1a8xe>noG&2hncf1qpz-q-+R)g9Ig$VS)U$vQ9Cy{MtwU;^-F4B7JOkl z0hK2@yvhjC+h+p!HSL2zI_tmn3Q*}!smys67+yrHF9;)2U%he2o+|jxw{Qv9l+`cY zF_z@MFBoyI;qLGQ{AvzIR|>jL@ELsk9b{fDoWdA*daO+VJC5`>fy0$k*MsVjds+>Z zL(?fskIxzKIPLDv6CXxmOaijFKSyt&GHUoOH1SiY&V4pJ(fa2zXKJKXXVJ!TRub~y z25rE<>+?pBox>RPWRbgR>m9)Rds^zMgMK&*sdW{+NgSvhyb|V2Xq**m| zG`aV(J>{v58)4x{uwKmzRF#7+@fvg(S9kU5&|l9TZthL9kCs!KxK`4s`#189AN_J` zzW zRSs-wPbuzAtL2^>Pf8cPbM(xtLKCW__UOs&%smsZ8%vOG>q5S%jG+jj_%{Meme22? 
zE@Lic!1Vsm6Q2g&6Zy9NYwye1vYKwxdLy)14PV~~pUA=En;&kO{aQz>V(XN!X)s`( zX?*3%;w+Zn6MfSg`Fj+otv3eY`sk%653lB96m!R^wJHNL z=}sw)iIB{@8!8?45s1c#vN;qwrA9YQ9SQS&9#hwtsJh#ct+YtIHHmA zTL)jt2LtDA3*TM~4@&UW)21F>^*Z>LlFPCFEfDTw6l%e74sj{G@m9Eur}`OR%Y9$D z;C2Z8vx-`dgSQM%oV|P=fm*9%i*>~!A|DhyOrd257VQRXRH}djdG{++wj>7`R&TU zMU)iRuxgQRzX$I-kw8y_4X--`C)@-tbBD)N>@aKT+o|9_!B<_X01k2$(ykFnrw!TN zaQ8xc+T8V(8b~WY3f$}{9N5{y+2Cvo^yI822hKJa3Gj2M#kGoeDBE@Pc5rs%CAv~` z9k^-{dSe^U*8!P2#0IF=J+F6z-2`-FtfhMT&OO}jVkrgQE$}`gynINB6l$@m2 zC1?bQ`~1O6_g_dPRpBz@z~m$kIpSSng2{}pT*!P}jAo^+>` z)DJBmZk@spHC7ahiO_ZFl~le9<2KB=t=uza!6G>84c^b4AU0 zh}64|=kgHyQO@JM$&Y+E2b|W^FT2ANb@J9hdOCOVQ;voctuqC16eGz+pWCgB6LV`% zj%i)piFxAHzpk;ggsPkI9miiSJ#xD-O(|pg_WVlnRD8NB5TBm-J0M6v&$q~ypv~n- zQ@WeECcw!zjF@iV$MffUqv_fk!@e)elEtelsFtjaG{&!$!&N{@Sss3(UajYH_;kq8 zmO9)E80Cll<8jvD^Grx*PVau`BL_@}H|anN_U2-qMM)BK=xdS9oArp0)1-mC8Q5{o zfyE4oo(iK6uYb^lmUWg!#W7&XqhHNshTt(p0RzJ1Ifzl|fhAfOsWo#H%L`cHp|0@})KFT7>30 zk5r#Py~cssN-xcYPj@4Y{Jkydra`~AQI1l_nT~p@ma?93>c^mewdrdqE0?jknfsmK zcnYKDEJ{i8Yc_h#qgW9uAfvA*^tke_yC^T$km;a zWz_5{wGlR-frG2ZtF;cMl(&###=SH0x4d{gt@ZSX*WjZ1TXfbj8TJ<=#IGRtne?Ok^W2SaKRx;x zs&!ZKd>tunfKQ)69mYl(Moq<}@no@2X^m$YzKwOq6En_3&YVI| zX_x8>A2L$JV6bu(C0Ehc+v%ru{4J&)qriE(o%^b^91MrEJ%o%%!$o#N6Ut;`B9=2E zKA`k%wBt_RuBDx}unq`qr_of}fl2o?&4->l!{hW2RQen7)3rQX^(wNnBXxd8`n5c9 zH-k~F)YkL^@51LW{L_>0jzc9Is9mnDkDM#hdK4)??gqo|=C%B`wCO(hN-zE%qNJYS zrHo#P`|PIB>!vd9$t+r7oYd2Rr#-Z!M+r=nVBaQsv5v(NdO&_pVDg z&Rs8`4<#5~_5g6%vJap!qd2IGX^GI1=;)WalHfu=rApea4}_Ln_sh!p^mcRagPz}} zuLiF>z@~AT^m5w^eIE(f-$0+|L5W(!jd=Gi@0GIAZ|)0z-48W5!d=5G1ZNfGvrPG9 zHP_jU+MF=LjbFPI=;eLxc1;6|r^|U#`sLxH1er@c%eksu7~jLP{M-+A4sgx#`JB*_ z9LlQTX#yw`A)RR>2afr&e>2~|jZezxWA|O{1djEg1Z^&9?y@r>&&#ait6%9{TAaf- z1ueQOFN?5 z&pxIsqfu?3RAuw6)T5WEv3BYhw;Oq%p(mf9FO_L8)2>l)A>$hvaZ5YNQuMjc8FAX> zrom~9F%s$ZJks@pCfzNapq=}mVr`kOYu?THdYI?V^A z9)g#(fvai5UX8@iF7B>{k9q4Vo9C{&`g#nA+}l=%{Lv~mi$2xA;0>U+cc(!!4*>U` zz^^>Jt_S+Be)*9KIwO(va~?vgoO!yU{sgVQkv4d4_X0|APmib6w}YQP3dT1eh1%2l 
z(QxTG^*Xv;9eRxoc*_*Ku)Qam_u8xWg;g{#r85{2`X!r^6mf+2= z`Rgc=ryS+4BlKMK0x9Qe>eH9`L9S!SH;{7O|LCf|{pKF7&9t(Z`y_vjsx4O2;L+HC zQl}$a?fvRdv+I|hS1kSJ!S#;t-mz0h3F;TyC`YJ=(HEX+sy~v_V;g_Z4v^~geS(AM zKov0pwjRilSIBkbmc`(HJ&OFtAqV*@{H3o7q_i?^a#GN*S zf*_x?dnB9EnOgr}xsebjN%Pn93ga_XLtpB@%2LnW@$5)tNgZD!G$u$f8#ubchn$h- zLF4ThJ8AB}r1g5ds(H8y-;X z{i+y9J&~x!VO0I`X-ANrrk+>a~!U^2>CKbH_4>7)8E}D2B zB+0m(`@l){e1hbWD>TCv{`hkrbf$sxw*q@$=9-sOpe+5shL$c|w za6VYNh0uV-$YS7Z zB;^R|`6YByL@S3;?pE4l{l3Z-{45fRnNFVYzj*@Wu9n`xAt|jMkhUJcyWwatm&&#{?$B3NI z-RSd!M3ncdOlsZ?>?iYVH_Jjsexbm@aaQm zYCo1z#yGkljM?4LK`CQJAK~suvu5LE|o-**j1Y_o4NOLpocjuTX?fWEsTuRU8z^Rw8O8g5b^ew2d0ZM+B(pT_1N}A6| zF^=d6;7vm7J&-wH!$S`9^iS~0ZPck})BIqI_JmEx|0LRc6d9ry;CH+mhn0L9 z^9Zd4uGy~)xwZqTbIE4(ozVx6P|FLvmxoQDg*TIbJ9Cj5IDq=ot#HDZfnQCz1PNvo zp#pIGE;&8}@_P97ZJz1}=sK#R#%ph!XuOR;AR<1>PG zYax4rvGyQsw1iMYT^-R9V6;8CT><#hK68||_5}82)a739e#n;2qmmJ+p^RX!r+s>#hKIl8V0ubUSyK+yT@L;NmDPc#ghsHP2@` zlu|-Dy~&e}4Qkvj=Yh49@C@zAW7HY{${2K;kz$_V=B`dHcaI?T)rGZ+p9y~Y0{1qa z?qHm!c_a7l#b14$-P5hTM&3RbNZc7zM9J>lUH~VEvsHEJV&v$#q$mQ)G`aKWwQhWi zp)BX!TAR!z-_egaYMPhR_XS|RhH-7|jvT(~&yK7C;jhv`y~rpAKCP#8EzgecV&InV zY=IsUVf;t`--eTz0gsb=Q6Zv1{o!Mvc zs%=4@X1~bsib4)8Aad8}S1ado7pcolMd4fD+=!-8yzp0O8z?-;x6u7-B!9G5V_C{?xhki~o9+Yi*j(VDaT-%jn z_4)={D4(-zb%W?d7p-0PVvHD*!<`?s0lin8#q{BCDc@)b^qSRa^`C2XROL(GRf!)hSo>m|7O_4LOFK(24o^FaDDda#gmuEMVmV?0m*(O+o_?&b7|uQa{r1n?l}G?*xS_Ys^IHVRexHtmDLTS9Xt$wO@ier zevJo;Xmc*3*PNFvrOXaMw2Zd6s`?eb^^hHb^mGq>3+hcm$7hCiH$unSP~_`sod@Wd z2Ku-qP^wQggU%d9j|0Coc}9bNpmNt1^p)^9B9%uSfb1pS)I(jB{OSc~i=*pYu#ur< z4N%HcaQyZ3@J2@Zf*`r#xRgI+^=0F{_N1;HKJt8Pi{o}l)33cidsWl8ls`w`SG7xd zva8YSXnP5@7_lJ6NqQ@kSHN4P#rffvHK^BlbzbU6KBA770ACHfl`lTcRY@09XvK+l z{>_|pf8uHO&M}f;EIFfa)ld$%0G&b}ZW>oCJJ9E?b{hW%$Xzqn*5h9@z=zZIRqg>6HCU`~EKbBzyhyH`ZPUChSWw zV%uEVaE(Iwr)14AYh8_;pT%40LXPkLi)a&ZMN_RIN{lp^N%JPk!Wb9IKJ3BxHR@&p zc*R&9B^n7iLZx>_#JgPD8kAeE%*OZeS(;x<`NkTRl+n7ZtQ*{vvL6)q6eBv$G~9Wk zS4V^@j~0wU&YTHdI?of5*|cak{A3?IvlVT2r8#mTX{9rt$@G$u4Ll7Xp3}3HH-(JF 
zZM0kKsqfxlJRYT=?hO`Fs@!bdPxzQo9Dbf!tBQVnPY*r@v@mnbXP;aWnTSM3TzpvL#8aR9ifBmWZ0KN@T|b?~!8NccQ>`|DtB1LfDF3$zD{ zowU~ILBh3{8Z(qo80;_IXk|xA(>KV`yc>+h{m^s3+aqvaqt<#_%q(l# zi?-&iJC9>)<>Q05HH^Fa)@btG(%Sfox;n5)6mMf;52~*IKs0oV670H)BRd6(nI~=u*TTiPmldA^6fq#(V8pH-GS_k&?v_J z$>;j#IwVbJB&WOK)_);JcZvS{Jj*KNK)fNyt$Xwy2nt@n)jiJ zfxIyyp*HKjT>ps{3`Bo6KaT+qsDTRx3+$-$1#GX?Y^>8+TMF6*eJ79Xa~0 zs(E}6FzD)I4RrJrXuBnCPo^=&VqC4KW_*ev@MjEAhkM3 z25OtjbMqDQOMWazR{vfM)#=$khJGB!Y`h;5u?%dzOwQHh)63!ssHX>X{P*Y)uRiBii}P__dH< z#`<^8m3_L1C$1M)^K@iW+p`i&X<&^*kt6oByBe}}c}+qaUfZCDX{*bnUG8t_3HO>0 z=6b^;Zeg4#*YvdR370cwTN^CP%Cu%s|7%oSH`4AE;ARQfTmpT*PHEcc^h0S4mi_>jIm++VKF4` z=)T=ylv76@qv^cNY~?wAO#+IGf%QFbUk?v?8E#YoUe*DF5rfY_50p>`^8b_DiM zusnInv+Jf?c;lMp1V%?S&<^F<8pgaai(KQWhPsv5o}96nQSW-WyFR~!Rt5o)CsaCX ztOt`vxpVFNIIW!yPwj?(=6Y~fa#=ehdWQB_0iXWV+NKxNwsXNkg7>TF&2DHKX-aTK zW(WB{0{`bAKcstm_0llA*aDvHg>urnqpy;?M)==gAoc{sE1?xVd-fqM{wFQ$&eNZO zJy&h#ki%$D@;=vDUgSE6uTkg@fq`qeUr9UM4U$8>XOQYXWK1qCT0R()67tCaSqF4<)!?Ti>(Mv|YWY8+3DifT|35tT_P$o;F(m-6>JDwYkT(9Gs_v zH9~7pC!VzqDU7-ymXuCC=}qI+)zXd zv~6E_VzhF##_6PsTrbPxc7*pe&|rr!3Z0=32+(Qgh`c-*YL%}J1YfaExu|!MH$@vy z=KFVYw8i{N2djgeZ#|R|b4Cp}-v1^4^r?0UuW=<;3x~c}aqS_=FCllH`xV#tad%OI z`g-tN`*=Ayd=n$&=P?$e9-RP>_0Ucddh`UK4z$Kd91&~3W`u7c*Km3zTk?sk^8+z$ zZ#aL7Zz^bC%E!nnjHe;wJrwG(u8we%ZR9Tn+PKHsIZrijZVYJ#!S`I_o)YG&S^^x)+tIxe zP4phAuQj8;KeY;DN500m`U2|9MUO9JWH*54uA%QnpnKg-iaEgVzM(Eqhdv_3^p!h% zT#tC4I@`e)pAI&3&ykpiMw23sz6>t`uQ5(P=V<~sw=$OO3nhxN;J(0Cz8Jo(E#tSm zEu;Jr_(Xd&ImgC)O1_8Odg3=Avp)u!dT=t8zw^M=FgUJScRk!szvltu`5L^sB152&Ja!sS)(_d~(JZ>^(v{cgR(Pg`@CEv7^6eK64=NcLRz0mnYMg z?qT>4E>zEIiXNLSDen&2XAIB&@R0^Ig~7-PcNyP}O=$sR`V3Of1-90-W)+m?*$KAn8kU5jR7Tl9sgV7zRnOznWmhr!6}H1E~N zi+JW5*C^iir{tEv;Hge~sY4%@Gcc;Gtp8C*AcMTLH(Q_C{uEo7di`B;J&_A}&!ba%DXpm|f zN&ePRi+aqdV6cqwBiB{a&wj*m#HN% z-8jimMnCZBUa7?QCF;v+F6TC<^3n6a8L>MBqP{IBNQX3g_?ypYsfM!8hZ3~9j|sGB z>B^IFAzj3#rDi`#(rL}>&lA(C-Nttbo@P`17wW}V-Dwl)<1ArElN3mYa`pX8lhz$D+Q_8b4e-EMf@WfSi^*LI zCH#(ezTLt|^KNXwudzFwk3}pNlX@R($HN()J%M#&$gz^A$N04kthf*DZ1jQE%%&Y% 
z^>D-;%ptlVudbk955T$HS9S<4>A48^1C@J)>&fl8(9M9;(<|I9;#mvtQ9~Q@)dNKb za_c3pmH%x_#-p2hal&C-HC@5whWxD$%NlGXwat^=c3+~ISqISJ%>w!w$+}Va_Gk7X?XCPP^ zIQeeU)&lz%w4(ui*nNveM9?e76M7B@*|ZQi++`#!tfe2%2NQjOa0fG}qA-^zr^k%t zvjVB%-oxfl?kJ$0MoVrAoX8U_9GgezmqyAnYOUvTx2MFbsdEL8R?%BU$bEOW>j!!| zv(dHS^iA-Rq<;6(-A}q&`pUCCJy$LtowSTnok=*~-cIfsMyI>e-r~I{gWkpc1Hd6p zw0b&rdXWE9ARJ2`Id~B>k0*e}NMdt%U%*T$LwoNCTBWi>AJ$>OVZ>wqs)t9tSGtWB z>a(%;N$)Kw*&K2prK0!7PdCnlQe!o6Y~jwD;>Va5&BHVO9X*3QYsHCcc+M4K6ucx? zy)m@FG&0tVwjx&^!}S1~`|XU41k@qM)aEPcA@lfAyHP7|4TinbUUe#MP6uh?EW%XU zNc<%DMt#y>&IaVOHMBbVL`p3~xzB!nh!aNsdlpcM9M^y(8cAZM4V7JO6ee<%wVa(cP?&MvI}Ay|Wl2339r|ASa&!XOGZY(=F7L zNBg4f$C?+0++sm!m+~aaUQZ9I#cprJO>aBb`01f6+v0-Ny4`QhE}O zD>dpUY9&VbX@*2E04wfM7E70hoTU*PTM1N)zlNk*g9zOSz6ZWR4RuYmte zfYS^MBXB1idMA3Lr!aeNjO(fea5QaSM!4J+;wCMiygl@QE7=KBE`THLhL=c#UkB=P z6}gJiQ|RI`qSP`XpbJyb{eE@l*Xs)BYT<5?V}^>ZnsC9}Pj zKwS!kJlW54q>S{?6K<;&%J>%fq+bH$gQ#zB(CPn(bW8L6BH$Vgez)>@1PIdfQXB4a z`D%MKE?gT@O{NtW^Y(7|L{LNIm*I9bgY(7(sMVrs4j zgVwwk92sfEGo#!yE(DGIJ)bh(0^7fU(|Crrvu%9nYmgi6FK|p%Q<_?`ey3`KM%T^fJ^|1uy|B14b{3@d6jzFnpG4gcN=sQ{wQs+?D2B3{2 zMY&=uF;kS$f(EXRyIN{^5qy}}ce#x2rC`Nhds*dAhC6wzTvI+hj3??@rKHs=HkBOi z74YN|b?*tt)EWHtWF_TLH=b2dx19atkK@-*9zClz*$nE{^IP3M)^RylOH$8?Tt6fQ zPP{$&u7G@ctU1enj1u(LF=m0hFA+-A{xE`GN&umru#SBFVBJfjzHBV9GM<$1jX2Ei zmXg;SJ1dtu#*uRlYz(`vRqe*<3}6BJl=}=lWpqJ+9S8~yOFP)G0pWaKgqv( zaw0|4*R!#$l&Td_+$ze6G?1VrYFLgb=hNC+dIVj+lr+kO1aveNI*nEzOD+rc@QVU0 z#`-8AO&3PjsZc;a^0!BeYK8u{m=qo9AE{IOzTEwGdT%5ccR!FNDj&Ryp0!Frqa-$h}X*{h5Xpvqe?oq6QMy1sA z8HIWD!@eMm-X^d1k)FWl-bhb1%kWI8)QYcbUHNcMx$JWI#0~UgAD+6JaTvJnrp=zO zwij$^huOp!F{Z{objOX*yYXhbk^T$vzfRqQz>D!)`i1eKb*U}9t51lXDSvVW_g&Jd zmnRvOuOq2m1kc9o(I5Fj#*8Zv4M5;(-P$le$YlgcYwk;`g}b~+f9cWG8NPQZlq(N= zg(uaaB;zIP5p)=yxQWuV*SpJL1f#o(&lp-<6YB6Z`5N%zc>^sOtC#R=95s)H=Psrm zcWTa}wX8WJ(Y4Fy3+y`9Jmkbo-nD5PrRu; zpo$CV3uV3WfTAbDjmYNRP;)hP&4FHjL;c5rz9rIG&eDhRuYSJ-EUA|t215DuF?!N{ zg38XV(68KIU85^>(@2}!2Z~t+4%~^>iFb!d;aNxjo3i!J83is6Lz(*I*FXoSfzv)f 
z;&b=9tNTR1t3rCgb5?sWLusHj>Vs-%&Ur02j_jyu$}!X+jhG>XzVn2e1T{*V@*vO6 zc8^@-98*G?uE^f)K`Y5+<|k+Ilhp&;HvLu(#pmszV)gPQpD|EyciQF~<05OrkpIf# z3ZZo^720r|t!g*dc4=C78}ElsOsRI94o~-nzwYd*LGw_{%vuy$fGyL+SjTFxu1)w| z-;_sBBun-a;mMYked;yMH(RE^#hfiserTo0g=aX6u?;6ajoc3=Uk!7Bx!k{jZZ{N) zmDlSLY|41bkay~D)%2rh!04G9ef5iY(~-W;&N%dKG<9}Qvo@Fb)!)<>#<%JM**mYX zKRTZDOf1HaHookAlD3SZ#N5!n_E143e8~A(TfWNQ*dr52V>~D+P5+D}oH`NkGMP3h zYvgQ}DreCbQY|me$6OmydMS_Gp{G{Tl_ztkVrC3PTuZ}4!KZ&ylL7NoIOoS#u+Z)jWm$^&xXWJbdkK7DPp z{oAVpz_-wOub2MPaIfvgwcIG#57Fk4=vKR^xeujvWegY7!jWlEB=w^7ytU^A*whXCIiM%=Bu)4%f{fwdjKD|oY((kIaF zOK9N*c))xZdiMzJT#aPA0h<3Qz2?aRo}S>`vLDx#l+*(mQwmR4Te+E*8yVt4-l%K$ z1wY$?ra$E#1HKo5&)ykIO~&HAiB#_1xHHJDk4R(uV9y0`UG6A7JR3PvO)qVRzu$%A z{g_g23%#}}aLHFfDtV05P!6Aa8L4N~1$gEh1SqZYv_xSP}7s}lqHOw z4)hul2yMgm)}fy*<)dmn?7*=-|ftA ze*~8GH@*t#`*V81wT3UrouXc2|J4EA8T|TZ%JCd@qj<~%tEcgN6c8E%_D$+*3q>Bm za;W!@D;4X2(vt~1W2gvCW+JtGP8!d<{g!s>h13_UJqzRwq!`XBhPq=vwCoG1$q_o6 z_nwom1PaOFt$S*pl3G);++823!}Qb^AZrA&4wUyD{c|BT zh zo0o%y(|EXg)f!ty{@uu5HTi3xEIqZ=i{;6SfTRY9wF`$CnlEMqsPPP^wAV?$mV86#mnurLCTA@f{w8Qwh9|Z3y>?<_C@unvPt#Y8 zXm&f%E0vMW8BD&|vKpLv_OUu>1(fP8;bDv-sZ&3kN_5T^pHPx>qNk2Pd} zpGgVlLw)1ITRpk-Xs)7FML=h4u0(hfb1NsN(8__7D<7Q3bvE`)XZ5A@`+A^yksehx zX|YZQ_+0g~cSq0@3wgVgTAXe7q@8InZ2X{KLVfKhJwch$n=6odvHhNM^}W*jQN5@$ zX`7SEUHHQ(-}Vh4ZEISs1%5TPXwh+;OapsrRnY>c_f~902HQ>p{-TDl52*xlp*c3gFVK3_j67sg(uA@@123# znnS>+w&VUwqxc%1(Urp;w8cF{k3mzFq*4m&J#rOzQdc+TPR~G7jwfxZYCxayO!=+P zinB|xXRjaRJB`ucc~(P!U>Pz-zrhQrXB|?k2-?$XTT1QYNt4f$JHXXCM%*I)eiX)? zBU*|x;+w1B#^-q==(5Hm+eizH85H*)oeLfwK{jc1f0{P*rj_~_Xg@OcT+}%A#kv~0 zutqJaZE3N3t&u^*(QSNfcOH`H2B6Dl%-stQ=>)Xhp~Sey%DMQ^&_i!coOsnKIXOAg zC*E}1#3?hzPr7mX)OOcSZC`Qi)Ke=$N%q8l)6c|fr=IrbNQlJ$-|8<&=#WLie@?^y zlA8W=O8qY_g;7$>D48^+_VzeNI$k@q6Jw-v1^UTjXc;$Mc2labB=uTeD!*B(qTOXT zroPMN0edpU+Im2RGW(xV`CU2*EXRwU&)iEc`LmY4dMf>iHIsaQ@cx2y9<3B)6OqR<&{{>1No^9IC#f5T!%DEZOlu(oR_L@ zo9foq(*D>gb#GU*bt_2y-`v!gg4FLjrh4U*wj|U~vHX9mG}W&t)sUBZwJ0^tny)-P z)xIEge;dnM*edmC=Tt=rAo2!8{GYtk4b4*N))dZzg429f-kjSb%~J242If+?o_5(! 
zA;2!8`6kdj2lKKj-G93eNazX@QiD3C?r)V^SdbcDY`<|orbTKaM2)WOcF z+lo@%N>X=}ND_1mX@8#IL=v6zQ+?W{4!1~Me;WDdtPjk;tt|DU&{Km;M0Ki}^m$`5 z{v2qL`Z%9X|NjH?4Ao`~)ydc2FmdwKzqxj5H-_oym*ro7?Nqc6{u1-+`iT=~Of9?N z9E?YK?ep7{+JD`120ruTd-Ic}Ies1P*?;L=@2;OVZG4||rqs^DI$P+2Ts{|a{WZyR Ga{eD?N7U&6 literal 428510 zcmcfKdDzbN`ZxO9JS5UmAxX62u2xa3C`48zSB9wcYgC%3WC)>wG)f{WNm59fG-z5) z)IyU6lvdMHM4A=NtKai_f34?v_Hpd}$3FJ4kH>x7x9j>|pU?R@&-Xcf=Brn!QssJO z+y3oQc5{cTUOxW1s%81DQC6?)^r1sfYq{T`!J{ga?NB^S!F-s1Q+piOz1M$LC>v2W zrums?4eLL&`N2(^U(&PrzD=5+ec{lHhMqp?v=VHBTFQR{skJ4ZCRQ z@ctLo98$KeRLwzWZu@jdg;Lp7Ln?NuaK?~I*9@uLdq|ZQr3$6WrHWS%srsLyXI?aF z@L4s7R6C&}F?+ThQoU4Gs&LJa8oh_qY*DIoLIqDR8M0lE9zDAKw|~W-(xDd(sddJ6 zrwrM?#R*k>KH!4W&pB&I?Nf%-X>rPs9a@zByY!ylwFvD{>Q*|Ubd+C2i=VPmKd{9DPZO1Rwiw@RscsfwSRBzXYx&vE|J(W5zEr!k zqZadjUB7v@LuqHfbxVIL)hq2({M0JdDeY3K?|D6ccPyUQ`(KfUAq2H`@Sm}|{5@`1>y!b>r$K2UrPs-+Lqdt4rUR5D;>^ejx2R$JKaisO2?LtD;;0zTRNfiU!@aE{Yz(- z&MKWs&sYfn$oqU>q-+!H3k z-BP->G`)0NX-4Vp(wx%V(!A0=rTL|MOAAW(l^!iUR$5egy!1rr$HX3trB6%iO6yCXl{S<%mOd|SDt%Gf zT>8HBLupHCYw5?*Po9Y;`k$}<-|MUHfAkeyMO&3=V4H8m5-P%A z+yAkX=seo0N^6~Is~UX`p|_@mwyM+GVTI13ts3-pAdP)TZ_(Di0(oHK6tO?C4zB^ejEditE$= zyL{kHmb{_VxHJ~hQ%j}NWT>|;eay-`v7#lV+5YdL&dIRu@3Z$wKECudD?PPzed%aA zJb~@JNjFzQeJS7A!dBasI-n4aHkM|2GGCo;Y`r79z6d8<(XHz5COq6wdX}aBBytQ; zW+E?Xt>$!X&1UoKm9JOYzjQgj8LjNU_){5wtIW$@5qlc*f_F%>n-Vokr?B#Spl~bi znJ1zT76sejzCKF7741}DSsQUb1dofA-5C!(NjeB76|`B8{k+90TSH{4_w$sit>h}L zHZL`TVJ{x?Bi_D4{RljFh0d*lPtAd6gmVN^GfH_MmbLYB?gVhSrZmfqMr)1 ze7h%;U|fdx8HJ>m({?*jA4X^G$y%W_hK+4OV>Ql?WI^3gYoxW6K5NDDZl$GT#j37w zt>?2(we=_ZsbBhCRQwvfm(V`=tQIJ(Ql}*?ucV7;?J_Z)ooY$Af4#39_qV`js2e+DH+X@~V&PKAAMkMW|oez0!&}kEO?g{ff?H?&^wHwi zUf1y11iZbg_9Xc9_Iw#{=*rU`=NX@(QI7U>Ei|E{qu5(@ZLUM*5IJy85wF7kV%)SA zS$2fsTJa}-yo|g<3S>T@r9`MYrPE})X{_Tf+8qM9-~9B3)O5UUQuYlJ9v};Ur|wFb zeGEV2FYiNEO(K3|(PW%4F8(whcmiX6{*S0CDmLL>1zj^0ma z>!ZDkKaEwtI#d^vXCOHqrM+gQhsZHc%?dOepR1;RZ?)?~VLk0nBSl5t+ET{q$m;%} zrTgi*Gs|hAbbN0W{;IIJKcVv-JU+r@OaDKGtG(co*gRJK&OSc_ug8k1bG0&5Oj*j) 
zMtk;_$dgD_OTL__2XnaRe=lt1HZd@W*Opbgl^#H4Ju0#D1+=*noPMVD<$ew;y)J5g zK<+PK`no4qk$eyBA4Cfi^-bRKq(19i!LxUR|7HAj9xPs_;fkf_y?+d~??j`KGR?{O ztV=`xq}hKcRY}xXD3X82%44nnjjNsEbq&28r$6-w%70@?O>urd3;wc*yEDE z=;5MA)RyeC=xY)wHY>jg#~;98TbAfhIuEZ~)YzS+?q1kWba$^jQb}z6S`-?Kax_~9 zG8L5WqwPD32z@0R8z)+RLp#ZZU7+v@ds>d?f#^*@BU&BrRT)H|P`nKXSI?KLkV+!dlr zTePYaHkM3UT~wc_PFt4M5D#tTnGGW7Y!>%5t4dZaqo-%sM0HPkp>;Nk?_JtQ{7S9u z@KdJN9CmjpZ)u~g);wY;ZEk?gPH=veuZ&i!A1Rjdib^PtW>qKi*mGglQ;ny{8LMB< zf*+%e=aj9W&AWK|Qqf=-lyC9mIT{)Z(bf2_S^76!v=Z-n7CKA5sZA?mNIOf9cpiR^ z!0|o!TZ`ukEPVnQ20*eO4p!-vv=lWbc(Vh#HSD8w2YTDE2#Fv--E1Z zd)ge%T4#}?99q|~g(hAlCw5~+6aCyts%bQIFg?x{(Oxd-57AQOy$bzAzR5EBFo-rQ ztbIDmoJ^Yw*yu)lw8YOA8dyR{?F%W(@YjJ}U-9|lcw4XbyKJd}2sWdz)%Lh5r-e6t zzPng=lX~akZ!B6*l5(jY=dPYD!0YbpAU&Q|=(W>kWwEU)4(fW}n%*jCy9Pv$^=hJe z$@;%4-;OkMwb`dYr8i4B0$1($(`<6ZVrpx%KD`f7yBnLT>fH)_wPPjeBR&elDUg2? zho_PLcf3C*>aB+BgDky`*XN?ww{)R$Jy^nNrIXn7LBwt5&{c*AiUk&x3FND|4r1)0c{}BHFpsO}w z-MM1a26bM=S!D>`CtLJG^F~je7w_xvg*FiST>ZW*_!qxDaJ>nFw-i0I2T1#ezUk*E zy-0gEs4)`#lSI6UC_Vy}CV1LIyc(nJzEGG{V7^Anb=YZp&qj(Xmx|@PlBk`C)09MY z@G#nEAB!Q?$XE`upXu^&NWV@4hpW?BygP?feU;rn`xl9lU-S0e*~m}yTMJKZ^=R&r zEx(l`rs8}lNq5pKx&z%rg1LG!vte;1{>I{e1&;p87LJf_PvpHdV3YWJdl60ML$A%#0%Av51x5m4A%N-Tj{l`USI}pFU*xXoF@tjP4y7)F-#_21nZIPMh zDtkCBKdbJ1nPmVRTJgmvp*x)v$iCW4MtW9%Wx;*$ zn7M?8I656xXG8OBrO)+zGJVyD)2%FRAW7=uWjH<-Yd!tal{g=XZvP^#Z?Bc%V)V`Q z{Rqxi75%rTd_ETLWpq`ax7TBh?Re5ge5}+D8xQYJqVfl{HeME6V068JHw>cj!}v@M zRzE=twa|_RHlpy2lDm8BU2T8R^7q**C`cftMD(C(+L0lX|Z zYAYmX(!@9!{$f1N@%ywO4i!SxFf1Et@Hf}pB*{|qlb;Efxn#*XW9NHUDk0wUo zI=Q=_HZzi(A}$Wb!&0)P$G#^&O^@O!(pTg|?|A7Vx_UB@gv<0jcc9r`q&^gW-Jn#LKGOf_=>Ns! 
z$$Zmn{;^Pv1I5yZwe^se(vztUwVB#EQ19R)e&3yKOhn;Bv?j>kM{Bzpy^W`XRro%Y zhKCn^HyCd#N&W>4Gr~=eY@Tt>GF0cXt@rR$38!DPlS!z@Yv$5N3)+v?mW$&zDt$Hn z>UsL9_;$Vso}N)FNFJ>9?mQv6dx{AAwb93Ptv7erq&lYbgYhwnk4k6n< zJmq-N;aof)MB-0m($n}v$?wfRY0e(DLj4BlG^No;V7gokz6x(wmj=t|wY9xQ96nSO z>JG0{^eB!IX)}*H1M*Epk&j`uln)<*r>oiMulRTtx1W-6cicQ7Ue)3o`=ZkhM#J#X zSsv(v{lr!3W8~tjc;Js|+Vptr5 z<_;u152wBP>??427o}ma_!k)mk?{kyyRobuv@wfM-r(KIP`;adCzIerulA+kW7+e~ z(EJkJQ~Abs=pQH#I(!mD-Wve%byOJPw4=6n0aMZQP=zRw#T* z3)kWN4ipa7{#{y_gYHgvtHZ|Ypw^pjoyd;|^W!~epgQhOCd+G(A4tb*&>8625KmuZ z73;Lp8CreO`-Ki`cs3A%-_h=kq3il(ex#x9IB_0d8jM2kx_R!{oPMX9rP!z z^W-NHb_djFkSE@LN0GyBr?<@8pLLR0PB z#m_UdI+aw9qqxf7d9cuM(emIUF^hpEHE8*ujcJU_5Nxw0pkNu#XUjM$h z?Z$S_f&F3XXH+%2$j8Uws{=HSL~jmx_n?cuxa-Zz*3#uTx|qs)8i-rzk5(edTG9=L zemOte6K~7WF7y6P_L+ILUBrP|-oM9Af6~r=P}w$4y;J}1acz{zJQ+7k)U&yZ?ry|o zRdMYQebKk@aVUz1@tqZT`!BKkH!bgh=Zx5@`@8{9oWrgn#{x0p?IMoP#P2jx&-o9> z-{K{+$H^pF^U4hJEk$nnK>SGGz5;wQ3v-SP)YFr8d||QwH{-4n&wN}nxjS=SUqN;Q?iv*RiBY_& zKc3RhImiFW!iUk+O|Z+XRr*Ymz4}!ISuGc*r_vt!Ti}xHoJ@WP4ZV+IeX%H(xGPTT zu(8fGF#>n(y}s4yvI+t}Y5Ob4Od{c8RX2&a|Mx=mb&AKc6;FNMxma{30?f2bcZ+PqXF zqp4POo|($))OtxQ?1|UcS=!HPe}bFe^c;5QnaAsaJj`PLs=YRH*!`mPAepxTlsbEt z6^Vtod!E)V!Q%_`)e+Y9;G4MGP`;ZfBUNCT?~|{u9JddSIuw1XjIb7IbANO4d!n!e z!aDP6Sd7vXmyQT2!%u$U(=N^%| zJ&8xN$D7eRjW?Vw>)j0NP5k_B9x>QwnHQ@hqup&3TE+W``muW}8Bace4Lrh@DrjLQ zZ)pX|%=iu}qThV-&(%hv?l&-L3fYOuomu3d^hXEav=vXu8djOIt&NBhZLd@&^YpzP2R!+UNJ6l)n9OxVe#xmDy(Ixsv@_%2)r}vX57moM%?^ZUB;&=ZY_6d={R(^SA>RG~f%P~jQ(`Z% zWiK}T5{X9P<84tmWB4*2RH=w$v$a!Gy=#=3#<~_4arhjTvVlLo41xFg%St{xUj*2q zW=7A+D@SWTdFfGj^kLici~h$RG@ALkmZTgghLn3Xil6KyJ|;T0LwA~2S;3tl0?(z_ z8Kjw5MCj(^yvVak#+D_mPJ-u3+Sx&=J}i8Rdh0x0ByMI+Gb^my6&bCZC*2IUwuOBr zQ*IW8x)dYdMZ7L+c9*lxd34lHseiMYu`H<*KiI-UvO4xl(c<%{H->mcR&|$%lRidU z?Pg^%QU|>O%C95+JUV<^zaaB=hmasE+sB~UN=)914yzS$bP}zW#DwN>X<|I{IqGlv zq=$GA-&<5fnMET0>#|oR^;U{M)ku(Bv?tE8i)bLa2g7OGD&t7DaE#VFY3B+aHkkb! 
zj(Q^zCt0xuDKkR*otBP*^-_B1LZ6k{ZcXj~!j^0D=H;;O;qO3pv==M-RO^WW%e1tf zZM;pYjO`BbY>P--5&G$6ZYum~Dal)k(oc(lo7j5Rg==YT0*SL$Iu2LOihf?^`SM$)Y zV@ufwun^`MxwYX1myzUJ6w;r)!KXXJzA+sS7NajF^DO`O;R(;PqHoM7o}|vBTA0Zy zJ}y4%B`TJ*xJK#wwAEjmU3{K#>>9HE6)Neq&d^I8;CXYg;&DHJ$NdT7(nfhIt6VF1 z-u`T2lV@MxqP5yL(d`1CB)*T)+xr3U^VnB!Khf$hKFw^>PGaREE&WA1nG0y6%z1@| zr;_Cnc082!x3H;Q@i3UI2lCK)a6FkcZuE3lKKDeikKu7xS6~C@&{}pK^e6H0Q2m16 zZy;w!vFjO_Ji-!MsFT^bC-9mme<{kpLHAMmN6!|0=U>F} z1|)7w>zDDy&y}db0!u~qt}df*6mu7${gIx?;o`tPC@+BO8k+78;V(%#O8+Qx(#gd4 zqx@Ie{EK&6aJe4ujY(J!7Mbgx4$T^D_9#z2gZ!ED`oEz3m1i&be;|1_k!BLverC&W zu$CsgIX?e~ICeQ(Je-#gfmlW!$3Z9d+NPNK7$6$PKKuLorvAhJ;^Qi17wP*>(^kfl z$r8nzjF+m=+MPzsx03H(t!2fd zTyllXvThhPxK}8N@a+pSFx9GMkw7uiel~>~Bx6uZjkD;<3A)!8H8+%-34;j;%gj zi9%NQ`zhZ6m5d#4(DTfk^%ZJeAp&O)KqXwIM^Gl0CaPB|BIY69XD7uAP!3j|X}*>x>f!f#pJg>=vT?v0aIOuXW&ElWJJ?@8w<`@y(neM#v)Vep@V*^s z`*ga_tiWGIuI{|3G7fv7mi|P2?XUOqsmQz0CygOG94_6I|6bktg%76hl6AD?$@R3H z6|f58%VRj|jDxHU)H6D&tnPd{bQrtLI%#Y(yZf@MqpL`Kq~61cFsaW1dqMVC@^`{> zBe7}Qcz>a2^as1Tim$anFKg;Ykn#b@jTcMzgwtrqjS~slDfgDT*?*ZCp;|mHW2D_! 
zYGo0As`xgJzQ;i0NfJ!+tTry!$w>Qp@=x0Ohzuv{@4N@uuHw(7;#6i$GpDm2waGqh zhttLM(Nyg2?x!m2$X>L3elA&`q1zGke-5f|qy9cUKFu~xV}Z>fbDwDW2&~8Jhy7LA zGyFVF2a{-Q4eAH$oBqyDK7!p}XurL-r+9Y&o2;$1eY~$qtM%p0eOTRoI8D#sMA&9e z;R=Xs5tmn!`#%2sfwr@=sxd8mFSb7;n`}^KIh-!^v!52W${Sbe5p2QNVwfd*O=c`c_^yP`4WxJ- zA0KKfGZ#_dY4R8zH6O*J#e(iI9Kq_U(tCD}B;$R^E@trXmb6n1twjHI zMU0%HAC{GhJe#6i#?hVVx{L=UdYnk!J(U8e6}7x%;@tKG?pq?g>__yO?Kn$$riJ3Ff&0}D{sPzcUA6d+G@aFmWl=^@z=}< zW@cd-su`m#;=3!=oyB+ilcpiwV);Gvc{)ROC7U>u4RxW3=^|40V7~10>>k_+%#8!0A& zenTI9Adz*~wK75G>dHvE0#-j1)Q)3oJ+-)W0^ELN@#ye6ZW6~K%MPDS-vzLc@p zAQWS%;rB(Bcp9t{ud|yt~6R`U371umC^cm>FH#pHoK6cg-cN#!R9hMFt}*zFny)u zlx7909kp@>39|oY5y>v*UCB`=6l3?h*+3(assm0BF6<&Zd%wruMtV+fqKvK@kS*p}UG~O_j`^^tD<`p3MBmU35~nu(BXu1hH;$VsIbWzf8+>ypN{_i_1xvUPdEN2eZkHwX2DNE%?UE zv=`hvqxvRW?!@nRLpd{}LyOUFU(u(9)-vv_%5v72mzvL#vpO=KO~!w-(pm-jnI%XR z+A7LS6EDxj!~JyqvY2(e(+tt=#bn%93)z$KJGnD^avSSfDNhU+JDw6LRTZ z{>k&8_dPlOMB*>#YAO_IF|kF?DOFw>fQQgud~CcP+1M_^ltwS z%{TDf%xv8nXkN*79wW&QV(<`pjNh)(-sf`7Ykcw}emd0aAL%Hwb4RO{Jv1Xp(;g>p z@$8N4p@L`G?Y17*KX~^(j%JXuUD4b6ndZ~m9fRMwI+?pK)`(R{NrrRnw=Tvm4$Hg79Ue z-xb9j;rBH((?i)w+>VakSK=s9GOs)6p>2iCI=*tTQlsf^HEBLWE&F9(<9~ZW@KCtl zh=bq#|Df7)wX?IjS${f;_YCLl*-5tp{ho%epZUl?VDtxEzC&#m6b7OF6P+F8_fR#S zLGcZ}@4X>f1O7GH$+N8QRXOb^n)+RPduXQ)9W2x{I21l_(%(jOW{4^S=TpfeqpKa%Q6bw5&W8jJb@XD`a(EA__igjZd%Z`<{Cw)Reh z)p&f}%VGDEc6n!yUq#ui44q5ikUUV8M^2TEzCthSR+Djb0M4=(airMuC)hoR zM&}Gx-w|yoWLNyly#~hC}n0Zd*ikv^Ue5Zjmzv=O{^_r7n9j$ zMq}xf|0YJgpj2}7P;z{VmnVu0|E9PZ%raW&!fvPFz7&@=A&Ga>AFSAACCs_JAboUWM zs^fV+o1Tqgnds3N)yxjoqm7a5I(tWYX?G3Uomj+oo}@>S)!O-Z>`l@mNYj#TGUi+h zhaOOCMuQpQKPTQCjqAtpkg-@j&nBUf5zPVcs9I#HE5x<=V)_SK86c)*=jI-0XY_iF z*0Q4^Bkn4^@&MM7J+L2=VS<*Q!SQKg$t?dzr;CdmP$q*+gx^Y~hVY+m1v0Dg6NIy} zm7Pr+MB-T_&Hj}h>?`}UvKun;WmPZx=zA2|?`lu7SLjdZ)DnL-YO4jkzr#D`i^)sy zGy}JLh{-qTZM25uxvcPoLNm#t6UkIf8w1c=kEiSs+l{YhH%idRs3$W4?F*}YhgBWQ zQ>NgcI{sVx{H`Ku4CVd1sI!e?qFGHiK2gwki4HQmotU4w-;Y^Bb|>W=LUu&ggj;$; z-AJ{HpE*)g$Qgtc?C&kH{w?p~i(S~?dxezQVg3d!e4*WHC@m5h 
z+QIobB^%)F33@)?&wPkIOaII0@2~XrvQhQR+BsFp>_N>MMn*{&;py+jpv&+(2Z|$c zaieUsl}rbaz9KAk)6RS1LVIy&2Ah3>CI;}>vwSvOY##ydo5jR|Uj3r2GBIJTe7GlT ztBId|)tiLo{WSg#?Pq7=M|gS6yC$@jlQ$>hd=CEmu@}w^M zi8b{_C*iT0*4`=N`Q8xzUYTp;tw}P;AAIlREGQMo}3^; zf6ERUv$;IYYQq>llB{_%e^`Lu4&EJ1r_+q~GOljOV|F2X&LL*^&}Xolg!>=yy8)f- zPdwL?O6q@0`^%u+5wb^%80VmWCfYCIH(CA+mRp6UUsrlhXkIE_OrW*rpq?EoE9KIL zaNLYeb_MN^%imdJIdsPG*`efm0N3~8V4{7fV@n)Gk zdJ;aC(pwi2PF5x(qBUv`SLz9|@j*Y&c=aA`e_@C1@%BDDI-d@ohsbs81|=jeOezS-@R z%&gZ*-d7{d724oj|I_ zaQI9sPvPl)KC=Pcm(W@$gDsQo2YMfwTHyO!NMygxF(S!5ygt!sKGgeq_8saSM2ByS zSUrix4#ekZ94;<;-qYwTdmO&S<4Tcf0O_Z)>T&9x3YmXvp+5V}tn5|lRQ2gZ7Sn?! z6CulrzhlLq74*}CjOkBx(oV*o*{S%cSTPFE*>$~_n(sq1yY(JWrWT49vCln8k*L_3 zHJpLpME}lsDix(G7SS{_^y!V)lQo91$vZ@#tgF}7Ugm5QaVzn~obk#oycz8AZ2s31 zM+11k*aDf}rAjjTINH2e{K!eZoT3^_>g~zdL5#{=z!6Ztg9Ob z?Ym-ru0N@&kiP;0O29j)aGEdEV)yNWJvfy*~qy$6QDrVQ=v@v_SEx}^P!CkOeh%X0_lf8Ip0 zoCI8qZq`V{WzLE(fKa?Hv-oGiX0n=<j1^fIUZOp$#@v(MYT&e_TAFrEyHKI#qN50!XHOTQVhU(Uxe(>9efIm4E9 z;?Mk_xx;?6(M7DUKzHN#ch;SA>NV%yE9rSvRCYIrWyXGQSs^nAJL9IRr>*dxvri4! 
zeI4b_(cWG~&!QE)7pv7ng#21d_1H`U_?7E3zocecs8nEexf!6VlDnbZla5Eg>W!km zpOZK1_*m8p8?n5pVn=49>*+sc{j)l6xrqE3b1%kw_86~YcbmySn3i(BvbQIz=stSQ zTuk=M-{RfNw3qea%r|%AiDkHLPr~f~EyrnkPIa}noafiaK?hML`^Ia_7SAhjAemO+ zxSX$Mg=vvb!^twdW=&@iP33Izp%9)Asrq7Y6EV3VTdpn^HTUEm(I_MA-gsS7M3Cqv z@#oL#rss4XZGMCErnIvUq?gMMOA9S;n;Yn^^hqQ<1(%n|cb~AbBYFEy`YJPM{2{u^ zNa1V)55mVYgVN%)#6DwY9+4jqOC_!I7NoK3#UgY*$$ph zi?KiB;U+dQ!~YNC_$PMI91qDcSxK7-gY>x<((+(jer+c6JlgBcqxSahStTyR!Pn|e zfk9ug+(W)aeCc$U?L`0E(NeV{{w|XnE=B2VpZ8O*idF`z_ZfXQ_Q^UZ?B=tR*v)6+ zXnGT^@OLQgb|qcbVfP~aT>4lCiL3=rk@vEf=u1(v8{U4UvA%lL?5H{ ze_j;Bj-ugb^)rs4>j`j~MK67|zkzL^LZ<8;JqNw5B)wGZI#I03Z2U<|e!^z%5EF;{ zq@n1y7EX8K>@-|o3#0q=vd;G8T8M22iHvC1d-jYtvA-yBf+vyVk3!p@kn3uid`ZOk z6^?&rN7vB6Q2%FV>X&-=7opr6UmYQ^9UDIr9uWt|WV`r?-u0oykwLb2W3h{}#g< z^497&I*oj9&}eq;pM~c`<-IFKr%!2mHx!>MqEyaR4Af$Zt$d!*)dX=cvu)kPW>M&_9jJ(70Qo3G_dp5N8Nc)C1YmdLE|M%-j){&4=A72@pR zoQrxn>m63`@UG80vESf%5`E3a-E#HcWrr;bO!gwdkMxvzj>%%oToHO3_GQL+i?xw4 zaZddN;}y`&8q;Ra>cBd^tea>cr!z8hnbq-(;wGY0S#}zO^X#n2j;^xe|1ueJJ`6G@ zpRIPz(%eQ9d6wNd+57S>gwq2&)RRj+dx|w)3BRk0OrEo!xmV>ZJZ0^8yynSV2p-92?}Ysx;^_v246Ojiu~Rbz&#|Nt6|clOdOt=kx`m*;*`OSJG#{c6KagH`*nn z%#Mkr(721$j6|uSSe@C7Ix`W07xGT^`UZSW7oE0O`&1a8DLUs) zot!i2sRH{r4kkN#MWSVZc5#lIJ{HAac=Y5Op-Z4K%_lW!GF_s9Pm+U#!b z^G$rdgR__QJtx3(E6Tqp*A7Rc#fR)F{zYWF->2`$b<2veL0@*0-QatYY&6X`5U)PK z!>g?FMOb|3`6+7suEa+F4`oXey&gof3(1hvaXCNHAB}&Aj$bIXpU=)i_hG14*ZM9v z=>mmrbaN1k>&Cwp`1?A`9Egw4dEeFa_iq^7PsVpVpHjrI?0I<-@;N|`D= zQ0_a0#C_zUFZsbBap`E#>x`XCRjGPj+=3Mk~pO zr>L1#r7jTJ9`9XwTl%c$`J2zPU%#1JnLp01mEOhE=Xl@@&#JM{R;1{mUe4NNRFV5L zZWW(0vp5Pne7)^KPDX(|>M;T1INg`}r()sbw8IJNeqN&+G-w zY~?20Wwv+_J)O(q5?AU&E&FS;PwOeNKUr!mqnG)~T48qQB@<88c1BKRBsc`NlOfwe zHe4ri-pszLv6N)27D~NKgPD)b2H#H%(guqvjaH@WTcrnpya>X`P}z3)SX=|qqod#W;Ho;p*s|oRRM+cd?(c3PtFvHlbw$w-HD`G%8s7W)B27C1H8*U8f`@S+-@*N{rEsVzqw;< zt54sAcUP9xQDpht(}9rf&JOF>7i@w_y z8vQrh=}VR!e4cYI>sfPFN8Z(P12!_+{|%wrfgRPN&okNooWfHsAmcc8vrxRR3&)vs 
zxwn>XWG{6fcP0Bcm<>#l!QP;ov1&gq|DFc%tQuTR#!vA(k4`(Xk>MjI(a4GHZVriW#_xee2HjJOFX5n4k)@`vomp_KEe_QydMerB zd9qr2$j$eYZ!XD_$3KU1ZnAkF|Chlncgh6Y-SH8N--R{zK)DYaiof0n-#NwnRxO_% zto}3fW)BFQ~URY@{O z`-$RzAkxUqn=+k5>T_Wljb@MCw`8q{w^_8FwSIS@XHP`OCKij_Fsig>yhUY zu_LS6S?h@AbI)lLRzFb`ec5j^VPkfjb1s>$sVExc#=9Y+Nug9_M?CE_Uzk{Kcn>=Q-zHv$%K(6Mju%2k1`Ldic z{;9Cyqsfzz>aW`BhL>PkLECNFOUCE-qc(spH{kE_B0eXdokrUmNq84a`UL7(*O^7C z_)>OcWM;du(Lq+PvszzHhr4LKtY~w9He%@qvdsL=F0h=!&0O;`w$Yc5cM}ydD|HxI zT8bT+HJX5@NuphLz?|&K-t08DkqwYXvKKNt2o8kFUh>c5yr;ds*Cu#80q4k>I}x&@ zAg6>z^Mi|Nc%nRzeP|hX%wT&t37xsnoZd{ocV~V4QF2;GQeMjDP9@p>vVA>LzJ;5s zWr}_M9mVS&6qha|%b~dcjMZhWBl}l6;^rf?-_qJa_!p*Wn?!*1E827HA*~a4c#pr#=PL4*u zA1&Wc+nIg6&xq?6uM(Z#Bl)TLEvL2PNO+38^_@}T_x@&Q|Jy~(m`7WSS!r%h%#Pu5 z@6uWyvajQfdugQyIbPAmBc#qbpuOn1vg~&}FWAp#6Y$$q*#`W-Gq25B-cF>iLFUX= zv=N7PWxuDglfCIHrvN6x?m=y&r_rAVPb1MtdcMG%_vK#AW>I^=`DyZA1eaO#&=Kdq z(@{p_zlu`3dvz)V){2Xj$#8|5wej~ZzrE6v>%^t(&&|wcb{hTevu|OYeU9UmS&4)F zdFo77vEk%NiABI9RBgDH%kb4a3@Q2G` zkrl7pe~|HO#t1nlF^o(-X+C+N860zGT|4h{Mx+96XY!up=Pz*3UXAoIvg0bX6QAeO zR2>oSR`in_a;HPi{(UR9y@ZGKptB>S6AL(mEZMz~8QuE4`YE64>M0+Do$}HculNh;?=5tYGswTD}0)fJg0MWr|HFbIh{0_T}{5v{@O|6-~gYdM_Yj` z+s>H|W#hB7lp8fi!}dFP{DPAm*mp*QTV%e>8RzD?>|qRzxoGFcot*DUUn}=-#@n(> zwz&wI{Zq*h$z_w#u0ih~lcg0~X@|3_;%IKr$jCHV{%oF`Gm9Cg)==(nbhF>)4iw7q zv=R5YZz21OGgtl^PM$4V&grNhNPUButm!g(BhpM3Ny>Rf>??U9b$jr}aiVi_OwqNn>@{QTF!y!Dm;=MZJAKfW{i~@?qXRL6Y%e z^h0#g2Rgs#p*@M-K4QmKyd5A4WS#XmC6{Pzk_daBmS3c;PiS$rr`cbY{jOi~y&Am! 
z5fL*t5oISs_Tjb@OY3T*J_$3UI-ETWl?OM9)R}wx8|~ktjYc@me*fz{U7+<1>|lRB znsfOFd%gm$)o}2RD7{pBedOJoQ@x7z&(cbEL|w@DhbVa_o&D_D7MQ+8hQ;bW&dV}3 z7^P%?+PlEG>ItQDCa0xW*+qQ4PcydfPIF7pN?$a4u>OhL?og|#t(<^*gy)WCx#?Hl zj8^VA>Z!~rUah8)+;)EsIeUxLmul%ZcJ;GbkBb4dl$^_x_R{jcco@nq*LZ&+Ej{n^ zgO&UR>TC6YuJybhsrr?G%W{E)S-G8kOp)!qC*tF1ZTKc1f6Bjwqw;$wFI8Oz9?!nwTVC-pB-FKc_l zls*hk*^AH{S8Lf!Rx_utft&*CfTx~pmqPg=&mY?`r(6=XW_FyH&i{N67}pq1jn7PJ7vJc!-~@KW#)DvA{JL}FS+m_A7F zL;auKXye%WO(IcFcYet~H=#YA|82V=cRa2qi;bD>JPNJs%e%$9J&PFAQUu5z=I=b8 zD^k3ot*q})FLLr^w){_AWoF_tQ95_ur*sdpQHy*uC{XP7hq0H%!Oh()QuVkac z$<>}l(l?&W^7bG>W>W_hk#QXEQ+E#(v%)i&4d$%Qi|l+ATb!;BTCT^CGep_3G`1LL zCs)qr$9?fK2*+7beS?j4V;lWt<$s}>xy1Bs4&Vuyuf2)}7ULtQnpd)iuI%hXQQ$-+ zw&k-5TF9B%tF%~|ma}i^GM{JGeOp$Gr{;u8Zgk8zZCCa>4L0}DREcgz<7NcD4)S^# zDQarr7!fQxuXEzQyRtX=+eDODgNxK1AWP@eXKuF|tkh+=T?V1dL(YcqZ_3q0b13x6 z*joB&mApSxZ0-i{kL8{04S5QFy-+?4g{=Q(PuI3lR&ENM0hdPVj+PCx*S5N7zoU5h z2$`Fae-J!Y_$2$xk40q{5w#qO*(-Dz>C(bD+_h%!l_37Ur;is=G(DqbJaGeWe*uNe zHs}2QaNHL+cgg6>dCc8#%gtqPsh_*|GuF-8b?%X#t5)s`N?6*+<^J$|PV`B}hzI^nz3h|9jhdCTd#+w(Rj>D9ft%Hzss2^; zaif+S>eI}VFD9|G2Sl&lEa!DNW{3PvO590`FWFXQ@>is@#F%ZXm)U)idCXJL%qqg8 zY~pnMwujj5V#)}-%wo$Yk!d=Oj-sc#`1&KFZRYKU(ZrRwf7JiUWEZfZlgN}=$2DSW zcCF`L-|=*ly(d|hY@$}ihu0UezM&{QMr6E}XJnt|Qr3DcTgqB;?!C(j+_^sg-n%Ze zd<4olIX@7l`xP1LNAf<%vzqv1xiS=D(dzOWq!zYtHzb4fWcz8gY*~J@M zzoO`WFBJ{4JAQB8utJaeVaRRr?hzCgvXC>$J`2s`WQEs!Ho{LUzL}ZWGF09%^2#?9 z98aFXEd3f>rCzC^-NMK&16b0-=7Whe4ne4oTRcJ%r_dYXfuN9e6K{u9d{=P|F*ddBlBX<`Tp zZ z-pnTt;xGNH+)%g+>y3VM7vY_%o!B?*$e~m+DI9%_8>Sj9poqn?d z*@M6B4}~B3>a9HfSpHI-rt)ca_jG_{b~z??o+rv=cJl$T`aXP5;(e>=FlW#w^Q5fn zRO2P_(9Bk*x04efGwEQKJlo5s=_x-Aqkd{<^=EIKj>2Jb(C@ICg3`_qufQLNk#a1) zUS~a7cge~7%ogp3O70Pz#+I{xCbyIg;y>MZZ5@9zA-kzp>|8`WkatLcFfTe$K&B z`Z+--eYcAI{e_~(REfuDmul{N%ejE;2hVM_$v@czbOwDqBI#Rtd717J>NR(uRU`KFfS#~<{f zcf<3Exad;YbapG}tlvx)IaBMIKdR5iZpK43R43r9omUg+S17xEGP<)sCxufw>n$79lr&vQDyl2-!=AtTze!1Of ze&H>dtI93?+2`?qSpExt{X5^fLe~3Sd^%f1X;1p)_+3aloAGh0{_g8gn1j|D@@5y( 
zZ4h`xY&waQhj=mtk~vdZt+1{e;c%+xcdr)K7oS~b)Oem&vVOM;*4dRmM9g^Dv&@d< zo`o~W*;6!oh}Hc8k&MV6!R4;}=@(q(#OUV`y;I)LskQsHGDCX@!Kg8+Gf}u5YE!+s zLln+j(up#}i?lPEocloN1jw8Rt^eYymlnOQz9h*hxInssvAFOL5e-ek)6hka4?b8HYd+wR@ITTxmhpYl$QG#FQkiqlKpu4xL917h~M~7 zEAi?(pEn@gVn`k1lbk<#0oR9!&9BjA#tpgUU<+L4lJ8U)C(9m5^4y@`(BCB@>Lxj7 z2-~{Hv%|%^>}z z`QM%%1IhK;8LD1q5j@{-kyAUTiR+tCOuwQR{x8DwW*&AeZg;}PnJ9HoGEwO$J&x!k znK?S_!2VY8-Ry46uArkye5nITTK(n?0@?LEs3Tm>9Hrsl2mxt|uxSz>nS^dS4) z@?4o3-}1LFe3I`-sixJFSyN`ta)U&A=ULsH>e*ruEcda^_xDcYxa=9}N0ZqPmDAv> zMdHlTP7;;p!L^JZWqs_w*lq5iNc_%To$T*A5<*k(SIM*LGDihk{g{R4Y}Ku5mE-MC z>StF`X6^I6H|w-9SDkVC0+}^j%=TL2CO6J@^E&%Fa-Z+*TF!k3_xgVyF)ZK6wCyD4 zU+6t6yi-V&omDTwEO(P-wfX#lv+H3smJYMqcs);R%`S3(WM(?PAw~Ah)`CmsPs&O3 z3XXR3ZWF!U0FOP2RgkIV$SnBnc&o`5*0ZVf?t0PCG$Wnb_;2JV=gF=zc3q%+SADxx zyf>%Natn1I5&KkaEF#5yIK7pH&Ktkc|U~|IoDJd zin&p6Hyq~NZdORM2K1gtl^eeH_U>J_nmc21W^Hd?f1nn|@awE-=5CHw1s6Fd{yt7~ zJ~{UoXKa&i7V0E2Tvi~pQEVH_LbCTcpY-!dBeil***^CVi}(|nbN|R)blr|!W%g_)D!FAccQby?60)m`RmgGO~>`ULsJ7#z`Oaz`s-*d^ah=ddHm!45_Qoq7))28qyE5RoF|F+j=EiISR)fjg20q*nNEp^e(+3084;VtBQ zhfSncpEa%*_0yjhkz1hBl*iS@mgJ&CMb<^y&K}LJ-qj<`5!x65&&B90L3e~mxxKix z5A62ERqmB~j@_&lwX=_5oOtt@h`gt^{(#{rxL&}5Yd~YO&sN|l`<`=i!Iwt++hzcI z^YoVLT|>5qi!AUM3h#>bxhuFnJ6*3%_BdS5XKUi?TROGbgv%~Fa*nK5p_r}WtH5wLeUxV}IZ2bb5w`KQT zaF88ajd|WxbX`jp{wqY+Kr1_OJ8SVM@nk1Ywu{Bl{lR!S4^H!OFh<^dMPK`T`h3do z=g>Rcv!!Y^z{80+-4_wJ) zf40z4X2NpE!EhSL?$Y_JFjlpT7TzHFQ)*{#)$8o)EYa*|oPSdE19MJyHeFt*^d?C5 zm1FZgeS0c-DX#PVyDQo43ViLaU$BsUyvu^SYcJ$y34Aj%8Xy_W@CgpXQ0=Ms_NQ;6leIWD20);?=WXZx|d z>|1ThLMLi-0pEX8^c}@2(~tcJTn6dArOjQ$me18VNzDCHjVWZ#x9+XySM|x0+53li zXl7`477ug6wYt(HMbHJJ{t2`@gEnr1NKP&Gfo^hpzDwi<@pvEfa#M3oYn(t6+3T5n zzl4snzOfPII#9YD?g!vC-)mEmmUEx{TxjR}N^>VszQ-XuP5q^o?xUHEQ_`VRQcO(U5H9Rh)8cy?z|Fn62e6n55pGbSI5m(b5m3OUQ3 zd63*c@)0Y_JVI06FbdcGak?YzH=w(;UzKLE)|&O)%=u)E>3jZ@d#TUCP3HQyBS*%0 z>G57s0^DURs|8_5fLi; zsd8qZtdOvtrz^b9w*cpRV{%e$sGlQA+nq|7Y)F6WH{I(Z#I4s~WjIAv@`EZZ&%jv%|WXvPUc14UL?l_}aV0 zY$K=mGwZV}Tbk_u+(tBuzva%cA0dA+33gX&iinx>8$anaO=mNW3rr^S#$C03y7sd| 
zSdm>lEOU0IsT0MqoCf-?h?$wyUoSG0k);RRk7vux@sWA1YAEFHy3Wu$461YRklP#b z&1Cl(-DFO(t{7E`UP`pFvzfag-Zc|l*NL&`$-LQJ`ia=|1Y|e(e5kl}FAEt0sodY5 zn>WrwKc^WsQnh|1&24P+blER+CHLwHx<(`Oa~yyaf^ zf05}&z1p8xRru~GSAPqyoJ;=(j$erfhlmra$niWIc!JH{gWd)7K29qWNYYGStxN`9 z48`P_&1mGN^goNfC$W>`yo=4|8=ih7ZEhLtE%w&&tflPR5WX+SXt~YuZhsSndXgji zDi-0eB9sP^aEcj`MZDut`fuymGKfFJ%Cgh0K3i!6+v8ZsG*RtVc)g9&yJ0&6^0DhR ze54t~rowlU7;ql>SE2YbDYBpRaV@@}m0CW37OHnphZiq+?vAP!V3z4vSA_-D00 z5h0ps@piv2pq}$4x6{onY`U4cwODXsC##^pc^FqIa z$lgL*1NBYvy&m6?WJkI1b#}5?uiy|_VLct>7WZgv;D zvYwD{&HG8Y0XV)AwUK1b>fjEnX$gF?7ccku=TyiIFq}(QhvL2-8bRqeaj(7SGw44% ze9FlE9I10>+Kcq_k(zToI}#=@vbS1d)%I-sCO+2?pP4y21FnnIe~wIBWs5P|xSoei zFM5_2;QckdtL!a*hF=^+ZwHD;fAi#;qC{g_&8hqwSzir0Zc0-N+0)}Rb0)hR@97zE zna92}+TC3%H?yt@xOfcz2m5~>8g0pcqnghukr}MxwR;+z_9NG>?BEG@bRN5ZS8P~a z#FiK6rLPE|Z<4J5k(~-p%BtkQXeD&|qqLA+brwa+#e_RyHC_Gf$aeuwH^Z`rPrrgk zc9M+$k58Y%zLPQA(b(VVX$<|IO*ixT)#cDBQ}Szi>dYR_!)J1SAJ#FG)Q^Z-*%LJf zcloy3Q)uf}eVW{yzZKv6!#(?UhdQYWK7vuK# z*>hfBLi)9&nPWUMgpIBs$2D5b>{-s{XFtJ?tms~L+=jlZ^Q=WIDc`u3Z;qUgmzJ>1 z3SjoyW-rQUoF`|O6*)8Cubw-L#%n3L;Skh&@|PyKucyphvS&>_->H(BsTpVw!%0@Z z4u|lsdL0#6K+Z*9uZ0d?&t`r3hKby)ne%QrIbItt^$MGAh0iV|yvw_MPs4rm`5pus zz##XL_C>$E;JGbp$@hW1&xW#ImAw)bP~2ktn_V=yH7GsL8lLB-lFYZ{-mGo+kw1vX zj-<+|r0m1!r+jvu_2D6xX(4lX`DU^^;C~3KO^-7>RWjF?efgOyE5m8d&zJbvgWhc~ zR}aH~R&<*A$*l+vh?iaQpA&bx&`h#^W=-=gAUS!M`%E&+oBjAXad{LZ9zrYQj*RcK zH#RF+`|*jK-aCo645GDRP|2RVLF#6IMTuF?dhQ}SJB_t5Ae{GokODz=k-NOfrA9A(pAxl@+Nsm6?-KV~PBSVA{;ojauRy|O(> zl741e++^q10JMG}%TT=LJbTbzTiC%`p7te9v+EBeSXBUwnEi)HiyX*~ccR z=451UUd*oAmMm~4-iKpm7-!-vE0~Q?$sNYM;hG!O?jcF;@;D5K3;B5ZxmOgu z<%~FTE;Dm;nbWv}o--;fL+v;%Tttd8T8@q73~zVs-Q-!;moqy1kv2zZCHMZfXSW$y zW`B0Dxd6I32bDd(m0+Cxz?uJPErLv=ft-KLIQBDi){?cpXp&xiT_|QAJNN2lbT9%g zbMgFFnEk*)a%;pe$YIP^=BVX_P%6ad%l@v9J9~CvC;ffCN=w6OBA8B5^Cgm;t>r(+n%SxS;FRwm zt0a&9NPiEqioT-nBjkTgO!!hh8Y4fFC`o>(_@_d-by?>0gjcKPD$UCgJB6D8ELJcWa|7+;W=e zGoLkP51S!WVl^Mo|Lyp!sANtT?VydpqQO!c&PwUA;>C85%8tLQ_}vqQ#kQxLQ_1`( 
z4zjP{uMl~Mp3WtGzL~N&uWyf9_Woxd^A-_jJmhj~*tOc2%oP_hjT4rP^((^Y!YKYEti z?R%nlAkRHtyY-nZ7WkA1+^OJ z-^J6Hsxhv}1hq(=`!8mZ?GqGQlVmq#KU4Q>+2=XdzOR-uidn~7v$nJgE0{@|XSJX2 zGI|RCy=Y_<-G5D*jSxBtwcXjqK>D0V7Z;$MJ+Ko*k=$(du{Ixt#9;UjRldD;Ym)dO zPnVP8b@|{4mYIyP6`jraT;%!oqRPd}^`-ZmuelH9{ngma+n&eIQ#`2|UA)hpH?qKe zv^h$w$clY-7{0@*KjB&JNPP{Pt4)fWn*3hexe(o_P#!_nPYWNffYxjB%oc6kMath$ zU5B^kEU2uAY~A(Lvy1rycwbJQ_sKE_-Q3dEn%9jHp;xoDzlb{z`hP3B_ro@Qf#cZh zv2xI(;!@7fU8J?Py!QlNRh4YRM7a~8kg>zPT0D_euEyW1MuNHDBt5fNaJ&jvPwJl@ z;#I!es3kqb^KOBC?l?|g>qlibE3*WK*W)IbH5AcCv8&utmOWN$3oFdX_Xx3nEq?o= z)KO3Ha=3p%wl+NVIv#y9ta7u~VR)}0Ctt^_){07%$$h@|TR?R(in*boAswzF`7=sw zAxFl()p*V0Wd9E5b7*h@MDi^ZC$Wm`x2cQL1v2#~MQ`{(GLF#VG5E>Zrwc`ocBHx= zAKyT22j$Z%&;E(K^b}{os|PGIiY;d$t$dalnmt(MM&P}9AGx!60$!t& zU*xi?MHD-dy=32fZVJz->{+ZjRRS!Fx)*;OSn4&7|cHw37W))$q^( z&e=bf-EoQi-C>wBCFy6D(?uB`t}gnk-@$kf<)2{{_u@0RBMcT>wxXZ@Sd*g7oVduH z2W@Gk+><3f%?-W%QSB#AX7#Mhv-H(pgUbiBmL0Vt@tgZD_Mori_I$5+PAg5)^T;>o z{DqFPBK`o6|2ukBm28XBPxLcVxk!IQ(JMRxa_O7pUaqXH{Zpj+oHPTp{U%MNk2138 z-yN+lkZ%pjx2k-s{?MWinZ9HlvUNgrPbjq!o6|pDME@W1#do!n?=)Mjjhq>~NHkoI zM!xTAEZ@(%ZSHXFuVn5D%g&*5S$an2<@C3lujiyv_UC`Zva$y>=P$FH?J9o!rr6fm z`$bA;7GeJXpy@uqz8>5Efww408F`cz%FHN|Qc*$!EfFD6NJxXHL6pc!Ls_ArlB_&d zMKYo?8Y)6Ynu@5*D98Wx{@%a;aUb`6f5$b>@jlP%`ds5Y$?YJNWY79p+~lN8&JwTI zR(t$p{V*p}Ym)miv>#<$2$he}c%DtV=sEEh)5ZvPHnkv`?7zp~Zqi;Nz}>|@vd4ap z(mCz@7^xjeI{U!NJ7`T5#>Ce9jC9{&sWSmx#m7;_12SLSBJQw51mO}BbTF&XMWS=+y$6335 zw_q20vHz^m=N9fag)uU>p+ik|vxYI+$<3($T&dn-wTF@Azcf08&8#(|=dN5Y-l74IlU!c8X`QnL zi5$Pd_>yR2&+>B%_@+Akw$RpPq&UN;S*Oog(8QmuX-@Gv8ZI(=y{*gu$bE~a?uvr5 zjE+Uw!YlBsuc}oO)tOZ!S3y>|C+It8(i4sKTFB^zhfySz71iNdO4$aq-`ZH1IIr2Q z+RP$`qT>>HO5C$Vqsq=EqNky>b-CAN$Rc-Qa;7~ei9RqV zDG5L4_-w2iRg4Vf`Hp|k(+p-Vq17FDZtwq5M&PWS?4zY+keW;hjY({tx_cYbGQRb} z$=`+Nq}gz`)%SnV=vuy`6ZEYm!K<`i7u~%{CK(rAR3f=uTdA2lcgdRD(M+H{eSHV* zzo9L$AYLZj!`R4jG@Of{!wa%*j;`srX#ll{6+B$-b!WvQcc>F>x(lr*26aWn(M@H$R;WQp%t}*9q=s6zUU(rDWlHXIm z$!WY5RXfS_MdhccI~L8S>gNLpeHZnk=vJ-w!FyMz*pJSB@=eg2_11Wi 
zQ}y^hv|dkIZ?K%~OB64lWiZ)%PQx<_^PI%XU1@2I+rF0?i>f zJ8xNC%`?La{9L4+mLyT0b!0X&m@M`t=P7(+&hFIr{XD%VChWfSH^4V%!0bHw&N}iV zY<;HwuTZBV`ipxt9J=pDe{Qws_W9Mc^f=0XBbU+SHU`~UjoZWcoUDf{$tQQ7B8Mwr zEH|XaqP#gTeuX(uR!_5wUL3{Qw@+;C+^EVLO4i8sFv{fgfAZr_B)!&T^&Y$q*Xmd` zi!#052ip(hG7-*~s8wCRH{kdn)VIUu!K9On)VZtHf#n_v2ZMO?N%+e;R3a=+Ya^UW-82X=Pky0l^J!;bj?Oh;xs&|Ue4Y(VLhwycnm3>$5UpOG(qbp!9{j; zek;r{Ba`a<(P;XcOTG`Okz19?rBw$N*|jc?qGYIEMQc;YH#xu)r}=hvR*u(w7m{-0 zDQ6bGW6RaxA^E;?GAfZ*=M={5%$=g~V{myXIcAr$J)3+>+sXF$CyEktyKUk5BBwDP zqtnyy__@E~H<5pmu_F0{w)>mB7dd;MwLrH?jp%tssH9ZpG+(3t2bxIK_TA)IPutV^ z(ag?DlTTKwvkISEa<}>HEEIohgvf4vZkUc^SJ^f1jQiY)zJxS#r!TR;#_|-IFO<>J zTd2?JkgOno%abM^YWAJ7x;U~hAIN_95Oan+JMN`k^4T_k#69_?~VS$xg(GPOz0TvSr9KCt4P; zx6Ex5Z*)1yWX~WcOy(5y*-EX<$(qqrZj`?0yAN?yT)#E+^a0v?kx(5JB$DaLw3yt$ zIhjz7HrJ8s5F96d*OO#&sL`|{=_XfT&QWHEJu$#@HuFlOM zv+EM>vpd^Y`-!8o6;;teTQx=&^fRv@k=A(2d^KlrpP{Q?A@l>j=NGf~>;_c!-IX}~ zLyy;>YO1>Jw4J!CjUc@xtD4BZ^VFP`shr?$@9!E?`j^CZ8MV5TO%q(7q_y0yJr(Yg zqq#S!<~GhOqj+weR^qjODj@M5{U;jE5WZmtSvKLn#`9elu$;MYcA;+;!F8S&assd` zUVbD0$MjxP^JCUL#i^T8Q|CMo&8de89uukb~6&~p;MxrDr*Lu-6!UoxplE3H}h{l4vC zw8&f}vHKJKs58mURyJp8cB5m4TE{6pRzL5-{5`xE{VH5cdg!L=Zx$agh8J1Q zdW!pet$D*PQc7Hno)9=aJhhW_Tiaw zBj_4^6{F!1_}L$=Gt{?tYD6CZWh?lQ+kBq$64`Od!XQqB#4 zDdhDVc^-(eL_O)xKiz5+c?Hck;HQbUj>B0cJDoSr5h39GZT73OQQ&-VStod@;KUS2r+rG%?*urSXgndl2IkAqEp#GBI;l3RiZb)t-a)QD9Tb^3i2D!SMG;) z!q4==sCG09Jr%MuA4&$7rY!Iot+qC{W!*Wg_0?|f4ppSr>HdbooPn55E3d%$ICvSu zYxJVa>Fj8RZ-1tP#F{ORgN!>zqp7$N?o|H2Gk%M9Y89bqjP}WtItuloA}ZJEE8P`a;@P0&1vhsb@| zQ+?Bv_OH^+)sw^mRDia|ql1$#m6%cYHTC$q^56^It z`A1iF9N(Q&E{U$w0?l{f?g5^;Jlc~rsR2}-&)eioPYD`HysBs*8I}&Fu|xfThK#1M z(nI*9ocPUC)O`?=wYtm;b4q8mu`DN!8)$O@Ud~7VFzx0hR@USXBkOVWk{e1nSI`8N z%eB2w>$wr#5Irl62bEb&85VR3l-Dd+Zyj7^j-C8KyHTE~aSim?LLbTBHA*{KeR)F5 zIb~i=i^b6Mu(q!!+tDzWli#h$;aBssJhSBFQc;%5GQ4Loa-D+zW_q7y?C(qdS!YUI zlH^M&$*YxRbHnr$WW|&BG838M+iMD5t~|e$EU;O7DaFfHS9hIpay(t;)LC{2URSyX zD>zr*dDh7e&uD%%Pu}xb#pl|cM`zhrOg5G$aBw8P6Xm!hJvJr#!*RQ@AdO$lU6L1g 
zpl_n%%z18tzh7YWNM*nDYP`=H^G*wBvIh^HRo}VF?tt$^^LiLX>sid(WNL>>>6G{XQBpqIxYCOZd(hgmW*%c<_8p^GR{NaK zHKOlFH@VgMsZSnYKRfW+8$Vf_&q;#MX(IPBCy>r|^>c>%5&YFg&0%Evs+xBjp~rfk zz3j0#f7rO#i^NXUV|P?M!v5OAWum-oL2zdPvuHxm4X538!pB}T?^@V?nMnPWW#AFt5!WyZa% z%e~4k?M=S@)xH6jH{qr+&vuq^|3=?mhT9gt$(fI{d9Y$e-81!90^Rf3QzyKf#sadd zoLs!w-*2L1QMSPIwa`kP-1W=p{bpX@L6W&EpHua7)mck3Lt%X_G-n_F5&CSd$7(e9 zE41fn=TCIzbVlNeosGgZuYj>tTKkzi6WuwH zOMf);K8GgCk?TNoPa@}jF z&;A936i3?$#1Pduxw{t(%UTszapdnB_= z?b$5&6q3kF!8bhT5Pw@i#p^iEX`jnU{U%I17?4(Mvbq zw?avMy~ZzQWXZj-5A^ddbe!w+6V+Zv^DWu$=X8<#n9Y6Dhd(;myRAmNKj|$Ie`*+e zHu_z~o6KSjcfoR^?0!Hx50K_LJkc|HN+y^Yp0nEWwD-}$)3o;@i~9p#ISaZH_1Q)L zk+*1~{TI-b`StgDyBc?Uk=O(y-pi!c$=}Ng*!o!AM9O$UTU}}8Dtu<&xG8+@MtK!p z;dR`1*G95qEzsW0EF>9D4)i2Yi>|7&^bstt46Y9=*w^*?sLZnN za0+DQOkZU`t4Vpb-kye;#H86%>ofE{9B<9oLtQh--18cupX_UAj#|O1#0bslwZzQJ z-P)XZXrWxfg5#`D6rHok z`1y@GFQ7l?0CJ9dPc*Sx+I#y-;W75q>O7;1#mWqRnL{vtZa3}r9+dQZ*cSyp1m6vduN zZj#I>4^VO{lvhRTSe#|Yv}m5+6CXp>t%b5gCs^oR)?_Es-xYpEB{z}oMt#l|bYKljaob)WdCp5Twd9$bRPc*U@!b!e6XPdbWL}s^ z&nLk{vV4963H=K;pIh&VBe#rJYild3k*)Ah6i4s@C1>cp7wN2kzrAN&(8Ym zxXSu&BmN>0zjFfn6Pn75YO^tD9{Q5W?oqOi9oNEDBIvcje`yFg%HP>a-UkP_qWu~& z-pNDf#M306GdM26FXXKE$y!|j2YH@*&}WGSS)5+;6mg~hd+RTeZW5E~EJ%2S*U3Ia zE0Wt@pkXR&+N6bR3wr^H8vWA$d7?zHTuw$=i_de$nQEPY;(zIKMqwrr4_^UAx#{{S zP5o|!YJr2nM!lz5%!Yzpwq;e*j4~ya%?R`>{^#)`8_2G%9(KUUeQY3+^k%a2|Cn)n ztKWZ>SwM%`CH;;~j$qewd|%JlI{-$ezG-rCZzhk z5_yWuEy-_q#znB0{gvP8E)mDGPB+)L)yQEQy6!>Gk;c3A>NG*c$z)!g1-@aFUqv&C z9+0@vzu-JM?rRz~s%xVa^zTN+MD4EBS9jmf;vZKT?RUZ2GuqGHpigOU5g8>;>5U|R zI`1n8nd>vHq6z9O!KB)PKHT^&mZ`0r=vzX|s9S=Iito=OeJg-9OX5R1{ z^O`^T(9dWicW*v3!Us#qvs(f39)y^j%;|1qe2mRphJp|Aw-{~J@OCZ_KbMzzTfdFz z{}`5<47C|kUxUP5u(AyWnZupHQZ|_jeMLrDYQq(dAT0PL(DC@`boK1yz^OSz~jk#{BpdTHD8y)nXbzW_vINM)YJ9+T`^PKZzdJn0>!MCMQ7RX=IsIz|Tk|5i|ZWc70EW8K>{Y^=o93 zC`eD!>U=iv7j7=3xf5~wCM+M|ldRSMq|9YnIghRrZ8CGHkw*U=BykSEl3c>AU}gia za+uodVW=0Id`GFF@O_gKUCem5vGfnnH>)ttZpOtE3yn^JxJ%Lt!U@3FqsXjfPXyh1(n{g~4E2|Z> zuoB+O^IAD6zYm|>8Epqc#G^RKUeOGnzMhaNY_A{bC 
zNE(T`U5V#NKF3>NJh7Sjz|YkrbDR1T;9~+^d;~>{XzgWV!ECK()in23|s(HP_I%Ze0X-%t{;g>GVQVt@urF>v(zV262 zOHS0x$a23oGutQ7krNQPm;NwWWo|lLPe<~Yxv!sD(S}0p^?1)7Y$baCP2EI8%-PS( zRm!r!P`w-o{^-&5Ma|5lRb~7{UqmSGJTtV(_{9i^7rC7yg$T-_4KjCAX{*$XG_kWWu za5wBER&gCzo=T57&AMFOoPsO|*@+$TygFH-ZseO@1u1+(8p-=GA98aZwKfWiA~{}8 z)7#LJ`9w|?WW6!@d1lk)a`TEwq@B1JSsy=!ZDggjBAGQ{i*-rjN&ug#p?u~?7m#{U6wB;$zl7glaGsuv`Z$G5meY80*EKKTA#Hy} zTDg}vf*j_P#~^lG94(vRY6=v5tM5+e@8Hva{1$_k=}?!f$+_j7wbq>dOb*XHwd5vA zMH>8&zxWrOPx|IhH4>fcYSbr&X-?`VPF2=#^9;OE`?*C^N*kG9Cx1h6+DVJ28z+L} zjCm92rit%2sGHc^Jy_-AQ1Y5m$7CEs9f_nf@F-*VJcjZzo1;=A=`! zljn$INH)>;lOJt~zIz*2GOu34#{SlG^3^52<&~sVh8>&+U2Dw)a>D*7h^vj()%v)W z2WVLED>=halZ0|R@HgHy=e-g+`#>m3)Yj8rH4)(_;h>xLi>VhGbyXtgj_YYZD4&V4 zYqj&OPjlY9jPf~|JDD%b>AB8$Jz5Lnecpvs<8hvc;IBzDxs`IicNQzT4No~66D{rw zEhF)keUf2%+2q}CxKC`=pV73Hq%s#@tLKU+d6C8voiWeMSCPXZM!Y?YFK_xhx4FlW zd`{AI)%Iw!f;Z^)T^=*BA?{(bchKl_wEq~o=At$Cc(WpiVP(7hktk=~Gah+mDru&j*nI2tD7+Yu@bhYha_Fe&(PxXCKZn zI>Z_pno~c?GiMJV`6iAb{~hF?xRtlCqEpy+N!t2Q$@8^0m_)Ph_*H?=mfjyv-rb?J zvJvuDzT|Y$xsFypHNHQ^>J#7VB%^jqbE5O;dI6c`bWCyuCI0iX-uGY+iSM+Szs=fl zVjaE1%OnzFBU;F5hnr#eVr5^}cSG~3hWtVI!fYn2s%r}wV!~!Q5(NNYxitBR|O>D$TMQD7R?@LUxjp*xviknc; zS?@XVJ)7L7n3rteJ8poE;v}2BfCE|HQ|vVPc-KMBc1W6k&SoR}MwERV7_#VK5-o@nuM%yQ8<#E&{ z%1h#Ny`Y!a(0A&70dKw7;a#Y#PI_BND<>RQq5L-7=2XgU+MI2MQ(d2b(R9|SE7N0r zR-9Wy$;9!wpPwP}PbCw({0F*yPAj+SV=eUO%zG1*B>UrJ-@c^Qvrv05onC{FL>S$o z{1~swvEVvpcDF(3o9KFpoF3HbbXuuEet)sZ=Hzxb+0OHNyHE3!aSdKRW>c^D-|*tTWsl{V-{!GT zg0QjvcfnbriIgYLgVnuDo3E4UF0;qniX4WTkua90t_tw9H*M_a)!!(q1&=w~)s|(y zh|9#=xD_U^q3@hT&g_1nXNG^t^)CpGjMu?jN7$%O##?caae9>UiH*C>x1r@;*h_Y- z_w||cFj>)Sil&-soeDD#(7}z$C5A_=?{X47)y(2+(#tuUDP-Tl{AdOowb1`BB(McH ziCmUwFLNO>8p@r(%Ia=pPbEAhCB|cRL?`KKgxVV*bwB-8)n2kozeLJAjp#Fdb}MeX z(PEJQIUBgzcR9K79B-AIf5WwTmA{qgE6*w4X`zMI7eVi_u(g8>O3*^$mb9R?zOa-y zvx|Ah=Xt2yO?=3>o(N=(dH33|pEb^(abBAL&Wta48lU9_*V98!e?LW0PBb2disI^4 zhv@N8a2QQJq>sv4SimlF&*eV-b;Z-SMvPf%CAP$~q4sbNviGOJKQwp^dCq zCo57P@_mbKtRd}*C`@*Y#H{&O4}(y&fW7TvXM5Az3erxLiM#NbdpzBg-a`9{*RT_{ 
zxl6wsb(3N3QYh$0_S;BfG8vWg)7_jP+2pz?pGbkDNNxoA*71Ibk}Y67yXDnj=?;?a z42>PV$`i2~Ki?IICe5ah~Vvdicwe;kC4qc~@(`CGl)B)(Zz+cOW?*C>_K+S5q3Z2<{cZ;#9pHMky%-{-X#!frC_jU2kttYRmgNg`Tj&O41QKcUT4 ztR*)Oa{IX+>Q6H7NZyG@*-#>#FNECWDl1QmoAr?A-|O}J4qIuV-`oh91G}5Gk!Pls zyiia5ujVm|;W#n*5{o$z@sh#y8YB7!EstRBiAea3eiOm2B<}Lun|-Ea_1TH?to2RC zZ`K@=e|9fDwJD%;Pvh=PCG)(LC!xeP%MA%yn70iiDA=$9oD3=$m&B{YHchkrj0yh zy|0aWJk>{VkXzA5u#AJ0Ozix;zko&)QU4OYA-e%9Y48X%PN3D?N+^x`7g<`-%6v7m zq86ywK}-Ads5`Zl2*Xu91I~oFm8eSmfq0F9M)#beTg+x3=H*u6YdxJkV0O`;f4xZE z-Z)5<jjk3j2EwUb@yb?CW)#2zwAAA*De;w0x>oO5Lc{$ws!_Jm~^6znbKC15T5BWCt4dL;aqpnG5HG zX>l1^hWnqr<~C})S*W=myW6aXH*l~6B5&2xD7Z<+?wrML$CG?zrf?_eo`ja%-Q9ri zSNYZ($>n^t%gIpCk2S$tb%`^VLXh!?V;MZ;rnl|Cd2g z3FzHL0`DlbMH`vXePl+lk#9{_v}b7HMB2TQWTx{kkDwAwlhI+LODB)lZvSt&I})70}UI49T}Xk|bAf2H&tq<5BH z7ULoj3w|==`xY&CqvZiTWSu5)ZLfgiZ|UWE2tFHfcNhn=URGQEZRpCml7(=7i%;gz z{y#ikKbRhGmimm|6RUM3N@wZ$629Xn-2b4J+O*!xd@JMgR&(38NhT}0cY3#u3@*g= z-^PhfT09jJZ-CdV;x(bwCFD?%Z2S8(F~=4{`E5pk#%TN*U8Pa>F{y0hEyk&HF<(^~ zFP}ot`R3mJN$D~6o}Jl_ke}!G>M&g!wm!q%cq7Mybeq_PiL-Jlx_W4T5E(A!Asgs% zINN)l9$tseXf{zAhf7E=`Onr*6D>69_@{S|bgCTDr?C)(fYgn+XAUj#KzR}X@^nM@tP9u>Q z(2%J5ub^`t{wnz2$*1ih;6hwyFDn_-?vX1T@u&$<15@S1D` znXM#e@p`Yr<7-wh#*uq{TF=bk1Tx6p`M-33GL4-@iaFo(IC&lD+j{y)+`X*gpN*p8 z#zlrJcul8S2FX!kQ)5|d={DGcFLQH3}zndRT^x{X@ zz&Xn8#eUY(@4Jw(igljB|L4izEc$K*rLU1=ZfL&=l}m9s0tZ=Bx(w2?o{^I&hq9UR zu+faaep0ErMk`)m3LReNZ-2;`il(o8zkuB&&-)lwQX0Mz zVf<0A5-Z|L{eFvvCM4fhkF{~%Q@zB1TjXz7@@&UDZP521Wqu{`3R--^cZs%@c>V|A zs}En2num#ou%346)hl$l+gA+rA8!mC97FUCrf^Q@wgVzrr7A{vcD z`M>aY7CBLlk2bj*6L)F5T8X2!O1p`ImbvLIddn%E#89i}e@f?! 
z;YilFP_4|G_t4kbe%ioFVmX{vSZPl7n>*+}`67E2DA`G3C1_$Aj3rh`Zf%y*W<|(K z2ELWbo&g7s8c)vP+xFJ}k!a~aj!RH-J{}T5BAIxGkVS8;9IfsK(wTv)8NR!f|Lee_ zuJfBn&3o#hC`ZBGyxy(8dqkVF^^jN@qv^UAYp6uS#n6#l2q$RuEf#bXoQy1Js=RuG zNw%49ejw50jZWV=dohkQM&db9?{cywQAcuWZ4DG=-LnNBUlFH&E0t%T?DQprNNyy) z#)n*lx}0!{=HJm`V(3;^{sz2HMq%PxuHYr^!CxX0_CxzNy|+a}BEFRNGs~FUoBV`Z@swEf`?Ags{8{2fCfaq*t0e~FbK1*Ut$JGOr$%DmIe82^VrRD8rdxc`=fhw&4Q(bbeR29e#X_{m<`Mz&E&Z*}q8ncZ#D zTcUH+(9b~a4P%Kd@cFP3Il2B0Pn=j1xiOe0r{u~?o{O&9?5Ll_JWVdR5^A>KS^L9R zR#LV@*8~!b-_CQw9=z-R4ny%E`28G87O}nF zVCG^nIFzQY_I=LVCf{!QP1d^awNb^-E*|(gp6*sM>TNdsfPR*ko#sAAZd4~{L(Yv4 zq?4;RX~OZlrk5T>3AgS#|&((N<1w z)zs5`GqB|9?nH9y%#im$`+uN!gm3@!X&D+^$J!D(;RLghc{u(~OU-$utYfvLi%-zk zj%VA=^XKkQcE2+Fc@S3mliqP8^eOC@rioj$yPE$TMJCsx;sCaEF{`NJ|D80ygJz$= z-C(5$@f_3j^&M>uFt_~6Xg7gJ7|H${kkkLXy4IX_3<>N-*VX7Tk>KYV0h+__t#nvb zxdqA$M9FE!^5h3ePLIm)zb|=ahvyjFCi+xP6fEWUTB117j1xuobawqTI{)EiN|WbI z`hAz?e^dHq5*fso%!AEM@RSqsS&2&?-#Pl8$6_*XZq94`$-CvGLo3ob2hJbU?o}*v zBiuJcQ4Lh)yh>d>Z-s^rjOayq@4tlb4y1n=Y$RU5LwxApeCBqqd!i=0Fx~isJN-TY z$9tf6vd^BRq2KiUgWhu9Jm=%Hj=h;A`jFV^_`Hz>9-)s)ENDBdjDovF*vZ(K`Fc*4 z{fW1GSVXMj74-M>{dKgFdwe5#mJ4C4lQ!xX=6%1JbN;RFHKeuHJfnoN)gfw!mS?bn zoDgn}vU|xa`-b79zfY&>|Db|LXbic}((5u>Sm~KIyAauxIFSeMP0vT7_B7nJK-VQ+ zl}FbC|Br#OnaVYS{$JJqhh`W0dy!Ahhm8{O*nq5mAfZ0`x)4n{@m8CzGp|crp_TaE z0(03n?WM;Dp(Xc|=c6Wb@UpztgM7jjZ24k7AQ@a5$ONP743tr)EB@%o3Tz0#WMh}9vX)G`KkQUMUWELO9z0Hz7 zQR_nzxR7nW#fIC$$PT0Enf%6HXdF(itNm_)=N+sjr$%#DUBhfMYgEa; zHk1EJKF@MXV*1Q`Q zE$xVW=5V!x3lOdaaOuQRA!Sud6m<&M-;}({ptNX zH2lWGz9fNb)I8I>QbvZOQC|fjYvHmcTI;Lzw$=_Ig&RryG>FO>i^NWSlozR^#byv# zbRx62_ZcgO(0ystSJG;-b=?T9+l^>>^32@uP#k8CJOU-z&+1L$O_j`=W@0(LrtUZV z-zN25qmzlOGv`$kQ7tzt$NEkFt}E5g9I*q-$|$qI3~@cFW?eaF$g{sa7HZm{Bl&o; zc3B-}CbEypeEP9ulbbl>jTw0=Ee3bV#Z-(I?!`eb+DSx<=4xL^BMW)FWLRB?#wH|O zoUAg-d^=JAu;p_pf}ZC2$x~jW zkq8Z?*xw-D_#NIQ*+Y`k;X?Sl3qQ%_nzh;7AWz1+#Iws8-1;=qMxCN)(vS1ziRh4X zXC1WFfepv|CNo=^!c1o>&Hh8jFXH=YGRP_R^(66WLFS!V%duKWOuA%1NnU}>S=XQ` z(M5A>Fz1P`r}wAzTF1}6{6_9E=B8@S#$*nbxU~y>H=isf;UXuCbDkwL-zFrKHMq^j 
z&aCEVmFXxr{MUCINaIOzO+1QNQSQag(c{x-ypjH=ky|bFPl2LMzPpxO`;$py?=lCQ zLKA&xzK8Y_#k4d&#%U`tNXGmBs}_zk3MVF5R=O6D(KfA2CFMjr%DJp3SzI!b+{dHE zR%;h*=pfq334=V%y-OpBTJZ*ChRV8h8;feEm0O^!BVFZW-Pn~vreMTyCJ9O-eF>I>_pvuN-oqwp1zBdYYQHKAZ|aWw|n$;lh-ex zEl;q^3TWF?`x9v{8BI6PLy+0W`|N#`hMqjL{E3q-Xi7AbgXv~(GFi>mSD>WiIe#0` zUsQf7{*uWnvzVh%_KyF_-w^66kmC-r%K7qx*+*Zc|70C|u(w(KK&66?;$5~GWmGwmWXYlkT>9<$GvKy$CwV@;RmrSgG zu(dDQ$pu=u8KM)TY&k!9rqT6W);7TWCUI4l@~FMdR~xgK+n+(;7 z71a^8Cn)_rj;FG8f!@AY!b|U!Rw6Vr;>gL zwpR{vn&Ebsa<7xjQgrRX%J)EZR!fuLVIU9qGI`}0W2Vne!FhSq{fnxc=~?2_DRh*R zfH(4ON1K^sHa5kW_og|*^(^Xoc+8G=6`$@4WjQZ>C;cP~^lrZ|>LsTa-azqbkeEEv z|3&!~zP%mKzv*W`a=4rq&3^sKu$&0#i3YS)pSiD|IHBG3(UJ^1qVW=zHxPy6`K?NL z%$eU7{Nzq3I~N5j`K83XxLvDnu;({uvXf6Phr`oo;VWp^ zPpds?EW70+wR$(6tJ6uM^W|Jja<`ofhn1C`Xx5NS6SYWg2OK0P(nd4dL>k^-yNT@9 z8}9yNjOs|vbJ6k)G*+SK;rc#;PoBgpr>9fZxXdSids?Vhu-HH7Xe%UagOAh91!thR5$Pl{e4;1~^6k?Q z6YGA4{1e-zj51T8|7mqDfze84d-ubBb_%ybz;kH$k-W-6-V%t)IlWFiOxEiTLg@>r z{+ZphCY9W*Z=n90dc7S+UWbP#NH_OXXW;NR=9P$Jj7QbAwDc68lI*xI>2&~&O(-DlTvRN8h`kFwC%I;_7SIqr?`_bW2cReCf0{!2IeNW|cs^v5 zrTy__@F$9@`Jd>$`x_S;K4eFL;f_P^`(K5^!%~UH{)O)$&?{?xzLr( zS)u!U@;wX;M^#S7Ez(MIyY$fF9cWHOu_J)fH^$ME;iplelw4d_zH>Eo0(ND(#i?g?kF1Q)0}fnZ^?#|6ZeO( zyLC9tOl=}c2Wl&)haZ4a?(EWWs5S>-{RBkIFurf zBaNdui+vUvdgC_{%LhRB9i+ZcOF4sDj1Q=Sr)GsZS+lzdLYnz*EEFe?+NbP)4LK(M z+B@*Olm?TrC3`PRS?D;QMwW9>lNn%67<{eg^?Y{ndMt&EoGm`hPZu_PDm@=cXT?x@ z2kU(f>43CrJrKT=Lt2r4U@UK4;}4Ezo)=PJ=*A2_-rU%S{t25 z^8#7z-h=E;QokuJJc5!Dv@)7+$W4Njxlg^s+g+mN?6#bxpF9KBL`QMs#Ib5;WSU62 zxeb~fho!uAPR~}M?;)@>8U7LpEctPtQ1T{!6X&@E{XDM!plE+|=g!zlH8zlWvYq64 zvk9v`6zV4#9iHF|`tf&Psr5Fzj742Wq@jAOtd~5UR>ybFwB&^U8MrwL0^ic_(Y{*` zw=L0@6=i|{Qt~;>KSJ;|H^vKBwYQ=Z}f!bWK(MC{duI> zfZs?Y#9j2BCyB%rTnb4!<(7;qsh?GmlOZ>+f7Ewr-XSNjhwz`-L)()zOsALSq`H$I z{e?#wZPt++yd`+9sw^kBPLk1R0Z*PA?Vpj+c75jLOC1tSoTNOpB#%LSR9U_3L)LZm zQic?=N7jMYdxeI^v8-3&XD(g`!AG9*vKGE?VWv31OzL#?A0UUs&Q9*daFY82W3*BV zezJD3hYvSxe<8^X8 zmnxum74Fs=iEAqJUoBj&)+((HHoHrFnip8fu{5=j6=8{`wuQ=m{)Bj 
zmGS0TS*L1`$5GHw8?LsQS=|d!cWS+x`m>A`PeAaQzJJtc^fvlC(OvFp{jFv3a*XIl z8F5$gDP_sF9D7SVsdx1`&uF^6fS@C^I+H)@Np>S3X*HS}6#UF+ZIxH&E?#3N8D0c= zr;uQ+@8O z%*`nHmaI0AUZU^4Wt7a_x`SzCG%58!(eni^?jxsGB=oFLy3y%BoVwjiHAFq&A-B$x0*8^PWsQIqZyjk(fKMeNS3HX z4c|s;BlS`dS`+EF7E6e~D2Jz*Zz7Sz=}Yw4k!D>rwJ<|_OVJ;&fYS7$Ws*4knozBY*t^pkAkUFfQ_{*Lh3V7z=p;#qa- zNN>-h`VcbRjHCC^SwsJ~!sj-9Jznr&eQ}xj@5|mDg62HY&t+4gd^e3H9>8(DNTR17 zWajlAP375iCN56)*>`H5g11C`${D@Vs2PO64P-l*o*pvVEmG=OoK$1kxm*7y3TEOW zXYuB0t(X~RMH?#k>#JL*TyJMoc7<% zVmhGz1|@P9D*H0ox2dakV(6TO#!X62XHiF^dn4|cpy>IlFAV3*LE_M!&r_Cx^4a9^IPS9> zT%6wK(BRRe(##z6Sm^7^I~B*p2GUIIfy`}w@Oq8%mqBGtcO~*oCH)j9w};_0(HC?2 zI{Uce$bB@)G&LqvrT5%1XvUtZctPvGchT0REHYWPb4R)tFO__FcPJB%F2Liv zRs!Fl@!#mSyZ@QfmFIuH@P07F)}!Ua(cBhIi7=D-XzoW3A*Yq7MBkI&$cp)E?0*lE zn$C+<_Gxy55`k}ru{)W^d!lM1ZvX3>2Vv$NeE#-^a=A>SrC z)gb(LQ|b!3PGrZcSk<-vhq%eU&H0|hdq@uH+>E{p6%C*^af%WZ{03g+7<#$QXpo!B zUCmPVB88%RJU`G!6D?-#qY0mvC!61CxIB7G({NGL^`_{ENBNqhb0;NFG12A=IJ#8X z-^?kplXWjn$7><+6q76K53(G@Qch)`*%x^XrFDGrU%Jn|#R^80?1b(^^XJm%iM+yY zqej-MgRDFS1;Ok5AF98T3bHs5WsP9&BwprPZ6?!3&T3`+Jqzz$^_IK$K|yH_6j|AC9AE<7BhAftk=JdNq7r2lhS z;B;uoU5ujj_QY@-Nw!&`=weooTfOy^Ka<=y=_R**lT+*yGlN8dxeXF}@Pngaa|pkc zm4|1QZAz;-A^RDvbnz}Rcs^(0qhT#`-$tzU8J_BDI?Nf8Ug)1jvOkgDM=UIB^~tyL zk8(}5e;gTnuJ_HP_=0}g!%;@G(`h^ztg4gi0IQ(6(YY78P9>K-LFD#8H5gk}K+vmt zItt~9jx?V}vdfwn;7{xMW_1^80eGSLQ({Lj+=bmsU9zSRP+M2!IMACPlVikMd zi#1L{!_jD}NlLZIttI{vNAx(lDvzFG_;{FfvU2w+I?tt%>$4QvHGYEv@wmba0&Bs=(Dkt>3D(Jh7Y(({*rEU5!^r;Z1r?ob1Nh%JafW{$6ax zpA0TN@s%8DhtO$9Qg5lH&X8~$nv>Hyw;A3i$zX%15Sx_SO+(Hi*De;`oPc!-*grALm zE0M@4ywb5+|66a%SWt2ZJ%#HF3;6g4hh1rCI1cKtx=W1_Q{eAwl=Py>?19ABHDU{M z3ZqwZlqc>~GEyfe`SpDF>wd2$_xIEvU6^ryjMGx)t#^_2h4j_kxO+3CCoe}<;M&sD zOfq^|-RIQp!;U{g$E9R_vbvMednEhG3dbchb-%viLuZok1}(1AZ=MUE;p5(=<@UHL z%Hf_dU^j2m1((m7mwZp2rF_4dZM?0$xApcm?=#&uC#cnf40Eb(I33TRnb%364IcI) ztL}99EUisM&&eoiN=kjT`?7X#X3Gzux}nwwX)ibBPe#jJvb}}8f2QA@@oxcnk3!-Z zXm5kE_E44BJ;^P&mEP{)uTFyN+`a0@(h_f}rZy8trrJ3}jOe9E{1Y4xHx{+W??Tjl 
zO^ywD!5?Y6xRw&hZzOFc`dM}h1{8ctRyn?7S^s0DJ(M5HdREb9)_$_y`ypC$^EIow zZP~#uKF^w1&e~_qGpl+z(~u0x$-+9_-<;Ua&v$BOEwd5pZUTK-lX_m=-?f}udEN2* zr+PP%PV&C~;nSSm`h{<~1ac?fJb5s_D)`%+7fQ6!+n^}7X>W(Yrm)kC-^h;I5ImOB z`$04@UEBK`g>t6gO>Gu~ghc3145HJuww>m3OD}V-F1XG9?v~fVSE7An)^rIp ze2t&PcTF_ytn=1ng-`n1l}u{le>}M?Vvhq@`Vg|-p=_d=P1j@QO7kIb31rqqM{Yyr z?9tUUd9SfF`~Atn@F;IFP0z_Jw-C))mB>l5(exf)F@W8S#%q2An9;DZi#% zQ!>ftl>E!aiNp{Z2f2wWn7B6g@R=i6ZdVd+u4JsE6}i?S^P^Fo*rJ(T=0-qnvo}@q zU-Q+R70JEjN4@Kam*MR1KHAE9R#rqukzQtg$$}Ist5=wHP9ei&V|~(3X7uHtxdGeA zlU<%h8q(G>{}XQ}PbrOknmfG7t(iNpIXzUJz8=#r? zH}Kd#W=>je5#= z;dc_5aUT-Qt=v+$y^^2Go?kK%B$IPa;U-u8gV0h@yYHYQ8SfGq_Hnb^@Rls8>wFjM zeNMT-<{A6bbX}hOdaa$I#xUF_@9U)nn~k@5lssmUYef?Ig@+%Z-p}SdzoBh6%|Ajy zxrrYScL>xctMhac?tsE*Eawf5Ggr(#`0QHdq*-SBcj2ZndQQ_r?s`92n0+>Yma@3G zi0vd>!^bQ%o^=xY$o<%rY$!2d6QQaO-R&mf(kRcaME1H~Al*mwogMj}tf9a1S>4WE z>8l~~e_E-o-$cXA*@@iAOy=awMUGdZD@5f+f3nj*14-}mMMrre>+MM=@l#HL&5}@a z8+|`b8p(;i1x}aI=EZ(j!{pa=znJWEHu@6SACBKL&~b}#KR$L0ZhuE-cUH8w5#?c} zFQdiRp{PXx|3|a7WJO(x-VfRONYroA$}?;-w^-}aLhelsXLp~|K~BCWLh4{oMyuK7 z2(|z5)Ygg@$XTVt0ytjlH$&Z7=0TTfs|%SdE%5LWl)Z`OhNndm0T+*LyoQ@+Iu$ z)>L+hlST4aV@^xF{i{TMKk=!_Aln@6_tVP*Mu$^)mz*8jNlV#neF>M>(bL%wkd>wD z$Y_eOV2tlRS7tt+dLQ(Espp(b+5-=d;;J@ujHl=9u|?N+X@8qB_9wL)vAsq(O#Je; zurr^gTku#X`ur31URSm~-On{Q{g_0Sz}zUMZa2bLBc~7Odo^xX!})alcGYr2wvs66 zpOZu8v56yi9nRL#St8C{K~95NNDW#(z>M%s?;q1fPiWY#WPDFc7IF^i^VHOmPuh>9 z`e?P2l7F%4mcC8)heU|z$Xk9$yOWH7NBQ<9zn81IS-VY*w%4N}>x4DD+l01J_-F{< z*Wqv;eg~NSC%*R`Z2K#t@TWfSOPe_n{WFU5-1Qq;&h^s=PR`fHEVR@hxqI>4n7t-m z>1tB>)w?TLWPLNHa`e0>ig(e^JTr!z9?QDfLXw!s(oe_dFXVm-N;;!2GlQjk{3Uw3 zkfr{pjkanhn%zuNEXr}P7*;2;$oEnDUphI&_oLOm4Xr&{)ZJ>HKwrsIx{mHEK;%Vy z-SPax<@i5W=~}cgAC-T@%NpF)(PC#-ncc5@Nn;EB9HI0GI7~L+d(9DY1Fs^RJOuA; zc-3bhxGH%pMB7`1+0p6Ha4LIy0Ht+F;YID8#JZkW;xH5(T9D>nJWp;TW!15TR$idN zGNe=)0?I1A7$=wWM9GKxA1G{Ljx(B9%$izA2+wJ`+)G-oc#(z6N$b2fzoG@$!UC4uYw-3Uz;-zC-5P4Xs#!&l3_Qo%8w_PejK(QVRy+0(Oa!wAb*xwZ{{Dl&pDh- za)u_SSc|e@=bXXKzR#1xG`~I6Y@mlk2FX1CY8q;Vlb^^XBX4P*qCY+xDpS_0kMv!Q 
zUK_BmV_DZQ?YE@6qxGCQPJ2`*Lw!yd{Y@shDVNpea(b9RdXtnb%i~X=k1+*1EX^{e z(f=^g>jvjvlITBZ&JDAi(3!xB63ZcHZf`Ri*@m*|=*SEs&p3$^yt^5xSC+bA*{hUBA6(F@9Ka#x0$;mVaN8$QE+HL}m-_mqRl3b*hvvi0U=Xy z`;poCBvjl4@rjlB1XNzD=0bfZw#<{PuO;iQPd_Vsy8`yQlR%z4vNLfFo#oDPJ3i(b zdU; z+)yrtySM2+XsxVdS)XJS%iZ9tawh6)Q9Q*Pw33?>#c;ZpzFQmTlOJXtM7`tloN>?H z;9Cn8oS6T|7yQ>CcD0GMS375m5&aZe%#G^g4C}}*B%){u947|dc%Q7{F}Bfo?q4Ln z+g5EhB)fGao4IH*Cahs;Yf#!zkNX(k#^bdUWZs3>A$rQ~l19o8(R%JgB;Qf~ujex} z-;I`IzgZ#8zEWFyO7!WrdME>*`+JueOCPe#4nfWWO<*IrSCVJD=GvafdMX&@vyaij zXIs(mKOX2$eHWcXnyJ-BXlstz!wXXHj*HizYnGacJUg8v*73AC8@F1$oCey{_d|KV zNI5q$65sO_?=nj(S~ZCJ{oTS$r?YQ%sgb>i#LLerac&FL z(R$WPazj1QJ?rWlP>{+o?BI7e zxt1@e?)5|HIGYq^7)S4938iRoyOH;N*3caXKk(gol5FbLo~%vg0DWhzXcSC#hmk}m z+|!(NI%~^1|M@g}sM+TnlFj|dc-b8VDZfvPhr;n1(%Nk_dsvxS?EMz?_94l(Z0UK( ztc3TR>K}%-+Nhf6x9HSa2cGF&_I)BrWWBm8tYkH)r~a0~VlqP~+v6d|v-$o%sLh-d z&)vg*INVHI`#{3;eC|_9w#D6b#-@AF)D;zL>FQM+eMl0MA@?(q%?{L8sP1B{ewUn* zhxB69EhfhoeflLHit|52I}rF3E3YS!pkM zGn?UPg&wbfk^4v|H-n$!@9Ud$l_cL4i$CIHkz13w>8+q>tygLJMDv;k@Wlv zc_o_VDiZHOUf=rsQ=is@r)dTM+KMD^Wa*C=d{WK}hGT;53oF?_pE3NMfFOWMjHpX8wEw-}?}LFW`snBJ-R&E=|+J3jLMvMDZg@Ri&ex zugEO61&Uf|aTHl4V!&FhCX--dVlUJ}ZcyfQ(~aaYmP8WUV+t#I$|uLNyyTgG-rQ^} zfAe4Te2d3d$!rWs&GYVUI{urM*P>%6i9E?)<|a;ZxF1lMQRl>5S-gI)>~whQVvdpM zB%Rg%%1>o@Z%>2Ed6>)en7FqYosuVF3r-Rv<325YX{^fUd(z|!YUi9s_Hr)r-1LH{ zhV!(%iR}L&uUjGha%Gd{>|Z#3fyDdb^*?w!lI^X>OHOHRBH0qW%7HLm12Uq`ueJ0m zWR!=kM9Iy0+Fz92ZeI5&X>C@gI@xX3Lr<-LtY#J5k06(uX>b8+IEBR9dY}EA!~vQ> zXVJ?w^!p{Mmf|aS&gSXoRQ=uv_sP+4yV=fe(mc}tiX<|bWPXIq=8KrtNj` z+#Ap4q0HPk!ZqBd1g_9%LdCn)bN7sSq=#K9`v_Ds^ad1`hxsGw{kJWPO~ zd1TcFN|GDrOyki1U?))z`;qmZd|NquNwXc=qI1OiI)H2r#MTQ?(;K^10!%!Oq;p4nvt!f_wSL-n{<0CDdt4Md$4r~ z+4iHW>g?cP63#PVZt6BMdghi_HBxSihHh->QT0#vX;u`oCO1!;;~;+(1dWBNtP>xO zx9rd6%enC)VU2 z%IU_VNVT_ei7b^Y2PJ4Ru_taq|J5iPNdtSrQ4PAtF6rsoN(B0s{r2WF&W6hU^u0vc z59lCw&=V6q>zauon7bdzYBZsMuf*o<&EA^mu?Ni_LN?iJU+?=o-M!8`%!bs&ILLbU z5L(Qx%tqh0g2|#M%1?b?mqyRR)sqEmjxE^R-f;IH$Z1MfO|`iVN7;ecOt;I?p1XOY 
zNa0cPNhI+|??O}*<1Ox^^~8BwRG4WevPklc<(@`v3s0rjL=Z~MmpoBaCxcJQ*0U;PDaVi1pdf&PEs zEmkO73U}{o>ovGJm|U_y{&&I0BsW)X1oeRH{k_il$$ETyZu0)l@}v3WhRvC$+>U)o zuem33B)sN!Wu9;2NfxrZmd3VuIJ{g>%e9mfI@Q$ZL*gye*+YrQFHb!+aB_=&lW#1$ z961?wnfCtHdN^B4V&&1?pDYp^bhRD^86nQ)g(s3lZcyJrmbdYMQ%N>=BYLr=a`Za{ zWlh=58njfvO$lwyM?(Xn%PjsetI^+}c_rC3WW7J*I~hVNvZ>s-&1}Adv7;5qB~QWA z^qm{Yi3(e*Fe8n9G=<+g^n0^@$C+oHjl0C7YEHK0l-=Te{7WYY%Tw4&_^GRhIylH0 zLGEtkblMy0l;Ih-K;OH1dy3>=W1q>xu?}_F=P769b~qogu`rW3jnuMBle2fn;H(6S zXT#`Fp16V8Lt9qeo;-6Kc0Bu@XS`jC!tFE~PFB)uR;PDCNn<=dukY8P;Co(fAdMwL z?#(2Ax%WTwshjBR0^?zMPqiEMIu$~ng{khi%BkpY@jMIfPwDLwzsY)Zuu_Mymv^<* zlSjJ>ibratG3k#tgUL;uTj+3x64l}U0n!<%?3t*m!a7#sG&|zQlk0UjiuSK8%#^F4 zJCS($kzP(%4#B}Gg&9Yp2>lNqA41YRuRFm{PA-l2yf6~-62&mkerf7@xaY+ zIuq*Vs5J^*xhFM(=Q~y}i7MV3F3Wmw`>UZQF$BaLHV(c; ziaBGjf-ODBk8Pm8!6ZI_y*~s|V+!<4qnBk`+R4t}F39#>)Fux9fiV6ED*iBDzD%~$ zm7K}KVsDGlnixeVKy%_UBy#NvINJx-ccVRH)&ukN{nK#+! zlc9N(a=-cJO0$tIFjSwVui-Nj-*qDFPD6K9+M46_V|@M>Y9=?%@p|e>x{u)XQ=i-h z@4NWszqH;BB65Sfof7SgUi;{wE4~J@_6d3#N1u7B8RA{y<0Ze{^`x7)0_#!#G-))V zpS`r$iGTSEt!sJ7!4OxKoO<|VqE#o+-4}L#s7NV36NE(R8vJ$@ZKRu5Yl0 zJ5iLVu+PA3OY*O#w;H7Py3c+k_f>E*0FttD_ZLoP;cX1+K1ai&`tGamSA3I5IXmI< zURIO)nz`fI*z4r-$$g@4(f<$+A6pMM`RrW$w1(rHLT$h*pQF=6a9vT*bRxI?X@uIK zy+LZ`EYM?SD6Ne_@6%GEU~e{R^(L)}>~aO(j-|tiK7Sm(;=!(^^Owzw8}L#&6;g*! 
zeh;q;(Y75;Q&Dh+KGu+X;?>07AHwCKMu_Ac&htm2GghS8-00}Wi{=#f7*=(qvbXu| zK!=y3@C>z^=_{GF6EP(@kg12<;P5HyjDe)a@Scd+$M~)!DId%8j@3p^vOfj~qhaM4dLGNlj#c|0 zUbYoIRy7XilvYb4L|;7J!44|ny8~H$L}G2&O-7R3v&=c434GbUUbUf@MDXZM4>`TO z5cPQ~IRpA~0w>R%Jz?NOR42Pd))ftH~UM8%blCRIeD|TFiK?xniI~+BlbFU{6$`86u7yB=k1E#*-BT@ zLUuWF^EKIXf`y#u-N0|1!H;z!fg^EVT93(Vn3eA%SxfpK1c~39H}%nHVuUn+!AWk@=tK$us|fKEIB)cm~ZK^pd>WvFqF_8KB2M(3D-8qG$a< zKFRapSeTnhK3SPBkIts_nwz`Dj6WCQH%|lm@jl(mZE_oZqp}ZbBez_WgXAQQ?#MQ7z`BX{xd{kR|Iuz^Kl4_kiTGE9 zUYWJ}4mP#vncnVPo|X*G_h>7p^X-mBTg8qN>dA_=-PvHvi(dO^;V(5@jvdu`x%(T`J8#%)9C%Z z@kdth+~}7P<#qIA!G{lE3Hce-P|Rv*T< ztOIPtuG6(~D71n_eYLzJR+JPg>cXoLi+l?8F*v@2^>RMh_U$a)$t0d=IAE;!_;dlP+#QP7`dyIm5=0&UE z*GHtt2?J-4ByTp0=mqaEaynm4d3Mi==Zn~5I+R}`*#!kzdgJef@XBoTRJQz;&97BQ zdd~@B=d&2F%&6kKLjRW>fiu)ngXOa)BQbI+Z#oSMeZ;vDYJLnKxAXHIq|CaGwEiOu zI%#nY?>HNF$=6#_?YH51R@bfLyT^+HSsgQ26xptwwfOOfQT1xJ%<9jK`qO({r@i;} zcH>2vmMl3Q5?NREC;ZM3C37O*o?=nf=B#GX&&XH~yB^eLS5fFJ60Fx!StIYhqEjc* z+=LC^lWwSJHc^Sg)m;-tId%I%zxQYRx5)A=4YMmI`_B(nTlRP7X|@L4CL1q*M7Aqg zI`dAML9YYFd=YRFdkd+F%PwBlGv-ZWcv&Fru$yt$>$-F$*6V8$R_)^wvD&~CZ|Ch053A7eq z^ZsmK%6K;?D3ukPlW%A(%|D^R@%%NX$vzD6tl`TEc_-60dkN-aaS&x1;&i=yAnQfh~y`1k? 
z6+FS{$QC^uID6`@5GX(Mo{09q!#3|$1Wu%ko11oCF9Q!^&Uiu<6PhA zzMGWI6G<~|olKiqu>0A!4lJ-sT&oVL&1xNu_wncw(0UF&7m33CG5%?`C@Yq=#jLw9 zIM2V^^*Vi6=u@8j1)OtMO$$6BIYw+#)MphS+B)!D~UYhs0Sr31a zo@6ydH{kj=IFOYdXL~{@qGbBZV*DZNwvHt6PBM-07UM=bW*2qV%5TA>PjGyV)_ePW zJ4O`~FMqK7H2XX5Veb zpOvWZ9SW*(ElMti>=cdvP;IvnL5g;&M*U z%bBfjXs?vWlng#M@wofovRI5>;#+bpmNRn5oNo_Y8=$tE3KyT*z%j-vg>}GHm*`S{p`EyuO{a0 z!NRYQK6%sXm7LBN*;{Zixf;@C9a{yfOT~|M zP$=s;Gi%ZAh3x;>Jz3t`KsEcKvyb+Ee{&wvb!ym?euGGvwKAE_FVBxgk|EC>$KiiN zcuvs%?m55gq0!gC(LcP-{cRb8aKkBhEs=pUrIeV>`h?jM2@yYDrd0H%c+~?t*_HxEu1(wLk zjd@RhEIE?dKVF>GNtg1Nj5&Vs+YU91!_s!5O4bHd6CF!IDJ3V6_Yoypk~pV?uJCIc zC2yrk)=y;)BKxi~Gn-kIJ@g1=)Z75`_GO7uIFLD)C;XeIipD(V9xN-$KjuUCcXmEW zWFCo=J8}PWsAlcmc5y$u5pr7aE?4qomV8oU{qnV_c0Se*P%_V|$-Emc+lE6~2{u(s z9VTkD#`F*HAuHLRpmEM}%Pz%4hq^S5B@TviZRPJaa?0s$Is302nYXDmD;+vxL3VEB zEP=g5{S#?9MGv1{f|b}XZ>Y}k+bF%-O31v7(>aCY7Z!U$i~l26ajdUS{>-bqCGuwn zMD#j|T*I;LZYYcwfqtgV@1)7xz#3&&>CubAVInl%hWuSL>dt?Y>-b-?exR-^NS0kt zl|-Eu@Se>}Pg7zX-Lw05ik8ZYx|zp29?P>5GPwY$#XHexpfQg-i<3D&RdXXU=ehMmJWG7ax#?M%lJ%#;(&lqcr;k{jz zIs=CL!X+6g55%C1-ZEQK4?C~t9bbq;&*5EW9tIfU&T?N>7RgHMO=_y`j8haO{27sBauooY!Y3NH^nxr`(_QS?A)xP*|?R@@sg_XCmes+Rh5p zA6?5CcVn>cV=b;H(E!)(Byn@vZiVtHOvy==Ka1{JS(MxepJ7!S$YkH-XntRf-gU*C zpJ8~G`;H-5-oKB9%ARbWT^7HoGd;{d@Lexy z7eVkqe$xtC*`t&9{>ip;KDpbI@L93^Yi(R2zMVs!yhEOVO{3k}oYq;HcYsKj=in_Y zlT)&?_c(9W?@{+0*e6HJFR;uS$;rl#6G%4~_V0^@$=ZKA39|ZaqS10U4Co|omw`un zHOKGXbocRWdZ)II@oTa&Hm7T5&n~3PgX+n7x7$Rj?R>a1rewZ(iJCIAJV=|n#QjHH z%l@@w%h~MDuk7}n=sIO1Tag(TBBO!OW(R$o{3#Bx}G*^F^(UWUt}%9~zY; z+gCBNWk+vWwo4A7ajs_+pA1PuY4(6W4X|&rnyiN@oUY1)muShdgU%JwzYvOi_9k8!U%MGNm?9!>f3DFP(D2NzS{tNb7?z zXSTZTz@B&U=w9(`Bxe1i?wo~u7b!Ce+yck!i2cT=6JV8Goq1yEAo{O`%IhqaoW3_` z|28$Yg-iAxUk~-C+|gW*Fo``ole2`{bMD%w?n*ZP<5}_sBZ2GGJ04qR;e82Xt_`f4 z`G)N1PmY9~O>~>jm-)2ayI)Uhg)U=|4?R%>t%WV<)nNHA38y&Kl$pC zcDRzmdDu=~+fl2_M6Um|_B-n*Q+P9&<=x>4{>{4HbD;1VwC0gE>$|d6dmUaRDkK;G zB(hd2#H6eT&dfk&qOQf{K6HD4WJ}!Fla#GUcr{CB)pE|>_(iYvgWsxPPqNoffmba& 
z$_df0@s=$j+!ORTQrSt^+5tzF@R7gxP)pyAQ0HNETS4lg?m1HQ%?U>Ph*~H6<#Sp* zgQquXsVfiJ&Z4XM@FJSDgwO4+!wm*8uh)8ubaqG&RtKqE6+4n(0&h^e4qzQ zR>b-4NGPZMx9`!q5J7{rhAL^=ohZTyn#k|X%? z2$Az>s1MhRwk6Slv?xN3tbyOep0(87%D1<1t)JSG)3K45)ftA3wY&t+e$jGP9^3=T zoY#_P*~}8p#PI{wFq8Hhu(UPQDr$2g&pBVaW7)YlE8Zv4y{F^`zb13)u)_HCK)()Q zljITV2KSsvI#;|bTJZGqShO>{C2LsnrRF{L9C0A0)^3N?GpvwYtwsE@ng`v9(-|SI zW%0cK9>%_FjK`88B(wDU;rSw1Z^7lv{@utTCG`u{d1`V)X4gP6)nz=mm0dQAo5^5a z4>wO^^*2ba2hri$6KX=@~I*GGuzQ%m!?pB+fhv(@9Y6!)6tI zI+tBbv2fNPj&N;=Kl_L<6U2|4l6!Sy5i?I>NBd5cY+dG_m?nt2q`)*0roi%sSyS-93_p?)Uj!7R<*WbxzB*-81@+liM(&h zY0&dX*v!2nu=!!Jr&@u3iF3&kwRxj)IO8Rvh z{8rN~yUCNGbv#^)Vt*T>hP+QmhOxZccm`UB!g2<08iy~pKt9jKb41z$)YDn1J~X|S zg&OILvgbTI_0pr)#hsb1# zqcn?D&~IeT#;fkQ4Rc@j{czYV&{}pKW*63<>^V})8B0B8d|OW0RU{o{Ot%fA&(QO2 zP=2R=tC6wC6(mXy)IPp|YaWYJPgz5w8sLTS>)YuDGrnx(-EV7sK zW3e{pPkcrC6{7EYOze((J20vTDPP6a&qS&*bV{DByeZAwh*QXyHC&g9YQZ?!4L?zH zPXEg3rrlYnD|u&;_b?Kjt+uf+->Kj0CR#VuhyCDFR-89wsV+3`!V(MovbQLcldXEe zJa4RDq0M~O`WQ;JM7I)p^dWlcYTC~`@rzh0t2@5H`b9XBT!laQdO)?eiF%ztX}}N;eY|YCv!;USyu>K>bY#ecX5I zO+K%_YPor-gl-QQzj_Bm$F zbMRNP82#!B@}7uUCDr~QjWd^)y@~IN;yL%eCJ$`JKPK_6oM4umdv`#gwf3KIPxdEe z=S7~#hpKNi=C4-gcy@b=)R~XW$uUbkEp{PSX7ICqC$or`)1U{hXaUpgpeU){=f$l0 zxLT1`P06~06f5CVmOp$2(|$0|=~jJ7aWPv~_UmePIkK<{>t!;&L#8@(`_GlyyeHW@ z=J4obp*;uw{mFEPsF3|2`?F#-ev#cJv_gaUgKX#wwk;b!uTd$ zazFlM4cX%mXiDpk{QHea_BWsEYMj;+T0f8_b7Q~2;RMoLie2|%UKcWdWM1m9LT@rn zY)n4YoJF7Wh_X6oyEv9!WC^diOlvtq<9K*1hsDd-+E}~kG3U_ZA}t>ViDU-7Ms)d3 z9Gl3}l^~RUuM4E_V3U{l>VfVoir0%^QPRlf8d0yiGFh#ZxrkHoV4-r^&)E(3`>SP* z7`Yb~R#H#$gIDF73$>Cpka=b;hU0f&XlsZb1j#vMtpvNnwAHGBQBj}%727T+&v@3( zZke2gIbOMaM6Bi_S~87Zh+n*$R9_udh#CI(|eGS8s z{CC^E6|D>;-4bN4|R${;GS!hI?S@aplW3vi+KfhFhb~m!V2>pzu z-*Io=EoU{(1fRMXTVDdrBiyx&OxOFZj0mv`=UVdbHjud*PIu#4a;4-1f|oF9J$$!A zXavjW{Di|vkXh&TRcO&o3&*Qhl3b8*U zgMGx#B+Uuj=VElS5pH9LBWRbiw;PbAB`#(?Sr^`L6KLYhbkb%{f_<8FJux?-5UJQ#z z*=QfvP9ei`7&I3#55%OCXnmR5qF-j{zl6+xB+aStC$RT0E!@rj+lhCzv~fRgepwuv z$|FxE?Rn%c54SHNk-hLa$D^Z2FrH_uF8KO`Vq^AmXC2g`n3=hbe)#_n?(S4i7v-LX 
z{sr{7l=XAI!Fi(L8N4NXF1ymOizt>8ey)RFa){q+6kNprnK#T^^QU~v={^Twd@;Q0 z$*z6Wu+8VLq_L-71I9==1{4~YkPX3bp6W^HAEOrELA;dnAfOn_J3@?@7v)=^!jr+!mJ zxQV32$yJ(_G6KzNyJcF>x|?LbI^1vnuwQbJX3bVk#@nCOvJYY^X4J)*%6`fF?~-cE z`p)WFKZOp-3S5oFey77E*3G%MIq@y0O(etgV7AFVzam;MDSAB1558rm0knGC-8o^f zsQ9v{&sm>%zPqxA`&#m3^uL4zQ}oUK@Hso|^48%_97#@u@U}68+NtGRy6(%i$=bTh z|NlTBGY7AzC+GZ}r%XeBoH23{Z44oCF*UaIsW_It3X{dIwJON8fP5!wG0)pM=i_C_ z{7myUaPLLy_|bKRUSJKK5uqi&0Irc_sp9Q~3WXkjZI@gxNGCO19_0?TzA|8won<`;McJfxnxawj|W>`0^y}&)0^;#$XOjNcz4YD@06qY34X(G!7JS6+l^6upn(JVWXlDTRG9)0EW z1m!cQ_H$vRP_$s(BGBHGN7Q4-tj)>knmOO^5>aC;S+jpOd%XwJ~5-4oS9R^_u6dy2W;-9bDpa5%+?-X8;gTEKmG#Ny^n1&Ygda0=Tw$0%HP9- zRu`V8FI4(hcE~xAIdwj#ke?2H-zC2v-Olh ze^H|l)1HH7Nij9&jOV<&rjW^wvL|3zRPCoh^c;5W-B-u%ag` z%_RFR^g3Ff*@tDa$Kyd*r`P?2Go?Vt#75amm>t2^eKdJk262GGC4=j7V z@z_gjoRhV?tNk@n%z)rh|8~|F)MwSabz4c&WSD4*Z9@ui=qz?W1gEkq=nr=KlE*&8 z6W>#2i(j)!^=n+M&oVpIoBnegZ~0oyIRpDMarp=mWz|q;cMhl7$Lh{b?o0Umx5j$Y ze9umY;h6EO`_5HQ@&Vn#qnfbT+oZWy=}oY1iY@inW+U`>uOFJNh7@rWC^ zfJl4Z_b$726+5y=;dm`2&-qN)58-uR^4zQ<@2mIETavl{%^4aw1NI=CcwebXWKZn= zMGdRyvkeCl?GMn~eym5|O3rKOI0$A-FgvRPk_+lHR@&fd12%4fPyIdFURQYj?x{aW z@6lTSdJaZZ!hkYtQm;_=5j4)Oo~86%qScxH&3I`VG(T4Ud6sw*ht4BMQ<9JL9G!DR z-^HfE{EqK5;Joa?Y9-Xzm1onNESqO!^WypN! 
z2^g6@&)?w3Gpw3iY}-Vj+sQhEXJ>EgNpP(qN+d_(X4uux(s0;6tG9by>0}4_%CE^$ z`~Vcnig2Hk<2Kit!?XmP-Y5GY8jn+IH~(8}?G+rmfp+6)Kg+jgSvMyH{=r(mvw7B3 zX64uk+S%LPpJQI)#;4**)@HxY#z)}9Vp_a|DW_s;SC}TN-V)!-(&8gAY!j{XmbNAg zJGpl!c2#A!9<;bZ%bDk1M~i2adXk*ki<@=LIel~`KTMXEQ9R-}<#Sfz4lgVgnbC5RdR?r%P<;3v|E|Hi+gL0k z?}c>PtQXs&ATu8GEs<+a!X zy7kz)F7|vZKEH{jnN9nItcRC^I;X@#$=qo2I2?;>!$aW(Vu>wsv&b$Tp?% zva#zuS$-Y_YLGd3fitc+6$7i#sD(Z#@5W!`cda0iw**6tn|nk1agye&wPNDgaO3K} z+8N8bx03T_y~qn{x=%Y3A#wvIKBtZjr1*{$Yqc>1*OME#I^90=Z|2(<@`CJ9JX?gx zISTL6=sR(y2w&S1ZeP*-aS~Ng(`m*N*W>y-TF!gjGNeofllm}drDWEIWb{-5dos5) z%Lx2OvGO*x*40WktSQ5`*TSF~i6$#|n9l>~d$Rbul~!lNrXG0?bbTr=C-?jZ5dVlw z4~bjJ%)Adv^bx^MQDbF3mgnB=de3P18}?p`(@l6^)|nN>uQ##sB=w)}p5g5KBGx45 z@kHofDIVp##ScWG5xCP#RPU>m!KByCf8$5?Qg4K3Z9UCo-cpl?{YZmq>Z?VD ztcU)Uj73G>Q9O69>xbY@Lp(ak=d9-XfHk+lqB9@60@7KLncVvm$X1hglrnm%Cx*4f z(Tta?>+kn~V@`)FMaHa?yjy+!Xf~C-&m>uLLY?C7MDcTFZ-n!B<|$jIkP2u$k&UN*>iRk39g4# zRw!jexg4I~7?GXKF1Lz!$*h`nbv?B4lxR7OJx_<-7mc%TWFPe_6l^Iu3R6r@8f$i^6jK~bsE)jck&QU#@^nzKA7H%{gVCe z$uzM}BzzhF(lcF2o9pQE4twQf(n%~g7$>s|?>F~diye9EP(+FCr2Q5`H}bpeu*?eC z%wmkwb{#Qvq;b>dyr==YW-jd?|JN#@yAG$aORo|w_GN)zSth%g+WY)BtupG%33Yk4 zNlu3S;8O&;!+fhIW`E}ck6u!^ zXBp`y!+bN1U(<5-xokIr`V~7W@~X5j-F?N#nAO~+{5nYt>O-qGWcZSmGMhG?G+$~h znVK_3Ka*z3u9n=-iy@n5*L7-6KY1qZBum36%v-AE?C0z!ntxs3bF!TDBF$s!+I>z+ zCvCSCt6LX*stx8>_4y`I_Ir1D+d

EZIh>UpV_NiF6kXN7X-#n|Yf^B6sUkRIJ7VquutO?n@Ri?s$RUi^4T5r zE8R2hY(vw`(`Pl(aqi5VX3ptu@B1G5qE8AQR$HX}1@il|{;B+G7+WP5NiUe)!>iZ& zlt`Q1m>Cxr6K}5)oAb1DpxXZu$9J;KCq8HH{u1MvF=YIOEIIusSt(}w^dyF4^x7G| zzq#5E!pXYuJh>-B{w9&)cq8+(_3+2jFl**EyHZDQdzPMbwz#$cHYY*-V*Svog?o}^ zzathT2k2|+&YQ^Nuy?-ed#k^z7IHf0AZ_LhuDq>CwuE=Ix5eLv7z?i#meHBE`#gNsIzC`R? z&118&YBB^*=UZ7tl#|Tn8jl^sCP!(fhkNqOG8*Dpleo6f+NrFcy@$>85GVRv9fO}B zeX^qN;kTuO^|Y6MeE0;2*6Y$FAM=xksw+9Q97e*z|ip_;oO*{fG%cID5h8;`{H$54-mV zv{74ji@)ujPsELj-ElMdr}EfK*r}CX@^NE?jV!txB7#W&iOizh-Rm3G5$+SXb7+ z!TsIE&L_#7j1;%Ad90lMn17Kj`}EcCJy_3%v! zn06%LBBQ5MS?)G&I32k`eYra=hqKP}Z zvr0D--73~@W&M(Vt7i;RS^LY$xtOIJ6p;cQ}*a837p?*3q%_`70Vfm-h zpBE&|$)d@?_^2Me19`GDZjB!A0cBqBOHsXc_H{l=qT`|YD8IZ}8|OnRGeGG%e-Iy@ z#}xGWgIRp!XSgIb93=)^%Xa@l{!o}cjkn1(woo)EMf;q8@*Ha{!j1iD zH5%Vu!HR3$v7f&q_*$(30`F)cdBblIdy|9oVZA|4dbtjd+u}(DT4dMU#ZdWA|CxLr zSrhq^`11{aIsl$;L-}%i%ej>~%YBm??^MTmBsoqk8(4HTU#Y-8r;ACoSg1dXH}f|m zx#U914xwG_dJT`cUewDulNa-{s(96zj#=F{Rk`LO&_!xn<=@O?9!{T%P|q8#`hFQk z_vA3BY|NRAM5B1iSS-E>>P7Wj*XVP5ka(K^>#@{(;>oSrm{^c&Gu+oheiZF4(8}Jd z+MA5k$@rwGa|8+3^RORz;n}WC;&WFP`kd1A%o@Lom3l;5@z-nVo%z2pVrxyZ|D%oH z**<5u9t7)Adc5QeY(nZA#h~##asrK}LgH0?DFuru*!?<*I)SK$K<@qZ~D?^b7aGG@0yk3w(uka8spo}c;Fv-oJ939E^s&x>j8 z*)!P&>KZdV2$$77=@eI!|6&F-p7r+xaqbHi+)@}}OvLtq+FQ?Bd8TaTj;t-uD&4%X z{8t@Cl+OCS!OFd_-`o?AKj9Pmsc9G6zQA6i=vv+V<;i;=*{@Ra1X{hNoz}3KK-zNH zl6kQan3o+Rhr9PwmKZ|%qaj^YWRBl&plRlI>O&wW3@u~tgVkP%EXkky93-E@S23Mh03hK;o7*6F6;erD9kGG(5&lB4}P?7r)VoPL)|dt2nZ}E zV+q!%#qW1NWv#%OsW`od{wWzD>X7qT{mCcpx}Kd!VQxK=XN_B)qFac$yLjM}*xm*b zlcAd|Vvi{M3_M>pzRBEQ_HJIt>uZYP)!cVCyWK#JJlSTiLH0%Fbj`fE&6}WKS@3!? 
zmSe|dn7LMshp^qpa3p=GixhzlwS@K$ImG6Hmm)z^KlE!uj4B@mn$ok2Nvv| z6L-gBQZ;|G7UBVIKFv##w|I^yyg#c~a^J`5+i7h0DoZABUL8-*W$|zw#_!bLv83%t z@?@iIK$-{Dm>KJ(Fs-9EJlS3G&~x}!QP${!=5}MZ`uxt0i?>_*5r*%=E@?lErL(gVRi~Ea^~dsES6Il%i~B18ocdW`q&0~ z$Zy4hY0%G(wPg9r&cGWnGG`8D_wQQKttE7_=dGx_$Fg-!Gyj3?SzEe_K6#7wA6ar< zNeNhdt)@I3Cs*b$WzRK=&C|mJST&Ir#nqf=h*5=+_u|6yU2>vaq2-ec-gFar^4yo_ z%Ew9nhEHG9V+7_6Q1b%VwnRHb0j zPu|Z+b1<|=VNf#G{7dq!qD4ct$ZnQI>Bca=outL}1z8sBAwa(SGI4*-Y<#1o#>mj;W;5`BJWNf?r+q80qazReVzxO6P?N^*B8f5aV@8M zCYNu#Y9rg{3GFfe-jCtW(l7JW$)}R@X_8;~O)dAv?(hAw62J4-C+jk^EAvfATr0j@ zmiw-9iMtigQ z+jDB0q`jH=d@<&}k6$<8?}EiLkFbQwt-;?E{p{0ohZm~@Xe7xRWL%J$F~ z=1p48WXgM~WLDo_iM{YB)yk*a)!$m{TiA6jTV!p*2k;x>?&Q#Z9D*BZyTG;1 zq*+7zyhY0z_hiF-#eEye`xcC@Ch1K7e~%AYwV8bjuc-S+Y{?Fn+p(d!5`9T>3IEGp zhVKjVR@CFp=NZW^(v5V6c zO0Bz$m0rZJ%rmxU-(->dl%x&GyM!N3C&eCO#cVj0H;&9&hn$<9Q&^Kr=sF%Zg`PiR z(l$N75>^|}I`8q|oYT{kwMVg1JDkXQrO6kW6V;OyxG5RmQSyA&=my<7EU?We=|McY z*Y}(bP@c`dcJ&p#+26E#nvG`DJpFXuwFjGW{QN?`kbPad*FEGc;7_%b(}7mPHRlIT zt$171~<)bSNQY1s~h>+9)*!v@{;F`W!48| z4OnUAN@^!h+c_Wc5w@$MUu&#yui@9C+Mdn|HAJ9U*mR9+li9MLxY-9kFVN;37G5Y0 zXN1@adcEm!R-qTm+xTg`CU1&r>KQwUh#x>CIb)9#`6sd4eXgvb_rqk#6X5$I$fu&* z%lf3}*|vzaD~7Z@`MRYF=Q#MGM1jf=bE!^cXDNyP9r?aYQO9y=q|>s5mPr{ z$O>#o2AxlhoDSgQiN3$H`#R`kZu??ha;w@1B@m-~F7h?XqPc>?H1i!Ly1D#m@_ z&sR85*0;>?9>B}8_BW&XTiyMDS_iP+n^^yrGC9|0KAU_3=e7KC0$lReVhaqC1+5zi zvhVyb{XrKf4Wjp+zUTCm8S1R#%Ecl2 zr)bv`ZugUQJI!(u^a`=2u3wT9?s2R>7IGh}V{dm)h19v?#`QG$i&PV}*@{(j-px+s zlSw{#B>L;6a@ueUaxHVu4cfnvWWN+FJq7Pdve4t=-C#EPgMTi9`e6Ex;#ni1dl-%T zvV3-4e+c=mbT8@t%$w#t@LuBE5OMAk{&^(tID;MPV8LCqKU%wIktFX|US#d1g{QiC zJnk4WWXzmw4&(W36%r@U#Bhv0*}Zk#l~b-hfN(=Bn-Bf0+f6i1zL*JOQCru4)N1DK zi$F2Cm@@m;h8%6hunDZ!iWKeLd4l#btCu;uoL@Q^qn=RLAEMBDEhJw?TRcqOjuxIS z>v`@?cjm(9E^ngbA03-*5T$ z175UrCE2K}(4{k8{waPOK+-BK_a!7VACZ&GlA&*x7K)H1dzRba_6oS=e5}##$oj*) ziO7!U?8?mU>M}l;)~{tHcuoK-s>Sr%1GU`%6SKB`Ebg^bM{>C>X4#`flp)IR&fWSI z3CF7SHj>N``ybX`&P>Se&g5chS!gNitREm*^7ky!t9X zXsK`AJ!f_rd*6s_6SegtuiNgv@>&_eLak|ESsbh@68)j4cmgvsJD;40m%wO*8navb 
zYV7}x50oO+?zd7`sAKn-W+2S+Rx0Z{cjwT#S^t@Ruvwdy6>aTFlf012A=Oyf%oio= zV;BEs)k01JNS@T36FdoXMvy7F$TN#v7jqBy%c<(h9^O}J(U(l`;d3!4=1DqzZDw_w z>VZz8VKY76YBqZwn#sF*iB@v{@}WMJRom^XQITYWVE78}*-6rEH0@MCY%sj%K&AsP z>T^@Ct($ky~rW%sRhHCZv<6|4GT+PiF)JdD3#!e;GfrRksQPQI(U>N?V| z*;kaTh{*_%ytKRgp0iEciz(Cj}aB_>km5pzftE~+`K8Me>6c-coPlVA18XSyQD|yRG?JpO} z$FfUSuFa(N1=_AnrfKwlk48`6=9{AG7AT)1a*R}OFIu0-3LV||mDte)1F~{)fm*&L zQ8DE!sQ(G_w-N=4y5m@~SNAkmNeu3;56XVmj3Dbnq6CS4E3|SO%g+?CdMkOAJBlcs z-MXv!Pb>0N;djr|{6#*O(}C$BG#+H< zoQaYA(U;P&4}aXx{~00e;scq1YtPOFMMth&LPEA&w$nw9l%{-cXr{yYTp*I3#O>!R&@U&%*cwe5%gK4Y!%)+5-xe`(TIoL zjS*YOxX9B~1^SKV!;8r~!#$HQ@p7MEhU?LIG>VQ(p}CyJ9`xHDT6|Uca+r{{OUba& zRkv%JPXd^`&pGg+qgb?^nO%tE8bL#CKFx1o__B`J!gM^ti)I~qP8O_0pT&CH2h~|p^cm-l$s$e@w$I5nrN~eL?m7Ld z7EQvi@k$rx*N5U!f81IQw{ziq5M(yEGf$m&^NVDsslkqU%Tku^3&|yW2KIi)SC>NUSoW(##!}cbgzm5VeT?r}_q0F8zlQ~R>Ke|rhr9N+ zQui9mRZ?dW@@2>3-|UO~->?eDEL^&3@Bvw7Uxm`;vX9 zn6itMRsC`*#P%ooG(Mg^r>DU)Z?~Uzts8B=^kq7Qy&YH|EMSF28qZ^mA_x98vgXc z>hQfvp}s*PS@PFqh32yb&K)7HW;OKyQXc2ueZ{u-AUjpx+CtlT3;!{XXb$_aESgi> zUNzQg3#0SdmmqNq}>$bs#4gSnY+$~yWZp_6~&8H z>~sqa{$bUYq-*3;ZK%Hh>x@T^b5C|3pTK5sVf(d^ug^klVA+m#*;C(_X7?zw4wl*f zc!GBGcI8nr9>XhFl4%u+3M!(8@mku8EgFb7e~TX3Z`nfGT|RAQpL3MV zS#vq?r5C4h!-dSb~h|+qi@ZbFZ6&jJ-$g$@`O&oLAvS z=3$%Kn#(HM7&;O8w6)pEIMMGwdp3aL(Phi;ra;LgwGc=o6Z= z=zBh&Po`Uf8SR+XJfZ#rL0|=ty~*-X1{de zRNf@#z4(3jvCMCIvMqv(wZxTUl&lB!i$%+O{reE!)KmTv5)Ytt6~84f+(^8uDT-yi z_9$&__uOzTG?$YsX9(u}<77;E6AD{JpktK(pZa@|C^OohLFg>HbY-`>`oxV!b~Q-9 zAFdrolC1MMUVE4Gje|*E7qYiNC1+R7~#{qevuRh_?xyr;^A50-4H6-{req+vmU&XG2>E9-9*ETn6ek? 
zcKTLT?|(iIqFGjcAMDCvEmsf~|H9*%YW zgP#i#?8w4Mr1H%Lu(WH!W5;Ovhi(vqy`!CrlJ_@XPlW;k~zQmG+M3J`{zW}31UDoNR)uWtx&xS zj!#2t7k|ne-E49!SL0YMeCF=Q>9o?_2Z?~a$dTEatiP$tYRN`&p+95v#4BLm(8%-w zR(h1Bm#T4!2$gKrM?voq98dPlg=)z9q3DkjbD;<2~A!iUoZdOtd8V2 zzD$Xn_PCdtw$t)7n)lG!CT!Tu2IED}$n@m@dGx1ZMs|_*Wu>3_(#nE%>4!Us2+x!2 z3^>(->thg}$p#CJs*YxjHR?DLBl2XmoIUQ~i~U^Pi7^+D{HIO zezIOI!lai(s{YFTUanJzOYP0O3%F4VO@GkF7#)}4b(rXCTHX~O{Eet2uWqfcZKTBq`YiX2`&13GU z0-v00@vC2ApIHTL&t=w$i($A1YQxo5S)@LY9g4%EJA7tgNqtgYVa(JSmmhL< z0d_A_{!b`0gYRYHL1N7eQFaPz=k$&1;af_4%6g{aFXP2%tWGYPYmHoc({ve| z9YgoBcvJzh$)x)kG_qefPvDQ?cq{(j5C_|<=|y(z&q6uxEFKo&uOURX2Seg8#F)D9|?&MY7l)hek zh_p4KTLU8w<;PV;#r{zK4Nv;B?4vY_cTd6Hr}@q>mihq`bGrQ!{m(w zz8F_-=NZSU??qP6+p-QKSr1Yt5A6{&C`z_Ov0=1ZMZUA$alJ^@NgY*K;syvLr)9G1 z9mv|pK(?Bm;!Bomsz-knBD?7H3Y`bh<#z34M@1EFjN(_xN&cH!zF>#NT3jf)R8T(c zoPZ5m;k1neYhBHJ@5BD?&*E99a+V%sDyvjv^Vig{RZX+#d^xWlgQ3r}&4tE`U3`B+ zylP9vOX)MSV8gpug=ERj)z8U%5s5lrQFcq;UhtI3ba_Cz zWn%idWGV^gV_7(>?UD~AYY-~%s*QBZD%A6}dothLrL{7U$?o)_ICm9nvz|XYS97BC z#s2-*i)5}P z8R_S1D>;rg^7?C7B4f-E&|8cNFJkR6yzf>1xKm9zGqNNOH0IsC^FD>2E_3SVtu*d0nq@^%_Q)Pc z#;^HwFH%$@*D!2;75kFCZ4&)o#k7{J_MT6F^Qt;V3ioTR9c$!#nb|b^T-{^%Px4>w z#Ip?|OfQ_ASJ32iJW95~dgSTt@BQ%F34@IyKr(C$fy`O7%UXuF)Rf(NAFHnw>y0M! zvGi_3ww%3DNzG4S#JjA%8Y+uLmqW>N2bMnL{!iIuDa7J$nf3pPbT9C>e~gflSzsJb zpZS0M&PfV+50D+RQ~1>`_IjAl?!fu|`A~75brGbW#GP|#^Qvgnk*_6#)l6+B(|0oa zoF|%BpwSHNzsKt1+_ksw$?KeV*=Lb18U7A&{V~|wg!x&&-yHt!^vATf0BD=FT#jl5u@h|q8AYxtz(aNr8SN;3!l$BHiSRcQ?Fi23`48 zAeB>evpVh+yvdrNylpJ4rk}9qDfJ(~Ru9l1xwr08!#jR^hNh>%DjBh^@F#Qc#jrMK z?u=xs(OS%Td8K^LI-9IwnZ#11@jCBPbCT%wwBCTx3|(IZz*#6-Wk04JJB#Q_bq5OL%eJc zi=2&_z4p=JS68!ZIdf5Yb8|3MlZ!k%Y4Y}AmL9N?vgNg$3|()s#TDXBPBP8Tm8^k1 zTktb!G@}A zA1%(K#hGx+$(6~ok~3VAX)f6_mKLPXo0GiND~ACmv1N0ma=vFDk|i_pNE*y{eW1GY ze&ae(d9dHI3TQN0ekWH$(%#R4$El+(#w};1q6Mzx-O8!ttmwBRX_8$+*#%OB70bA? 
zgzsd>!X&z8wZNWwmp8DIY(Lq)!v zxBolUn0FULVRpDSKg6W(m8dO=00v>cMjrd@Ewdv*c>U_cuW`r_>ID!w#_{r|7)NKQdEx4-ZZs zFvs|`2w4)vn;AE**Isr>{>84z1N=R&%I?#w!booM{oqj)~Jm#+N=E~tyIIY&mg*yZ90f2{r$~q>5j%x88Myh+fQ&gALh&H*xda?X`uu(j%LPL{UKdS^vV-(-qk;>%|}`0dF>vgo}A%%HLKlE z(wz`0A(DL!m#o>C29*|kbvk|TVfjUDSeccwZ}~P@KCb;982Ax<_rcJ8@hZK|^K6y1 zz*+TJnHGomllSg9F>)JO{%0g`ySs+F-b?GpY4IzFz9Ei|=GSX5q)6CzjwfmEmbFON4_WTyw zIZ<&D24|GpfVMA?{2R}+O~_bZ9QX>ZIVG(ZU%HT`S72pN_qD*mJpKMezMr8mSFdyt zypka$nd7R`paUK6*JcI$%}GoZ@TWf+Zxk)6X=5`aKJfb;TKa>OIb-f)`h6&NjV<&6 zE5+Tn*dixDuOw3wHrP?<8E>Y+uSNqy^h|S&yRxt3|7-WTyAD=M&VFcL7~R*W_1 z`x~dIFEh>)l)JNFuamS{o)_$e4cQrXC0!DCFJ_Ai@hbT@Mnm;`*3XPt-n3;_Xck5` z$CLXprlKCbGuxcNV=iW&1-vbpDEi^WI@;z9SEA!#N`2#Max0B0uyH2&im`UyPc0I_DYh9U63nU(U3xT8J0T=zAF^J_DH@{35F!4%34)=HJ=pb_oRTVy7``|57cp zMa0WlEVEH##NKVbH6#5j=nmqUPy2ovA0DW+?PSWHkYm-nzZRR~YF4r)|MO$&T~5Lt zIFu}-FVmwIoQ`J8?1V0k$&K-D8AN-lD|S@1iyE5^#@GJYQ~a0rIx7qH;qU7)*d#u z;_DUA>5s$J3laMiQfIfq%lP=Vh_$dV${9=YtYG`IFxJYR_+-<38eZGrJlyvI+Brpx zO4ic6o2sSu9!Acs)thr_$CD-}dAHS4W*C!YYZP|f#~0oe50m5Je5~jpf{xK^-^7RJ zXd}Jodpu?|-KME;4lT}5S7s2a@zjCX@DkhKqg+l>C<60jEId)I4cy;R46hHfRcil@ z*2zAe9RTBvKsNHIf%uyfWOH_T&VV{x>)-Ic!^Nnh;nPnY850&IY1WuEgl;lN4CF(3 zul+2Jw#J6V{Br;t4&^~liJVvIqcuO=Io&7tZGYpdj!-&597wjmoQIWhO;)}y5m(+W^z-i-&y3(JMT~0x;u+K3{WJDt zr{qFdH;2=YqGnET90{p&#FHZKE-$ja=Z(u4cMUQ=uMFRL(9GI|=PZ09PxjJ9oo3_32Yo+p!kvZ?h>Zz%*IMf&RFYuuY zj2*f_`875g4VUC5&YJb>^ep$l`C#|HMY{1?dl-Isr}3b6vKn}@-|nRQE$aOT%D2*M z8Wv=w#(Fi6z>A#AGpaB?{)!n1(zV98ca8Y?n{mP_I(;IFRdj8RHkZ4a z{mt3U{JIu0x4$1`bL!IL;!LtNXFldA%qg#5{lzHfOp=f1>v;>X#II9b-{{k+>^eXU zOP1N}bI8uz&A8Z-Ouymv!ML(bYX@n+49s2^Z>}rEmK7rDsVq`YM0gVxnSIX5AjcTL zmN$ATE(X*iYc0IFP+bj3`Yo=u^nc!om2veeKGaQXIhiFZ@n^W2=sTid$Ig7<6j=Pm zPE{bVoiF6MI-}wTF|jsnlDVxR%s z<+PvdS#9Z;_eF0PqE4okWm-i%LKYW@+!%LLNQ*c|KieSM8o=~G; ziLAdHgh5+Hy4L)$PN8p&ch}b6CFjX%^$oX=%vTydIZC0KKQ zG*X+Z#G3y6@;@A?C!$Pcu_bhQg7$gVyA>|ykmpfW9>I$5V%uQ_E0=}H{YEaw>hIoz zZ)u;d!|f~Gm2;BD^T~&mI8u9wm}SW^8}921_tl1P8MeKJrE(5*F|B=~?n+pYx%O6i 
z@$4I!2%YmuntWc_Gg6ccFRC?fFP_z}-zdT+_gm&K(j#tStEw~!_ix4e?FAgqcHfgC z%L~dS4}B&0X0=OBlstg#vJ)(Mrm{CZ&-F9Jh2*^44 z*`C>LKS-aD^(4QlHzVKd`gj*Ad9%AfFVYTE&mc!PQT=;fw=YTaR%;RTj-+8CPEJ*> zPv=8eqo-CgKFCZ+*7iO}uJp>uMiAS7M!um~`6x~Hg7-VH_(G2m@3|2^zp?3wkex>3 z1$65mGK`?Z@6fo8RkFV+BlfePQp1%4NS#qh_V1O_aw#5=OycW|{7!P$4N6=G-`fhF zpKM1tohmtxE@IK{FnFBS_w&A-o>T$nKVsD%#GpLQ#&Y}kya)RpLYCw?=*#jqh)tOj zUV$Btkl-8sy^I{$7m&G?n)K#>&8?@c3t(&Ef|Y*?W5rOG7-Oe1Z~ zG&UV7mZUdLE}CRj@5h^d(${1yb9We=F6v&-R`Yp4alaSkA^(sd`K&(I;uCtuL)BJU zTiIDtS$n%z39g3X%SL9I$=ib!l3ixF$gv#D4t7^^c^!`{n`mB7jG05jyj_~2w{9jn z{l=pE84L8mko1ZhajLgk9^nPu`Cnt@JF{zgnVbdJi!8tLk9l-FQ+t>5hENp zSfC?}GFP%x)N8?8>$1%Yu)P9j*ORfXyM7|q>-1|YMvuYwWZz%L#_x(Q?P2}Ce&`)> zdp_T-s+LPc{^V-T2z3nY5;u}ZeG{aLv*S#kZ(_}hd@l*9J4CVAbODJL;L+P!xDr~~ zF_`x+H!A-$rgfsncD{NkT&Ci7brJqp2JW^-vyghqiuK>S>;~Sxs_h?^khL$!@L#p>w%tI9?q7 zgARHA%NqXuA#*0X4y1QK$Ynk3OPDj1giRouGnl9I`_#G}i>hGcHA;5otN;4#8!es( zujBpN3D=w9?OpJh>DMd7qZH&&0HoU>VhK7yRt?KB)Knu|Ke;r`X6e$xMa3-3AV@#{h2*+tm% zv%8Yhqp#@k4&Qo|WCLOTlusX!y%jmD5u*!4$xyvNrMB;&oxWe-_)o+lqnPE5%OCtX>oalt!oh5J+58wb;2rzF>ucs!mpJsUx=Hv z)po4*7K^4=xHl^suYhJ-Qclq3vApUue>;*rdn%Xmuq)N{GJijgt?SSsPfK;^{uNu@ zUI{}6SMba<6-!ceXBo-2Q`hcKjYz9^IrnX{wLbL=BH3o;rx?T^OS-W+h zh?cq5#(rBShVInjQr=#JT$yX$ft_1eubMU=qR$s3*#`59SeBEUa~@F6F*ygqPv}kS z``!b#hd_7?P9&%DIWXK0&Zo24F&I(}s~WKAe7(nL@+|dv5Pu2&*`rqiqwZ4b0309S zo;gb14#zxACWFaz%+3gIPZs%<-giQKlgRS|hW7LOg&5xgcG+WcFO;wJ>2&w?R5sb1 z?qq|py!!z9KdSUvH8p}$sY2X8itp7R&0#oPO#59(o}IiulBu*`|7n%WAW)U?E{C*DBecH?5l69G?X*ocnIg(1Z+!MuWVcX7 zpPKQJ25j>pB+p^Zt~~H^49H%~(*9qB**R(W0W~j$M+3~uNl%qnp{`L{&Xmheppwwa z8u<@I%4L{SoAjkc^JIUVj2U5MU;1T+^%xku=}&9-iwCK#v4aj#iuGdmSPJbFN zo;?orv&nHVIjf3>s|w}vB$&LLInnqT5*MN0{$(XXNS)ftZtp!ZHS{)QpNM7^Bpawgdaz_mXlzV)p*p6)eHvhJv(Jv%wNWP<3|9A286WWc#os7>T<#=)DB~qrZ?ZCQMLHJ^0pNn8L zP>**S#IoX}nlgDey@VV|-G+PWA#(KZAY}Rm= zgv0-%=}y3Qs=oJun@UtPsBc7wl&BC%WJn1q85(Ia6e>d$ic%Vp3{eSDq@+SrN`uN+ zRGKxB(5R$Yb^Sm0dfP`3m+Y|C!|ff$evR3u~O4y@MZ#6@SvZtLVR$6nSR%taDP?vKdK+>R~3v 
zf27?_yd^bE|L2O2_`@iioy}rrYV`(87^B73qTqq-T?RJEaQ2{{e`LeW^xK^sJh{!2 zmve;?W&lJ=v3fPJxPzV+K_)pLHn{pN=e(?s?EAaI`1zOEm<%1~ncZySML%gb71>7f zmaNap`n&AMZN|51v)xD7@S|3iV8~ALj>5j+JzG7W3{5%@3Q;~LGQKFaYlX-OwMs~;E z#aQ(P=~F?VHJ&%Z`#m6+y1s+i@Cx>L9$(gr7##~^e{!N^m(yk9<^{A)ji2fy`UHzQ z(Y&YD|7PPsuh(+scVDv5yM@s#?;Pgpp(5rS$;R1((bkcyc0UaV zE)><4=;L?ok79$?t}N>g@^Ak)!sB+@?xXdW#mEL&@DRE76{G%vZgt%HpU8R&)OYBk zI_(m9pU05LV4JzjL;PDB{g5#)3^ey+8eF^6Om3L z=U8*Xom$O~n>=S?2>9qGoCHklH~!#2B0v+8EFcz8ENtMGu-9bR?D zM@IB}arSVrZ;>jQr&rfj4 zO23RnJMpGEF6@B$c0E3Z%cJ#>RZg?;Gxwb($ua2&%BX9IGt5{Cq!QW}Ay{81TSoRgy{|oWfuD;I|^L)EYtjgW{M=X01 zjq?Wl23OWIO1+9P-{SLEj=U?5E+b_s43~m`-k79j$^xj=qv3(siZb05Q#yp^ks1MS4pnxg(cBHqs;$LEFr z4ria_GdPyLvpYQXz2EUE^YZolYZQOZ{q-7}-OolxyLZ{jX34AE6(&z$ds)XvYwIFs zy`|k&+RS}wQF3+Vr&sgdV??nQ7&{j4UW88aNX&LkYFT$8>vC6i_w7t}OrDRcVEl#A zAlWo7p#2`aZlO3gQ>)LgS>D7hfyGlWTT1UwM35mkScGn=^OCiqrRY?T%*SI%aX8(} zCOvp}?vJWzeFFO=2S{d|d6K+9uNP@G+5J-kIJ>%UC(TAu_lMFJR?EJppS1t5aV!6h z!2P#yYASt_4?H;ll4t81az3uFD)8#UB3;D)R{pPr=9i?*?#%3aU#iywF>V*O_9OFP z-g7jWo^>qwo?bO#)-z6IZBrB49?GNVVCggL-wyVDjffZdem|CI$SZs4cQS4_*TP3E z^R*rir)_aI{lm|^OUhfNHduN)*{5JnYK^=K^QEqxF4`SMj+cCw=P(!QcNyJZ!^x+O zF3+Z-Z|4#)DRPrQf5X~=P(Kq<)QF?p6!06KCKODv@ zwXly*snb{o$`xppx_H?;y@^%Uz%F%`Ux4m`=8seKvk4l>M*lF3zIVk?3_Xw>i3X=Y z`)+JH00WLOcPY!d^?2$T+FJv3dMYl@^RpJc?t@lc=dIx@t61qo zHu{)cgYR~3 zHFj-w$N$Zqdl_-^*7-|uH@nAA!Qaa~fw~nJUNka%tDVG;%_4m&YaMSio~-2!Y?R&3 z#aZA{k{=46VPxpUleYN2v9p`QeS|BD;qnX`B%5lgN#$AePEjHIelsd})=KV&p2Na* z&Tq{o`(WobedJxnR9N*EtnghW zdhA}ap1Y`0&gxJ1>}DQ}4W<2cn(;fCgT5x?C{ks1@rBl(fx~oc{)^R9?X)x$vcECw z_gmn{*<@@}@Wh~zy)i>YrAM%=CfT2)WisejqIL2ozDVQ!|8FGuhs=34bU*8)Mpu9S zF`hp^3Wa5K-=_7EkRPD0J;jdFTC4Aze)P(n#mOw*hc`5YUsi_XO=>JTjs)40kW3Dn zjp%7*xoD7C%5Mo^=%dBj`TBc;WzN4!O;5DsGN1-4e61*P{|%}JEXGa zHmd_KDvV+kA>9_Izr*Jn^qDn3l|<)Ms5?wlNj`_?wfV0$2he;Boi6oFagb(q1Dyx8lt|K`l=RO1N&EvGXjN-0Ra?c6*%^vqi|P(M!&~`7G8E;Iaky!&FeOiI5iB;fpt6mx8;40JF-O2XY!yDda2FMZ$UjH zS8=nb(^xv|mR^I&p3cmQvDM@lio18vDS5hn;|~jcmuD=W<8EqwCW5SC?H|RPGsMcQ 
zwn%oeOZDFv&UruBMNex*v>7Z=S$|!$GS29bJsjWrSrgN4h2T8BlxMxf%_;iNSkMH2 zZl`(f*Rwu-94@Zn_piY+*(34>st>{>17ZTlY{2M(sW|^#!m`j8A`OS7{G@{@CoHg2*^f}p*(fuj5`xm=< z8#5=7Y8q?T()$#$T!d@Ku}v~%W!~OToBu%XN@qMEihNRF>+OZUCec6Zq$ZL!Zzx*x zr#!j62qUWDU1L8hy7m%Rw{dg{PM=KcGYYfLw@BBW9wmA6aBct07k{VSa+(dGd)|#c zOTJln{Vp6@@zlS%&>d-JG0sW&Ux7k?pgLZlC?THD|=No>bDJUB*WJI_>x^j$=rGne3A)2 z`9;nk;TSwi?#nOCjk53RB3$|zveQ_yg|TfqCLfJ=k6=}Dk0lG#$!wR^?%5ae4qdX| z>Qj0hLe9KP?#D0RGy|;QcPeOJO3K?sp!U#P3aL$ndD@-Wf0y=75RGy-P)!VNAjwlqrL1%dW{D0if?&ZXUJuJ_I=Q4D^_G(!};*3&-?0Q zVjHm}qiF7@=7^ye)AMN>U5JfY2k{y%WH)%SmCh}+nc9 z3$p|5!I^YFO}iI}DIe48 z7g6ata;KJU)}X&D&Xj=Y17x^B&oAK4OgMj!y^~oed!Gj5<}7>~50g}XO}6Lr`RbWu zOg-zHX;l%{sntE5^(H~+I5s+nFC~vdp5&Kv_4~XfyAq$kt`D5k3v!iRUkN&?1$PS{ zEKlRhSobVZ_BiZp@4MEtyTf<2@Goy}vYxnyR#)nMmLBt#W3rZ>)lSw0=UKr#EtC~W zrW&({i&)7K)S7)R=lglKHBft(z%YB?j&RluW(RAaF~d1mW83e}&u;4(dMu7fwJ~~- z@7nN>tozDS_^hzXnwJ0I^%*{Ia-<4R%gkmAEV`5BFsQtQQOEGkwLCYw;s2#g3vH&# z?I@A961$w|^D)MQJjY1et>Lth)b~5`rvI(!lgiDh0^y^_U}W#n?#e#2VHL{%F;-|4#q{N8Z<9c-RaVDJQg4}?tK9*l-w>h0bI zuRMpX!scaQlR7ZT*!Uuk%WB2x`ss`}BYl&M7aPeoS7f=7_g@D08aO|PY+0lHCPti2 z^E~&>lcjh0YYW&Vr)yS3WT$lYH`an*@~P~kU$TXaasEDqGj>0@N#4{ewJ=y@9?Ay} z)=E|EAK=WK@i)Iq?cUxzKJOJaLw6RhJduCzr1_K1so?MXFzX?5G-Bt>BG=O59kv<6 zL%)abGi0eQdbNT=W_L%?<8WNfF8#{l(!u)4N`&Y2z162TXx$ITUeQM~M5Sini~3*6 zpYwivUlL}e!v$D^*B-7&&LtA(@5mckKycL3Tm(5YE`%cDyP+x9NC@AFDtK;6Dyg$?{!Xgu&-i~YHTvz zIqyI}IcSHA8J#ew0qqk#Zl}#a5pkW1Bw6LG0N4|q|ZW9jxVDT|Oc z&s4H{@Dh=EC2z_on~Vj?0Geo#tbC~zm>hfW;7VgQK16TNvF2g;(M@#u$r#(g?^7I0 z&dbbBaz8tjFK5?Ss%YPV^T{86sH>ClAa~|N+4eBMlXs{JUX=0e_u5(RyaB!~1D*G1 z^)YK?7y1aTXSLK$$9wUbO&D94w(iWceapYyZ`N-L`A$TkH zg@MS+$g%-+V*kBu|MotJ$j#c7@WfvHzI9k%01ega?5 z{?P2~8d&g)D$dQ0jw|TdO`O~pHu02VboU3@D73?;8(WyQ(&6 zUuDD8cQ_jYjm@fy>uUv6YtVZjk80wYxoq}>^O9k<5nrB*Cr6R@4Kw=GWcZ%n=I!dJFL;t2y#8f&$HiP03?3%O%Joy#FppCK2*HA%SBx~W{B+nQ))n&B}Y`zeKMqo*I zewSzB%lYfg@LLYS#QW3`$UX8=zL{cnQBtH{fe%k&`n|lcJRDvt%!wWv$Z%nH0+S{}m*(+ezpU!*OcX{JeO`kY~38`C?ZVJT3eRy#D4b} zRnc$+sg7~=(-`+5mK^}~n$GD7n+UrfGNA=jxu~YDOr9QjzrL6RS1QNGt 
zGb{c^@`67+&8US9hZV10$7c|-OYK5d1^g=~@>u}ff-mGRky^I;(d@8z$ol75MF zOK86jRPz?%Ygo)SdOTz{F$nuUfI-^1)%Qmg_*Rr{55mQU5NbfGWd7NoN4(8mS%Llm zIg1+Ys~VrSkaQ^)XRljX)}G168Fj9_X5$95 zd`J(ePB2wmc^lS$i+k&M?A6*R;oH0$&8qIjtarD*p7K}T{4~^OsyOzC`se)e0shwj zUX6M0?eI?hkR=dG-RdQdY=dAq*Uc3#7O+#^%J(MkS>(EpcdRoG^u?Z3%gOkW%HzK} zcPJ?;(Dxqpzmtux)bjx6&vsr9NR$&RN3zkkc>Xzb%iu$@{d{f~Hv%)yEJT}oNcal7 z47@Br6Nq*3;s)_dDf$e;pnh20f=tKr$z-p-S9IJ^U~?j6GX6XYtqaYXhVbn!cy=6bm|?Uy&soXp zF@X<12#Z1VZwL3}mq}jatSWE7Q+M%^g&6e#dyi1S_16pAg` zU@V_*!ip!seRsy%3iNI-qO^utvT&VC>*NjY#@qkngPri@Yu@~x9&hC_SLm-Zy|bIF zgx~iU;!m<=7h$qvAyDZq3tD* z%L@6dGf#butemJ_VD}m1i0^%`mjgtNRP@c;lvP>}(_a59Ycc|EPyqH^& zkDldIG6)T3<8ds}lXoPe_?`N$>u6R1ZYy~72c%COsJvYZ7vJQ)*YUVj^v}wTG1@Lk z#)n~=CnlfxKMdllM9ZVuB=5XeLnzN(hQqkDbCkGZ z1gp+ALs+3AM%>Fb7Z!YLXW_hLAL%KIpGunS{5oAv*4Kj~Bt#-EBl&#}?91#DA;=`^0a1A4>Q z;TV0bgVpo;FX@`>AbZZYy;$~6o|ARRr(^y$F?fc5se{)JHiJla91Kzmr!ekZqZ|ps` zfN@)Yf9-gxalB(jkld47cwDlkH575Ow>vqYi$L)wy{z&-?`n_7jSKXb=Kv4;oV;bp z8TOq1Q)~PwUU?0SvgRk*ve&c!8d^8zD_NJC+VkN-R#G>oS!##>NXopoZN^sF`Q2YM ze9u{rlI0v{Wc_h+m8{~UTS;~|24#%ei^sIZlY@*I1Ibc?+=pRhdDv(7V@bWfz(=mv zb8~#j%I$5gc%21HyS|(VJ_hDgU@XZxlw~B` z6VA!nmbLtwXxhNhA^Oj{w&Z`SK)!E$Qw%26ak(fdwy}7sQ;rr-a{rnA#hvt12Ah}A zAo>2rV_`9u9)L%I4SmCb`e@G~z2Uz6q4N`95>}OZiD@Bl-+~r8-Ae zA_U>-1#aHNE~%lA==?l>rb215zr6#KWXH)KoUECj0I94H_|vCkiq3lMVm=Ln&82=% zgu!!K&+du7dL9YCA~0$#n%5C8lC8ZYK85vpf3_z}P1f7BB1KkZUk9_Ie!e0;^`p=3 z-67+(yMkt8NOLT_TN>R`pJI+)_H_2Ke&$|hBB_6&`Q?sf?Lcw~_9V|fSk@F0dBT0C zwyTk+Kjf21_e@@N8&4<>%O-_0Dq=zI0IOlkYuZdbqwFFm!yodT~58w;D_S4Nr! 
z_%>BPSp!uGyKB*>u7+s6 zUUd0UBl((rCR@vN4T$D+GPa1D=6UBuEnxE@=iB4l!3 zrT*6er1_|T*(LlrnG7$ZMS0^wF}>$4M^oSJ<;v75de2qKGw}rbuG8<^&h84Uf3=r; zt*lJh)8Ab%=6Z2x2uoBijKphPk>?f{ij>nFDFvzWJiRR~a@RB(CdoYb1Iu)SSW~mK ziR4=B+;~Ltc~9Y4sT!P$!Fgjp&-JTtr5kSSenwZ0`A#aaXI1HyaNP*;bz0d|#ktlmdM)IsDS-2G)j?sTP-jUTKOGSwldQEPqtc@y()B9?pksd1X ztUgBEa`4Lz%_-WP@7nnDFrNpLX+G=by?!EV@|&&Wj}6!?*`2b^qN;J^2v(~_?)HAB zQdQQXbr4&Qg2ajbrTScUMr7B^dB%x9*}0uj;v5=fZD?g}<~?IAHrRlkrIyhs zPoVpXKr^wXrn$)O8cnG|_nMZn-?%4vzh$=u7@oDS*+-wXQf*i->m#?Z#S*@smBpEt z4Wm~wmhI4MMe~HL{~pYr${XV@_U+FY*ac$Gne!hmHZ{>gvb?6+d}LM*B-hI|o{M2Y-{6%ivQg-VTLbvIA!o>3H8KKWM6`?B}fb@Q>tLfvua^ zuaddU6sS~({x)&9A}k&!=VH=r7j4e*?Qm8tk2$j)?MUXD?0GOm-_mkwg60`lcAXu8 zDS3wUCf4lm%?3W!kTjpO-V+5~Pb66ZLurtvm4Y9gxk+izY066dz7r?wKU+sQoNtS(-4`^o~ zl$T)77LuhpMAyPt^dS6_g=`eAuNRA&nIU9eyRUJp7L6Ae(Nh&6`yz*syfck!VbNw9 zjH7D=oNEP&(ar9K@ttRPWar#zPPL=EP^pTZ~t8wsM z_MUGfZ*6pKDYh)qb2mJi#tX-?(JJkSv(LG@wV07ya>*^2JVQsr{RsHy8U2$`ztCTw z(c%@h-{kC4bgEPE)rWci6GplCZFTeg?+bTB<@|m~E7?IhmGn2lHv6oO!J#^=@-e<_ zqw$4e(eE@o-nn7xIDIT2+l_4S5$4PiK}ygdcN9Mr`Wa6CtYR(h>H}Em77QDL z8K<)BHH9`kd~Yc(ZiQdg${q-t(%85~|9N9K#E~U>Opdn)FfBQ8zvEA- zWpjwAlr@vLx$=9OK1s&$WKAu!*_bp5W0t#eUvrmpV7F5|%?P(Ad}fF-yE9rIq?N2C zI*#>Ur29Yopq(=|v-K6YG6jzgz{#vHolCRp*gbK716(HH!e7w1*x9SurwuJrO}-XZ zj;2di$G!)TyrrzqF8gS047+~{;bh2Mg%{Vbz#)3=BaRGV^Gj)QzF1R-6j>AZ62>HV z#4+@5NtUn4|2f?2vC&nu%x;^L$&x(7$J4a|X%5wYXSj^g!#cg)QCCmJoqHu#=Z=4=|JSo$U%ua;JyS`kNx^E#x=@pM-HC4xW94wR zS?=mDv~mWyKQxc40l8CIzBmii(^7JbcfiB{TvLyW?F^?q#@ zGD1&JU`TK0H7Cc*t#q~O-{lR`pLW4eYL%N zhs-BfatPk6#m9|>$ng?+b5EalhOODPh<1`|DpglE`2DB0$FfyNF(WxK7SQ%9y|3YC zSuH#QGgAllbl=pZcj?0DypA1{tv_q>N3vORk+0>U$z;5z){0?ERuxRw;%u7j4d)z9 z)|yKpQd?Vj=e(H?smSmupQy*T+UT`~Iqpi{uvY=qtWQZz%v4pW>bs5jlx*F}(vS?| z$!6csk>2_oR`9W8G*30!?Djet!tJ!MkE2-!a<9I6U{EqSUS^!?MEX2?=;Yk9Nt-o< z6XDd2zZ56Sa`tR5rX*Ly=WtK8heX#0c;eCgYmVQwwVeCptQ0P4yw1M%Jk89?wco`5 zC89{~F!yAe?6l9*h7vUSRNocpowuAVaCk)_D%TeQOR&!8P?`;;6X}{A5qa-@P+^9g z9n1Aa+0V4viSF4KFiIa)u(}=lT&B(DTHHqJFR-KuN%Ho$1H0c!%d+NFQ)sydT!*_Z 
zYmO&~3_}XGNUr`{@ie)clc#1JoBX8xiq1Pw^j^=34I!2c*;#kI0=lVs`z47l)y7Pt z!<&$+D-MNiU;CeIh1rL77po?lbFxRSGRhP(>lo3$#q~$U*yNb#_J8;^5H0e2KO3^@l2hrl5_LGmmvE1z$1ZYr^*&cXw{DOt)= zCuolNm;E+3XzefFHNwdD3vO(qX=(WFUzoM;$8Vlv;R}tPi{O6>MwJq`FT=g$0sPna z@xFFHY z<_BJJ80MrhX(iFAE-i_B3l35f`q- z>Ei7F7AtSTsAom^M~r3*Y1fra#*wooos09emqg%>&e*B-Wv-rp`)#yzB1~`8+hdL& zK=#epa+E8cc12hGcv}mpTa~(-d2>Ao77cvcRlD^__&&BAr2Q}GI88g*r?Etg-&@=s zW+ZR!yl=$BLAaf~)z`4#*?Rv3&z7@GJ7<*CLNc8E4}QPlck(!Y3Wv?U|H%9{^|X%T zci%g+EssnksR4MFXDOLkwPc5nM1nubn3W6XlHx&sjm40buzknTJ6QfxpMDXG*OM&u zIB&wj-mLSCvmf+bE&62TW_`$Xg+=yjx5nYS`OP?^+n&ytM9<;ubP!(OgqQy~dxO~g zh<>le){G6w3Vgnp^P667z<}k(&h~mJBO*1>!VbJU3-7u(=WsR~1BblnINur18-K?@ z@+AH_s=)HL+C82pys5q37_$&!pXo2T{vOfx3-D~Ht%(rGleDMt@&%e+%&V6|dxBAS zsaeOb&{--nv~Z*g3{zJy&pgjz-DgC-B{;Oeyd*VVFJY~Q{5GRfHGS9No#$)!2S?u2 z_D;T)Ix|Df%xAIMzXdBVX1iwEicN0EfmW`+65syh@7YP)mbd)Go*C255N|&bhi9FUWu7?SpTQOc${5F!>~L%0fy=8 zJ~BtEa$2pW#b)HmTlx3QGV_k*R19eV-Rx}bj_+lhQ5s6A-E^^bC-8^INZFedQ)u=v zEnkM=pIGsgc2Z^ZCX60L!b@=Y1Gqd((}P9K^xF*=8ezmSd_M2_9&^P9ddYL(<)lhw z-@5vkC_2p4>!^bFHfHf_p_dhlH$&k}?R+I#K0=%I{yGL0*%{l3eCHSX$@=JtnEj=B z+GrB(&Za&{T(8d4lfh)W*j8CYO-|@X=y4v&zYq;~(Cl|w-wC}%l_Uh4kusyPvYU5uu2Bt$s%MQ@@7uj9zKt=-h8Z|hJW{Jdw=>) zqu=;~CEvx(*!XE2Yzwz!{tX+RarQ_09LfWa^Y3zAG>)yZBX}6RW{~ABXU;KxB$r@2 z*JgcGDk-+-hkL?o9EKe#ri{SHk7>Qj^*?L3f!=>{^i;jQ$m-c&)kqt8o{-V@GknRO z#v|z70mCa{?MrxE(dWk6s0WwNS+ox%-eskl{=bZgf1L5D5&a96NMTgB0{dByQqaw7jqZu|3D`WSMw^Z$Ob9t*ME&*@VI z`xudW8vDEj&++*7H-;t`_X?kq8S5{sy_~-!f6Q4Jl9^UX@$hPX-j>ahNo}~6E9*BE zZOY-@`7l|)v#udSDtC7!?LY7)5-K( z0r}K4DQm8uH~G)#`!O-99!ZZeyT~)z$1rXuHe`)x_BLh~UefX8jPL8`!CHUPDE%Ut zv+^$cN#A4pcVUza`6a}(yp7Ka)st9df4bJ@Lz{Tp0j{h;>zl-lmvBGt5oVEWC%(LG z45>n=dGN}ci8n;pJ>dJBD;i>3*0UvpR&r~mwsEpWR^5M?47)8_NH)>uV6e^TcA2?OGUs1U zirlwm{asd4RfSk~W0d4wyZCnQwufWP6dwHuj<%&mcUD{K`XP`!n$I6%l=*-SP8ON7 z&vhirRS|nHaz*aAQYSo>RQ9FiT;FHaNO!R^`EM%mpXA-%-`~lu^LdtcsvgdXmp!xWfiyPHuDBP9S)IQUYGYY=lB*W8`xQ8J zCsZe3PpYk#_sy%ugWO4`hT@-MQRp%( z$$-*^M)8znC!2_qWAVKuSz3`OS$taSxd=_N-mxuDzlx1NF4(___HU+3@@TcuW4Zsk 
z7kgYR&8ntP*{8D>-xE>hYW+ZSfg-;B14^&aJZl#2cV703Y{A|{?tOSm_Lbxvet4a| z+sT%FHLJI#(@ruJhk8+#%nY>{yM(jZ|CrUYSM!O5?~K zbXi3HfAqhSWO;+SQ%p=Ko~54+o78mdsJ;8JGLh#iRy~Lf>tT4FOjKu|F<3f> zzn9W?R?ZA_#jzMOlGRdkt%$hs56-<<;9Glq$y&w!G>+W2XsfaQM`vz(jv5nIk!VMO1P4eF3L`y7*b{|7REEh2Oz zUGfMe+s6GorUc2y&^T4!$FoKD6m0UdD#r9A+m~#<7k(T~f}dIMMp5HOEWaD)lIK17 z`|>n9@!>O)_0jVkqGax(j>G2FIGtxCRrR(fsgnzt>_rG-O9={_~i{eIyc3Be!U}ObyZ9^PX)ky!q1jz9nTB=AxSX62x3LWG&oAzEiPyCaew+As55=DYEvZ+vT*#>fO9q z%nY;#%=+U-?(}aZ_ix6TS|l5VubXlBDRJd&Eqq*vDbF~6GYNA}s_kTzOL^x!M#?=| zG4IPa@VM;MTq>^3Gp8BtXVyGagylBZk0kepqHWgyv>?;b=AqS`F$U|t!L!<=$-11= z$ncZa-=g0r{`?jFN3(7v*4xaI8CObaG0!;ugyuzTmO5Ka`PDYOJs3B#+Nu_bo+H~l zXkN@(OZZH3r6t!{YP)TQ|CM~;9^*%MaqfNbB5QFo&;LTixZb%Jk)Wuvo9Ojq*6gpP zx;&&3Ms5-j4m3acMDMu+yqn*>ezGl;pieT=r@m=+?Ps^aOX5cS zd@`9kkxqptI%PH81h{7PPu_!0fx{eF9qdkh4`_d6Y{)z91NmC=Ht*mod6tz5mRGuJ zKAopvO`ZphCqZT_jqoawD({L@1AVsRm%#gB{*_%$!?n4u-~SWAvqJBGY`Qng>=cui z_}qf`UE-?VwC%0kyixv(|IBjsjgTp;rM7I;U;9^({{-HZx@K8l{RFNy6MwFSz$Pds zv(s#np2H&F^Z8^pY=}v<$T$ct-S}Tta$QIIDVXyKl&3rYeb>!oj~jU5PBzJ#*E~nr zm)HN|o4l{D29?|cJ>=6TuD%gsA8L0kTrMERQnud0EAu>KvA-|(zq~$Ew{M02*<~?> zHeZr|7X7j!_cJj&>%!LY>XQrHew=>yVO8p8{Z+V2n6KUI`PGp$JP-G(i`1`(KFOlk zLR=i?a|_Y@ZP;dQX=S=Kz}T^vn!FsPjH1Umr;m0f)AAfrHX?H$mbr^>r1rqqu6_jy zSzY^p^S1MbM+?u3QcGbFzq*pv*~y<>wJmwkE>>#~k5rv`TJJaNV-eh1iomJTo#!zN zi~=WXceNXU5QixeNR_I54R_PILw2J(g9VUbmi zc~f7Rv@dDZyRF3ZxPJP?ot&eA#p=n<>0}-&5Z9aI{eF?|FX~5EYeuCInuWsVN#K{ck;njIB~42SCJuo*Au({&Ze{sI`NvHOP|4lKs4GjTZaE zz6yPs;mB5!Wgl*8du0tyvh22{`xRv9fhB9%c>)_}Md-PZdX4=m7~2kjNve1jb>0fR zEy`|rwq70D3t0U;avkgU4S1DmmoGu(-GWA6Xz?r7%<8{#kV}SwHY`(9WJ-><7xg<; ztXhZ@xz|a3=Kl(jU>v=YN%11OWJmB={xDRJ*}0$kkF#*?erP=9|5_G1ns?P^=OSi- zf6@6Ya-R&>XZ2Xl(}v6G+FHaq7!oItxI6vE@PX`cI7RfkRx2Mu`d@urqmKi{yuV4f zlAayNKa19fICeA{>S*;HXT4d78V_J$asGT5U90M41F4(f%lX)Lv54@QSYOgrSFq3H zSd-b*8JISxfa!XS&)w{5(##icrjsCRBJyVYFh^3?CmHCIlX#JS-iPu=@jF!oNBg|Q z&pgYm&TDrRxI0laE5(|5H$RPB-SFiCPkgV2Vcze}_5WVuLh6ysh2#Vlx&Rw4gH#ti 
ztt3l%*j-=nzY9t937tM-x4ki_1a11_?t0QC8|LZI&K=!X#-!@Fp88EK@Op`J2AXp| z?dVI+$XFE@O>$G$`R-}&Xb1;2`6^>%FWMjS22%nMuX&9xh+{>=5;+pdOrZmQ% z%9^QSocyhS7tTCMyQwUF9KK{NZ+zeaqr=|NNW{#l@5O$9jHAiVnW|;Uk8qThQsey| zJj%1AUsyA^9!C099!SN>VP)L22vT%F!}d)WE>rm4<|X_rnA<)Y>}#*+3A115#2!NR(~>?N@5-t0S)twTY<54-$;{|i=cab=HF~&%2mJ-Vyl*W=uIzOviUl_r zDXtg6|A5_eC}qz;s`2Hmekyzw7hf8)P2RL+=gXbE=qJ{$qy6e+-fk}0iT2@Lq)s-` zuU$I{Z$Hz+F48}VGs!iWs%fdERf;63<-Qz~vOi)ey)xr^M^rzMoTUr)DNTzKm{zwS zd!Ez$MW1A9|ER$C3)r%W5w8(B?=+%kozEW_l{=amaOzLSmsqYpj=xIotlj(-TDg<| zS$xWh!`XgM!?{M<&C{!7%FEc-NGyKOI9AftsS=wE%$X_PYwXGM_g|fT6-^IuH2XIz zn1|ns^G#SH>&$D?^(H!Xc4jfN_GA)l1g&MzP8IsBo;i|@)rtjHxE}5idz5!@jhf z?wb~@o7G{LVr&cT1ev$_Xx6zb(PHu*6!UwFNY#%YrmoxqT4x_st3oWQL*ud{NOqr< z!Q8fHn=j+nTqqprSae$t->g~drSJ3j`7T(saz*aEyTUkoZBpIf4>Qm6AifHlvil%= zdb-np6x6Ed`x89M6QCI)ah@!{feVk2_Is!m=k3i|w5iWa^`7Tmi%5J5d0UH;^Zm?@ zp66YE06%U5qwJmBUhw$TBde&-V$OeF8<#q_5!6$oySdMQi+p3v_d4*f)sRcYwNyab z!NPe8eY<#))lyTnl8OKynO)T5KZz&db}IL&j>Y247B1%K=8dtoC>Up08oY zLvZ+x)=iA>scCVK_LAc$>p17o=PK7E|K7zg9PVx;YmV|WtNv0G`aBWkDPusgmyRcQ zZQre6pFU#bSz_Q?nC)=HU@g!@rpCS39G&@%N z4?3PTc*%=#0(-yVyH(hkJ*%l5wh`8oSn*F>{R&TC=7F>6crtCuJMUof|AqaHS#|?% z_hGZO{>s|gdHPHhkmUN_O8+V1%~;qr;7_Z>pay(68LSrJ=OC?2$Azq`>_v_v*d+PndejuN&0#k^+X`O~a+ zDrxd$T~OTz-7v`lFro zruL_qjjo~T&t}5kK&h$u$9iX+Mc)hcyaqbM{Jc^W&06y(vEoJCs;8|}+{#z8$}iR2z7s9Bkn0zirw;Z!Z6puL z_w@NyKfl8~Sx3It*8lis>Y#VWm!52UB+OHLkyw(Q#-Yz=IZNttt3<1 zB~Yp?;vQ#&8s@K527CkmCTrm#eC&uZxg#%&Fa7E8qw#BC0sHs(%coc}lkA)Ma7pc~ z5lfq}Qh&eC<#i2NU^`4ZLpuAW?`DDRT0GX+@C9xsZ{z(K^{J7e3~i6bibHYW20U4W zM;B@RZYaKnecMFQ&h*G=d?LP{%*P(4Nit@fcF`Pe@p$_ zmTdDBtEbvmo|o;%zh`T|p}VKDT9SRsh@PB#Yw_m|9(@MCWcCzuL4u)*Suk3{$K;AyC>PV8;;IoZ)C+=dmFz5(2`ATa$Sg0gP25DuBUNSpPg}l70xX#(xOLRWn64kFY z|H|sy3EJBUtAl*|gtL=3vAXE-6uXVY!*V#+M0DB0N@qEqTpcH3;3y;Mep)-6cRwq} z&NH_66<7X+UVWUn$B{3!mVJWRU-=BaBuC9e|9;hCR#;Vp_T8jN#;7;(Chy|&2J`^l zF;(Q*gB7zQ;!!_a>+60P-buF95gW~}@00u;KhO5X#5?YN}kRJ ztT@*Bf3VY@q#DEm1L;Pw=N|j`hdS-&}Kzh_{S4P9yoAT1}3Cg?h|7*6lF9iu|A8 
zL@LUs`pWNEmMn&~$kPGNC45e9f>)gJ7j4&*IBVhRk!2xHw!o<=qS`6qMD~iN%JjEl z>ugb?AG=Oq`_ou`EJk$`8$K3e*TAlk_Ob&axqsU5sgr#>PVZ;KIyFzKIhqV9xxao+ zA7j{}2F{%e$%%z|-DZ}|s8vDRE1Z`-dsze6UvGo8`6B*ZXUuJd-^tY1g}=uR%|8DucCLnWvW#V)a_ZV_ zaPA6mV}mx+diL=QA;-12bb_{1f3}<}^EPjt7*|#A$w!~vx|IbXTfSQRd zCD?HqiHnHox!+jgf7tpX3G#MwD*jA079WE}SrA6X-Ft+tbIEmeq~B|%&NE5oUxS>8CUy@exrqx+4R zmul6Ai6E=lp^CPbiU4_&vz4x|(C%xIB730cv&>`UZ$h^`MQP0Pa~xTVft3n&UrOTR zMVQA~X)F$BrF5Ppl{IhLgGSkfSl9VQ%xcc@`5KzGrSH|ooqP0~(XtCXKBmJOQSU3( zSc69oxxP88HPm+Al*QY|u|cYLtT(P14~*zd#jIrJ|HjDo2dT4{VL9wRhfnTLZ+2`i zlHQKRsZW>M#nW&?w0oTucK3uivZTaJcW?k$t?`Z5;HQ;#BI z=7)Fkba2QJG5jc4g4KLU8iF)bB3$qG4^%PR~M=&Z--B5xVjjAHw(W{n?{>TExgOJR!G@+dT3HM{?VAMQcMf1I1lmc`j= ztDdhgl4l)JYHhA1S-=D9(H1%Ze~fb{~dGh)1ZDZ@4w1iHoN^F<+uCLb|Bwdf?rRvKvpYdt;W7In}I)zuzoMReo8d@ z+vuEK;@4rvMWp?gP4>q0Jy>Us@iRMyvyyrseMb2{HR1+h%lTx@vz~RVeU7m)c~}pl zPgWw_#$MObGr3N-`99gj-o?!x{#{DWl50`#`$i?OY`)m7qjCn}3 z8AAS4Ncew8{ybydPh8Bi(PUj|%HCOz@TEE5CV2gbo%d<)XEKk{_g*B} zMu*%F2H_qe@XZAuDGt9M_4c9u9@1j=E*{25W?}R;`2CG#$;AH;wCB< z-Vs0Jfvd^79OJUK^E*g9z>kyh{c7TAm6F=8~MseOicd$ z9=x&`{hu`^-B*~0rEcX85}Z$iQ+eqe2>yVvslzZp?^%(WJGQe(|EbvXA|{mQ3CRqy zy%2fwY$W@6Gku3ckPK2;n| zU8vL=$SRsV)p-oo^;s=--ZSc^l1KIh=gr<;TDaGz2JDy2gbR)6T^vh{Yon*9XgEbb zy`Xv|T`P*zrD4{FJU5Fj@A;g{G97$+2u{g;@&%vB{peXZ`~%+f!i&AwWh)t{c5O8YWUE5iFzYb&d0Q#WRp zK9|vL2Kko5_)w^RY&0lK(_39p68?~Ujamzh?Rn2MSWL;=g7Sq~#M>Bli(Y$@ zu?9w+pq&X?&e+wQtY_ogmAtPuS?X!y4n2(KomH`<9}TlBB5&2&^O90z?r99lZj(pY zyj`+f6^vlc=AGdxMYzE`1;U4kd+>0TpBW4-m}=mDcSv3s}MJ`3aB#6 zwZz=}A=`>PE5+OYcxmdsB-ePJn&-LUn^?I5CM$7iKK-(1E35rGutF72Enz7?gvi>X%9nXKdYB%p6vU>LN!kj8|!_lJM z1Uj`8C)>hequ;goObP5ghi3J()Yk9UVV&xJ$>V+vPrcb+nIDzoQ!TK#xpt0b;m=4t zgO=G1H5l9P!?Cfv;6r$3pV9LWE~1B##`5+?;0x)OdbxRz{~k|#TR*w?P5!)}MAu?I zKP6tQEl6Ahnipe4Cw;XS4Ra58BK)3b%~$-D*{4wHxxD4tR+!^u zFK#LAzDllUnD!FB?jbU!!p|hS^<%5#!)uQp)g8%w$=mSh0d*@WD|5mH1 zpj#Jf|9>^>j1wn1EBU0`(QhIKBxhI!^885iU1DVRjV3o*>Qsznl{fW~HC;>aA(`|# z@{4_;_#xILZ^}hl$U4dOB4TIgCyUkjwEKfRSCb%nVzMS-U%qmHo?5}V0ohK6^;mw> 
z5mM)}PHCPTbn=AbUQvFmHnXyO29GH2-y-t8?wb)Ljh%;SKRF-DLNi&l_o8*y9q%P( z{14&{Mc>k7%e`(gH`Iq*2W_mxsf%zRYhialDf|1c<`<`n2EFj`59}RF=ddBGfVPk* z_Z7*RmOa`RljLTf_r!}%JavVmubbmdCv|poRpQ0loV|qQQ<3IOn!i+d9(ao5OAE0+ zHMdTMV`roLPTYNjz5d1Utd&^AlKZ&+QAnlcdDi|H#lF0GXwE*_pODxw)UkQ4sRZ4g zd~%fWA+zr3v`rnTeaLn){Ce?&2YJdCm?z6cZzzovhd0qZxy_Te|3Um-LyIbyJ|9Xg z;drhYQ89jV2>imL`;h35Y*)^RS1nuD07!#A5c%6uwI`VmIpSzUoR`09jmLfr(pJk=Y zgWAiki%8Q9c2D{IGX%edOL3M+-ox|gm!~f)9nV|9yr~_58#jv?c~&u!M`f)-D^aQ) zgkPXVWNr)farFLniL?2@3{!njxSR(BF@2mwiOv`qWiVOU#C z8YJ^wQIU38H}Exkp21qxT=|rEwBA{HK7A0>USqlB5?@WGvHo7Ej}sts7S8nLb6Z(= zBYP$b%7}u8*MwKrv3!ehsT=ts1heu!JO0ncygKFw*)`wLr^j5`j0C@8#wr%dY+{Dk zk`+UF(s`ShmQ{eu&7D)@`!C#ho^&%v`yu(tVDXhA*8Zg2hYi2ibLw4Z7j*Va9foBm z@!qF>UXGa!wOgMJ7HY34yKKjWKS_8go;}51zY`JK(5{IVm$|AJ3mzlJJwg6bB-zUP z>A5MMJOPu%^r%4Qb?kV!ek)>cvTt{x`5GA3Vu%0X;SF#*g`5xIN1k(>B3`$2Ri2tv zhDS?M4G@n1$f#s1K&)h|43ERRSnG3 zUxCCw{@&o~ImX50&U*{W(_q|&Bp0&VM4Dcw&-1i7N!)xv-%sP(bbj+9M4I7Y=E3he zx2gUo(X}(qt=Cubr@u~$$Mv|wRcAW?T5YB3S=N=+a;&;|R7txX{gqsP$-~$M6Yt`& zdyy{he@lyU)mW?oiSG6(^@?_3{UK2P!npJs1SdHEXa1YD5Xbqn%~?A!c|RKLg!KT@ z^ueC_qR;Q*LPnc!^w*v@tRrD^yN)#%$=-lxv~dT$_ki6J%+H&-su+E>cE{18AK6o7 z>j~c!(eB%%>}HHy&r>S-c^j{3!zz>6EY(x5gh?|V+TQoc(x18;E&0U*&S)blWUp;v zcQPnuH)7sFCkfTwmN~$hZTJO3&@oeGjr#EqBF|+7hiaYHE(3U5rwFg z`Ubak50sNPJ5f$TIN=E~TTIN_q&yvH1BH#ovSi46L%b<2@;%84 zSufs7eArtXv2vbYEg?@eGs*f8ZNcWf3pnPj&UfNO*8Z$Cx^6a$`;xwE3K%{@f~>>8 z3ils())=TZ)mEOgJWR8(G)z9qBlvYO5#R^%)r7!n+L`34zg_!|9*=N-H`eXq%2c8K zfQ^c2p?_gi8jWd>(z&62D>-u=Ydni3Ma7%{jOf?9Hd!ui`M=R5d^kgVXvKE*NOhN1 zck+RAc+T%4e|^^JK=0T6??RI>AkSXEhJ2nTXT@W8apoM}vKS)!`}DbU#=!11BVjS@ z&i?nCu|Ac?j@CL^{nYO9@*(J3p+{=xiWn2@rQP%@|>Ie&q|=X=s1MEQtPug^ncOk*X;8XAI>h&bKQHqe)QkO^Mp1qN4-^ZKw{HBx^|Aoo|65p!rS8;Q9 z9i=mQZ+5KzEYhFKK6SO6e5|dZv|SHNT$?+F4Un4z%U5VxjrERjq!_9GAV&x7P8Lxr z8oi5XFHz@oR`0-03$*%}_>{M($70Ss;`;TNb(bqXcW!H#F2tgFYh`?;JXvT zbH$#6NjilD|3S65)_>Apd9A$)u|3gguIU{10b?Od-w9; z<9Op<;!E~LKCGQoImw#&(j6jI*8UwV7Eg9g z5w`t>Pi5uJ?=;PhmYWMBWbV>Ni$3Lzjl;-1iKHv>t`C2&NS`BNk!oQL=ykbXexY05 
zu^vp)AK{SQGY`PxM3HJ9W=`fSpR#*TR%*nG6-B13g3ivCNs#S|H>viu3cufRbSn%h z(PVyMjLcr+ybrvTB>S`4e%kG!&E1)bJBc`NY4s<1Hh1sU-1V2bA{E&4{PrEZna_{6 z(7Lw~@M}8c(^9&f1;0_^;lXUVS)a|Jn&%rQLaPFVvTvZMh};l@kCCMlj5^UTd)}wh zJsGC5x2(7;=8!Ix=dvQKnvvxJOc?;H&#*0ZXnXRUd5Zp<;!h<$HW%Ly!Tj&>E%wZAftupVAYPmL z2fdv;jwX9xP3|jme^~_MGAFNqaqmN56??TMT{8KcOrq9WPagXA`k#uW8RtLoSMFLf zA1q0>_1gUe%dgjC>ObUJP@d^G*Fq|dw}xjgGIoG>@`4p--`QqPcd_|DFn$uExd+S& zo;*=#j@$3~m-oG0czfPQWv|RKl6+3eiFlq$Tie+-wX%yAIB+iASBZT!MYPv(<4e6f ziu*_FHESgDJRr{~GJ<3Ubus^v9dw4CM$j;MAF7M~?J&KGYu{#@!DM;E&r||WE}cEh z-7B$AGVEvma))>~okXcl^XOYw2+ke&*RxF@MLyUL(&~DD`#C zMtF>c)3>}W>noNyFIh`dGwdk1)Dq8(h ziX_Fg8?P^+@0z~9r4Z4w{`(ZIJ&oua#gTI2$p{iRGBW>*AHBq}<@j)q5j86*)8`1% zOkkaV^wEh8$C{12ptsFrX$#l8A$zI5l5eI3S+hRzPu9xZeLCrOXF%_WwXF+tx~%%j z+JIBFFi`8Qa5V4p&!Nj~xIZVh-R$T?q+icBeipAfLhBH+cI5Y2XFnbui8ON|I#;{N zhTNM)Yr_3Mh_59@5A8lp{^g_`Y5dKe>^ETjhL$@(^f;0Gd1tHt%Yi#Q#g-b((9+!?q78vS(o&{fe9+;9miv4LGG$!(&`aWex|2oG9J2r{` z%pvP9K4%?T-Ys2&UDsjIP4MVIg3rajD@Dm);ha5tuNoUx`aG3Psh#zh-=}N&9Twe& zXUCANyKm0ZZ!O+1md*1f?rpw%u4wrp3rwJSDOMhahspHvk~@W&G)(rTJe|*+>qFjH znw}TCCQq;b;~S^p;2HXufjf`6`WA6#0ZSeZ>$)tpLSH?3_VfPrXRDzsbv_xAjU>C) zC*kKaTG*Rc-pISN%IV_5{lZuLsVL4)V5|Agyp3JU7b4wRuFCqRFJQCK?5q=9vex(_ zjKAC%c{(J%Ga7AUwbNNRPqHuc*Ky+IDsm^=@dmmq)@yrc-ogT@n7CZu_c?yEf2pGI z3zYN3HLGEIv1|p=B(*!Ty5b%_dai$2p?CDN!67n`+Y-NsM| z_AiV){dmK(Y@dB7sTjC`4KHTf>?6q>^-M=fk}8?pvc`BZ{HyZzh3>UW^P3?eN)>HB z?DvzdJB!|D_}RvoIhb#qV3hw5n@8#SC092kK};Cps&jeXv(Bx;-g)Dg6+qSe+z9s?c>alAUZK%2NdJv9e~>WGiVlNL zs@#oXzpKUYWQY5m1@>dh%*=X<%ClJhH*6oQwWpoAm3MvUoGY>6We8lRkKydIOH}$t zr0K^(6~v3I(@Tco6%f9Ie>~^&V&iIZPd$jSw_`;rEnJH)Sp{1IMn~!43dpq~O*0Yc zCQ-jBuI^96hu9!*|AT&IHjex?ocpgM!yF$3n}Z57o7VicmTy1PMiaVRk6DRY3(T25 z=dXw3dsjZ33JZJV*N@JcUx@TK;?_azvxyc@@V{7KIWPH2Y$#(qORl@!x$R#TxqgP) z7?Lh^_C~E#CdIiv*MLn2I3>@^r*z7Dhxh4~XC7IZc(isJ;P3@>|57`HS?4<6p5ym> zEdL>YyPoVd=yV@u_QbENw4dr&$FOj+k*2nNRw_NiYpb*Ub97z+=hb3G^7l99V@DSv z-XD09N+c)3;5KpgOy~XOSO>PRORnrr`41P9v*ichWk>!oq(2F_`jX>u5vM%K$C0R| 
zG3H%5Rq?YO%U+7jSM$8P9K8wB$@l#_=I!K%muhdKt53(IQDkbU&$EmM4~QSlpt(X^ z%WlD2STL0INGy1_K z9!ay?$T(SSxtdhBJNpl3Cm+f=bbOyLzG!Ye$-hU>xY&r^6V}H&ubGIKwFu`Jn@5kzyhu}lvo-XY1SbiCI% z{xQFNf*dp9F%Y&_W6gNh>W3vivP0JXW+i$-+)6sM`{`qhdA#5o}X32Ka;DI zIZY*8I#;W&Lggt;KAbgsvSTVfC!gsrtltu=Hjpgdo_vCr@rQ?a!~oXnOT!Z(GTqsC zW6m5p_topX0zXa_2maz^zxr!;)}PtDB5Ti2(bg)=SWK&vV15=Y%ZUC%#g5YF)4;)zshAem`6bL)U3F)xNJVh9x7+2KXey!yj0XJz)>(e<#kY_kSp6 z*M;{Oy40ZYTV#1$A7_er<=N;kd?}{=g<9H4$2|QXETY%Y=An8`#*m(vQce3O@P)x_ z{-22ZA0}pR=&@S*01}^?KW2|=D$KOi-zrR4fn`g?kDXBa9K&B>nOh1Gbc6m5G}^4d zl}YTJXD`WXb~`>lCSE10WjU5;!csS4@xHE0Ma5(mtO$cV3+xNS#J++5#TL-7(Hx{)NQ%{ty(sJIwc6T|Ypxj$Y%PPa)coHe{p^SG=( zo$CAr*qS?{hNMp>^oC{`rSPsM8~u(wSuuLEcv2jbKNGJjknDaa4}wE-g>}cZ@4pRY$WT^aC*< z^)urc*RyS3oZP*0JT)td(>FVAQbqnN);UFV$@|~TVCs=5Z_o42sR(Wy%4f&AIyv)4 z8w+0X-M_GX5^XNQq^xL}&Ic~TmEC>h9sc2ZDW;ci*sc=n7dj)k)qfP9v!=Zb-u2ba zM3!xBtjJ#U_n?|xC!JY$KmX@B(|TvsCV$pIjc2dgzR7;Fr%9TX<%{%ChYl|o=LfRw zO=LNMZK7HFOKr39Y%!C>4;ruXgdl5IrqS_LJYCBQuj5h)@(wn#w-D(rW2G9nauW;h z0fS~@NG-DD-Qy+rwh0bTu+Ml*pTY|dHXfBG*>zZzr_}YdeZ3fUJIhYxyJ_tXQSLDO z{uDB2vtlLj?paJcPoGWMHWk>C`F;g{d_jk&Smbm6Zt{P!5%0r^cwA#f+g}ff&)- zl7wxk_R_k5>AB<@#*6aiupLg7f!AHQ-H)X^7}1~PGwpGAhU?CPVd?=-r+YHQZKTT( ztUL_vd9V9D`y8s}%w_-0pOSN^kBDD^#0|x^`HuEt(IQw8Pj2YD$1rIz8L|`VU2WB7 z+vL7$$j6rBQs&aB&)u4o53y19L+2^QY0h|xSEmxg7JixB{b!IS`0QUmdXqD=wYvm079*`oowgXkDh1RUA2~H7?0CEPY+*$*1@zW zYK$q(hqBKiHDun`!+BU$PmlG;nkOEk%+uTY`y)1-O}-ECX^B`clZS7n-HXP^hy8nt zmSz}Vj?(AZM)xyWH5HiK(J@(?D~U`y9a&G?ptpeh9~!B$(=a>hvQOt=hK#9~(aSd@#I>e0y9f%Y)02E~t=OY7 z?bl-OQLKA3w70N$PgmYSo1;X92|i!WLd{&0-8zT!`bB;Q>SG9hs|Ll41rsp&Q-~z* zck1m_b?2KG+?r%i}|y8YKtEOibF2^CR?--0l61-F2Pw4*Zt$@4RoA z&%ztbUQ?CeZEWqY<>9``Gs~>T*{qFZe2g!qe)3T3uk?aX-#>z`g7kUq{yEM&!?-k`+upuSNZ06bdj_|C%Yf0OZUhTl4(P6^}0YSW(R_Gj30KmUF}swGhT7l~iO&$jrQ70%hmoQj5PoSl`bbJ+I=Z9K~> zsyTKn`FlY%>&m8){tc|Y663CMeNk5LL#KP3le_QaK|2#mk}qltzJ9EgM{wju-?zh~ zd7{Bnev@x@k*JZ{Z8wNh+aT~KI}dT*gZ#Y`#BO2HcDVABR{G)9AU6LPLdope4~Bmm zBeJJ!e+Zn-c2AKoHN)~Iz9jyfR2ZAfW8mI6lIIY`>3JP%f5}oem~UL^`WbBA9)B~A 
z->!|#ST#;7hZJIEP3%kF#ymwF0mBoGI9XMZb(eiyT@4eyGiuz)Yxi_svXa##^<=Cs z=bGM7$v%-p&TEYe-RSu>UH-*3?_>SJdYO-S0-OtwMuRI`oAR6Q1Na$X``Dpi3i^S#umnXcbrBz)88wH|7D;=Ml{?_inD zc(V!5chY4X394azSo9`aX1z=mBS5l4*3nl+spnhXVMI^;?Neb@PoE9>O%FfE;#@Hd!d;+$OCEo6ZWog*6q$Y)cHke z@||8g;$uzH_2$84v8$c2Cwt{~(d#q}$-DRtxPBn*la(;}QS!FzMAo|-S6_Bbv>Jiw zMYVLb-ju-ujOO0B5a>MwpV zlJ{p#^8?P!dzleh&mG6huxo6D%5&pXPs@J$)z05bysSmq&&0FFJU45mijcAr#6N_@ zJlyMSj7;vVtZ|<~_8l}m3O;q9vcm5i2z`q6vwix4eAoLvHI%a3BjZBq*7nBg%gMHc zK3T}|}u z3$vzT`}r(9g`U@G>nBKz(bEgg-i}S9NqHgZQupi{{+1_msUwz(HxKZZb4jvEw9A^A zath08OiqTnRb&%MV_5APS)lz-UaMka${go7{Ts@~?**hU$7XBZS z={O_eMcPU=+&Kl`X(Xb2LYuv`e5TQOAbY=Ne)1<7%aFFLsFkV=wa9%Lc_xYvUCBC` zmKm{IkTCDz@^@ng0zM=VMv$J?H_WbVr zayWR2z9vI(voP&er9gL`XU!Is!-H4p^kjWp`M_-BA$?@?xiJ-VMoL-?bx1k=579quK*nZT*$X z&xh&tdDl(H>%J^hi9}bj?I>2i5sQc5+a`VdiJ#fCyH;yo!zDWlp5X5je9m20DZcg~ z&pw4*kLjTSG&_pTxt~ft!wu|oE;M%MuiSyPWAHZ5R5EkQlkU95IM{fV{k+SKobC19 z6k^S>wTSbA%20MX4Nn{DsixyIaI2#+w5zebtlwT3QOtF>@%mFSpeS6jR_PNxWq0uP zbZqMLgY2I<`3<6au+IMcUbGA+N@3jR5ZQxnvqjnOV9}QB3t^M@=ed7NRjp*k?#Bbi zLg_p$w_*R){N)TfPbx5CxmJ_6U@sb``c`WQCV$w=w13$Ma)lmJOK&nBR;KUCIPxSN zqw#Q&VJ=_Iv#I1odl7c!eV2^E%|(hU#lVr;9|M7F_1;9xtcE423p`e&e1NSUBJoCX z;7EO}VZ{dQG6vtvYhw%xrt;t>+I-Jb>p&@Y%F~VL!&xx*TDP-AR%0aR^(gV?OPtBO zhy{hQ=Mq?^e)y@Zb_u`Oq}?Cko+qkh*e})lrejiecNZg5<|9|K;sn;X4)5CGa_Wd? 
zokVS19?epb?=45O>u{8qk~Ig(CH(<*KF|9OCwW5d)`|Q{mQ(oQFeBlQ7?!$p*=_JI=N|y&_gH5IWDhpJMVE=r$~uvY zX`Y-~XVa(zTW0?CuJLz-IMk30^Va%U*Qc(=*N!F^#HYL_HF}dL;4rpmN4tAOj;zsN zOt+n^eILI6gGXy?<;4 zwC&86GmLQMeSa&KWSzq#ICUiTS_o#o^E{5V<%{E4r!*TLM}}k&{a#BgdH27KEpp~}{0_mtC-~N-#+&hC@O$jofV`*C;2d##B`G&TCe;dmglg`iA1TZW5<9Z0=`?Ie z)}d3h^Sa(^OWwecl`@Q+jGbl{4Su$!F0}!>a2d~NVbE-qGw$DC#_15I{S~Ni!)iLccpV{lI~J1 zH}`)se!U6bJ|s_Wl5^-^!f!F2mwU>I`u&~<e;4L{S$U8<@~lKDL+@l!9p;S1 z*xVL_z9M(lp=U>FGc$=1BGs?@y992TJN^Z?SD-(K#;c6D$q#U4!CtAuxX@Y6$i7X> zcaUXwe!baR%NvM{G^x7Q+4<+29rS_0y*zOXMDB!Db}VfbFW;hZSyI)+|B8Grl|BxJ zYVM5cWA+SeuR+p7MZ&x}?#f5<)a7dytS+L?WrxH0_9p$Ds@1Dqk#~t%dD_98W(gn6 zbC>QyjNiI8 z&uv$*`f$fOKy4rV%WB7JFwEP)Sw8>B3hmf9Im>pxEkB$jO+~BqtbGk>j)3VlOvv1M z5c!Adx2o1=>Z2RPszPRf=yEjw%xv%^*d4F0L0GVZUT=vrsk68ZDyy(!5N+EVgG*t+ z;j~)r-|Whp0+Bxc&5D33xG;skepQH$-DscH_ zvFLtAw=3{!GyYd2@!Q6yTDg{q(yh&GU5s zZJt?+CKuB+J3N>1!EbRd>jILOJ{eu-h#pty|61Rq>cApLkMsTg;{A8}K1SR3uxKZY z%Dc1F3dl;^dFJ%VDOVR7$#eU)?^Cn#be{YcKQAdF6ve^#^en!3tG|+S{6_eHf&;r~ zzX3OD!uf^52$3onm$1VAu)f)hHv2s*vemx4pq5^{k$D^6S+DJ(zN>e@M!uU=plm^-Wi^!lB(F2%&>p#8c?lqa80LOm-~*0WevrKGA! 
zTUuo8!#Xi^wTM`a^>4@Au5dY<^$+(=OWGYS4rC|a)Rwc1==;N?6K(n$zrLqiR{R{` zujX1w#A~4K>~7BU_v?A`zP_mnmppszK%zs%o9`f$>Y*=^dcHYc~lc{1YeP-+Fel~qi ztY2KP!1W|ZcDMdK;aaG6#<8rUzJiSoF~Y9J!aQZXQVXds{TSI7(CBzl-Q}$N@N7E< zJqV3^M7=*r|F*wUQ}uJvVyyOSV%bdbqZa1xf#tvA%v~aN4=nnE=5NupDl2`bjl0;i z6Qr_xyRXqLqw7&5s%aj6Mxl-5e*1>@+5I-19ru9peWKk+T6BSE72egCx{cu71amo%k*_UFG$XxtjM^O23<&fKBO9G^>tUhrL{rY{u8@a z@YdwlVI{{eNxoZ7?>KDa3=zaUtth>d~|cYvx&16FofY?DL&{jkXp+Wsdk# z#ix1@{)60kgI3x1C-IOM= zYp*PCI1pn_*J?7OU4~Jg^4F~WoXl3KO_v$&K@dysfMja@n8tawlloz)PmVp7y$eA&lHct zq}&0W;n)u(KAcvK=#kn7C-_{4q(j-_e%PMG%kzHdVTfJ}$3t*qGrr_KMeanN@LQct z=0a^gJYLdMX^7=%%PV{>>skho{slamsiohHbW<^@Dm@zUi;L)5Mc>I`k$PEkNtPY7 zKk%+6>DYr^Pt<4fjOD)c3V05L(pcKQNAu)MI~5vr`CfL3pUz+Ra(zp->caBdM6Qxp zbcg8m0!dS?@;K5@#@XauY0YD%@rTrFsshba``WWGQfw-Woqvf8d9yxO)JaX-$DN;S z%|%JM#lPeHyi3X!d^JyaGlx%Qnli9jT!;%9!Ln*1E4?SfBs2c(d|pbX=JZOP^^W>D zh@6Srk)RGHZzx3eqwwMs(dbMTeT)Y+7eh;W$Z*-}RnR(va z$Y{}-ch>MzUo>vdMm3B%$tiHVc5+vn3hSxGnM?)m(=lt;lg%-C?R)#29U;}UnVeir z*{nH>EYWs$RvcEa`cPKs&*#o%uiN!;3eFUBReN5TTy)8)@Vp3BT+dlQ(~ZS4x;E8I zR&F%Gj8tWxCGxz4jh&p6yp#2LWOZyjQUpmBfuG5@2dm!fnyyClJ@lK^8XfpxX>C6u z0!(F#_o0+3eaYC63JY(V3*L$s+0B134^5`0+U$FU|L60sfw=dCwsyaV&G=u_`E|sp zb-pb@_v}EJDdy&RLi{^b6IRh;ArB5aGSg0$wcR^q-r^-$mzt3%49fn=>DZDS)Ys7} zyLBdD<9^zC9`-dv%49X^&zh%*&v|Ry)?e?6Q447~7SreW{#{SvjBV{QYR1J!Z)*e;I4N!b`r>>QDM>P3Ku`zCe7< zYUnENZj$wTD0#+{C41Sj>UtW@_Hit4V!FXTdqY~2;y!5g)n1+sU8%3#b!|tRjs3eO+V4e6{>A4|`NM2=G( zOD=>X;JBUs$y6A5e<5XK-u00g6~!#;|XF+EleuM&WrTD9G~*cU?g2q zrzGpMi_*6_Y_1g%j>MW@V3G_oSrL-lyaRFd4OgYMdgA+1V}I6(&vZOr zyGQZ$we-r0?qa0Rp1v>m$LsvC2-`0r*(7Mb&Po{>2bq6t=dUd=JS+9HTCX@{v+lK@ zPi0+Il!b;ABFFA*n-h6pRXuz`-_H1u`}Nj1G=tSsNo1{g%)S17jvQObnkRZAXwnJi z%CXk3wENTCG}-({(yS%BJq(96Sp6>?dt7hXp-{)nstRoCkZl3YZqiaR`JCgtt4L6v zeC1hqIa~gK3wvs}EALMph92yeOpo_zKegxcwD|))-z>UR5>KkL^2wymDD$VY>cK7d zg9kgFJh4;EuDfXe54t^Ch-md;RszS)C*kX6HX~`Y$(30dwl~CP^N$fU%w0&<)f`Ov zlM%RBivQg}< z=&y^kwSvsG^!TT<2AL7ONdG0Sdx1SJ6VX#q*OcDrog*l5ZaRzsb2mnRu0#Hd${Hee)i;6 
zHfL%9oAIQqWGuLX?j6a{3Ca&*Xx?1cAa$~lA4Zaj&^VLSsp9YjIc_s{*U(3*dhHAS zT0FKEE*#7P*%{cGb@Q(DAvWKg<*x&2PIT4XBIzX<`jB?pvffLO%X`{xr1}jnZ(;Fx zPWJIMt{~qiS02d5 zlk_Y4aTKHZoO&_1pNETBK8qp0l-cJKMd8PkByJomQpsu_O=3^SZ27 zddH`Ge4AY_|KcT`_}~8UsU)VgCEa|VH|ymUk|c{^6YR=U(7Z#M0*O=#+>E{Jc*>9d zO7+_F@h59kKfuFxuunu-LSG5Q27%Br@HTsg@sb)Ejd7A>x%K~_s& z$9BuKwVXYM(PV3>JIAejAU^Z>DD!e?-UrXnXbi1VGUqL}F$bKC?+C#Pr->C`z ztY@wc_lmfY9QqZtn0M+gLMs*6lVkcMt-puocapsZ_78#6HQ2F}?{uVhEf^+)#CWNPVU1 z(dXJLBdX-N_DIsEE=Zn;{Gp8xMaSMg=V@HsLfkl;rA~DI(eS>PhFP83K}*S#nEd3) zkiLYEOk}$YN%LHOcxyql_R^H(*6ajK)UkrYKh!|JE zBKvxFuYtaSB-uyu72eOF?dhW9^Caux%5!lc*?uF{TGA&g(!=6sa{fI-##i`gQyw!1 z(s@>1gFpSwe{aOIEw0Kwp>njyI+@S-`W$1|?`)g3?KPY=nhc9EFr(^iP(78eo=&%9 zbiN2~xfdxD6`;5~&dsV066tIPth)GYc@DHwH2?y-XooxddLT}RI0MqV990LnLxTCS}7w+rS5!Z zaikFse1txY>D(EzsptNJIpr|iI-VSB#H&+TGrL-|qWE3c4K%8s4wLL0YKI|3M5ANr zd>4Dx#)IQs`w~`X_FE1*E5zTd5YF1Eyvffuxm(!1g5yz=XBGKK*CqpfR{T!T!&&4W z>fegkGMp7sAO0ga=S{*jyzVSqORm*7wRy<%T<|er&eA~NaXCj_{9{F zXNqW){WP`kFXyLXz!vAN;tko!I@6>cC&%EpDOrF#-OQo;usMwvgY)>QLk>ciKxDZU6m_6C5R?mC*E}0rr(>9nE z#n}DD>-%u^D6OSRK;AiKTw5xRM8^_(EXD8ovf^mGxF1IA=sn)pG8pC+$WX>_V;F4{ z4YGbBb%K(IZ4M9pgFQ}S^Dp%ACX9-;{LYA;-rsiQXu8*ee+kEjuxnMVCt7*DyZV9WOUdPJ%t>3I1Yr`h%AbJ1>_2wxJMVLGX%?gEf zSYJalUE|6;u^V5IG}&wN#36Y#Q?oiYdBRznM4B&ON%$;M9*vzZ!EvauBD->rFweY)uE_@X2rE9yqwXkp;|4z6 z+V@r2u@3pWVO1DEL=<>YJO2@7&w|iTj{eB*XR}eBfTfnv{>HgI$+p;tUxrPNW~ULN zLY@>R+U5NBVVgC0?Fe|$gpOO){3w=Qqdp1N?se z%I!?W!cQ4abfEB%g2|PfpW_w&mn20C{idJ%mhzi_D`+$ASNK2h^Iu%42#NQC$2`Az zT$|!IMSL#jQ?bIo|0Qte5o!@X*?_wn+y6m0NB{RvhMS<7RJJ9xTdIK9?iTV`H!Y{# z*dUny57xWezRh>JzKHM2`EGv{Jq|rez_gtKu_W30z~y*^9*5|AAwmi|E`)1)fi(-^ zPB0}(In(rfKmq-OxZmMix)v9zjhyvO#!`N^0arW)BhEz1Y%yq#J1qo_j@&XE(=QY( zQgAYZ>=mebl>w+13g5u_`g8Arg4oSM%AE$q;T-)gZZs0z#|xSh4IvMp_Cx6Wgh~5U zQ-Y@q2Tu$5&l*@>F3>xh;pcGm*9474h3V^JP7$If`T9z({;8mw$2%!|{Iy|XBRAX3 zCBNk&-*JcS0_?9G?>~aqA6{boX%hWE_DZ>-*z^DX^S`YXd;WjftK5IvE4GTYinG9O z-`IWS0N?L!p`7pbELf{VAq=*2=D*k~n}lMmJl;K{V6#}O6uX_kV%yj)*7~?$tC}qH 
zHA`*(Usn6yHv9k6Vt4!>wAugDV*hXUS_8*21zYX*y!c%a6RaX^n$i-JguzGso{lwB zS-gRNGrjtb#aH8Ww(3{IpN|~P)T$_^RmPDdj<^@^vsM2SL)b>4IxlaFKtJWEvfOY_ zmdfW$03I+EsftaHxjKtlv+QC&(~Vv{B#UPc#j>Xici+0ftYLD0q@nH{p?oi1^rACT zZm%{bkH*8PVsc(KR?y#VT04&)jdb1s-}U5cr}_6eZ0%5(c>X|}QKX$nrw2`6df@7JaWfMvTzTd^rpF(|`_~vMS{4qajLYFMGYh+5aQ_D+W`j?-TuHA%v zO?c_kCPZ1@Q%OH5H=m7j?e+Z&Oqyfk7-%FZby8L&F;*7GB!zBS`Xz}~mNw6!eQ{`) zfL>kw6(#*re?3Bu_98|#aU^A*GW9BA3Xx^kc_5P(%0RfbcvF;=xi!inuaPDz*RyxZ zOf`k#Ml$p#?Ln@*L?4&3Yf@fix%UznHqlZRjI9)-#=>^8a}MP-&%ro1cO^xL#KdLz zk%d9~icnvhUZj*jFBV->u*@HHEGZV&m>Sw2!x4C^WO6v42v;_pmRvaHx#7Y)b^^{i= zr?NsLs19I-dF*?eUUKa)=ziwRJj}bBo!`X8U$p-fKBs8o>riT8(ptpzeV}|N%!=dr z!8HHKRoP_wIoY$Qq?|riI)ay6!2<~BM70xooIZS2f~-2qrx9(VF;VU#mcGO?4`zprbu zxqiB1qj|$|IMs|?mpJ9|QOejwTQg;GjHXBqVh z-;8G6Y$yDS?rAFv;M?=arwj2gX_d06IU67bXdw%ke#80?`Q1a1cv1`3;z9~rXFivy zy5O3oWBQY`%C<(PtWbxSDgMo;cp%=jb`UOWEq9k zxw#za+auVZ6{~&V`tq!sv;(_~iX4SGDFT?3-3_((A5!HOu9dk(()lJD?xm*&ets4m zvN-=HvX7$6a#DN-yI8mq?AGG-3S3?8{47IRubmcbxtyORUHBLg>JygyQ>!Tl&>xn= zp_^O5EK*47!2vuv#V)cHZo0WjUb|<(RbJ*L5lCC!`vXZ@`@94$Rq%GI&q+7=5nF#o zj-*g|3hRI7Q}3ErHKEm0(7(fYe;1jfXEs$0)njqJkNIyKbRo;VSb7PaFLeGZ^h)ZC zJRrzR-L~}4(&$R;(h%CU&B)60!h`j22wqPkX-Q|#V}-mhAH^3_mh)bcrWiv}%zjjx zU-<2#_oSn%Po{OwO+oW_3fN|IZ%I~2*`K0rNK!O3+Y~3WYnFho;M?hCxL$_oA-C%( zBazV{Mc}fX{4z&Iih-43bET+qFUhL1@+VNw!iY9(QiN0)0|$^{KFwk+=wnH16h zh*jAlm4qDem=vq-@9&Y=)>pKy@5-d?=;>(EAARha%5+MMA5W8FES~aP;~go@%c{{m zi>-@@8%c%zEv!>6EH}X~V0*SBm*pePU0;=)bNxR~J7suIM)()Vm!;xU&9{>rE-4%{ zJ|^j17W`zh+h}@SOZ&&*c{W))Xgz6HlU_b);get_7{2Aoeo)9gSH|=b5ImP1@`$k` z);8cxVSV`7k0n#MBV2C6A4;-!#^{Cx9rtkVk0g5!T1g$5^b|LfYBEW}=%TL4m|NET zrL$|-VOnBgmh5dHX+P}UAhP9FG$Z|be9GpXjPEH+y34HNaP1x@4o}v0Y1}v$+LzMt zBB*2`%!wrWnKqf9HrDpb=H|Z{Q}-n8eS9g8>^m6|e<{qQ&eY-(ewy+`&+@O=+46O> zv=rkm%R8=tM=j?*>FC|!TrhhcK6yEjEyB(H-!S7L^g6KNS&qCwnmqEpTaQ8XRuUv- z@luiTM|kvgKX5Y~@AoqnutI6>k@6p(eC$b9@i1 zZ|T$J`br}9H9Wj7-_1t$5ViY}IzI(X zhw!@<`0@aKNhQ7LM<=FTaYegy|X;@9xWuDUCLY?fsY$W(voy5#jF#cn?+dnkueKmQc}3P 
zo^BOwwqyLG&Yn+~6=o!BMEPaxzK1zVUfw4$%te^+3~oQ7#T4_r#u*O=&osu9 zVHS3d#K{jxeuP+^A{$BQH;tB8V$cG84-=jWQWWZ)MG$bQs9fv$%2z2K9o*ne33Fq1oF1i*G(RO5JAc z$bzI##IU33QI|~1;6G2_b3~?WJ?YEleIfifDc)eaqqUS5vL$(4cbxwR8Vd^Zh#jz5 z#!r%5YOV1o%aOC;<`EI`Y93ODG}#c65{%RFE{mHN`czV!zl;n?OSKR(9~y67bKOvq z{|LQFut*8jEHxhM+HcviHrYnj`NH;KeXihQ}x+n2=Cv{a)I=aY1`HvP+z zb@yV=%*1jJH<)iF@l83}mxNxD=w-}lAVMWg^?5L>MUHGIOv1dR?(a$JY_O~ky(Z4@ zg|DM%mnT3gT>Tkoj_2oDzSscE$BL1;tI3POv4y!^5(TU%yiBOfE^Fyi4^9^pTIp-V z86v)CQE*wdNxZKP&0?ZNl1QD3LDO)l4ScfjBFm3@VpBc6XMD>t!}Obiy*(j5MeE07 zdbYV_Vdhrj;8YqdXT_F!90j$Mv8t}^l(#v#;91X-p$Pp}u-*w!xYRkB!DS)v*<@IO zU+?I#nJbd2CPkIA*k}f3oLWG=1q&7v8)uQM5;>x25{8vz^&}U2SL@m(lkq zdZa8x6EbfXhcZh`@!EA-4-yBGrYQeeLSQXK8wEx?zr5he({*_mmDb&>f3MtQ3 z1alS|-#@~t#NaaQ)2P6g%(WkbVj-;H72cj>WG8_$T#mB=v@JI3JX?R=u6F=8E?q`1M4q`1+yS3__MMkF!ckA4n- zPKpg=_Lagfn~kU`@=(<*g2qQf7Dui*GFS_Icsow?(5ZIJ&>6a){4ade>Wv-#fQ%!8-rZbIPeS zaQ)5tJBg(?I=2lh|0#II0Wi5nAN%vZHjuiIJ?=2_ByHp%ESm|>H_S_)BEtr;wm7|$ zuI7F-kH5)XMz3S(UyUuRl0J(|cF)MW`@GBVC-}Sr8r#|CGd*M>^6$>RoX#m1c0Y`h zfNrojvjhH9%~98wPqi}EpMb$h9P|zzq%=!XI~>9S&xsn*y&Z6ZXQD56JZy-Glut5PJ>#FBGR1lPhzp zVIs~0&>O*Od2RIxWUiydIkeh9$80fqi?p|i8E1(kSFu@je9x1`z8I7d{0%KUz?bG~ z?QpnN5**XJBF1GFrLg|fjl>O$@dwZE*3@lvgKdqW97xH5xg~tvbQS4R+xIlEdn5DDD?@ylB$%x+xTe8V;m@~3q?n=6Dz~o<@nZ)R`p|p`SJ8--nX=nK6 zYZ$yGmXxL08g1lRR~B3p!M2Hb@Ca%C#jgDz&|F`wN%5s47qRMl_?IPW{b1jQjDreZ z^)64lj@++ny{|ZNkd{V6HL1vwHa$|dhtSh-uI+hS4{TYCJ6ZNU&ChsOt%qt8@$G2s z%q4Xv-rAlQCz;t3qJKuu2|hi^%1v2r9*=l}J(3iuhB0QgK3dU#f09q4%@XtQY=XLk z{L$nJm|aZfnqojTT%YIMsbbUsK79n_{();JUb30Q`|`j`3+I2V&5Ow}SNmB!kwxDr zYt|b7KA_nFc#>3HKeJ6s7Ehwu#XR9$?Osd6cm1`U%}>zleDUr|2xf+`0#@T;T~9l) z;1eQHijI6ImbN!OZDpTNXnhmbpW^=_j!c4G#-tmJEH^qgPYyFz&l9ZnbjZTWl+8Sp z&O;!#q%b1X!HxAK?dVup+Gmc|9XB$Ed0LFA%wyiu`*CDS8i^DZXl`svnzY&wY!CO3 zTsa#uNiUL=?<+~Zdv=p0i%BuD6NfVw+6%Tx5OX#jH}`J}J3q-ge<%0NuCuX(>_7Wy z?(T1r_&1ilUUTh9blZcBdtiLKf)sg*n7LjD$X3C?ybnmpjyxgyluhPo?=R=v=E|wi z-o@vNLB6<1Ig!P(Tz)wHhT==!Sj-Zik_>7xN!n`b2(l-6Nvtu!dH;d&dANCq7IOcZ 
zG^T&B&UBWmPlCtDTMF*EFUa%zY}@R~dvfQvQx7RjR7{KKVn&wlmuxxBh~5YzPlImm zAKuW3a!4wa@&$?4IWybk-yzFoq#21>cWbLO1WWVfEDD&0 zuYb}oGxcn)PeF{0P|ck{59jv4rlbxz2Tsqh!a?L1!8#LIJ6KLHVBgayvM-yr*J367 zzCjDSm$w%e*^}NW&(to)jjFD^lijmc#?{ui}~a#wq6LOx14pC?=OQxGynFZX%ZW3bnOT~Z;G0C8^3qp z-`9{>0oRR2xgY3UlQmL8BdM73Ua+h&{8YRjXh!_5c(ly#hqy9JZwHg)cW2MnXP%K> z?b@H%U_QO8i2|*}w1zB^d1p3xXK{XS)=kl&-6^QY8go*@b!cJkFwz{MCi`Ro_I43A zGqc_>Ng0!a`N^}Q`8)`x7(#0-t%k3;Q>dfQntZyRe)5bz33-#aBq>B*fcg=nO{vT6 zT286M?!G%ldy{zF2mIyE!dTyk99eqat)O+bZYD-&4!M=}M$mB!{3aW3Uc&m1U{aoc zPBcq;!uKiE@`;&I3b?%u*;$UXAk7k%-Cx8^3Y~Uz&vNgN>6lHTNsKxXw^G#pa<9^azb zefaeUBo;vCe2CU2Nmsu=W7KM~?=q3$8v3=P@0ZSBZ#Gp${P-Q$>(C?HFaCDzxj1nZ zd2VH?ZEXKLJ1yWnC()uCG;bH1o)^IqiLyldUWk25<7|L@!*5b!JfrQIKCi{DJP~N* zuI3Y3FA;yf;9ZNfm8X^u=`YU!-*%*f7@p)LW%+5IEY_q=o(gm)%`S|{rsAZh7)G1( zSnVNmf;;))I%hqDNlCJEjP~c^z!%z2VfCHx-eq)q4G!aBFx#gr;+zVtq>%dp&b458 z7kxjc_h6_rCf8F1?GoiuNcCBLB&F%)`1uyO)))L?1q)|eMYiC6$*aDj<)^;M6QxPy zT#FB@aPM=m{STNmW2cHnzm%}fY$wTW_uy;aLf{kle*nwdq1n9UwMO(j(>$>d<%VI$ z!>m8twZ&lcoVarpRI=nT&+JOmKcxcKvC|Rwa<|CW%(*p4vf9|WL)72E8Y5V=yfd#R zZ3`IPsE6a=x2I8e44hJ?V*`$@;YaOB^phjUYW+J_N$KjdSSjf|cKRL6JDwGDpPYg|Ch7o{^Yn;q*)?%q_kd&!yU+uuW0RLI%P}5c(ROfUUiK8Uh998u`@ifT={TE znqgQCe5u2u_YyPe;pM(Ct?jHl_c#R>tx5Zxn9>UqzNFDEXJwoEDdNrExN;&iv)sKN zsczKf)i{@}YTem7cOCuoS%VLif?g$8)Hhyyi;GV}CdF_r=ND_Rxe@Me;L{(n@V0XyUbe!Wyw!T2&g+epSy7V|7+C`nJgr&K}Q3XVYP$>+*Ct z&zUo$`reqFxqsfQm!kVH9sIa?iA$)G&CDXN)&15NznJ11f_`EN)$I&p)iBo_jbMutU+k>8& z(X|#~OOx<*%&83H%$a9m+!QipvviUptbkrI@gQ$go}uLl#-glLNP5I6&`Gj{#H75f zyc6ed($|~X-JK>Tv%=h6U5KAa&{j-)x3ggbEvJxg?!~(Ds-%>x?r0^_W&F>x+1%Hp znDw5HW)nhYD=D^HpI%=Y+3WJ<|B!Soj-F?_XwJ6hLF^SWXL)#g7RtJ;l&k&*t7>59LY&WB?qfc&voM!_*}Xwhg^to*o;?p^ zlbJ@~&aTduohb$TH`P~T-dF@$UHL+NzEp&K)0~|ddI}w6KHOZ)&a?Z>Tbi(IX7qW! 
zRe>h!ottfLXJG8p{5Q`;pCxgYFDI!@R$3Ir#-aXdRp3Aey=0z|!hM%OV!Hl{7gUOV?~D@la>rJ-Fp_qoOWv6$!B95%?*X?lzO5{Rr1Z=f@(-nH-fA`xTlUp^ ze@v){0gL&5O&C{#YJKfzx%(^<4uSbE#?uxBRK9`llWdxG5*wkn#F%s*tlL5@_m5|r z`<;UiJ@oiEJ+m$(NyGPWeb`YRT3L==q~I6*p^_D49qG`(k#cf-q;v`QJ~v-p3WQ;f#3yaUT>(cG(q7nAu&kjay=y~VEI zaN#;_4kvqMh##u0W-N57Z{NqqdgM=HqA6rr?aFLLNKuOVBJC$Emc+Tm zVONVx8HwLvq+{+t-YJaW-}CEh>3i4k1K5Vd&G&ShlUvwP@)joQ^fKRN!&|#3yTxuifo-leGCq8`a z?<4{Lz&R;(pCaMYVbVd**>+nWFM7baAv=$Q%0K2&PdfJrQ6sbW(sa24yY^y(8F=1< z_758aQ}Avhsgl?u^YaU6mc%Tl7e?N{@TaWUI0)7kv0OGy_T^hinHvseR-Uyk%V3n0 zCR6m7IeE9jopx<5D`-9ChK{G}7Ivu*i*j&!2A)YM_m%ke4L#;yWDnN6 ziRO75z6rO_z>ph!s)nGfyEI7x zhT6E2YeqriR(Kx7E-4|HJJ$wA)M_ly7OU>UiE^~cllaowJx#ysF)?o`)|2`!HrfuO zAL%&=jwj;mWsq7(+I!4KpEe5qhvq3SQjuQAxMmW0Hez}<<`mOYlGL9e62FbzZTV>( zXC&>`K7KZf*x6p4H7M7SbATD=t#rs6h^^Q+N{iw9FIwtLhxx8AMZSlzIa{n!JmeTY z{5)ijh4l&%V2xw#SnF18enZc`&`nX$q;)%=w)=~bTQU4dtjJybr}%y{|9F7^Pas1Z zR+$FPaoW2~Y)qov6sqdXqaGHsGspbHPao%;4dui2nY9Bcv++JgHQ~>{uzyM8a!a3Y zaa9*qN=m5r#IJp|p4F4x3M0<3($iT8~z%xDk7v@sCX0V;KoU)loH9oO0 zgUb9css0j)=VMadJExf9M%+w!wd{vU3c6CTXb8)1^pessdGFH`cb?%ZXPfDbGh03y z0~_e+MV$DaeS3-;b403Hmx-wutWk^2;ytMv1bF?AFBHiz}IqqDcCw7r+T zPIlEOQasP=%8}|6c#Nk--g31!E4YS^dG?rOFZ+{ur0DcM6f1{_RuT+`P%qK62fJ=!^YJ3m2mbws^hqLC$JN8MocY2rwCW`m{DpDf!SM_>tE{cV zMb>|@z*%&@STE!0u~7euNqr7BHTUo1ERZ)V5Auo?S}fjje@7mmV+^*k8c+Pg`UE=?quzU*Q zLvgc*vva4A)nQL-qZ*HE>fd`Yq7Q40)$5o3e-vA?PCuz5Z)U6B+DQR}tXM9}!nO5$ ziQcl-xTqK~#B~i_If>6@jOUhFoVcP0r<5_o5IM|WopAyIRd z7|SM)IyQ^XJ`KUo3KCsUs|xt`wj+-i_1>ZTi@ab98@~z979`5^fTA?X$nz0qT;p>G z%uSK@NzTpN+cTU|j82)C4d=-Rxi-ZI5}__;p*~vA8m6mQG>Is(-L^O{`JButZSl9h z>uM(@=wi)V^>jTiKGK|Ns-AoBvFw;y;@iPAYK4nOyM6**pTItoNSnK)4gM|48vA15 zxA>CvE0^f~Y!Xh!#Ug&bb>1M@-H9n%T%Gms75tU-0aZ7;g@)ZV{Y@D*cZ z?xL@tcO`846=t~$JA;%xNmyCygK0IGcB6TG?p%gBnxaW7ouAoEE2z{b;YrSYNI!Yk z5F{sP;}&N%DMY33wA+o`C-}Cl^NWx^h1|1;;Clc5O~yIcwUQQDkF&wKSwUToWwI)A zwsE#T&#s~Utcib6giCts>_IAuzpIV^yTpwh&b*P={Kd~tWve|zfLn|iEm-gavyQLC zv_a0;Adc)L$*p?X0h^iP=dmnQUq4yRom4cBu**%@+na>B$4e=y@zCr8+vzY{O0sjs 
zu%6=1xlp+PFRNi6D&w)enuZ`!Pa z*E-tW#(%f6U*?R5X`v$t&UfrCQtU&nYQEi@rcdc_B&N)E%`{eh9j;kh@*w%D<5)@& zBw^3VW*UQ>|E1XXisM}%GMM!TX+2LpCy}EvX)-^Y!>f|cy^}fWAGq7!7*$%FD9@@N z8Zq7|#D3CP_t6FFwCbuXC~MelaxVl~bO*zBaDIjbJ#zZwETHq35C6Ndl*o zm%bc=tBfF3;d{B5zmXi{pg5eQ7Z{t8qV!h!=E>}J@Tww0S2c%xh>cI-`F~?gZ624W z#<{miy6D42wG)gTxmUdigS$Z}B{<@}S=qRIp3xDuC+jEg74M{RcEhgL{yAj0*cIVb z-U;Su#))*BYt~Rgv>2l0SH+Wu>D*X^dDQ2(p?J8vgCvSw%M!gIb|!o_JF6OB*<${- z0+NZcXRzoZJ~{+@7C~)})GFkZsEynRn`q+X=>#YpoGOqP(mm43RoADU~9vXj(<*|-dhPQsbn zwb~DgrM0ulzdNz=E8itK%2POUjqg^`Dfb%nj8c`MJdb?87iMZnOqsV@JI&ma?CNFQ z%9EJvWm`sq`Rw~Q>*d`;-s&&V!xPSZ4pK>}+MbUljb-jep3!eT2&`g(o+P~iGiuX6 z>p3oQY%e_eoGxEreaZn=!PfWxn8<1u!?7gAogcv3&KQ)$ z!hPtonC5%3!6;sQJ}sZ&>3czJiIMVA_Rjok6c+5w7w&^wUoqe^*N%haVl%fjbUFYk zC$M`LvSvTWV7;9vLe8YgbC@v(I``>quDMOt=8c8RWEQy}Cl4rib_Fa=TKcoa{4!el zjiuVK+SNw1O+2&|d3y1pHs-aT7c4muJByGe>BZ-WR9Ua`u^HKc+N*-Q!(sIf%guw> z7#MVMbQwmyA!4<}j;wYXhyR!|5wm#oxCZ4VIB)hrT{zi&f4Vh`GPe<2Ldqb=mI{}Qw2Lt?Hu$e&v7tj#2L8_4gL(Xl+)w)23DCC`X=?Qu7SGP3UCVOa0& z=n|1^_uhzS>Hn9O%aUg(dtFeF{%yx*vDGFPehZ3u?)MkwRuln}ivCp`yP20JB}7dK zZ(*B?tX{^E85nmyZA-xtpb!@5qgO zDoINI&AVc|mZU4A&&3e>6+iD}$y@c8JEq~zx`HezDmD|BPczEpt?fV&G`8ysn>*l< z+1Wsv{0_%Xj`zmdylr_M;@O#(`^J^H_6C_=6v+&G}>!l?Kte3D?lCH77h8!O{!o@u5)X;D(7koP-SUly;js%9sP+)ulIM205x z$Ud7CZOW7JM1*U^sC{_dN^I>Z3dZBQiWY-dJ;|}3#Ix3ZkM?g*KJ+Ba^0aV~>(*k) zW~@lkz-w9HexuKx{Pl9}PsX;3SomW9F6DpY{995qNTStSF(>H<*TSPNoaT}MCPXXX9jiW?LafwbAOH(7B6+lh8bC>hlDsD~Wz~{Vbl99c%CFYb-QUly4k4 zcVNhwzFY0O-WYQ>t^UO76qNd&L`mH7tJsi{a&J1{%9}b~oNj#F2r(^gn<{!JCYUEwxI1#w1n3~lHKj8jKaiSgOrvT(T;@LXbeKK#BbtXE3xGcHZMZ|>|Oi_5>@%~Z!~Wr5@gQ*g&yjYAa^`@8Di-qzmwscLO!-zIYK*Q z>GMyahiQ0~qFSTGn0WDV{Jw+#7tp#sJo2{c6jrLskFpoM16FO|0e$eJMM2uE^Y4!1 z2%eQaalb9rCV1?CCzCV1|vucabRbsXB;=< zm)#_f(X$NHi?VO_Ugw=qd#&YdXx{EsY9%8>ZI_M4ZiRbPn&^ z9}5$l=aH7w0_*NF6C*A8;l=chehaena$BAxm>cMYXDuF6`efqeQPbFYic1Fkl*UZaoa$ed9QXG4k^e98vkJs={;?QQpVTQ*oU#Zj=%1pEDeP7ggXZ#<_gE~uO!hJ}NQ$zq zP*|+3V0fM0ZiV-WWDl>}`uAkKX=+5igf`g`P)+w&(e0SULS^L*8C?6$4=hbg&TaUsTVGMEpq2AY~DD`z{m}(^doc^ 
z=;cN+<3=&;D;mrf6SDH?bb9}Zr}KHzJbK*Xy6og#PUq|qS%D3YY5f?VX3+0`mYB+t zXX&|wE0&3xQ=L1;-@lUTCVF+mnQO(Gf16Eb)yh2W{6V+mLmBM*UCzk9>#Ubc61Io6 zo0Wdq304tPXJho8{>mQgaYlqDY<)lcMv8*>@|-+>I017jy8c~}BfGbI(0nb6{;0hj zVph`Do-IK8Q<==KO*shiY>s3ASr} zD$|lV@B1Q|4$Y)RRo}9Ajh$7Jk(?9etW5KEK6}4%Sl& z;ts)!pGEkS;goli1quJEQe* z{K&fdyv@qG(N;LQTHoJcXZ96c?u^WT&w)ykGJgomdHgtc@vpPt9Ie;ac71ZDFlNTy zJxRR~Uw?;b5*shmZa3#Y=jZ@3%!1!+(SHWX@9=F4fBh!D=6zaO?Ean}-|H>=yfV|c z*$DqQf86MERuzBligNsE9{>B;-0>y7+^M~9U{evZ-XYxqg&5M1^d~x+bu{0?FRR^( zLb$QE^ZxMyGBzelR~B)`+&_0f&e^JK6X%vL!Bj#fodlnESB>+RuKnle92Q ztJyD+GMRrAxLVGb+fxg*@bq0ePc;Mo5psE+`v!D6(SRwiF@BqpQh17n*;Obr`N?&1M;Lw$o3_ z%WdGfxp#hETX_=yJgt-cBRjL|iv$O;<`k^C$heo3`gt0hEP}}jv|XFqSnh7P?lSg1 zL*8|Ce#Q4ip?(G}@;rDodvxPPQ`qr0UiWWNHYMOwWM%=2j-=fJ+UI#?3Icr2B7=-~ zE5(&h#l{t4&@XU3otC?J=@M->(Ay>AO$XO(!I(zyyc`YeG6E6`ti?nWrQB!f6)? zD>{0=cs7Rgr<3gm(IMsY7L)MHf=8ah-?CDs75t9TQXA(UflrsP(uHh)zDSiF&W&jD zoED$J&z<;_=P}>n^kDqC)66eBadW?t0u-n5idjaflrTLNOM7F%Z2cymM(zj_p+0~^ zJLk3FN5g5EVhdR_nN`1+>+KG><*CP5?C7GuBvdVp0X0cpj9!mAGFV#!$*|rzJMg0= z%Y3Sp%KYF>z21!p&y%?$ebyO+deiVjBj@R^--Q#iA+;ECzltyiu=)X>wSKJKnc7LB z_B=B@jC}32-U>I?7VeemVtoncJPV04aP~rWyqx`C=JnNC@oBbBve@tScM31g-BU`A zB+B>$D@)1HQ`}_!4X|L(DwVIV-f9cjfg&u4J)FM#4ixgDbUI zgl{Z_+nqR{>_rQCQdZMsS9=BDmf>CBu*y15RqiyuSc^+}iu5Kc=dEs69N&{QHn8dI zEVSBHN?yROA|@<`dG_(k-j}S8IV<}V-{ajqai=ml@}Brzm}JFm=C()BJ`(+c z<^AyY9B1{T)h)1k%(3jmOs1$&h4?ib!nb4Cq4<`y<H5sd;{Eu@{ZM?z z@kMyjlfU*S%gy9^igown70F!owYWH4KfjTFD5!tTz}->Y{vS3M{@N*KmE^h!0J$yr!$( z^zBwG90I4}Jh{3Eb{ne=pv5k2uG0Qh?t7Ar;1E9f6i++f9Hl?~CiBbQ(96@y6#D2( zvQj+bOIqcAFU5NP!#8(7A4mqw#H(t~IZFik4j1k+rd6QtYkKW(ba_(*oy_*x?~uY< zM?vA=c)5hXCm&#W7I;HMx`pMFs=bV#&v`;}Ze{Il-VAki{>Va_ z8MOaQAG6u$Mjp_gR_#QZMgB^Tn|okhi!Sq>k&;Ar>-Ao^HWkUTUh@QYtITeV;Jg90 ztFh`gu{Fhn8{y0;dR)y4?}+>H7~jInJD7QD&=%UEU!7Mku@)2`zDPgZKQn>O$~z*v!z1alyeOhp%y@kbITgXhDJB(Ztc`;NrP#@al}IVlP@!QbEF z)oR?k5bDeQ)+hTXo6SI>Qejr6MGbC5jDtcW3)q`>O zSM0sdcSA^(9XH+i)ERJi+d0Xin01&JKyoCFn$xF@Sd+I0$*=dlPh0RRd916s2N|U8 z%=Ld0I-Y7RTJ@$dD(aPZ&`TGBzE{ 
zuDy88b@c4QzIn@-e0{61AX%+vICiI2Iu)LIrQGp*J|+9mshBkt#+^u*_2xC9P@EN$ zhw^H;w()79^YWbHO@9?ZXL#tsT2dh$GUBpKK*px@-7{H$P<#RD)Kl_0)sDY#LQ4l z!mSvt|G^uwhF~9TyIovg1%qdB^ja3n{nm?)6(`rBJfNE~_*?AF+WbM}*};3BfM5!$ z)hOH(+(juy;eB;RouVQsL<6pyI3as zZ~rfv?gVVBsrw&zDk>r&DoF?_WJ&`Sp^>7A$`BC^A{3P<8G3t@3ZYVwR5TbG-$ceT z6%8^=MM_13dAROHR{rem4S7O(< zq~EjZIeD|@8~?LrdwD^U?|IlJ-?k#bzD9}lt{oye)x?~R`uLwdSK;8lY*g75-}0AN zuwfvsT*`t^Id3O?w?JkK8lzC`k%LDYwZlAmj9)@w#jio(k zP?-cjlKEYzk8*uIyk0FLHN)*>AU+XGThsLu`h-i_ebUY*kHYO24mgN;>1F;mplR84DT29pYo<}uz%*S>sjC`Z62nF6(a9#&gr7Z&&&ko z(W|yz{&iJ$WoHHR1+L#F&hHJ&yXnycVwu}#ol3^jj6;d=XKHPkD>Eyc523Q~e2~u{ zA$okF*X(8gwqVQA_%|E&S2(Y%==;8Fi@5erEr;WI3jKvX8j8jg+AE%IkvULo2;9h$X;vomYgvs)B01^UPQh` z*JQLiktGK4zW7bDwY@9$riVNm=<1B>kpJD)zZq}xWGC2Ve zT54gYIfsW%GD8?h%3ZA5+usN9t(sbX&Zo@MCXwWBjG674J6WS0-j(uqO5nYSMOE|hE zw|C)%^Ei5h=Ny=Fu&8 zo*5TY{`nJjNeQ$8EO;I5E`h~NRy#pU-{50?ap7}1E^usn;f#~9ZzB#Z<(bvBHd3T` zhHr*po3VF>xb^~@90;LgO-gp;zr~skEY(hYzsx)-D|j<@|IUsn?X$ixHZ_M$cCR&| z_qk$YE0UavMUT?+M-k~>WB>E|JC#mr^_sQE<4BqkpYQsd9B@yIls(M3D;pJh>8&iu zZ|5H~G37F69Ye|#4}O&2W)I6;@nDg7bT0{-<8@nq4fkm(-I{1&H9PD@(@J_goRuGh z!Hqs`6@43yMSTLo?8frT;woNR4A-qOF z=h=ciE0bxGJGTwau8MitPu!bC^=a5rkB89daX6eK1|~OZ^4gbhM%HBJ>CZ)YH<2d$ z&?uShcVN*L@uh;6viBf+tF!8$pT3J@$Qhz{6>YTD!nwYy1pQ>pnk}A|qDL~;Cu3_$ z(_Re43a)-gjLuH(68cQWs(nPuR@#n)r$RKDe6yM*>qJ&yVlqnRO~kD}9j}$_*uD&& z@3Mc<0!#8{>=|-4z^W(cl_GmDlPH<7mhyzV*|4iN>(MuBT(a*XSwbg4ufOwt6?L*E zJCX4{l9nRbSP~o~zGk&ZQ~b#a|Li81E!O9)|Mj%qMb^Bz-CIA$`Kjc(ttD zoa+*LlqUCZXyl1p^3!f7LCVl)Eo=6B57Bp`z+?E8JyrKYAZ0vX6g^uPBK~~;N2|=w zUuLJ4WJ?y>QP4bwrT;OC4H6M&<6QD;r8>#v0%sS~Jxs|yn?`zGLhq+ACY1>CzUpk8 zOXk!ezB`>PiV1TJF8vy6Wg-) z=}bo+;x*g!c@exPy8c4P+KRTX!?mlJkUeNA0~pM6U0Jh&%na9%=q)y@%FkLr_6h9U zM(WX+kzLMLX(d@D-_T2C7EI3V0lfM$k`ISKQT#3;ZniIs#pMg5Qq~-_!HMic{+Xv{ z4da=RUr(Q9dVP?_ElB>ERzIcDD=g9zN?8q(H5r%Ubq&%cH`5ByxU3dBz@?oQR`QpJ zojpc-$;gm3y2%E%nSW+i?Nu=C4u?hf@E03r1=cKhtQT8ehxL*8eh2<^z_0o2Fdk!e z(c%~JEKf$>hG0EDaEo#30db}qNhZTC@xCF3CClo!Mx`t8@eM!8Jzb6Nd4oAwyIY*O 
z8X7Z5JrG*Ql60lmIf|5#W)umt8zXztM-;5FH*S39e*Q+RZ9}@ruv#nr{HDbVX;FvF z->}fXjvS+{#n}2Sd%wjJ10ngAktb`?#^CYQV#q&qnZxo&igmB~s|&x%p1gh#y#Pz^ zWZ6W6uZ>?1(D#4(T5d$2$eSA#81x#QySTOsS@(n1*O1Jr*JI&Z!I(Cf1}pXUK5S1T z)5FGt!G~O)k%yw<|-jBkP%$!S-Xf_#t_s!3+%u~E=eqJk#1MB^ra_5hbVe!jnw1rKv}ABfMV!=E%?Wh~fj1YL{! z-?DP@mrP;P2`v3Pq%*%LiVs)AsVA;~A_86Rcilp)=mDQpOnTAX%(0H#h6TlGyc$1u zk!TLDS%W8E>NS;{D&YTxFiyRp9A9aNXnaiU(D6!>&7 z4mHBnvyBVq!~S+>|4ouJ9M8(9`J^aXV9tl^-x<>Vct{sKn!th|7e=ycjI3M8@Ugz% zg7-2t-e=~W*HKIjUUZ2LVe<8;MJTB$De`d*<^y&eb zteS7YdbN!2vqYNj^t0SIPm0jL;^akaFxz-h6=EOb)w@`ieaTx`Bs2V%$#8t(Jxg+b zJZgrMbz>9tbt^8kVYByLF%eIC8fn&&qZ{eA8rL`Zf0|a?XmuPkZX(ldu5RMm6Mbsu zU*2bqz@*{YP0` zL`&_0#VpiZHzJpiEz*3#n zzBHO|CFcoPo9E=6;dO$9uU4(n|;NUzWi(jW=|0wv7roklN)gwt|oi!3^L_Tf4tZ=hMdU~x|0l*v1HHgn7fVESzpqJm6E+K z``=FFqXS%b8*Gy8^fXr_H)&S?HPOQwBX}d)?I?`z4?<))$$l5}S34usz>b0YZiwfN z#}3hPz0U*rYI17T#MACt-XLaV_jpHF&M8QroUebty@D2o!e=%Ndqeha*sj39uVFL? zvmeKV(U>)cY!^dwsH3g@{uNg9vHb=umf^Fz#OSYhR11CdA@BeA?fEe8CyGrV?F5o7 zCV4Bd|3vamgXuVD-{II=9{8efl6|us-YgZx9wy0YESg;E<8b9w?4QIVvp+AH7i#GD zUNiDn_5KuDrok*L=`$K#s@xeveoaP4YbuEp|Ic~|Pl6J^-$wN3UH2R7=bcV_k?cg;pJ8+?ORsGrMCQ^jyD~K_55m4t z+IgQ1^2ROI37`8|#^dBFxSKy^JwR1xKIdOqwk%KjF)UXNryF4J zt90l}pZ>H+-nND!-)ihwK<_-muCK>Qu-;DQ%)62itq)YP8~7;kJJowi>H8Ow=jwVe zJsUGGC-s_wrg=|!n^9@IzE+WZ0LhY9F*~^y(flJmu-)g}HD6kYS>NK>m27gH*4NWz zGfW=D`Ba6*!g)5__wGr-KbT^@25zy2B#+QtI0I%NV4bX z_N2L1DF{4|aksF<7*>CU{tx?I6)H98eKx!Q4CmAVIN6BUl~3(mz~@&w{Kv@s7q1(| z*FS(^<_ePvQRp-O*T=L}4tkRo4f)SXeN@rr#|6Z1q1T}_?Lw!CB<+MH$?VtNePD8l z)ueA5s9g!8Mf%Npvb=-LQ;^rilk4#9Nqtrm$Btpw@p@dzE7m|Eck5ZXk^SPOVKe~4 z-VyO)QX2>H@N5XfN>%-TJROF(D%G`mV8U`<|1>82?W&5rVyh8;8?2us z#|flLwa|?AzlmnW$XAO8{Gi9Zq4Oers)%vN(Rhq_+n=pgKqK$-X5mqu#w>#2p509! ziK)FroBg!)fL>GeY@qStNQgaLVC&s%v5Lpf#imEGL|GvIWnFJm2=~Q?ya| z*w;75LV75stY^Jc1E^}GS|)zKBdQ#X8>uz9h_tl|vZet0;jEH-|MlYVoxWeMpM@~! 
zf`#i{k?8%eo_^5oCYY3iYz5fnE^r_EU%;X_z%cdpinDKGaZC6fXY5WEw-=l}AJ>z4 z<8eABd-eeQf1Cf0VVh*JEeDCRtUVI8$q|vdEWPR83(J$qu$Y)z(a3cgP4e8QyrUC% z;N$G~D+DfdR-TyLR_MK(ks~V-lDF<*Ej8hD2hgRmE6QoBlAf{_dIHSzJn3dC%ZiY) zI4d_4xeg)QF!G-T$w%OmRXGPiV7VSrf37SBpF#RmxcLS;sau$AC|R$WmFQm==5dvv zlWf%~zW$7sCK%gOXS1JZm|Th9k*+x`cIiFuIAg7u*i#-mk};ziuAbo739g!`rQus&rlB^OB zIA^Y7x3cIxH2w%T-r%V>iK8uB^fsbrg+U{2ZSij{8;uYnO6y@FBzof3IU-z5_zxBj zFN1I|I%d!2WS*A%;qUOtIsWRx8ks3{Df~a&v6k4I3M}8^Unl(~7s!n?nU5Q7pq@3M zt&FgFyOnjokMqvm{@;po{luTxc_L2y>Uj31CU4nZT1bxf)Xhkin}hh))4ofVi<^Cu zYygizr?NBdG1EE>UcX^v1?OZusY8Q{oV!&7JfCG=;=NDMp*%0!z{=T?I-7?78ue1$ zr9YXk=F=Y->xMb{UmUrf9Wt71#mn(}JCy7ir z<)HB(AJ01YaQ`q;G=cLU$k>Dxcl&vn&9Wjab+d-hGF1>R)^D;hH6~#%3|P%B6STk4k*rL7mwe6n zP$~##4aW@eHG8=Ki`i$9HFez)+CLZ#F8fr&k~l&uMOCxfw3oX|Yfn^PJV6 z-ID1bbyp5H7M-A{oO_U2SS{agqQje_-*&Q3WQ}qna5EUSW09BP`6Q{kIkN_&ZeiEV zNDqc!?n4$qW*f9>;a=7O=Gn>=mQ6OFTCPn_>SX^~gHJ77J=4)g3LGkljsM{nmzXuA zW<%?OOfMUA-=o{htdmuMsm5Buzp-TdKy163hS&4Sx{h4pyY;krMr2BUr^opEzWUw8 zxAMMqsq1!3ph`h+uQ*KZH}s2X-1=JHg~Lt>#&CvOQ+U^)ZmD&w6uNB0G*( z;CA-7-s1CS*UT}JC9}XY@OZ=wVU1|A({+ROaFgFzb>7KU$@=%czyJ8Z`P(4T=1+R& z9zXU>jm*pO@)KzTacP*|Mp+PsxM2RD`*rWbz_H*{1&7<-}WE}Q>?#ex@`euse zFX;bTeGJFjSv+zPW~WkQGM}svqms)byY(k(c>xbu&f<5H>O_Oka7fH3k#Dr^D&&{W~lC%qRbb{*8E*U5sC{+6&ri zfbY*kIZvQ^KrHX?>(OpAG?L9bSx>7%XO^=jz%Thu_htK&{rd^t*`M2y6ixV03--R- zTqMt;E;rgQ6!TuD*CTj5iZutY#!2jVo9j}c@K)dNkH4vxSPtLrh1_AJ%&N;SIF)<` zukx-#$aI9ZTj}jzENm%$|7)z9&6b;59AQLXEEe{~(Z+O7wW@o_azA~hL%S0sQfuZO zIuy~?Zaf%|E%BGkLuQhBIvu+4`#HX8qlH&oyBz+5jOGI{yE_&f3(I9B%r4-5gX|cbthF)Z9HN)2_(BbKTg&hI;qn_KzZCoM*#*PhAGI%0Hjk*$-nlb7-xSZA;OAm}W>x$M!a;hM~xsyRP1F`oDQqyER(0dsScdnS?ip?4*PcFK`R{E0om+io{d{C;O{Ww!cMF_k!5;A?rd@8 zZ$Ej?I-TV=>La4tCHnx9Z#Qq^{^1kU^OwHVV4V(;T_Y4YUx;3!e)NGO-4WuA?v zPT@Q)&4$$htokNDOE$+^5W30#&B*++9uCsxWImFN%e;96sKTS@XP(uJXe$_V`rmDU)SHtW*2KU`xh<|pC9(`YU5N@ zSSRo5N%)XzT`eJSiPo0m#TYh8X7EvDxC>Lah(K?!burqU3Wd!O>`0p>taTKBOpgAY z{?A=y?nsjR;!&*56PjP?mKAQ;{$K# z>v40Wy7=-sZK~tlr&^kZ%MXY%y~XIBT04qw=AJ6=y^CXbdu@z>ZZX!(-rnpgeqD@c 
zM(0kXT4>a{7qZvUV!rRIn^AuTspOeG7ScaEbGn(`gRIg4^F9y@OOUN2Mr_97N7;NW zM(@cHf4i|W*+{apGZlKXJAbyH)b<<7LoOrDP99uE6rWUJ&u`+#+qm~XnxBK4CE%0X zgH6U{` zklk9Cdu^j{_Vm~A*F~AcdNcoTZ<+1GTlc7N1ovVC5OC65?;?j!Y;KBbb@!=!3$#$QR#*^8Gwu+6a``J1ND z^*mP2`if<+yqM*#XZP$mTI$MC5WHO6OwFVNTz!ri>QUs&Jz&;!Tra|uHzRN9`n|N0 zHQdS0nMzMtKb*No9mu9)>zgo5-je%WT}nHtV^E(BYeFnp2Dj}FB1Bx__9buvQ?)>}i~a)!^@JAMNRDm%KF9p88KZHOkfPM(X*_Wzr_Ci!I+lj}#E zx`#}MuyS^`HD{$CXxg0oCE(W_`zw>UzMfl?H1%Gy&Ze5{pMiSjY~%HnYO(kF^e3M! ziVInba<6-V)Thn*l;jzEOw>M|{O4l*87+z!(Wm|&vgPR13ic;xKQrvkdQ9fiw~QFm z^*o&=@}}^3h-PQu@y7H0%ztX(=Xs9Z?aI72KT8kC8?p0NXu0`E_B~%>)XUv=Gi-l> zl)oAIe#Gicv>6PU4$$Z?V$amW*W!G0zVJD0@)R+vuJ8A41<_y)KYEoOjeR~$+w<`0 zD&t6IbXVznvEKGn`uL$R!n^|e*GYAVk@RAmIe{%p!nT#8v&D|)Bx{c~AK}2Y&b^rh zFLCA|kvcQia_o`p!Doq*<=JloZ7=cv7mzto434k$rR&AGlP9sa7VNu@NnFyu(P9^8c`F-b!XwYJIIN$NXuqKAD9c6?x9%RZUoOFM6F~G@30^jx+|{ zj$6q#)ryU?T45p?ne4Gz zD3bP~`Ft20z@P8b#|Eu`&Xe2voGiax$hcS}Oum-ySUfAGlWS|Lo?h|yMOwd_EK{MM zl~38dUdy<F1ry1$4)XF}t z*pCb)M9}SKgTFbikCvv==XPfv%|i!6JoRga(0mrJYV7v}dOq#b^Ln^ZBz^$`XJgLu zu!=`zcV_ndl;9;2d$_(R%xp7o|{D>b3th3_JGA89oC z24?5G+a9RD2F_3JkNSG~j<$co{}6uB0aI7QWDaR&!9UNT@5PeK@GNWAyNU~m7ay}& z-b(Z>T>Y=P(oEy}W>S4d=SsMoT^}o0FIj}=k#z%G<}F}WN^d4_C!FX>(@GE>B@V@t z*U_LnIeu~ee7$DvXjfj43PSgjV;q^bWAL%0n#PwqkS2E_Pl*j*^TSX1$>F&C5y{)} zqrAa=iL6hNsh4XHqFL74{K2b}sU(#7YmNc6N>&iSMXbmh_xD8lNrfk0)yT4d zPN{BtIUg%ikbD}eXXoj|kjq|=ygSOB=rI1$mOm#aOR8KIVUI4XnY<~f_56wRYT;h? 
zPh1I;1z7Mk$+BYLTM>N$?GGkZa*3r1*q1aZ0*T>%?@jjX)TvU?wu$zK(rBGC2SaF> zqw_?BStNLuRlAYxNw$836set;>b9?A*yW_i)AjnIz+0lqug-cJqyHe;H`*R2E*}Tc z+gbk!de?Am?n#r!=Xp`*Y!)2o=nOWTM80Gy8*fCv9dEPCY9PKGE&gU_c`A#(LcXjN ztykduLj5+R>7BT8BJ8KTsyExr;L(?fh!3*itx#A%hY}F0qW2%1F_uRTV9iu*&)d9h z&R-7M%lJ-JzLNXjWCh$ewqy>+$|W_HBuGEBO8E zTD<`J74*nhvH6XGJ@fco{ z9deI~e?|53DtwM6Rb{f=Li=iP{Q|yur#6C>S3&k-+)K^g?;%q|9Gz*bUP$xLvF|?8 zrPXq5)e`qJ+Z{}|ugI4-qj_(a*u7ZGS>4=-S9T*o_624yO>!8PBWbd?Ji*GT1$GbI zvL7P*eJ1E-AIGZUTk@R$rS-Ou`ktk;&wUBxs?hfx_Dz+EBk;Aqn6bg%HTXjA*YebF z82epK)@QZb3%9q^suUWvVuZLu|+vnKd$D5b8>Cg)Gl+5#9( zqSH;}%xdsdFiGCbyI6KJ4?CG&^`V{mgQ>_fN!0p^4F}@NXp-jXbDsO?ANf&O>U_gb2c z#F#U*ly~Mwy8b6}9!2w4^mm9)$#OKD6)S0F6}wG>L}{%ZCnih~4e~y)1Y5k#XG>sO zGAd+;MyeF-a$ag$H!zdf3BM|AlK1u56;PM2?WX+>wj9aQmqFz=_~$LmUPj5xBX)@2 z&){ko3{1AiZ?!rKKF4YA9RGeS+-WB3Y=7XfmgP=y2&jD7o(Y88t&<$aMSHI+Jw z4u`OLGFg-*%^C2@Q}^Vtyb=e`p+~VoTziL9Bk7$L?_u~wIFbqwseF7cq%#ihYkV7` zk5o~sq_uNJqy}1?gPj-pCN;@gixcmXGw+~+>9~QM_w(}8^_`s-m+QZu_O5qMS;zMBJGs2u_`FQt z*`crqK99h$3_nj*n(Sr%0e>IRcE;zmEOH+!C0_j~*5)}ta?K~_@p*dqN^I`{gJD>E z5xzAMH?ul#A7f>9eRqK28GdKSX7Z=5C1;)(9?$>s1S{FfYx3b8&PZN|ytU0t;sb48 zO{3(t*;gBR7MC4FJ6t;)vYq%qO_f_MJbJ^)UU(C;a)k!2gizoak zUWMs-4mjEW4~bWc^pr|8Kj>pWF*E#IxOli;PZUGT z!ZSOEhUoP<(WZto@9^EXt{+Rg=5!v<*B>SKJXQ(b+ab}%cTe$=rL2`Pdc2V{wpvh# z^^@3g5UUi&*Hp|qljkkb>xG#22mCVI-q*Qx3Ub%Q>|PKp@2st&c_q3>&I#nn)Bfz6 zNG0xbFeN!lUewkG)|p}CFG}jvqB+@!o~L)0lj9J_ONw*N`OGZ3t;5IEIG?1~i!mV{ zk_uFt^)ZTmXR}ItsSO?+p_enfK_Yi@x=ID&|<8)a))u zkf@O?UcW=&0N+gzG2Yc)ai6p6FLutm;DIbsl5CTWUzf7L7(Y#nN}0#6(?V*5WY5@n z(E5&FKLz(Y9ofppP3W)^w;t5uW;*wQZ(G-YNAo4Pb};YwFAwg@I#=rD3mQBGgJtL|B!x>--)Bi7c>K7v-2^v zev=g;m10g{RAB|p8zvAQIyh=q?i=SKfhQ}Ayyy6dp82J})FsUzlf_V;R7WIgx{@@GUiQyZPk zmm9I*XU@nP!Z%qlt8^a4{mgE%%PYGP=i*guBh{fsykx>!%~zMRVaEA9ku6W|=4K3E zLgs#>#{uN-Pv$G=n(81Y@yrpAoWElX>O-7e)EVD#Z{z>$jq`f@PN_E&Z zuxLb*KKecc>-&%;xp*J+-G`8$!jo#dBfOk7x3KmXK3^+x)9MB|WQ9vso>hR(Vv>AXxKo(U z$5QR3Ey?4R?_+1OQq;ll0}IjVJ2IuB-WYSz;k4Q5ygG2(0LMd(KB>2tvv0!2YFg{b 
z3cr%Sx(M*6_T}5-=<*~|N77|4EPdexwpHYCp^kJXG6a!YgZt{1AT2wD*jY+e2Uq2GWg9Bhll%aDyx3ys;_C)TrbCye}wk-h5mZcq?g{tJHH|C zYRdAbvd&((wUfz*X&U5&z2kC%&$KZa}%XY@EqRyNs7A~X9 zA$%rpG={{4vw;0PF75x1QWG3+GUjBd7OzK4~ zK1R;}_<0FR$(TG_#CyT0{3Z;hV{9KbsD=4Ih@;8CafH!7`_PN>m^;XjeR};JonmbN z8l#RjLOlneec0t`XiR`yMe;Qlv0}xn8jEzvebWkmwuxCsX)o2w#yT%q<)*l9I8Nlw zFl$`1a;FHX>q0qIMj2IfIg|FT0}dk#C;$>B>!CQG&aKVWj!QISQRZVBf&8I$g{;K zpz))YN3!kLbiWindFNCXPY?I$Q99*WRjM1^q5ZtmtVx=IDw3uO za~a(F&anp0xYx|%H-6BqfN<6|WmU_1quvM}^|!cG)Oo44o4mn8UHLf8vc561wkybb z5uIxAp1g(1O3pK!opr3kT$vqWN75*}_nW~y>(`$Xojc)I)&W$;q|`o72D>ZCv>t=1 z_>||m`-x1Iw31zxjj=BG{$)v#8Dj_XB!9$VB42Xmw}SlhkSOAsFQNWC9v=*$C9tmn z`G0wPRwrlXwHsUWX8Sk@j5JEz59Q~yHI`Km!?|01pWUS&>H87qg(E9iK2PGZPI5bo zSjXz~UC=+&HzTklcUTkn%`MufM2cVZGX)OCVU<b$ea>gYi}r2=_3ep;7p2C?W_ z+G-E`5{|Fb+I&CL$do)EdB3p8H5=G!C0&Q{qt$vH@0`|bwi{yKkoA#*Cv_zGDj19~ zl22ptROU&&g4J3mO0MO4E9RSSqS?(_`Glu0(Ec#^l+s%=nq*b$X4>TaQ!)RtJ}Ik_ z>lyd5PV#0l_rZ*HWbVxi78=L0>h@~fUCVZPUc8i-RL9`m&g~$+r!LG?_PbTo`T;}l zAwzOeSJ3a3xSrZ^_rgEbuj2#L3ffd8vD$(mj{#(tbtZA99_s7YXXEBZ7p1J5{{;y2lA-H?AvA300 zZ`9g9@LFRwFvppBf_bOww;B1K#(}3XHZzu9+P$6^SXo7n8hEgSk0p<8s_`Xj|4;by z9ZWhw?HjCZ$o^Lt4If~aeG9(!j&F~n;Z4~1CM)#TT6W#!4mtTr8|!5VKIHk<%dV)& zuGjN~zv*_Q|C_@(HLVx0*!M7a%=J%T>LOa*%{GJeon7UrN|-y@tq@8LyasT|dzAH< zJ=iyw`@WUddeOJ6cD9gow{~yh@mXWt73vek)YLJ|3dzT5bsfa7r%5XD&8F=N964RT zn`p6F>(`LtSUq*X-SS$QZcJUEy;Mzq9lI9t)vPtYPK6#n)LVX_Cm(Lu4zdPue z@io;m=DMrAg`TOnd#Dk8CzR%hM>~zGLur@s{#;V_fb1!HxZl~W=zba|9L36+*JmgE z3P}EKZa5cTYmsX!z3<0^b6I|Go;{a-*Fa}4SY$ok>t@5FNp&-i%e%7axbrH_|DxMG zOnv}oPsfxejH20lnQEbVi~BxYv#aM(KTG_y#*sXyoyUIbM7Mc*np+s^lA~+NilUK0cc(!|(7Cl76`!W4Q?VhduvN(5^9#eH9D;zRs&76G#jwicB zWtO^@M{i-nceVWiOh4qOMKI|kn&esYZXAhZsV0%wcaiZoHPtSpT`_1s$r3j}=N0FC zMas^8hGSB4ret+*c1fQ`$H(~8NU^I9S+Z+5Z}#&BsU75ZIno`s2f#Qh+>&cEyWm%_ z){ofNg6(f5SK?Tn6xKI#9N@}_V3aq6H|RaF^JJ~wUzqKTg1|M2%x+tEj~@;-@Di>qVd9`r)~o4ej* zn#sPfeIWM{z7F9ZhwHh6xOA!GUBu1oE!lwO$rgPX-+2Xd@}74(&BoI04>S57ot0{f z3-JFFnEl6`_ZfEYM4pGl;+N>2%4(}=U6Oy6cKmNw=J|F;r#42FWO?|-I8%l{N0OEJ 
zzl9I}moH=|*+#rMN3S>OGqrrmz;L$K9>knuA-5Bnjc9ZoRBH2*eO;e?xxYi|Hu0+} zHvh((K6chH{#r+WbH&d*z5HB@qoBJ$tDlOu-O14rm&arBPW~{B2Yu=IFQnL)jk?11 z10Ip=c0)y#%no)!XSvq1etoKFoct`wd9{qZ53#|)dcT0zrlQfOcoxKe!?R8Lx|1DO zli(qi&Yi_1f3>7Ts>)W^?m-YMjRUWXA$h9YhIKZvXgB(QE;?tQ&ux5WljuFn(d;?S z%<5X-{Ec79hmxGmS=*4xMR{IXfravPZG&$nlPK%dGsDatiz#e3hNc5p_!9E$<`EC` zt=x$=5;wB<>}(qRLjLk1ZC26#P3oWd*U|8JM_*ZoFxx#(vULAutjH72Nn|QUukm8Y z=Qw|wI6f7RQ&IhN94!r<20SWzcW0X0JP*sEteL!=3wg~8Q2gHUOK`m(_Iw2ItNHLe zjGIo&tu!7%$Nv8}3rWSwyyH68-B#+SJnmB;OrGbwS<6*~MSg|PFg^WBo9toAjQDbB{lr_cx@}hB>Zao0 z@z`4*B8{;m>!9)-dz8^jWMkMlN}v$rev&!_39D4%$c$7c0hUD|fRfwZ4@IC(cxP0M*|l(%=+vgz?6 zWqB(N(+enI$P^(dRkN&c@f5GUUv~4K5=MCy9B)FZ8=h8kkNXOyf8Y4|AlO~7RToHdl zfyqy5?>`XC&WnMCnDP%)N7L~Oe`OYtUD8>NGE^VGW9$d8PW_-qNwW-2^|5)4_FMCU zt@JyekKQ8QyeHbF-g4?`zfabgv>i*PEuwl!I<3OLtQt)f$0^PnjI*heeHG+0NBrIl zqBQjKKH+z0ok*tS>im}-vLZe?x5pY!TJeHBc}S*~{diE;q$GFLG0w;{?q^wO5v!)w zcJ5g!=q**#>hOh2aVXi!lE=3!jK9_54UmlX4WN`<-j@{aG_&&MBD*5Y1&Bin{8&7JBHjFjDcF#TXkfw@7cG8Q6g1v zul2hNEt73y3C$kzSL!%i373VioIu)C>sUkHHb%1SmA)3L|Il-X2sW8#_atBHZq@ex z6Z|7jNKd14JvPkB#P&4#y%67$pW-sMIFv4Vs=p1Z-Z#eQX=NDt0}opa-Bh4{O|RL_ ze>~YHvTt`>>(37+;^SXh$QYJv@%!=NMy{GDR{yNMtToJggrL9GxcL$e_V;%+=qI!9 z99q9^1nua|Mh+)7sA zBQUKBDR0N(x1sWZZ|-vRbADKo7oCo)H=8dkz>v(QdpK(xKb=bB>;QQZYm#F-`>c{l zEALAiijmn_yr0-p9&Y6^aVea#XDBn8WWF6uo-bXwjprZastd)YS}eGQXH3SEtYiB@ zr0MKu1)1_*DOE`-JO2=fZKd^eJi6DlsbhZ(1Q*chEU`T~qEZ*45}!KPc_;YEzKLYq zxL6FDMxx}GdfD~K7<{F_vXi6*4*f#o{bBkgCS6~+hxnCU7m_AVsYk;mIk_wAe&Z*w3}eTyhU&hQZ+-jLy6DRC!1xo?H0p*?O3b|FhgD>@T_%(^_NEVY>d;IHRBA z$@lt*S!{NpbR_3Al0S!qBUv|Vvhtqbc|E_1oyjk|UWDJuzR6;rIbQY}&BE#KxSv(} z*Wqph5u~o;t6V*qHIschPkXbn{eAQ4@uF3pye120s-d@q!d2ROo|f5$8 zI!(-aE)a*l)50Xam(ge@xw5OUggA7E&sCuEDLhh<;X9*TX%d~zhG&bdFJfqQ9`_jB zcls-pd9r$L1|4%BwimnI$<{Mzkf)e=9(NL{@>cF?F`yBRI3G@Zg*ldEcxv#)~4 zO7bPoXjL9l1F!zZr5UW99PB6ah0fyY%lf;V)XAbe+0T1)%={wR7#ictGP8z~q|Z~Y zvKaX;3k)^9bsRGfUv)%E#hCHmLq zE7v;n0^>z3elm*WMfLZ&pOtXyEym7;bfuKSEfbjRLgr_G9y9VsOi}Lxzf;p?IxeR2@%5teHCS=3`CZn49q4GPhzy1N 
zp}2KA^t!-u2m~*8#l^T>0<*e0zYGSa=Jfg4o|$1s>=;z=#O%YJ&th%)MaKL*)f-Qi z!3FR8SX3RUpR9XMz58UbN{-8C^id0%Se$vb%vY6Hv57zpUNBlXA5(r+#%+> z@mkFau-3SEi&nD-V>+M9Q}5IDd^R?0^4$cH`2`VwB%UNcg5C`74)dEfueNAD+92S814CRy*0Y7x`z==py)M|M82oS|WC3e%PD; z-GooO$^M};%4@HSKD+t;a?-ZO?5sy!P2YYv_bfZDfLHLG!=tXoWzBjdxlr8 zE!>mzqvw&($=cYxj)8f1 zS7onDa$#h}`X?l-tIsNcwH+A7}7R$2IZoGdPvwkICf9I4hu=NNnU8eVuBwR(p>k9Fq3k?Uu zV4Ne>V3?g`J6R`h_UqwgTk$?mZIa3LNz96;?x|K@k$lh6d$X9buX#nD%w<1tR(4h8 zyY-Bui|CPxiihdt7T71_L~4iMM5;NiXeyefYHHpqol1%kt{94uclhRFXC$-YyQHlv z(&SlI7{8sLG-r`-uq9PoQW54saWU%!Ht08bPLCI9?%+d5!{`df7i%|+N!5)3=3tdc z{wz&S;teZAj2g~K_WqOE?@@l5)lp>&ygt)4$t7?OtLC|5S<&!v{Ux8pA^Ls>Mu|P$ zUGpRh?k8fW(o-se>Y*pk&u0X zAN*^!m9;1{XqV~>-Jn&?+@%#cm-?MM_|?8oCd~gC15yp=I#KH*Sij}U<`C-!|1Yp{ zJUQMcXEJ0wY(A1I4X21qS=pZq$eF7jh#_Z^_CN4`hva){;a>jL1lyhy-NwS^RZL0t zyJT&@4+4Eeowr%~BR=^zEADXB8-@8!GgoYN_6wwc)XyQ>Dr3g5UEisrIu*-P$G!<^ zzr)qZ^q7j1-;?kIENzS@z3{4-8R8E8*Mt9_ouqksmnWd7LO=Om4uxwUh>p@u^3^xO z`hWOjEqt8--G4})?21_lkQIr^p_Cc`$$x*Am{1<)Plx%zu+KA}?AiE?B-iUd&#Vvf z`5@8YM7?JGtE`vyE#5PtXXN}I0vjQp6%-k(vd?;(`B_uZ_G~TPt%uaGXaMQ%u-miy zpe0>XIrkd=kQMFqwVGY?kjru8Z!xM?RM9q=d|0e%G1q+wDrZudR0d>B6g!ftNA+|Xj)0z}U$LF;96wYOp_N64s zyNA4qn9O%R!GtII_uh2vq4liRSk0Pqj6AD&w`*W-_=;!IffSd-k-Bfy6CP zDFVs4`hA;yQknE?7(NE4V|^OWQm3)Niz0eP+*JB0f*XgMi8RHv%f*PDu1PMQGa%Rv zf)}vX+azd2(v>(>LoBUA&b(7CM$;Cq`2{DAg>UwBKE*5E(8GJ$sf`H-`rTQ)$kVsS zM7r#bdmJOa)#n#Da3$QR? 
zo{9&buwC*PjB;)(cFUf;RQFv#_y0gAPm7atd$IQR!p)}c6}ph=Q2l3i)(hM7UZFji zJ|;mj$hH;_^FDqRt7esVPkdU)k~>(fGX$=2?s=^KDjRh7U0=Od#k~`8qX=84KKyci zb1Cdn(_wvK7WzFcxr9Q+T#+~W(8Y$;~t{=6RMj~1({>%RhAPIbJszq-;tInQ2% z#Kq1U;rMiaeaTb5!sz5vnWn$euKt>aQ+>+4;}`xV-_1!-DdzVge5rv`Z;&9F@QaJi z<9yl&i(8336>+cv4B9~FC+HmLcyiNT>-RA{J{d79inuLVZ3eC%iVb(^JNbb|vCJqo z%(|QDFwOmIacte9$Mf0sV8>GtvkWAYU%xnRRTLX*!1NQ>9E4@fU7vT9+pwW8U6=FX zc65K#x5@O;koF}>*bGO;zq4xxW{68?`&73f(SKAj75t$HF{a;=<8-b&8P4!V1^ z-djZZk#rC7pvo{@O}ewNwSqI>gZk_AznHy(S6A_R5tK9Ix|CMg39y7snqm7{G)ncu zBT3kTO>&RdoDUWydq#kx^z^CtaSor_gh?g*8?NPI^tm6WvM1mx+}K;6Cu8#8aLYQN z5$usQC%G5=kay*-A{lwlEX)I&z#@0O+2dA+EQgRP_syGWnYByZUAa+zd)~lIV3}R}=km5Atv! zE-V!l+ZXU!4ar|{VJD5Ap=Gjp_aAq`{OvkNt1#2dIMin0MEVk^WrT1WRiFh~2*=PHGrg+)eD0vlqX1Q-D z3F9_=?{Qw4j65?%*!PS`(Q&KDv)WjeCncBBG8My5fa*}Az+oca{+O{>e~K6!iJi2r>L zk;Zzt%($PN`&n(Z0fK9I!||>^r7&wO&rZ*=`Epjylg>s)x7XRKv$Jv+c&hI+v&rhI zz2N-|4aT}=nRDv2*k(k}TKEyPk3W6@$BepPKs-F%vmv!ziAf zH%hl@DeIZa8g&j}`(zf#JBLwXY}RsA#MRn`Id7hz--FH5`9><@e1V4tiY|3&oqL7B zVp}SH|Ha-9W5H55W@k=D&o3P7t+ni_%`@f9TgyWtYsAXJq8aw2s(0kfn%~FqW`A0w zvP!Cbq*8fb_MLz|$(c~snf2(B=c`SfQI@<}8Oi8q zIo;m0{tP;KYV$NSyP6#(OZrJTl3g0Rjs1D1n;ha}{XX2t_XIri)ToQTXY!2W{B@qI zQe`_;9?uX#Zq)wXaOxonUnIVkX01nQk+;GjAoYrv@`Wd`4fxxiBF7O} zkUDO6W62s4eFDA1VK|O{*Fq}qv?`IRjd9=`c27p*KB8R@vEoaJEP+o}LnTLfs)s*M z|8EL2*Eie+EFi})LK?l+_x#QV{MOh`{eZggr^+o%nvcHr8Bb1^&#h^X7hp0FQWCT^z1~^flzo&Uw?=JuWIvI z-<}JHO5#*@a5bjOr_kC97Rf%bg)b#D{FTl;nT@vbw)OfgWkgI}(JR>U20E3&@>W=p z>;S1|`wva>4(o1QNsa8;&>e_F-JG{K1|-*co{rp$Z+qilc43!sele2fE;O}KuPEHN zC$H!Myn6sEzs#==gXqH~yxAC@EWR6j-b{-Yq-qcOZel?8D`gybj2*^`hJWenSBPcT z%QRAFzeERjIHerR-n#4rP8GZ&ELydo_xH|i$dZR**gV{)J|aUYc$|b`{hXcEhIJgjj!apZR)eiSbLQhB{|bGj=J;xI zrqWbu$!Fi_4>)oQOHISfKD7Cd{+DClp*&=a>t5p%r)qtb_Ma`VzO-3zbr>bT%mdmg zqs?}FtqhB_fc9!Fzes`s?r!t8Xo#qhw;yZy%FS5&5Ohnj-@f8$YT9S5=LBeW(ML~K zIUcW`rbX7(7R8V6FfR8Y&l;_=>$(U{@_fHE?Qixww%MO`Q`e{t-j%|}Hhd`ake~T* zp4orsXx5M?qfZOo*;PDUfOo~{c@2L^W}60BR7_M&-u3PE0@MVrm^ 
z&NGDW(0z#CJdH<1@go_tKA}shw>|@t(=jBKYKl6uv1s!K-@04;>RK4zxZJ2|7D*MFX8CVNjl^`TAne!s&qf4FvsF=iRe|6uu7 z^>PEGl3BQhXDZ3{HkHQpXn8M2C&y%cV_ z`W`Jy85ffYwlB8b>}V<)jcn1!i2fTMrs~QEyrTlkf6HI8D|H&Ya`#?}CsZL@_87m# zA{F$u9;$uJ8s5`uUx?<}a&wkf@$@%GgO32#+%|k-K@9zuFCqt=SkAVId9?2cdYz|_;7=m zGY-xrXo@kwzP-lqyBwNz zU6U-C+u8Oh+OH+oe28sv{c1j#%rn`aGnb`qqF2VI&c?>?V3+rDnL{Ky=SQxNlzXxR zmv&8_Wo6aNQktwZe(ccFo!T5MzBhC3YVBnuYT5_xsS>b)44uh%HpZr!Tx#lM&u*S|w{~^vAm!~s?%^9cp1tC`c}vE{ zl9=-dJ10w6A7>^rMDqHqgx_HPpPET~>EjQ5WS?HLLGOn#YjNjC+GcI(R#!HI@(4Pv z;HAkRyBl(QYvXQOo?~8>HCh$<6bE29k zoBd2Pd46_QrPgWk3xBWwYw#%9Lb|f?smAK!n4C<2$%Fs0GY5;C;d~|So>X||GDb_g z;GA`8n_ZRn+UL_XJEL;XTu(du!6!TP^A_k2%sT?+jq$grcIv=2tKNdvt71|q9@iPB z+1HvpJztP)KN@r<-~C4XGyd zyL@vwtW#BH4ZFTyu*seJ{}o5`eDg~3cHJ#df6VM>Al@CN_m&uS zzg}k<6`Qi`t7OW~_3SxHW|L$HU(6P(3U^T1o&7Q1W%uuQ1@9Olrq-hUH2#=pA}jQI zv(cs%EIUBIhDOy(zCUAJx80% zM6u$SS_aZncym^$wITJbv^m;{dmG!IBW9e;!iyoF%(>aCJ%zq`<8>kFiqJQATxDT* zt(ME_XPtk^w2-%KJDi=FOiP}a%xJgro~c-pyZgkI?EQb8B~Rl`)%k7>QDHB-=AGif zkjprk>;h!KJL` z943Cuc5T+E%)ytNp)-ufRCQ&ZvS;;JF|o7+@5u_yiq0R*ey7p1A)T{RD^J7LJGuZD znnR_pNM}bj`(>8?5REc3{K8e&8%OV;;{|--a>!)QNE`OdJHJ%Wn@6&|k6CPl9;dDB zCK^ZcuOXaDRpnhVga3U^!p8o}x`9jJ{4$vrVo=wD{JH0zj~9#h#y0Z2gRi@sf2)Xm zo}V84`d4f!&Ke&=;v)S_r{ye&WM4^j66d|lXPA7a{@&Hnt9p3TNVA+r4x`lo5-oA; zL%QxFaqRk8!Ta8Utjujj^aHe#75?)`8Xq~Bx1{dxY7q0e`mFuNQF^nI64&!7-9xn1MHs00~b&_{` zXQAgJB4TRXH#XwFM%EiyqX(&%!SN)PJ8qV9n-rc{*YaHtdY&cb&J;Cop!va$ zoxvVG;noDp*Rg10eI%>N09@Rk-{fi519VO;<*R*n93D4d)l>BIGfBUK!vdajGYPWZ zHtYI}J1=AQmwc~`I8on~Ph&)Ouv~!Abulv4PalWTX{64R#`AdL5qkO9d?$Ia*5Xof zu4m=faWtPq^Ewiu+1u~>qNKIsn1=-419k^RLDJG zswMT)PTo!=&&;dt5xVI=tEf_Aq!Dc%`YBHyg;F z=Ml**(-luTvEY7MN$tt@`mcpIOGW17h0a|s|w)@N#2x1yYO=uZJPRfBew5k(S_`u)of2;XII><3e)k>&l--yaG?v$ zAAxb!iI4H!Yw*k4yV7iV3L6z8YfC!TFh(2!@8kWS8FA(#S?`d1`HNwBp6@E^DH&RJ zYNH7oHxQYxz?D&?n$KghMyn%jR={y7d;eD}<;03ATK$M_Beb!|Pu4;vUjKp_O))l^ ze~TK`KXt4$PG$AN;Vhl(IY+R?$1E@ob~ovFps}U9z6XoEL(K%|lYTYZccOcD%)N^J zvRj}XM6xe2chMuUA{nPLTgooZ!*D4z1D@04G7@&?x5>PFEv;@hl3h}mM}8otJV%Q= 
zXwb4CT}Aqqg!y9r^e%R!(n_k(rIPw4eN2H;atU{V>2?t-^~*YmAN`E$+1qm<)})^9 zYQI0%=kY!z-@{`5pC`3V^|y+AC3(YNJTr6p7xk1qkJ)|MTonDD-QFctR!G$sbuM6^ zJiYw~PI=RvT{fwZR!Q_->i?Da@vv6&#^YS*T>#Pif0k%dpGTd5L2uw=QTUDbyN+>e z5)G11;S%2@M{T!499+ji^aFZUYlQ#J#l0~-#%@2jyNrhR;KBUaMmG;`IC`P0@`+8Ts=j^uZ zmH9_%|Fm)BiNZXnzbLoHk^FQy3%5i1MHZ_^ zip*M51w6U%Dq>&>$9D3&bM#${WG%HdoObu}wKM2<4Loy)k{W{TNi?G{Kbp)+HQ_f4 z-;ZbUy?c3L!zfc4?)lUyn=h@45`1CovvI6xOTK_20Rpv?AX*r#? z$*kWT;-~3plw;Zda3XKHkR&DHc_qH(zGV|0t@dqgdac8}2c^OTwo8`?DTIsU{DRPJOspx%%cCr@!5)3Fp+Pr}}gJfA9o6HjBX_u^z zwb`QzYvk_m8+sg!C8?5H1iL zYON&BHFj3kZ5(X08jCflwQ-J!J&eo-4^{&l1WM)ss*vbdMP*`qNb zbu`bQ>*2;upG_%B>jlddPzEPI}g5_`VUV^J8_XNmq$oT>dh#ahqenRC8x9^Q!N zso=cIS#M)tQ#hT3|HE+o5s0=W=|GV+t4TY;b&kJN!)~N=Qw{q^h`q>~|1*?{hh_NV z6GrHfY`Fz*-eajXyz^+>NhQ2N?2>!Cctty{*Cy4&&a1!z4SdesNIiP?C_Jsnle$_Y zm`0vw#J$A3#q6<`cdjGlPsYSy?k~RAW_^|@>e@=M|1bNWV>b5(uieVZ2NittEWAt} z)x0~K=lbMxNcEZQ)XdtMweafBD-R{@BV@{3@&Rxt$Db=t%@F!$MH^RU(HLV zu|s%x7yBj4L*6I$H=_T7KiBY>O{_50xW7<4^Ze^in={CkJCuzWdzCm}iR^DWD|@8x zCF5d|@Dtx238yvCxrX*7ozcMY72;4L`VTlijwDs-pQj{q$kEJbbc4vS(CF8+-~-#~ zev+O$;N7Qmu57kg3+k=uypi2XL;Om1Sx=rfoOc}`-hhvp`ORhNOW-hu^siuFG8$)` z?aJau=_wh!J|XM7__&Nr=j!o($A5H9C(Ikc6VEe(-3yCklc`A7o<_=A=0I6zk+Ekc zt7mt@0-QRIwU+ujqxl5PyO+$Zj5&F})tf%g@s62v%hRl$u78N<*Mj*vTsRDu7Ww98 z@;r~xw~96GX_Qr@S$|$j-~D;bdF;D4Tx+vR1susL!K^>+W0o?C*2!HIe@tcPo9U4i z%*k1ty~oQ~<^mjil4N~Z;Ws_iCTB@{R$+xAAA)SLPYX`G@ZxfCJXs(8pr8Doc@vRb6w~qacudOvy}DR%i-?#iQhCPr4DQrrqr0_u0!)tM z-P>8BGW&1ijoEFSssYKWG1c#>`pdn1bG>ENX0nB6w?S4^LqjUF@7gwKys_E z(tbGzX8%m)+HZ<6cZ)Guk5f}ixrcrQ8diCVAlfW8=f zM}hDAVCz@f$rIl z{2<9Q)_lzVlW{0x!G0oC>V`IBz4Nv5C9j(zE|lZ*Go80hFSoOO@)X|)>BrdUX1!BqDY2H?3c7GT2ekNzu9p!E0RkTiZ zu*?~|(EkG{o`EfGNR~HZ#kH5WKtEvGc0K-zO-Ezv7JXkVy3a8NW#0G<$?wwN3HsVi zs%P0D<4JZapYF=}&{-)`oWlQ7K{|VFmN{!Y>*Z;DNvzn1t@9SMKA&vN#sl0jb;I%@ zj+Y?kxo|AY9xL%ENWBNqrJ_*Ysx`r-s`O62+gXsvT7+NeocpmU;zC6cCz1RSV@M5s z9SOHH=$5si$zXPk(QFBMUZim)2%X@Z)^Of|Q|J1W$`*M#-%I~BeIDlcE95#vZ+E&X zm9Ksh8P{qnwLV_JtSWRkm9!(ZoeJP5uuQUrXGc~s5h>C43^HdeL~;#g?Nl=3_7^F~ 
zkfSX9@}xPtpe7U;`xP8!YB$g87GPH|9NNVWf05^Lz13x>VKnJGG_n;EiQ*IJL-TIXig}hw_H``l@bL(F+>$#ga;pepgJ- z6Zy>Vvs36pxGd6Aa#&pt)ph#c#Y0o|?ppHvDwaJjYMl+kH+cRieCw#+lk_%*e%TB3 zvk38{{%XQHGu@NT$TH6D@TohwFQ8v`lFh=9S}?fGxY>+OZArb5{0ES7il}&~o>M2| zQg*LT!tRa^_wPub|K!8}Ksh^fQW0Yy#Qvi5Tr9}*zcyqzi#~bV+>DG(uq`{VEAgy4 z^lRmAC2N_7LiQDrV<2pk6S<7Pm-EC+N%;eR>Wh!bSUZfb{N@g!It#8Bb$*55m!ukq zTU)TDtyuO_;a=cU_N`0n{rtWiwp;wQUAz5ZeI153gV}o!NG`*VumWFJZCm@BX-=5QLmD$D|zyqE&C#pZ!lxY639H| zUuLMY`0Gg|>#4J3S(Lvg-$S3M*VCm-PTq;Uo9u-<8OyQ5#36bIKeWPn<=skB^ zv3J(cRikJ2lxA0da+3`&MCCikw4FR>vu<;7Wer;;u1q7vE$rW!mA28ZCuGXd;e2-N zN1k)|QiuOX(w%^7Ic;$OFHy#Dg`$Z<1Fn>ziEuSiDixJT84}T8C`v@=ra2{~R4U5Y zXr3C(G*Y27lOp08pZoiNJ?A;+eBb--ckea)*V_Bt!&65Z7EFZcj2F))^(By$5nxt=Dye4xd5^=-CO?T{aS7UM;PMPl+z4B1pu7ZY zKZX{nz`!F~&U$k>{65W=r}I1q;3_#@+wd&e%bt~->`>e2ZV5F!%_pCtopmI7DBW#m zOXJwlO?*eb*_LMC!|&;nnJH zfUm?K{J}GO;CElO?4q4%xEiAGe~rff@RL)4$t{w|v&p`ce5wD1*^!nnvC$Sja+8WTm1L+w6qeWWZ|3@-ox+o6lEfUZ&MJ=QM^7O21GS&i z*|*Zu9Q3@T_j{jBF4KO0&n22&FPy)M#&(|lj_i^(J#z^;$$2*KR1fz1kZW1*8=1%6 zjn2!VrHbAy?*}KMzm&eJs{ffKy^$wM_P4XpTusZT&~EZt^yg2KnPQeUvd?k=y(Owm z-oNG<>E~?55FFj6U-(bKS3D1qC-QjlXNg&x)$6W>F(d=y6zMX#Z-1migvmdUu`m(O|mA>H#-#X(raezwlov)DMsl3BZWk&IR z>!ANuh#p<=enDTxi7%+F6(4;ctN6Mw(z?&n*$;6T-%x~9Ceh3?H8djQoO|xU7n}%p zMIkNwG#b-yvQgxuOycTfRF<9mlgKBL-#YkBl-0>Jo#+@Hy{#s~(l=;IPWgA}?-16S zSY%nd%s8nkj%wm8JAFRZ)`c*47TR-yB-C8iOuwAW3Nv9RF5zzC9_fA+@PI=NGt6 zK9zxd-dZ&_r=jd1`GY2B;o>=R&s%PHe=~oWe4-PSn+x~73bap9{|!cC$CB4gZ2m7E z`&-;i!{6U9(gF&Sui!`YX9g!{TjugjHR*003}nabm7eK{_RaKlq!L%E{Vueef#0&E zvzo5|A(b21SI(P`uT1Q zyJ>xEVSHHwHV5zziwh&6qI}z#`1+PCPv*TdM|6TZvXdY?AEu!47hL5n?pYMBfV<7K zSrk2qG??AI$-bIcLi^C=XY`Ua!OUG=q$etg;^aVS3u7D5kti8uATJT}c4;S3B$|=S z7&P}In?3OUANBl~z8mnLD`_mJ#k#2PZ>3kl;9{Pnwz_(<&3jP0oWf@H=loOl&mB+74cKkgbGMON4g8h%`5F99({^X24|Z)k+|NN);!w z*T^U*2dD8y<;iA{(!oITLMCfpEAL*3@?C`8=6p=v&=d9TSS1=k#2u_SaW4C+Gr7su z6ne0epl1aAW$q;X_8WXgQyL!18&-na^dresb3K_Po^VcYlyd*@f){G+?*>(&CbeYFIUI_*K|^-_mDS!1R-V)9-ALyey<%5d>`m5*Kh=T^v-hQpnt$`e=Xf5k 
zmf?kw<9ztsPUcya8~`VYFqfP^r_%kEo=TLI&8}t!Dr>zrx^kqJ?!v`kP_<4Ai7r}; zq>|;h28(OqfA*lwLPz3xKEy9(-1l!XFG-sh>scy6M`ie&&nEYv=v7kBp4`lYwj}A` z>sZhBapf5tJx8Y_NjI@{7D3`pEghkJPC=DL*^elCS$$o+In?H_pNEbsNH2TkGUuKw zVI9b%7>WnLQ58NY`%3ObP2MT9E|(~7NANGBn?0aMKM+d3g}yCnYfwPfX=q%-jyKTb zS>(Bntny|&zkr8MtYIJCb287;lAo@i#>9&#MVkB5bsIipkRG5aeO$v5vnCQ8t){(k zB%iG1S9$vF!ua!AZSRlnOVBpb{lr{MOvll@{diAgm2f#*zX3&gSMH%iqK03m#U*ri z2A{gl_4n9vc9QolQ1dZZ5nn7B_@p28B z2gzBjuEgi(JYm*(KhkexH{)jLOAL?CN%B_rR(U@82^+Jw(scAz!9zABq0Z{eE|$+w zwVob7WJ@ajhhXyISi_j zkKzh)>`A*B$z20AN09of>@YhBvs1ktMBYM2+X{GEg#XM7od6~A=+DCO0@|*Lze;3x zAgPpqxu)t#R;lvpNx!;Pf7XRJ{zgB3Kf04)wua{>l4#yd7m{FY^t?~A4;Wd_;8A+e z>;`wQA?2*ZmE$L$QrCr0J)cCk@tF6Ze}&q!w{@MmT9VCle~*B)_kn68 za}cTKEKgS!mmS*0luq90QY6w9Uzx$pY0hLV%s$djj4Ki!s+2mq_?s45kkS~OG{@sb zw3+<|nX@^Iq>|Ag`%x1;trg5>1|a>~FkB=W=#l6ws?6TA8}mGsOn*5Wy~lHp=?O~G zPa=6Fo=s8puYsMCxcr7~u4DUEAmVYB+k^%l#Y-|joW`eRF8EGXoq5^D?7awnvZho~ zYnhXnLJ}3otufu@MB^p=(+vNUKOoUav-3Cc1CoDg7!+lV>R_76%)ko#R-~n5zj|GJ zx%N3}Wz0|!Z8>$E(ccjD97c~5J$EEO*BKWVkl4Fy=3aJM#l4+MU80@)wUd1^+50_# zF3(48&chzce;vcef6Wgq)#uf~TOz1sOpthIb4l_ee&}?(BnMvRLXW45^3YfkcNt&a zg|6AK88qHd@H4eZ`Zo2w!DsX{lKGyGO~ir6$bA-6jHjzbq}0T7N22FtnC}9ybCl`8 z1}=0hZwaR>lMHhu^enUKbszroD0Z6oIfs%(Zya5xuDo^UB*%v=wH1tSS2{Zkx6<{K zw0MJd_xJoA+AeRjFar0PANZNyDo-L^)PIihjd1%MEpGC;3mR_Vjkdr;~C{t`f9bw;6dJ`vm7P?#EVFA=M zh0w2j+MmAPA)&wUkawCyzD|^fhkX7CA6XMxi{=I}_bWVI<^S(w)fO_Qt2%$6HHhKIAjfT+3=QL-z#~=3IVqrex32MJ(%O z^bO@(X8JagOiGi-+XXEBq>W|DS1-u(Z75rho+d0W`GIz7>q0iuialO{{u-V?*S%AD z_T+Jyim!DLvlwTIACcYm)A`2iIiEsr?L40m*b|VD9de2Ed>@ON!Mc)jH_;^%g*juw z#J9~EGAZZ!{)vNlDg_op38?sUzgwh}Q}%Z*OdY z|H9#E@I4HLiK#V)TqmJvGI^D8tv1U`+{vd&C2?oI(s#7vO+KcTy}LTH(s8z{!}y7J zJ^!7O%iv`)yBq)!Wze=s>lg7Y57KW>mYnsb#JfEhCP$*_T(~(Cr^(CITFotRm+@SB z$A#)ltoSckbD~%zcj2?N^8=J*ekA*2+L3d8+Q}SqRz4DUW*&(ovUEl+AFDsRc|IVS zv+46rD7;xaiF1@aDLL(vU2|`0b-nt#kYOU-zoo_Ox6RtjI8x5JsHsL7>49#AjaejG zPfc&Iik|4mS@DTz--5br{P8j~F|*X0{wYZKx3+7swYOY38AStWtiKks?*B9`T%(>+ zyyUx(KAEjXFOztl*xOWPOGE#9zB18qcB1DG*h$9wtgSxpQ*%1b>cl4J|g 
zM=n{Vxe6-Zp^JKY_|tfvVW;9+`dJBiD#Z@CE49m&0Eu3H2wl=vd?1}T5c!5 z`AYQAKP0MA3D%i4zWAo^NufJ#GJkamAg!!ruqUekcDD@0)B|4aq+z zT3>ekCUsBq#3<5Bew$x?ds(UOXgy1h{x$h$MetFz9IMUv#>dcB(WhjQn5TR{NKA~< zH&A&wUI)3~l@y=!_V$LF4uF(TN#r#5zk;lt?w)YB^aBC6Dz`c0FrI zCX1ZeO8%?g^n(|p_8_B;16V(2~pti7=g=AXjPmb(A*HY9x9v{dDy=@kI}$gY0;y;@&d;AZx=Xs3)tNd1EO- zCRN=V?M`J9xj`)#kX_D$WbHC57B$#l3mm?#Om?9yRQFG69Rl5DXgTqal4bKa6eh#R zYtWNOI_J>hdK4@ozu3>~+8RkKd&q5(dd^YDn=p|*7+rB&+xH?q|BA*$-R|f69IYje z=2c1!frFdzo7vo=C~Lz5Ca=l$P%_7u^eE`tk40Rd&KvQN{K zJ)VQ5^OgIAhQBBMy!~hQdrr1I>r;DDNam>MqdW^KK_0E3Bjl8aiFMGsC(VL-G1MN{*1|&)7Uk1av$2}w1rMXoQ zTS3cR`OWG4#*zHhdA!WKI4DMvne!jRs_!S)j5ISAxR0(kkX?ONm6_Q(JU}f{eF;TP zY2si=P43vAl^sEI^-);b_gK#ew4JSgDCYU()x1_s6WK#~@;U=5dw6Pp+PfDXOYky( zl2j9|&Lok(baey2@iafbofMAaapsX}_JL={4}f^WJgwO=@}fsRz}U zLrkLmWH0Oe{c!dNai_vpyHrNg(-c=?(tO042hm)M4O7`40(f*{7(97)gUrKzUWM+I56)2f3 zE+^G*aGBF0AL964x|&W-=RxySwAaGz>9`)q3Ul^7UUj6l&ZWy0?Dr`8Z;A4Dp2-a1 zLS>$zrLEfU&zJ1u?qD?UU3It~%@qq=JgY~j%)cbo-3V>Ir_Q{^3?zdQJo>$!s6_uA zjboF=WIQhJP~&Ev<~+1dbU*oPM!A=_$s^Tv6T~J$(@cKw-`d<=7%je}cgl&wL?}$m z&yPvqCM~ansUO+R-*lUdGM_-qX7n{u<|WVfb7cVy?|}585P34WjBvH9-ytZRioQ;~ z{&95E1kUp&{v^x|qTNK{KbAdaUrg4^W_Y`Mg`P9}Tfy}u{7lBXN2@1&ZX%~{#qA+D zs0rntsNrqcT%+xyNOUS$x5VYio~vujQ-}AN$Dejo=P1&96@u$2J3|XksH-h1?uUeR zrQoU|+W;jhvB4E^2i`MgPF=l&kv^=A~{ubzvbd>aJR@wu6P z|3#i{8~WOk##OMh8FfANF<&Zw6`KD*Lv}vD4w-9cDswcA^rIPH9L}l^cm1e>Z%wTB z^R+f;}z5CRD z1#Dk${4xyMlR-HVp4+SaX!;olhi&RhoK>DJ?q-+vas1*=+8m?4Ny_gc z->Ni`oXE*aI32b|kjyZalJ}X!nhQ@2*lSMc4Rw7#a?5_2#M8bZtwp83AjC) zhVxForSM#$^Pk2KC8Fo`u(gr@nChpPE5EaeL@6AI!%O+K?EcFMiqA;2o;zL8d8%jM z$NN0JP)W5;=UeNdG3)R-Pc|4*vOlDp-YR-{l>|D{&=AxG=8hAYuNBSDDU9uU)5O*8nZ^42t~VSAUo6>;2>FP9#uzk z^gfHrX=Ht~x{LWKi`wL47zn8q_?^T6XspIWtD39MEA^Pk(3&#qK|bUVej z3dTJD@KfKzOY)N^Q&IL;WIuOXTK_L^RZ4$Rf&V`Yf-32AGWQl=`Ii5QzV-uu*$Q^j zOBTa#PUdG?Lyki=)YRCCVKSbtu6xv$*kSQSS>54Ikh;11-*pY>>AHo(<%te-nsSo&OX?w z9ihnp zIKXJ-FFjTpzIQu+c{**KiPta5X1lB3Kze3#Cc#clhrbOEIUPJr%UyVky7aq?h2*UA zDRg?U``rs1{owibkoh%B;M~ml)x{d6ONe 
z{JXT9owsx8@L3Ydd&SNCPg(6$!^028Vwt;Z2~l_R>A$d^S@how?-|qY?f ziDvN@-Oq--tS$clmAfH3ajDZ+=547xzBa;0a=1<;=~d+PGZbG0H7!VN3?%JOIvM-l zfU|qi)Pa>8#?}`@a0flzKWr=SLo@i`L_8RQwpZvdS!hnwlOC$vX4j9?XB0u_2A*Pt zHcm(RCFBwWRUq+c{MQ8}JsSNvi9J$%f6zrIxUE;{OR|e;4_iGLikd@RBjqzszC`OI zc(~qd|23AEvD4>DWyks(+8Y7!$x;6c?A=KsXR4(PDdyzW-9CSZ)66<%_xb7i%)P78 zHBs<2TTSMgC44|vwYG)7vC#K04r=Hja`LJ^%*@qW@5{Tq2;=qPqCI<9r;T%@+@6W(%tdBo_|QJ#>wiK2Iohhe3Yl}Q~LuXaJxEsE3trIyNKO>fQszp%&O8O zG_j6Ww&3d%l=UXlMA-dWor&C?c=w5mn_URE({MdyKBI}x*~P8`9$Kn*2b=5bUKe=G z2r?%D65}tC&qt{LH!Z}r5EX|Z^v~(!e|XM2cxEy#!E@q6U8;^` z-AG?_70xQ*@IrhXrNmMkXP?j8>dMSw)@aYxr{ru~qCB@H#W@AvR9!#wHy=@wg2_8G3rr-Sa5Ve#H05=6F3zLw!OA6wPG!Tlw3T z1~#6R>FNoZ8^Ksk=)bIXU^hksLy$Toe*&ViF}HyPv|kH zzb+_@QL;a?s!{%0R(S%RK2Y{-i20sICc)NiG`Pd>0r*d5&+O-e7oR(tmMgWq{1@|&`myStoL6aTuh z>vPHeZ|!~R$=kJ`_x7{MZ6ZxPz_;g|WljN%hnmC_>Vp2#sHp%miK4uXjShmoWCMSK zcU|S$V9%|hvma>nN3tJ}p0~+mUve3x=V%M{$@Eg0pM1^FDn4wSdb+XIBS~l&X(W33 zV=(y=Z_}QQX4j`kBYN}?+1PhHW_L(f4wH!tlE~UaJ-dyM8U$nYdHRj0c!lJXq3L|G z&isjONqvRq8>7!(2cJ~0;e}lVfwBEz@%z9MU6EtBjIpLX@MtSdV#82k5;aWbu5{i?P zVL56?`8|!beFRlsxN;lGW&gxXsD01aE3rV5Cv&Xl9*5qg`WoMOgil z`k6;Pb+EchqJM=}>he$D@SEAi^QbXbPKAz!<#@S7JG(mkCQPNulA>PQ5^)i70$K9b!n`+8gY`zgsE3Y|sB_dIsA z*z;4-n6>3v(A^kyiT-jV^b{exon+IeFxIHAhG*Dh{;wy!hxwh%xGx~F>@v8`=ftsp zyx_?u<028P%$bSE@q$K;1RX<|1d z`f+zP|Ba5TSXD!FA|2r?Z%x^eScVmqW3??jogMG{yPGq#m!Pl$ADBJwnOzEUpJVZt z@L|b$n{V+-*|*b^Ps`{cagVQs$Z}dqHty`#%r2c?EPf07t3&J6>HkUkOrDWzd`n!O zoGp#F?q2YQe?W9b@t2|bTbNo;Iz4dkGhg~73ObYf9q=6rKIG9G(O`2}m_bI@!rXfP zHS-LkNqZ`JXG}H&ippy*=Z(K4#Sh)h>fUB{otrgpZ3z<2| ze#414$@<+H^j^uQI;ek3J;^syN`KnU)$DpsjNfG0OiZ3nNH#0Yo%JGr`F;=yZ$#-` zC~S(y6`shxiy|mH4&r`Mswz!y)Y?;gM`@n=FY=qK-6d!otM%ln{S?*b^M{E5(u&5W zvFS&7pD)o-R@+bEtB*UE!1@yP{={GQB%MXLex9rkVIwE&XD;IzGjE(X=j=T>1V7v9 zJZG%isQ+M=ll=vw$f_5*vbXtNtzAr-2hwS>pH}BtGw#Zpb}PENg;X*oInk%RQE!q3 zeGR!jMeZX=KXHoNXk{S2lZWeU6!oU5*6L6G?g=FFnrm;6M|pb5ncPcI(v&BziHC}w z%P8@FvPztWAI#0%2vtSZnAkuEK-qA1K1>g>M~!=XvQ==fPJ5rQ-r#wfejvM|Ymn>f 
zYJ9-+O<3em_LrzkMfBxAx>5_5zoI-lRI>l!7_{C5cXi43aBWnh)y8Do*z-4&Z%*jt zME`6uevh;g!KVmx)Tiy7|IA!zJN-vvzHF&lUni$`(UxdNC+OXh=P0M`Z$U-&k|wL{ z!Mx)bdVQ4N%c?}8swd9ecs?roBe(Dt>(%<7f+zSK4aL=xnaZqlo{!5|XBU#oIrO$h znO#xctI&JBQb1eQ9_JcuCGN)y=v{-RidxIrmQs4f&mkahgNeTxbdSMJR=HoIk$bh) zpB#=;?_%=J&da(5|L_MK?SPx$t2p_5!q@z}&}&pC-NX3u*Jx@B9tY4;Rsx&wFMYMr z&~rWc`O|3k2kkEM+#||&g6GNl_~Iz6uGVsTk>r5P%F_=hoWusMHUg;S-Wb;pLEZi& zo}M*xjhS;;t?#`-Uzn_!i85E0gm&XO>$nZopZV{^4DL%-|H1p)YPr?5TUgRr>?bFE z??-J}GP#o`{??;cQ&%#QPv@m_0_QsY&13XipGNw__JOohnrAs$?^*`6+vzPaHWI1j zLp0^Aak4@s?_WQX%D&4)%gwuBW{BQEOK;vH=UZB7`ECf?n+q|!91~A$3Or3#Yjy&i zP70e?&1c3C$rj%O5}wfNtvJYe`(Ge;G&xKp`w{r90HwDmlm0wWD9hsb6LjB+?nDns z1fxWuX~^zxEsV{pL2P1*%{J~ih@{7PGB{2Q!TvnKOC+%wA{L&r{Y!9o7(Z4C_ZgW!tEKx%H(u{jzANL!dL&Ut zd)@rBLu-A#(Jp$QKvsK49Y-mdc->=3q$GSKD`qwF9zqgdX!T^*x}v=TEG^>G7NTk{ zA2tD+^Io5bdeeD}D+=B|@wDG_Z44j20Jj5S>mNPOKuF($#^1G_Jl$&{EN9WXD0i8f z9 zR5W6LS=~#_-!rs$D2cp9@}2m(lUY<}Wopn+h!3ef`}BYWo{+yZ|lT;CPK1+oB=aBD3?T zgOS@iIQ|!D90f7i;c}6(N4UCGtuK;8-tph^ww3%TnXhl^`e0mVq_W)eyIIOaH9kZ- z$u^ff7!TwB!NTa`QTLmm^fWEMt6X(@_=*l5;)9RXM`X5kHft?|yQho`9?<#$g??rS zIbB(>xJ&qwthwjR#Ul6DpecF2r{T3VwB7$u(7qP6zUH z)oJDxSewn-4@2#2KPQpkO|bE%-Xp=XxAiKdj@Y&pR-jD#L>?>*=^omDlZ zr$m9NrR;7fd;{+nLQyTY^(@rB2+5zSH)D=hA?zTuB&v23Wkze~HNOqXFELn?lYO|%W_>R!$$7WWsrQy7`3#L-L5CmE{B$&A zHaojcOW^SXelO=YkJ9=|@)<=Y@6+;6csbae?6m(Af3x|jAL(O%9w~3-L!n|Vx^7h> ztM6Oca`s$w;S+CEZYgY6QtnPya{hULx~W#Mlwht1kFm?&UdGf}eErY@@kZ41>i~EDyP-YVm4&zafvM}&C zYG0(8%n@a$qlU3BZo_G?vrsAZm z`|+>`>OT`7V{goYA*}m8l02`#VWhPP^+jnq8OMrhvy-0nbAG=#jJM-|hd|zkwAP=6 z{#Y3EXO231-ezm3VL>~K)cPcAOaz>3Xen0O(6y(v)tAhEDNvpm5U;zR-uWgJy~Hbw z)oMkyUJn03)!#6l2!XfYe}Wb>pL(3O)yXyR)npM|j@(4G_RiPd{89UR7Yzpwq|dtXk{8{JQSl?Kq> zpUjdAD)C*j!r2EO^^`BmGKR3mtZ3EH%4f=DrRrBm&Z=2rBOL%W*}d3YO_^WJsr`{g zOz|_jP?t51cKU+H$hIq6I+3@|NOCbvmW9>JNjZM07QAnwqhxi@e9_8cb|fPoiHXVqKyx1XIa9 zn2gJj@^X~q9d05!&q?Nr{vXE%hP$_0tGiiBdm2w}$Di?aIgfIIwqnbfLChOn&UVz| z7mm~V5_)J*koWIw;A%!PH`KWSsCz(qI zp)9%JtB~;XB$PNt<@K&JJpDNQ=FHK4xUb6+w~%vI%AWVB8?C*}Z 
zxIWL_^uue>_MRt`wQdL0-k_c7bbYudYN~r3Pn0#b=`fnPfMxKS+!@W;V;wrn>8BUz z{$f%;5n4u~Dz>)5^NAjsn1sJ8k$A$1cQ}^}KGH*G4rdcwC9C;Pe0Skns?x|?q_mtq z^ZwjQ$(&qDWY+8%Da&gNA@h)}Nk9)Q^R( zW&cH|MH{@%z1Kbb{-(NwBOduknggJlBg|Z-Un0 ztp6W86jAOf^2)5}0CF6Ov*h5|N{;1Nbk2+A#L=&M>6Rp(>`;lFdS_uQn+)(hJu?@E zGS`uL#hh;a1XmN3yu$s`@Lv%Mce$1vPmj=2-b#=2%p>HU+)Ll{Cz)XxK}$1Wd^`*k z^TeO}|GdY1q5MK`z5AncD7>8H_Y-tnj;B*e>v~iUVuwrE$uhr})5SE%J&^t~-r1k; ze2U)AQ^%`Ny`Eej@N8?E%}C-ev|kKoAF!wE@p=UReg=n5AUZmNQY>Mz=iG=eL{y(Pcj(E-6 zO)@~NL|bWCdx~@#=u2-!bu$QVsHF;endWTuVmy3ELbG^_L}%JbYE}GgsQhS_-+&$5 zRX|Akxg&73)pJW}yAdQ_%^tJHc&C2y8Z__le-iZ1ya8(o~nT6WvO3oXWDfCKZakw{&Rwo?I(X-!A@*luT z&Lbvf|1+NIini=?+?R&Rx|SKV4%+xg>9$6KD_t+A{@Ku6+m$Bh&)V~JzPCSVJODqB zK=61L+{W`~;QtABK7-$`4nH~Xku0ZA;A}aLx6xJRu|DNhnriE0nmvo`m!kF=<*vu` zLA-l*%4H_Jqxys7qfn9D2}MbuH@q!^jyv=)iH|&kWnOM9(p$YNaIqhWj^>ZDFXjTz ze1iI9o~og?RX%5*jxfukoK|~GM2QC zr@^U^{Ed-nVjp#a{Azfsh`S5W^EbquMYmshf4T#=d1FW}&bI2_K~6b^IfJLmD0v5J zy75x?L0%gkeJttSK?6VO6<0#gLuenS#*blO6%_0s`RsYh8KuNBT*ez6gS!jSmQywt zkyAW$&bZCP-IpZ0KdlVL;iY^-ZOGUghoubEEg{bV+Dvr5@o?SPy{xn+f z*39X>M9azSLwb;8(cVZRhrwa`%EVtNq94Ae;I}6Ee5d0#yXbRIMP%|EqvePYb zy+_c*JT_Gp8t!0U^R-+`4UM$k+5aMR(pN3RdFhMM{1_cxM9=j+*NIGr(pzGT9XC0no|uKTQ8ibMU&G;zB$fPMzZf|lj=Dt3h_~vBvxRWJ+W+Lg znuq?y=((FGPfq#TuwC5Wv(R=aEoOxN8~Wmx?q>%V!sa$~Jg%kppfypVPJ+=c?hl9V zPBfAJ;1Cqf#@S%hR63d8XL-qd46gOY>aF&Kgo9-|LXosWklxx~8FW zJinT3HpSp{Hj2-L$1%9CK$p+Z%xPN5np`nl=IuZGaj%7##pulb*6Rvmn5O*oBVT#NPCFk+>i^Gjap_udL>FIC;_&)7eCFb1h}LV@YK|3J?7bbGC_>6uEDE?_tNZeGL3QvJ=5Xg*Rshp@tr==}n1WG8l8 z$gbp>ZScKL&yr{>#b_vJN`HWyM8$uLS2~T(W6629D~k5v5$(UN_1 zIc=Z!fnLz@s=FiDS8v`Skz7AvEfrij4PDu7l6kMoA^j+KdeP~2e9ls~xZb1`T94x! 
z$CC8ktcsamTqP5IL=Px8WiTsZypJ}KV&)iE=x<#&Y6th-Jpqo+u)0gWtIZWV6+!OH@^E2~(ylgjZp?{BQ}4`ejJ z_t_|_4^KHYbOTIQcJJRbeLf_YBm0V|T*8LS!ClpY)-U7H|J2*Wj*HU36;L#mgzDk_ zd_Hw8%udHw?yscVobc{N8v8@tb-sU23Mb%TF^gX3XCr$#3vGv>u?6i8!^KYHhOAe9 z0i{Lox{dT_@mtC1aT`0UO{>|Tmo~CCbQE8FBbwr~FD+1<{1|iK;z9Q=!}T~la`K1$ zNJ77%=rU-StHd0=*>z?_O2K<_+3$p;lgRvOvVM|A9;5lrg)z)-HE$}k)fOf`Ww({o z7kSO5Gb8t>bT=5BK55ggqDDQ(~+u_xXqr7!4d0snX`3H=*K2NtxKIg!MLItpc5 zaq*(kc^^HG%nw1+S9mH-|L3#vVR)TFF z4$V1vQ--vDR?`J2%^Y)P2R_zfGj~pBVSV)5yY(&EQ+f+jWVK`st^A~4X-tdzlFkV* zkytCUz279$`+anm%p)(VuNh0Zl6|J{%L&QWZ1e=^*{I!|f4!026)~bn&w2{|-LBpz z^>`cT>`I!hhlXVIc#wBU9PY%9&3>R+Q1cj1v^-oqB>N~#p&fVzsVag z4h_Sg>fr*%?a{fP-eqmMz*=>>s_!#Z|KnFcw;6fhnTyo3)=YNy^ZaUBT`!VPj1xAm)S|4;i z`cFsCNOVtuw`R%=fbbVdzL%f6MjSb_^`@E+rqkc~wuvNP1qOFOV9v6qK-ze7m zn%@WYryFpVvs05u`ZM;DeN>6w(MxM*si}x|u66e=63b43Lwz5J{#ov3*J$!YwLoVH z68;rE%Y7OKrw{XzBgrDGK1)1bnhcX^Y7(l`57zKGu~jo`o^|tz=y-)ZvmbjPSw7%y zve@TL&_MdV1t%x*|B2X>Y}bFGD)C)1Z=c!j+VEIit(hCm3}Z$Av+MmDcz7H-KE~?~ z=uTd@{R_5G9}=?1y@6+Dx}Uu&W&KVh#bhM^g*_x@T}A&Vkj@~|%jwV2t~JmvT#TN( z@I0N&e#cjKFlQ%yNp_MMnI1kDhk>l&mq6vig?rh7npo<^v{;WVO-A)%yqu@M&3=Tw zq%)b6Khis`)!G~MopIDrQ1T^DI@tC9LBxZs<1r()?w;R@uO5218CuF*$vIGvJkQAn z(*>mm!^=n>KcmlE>1`EE9+yt9<#cUH5Q%e1f*y(M|sH%TcpWh6aYeb3QzB@&wpWr=s5HJW6$ zyP6Dgo?s%nW}$4UlE?Arv7DU6o67F9vuJdo2hI7~w@4_m`49Q-LtkG&R~1qj%GMir z?_H#4DqEmAk*l{Tvrx;E)iQ!)lWi*zPLAd`8pC-a@aC*YB7Wqo<)b8$(-Tkoxm`=i zMVVQkzj=undjH@nCq38teOS%E^P}IXZ8(}M@}L*t?o+6rg8!V5&))T{%_Zu=f84zb zS4};cOhWTfk_Z7$(CZTTYF+RZqxh8y`ton#xF?CuC)t;H1cA3^(0KJ-cNOC%Z%nPqd;$_x=~jeoT__>N&;G8-_lIxV!k4Sty>5#^k1b1|9t% zXpXzz=v!j<#o@4@pNy@uzWgnXCuU*#`<<|oJPOH$(HJ+`F_zhv`$%jpc_e~T3G^pc zZ0745p*bh#vi`MuFTyXfd2I$h-2^}J8k zzL(S1)1FD4PZ||8MgPUD{x25UgEvfU#u9jsMCa4pN3?JbYs#tG`tIZ$!$^Hovhe&1 zzwu~gjbD>xsS3;-$EU4lIeFi`N8JyQ`I~6%!?R^?=6Rlfg;zh;d(}kGzKYu)Tw6$@ z$uj-8=hm@<8^~{@r#icHI9b$0SH{OpQJDCL7veX1tU(8tsbe$bmF86wVI{GQmqN-d zJnArhAoDnho%9`igpwm&&phRM>P(OH0iFMXpPb!!71Cbgv$Kv^S&!DrC~z>IvYM86 
znA_QL#-|7RIe=u^vBT`+&iS+Kw0Tjf#1`3vt0mgZN&9_JmlO0QX=^GEko-B9@g0{z z-x8owZ!0eXzBMvrqKX(xQ|#Qj@n{tX&V@N`b8p5pE} z($9IF#Q7PjZz;kf%!J%#XsN;z?1AihILg~a&dukH`dx4_rP(AsdPNlEl-xa}ofY)F z57uD+gX!*7vYe^bUs=@|q>}mML^jM`h)$llope`1?3*-_NFZ&YB)i`4)z;}Kx=QJ4 zWV2h1_rk|Iyw1VVJ@`HfPFs2IIh0gK_n9=7H@R|ZUIP2eN$YhrC;!9$qKMJ}EnvOyL!#MgLs^;*W2l}4(*w#vBrK_FZ>Si*$8}{bFW+JuxiLNpQO?;`f zTIl*Wsy<@9iF-3qt9{tVY%+%(k&;CE5t0pUwT7CADMu*%h2U@Y$F6s8P+$tS0C2dwBMC=wAj~ zSD^1gTI<06EIPi$%|{DI~mC~^S@)X^PrJmqGGPlZatbfM;o)r zD<^(h7W(lv@bNjcy-FgP56wtoo7VmdC8hOBwefZj-!Vk%`)RX1iI(u>jp#cF4w4%! z^PIE!nb%0>SqOfIEW4m6^9BdgU}MNB1~1={S9cI_^V2?p(GTB|F zL}GVz#a#y@k$Oh1bqi9y73V8qWdjZ_Kuc%Z>`Sk=dmHY@BV?cX(d2NgcDLg;C%{Xf z_FWp-!INI$`okzHOL|+?GG3`z`p>RBpza57*N&z((L!S4CN9-Vwpr9~Wi4go+=T3Y z;x9Yv&G+u&&P-jhL7c!N%!iv=ynjdDF)_3H;q^q)X~xSBz~^u+k0+-Y+P#>^>q&d} zLrYErKTU2~iz=_Dc!15mqQtq-l1TXFaI%_RUdPw|>G?n5E4kLL)J8I~FDJiEtYVRR z+wm(eL)>gMFQB!Fq?h&43ff;v)0rWAPp!{D|0j4VhrV&J|2fHgs`Q8S|6f%70>6Jj zVD_y1q`kSUHCWmYH#4=>2p9jOh4rZT2KKUk{vABd$5+brWXp+&uy+T~d=zeHbJHPy zHxA;xI>J^?_})gEml-wn(0(E`KBd*_C{BdCzT~*eyUusC+ltITCXqobIJtRq0x2;o z7NWP8-aqH^|H0j8I!LUbtWB>&^&o7 zD)AiRk|s){cd+s^_>RPb&KPADbRA2YhZ$#Nt+xvrm(k$? zJaJnVzJxE#NtQiOe<^$)4(VgjeI`9_P{TNuoA?^h&P=kdL3TB8T@ng<^Lo4Kx4e5( zU_4i|Q=l%)mFsV zW&=G=RQGu0&OrO=g|a!fyiK_}q?eq$#g)iA%1DwJtc6$EO~$^h$Rl|qlNV!y_Kwy! zBnMj}BxN6UtTJ=i&9y$0KI_p@PAO-H`2=P2c0Lu3y1OKiK0`@;mAHGc@;Pv)PNf4jwvq_FkNwL3$(9nOHuv z*}^`!%t`Kb^z%G=o>S{Vked@xW$E)~-xI%c33^tjw;wI!EKm{jCPq?L%6`Flc0Zit ziM)yCJzy~zCH8vdeNQD{(S8NxlX2iLea|d< z$gElMg5Boi;Vww|8VcJ$SaZ)FOU9XDe~A3Pg^8S%nt{rku&(X-M7&H+sART12$x$) z;$;~90J2NK&NC2M%>R##M%Ka0Bv!ouA7u)*T889bB-vtM=fX2YeiJ%{L>Biyi;3S@Ro7ucd&6Qx)#vKD7JP7 zjXsFN+wqymVJ8_aBo}xqUMuGxp4EePh2b{1T}(%BD)lau%+TYUkCy}Se-(Y)tCj3Q zNNk@D{8m;o8hI+%%j(%n#%QD1VKb%Q$Hm!3ktKQL>|nf}9uvdra`LIG^#OkKCOVs? 
zEQ6C{=q6*PYg|vJhm5Bev%njmJTu*yQ9oYIPtpBD#vb?M=-L8maz>(!diP;VqxivD zFtVO*lCM4e%tAJCD=9yY{{P@PS$=-z>H6R@J0pK1|Lj9*rv6R%8AUhQvsQ$Ro2a1y z#3xHwBC{uAO){>$s_z)e3PzzW=Vgk*#No8Roc}M5?%HZxp+s*#_xPEJrb}7RGIh=^ zSj1bntp}TX-%yhIDw$%k7x)x3jq~#$9@??xskB%dr*mm$AN1vPW+$|dDZD4OCdE~x zIi0`DTy+EdPbSM-*h==@|H#|yAm5|t?hdp*P7lu&^qyV6nH|2t-^E(0!)o4BS9$Ht z=WQ;9pP`=EqSkBhb2*)#rQVKYGK>BHt@Ji^<{bPDczFjR{>x9*;q%9lLmxVNjAmXX z*<1O~Gg-vjbQTmI4yXH&-xB@6Ge#-Ds&Op*tcIX|u4JC>TzHvd{90R?vU=0(vrZJn ze^}((+Ro0RtlK|AHjj|Vg)p{_Bn~W$UtcGkM4HGh(JRqjoz#*MDtmfHlEqZ^ayIEE zw$NxYd=a0o(DSXd6=@`oM-6D70HMdD?tb$;o7I&wzKILh#}n7^);aOg4KiA)wXDA< zvX^89%iB*@lzJW!@%oHuO3e-AK|c>7}%vCG4#gzD~ejGAtbDZgy-8CAVZw8_4q0 zlgAsBr?&@S=81wXFJ}=Qd713HNNkl`3pyC5wocG+y0*4xC9`NNUFmBC@VRGmCU+p; zbpvGVLnFyqcRLQ2;w;e=`k=kLexoxBYoe}XAxzAiC!pYZG8}=btZTQ1vNDjG6NtO? zO|98%V>rD=ZHaE06^A-nxRbwV@9#g#3?jji%3sdk9Ke5-_GEjlW^JvaUM=rv*>!zA zT6@xQ=Bbj;dpOS>FVK+9?yE=qnqKG7^`CH)-Op3pn@U%iXLy3=&PwpU`nR;?O6Ni}(tf79kf zw$)!hn6dD1wEYMzYhW}xM*k`FRHs7i1#nOu_I_r0S*tJR*|oGaMLWr$nv6~-sNod+ zen&<*U7tR(D;|I0L)N=m)3pO_E2P1 zemh^%globb2QfKcn|!A|c>IjxM)C`1>lqRq=Scc%$sgq`Mk2%X;SJKaJjY7@C}8z5 zy=6wC@4-t6GW-O8tKsTnE%apZnHS6(SI$EAhpOjkqN6(g!eM*g7o)fo3+b-C`+du~ zy|2)8C*E>0V>}st0ecsC{x34ip1a^=vMU$Es&Ju|R<;HkZHHdRi-m@=BDF&bf$Jb=Y_sOeLa#nLk%bK;@PWr zF7G@Sj~AnTG%3cS<6}OA;B9PVvu`i(5KYi@C2sFPUq^JlOJgf>w1b~}g0?f0mbb=4 zxl7i*3TW=DosZ}=?`VI*{8(1|peGlgU-Y8ew@bOQ(zJ(nZ z_x#cP$gBE`0W`Wwiz8TLb5@pImpPxD(-SX2S~0Rt)U-Bq`UHJd(0(GEKh9gM(&r{C z(gp1FQIyOlnfGzI5H?n-c`JQnW;tk`?oRj#p0l3%5UsU(#-!qR>5;b?x*EX77K{_*WdJhkh7_sGO^$Qf;rTjDMFTpa4!ryG4 zhkACWEAfSswb#w{8R)xOTiJ^nguhzwIaPf+l}`I=p)5Hs^Xwz|NGyV7YWxX(!{Buq zFZ2aXH6iE3yDXxnB6Obf4B4@FA$*LXwR_2?9CT&p&d>a34Vub&WiMB=9*}G}Ezp%c z!dZ9D4%M%D*5mkq#kl#6ye{U)_bFg(D>+VQqh;WwC4_FpRWi@N1t({+?g=C~*1hcg z$liy>g?1OQ%k1r0#+pu{t)Vn>Iqx)tHf^Oz~fNyI0+q3I`8vp zLG#r{5(D*?)!{t5lIEjn5c+>2p`2_@Hn_p${S}EXfUGC@&(GEH7ai`MpGxH7tgR+0 zOKV(Y7Aw7ZYy9@lc6U_CD`6gRK5g`eZbnPUbZWZz9R?d?lS-=E^R<=ogqh z5XFs1;!pPcn-&)tg-o#KU}YYfp@i0*QoSL 
zqk=W)d=(cq3>$i#J=3pK4@t2ZXhum1F|igSupw|F8O!xuOFP^C(SkDrn?+ zvh7EQ(M~;Mn~mg{eGBb))^6}rPOVSieXhDPX5W`DJc=GR7+)l3PiBP@u{`l2j%HUE zxq2SyCAvXQ5T)LhdXB}sdN-Wahxw*(d75t}VD1ez(wmGLXyE~rzlw^SCI5z1eP8J9 zRvV+8fu=n;okasPwfwd3+5P_ly|h&RM3$A6{r72dse9RdaskQKg7~F8QoR2dpMF<= z&faE^?c+FI#|moW^k9~}# zTDgqo6R|nx=b!c5RZ!D_RVF6M7%kf8rEW#97vd90SU|8)0DpU-1&6QM29&)Y(6 zaxmc;FWp=ngYZ+=&x?irHtXGw`7{O16$@I-YS8QY{a#9Dg=Cg`zahn|c*2ot zTS+I0r+xsvoCOzGC+tCo;_Jdjy zulQ!T{=*YP{e2j2*WkAVWPeSTyGZ|X+;!Ar^g>Pcn=PmRAnYpiBo_G*r1_EG!`b6a ze&(XMIKPtb~zZ|b@WqzhtuUK$bOPE7os|ITF=nL>8>2d785TZMek#I*4jU;<|Q_KFz$5GRi9cGN1lhw;vX-02T{9nZz?D14%eM4!;T?93c z>$^($yUep+qU%DwvKREWLf>NeuBzst#!fj+Rt)wtledvQC#TaUrCumVCVO)CWho`u z;CA=UhRqMju(Muu5_u*TOk!f?%;0zIvlyIaH*YIcC2voBMox`>O2en4suA0-$LdEz z$;KA;t5jciPKAZU z9RC$PAL8LoC6bBhQ;5rq@1ywmk6t?K)l+aXmu}~fX(#^TSN6RxJTIl)eaWv8j(&%P z$Kh}zS+>U0LX;f|L5q}I0&mG3SQaj3LHfN)#;R7Md76IYaGE-k+%t=n^L0 zn3<0uB!8pwnqxPFw(ajeW}=vR3pnAJx6o(UnY+>2n0>O+e*1R9;l@gP*}{t%5K5m?uko(yRjB z4sVIW_zRl$QU6eq>xqV)Jl|qoFIm~9XesCQt|Z%YJXxQHZ$SAQ1&L;5@(4QlnH{w! 
z-_P939BXxw7z>vJ&{dU24{&cY3(XGfHBm2pWW}<^7{?mbUK1qp< za9E4Xx8kTSnzE8N4UV$sV4(kZu+E(0PA=e_x2*=9-C?*cY}M!Q_C`2AhlS+)arW|l zjo(DGJCLS6rRh1Y&eU2m2^J;UtZpZIXCmgD1Y@T{z_YNCm_Wm|f0@sT_47FYGn%%N zuPtXb6Orpkb(d1Qhn9z3DT$8k=qN)nEg(J7{cbG0dz5B< z$u%^O&9)-JsVGQ9un}-}KQ1T2#@;BX9n{>+m4jGKW3)#Cok*(=i5Fv4M{4g8n!L*2 zxI_P=Q9`I!Kg|c-12x{iN6bZhw@PTjZ%HdKb+l%vl^^8G3mcj$J5%*KF+HlW(i5P zp~WrUyNZ$H2s(WZ$2)n37fAC**qY+%SMZauS=MJ7vg+hxP0W}*>PU>#W4#j(WlPC+ z(+#gh+)s}5T{N1!mPf1S-#kwPZPbJFthdhadmY?l-&p1k9>Vo$yiZ~Se}S9V+3}^& zkeDi0LCH*V?8OUysI@(MvD1}~-RBh8M!s%0J&o6!bfDp!<3CcL)C0DQ8n0)3Ki@Oc z)U^VKtF`tP37n(mLwLs_dX2l3NCc_#d6@>#l6VqD)PJJ?-$L?cb^jNZ@-AJIc9NIl zHsuf3^PZr-#bh%WZ{MQe-?)Fz=Wa?o;z}|rWH-mrH2Si3p5k|tm-##hxX1MxdOK5LN&$sK*+Z&&EBhTc9UZUMU^j{At zzXd9@6ZctG{x~jX!O*LaUtWzz7HWA!n>mvlZ}%om{|}z>%ts`2xITKJtBH0sfpj;Z zJL9_FTyFtE1KE2~css+G`5!3!68`To_R2n+XGnB!M4X|lVF=q#j>zow_z|TY^*NPr zxJfPlBhgZxjQ8Ay?|a=#Uf0Ck&D!{OZNESxB|ZHaeH=@RccQ8mS?$BZs-rZqxh`Zq zx6);@FkZ(pu0~b7(iixzz}Iv~`x|umm%c4$tDYnMWP45|z{Ky$%0*)At>Zf{)Mt)| z-khdQM!~F-Wyj=o=+8WCLnS+)bB?QTlKxM8z|(5Yd|9F>K8EJsw0SKVKBevh$zv2P zC%RQ9mY&?6S3r0oas7qT73$CE^LHh;>3L_Nvj3?vH>1XBB|yq>_Hk_%T~vag&(WN|p$1>Rgty8#DA7!p@=d*6DT9(>JYjj|eq|xO zaIpmL|01#1=q~THr=qDF$-hS5*&%%ZUy>79-SF~1=q~C0JtUstX6vPvgbqi$)u6pn>mAafzn^X>p0Tu&H}P;wzk%e zgvcQHBN;%~AJWe?&ZxO6u-%ouACEgyrH|M8!n zsBZyE+TwdNxr}BJuY2Mv-+v|ThiD|x6&^2W^#-1yB`%-jTN>kFDY-^^$ta((NmqFL z0GjsZuuio81%?C|o9P^l;eEp23gkFDq*+`HLx#@FBV`Q^RU>v{K_* zpRzA`7GCZnnN8Z6&c-e#x0>pI4L7Y_=>a9(j2yp&$@}n?_1wkU+@!99Sj`&TjY0ir z&tx}Y-akr{^^N58A4ng_lV>MbBJsDvQzIVeZN51XH4>#gxf>#h&Jg@7yGTawku*3% zJCW!X9KG(hA8aLp>(6jAT&bKs%lzK~xK5t6R-d!KXmUh9^E{U#KA^Fz_OJl=JqNprl zXJW_kkUoN?&la_>WPu+@b`F`}W3vXL-9o>g6(ql0lo{jry$ine9I3Nw_E|W*hud5E z*zZQM)A?C_`8FH{=d&cHeaNYN0+eyUkAE(xg3(hu%uRFe&xpKlXd3cVC#_ zB&XOoquL4h*wg=F7+4=(@x;`+`^o1+c<^HdiC6jmE?Znm_WFLe>t(q(dK_tvrEh!E zl%egOc$QU1&p>enM5ohZG=A;5_Dcb-wD?`@{M>xSYlFB=mFkIFwb3u|w8z4=Kcu z%=7!`XEYzoitQ$tH3ENIvdU{>*5}&aSMN__eo5RM%?E3dCs=1cf7VXsZhQwL)D->^ 
zANe0d59Eo5u~T+Y_2R$fM3^dCxENk1;z}bJd_s=g(ajUkRC}+h%|5KYoi8LOQ0_(- z=Q z5hg1YR{BZ(;hX7nk>juSO(}DM|Ki_6kUhzmlR02Lmc0{B706bL4r^gFkmkF!|23bO zt-oRk5w=vdE3X*qfsG_9qWJciDlh?#`VMXl>A4D zs96b`)gY;MGs*wMNZVC^xoSV$ytEet{>7rzJn#(cnho7#BfmxroK5H4r+mbi@)PWb z>ho+!-NV;vU`{XJ?PRU_ER(7_*%y^{aPy&lw6orG><$v82JBjWe8S@@;^k1^mw?On z^hj>i9e%Sarz4b_nR_K4{Cvl)gy)roShfi>PNx4~Y?FD&Eo>Q&%d>~&IGSg-(=nnS zR_4081?{UBejo7J!}@B-v(9oxYW56;eQV=(TDZq}mY8)o8FKZ}TK_#+{YjE-WSi8u zm_Yj(&YQuvF5`{M9GA>rt4Z=7_Bo%OTH!^q?PhOg_RgIpQdeT{tg1VM#|`0w&l=I& zh^_4*m;5<-R-BbCxyGmnuimsv-qX|%y$xDfC6(-S+1>pi#{Y%)@a zq!_#I;J+8rBu_y8^xYXg$#r3QY*{QG9^mNnSu3k|H?z#$`l;*PTUr{AA<5If1MipU zb09PqkuG)P59Sfi=(nybx>snP{0i-0Q$j0wBAI=&!Lc+c&nC;?JUVvx8y4BK(1F}v z7x+0-+hb_58;3`V3q7H-OS{QLcQwSP!uNB2akgGwg_$sw9L8WVk*e9+bX=Gkc(|AXqm1^tr6 zV-T)ob;|y1SP@TJuvKN>KF%%+jY&VV%)7X?SYLnefH$!71QI+$jz6(}U*4HI-RY?g zbdpK6B27opKC9S|!Gp|}lVds+0rHG;6wTJbK2?hMg=v51PGGw{%guV9`Pko~5DQ1s zsS@;(7d3ayx6-K&Pw0Si-|J<$&vGB{LAHOCr)K42@_^l3&~mKEyIOyx*rW_C57Tcu zt*!AY6<%i=jW1=Z^|-tnZ!XvK3u4igTJJ!Yp{{wdyK{{r7Lfg5+(<>|yPR_>->t!` z$HJ{V9iGu{?hrmhhUB9iroDq{mnU;sH{a0trFl=*CzpqEcFZS!bu+Wu2cPro^aC87 zqrd6qhmYV-54?R6p3Pv;lwK2jI*^MJ9ZxoXa&RA`(?5uXUPnH(hSeDQe+6WO(bb;vaP|h1%8fVpV?Z>`jOs_ zTMVZT@E$?`n`m%yWJgJsAl3gb=wbMv%2k7rRlIH1kYFQ`q)Wup`PP-Z+ zZdS1mWwl3GXbO~{#<^}H7mN!xOINVVBwTIxaT zQ}ua+_XpwT({Sk{N;WODRGqIJMbcyRox2DF$TFJNbI6hUX(P0gU2u8UoYf;mjjO}7 zbh219f1Q`u`!7CO4BCHU#Xqml`AlaVg_R1!Ovh|M3;?L|yjqlGV^kb7xeAp44+GsL!8 z^zYAVxl%t9-|PEiw)l22U6Ro@)mO6e`aA7xiqL z0{z|%pR>JtAG4E*w=>J;-g4se5%9|0pVSL$V?-(fpY?8ggSL>xAd9!{b`-58IQZHlB%MeN);Xb_WZAUjV!oEe~VTFiP8VaMt zjtOj1hP6uR=OC7RA4mTXjmqFr*8N}V?|$t0Ak?#qAv5guTImJX)Y?tH`Q$gelB~(X z@g7!g5&JJN3LS|9Wyw7h{;3I?YGG4YChL`3L#PaF6u;u$c`Gt3lHXdFnik}IA#q^o3^&(KN%1e{?`$s%{9cINyS=L!Iq1~Qz z58?9&$3AT)_OX$4I0Sk+d#gUP`{Hs|yq4eG2DQZL+7L;7f{wVA+IelY@QP@bj6zxG zl8k9pXpriIV~sNzeOL0pZ%DSA%q7jHlCiNOPfi8FR72aTwW_XCN3l;+@>X_KYQcQY zAG%@L*Ele);8)4Uy-vKybBR6GVP@gtG>9aJ=quv?d#swu;B#5{KZWsk5L6aGJ1bac 
zkv>^oGN$Fe$dByTLtnp=DOG0|utX|_WDQX6K8(}a$3EXozT0{IBVHwQ`1w%zM}OT} z^$XsSyhd}#{w`fQk^U^K`ayLpM&$I z`bt*l+&M4rZ!bD5cf?`ZP2PbQjURU#3;WQoCcW?EC7-d(8|-w8*}x{2onZXl1jD=8 z^=op>6n~DP{UYsTG}^AkH|Uui1nc=qvLmE=!D6xK2NrDTyNcL06<>0Nn|+GM!D9~0 z^JKLGD7^g+Iw8a34HV z+b7T5w}}t8;QfBI`ax8=kIpZ#@DFBPx8ZhQa_)!aW%bxddylZgAjeIk^*Ht~qTSE% zWS38qQQ`n&Wl^{cVc)~BXeHn2$NSzbuzWQRl*E9(bWH7^1|m(O=p*LCsi1m-Z|)@T zQzFJ2{$2$8pV(*`?ant!X1DR}a2|`LBlzRz?3OvmI>_7s^nD7M+vs+*@JN(TKGxd0uIG0>Q*YM6cUd`3Zm$+8VyJ}=@%$G)D zT=rNL;c_#deMS@11RugTca9XB&NmeFn}K9?*E$vu>sT%E+eHMCByp2}kC zHT2Br^d;Gov3D$s)nvO#VsG}zOcm!kuwE5B7z~fx!}!D}O~u58(7qi$>oKN>PY#90 zm29_wJP&(y1pmmhnSac6M(Xu7cF9$0DkDGUygb3n9jlx1;3zgco9sVB^DX+`Ns7L7 z`WEsJ>8YVvbT7U$mps{zoK^SN8`rnPFn9lxt7E%1+hclSO``n$vAhwz7ili#i_LIv zi_z*JbEyRo&mPczJiC@QZezDUNSs}?S)H4z7~kUKAsGF-Pv*j+lfS!QIus6XJFW!F zC0oc$-qr|$=hHPgNCwgJRrfX$k&@@L_9rG+NyO<4L@Gi6PREWlc&|mUO-hyMPiJny+SyS~{;VxP3oL4n3_z#`ek>Nac&DBz>8Yg3Z8GiVYUi;Fq zzG#y@4SD)87>9qr&fd;C8B3PJqfy~LL3=V}pJOo3zRuIg{~s~xAw1bn+dX(wMF>1^ z4)_CHvv1~Fe%+E?cZz6FdNmlcGmA{#mZw-?AZ$M7XPI?n4aXGn*QHDD?@Y(b=b)b* z*m=4-1oqSP`2~&)BjrBM+J`>Z=ywHvWafS{E%H?1Wl|V;Qac-*G-%O?{=A~KRa+tVVj%U2(H%}0=;-x?IKP2h7UhU&ms!41X z>Fa2*zfZfvatEs>f={AHvJ>AcZvUviWXG$*>b2>567C&`+k3OZRCauxG}*(N)sho1 zs)3)Z^tqh8J0N!j?!U^KS!X&Cim8H`njURf>=`Uc6dG2T3pL?ASLuEZwt8B&(>iGU-y1HDTdg@nqj#=I1+oJB)A7Bk`kT8jQ&w zk-We5PGz5}kj*{%+I)5({Kk=CIV5}2qc&d6!T*MZndA6^CIhf_s6JEksJ0`sGw&7* zT15N3@H2a=8~|@IL{55U-iV^XigD@n_S&f;= zMw9GC3rO}W*&p-SNrka>t~o(<{anK9>cHV?t#4_ zuu9&aWC`siLQK-{jpheGL1H!8rjekHNU}m-d-7E!o7G^F%w^S$P)k<4IdrY8&ytvv zyh5kaV)ib~t`w%8}xD^Uu!=aXws?&r*ju7UCe*q2;3 z$$FTbXJcvn1vc(-R(3Rgp#RiHtS@rx$=2N3=gI7ym5W=j@D-zRYxbPPIuDr7yzAUl zSy%}1EAXT>h95_o4@f=;3QN2{h|I}8S{={E_%2V>vNJgQ3HQRQo&2+=SJ!Fb3T+Ky z%XiuR6EW^jWAHRlp&yyD!}ldxmFI=Y*fIiF`(Vna{@$X^?6=(Q`_w&8J<3mL`7tD( zfcxE$t54SF#ok_yn(g>oSUP$7D;Hw#nK=3`ujwSBp4(z=;W_$W=2jQ*k>Bxr44j|9 zoEexj*g1(F$vcn=D4WI37Ygw{`=&dXnJ!_Sl6XFsR=L`{-hGLmj5+7)bpzCsuP!+e zg3U_yIFa|X78AN-*902pp3*apsB31P=U-Vvkx{*&xz~gG?GKF!c#x+?sX*R?1)COl 
zurEZK!Y37NC-JqcL%+bex#s^Gd)wnyGKH1VZYBNhVzumi+~vp;@JXJns<`$NonO=A zXy_(0;sh3MW?uL=sb9eJ=U6POjxUFQf6?F}I5Z*ECK?tcVIuu`&iEhgONrViiAAX@ znYHfi9sL*HjBxa)box?EdXF`aW23C*Z|K_(@Zuv}XbZIuFz|S~oG!lYfPUg!Px4=? zjcHg_83wapw4F^_(Df>ke=1si1eFy$qlVufNxl_Q*)?&u^OIwDIFxgbs1^R+>-=2f z{X(le9k|C&4-x!JjMv;TiH>d9qMCC@!7;g720<-r`jV42@pZ6h z^rV>E!*>&5nmsVdTam2v#V{#t{o`jk*0#lj^DymrcrRw1wa&i}GxyO->h8|-I=O?6 zh2GzIbu>&D((f$e(I~yX54ASDDm&@3cPqJal0k8nK6jF5zV_D8rYeM1(z=KdChL~I z636!86RAXXJJiZ(`**g>Xp_B@mtyWp?`n{%GhWSeOlE`WeK>12DU1?3wYaM=8fONb zHBy5he~Sp6`m?EVoqeuljF`zWF}^U;Y;*OW@jDeJlW8L>Fm~eGN4V0Nt^W2uBhx^P z`xGw67i7O$`|n`XGqh{V5?lRlXThUraIT2=JsaMSV=swSSFrL7)>sFpOGWm2IC?6Z zsk{l09@A_4Ny$Jxlw$3${K71NLXLqK;}qzGb-A zwJ>|j9;fV&$S%Z;d#MGtk0_HE*n!>$Kq8gJ*7)uZ?Nu^A$sVJlv~`py@UHlDyuPj_ zc``g7%v;Xj14Ch-{W%S>aT(N8=WZsi?e6bfKgl?79&BHO&kdwJQ_BnaOrD({ikI&d z7@4&a*&*yr$R2P!l!F*D_`DAJCFIT1dqy?*Vb$!GxXyiKEvON7tirwFadUD>f4 zc-w9sl6~s$U`8^P<^FgoY-V58Ydq)=ay7lS_B#wVY;yl4R zmnE_yeU}(@ka)F3e@BxyPm~_i!k4&pA1*Za=|mVVVEu>4ms-T#c*J8^k-cAE;X-Bp zT8if-8^(~rn3UYqr+A%+a5R1=i_m$Ff0~^8^CkKDjp$27lB&*Ku8k+PzrwjcVq&T{ zri$D<`aM`r*@rop@8r1eotr(1r}FpgImo`8@ihC+arfZOvwkX&KUqaa@`sP$m8V>( zyOg{fxdut@-zM6*(!3`X+M7Trc|yJ_#P3ynYtI_qYBYX_HB%q?51dXGi&RFbhd1S% zIh1b6&Qw)Yde(1p04ED-G8s%EabkC_<4TesJFC7F&xZ4YTXD#~6Iu=**L}-$fM8l}sBw_7)=#CSP(C{U$bVgjwow_vP3B`dgmdRUue|W%t+S zYTh!NPCcM-gShmwv$CV^4H0J!;^mQpeeiEmTf=eP&_V4Dt!VLZW zLH_S>?++UM%_d{@Fy9$1$aTKC-cMTj+3#kS-$3WAhe&;mtXWLnuk3#xrL{ae%=q{` z~z@P|+S#rl(IwZzQj4$-kgAs)W&tOo3p{Zuvh zTP5~6NIdwQ4dbQXLf|p)KEaS3(D^S7{`4wi^BRo3T@Txho1f5mE1!7L++jIZB{NSa zv23^q^R>Cad#t!Grl)dtbr>AU%30gmn>6*DzfKFOrkwfAXl>uj+K=(o?7HtI2Hd9K z5?I!NJQq7|KJMoFAM54)mCR+xb-GF-oV z(<4=VN;&%+5&RuqJcU2iGa9B|SSktqfNh63Co5mCcgC4yNnY)5FtHnyljm-E;Yu~v z#~byt0Zy?`E1uTXjNw*Mqc80yu~K$XXT9-sbFh;{y0c-Gb<3%3+y&mL{gJ#=i@nP8 zmBh(0G%NwzYw>sy4RQsa>+Uj88^G(j`Hlad2BSA&uz?PJ3YJ^s^(=VYBWhj8g2~pB z-P*~!R$TP$gvA%LP9NVMPs-C_T#?-0>1!Q!WzWmmEO!qJJdamT8L#$-VAifS;JsyB z!+t=^mD)H?q+EatmymP=P7M-KU(x1n5vU)lB-?8{h@NgNyP5x0bYwCMWnWZ$AsOnj 
zo+1@YlV$2aQqIuIF4nxrXp{PRM?xlhKR)vNBop8sDNkxitwk>;9WL&50n08tNrn85qT!*@mKO?A95c|t_+_Z(4NEF+mQAI zlK;j2BVm(z<>U3Tj)cQ~Tf|6OTaVeVo!P)NBU5TVe~gvmp#LHn7inz~k4XiJRIMwH z0a;B~id>uZxu-5xawr~(bvNMuRrIZm8UOOIPxRE@Pgs}nqp!#^41(FMo;p|~*#1#| z_zFuD!^y0DEGK3>%xdA$c|3f7cF)~{KICnUAqRL>Q7c(R+gE?@n6+I@@7u_f`(w{S zAysnfIJ+L3)qwe|!u41s-(C%`MdE1@_ADo+RD^dmJ>8Gfxt?$AjJlYWJ#dNM=aN1- zY}RW(tA5`X;~vubUHtiXcs6(bOwl+`CZ2)LOcuFJ|*$ zj}^8JL#o*XY-XyHmu<^6c}JX!A9ObuD0c zy-#yb`Cr!g%@z4#b}I*wtX=D*-PA%ohCJCBHy+jtoizeVr}Ko%owJ6FCyTEOais-( zPG;TD3z82KNs_(v8eH#K@bty_ksN2Kcb}Xpsr~p9TrPD~vMF697Vd@dS=XP+?TuM= zqjBO_(IRUMlanHQl5+>?Zqoh6|I%M6XQob3b{kLh-CO2Abulk%msZg>cVkCj(?YNF zY_h68F4R(1^JQiB3?4aCpQC+}T(Zxz?sMYIa=m8-;p^rEQ?NC6dpBXpf#gm`_G>Zq z-2#Wl(I|P{&tdnUVfCHk=g}~kQ)|F4Gl8$kwt{5K9XHUcRF^M`(b>O|r#|bTHHF-{ z##l@8&00wA{$!2b38y~LI2xl;Lp7@wKE~ahFv*k7hkgD6M6zFEfX|bcXRdgi8QOoL zw5t$FYLPe5CM(ply8JF}ucF&Ge6lEOB)fl4kuMpmb5(Ey(%rjY{R2G=NW0tNwwzW_t?oAd!8)+1?}0;e-_tMgQ+~6 zO3~;{TzZZ*Yrx@g?aY8w>Ie15%^qTEPiMU0I%yYF^W1SDZyE;otg$@9&lXlm?!Ub; z;ZP`D$`3CjVe%+u3_F0n50G~m&b-BIzc4z_bKG|At`f^CU`8AENG7UZcvdQ)7iF~< zSp7nuzs%y*p-=|flLx6f-IkKzC;j9JXz{{KB^4rK*Nx=g3-hisQe1>_|2X4LQfDP! 
z4H^t1QSMSa;(e#WxO^eGlEZR48=cE5^5kzaRBLFb6Psse-Wy{4v7}s}#p&3PT~p_= z*Dhx~!n;P{T2)rhe&ppM{joG2E=pyGYzLgp4&SY~kj%l!jq(8$QrYlkuahTWuvThA z;BCyOgPas4tQ!4PQY&QQE?Fb>?nc8TxVnO>WfK4gVfqR+8jy~L-EqEl*D zcgCd>^r<@+Gf#;AiSLKJM)5;{Tqto>|A2-09!ompo?l=_*z< zapVSPt-;YMg$Uk6|H%Y6UXO#FTc6#s11fpwlNJ*0S~he4EZr zf77cz|6UJ+3ytopM7idL`&X&uxz)IFJH6_Nw4Y;JO_uzV|L0ltFcIhh-`0i1yR5!I zYfE_K7`}LtR_2mBIbcii?K~M7fOiM*oZLZu1kR)B_Zpk5!PHOazR*!!pgW)3GsVog z^teS_`q>zoy!^BDa-}|pz_bRRDewKq#?$4{xq&72@orDHoW=N*Dh+vZnmV9O@hmwE zKi2k8XLaYne>?MLtz<9pa&7N~RdM6q{w;bM(YKpN_MCvLYKF-wb@xOvRF4*_hj$);J4?=*8#MC4{q!9++CD91cD7o zS=Z66XwVNYl4E%ZE!*H_p79^)ccLS+@-%geSBSq;cwCYHkG{9UFZZfeu-7@_%7tV~ zX8T99vI{d(ouV1+28d+K`Awc%UQpof0C6DMSbJ#c4_3QK+}e(zx&OZnr{`$nEEtR+ zZ}J4+gW2oh(}VT1$EQBK48(^YwcZa#sg~DPEQ$5 zdmdDoXZ7WK*&mQ6r>Qi0BKbz@J*)fj3};NiTYoJ4&F-$LIB-Hif{WQZ6+WJX$thys zB+@pfSF*A$@~#H&c^$5e{iMQHo(uiR=gxIr4}8d4dz zh-dv+FZtbGCG(qf$hAc!XZ{G&=g4}$@0%A!%;bUYtBydA>&g6%8OIWy(-l%#TXHm=Z({2qFk5d%v6~j>`S|62Ro=$vc&=h^v6-@ifHtF>?? zJzKJSW35zn>^iKvL6o^s|4W=-inK@Iduu(GDO{87#SY_nR3AQn9Tb|;qakVjpkoWi zj@CnJM`aiC=Mc~O%LFM zk4K!7Rp3L#;6&9y7$1u)h1w?3JM-aWq{z6LS_Ey#(ng!9E17vv9lB*ck}+r)$#Nff zIDC%9v})wYv$9-4uE+5o$o?)_A7{b8oxQJEQ3IQ^Z}V&|b@zWLj4q?wEYjB@ee!Hf z5$oy|e5SqFlbYyRf1e1D^$m#pusyKNUjbMZE!HVpgHgb?oWvQiQ&Fa(pU=H8dwk1;0G+t?t~c zeaTf%Ry`(Da4XE5$=;o5dy%tK5A#d?_b12Q?6{rRA0tBNK4Wruee9c(ERxmn1GV=N zmZzfkx%8+j-lm4!BnW3!L}%armn^xaS;6xf&_C6eQ?2AX9(Spz+zVejkZ?5HcH-+> zjWd_A#W$qu#H)+K_-$6(B1){%%RBn~T?^y!B$<3_k@-=0k7SR%_-`situg;kZu^Ef zaF5ngKWs3xj>eVmM1l%5I@c#<`ADiqY-R0n^!Ode?{!qJ?6SJ-Y&d6SC3>YHBWUPMKj??=0K{(cXc<9Xvj&e>#q?2FZhJ9i@=>H^DIxH^qAKf?D9 zUeyk+H{!@g@H`A&cR@aJKG(Wgw_BB@N8`w9Xzg1VA6v8PVK5uu%)XdV9Ahep%2{`x z+)&99G+aBmB7KjIH_>4;=44Gy?qyb@C0U<>kJrVyaz>f8bWLW4?1#9_k^wmTVcHfjPM)n)rpdaA*0j$Knb;{c zN}7@`I~?ojuQrx%&_njh?8(+J$e1&o6rcI~KgVwt6O)IkvdA{m-ywWE}c z1G}{3@!j>1+y`6XnAzsf&`j>R=e%D>w`9-R7Y-dUr=PjgN9?`^PQUv!Ouh;>hnXYf zI{zuXe-5=P=yV6RUhck0;^k{Bmpsp-T}kxkxw}Yv1a4%NOlmsTV85TRqlDO2PJGPs 
z&l;@U2w(Pd^sNQnPlw=v@XZz0!F()tZYMfBt0`adGa1f_MAIN}0c@T!dL-xL6!H|& zLRFrTDsm^2tuKBgllqh7ui>nUq|LqUgV?(kWa>laEZjWJNPnm^FQLm)ezcbKS@}Lw z+$w`VSws7Qeh*`bB`o)sBRQG@6DQqu-P+O*qgLvSosy!dQEJpj1jkr zzHicXe|T;*mIloq=<}_x%QFA+>P6(f&H;Ca3C) zB1k8^?=9YDj+hbdcgO$F5n17X9iL9#{?0u0bclTb{nTmi1c|KN-$xHKV0EK!YCHRL z8odmuKcE+%?^|GER+{W`{3k5i&uBA5FI}PA2QuesrI<*Odr=E9`X%Qik7H(ZS#Oa$ z{MozkmcPjYl)9k<*(XoVvI6)tlI5vMQ&DCSR?O4;y&}SgeC<3u9gG8a6+G#A9^FtI zsj)YLe8-9~&5h(8XtvQ&*^id>7@P3DrIrrR!}GBC2{$%5Gvn}qj;W~Mhv>DLeIC>2 z0(RLdt{yICl_f`VjqdBac3R9@-c;&M*2-bDIUi2@x*pB7)PV&FULk!CGOdG6E3D6Y zgKp3or`YU%U$u=iX?B#ZK1#>A|bABPD?knL;NO=rK zVcGe({3@(3f&L;tKVe31EZIk|L-6xFysgJCwnIGEGHuC!l=oR*+Ria~E;oYLB-hDS zM^<39iV&>|pNi0{z<;N++kD)qLhodBN(JCl_nZ#7ReGNw0{3+OW~gRO`OA7a7l*29 zHFav9;gPAzdp4h(`{8-+=q^MJW*wOuuf@QCLd%+bVPO7(NUVbiFQs?n@ z8YXv2oP{w$NlkeF#9xxn@LeOy<2dyl-_5$T z`B>G=c|G(qTlCHI)~pSCMEjYUXXRXWst+ddPE0M2C*9%oKRUnX^M6QNlSa!NQ3*@_ zGWOm_qa)3_ay9!MDUZ=wYIY`j#0pY{S3C80F`o7kr5Etlwk&mzR?3ketH+P?>U}eu z)M!m!iaZbb8h4>(cd^kH$3F@0)ZuI4+f@F{j+#61tqsJdvd=KD-XrT$ zI=19RJ@n9!4t?p{S&tP+bTlrdw(FmC&DNXjNgfSWf!hSeO+V_ZD@qB+u6K@P-V~- ze`QABLXT51XtlVVrvW{2J>Id9{`K{^=ZV)5blAo}KgGFE+3yf?XZ`%6JnSsecE{h7 zv@)3IcZcskY~GM0)7U-hR+9s(3cr1go(+sBU9j;e$Pd@c-lWf(hOC5o8)y`kGgh3UNLKC!nBf6j`|RxH{X5)+-ZzxXp-{}+fY*|k>E$nr@cmR-fO57++{ z#>N*h{&=x0chEXvK`q}*b#B&#Y{KtreU=I)|Kh|mj=KhPUS`SOn3CN9xi^(e=JRR% zjN_BtIJ<$8KXL>v4d#h?O8h)VrJiA~SN?@hZSklxE9E}UB3_d!yV(u!Fz+hsyT3$} z>G+>Jx`(rUYA4=D+p1#NaPPXZ*hfZytchQbb=ilK{8BIIsRgFKt%c{vP>TMU(IsE@ zNUsz9HsMa5B|mA@Nq)u`jE~3BBKKNf#iXwK%gV}bxRbmD=lV?rqUmsdlD_Rlo;UgA znIb^;17%0Vy=;Af9vb<3KRKH@caWp^B}rCRzp3Ty8OjxOe5jnyPG#W=5GXC`X196Y z!qvck3VpW0jkP36?X3&7HV6~4FCtl?jueMi(Ckk7PGOT|mO9$`7ieuhkLcvQO8le) zFUvlYJkibSCW`&#SI<5aGbuOj1AV^u3wtOm7lK6z4~4;g!>(InM2Qg6KrE9S0c_FjLY z_0E`|YJa&tU+=rht}C-U?KHjCapd8AY6ZQEi6a;Co8(A7RV?4LpP=RczTslm?Q ztcR)WTOX${WVxOGmeOxAo>K&NcalF9p0gJ48>35qoc)f^X9vRFth^78Jsciy(DqE8 zH4MI!c+!d5J%hxf9g`W%MA}}2O*g`+njSMAr!r=qyzgtA>(89%+=>*UQ&zQWybEJFA3JmfRXn1LnV 
zh$?%sA=UC-7j5+vfs)xSxhKzL!?yhE9=h~qsYCtk$Y$U0^Ub7t$80BgNmKoAHuNXx zy&0d$UWDv)epByfvqW^boGrGKt!tmpym6w!YupmnImMJVZtH z-2%G3{IsX-jWlS$%d%GEW)e-&&Lc2;1LD~WmK__DMfk`tOD~hLJDD!Bt1UH8%H#XP zBFz2TD}gV+Lt=$b%jvNjdFqN({f+3wdGIBCWs*-z z_O*FM6SBNk+72ON*Xs@1f50|tlS=^;?d zJ^8G3&t8h>w30n>x6)!8CVflB{=Bjhj1MB=_aefLc(Y3HS!0_OvS$_e`Gg)uXgk?L zN}CZp;qO97W=zUn!rUdyHQGqOU-|nO=@#I0))U-dyvmr}NPoStqA8C$gWi9#NHQSL zF=NX9_S8>%oD9p^_%1%ulJ%SNfotJ-z0dB&pyc0p3Hq6V9*Mi(=^@V+!uwq!z>&(mjW%El86!th=Gt9Io?3t6liN=RWfL zi!ZQ#p3-LS@e(XnLA4(w^2GISM`eHdGHB!~el%T|h*AgYX>Zr3rEw}z_eES|~n<-yijsomh8^E2A*rY%Lstfyp#JkVQN5)6`$y#y|h&>tkUy z+b6GRvp-Lnp#9XE&I+Ex{hbDhWEj{*%HrC{n#dMfoduE8=yRH=awZgaVc<|BSh#RG zMqfqa-wP3=$}ShR}aq%_Qf~?Rs4a z;g=zw>xbLvf1Gy>H}i-Cw0JU$Cck>}KD>)PyXcbpkQZRW2<>I1 zG$KvgDI$g|a*XbpB7BkAu z#L)^cuZ)?gy1RsZKci!EM!djh5AuE_>m~E)Gmg)5$2;`*tXObAWSX;aD*G0pV{_jY zVVz%`_YiCk_W4(OY-L3MS)6@EACsN+Ux?4-IWLPPsnDBhwEsf94uAfcubrrc{&-!K zMyXyk4dYk9I8HUwx=cD*^=;7F2iKB0KX-pqQ|yL9oGXK) zt&AKO@x*)KnUzt=ywMETvtKZtJQTLc()_EwR^r6_zU@MX>`$A+9_@JEH=@TVpWF?z zv$T~R@Ml8sG%{XHzpO&p1d%b`e^~I1y4d+XYpiqpQ1WG_Ti@B6u`w&RGDgh6#}YW7 zT2t%jKg=vLd0J~4UsAndIhpeG=|&jN!?#BuxZb;?obw_HQZM>xqd-;%B#UZxo_B+2 zd7fKCglndanqL1%hLX;i#k0_i}3Sl))Jxz z@#Ceea)a;k#3;GUzJ%iwxOBWTv+imF3s1qk*Ri!C>)Z~JBI4?B$K7bY_YB7DX31L} zeIiS3z~3Fbz7>o=X1gBnJ&|sX!K= zdXis`fWaYHc%fGx)96V0&nDL#?KNYWqag8#NSPg*ss2z!eC>@*m+;${jgCe6QdU)G zHDz|NrnXF;i=IV;ur<5WC+MvwRBra$$5?$3%s1+_B5l@?vK)MW;rTno+WOd%)!S#n z?{cGBuIQ7+a4J8^)6(oE$V&Jv`na6-$u5!#iM@DnR;#RnZnDiD0_{3bk9QnikYJW` zk1s@-vFy-TkGVte2OBqs_^r-6RR0^zT#7*;&p%$&e()L07qez}D_$kjZLVo6uuS&K zWtZ^}&dhzlzGkI0@$n((C6iZHpLfLh#G~hVKpQBI#JpCH%1--n@cRn#m(U@*5^sn1 z8@|mm$~?Wgojn$r=O=UA_xzw54IVRc&MkQP!LS;}>+*c;7oRkuRW(1)!lxT4F5o3M z@`GuZKSXRlicFuA`*e0b1fmxicdp}~r+D?Kn6jNMlUXq1S*~_YBHvH6=;6FvlU+sj z3(dx|)8$c8rh3Aa{5_E}*LiQ@Y4&5MLe3Q0bb`jIyfn1{mx*0(LGnACYwXh=WUPTf z2STzqKD5+R@_wgE?d5RZOy?0~ZHBXB=($&cd#Uo%90T)=dbw6|cWWFu^31F#AIkH` z1|mRnF>0Z2voa&shD+(6)!x?_owA!}wq9S-YA^h&!*aPhKOb8UXPZYw)sFg|O#jMQ 
zkvr+{8)I%J;WoUlOP?C-INCR&RUUV{TkuunLMxui=QFN?@QOtg{y~>nBB?|siBmS=WR#EuPd-? zM}MCq$qfZ^Ka$h+WB63}v-SDwlt_+W5i)(3=Co>}*Q%-x`^prLK-{8*QJpBL`%ALU{ z9G`gFn_upr?L@~PqtEOPcohy`I6gU+lAA9pfwO8o`_z+JKP%vh!!*12r{IMWW2YPULIJ zP0~_aSZeHS!O{7uKx0DHBs$L4DaV%@*gLkTXub%2Em@>%00ou@T55= zWJOSN__Yybk}LE{c+Az#3s`YIU)fiWVQ+Ou-ixnCi&d#<*a!FXJbDs+Ud6=h^RG_crd|;;5Sn*FZ0j`yqD={=kTz*!)SBElG-1tQJh} z)cU@(PZjC${B;H^RDnZQJ6!GfKj5_<%5R7=&Bd7=SaLtSz9GR6?45l;@A@uzDMlK9 zoA}?_@vCW3!OuFaw1Z?XKGB3+9r@i;{P|59j>Ly#{}}@FyWnyv%~Hc<9!_0CzcX1c z^`-jwJQ<)<<#0UNuEVUFzN_e%cg2y3xSu`R$NO8~D7K1qSHmxJiXlE};mG;0uFhV` zp*$CsBg|7hF58+=H*Ffb*7ka)Ft^kG%G3tv2JQ3nAJW@4oiQQmx$yksWki3!#^M zvyJzDo49O5&$GB;Y&e@PSCI8s$9IQGDeax2|Kl)cyI6V<9M_8Y+h~0zzO}=W(e&sIKhv9t-p|I^`aE7us~^Ey?3GQ_tDceLGYJiRUwo zeN||ZH81nXF_;X8`&)|DKO}usF*+*@vqCNVFplFj4b5np;p8HC=3dXGc-x)j?}g$J z808+}8Gb8@g<12nnHRkxYK_6kOUXO}Lym*>Q1~BN@bi;da}0hQQ($RU!PVrOW9YgU z9CJ7ObLaOVL-va7Z2 zh0#MCox8Egy7&^drk=$rzMUC()*x4)=@{|EjKcE)8PiN~Z*`u+e zj%SBFd)~9-E43l-6BXiD$v&B!NbALh%n>%gs~@ayX02JINdA+@MVaC_p1Z>}{hp)! 
zM6TMnGt}R@d?@(?GHY4_`*-2CoOQF~ZW)9}V!)r+mRg%r;B*_UJ|<^TEXj4_e%QGP z>+XQdbo?sq|9@!K)F}2SJvOuXI+%>WoUE-2XL>oW8mY@0dmqA|>#*ZVcK8~jcRRNz z$=AVj30qZg=H2?vz5iA4*aYF!ElD4x6)@H((Xy` z7P8$%{;p@!670TB{HSRBKh)W=<}CFGi($!P}0hjRjdVc`~eWcknq; z@+k2kSTEvzd2%&{*Nt{w?kJSzMfcD!eOycbuv=o+9DI8dYp;b;GF(g~Yd?BrjdChD zOvAx0n4UVzSz}m2`?IuplTSV;!KEb0O#dJ@Nj;0?rJ1PRWKzt?aDq?87wu-KOxO3@>S*$zeDUmd>q6J*7C(I z&|B)=PSN2(cxCl+_FcVB#{PJ*n&ocP?{GX?Mx)2DWf)1Ch?N;Nk}W59Rd0jNkq}S5 z_9c9|Cv^VOb1JD1)9yqV6!(6#2=b|@){BjkC43R@NY2*OkG(-pHO14^*C|4#HID8M z>r@+BOQKCYC_8y>()JwRtrriIv3Zrami?uB!{vWixr$70>Y)a&yN?zP*sckW>`1OB zu&bi}Q*$yoL0S}ISat?}3fVKAxfwS$KqL+)Kq|bvhJDp& zwS#S^;@Nh;*{>0A`-0TNS-KcI<*v`M;`R0B7o{BCj<20B8q6~iA1iXjqjDW_g0tq* zq$sYPN0&N&+v_Q3r!Hu6i|5JV{(3ErRX2&C?QwA^RNEP!_Q90I%*;#H@$)0;(VW(^ zA)4#BvD%mWn+NZ}Wgwv$FSe-(-!!T9{=`!3!iwW#iQhcP9~M?-+F)~?_u6k}8pC4iMW(UFkL+zKPRa>nO0M-ps+J@sIG>67BNAjp%?4Al6@33vM)V3{1cpenq=D%YLF*N(||EKo@+52yn&G`2CMtL!hG zqvbAS&8oLswRo2APlDJ}TD=0=J1}b_-#dbzW$tx7iDqki&zadJl3MgR=2Hm$;*36+ zwt@fF!Q%t*^#Bsg;Fq;|PU?==fIxeU>*%bkWZenj+S*MH^4!TinHSTCJuCP37EX`j_VQV!p|2KLkZ^szCnnuDOvFtr4|4YYDXjw|z{dvN3K5b3nBeb-^ zS=mX~g_OAy^tg8Cv3fiDWL;?XFxBEWv!FTGIGGjDW9WM!n`F<*d~=Hyn3lXe#qsJk z_HC(;daQgt1bWkR&sAX76dvTW*>qY!n{T1DKcC8VeyY&d^jYSer3(Jt11&f9Q-KAa#n!lIy=&@Xwm|>e{_ftD_2> z+{i1BcXU~JuE&iTe6qO~@=8bm4x{7<_D*Xk=Yyisu(khf3|VXwfLF*b=UKR+;_|Vq|bcbnZEC9aT&Yi zE>r4qWF5rmz8y!d(JWt%P2R!SWLZr{m#=u*p9PqM^q^$n?=rfu5KBmh%Vo9#% zIzxUZugRL1WOzuH+%qAQRcK%9X{dR@nRLv$+2XjGHK?fwSB1vq#EDc2|5L0_ox=KT zTU1Ng`*>3!uC&x^+Pj!dwmK$r<^Hr=fLqyzSd7H0F(#`;laaCyBsYqpxwbq9uLk>@ z{S(Kr_I}vVnzUOU0qo0 z(neo8rt|o>I49iawt&M66^H z$sULec=BlBo?WgPONw*f738{(4{T!3g^tLzPZe^v=JFU8PAoG1J}pB*cbWgm!6<`4V(?i@(vxx+x?b7j4ibjG1Z@$0eiAK!nY z|IDb1i*eUNBfG|5B=5I4J*;5mtmqqyFH5!gHD1rwRtNIE>)h4Oe!=+{)9*q2e~cX4 zVDSsh5A^*U==bNBV~w;~pM3+)whjgsW;h3+y%ie}$UG1i&D@nc*n?8ix zQ2lg-@kp}NAXEIM2MfK!QY$g<3;kV3qmg)V3M~$&OLDqqEyuSQdYkx^3Wk^ZNe0M8 zSavag%07Zi9F^R4xm)}e?%eK(TgY>pGyj42R(D;y3~@q=^|&e z!2M3z=z-BcZ4D2{+kE2G8-zH5vfN0K>N*HY^_72R^DekFD;7Z2WJqka4o 
zXO%-)E7t+3!Jezx8e-20MzMGB^HHrH3Wpi^ookOr;h9|NXKG{5UcsYS;We5R)kdy} zHj5o~SbC<{Gpx!R3q!(I8#aAN9t=XnOGxgJM#GbnB{szgCopE1m=x+SZ^Q}4hd>=OdarQj=y~M-+_U&`* zlzi98R?r)QFR|jk{O1q+TCeS_%^g~ZQ+b+o5q4b-laCzL2v468XR?F)GO|7m^AX0 zzDv7ZuJv%@p+9;=7Sq)XP(0W9zi#K-w8bHb;MqQ4emPO>I;VU^_c`41$o!=FoiHXDA) z_m^6h^&FisH2JU&p-U>8Wry-B5UUEo%p#Sho>ti^aSoDGs=F0=pF zy-WW8MD$B=A#wgX824u3CNzAK?y*%Bel|_p50K~|aXFc>JBfs&jIAFO#-WUGwY2xH zpImjEq}?fwe}OFXSm!B!r+RmszH5ll#ik8i^^|}7$LMM0Jl6icx9`f9{vdA<7u9<6OUvX9- zHD%|=jGoE-ce?oWG=w%n?{juK%l}5=$s~N(P(bEWt<=DyRAjpk`%^b8&(q&?eVe^<-VEHlN~>AtlzZFn`((VHQf2O68n)*j z|FGdY^MuM?Co6kt%&P{e+sT*gaYNXw5?|Ygf9C#BDkW`ahim^dlF29ZK}>>cDkAO zr-pl~7GJIH!SLE2l27o%Jx`;CLHBLeO&zKJte#bj7n_0f<9At4o=hoSo&P(38>01d zaV;~fTt(I~qNbYkY;kVS9;V;0q8G$k;Y~Bx@59eB2kxrp+Qy3iz#^H}hCyT!FCE~! zD{wkd@LJN=F&ka2?^I~YvxKT-DuUV1nj7qdZ#C!sK*Mj@_b_^Y#lNnB;2@T(&U+Wr zYB26(u2P)6>bb(2fm{FcZFN0#VUglm%!>Dkkj(yr%6#N?uRdhqmB!!cbeP5#$;wfa zPi^r(St^fZ)yG*cwMXuUU-n~<*7hTi>kW_Uyzx@_)z$WIbO}ShG!Hu8_}&!0m*U}C zF(g%ElTq{!DD-9BljxM{!*Aov1g+-zNUneeLt!V@Jj8OfjK<|yG`r32#Gdo{*H|9> zx;V93d*ifvfY+(X^g25ot%YaIYm;3sR}teO-4?cu@GfhoTRUb1JLkIZF5k}Kl;kURqARsjMG;rx3F;2R{)n&cVeC=<^`P zZK6SGkuX(h-@@^%N4OEIQ%$EB-)_eX_SI*eUS>r25idTX^TCeDj;;DyI@t`ioL<-S zj65kF#S4!o)49AU&#l_|ybwk))yTY>tJ}3XgQ|18!*|~z>8hY75 zm$`-c|D)c&FV=?D>oMdVe+R)VS9u*Ampj_^*y8{itjCb!N%j`hT8Y2ypz$BEunBgq zqtV@D-&q(>-^7j;&d&bSzZ^ThVCNzj(oU}x^tqF^bHu^!?0>O1cq&cKa85PvH`6UE zQ$J$$Jvj+t)A~ltpCFtZH(T_Tm1W6Myc~A9lkgK8Epk>R{+7KBt>NBVPiOJ6c@P?m zBgx?NCF|u*Y_13{px5<|$Q`)*^|mJuZ0=EIcgWXpd)ln$3W!{9bjY*H$+&SFeR5x; zHsrJF^Cprd>*(8LIm$VUe3mC7edwNN%{M@?H6LBd3&zvnel3r}$0PA1`Rm&9_Y#mu zA483M?1b5yHzwVSq#OG3*Xdp#ZymZ)$3TP9Ie$#gG5^zT$6Qv8|oGM z@(=^i8%xPD3x`p(AB&o#S_`$t>^f045 zoAYnaIEqYIT?=h`HLlEQH$l&I}Dt6>0Y zTcF_#^c<~ZU)X+}2J$Y@(6VLEf5y#5xZE=;-JZ z-qHuKlZ?Z!fT+)5V6s+nBJ))ezohW@arFELj~V}cjDuH6`dBUH%)&_5zT*R4!gDo} z>VuBo)qNN0ztbOW^F8swix?MtZ4^>oI~!nVJ%p^G-$UW>c~6{z#yNVt=CE=aSxzI- zGx^p;+y4|#W7*g`6#W-O-|4xZNb8ltNc25kX&)n|<7s0Q&-IC#8XGZH({uOI8=Zlw 
zf5;%Rn`B&ZKYRNFoxx>EdVh^KzMh3Fh2V0ok8q_GS$xB5CcDg)v~>^Jy+a2%7gpUk zXO$7mlk|{xsJV2VJV@EMpEa7pVCovyavK}k#cL%l>zm{|j@A|#B`r|%5WPTGNXeSy zQamSv*};YJME05G)J}1Hzo^H*3EoSS<0?{rioeT#h?e~Jo^zZ_aCw6o7eMC(J^1;! zT}h%D{X9&^8T0Jfmzq32OHdOJ{xDn|;yvpdRGg`F_H#Z$*NxFWll&^c!}0uFax!<< z!ZgTVrRAJK_?eWelV!4^)hc+C>&Y=IyWc?Jbu2Z$YXW~))BQ$;o+oD}6Z_?uLXUn1 zX>V1-Oeo3qL{iIn7zxSyLf<#^a+?1+zd098KhsNg<0ZcCckp}xj84_Y822uLrH5SYM}GUW z@w{>FODeZP%mI*gIn89Q@<89x)85INk~b#V88crW?|cZ|C;HE0g)w9^+P|3u`;lBu zoBqr7tb%2oe}mep7Cd`ZN5Wa*+3CtaxIWLPS9pr`@H(Bw?q`$P?N-K}^q=E#n8>d}88?~Y3K;*+k1=)B9G#*((s zZ#&q^s^)q<#>wnsnbH~OKftn=xU(5;$v_*a-K;0hDW6WBY~yau82#jlUSyqJNzW)X z7Xp%rC^7cW;JFrPC#Ru5Bl`wwx&u;*!tAvy^%NSb#RoSd#pS%rBWlXqM0?b>hre@fd2ea=DdE8sI-sPH%I45#4=EE+^tPeRNjfvezRg=F{xOs9cTTocjk!dqH>8<@fyDK=6IgpPxd2msI&AnU zR&%i5#QjMe=bV9VkJ{w9ALxl1G`$y!JD{SHr)PRLy95#kZ#W9;u=wkg+oqMu%5HK! z5fi@E`bPC<9G|@CiCWQED?;2zAGTCSHlF|?P{Z);b zq2JC2U&?L|#9eD;!e27E^u);nY~@g&FV|ls8f4B9?kjmUKNNzl(}D`TTp(_yu~3&_Y%m%JU-0SdcRSbv?J4N6Hz> zyI9XLFn+QBd27y0b6fuGBowq&GVl1^=;2bmQXM_kP1@@Kd%f9W)|Pib`2l=rCt97& zyETE{O&YEBZzl2o$8ZQ$#Nk;?>i_M)YnkjPv~ z-b2T-r560tU${7vMBh=<#)7Q&_x}zWyx+42!(CZg31+*qv&^AoH%9}APJW^aB$b`3 zt3BC3$;4{Ak;W1SeHY(Qn@8svJ=C`hvQB}z3ea8G&)fQw ztj9l3UjM*X1LaQPbAHiYqVHCs*CFa?gUV{2N*|b5GDoTNgu={5_914kUo|?f!CEq( zw#3uPYMptmym4oRZL7OS;5Io*N5cGNuJ-U`VtqfxLnL}*;?iG47u)E!q`O<_;1N=M zsW2AlM6&zS|BGz=FPhCv!yvY|RQV=ttLV|cL(xulzLp;D=W|xV*Bfjm@x2!F)Ajk2 zS%r7w(NIwbhF{>-hvDif5--n3CL3LLaTaA0+dP|CF?j=C02$eXyp|vSLhVg?&8yke zpIZ3Cc%p~aeuC58M(ta8&C&3-pZ5MCr>fC5r+i|*u7%&>G&2|CvdivacuD-YUr4zn&$dQCTn;CX zDw8>mjx?6@`!BHPZMFM`lw-tH5SK(;|FPA=PmwqrgV(Zc0)of;85>L#s>sf9y66ogtP84)S z{l(}?R=Is4<`i6Y$6GhZnan3_qxYuDbY&GK@P8!x`4NI9`Mp=4`4KKJ(@suB%|-tf z<%UE1xwO=n95#6B6Fk+S%WXJ&kBqVdelJ(r@;jFpqa;Su)x2FvR1Bk|E6|lpix1Of z_R1u3_-EU6 zqsdlN)Dy}5JV0NZ?8rA2e0L(O#OBMXE#9Rl8cOj+-Jm9WbLO*&@#Hp<7l;lXh2_if zJDA5!w3jE0EH1^@E$Tc6H?8qj85T2MXv9}NqQ9BO_KLvZ6||RJdNLcimuwhMl1+D| znyWcylJnl5okg!`dlocLgu<*G>`fx?YdINY){xsT?C>oTU(ecKSK>~+#};UR8@*kW 
zN?eUgX`=+1KjtfvNhPD0vtj8$sGNcNXGv$XyE$!imXew4dWI!*a_u9M_&1vJHZfWG zCF;BY?#A8!TkzXwl1Ih@trB)g2UW>k zSCdxO&_psp<@58rQ4)g6^R-aS3x02eSWSgFII!pLRUop;(>rX2q?`1+ zZPY&vW%bm&1Bdn0os;+<@-L%@r(?8I1FCO4MW0AdSu{R`t#yfA}{br*o`83Az1YO9mC+XdR zju(v&il`y)?^DU}1oiyv-iKQ1L`N-Xfqb_{F>sWko z2>C;q$!b`jmGk+b#EZ&1_~A-6R(nU>Tn-EE+qTi8kE4~>wA$bG%pYu_%W7&&cA}it zNM6Harp@e9Vyq>i!G%2J^>|6d%?3RF1tjqr4zh6J8Q& zCOcM+W?!Rlm&oPW-93b?K38gh{&6(^7b<;d!G>y+N(~yCM<=~;l$>w-p!#W4UWU5N zFXhB!O~}o@n3vU&nCK%5{dZ#e91XQ^pe=pFXYPFoo8{PbYN?NkWNE*T{2%kv%k^=% z-pd+XehkBs&~B+<=Nx;SE1A{zHo0kuK|4*l4l#^qu-6>?p4o4FNIEmD50c)~blcyZp!Ei5KbL-Q)t41xiH*^kKJx`9+N11n)9 z`*}}y|4JTv1(|0pvlB@@r>5jI+=k!UN+wrYqWomUG?u()`{`voQQ!aHprkU*Tn~9U zwOijaSCUdK?POhSBg7?c!|!}V@{+B=!8cmln-Vby5s>pnIhTbc z0{CpT9mj|F;RVLA@>`8Z8nKrDpualLw|`;G^ctJXT5z%oEN3D6;4S%iU&KXawB_{r zLeI@d;g#^VokX%PDr?_anak{Z^0L2AC)q_m*8et8RgJ}^zdw!UrtoX+{U5=G=aN+m zsJRVyS%IvehHbb$3MTRfURmpB!u>lWnYG+=VdN8KX5pc(o_``)JxzlrEB`t=+G!{I zGU~gQ6|iDz&f0Kd(!R+GPbRxN*!qv|cGlWowuyzgS`oy8eqS#*k*>W*!JtFOu{@ zF!wbYZ$$BKCAzWQR_r}0tu^rSG(3I?laYstR|T^>51}sD5%eF*RZ5y#yFjHchKDk_V5KMl|jocc6pAH zb$O05_+6-u4!F(u|8-VY%{V#z=QV8SOBsu{v>QHqiZxEN_PvX2V71n|kv6RUo}w z!QPYaa3K`*CaVc(+pdMo#Z@q3z5pf3&9*lm|CVpn$ak_a?I$#s)3BMxyT(&LX>(A) z2V|}29!NQczZkE?OmrtsLn3kHWZi9abS_%IC%s~N%gfQx6o1L@60dS7ik~x5?W$yA zR-fqp*HGG*L^|t(vl5nELrc(+Sde|t^}D_(^%qA^2dG$ure>^Z1*?37JzTBD>8=$= zQAVbT5Ro{erBIi6+&LMOmvmio zLK_|Vj+0<1+4eG9zM6GzSIw$sOtf`Jlz*95r9?!ok-i9mlE zKhPE>d2{|8pP8-99+rO!-wuHP8NgqwaIf>C+okeM4Cu!_OUPVZ50exl9#~3n7-m2S4@D6=QVvA?5=nZ_v zxqc?&YMK^1u&O@%V&*QUkarb&OWdMfe0n10RmRf^|Lx}nqbg@#IzYl=5_<=)CDq>Jetxta;}$yl2<3@fG=siN7O7|zZoQb(i>&bWjs@`gU z(ppP;$*Pih&P1!qiReUFe33>M(9Zzh6EP}dg}ggvZ$zT9Orf*Vd{kCyA5(ksYaFDH z%P!9I)K!UX|G<-{muY~T3haCX-R8aNerT?Qp3y$-&*t8A=O#4$L@KXn?>=qiE$J!k z=Cnl4nEl89#rQdud^5jQOO4I6bQD}(Y=pc-k6y&5<9V0?YI?qawUJPM9xUAqkAKi` zB15gwD<+3SFZ3=a(;nUe?%}g?GBGnk&p}dSb!B$#IzHlAPrYJ{^Qk*4@wWr_*|nQU z7CrU#$&^}^&a#vI2y!XH<{$NUJD&2MQWVeGP5UXon|-p^G43uKt@r%Rfu1ZS?D<=q+KT(hVbbCS2$#@7mqc!9zh$_V(th_P 
zuKlQ?T;YE<-sW04$|=VQ9Z1dDs2C>q_fk;F^$Px$FI=zie^8PX4tv^3Tcrx^W+Qdk zLj4td`v3ape{hHil39{0l3IBlin5KMh|dT3RIKp#e=wUsD}hsrqH>RH|CfYw^?&~q z^<=mzPHH98TdE+hJvAiUd}>a;k$zC{mNX(5sWVZ`D!!C20E$}0XAvw zPlkxI7()w=xE1VViXp?x49U*ZU3O=3y*Z%Eb#zy9Pkou{U?w)iFlHElj^Gf+FoAL4 zo}&~K7|nFfW~R>PEuBEpm;=P5Y zD$rDP7HyTHw=7irNN>^B1}Hg*?i%1R2R36f(c1sk+5ejw8}5(#;{k0D6H zHD;1x1x#g9e5TrO_S8Bk%jS;+%Fp)eO0d+9#`22O3+gUGO=iEUbvR{etHn&G?wS}O2^XRzi3tIL%W)HK%c=4-V7 z2Qrg(^K~sIK!1Yz?1kc?q@7N3Z*s`N&}>djz`Ekv$fU+EEO$DzSI{?Q02=vK(?XU% z9>!}0e&8dYULw5*TuT!98w-+20;wD*%)!NMzPQ^nm6f>}H%VNWpdNcB4^C!h89q+( zH`9gT{YFnLp#51|J(a&%>;AjsQt)>ca&^yNjiir@WQNg|e$$fq{Eh5sZkTdwA2+TWjThl6Z6US@b2&a)jn9eQM) z?O!wK>{`#h2Yp4fvNw5;M(IC=Nsa?in3t9#{k;Wla(E+wxekGxUrBR6++RsgRZyO( z$Gx?eY5WfKa;FkOUV^1gckehTo=dl*={;egX3_F|gUT%4WP)Ti$s~Aa(yb>*e_g#m zWB2k>+rB_SjziaG9ht(NTc|h3Pm8&laLV(v@|ISH^S%$OEt}~6MLKD{fhQ6oVVY~1 zcn!L?dG1&Gnno^XD4(U-ycm8%3RTG@I^VMy`A*(FOLqIdR@Iw9igDwpm45250Ickff{ zWXQ?X-Oa9rx7xUToGwpB_e=lXLYacKCSYMPd|vY3eC(~SoCwdg(U)OjBwc|`Z`KPu z;!1+AUkf+S@%VYs%^{IW?mSOk55duBn(V98^^mdF&s&9_Acu{zakL5j49Dw__^++q z$m0_9Woe?jb`m_|Vg4xrM%t-8iRF@hKAz!WsL8>)yi%Q1zMFwPW@}A?XAUfQ$3AZZYWv)iht4sNbLcaZWqZ?nQ|0EWDQOjgssSkL zLt4eOkmRWyJ>3xwW@u+V&sD*2zAv;afQ~uF75Awrv}S`?5i~X@v23o%HjDWXKBa)I zEL4Auh7-v!b(PlpB#rjoUWi(wH%Hdyqi;9~*77%@f3qPyAv9{?pqi&9tNTc`1<&Cp z=&zvU9P}jcQC@S$=+QdS=a+cOp{~-T{1&Pcf}^|p^XPA`y5q&l)5)GANgE3usTU8B zLs2Dpnmg2=Bv?6=(;ffK^ger12!_`o1>H4QUpAV>3x?7p^T`O~TVttP$UeszH=*=F zwbf8tCmh7ad!lDe!FyLA@q`S$SN%D}_yS#wDR_z;7Vh9myh}ooCEfW6XbMN2QBq3_ zrC55>7si8c^Zj+&O6tKRQLe=5vJrhd+SjQuHk%D56>!&1D@oqBu^@$OJe3r^W#IiS z^){x7Z1b)P3(eG2g3Vl`l?qLj{sfu&a&*I;#~tWEB{W&p8|&jlH&?sm2oM3#Hk_A8(rg2y4ipKH3si zts1?g2dYLwe;YApYv2^m*GFF#g_7{975}q{1zu0DV~w)s`hTUn4NR8w(oPWgoKgCG z*e_A=oJXp2g#IO4l5=pSX@UA?o-BjIi6oO_fcxrS<57FSX%^u&;-!ZYgO%GGXOp%0 z9%{0PmcVrP7tr>tQkhh1L=zLxd$K39D6$tUtk-T4IDVTp{-DtcC@tZAAjf$3Lvu#% z2^3WUGOM5|8`0Y0unYWUbbpPKTS#pQM4U`Fqt%}U`iW$hpeS<-Be0}yPf)NdMrG^p z2<;D5YPkFFkYqEyCIR~=k?$==-bd2mTO{>wayXj&-t+!(yS{a$=jVEBT5rVmH_o5b 
z+pk64-Nq5;!ut}f&Oz6ACICts(;p2V2YBjhmUF8~j++XjlXhO_lNh`k4SdU^XMBIS zC!WVgQyzLHD&EoF$+&pZc;i5{{iW0|ka@Z1hS7A6b>3$ZVS&4yO*5Rwp4z}}8+vMj zn9O63`kM-#0 z5*D8%Qa|GM-#p?3_p?~>Bkg4?*FPwDoNqcChgHdL5g(j%L@n|A0=)JoooqFEv*5pH z>GhBG+zcp9`0DE9J_z^U>xFug)%m3S59xo6!aoWqt%HI$X}TLfw2#rt_cVWBp%34! z{+pn2CTa$eUN%~sqwXZSIv9a zvXfi1d9Hddr-O6p;srGg^7LX-8iv-bt_|`o@gMk12)oD8K3<&(yYr;6?63S!cXwxy z%x&cUizlyvm@~;a>7ZKCRkUyu`AsjN|3LmLhk=))avMJTlX)+-CUos4WzW*y$8a{- z{~SnKOfu(d=VtY0jPNY#8uB(nP*5J~tHRw$5OM?zz6%?F!$`Du0WChJrQs~B3hZRz zrXdaAz!pxXpo>zSdJaPCDGVp2-_h zdX5Cut>p8Wc=!ooyQ%FxT1&Fja=ynWw^3^aeldQyJnna}#Q2Uih0#Dp>WAU;etQ01 zsd%YsT5kpy9dOpk=Om@BK@J=7n1ekDZ{CSclf0_5GVj650@6H^>^rHusFDeg*xTPT zmF}y~ebv?n7hC8!+gEOgv7I)CLSeQqzQi{2?vtce zmDIEoRT(d4>*)$I&-f<2N=CT}^ZyXKGtRBcS7z}fTaP;N8|yrkWthB=mMS3O6xvRb z$5CXmqu|MF&~7I+4%NmbkPzJ`F?}X%tND~e|2Z_CU|5ws6Wm=Kilm&o?kBr8d>%uL!J z2CF~w1;2atK)Or6@*k4UR=|}IaXpDXZjK;F5Zh??bpGUbnCeYiS9puca?L!x=^Z${ zlLsitMF1~k_NUE-JH)aT|rNCaI^?j*Fo$7Bvlj7 zxBESjHFZ{^Hw+%6H#h=j&M^vWqc{Ez+7pbS2md=wJ@2xqDWvxSyu8l}Dv{oN&o=eO zk~gI!U+V|2TX2?*&q*`%IbXYH^Zr8KKKS3RM}G%~?&Kp9Z2c%wn~sKiJ=L(#)*-%U zGtE#^xr(LqP&TQKEAr5-P&R{&T||oC@&+HXoQ3F1sIm0M{gsHNoa9>5KQ;=65@Pfbv~c5BjbemG&lEoSt&StADPd|*} zW@=A(rR(YL8R)Ku+xKzX4v%-!?Hi=_wC|-oxf<%rdG=Dh-7SSt)YT|Y*uQK+-HRqq zLc!w@Hvv@%vUo5r(-}Q;ar!Y@eq$@&l5(~TChf~c)|W-V-DF(@p3bAUyfxoojwZ9G zYiMi;%AREtr}8i#<12FzU2(gaeeFi`u`F;kK3*oZlS%0UlwVAaIcPXjFP{a})y5g~ zAaagpPock-_}Z!mzFvup39g`pn|Xr-8JtPxf3n`!NHV={C-T|>!xy1!3x01@$CGsR zkMZSP96vy-zxrMmim#=yjJSSQ_D57MEqJnO^e|PK`S__oHra+=f<8XL?LMfz1{(4P zd8ScWW_%OWD}g-{D(y;boWU1l!%#vGCA|N3qrjt}_$}W{kXB|#KcVFXsLy*x<_{Ah zC4El|Xnc<}=II|wYb~>SoyqfZIQSO|5?psZZI(hy0-R=MEi+^BsYxgJh`%SGun+zw zC_R`49>z@;$%~Qtp7+TusLAYS2~zCCS0o|%rE2a=dMBu%I?I{CGxUPwqm(SERCS*+ ztI$)enfLrot0xqAoUZTatcJ|KUF2^z)m4Rrj8itL@d-V^sh)nD9&>E27L9z$E`IQI zEwZ^5T}jt>Eoq#jr5xDL`~8=sagLEk4%nSVt~tz~R3r_3{u}ps*Ull^ljtS^YVvN} z(C3;s&HR1RpyW7OW&>Y_+#GMt5svi23GMQbZ`DXU#{x5wJ5;&6TW8BO0)~r!Y?aiC52CUPhz6A^at6)+LS10d--OFOhF|e6H7C1s0QJ)-zaF 
zw(Wlld6n?{5^qzSSNNMH=a_g7eca-14{sE=v#grzHfwT8{Ii(!=AtoMA^$-4RxLlP z&uzsL&(uOTNSA=it^CA9h&dfbcF|@TG?wOD4mIMAzqyIrPSe&gWRWm1HMO{%tP_m* zD%38+ZD!wx;$sxtenTo5HU10<-PrCVntanwj(z?YuAU&xP4qYk&NgfNZol{7=@Isp zjo~9<;a|$mhmecV^d0_o72eQ3)8h9ea|g;ty4zdJkGNl1?{_hYK5Dk?8NJG4GS4i0 z559OMFVeK2t!(P}oTSrVttz}jHQ*U?>~aICy{AX-?|%a5OyMm%Lq&7cPow8U@Ore7 zM-KJ>O17(DD09f!?41LJ>wHcqgQ11F#%zoHkXP;E{~`33^@WmpjD@7S0*Wis-Nm?$ z^;V^iyxr&E?5|4Pj)r^b|9n>u(l51TDS4wG#V*HsVj*5$!QdVOLQ?lrT&%6gEN*Zaskfv6XvyNi-Jnvx?@^SF zTk#+XtNNTWQ|O^Kt9n@-pP{xi41cUQUGClCFLdP{v^^d#@MOZ5`~V@h@$%=OB|)A0 zvbvq_zKrr7q_KofulBhwOy-ElAi5poQ&t+6(_k4I|Azm0vEZv(ve9Pve^cLjI4f&M z4o^eDXnpb=94>U_ZAe%Mrw#QYUDdJ?l9HIND@<3w=kcy(HKijR=D2wf`M%nR`d<`x z1J#(&&1+!lHF{4Hq!YCEE&1Jn$9+**v4G5raC(BCB!MuNXz@;-r8Zti;HR~Emb!m8 z&R!(F%nDV|i>~4gu2FABG_yTfQY+K7P>XF}#@nydQiA$-#^ayb$!5rG5xtQ2$}Ho{ z=*afxA?)QI&mE4x9Iv{Td@~P|nXK1Pb0At19%4S!RpF0M_U!$9?sQoBkmNRz)OX6q zzvkflhf0;ge>YY!+4UYUnC&}1X)(tfI`H*L^)x9J0RChs3hIJxPEjl0(KxUyx~1AP-b#ec@VC(QJaFq}h2C z#wzl6f1v8$I0~Q7k?1yD-&5$}W})(Yt+a;7+ts}rcP;5RM~st}yrmXiB&|0{JzF<( z7;hrl??=@J`kn*9AFKNbd?bijU7s@_kpSLR{Ui+P{LV2 z3RQ;ftlr&-m#oEnyai@AL}s7%9JR`>hpa9P^L+u^y9y2(!^%~x z_8-=@jQsku_RmPS5-D`|=AL=W1h(i&x0(O$JGMexNd6dj2U|yF)9l)7Lsb@8IJi=*_0SRa%B5s|(|(c{bwLFe%g z6L3BVW;YpOwbUDC#JQID7)0h}y-}{gQQqn9QA>8gWEa9RTAR*aF!Iktf8lxJ`dJMW|pK08RUIrts2iO^ku)}qNVFUs<$^T-k|?_?502OlAyhf z@qGhX&Z4PBLhndy2zwUSWM?#JaL;b*R!Z>Ovhz3fUZwS<*?fTf_SL_4*RNF2Q{^Dfx4g_3uw5D5(e@npO5*1P zNqJVEQIBl0D*P~;tptY?*yADSKdCSW+SAxP9;qU2oj|rN*v3HRYw0yFfTrR6Pj?zm z>bcZfmXBY=$CPKkGtpf^uR2$K1JpbU1&jHGwdB{&s5fa-lI*7=yV!`U;T%pa45SQ7#Iqd3Dx^BP&l!VNFc!<|{3?_b6Th?T=J~5Mb zIYMvQn~wL!#R=rH#+`_!3!Og;<+6VSXA9T@`^Q}3a;TdkcnD7X%HnG3v=q!+NB zIqtpX+3UT*B&g{Wvdpa2vvBhP&F6^nO06Y1O7Qq5y+<-R3L6wA`Q|aa^cJoDN&`K~ zeWd@-(fm5Tdn^n29mPomIfSJy@c%GAb6-f_NveOkml^Dj(Q_87ty~x%Nnto%;$`yQ{y6`ieluOIj%A+vSjWH=O5iSys=IMrRxTk1P0zqi~WSVHrJT zH&BkuC1KA<@;ej)G6HFj%DF5eM{^J0*%DYNbIO}oQdWNUq@mA_q8hF~Z4`I8ULd>R 
zD(Mpv5I*zJ*>(OSe;XS1tWb?1qggne0&{2cH@R~w6ddUtpgsL`EZ9(1+v@UfO+f|SL6uv)&l#E*^;3X+WyQnAo*4ENS`ot?qKZkmYu%Cf?ucWxo?wSU? z$+x(855jg7q?P@+??Tx|m_LMkHhMa{8zID@W+4CZY30DTs4Ia?3q7q0NGRF;105C4C~S5 z1zJh?mnQm|@+>X89WwIHe!Gph&(Wshu3ZN?U-Gr(_=XUdG!ibeeq|FQWPdlKTlNCqwxzUTO}T83!xvwO2!{ zy;#AmA>Xyw*@64B8ND#Dt;vlCyem`m_80&t$6*VB)JH;W$C*(tyIVNW8``YyUg44L-2eSI;-J( zHEK4|$Z&r2Kd|)~$u)%1%*4%7ChsoK;-x#9FYvpJybt0p{(^`3T7Q&glh|z;8Xv{c zjr7nNM@y8chVw?gt;5$rRVCA-s^ZM1%Z{7&Me;}uz9V&Im<8*4zFX2*{^jLZe>N3_7S9# zfWOoHPw@I~q;|Dl_LBDftajUUW$|K?aJD5Ff~m}S)J%fR{d#gou^mO ze)e6BD`@ft_lwivOYHe$(oIOF*R{Wif2&^TKkxI@l@Ps!2H#+t)p)KPTCD=F{pmVM zS`ut~oW3NX*$2?^ecHK3>urot61qCJbSL_=N4htUQC^(`jH2!${RQ|;V93m?Y~h{X zM#uSd-j5e}8>JbiE%a_UgFX|oBkKjjf);6 z<%G9=htGM!b4jk&nvAw;H-Wv1(8Ia7=z)gENbDT+zmD_%B%JhVPm{y9IKG`#wv2O{dk%hL09kavj^jTe?3bVKCUh4KB`>OEN4GbW~Z;v+Vv2; zg2m=QWJB#G;Z;r7Di!2cTrK0se|TXeQI$N-W+Ov9znNDUNy1}YIfmD~6+c&@s*c|k z5IT#bfAQ^*wvF}Z33=L8iM#l(^~#mf#>?#B336I z$0o^P!U{fvnxu)yPUJ3Zv8CSiD}K5O3YODyU$vcDn1NUbali2pN&7a={R?S#F?t`t z+d-}lMt@d35=daMnpY{`$+cc6NNV*P$*>cBd|l|9`?9o)Xk?+QKNR|$?AUJ#?+fW= zRbd2{5%vh9rmPrFL09I_&S0l`-+LM-*=Juv*@f^JO_oGe!WGws$`yXckz<(x?eV<{ zB-TS8{v!Nkk5)p6r2bAU?OS}5BHN$*&tCjh1+J15G4nNN=uMxT-U@3&Qf8p$l5DL)Z6oOXV4ClU?%jT~BR+|;vgVm1 zkvT@2HSoXrl&=;t>@LMWlir?$=_Mg)7}PJ&D|PhbXr=a6OHL!+reC{XYdNA^TPu4~3}vtG zJKD-B>;yD_ul}r29EYYNxLn~UDbf>MC~L);cl;T3w~=~z<({IwRxIK!t*pjD30(C; z-vs5JgO&MOOOKrq;lZwEC+~}%>aYG$r2ReoWUp>QQVzlW^XNX+_3WN$tIdJDR90|u zvTOp6`2ufnCv;^;$Sjgh$fTjJo~OP9{QrQ1UpLT@AJ0&}TEPXK!aSW6-Bib}nBu z%5w=;kd*M7(0>Syi=gRX__>+PFLvc`(piDVKUv66EG;X+IdeG4^@see%$oDQk=~;o zOS*>C(|>N^7qUX}j5a%yPbrSn=ak|c@! 
zXmbVIeO|BnB~OXkOEFKZz=ACq9{hv~z!%Y2&pHj&hA^p{kS zk?89@>m9IjChfdWHr;9eZcmiukCI}jwNF_!C|&S{1IYSS9(Id^nGtj;h-A}U0zC3=sUpX!P2gY*VGeI$vLOBU;8z|eJe0LQ*`V)9dLh{9U zy@ORIeBN{@IhJ%2kTkp1D#FP!6y=0+(gK~#Yd=BOUx~B*?TQKm#6#gS~(Eb z9^$vEv-)_njW|juixW}356Lg_X(%ZqS<=hoc8C@de&tT|X7x0?4!$>T=x4+=kd{7E z`xTx{xbiP}me%n6DEwwDdl)1osoosDYQ999v6HIgH3>#a;(Zs4 zjG*PK(0CquKMYzD==Ut8lg6f3O?|%-ZAUH8T59dxt5% z6?Sv(?NHw04%B~621z0K6fXATZJyNPEcD#3mCVUq%);vW{-*nbwR0J4CM98Z5Iybl z0N!xAa#i)-^?CMzkXd;`opkIP2NEup)lMYxr8Grr7@ucWnjnWhhbpME@19pj*c)XiA1lYT5ADEA}x zq-<=d^@pKgAWGKK=xemq3ni=2_X-WP;_~ z(Zbs>l5@yO@Ra~XAJF||nEkXcs!aOD1nxV99+#l!K)mGCPj;v0Y*S~t$jn%8KiR>5 z3o6e+*LCn#tuQvLt6X+Zts&z!Ecy@f&g^14QYuPwm$0G@C@V=a3H_JA@i_@rn>{2f z>I7r8o$AaktDK0wQn?A_o*naNlJQYm`dfclfqZjTusKvEl=Es9^B(?l4(l~ox`O_a z>OFI!&3M?_P@c8R>~uMoa5Q@hvX9ms3w}Si+;A2 zeo)&Q@|{5!*-!frs((S@KJFznQT;-R-`R3b&V8ob=j<=mUzBatgPU(?bDsYGEpJYH z8L2)?FKu~=kKnE!?oOfMj7$Ggwg>Gl)_&3j6w}fb=($WwkHFDWBr=%ZTGB;*Wn1F= zW%7CwI+7})7%gX&bR#S#=t?&pBis3_b;q2S9as+B656(9~nu8@%mNJkrn>CP(BRC+q#mFz$014lme|ur}6?UOjB|T z z@Lk==C8yS^vFxoVxrA(b8{u_l^-K8fqNI?~@awoNLQ{Ro{=tG@YQgJeKI}*qpLv&Q z=sOPTG7q~HokNXij`i#$8d$EMD5FRFj6|By%LD4Dr{%--Y)Px|4TSxsKRt@gCG}WE zyuHb{4P-%imskQh(_KlJ($YNFd^X+!K7T;fyRg=SH=Ib<2|2S0qEEtaZKc=4Vs$o> zr~zN2^#C-^#_LwxW(9ndR;#1@Ezh1==vPmKgQjMT+pzWz;kzG?n~>9GaWGujSB-IB zVmI|jJF^={z}_u-^y%8qtVstH#t%N?dwb>2fvO8of4N%Tgx$$(YC1X9Ro|0p%v?hP z{e6kkr1_cYxfjX*OVYXz%?VmMhmVU~i?XB=K3!rIP#sDi)mP*kR?eet=9Ly1vt=Cf z5xr#uaHsooVdQ+R6>;x#5}QkQ_e1eid`yAp%*9-Q#=d0SlaxNA>m%r|F`Lebh{u)5 zn@ryDCP3CY65mT-oiuw%4|S7gi|CnNfy9%@s|zbgDzDq2I7z}v;P-s?crxrIm|W&D zk}@^>8lN*}u8iVkkpC0RCed(`N{`?(pVF&bzz*7~y9y+|1UHEw@B@1L({@%J7O63* z@n*2|DSFqzg}QFUY0j%AooxaE?uV`oth@$9UZvf^Ec8pg();dyK*ra=Vt;jI|K$Qy z*T>mW=%1;@@}%CCv~ELvOLZp6=iX%79&*2dtxkHDhgtACC~v1183Q$2$*;6l_b1Qc zsK3O$ocNna%P;ws)P0Z8%@Oc;C)~{`)U%8hFIN9;deY-r_dYOo6|0&>CjGTt7f%Cu zg7bOM8*s9ap4!uM)~gfHHLJw?>T^ob%nr{a#Arg*orlXU&~haiRAB7~z;njvGa);p z{7qyr9x}3*W>-OechSsd8ovs9ath=bc93`y-_!0?ww2Z1NP4xJk~nZUdE`uFS#n4? 
z=%0MbKBzfpKU>L~bkmnMyO7K`ywRY77GC%LF4n(NiSJ=1r&^NGFOfjfo4mqi4uiFv zwrNOSBl(ODZ0G`d%A0h~uq0sHT6I){!o)o2K@JU-xSbvncC#K2lG(U~0ZeMTZKUxm z{7iwy%L}8wH()0Lc<xp`hIoi48@dbR;!PP6!wwpFDgOUT` zC3~*3yW(55XQ$Pb_=@GVaD9~TH6Uz+RmluRU}B(f-t*JuBGo@)_*^OX9&BIlKK-yhchEO@m|Y^F7D_bXW@ zDQ{;w>_iiLsUzzFR;Q^9>qLZsLS?UZeXB3-trCW_#Yx7YD&}k>~ zd0MTTA>k;r*}e=X=@jqlM*!clMFd$*8P z(r2w=Eq{Jj#kFuMs0*O^q3bUumsM)R)i(3{g$8+fGRTKbKB?+M^G9fzmE z>~!+%0dL2UUE*t;i>I0Vz+L3ji&Uq>OC8+Q)9Nb)n|#QXTS+uC;)8jYa$3K#FqX*f z>tWi>DoWirykYw#hp(eNt$EaAE{#PGVf() zK=^zJX`t<+^yoPUnGsNS;>=~GSMo3kr&-Zhr~)Kj4+*tBy;d7Pv%t@-O!n3ObQERA zHPOjh@sGFQGh@NN5VJ~OI>__I=xQg4Wvx8>I1^FiUH&fXx*Peoj1np;`#t*hg24ph zdk#XHlU?2-`k?1A=u4D<2i2Qk__3!&_IcL0jaTf??__6ID^yp< zRf1#xh_dpYitSy@6ZBT&c)sC&vR0B`VBtqLVf;D zUO&5~`omn#B=pp3PW~T^=TQ)HlZqx?Cr|RfKUCiAnZ$`&?EaUqc&yLyhC|rzTKYOqZ&1?{+0XL@4L?Jt zi`l^jI8L&?L^6u6NX(|)=+BOdsp?K5k3>eg)0lsq&)0i4XZ%iq#jn+xULs*U7m#RU zC@6x$zGVFpK917Et;Bsi<YM&JFejG zvWjyZ%J$;LR^Yn7v3~>jYtL3+&}tF1mmBip1S>74*UR(*k z32A>VAJj%K^c}jV7Cc@_JQT-u#&u6XR!!9YKTi zMCQ+1^NkbyyaeM_p(Q5*#*yhKq?JVU$I{k!yjv^UOxX6e?w8^-(uejdj3N^Q=5sR1 z>BR&9pRKkBJ-1z%^?Hh#u>Ohb2dm+s!kcI{ULduPN5eSWWj}CP^gc#L$J2chI}G>K zB_!U6-PItuweXba6g%0_V)|>ruTJp%9coAt(Tv894dW3@t!Iklc0Zr|#!9)!65YB*m#Wwf3XQ4JugH;<8X!0XYG{aL3N9kW z?s>mAkk@F`?v3MB>dJgl)^neO(&O3j3igoL601ouyZ%0AuQ`G7EVQ(DCwu&|?`#Qc zIE8Ixp1!JUyn`P7Fp}w{Y*t>U>zmF-K~b{KIChfJ*mtCuxr=GIs;lHj=z7BE7O;G_ zp6w_IpN+fwykTa}q(86RPn*|iD<@2n0;4;*y``ls=&mW71PU?Nv=xitN@)Zx2m=HgBao0 zt@xK>Fp-$0#aLkw&Tc2pv#F)!m~euDe#U+?OE3-`}; zZ8Iy&cyo)gQ{0=V)}7>jDEUv~XU^09XRP~d5~v2#eNq1`og}Kkk4kp&I}gelK;#=_ z)0stg_QaX|R((D)kynn_$~rA>q1ko(!$V}hzA*O4sjg=r_h6Le?Ee7uW|nUm#0-F} zDeBCwpo~4&lWa$K?x){l$oosWO2ml`q?SlmRbgs73s^NVkL>j_$AbR!;Ui&+d{WZ`O_0<MBdIb~JD@UA;xb2_h;W^Z;4@O+3ag#@ zwd~?9P6DT*F%d0uCO-3+i8ME`;9bvQy(83?Xkn|_PAi-w(r4N`ofJB{m)!{eR!92n zr11Ne+)Aor8;sR~t*mssL^9dIFqd>QuYVmba+>J{f8#Ch)%ve6QHNA7g0ov`6onk!#L1?`FxJJo5`}Wp7Tr&2oAkKo8l~ zn&@Hgdv*)j>KFR2?_st(WPDG@_u^s^eiE9#0~xKua}h1i<~NTa&*`3ftw2S4qk@j? 
zy*e;5y!>w#8dxh7L%vhf0GWr<8PsR&vRLdKrb|K%g0qtiPXEuSJ z1(3MdxaSi8KVj=Z^_TcQpGKCE_hQuUOX3HjWIkz~4FO3+8t$J)(?h7*lNMr{D}!0s zYAxqPVOAS@(7{{U`VYFd>CqFNJ3Aj5LPipibkGlE*Y_f}S06WRT=~=eocej;zqQ<( zRVPkb=JS(m?pxmS8R)&mZ}wRJoBf?ZL+japVsE!$B}p_s1b%b+s;hc-`rE?iUV5IR zQU5z{k`&$N&`=k8t;%ox2k!%T*dy6$J8hkd)@#(8S%?GZJE_`}3Ojr9=A(5W9W-RW zx3j28kouLU@($L9qz)q8>X0`bKWD*2qRr>5(!;!D&U7VmZg{BWTMP9(PlMax@M#jP!{ndiQoRX?S7p>v*-*nvw?>Ucn0T0`i^s*j$6We`E zzgrGZPpWkgdJ?t!LAo2Gt*ld(CBL!s@eGV7)o2oi)luu$tadKXTUUKazBZm5uR+yG zw4Aq}F(kPQ{`0OkSwFa~aD5^Df2Q_tX?2I%TNoYmL-$xd>`z*Jg_aVRr;M7C?6S2_ z18KWD^p#daBG}YG{rhxsAzAj;Led_M!0U5pI!8@Y3W%HwC4M>99NtZlSHpxG16K+wr&16WvI+ymHfEJSmQH@_3$6P!s*m47QZh zHA%N|7+og@(ZA^RZQ4H4^{mJzDQ4otP1SPdvU=0^O1gQFRzIZQq_vr??ReAKFfy4` z^9Fc;Z%Jy}fEFI&N4MiNd&N7U;4nNMga7QR&gr=w>PX~+8NQ!Y7?}qD^*wtd?Ik5g z6&Ab6b6IODf%iqEo3n)blX)USWYja66h9~3Bo$nWvqSKa(a+WFXDbSm0%kw{J-Y&K zVP)}$7oqe7XiV(sA^v9->TdicMp?%0U$BtrB(oGAUM8Qc!Mv#6>{I*~>+H*0y~dlh zr?aB&CE8r}HD?8QDL*(6HC5o?6&@;kNQa<2?@&pIoD|z*@sU$)i*WHgq%VN1ye~9# z^)h9CQ)gCsR%`2T_cFS83I%uLtUAo*WaumSy&O0F`Lo0Y{SN0JYx_7qnWZ^XtNTFL z7=Art`<3iw24ArdM}L!iUAXL}jd{?PRlX9aiXVBJrbL=OL;yB?W0E@<*b z9DfUGo$;5L-)psY2k$l>hm&Dx8fwma@GOPQ&Bjjzi3|4w=<30tMuZccjU{pu9DtE>DJ{xACn!`Uh= z_hk!fNuVrg{fhoBJnI?gTtQxkK*MBhWo_dpJ;??&WPNTBD?A)0_53E;-9+E#@m-7H zbr&prM=urG>tI}E=I#gbItD#UA-t*5b&S~lX1`mtybBkX@yN3aJZ9hXy%3)0bcfUH zR91L6Exr$Lf6&bZ==u?bNs?06oheG^%+PyVe>a|~~ls3iUAXE-gj z(|-1Iyu_k2M{_)y8}d@iNcdFro{PI}+RTb_89aSW>(fw{D5p7tww`p8V)hQ)CRyi1 zl1R+ux!S+k_j_1Nf7YMKq+8XUcmcN>g?7>IMiNP6{=^^b44pUedO2y9m`GW>}-gh>A9T!T}y}A8@mZNU*Ix3-71pAO!W67 zq0HSsuCK`M=MC&R5k9h)@+jz9gz`0{^D4R)@!*@`VyJen&}MsDSdN2U>iB_w&-tQ^ zH?pIwFDqTfB5sD(CGHI)hmB~MMFth&^d{&vobzfsctl}1>YG#Y$B}uSFXfEw}J&MO-K9%7GA;b%^AvIi43<1_vMGKl{q7x%;8s zI4hm=)t56%cfrPGsO!KszfxbkRQ79+B%w1{#NX;Ft<3RSuR`J-QPqTW$GX~=)$O3& zb#Qx@&n1+fhMUjzG>Mwp4Ik~)`x0a?a;G&b>7L=9W4oz=9FxH}tZIkOro-0On-#P!KpjpPjKO7eZR zTth!_AS6%4`NQh3iSkNX_!$o+XuCJ9SAv<<=sHPF3us^*t^bSUMzf^s@Y?GBwNOyU 
zy*)eN))nOS6)dbl&(SblkM6&v&x`|pgV(Hu>`fN!NvxYTlAQ7^ek&uOm+9a=@>oS) zSqr)nem|kLiqtBoy7JUmm<-O@w*H=PpA_lC1 z^z7Fd$9~GN!iv6SO?I8X2je1ptFylP5e@%ILWylKlBLY%c`nxGQSOabQ$~r2bW#Tn zXTjlcJ^KAT$5yD%=w_3Tc~H0R`{SKo|XCSr}RYMfb4Em3Vv=U5P-t8@(G*F_YhGqUX)~>R7tysHH~y zS|9ek5*;;Yy$Jjths&|*{#fmwpuLi3CaNhp#}enPp4Kv^&ZzQL^mJ1yF^B(Vxjk`t z1{%)L?$6ptq^*itE`^5SG@n>wwQ!Q?6`5n2rOr3dxtfLjT)=QcWml4SbC$4K-Fu>R z521s5)U*cIS-%=f%75@{Ingl_(ssgG=1w}WyGeAG6CT;8n{i|I_?$u8t$44zL%-^& znkZOEW~cC9pKI?I*N@iTC+HsK>I5=8fX(LQ^UGwE_u=##iM%_PH@uPU{RY<=Bh}EO zj!FASI&| zpNzCaQ^QC_lLjTb$Vhx5MMFb-(ohk;WweD-lw@a?k-cue`{U*RzrAn0d%a)J=kvPG zd7Q^N*Y$ipuj`yn&b$aGhvUWu_I`%`wZx)Y&N~bjx3R&0c+eDDJuAi}8)Y?r&2r{j zQ0nE}cO4nYNB>KsjcjlliTImhwyj{G$v+b3*02-6-E&mM>CGEB((mE6~k zf>af}{S~LrX33OdUdb}agMB!O24d1I$5Z-v9!niX$NtX9d+Pg1zJrEsNqv={JOMsO zBs!inJ;^u=2fNWCS&d4HMA?^pBpEljrjz5@-93aServ=|rq}8E%d_r2Y&#g@qc9teTUorA!cKEVmuh^r1}*C2V&1Bi*HYH3TwbJM-jXb3 zFZpYcnaK_CDZ}Pj0ltG)skL>vpHj5_M~@TvQR_=e?)>!M*8U@WK;4C zFC3eO5r5LYKOQzCclLu!p;xl#_Q&^R7nw(HlILlYCCS$aS-ps^WO}0vdTjwS|=*gh{F*?N845%+p%Td!mjk z-QFCky7Nkq{Xi1*XVF|rXsFoC)aG}Zz9W+G)iuQV@UqBn2=|2H?Vy2pcjKd*@CyW z<_XD#@rTx`Lc0h~W`}u0i4zqEK?<-{SubI6M+F zCXi;f@pU<*lPfOIwDT-9yCSpZE!iY4pvg;cE3K`)eDxgOelbf7ZStTIeWkOe@$G%- zvx)B=Yb?yWxZUu1Rs=e%fcpE89O~QZSaL5fdvp!c@^FRqVs{T^%f(h-|dpCKzd!lmS1zZW)jMU&g;vmK`%WSwLa9KwFtXY&qw z9ZK^9eVg~;mBs2hMwMhEUhN4-GQuT;+cswvcYZVH77;ye;8*8icuDbTfZzQ@hp%Ct zU7j_KNcFT|+;z#D&{{;u3g_&Z&zxtfaWC_xBaANNXp)K#mHd<@_oH;Uz7R)~MfG!1 zl@$@gtL)}U#{RPA(lf}NTm{KtJ)C8a(n5b0{D;4GAWf<$9?5oDIhGyOH)HU-ShEw3 zDW0CuJUONFF7ysEY=CMLw$JnDL&(sGbj{ecw0L(PN_6 z8!VK44Ot~}Dy=Kg{sC7uCqa4Eehr(}L-SQV^>A*g02a~n70&nyI=gs6R%Rzd#bbrJ zbn;$KBWW%1ZwQSm=;d-}KO{ok!t=ADa;0lxt4BqO4YVJC|B1&F_~*60>+0I+WKDL& z)pTkD%e+e%#G7*WI*Mjj;&66P_8tR!TOtw3Ivl)G#>4krRB^mM<=I zUDgAA%!B)|cGmU%t+&Ip|0G}7%+K#P+Ri0o8xmXvgZ-R$lIXOYk9L4vBbc=07azbj zc^a#UbtSd9ve5gpG_9%67xkSi`9JxY%L3W|IU9G*)Z68SwkGh`XZYa7@Vk<<8)2Kb zo1epXoxbyCKW{Cn^4d{)`HO^?L%Sw6)N=M2uE`tjE47i?$pGw2HiP>5x==K3?)+a^ 
zGqb4=$$Xb98qz;+7f;5FZ=lqK7VR-;jp%ZRNOKNm*Cx+>VsM_@nN+xvkBx<@j3ew9>&c7(&BO` zWR+_v7Td`_^Jsi6%cnXsTNjjUM-{yB^;X07bIf0)gWaOI~wEcwqTI;*#DH#wuUzmBKz zv0AEv5zFDyAD7pOSYurMAC^Acb-BZ6V_wvVoOx?n9=EEIz6mSmjo&^Z!pr&!Ba^rM zJNji`&_BHV2v_AfZk}p<3c;uGq7q!*=E?iP`3_v|td-ma9n3=6dzoyowVYjz{ri|f z)TUL|k7sRTa9s?M>|}oq8)uOrb!GGH{Q|AL?3}FFOCF8UQ2r5TKhr|yeXCuS>{Xp< zoeZYuvj2(vzP_l}03S}!THd}?A>G6LY=Abpkm6C2zT-K>Ji32~0Z%x)7%YB)z)(Fj zCSBh2ZFf9xsxuF44TBfhsj|Xz9jgOR@t9FQrn3jd9HFK)=bvUckGdjNb}e) ztJP8qVm3^RVPp2>oTueEJfZ_TUqkM)TC0Q~qe!rrj@A6%y)e4uO?v}Y$}YI9<81?< zcD!-0elNq;tiedN)U2PLD>5f*{WK9S`^9#X>I7J3mb10Vheq_gW%!EMt`|o>q)8ug zWxr-e=vC)e4cuE~CiMWmC-ZYwv?cp*s>vL|La8j*0e9=rYZ7l8kNc}#n|e{R#oOrH zPk%*e-QHD~>MyIAO5ovT;#e)(O@#Cg=l)EF9sIBpug^AczdwVjf`8#%kwFz zz7zLb<9h0Kl;bNE`9n$QbjP9-;rAk+I2r?zw=VfnvhuJoE%F9vs_(MqVK5twh05vd zv%k5?BO=5o+&YuL4WM6geg8qief5~Pr7hX)e6pr$=$p7Uyx?O8^VS)7-b1UY);n61 zn<_%J;Bz17?O5_3gt;HEZuYruAaSa`Bok^QeZ9divy%2QoXY$5=doz2e_8i1N6)Fv zRE~YGf!Ce5nI|!y>!lB^^CU7AV%ux|2MEm6#uDG34Ef*jF883PiIgY6^CnTNjd3S6 zhD)$pRyNIYv;&<|q3vM)da>A*Onq}$J!^n7gZr31k6`#l@v<|-ZY0fiGTdy8K2Dzl zM2PpDF^DEz#j0e;8%mQt&iRbDBnSQn&`f5_nXcZh|2((NGx9%O(Te{Kq*r!`JwWdF z>6&}V_AK%`8BT>o-h=dl=hNm4sSB3(O*3#i8LJLti?g6z*^DXgJL|c2BYE={Iy0A4 zkYA~dxj4`lx4K~P2+<(%X%1P@i2TBXFkwsR{7^Wax>Ru_W2I3J9lc|5R z-g9U62rGQg^RC3z|B3L)!H}6+a&}$o?~}#odwB9_vd>}Z+68nbu|QUgWaaZr5`KUI z^~jz5vEN|xRFVzgwVg3?4}V`m&a5vvNW5spCf7P6(c(oOmKpBkQrE6fO{_}$oFPeX11A5PV> z>?F*}sS>oyipJC;87|V@%`#KXQ`d<`kNUnHq&v`OJwASU^n$@K*j=TE8u*nREK7`1gIFXv0al9xeSO-Gyjx&*qiaU7 z@JZs!H5mA=t8e4Q=dfmXIzHj=$MyLZ)+FEaC_Qg+_3I+dNN4|`@1w}^i;*PxmY>Cu zw%X2}ckU_w2j9>1vly!FjfAJ+TJ|$^7H?7m;JU)y$gkpds=f4u(F~lM3$1dzv8`So z6+d2Mfm?B?F;shCa_*q!>hU~wOeOLu+TO}eFRp3etut|-)FJC28-W~QNy%* z6nVSykMHS}e8tI@@C?l!@Uu=|@8d*PqNUc`I7prbjoEx`q5J65NqV3d*pGHwVBP|6 zTCws;Y>=w0ZJ~CI{}*7$`52J5n+?nka>uuW+^NpD$M5Weyquk1HKI2+jvq#|vVPao z>;E~PjH6d$e{y4WW#=pK`#So)3ZXpx{l!n-qaQ%VtgOvWl4L_agKwSWymPU*4Lons z&nWYVrtnIo)1UDw@7a^}eW!DiH!CZAu5&CaItKH{mLxpOdGld;HvCHaz9cU>-4*|? 
z?cG7zCA^}HerjOu{iHpUZ;!;WjgVMEiw*1>wvN%}G5A}D1$$_DBC8*4+#H4>RoSXh zA+{#F!b*SF=ASibR16zZf9-GIj1#vm($~#Cr@Hto-g>=OR$)am))`pvfXXbiUJJ=T zp0{B$Azwj6++k$xj@j2kKADAPyLvlm?-U)z(ybi1){){1Z70TMZoXa|O2+zTtUZGq z$+P?azgI~`%;f4y4ZaSzQ&#J#$$PF*;aG7jIgM}8UNXK8beDD%-_IS>RgV3F7kT@0 zsWY?tJG+i&&@43#58#{6IMTyU4WGwhbQ7Mu2Ubm3AkS*^oOX|X&cgloX_GlcR_I-f zHv`C@{64RdzPBh)q`>ibO4cY{!Hy4$Pub6w)hykOX)|2E4%#Qvxgl*1z~=0|JkJ&7 zuqOGRlR2swsgg}9wjb}@a)rp7eYKg(%;#g>$agmnPPM=6I+%a~WkmSYLvM=-`xY2F z+xO4GA@4o^W5k+dY*?+e^Vqr!{l4QdAJHp2M~1sLy8?g4(P_}mhhBAqFQDt%gtcNL0|*Drt`I0 zdT3aXsW`uRUJU(^Jhkxi9pk|m?agPe14y1cFo*k8M(n-FS&57DMDjLdKa*c)A9pP= zt0VSQq3by$-H5f{K)yZC8V<)^+Ai&{TlmV0xSm;CJYq6kQfV`JE^lVZ1%3vye+BVm z4qh$hfo)l(iK{-t>kCQ!JA04N(qz{p7s9zP>FbJ7;^X<`-&Kf~yD_RUeoP>DatPJp zE8p{~+!@W$`y@K8(dKcYeR57N@>2^ZRyuw;ZQdj4FQj`?J56EOQcG2_@OHoFz$WkM zlQlT`nqT+r-(pEW^4;M7P0;?IsB=E4&c=q9;r3WTma15E1~jhM+eH6HJNJAxI#)~Q zLgH@>Xd^cK;)>M;>s-ZZ%b_?;T>nJOCX?N7e=|6h36AF_b9RDSI0K%=Mw0TaK>C?&yjGfPV++?OP%!jV$3QLZKgASG9q=s zk?LBWL5>gEJejig(&GV~9pZO!EP6%DbK%_tPKUtZZ5rpzQC(3XyXl`at{hwNhA%MS z4dc=IqQyI|Uxb_2=r>P4dg^1V2-DGZiy*h%n7jfaHWKisY6VKq_6g~W# z;8SkjbXUeehtLzg^zGk)TFPtZ8@6mRB zyr{0HJf}=Xw{fJJhG~Ood5`mxrRZmUT%x_hSU))@c3{iJ7_>3YTW{+*fo+YBi=E8IJZcr@FPF3LaCaoJ8cRRYOpPxgtbD(Rp>S&6XGgCCI z?C(4&&#s%!zU^O#WZR5u#rZ>4bBu#W_Q-ePtMkd8XRsG*y%P;mnV|<}j-zD{7}heg zSi{rKrR^f^Co@n<+)qyLr$vf>n01YoyO1MyFB6?JUF&DC$X_&0R))M;Z-do&dNe@H zIh_?o>hntM$hweYS>#2Mzh-Q`1MXE^F-CkD$~H%6_bcaJ#v4=RW;u`lH=|ziM5OX- zRwZ^RMAu4CS*fk$57{3>l4pA+&2A%m@(ng;lZ%bTkCXmJ{LBuTn_&Ejqb0F=1MXa| zg=3uCg8!XP*LoyhrM2t<&D+pd_(t+BCu3FaS>q=^xPF0a_hZRL;_x1*?;z(6UOc1_ zkrR8ebN?#3CSz|qj0gj}IA;*12gPI~IuA#uIht&xHO0ZMP}ty#YWiM7pJW98m{pT2 zI(rV2d+14$m!Qi==O4i4^Vq5)x%2j>C@DsfHjycJ#-BQ4shHow&!4~98(rTlvzaxP)LTYP%Rtmu5$;THdZ5W$cj=AWAxh2 z1FQ4Y@w97*Z(GQ^o_x!EoBeNdJKZ)-{f6gz~CIRpNresqkl6V&ZTEtJuc(v6(QfyNR{;&siu*2 zozq0&Y50>$J*n+-1KSVd$yr0ZPE60b{M?1z423xmZOgZ=5}o!!XVI;Xo1PA}uHL zeR9ivgPs2suO13d)YC1 zI}bZbY3D}ROf^otAqHf1)O_0SLxa?ixe|sINP2{8PsfdVwC=&;3$>8y3{PRqncBRN 
zJ!)cM?wsCZ=dA5(VCK_`cF8DvsL`th6i4c@n7-y2X_KA(bviY3Rd$&rgY54tQyW9a zV^Y?hRUv(zBz$HL`Mc6lik;#{I%KEPpbaO=Q8!+M5j3 z>OAj6HhIw%kL&3o%rAzWsqLEgD5)X#lX#RWWz*r8OlHwOdvH?c`At_o1*hzHf0Qrn z@$WC^rTTDZeYM7gcxE*tOC^4F8hpOgesV&7ibtF2zm#{~ZN!)l!{Lxfmi37+$vAMD zEB|80WE9_|jl7dCOX@Gh(w|{bn%B+MMpl?k)K6zK(>xz(tN(cF{;&(DdPA!Po43HD zo+9I3*2-SEyuC?E4f(+i5#KnO$^^X^|S*S%2}MPkFOn4{}!x|lc9EzUY8b_o?5d1kS_NCnUOt$8{@Gum6k8@P4@YJ1O4WD_;>Bu z1z5NiA4cMGR(?EAqLr9l8jnY6A-T<~uy+mD*Awrr;Vl_~)`-Oq(dRio6~vJeMwdIB z`5FwfR;49<$BK5xifefe_pzhhUD3lg!(p0SkNu62H{ePIw(iICDjMaUWuXa1=4AfL z6XD_RwI|W;1zP0J@ME@b>bxqV>Uh!lT{cdpleeA!3#3+S`y9;6^R}IAHkA}}U0a1N zd8(8r|Cef`qUbaRfpbti6q}Nh zt{fyTVUZWKn)}O2Se(^c1KBQ;w{^uv{*j%!hx7WIjkk$%EzP~MH>U&H2GgO2UXw?> z30-q{n%&7+r<}X!qsXCRR!i;l^{)LCR) zgSlt;W)Frw1J4BIMt&EQexom&&8hhqGfY`D^Q zrLdqM&66)I>kxWCYb&{5V(;u7o~y6PBE1TLBON<`_+x@k@8V5VF%lX(d)j5A@qn$COh8aV0nq+_6 zdfZBu)a-grc8#n?dWtVT%)gTTJy{f2LA(@=USWlAXndv~vp=;Z{o3Mes=ZclMfUiu z!k_l~O%>oP+*c&u`Q3QiRZMu4rP@RPQ$5rsad*hIaL&$xU;Rnmd!TxVqp2Vo{-us@ z>b@se-e|{(k`>0CdChmE7_HBC+PYmoogtU^PtA-B zUHHfukSPlPdHDGP8*hb0^6&P5Xafi(vL%!4V!aLUyC!LlfZJ*@?QVREpS5R?t3}cs zV$_xNxK9MW4#uVU#CxRr9^%=LeH^AV5W}v7#9qj4gl%vC9>({}&28t1$)lVT9lJs3 zV^ZyLRq{X`4~e{k?#N?ziz~?{z6Xb{5t+*A@lUP1t>4f1`Fz)3&l*`#vCur=DD8K_ zzRr4ig>AO;|Kum^ubtLxT87`{sbf}qWv@yy8Bd47YMzyq4V~as+_5}Msmu#M=Cfl- z@H3qT!L70@^Tf11T#9IC8Z27jS)xn!_x0lCvw1)lvNWP~R)D?3UQanQcN(o&wl?&S zA^Sqd-_`2@7;v%(c{Zt2TlQsGH)sEg_0r9E-TBov5+)P&b`~G*%l7?sOP-=wcqW~b z=V779KAN{&3gMN0vPNc(t3F`u|B&?~BV=cM%N40~xJHbquGidS-{<>JS!on3T46>> zQs-&I?Z(Zl+1Td1)N)LI`Fmm35brO5|G)E=nIcOv6VG%-o&^sgSE_$(AysAetxUTG z5dK#CQ+Rzc{LP|CL+xiZXfh#REOI6zK^O6}7(`0brk55D*KTX_-bA zYgE=x6~l~2F(A(d{|m1}^n1N_@*ZamZjLAU7_Fva`US3;g>}g!aX0@+?t<5Fq!iuu zXTuAyc{832(SI^;CSTcSV#0k;7)IJj{(qk4Q+em{@H)jPxrvW<(8}AQ(KGZ;rIjuE zeA`&?011jfIhDA=k-VSkOa6@owpG&eMWp$Ihb)KMT`Yc&ad@QT^&xg3iPyOz8S_@^ zD>>WV!{_QyjUUu>Rceu*&tA!mJ55iq^;!IEE-fyE@;6vA!<=d@mSvt^(`;a0ycoa| zU$ItuF=a2-trMxwF5I>3tHo!@ohN^#cyvG5Zz5-Tn9Xt3ZkW8rf?0X8gWq4oBH8sH 
z{*N(Up3NfJkGro(^(lswBYhh<5775$GIlYdkJ4M#ey568Tj>1B8^>Ty?sd+i`D6@h z1M7vDIush8`!;JkN*P-+gG!dGH%YaeMvX|7oo2JOH;@e~Xe~JbU&Grq*f19Bd*kzO z*m4CKPv%w4wbv68@53>9@sgkC1m9#WXBqa+JM(Luv%_`USoLj^)P?+VmVc8x$xCzw zsj{MGC%cWuns*_TRRCE{k!-qGi?p}nOrDkvgL3u*W@mpt-f%Ac2WvaK^h${~_1y#K zopy5IWbIE8mfMT}+4XlO+z-H{C!l#IPChQ;4abFx@M4B*dN@{r$JNK)Q}Lsy7;&km z7P)I}ff--G>_H6piA5$lXS}vwqgywK{mxQ(ImOQCrcp3X&Lf*WeUBf?0 zvQpNQkI-*Tc)jY}f&S%b)p*An`TZE|KhoBVTK^jE$yuH1dgbx;CdUVm>@3$b!>YFM z-Pbt1FYdlclH`X?<fCd7(@JvKb!Vj-#=9q7lYA%tu>EAZmlBtXIPxfLMzh%sFw5Qka6flq$_&1ij91BG zvqkHZ`Q`PZaQ5!Lq_6L3)CdQYy(asmQ}t|x7?St2e~Xo!M6TAP&m8tSEj?r=@i^pe zCqr#o^x#M1$&j2_$MEQ8+R78{+wi`$NM4ga_Qk)~vEXUED<$51OVVq3aP~Z9<>*nO z#4hb6U;SL4a~Cz=?|+}HX6;1k3m)Qk=9?dqtgEPasy3$>8;fJmU_G~lelm}y%G@a| z(g3H&u;voXED!mY%?R4i=P8)fhQp6E?#mkQeCB{M;4_`QnTj>2a#Op%y1D3oMI@vK+l%=y8 zd_TOJQ@~>|i=_%pvgkYugDQA=y`C1+_kI#zk4fk8xEJAnAjy+e^B{kB=ikeqIf-q5 z)n_Aie%?8GBK#ct7iHmUtTSJP%ZwzW&@$FbriQcGwm*rJIVNj;>#}H`L?)Y8@-C%* zWEsBk52^EZG?_9=vD5_UH^#t8#{RMJ>qMRrT0M`AT0?IPX|on4nLcyxaT*W5mUg>b zb&6>CKbWRgN7jr##XIV-%auH+4n~fK^bA`4BGOca^S`yRrfD<#e*5sk19)2tE&ZXD zu6&>iDR0u|{`_!Bf%hvgD65s)LpIs7hr65Gi4Enn*9RvTvh7N|++zI6`=Meq_?4`U z#i3ZIud$^Exsq@4A!j@Y+YMTH3TIMtp)KD0o7+0iM;5sLa_Fu!gG>&;I=rKbYqS1s z5MMY2hN-Cg0X`+mTsyxjuuv(UFbW1MTz@D}>;bFQwCVu2DbUY;#iIJUjvxF+qmq1a zaA9N~#|}68>n-C*o{3~X|E=__rH|6A)DVil8PTu7)#Oc@1B`tY01J4+;N zqW4?K@FH0&xjJ+0;&`6*M0u-Gm))kZMXK3Ofb>mznnK@~VDh!7zL7N+(X6^wpN8+( zESz1~=fC_8K)l(fEEVX1^!aw!w)ndDKCq zT7t>zcvw5g<$dOJEVY-8twqkfwLC(beL^5a3D#h&oxtb5AYHRjSZiii}O#H$N zlT+Xu?CM;2dixA3Z4u+wi!D9HubyjlKxj$=p z13bCKq;Hb#6*uX77eDc0fFcvp{wMkR7(_4sqnDNXLI)NQKI#q5#H=Fii;lE3mE zE$`DRu)|w=EN)c06YAfR>l9dKzL3g(n^~+PD=!i&&cvZh99xVpXVCL#T5n+M_gVZA z{iixmKdlXA-!3$~hrcG5`2>1DskK3{dcfa(9LXJiDkr?gE+@cl7_a+<_4}|+?%U@3 zU0Y;I^tu@KdH#K1fvJsryGtv*S?46!WyVQz~??ZC>h325P|X>D3QLWZ|h=O zo+kau>;K>(hd?W}!mf8LYf)3N;5pV zAD#07Z~PxiO!w&{2<Bm3$ST_zkew_&wQ2rQbA^wezI5L7c!)7%lo3+lejVmUR7QB7^yy@ zOV)zUB+-$D=aJ?3-evSIN5dVgo|W(2NpO!Ihx+R*QLH0t*P-8Hyq^k}@9|+G`Ag$k 
zs#=X?iK%4xf|s1DjednO_Apx9%<_LZ)|L!sVMJo%BYIjzn}?lOf_CTdzsyCx!n*@- z{ADrecH_fxOkG{@&H?bRUzic33f>N_T~4>z@Ty#}QbRq@W51$?1M#>f z$?j#htcCtUdmlSC30f1ipL)Srtycq^vv%@HWAFj+ZQ|Qh>ARKG+1*uzEyCX96grAL zP2pS5IeE^|(cky6M;{!U$sd|x^lWW6-pUmB*+eds;vBF zVSaKSUgeq0@kZ;bN!gNRZWpr$YUd5TpGInPHY56_!LF^A-NPuB^#+jw>zyCu6$_K~O794XOXfC6CCA}A+BuLfCdW}V7JHEmG8a5vlxjl5c5q$q+T1_Q z!{r_1%r4-4MAJcq__~-Cd-kyWol*djT0Cy@0x?7Wh`tMEGO29i0uv!1$=cq$(q zK!^M2zMfXaStYzJVvf3}5Hn`Daw^Zu9>+JyxF6dz<;`8`nN{@r(J7fepVsr&K4;zN zd35>-8mVHt9;4Sf<4;;=$M7!a^z~QvZC|K|4mA88+mG>6%JJOU7ZXLRJ2n^_-_ZAB z9LPJo2Vj_05IO5bqv=d&<$YfR<5bqiA4Aty#l`#Sw+9DO7oZOtZo#5^w32w5nfpMt z%zK{H?ukUtXen>lkAu%(7+(dcOC6sLg<(dgIr^%FNte-Tt#3!b{cE$5k8$B~j6BXP zbpy%X7cZCM#Y@n?)Uhiem8ar!NuJ#gz1glI9%i-Act2aUcQ)*b;&Odbr@nUy+^EVr zyB*oeHp$O=AugW;3FK_=ZVX-`EIhcG`^UOSZDd%%mz#d{IS<9z7P;L#04*0r+UJdZ* z8U5`LO->ZC5)r!@X;TL>?c7ARZ$yyGV6{OMUThq%#;?EQyIHk<7K~EaYq;+&AX^Fl zk7u=KVSP9cI0qJ~5;F;lUetP4E}mGBGj*Uh>wSO7j}X_AQ!1;2Cc-6kVs9|Yr4m6? zynRP2*&UNR+hh6ZNAy2Mi%-F2m445lUn-WrB?5h|rB09;>r>vkWesErx^JgrYp7g` zS$pBuUL+Yr!z%DRf(LIB@pd>b>$U3O^;RQ#o<}yrqSUmS&4Q=Vc|9HO^Qkc#-dC{L z*{swDHwMD?8BERH;V@Rd&s7KUud=lJ5^hx>m{Ig`aiFqMD0e{5z&n+OX8P0%YqyDz z_wkb7A>5K2i^TtJI8llY$=EXh!-UsKx{e%e;JI4-E-F&?#IWf+stMok=)BK;-j_!u3w55agt1Kue9Uf?p}s3F zp5@*nqhwF~sY1Te;!QcYRnf|4BI#pnk~J^6XIjN#Yp}RDTTjB9Zv5y9IzNheFPiCH zq4jUsdohpeL5s6kEpNHbaNZ?QNxp|S*dS|U57$T5*i?ef@ocn3FLgzlj_my~pEwW? 
zlB;>To^EmcS7X;DESr^^sW-ENtPOCi0pGinA5Epcd|G&H=&heP`dT4pRGv4(twO1?ZAuHfp(eG`M z>R9Z!h6b~EM^nCYDKt`9`3Umo?&wqP<}Kf8Se2Tw$yNI$eHP;Lk2sW-o?>|0WrQgL`?Wosevjs5)#hg;mMc%7(E|ni&I(Q z4jP|})jzS*Vr`r)7PKzRxW3V6J+dS>Vk*H*(%MC&y955^jZk;!p)`$$u~hOgT&Ac0 zKxw^EtG&KXCF5AO@8G(ustW#D54ejgWqnRvxMh5P87z(?XK}Wy&Xbb|=}Pw41l_Zo z{eO5mRV=y~cDbi}&G=FedtZU|Saz>PgZZvV-Of%t`z5X94yZVOWyMkQiKYh6Hhxo< z-pTKA7W7_Yr!smzLoC?GRaI%9_uJEX;&MnPn?N02x7xh2HNGV?N!|(PzM`kIc8V7( zppcbxSNm=feMXUUGZAE zDn(rcvtQXEcMA^`?(43hTizUH7vtUJ$e!VSu{~FOg|+3hnfEqf#qX?kFaNk*?AgUv z&xKm{l=oww67;Bm)eE8b7r8gHYB^)Up(M;x#dq|OTr$aBQrf&Ct7RL)DLD{IIlDUj zlF#Ekt>wP0EH=C%=4XU?kOi;C=xtiby04b>u1uCIc-a>0uMLsv#e~z9`m1R4l^!>ct%qJ}_$%*a|Abqr$Zv#C z8&>bG?>tM|Al^21x0?4Zc~($`tc&%v21hT~Vlt2Y&yn`xV{-f^!%WtS)ifKLEFO%; zh|k!tk1H=RQY5c&4J=A-*&?pkhlPLRe{&&M6V5NQUhehYfyOdgeW#VHa5%G~RRs&4 z%&J*4v7CKVDJr@CzZIFYr?r-IvVMFr8+FrHo)}aYU2;eCGxm;$(Oe^W-ZKqg*XjOm&6`eT$7+rz7gbFR&l|!P;!aUE%}RwHW+hFu-w-<8VfZM_ zTa!D_!OP-uGTd&(gI;i|>zl0LJX4fvps$56*rkPUA@rdBn!)vXcFj|R;v}g>wq2OA zhEG>z>Bg9Mq_(QzY~I?xD~@L^NO!Z4-mGxF&mX}*`#q+Su{o?!E!^Z)o|uzOQn z`T?R%j4H|bwSX3X(4d?6@b3!nWY)?%iV9@Q)2woQFx4PazdgD9o9d^&5qc$?4Z-|8 zWl5dU^{kjYy{C{cb%RFGxE@)*5k&@y!^x7-keyN?`&rQ@v$fwu){LcDuU3o{ZKN05#Lhw&MoxP_=v*?ec zPo0ZizCF&Ts|$0Po5j{$Sn`@PhQTGdjyB?KDlm-`Me;o4dmg?oe5UfW)X_?2)XWZ+ z!YHdTTaqm^nHPMziv2SxG!_Z^vqAQzC9~DdkWS9C>|ID+!%y`dTmPx&qhZnx7VSxW zrj~DT{mITN;`3MR_O7;`=2r`xl`Q?Ik@q^RYGHPC6HCo3M3Un0JYDZw^l}8t_rk*+ zI8;x(?hE16TOCiQ9X=oC+T;g$-CXSm7F*)lgW)-lWQXBNssm>|Kzn{t9+xh`$Nfo@ zib+|M_cL6+BX{z~j%Af+S)v$bUQNdf>3ff(spB@04P&h`J z+m@f*#~W|dcji1D*zg4{KQ4--ip>uN%V(X*DA%3l`#Rd|tL^qI^%f1X*YiQ&m4Ml| z*z&jd+Fn1&LbOVR$qMM)YcyryWhGO42;nyh!_>!#8uN!}a?H-)+S^D)P6imTu+T>_tf(uS1PN z+i~p&GGr%lCk)sM)jS`mAVzM--Zs!%!$QrSkum87Eev6q4@mkcjV~upQ}dgq{B60v zo5B4!Grz|nz85yhk#`wQ=8*qZ(Xc)~eTbpSkh7dsQ)OW>K5g?&X16_P^9XO6)=B?p8N8c?3OqOlU`uy%+_wv#?b}N-;6=C3nT3bBIM5rupYf6-{T>3x{@9ZD6Fu=Wqjz6DXZ7UIG#hUA zk#}W3;7e`t48;DdG(H6fK4HVUeDEutU4bo9C+~R}?W9i=v0`{3o|O{=k_RPEmRcIA 
z^MrRTZSTO&^F@TzgBeT1)Sh08gX3}G7IsVyii5Q^jIFo&y+2?3kq>Mp@9XSU&Ac@; zzBjezwHAak=a%)*1HgjN0B;Dvd__X z)>IWQ%#FX~xns$AGM1!b(MMWZMyEWJ%3InZg?VyW=Zw+DjX3+DdHi-Z>O`7Zw8>q> z1?)JMEh;%a75gh|C9AX#)%QxRyzQz~AWUAN0d!6c;5-rDNy_AeO19ptc-@a*Hi35X z$iB*YS$q5$oEnq5t3ISV9eVGHvjRt65I!q zUy#=Xu2tUXW)4Sd%`Xq3L)He|toLKco!UR0pztjTDx2ATVB|j@e^Vi?Ja+emL3VI1 z()$wHKjx~N*swK5J;5(C-yTJutRei(cP+@7z2(Uuwv+VRv>Y3K!`dH-vc3FHJ?w6@ zPA;M%ESox_=dwZ8`d@5@aSo477VMI2mwTeKjpV7hoK=L0tG)0&HN6*$eXofDbA8Tg z(ZjW$m5`~u^ft+o$?khCZ-Lo()*Sls?8PqHIu zbs{{wxuQ8HJPzMln6}k;lDd7#vU%D*P%p{LUyDt@ z6JJuNcAl{<_fiMy@di>R18deC{HcXxFQ2QQd8|DM<{O~(d6T=1=-V;)7~ju=N3tdk zBh}|DcMeNt?blx%Mbt76cRIQ=lbUVy*Z zVX+RPKk~k7U9;H#2RNQ}E(5W!jH^G#wcc!zRTNEO(+2}OLwG+UL*7?Btmou|Zo~HT z*zP7=eiRa^vY7SX)3Bol`=q|pWJl^kD67e{e|#7uXJO6;XI((IR6)$Mwk5`=aat`3 zn@TWAv&%sNTwoha$B~?t*Nxllf*k|d@3*8L&6VT)y5SslH@+I z;4g9QYsZoSrkhxmw$kgje5~>C0%sS;ysVjS=FDCsNIta9xRNS}Pnnza zA@S$Vs7Ld#=x*1}A?tP;wb$2Tarsz$I+untoGfdq-rf{<+=Mz5>GH!y2=0dk$p2Bqz3LIu33nu z$>uaxUzxK!FZK`PO&37zU>wPsoY%EhlxF2I@B&tk2B}qD+qFY*=M%Ub3DfMe>Is#@ zu&*>5&4=9KKBunW2hdA>GV2dWCSqZ1vr2o{B+5URjRaupp_dr8QbA_=yS#Xw+AUQVb`1T5S zmJ5s=}4f-?bwV77a^pSke-{D$O=zf89?}%#K zAak~snrZn%{_r^s6D9sf*C)k*%wHP8V!lzM7`=DW^8&N0RIsk^Co5TVXFmz!7Gq5+ ze_zYTOR`j+ht|Nyf6OgL^3zKSk?>kF%*L@zdT$Ah`!S{$%~E@3wf^%wF0~7~GLC-VU@&#IC+?$~1f#%!CyY}xwOn8%xFA^JWpv8Kgmh4^w^^`mA?y`F6&n>DZj? 
zKeWQrUdWsjc6eo(4=_KCyB=pv@U7nM-FeW}CLQRI|DRf!E);q+LeQA&yKmG79 zyQ=;p+T1|y%UQdV@#!xbXPw|omRd#1D|uXbK5{xelELo(Xi|^8KW{S5h@KvvVV{Ww zT%R=teF6Pb9dBx6_>AXm@y#c`+YkOL^^pn)A8PLf_PGth2WY*7voZ?huIYNmThTjv zw{N8FU92>dc9{c=BUf@mT}8*yP}mIJc8*n|zg1&acAwF``6s5|t1` zuA$ZS?9!h1cGXW2v7;V+Cp(_Jidir5gX2r}Rg9-(z28_I&z`rezBvu1_w&^g=s1+W zr`peZ;!>VnZDI44JhTd%uV>8;m@|h~scMy#e(S`w-2FFpPIF^%LzaINf3gZQ_vI6e zggaTiuPgJkY>sb|`RoMos}c`nn&Klu_(Dmi}0WuF!VB<%UjU$tduG^McMIe z_IwE+QYZ3au`Mf4Q(x#p*6sz}>?O}L{XzI~Jk6^+H@QE*6!Wh&%4EOV>oiX0isxZ{ zw|kfK;JyIXz2I~thQCD8Zy{Czdy*+}Us7i$)l=g5GkAU>-L5d}y#^}TS=3BF;ZHUH zvLfYu2$hFW@&n|_)@<$ez|%{ha0`6e7(rX}>8uQRmPI$3mu1IiYE_pfNp{#KTjOTF zIT;JG^Xo*6D6YM6^xn@*U@lo2!+w=3&tt_@w*5?t*O!dbRTgy5cYRrk-;x+MFR$ET>JF zo_7K9>pGB0Ws|odn!DSv`1vyn_19)93uO&hJ5h6v^RnhHyUQm+>m6g_|M*QwBkOc$ z^w7)k^h&OZI$9e<_I9{g1T(TPc^pnKgd9g7Mcl{8 zs_iqxqMPvGMoevuKZnsdIZ=k-Tvhf+-J+d%zK>bJ;rP%Q6S6m^3Y_M$-T_#er^xHI zTtPd>x~>M)ryJ9cB>4dRPF06r$#FU~wuxzb#P&mJlimNV#J3;u=W~`$R*-?LK8-iD zgHS_0Utg;odGr}%N=3Mf^^tu|zq3#FxBa8NuDG%RJCdI()v2nvI;$M6V3F6|5pKZ5 zInFA}lK((sp^@tjeCWX1|ABk%Z!RG5c5-ZB_1j3fLS)FZtpOM{kFI&r{}Aj-Y3V3j z?5Ky-O6U&pR5$yC=YNMIx&J(cwP!;sxQ&BmHL~T3VcH3QGNb9uGsyI1 zDN^KJVpn+WH9iz~#Ro?2MwqZht9jR!%rNDBKak!B=q1^{c8a_UAyvlF8?;;3h@NN5 z{d}Iz*LU%s4fE}3%ZTZXTRNE zzS5r6Qq^FAWABjqYba;s;2Exb3vO8(G6iBoFmyqI)4hCZqTk%Ptb)=b;!a!gWL*DQ zYm>3$LNl$|?#8NOYdt+bCIY^Wz2mi7kp!RNO;*z+BWyz&xh zOJ{eW+iCbwmIW`P+w;D!0<-fR%j%rWs*WLZ6<59h&&gQ)yEeNyK9+3p&5UMww{st> zl!EL9+P?(~C;4fLyMr9hs(?40Gv1hf1J>ki+itQ97bjm~vxWSr1|Rqu-=5af#jd#D zn3vr+(YGcvJHr1B-o62Dm*LTJGrAR8JD=Q%>Bs4J2ig0Rb}i4Wg3qZFlsmxGGOmv= zYq7GQ^N+&5RDxR%?Y4aN&jPPbHF`7`VY9aW7dCjG$FA{{`_b!2G#%@%qg653x71F3 z_FHT8U+9~Qc=Z4HVo6#xhTs0?DoYA8;_PtDUj3}6KL(D;8+nT>l0S4C`41Ck%Rx4I zXlp|7ce1{LznM$jspn+Vd|QmZj%D9xp&m57f|s4lLQk00Y@p@Quz%5cE&P4}<6CGu znQafj*qJ2XS{P9#W7QD$nT!oj`X)O~v+`!D@hkPWX2N3(YaB|p4|voibjn`imXMll zJimc`?xM*Sh$O4uYvjwWzVg~S$=?sKVe+0ljZqWfpF7Nsu1*!p3tU}SyY+cWo^afu zzsWc`T5ow?I85w#!Qb@?w#+Jro#; z5BY&sZy1l}k>eAnCcnl~vR+8;X;`of_Kj%#GmFnIjLpeA@FONZ>WVtNek)yH#+8rY 
zcOXygi4_OI`2*-z#hiiSSE}`V$g1(Dw3#mV&mph;I8sN(#G=$UNOhe2``d#AeL3^w?fCvDeeZ&u8+ z#Ohj0#oO7gy_1%|k}B_a>Wck`L*ih3&n~-Uz-!0@SHLWJ#Vf%(?-Ylz-|bLO{;5Qy z)btx>d?|ysyY-dTB6+%=sy1clx`#)U^KU*Dtb%11OxalIaR7ewG;h^F`U2kU1Dm50PX9sl)fj{y!-`G%p?L%=Xyy0`8U*$Fs_Hfj&Fv zBYUS-k>m?_EMUiEi+g}vLrI>Tvsr_2D%Ovp(=ezt5^2_8&|((J`|-(aFbG=DvgBFp zFi31k4Zf1%;x6OaV4S%f4<3clgBZ{iig_k{30~equB@v|ew9-p_ydc4D?X=IY^wie zEmPJ{O~AZlUQVXrzVOa_!qg^9Zm#L9eg@5}ux;kJ&tYO}MLbQec_f_)?H+nt$5Myr z?;9<@h?&o8GxuhTS*a-=RE1|U*uE+9WSz=$1#e%^OKM}oiN=!$A)Ke)J)AQR&IdZL zE&F@|l~?pIo^HvFxkv2lC>9>*yUcZa`YtoI_r$Niw7US(>)2~we`Rm`X0a=K&gzI} zJIK?BzFVMn4lJ8uOm?Vs^4;4!Cv^(G!>8=GS#KtGpjl#PV@**2U*lPGa3;w7-ycgCVq- z9a4!tc>q$&Cr@a~>TNA7*NA9EpmH8rvL^gDnr7$nIlNFYyVA^!D)xpVRC90u?tNpYmWu9YZef59*)yuWdvSxN-?_!n7qSC3*U%)TA zkn?AKeM|R_+Ps@|tKl?4JlR-?AG@4+s&hvbJmDZm8{_hmuDDD)_p*7iSX~FzhAg$5 zw>G8ir+n&Ze;vbPM#48a0GAe?S^tEmsq*j~`R3C7Jy%Yr%U-=UbnG@7rLy(=P-(-K zC$Z+~xZaV3c_WkPH3~O=VBf~%UsM=rb5Fbjn_f1GlolbANvL{-Sg)2I}FJ? z++-J=2BWN&NR6S1@OZ>1oZR^LXluUMmD$o6n0S(J)|2fqJ^q{fGwY_mWu5Hwzt(qC z$d!t<4WPRKQXj)`9t2;}YZ2qlqk5<=w)~^L|M=@2y-mZjN7*>qWnX;-ea;nK9>C4@ zyy+>3eN>2v{op)^ZT6#oYGw3*&l22O<>>8LlQ-B^&kDM(fJkdSC+}rVHvOFc4`tzxv2r5myKD7I*F6ly)7bqalBPPr zzC7u5{q$tLQ)n^`&-0F_K1_N-dk6Gu8hMf(}KtxnnxvP z9BBU9mLD#`pyXHo4*T+q?Gkpc3Z)k?asw93CTI2lTxE`)+UQvwxi7Cg5hG_|#iukF zAkro4#xKrok1ZRVbvEfXLNWEfk`HMw`+Q2fgM9k|D`p461FU+OR$rojvc?`z81YVb zX6__^BiCPWNq+Wate!OyRaoU+D5aj$T;thM+TBLxUZhEW*n8l$A1~?)|5W47I^HWW zVSjDh3#l(?l(%PV$(0NYqj2e7THH>S1u)rDkT2CrQ)4@Ca@S(~5D2`W#iFnpiKQ8N zk`FPt3QxnQclk&$vUKJH4cW0T+gxM@mwDGy?g^Nl8XZ}~iwva-B_9`kl8`2@B=Y=Uo6gLMO$vc5CV#ir{cRfMukPA#gDM)b@M`?J6`+FfAQ zv_=pAh0;wv&&AwM#)~DQ#X|+JodWyoeUqni*N{CU(Q+-Ub#_*%XMgD%xOYDs^Mq&( zOd7LAWBgvlYjbZtNDoSJ&cgnZklUn}?Do6~g4rka0u+-kD?3Y?lQ1=~ zGhR%9*El$3{bSyDq#oU;bk0iqtb$Cw%#-w35!Tr${shhQMzjc<{0yt?bG$~2*-u>A z_>ku;&%$+@sF*wKj%=Ehpeyj^axJEk#T$Bm7I!xKf0C%ZmrZuFQT7w$SwbmtuQE<0 zXWCUfevTe%i;-EKRTP76!_+10ko>om^>MR!vCJ9CneZ(iT+hz4pqV>?td?KRR>|<( zmEHa!`3vMYpX_64dYGd|1CvWO5E$2zpOJZo&&|as9 
zQ)u49k&%4jB>bC7`>b^6A<{jBl_lUf0n#({cL^*Gbk5zZpH()iT>B0yUx2UIz%OgJ ze<1B@zgvl7gSD1bNH_E8DXz+X_J7}{e9F=b$eK(^^;oqx4L>ARD&X$toNq{<^~4YG z|7G|QJ?^1HYu9QPRT&F2;Qr;UB(%iTP@fAdZ^rDOt?_wtImsm*J3IL zzQ+ezkRdr)hG0;C?YCyrT3X3U{+=w9_fTcnp%hD==!`bbO=Z__;dL@}Ps514zS*CI zCy=6tc>G`1e-c(>9l74sC$i=#IQgxf9??dgD`wsKxI!eU#dast{BW`)=kNxeGM7cR zV#H7u+%2y4WtT^3T%WJabbPp$hiN6xPHK=WD-m|m|1qd%?N*}QE-@^7AfKb>sjlAz z*LYs`n@%%!E-Q={pF;mTsGj0zo~~!tT-J$Y2XI;1X3V|9NHCKOo5ZfHpYG-QWZ>OL z{M+oirT97pQ{xq9ljASnW>%clY$MHfw!(b@8H)0ZRuD-fI~#_N(7&c{Ho-ZW_)^2@ zR?IpU<36D2pQ7TmY5vb#yRz}085;bi)KYXmyT2(bzNy+zR(j_29? zQ{rLrUDqsV@F5BA(sFn6xHm+i(!cSV{VsMndSajjz$a6Wl@FQolUbFXetyOyO> z-)aG0OuhOgeCdLMKQ?94vmKv{ZQo?nXsYkbVv^yfEsG^5Q8B!`%ZR>@mfnE!Le{^F4ypY3 zgSfGYB{smnw$G)F->EAzheQwhrV5n8jQKb<+WDv8WPNA%=K0w*_mbWg@rRN|z?VqZ z3m<3dy#;S92h+KZWv6TQ-G8p-m86-e@3Pt}&LfY;zW(^MM8uoNrrAgO81x^(;Mw?+ z9Q~u^Q6qQqsS%zz$!8F$ ziba3GGQ0e`H2Dwx=g+5=b z50ZT_y|ZhQ7w8Q>vCfQW7_VKx+R0AVij_Wc+ta-w#f7JpLpagBzlk? z9yYd>W5F(>(Zwt>9^T15+RORVNte}>kNGYuV2=@XjuV#~iZLU_+Ra$E%)ip)N=53I zAeTGJXGEVoJ6WQar^tOgmL1^C^L;y$zZ?at_;zY2OxI2g%&UQwH?mfCMQkn16&my1 zREb=R(S1pF9k~}2ICHcA`|{yV(A`qF(^)JotRUy>khmFR7x9!azPk`w#p(FB*p!{T z*XliWY7XNOsq2$kkPF?JRwLJQqWcng9H+5KvgB}@OBLa*KF zJEjm9YLNR7W6W^gT|vA$fGpVoJ4=+_s`YnS;Wyf6C+fk*w6|%MHHq^fku@>h3tvCnFNp0?FgD1(^37WPM2mSsdX`vl2Fwp|RvQu|=g%MH*$J=I73$2kr}4kk zq-OfG5d$m)((WceJ2 zZgp*Hy`K&BA6(tpxHH*LawU&u)r$`3@3kF+tFR_{ZwmUDLC zQX35Dti>@jy20l>Lpg%ypsJR&z z^S&*2DXDdmI@Ni$^qweCg*8U;p>IXa+H91ZvDv+MY{6b#U6nU6sRWaEp}(_Es+6Bg z+9`U;?(H=!c_2-0BSor!B*)-JNL&HiFR-K(%Vec>1(wQ=>+ICZGn6;Q`C)94j79BW zlpHn}z~E^7%nF!ccsCLrtx5k8EavFz5NI5t_1nm~AKiAqq7)1p)BbMX>?dkvRlw*% zd`k7RSYexaLuyc#H_DZR&_&M3UV`T0QXQ={hrvyJuMP(PPR=Lr`X$I*jIVEFej@+# zxSv(z`|GnI+2_%3jPEDGwB|*+>X`uK5$d0 zBU7oAM#>PKR78fP2ubECIzpz(2~9dpDxE??D3TBvLgwa9lM10SBx#-pRI1Z-`uxAw zI3onMf#P{S`~cBd0TJkv+{QB1??S9 zqal1Hv(3f%!+N;B=e`{x**Yv4M5{_LYKcdQ7?~UIuARzi&HPr@!(}yIeGxM!JJ({3 ztT2gheQ#W_RJqG(k+u1!)1s|Nbp>A=ifdVywSPx)cBsAwi^s62AN?z`TQeHXXTxQz 
z+JVG-c*f~3V-Sq8H>7jnnX4a6m-B_ra7|vV%#a_-8>jKo^x@Ij6Dkfvl6+-UjUcJOp>C{cCk*cjRYu`wn71sB0c3*O3cj;~gmd}td35Myv~@3>GE%QmBb3Pc+^hqc&o8cpcJlc2rt7_YKV!44ygqD6{62+-OZdVgB1q1m zjQ53WEumUVz1O>AiW;6$VuD^XS!crD)gs$QpHHXbK5Wm~r2U#~=N(=9ah=;*G=T2AGYn(>w zkeAl?Q0}D2z;osDl_&(oABe~dGLGljc zoSe$LpN=Nd4rMqe=0=NWZS(5`$j{$rP>ZrdrpoVr_Gv$(fK!Q_Mvz% zo}N!ap$SQ{BIz0t>@krs=L%(3G8wejDYpveub}lT49FUTSICn!+sPm|OD~WWb*sgz zQwu%yEK%`p9+Vk`hv>A0hG)Ak*{zZvrndX*(yE@goY}9${G4?0qPUmQ<~Tfk%)j;I zn+=1!C(Yj7Be7=~>u2A7S@s)4_T>PA~f&8|b~)6(V+=>i8B1G0D0#C!?x%`^IiJYhSFwjj8yfXTfqvH$G#(FLD8!j(3>ZXQj);B)hM zT|1aB#f|)zoLZCAxl81jM2nmxwFpk}wo7>F_awat+P_0&Ia~*_;j{kE4$3BcA^8(B zzJ44JpJkckKgb#1cfxY5xSM&`3uy8ff6i$!zmhIFNKaJjQ`#%aeuLnbywu4YmS@}7 zN%}k<{^=*P!#PiAm-fa$CRqc@(CHTUjDcSh^0m;`vH0E}KlA>p2hXjh-k;r*w#ps!JK;5zC649WEyRr&$AieL>@^1L%T_u{v4KXL;A}L z&j=+8BZv-s4pVLDn=WY=QG}1S&<0pgERUVQ(NqQFBzU@+C$ic{(YF6>rifnG;9j#EVKdZ(USO)SrD)A=+yn5F(k&g?c>sK#q(ow#?s$oHe`+5PjF@xfR>dmuZQ z4AbD#7vGZwe+Fx2J=$RqN(S$(B)Uahm`TdR_+s8iW)Jg0+Ho5s=eIo`NWUz2<& zIR*J*_a;~1Fmd!3xRnyCl1CylT3PWpQ|aB1TnOi^xcLUU+l>-hVc9rwU=d4Y#y4jS z_w$og>&vimuit}tOAEhm;}y%@-v`DUeI5zvjNhv$e*}G&iNoWuDd#ZmAH)BEeFv)X zGQ8Rb@2ovaZova^DeLlTl5GW#JWtCFNs@Opt3|4;4!)26Q(gI8^!k|=@2j~!9cE+I zTs9xUcK7SEv+sB@#P|4avv!iBB-sbD0{TYvr_X7i{9e~H(>0&pWaYs)tuDaaHvXE; zlOG^oB2^=!z^Ao$2n|=!Y`I#SYcs3pr_%5#cRlO7?)dUL>m|d*X7=362KB|UA!1W$ zk?>_Bif{F1*(Z_n)JO2U3)`f9RAq>x{LKQmpcKob56+$<+H-BCVpmLU1Guzo}SoVS3Jo` zDp_1Ur|C^>pEH7!!DESbPZi7ZK5Cs9@c^42ulzyu?}h)x#F5MpRMg(9klL!X8nk#p zJK439`NND+uEU11?DH9`Z>IGhYI?+7HPn_>CQImYH+J2pe_aEM51^CP92>PVUd!os z*U_hiI5AQi@A8$0@nbOAPIg}%_y3)3Dq#GTqCobQeajM4X_qsGGl!Is%5l8uNj^3b z?>`ooa(>!T^j)o;i%GK-wnatwj34uED6=wci{Q|W?p5IW9()hMrkqSY)1AX%nT(E4L2#84S=o4s zhEc}YxPKni)bg4;#l<#WStGzT-OZI zrT)&lrZO0i{hN(_?m~}=7?;`PWXwr^nIm|~^U5@K-v(a!ni#f=t^SP(pTg&JV~EI!>Zv6=xAH`nC%JJVNmZp6kSP107Z(^jLoE&Y+l8(+V$a@qG)FDz0W*h@ zY&GA*sVA0w;=a2`Hd8Eph$SnFxnm(ygyuQ1`7yQJtiEf+y?NrxH@qfqjkc41iD;Oe zkdw&yoBJA&<)3U@Q(LWEN%qk^ha_`S@+VI$^p`N&1Y5W!-H>{7i7)H&2r>c{YFK 
zM>$*iR8cj%WdGx@oYm1=TuCgfq~4Y+c0P~16tXkv_b0|=f8}m+XC&C4pJpdp2UsTO z(SI;Gvr;E(uN-|Vs3VyT&l0)bCSg?;7+rXRDM9NuXx5Ph#?a$eZRHfJ&4oFYx%7P* zBGzZ?(Vr4^^Msw*q>}3DONTdLT7usu*W5;4G7+!y?l^hiepO4J9W!cqT+dLHg(fPO zoD#$7(U(VmBw9@)V{}1n`AHEFQjSfNj}*vGb@~Is~+?Q8nLF?maH7Y6MV(_< z<_b*BPU)hrz9&czMKQ)%$t90ymU0V-oep1ShNRwzY(Wa z@TFy}G!hS9qu(N|>qxp@q|IB$_WUwybu#0;5LXX%HS^$yxZYhYSBiW$Vc3(Tt4{94 zyn2EjJNdrmX?q@|zR>z~CGKalro1)rsx_`B|8;hKXEtMpn3`FK6|8wQ&b=hZzRSHWygP$aSe86b=hX4h=DkKE_>X`(=!L1JX0fKx=Q_>*(~oQ zYrC_DI*X9AFWEAi{wW(}t*IRtjDOz*p>P2Pch^mSC{aS z>a&c{IJUYa&0Vb-o?HAB|+{>=8rlJM{FTI(5SFBTQ`FZ(-6L*jFOahB)N7xZ4I zh-gjgB3p-)(O=4+t8a1X_GLqblk=awa1xI@nde28;QI*`2#K1Ko#Eo#t8^rna z7@+=Sy8GEE?JjnIk;d<+=S1nD- zzlnEyFKDDke~g~lkM^TT(zIa1yf?|}_zL`0bo@hS6v<36h_Btdg(u zm#kVCA_5G6%#*m()4kbGoY7S~l4WJUi!_=nrqrfMR)USwYEAMs_U~QtBxm|OacL}l z=c{pu`kz)~^4%nJRuFFi*)`CZikC;}kFy&l>qi&By1DO@rQ;d4o{3BG?mAi+K;r#t zeYcCEb;YafRn0k!%f*~ZxSIMF`+X+7w~Aa}Vn#o#I-69PW63$ii{O)dQVZ~6EA5k8 zBJT(CJ}HbpMeIqwzZLXdq~zn6m;DEQAXb6BGj?vS?wkgb@lDPwzMFT}hR)+Y_402W zS?_}AjY{M#ZxvRJELZaD+9Fckr@ak}^h2+)WL8g1awW6&lcDtu3#`>jPWc_{`k8F> z4F1>A`eT?>MC3hy=L{A@GS^v`WoNoF5c4Z3oi#z@$+T3w&RLJi5s-{FZ<&Ys#P!FO zJQ#-t;NZOwe71nZeSG&m@oO8V+|Dbr0wbs8tag77{%|au|IqrwqRI_MYD?68omg@? zT$4TS7xDU2qmmzKmpA=6mGUAinpWr`?pN1MbpJa()a8|>SgQ;&|d*`g924cY?TIW>Fyg&MsW@Yq{n|+hLpbg-^$^RGny9j-T=m(F&`VUBP zr#i1;si~y;n#9R&bqo7^j%S(Wc|-)cmSp!}aeDU;@!%uz<|Q_o2ECd1_Ba+KbM1N( zHe>mgX{eE&U`tcwE+#FwmFtwEMcApR-MZ&Fu0|DWV<>;At; zRF6Mo6@KUc-z#;a#VKT1?#`SM)d3PYMY2EnlM8L79(=g(v!>}6woHcI(^rN&KS(_{WU%5TPs*|o`?v!|!3

cfdmZn= z*OoqA#nS%NcFD2 zPls!F*gWX-v;Nx3Cap-_PQ*Beow8eKiYr}6lr{Wa#KU3q-Ng27#rtaXAHutb!MlVA znEZU%4VpE2nSCy&Tyw2w7GRTlD$#f?Y_dK(S=1*&BQs)ov-K%fRlwoQ9bUTd5dau%~mQ!p_#iE0;`9zq{R_17B zkHWT9ynj={PZql)tM9j|F{h<|4B7p;65B!O2A0m#VzRQYF=F{n-I@QZPV>)vvsjOu z$hu2>?n0kueaeX{858f)U;LZ*ehvA3Y`sU+|5}|Vi+?|nFS|14< zWMAW9>d%_~^fI;So>QKZBYX~aeI|}ns|Rz606E zKuyLDJ^Gnin}9<@#Il@&oYney7kVdozbibsPGX^cye4PVw#TeE{sF+d^QfkuZwR(NOdH}Bo|?EabpaL z+K@7DDSv|JhuY8isd<-}{9k3smA#iaV=&RP4gWuq@2tm(b68-hZ+;VBGY8ohqjTP3 zW`q`!rWQt)q)}^j&pMDW>Lqt16G1ChoF_gWC~7CW|6nyQ&dD|44?Pu3v$jD_fqS*aM;X$rmF2YdY2(Z1+8x} z@|Z;W6a4-Qy6>~}zv!KuHpzfL8;gGA2OD_!RpiK<|01|`pZJ%Z*AFO{^WolS!LR-F zfbyqCsPU`=t|CQSH64xVYkfWq7vqVIM6;GWca)#(qRyU(194|MR4UN& zRuMe!19P^*AM8_^bym~jOwp-@K09Y0mV)DlknHU4lCD(tZPqn>Os|sk-VT>jFeax6 zlvnaDZFf<&i?(tKT=KF`6~`*D&Xw%D#{X-;F{?2i=Y2U<><@Tcpq8RE$Qy{Rh0#Hf z$ZFkwVrO~Gevb9Ki{~%#o93iD6vx-I!E3yz4xYZ^>XTx2O>t#nq37-IdNq2_XNN;! zmo-;MV^ZcS=8?3bo@NZB^S0?bc03GTk7y^mORr|NT}qXp`vAR9PHnoM_vLBj8nyjM z+OZ;5HP-K<#=7`b7tVQGa1~?@#Ibd(mA_D>0ep=8@_ZGPV{Ca^m<7K3G$$V_=n)%gM32 z5i-B%7e?V}&SR;p-LazF;qYxo$hP?XKa1G5YL+w#!Gq2{|c;{ zlVU9&%bMFtbehBp_h~JAd9!1?rKhO(dCYd2evKi^)jCV9+h~{_znP2A_~mqQEjz}$ z(4w3Bld&(W>nj&}(=< z`y{R-!8SEE@KXuK?dg+u8IR%C`Fpp`VjsGpmzbStDJ`N{R+# zy$qf$#pzeo&>EwXPh+8$zT+zyt>$!)JoCPd8^v(;BDlBFTYRFu6Vy3I40#z}ZlTR+ zQRHB}=z&`~Hz7Up^Q5`jr>CLxytWsyN-c8Cf?G9M_fbPm7P*UVnT<-my{3Ac*Nr=> zLoNGK7HJ{zqBU>GDMbDB6_>K-c1UGy%I(m~X<;|1;SPS*AK#LD^8q>2igZO%}bR!%_ zuz#|uT}<}BNR~{wi?O2x4lLKN9?1ShaP4ds+ftbMh-dvww@T!^96INztC8CBtT|VE z@6q;q5iBQoZYucw9CoRSRXJnnXMbmwHo3F=X#E{`oea%W$yJiYkJRU86~jPv=lq$h zz|5Q4bMRv=B)YKL6gE%BjI4yXiVat4?K|Zw`+Tn$lblEu#G45;n?duOm-G-cPR5a( z5|w?6O||%Qa6z-#6)*^nzpDOdS8osH+8epgQukY;cA_aU9Ci=imWjc zr}7>=*)lRS-%Y8OSXK*;R~k8#B;nNs`J20^A(Z;kDw(IU8om}^3qnQQGlE4nVB~mh zyv+_-EAy^zlaF9CY}>nkjObE{B^QwI0mvWBGAmf+G&20;U;4v!KD|n_WM|)wl_x+p z*`dpjBd3B)hj>5ro=LODIG1(8P5EioM}G#5nR=(v;(sxSWtQeTEslgv=JC&mdQPBg zrT*lDJ_5S0VPmrBZpY8}v2ZY1tH5)C+W%yQj39&G8~E@{!6JXdtvxuMl}?|Nb1fga 
z3^Q}i*7ZtXg=<-DH%0r&6tf&>|4Z^WX>kBacEcvK9C-_VC)VtQ>nqScMmvA{E?FLv zOKHPZiJ{G91&78wif=`#HNiwJm z#+XlFxXJaWVNnj^nT0x6Y{}efPCv|Bkm+h4OtWN4e42kODD)8(-0>>kK1UnRu+(lS z|AHmS7`}@9=acA4{dsYH<@0dtP3O_#Vl~+8#_7G}OwQ+FYB)h0C~8Ex&1h~p2APTeZ6m7oRG$@nRva zc3{2xuyQ*2{v^dwJT2$9=In?KJniWMFUmvXa(v0$N>-j6i&cBXlYgq|3%;1X`40E> z$J5U4yAzJ%`9$*n-LK^$(9QXn8}$e8L93RsGxSP5v|9&O$3Wvj7=4QUE%Z~vw3QpxZ885Hk!cb?-vqf<|INd|;5n2|Rh$?%nV>r+WTNb6g)mpRVndgY+7lusS* zdPYfuc~MR`*hA0uBpYMYkn?9&Y3n5Ro5&KC)N>r~yoL{5M%T;5%5Pllz=j*ukU9Hg z;F-dH@9~M5tTp3;>3a14;Kxq(D5JK2^Rc^GXOK2(@rsOo4^ZP*di%CK!3;k3(H-~$TL z;0%7;fCqG@QTn}{pVI=*s$tD&xTXgzM%v^$d!3XyBc`@-;Yuj3CRt8A`cD6_jU_vY zNaOuKyQt5@jjUjd?8$Rq0bi>roi{KSl0GZ_)0-u?=;KD-C&KemG3a_declW`%;I^T z3bRk(jFd&UDu2x7O;-WXzhLPLQ6A({uw8C;qvFKP>N0& z`R6Gvt3&cmbuMWpvu8<`T&k|~NRkY555VUOvtUR0CjEHU3`}6bC-fuJA>USvvN7&?kV~v2Y04r{5d1WJhl7_ z>&`96*@L_za5~S+EA`qpW6i;AUPo-HDq5C=Q})I+)^hsPp6>e(6wc9hR)24ze@;|< zkWHFF@I@G9&g&nn*a?Q?$TLh`Ie~mDt7rAft29YwgREIDPQ#qQyOFfB#P5nwzXC?X zu(|}6e_Mz|4`9qceZHM%Ok=C*>^Ksd8PVl=cn+=dF7`hB&C0$*X_cP#QdZfF8^w&N zs$g4F5}gdow_?n#{9&Pf<{3zw zT;NpVPHWL99+Le%$-Vg!?X#9+teW=vd$7A^>J>U+|Dj~ej@Tg>os|L)qpY z<>&MN_pOH>g*_{t#&<>#-kvS z)u1_XIhnD}fyMFcUX~qmf>K|MI{?EX_eOF*28A+o%{LfKs7${foQ$#TJ; zm$p-3kT9A3)S-)Dw2zV4vN;ZnN{Bw$XavtOsNabv(;=D66t{I2#;NN+on!;ai zBU5roWEMYXu;g8LON{&tgK|1b&Wz6Z;9{sv#-4HX>`S&|MW3G9yiyD(u7&gh{k5Al z7dfpX@3^KzYEXeijWOZ_mVKWUnz7#@{PqR1Urfep>0OKWjf2<#Je>fI`(5v(Y(~(@ z1Q-@%M`hMzO&0@R!M*>8TFKb)vTu`>Y>bgZd}xW9+d!rz#x}+LE^2647?a;mf}8>U zi|+=qS}MJ-_O&=V8WsB2Kak zU8=>+u>Fy@CQr*>e1D?a(zCybm1VKySYv|hraaJJr;~REX}9S!55@LJA)Qmfv;KFb zzy4BNId;E^Z|(4H@|a|Vl#>zOXXCx}%SwnbShLSh?E3?oZJ~Q+b*5lnOKqK_%*BOk zSxcHs`gz-#v()Ee$8OQ$65Ot-jX(WAvhJf<5x=LwFZ+GZW}U3&AES+9)t%MI4YZ$~ zPUnzx6?U(NNY0c>=F`o2@UAHAP8De~QhSTfCr?^gXg#D?*h!{|5XgC$*+INW6s@9U`r2be{p=Xpih~t! 
zAbWvdg~~GZ)+gbGB5*VHJVWbmi~x_r*DqN;ruY68M5YSE2p>SUGU3Tvj1eUNd;@Z!saW~ zIvRJfi+3^!>d`f$gybm5X!r)KeO14Z{1x$sGAywdH~xgwZc?t-MtyhXz3N@cJfgmy zEVkNxJ9x~UICefgGgqBABu}y2WLhO(btgZ`6r3GaLt&JCK&{nv8Lb||qf*$Er`H3Z zGam*=;&n2n_9Ag|D!)PFtQV}rW_gmz+o@Vw*o7O(U%Q1jWexrn^lpepd6$|T{&mE; zy%_PCNS@uLU%GOvJBE;OGhCC|ZwTh>(^ghOZcyV}u4e3iCoblV$q-TE8R)Ii?#%_e z++v)POrAMkzCJI^+r&+x@mbiMEFQ`Hp1IGg&Y!`;YiQSxZ{}3Ei^ST_(9GHCV=yF9 zydB(^<9t_tXQxuK6C74(s~sdgDU7Ypsz@c;Dw zU0|3IQTD42P|x}7)8Eflx_(g@m)`)BWL*AI%WcGkTd^Z6NXMvalpcMW7@AdRomk*S z{Y&L*8OS3k+37*$gx;PeR6?bwB{7~R% zd0v&d(5HBR-mPcUkqocxa3JqY55%R6^ge^eV{puyt-Kj|7+=8-6pV>U;7IzNO+7EC@2Hp#?r!Wm$-Lnh~K_Y{5pRNGQ*{tlPy?aAqr)gX{ui*ce&-y~pM4t1 z4p%8zg}gsQ=6zn0eP(%@_=)w#`|fDCU#;ae{OC3QnOw4WkgpW}WgXrHSbvf`$MTY_ zzR&)chqaIuKX>}{0H)@AvCM(bqs5y%H>XHP=XL5I&U2r@k0yAxiPp2}bqWv8c_rl_ zcn7v#DN>IjO(Qm(#;y&>c>$S@;v-c^SCS8RCjSB$X07%gzFVZ7m0H~B??0iF^Cov2 z>klXO?P5!RK064{GOwMS^QBmF2ANwacQsr8pxqbwT2471qvSV*kydsJ$BIS#40OG| z|IdT^SGe4!z@#@xR-P=YVEU_=+6RNP!Zh>$6$-djDa8Kwv|EcTvCzT(&S?pmh0I)O z#utMzsxoV4mL;o}9~F_3)uOf*r@Qt;VN{!)C&}ZpjfIP1L9zq1;=jM)|I1L!*leAj ztmt~&|No6KRd`-zz%ojm1Br}sHi~eQVV#+|A#625*^iWKNcQ34a8JnR6x(~0oXM9T z<2iZHu$W9Ql4hT%R+1b!;U%Y|7gI~d{+V6NyMs&An-N(h7*8SXG7!4Dza;nBeQg_qx2R_$!^&~AO8(n5| z)?#3DjJOYbcHv)g>(>+uN71qho6pu?)}qO8m|0J}I}TQL>3pR6b1q3w+LR{y1kos~ z^V+y;q}sFgxdyz>fZMeq`gJ1uVf>*N8Fnhy&$m;>lhOK^BCckidf1zt-lvJ@tLb|q z8(c^K7kJkh_%aOJ8evvR@(eQWY9*fJspIg1q*Gn#dO>|X`g>YfK)xzUB(HucwO!1c zGbYOHNHsAz^0)Q-YHgfAyX$#T##yD+(wz*ep?RHnd<5SbLWiz&9_VT{wfEG2WHi;7 zB%4K{TI`t&g~^JLlg&QTN;z?92K4gGdLf>jDca?npdTSzoMq?vJFAznRwUUOo4KPF z3;zJ0?4QeihxS;vhs7>p!Fc3eQsj-qZQ|Moh3LBs@&j2QXQm|6`_YiS3`W__bUQ}O zR=zfSg>^Id#ZR!z8}{s18m+BO`qGREYeGIb=3CP1K;G0^%SXHK!GeW5YGpk9lGl1W z7M!k3IY?v<^%K$hMcQP~XmEIlXP+)&cO$`j>Nv`sJ$US9Ow9RKIs3Yp*qfa{=Nnla z2K$yi<+Q;Kkl9S0NiidH?e`VK80W2DPGZaZ!S6^}sUYtOtamOBwRB%{ zPM+>}=5|VmiJe)029HXwUkz7!h;Jjb@DFX?ujB}p$+?YB@V_I}`VV}(Q0p_;G8w0` zr~W)hl@SXXu-_t4;X)IEu1vsSjV_I8S453$O@*i{;{D)E$Nh2D8L|H@k1VK|uc 
z_`daXk!bKa2LB`)T}}EsXqK`1{gAthzP+`&L7P+XIMFyK_%+ZcjaKGFf5kt#vSA;k zlP7Q_bc#Xq9#U0w*PED}w{F*p8qILwNUeAF?Jl02XPlCFmpu$G6@2wQ9EWB3wIlD%qCq<_{!RkN}|&9*qKu!za~{R zyl(27hQ?N&%tDD1Jec&xsP4Whu0wZ-U{8XS*@jh z?IW$8q0Z@QO60hjZljbqkR`K@zLk;j2%LZ0)dRJWlfm1v&v{}=_7prszl{9KlPc?P zlfS$@zfV^4bIF*MtA8iqD1Bt2ex4TpZ&&>>%H^%@M=-gNKXr6Zagt_+BKrukKC_P+ zT4QgxUcOQe9}@LA=Wz?VMq~ob9suE>GFlLMvrkK|kw( zll%4<2xZ*)G>bMX@VpBNEAoeW?C}E>vXf~W26ZTmE*_*u_R3zbUt6T7C|Zy;PanxV z+|Wp+ma91*qZJEw@onDyJPM;K;(bdag!|}n9KYO*NjV|rSqw^>bCh4q1Czu4LGql$ z-|jNzy$k|h&?cD>HyXWt!msjFmXl*zDBnPjp7&X0lo&$4Z`m&^FP7rm&%7)rMXnbe zHy3=f3q5+1STsVdSw)r^^}GqWOkb7VvB@fQKHDB7x@8_EXQ&M?*dVK4{>ztE z;$3am5!zKRSk{iTCl{hfCHH6laC+PN zuI%K0$rti4Kba%i<*dG(@79d0nH}rzde)&ehyE;)@&xrwg7(EC;3W0*Blnf;yj6*l z@Z~_-p5^3lbR#$jCMpbv!4I1IgzSLGarh3;L0zaPW;Am3*n^JpA@hD?d9IsG)d zgOc~T9NDurIy=OEfmP=Gzkpm$?9cS5=oT>Be^o$AiKRX;dD&z)VBTPYSSD6r;3IBeh>jYZm-aA~dnQ}D5RVXTE#ihTzK#ctFflVGbyz0+$~gefVGCuLsVms62D;!;YK;(wGf%YSneDH%1#%3N~-agVJv%>*@}7 zS0?3gWUJ=y?94ueyvg&>k>$E!Zk`>k$BiQ7+eofG{(jc)6Uo*Xg0-|zgT?#b_YjN<;4zWw-N z@_po;bVaq@s;#UwxxlBTxRV_zUx+6;A2BoS$qkTm5R$WNAFUSR!jWRzCAfHjJIe8_ z#lCBWdqwn|A3*yO`X}G{PX*haqQ-%|EKeH?Fd#cmJBldH)w{AV%b&andD~h6W4k~o z`P-_%KG|@R^EUZo^ZbzUgv@n*rME$?uPDZ3zk($*8^+}p> zPJN7;D<-aEmzHdFE7>b)=|V$qTVLVf2|}0h_hz$edkKNaRHEM!q?a9-pvv z9opu!xK(PqRq3THzm@%Su5B5gbN+EvZQrhz8{l#qZIUsfG2M4#c*cv#&eei zUVWEZ7U)sd8?AgR8st6ljatZB3ijqj5O zyqn(ede@V+pdR14+10TkO=B(QS*|8T|M1h17f!^N?B?4It*_OX^EQg&#OZ2l>GLhT zqZ3c)PVUw8Ek^sSj9$WWb!d5w9zAckvrqH$!ieo`c-2xe=iI%oU%1WHFY#=%Yg_o@ zF{J&BJ(5>3YetGdXg%I+qF+tS`dPV*$kr--eqlV@mF^{Z%K-e_>dHf+=K)Zg$_8bz zE*ZcRQ;zc~Z(wF)#j!ZQozBZ)oEee>ux0?JykQKP6~qIvs~xR=bjKLno=&ce0^VZ5 zfiO$Sf!L8;kxL*jRj-m0(3jxty-*GR+ldxA7ho%o7(|2X$u>kCdB2)$rz;`!l;0_P zl@c}BXc8;rZE5xeFX2_yp_w<@GqqouA9nG(Hv8mF(Fv?GNiREz6brDkwEHWud^LXm zlXi1P_g!k+X&f+(1t&w`7bS*Z!>wX^Ct6)0s*dAtTgmV)`Er8c4EnvrTWa~dfAp5z zT0cOoDF58VyY9h*H!;Wxi zM4z=0H88FWiMwO)Kd>hG_Hu&AKY8Icu`*{oJ)(}Ba+mD@xCX#zQmyyNRnKXNAb*YqVsk*Co|?ZJ|&xI)-q*P 
zK%TUx6)g6q*mEW?NcQD^zS#|dy2cJ!({cr!f5N5gNf~HVHW9BgK3b^$}7 z>o9Jg>#vaRWl=fvxjEahF<*X0JN1kQ#^GF6K-{R6S=zr;>>EUbtmD6kCk)5&oLjNl zJ=rUhw>Xh=w5W2rlB?)HmhM?q)X``oZ?5|Bj{VQg!^Mo7V3@2nCB*5U3&`i3o8{VQ z=-Zj@9%oEF2R`SpMME-W>`<2U16ZUQ%rdvp8kXg-Y7M4luA-@!nANCyAaBX=)4slNA?57i!~xxXS!E}QD@$FmCwKMoy%#K z^9S1N;d}Ch_gSVP##P3{C;9Nb@O(tQ)!bQ%HXD^1k0W_fOm3Kwd@u9;jj{7~yn8^4 zoyoM8PW4!;hw{n5H=$tXBG5cc?K#E0BU@!Ith8@)+V<`0%$~M0eE$b|_LBOE0zdDA z#C7x-qP_0^-9nG-Y+8VE*XZ@LZ~rnaWCe349$N?FvMSjGYF< zrz#z?1~0RWnF$>Pp~u_}Yfzpy4%O1zdh)z+&u*rA z+9}Ut&g6mji6LFt`C5@DZ;-M-<|Q8ZkZajXS{-tE&UlSw4-HD!_(_iryhNw9`PjzW?$XIkpGGnr?5y?ip_(5P3Rqt>!tj?L~P4$m+11K zdNTubFV3yvwWny|pY(r9^vQf#R-<-ikBdo`r{D5QH_%sRCHOEXe#NiqV9k4a`-Wuw zf*oFWe_7wH_QW{f_p8K~-K>yViky})j%CA#Hl#j@#T&cYf(_fLxsrbF2yu7>bmkd- zP9(|4xScV|E@+k!jrNF9OFXA#pT>o&YN-U0-Coh-SxWH`y)M(qkqB zpTmq7pmGo$&TxIM=y{*GP?Z%=f^Ozy_NgW3`8^7|-bR+$tJ;~A$zxTSRD-Y}yU#O2 zmeX9$hDc>$=H!v-TaZ^>qDwqWIZ$-D{@rPv^u zc^-gAW+bPu*5&+t7mMc9tL)XuyUgW0V7Geza92(M8llDAq|09G`)HQkqi2ZseaMsD zeecn!nK<+UNe(Cd9dOQRM05SVk|mGT?i!lkYK)Y(UwNnB#y#Wo8rO@zGa!{wR2B8j zBT;MU_99L8+2!=ucy}9`bXR{yix={ZsjQw|C-3^J0v*mL|03QWFDL@D4bXc}xoUi3 zqIUCaTv1y&(Yy=kJ`~03!fmDcTDkhItJ7W0`KR?rbE5g$Ualph(oj}zqd!Wo+nc6& z{(el&_1s+zmf3-r`JOKBdyNLivr%>sb|w8{*j-hy;7}U0qE+(4q^%S51Z|XTLEj$G z=;Yt+c$;@&oyEJMY?QZ)Yw;;rLz34ukt3&f=UMw+SpLQ?Ptju!PH!MnYmw|1EaddoWge|;%WA8?r?w3?mk0bJq_aH)sgXZG7Wyse#z&zKRdt_z1kCM zoPbyJNs_bj^3-3B{Ox^r56N=w!yEXLED19)^&#W9oEABk6u;^ZvIZ)vk#qh_c9UmS z+5IBhTdr-wv=``+^@kns`vYF`9Bl7k*K;A86C7W`yS^BFkyhrzAoEDcx6qk4J*kDy zppu+xc?Xm;bEjx2eCS56oN`=EB>Iv>waK}XpS0w$cj`q4`#z^^WqsQO{GC={=P>&G zT}_V^#t+$NJO$U26X6F^R$+zgp9%7rudVLh=J+{FsS<3k7gtWBb236^H{1pz>^JC= zcg@?N-ICPBFfVpl%DUMhlx#HpX?LaQk<7>cBuVzv^wK9LL)JKW{DNiNT%63 z{$&UHeGpmV?rrKxhJfSqLt8)pr>S{@14g zID0lt^Yoo}2G!ly+P~~u-RYi;toN})PAEu*@@sIThT1ZpHV(2Q3S-t!)Lb1(t$1ex zNI&kIhW=jRT0bm2pHI#ddy-$I3mZO#8|yWL7}1N4A1Kum zHY+eVdqVd5rnX-AIdx=Kb%2)6_sw>;{fDTyUAukAoEeBO*s+Y>@_#UkZ#RJ2E`4t? 
z-n$1XIYD4KG?ROzqED^xaTIn>^{+8hTe5z7&=bYD76qiTUUQD8!)2H^R$X~-aXKk- z-azJVyJ6QJWls~q^A2^a8oOcIjapu$uHhJxJ<~0GbE7!=AWunduDK$?Ev#~-I9^k< zI7bAyjxA4ft(iFYDx0>|-Z=VfQ~F%ePgeg4IFZp!R&xFaMo+4_343NmXL-zTPScOn zls$ag)zYI-^P%`q)aW8_;9lXA$rOAfrei)6N_yxD}obrNptB4{` z`<=Bxc_%X+V|QXtF?D=otn4I$y@NxmefkwIU%--=v3t0?^E7^zZ|Be?D=0T8RYZyK?FHVEb!-b^ zK7y52LTj^jvoH7xcr^3f59Dm`j$-`g9-3VThYG$qf|sVR>8@X!Sm4fRS|;Pg6n2{r zs~>&32Gg43*O9nX+?_Rea#oBSkDX8Rrblt0sHl*g-!Ho>PdO*Jp4<%C2Q-Tm_psDx zku7g}ufoynL+wO@J6U@S`yJt)w|H*W-1Z{xUii&Y>vU*j)<0(xbjH)H7vAaKoGO(u zes+gX_d9#QvqF8RZ!&s%Ra{+(&5hyL1QU{Hur|#X^Wg_EF3)1gFPu{#)@u6$pFhKv z{>tov-pR0iS`DStkW=rwiJkw#+l+dK`&3_Fxef=D>HIf!+@%lbExK%k_D)`$H_MG- zeuleG6Hog2)I}s(qI3h=E@j2cx8zB%DPPQ~9Etv!mzYY2{cGWJ!g5YY9;!qqQq54u zQQA6-Ph_|E(GXA0sDt#dt+bPKm0Q8<18BFw;tihdYVw(JEL2C!)wP>mWhx!Z^Ylc& z?$BIEl8@E>xssK%`8A0Th35#LKK41Qux{f6H?u_Mo-W1aolwh32uCWLC;iI)|0Z-w zvFB)L{K}Uq(f34J(U0X zaNbsRz?r+W`z~)<%z}47KI@Q^^L{wJ#)w%thx;Yo)X48VVPuWf=j6+Jw!9Oo1B=`E z*>Qz;VmUE=G=}~N{Tpy~I%$V%IjbXbDqP;1O%S2K5U;*ulkBz_DLQWE_y45LaJ5&^ zMzUBZ!|n>D^M-W3D}S<1GW6tq)=@Aj28Si^d!96{*zr-{Zgt-#c&~PSBb&aWzr9s_ z3AXc;s-#Enk1fU7Jp17%vUwYy;{6BtloNxS;7lcbP;$mEfL2E~xev~Xz(e(DqtxHr zIHn2vpF!rutUiUzOUc+s%(m84;#)FwVyD$~xr`3?yEi8dR3KeN3~a~x$;(uq zUL~~ioA{o7t|3o6lSIinGDiH!OkoSZ|-Ar_bKR+j36SpO~23mk)}xEtSd%>Uraq z-52|?^I&$nj79f|a;K2$Qc@JBZ?a9!rS%i)U+&&v_;x};vXRPXJzPdHYenBmzF!83 zm1?<%ZQl{2>yfU3YstUyLLpM+oQy-%euy~oj(bX(Z8%OGYE93LqF6mr=Om%-{{GRu z#p#x{8hPtDhaLyR{#M9O=Oe*rAok=usgY2is5F~y?3Ko;>Zmc`%hk#-JEygTh?m*Q>o^>^)iwsbNyG)?<6X|&KLXA zpbop#@l809F+@h?-)Som>_5;?{5~1?z>rKaQQRDELNO*Cz9a z<6zqqpC>B!9qG%F{a_a9ueHO6>tWYlhfZGASdBV@X)OkM1=F__! 
zwAQ-&E*!W*4Z|V)v>5*(tIu@L9IVcn276q8lsyjQS=U0UfzO}d@Byrqtf}X^CueXy z36VE(@Co?*#H-Su41`g?LQil33@@W+RyOv;o+`c_N4qOX(FP;_C$27p(5X09v@nKS zCVpI|R93Bj37MuM)={4H+G%4L?|Bn$RcV@cq}ie0nO4tfC2zeZ>pgSk$Q~LTr$o+r zx*zw?V7U_HNp8DheE(S(ZKTmY|IgfF-m|pV`WW`EDUKws@7pkKhcy@Z`x6m*A&FYD z_N`*YEY{AdvtRiC|J?B$DU%N=`;yDxK;C&(RVHT`zshFW`JAzA1NB9#+kMj+mP-oK zoWttLLAq19R{SaF#%Co+G4*AxY7DKfB1LBCGEZ}?Hg8~&?hwgy$3%>-rH9W7ww&O0 zlHTY&chuuU!+7yXEsbH>WM0WT&3|FqPwGgH&-MJghi`tu!bM`lyJAEacU`Vm=)tCk z(d~NrW}jkqkF6%fO!4LrcP!;8|MJW-gum5Ob~RL=@?GZG^EPsT9%&=aC!cNRR43tB z6aJXlxy`P;0MjG2+q&RAdE33gUs;)vx1CRuzP8pEscDV-TG9I<-ZPQRS$Qy093R4x z$xoN{V^6!X8R{pvqX~q|^0B#?TFjjzjScrHUrX=(gi>dd_)6_wCgx-w@on+4k9I$S zYUXq=RsI(I%o>H936<5WFDo+%8#4EEH*~s_Cz-R3rt!6WupujE%~5rqP8OwSxl#?1 z?ez)A!YA)}XX?@4#J0@yRE6y@damLzS)-MFXc_w_6Ge3h)h}2ry;NyPjK`1c5})L* zu zYABxOjc#(YKcVle3WqmHJDlZ{`8rubd%G*=nwRFQwR}63Y{9FD`a9EejrvB2BFW+3 z(p@9O{`4s)k#`z>vU8&}j3!{j{q=L+1(Q$c?1;3hz8Xl{*in0^q>AN8R62~ zY=q^tG%mvqx8rGV-g~e}mZ#&KJN`O-ni%I~JoByAe=kJ-b37Srf#M5={v>-UD{4Ji znHpl@P;#C}_ng?e7ANQ8Th_8yC*#rV-bZ~`@s>Sm>EPSsY`KojYQSViI%@d44{oi*jl7e*SRax7b+OJm zY#K+arck;T%Nyy(cCzRb`l>!Sd@FQwn#o6giqNwMJ09*@BcA;W2{SUuC_1@(Tf!sH zf!Q0Gr_kTAXc>#m(`xq0_a)JjtUKN)^1g*^3-3!sq#$okIG|2rI(Ou0pI_Cfv*KNHtO)s89DPjR zbe4O2^Wm9d(<1lw$IIum^P2X4gzk;v@i1efM4R!LRvM}~v0@1Z}w){sZsLWNWLQ zergy|7$2{|*^7KT4_8Lf>qMij!?7V}{$7AJ*<*Jx8!jeg8Tv-+16)f+nXGI3m26+o zI&Xcq;M&jd`&NIkk@W`o?suM83j%o~k*DehN#0Z>tq1w6ILNh+S^jc&WxldI8)TJq z#`)ve?OS^6#ia^-B7I^F-`B*55qN(cUS!tpd?@D~dOzG42kZ9!pJ)FC;#qtoGh>gE zd>WS)2e%nKFC1(^L%ZodRB=f|*L)AH6?~-ge^W8HB8pYw0bB)f#!K@U%O3$4&V0lKG z#FpzX_*9R+z|UNK+y==BN_<1V)9~a1wNBx)oFJ6C+zt=#1EhKYZR8D$s z>CQEf%bVGparAm2g1pK1u3*_bMI@vB2xv`&`I(sWu)4a4^|h7x4LUgy;!Jm@|LU!t ztQv{_8K2fweoTQ6$rjZgyOQH!JeiY!WwUQ{ZgDauA4kKIERkJu!!d6#`7-CXwjg!h z3BIkJJK^-2*p~6qYLc|4)d|pii!2wY^G3`{50ia}8Sy;Kv%kTS%xdPm>O2Fk(psLT zit(%a{O&=MWZ+xC%5P%TqaxHbc-b9_#eI`i{%4B@>y02=xqFj4zEf9e-!|1J-$sui zB7bF8*VnkbfBP1;1#wLJO;*!h)Ok{>Up{d9GI zB1a>54O3HX_GnF?p0w@9L*J)i&K+2(%siggpRc`2gJkwuXx1X{sq2&UF5DhUgUkl! 
zOou7BkhrrN?tj58r{0tyVNM)6nrBUBmnYrvD2_hI_8UdsNBLtaGuA$Q^O|6XP$8$UJu|8$t#~{t(!-C*dX;J{9)Ecp znqRTTnY<&X1ZG^ZT#1a>KIHk^;8+Qo*?Ih3!DC9(G3!qEsbL%rb>t^G^{Oh(zE|c* zIJfsH8K~m(-C4dF{4$sID}E>2RQ5yX+}WLKeVyzr)OQS-k{NstFPf(O8a?`j(8v?_ zX>`hr&m(hHs#&gBdpce zU&WM3F3apz{>10(1ZqUD^sBA3@g4onQo|SQa)TbanHD-@(HCk%SUOcv|4h~<4zuvGsH97(+&4J zC_7$#W%&B_q{}$yVowx1)Vo^g+PIJznwROBweF|+Hm9-WY}g+BaV0rFc zP8Um_sFbd+m7E&X87hNBfa^(_{$@vk137;!ZwHgPVFC{J#qn7*Dz1MUfKz#&_ywen z(%KLaFRSo%uj__+Lr5RqTFv-dW_ojX`lTrseKA* zo3Zhw^my3stVr4i#fiLjG5crL?4xAAj?G@DOR!E(pgx7QfMs!EwHCh;0WQYs>sf3v zywb-f-YkW31CehcY>SD9)x?j_l>UykRYi|^c<`{OIz;L3ako8&&cdb|Mjx-iYdA@k z8sA+(>YOY+6QcjpPG%)fhhLt&^YlAN*$MjE>FU~`#uB7?pRfN${*7uq78{Q9JhlSf zOJQ^}yt32hZs@*6_D*W}krh5wLrpRIP#9guX6Iu>&cWLUzwIK~CH~4=vyAYQOZ`iq ze&XM)$TvVd&i>YasrhS2op1+&`F4T*)`ekzkbC?#ANVv{|o!W>@kS zr&V&y+^A%2QngZJNp+5a#}2wJ!hy_%|BL=J;WQo>%3?q`pEtP&(fc3jzsNUPk+na& z)Cce$CZ;xk$@46fvDMQ`Wq;BhOwStd(FJDwp{2)-LK?ACMN-ZZQ<@n|^m277RI}IY zFGx>hziO~7EkeJg?uX%#6Oc|LMc$eAz>((e?2O67+?SKdzjggq{mH?i^mHD5p;mK3 zYgVI=^>Zf8vi~Ogd55vbM+HKDe&%;);7s!ICC^LNO;y($v@4+9Q;8FBc!^QMha!4kGCe};E@IBv+C3AW%KCjJ zEB-;+M@8$bsGUg0C1l7vL?e;lY(EY0;NJzdG-0Jxo?LpvGX3=)UY#|#|5n=;JUv*< zJqHhdhTvf1#?sJfFTQ>xp2YH5saeN2dDa?CgP?XTFa4RO_rkq_dS5FJaJ(z%VZTo2lDhLL)MljYs7nEXLdsyx9HKgvfR63>i{Smq!-P& zKIZRW+vs#4%*Od*(mYR-GvtZT};?_)`m6+$_+>M6hXubui3f(zW8 zr?eG3<53tVJ8dth)}(Ltjb(S+p*(*djwLtIUFym1y|#RBA7rQStli2_!GL8nN%pVi zc#~`cYe`&06v%FtvVIz2WG{Lq6I|9W_4Dm-H25EjHRHKC6@0DoIbm#+7S4rAR-tDv z#?3}6H<79+>tvj;Kzlh=dY9U>J}6loF62KakYqdr{)HpuA@?bkf6foTrgz?d)pTXM z`~Jq-owSlnm3f&#W7L6B(BENj4?_;ZAf7}u~+F^u_(P#DP_lK z>qnm-@pqnl@`RXmWEuIKPqrbfd6e(Vk!(8)mxJMaeQWHMced|i&T-nlk2TxF<`yhY z9`5A)Xe-Jl+xXj%JP1Fprhmp`^YCu6-XN>W`$6$Nad9JtPFCtZ_he;IWi{+BjF^J> z0@CE{u&!#){B|oImmT6cmnf^~lWi<-yt0bFr@D^dFc;6baA!kz^ zAr>TmYjyO7sJn5(F;^-2%{!^`w;K)&=%(&5Z>xaPUe{hndCRhPM9y~yNYy~w>wO^EfCwP)CjdDqg;O_ z@)Y+4?!ODcWZb<{?MEt={6}f)L;g3*{cqs?6@`&)9l9r;9EOWK*!?DKKE_|OV7ZHg zInyUu_cAAy=(&rZ*8Ozly 
zXtO~}$%l3odA8GPs@AiPv!~H~_{+)b>6A}$sX_~$+5%DDqxdI!Gao1nu&9z@emMgI}Z>xJ1Ml#FPm8@RL z<2nR7@szK{(4W-Q724mB=RhpTO6WdXECsim?(?$9IZ`b2w1C^KV194QgRqOAlf^~ffKKnk>WC`AOOfJ)septV)gfEckM zjmTa?6e)@bLI|6pfVk8p;s-?p#As5Y23&ssH}XM0W1^AL>CBsZ&pr2lmV4fP_nuoy zsSb3bJ37}$+eh(TQ^9Hi?>IN9SIA z3gvvHteqpx^gSJ#G5n6Q%6;a@@ihAOA~Mvtl!dij3x#g@zg&3LDvCyYaV7e=oH0nx z<^9msib$RK6QR!$IMAAiRA^`!YKIE0qKMyxrO>-9P`d}q{ z_|BJ<-Wi z>)>WIWdgLblU5cH{|p0(e(2Z-PK2Tnah%hVGBRjRr~+@(h9ow?QxJs5JFR3uA&T}Z zi@91J%jaQOz$R)RMsGCg4hJ#iAs)cz*O8fKY4uFyB9wGKLOR-`vzy=Ju1f1+Mlpsc z+NdT=*VXl8enMqLE z28P<*-vM$bh{UFV$U^KySr&9wlg>eG=KW;e?L~gtL!;ReopBNZ<=)gOrst z2UU6Y^wdTt&0WQ-=J=-2LaP%iKZbrO2W1@=uN**&oQN|yTa^W0Pf_C|dawNl+Kadg z-PF_WNk~JUNaxln%kW*4Y{wfEb2qselrx+$tsN~&1Bb62ZCnEOJ7`1Cv!m4bg{#^% zt^2N1V0I^?XatTGxi;ffPNHQQpq&HiDd1Uz$8AK4%ivsAueIy)IWN=Ok(6`O1D&&6 z31`}+^(H#wqf9<>^Mbfevp4~sieI+V>q7MEL2C3R>QZD?jXu4C@4AEdc`JQU#$L_3 zZ3F||`K(97G#{!c@@XU{Yp%uWcTx69$4lwlrCM+vgeO;K{YvCC58kwP%m-CP;wRAs z#b_HqLTelgkoDX6tXBBYsBg4j(XRj% zd255;IUqP3f2s^+QFQbjF!sTb_7ZNyyKKR4DA$~F77v7L&6?HGw(?or3d%aCa0OD+ zn(1aZNWfQ`);B}0F>t-1@RSvMlLOeDXW~DbHG?mubYoYU@g*S4-_7 zPH7i0lI~f~I^UgulUwmm$I-8g)YVM!J~(Luf1Q~vUtGutD@}QkZhBD!tk#i>>aU@NFNrcVyMKo7w2!@# zsOEdL=ODT({(GSlhAe6s(e=drZb~$uw`+8bD^c_#2Obr<^#rk(kW9?c{CXs$HY*u* zH^ZZ9UjyyiotjIX+WP(_(Ekc8*NI9$JIKeNpk1{W(M4qk)V$h3P~CwfCZpRzVlCFx z0$Q8U?rQ4lUUC&E4h^)RN^2G-C)zE`wf# z>&i#95$WxAa8Y*n;h?VB)#DB}U9cYUp*alISLVb|@aENMj80X{H_-NHXjz{|z~!g$_{1w*F4w$x)xGiA^QTr<&P!yJB|_n{ME5X#(2x87`V%ip zWd0R|Bq0B*S3tPx5(t;4sSnw{D!~9e%s)&gB~mCX1U!&?Z==s zSO>-$|fcYWZ@$^64(myKje_I=X zkOK%-m}N~ttM}Q|G|7$kqtmT84a|7^s@@_LJC$wscCy@n)dr*z2!^uvdH+<2Y_DCP zVc(@&c_#G`)=Aahn{NXgNevAMYV%7!E7`4XU#Ht?FGBxsAdji~G1Xb+74cb#o@I$# z%rvjmU0#;xRqExncd4g59-p7cujoIz)a?m*LzLFP-md2gA3rpqA;qO%nfs-Gb#A>Y cuc?`OegD~W7jhb2iY~bMxskt1DeiLp0WIbi+W-In diff --git a/tests/ut/data/dataset/golden/zip_03_result.npz b/tests/ut/data/dataset/golden/zip_03_result.npz index 420db667906eb671283c89e7f34fb109c59b5557..0e1d28a1591d49e62c8847d8bb9056b4bf4020ad 100644 GIT 
binary patch literal 1065271 zcmd?S3z${am8iR`D5#_ru5KTd!5 ze)pd5)cV$5Yt6@)V~#mqbIiF`wG?~Q49Labp}DUgc3A%(6&iD%y>kO{r_P%<{l9+d zyg3W1a*=33GyS`6@=23g{-!E-ac*JL88a_9eO}XN^G%DUG#!y|`up?eT{!R5^QNCa z?~Iuyf9}-T7tAF6f?21|naOkW(MRO}>hL4-m*xNSfAysvr=NGxxpNkL>h$yH&HU84 z7tOx#Ebu*bLH+c3GfzMNybCUzchTt=*3ZpFsrvKIh}^kV#oSeMtH)QJHn+#pxiw#$ z+q1b?Rje&me|2uJO3^beTrg*5{oL9wSJTZY$IPuO=89EI=l1^M-1_EXk1tn|x@c~n z$&)9a@Mr$gPjT#pb3bz0H7C#Q+x+Ev^3OVN&P5kaKljuN&Y9cqA7tB5I zn9C4U{c?{llt_Ww6WKu zNBT`_ub$NM5er;%$fQzj(Q!Puxp?+}OfymVkY+0%`H@M7_n0)g$D}_4=p&PQRsZYt zLAxhtpLOo3XU?2E_~f}mKzC?!@z2FgoE_9_ES^yOT=5H>7#LzrK3n{6#bb-3izB%n zQ#`s@D1N^9cg4wECltR_oJh(s#g^i^#i=|^FP>IBvp9=$OmR|iZgD|z4#b>K`I)4h z4z__w6>T2C@v-7ToFiy=5WlAvj|0|mT>sxc{#<<-cq~^xC5Zg!;H%pi%%6h8I6}|>BQoj(0qKQy^jH_>-Zp! zk&z_Y_ZppGt7YG*Z6=2lp>tR@}(70lr*T>|MMUzUonY6z;nNNUO=K zDz1eKpP}tGxNQJ*9aSvDgQKC-t59JB_wN?JOz8!j4WteLKWTOXFju9vCcvN79A(a1 zit9}ykb-F2JrehTu=*^Wm@Z?P4C}<(}NOApt-Qu7Jo;%Pf}tePY)GeM*_A{ zXJ~NXG`Ar}qm;8A@Ltg51P~6GYV+!X3jP5L2Z3o`J z6fXh8-pKiD#h-!oa(X$9{`W2JDqc-*cG8FQ;gFuB{tY$q#Y2i`@pK~8zdF$}S8U~I zrlmgMKabYWEG~hfi)r_g;*12{e(WheS^PF7O2vz5XED#~z`H(AQG;Ni}lC z?cD#F`v%5ej?LP#g&f#?1AfcFYm3wKG$^!&=PMZR95_{xQ{v7e;1nuPJ>UyRwiDiO z27jS6z)>?&Y6rCVM|gQXDI3772NaZpq+=(%-J^IvTB(tzt)ySfbs8<@(7fvKzO*?6 zy7c8q>9X|&NP^lV54XGug~eT+wuP&d&yha_IV%AFMDVxd5HKop=74bvsU1jNf2gbs zhBd0=x|RG+_`M~K!64*+eeurXY#>diw%q-B;N4!_O*wVj{YcGa#ha*kH8h#XIe`AJ zr;WkItEeTm`yTeQWxq5r)9qc)`F5g2CuN!`rR?`*q~*eiaLRzBgXE;c(2kcE{|Sn| z1WoNpe{!dRby;R$k8@t!oYjp&*(tfqw?jDqwSO4~~8qT-EQ8|bkzIGf*5@cCPeTMqg?i{`fE zi$Je}AD#r)8fvX(WO{?wEPAfgZAPPNSq=y5hp1VLcHBaZVLnnwqFFg8MJvKEXQ=zj@LDJ5M$HSzV4l*lATiYw`IPY!vnM{d_p z?`1~ydHAwSAF9yi)o@T(KVN_z_Vv(YQI!>S`E9EXJp3F^3jX ze+_`kHd8`dFpJdP@IeXcP6qoN@Y~^%iQsZFv^fQ77*L_0K0y!0wE#AbRtXt(4bc5^YG|P%C3KnNxUG`XD~-vm#oxmv z{m`spuXS2R3rmaF(XVNYZ4Qm_znYK8gn^)a|WW0ag^T! 
z=RLs6qZ!wr=R5ElAI3)>0;QB4>nM$FjN6f^?cS6bm-wI-TT>107vs}CioTHMJs6>U z@vCUi#k8<}ujf-m`YqIY3>fw@&)Jk03T(-v7gA{c(riN;qly?q+Ll^##ol& z>wC~w^TE4;dgCb938u^8`74=2<)G~pTFoKbyP(li)HpIp#Yt$ub>O#&mM)>zB)D=` z8pG+-t_I2oe7Lpr(>@QTwwMioI``v$zYp$1(I)D-YT7@YQ@K8fcJ)lAr7=>P^u1nB zRIH`N4xmf`n?7)Q8}|+5*U^&nQY&w#HFe>j#F5&FT=5*rbaGXjUq}t@m!r`^?kr$j zo;Y(ixu2tC12%mz@_ixw&Oz;+@W~uXZQ^Mg(53Kb_*03h!DeqnlOLbpNqxQ92Y{hJ z(n_Ddnjn^`spWYTNUzi5I!X?JDpkc-sM`!p2h-A+;^Sx%{e>pZLEyKTvU(c_C0?y& ztkrUbieBmo-__s=ErRm%DfOA+-NhTQ=#9Xe1niU0mRe+KHnm%gZ1>LM2a@vtNu+3W4R}llPV2~k5|!WJzq6Qz^+XmgY?(u&h0%` zLwBVp^mZZPs3tlt`NT?Z4y*?tulgD-=~=n3E!%Gf`%_8pixroe+S}Qr>SfkeygudA z(gQfhBaf|cirDF)J4cwpcpL+M&WP(7QOn95C4QAJHRT-I2wOG}4p~mWu0t;LW7YEu z8KWPOn!~2|qqTRTeKo0fFk=&nGOz3eWW9xpk+DLONy{szz0ufx7-?rJRa`eBUu(fF zPw5gE4^13Wg7SyrAzTGF)FH*kKyRTcDPbX3ljlshM18sbRocpCu(|toqQ_r{atOb|G`W5=rqqz>EFP3fLSr}o}M}zAn)GFXL zE`>LRTcADjCo(o+krqP}Wu+$hNV|*QPmmk%V$?f1WJQgCGgh<%ukjv6K?}K@`c-J5 zj+7seJ1KP)y!TN!qBl67NXeUuPgD9ddfb3SDF@#}I-H$0CogI?w7rP3M?nFh#8FF` z;O~+2NqoyGN9#KV3o*9hf!S9zRTC{6dzb?i2Bh+r(#GTXuG6s7QgSkVtcR;g=*~7^ z7ie(^{FcKL`xbhjkRTq$c-dPcWHT6}2hrQ(q0g&ev5PW&!8%WgRp^~|U_1JKph172 zttH>Q0%uQXs79Sezm?u2&?|4D;|``DL!q28g{9!J7AS`?yR1pt<_#bngsxWijRsev z8eapNSZiAx4Yj z4FuaIjH+{^h%{^i`{k6$B|S1D-TActA=l{Vs`olMwRX#CuJ4BJa*xK}4Cl!2{OpT%$RjJm0l6leBohwQo%IbH^)t4LG&od-0+C3@Oo5O<5f zR%~u%G}JDKCce>*ihnNg&H{Q?4Ze%P=5qS5j@DV3AuVd30_PdN_kPpQBST0WL&F`Skg zDLIWX&`(lJ`>Wm11G*Y2Tv3bd(0<6MqKpM*3RUs8wYi?<>z zMq=a%{X3%!b>OoCK3hkrfp`M*i~mfy5|}uX@GMcf>u@F$5vMGzyXMN$a?F<=29p-X zv<@6+(wAQYGe&O#EnCv)uGdSzx*mz!47S?LGJR639G7gg`lLX)6Z59HcLxql{C8O7vw84L6&j%pp6Nk7b<8Nn=G4VSp55t)kENNCFVM4ayq@1`<)5(ib=zf7W-;Ua7xYdKQfayw zTGY~RiJsgK&pO}P3KuFxr-A1-+8F}Xb4h-~-x!Z{wPMj$rR&9v$7{*Q(Ub0pwdl>$ z=YcpJy>d00<0RUfh^-7SwvlTt^(#l;z0VHgQMoiviFV|`LduPpMtC^hA4zIH@k>bG z5j-E5xU7_D>$q8SR>G);5!BXd8MMY_`c&pRpRpDj{a8J%mf$dSCdlUC(iXcYOYAt5Y zXl$tkP2e2hn`z!*??%&hj9M9LH$agRvACXaV;#?q#ZojtH4^8X^A#d|@<<-MI>4wE zJGzaw7Sab-9p2`CEamzEbr^M?qQ?W^G{@5(4uWdV9W!05AEMvqp;nZ$^^$U6E;aL< 
z(!QOmV`B+*WKZBNLgJg~^VO8P6W*}iDB50EsU_tiGF70wb>$bWM8tl=^U=nwLF%rg z4Y3N3Pae>lFVk8ZIEkk?8Brg`*~s`B85%%&xuy!qZRaX>KIt6{gX%je*MsscslG9? z4({~yv%|<(F0E==*SUQrQLYhN}9AU8rvS6 zNvm448!4wXbrx|bdQ9ngE%`l-JdH(X6_E9j;O@GD@*=mja@gBETxmRE5W4s;(ZIqI zy5syZ`1L?{KMdzcQRkV?)2g75F(|29qP{ZoEa~ByH`3#kSPX5SHnIVWelT#p09NbJ zP&c8uN{r`C-0PEUWrXyQ!+wR=+^gb2UkRL2irbVK{g;6$2i2nOAAvjX2Vw`hxeRvA z{LTW__~QQsSLkQSgO>q6qJ34gwwgW~2Y(Ol3!kov@+IVI05~g4rQ~LJ++Tv)xn&IxaIyaL`ZwKxK@D`R@Uyjw6mSd|(se-%m)KN;>N$(BT z!gCEtufCi{ONkLe85&=o`s@s|g|m~iz@TSpNni2n+^I1;=w)bHbypZUvF1!m!W0L1|0HusN$iX|n&>kKJ6(WWzpO2;OQtICV z`0RP|xw@`2aLnMy1wfwx6#fy+0|%q)dsFQQFzY;PbKIR;aKZuHBsQ-f>43Y6CS>)>99vp9|0 z8K-VbcgE}s;5Zh_M7cI#3OC0r@)D@G1%Bv+MpG$w6m3k!lJ;h_!b^6K!N|nDB~mE{0<}I_Z}-RgX1`hE}C0k{y<5b&0Dn?nr@(!7Q~}=>T%*X+tWg4+GAY$?-$d@Dj?u%Tuq;6{ zXQ}FJ^_k^sp;3VvdST1qUHx<+h8H=!LUSeUAVyjpVXk((K>wSPMt4=h8q2YLu6r&7 ziadNIzs|x=s>wQVR|egi4S++58Pj&T;}u8ZJMtz*o`9z&l=S>v_Zb)gO`zK=z( zgJu&M^#bWy&FyfMWedP`RkuCS+(?X?Fgs7IaK0XQF2zEktvYBtKiNL_CFzf?rG@dpuR_`i zq>M>AEx2R`&(h>TxXjia_sz)fZ0;hmJ`xJ3FUN2$h1Ra}e}fTl-h5ZGFAEYyrC56! 
z-Ids%-spn@7Ap@Gt|nD2b0Bj$rN(s(R~+@*SJSE({4HL$b6|OS-^1M(h5gbj;n>?N z>GU`FB}%Y|zUrm)`UNcT6F_~2aooUqjkawz_&WzV46jCceK=XbRrrl(GIGlC2*$=; zP!p2|oJ~q-1^o*>$QzSy)(@@qb^Ox~O6g+{MUSY5R-@lH6PqqndQ}UB)UC~o_5Jv6 zGk~^(71@hPA4j@}-25_6BWd**_}G<+A;52^Y-pvJJvbvT16c|kn7oUN(T9DZ;Ucg; z5AAdnInQCY%XkED1EYgh)rO;(;T9MzJPPoSlqfL(oa7`mMV`}LaxL`GH8|s^?=utD zLv^-h1o9;ArgE>W7C37-UBk%HruaM8Xk&D8|L&pGOa%5oIL#Poxm&H6HOBA14=F3B z-uz(cuGO_H&mB62RB~m~Wa>I~-c4xk9b8u>YLqBp48F+KnWQs+$Jo9)OLDfYeeFsA zrT@TBIE6V$HGF`c{K z%l&)M$=P=YziJZuTZb0QQ|re(&!^@qP=8zEi;2`w^E7g3!^#}j6n_AW0;6ZYjUXF= z%F&WKTAjCzIh>KC=kY@Hxpz|D)j+vuEYFLP;g!hVi}bA;?w>#pN(me3{uScY0O@` zP=*_>P8QSoz%a(jsFeC8&uMSFAAQlZnGP~y5Hb-ZGLIvkLfZ9F<2g@D`bD+K@EqW- zg=cPtsA^YooLjMOt&quKyGr+Hd zvb(|XChiYSb!#fx_mX6H20+ch^y&o4tt6f$_lLK&2h3w0tksj7Zv&T`f!&TSs=)&M z1v0n^i>|-7Ajy|{&*B{1HWn~@pmP55-rPHd(z(CJf4zkXA7W` zP{M=lg@xEdADqvf$jp8vdUPr!Hm2FdYA9F2xrEIhC^HkRj0@bTp$>1y!9{uS#p&#-Lk)Rh6LL4*WcDzRCz1S#m2bcXf76Y%tuP3ZAVKyL3>KGEm^C? zlj}N;=ZSbubK$2R_;TY?dkt`zHp`j*5#XvF9Er}~%-VB1p2BKct-&g5&EBUSV_srm zyjCn-`O%A7&i!tB?5GvE@0orxJ>&=~)%tv{54x{n6H=)ptFx5JE08f^)_~<>iLURW zxyq!M!DcFULVYQWu+m=jARZ<^Z zs1>+Z&#t2FGS9Plb}zR+`@5tW>C;aQ9TM zd>|bR-?UKs5bC+U)PeqR1RdX_(;gKeYuEMn)>CRXxP2F|LMoO>4{dofXh7vpL4AFmx9Q zgwd@@I!4JfV$cpvjPxq|d4Ao`u$^(#|901&yY%H>_o&M6t`b(!cO!AG1P@N0Peg{V zqL!;JwH%uh2K{*6Lh8wBOd4r(4E5F}Z=q*`;XH6PFb7aF#&8HSs%8Bo`L2O>kaHh> zD)6i%oDH_FXDTz!rQMhH9%Eok+UUs$?5XqG$Iv3~OKYY4wOI6*$v+nU{wY1x7gG~I zo673HP6OU1;FaiUBln?yq?=>!9t!UrZz*Z4pv%V!kHp#6x=K0c+UmDW;Ijsw?$yDsd{RnR;q|3oJ{1FiO?M zKQhMds>&yL!tOfch#q*}=5=vLW(O>%?r@c&>s^y!$t%%Mh%*l*BTin2M7am{DSF{t zq!wKfaxywaDMyh~O7N7i88kW3-Q2}9gzeU=SBm!N!%{5~bEmjdw|CW2vV5DUC)C-T+Em{e zQS$Ab5@X>w41lNBlbYEDOY28&hugf$@dq;RJPqz#4#bFbO+~NTl6u+k)3O+^D8S9m zpp^XO)RvP5(09v*pXZE9Zj*24&@1_V5O`gHw>}wu*IsE2dZCA^;p`Ik&OzTznnt|U zHcP=-tzLk_HzNzN)6qA>>Z#X3p3*yvHR14=e2#U#;oN1^bd5}nljpSNGIEU9>W^7N zt)!>x3_~nFO6h87<<7@Fl^I71y=kW{VTg4J?4n^iT>Eh^=Jg4JD>}C#)4s|0LrPr=9z)4-kL^p; zX}~XawqS~TJjC{ZihpSPgK42msSD5@O<0{AHphsf>(@rY9pia$(ardo?*7?D>(?a= 
zzKsrhKUo2zT*haf0!|svaT7J%xzNPOTt*A`L+6L7V>DnV=TPRI3y87oNu#M=oJ{@# zS{g*!dU84#yLM{*n7kV7?|oqWTi!`V1uxn63ux3~6uJQOhw9{i*hDA5um5nCulvL%jz!{P%Hybne*<)7v)hax;7qXC?@iZqb zT*vR{E2X23>lfhx$X(7I)=*a4t~C=Etzgz_*3!P-q^Wuw8@bk{`zRN7v8&F%b=RGf z^R(&i`3cCDYYQcM@N4ddAcf9Zlu&2At`zknUpcSk-ndwSb9{;i7vS@C;ME&#qNQ@? z@7aWQ`&QzOcE)23eH1VE#okYQt`r3CgooRSH7dZ7*|0BboWr`Ys;zy zPD~QeH&IDhSjwp^mB4HQE#=V|##4Q_!&O$_9Q{31ZfyP+qvxuTnu zd$9}5_WjNH$Tvu|kMGd~^`LTJo2--CHaiYRayv+$&(VaYoW{KCEp&pk9StU~8ao5u z!1IVydvuz?`gXh+U5TbD!39&ms1FvW%&J$9wAy?x*KDWqP&_;5XdB3XFi~JVZ921X z7TAPl$kFZ-{N|v@#gr_NFV(b&TN(G5D;gzjf>SrpVrQxy{<3@b561&rk5pHK?U6}$ z=rL{q*8TWxL(s-*wYBWHkZ)h0XVPOCzu#qt>=v#&8OP@sH|GgP$K%}}`xky_Q_}B; zqPturc6RTq*>|R9P~uATXglez!NG6P&)Ds*To^YurrtM6T1NRyhR zDrf4?IWMu#k*m)ZUiPx2_3og*+U?eque`>phW=icwKFmx-@6B(kV?wSp%aa(T}-|1 zoTSz?daicRv#_>@HdmivIQOflk>?sZ*IZ{c@>wQ*rgiCGpTg6KJKNK$lKqa##zzsK0HbsXbl2 z*H?0Hjy;oe#MfPheej6J)4G&9CGq6=#19vcrqAx2TiQwM@NVQ1@f=>Erz6%#M82HT z<+>j1C~i+rmQ<>Tr7^WXt|){@9^SJ!y7DMQ*U-h~9?CsWt6}A4B^YW~WjXe>MFja> z+I1I3In|XTY%O&DLcFJljGqjgWgNr6SWTS8MFO{dMmj~^gSL_DN>H|MBT|9}VPVwp z`m0;O`g7=6=WmOEUkgRv!r$40e%VNyF^_(mK2=lppVNDG?l$#JhqH;mDur7ZH6vKt zk#ysAKS};n8E$xhXQNWVySCUye!oPsofW^&QI9<+p<~=NEz~anPwh97k)FcUuRE0R z@SF`8XK>D|*SL$Zf0U8d6Io452Lf*obFNNW`wh>+ewkkBO%I|*_-(GK$d5lwdUHa{}at4Uh}cPJmmCvtFMXcO-pCnMh8$>D09JZG6R4yDvH zd(Kn41hniquEuFyZB*ta;%Vq9D-mU$qkcZssHd)H#-~x+->#N@V>i>2-St{OAa|AT zx|9D~y5;!3!Uui9Eau~`ALT=>aXmPOmN*b9EFx8nCMNCza|Y%+7RK+dqCY+H@8*$v zJ=!8`y$@yjbpifoL6D#uWJ6*)Jvk}DK@?QTNfpfxI5N}T8X*nLJ9 zrS{#oauy|HN1!p8wuG5)tIBg|Q8uSEr^<^jbJIFnoRPh(u0qn@Rm+P3M<8!R}sypkz z#jgNJ>ps9a8|>8V9mxmv+BJ4YA$CzZ<2LP3>>Sp%X*-))NofQ#rDP<(Ju6)6d_g$d z5?`xF8aZ1im03bHoc29--x>8VRu$H@3xj>{Vh^)WTrF$?f-(CtwDMG^)m1cZ9b7Pz zlur0IgCVEIeRh}4jPwfX5`@soIi4&TlBQ4SooQknM``a}rb$e9bvc(Tx)Gul#$WB{ z`tAwIFAa!?Vk6^=L!;r?j#R>T+)7Cv3klC&&^}_l58^ksr!p5OOpZt+B3$?A)$pE` zd~|s6uNgsink{6co2gq$b!WYhA7c_|xD}ku*(T{Q^O8G4aBkXU3htl-3B_+ab(em?75KO9!2gjsBG#2TC+u0 z7_(CPqS^iEbQkl@5B4JTXI6IqtNkj_$HHD{nLidemUq=tO}1}cXFID>Y99I>o;X*{ 
zZVarHbgOHf?m{at7FVJl)sPW`g&gs~jdq~=`=a(Rh%su?V!MSN{zCsU6l**@)Y7Y;i$o9L$?=YlT zi{ws~W08KxZUFT&t58F)tB~>~(8#@hYHYs@Wz^Amfcx?E9DLi}c*E7CAClz12A!~h z^t++KkTgdP4^h83_%*Om7W|f@Pul%`kC;0K*S$_lzF}5MW!y=j&GBs*<8k^b?$3CO z-y@Mv_e-aSw^_ZM-b1E?jXwSn9dfkU4 z%v@33!#LH@itoy54NBBqL{C@pCRmwKc|Xk+765$(x%Nv>-0vE^mVBm_z+8+rIF&w( zMIZeWT2~%(M^LZgE-ZR0J(yO}sY4iL*RY4DQI-$fyZ$g(i}8VI(t2u@XwCH%BWPnN zuYVnH)8D~(I}RC-{Fw5_z+OTB-2_(prCPR#hL%#f9((PoJm2-EN9LUjVLa4iYBXa- z`VKo1-)c{sfksSCIkAOSPS?5zj_qgFD3XtS{y){Z(TTlcrIDtF)1fcNe?w~-1(d5Fjg56gGkq_%MLBV zanvv_Qjeu~71FPP7#Z3~-Wu-Yg%L?-mnc(8b^E|KenCPhmDjrTq-{Dwzl_v1oP&^+ zd@7+G_uczETA~+`=q_95d$@U8`6B#w3%v@9SR&;r`s$3vwVL2Y-ybo@_d#2LG>6jD zXy5g(tXDPQE>3Oy`sPpQWWAXqNvT73Y)Wwp_XCKx2F;Ky`{RSf6w?La-XtsM;ExH06d}rcTM%8a?oWs?K zhHu*UK_fj*guabl+>QQOfhKPP6W^iP4s>xpgtm-C8rvUEJMOpJMSrT`2WN=ekvrEM z)wf0yjv~j{>=WtEckGVB-wGYF3242UEB+o@%>#4seFOcfKj`>rV=e~6$H4F=AdUuK zXG24w*hAEO4P2&!VFzi|_@J&Pw9~q8*&NONbIFIDQ-P=F==V>@(__Ems8;CY_Xo7> z8xYS?|2}%@Y_gVf1@Nz=>~PZB6QB8|#V5EwA?a|xau(w|J#mmCf{1hepps(34<#J%P71uCw)3 z>9-XgFm5#snwf6R{SUd$S%4HUf>(eG zw5dLx}9jS&M^UcXqa8G{;*zQR%FZMH!C#J6dvF7)9H3pV#^e_)iYN6T<$gVc49*lQkp~mq2oi_63 zqu=I0j~T>=Tv@O-3He zKs1tYJ{0y_a82AVBKLe|r0$Rthe2tl**nP#a{r58v%QjBBWQD=pWd2KwX^Pkb4O-<6aNzWvMTK(cFoLv#8b#~q}!488sHnVa6hdh zQqR+M*qN)L-_)cV%LyCxcW`6$P=Dq9Wa-3FULBCQ+@~x;RR_HbNR-xZGCe-(1`EE-jXVwVy(gJIB0H5na`n) z5+{2awF0slH)~HTANS!sX1YGh|M#!wGrs$l3f&NCF%PT4HjANHxZdaN?*zCpPkP7R z+WCZSEB58MM5XE!zw%9-dPclVxy-NijUK-faUs|0v??_hj@8q2%r??rzZpLrO2m9{ z)Lt!;VcHjt)blQy65_aPpkCncdYTKQ#1g9zlHc@j7t4mJD&iQ3(G zs^40(WNT{0?+*E1!Z^}mw|MkLu5mR&oI*Fnoui|!&sTCxEpciM=fZD#&mEM|I%oTh zTcB-x|8xx__&gfoVRY2%)agST&e_gMo|Rupn}PIy3*1&SEB+pke~XT~4(+lCn7<=! 
z3-w~ZtnZQE$jBKTJ20*E)d78GYFBIheYBVVH()z@=2m`VN@ zm~Dlg_fgw;XAk0DOSpG8t`u{=TA-f!TC0u8Z*pAJc7Bm%6|m=0YCZKg!!^=!9A3BI zzVxd;&e+PxMflQdC{;+cwdRe;gX;q7YTH+*3E4P@UtCuwxOZ30EwA}K(bvGmF^pNE zQWw#A{U9xz)-__Ww&S-)%<*3M5k>2Pga)^}vpK%OP#`~akQih~*gQ3l(W#IGvB@WT zr&3sSeVfP&9p$WgIaClAby#D9r$3{P=*hJJW?~d23fyO!D|Sf;*>`otC_9`J{#$cY zImh+t4?99{Wor%HONe*YGt=wRb16Zw7YEQ==T9-Gsi*8vxPApaa0F+=0Y**SEBFW! zU52|3xUF7C8c8n9IFL9g*8Mq zSA6eA4s##ZS5iCjPd?QNPEyxMXZtUN!X(aVf0o;v8`7Oy3 zW7HpsamTg3*DzA`s!nIrN?hk5)%u(_qr+u0BaUgjb$yy^-hvlqENwBq zfM3#BO?|)5`##j(1P30@$U7$(3mzAuuiPKzdoS*Jeuv+{bTIop^59Y?Cr^n2IUd#M zW+m?+`tLkAA_I}M+huD+g z8%VCNb+s9H&Q)@$JvFv{bH`=mEn}=+CC{jt?@Q{Hv{drC;5g=v{jX`M1U{=M<9xo9 zFw;85Y+avEo8gxs>R88%?*poF52Rlg(;MS8Vw~r^GmV3uOK^_8l%ma?#*X|V`)2Mv zGOyD6YP=h9%p?ETr`&PSL>m)38r3-?Q%got%8AePp^ZcNg|-~C<9TT=v>vWCy55Ia zlV9HOt?4VE)-Y-v0)FoD{xw)bG$iv`M0~Jc#U&yTGPe9dI(Si&{?n`JV^+NW& z4Xuse0xcq~#+weuAN38Lg-G&0f{(rF$7uRB4_68&Q_>W7%bd;K0$Oc_ZrfQW^i7_p zp+gh6egPdXG%8f8l-o#piP{lKEpye|DsfdW8UIulgohQLk{tb?KjPab(Q8}new5Nv zGd!mMA?{tX&0Mk~_UZ)E{Z5Fp^=ektHzO0qAPVr5qwgM^TK)rH2H9PoY8CZe&_hko zMPYeuO{H|6YmB?!BQK=XVa!onz5aIMzOZ9@ypikoU`jkWD^i!W^J}yt59M4b@l720 zs87;`_IQ5MnDLuSBXuLzmjGoQW!_}wCj~o6ZD8-hCPu+s zcs}g_i+6#qk0+gcyZ&;x?`z=tN$UHBK4;nPTm1=hxbf&VBQZuCzlNO8r;h%R|20Va zw3HPEwS}J7jMPJY{Kpa}+>*vYofL1FsiEXydHI;c!OnDIoV)IP)+ZOg{f{i#yMMaR z&c;^JmYTB_{0A~Je%--wyeUDe<2f`{)GzV;;m53f(uEoF(Yc+AD2_zM#?Ae z42vHAnR}UBliI}2`^GU;2EcaZ1oL(ERiDRi=d?5Tc?*4DTzVeV z`584nPu~2*{X-ZD*Nn%Y&HT2l?OlqM@%t2;(JDE#-iy$_9=}g-#&5yd|F_7=ftQxz zeDt6)=$D6mC!&dY*n^aM4}9DixrkZ=A1B{MHBG z?-Qx4^Cz`YtT+et?N5xgIwrhzXZfc8`-e59K~^#w*&KW{k5ql1HT?RXuzKC^E96Pn zI%OEg9IBMcW^n#zuJiY{{%_TaJvGjHJhPH|2=+OPd+F6T@-zX;781>rz-;!I)!P4R zbZP&z>{e_9#oqu4<^w zmGl_X&=xsHq70Xpp}u32$zrS_WSrp2wUGRBiE%1db^emp-o~c+wUc&q#hgl?wbEIu zLi0bk=8)p?VCz1M&E~BIBY4u&{}(ef(ZTOsHo?>o!-D0**3aP>s|A2NrKA5i7&;vC-uV(2?btAsy{K5Fa1kz4I3L=uAwCGuV5Z3=(O7uQt zuCxy{J(OkLQug(Eh9#=mmUqfwGxyygba@8*W3*;3)GiIXf0?f5poP!s*+cl1Q+!TX zCyVZ0yH&K_%2@|jXSL;sSS!gvS7(>n{0{O*AT_1Eb9V{ 
z-^1zBfxao$24@`toN9J%j>FD5KK@q^y+HjueGRqOXxeob@s^Y~ixl@q|DGDo(}X?% zUb5Hnr@IGYhgjAp|G!ms-`!f-!w$aX)s+{TLi?aaScL7eWjV~*h5JOt(I4YhUY#L1 z-X&<{+vs`l9fllE#!q+*>fT7Crj7gykQLYZ=Ammg@~j*-gJ+)7kCUU+8u?hqy-{0f za}0f0m^{l1>7if1bm#gAX1t@5f3TRgu1%W2-BZ1p1HFtUxi&?D{m9x!)5umADYd6MrANa1C`bkFJ= zX@)*4afV-;(c|$ObH3r9V@`1_vs=Gb=6^$QUz#-bJOBRoS>rn`=+j1gtI*nd5Q`F( zMo{0K%uPg&{s8X2(QpUaO&>S(p8qRwGq@F~^B$+~iE16QNQe{;o1^Ewn7aP&4x`n9 zg_wNs$ZB-Y@%@gav}IZWn69AOlG?4M0>kfrn;$U&@09v18tnbmoFV60+_1V`t>|-i zJ$2pHeQ0V|e%vV?|M8;{yo^+L=^5u%%FL>6P{KR-aHv^N-M1gHA4~6}o|;;UeGfgf0o3$Op|FY%!;xzFh$m?W%Wz$2 zhJAgn?or)d>_%HeNqr%|cHs9bmogsCIsC6P#%INOGaMua&TjNXc5`2cHP7BIXK?Q4j?r1ewUwOJ(CKYB_u5oq8f#2j(M0;r zYLq)UjRlqAa(B9B9=*QnrO32+Wg)LyLP6+)?G%Y}TW%t}FfuO|U$zdASc~ z1Njq(wX{P8zhCZ~B7RThRw5<`CpvFLOSDt-YGC=L_z}#UeB)e?YXwsG($98=e=8dGK+}~YFebB$U3UBL2(5@+YE_3L4KWOq*`k_T}6pSy&ZsH+m z>Wxr)CZ14^^2>m;I_cYQCY&6>`Sd(?-MeyPwC6^&>ol-=jItMjz2A6umrcIHv+5{q zl-%gAlGgpG=V=Y4Bh9DoWrAy}DRDh`=Na9Oy(6q9Roq(PM@au;SO0ecO~H zLxz1bRPGUqk{L&rX2?=L{Q5ze%zrROB;sXw%eCsQ)*Bu}d|*Kl;N z^&h`4@7Htv)mR%2EhXnVvvYj(G2H>|*f$|7{tv2l?z_Aq=U%Qsb#LGKZP!t%+Qde!IXK zfc{JNEmOZzD>mvMOZ#7x-Dxl%J^xIS%;4Uq&<)qqitnU#63^E|_-*QoUqvw%*MYW} z!&v-v(mVH{V>+PSIw+%eRRcZr$6OcNfPZodHFnaT|K;))JgEoS12qFUKLi*5!}tqa zCvp0JYW|TjO^F(N(r(w~Xi_5~S%wF$q?U6jr28h8pUUE$ge zT^3Wu{jVGOorIlGM=hshtekI1{i#BW-h$@WZyXH-ze}zUa$V||v5rF$F3uu{Lnl}F z)e>d;xtR99Na`i%zp#W8==mt{b3W@Ft2$BJu~SDYA=)oTuDk8c#yE>r``lGi8)URW z4>~L9)kuKfzI`}KaY5# zGc>iR&tf6xM4tY&dzVTnXa5%T_Pf%w(Es|O{VnayS1Ld3@cj`vUo73{6dW%uU07xH z!teB{ixz6}EI+R5n*4rgkyG9;wJv$L<{5SKThiWlhppcnlrg6OmW6KPT&na>k26k7 zsh{;`oZI>A(bfBS^UTpT-F|xJ)06ti9hN1YoQcUL?LcYcu7NRk*X4ZhF~<~V5K-Un zt(g|zS&w}EV)5{)58Q1%XZ^M8|9djkRIj*;OzyIGJ_X(7Wp`f7pAm`D|C$CqXG0Nv z9DT?5M%Qel=^9${Tl_nao!-daGFow!=P~*+jTy~3$f_}l1DIJ1r?%fpaDBd*wCWV1 zDDUuIw(p88AVF#r29dCtn+*F1w6nzLrVA0z#Z^EW^-*9K~8;u>APC3fR89YI8)O0&So5xh2ViZNYOj(k4(x(mROqXm~i9Br)wAA$J=qitx>}9 ztfGxB=`Ex>Mric>*7H;kWc`5>ziKw0JhJtac#2>5eU})m`zvKa6O?!gt7??MlE%h- 
zM`Q(Wi5B1l`AaM7u4pZKnevBYcj{=byTnB8ti{d*E1qJ`(OyX@RIctAi~J9IVv+7| zRwmQp`6QRI>NN{Ijh$p)J_^Z{)4S?Rz3$Ih&aQ4{va}Dosy!mzxsvvD5MzH27Teaw z@J2v8E1XK%1fI>0(vFE~ zkpz7nS5JKhrU}-!i%O{iW2_h_v@$ z0@CS!XB6(>Wa(c9@99YJI?B&N`um`7dc%jVbII*FR%Uvlqm;vH^x6$*jzdVjjk5kP z$2@WOrnI`)faY;6;Dr>g8Gvr_s|0@y-46u+alpJAc^HSTX`_vml%CG)*4=MLkJy<* z-QQs6UQN-M^BIdVcuT&k`$PQ2$B_>)le@l0dquQL8NAP?^<7}N66&A9jO;b2vM@Jg{ zxYuWLzME->On)tdK3Oc(lG&O~)Uvfrq^jY@FDdTl#Kg1@1vn+RKdTv6B{@cF-}L>TTK3}6H_g%y0Q(te zslLP`N?faG{VIO_%3>SiF`aSrZILQUT|iy;xj8QO$zyZn>R1j=qZzt2#xp#NDDR%H z@Q*?thj#Xjv57#mZxf(wLD!h$%ax!?Rwb( zaAXTTcTctv6Zv)!{_OkIQ&R4RMtU1Y!2C9`yMlbb#C3Z;i15^uiIzl5=^gQ{*r_W0 zwu4uhqXgzBfu9@^xe+=oUJZC`#CG07xV8F!@p22jB5K8G+8-N+Gpj_$QwsUMe@s*Wty4cacH#YTt~SI4)i3U6FHhy3#-VuDiBXO0^nM zGQ2M5Z_Y>bV8pgdr+hfPeN%&)85Yq$^+`a{Vie%L%lR!LCuO*OF?r71=EAeC)CV?t zOA!eySH?RV9otj?h^6$-bH)*G-)2wF<&{k59>$}~_*+lYmj5lHj3!XdT}vq>KkRTy zloA|AZWJ11G4n8IK5MBxvce^PN%?MQHiC8QRoLPIiHD}a_jO1?DfRM}qE4Cg<;n%eX5qG`3pC=(jp%0P*GmZQHUd+3t7Vh@=|H{v`c;1rMYR?eG}- z;804ACPuZGl1<3bgK77TZxSJ`k*) z=J`v^bGIN5MwZohEolAa#K-Rfi!U=So0ARj8%-yIfnR_2JHqF|^9QGyo;#AtK%WAp zeQDS4LY2^dV&ON}*Ax9QHmWq}H;raK>FTK8c5%c8fYp)wHZaQfLo4G%zGQ)wr&rn@t%s`(QO0?`y@}%#X#Obl$n$(i z>X}~6H=&=cWu^Rjk$Th}5}5|vNp68uInws0rM)G7lef&y$?HjVHaLzm)~bai)gsCt zGHS##Yr|^`*mQ8kGf7eR?s*?}FKx6t5liVRp@xu7Wqy4l(HNosA!`*}`YLJrwz}`r zzB9runBLiUTZ{dFnJ3gtzdFb{E{(b?>-t9S@MwlRt>;cvDHPB$s>M@4F~YfgFKwk% z(AgeNguaMXfg?`-_DjEFS3uUk$(Y6aZn@+s4NJT%7vGuspQqd%>D_I6 z5bLwe%&9v09|IfU(Rq}*Bk43}lg4e8U-i!5O2o_m__u+0?^I@Mud=T0yZO!~Kf}Cl z3Q{|qxX|5{-N02p^D;D)vC|36_ws0n5tLqn#*^!fg^VIvt+Xyct2i6)M8;oFT5@5M zyAzp9wc#OEGgtc=wWY`?q-u4VX&NPR<*y&{tupeqirS~3!P-c3j9&+)o?*a+CT? 
z{2jS6n#MLWPpc=jy0n5E9h~8>o|@dMZOuXJa=Kr}uXaRFt8QZPB?_ ziHKqcwDZ4*b)4++n$OX7}cmrzQ6OhJhfMG zhTaVO(50yYr{>hA?OY5*TUFMh^-YyL+YSt+ik2l-7p1B+Rl3dBI%Vbl{cHIHW!;M+ z?>d$Z>9;>Pxp!?XoLL~X%f@M$7I%ZNz0!8<%y7h=<-6{cQ9t-3JlZ#zap=D;VN|QI z$2U?+e>!^LTSlQ1?M3F1%Io6i7t8#Qbsf}b1^zL33VGz|_ef|f5WWkh1xhG!a_7~1 z?Viw_Sp+rnJgwo(Xj{FP)8l^)JDwB4GqWCR7|Zu7wz*7;&Yre_z3VvIH!Yj@%7hkg z6eW7$3EHabHh1!?RSG^Y5#@GO>O}O+TlkqLLGi=j$p`6APx|&MrTq@3Z@$!G(|-@2 zci{B=kJXV5ViT2h3Zw=bmFFSoL&3gZsJUsWi`gXf-qWnEGLf0G?^6L(+p~#*w z@57+ZHR!i$p!okSw_FfgZk->wDFVOPTc0UFxmwcRt0To?XwqW3C)!krt&$ufYET5KO7UT#PP8TbJ>LTzx3mZz%2e7;5QNJFRK;R&Y0*^8UY{CyA~U zpyO3oo2N^*c{vNF9m(zmHLQgXpT?DJucPnTP**WH)n5=kPt@82<~qoapY3 z7GT7&k!#rW-PFnzKSI3VPR7jl5lf7+J<}&pPfbns)Oz+!=?@qa5^sB7Ly3H9-~YJK zn);wVE|F({KJ188xZrZV+m&_6sayi)VL z^BSWsd`~&ccy&AZep^Cap}snvJ=JZge2iu2Ub$Ai8*OP31E%y)<43x`>Vu$tq$(-5 zFhb_}Z6DXSjIOy-6foTHJr)>sK=Oap$-zcY1Deu*D$=-~^7o{X9nL6830ukLiCKih zI^rM&<39SLMXBL92>9Z)?<%Y|tS7xIGoniwUHARUVI^>h2!^{wl^16;OOuDPn`;Mp z!Y>0|kM=qOsK&r|a2iK{3BSkx74MtGQ;3p3hyHLE$e!YN@P7Os;eL&}h(%coHt#V9 z)R*#IdT0GFGrM%e+zUQ3mEDa#T|ruZG-iQ1TaX9emtP7jz45z|I{)M24CdGFD;R|D z)=t~6Cv4wBE53xz6i0Pq6XP%*{`b2BhXdLDrE`fpxQDA6o%D~$oqJ$UL8|nVj-)kd zS^%3bCf#Jzdwu^uUR zM4i>E_cwxPiBkF}A3P$86+YWEYWqg4qn@qN=+gx;0vu5i4IhcpPP-V6=p!?!3q zfH63MvfG)741gQ1BFFzaSV}x+d{D2!|IOmPGZjl=nXcg^&qqj=Bf z9LKmm{tr2G;`_OlP&4;s$G)-3Bhzn=>n!e`!Ft}w)qB72s{Fc>LTNF+rIePz zRO?YdZnL+Wg&Hy#clvEzcaDjC<2+LfF7K2{i4u;Ba;TRVC9|4(99gL>?O(e_?M#bA zDY1`zSEEy}{BLi*vz2H5&aEi&@G|6^}EWcY$$~x2AxDzGE+B&;R-`6AH_N>K~yg1L$fAqgn-$EO~-`Y5R=6as5MZ5SP#V$@#;}|&QWlqZ|t@vTAu<3ABqPk2R}t=HPO$(x*uuP_+&o-)4QZE z2Es;KyqSKqk@GwH=v%#Nw?6}~oe@}$9=EQ0B+Ah5o3z|co>74JsI!^;KPC&el3Hqq zGUKzHboXw5p0qZwb$<2&+%PE7Q+{xK7gI{=pM*Bf#-Y_haW)jvrf%00SEoAOk*^Pab*gn)vURn^|C*lE-eK|8fe}j?nYheIPc^>t z-Hh{UQikK7b}&wUk^OqG{Cgq~m*L;}-yZxQ$u&^%vV`LxeBZkg9s40K{2T|H)b=Z< zgZLuG4;klAPLz-~Xw%G31Xr>XTV?CI_F;UdQ79BA4~se4kdx4|_t@iu>% zo_-4TehD70K)2hN@wta0a-CewE#<=?=2{u90b18Mtm{SK`K9#q9^UE(M&lahgwLW~e+TZrpfCRoNwtNY(Ed%1)6sapq(=|n 
zyS#>Oor$e}8?1iC=p9U-=R&*h0Bb#Z;5_(YG%dUc?ti3TZ>5$NL8EiA+($q;$M1GH zf&-Hk+$ZO#<-&N8}(PMCS?$={kNynl`6vkMrI5|^NGuGjuW$n0MFzZA7 zk1!)X=K4i=uQA)0z|$(8+&ftYzW*USm!hP407md!zcQXR98I>HG-v-iXgB_kg0PGW z#eZS6=i9M$zk#y`qYE8Zze4kEv`90(d70;$Ck*cA!Px* zDkGhRMB!}wgl)clQQXI<=wE49o1tVi*E~j#Y5nczMGDviWS`31F6{LY6XI=TJ~kL`J&j7E34w`v{F+sJ!`U1~!Z zWB;>NJEfb6L_LUBoPv#O$2ai3mdhEP4k+*xTIGIPKLsuFJ79{tQRNQq+QCtOLF`^g ze9?inoB>~dFO@Q1TtjNzdt!{lx5-^6JOUUwtlBBituK8zgS08wmW{M|8~FSmXxX=l zA4_nbMw9&r>@H2d)H(3QMOgJ;ViCR!eutq;?}kIpgntH-`YYzCbAkIfws<~}|A>Bn z6PgYL#y`W2PjUYm{C7Fp`aNRR3z8-M9&NpY>>p3P`+Sv7l6K!a`)2iqfpc@<~@S8*J9p8-)^Gx*Rj)U zu^vJh1lB*G{7JNPF1>EYD*X*MP3!*v+Wk-T=tWw+19^Fd690l;x|>mYlHXStuQBMf zr+MlP>Qk@Gc5{{YMJ9ekS?fba`= zpir|IrRN7==B+>@f~<)J|lcN?Y#@{u4XK^Vu{a3 z_I5L(Z-MO|sBsh=*+l6Fq2x>S;z_Xmdtki+-KJ1-DO7%k{Lk|A3|i)|5>`wB`unzdR41tD>1J1nzjc0 zDK{qIF`nDo~`Swc>gtOSB<*hX1Y5?GWM>c8`bcA zH_HjpS&H|)7{SSKy!O>+|MPYs-KlqTSlpLsFV^s zb;`N?7g9g0gm&XCs%*95M$`;JK)m~ ziONH*qrrF==ih-{{I|zPXhm!C4SH4r2jBR1-|#G2)c(B=HEO`(A?~Uez2)@Hm6vnS z|I3kGy&>PBxR2iZu1hC%9F^E*IEGdh0dXj;*CH89xgWzh04$_yIsHm8E!_R7f3{cR zS?W7B{$_`MNS?DyZur!$J{WsjMjd@Uqfy4ij9>UaVPpL(s~-`D@C)=leG6wIIw100 zliI|+HrCY--(>TD#rOt@|8sXUwz7`ixob!|+2U-dZr@&I+(v`T6ga|n+Wd;y=h2b= zC#T8i%cJN?Jvz#$xPF1(sP}s=os=%qp8p}F1g&nSCq^{XEsHDMV~o2%Ex*B0$Gz+K z#unvQcjowgpfY2=j;sHR<5|Y@TCBrs==Hya>Wk3o>d8BM3Rj$q9Xc9{ zJePFyccJjF;VFINTj=FY(0WkPlt(dY=Mqzx54{e?8@-qIUWR*SQ`6Ruf(jQE|8F2q zpvULoxmt|3XXq+)!LTiYBTrt?R;#@1n6)M zc%4b#Z{z+!DE}$oOo7@zh2!U;=Pv{PO=$7cY4v5S#iy_bzl6fS0K-RVbt0*~;m|zo zoyg4j<51yoVE&fh(~-Y+aQti99D)CS10%MTzF$QD-UP}ql$i(INzl|a;hE%Y!jg@m zCy!I(Hl(uy*gxX@I&_M3cTIpR zHZT@{1!gnhuP-oKf6MbPcie9d_)stH?hTr9FK~H5+#aAHHkUL1}a8G zS}aHu1Oe&o(Zm=P(TIRli3L;?6e1lY7En=8=^~&aA{~`t0k7}(-?tp|zVErd>%N}n zzW3g<*Q`EkX4cHmuD5V<7%6?|y|YPe7EVlZ_Z8(LnRNOYa`Ls1?1Szr(`pLzj_WnYiLCJ`5j>0vEX?F9BRm- zzeQzVWqaVxZh8@2PDJn=Us@ME(@5hxa=hQWFXBNpQOPt8I|ud}mB3F;`zu z8=$LELDMt(po2l0N;5wzaOi>yL&a|ES@anCy|^fMf~R}v^EkA8D8_ve_Y-Th_B$CX8p5XroIvv7Hw2*rBX+7M?zgjypQ*toW{NIE6|Zd7{~KBUn^CY 
zwfw?c87IC{yYLqK>;}X7YRH_#NU)EgRoQdqD4LPki-GQ)%Sxv5964VxGmqQp>qEtO zZ#enuf)iN_T2=2XBM=!)_$mMS9c-)7_V6G(&3wUk9l(kb3BvPB9n6b-{plIVlR=$UAP-dxU0BIURrWXGdjL^w4qd2B9O#gANhYDUYG8c zTJ44f4w-F@o@DpaJuJE#ZoiD@siB&7{5*6Y#9j{|@ti<83a2t+Gn!6@=X=n(6F1UV z%P7M?(DpaulZhW*_d6V`pk&6bHmNJKkcqUClU~9mCz_1p#nO(ZTyTvRgcJ9nyR^5@ z6LjTCZ~c_tZKeGAyz>-ZPh2VIF1H}i8n)VPiv{T+;}dA{$orRRA2zmzU{*n8>JPM$Th?w`SSIUjkycJm?a z)_Yox+q6xW^3V^ceG-iB_nP}V&Q&A5&_5}eT~YoUmlomkn?=w3IUHL{f_E1< zJ?ZLLw7&!5d2k+v;)lI)mT3KPwWbwa2D^*l-jn}cO7a8A;3ci|6 zD4ML^(WKv6jqhk@H_(%B@o1j%53}NVtSs5*H)_Rl=3GOBN(viUYkj<#C;pj?`x*1N z19#3~<2T{{i>}(J@$8}n`p{cVa5kshjYiK_vgs+pd9i3IYr6L+{;b7==~}`>VreDE zsPjaczmBGL^F~|nPK47uEp-e1mI?U1)%7+!s)M(!c#nr&Z-iH;qTY7s`L&3vHXW($ zX*)iz0_^snd=L)I!21)x7|XJ<2l+Hw_B~zr8db|dpMr;r&{q{7`WNkNe^NP(Mx5^H5%Cw zXS(C*N|Z15&NQXJCDHF$OcT-4^U6%75v^&-slIoxm6oEPTWHq^Fk8T^9-7`m%@DGj zMQ_UE^>VZyf%=uoXZ)w1w@%=52lyXt+pXroB)=FI4O!0`wGJcC#cJ#8-In~r4pO|B z#K*$9pW0f3-DE>|F^gQ_j_sE z6LsBLc=*g(tOfaDknYxYUE=ATT9+eTm)fa}0aYxbi;T^ba@YcSFD3^nyUlg(jMDb} z-LGVI@6UgI2zq+pSplBYp%O(@#LwCI@B$n%GrxB)oBti#|NCynYSLdx-LIlC!z?86?@^G@#GM|g3cUju#WsUd4UdhrZ5dh!!j$)@ZfLdcv^ zX8lI7iR@9l$M0mDC6B9?Uwvq2W|I3Wl`)NNYR&HCWi)0aTAF|zS~4y&0sPxV7rV@! zC%eDCI_@l5{rNDP!yoMeakg6SBZUu@oQv}83Xy%@cf+W##IHLKl_1QRm1?9 zfBHz-MAav$>kO8jzSLy(M;m5)77J+P&5TgBQ(w5giTo4sW=1b#k)<5Rtn!TXx8X%; z>}GGy%GIL$LzLYcY5n*sZ)c1yP%~PRSmrLjGv3loyqoC!BA@7KN!K#UQ__#rvL6nn zOszscPlkV1geER6SA3$orPw|51leIW(chm)FZk!#rQ)%^tUmK(dw9%0Ymc&X^-p-{ z^c2SNjmZzm?u&0}N8+8Qd0rnsX2U7*_Cp|Lbns99@;SeNpWaTaD(`1SO~yZy&zN0n z+tSF?pA7Cf{Cj2;Cd1%(9{dD;sxkjG$o($7P$yn^?>tQ#?bCevRhb7`tZj-99gJ@? 
zXu;Q7pYy?6TeNA(ujon#E(bH~K|WB!XE?cxKfIi_j};3|;I{{ehF0Tgbrx|Qzua8y zmm0@etIoyB^#OI0R%pG}p^{c&xW5{F=1r(LTfGx_&3d%sDVEw)6nhWcyR(Q*u%4#X zKG*NLN?gdcn!5UqyjCdvuDegUUdT`OSMq*#)0uA{sLp?Zya?YKz-<-TeunC)0DcGLzswLcP~&4|d>YH?qwf(Zj~T zCaGx>9^Z;ri`e4xqQ~2`eFIVdH=1w`sgHxj9ooftFg^s#bJ^B9r9Z@@-?>`_#LVPm zG~h`XHb?1HZ%puf8E%|W#3GBq84B7#ta35`^1kvFS>669`4{=jM^BmmQ^C4MOZOT$ zz0~y}efXREnc3+Dzv(1-*`>E>(PzT^A21w1FE92zQ*9eb;`glkGL*k27C4J69)|0C z+Qqr-csdSuEZX@K@I3Q|*S>+PknI%qID*_#i$MKjapldF~38j|Hm(TgjMx^er*T zb~ud3tAmvKo2W6-@*=-0v$+4n-#M)EPxNe{zou%4yw74w+jh@dv-L9R6Y@z?NV}#`@P+3N;V_O zzlWN3kYu9ut)w-W20rD@naYnxY4Gc;#N}!)!=nu#_QcE2Xy0TUe;+mv)AL>A)zq`h zROED$nWCw=V2sAciKI}QoexoBC2#kv_;M?mHDWX4S$|{tRF!^@r716?>qJ!7c72vI zH!INy#vA?5ir>aO%6hyTuS7L;ZosE6=)o$Oe-EemG6wNNGm#3oZJGIP;(y5|5^h9gUJMWZ0N4CbC>BE=g|H3$$Xps~UXX zBL&37;)6)ypW2c#Z~mX3j60>B%}7&D3n}9xGomrdb!l`e`HdC*{{I^d{CAiqv$=oK zrsdkmQXG~QQb+QIr-PE*{c`y6uK#-@$g<*hVuO52Yqcu=aFutmb}=hd5|d>tEYU*7 z-O~D>QN&!ymDop2l(C+}@xQbO@+i8TjMhM?2#3TG`CU%g{Leni^?mP4#-;mv|0mP> zLVu;~;KK@DB+kryVCEK6E*aHXZC(0ZdNVOl31)VB&)#Yo#W|9W4fU5*gIPKGD0ppo zuZ&*iz3lcg#`sw&|K|qSk3!eSIR3bI&r|0~bg-IxbJ5XW#B&eL&J5%EI9yAQXtQ>t zjQ#wn(2ZTZKz0R+T%Uo_KJ;XYS{m@{eaIo+p#|N|+;=G+YKT)a@wgmoi*{WEgX~$k zfFx^y@M{=ct-hQJKbYsqs?+SJnpm_0?^9D<&nT5qq|Cy+TSPh8|2g}nJ)mUfqGr?D zM2v~N6SZ|9zwFbPeL!+*bNUC#70X_~*_(egY(B%o*Z9ve{xZCK46Np`xt?`ozsFLa zC9*1$WE&J&`;UduB{DcAo66%(N%tw;bqG`S5*lIG7B=qXT)d`}vc@ZA6+~P|}CjtcSOmqr9q!H{0pUyu=eGhjy0QGFCL$DApEn z(Lga&7oPrEzVR|XaJu+=BpqJ?+bMW-G7c=%F1${cYH2xN)_z~D#TWwO^XN-otCrGN z(&R|HH3vKc;e}mXVKNx;TO0Bhu=cpq->tGUWkE2Vx)yebAwBXCVbvFDi2X_!1 zo`$-b=*v7?ds5w6MC8}P^d6JGF^$US82)T z;O8LP)QRtGhBMRMDP`C{%^FTZ#fzjePK{02LNm3uq$Trc-ehky$Lsz5Pv2k;8J>;S z)9_>wogJpO=CI71W<7Ouz`NcoC!^`3NN$tz)xaIe>%NbQ%vCq0Vbet?UH!^V;#2s~ z%%rZszqFpi(Ek;lCC)#Q9F~)NCwlZN7%fC+`cMmK<1XXdZCJ)&8h!?ep6@FAU2QJ3 zC!><3-a*D}Po!TR)qe-64)D$+;FjpwP2N3lvIi<&B%#bStX1Czw6w*EZ$-`1=;8Hv zy2||yWS^1u%7rXrXRq>)8$9no3Wu_qYrVf)d$mOLR2%K7=iwq=+`*n6Q%8CpA1HB|Vvjdn;*QGdA!Rs9nKZinjJeoRu{Nzs93vqK)N?9`?p|?<~Wsv;F!8b#I}+ 
z9jh9{_nnBpFDbJb*Z<{7E1%>B41!H>Fn;B|l`O3p8%RW1i5A=nyLb4wd2H_;_fGYB zSK0o?%Xy(qniq_bk-Q+hWE{IT@zr)XKG=IeN=)@Zt2ZT)bwo!Tnk_FP_O6nY-@ zH=4%G@%apIp6BIHAjvHtou;NgqGA@Wx(H8lCh-&e)Lg!`9Ufe#)GBXn2m3t!vcD3; zaHTnHHmdPU^u$-4rCi2?Tl$V}%%_oO^FQzNxJ`WKdeT^})z!Y5r|61;<+Sq=bu@xq zTRQTpXnQekCrj`x&^P-wp5Gv_F}Wx5NG<9$*#v)+n19rOb9@U!-rry43w!@uw18 z0{2U$pQeo~$z>1C{J^t&w6uA4Dtb5I)HpDX;v+Xdch=;S?eG{Zl!P!N>rf3cNv+Oe0yS?=a8%vy)ndz*_ z&rJF%cfz;6Y$N##2f?5r8=p&BRPwrbi_uuh;*6q*X2`bRT9e8>uUM-{j zH?f4dtYbBdax!YC0%Fc6E@k+qPn4ZMOJllk@J*pTE0Rwa;b&dnpZSULXxaY<4n>#$ z{ZpFb&ZuenTiL56Qcb30BHc(i@p3+=!*V!WOTEm@WaMsQ-|T;qRqJKwep;!F=RQu4 zPA^KNR6Z5|=V~9`EhAe8;p_tV#U5YKyGhKSG2+w}$RCk;#^M<^KrhtJU8IHkZQT zCVt{!@~ex36Znveg3ypsoC&j*jk!K|=4UOPLCJX`ZwIVH7?=lAns z$MBegd_M&C19ZKo`%ii=SzXCwy0v)w0$x11Kt6-w`m=c9Sd-;u_2*LU*i>-om5FiQ^Lv0AppwCzzX_k*IW&%`Ph z;&xY%R?^XfUH^&fnvi~dI38SZZw5@V^ZZwExmL}yT<;G;BiekEdhXW(U5H0n&G&@1 zsRbNfVxeorHPcCVT+y;UkFLS^dohnu&(m4%{|P?xNNR=AwVLq092T4KHj!dZX?syk zOW5gXd~Jc>8FS8l=^1(5f!f5p$uG?8_E0<&vP>F0h2-s3O4#Q=ErWTjVW2`WDW5z+3c6G?wi&79xZ+o+*{z+5hmTeQ%hY_!AZ~OP%YIGIIs121_ckQJ@L_C z9^*uO&PZ6sR?~u2*2cby=S|gKpSOBkEy>l-EW$9~P4r*RRm%)olAUi~hE;WR-Y8~x z3;fQw{T%H~CctX=bVuJ9HGQK*AKtbz?HdNaEqV!A(RsNx{xULJq{ieJ9ZVy-lK64z zs)b`|ulFc_t#W4+F~MfoK1mPe!|4Jvex>vwv@m&xecihWc2m*x2nw!b35n1`*|B1j zWCrX&X);4DWothHeKgFfz-b0vPgLIq*LVAMBF?em3$MpB$2qXzbTfxI6^cZs(tx zqc?OnQnouB-&4b4@WcIN`u>W|zU@hNf*tQ}WAu*^Ni2oaFfbS6RU=TcS|V{$Kelv7 zVG|QzI+{oLn5S5P#xHoN3vjy)$}&=Z6`6gZ`~~!69S%H=^6jWt47VFW_$U6Xhvnrs znOTniQtv9CJ~;ldJG*(7tPVLsEU|=STEOEymi8@e*hxpz8|a7+pWywC_&!XH$>!Kf zH~x--J7{ckSii@A)B^7a()kddJLBO2eAp_j_@g+PvEnxT=mcEY0+#{qPv@VLXYnGs zUIVkWx?hFCU&-|b2Q#DEU#e><90sDV4=kr^=abtNc*lA6H9Z|wXypm& z?*h}?Sa=nfoJ@0Gf?x8kZ^qrRbfpQL6KyA>e3zIkYld1Y7s*cZd6e#E*6dBr*F;@KZY#o0Kx+x>d9W;NRz!6HZE*DqQ3 zCA>?uV$^&D?uQT6=upNy;**=hG3$ELgR7xc8mOMk3QUIe!lI57U^0^a&471b9H<3C z9~dM%w+-H;=ajMf74By~rze`~q4INhz39#1eA(By6z{o?&b;U|mS1_8?sQZ~qLDIx zrBBA%_r^CFg-^s&(cfJ@De?6Gl-&D9>H7fC%RI{&7*`bU9El@Yvv`A0f{!@|XBJ8H>5j=Mc}5Z@Bk5Yg`h${n7uE)RMOj;dTC5{K~4`KuCO- 
zKFz*b%!`2d}$}-`Taav`yGNouT&K0z;bob1b-(Q`E&O|NWU8Z|&96DXl)D-WHMsb_ogg6r(!*NFyZXW8^9 zyDD+MUp-KAHO^nX<(Y+D0vKkKjg10ywXODr&0qV|fN7=(>UpkyspAY@;WS#o6 zQvG87DXUSk7urK)ai*)MVZIrxuhbLG>PW*gBK${QD`Vrw^K2QF{}c{gaOpSd%xP$Q zQ1uL7(1l0dO{!nR@+0?l6|`oI^Dw%;%$t=8PIOeqA!^G=avxMap!G?F{15y+QoW@e zopM&%aCAnJ<-sUPH2>=>`ytWbe;M=hXVWc5%9To)%xHr}H-GrPFb8 zf=Zwyqv8qmR6}2SCKZ(}omCr14}p?#{JHK}W6ESaeqVksI`e064&v`KTadG= zvNu}x)ydA7_u)u6dh{{BG6cUf>Yhy3oZ#>{oym@OnJf5Koh1sgpUDC+_pbi>39b04 zHt7sJ&%PJiK)*G_uX04K8Ktx$NN@1(3@JMc(@rUzEJlc&j&exv9^LEn)B}*W)!3)5@pMRgrBc9C9Zlt%X>Ckh0NY?8#SL#bL{}6=i z12!GL+r0G@y-K{dMHKS6`l^C`KDeie7_I?3eaB9)t;z%Ta<>ubX5Cs-a8BScccb@I zKD9Xyk)0L>^1p3)t|9bqC_g?@%eaCUuC4Z!THFWVU7EQ%Qpkwvp2VqPbT{DL0*h5dXF!y!s^L21_BXN}Fvs2vTXtWkQ<)xlcUx?+aGDE$Rk zS#@%Z7OcNgFXDYi&`w3?0`DD#swYJSUA=d+->>7Kz`RWn4_|}Z`=NAzsA2)`4#BypTCE}8sSNWQwJDFo ztSuS!#;s>q(rrb|)(o^AYOSm8*0`BjjO>#&0_T(QI{~fP@1l-+pYql|1s**-e-uA= zklG<^uQQ4s1^ZH1jlubCp0~h*)1ECiD z=dq{j)ps4J7oeg8Oxx3+mN1!2e#uD9F1}|gf4*PqXl>R}wWDrSJ$@zmy#><1fale*kOlTJZ80s9Fo- z7s2etCX>b5oo0OHekIWJW=^U3y}x7wROQhEcLZx`&Er3bmOqi}0@j#}gxAD=Ye;6Z z_I$4APmxw%vF3Md^m&}9Bckbt;`A-Q^chO`Kc>k?d+TPj_JiFb{^)30IGQKR9x0n( zbtwEM!KDgbWK91i@zO8Eo?GG38!tEF@l-8Yvd1d3%uP!5B!PAAcE+iB-e1IzUq-qE zP<4$uXB3h9R@}-C#$S6k8EIKJHpDx%LEFRXlPR(j-I-VKgwxA;fjzLwtWDO`4_5cD zNaq={?G8q?xCNMBx_>TBIi6&)O8gG*?9#KFOahhRKSzz3S;|b}9DeX?{AooCAIJA` z-d})|yHJ{Zx-;M~-sf2Qv5H5kioa{vS{sz~qy;0rKOJx8xVs9~zeDRt@3*687lS$# zwAQ4$kkz)OF<&e7F%4LYvqSOW53Dgc;(x}e@4%VI>t{~n9URUK-G;*7>;t>Z-M*@} z>{`(u_9N7I6~A{JT+?4U3*WyW`;4@l=l6J=u2J+U_t2)r+Lny@w4xI~qY3M1UJX#D zf%75lIhH+cr6YHOID+&u1JD(Q>uAMLy{j4UdkgM=U@HsZ)EV{3giC)W`(|Wske%A? 
zjw~=UH`VaqbGq~nyG_n_Bhr|Gd*8$JNtC9Ka5{a+s?v@0bufvv!>#p7cio+7v9qJ;_{W})WD&{!Sy|v%od;-0$1<<5n^jF3}5Z%ThZ^IZ|%rJS6^PXF8IKVVUj-IVd$ z$pcA_X;yn5!5Xr!Nd|#Pg}x|^dd4sTY{2^>{Hiw(!W28({eX^p(p#ZH!Dc5p|p=+)|=;PnKmI= zF~^B18{+#W7>!3)@=e|c|9<+s3AEWpn?B;-vTxqq>P`st^!Pl_yTRZE))3#9^Dd5u zLzy=Yq#1Xz;3;%3ySf}&^eM*C>-q)Ur@d2A3~?I&F-U-laLmd#4he)E+)3KO@;bv(@>LR_0l- zGS3rj?dn-2o;SPd9foJ69XZd3??(lf(v!*Q9Zg}=*84fx{$oC%f&1xcX1~aPdbd7U z-C%R5r}cf0@|U{Pvx>fyFKYWo@ho#;KYsRwdDcT*@o)Fif5?8H$#(m3f5u3Znifw>y=ZeGZQsk`|%B{4vmMpSAqA-IcU8Kt2-K}lFMBVCoWgp zKX7v@ST)_b!h6e59bD_u)3<#mdnYID-9;xd1C=?P*x2d(QmJR36{u}NNFV+Nboo$qrL)+J?+j4V60CvYJndTtyvYjZjeK=CZ9yMie7>szO5K6l zOT`6Q`_+eacLt#kd-_<*mT~zrv`vY-m%?~6e2x<1^l;~5?NDPp-VUpbA0=me9$wW# z=~57KD%l5Yt*Wxg3^-1UF+iC$G;0F-Ge^A;9-UZPO|>N_zKh6Xgs0cij%2@|sqN_q z`>9%)wRC5k9>z_cZxGLQ#HSW?>KZNCRPuYK=mT^_NdtH!Pv#XmGYI$2NAdMA&$#b+ z{M$uOx9BTZ=SMb>!So^q{sN2})V$ceoV0ot2sKcX9@fY3$oO>hAi3>3K-)}Oe@AJ` zkI|+sr|CZhznyxYhrwbkOD9}?rYPTvgeHQ1JzTy)$8voC0!GRD&S+>0nwwF`p02ZJ z(;6J>Mce1n;B!H10m}|>{j-`kkX&+Z$CGR_-TSkt`Zic3i)9KwzD|oin4M<# zon%|J#Ia<=jKP&-Ni1WyOGHkAc8-|h0{$y&!FFl2zfsR^EF)u&$yC{(SCfpZM1tGZ z{iX7&(DEf-T<_@+?NdD*8l=^(ieuipPB)&yoj`{RyI9J^VTgeO=W2o_6RE z_^%|F3sCU@jQe=@wUTq_#hV~bK>Gt=+yvg&tYU+5r|=nnhg)*u8=@mSZ~e8vZJW|_ z@O>&c*+=m)T*^*LKNsC>@Yw~@f8$+UTL%EkQaL$EJ{ZM||mzS5N3YWcST~(UQy%^v9PO%4XF}6MmzD@2nts z35D6+_i+#&fX_kR`ZY;y=6^EJ)Y;#OYR-s9X1KC+ zcAoo;J!EwL6;?B!ovbFYj07bAy%Jsg%DWl;X^xLMQEoE+JfwVjij`uvic7Y80ha2xrnWTthb+u()f$8i#^*sGpAhZ9L!aNvl1%Wi)LLOlyNSi3ixC zmV1oTWqjZ{9K06h@rHN%&I<02*y1?#J&OnJU_J+zZuIs=qUY_(H$~T1>YqU)r?7zQ zV0DLI%Y8biIctUQR`xSmF&syCfS$hJ4o{kZxlsH15FS4Qw{}W>CL&soBOlR|SVra~ zugBpHpk_{Mo_hYl`?M$HmkX@3gU(1X-#7H^GTL(vIwsQOJ=)w6eB(P{HU_f?t$&W^ z`<~rp&iEy|G}iwI!OYm#t01Orf47L2-%#5$7+$QFt9hMq_*k0lsS5wT-q=Fo3wh;I z`8RRn0va-e&K%4_vidmdrOqh6hmh}&D}C=$F1c4f#-$k_FMSR%E6auDKD~%0OEXHt zV0k>wmm|Ab%5S6jIpgXr4;Y@nAmT!50bpD2(WKd_U{yaXfv=iIsN_JCX5w0b| z|C}F7rd!q@)Kd3EwHe>z!ftoVllo{BWIz0S z#9gD%^c^2C863j;OM8lxu1h*n(%;g2kCYeaqPtm~IeAM&2;op_YYK#ZKKAHv$ 
z(zeHPPsX9EdAgUx7*C0Auj1=kklbZh>l9a+5yMrGI}I?65+{ag=)(Hwqvrngq&+SP0(t$lmFl=RH{Yh$MK zs?*$kfL*sqM?MugN(EF?n&N|9@G~hv&*qL5dhsy!D*;nn!TRK%M^CS&^ z+jVB{vTN4{Fe~BN1t=fwjbUJ33tBZ4W?k}iTD0uob^$qjsRgkQ^P{_HBc&jhaTwLD59v2lGP*_%Q;qTbt@0IMO0axJ~RWr z5v)h5y8~X;1HU@%WDnfT+hrBj%cz;6jwN{BMvK?UcXCX}sb@OxvzFc6k8{g$`84g} z47xHB<#SQ93LWWxCLeGEU3!MTZ7;OF0$AxYbS_%c>U1rArwU?`%h0*Rn>*dM z>b!;)&mytq?l;D@GjTpAe03$uQ)$FB_mZ8M72#Dt=td{6RBu)>lydTyyLT_CcZX4B zUhQ)c;B>k%6(0A3a~&AldE4~xm#KNGs3*A<*(K^4H7Ax&*4hrVoL6K6je+GHZywDW z9>euZaXCA@JkPoss%j#QH74{S7bLS`81b_z+M_?RXS<8vO~*A*f;Z?{^TjNA$h8kyuA^>Iu_bCqVSj* zMfj`x+v)nOOFz`2cU0zbe;rxN0#fL%#tophfZ6vjI6=v-yvWm_G-iAEsCTW{aWcQN zmNm?Sd*-g1fO0N5rN6$Q@K4!+J?E;-6Ft69X6e&RVgZd&UrkM;VO|qwyOUCH^2~_Q zQZTZ&KxT#x#m!+X@^>I6UofkRIap}DKzp~6#Rko*W$y`JXSaH z%TV9zVP1_!G^G2D`Mb~XYaQ5Gb+w3}xRYONK>FGFW&}IUK8-EZ*@A|(RpL81j#tl# z>Tj-&!+C`&-cAnNGBVB_X?C`02-6D+Uar=T*TkXZ5PyyG2Jo1MBL~yvw|u{-p6&Qi z5ihTyfj7{!8d{sy>Pl|kAar*F|6KjLNqEv5j`LxbeS>Dv(RuLLrKU$|=L*;i&QYEo>uEPbktyDtxz@@v~60rtUAdUK-Sgw#`~>&!BHeu z+I=A-cfFJxtd0kHjr4yr4!>C&{SMt(%jQRTBQP7Vp2tB*cI;Z-@KHRh>iGmzuR+an z7{$Bp(fVJ6<`t+MQRw^oyz^RGvl{iuO>NGuYkD`cv+ZzoxAr!xlrqbA7X6K+vIf02 zywk&+iE|m7OW(Z*UYB<_t8$vLhhO-PWsW1Wg&alecGbdHY9uo8sG9( zSCp=$1MS2e^J(xUFn`e14Y2BhU+JaKhsPqCHD4q!lx|(AhDol@SHoj;wjIsLs8-I$ zZjP>f$t)R;%(KTTy1LU= z9oawZM87J-t&;B^>THAir>VCtYVIJ9&afN-uiYYm>{VaK{||8DS<=e9=C`DgQy;Rg z!Lc+V5ziU89KY1pvv*l*Lp^~+Dz_@P2Pfv^^If=n4(WvEjAUL*Z$1%eH1zCTmiPe4 zd;?BA>bKs?C}Y;2d{D$8hk-N;Esgldg<$W-#U?!XK(Mp_z~?x+-Fw-e;W;%or2Ac5 z4JGkwjb9xo`u-VhJ)IwToL4KS1uSEurO0{{Ka$?!D6+1|zW+i$uMkJ))Zz3AavD{6 zTzZv0=S+^hE09WaiD!s+_s$iTAg1S#PiZ}W@&dD4W!B;6&Fs)TO8lN#m-?XXBbqEz zCi5^^>5{%d>62bi31aCS%AC@Y>&zsLD#{#)j_e9GrMMc6(rtX`hq#vWnsVC5Med}J z^`&+)E9op+la3Ohzsp&{%%lx%qy}3>XUgo{t#etj`So-XX=Lg_L`Y&bvvQqwG zn$fVpB`1je+PzPGGM||f?CxawUG-#2y}Ya>s_*Y~YWw>3N zt=^4O6}-7jY?QU!CwM0#14oi}Ly~T*jy~S}T#KGL*PeX(HgD92@k{<%(6+ituBTfs zz&Sgx<`lD8?qpy7oc@-%}Ot#Bv1WTi(v+y9;Z{=xfXjG#T9 
zCy(8DE~w3zvbIF_(Nwxw56c@PUPWlpSXIuC>?E2$}*6<5>TD<9h}Cov!2(_b)}&Z+OpJwHH6jY}Y#wf|i1G>jo@;o|HTaPg zR~Lg{S8MzZ9XiT5fA;FGK~`t$hgVhODQHQr>pJn_Bu}%q`F7qm=VEN-opzz2F+1o8 zk574o9WcnckXd-0nc7FZ^E4S{)qRJeZAsrbJ8a(U(~-A47iQD=`K%ZEN^5y5?5d#T z1JJf=Z?ePq#dvZqz7F-~0c0~1*2kfJExpa2<;hdZPUcO`nU1HA&EcG#YrbP8*^6oh zJ>00Jyq1hk@op2(vmg6gb;9>r!<;siix_b=HM+_vTYPLHM3k^dHl%(kk5A z>~t)tjRPUGVOd*zGwPb+$SSgFt%ftfO(b%H80ATl$jLH^{)X#8wIt=KV5Ya9`KyuO ze2**L3Vt{A-WGmf2P}5slRLBrjO@P~|B$8WpCIj|E|IfkuwD7Np0?sGBi+S1fg0c_# zLt3yoYPcF^T|oJmZDd8mLh|oKBYJrLpRmm6$zVN%U%~2l&^F;^`YhY<^+_IKUsoMq z^Q33#we?5KYC5-p=B=PR>Gfq-rt83Y+pn=Ky}f72UOJRShO>n+c$l-i2f=bRj8^iw zOTF{0vKis-4VR;7Q6i*_)i=Q5tm;_6kKBsl7r{>7C|SgnX<~XnZDI2PoHN`1I7(g> zWi%|TrZS92Dw)yyE&iVb$E?4}?sUmq9p`>kTuQuNfmL3ohDhRL5Juz3t@z%92A#mK ze-HC{aNOeUtlxqjOduyrsx59q`$}*lXhc>^3x(f;oTTfGNBipRGJ4vY(p#L4c2k;P$K>3ol-wLx+ zQJ!-(X7XoMS@;}xR~FnI3E#DRQ#agOqlO9iKcAHk^W7bfPlMq+8q-PimQjSv!BoTd zWGvi_=bzC1$4IF+{kfReTEI%O$}x1eqa8g9E2&v{jm~&}D=sF7a|YUD^WT6s5DYoi zTJ-8L&px_awYj}vT#F`UE-~}s$@*Q7EAdyCdiFFq^+o~@$r%?H;r4ZOI~?2Qf7a*M0Oe)x>>$6zG$MN~yvgI& zP}A!$&+4c|B?psP={%o)c)r~mHNe_|w-0LVj$nP+Q7$v#T}kV0FbCjwP1=@R+Uzo$ z)m^o5te0PR;ABHBWpWHAk!SY!xLB#oMYdA={-pFJDy}2tWIevG#^q|r*jY84+g#{$ z<`nz0{)Vtzz#3|(DLE+5s5h-bJ=}Q@cU!WBhB#IO9aq6^9N3?#DRz^+%d)e|mDJ6$cHx7n5ZY#P_h8{hN!uksk3ars^F*V3e%0G6FIo}&5jpRc1lxiOg!?a7Xk z-(CsUHDH-@9EP#spL!>w7f*Swr84R7{1YZC3y;}`1hS{jop{ulO+TmR&)EN$YG?>* zUp#wXnX^bIE1i>#FcHmNV4fAOIai{J=PhyeGS@>`?``m(LH0+0JFehScH+4W&2x-0 z&!KgjXzO#n%k$8^`RX^>)d?gsoHizRXe~Xwl^#{b+atlrI)GN*jMhYI6P4+NqCc~f zMo1DFJ3m)Evrarz`6tlg_ls(i{Lt|k}yqnX6o?%H{>2O+xyYTxx zeYLiQ?VSeKrD(|r(4XnjiF{5*I&0IVnYf!4DH%!c;OBj~kaICMknS&OO|oP+`phBs z0sK*R6VE=DtCUDi&CUE`W!hSkW@KH*{o=@XNb+O2M`QjA-_pu#hGlXzuBS0eS?vjG zspGmOiA}?iOHh5DJ8fCeR@7&t_75y~v%8t!O-A)nyvy3YtZq)O`xy1#gz|Z`B{|^P zt>j?zyh`u77nEmqJ{Hxrh(LP6_9_~XJyT}l)F5Ryfl*!k$+pVOdL6!PA&6h&bX^=> z?JC~wQ!r5AaB%Vble*?r8g7Ph?}!8r@Q8TMg~tOt$ug zI+I(FR`|bsvVTl=Gij^tnPiYQBlV4gb28VTD5yzxaZV1~NMg^ipzICU4$p6-yQ_S+ 
z$D<{no&~~Za7xC))u8OF-;#cB;_rQmv9OAM9azNgIkmiW@@OeKPl;qnW-jY$n}e!M*1nq3e5@a^}$VEZ+5)c+wV!gp`yQ^dY+zn z`o@(+L!})R9>;}EY%BW|r!SMt%ftQu2z85it3SH;<0o0gIZrD6tui%^@|SZYva&F9 zR=sfX6!jumAY>op^qkW($!f{0o2&&+W~Xz;<#%A^M5|bGR$8V99NwG+ zZstgz*QS>v>sRSt8HwG)10PNsGrQi1PE;V9+OWG5?zKV4>a#cakS(BOgkVg;)!J;L z49z)p>W{ct27}{0djXGzk#%;qh)z62Bk!R7f5W$_MU6d`+i!Yd(?9%NQ1$SlwQKMim*GT z;MEopP4+4I4>goSN7uqK&M%&QNe`EiSTc)O6tc*^eK`}n6JGq)Tk+-f@FIKG{T40d z`J{y`ekZBjqwIysPC;QQgP;Pv*oWQh3VCuMKV2%342RN*9nsFi)s(D_FVz#>IJuzj-=UWBAEMbA0m$wxewFuJ{v zd*f@6v=EStW#sB(#l>_O8LgPv~&dD*mu;6yq zeZS|;+N70?jBl1`GPTp3FMW&lESO z#cbI_<3n~o(5Dfwhs%aT~~R&pPw zwg=FX9n3SQoB4!K$v3+vU!&f<*~)uq?LGr_3$OnQPMl0%s;Os}8dodX*VSa#nGw1f zpVPL!4rX~?bAq_LFMoQ5wqrPp`-+WC=BY=s&dlp({%VlB8OhuUt7NBVMyree^Lg2f z*?<2-tlLHOvH^@LFy6=>vlGR!Xsn7a^KiU$23ke{hT>3WY_ijHQ#Q~<>-LS`b!p8s zGI(CZvrhCdN845xW$oSDgWtQfD$|X>T!6djmkvSyAo%}Iw6&hiWcGOk{4d0h#MOJq zt_92K!J^L65@!ZE`F9Obne!I1Dr_dr8AlfB)m(sTUwU&cY+B=E=98Y|eJ8_njasJh zDw*ZW=yqk&7|d#t)tHrE1RRnCSxs*rr5n#}!vLFOC0F$u>SsBb0A zUQl1oxM=TLe_T!m>j>{ZhW?BL41nol!`E0 zPflI@C%j8L8vYVh_o|^1jd%~%nc=CUu9>uX1x_bdGrI`)BD3k9Z2++~t}MmXgW;c* zgU^vm12C7c_3YywYkl5Tb{5PDBrV)s$zOa=Yl7EUQkjWo`{Q@=FEfske8Vs3S@tVW zHbpY_>Y_VWHR$3JyiU&QQ*{6F?g*URd3t*Bpz2}qD3Ao<_jG^#)v54fm(x?`^OXkwn;ACeUUFFTn zsLzh1iO9x*H66sZTEXm~ls$)@T=&97XU@li{ z58Ud)&n!_xMtO2kB{N9{5yP zO(&7;W_UZT>qVCGrM2iYLEJ6^IEQCQj&*#)+sd}kc4xI!!IfbbS$D<2@qJLAaS61xITCD$|fn zsJI3vUqeqB$~NKPaIglTs})Hn&v};`hZb)iN9IFNmTaR&V0A#%TK?sR!e0+Z=Xg?T zgo;P_(gkYzf`z2?p>*W=;?3o1ID{TdWA{0GFLUj$izu>YKKW|dy?z_|vmRm&S>LB@ zG;@cxwmmq>=w8qFtYgXhlkj33NFIKFC3>Mh<2o<<&fIq>&b|n(!B3`F@*R>1^b~rt zGkNlE&*woppfnlpiL)lM&(Z3hf$M#}J0BN2z-JST>%pxH>3xsB8(rO^^cZ+#4zdPm z4kOR(esZk8j8~PRIOiJHqhIOyt{}Z7wE9Mr5Z)-%lr=oTh zZe~C9&Lo<(7wIkJT=9jpdUsK}0S;AAOC#`llg|tke&U@INp2L4PmWaPv)73rvI2XK zdU~L~r@Ehj&00@>@6D`>$v#!tUE&w;|0&*NpW2PdjGB5Z~VRMDkJHw zVBZH9~h|6oZj;goIRCR+)kG+13f$R=Je0uG;bmdGNN;%cYcZo$(~7T 
zniwpy&q+oZC(anvC&itc>Gm*EXrdiHo?Se_+pI&?A4n~^(>?Gp`>ijjtDmihki^Ec6Z`enIWo{pB$xb$|@%iO!SXL{U6DVN;YBk(lq3$w~^w*Qx_?=Iij zc{{z0y?cFT7t7R_-bdyH({KJOu6<5N(ns3nUgi#Wv%h5Yck?@QdR3IquIib~tgW6c zICLj(8fb^OKfJ&({rjv+Oz!na(pw~E`JK9Ji9dFtCV5Ha)RF#p&Y4VaHnW6}`|e(h zARHwks834G=xgS`vqJT{qSv?+Evaoc?0Xk_F$(0mAf|tljO+B+_W`{N|F@s(%nK&V zy*&-i8MUiPF?r*eU(QLF>-doDpq}-z*_Ac?OuulXC%4mxf3WqTu*@v)0DifWnk(X0 z&WfnQ`hVuxQ|#*=c4zMvrCYM=e-$GSXYjk(-M%Njd|Pl}j9NbRYaPowl!y48(zU(& z5Ki|b!R)M=-P>l) zI6^(O_|vDAz639>U^zKyI=xvd*tF=S`O)-?v%h4rs*lFiir&1C-Q0yEcf#~4T+Yr= zdkP8;_jXQ>PH!_=JG0gLg;FipeK{Pf58t!M_%XKBfUSHfR{9jgtUEp0Uq)OakyCj7 zNOAu{6Eb>pxF^{`C_0h+iIn>H&)$=ovYz&F_VN(fUh#u`OS?H`$I(BybENl5z2<#z z`Go>X^uP3p=HFS={TK;+9?Yk`iZ@+JFRQt-ugMTCQD$Rm@kNW%ag3p z&Fs^OTEj%_nOXXUcC`i^Cey{tlw?lvOFsP^oO|27Qg3cIdmBl2OKa3tqH{hhmc!=@ zT2l#Mn)5-Ec;NV|rf@ido@YFFI*uIaz4T^3*8^?m-Q#G7Qr{xEwC+@p>TW zlkqfX4rOn}_VoNYEo}C@=nm6LMdbK5cH9ZP>Fh3Rmj|h7fp@YxCt2sG`PB!Mp7iTl za6WhU3#HRjs6#F}@w=XTZ=y7NZH`y}To%+piMi}8yH}>S@ivNjx|iJv+i1tKd+RFY za#BThukM4dUENJ&(^b7yK~08u)^65RCi{h~)K1M21HSB6FoX zvZGO(oVlE~KAg3#z`M=reTTQnsj69DJeeI`g)=wPiL6;o)@N&)nSJzf()1p5S0|^l z;kBWV`q!v`hBo{Xw5M3aCrS>*%^Ptu(MAW7&HQ|IZ)ZQxajf@K{%k2J&PDq)u#y}4 z6x@>qK1uD1Xks$xPQuOPS4>0ecD67H2hP(bMSrhD?Mq}bRvk52-VT({M9&0evWH`P zeq$qQd!uALssE>6i7<|)rBmU!6O83-U=G=rM))t}x3c>9d$?vdpqw-RDyw*m-kt?Y zc8*U^d_3+gz|)*@UQ@kQVO`Bt)&sZJr(aL@llii@@l@Y=5V=?5UJ_{mjqj+}iX!`=hrNX(ac4E&X|v+;aNuWd19ADqTuv zvoawOU`~Ac3WxiP5Py!!H;WOz8T9gLwN+8`G<6Na)y&Fo5{GB}p)$*Fg!1aRxkfaR zY>#`59i{g*M1Sf^nwb@niF0#G{dm5pGTgW0Y_)%&3Wx<>m%*m?j!?YCJtmr zvoUy>Y?#*MQxmjZ?4}8Pa-wJx*bN2e6;`pD&Bq_6e?LzhSpj(m4kSbW1@g}*)(Y3n zl^e(VXY{nS*7#98tqlKjX=1p#gce_;-R%UUA?jYvmb0E?j21m73uITnvweTd&)?5B zqhU2@?t|Xif^WU)*P-Yi4q9{gN3zf1dPZ|{9`eID@q2tfO=NMI_p597l53F@lxOky z8$o!}{hXwhdCGf8XCa@n9G}M0wzYg_ske0%?O4bkZH09*1ID220W|O7VWVS@u%Tn| zc@0^OC)@5|bc5HU$|Or;tS6ZxPoHFf`=epISlJqEJEM2WT}}pi5Aff{&79YK6>inR zgL~<~^=cbVFR}~T2)0w&9c`5uWM$!_ma08A{|6puC+tf(Qir2DXB=Eg2AS!vgu;vc zjm4?{c)tZje{VgWOb}t+D3&~xjx>X5Z|@`u=hxU%Ro*dco5p%~ 
z7OuPwa&~)bMt<|elDFeZG^P^zhS2Yv*gW3bm!ki9FiLqY->4z0iI1nJ|GDTR`7TZ!*U55_~7aIJv*u;I|)%E@vxQ zr@aGi(T*dOP7Y6PZONbU`Ep+PJKW2v^LuH{Jo=ZJ%B%n!j-rF$njZWBTp0=0l){dV zgx_zxozalVtoa;T^rc^kdy)aritg>h3i^2`ITT|Hjy_o6pA$2atMWf^JBYVYsJsuJIQ3dnD|pV{%upnrvqGt^q% z-Iv*L1Gd*H#0QH&o&um9eJjyAlqv>CN7*-KW?!c+6QO-zk zQ#D)$%N;bM4hUDlF8g{Pgtm$B8{+vEa(#vuX|K!(12>bk}wjr7O>1QYFG5bd63wP^HoB@+$Q1hXyu-ozr05=kd3^rt2v1A%51xUQozmK>H)Co!J+}|cGhbY)CA6KyXXTvP3+TpB6n*DjW}36kdL-z-1@+~E ze|P(J2yNMoru4*n6}6|uO`m%+PP~ljhw%1JlFiOg+1WO7+LyOT|1-N>C6>R64=aP? zeqd$PBqLqv5#LY8az4k;w0hrxKT6B_6c6(qjAqlHN}+S!5KZHZFIg?d3`0LYk4>{Y@gM#7ZBz+l7tA3zYW5${0#TzI-Bk z{cm;jW=|WmeL3}_8ay92g7hD1uZ0_f^~1~1l9PwaJlpq&|G|G*!L?EDW|lr9CEZ}2 ztAGFO!^6~fCr^$nY$Uz%fB$40AUk6GcyE7}n)!*mSK1#gy4I3TmeIk*pveDc1IZ}q z#g@kN$NPx7Mra={A>Gp1CfNx&{rL2`_xBg=O&y1!=?c_l)LQKhrr!(#7e4*PQTKEYG2;7M;|G1(1rg8OTVR&5+lcaJ9xX!J^Y zZ~_0b7;i`Nd{^^`6O=oG7Ux`p^xx8!4P_TAXirUY%1PFn$U70kjU>~Z@1BmkYk2i_ zeD+|U%#Mu4gTry^8g0@8>N$gVd(*qe;?S_xc;eW=6zr^{>bfve@PK#1i?dR~_55Q)Cy0SXz1MSxP`%tb=Fl_?@{b2`PDr^ZK3pMB(RFzHS!rj=Xz+1l2`Tu%AV2Q zWff{&ZP8X5b|WsXhR=9y?)mu9qlgyPvWsKMeY0BT!oEG*ex8rWIe6^~=+)Jn+?i&i zwGNjvi1O&h*_SvykIaI63P#%hp0qGI!8xDqL$vNu&!sG2=QB0KfY(^Bt& z>!~D`S%9oNz5vEqIheV%K?U9|!95<`*VC1qB+&yG*W*n_St{eyXy5Jeb1+IyQ{Q2< zp&>i1hQrVLT^BXoQ87cU8AWV|iqptq6wj{_-snPCyTb1=)|0h- ziD=G3Yfdi7$u!e_vOC5)Tu)!{a-KSS>n01k8BVMgBV{k^W+J4VymEZ;=09*Qz1p0_ zd;z`rgyeJn#*4f~@+7kJKq9H(q`Q#rUW;>EQB^vv;~h`GBdxLQV`9;_%L?vzjb!X* z#qzvD>$AEdGyUB~208IGbB1St-A>)fxIY+`vGRs^lk+)xf*W~l=RKNwC%xt5!X`ta zDmYpHIEQ2}#Q9fwy_^Lzl>`>!`&2fZvtzcwBYPKThqg!EecRhP(Xu7DS@m)m4Da-2 z&NR;a*9RyIH?wzhWhIh1n!T0Vs&gn(K{P3=fUC0SS?;#> z<`(j*!Y6IwmD<84R@8>Y zQg|oJKO?Of56t-^vGu2LvW^m2m-3SLv*S>D)?;vdE>4_A5`mFX+(l@qK+B(|Wf}1t z2;Q6SCf3Z1`BVI8P8Pcs^h?M;yIjr0kuTIgU+Lrne2$*>dU88aQPus_!dJ8?v>8=?& z{vNK`cc6`T-t=yAC%#Z3YlV{I(}oN(M)#=N{y_G<;IT_ht@STEfS=jv<5<^JI#vrM zndd*z(;7Ipgsug{L+L?wRLNS|-i1}nf=hPPnF~T@Nv|OFW7$Im8h;ayaEoV+`I4+1 z&xs*fF__)es=+_!h2})bzu@{#@Ly#iTTob?ynn7t)~ojwr}o6b)*|8K)N%<(6+G)g 
zCUa=s4eD*cYn=_p<7sbYwOp>n%6{#uQG9k`UxRpr>|ax^cx#tuHEBssRG*F;X(3yX z+5u{*McS9KoMei0RVFKm+R?4}zdB^`Hu?2rAJN7&C{63LgUphbmveBkt7d<><~u~4 z>p^%4q^sc8LH*NVk#!guKk0|Yfv&Pk%uOh7$@85j z+2bJl4rHwTIC5)8V&}o370A=P-Hw!QL2oB=yRYE%6YME#7L$dUT?KM>^?Kf@1&@`J z1~UKsl9+WdToZ?7C-aMOXPY+`sU?!Si6u;?1DP2Zi1seDE-UMs!m28a-tzgFE-%Ep z%{1&L{LIL8_7!P}KM$a9s^<^#-|w-^mbmjgK2`Cnv{pO&2qv9DjoR!+wj1UdkIzY7%h>ZY8hSbDyn(~nYiw_HpR@Vu z|Ikk;^@o%Bnhe;?KWCrhoVSv7)Y)tICpfVT&kkXoABht(KYcHWBr7|;n2L1xy#MKS z9Kt5EcUTGj-hM{UB8+{yU zW*lI(d$VBs-VeQ$XZV@yK69q0XTsxRZQ}1}QFaZ^cuyO$PY-f`EnPLS;56mOz_(Aagn#9X{_7b_IMbFIoAMs$5Uzr=fgonw=vOi%NuPA#W zDb!a}vIVk|{!DmgO?Y|M^az_sZ~V-n{9$67GL&V7dv;qs4oo8xTJ*mBOjh<+`>~$yJ#VhXeI3-3`LK$lIEm)&5A(a={dz%1##=hj z_4?k*_(OIpIE8#NauQ53Mw8Q#lBNF`zma;*!u2;?zs_&f(Y|EI{gFK6N|OFLpR$pk z$$ny2uz}6^G0L;fs946gk5HnPHede;Xx|O*8`s`f3x-VQ1A)mC?aQB7@z99w(dUW*qn{ z|CLiy&iCeYa?fh8j4fUV=lS5TqH!6C%v#q~YMqPMmFQlwb$h$Mhfn-UtT$UbvrwB? z7iYU@13J^gd9*L{;mLl?JYVA1PyMRJUhnsQTQ)k9A5Cv3Gek|v=4V=+^|`*$&BN>@U4Ms)#zj+?LlSGFL2!q<<(thKda2M<$R8n3T+Bp%V1&-z{=A5=R3I3V8`&#K{?#)DR zPFINRlV#i%)j7-iA9(W?n$o8m0KdNKTwBCJW5f>`8~={xedkxsdL97YU4?v_!Lu0* zpM+n=KXSTC&J}Ekx)afq6C*c?J6nVGAsK(~-Q?#a+8C*wJ*S9U8nA??@uVi1yaJ2& z=tX)pec+JjEg1r9l)o0l$uuCN8wYEj`+6t20#6y!%iQK{{2hQlnK`eGl8lVBWI@?k z?chQ}kwMmul~%Mi0_#usb`qX%hjjz@H;{f~9xQv>WcF$%2ruE;HF(+ump1v%9L}LE z_fwQ@1p7cXQ5}TDVA+E&WBe0}_CJ}Nt;i%hk>*U4k>q_b9XN%qolOc;(AB=MmyFgo z$ALO(YKro#agGn|1d@7d7RUklV9 zw|jWHmR2Q?^<{KsjHEY<9;3Fzn(4piJoWbM`zFs49k)^bQZmW@vCn(<9LZ%5s4{o6 z1Nnc_pJdX$=J`3O&Fn_T8Wy>}Qt8ajErfSH()}g+cWIfXpk=Gy9=+H^7CDt@t&)jAoASoj!Y+OI ziSDex(YfsNbHC1`qsNkQc2Ca=|Lkx7q%zqTGHVKk;bX?u{$dm&ducs|?vL?3ZQooq zX1Al*>RaS=EPtQUr_h7f*w928oSpJ!xLyL@jp{fHx3VWxvh8E_qrKY>XO^HddnD}E zI@Ms6+1EOA`Exz5P7ad_Pj(-?Du57v3Obdc{FF#3aE{cvgyuHK5e39!1v ztk*lVZz4-bp89{Wm1Ke>Pc5rjDv4hjD3?g!GX81^zjHH*HKF6*p)-A{WWKylH=kkM z+0m{%4avzv?eRn|l@>iKXdcF=)8XCj|6}XEqphs2{o$jr_ZlTCdQC*52vLHF6iG2& zgMbx8iW&tHL6F`=F&d4DfU!`d$i;$469~OoA%cLBCel z$3}Kn9#k$Tu!(JN%;r@f)5}O<=5LoOoyh4{q-QMrH|TvTj?E#p6>zq*-}pwv`+N?2 
zle>)*`w%ao?3g9n$7ARkfAHkj$QfmSay`==e=vwTz-4|& ztj{Z9dNz!@_Tr|-~47qWQ2@vx%* zxsSRtyeGox8U0m5y~Kc&MgIj#J+IB_TB@K-_KPk+lP;*%i!3J3U?P#$p}}lDCW}XQ z_-klvNfuYQZN3(-CIojw4@p7_uit(%jzC(MR(KKr2-+8EJJG#<~xsR^X%I+VYd z+{GI(S;4E*lZnMn!)i1g~?Xm%+WJ)@IW3 z#M!sfe>+hAk}l=$hC%8lR(mTL-$kwDw=eDEZ2?!}zvKJf9NzQw79ZuBw5lxLz6{UA zyJWY197!6a{l&!yc)lkO!Z>`2k5gh9%8;q-(sU+ai|OeE_Tqd{zohj%kH2wl4BJQl zeoT(OK>Og580J>!lhL>Vdz@PnC*$uhBk^j`X7?)@QTl7~MI|pmgE89p4?KQGG6v#T zKR8S$9d%Ii0$l!(o!jf%ESg%Cm8%Q(+`u+Y$;-)FGqd1x;CUmuH7WMwj@QF$`cA}q zyz?uvq0br9Ucr+e`eUDtR^Sh(e9`0P;<0T&iLe@ zPsGPQ{2GC(?O~fNZH4bWv7`kf2oM&u?QeEs#68yn&;Ka`AR7w=jyCnNiM640?2 zV{gUJx71!u*QOhR#(E!5iP+IDdP=s-WI9Nuxx{U(_4^&Zj5bo0)-xB8&77adkNskw zvfeMlBED!Hn-z5;!hfUpi7_avYYIIfZJWq38>h7=Kczowf z{wO8?`u%U>o)e8*@(|AIDQAN7&A!JMpYBw8nAYOAdy7(O?IJLg@srQ_%}@3k;*%bj zN1#V-*k|4Kxwca}?{5HC>*87b-yeffZr|($l4Q=l)3M-w!z#Cwe~BK+qkJA1f?sHnoY?9A2C|nG zb|rl!GGGqA{JfxV?oP_Nc-H;f)z4n^!(?OT>}*@sAk z`jII4Mo}*wl{Jj_$(8p}onwvYUy{9}$Yc#zRxhM_pEd3DV$J&6RXj&-!eB^m{0jl}_Bit?d zU9>{2v*P~~y2pFHD?a7)BM}J4!#3@H>HBc+vad3a-o)M> zrS(6vckuySgZ7JQ!B;qutY68;@Ke^g6>NW`*YjEPL~_TM@g<|rN_AgB!{^OWGJh&d zBAz1Ai;I~-B6oYDTs`*kKK8LXz9ci}9oigI@ajqBv&%S})!c$V@sw+3tjJvJAn1G2 zoZQZs7}@14{T6R?2UFI`JxS$VWW6#woq0+;)4FK;1XjI`{^Ec5sky*XJtqhDXXu%H zo$GKo5x}|OFut3^wbjzIoq9MKJa;SqmD$8-nw;#j_n04)?pfM^ySLEjMa6FSET4{5 zFYBkwrXN*y3C=%>5?8XsZAj%QdPwfgw>4~f>S4XHH*4nH#GBoS zPiXOK<9kz3Cvu=A>p#d(?sLj&IyPuE*b{x3v&D(tYz9N(o~~oLD$>Nv@8iv%{hMX# zZPdd!RBfR41pHq~o)S0HUXK+?$W6GgOZj*Xj3Q0L)QH#U2-KWSlat|Tyz#Uf$|W!D zv-okkQR645x?j7W`aD2y6L2dt%AbMmJ>Rn~O(v~a=q38fm~sUub06tF-rT0vZoTZ& zMtqr*UHJjM^kMmsAs<; zYo5D&&K^!De90X(@p8XbTlbs2mL(&x&O@|%8E(C)WHJRLhu8I}G6DvhVBb*xH+a&9 zRIFhOlbdD}*e3I~nc!*i%4XkTqaNbDzMVYY1(K}Y?nUcG%4D9pT$$~le}hd)&gn%3 ztRL!Ow$^eMJjRHhsM?LRAt(H`tom+8iF)K9V{>kc=|p$3R~61(LTfW~Pqf0h>|kHd zv*M4}@@6%Xv;%a!Ps{AkI(ID`z}MaE{J)Do~H%iVlbOunU&o^~}~9BlO2Lt1lR z)QMzdDGbZw>fPRE{gMpReb|sbo+X+uH%GO{+e2}t2c8UoPxddmu?o4tE4PH@JY@nb z{)Pg{uHD3wiDq+Iot~vcyt`J@f~?w3~CkCMdIjUf%@% 
z@9<(cn6d(he@-b%@h7N|Y~^Qy_dS^9PPYr-IRiY2yy@dN5wTyAk~RMSh@9jO+Ws_t zq>_VBCFfsTyiKI&E8tjy3a_HsYXv28zFrp;=fLRi?8#d=xWMly5M{ksn|9oaf_>S; z%)oyR+RX4eqtiaP4aVOwX!0$YDP@qp!f0?Ueao#^kCBKoidlPSI8R571?W5t7mw4& zCn&Z{&*NZt7Hia65B;>%j%;)=%dV&ITi_P2knAfZ7vUJ#=N`e=$Y&$7+{-{So1`R` zVG;ZqgE}Y6rTv_T)r-eaPQ!BcIZ4@HqHm&;lLzk+HLftC=jN3y>aHY_zb7AS^s^Rj z*)dB?zt&IoE;f}9s+q4`WJd7-%@3A(c1G(WH&KZHNn`H zMc+w+=D}t!I3{T!nFX>M$;vH$5_91Z5B97Ce#MH_0MSGxJD^zZzu*`Yk8c+-GoNp9+RjOB*5$kt@mB>Myxpwovakeljzz-6On&#|l5<4K}+#+zeC zCUa8zZNZbZaL7ni2hB(O$qizEf>Z2UZ!I)q6&KOcLs9lkQnwQgbK07olgn)d*t7S$ zUW-qn)IG|!DE{X@;{ohlf2H@D-{ppk@>Wo5z}=R_*Q4!+;djQR?8a0u8(ReG>}l;( zt2h2{WJ`1B{3W0W7q_#B2XQBP)F%}3IZEyLrmj}1wr3U00zX6ZM4PlGx5pP&VG&dyTe|e>}U?@jjcY zrH>2BwfF2zI=Ef0@8ePS5*B%vlaKjYn*f$)!M~U-8_71*(OdkCvX}fd$~7b%OSRXs z7|C*%R$J6RpFJN8s=LtRZ6$KQ^X)Ky2;FXjL0|2Rpp$QtvHB!^H0mzb-i!7FCW1Em zvzLHzn9qCB{d%xWHVVdXqB0IO_B3~(C*o+GvLlTpPr+=c->Pb5FQAH1AU6u+ma7>=9-o0#EW&D15dX2Q>`L<5KSr(xvE<28 zGn6cJM(^2dN;U6tms?{0=HhmJ_I3yke~rqI6s_-LFLEmQ_LMWK-w;X`gHZf$J&jBYLhgENK)@A*Z)mc_NiMlPVv$EdL8Fls& z4$(_?8ip0?r!&ZGPFrh#Uoxww)G2upWR-qi@jWNES)*;FG4adKUPuMC;sJ5E`Cxag zmD-5ELst0NHAx(M*4MXy`bfP0wOVBgx|E=Q__DCklHARee1Um>Czz9`Ah8<5aOgZ*xkx`-@1rQ8Rk z=+J`yskO48N*S2`>VKg*pS`=Cb;_Nx7t;TI=-mxOlfW{Ly%`JEa(bGCLa!AzX%I-S zWV4cQ=rvsU*6!1DD0dv4%qqXSS-~DWU%@hFR&=ZWE1_OHwXO&8*~WfN1p8>V}F2iylI!QU;og<-^oJm zpGmf)f0}D9K-Zkbu3|wFH=H=tJ*;@<{X5Zfv^M`z$Vg`8OHg7SDPPRGB?Eb42;1u| z)V|c`rEK01^Ox+6C(ltIFbqbAWCqW@r;)X@_46+hyMv8e2mgCvP|C4+rV^(ZpT~Gw zPwnAk^#c8Wt?kT~CZS|kc6u%SNc31g&!^GPdL*PRT>j45<_xZ>`j7d|&DzP8oD5^> zxvajDb$0^T;zjvWd>cuNcNz0?m)JKjNaW$!=(ZWRYoXWcS}up0D?rvp+c|M;398&W zvY%|{e0L7J_qdU%M$zA|LHx45zg8|eRC6QnprZf(QmQeS@3J;Y+}9QAt_MjXjqf9g z590q0uqKz!KJ-1HR8Q18mQ+!h@ha+PPSgWGyOWqNaOM#Be#pA!%w`H29;?r}S>QIhIS(C=$FXkON|uQ+@F+#a_9lOkp{vxL=uJ1RmsRp4TpB=nGg}>t zyVKF4FWE`9pWJ}g4{o#R^D3kHV@8;)^FCAepCF8PYA5Ztz=QbH-faBZi1N#n%05R{ zAM<_7&PyUV%j)|~<7whf`fIN&>V1N$XX9J_Lejg^)y=38d3z2t(8Fmsm)o%;iJ6P{*FtW%%si+lCj?Lsd_dD-I<4$1Q530;YvL>tHO*~$H=j}TDeiYS4unox@ 
zHr3~fW+H>Nn_aP-l-3|Oe}MZ}sIU#4p7uR!>`UO<&ua5}li9t9p6UBwmDy5%G#W*h_UNS(eH)}sBUJgWuvV)0FVNE6R&nv-iWF9*Rqx>UO!9D+cU#!H#zvp4qOt`-p; z`ofd)VED3FJ>JZ6#WTlh%7~tPM{ViJ`$|pmEpghH>S2T4V%=xbvCQ`ytJBW+yV>`D z;%4UdQ&4rezOqWV3{DHlc5aEwnm_l>CGI(UcG=l`5G=dFaSG0Sfudbmlg;YR0((6p z(v5hT*>K_lvTwHuKi|fytWuMmD>MGbjP;S`;q)Z80!Q0+;KmGaJ*%ghT1qDRWXqcY zuhF1Md`nI>FNeidBr`E8L-hFtYD9CgW?B#b#MM4WX670Vvu^CF%slP1C_cxlI5%k2 z^>ZDW>WACe4_XDLXlpr=5L&-P%9AHPw`(R#^c;G13SGDg24}|{evfFA!F>aF@op1O=h$&%klpE{H@KJ}BT@rPV}mChDv^jaUhSbK!fRxzBO9lhyVJwkf&&gHQG|_84=r8`Y2; zBx}Ve7__G)1C%>Ujoja!`|*y~>X&H$9cZ5>b=h-HUjDM^u$pyNHe z^)ErUP_MFH;xF`x_xF+iEvPq24_ipW7#yevqeQ1H*GgpcQZRo+UYC-A4WxGn_}1vB z6Us~>`Aa}P0yI~U;NO#j+!K5yxoAf+AJAJLtu<#q`k{OsQZ@|+V|}}geoZyUxf-Vu z!#kF2Up-2!)oWI#Ekdx z#rlSwX+etSXm<~+6D65jQU0Z`E789mNN$FGWtiqpo@DyS2sz&DJ+T?NfpQXlC6mvS zY|DJK{~Kzqf>|=#B}?&k*tWMOUXIi8*2$d$gF*N+y~=uRcVG^C5D3SSrOW9+J92O- z$*lsH0pR{z59{ecXZ0>1KaZpItz;;BYl#j%6JImxw$)xDeJ&tP*;T(n{fhMaXJGj| zyVaXT+M`tB1E+$r3Funsvklp8NgDoS4V;*!W7HUo!aNeGV`$-xgTgHdsG__6MvAblmATvR$D71 zGeM%$Vo46_XS-)#pzTIFc{ey47aU6t-}sWwRcnyaS16r(t{anpMCd&V)BCMBvI{d8 z1s1^iH1a(V^^@NL z!Vr{S0s38Jr6b(C(8bLpBHnTLq0tuO_TxC3ldiK-DEXRN;ctIjDCH#_0OHqhp`)iw z@nZ+5qj!^GcA|R8a=SnqiMd&z_tCJ7uXJlNv`dQzNMJ2=`G6#KS9+V(=c}~nLRgk^ zzsb>-><6VCjM~24;5R?XHINl@ zAnu^GANEx;+wMuuOQlNZ%s=>SoIE%IoXSH%}lk@f(o|GXghq5N8nVY4Jths9_QQz+^IQ)iQjwKt3 zQ|PI7H?5wo%;98YH@cM~%b(JB_Dtf@k=XlcsPj5WT0+yV1ku?Auf8-AXKy4lI**3t zwzlMW$gNAg)b9iGWZeG}7Inx~b`u`-IUXDF)Chdp<;mIQLuBGu7UdYQCwIZkEKo8u zY$M(OW3)I;E02?)W6|XB0=LYDhv_}LF*(D3ylC;SM*h$!dr&1w3SCNlrN8`?{1lGy zDfN26I=&8hlf9x6-}CJss5T6xU-vm#Dk2B*hY2^2G?MJ|uC&us;_o2ZaX)JH(8d!i z%i(Bs8q4`BdYXNdWUb7OS66g-1Dx?CNvrAi&wkT$A}jtx|Kb<=4DC(~K;|}yMyZeA zW36W@(!A{2o=K-}Bs1sG{Q7EFCiVU3+!~hZ@BUt-o!<0siV?UAn>UvQ9s;^}erK+| z6E`ceLx-_>^JwlBv~#vMqt!pmldOfVr}MAS;zYLPp5$F>RVO>QvK}?*c|3Bndy)vz z#0kvM+s*37TOb*Za_8%KBhf75VIm3Jnb&Vt?{+fsWA-y6c5b-IjV{SRmYiCNvfoB- z0&TM7j0IUn=4)uj3UryFRwwc`3Vg}J{53mQ1)UnR;5mo+k8$P|k~WsziB*fg`l)31 zNmND@yT5scA){6J}n>;~N 
zt)7lU+Y$b5Hw$eJ#w{Rft({h&s;1S^+D~l9em^%Le>03b@vO=!^((mC z2>%*dKNat1m}PV~XWR&;87#xSM&{(wjn8vD4Xe?dh9KxocIL6+Bb6TDw>yk;Cr)nv zO74||Fi6z?F8#0aJCmi_K|UkV^I?%anGqx`H`zAO;&j}LeeaK^?~sGxBx{Hs%7Jn> zs5khD$3;JOA|Cn6wD=2poztf$wVxBWQR*ZgTjK4rhxW17tHJzBdkz0qGdpMV*vj|| zWF5B{y^`xYLMS8%@zojyyS@y3WBZ!#I)WsWt)+IB`UYtI}$yC+>h z5%2!^!b~E`)k$TjJXL!w;I^E#dl9wj!)y~g>(I^ZB=Ab_r_qcTNqDm1FM{*qG^7Px zN*=0H&~7FRotTV_OSzjMca>#c{4q>A!nG?*CK%Dbz>j~BgXHy!$3n6q=Y;MtB@#K4 zOb?w<@kCOW>~A;XY<;aPMyY;yF_wI7#?>xpT8BIzt+$mh&dnBMQLch^pN2{NpX1w* z%=j}|z-gr8q+(4u6m-dX_5>ZvedzLQ9S2#4y)0gpkSz)J=18f@}T!03@f@5y7${ycxWh;ZKEjy7s_-#F(!^&h= zxf^}>4ejVl2Xa%#&7S4vvXR;j@16w55R$foM*I=YTZ3&iEQjFprQXHIe+gY}LwD-K z@+J5mgIaBHJUbc5^d7&^dgOODzO6AY7>~OLQ2RZ0=T4USEmVx>L9)%gjc+;a9A8M< zW)uyla)aTW#fqb;dLL>d*+jDIQPJp`Q}5)u$I@ia`1`zJ|vGnAi# zCT&Q=o1P5B(brKpo-FM^HwGP+X(^uZ;|n|7PHX#>Pfmg_NL+43xQd)lQF0e~%m_WQ z;Cmul|ANYu;4u_sp4EOkaC~R3AHR)CG-56Iy5mct@S2e2xu{&Bm|4W{>_}AK4E6)w zYzFCEFvNF%9Qk+~>|Ko+C7z8VVL1t%!Pg#QZB@deHS;wNDQAk?iIkS45;B8fNh(<;E z24Gl$pG#<3V|dL1cf6ApYQ2p&N27T+V{CWW?#AP0AR6j#a=^w*va2`I>v)pnwzXBT zzShj*QLy~j?-YA?`^_wlWBW#$Lk!Z|3bVPaxHr&`%w6MYatYk;FjB;qYZ*KAmRdbP z5ih5%v@EAgOO%PX*mgggLH8rDjn+zkl9w?eo_g6iZ149EtkY0D8bnrlLi!Vo1Q11W%4_m1CQjGyO#WI^(}Sk()UC> zwbgewWvXc*d$=o5X9?*V?(>Bp%4yPOe|O>ERd8qr+S#}gPn|h_uK-<5wL6j49ypxc zl3lF9La>a|VjZ-a!2X`7$J}8zm88pSYW9?Ap?mB?<>?Pz;8`A8n$ zCaBfjyXn`~M5@>_epvQzIUbnRO+D_-!=lCTo%(X=|$yDSK)0xwr;Q zv3pHO#PuxO38;J)a628|>Gi z&NE=USpV^UZbs^EAmK0iG*XLg+0;fT_MY;W;Q#69-2g3{nk}tmv1hSOU*mH;^&SN4 zKx?A8>d&Azxrw-;w@Z!ai}m&=lGPl3*)2(AZ5MQ?LoQp8!jnNc7VWB|&}49DuX6~T zrlaXVG7&G!+8eBS&#O~znV-!C~K8}O= z3^S(O=rc(x57N4dII{%T>+3OoqS=eDQSkU&xHlmQZ{yDaR7mEf_^K||Le3oG>6Q^c zw+zm+I{q1p)QtYzP8zabQCctl-~Pi7-;>iIPiyO^*Y~Azb9`dsN0^f(7ew~0uG30w zavy*9MRX^=6(<{g&bR*Ds#JD5N_QF_iF3dA?T58-si$~2WOZGN?jNuGiRzZt(Ai!2 zxLCzzAENa0P4T_88nu;;e?f9R9EyK6NNWj?>{KKZ;m=85d_l6#POgj}+PK)KWa9tQ z|DMIVIoaRxwgkm&_PPvdI!wI>VEadHltpP5r~sbo%kB5B*MuUFaY@8Fhu3xix zwvfahk==`6FoG1Ng&%s!=kW0;zdef2i{_$6@rU4hxpO*srvAwR!&v^#480y9pkXS%8eX 
z!`Z1=>6)xjT{YtKcB`@@Q938|^|aU#e2=5x^<3)?2v zgE_yC7xqf?makZc_MvX2kc4a?_ zAFWC@ThNXb=$~kSDZc%M?P~_2<6zLmDDpNOlfUglG`bL{rlCqpqhmFtdT2E{?`EKO z;=f;DZ7;RT`DroMZP(rT6$NAHbsCH|)W2LB7nOkyK4N2!AAQ&^5W$wPZ(5`kDl z$>eGsYtH*8@;n`Fb9&kVj8AGgcaG%7fy~g8b9}PavJaIE^odx!j6F!S`&^LLH>PZc zSMp>AzNd^&6XDSbW|`fNAltbuDE(DsD=KT@HTcYC$+G_vuZ-haoW!BTJ2!bNCK*** zsTV)V}hlsqC$!1^i*?+5WfxMY5?Lw|`{iKjwxzuiFwt}OPehU4`Tu$0B)s%r1n zOMjX(4ld(JNA5SPr^lrQg;yHUlPlzFeD3JCCtPlY#~OWg((=u@;=XJ@P) zZd3r>W+ld`^B}vsL=TBe2o&+tIs-40n_~vpJ1h4Pxq8C$T6q6I@XyX?;)APcCsBO^ zwU(?O$?^9#jE_L4GfCRxIJb{(RMtc3cJh&)#K&rZveVRkzu^2C`X5Rj5;6M^^ckqe zX0*(05UX+E3cqPDyE|8+LnZbno)vw(SwZp=9g~}@PatD^@pJ%Nl>D+s!1NgqT?y94 zWN$W3CznHf@ou(zGt7K!2RWz;o^U085s5y^oiHfzBCV493h;qGU~sf`7O@2Xi(U)kxn zn`D&*>vVi5PqKy@dFLwE#^-o4C&s$EGLe!VYB!;yS$R%Te-1s|gU@ZrKu+IsLqVc~ zwwV8Ys77wz8&K%V3*Prbo$R3{D&kpoGucTyqs3fSIF#5#iW57tK|6_=OO$ycQ?nPb z$h$}3^QLmi_VE{5kkkCEf3y1h#;13ckN?3}_>^dlz2v0=t6J5!`%yhNA|&rZ?&-+w zA7k+*-Vn)w8_%fL#+cFMWGBg4Q|NPjwrU@oGM*%^Xe>Coc#?JJztBIK0ngQbyew*= zNIWv)MYFDujepb9b+9SC(Gbtg}r?cp1H$jKFUlbk2%R4Ur>D&nzTp9+(x<>=O)qU zUFg!?SUr^0c}L$=j!Q3y^hpd@>k`i+RJgV3q5^FzY{?{ z)X!D$PaIleQzoFynW(>u=2R^#Y4+@wnA(N-xI2pe528i=K|H3=o*$8^MIcNLmSkoMG*8gm z38*-QZ2y~tWxkSAuG_&gNA3NjIX6(|mZvj}3Z>lBi70u?o12RGJqeh=DP&1hpaw1aB-|PwxDJ=CV)-}!X<8Nq^8RRaD zv#__x!a1I;{V%kMwA{$DEoQGq=_`JPOA7ubo7FT@(a8LI6M84VMD_+QtMir-eXmx# zpyjhDkR8-}&>_3+IZdfTnq#%cs2~59+%S15+O7m&#>)9HYU@c|<$j{KsVl1DruP?i=OZhWVaDB?NJ9%WJdeGdi$`C=<7D+Zvp%z6^d^}90gnpgus6v{Ow3C1 zI}%)p<6aJ)++@@Pyf35d$E2ey ztIc!KDEZv}K#G40-w)_P?ib9h5{b?@N^5oX(#zQSSK1Ljfa6*7(%#1*=$ze&;owWQ zf!aks$-r6i5vXmgc)0gjk;Wq+KUvw9L*ZdcePtCJnBwnqyXVQRQ2PBp|AKgdoLAH= z`5xr^oyGU~avWN4K3*o%JczX!=wvK{=DrTtjDW%O9v-_njo8J`~Xn>~2JXA zNMU-77e%r^9!{c?DPasvsIElLuM)wNEI*;h=i2Ov=A~>P@z}`S&Q-OU2$v)E+zehf zfT=&RCc_!G zYJmL;a{HLS>x>%NS$zexwamA>fc6oe2D2>5M$iake=o-8ku3Iw?BV02sx3?UDCje? 
zCwe-ueettByMX*_BikUByRmV0r}1^J8dqqo3R^pX9R69~iOU=hvJu9UjruziP8q9n zPw*dT+FH-!Z#Wi2Q&Hq<81K?YdpZ$LE@TI{`*t@tTY&5~JWPI%WTeepD7VXf$Bxx9 zN@c%)udy%mY>C5Pu|LTv9xtAs8-FJFUkyAhaHcu9pERSGK}V9k{#!Ij%*iD5&pt&< z5+7gGQ{giXB+0qdMftn&DElFwk>upSf1iCyM#51>l==9y1ivPs!vOqvl|?_7t~#qU zqL;h~l6@`fnU|IOJ5Ei8?G2>oa_#g6U;K`DlcK}|#0%xOa9IW0(@0zPQK}Xz<*$rs zd(kZ(HFapf94$qzx3J%vN#kGfwt?0bY9sLji3HAljq`AEnG#LVCYhKMi?9iWTBB-i zf4$y}JhQ_##;asL%BlWAt+s+!Usx|8OGEK!DQUc%CS->vxkrDC>L2)?_>Wh}Ml1dF z)?4m%Oy<9lT1(lCi9_^~^?&?_>Y-Fd!yVvWtffSkZztW!F_Lv;4N&gE*MsCE>;9}< zI^usapX5Yil|D{D!Q}s6pvSr9Ey?0}D!IG?6qo4nK9o!Lt9V*XCzaK}G7}H(V|A_q z!DwwvrpN73dc9J~#4tkZyUF85uy=%OTW!AVeY|dRUuIu$)}zTw;SxXZ-2S@?&)b0b z09j1t)@04=p#PoP9Hysr^mvZGa;tg#XH+91Fd?RUXRjVL;M}5<#kpJ$y-?8oHzS9Io*w9orM}_qRdF8pYt@a zhP~8FR>n1Cpq!C5^iB*(G9^5&=Q+l=U3wYCKEJEi>;Naz=tw07!uLxMW$&e~b-+TL zsfG7Vv=N``SK$@U?8uG zBS~{3K=LOH)&6$tl*TysT0y;6*p%!nC+@4Q{*zVbZV={HqC_N?^FBEx#(+EH(h}CO zD~jFVb6MXuvPLzPN)FpZHk^v*t@Ze&C+o><<}uf!bQf(T8&IS(YoiS;??mHITbzkh zZucx_zRB;u#osA7aW*ZCZOdFZzMl2aJYEEkp+{t@EgFr`+dUv10jhX%#LgZ=M(TsE zwNiBpNl0YPli<9X4eAfVzT_|&AL8Sl+jZgxRLb;IOA9;U9nY6l=rE9;e1>cDwSR>E zlkp+<0GzJgeR$VjJCh2mhT>>O$qR7jpcP2&hsX{{SGXh!e-(>!I*cZhl^ekRbNW6Q z1j$(P1^AD~oyy9uDx`BH&Yn;DZ)P3drKh>kF#cEBnMp30c!ec$YC3$gXV{!9#)`%x zY%Hs>)=&IH9#k%oWaGTeT{=gi%2e2A*KoGJvd5nMh;3jsg_Sy&EW|4~9z@wGnE|GA zQE@q3JFB_Fld7oD88sd(<}{7q_7NI9r0<(>c`hAEJk?4Tr!r~EZIQXxH|GNJZEuI3 zD{#6KsN=196In>^kYr@J6h$7=S8i_3Y$)sDk*GQtpMC=7V_`PeYUMt-jKP_*pvp;F zbM5s3`64|hvui~%)JLDaz5fktKcSPWS%X9!Bv)@vr}v|OKWp&pmnJ*G4xCuYE^Yx~ zV)Qa*jZ&f^+Y`y0ZB)wcPu9E5KcW796uwH1CVl^#gylksyj ze&p`1`smjJSJvXfCcl?zJsFzEfT63ld#UpP4qj(GObqo(D}uXOpjvc4*?ShiU=o?1 zNM>@%z5tfTfqFTbB!=xXx|ZniPNX5b4TotZ*&b$i{}*o`Mz!owB%^1tQ~sG$Jfp<% zdXC>-RXEJRizkZS#`@k62FZJr-R3t?Wdhz^ONJBK(*`vU0bw!=e@zw_us9c?R2N)5 z-nhEilf@`~jNeg4laVY(d}6vdL(7c?x&0$?&MU#%3U9}=Yn!zhKa1=)CkD8-HtVR@ z*C=!>*?f%lUkCDYa3`nhU;CRm-J33n}^l=MKVt5xtuYi_Df1 zomJm>b-BKqla2TbRx^XW(^awxwv8NMACCREO`EWF~vz z_1U)AfWLty>y3CYzl$Hk?T^hO-DSw=v|`oXi)}fY4%`XK#k3}?`^0mc;d5=P$P;~z 
zPtu8r=f4WDh|_9ukW7?v%)*?`_H+-GkXkK z@h8_tR?VfF@eHVgCU^Rt-I2t*l-?b#wZn^2hp1hOO-aAm+1Nq5f2g1Q68U`6|30~e z#N|m~$!{q;YF5bEV>uE}%J}<6Q6_so**Q6~fS@XQ$oE9MtRw5cE!w`__v{aCX4_7r zQNKp5|FAoUl8RD>j@m_?tz_cL0?*L!#iFgATD~6?Wyo(!Blm@#p5eEvCnw;>x!&h~ zmfSA+I=fsC?D2uQNQsZWr)D{lkqng8NPKP7%Po%Q(T!8ltwfh)IP!oxiB_Kv$NwTh ze?_={P$=mpR4o0C8JM#&|-6Qi+A!=uSeOAYRotHIEwZxQ1;$JyL01rPA{)V=O2UW zUpTqXr+fWegCoyqc?L>cLiak5gSq}bKm$9`_7}}8wyW7rf4Lj7FFq%u=2A3UY8{X> znE3HU6q>@yRq>0R=X zpRA2!N6(t2le$-vmb&P12dd_-;oO(A*Z-=l{!>P}jci1+;59QhxI$}9zo$5%X6WQ|_>N^fnvA@MdjCP$M`&PHLE+ppc(J6A zfZXBnbM3T)V|V;d*5PdGQ!XL5Vo~66BSdW~>(spsbxsGzc#zd)>!+f? zcer*NjwPSVw|Zy}u19e^`wk;X>|h*ji89X{m$M&`lj$qyN5+{x1&7bm>rhn7E#8f_ z8mi7Evwh$h|KHr?nOn!tH1dr%hIV1icGBZ%1*g(a7o$h^>Vo+^lzfO>Ez(+UdrZ7h zP7DXoubpb_q$|}>CE2$#_rDLM72z6BrNq5nua9VF`do%X$?-8o@AXJqMRVx-`g>d} zIeGcWSUSkFNa;q>Rnw=&=2Qo~%iW&OlZuA=T%zxG%4`Dt72sM+Vv=n!H)?GG-*R%B zmH0o*hb9#EVmukjUQaW$PhO46BqjIC*GAvuD!EBp$pMq`w+lM0LHjS+r91Fp7W}%S z)mOfsfELN!J4$bJVE6KL_??JXyT8d5G58mX2??bZ7YxVcaXU>|v@;M;* zz&s$HU(@_1+B>n~Z~M)-H$zK}K~UD;-2QODtm_0NJE>b4mHLvVI%F=Vp~>9w8QssR zTkI{) z8SmI?;IXEe;-Y@+nO z)#89AcjY%~9e$;+ayUDXK63ycUmfX?VZ%tmqY2><6LE*l}(2e+<5xf;he}yI) ziJk_3_I1DUe;V3N(Z+}R9;oIjZ>}rkrl!$x9!k#g_G{25L)BQ;F?pI3ah98AXQ0A+ z@Jjx{{dGnd(WCvH@w|f4vq)aXoo#5bw9tuo&d)-DonT1ri1*PUch@9m@(?&D*Hrwc zzJ>GE=>8B&^g_*vFztamr4b?fR`poqf%?201Y_BXCivN(Y&O8_?5<5$vmAUr)9U4< zI?9@5s;BWf^T<**)WJR{pK#)n6K56Aga<(v-_Eni+X>p=1JXWAUNjq=rwHi%m zYbAUYc<(`hJ|LWrw%Lv7>QgvXmW|H7UTo1%NkDwy+JY((2f5>Oh!OK=AX_z-1Uf$Nv-*^kk?ty=Lt zUBPNc3zy-@BjD|eUYXa$Lm~G8cZPZNu&a8BscuU%M;cd>J?-Dl%q2-j z`TiWZTUx_3_4}H&SYiWnBh*igLl>B7B-{4up0)N{#d_&?`l*Y)H-liQ7US_$oh!GJ8Gjx>UPAHdc0N8_N^hfQ6<`LaU-vZLtRoS!fBlinsUeK@5t>lFd z-^OX}H*_dAAvu))t=0JT-s8<2y0RH}5+S+`4(FPW{tsJ`9Apj9;C)=JWt51YdQJdl zp-gto5_z)-L^qMh>(M9Kv@g~|{E;rw&jz#{q_1gY;XlU2ra15@$PX)4#krL-c@55^ zSEr#`)~KJ5+StRaiD!bUJ2<}fBpF1r)_%44e-X(~#>_|XB0CPlJUz<0KP!_Mn5=x` zt5IE;k|tR9AAD5 z!u`cgNcK^VfkCNE?o&&@rTahALdgr^TrlNZ=_kFHyiCs5Vlqe6{@-`;z8L@g9zsgz zZ2gCNWze~+Cohs~kzU5fJ 
z*VR7+ZdoDymi(Tpbo>`4(vlNML)!cR-Ja3g%lMb?XTW_qIl9pQL>5Kfcaz;@b8AJS z4Iu6sX=P_FYPlb}R6!{l9qU4CcIo|36@o-ImghOZHK6hfB^5 zhU3+FFwX8rB)7JA*;A=r$VM5uRoYdFEo7uvCEN^a$8t4Dz_z+*7c+|&Nc&0vYrMnlf0f~Q2{ne0i zu7+e5YC|rY)9S1zj;3dO*znA0&tMIcYr7%tz5&<1;Mwna4Ynn>!f!TWwemLi2xjM| zssA^6pEI3)AkE4sH%;urftjRah?-TkIn|gtQ%jjA9%M_BRVw!+EJDq>>}~E#?!y94 zF_X!u_$qd7I7yzV=1E{!h)=o4EP2WvN7>e7Z9_4~Xs*YBa7hNZ^~K1#ij;IhjW$~A z!#34Jw>`dZ!mmUEwX<_q7Crxj-lwX!03G6~@jMIrfKti1*#X}ZpFR}U<>jo5z2Tm)!+y=75 zhv&YutjBYbm3tUp1ob3y=`V3T=gOBBxF_cQ(t>~gG_G&M@yv`e64xQ+4M|P^0{h&3 z{V&+oHIJ^K#Zu;qK!BrcPsxMyzhlUDGoYuB-PdY0uNfi zHD`3ANkVSnn}>Q=lc*28ugKCRYw0YbVZ26@Z{Tv+y`$u_VB1Uv65EnH^AdkM9ks*9 z7r{PF4=Z4j6+tp!byf0X9Li0lx$SYZ8NqXCQ48dw^zeScfp~q*F~hnHJu*w4ul`i+ zB)?4^c$|s~<4Hpowc{Bz20fE8>N31q5 z#OKjqNxVST{qe@1tjtz(v-pGefLkp5(d^?qFeZ1-zhS?J^D5Y`#j}-oG*au)>knb|1=;9}8oRV~6`9G+5Nk=oKlQe;fZ!<{PuX}R zZw5^(W9MW&k0|D?S+#ctRkHE4^fL;ze*u>T`1zdP%7gxMz2{b$Dx^L2vQK&!*c$uR zgndqg-)Q=q+!wzDSzq=fbeT=RFHw3j4*bpYXTf?h8*&v2Cl7BExF;v^Qd~Hm)_p|l z;<2y_kE+-+Os1ddsFWy&+~1ux!t>kp`vAVi3o<9tP8^Nsxl{LcQqzZR+yV0rG%p@Q z&(XNqxLuRv#@{Sa61i1&J;>r`&=#~ef-H9$Ohlv0yeY|gPcX!P;tt<0HUEe&%2iJtwA?gW;|`Ycko zNv*kRe8Q?dqs4esW;Zd>Pb+bI%J+6U-ciHZ$9~#cr|d64To$BnqRb*%l#G&z`^u^} zBYWwjr437R75kIBj1z&`6%MgOlR&eA75xcai-+M#P_EMdcW9scKfXb~_-ExLV6oZT zRa(AGTZu;flpM}3<{3>%>kFjyaa_2IWK{?4^X3w*V3EDW#_E4ZBA>vSGYZKYhKiZR zbfKv);phpZH0M>>JNY>(BxiC?`cAQwLuSfCjO0;c)%87nn zTG;C3Sp7X|&N;^f^ogh6ukm9 zxrrat(=eF9F0VzAk74zazm4hsTWr#;YE{+mHd0$&Yb$*|=x;3LNVJ(nYU{&gF3c_` z3(3v7oUA+!rm0$e9o^#zTL)aDw7*2ptKh#JH{U1g9bx>9nrp0_9?(w4zr<;7aKbyt z=Ti%7n>~}?Xz_EBogDw&aPCH2Zff41^Mw&+_zTG8DeAwkr8n8Y`Pwwh5$$No zVw6XxD`5Gl((!elOh@7$S<48Ly8zBJvcwks)$A;hN}rOfbzr)V{?tX`H>~4Ffj`-Z z6Yt*L8nGS=^cLC=Wx2!d@jBa$yRV{F9@ zGD&T2rI_2A6W92PzZE>~kB-?3drZsEq0?rT^#ziec;!0y)SaZ(*WUa1mbmtYII-LB zXf?A7*i@Nq1@G>lbE{}&HKn3iT~OjXnv?8)myzf@?Nj7-kL1|+C(Y=L2E)n7@BN=) z-E@O-|2*`2s#xKk?%Q~uvTu;RlRN#-Px5f&=T-8aduvlC-*fLr$@io5sg&U%_m7nP zLE@{BY{JRn@f14z$lrK|#LwjoZIt#`?jtMz^7(H?n`t$Bg(vtHZ5(LFGa-S{mFa;O>eN;PYcuI0*p5ue_S!2w+>?uG 
z-y!gQ%bRdy1PF2~!r`7~f9Z2=#TV#@Ot2-)^X3gt^Ya5WvX4^x`)B{}PE)8{+GEN4 z8+|_P|EJ9p^cY{1b5LiJ&m|uHi0nN_=5jaACm`DGQ=}u*{f%{Xq~mo@J}PRQL=VOk zHX|#={yyzT=Pzi?ha@f8=Bm-tQ#?;j>*P6qpS8T6CS;AA41K>>rZ26!M1MnQP^Cih zZ(~*d1K&ApcQSc@O+Qw^doc=IqdYF!+3_tIh?;8Q3{tqzlRqezIbB)yXuSDA;<+Ey zUuC5hf-Lt!+(L2_x!jNRJ&)ec8-m+*K}d$}2-5$jUbPsW9;G?LBg z7p!%;VnoQ=I6nXB>mAf>iyrTgj_jzECtZozxtwHgA&rg6=8IZNwy?$M{gJk|>peae zTXA);*4LBNZTij)xVbB%pONfgc5=HV zel{605`TX-sM~Al4P*3w^p}&LNhmhY|LLTn2|Jn`5`XpND!P%F_V_p~!+}KTKd0r~ zcbc2`67O>povdUAlU<(!Ai9a%o?S?4PSSG6$lWkn>Sq_|mXVX(-xD96c%A0dtDN3? z;^z*vlTG(|tzKw$eH^IAT4VptyWE*S6U9LZ%e_i^tab*1gYw@jGp7Aj7%0X*Av@o_V8I7UFvgt;c%L!}}9i zmqb3zz}?YWUe8{PM1xjn5#O?8H@cskhezYUI|Q_I$VPWkon5*k;M4;@%fdM;qTF#j zk#uZ=VRm@Cs}o60`CcS>STd%#rMzkP*Hj6&TP(Jgr=;yuuqq(4Fjk_EAM zAw!pOThT66LxFzH8svW9vN9$A^R1K}4mVLcd= zx34d35+T&T7)QrxWxaN%f%0_k5(BaeoYmlVDVzf0dY zfMpS?CI4gceY8fGdSK1^{~OQuT`A$YUeh@JEIctBNT zPmZJ8L;TMyJR?RK{Uic0pQo73KaIaVl#bP$f)`CtWF|}do~L!$+0mZHODmaYbLzd; z`%gX14a!qcb2@qYDg2tERyAYaM9__8skWoS22kAv%ejSYpF>vTOV^#;cB07(P-GDt z;`y+E#W{iOuY~LBfgfbZSy0OcN)>=v;FZS#LW6X2-GLnVM z{nh7W-ul{MK$%FWf5wTwNCnsFH>|Xex4P;`30yhM!Ae* z(Yv;4&r^SyRxc$v^HDZYl*t_SARBf$YV{xk@rawMy@x=boLmdF@Gn$L1WnpmM-J~o z^W5wm{v{hjM$kbh(B1kbx6;%C{Yd2|kfC#NH}}a;1xq*F_#=5qrn$+qccNC#u?M_W zAGZ|aWG(&W*6tmj*(p3E7ca$8b1S-Fe6Z=wCKQSC;no8+`xR%l4}oIk^{_ppvmgt*n{686$o`>@cC9A8s7~T#1#3OAAO8rGYtI>D7(dAg}=RS|@ zki3XP+0Ph*-&tKgV-%|m;*G4#1pN*mU){|*W@#%vX)6mmnJDQ%pPVSI@T`j7m(ZDM zTG*geXI!g*vY}%Wedb>4zrd@%+R6L$8qS?SFMFbIe=WD9X%Eyn+K8SJd_Reb%v2&* zGnLjCC8zlPyhojJ+i~Wd;=>Go{ zrIWj(RQ@Bh$-c$kNX(-JefIwEyTnu+ulB`oijU85yiSWBq2di}z<+S~P&8jG&gxRlWjHUKF5G>tH13Fm`br9 z@qoC{%p$kSoW_oQf+{87m-e7oS*(hlL(^W~or2e=gJhkVUiyh2M{a;QA0MB=saN5+ z6)k$9{Rp4WV*%bM${m8<=dnC*gW_Uv#p|OCx+Jsb%UaA`zVT(rt+Us7^D94zz3jxQ zWG^n!SBdcWnRoxj-8b0TlRe8mOzuZ1?fRTk(C15->_yMyv)b=nH?+KlWDhg;TuZBZ zdjC1uSge(D`b|#0$taM0lWYC$LO<4^@i}12O}%fZSMn|DUcCxaUbf~s^`E-=uZUrAt!{5@4 z#<|l=V~nw_(XfvY}b|=X~rW+H|A7J8Anx_G1C6B)i1l%v`=BRGudz|{NYgguTe8gMS=*ew 
zH}JG39X*;wO5XpO^dK1}m!f`h9ArOyI;<}7w5cbRwErg(SqFtT_&mATbBe7S>o?xH z(?J>kfQ)$qJUv6nufQHpjzlFa#?@p3o$7O6+&zc_tw}|8;opK=GMy%COjfM7f`3;r zPu^?=GlY)SS2t1SH!1%!dh!hUt%{nZ7^b^HFhwuf2^wam)rnk8AT`TDm37sVT95y8 zrDC;P!E7>nX?@Z2WpGtPi<3|!krxY#UUOSc_M2K2V`5eFfq&rrNE(nc#oVhHFQs?} z=eF6biQ}ig85O4B!6-PqN6TA-eV(45MXlTvkkPpYD;X)g5~WYJgIC^Yke!nOo-yAZgobEdD2pbG>$7 zF^Zgtk{!rIPZ)m%@0>^cgcSW4UB=Lc!@-dKjychoma2Ms7CE^bU5BIce!RYyhQ31E*Rc)< zjOgzg2NU@=x{#W2=#wnAH*5bR{Ux5{5x8u|yW_R^4*uj0)a*sxjgrY@8XhEvLk%?F zQ^;gI*OqJJe(R~G1*OX3WOh*&;7>n#vY4i?Fy@4-iCnA2R^Fxc*>v(H(40;??;>Ms zQ2Hjc%c)`;7-x0+OZIZSQQ|VxT*`7}Z5MBo+*s6{d_?owkjMD!HPqklv|Y~}|8g*Q zRC<8YOGtZsd^V7=>G=L2nBt2NNlJeBf0ON-(Js-6xeF?L7TF(dN2ZeXJNr3hy*U%b zb1y|_G|Z`HUlNhKpUx%u*>xC$>zCo!SiHOvx8w75JU%qnT09UEpBWFG_{(LN`Hvt@ zWI^JKCTXD?dCA&4x%HD1vW}jUeJ6XdiGGU*Tf9JW^XW9&IgaMf0%LCVOzx=IsYzy3 z*U{-YO2_B#Qmtm@HyHh!+Matqp1|W3DAAC$4@G`%G@5V3 zeH0$qeXhrLCHMb4I+oi{rl7_C!rHH+6-!u^thHV+;$;7?0$MD_m)z*EOOFfS^{pNf zAC~ocq6X`L=T@ag!YDZ_s^IrB^SIRo?Q@4qDMlnWsm{>GJflmzJi3vz>&R@lpB>Vj ztbgv1+HDOs2A-YNpNFGwdwK`Rhv=yfzPEH1aIdyA5Oxg;j} zIo9a!D{v&8)(;A({!cZggy?^6_38K$7n^8po&S zQ}D-+KPR8Ec(YYK)<0%{evUteUhiz6Z(^$jkS2sVC za57#Y$=R3;;QP@(JLU1#$erJJ8gVzG&U#u>pR9h1cXjnQrx-o67ar@?k>vIUS@zE3 zPxd>MPmYc;p2XL%7hYw5^Y85S3aiEJ3T#7_oW;fC^+__37~iZ&&e6`-G}3m%|5~k_t$j_Ga}Kss{o!D%sJB>= z#Uym9`jN)$uzjV}0^`90X36(@QlS{n&SlRVdehrDyAJek`R#xUiJhxoSjeUHHu<)q z*JD9^uYQ(mKRF`eY1S0~lgqd{Ofx1WTUMeeuO#cYvR%`R1Ka6I<~Z?(%I(P;{65dl z%u=f&ipFm(bGnd3B>Ky%>6!_{7qckXSkbq z!epOG*3883=Z1vm!8^vY>rm`3$}hyz)%v~NY`BUZtC7$b$k;F?F7W1h9LQe5y?U5V zQr^+lB{b*H+WW1ijkJ~+!GEiF9{FyCE~!zb&g(|>y%HXOG}HTo`2}`@M3F z(5bez9;Nk_w0ji2`vV)1tam5r@foE@8q>0>%8l_8ScK$t90QX_je|L9&w6cm0p&2( zC^us?K)39XJIiT>h!_c4}I#* z9(|zi>p_+^>k;@;9=`Exici2reiQMKY?}A83CUace{0Cnr$b;-oBi4f&pf$;OeFf@ zuhyicXpr0@dxDX-HYb7(YAa(u;NeFVAiS@n*7={2u3fda>HQ(BNSj)(dCPAlJ!#QKH0` z==a9=pSSAqlkY#5LHFc2xk>$FS)gZ&6WPV+dQkZ zl08WewMg$tBrQ7`tI?}IYTUs7q_1Rc%}u?p`J1x;A7ghO_vM)X4?p|9Z_`j?X3)?! 
zF;OaItWA_DByB?y(kAUB%ft-Td__s6#!?a)HLX{Ru|=g&qFs`tMWRyVaX(+@HNG>y z-+ljaKd#4hU7yc+p2u;#_hUKV%TctypZ~{edx4(oxwnn>{K?frL6NAPcz2BUTMmZ% zldbDWX7bd=zpI{~^pkUp@aX^ilyBidz2bVl<>zYG60KC4K_+y`ImE@NG_N?#$TJ7A zHmM(V8A!sp(rf*S|E2yS^;}OxQcq9)313Hp>QzuD(kEwJRqgDOMY<|Y$Z2_~REe!_ zT3DmZ6|owR!gn_5pURHqUFH9u)|-$P#ZY-aI51 z%KqrHnT&m|rybhuVz*AwvIlPFY&GZesc@IrrFAGg!>lw6oi~9g)+gsg*I82}Z{QC5 zt@Bu?x#*TEBxlipk?8R%y43v@XtEis*#kC&&lQCPl%B5s z$~q{$`#{daP?Zq-Xpog0vx>}BEuocB@5%#l5HTXeY5D9yDyAFq$pwm#0x(f>HS z&VJ=}(v*mn7tp&4NLreo5__Igk85F)-G2!z?(;Y2Z1E^vP0GFjd3@XxMU%LKMn12{ zhq=b7&E7x9k}M;wFB>nioA=G5tfe;@Em7-vml!|Y*L)wRhhDz*r^kzFU1c(Pmyzqc z_my$;K4&9|y@_0uR>6z3Zg3}Wy^_B%E3TH>9)Y?&(CGXIPngk@?=9zUx1!3oD0({i z`^2~8>P!~g#WJC7i z+E})*LOT;(isQ*6{5y5}7xG9pKd#<;IzVr^BlB?POj2$=j(^_)YD~Lp|MyhCbr+ za6K*}E!l(oQ4ix#JMZ7)2Oj^2)N)9EjK21B(`n*GV7<*K4e3+-n8uRg@GAKgc97i} zXkQ->5|Ofu9L;k#J~7GK-3xsaFa8Ev#&;~sUiL7gQ^3J_?q_1!1tWKPC(0!?xcoS>q7eG z!{;dylu9G<1FUPzHpP{)AYE-$@nRu^U&Ali{M)h*;cDI(r(VDT;21*7YFdk=mhmF( zx3YEdXHWaFg{qbtsk>1(b$|HhSrJXZ$sjei?TVo$0K4lotx;+>z>QGN;MMHkp;M^ z80||fmyw&d;ghJ>^fC;5ze2s7W5@F{IR-ki+gtVYwSKbu&ADHysMi9^7TR$NI8qaO z8mYL91fR#cO!Cu%MH`FDBlMfNuQtXX47!f=;XAb3TwpubY?wH;5B0hktpbIBFa09?0{xlZIgK7gqWd>06l z&*25sjCIYK7Z9Tu-V#)9V7g zOhbj_8oEQD1KFo)!(WG#F6)M3s!Va~HsO?R0-suv!p z)7YH`h5mLlYbV!fdHrs|srY4;W{WS6QeCaZPNx+MTuWZmRDfG(qz>p9sBg#j*Yx@W zYHvcx0i<&m98=flEynke;XvYso8nyXQ_ zGk&x;V_l<%Bfy)L$%pjf5fZq@l}-Ap>e-XDZHtH5Lk^_HU2(203z=xYM6Q%*dl1{c z3*WY}F7rufOXDW;sIsSX{v3~*WED>2X-#ANnRVU6Cd7O7@ANYgJ+k0dqK%iMXX(6 zVkD}Gk@Lo4GpZ#5aUB_5$`XaLdGDX|;8dGEh-SnuZ>?34XOoGx%(+AIHBWZd(Sc35o)o;mW>3{R*#s(>=?<|Pe#nmcIqTz7 zQM#wyN$IZa5-qc5`H0?Tk97e3NF>BE{rw)cN9jNBmh%QL>)rMRM=nIU>&-eX?cJ8+ zeDe4YE9Qp0KZ@UD9~4Ziepab@hxid|J{B+1=!|O(oCm#(vD-NoL=~H&igjC$2Xl-6y-U3l;Kqzn;I{Q7-k4E0D5T zMoX=f3S>O{{G58M#i@Zf^R50r#>3Gtx!8Ca=Xp;gH|2IzeA}vNl<%Wy;6yaeDas4D zUxmCKVgCJ_bA=0x8=ru9QDxsU7d>wA{Sf`02&!fNf1;1Pm(Sbc#Ks?JbT=s{BKZYOM%y+c$Sl%cTgZF5|?f1>3|)^TEAN8g=d5oktb{W)=~N;kr<1juO2k7X-28xrOsvmJ`1}s# 
z69JP-eFw7l$FowG!#bx}r9c0jA0=0kFM10gzhbYbEr@o2g{>sB5OkX zM8TW{@;tW_AJhB3g=GBH!_}U=(6f7RuAkO9p_z#rrS}gj_{SCYZqOr z&T3^w$_ziu>S;NO{0z;KeKVC!ayBrWM7)Ss<=MCrlz69DbzDc&vqqj(SeRs<&6}s> zcTS8!Z+8~62Jv^e*}TzQFRR@jf{WQd-ee_l6P&Zw8q7Ym^*y;N9ye0r6s|-6tTrpN zAfJ<;puyzM~ZWc%2w?&^qtCGmy`Y) zFdbvv(-vll`=|!e#;o;3V;oDKuZKzA2rR_0igc+F>fDV7^L>ktdLmL&%_wzf<{5V$ znl%JvBXWInVFxcaJ2v%Xc4Tu%#dvmRFRI1IHgkPjoXonuD$PxOwVdaC568-M<^z`X z70{o6g5}7;`QY6|x)Qtm6**4@j#M6N!(ucg$vw#SCB=$1yN>KZ5`*_Gi#-6nlCLZA z3_mmOZF>BQM*LNOqm0%Irpc-lKgrY|=njsR_`8OToeaPF+SMcldq}|)pPI2A&$H8A z!IFv_d5=Baj8Q^wl4Jh}qkm~ep9iLH{^m?29yO0^)7byvuugo#PBdy$oJZ8xIyqR& zk(y+znr>a1`akc3$bR2k72lY6P|YVlos2sG$C6+7ZSzr1rZ#Gq^~f^ZSfZC3NI>=~ z-M~5or7xu^SyK!&10=quE1jB)syom+S&^P12^Wxz_y%ntH8;R{j1eo~PJebWH94aX zp+!ShAyqdr*M3JjGTRS@eNFuAL7!?F@n!PT28~k-XExkkrnOgjKC73~44HR=B9UF6 z>unxM?oVc#u)i%>y5umcq}R-o@fgb6gyp0zgJyX?+1sK=zho^E-%$=7R=C!cwMjj;WI;ItymuBbKM&&6tf`A$OWjLN zfJABR)Kju?B>PMJgnO`>UBUS_IxZzQ*%3UV-Dh-p7;ZJgg)Zc97hbRP|17h_2-nXf z6{$9Qj#WS#tGna$aU*%W3;&W`W~J*npGuCIhmElZ7RlOnm*3++-bha`NN)W^LtH?X62Co= z_8g2diE|%^Clg$Y-^*9vdzZ8&7i(3r{8_PoO3m%g;xUrHGADzH za7f0#_h6JAT}M(i3=DaDe+sVEu|s);UQBZ(^%kc3^dL&KMu|z*)j6Z8iDSw9Qp@KX z(J!6`C$UaleY%=%CMUobg?(MXCLL|G4StuPUA&v)kzE=8;u%$&ynaoF2auS@aPKX@ z$y1OzK*@uYU0?|tbR-xzfOjbV_n>?0!7$kWL|0|au!`+W6{x{v>IiTpKhklYPBhRO zTJ$^88$X%so2qI(7CmQ@fcX6MHB&vnlE$xQ3k>6H@EHh}`F|Zbx!F@$L+vF+v*DBT zwBNu!Ii7Mh)s+<1!LgS;+1V(SaOY1xe~E(C^m8k_w$ZoNdOdT?}Iv7wK~y~RK0kWJf)_2>hHv(JF#umSeOm;s3)FWg6FBCSPf;f6H7eMdK^!! 
z?c~Nul*d@lWOtmXvQ$#-;fW9Q-i=OH)TR}zCbBq#igS=$62GV(R9N%_<=WECJF~C`ZH>ARXX0I* z>~*!_xdBYO&?hkmGx1}KHZyT8aVIlT^*P*3+-p_xIsnX-abqjV%v_hT*6VRAh_i>w zK6AP;BIDWFr^a`DtXBKnP0Q7wf3g^{IxN40U3ZWy($m%Cbth^+Ollizx5$j1JIQ2_ zGpM)l<7-rG&7Qte@bP=pPY&i(@@vK-HF8$7fkm!K3Q9=x#ps$je=>IJRNutLKZ*7;jeA8gSLWF+^zRJ4T~9K`gF6u+zyz z; z4kI1AP^uOgd7gAlaqm5pJ&9B|Mx|s?e~s*CzmzzE&ZwNc93PuO%Ai*2cEp!pD4r!> z;76XgMVr{6dr4X?d&0bNin74M}q{_`iVi zsel(do%pL?z%tq4@?Q5PlD6G5y*xiw%W1Ao!;?BB=p%5>A{EJ9owY-_aWf6r;Bya< zwgAan=BQO>VgVY z3++sO;w@lE2F}iS5)X-^z>txWr!{X>>XDC}@+eyqi zsjcmOg|r_?4)z1%K4dQ*E@fc)x1#0m^qRUJiFp}mjCi6!I z>D5(u7j7Tz>V>{wH#9p5K9H^vwPbnST<^Qr?|$EXik94YSQMwX7!ojPr?b zdD5MZ@N8~PoXl!@BQYOr?NDW#D~V=3)#umEkME*hS(?($s%s?3Cz%bBv*9feWp++> zxP^YYf$B^)@n>-E1B<%gz5qqXlB8F(*o_j4$Z9+hqVcH|kO<1MWaCBfWtW>MlP^5K zsCfIZ8?8e8K1+8y3SCLJ`dHn)&VtkgTUM#@!u}D(1M?k) z6y$B%MEE88JuCI>l*)qsb9N(dRk9OH6!B?RWt;G)AA06ZeFqYnJcBjB7Wqi@@b6JB zF)7cYYKisX2;&_=-@20H{wSCz`otgQZTBa}ndxu5f#VnRAS*ePZ$vx&rB3M+uzJU; zdnssp`P`1IOryuQ;oZx|NG#6*TIEb7`?H7eJ7*{1$z1Z-4Hc$)GT8yL>;4L?sU^4o zO)?jyGU6U|n?QOdxc&_}SOuFsI5Cs7lyyBhy+0;9V{jueHQ6hkPkYuDT9wLt$!3{c z@$Zn$+cVdyJ3RX9%Cz*Hmv-I~p z2(ylT#VnRA!VUG<7EGJL_AstJVU+lr{*CpYK-(MQcCrF4A=jyrG6R2l7OSPE>{hH~ z7c~5)upZ}uXp^Ut8?hB@GXmef!I>+}9)Bc}uc7AyINgeT{S3_azjB?ex4Io#O?OH^76!ZzbzUj>a-xwM;L$5am{zP#&Lr1g z>H9j|+U$C=>Qn`9@_L+sHl1*91YM0s)G6eqg_$?|jN6QMOR-N%R8xG{y0CbuM3Wfi z_Hs$5wc5 z0!6YvCd2EUEKSyTOHsX@)y^g})ra~{)YVH?23-L6W@++g#Bsk4BQgY3El?)8{@$X8*P-l9Xc>R6tU(ibu?Wn) zU^Ky%bB$O+6Sknu-8hgMgNGS0m3KR^8#$E@-SbXx5j~ARa!Ysep1B)r>VYYFTi27v zAKA>_q%oCP7SrHVcZfya&$z#^O6!m7UwSGwX{*uWJ=TNfJZOZ(FlAPl;fcgm4Mmlg z{clFHSC|`nvG3LJrxl9t)#{5v*Z2CrlV&CVMskvr#>~%mVsry)O#w{_IW6zg*?O%D z-*#lSB09yJz9P7~qgV3CzD2GpgKY!|Rwu^3HxLj#PwGX+FrzGCk<%Dp05T zQEIbH*Z)qmo8w+@+VYWJ;?a%ghezw)i?XXb{>oQ zOL(vHbT<-wN`pFP^jfq#=iJFCo~kiht)-46EjO_E$tjX*A*t?B6Mm2D@gys$cm`gG zb6Z%`A?(FsSF3+2(%kLG(cXU<>kqW`aN}*okz}iQ9}Z(^W?gdJpOj4k)AM@SPX5nDpK^N28GLurGSb!F?lm;qB(nD;uuZo!$_d;gFqGz#N&b`uWcEJSd%%9a zXU4E7V_4@I5Ypwe^L6~UN)OL%nXzXdu;To9*`+;nZk!EYt%KqsMT68}i 
zB(~;Ka(cZxi6qHB^h>fi(Yhk*l%^nm02ep8QUT2_f$KQyt8uVC09DQf$?fDQb4T*b zbi$zptp5hmkVv?yph=y8O?uu7_r2CGyUe7Ch;F0zIk3)I{k=F^pOkF_-9A1yBO|Lw z|9Z4)3HoKE=lQgys=jjWu#yJsizW@s zwY7|ttSvvdRvu+SxsF<{_5ZhIt1ajj`<5!|tx0k$cIjT~WwUa;*mjZpWHlLzZsX8x z1PU$Jf3m$Nk~%RK_gk~1Lh0rHE};=QIY`FM?s&HzRa(P*FN$@;i|l0c9=#>1k4D2w z%tGzVZDr6tRgba*PbQ4Ftz=Sn-ct@wjO8p;&8i~wk z>4}_p#0%jc{~l+nexCR2RxR_!>mb_H&$VyBow$=jS>f;Dml%>%Fgwe&@}%tq92tR9 ze}Y?1l=k&|m=nAnByAKd&qAS5o}X3F^Axw%WI$OM{0ePe*fox zpUTzrzYnZK_2Ydy-gBS${~0_>bCD(!Nlrvcf8>KPqu28-=PaY~w)A>VPx@JjZnO@0 zn?9vZ#ZV^^KX)Y=j;FCJmD#j5);Z^xDIO$0lg<5oeUH9`qw(iW_3xH+s2Yv?8J_f_ zyEW-v-fB&x8`%qvB_&&3DQ|>So7hUOQb|3Y@rk$0`-`V=xP+dCPgCKS3`>9XeJ@L} zSI?=;S*h45Zq)19+N>c(sbCPl#rXSPrssj+$Vx5|u6tl}sF@@lnJ?)xl}g*wn7(8; zbAD@`RWm8b3CyK;p^9?QH(K6ba zVuvfE$l)T_o;7MRx+hauS+d+7Jc$fU<&3-^d{Ubi^f8AWdEQ()fqkrHHJBaZOCTJL z>t#vvWHa|uMu``G^8b}Zr)ltd$yklixeu&{8t(}`buf3WFwV*7+TL2C7YJS>8-4X~ zJv#k>dLzI&!M7(#%)eNtL~$lt)Cy3{1xND2uf&-vz+WHDH^Tc4u+Ah2*`38#WQKNI z&}=b$QU|E375_@tkHf93(&JC?9j+gZcW?QY_dL@**;1RlKkk95HQ0jLzGY8(KA6um z%2a0pZ-H$Ms>LTDzSvXgP|i_*?#?;zh+Pji=hF3o9xZP+i&iOk5g(Dv8NJYYD$KsMBTW8=(j1%drq8>8M%X{3?==99-$Lcp$_yIg#YnJTkiJTq87ifzSPG?ttgAz9xYZaW5J)koxuf?(9 zc=@8|r|Kd3;2to_nYfl{sjQ4rIit1bw|O@2f$!8~r07h~CST7&qwg}>qwZHHpA}e# z>R@P&63t!RiGSVnH_0sD1142j#gol)@l!29mBd@sVZ)ORv#0)hS|hxVV~JUsO3Ih( zA@7~r>9Z#K{uQ*{{pNJ1CHV5De!dYaf+0EZhucMsLH(ihDN!-s`=4r>@pBsI$zFQu zWH$ek8RrAvPa#p)g6!pD4>-*dhw8q_?tSmz{b&tT6VLn`Cv z@DqWDxK6|0|TMjvj}R`k^2S zHoe^2>+1WovXiG;>L*?(718`GdeH*MA1zKC4u|!(dYOTH-N;;a+@0CzA8|ciwaKiJ z2(*8|IvJRM!R~a$>v+>8KXJ}ims{nQ;QZm}R~;TDEKBKrZ7S_uL8kJqu%{OH!0I#4 zEd*(z(%PZPDWH1}4Zrq3H3z=~dks>Pwf~>=oGK^l@b5_Sx#B(XZo zdf-LQolZ0E-E<;(0=KXU73o&qv~Ph|6_)UHyy?MaltZCZ8=6nX29urmiX{8yR8sqQ z9GwQ%*TE2doniD%o{HYoM31)q=4`erS$+a#R^ws|Q1*pS2^!|D`Y>(cBQ}hdt}t>t zy^R9JySSCLVf^9du{AmGn9-oW8NG#j?}0k;E2p5?1W!)K`&RZnQ^;^?iEjntn;_3= z#~!khiYM^_Oa+nfZ56mqa4j|2o9ZQR=y7-F3(;zj&CZSk%t1dN$yP@oPxZf8Pqgb!3%lqJ2Sv1NU@toUV2iv=}4`rXB zpRJ9Q%(+<=%mPt7m6DNYCo7Olk9En}i7;4*54Rd~F6#F;O5y~5jYfZVzdV}E1@$g^ 
z_^_vD(U-X>^)9Y7V5yeCzXT_~@xO!Trh_*=Gs&$TIZZyl@Tb&cFVW2#;hfwc4arWEzn%O!LwmI8s zT-emTa8F*8+SaG3@7ovd-Th8R$;ZKag*E@BAel(387;)hyFLRoZp5KT+c2Y@ilT||PBh;Q z8XcsKUX|u21-__Gfn5RBaGPlny?8x6p#?wy{iO2iU~|i8 z*6?ym5NW3^o-_!@&hyH#4uf_APfHC<`_J!r2VN$yL zJ;}Ogr8cRH`Uy%5^7$at?dyMOO_5W4>ZFJL(4jBLIL8Ha~rsgv;ruD7O5X{vcF$|=Rq_)KyTahQ zd4pR9><1a)D)uIkTYKQv!pbM_j+THkeJAJWC7!5Du7dyX^^o(HoCTFJ+DN*85ZOqE z)(1)F=7JjCJ%6hEz4VxKkg;aGR|@*3@3DH$X+vty91riDbd;WJ#d@8rhf(MeUZ-mI zp`JO{7=J3Bdef7C0#Cejk|pE-KfgEDLEwrnM?cq+l_S09r!?ND^z;Ayw={1^JV@%n z>8Cs*>9bz(n|!#*Ra*w`@lyG#-nxM12(ny{{{44{o3X@CHltF!`Q`VlSZOF|_be(wDRN zifqwLoS8}{IIt`pne9qcvFmOiESf^iTC{4=Rnb(Y$V#CkFh7S zb?>lAi^*VOz`g@hcAJT&D9uO_|IypP5)YN+QQu?aWK`$@^A#+@n;;%;rpkM+U^>Z@ zmC@p1mMA9&4QW_BMYBV{U7yLx@SNGCxz+qr?Bzmmod)O2S>IXiCRf#5(EZchk#uzu z-0GN{ODp9iK46fx<&_ zsJB#%Ta7w%>~l82X(nhYf#D^xk(G7!2b26wu81qqX0Q3-TGaY8h~713&OB04K6|5i zet&K4n9Q>y%(D};Jd;(;iZ}Vgn}hwNV&CphNu(}kFSCu`8C^c419`6% z|HXJ=*C9I#*_p#pd8>VAXxW;iZ_?Ln(0)u3+LMeQ;W&nryrTha%0`y4l9wLLO3u>RTFJLoe$<)c{Rep;B)W^o{vF zzW9%j>#W(%Bfqbp(AjA45=qLw_+2=4M~5cG>Shzj$ALILN;!eAfj6l(kh72%{qF&? 
zxz6rR1W!8{MmNS8y%)~sjI+H@ThZt?u%t3d8+~N2a=)J5!Ryr3?nu^V=&KdGlDDn# zOj>S5H9!w%lIK@Y|6e#UQ9tM5OLkD3NqF8jrY1x3BQ{3+RF)m)w_rNUGAV-0D35q`Gx%1KV9ldPC^K;3?ufbCZ9Or^( zBuK{=^4^l(EkpHro~{AYx<*NcogMhs$vm02uH`+M9B0phx2v(|qjI~#4*m+KHj%IX z#+~L zAz0ZOHEW>z$Q7SuU+tHQdziNEEox+an3Q0_xxB#+@d>x=j!ZX3`!C|bGa&6Qojih_o__xDi5*ox8prXIYcakd9iE}AeX!b-@P7eMm zB<2*{Ta8}H{nE&spKJ%uv32n;Nj;u{#*S}YH<~pOJ~=gy|7LaA*D}uI#!t0^nW%Cu zT}(EfIU%7Uja}%si=ZQHzA9noCb3Cu2=c;O7*U zWjp@GlO%PEGGp8b$F{gT#1Fk zKKZkI<4svJ&oit{2QA;x>uOf)IIwgq@JPMNuh6g(`kw;7|_tRlXX=*nmXzCA^#H@)}9s5&iqPvpGd}^*YET0(%D<_+gIF zW|{w~-#N38s+MRrfUG3n>^Cq>UY-uPILhc7=v5CZz}_%>0KH2{Xire52Hz<7W&O9# z-t-0Yc{~Ew8nqLe57+h=X6R3SOGTK}sLx)r3%N+n-!7ze94fA+ckyc-ZtW2pb_gg2 zSRK@K^(`=DJ(gStoAmyu^;Ys{q;5_;HfL+m8)asZ%U&e&5KwRS|0%Zi6u6!Xo3r#; z6Wpm?x8C@#7$n^73D?W;@>mo}Hjb?P-UV&x7(-#5_iA16AyJ3TQ7M&d-Xk^7 zm=Uh^xMH@)lr+_#W!asv!ReWz>NNZWW*Ms0Lb}hXo(lo2-TVZ{#-<@XrL(J332z?AX zkA&$GeN-Z0H=^(|vY*(Jti}GNU81L!;cCvMcd#%gqffj)+Tr^x_HfHx@8`MEb{`AD zn_ck(#h$G*txA@(`|UITNd|7G>8s6gx3cmdg7z*~W|6T}n8}{;?=&{#1OF=azZLGKdQNIVrN+!2_>P8W zPgw)C$RT!M9r<}uV zcK=i}>wDUqglcJ9#Hh9}&b)505=n--Ic(^SWczje{M@&mIR6IRnxT9Nn)YUSYq3drFEgUBKi9HLue-Zj zyN^ll6Rhh??oXnjd83s~c`xfV`a6>}G&N&h=1Shq#(Oz=qW0iNUGnjQXKR6DCe7`{ zf~}*4iJ<79$Gj6vp5y1>k^Ix~A3n;=luY;Mo9!c&$z|7xCdK1C8B8a#MaiO{)ALU3 z<^}A1ay~Q!W$b+NUnP&_Y7k_1e@CP%{J0Oa-&0y72aXuaqlUdAE z$J&f$PqJ;vVA4!4B{=%F=d0;!zP?(q^LhVNQM>kL-}o#3!gCW?u{+VG28!f_{8}xR zmKi>a_+C1ag7d*28MH9i<66VPku|%(B;rBI~EDLi|yF^Qk5L63f5laf*G482|ehAps4B-$q2jTiSw6t75HPX zQXBG`3^DQBUkFod8WGDNiygRF`O}sw-0>}6%CoB4DYsy`48Unlbz}edv zQ_Vd!S*7?M&o#>JdQHXK#0NJrpV!6XM4yd?_xYZSXUbxjS2w~cGkOoBRYHZC#b^z! 
z9eRMVG^=x2Yo3qsI(03Q%O`k0f(jplWP)C@XWIqtobk-?%oRpRY+GmF8+ayGu`Woy zB!i9MlQsJi)JR>umT^n22W1W<^p}41M|dYedSI!P#>n3iF9}oJZfv7n8rKF z@OIadDIh*L@mbkP9vES~Hm3c0sW^dQKg%o58uBJWRD}8$ecvX#H1M z3^MM?_}h*Q5A<{;zcsAJDxgL@8Nbqh>WYm*!%f<~Pd1Z{cc%82<9#Z3*EE9-B%Lc* zftSJi7kHKE=Ss9lcGQPyd?z+;hE`+262GU~pefN;DkcwR6|# z8(^M{L^rYuZP34cVKLvKGpU4CPS6ln~% z_UM#sk;%Il|GVkr<7yb4NODfJQ+XXs@o`vSoMY(b1@M^0TAU2Rr}QFS2HePyS?AFUIjP_*xBwgYofeeO$ol_(qhGuOarNTlgjGxJe+_FFZ!F<;g1+2r;(UV+uR4?H_a;%btX9o2X+<|KTle!nKw$@-l99;xx2`M9Gy$%M7er@Zf} zL)zw|X`&!=E%Va5`tOB1Pr-kP>qC5Ai!Xy|#ZJ40xu7`*zTc3G8@0_UC9x`taeks1 za}2!N8!@?9AEXPT=*1?zWiLDd_H*=qFjhWE;Urw?=#K&9p zn+!tn*~uz28CLUdzCDWP4MZC|;w5BuHclizP$Gxsfvgv;Pc`6SD4X-I_2lRpcj8kx zlCD4K_ZpDBf%`L1tepNf>1h#r_A*WTogUu>*;HKFX3suEk9*u%OkQRfFIis3(zSRn z4IT~HQ)l)un({1ctLXhJTAi515im?N zdsY?im>E{m{rd2#;Hkmn>JvS^WaJmYd4+Z(T)(csBfeK-@aHvGnvmSyI5(BlWCgkv z-0?%H0rEG==+|^*7`t4_m8~F6mj4c*or*$dvH0iGkUc22vY^TQf+H88RdP24z?GUY}) z?NjaV%7gw)(JYz!@71O%ySN%Rw>c@8qqmOwOm5?~tVSQQnmkLX3j90P{vj>j(;`_lpS6#v zfPc4OZ~lrS^dPoJ%^gL{rw*l8i|6v z&0zKQ@D{20jx^*HbuL4zucpEK z!aO;lUqy*gt|t#^c1E?dFK=f5hqX#ZIF+;~nkW@?g4tKD9<5FCFr2032W)TZsf;bK zyNa|8V#|)wGI>lcqL&wurI*nxd1m{;{xf%9WyyAH7dbl=HxHm=2fFf0wAmNMbGp;X zh-cD=#Pn=qJ4*d)PH|^H(vDSbx%NGoJc|5f{@4t*MI*GxSxI2PP}*OBF5u3?@t#Qxms~I@9NuXJwt;NS zU}Lf)PCn=baGS|ePe+?UY`|2|-A{%N*Uttzo|vRY=D$QdCttx$X2Y%eP4>qpQR{dz z)jRRIJyqWct(b|ak&K$^<(6a0)| zy%JHA>OU_S=>z{yGqYDR!ng45s9oOWZ$+;~s9fEwmepK+)V`HG#gDcz{D15FRYphz zOTmjOyY)&=pYm@8`P_+}9ErxMJaF2&+vKtR|cMiDjHvU35ug2S79&$so`JpDOD+?|}#5MOISDqcMOyABR3wKsAW>2HMoRyZf1QvB7nk&!dm{!|cX=7~o@Sr!h7(~Bq5&8YSa+ditW(8;cx^Q>gg zABKw?-Hkn-1IqYV#Y-SjrtS36fzGud;S=F^9f~zZ$?Qmyb`>dK)%t4@#oJI z(c+!@Z=#o%ap0G*OcvX>(5n@ga&nM)AX%A8P@@6-D-`WU>Td-ac1HQ1ldYV_^hE7= zrRC&j9KL0j_Y!*7DyW}~&$a9VPB2C%Pj|ImX$Z57UV)_KtmZ;`8oSZm-A=`cUd~Rk z13a1pWNw=a&er(T4xNuc|GZVa1I1nk>mW}icW&M{|Jlgjl9c!u_AfLl9wuLTB5z|$ z&>?;dwb1865RYNWhokw^G<_kior3~5m;>VR(VNaDBH`Dl+7<<~hf3VkIHSCd_ia6s z$~)bxX0tO4x2I~mn3QFf9jHytPp6?u_RCYPb0Rz6`u=6{#9sHGLDjEax!eEj3wNVb 
zMOOMX&*l8A#C#b4@>SZ@GUjPkUW2qKF{j24>?GsG^ErE?Q#`p8-v_g0vt4O|PsuY< zi8VZuMc9hx$ynM%n*lIwkCvsdzC)w3>)(SjiFQan;l!zLr^6eq3YvJbHCP9-GdbVQ zN%eYdtD)^AvK#-Z>(Oz(nPNminIo+vvvZt>Z<#ehh4_57Fta7U?fWDsc~jy|p87a7 z&2Y)=P(n_|gD)%OF8cle@6I>+Dd4V32Y%GUUkV*abVfJ$d<4^hEL(CSuLJiV=-CIT z)fSwq@oo>Bm}(vIBuf6oM`>1S>~7HdZc_dsyL1Ma;wRIU4BdrZ8%gv~JPjY?Sz3z* zTwSzE#Naikw+OHD#-J~pwt%F%&)32-=T*reocW>|+y}$wTixIgySv z?dALT`iuWj54!axiRulf+U`WcSE9tu;{FF9kL6tJOlY|OKPPw1jhAzX^qDu`r=wL* zBTa-=X$7q0*-2F0N$z~*YR>55?Ka4GO|1{>qg+Su&MovfXVxvus;?O(Cx?k#%L+Gh z>p<`(ckxK_&<<|z8o48BI|3(@Ej+vCHfEwx#`oWg9dS_?fR4GU1bG>)>WUfv_KpW^ES zX7oo{v0u})6T#5h(}~1RcCl0fPj&K&+Aan|eU`Y?&*mi(QVxtwL9heN$UamGqnl~@weM; zoR(JkPx*--?h?4A8tneKH;MJns&*#+uOd^2!}$c9jt^e)IKSzsq4=3>$t!R;QXfx+ zPg$PDu$x13P6O8+#xKn&{AWFVsC{j^xP?}AGK1zs<}vbBnLd1|-?KoJs@IQ#EoXa8 zNa{lJvY?n#6Q_DD9C89b7S`MB0g_)W=NePksVC8FG+c(E=k4U=O0XmY!j*7ZUd$24 zSpg>F%r5(Ys>T^?)v*&@UM2nW$<$lumda>3H$4yi?$l>8kj39Txoh(7<8)Y!0rMcT z{5M!$!uFMr`@soPg zo6RO&(E4V4I>V^T$avntUkTR}S(isWzb_eBOAn`LowMGn^^}vtg?>|oVFWq;m-`*j zI??5gjJdy_Q*&;CClk^48eSa+lhfF!R-lV-KxGmg56Bzo!hvSZPI`VC6wi6;13g~q z%1z*kAM`ZOrxNTRiv3j&EBeZ)c`;eYO8iCi$r)DOBkt$PUPel$vlVdtoq0PoqY{tt zI7-aa!&hWJZ>D$o`?6i`ed$1|Nxo9Rn=HMzli~DvA+E+#rDdTfi62|XMx=IS1DM>#TIJOB zIqe_zbWTA}0#jYod0MM-DE~5!wMVO9b-AmlOw-Tb_+9)HcDMNU8&JPtj70S0%;b-( zODCL6OiV4a*gwsEIZawcVm~H9ef|EORCG4R=diyAzwg7jct0dx=&!8kUn%yo$>^F` zpuJ#8yiRfszGsA-MZ}A%HQppM#U8CL1Wi5tA4F=Z(DC@o^e6Fw^}nmgmS*tH)$E>f zZt^d%{*zSnM9;U$%WnM~Ku6%WZtDaBKU$`5IP41gpJm1g%_eo|MG=I(fs|!k4MqeK1zk4_?i6YoFj2huew(H zb1rOiYIbM=S;qV6XFHgKRloneQhF+qjIlZANdA^%d@el=%2RjwJ;<27;FvMvbu&l% zn#FVRwMiD<#7gC4t)Bn+IR+=H;9oLJ#`mKw>zHhCC*aW&&0vL>~AtPZL#{= zMvoI4w~0O8ip$@jKz6Rlbl#mcu7wky(BsN@avNQ4PXC@{v96?xJ<&h)h*#3q_|-n( z(<*Ye&Ir@p-KpOtbg8u~3)#WCbT~QdQ{&{E)o&`dBp>2KEc?mi@sVOxn~Yq)XXyu_Y;(BAJGv&`$Imuvq>f;$g<7dI zpB#(HvHMF@X;WCFGufmjbiJ3h$^4NsgTxG+j&_f@b{8(h>+4DLMSM_>Wi78IiHXu5 z;rjVLox(=1BKL3ldmAoX2&WQr%PX!WBWY_~9t!9ATHI+>oVUys$ZZ?<@`kU1nWiFY 
z*7R*8+qn?Lzh)oqG1g0{ek(eDCE;8B~Z0FN#>J^~NYBk>LhnQ`qSag#Dcv0ezsOb}ZHBv(d_NO#^=9%>7tfN{V+#37g}T&R_!-`w2FJs|7_X2< zXcS-J>^}1*dxbGNf#^7+tpgQUj}OCO2fQMa$uAiHfIsRf*%lK? zFihJcP_zv>$~j@8(Av=3Mb@i13+N4|c=9$Q4NvIt1S2QfKNXeNf#rGETf!r6tLLz4 zEyzR8Nm^^)4%W%M`jY?kTu)5N2JL4W?_K;$UeowweB^qmpFk5b(F(n)kjXjdm6O1Q zAWsJ0^U-6dPv3jK650*+#97V_hM`b$azBY1-F!Qn%zs0+yV33X?8b>WGZ$Vr)3@xL zyP;?2ZsUBQvW&eb8u zSLpRAvu->H;?;f`E0V0$3#|a>!7%&VR60GEq`hJ`$@zBPbmx6wtlO7HyMq*EZT*&{y3KBrc4MOHOlf92VN zF{l==wX?zX5=NEA1ncSTThwLX(uk2eIlt}n)EaO0UCl5pkXlfdNFy6)|{xtJN>M3QH zx{M6(K%Knl9l?I2lE`sJ*oay=GjHnp3NU`aQqCn`za$quKr$6I`m$nmQ97gM>}0Da z-)ARN59%#aeHQxUjJX8I>a#ZUVfPWa$$9Ty988_<``qtO%XS$nGwyi*8{_sf#{b@P z@!Ow3`nTvKe)~U~(Q_V}j2i9PgmSd&=P}A2X3NK;{5=qe%r~0CF>Y&Z?y)SM1 z5*L2dXL;XWrj6I=c`ohfKrgR%H#w@8lc~?(R$Fg5QI0QLS)<>NYROo(g3dPtTWWU1 zN9YTf&2}~CgxNXA!z^*WmBF3adkuXk)zB?uHs|guNJtqRpW<$Er6ta$rBflBEc$XSAZ?co8zlG-qxO$^rs#+l@3)9o!ON8oXbX-Rp5(6?69CN^#^NHKY z;%WL!{ixN(uYwlOlACy_C6;SS!Nt5Q$-XMyQiI zE&Zjc(#f8`v*2mo-{(B4QDJQ}r%o*Rzs~#fMdWW8OI#Ht{~x{N(P98;*+o-+4uizozF#~)iM)*@aohDgiM@M{<>-m}sR42h`aVS( zT3I``MyZSRzX{~YKwJ}D=CjTF(D&HeKbVIavYh9jU(WHifp>_}Uv(uh%Fp7>0`$GW zsLi#TLWf)0^PFoXmwfCU$=+~0-NBl?tLH?5#Jm1nn8){cA9}SGmorLR(A9%Y&aLVj zZ4C?72~LSweaNWI$mw_BIL~@0k#^JJ*PR?KWNi|;w#5HoU~5(AYjaP`!@pk}YrZzK z*n~xT+lUj%xzo~Waw@q<)|s4-R>RH5^qy0^2erAQ*k$Kb>I1egdDr6cvkFel*q^N5 zJAfng3?DOcMw)6iyPaHBgV_^^__jX(Jtuxem0p1Q}sDfX}LpvpY+-GTHq zalWRIiD*+B@izyk>J~P90eP%6j!}xL##77yp!Bfn+~pOoFoS{VS}$w^vP7m-vP(GTuhllPjYhK29X#`{7nRdr}4Ik%9uLsWTdF z%aNvIjFugBa(^7?1f>V;JL>CDa5iBna_;d9^lYi+9cY6>VvA%~_r6blZEL?4zn@m=+H@(*9Idp9iyB1wN8`o0N zA-NTrqe~Umt(7P8Rz8{e7PG#c*sJ$wOm-F1$xv20sc6s>HwUr}eaUuj`d^pKq=IhN zWmWXsjyw*f*#pgML-m6W&bFmKUE=Q7X%`=e_+ZojpxSQ|R>7Jf`66Fq6zMptsy+KrrCY)#c0Y;Vz) zb6~xLETq!YIkYS1Jju1Xi5BJ!aI(L)#PLzuR5fFz&crIXEH!2g_V7j5#_M-~Bi*gl zb=v36%WP|)yGi+_#(124NiOyG*!G;&j0bJ%TfM^uEwk#XU-X-OfX_+YV#5QzwHT!~OI$LJGyOw2W%BKDRx|i6Hyg!^{ z-b+@Kp?Dd8*~GeDsO4<*ob3J#NK>Nj#F77r6Oq^uxBTc_}%oj1C_pj=iN9m1Fe%u<|gZecmucg 
zX{~mNR?E(?8Yzy}hT*`uyT=_|opQ}3x_`#R~m5_Au^ zmVB5M$x&^6q_WMGp%qDV>=yl9LKdrnC4L4|;rJ2yB>&0fuEzuHBhf8$+rdnB49vM2d>7W)|mzuBmBwpDUj zcH(hdTMv@SC_lloPgx7h_V)r**i1r_&p#&=AHrs*?-OzFZ?OJ`ble1|=4gGCr(2SO z8G4MTcj|#`CclkPEb&I`_4@(|t%J`!jJLqkIaxgfedg1dw~Fh>fGk;7l4q!YLDLzs3xeIT~RW4d<~D%II?x%|C-+Y2&WJAHi0DM z&1-T+&qA?eUT=buUGV5Tm{nv)v&a1!hKYMgbY!Zp)Mj^TqjyK&MOv8A?=GY!RSVX8 zHj$_e*_K#>oD}43@;CH&dx2#$xFzpiS+ux8FWCbpQXx5dI^cXx6*`i?u5e8>Z89Y% zAI3fYwuaXPWB!(eO)sEW?teQ^Z)C+A;&`d;Po0{0k*~1x$qK#)NxOjU=|vj%vT1og z9p8||$Su|DQAQqYzI(}>@C-|m%u#{-4i+N2;@+NJg$}RzloOnuWNi&uZbRbw;d*

!8r2pI_Nst*Cnq>PW8Xn&z(4Zjo$aNKy&b~1548b$Hvl_$W8{=2gjh%QM{hAgM(9k$J|495}^3Oj=0(N*NzF4X6-i72QCZ;nv zY69w2B&jC)tmNnuQXB+`zS3 z;JppDbx=98>PmO7qQR?iHvSEVTg$`;ud3cMH_tX^JJcJ=KIW}*V?Dj%cRm}i+e{vd zpHS4Bh941$|8r$&)>+yKne5l=I z&(1^X=+`Q(b2gJRm}I(a4X601!|Ax^n(J$p84XruM-Ra7!kx`PAbl0UI0r^)JS~P=V<>ZIT=Jcli_8PIqPs^tVHR@@wg^QOXNq^_&J@L z>E}c`74NR(Djtewl~|9Q>6Cabo@1@-CthUnU(?&AWF>jrZq_zC;hd_)>+5u5e22%A z$inAlk@D#I7EAJ|lYw`AY7XZm^r?EoJk5523AGO}0GqK8l!SS9b@Cf|Qu;)7sWyh2EE$DUx2|5BDkHfL# ztxYELWIoPmP9Ly8&-$mT+Hpp#1kQKxr!#rUiBA0N5+ipHs?9QDcDv`$$NQ{9N0R)T z$irTErhf2J7C+fH&Ty|2%aN=wPt){7)+Dn<-m?8u53@+g@%VHuXdcq;B)D%jO6me9 zzT`ysrW(>k`12=L`61V{PdvuA^GQeIplgujOYvzHYtx+cZ_?+d1#PwzP{wa1`>g47 z=2`UoxHu{3fQHA}OJs+8fNLW_UP-@+SQ^Lfj|NpvP>w*;=j}R9E?`VmnyZR^^1iO^ zOQ+-Yl3W{o@pzxY?#4g-0Cc(^T`O5z#UJNrSB^HL9|(_}SS9wS^ryu5myqVu9~)OZKLD@-zJD z>9-z>u^zRaMz7R`+78dL`Z^xBPQ~G-q&8;_wOID#Gv61Ezk*F?RPE#c`{Z`DzPEvA zU(fX^ByAf_>u2P|%^Y8xxBR?-c0X%}gN<3Hpj%_{?1m?tXAD?JaRc|KC61@PFU=zrQELaA|x`d{atuwgl_3Qem41;L~^Q{12(|z2a-LCKJ~`;>uB%S?8OX} z&3nDPm#u0BYK$|xaV0171Ic;2!tV8=As-keF$wW3IRkfZ^~^+?RXYFXt=m)LQH>$es(C-Z&0R63E6R5{GK zNdwZ*f}E9er;$D$MXS7{s$y5%3{<1cH_7*QuK)Gf&z&sn6eA^0=}R+6YUw10XuLnR z__mNvtwhQD%{<8$Q(oUq+^y>KKg|Fi(!Q<6-2#iZ(d8l|Ux^zNwTx7%pWwEEOFM5B=l^^u~KHmIV`!(o1on1bjCU5KFzu6qGll^;@C+EX1XBaDK#UoY@72Ii{O-B?8{Ez7S1pS=rsWC=> zLNB+N!xB$23zt7d?d0xF=8pr(M>3Rd#G%w;zS6Tp==l(KeJ~4>2$OMONnXpudH-nr zcJ2Se*PX|GJ?4Gm7lX0yOA;`~57(@!pT^y^z%8>-hn%XX9RVe69xD z(&#e{tb@sPvV7)r^>UZgOvY zK$4?@3qiKc+rx3S4tU;2yPW)=;r+UB$*ItfK4q=28kMhrQ7UiT==ldwBrC}EAWYCL&P}dM6piQ&l9prl*2q3K&PZce+;MU7Qv5Y`xDk_U)m6 zsc}(WTlB@%}aV)u4 z{sybiJQ*V!!0j!(j<3qfqMbRgyPlm)o!B{K@)f<^P}r$cP(InUPxoAQalZp~e9_m! 
zC^qEFI`7^;&KLBM zS^o@hrt)d_MP}o2*3&i38AgL@f6r%cyRPTbQ+Sbm|C+8O?lc*85~Z1XbcYpV&~)_c ztefp@GonxWm9_}h$SEbEEn5L#aN z*5wQ=aTGZz*hoejqy5h)+7O>6(B!|vvNNjZ{5n1bb5K2_X6C##%(e&Xr7`OrFU2dZ zQF?*?L^JkDco$ENA;k#uBnj!k!cBFpE2#Q;zYneH>?fLu4Q05NDJHC&5Cl0_uoU2o6+_{zn_@()h3g#l9n5cJNFbCH5>Orr56gT zo5j*JJi62X}$FC1_fasHUi%ES1f?a|vmxZiYC# zpkz1rr;=3pVz(sTMcEJe6m^qFsDm}a6#AXGyGiaO|KkF9_F(^$-=+iZC6CPO@XQ{~ zx2{eggV~u%++rnt>_C;i1uhN9a5-8WOK}S6$@xw8UK@cpd5H&*rg-dpfmh|tkCKO^ zBK)%=FAJ~KANbVuJx1I;daug5WlpkFzsat37Fnt0TXvWZ(b`Ka>Y={JuP(I+<4GCM z-SnR-&dI16zmU)MFo685)WgLv%uZ>l6UPrK);POBM>|MqxWIsDOmr`*j z?I!YQYXQ|X(r^pD%)$F73n|W?Krgy`tycb3Se?ZDEcShQ(b6A{v~QwnMQ;uubI+56 z$=*qXY!w_zZt1gG`48CKfgdiuJgf?6F6}=21|5WHoROJ-Emd1IR^uViJ#8iykd6=A%dZ zf2QA?(DpZIzL<2Jz^Z(Y`isfx7g|fk$o62}s+IW7WkwU z9pQcm=zhZe=EWHH5=}|=MARQk*R!(iuiqgBFB84o%*gYlyU(M^e1DJk)@bdf znp1ZCMxuJW(^r6e1DNlF`LhZ%TlneugC6SpIWS%--Po(>xkpX?;!LaueApH8w`pw;cyNc zmprI^fv6oXDMIe%)Vdm)W8C zBYjVuZYUXE2m9O6)iV0X zu0dD#@@9NxUiCMbHwNo9`xMcK%-6p}sT!`&r(36*S0Cy3V>l%n$zdSfj?-23JeLF? 
z0n7e4vJ>nj)uKnjKN*~|zw!;uc-t71XqxQxJ#3`P4oqLXS>%a}_45rH{zP7m!P9-| z(w7B<;m@h4`8Akl6*^s#EhW2d2fFvIc9UbJy1$=krKfQ^^tw_XwLEc}&&gqvJ-?Ej zh?0GcQ%Ll_#;5F5<@$}q-_t;q%rM!%NCoUN?w7pvf4xRZb~H+SVoL7iZz*z+|Ly+s ze94{ohn!Yi$)3}`r2I*I-hhLtH(OGJC%ae2k;7DAJ{=XZxAj-|O7^#YLvCwnp`;d3 zNgPqQ6)%G6DAK1G&33UqBUpl)ikVb)9I{Woie`U8zOqx%k6pNs#Z86J2CUEVWF=9B ziO|gsNoQQWkp^~Q8y2y^9~pDMboIHyGJjH7>N8lDM@c}U(zoehtN(jQTRStA+2#_% z+2_|iIf*`Jv>(gnC6oC|z5c zxwmg4Wy$$C2do`Eo!qUh$n7}VUWP@@tR&SmZ(*Otd-fZ4F)=>LhS=OV9xsjMAZu@y zmRh2_jZBF`NtNH!s{a(->Vfu4tqe9&JYLNGHi9Da#bl9PW0V}jiVt^ZkluEVs1(?ejcte^pqPRC zS-~XVd2)tib>9y*Yt7BJqd{f-ZB8aq?I`P*m$jQ*h4CA@-TRHeo4jfj$@RH-Iz$^I z!8xILdJ6gJ;q8y`A}45-Jb9;;(NJ=5Cuxr7NF7!%XSb=Dk=?bGpo^dJ9&KjrH;>H5 zyLz+dcEdfs|2wtQ8sC#;JbqJO6r;g=J~yBfS%2+9&(FP;Ou&f?$XcUhww8F7ajGfKQq;f*{Eep}Spvp8HO3l%+?#2hPH@Hqg(d<3_L{>JEs@v$t z%Kyw}521w((4vYbTA<-ayrn0X zNAdX5x5u^kyX+-BeG6@Pn3c$>%M~nZqU0NrgE6#t7!FmWD~pZGsnC-we*^KMFFi~} zt#`H4*$AGs$qo{BC@f}^tIY4uFIK}jtIw%ns!b%vPU`ubk1J2p^qXPQ429yAP}5Tn zplxeoS4DJ-H_ZfXt%2nQ>~OrjGT)w$Gry%%Pv|-IoIA5DzZkE}leT;Dp)pCj(%knZ zbc#>&b|YYK68s2i%wXZiz<)J766cwiu}5%VxjRqN!WZaD_Vaq<>@nJZ1W$j!pZoE; zWDmCi8tw$aJKm|Fr|cHoV?7ZHzf9gPpj*q?sSS9R{AtQy3*2ja;yJUEHk-#_&CmX^X4O@;1jZ+{7IvVabhU!IvS0Wy(7DI$(6K(O`b}&x8qzq zK{lc76U7R5h2Em=<DAFq;o@&D990`Eersi@To=g#r$wWyeV z@bkfRBI>l&;#`>CPaBRz%lV$D=grGO+S?n+Ig{v^S3sNHz?`NglHpGz?oM)^^-xye zSJ3WhIFX&2i;8*jtv=35ZqA4r!{F)pS4gJd(XT6dg;>lI!C& zJzNN$GthU7`Al}qn!_|_*YOS9qs3WpsYGs4HTF{2#Ve@_8!_D5d%&>d| zt|S7go3=Zn!&A5tYY>}tG2Kem<(j@HYi~x0r@Rp#r9{S#C~U^@zBQw*zkoGamUGGx zAJXNhA3vHZAgJt_zIy6LzaNMFc_`fwCnjjEj-Kn2v-^Gf6m?RID%8G@bkD)D5h!>z z$i}k#O{|?0m087H`gMA;7MyQ@F1sF&qXg}6- zM;qgQK2RIOk8Ga%4H><-0m&SE0V|g3fiBO(l7NCKe)|w zd8f7E^K9_FsF+M01N`0Qsd$gH0oxCtOJUPt(I#JO z_C_<}uP7vVj`?;n0@a{h&wy?+OZWyE+=zQsKoQGU2AzNQZggW9yt7`f;mHHhqAsZ_ ziExT%LC!d{2feo7U%V@R(MnDM=HW|rScZaXESwIYAIaU3tRlzY>%C}}OhXUCubLUo zXi&tfvXQI5dMB*1qF7_47%FsdFS?O>!LRD?EbmVKeH=LX3vpSof%u*{z$xFtj6*MK*X&zpchW7zR`s_d zhC}G#l7dQAS&Y|=LRb0wO`(meS&3~dUt%;9UAZ4TbB<6Q4YQAu5hZmQ2g2tBw(3fH 
znwiu9`mzOqrF=WwUQvsK|WXxUW?vE%~viJV6`CZ~S<5QDL$gzp@t+AT28p&Go z6-mnOO>9lh?N)%k4*QWT?A<}wpKPz8*9-KzA4rlPHib|zISPNKgLdE#%Vx2VvypUgsTVlA?({WX|!E;t^Zi84xV!~^XNrDjn)nHu3o zGUdKqK-1NyLE!it4N8NxJ6Mjz^+cGTLr%V-$@}2)eEoFx7yQL?Zo%^;=&##(r#jb_g7lj!ZP$>W!@v%hp-P>FO^~|>f>ADX#VQm z+sNN6@SF|4tBsGJquor9B*teZDf*{3e?;Lqo<80Tb|0ELPH$(@hmOULOCzhGDW1(< z$5~p9g^T~jf5SI1C8x2r$>9=e?dz$piznkP5#N$~eEXAG&#|bukLN0YBzqTm`gy;} zNs^jOlgtv^;PO96OgwJ7d+Hj0->1I~;a8SaCX4U}W6v?9Gv2%%jn}2<&n7&(Ob>^- zn;qaTtZn#nB#C_B|Lg4(ZzO8r3lPL7DLZQMFe=#t+5wU>a5;pGB=7p;-urcb=~%Q5 zyvYu7to|00vY9?_fnRSeY$+^7L-3R$@vG_5BIDVEMv6b8c*!n9Y7y0>Mf-xh78nxc z)d@$sXfHcQ-;|srhH|=`{Q-O=q(^k$Qw>{lCYzWY*qELXs(}E)9K^-o$E`CRb16 zcH(RIvN)^BcJ@AFS0~YqtgE(NUXQr{soFp-vyIfm^FPG@0Kl#X9^-R+ErFQ#xt{clY9q+GT<7#Uq zI#oxT9ogGU{odfchnya*LAji^C+>b5{Ksoyiym^nH9Dmd{Mq`-&ebLCMqN~h4{JR6*Wh@( zZRWXqlvzh3?+n4k)TCUZ-}8-EzqmSw4oq+-Rgs(3S#Cs6p3`P*+6id13Y9nOD<0P? zQD_=`UxHJjEe67V4DCtf#_V?O@$GkJ7V%DK3deZn+y}oJu1q%CWUYRpR+0tcL->Uw zD{*=Peq0NZWGcUn#N_-h^-7Za;2!dkc#W>wP34xXIFn!eIz3F#PwH2coOuqxt7K-) zyfUYowez8P7HMrEIBz5wFXCUMU@q-SEslM0H4>5vf!W zhOlmkcF8vQxu=?HJF)qlVU!9>lSuw5JzU0i)n_M`qGJ3w;t4a^r{Ug8)|AR@#4!cM zz6V3H72fFX68G}Cp|>8e3g}2u>$7V)yBbE4QqgHR%(I`<8wJCYnntd^THN8?OX+4# z83*XAuY1?hiJZUOi^BiX;tqdX;l^85C)>?St}1l1GaH<2p4rPvo}SLdr_4iRhsxl@ zN;8_Fs5%%Q60f+yEU=MQ5>N9bC^oSkb9`@KNLw;UuBAznLH#j!6U&^r@M0894VEk6 zc!~ZW^Zd^sNwiKCT75n^x5MHU&!?{B8?@*iRB6IOb}d#bsfLy)md#+ShGN@rBmQg2 z{+RXlDDbp)zof!xclQoLiZe=G--$288_D`Y>Gss!2P9>aL35xNa z?hA^B+MWTva{6kAqS+PNqlMIhcmwxmxi-+1zT~Ed>o1U*4Poe}qv*R(r8_HfpDfo+|mCHFDMoZy3FkwK_c%y%3UC2Rya3W~w3|#!iBe9S;q^575w+1V42SWc_QHIc?mIo*4xg)F{|$Kh7QOsH z+b;KJqDpF*+c!tq!_lNV9@aytMB;{z=W1^Z2uFH$sh(G`M9-K-&GY#OJU@%JZS?nA zT+JSOJJ&n7J`2ZELHb0p`7(-+qNNuV)+YHl%Hc~abvg5qCjS0JR$3TYE(Gf;xYyOj zAM|kuzU?q7{es?0%{*7&$KhyQ9`$4Y?$^_ksGNPOKfq)IiHqmJ?QqS?ZU+1O8k)^P z(L^3jrC~Kl#8}X^)M~u?hT}$AEhlHu8#vm6l+E$fC^Rd{;h8hx)D^qAkf8fW!gt=t zF2=rITVu7dM0wkg$lUm=-ykhJJy_UclV>j6}5 z=iN0h*{Ypw-pH<2byUwxBQ=JXgSJ1~+)Szu();JGyyMxN)&354Bj`tIJXqi-V^GfD 
z-@}I{bFjk2R7m8)i&jqA-E2xSnwUqP zT#Uxs;ZdDFj`Q|Z`j@fxInZa%;9h#u)Z3$JWqC6D42zQ7C#jFO0FIsYb}blkmYxXX z(s+@V<@_yKDH0>O%;%QiSW267!gLPm45+TOwuw{?r6nU^J41WP9y1L*C!+mc)**Y!b$_2_ZGwyptZ z@;tWoc?SC4ZWQ|vRZc_4yTP5jE~zg4F&Ni*Yaf<+fq8Gn*a1eu%nGVnn>Hs5kLzOTu; z^#cs9L9?9Dv__wJX3g+k{5S8>=3X$x11c6g^1BAE*~?BAsG)G}0^$;%=pNq5sn?05 z^ih<0m`zwi3zH*!EA2bnn;9Rgg1Qa94DmA(?tjLg_SQ!ISc`+eJg=~Acax=L5xAZ1 zpJ8>E{4NX8Fn&9UK%9!=zr)-0xcLL=TVR#1I{ z^~N@#TVtAWG8&F%ubO%%na|&Y_X-fSrJu>RScYue1L_%`j74n4_P1oe4nl|Vq-8bC z6YIJcEozgZVcw~QbCtZ&4tA@2uk8QJr1DGlGqIS-%+|WFn~5%}j6ca-7b)nXuR3&l zGfIxI61)yKPa^fnH+K#T8_&K}g1FgCyb_B1WR{!SouPPL+({nc!607@&T@KasO@Ba z8td<0wRk5>QQb)UIEih|iuI%=mxJvvlz2-YFIr0{i*j=Aj?`k#R;yd*9_-FYTAE$2 z%W3pec%L1X<8ggD>3Y+r)bqXyUp}@<&zN+*)okCwQVh}iQ^wmps2o4=-pGw2~!6;SR6RDQnpy%=86aV)^lk6^K7vLO!D|jxQO@joJZ@p4JbYP$Qlr|K)y(Z_2)A1tk$aCDom-W;6Qgakq94!=+>+&c4hKpexz2 zNL{0}@>X$YufL=9AH93U=Oa))J7}$oSz~sn4>9`wn{}W{kzZW;j^DM>%lGlqKBdV2Jrp1;KbEMpJmioEARSrtJ!j-d{jtXA{AVLee36f*@yXHDhCRB&D3MGJnSFOa z&(ska%Vr!#!d`>%cVsiMOP$z*o9Oy2CzXwvJs}#P*fO-PC+} ziZ=H#qQw6w_AJ&kYxOt4TE`gil9pz(V?)@Jv7k<#^xfbeA|+t8p}5McvH}pVRL9tWVDSpMv)`SC(5> zB;ur_(c;5`0)O|`O|&EV=(=d}M14)ADd*CFH|SOJdCs)1$r<`%nwFbhLWoj3&>A~ZRSGB%%2J)<>CG(a`O!vy2-33-m&oq zO8$~nwDfiMbE%Q8rQi5vH8v(D3+F15e3E&^eZKv`?zN!x6J7lf_Q$ho7t@Im;7g2Y zYgE1)#yKsn3a0z$YdpKg;7LDiyn+sua5ettsYSh+c8u{q*&s4Uj6XplBvQYkQZXA`X|$PY{E7d|Ge)OYB=BhS;kzpy^ z+Y}nG10LH72>P%Rn^7Z~g#WC)w>(?h_fcA?^q=|Cz9{w!_?FYC?_JAms4vRIpDwxl zvMY2yC{K283M`UEZA_s@@i`sisryhe^^Du0_!6>HmM*3~PW5(CzN z6kh`B;UsQ@H)1KO7UNqodL;5=G`&itq;lSfr(zG>jvsXDZjGXQ$yGZHW@qVl8u{C! zt>GYd z!2tBVM!!E7`uUO(B0ftU(0wVJxx=UI(8hQ2XH=<#I;pqZ1GVRgLdJ1`5 z3ij+3MfQi2fZ^Kc3)grfP9f!~Qu)7LgW-q@nF0^Bro-mg? 
zlbwqHR92P)VE&1AQdx5yXm+vIPs1_ZDKpuIujpn&oJhXQRoYH|fIDH`3Cyt|$yA>F zFQe)9IxxTF%^}*#YI0ef5k~aRMzC8zGL7cVC6iU)wiZ0em6Y9vzA*WM6uzdNF*vo* zoGdw05_K^e^*6vdQ4XPgqJ!syCDr5-s~brzhA6OTD!oLu`~nC zsQQxL_$4Gaa(h=Ur4QrWuT2`VZ&?pbel{v4b4&}@cWSp1E8EAl-Q=JnKBY!d8<=HJ zshMxFrys&Lqflz3e@7Ft&ibRi6N6jE2$mXJ&9t{lKMQd7UDQ~JJ2hY*pUBmCc_~@C z7Jj=~t953d*~d$^p{nE|HC+3U@o;OFYq4KBW2$7Hx5?8F`4+#JFX7pX#3cr4y-)EJ zj%Qa#vXGs**U_X5=-ww$8_=#Exju%*Mhb5xt%=xLiU$wiPWGceAWez6Jj;{Gp!=A= z-AMT?kag4MNqWD}r>1l?e5gy?;`{VInK>8+i(uCl9`oUt>P*vdcqolI0NoavA2ip} z;UHRqAHjY(2oh74F>DF>_>`qd_Sa6V$11c;wBcHuOYZXSDB99&E#pwm&|0w9OTZDE zpXiLKW=!$$T;g6^a@ZF%>%IFP91}UwQeQuS{#{VSJ}1BM1Ns?{W)0x5ou&;YhdD2* zuZIQjFO9x^!Sas{XZERF7iox(@sD~M?a2usK857O@41Fi zqymiVdp|4sYUp|Z3e`lt4)~dPkWJd?g9m%OIaBZP8yil3AM{LFEhTa*=W3m4&bx)} zI1Yc4?d4qWCNeX5iBnH+Cj0V=F?=mM)Rbmqk2UM`&U(*|+BM{JHb|3UC%&2UP%f1e z=b(HQTuSzj)QyNfCJJN|>75L^i7df97BewfQ_O+ypA2)4AymeZI23b%*1!ln^Ze$rMGt8Oa0*rdc7We@eoUXq?T+<&RBwP3$&UG zm$I~FNJ0B#_Bqkr+GNSN77WQW@;CJPo?N!`?O9JhW6qyZ`9u6p zd~o6qQ#WWQER)UoEkBv}KZhbs;hEjQG5GL<-WQP3cs3;N^e6Uh68X>UW{eisc|Nrl zGFoI8?`N%kiJlv2cvEur9EmQA2aWOZ@1BppZx{F5;lP{jbu1u>hv-Pp?k?=won+(` zQgAI;lWQUo)Y(Z&KEJ8nNo9?-aQbiK#arIW`PhA+o9nqXs63E9rOMDS{jBg_H*=q+ zWTytnkDu5u93Ae;cr+crs-*5$vI91trOk~2Pr>LQR&AzP*C{MV1sDgGHl#B9(WyYu z1tjUSk`}V-kO;?e+6&$K=pkcD6VLpuuvFb}A{ENxS@_>(BmGhFYiqHcC^XphCi?DT zEIS(H!>j-vg6ktRVS^cC9W7M{adxW9k%433+15(3AG)4F?ji|?;Ku;c9-pwa`u+?2 zI>9=%O5Xraa#WoLw&6IJ3L9(aMovd>htbRI)^y|E^{AK_q-1^D4UnP&f4;H ztt6MqHY55Yto;x)n}Mzy+#Qbk7i+VVaiqQ$PILb(6iGhu^U2xM=vCPYzXCW%pzf{g zVs>N_;k4AZ_TJy$n~5?y3a=Am(^V^}GkG1p*J1}=*6ua_Cb#rB_W3xHwTy+k(|D74 zLK|9E4LyD!bI+5E9VoSk?A+mrN5PW}JXy5E#E3p!t z*tpk?SP#IWt{!{h<>S^Z@pIYVZ+0L`GPfKB)_Ca5gvnl?OX9{Z_e^5a4z}jI5v&iA z%AA=Gv0L+*-s2bb7`uFv-<%D8&dwzta|O8m;=SZWI>Gh5-c227eo0X*xufyjO z{Zt{Pp;b}~x9C-Q7|oSvjPJ$;KWQBRW7 zlU!x5D0I0VSL5M!xo;)4oJxF}e$AUR($x}4D%my4Q;8}I)%L-aFN)74UQ)SQvICV* z`3aQ2Me$?`Drx24-b*d*y~R#dA{)~4k>L3Cos$2xitp`kEvMUw@VkgTNKDnMxOzRQ zxR5oR>ThfMkePnGqIVag;vPB_Z|$?$m!{syZuwr4IE;MH#`V;8>y6qw=yUu?M$pBa 
z?A)!boYQ|`)acLNC9A|U+KspW=wb{=Q%(o`UK$u!VJKk>e444&i+KG7(YSg6d5$KzSNR~G6&HH9*>``nluKm1|7-w6Jn zL7Me%@(%T|_K6i8ptaR{ZU?H`ENJS6KSmO^nz`-J+h^KI-h`aAJb;d`pjcuEHo^E1 zl5hr!Od=muwUH{1@xU5JI#YQeJ|~H=$s8yr0H1>~@hm@qCcA0BxK`iqF4wYJ+(hF7 zZ~U+`JN;JQdr_#z4*Q-8YXkxF-DKEpsM5DH;sD9hdqbQO$MInY|O2AP~K=h zT^pZ!yCun(j)o;U=6*nx!;BPJnTCRc+)uUH<6yE-@2!0Q11?oXm9{AG9L?|V{RhDi z%h?d*ZH;KDSG}CxuR`HAzOTpAT?t*O|5uO~Zz2r7YeWGL!$tvb7mURM(Ere$WeZ_Xx2Ji8n zXh8?o!+j4;Y~x!G)^#l$y0ZM@XN>6W$;h4P9BMT1+*r`XqoFVACcYr?W8Z_YyE~(N z{)J_TAI3&^r}>mvrJZP)$g6Giu`=$*Wnc~*k&vvFN~N?ntMu9wkLVxE($ zun#CYfF*G)dq{n9#5B?SRocAL^Goqzn*VQs^->b`4Xmo^>tn55$kHVbRN`IY&rpLk zoan9Yq-Y8F$LeW1KCQ!z@8G@IwXN*RFx;MnfP%XgD0U^o=CJ;$t?X(uD?J>YQnjpwg%9%Wa~OYtMSsP2*yOarY8Nh zYoNViv8&NhkZkKPpQD7_@q!QR%{D^mH3%#V~Y)kqv4mC=u|7HJd56#QI zW=Ar!35Vmq(}}d61gcR+l;qNHiyzsG%_=SRO;(Y*8FVzcW$yHU7zp=(tB2l_ZK65) z_9-;>zvxaS*9PNo_MNj|&;t#Ur}rk`vnO{fTBMfS0F-L$`)+*x*{6;ECWm}%`x_`- zUR#NTO9rXLAEs)8rynQgSWiDkL6u%%7F885432#jy)AHBJq>?iN+GQ3SZLG0dQkT>^CM%Kls zbiZqh3d)_K$5^S%K*r%?&J40gUrj&B)!z&)exXOnh5vykN9!?Jg>zca3&mFH?v6pry2sZfUdx}Dw{3JcQMo^AJ=MKaHEfYjeL#B?s!YQ7 zhgp$$CbaYU!(uL0P5+_sL~@#{gBww1s=4M2FlQJ0WcNFfxtVag0Q6^&pm=_aDpnnf zajp)2Ptx~LGCP8G++{Ag*|RyvOZKO%@Df*;e7Es?%_^lcy6u2j1NWaPbp2K>CX>tr zHopAY3X>>PGnh5J(J&WAxxt;^Jv*x6q*l?TIOJ> zFxeXAK7>zoy7!BA(|#FG#4{@_aLPkfS%Q|?(c7IKxR-w!XD498B zN4!bSxA=L+Tm1knWqip=M)r_f81vVfucdBQ;;b?sY{-(mh4N$c`7oN)^_%_0_^EUu z(W!uz{gz(1+RY3+@pLQcb@rKJCkB$7RI5m})A*k(^gZVS-;wBr^u4UMuhI54()pdI z26!v+Jn{d!$eN=$T78KNpOc?HFkFns$1Ft*cjPy*updXt8v$KM}9#g?jtJ6GPdGr3H0U*(%<^smJ| zql`%_ym1qHcV|sflj_T2_7;gu)$j{&E)YKirsF)F-Q|9;xETap^!5_^e23Hf;%D+v zW#?)UX-t;Ue`|3f{6e{wTJ4E@+fZ^8x@X2cfu&B>?)iB0HG1?TXUWEso>Kd1iEjf) z)|ntqE#2Em$}RLEyHUv}naorfLz4bt2_CwM;d&7U-2c2ir;B z6Z-ocel5L~`eQj!PQ-s|T)*r2)PH&ioG;P7c&q;2tp64Ik<3{8`1FIT`+#bHvUP=i zehbrmwUcx8ny?s3vr8=Sv$)V-pBs#3y=ZBuI+m5Gia#Al^Imkij~4C2QvBWD6Hxj- zZKQJ0EWcO#lnO)f{dm&nWU;x)Vq0cqKH^Qg(Z~(Znyjzd|e7?>WjydYqn1(Zl$- z97o>bukuI%VV+34`@?IG>lf13p2gFN>ADVVsh6IJuZ!Kil(qQ)&1Zt?AQ1dk&%dBw 
z{Ir63DO9+__tBsl=}u2ndXUXKnC|Y;({IS_{T8TeZWA7b^*4a67*FA}xM^F4aV}m4$}oJbD(5U&_Xu;+?w2uGHO0 z#+~lC_LNbrA=#a6{7aRW_~~SCy06}svssrEmbN_|9;oemS(?%0ZJ;*>={F~2)oJet zV^eq5t||G=I^%I1I2~la!0#3ER+6D3Gs0flT?p1hT`i|uLs4rIdUY{cCJvycCq}@l zEZa8)#(O|s6O_HJ*%J4j9iZ7D9gl7o>nj;%UWaoJwED!D@QUwq!1hRieM|4;v?Ql` zk9#-Sjk3DyghuVzw6WkAti{#r>r47d-uwE6mL|tlDtN_{VYqfP8pYc(xn@g)AnS{X z?%i0-*3Kr66KL>bs4-oe?NG5Ap59MaQ=54J%ai)VHCdY;tY8}>L>u=$0`qIQ9T`Y$ z-c(PXMZ;dfzj)Xt6GBDwK155)Q0pRCX5P?@&W*#ToI8$nue@ik@>YKo8LZz!jp&tK z+v@35%H4?PhoNMulqIX`-E=kQvFlxV%ab|N%bDK>S4LZO`7t zA2}H$6SWdQrkq{Jw|5Du9&Y4&7uFwXah~_HDi}ed_mGKkTAB&3L-B1c%;O((KIrFy zJSWkc(54?*tAkFvjI7Bcnzi0KeC!Rfc;T$Ha>?wd9XV-3vvZCWzv&;e{yY6_(%v+^ zCSO)9Fyz#>JWOVSc#qb)v#-fi_zj$^q2NmUDNESl!CJe;y;dMO1{FJK;Wco^KmBlU zXHTdCiND5A{G!i6msa#>2}lNz^6;)H9gBZlb9127&FBTE=`3W{W;xA#MIVo%U{+Xt zY1{;IvI_*u+{wO7_7?|$Ff}F;<2|;}`0UwThepj|otXKov9ixI7+*(&^mW&h!E>v> z=j&}1{dgCQ$C9IdWO*P-{R~dY`}s3EWGA8unk2%pxz7s=9hnHqZ9b=d(W78GUyq4o z7|PBy^*>RA^WmJjL-&~B>~QySS6^pIRT;wM!fpVq>89_=0A z`+20JF>T21+jthWGkzqq@jCOuR|?rl_UhDXok!Y+!>|UP*ML#tsb;WPnMW-|x6RH* zk|k<6?3UBfH_^2zY3;3->|WjA-d2**UMq=xcuGIXIuuWnKAx|RUenRy6L8*7rc%GC z2FY#$zQp=0fM0K;O*s;f+~PMBc5xW!lKU|cSx2IIb6Qp1-?OzHpS;(NGEWzC;|XAh zmt87#FGGXSda-MD^q9S%Hu`RAG#*FWlegnqoIV+c6T3AFu2}^)^2P?(wb908R7^b5 z zhu4Z#^_OPAiHdAbx|751T>X|u``Pc%$J`-v$5dc1F}>MYXh(B9LINSp7C`oC@-gdrS&rv&0a#;=fN}$cQWEVZQk}B zs9Msd4d~O{b8WTK3>?XnTig69e$6j~ryM*MnH8qCLRP+=QQ|YR+Ob+s4A--G)EWiL z;csHtCb8OIlBPsAUsXs!9}?XJ#rwgnKFCGz8qt#zJA2nZnL%y_OLLT4iZ)fe_le#Q zK%IEl#X^0GildAuL(CaIA?g1z7thZ1Dc*U5F2@(JA6+||%qO<=dMyor^A+Cjul>nz zyjaWkpuj7ndnav-Pt*PQG6fYk8?$!eYJ6c*F)1^ulgLE!60I<9&7;%t2WyMA**lCs z;xi>2PHy$R@aE*rl@TmPn^ z+1W|{vSvlQv$d2co_JM%Ngi{eeF&+%3T@Ytt$&fBe;H{H)Bafhv+LQ#^~BB3rxD3G zv<$7TB@btUX|Psj*v)$sx05fXirN2AZKZlpa?RAmtrbR`0EvU-W(j z&OJkVuPdm1t`$;0c4e5ERy?%VqyO3T=Oy}`JdagSW+!S)GGnTM$2GwoJ*Z(W^c(%2 z$xfDL0fwS+_AFk4MdJA?qx62H>u8$u5(s;elosxn_@}>b{F|)3b?&E5N^Nh{wVG^e z96TFczjSp8*={}Hz@)AAZJk{GU?Xz{9_({O5(vHW^)^@sHxF#Nk7 zn;AnN_w97@k?by?99^AMAx73eA$YYY3S3 
zN4+of{14nHrL9RcDtjwa$lm^B?=w{2ThO_utB?DB7>m}CeM&_8sW9IH`XfkGY(jQV zg4OYUSAemjHoo;~k0(Oul9hhR{>2OyVn0%xcj6oJ57s7?r{4BfAgl_f!E!Sed62)?kR_^gz4NX)3um-(f#kORe%2>3RbX{mpk`wpjksE_v)7hfc z>`7M7sdSNQz~%Muv30=^?Is&V13EBVtBI^%P0H4~_hvB*N#4GkvDF}F@jK|Cr_pTI zy&89LF%-;Mwea`^He7)aJ z)9SPK$@ZCj?oR$Eg1WSkrau}qGEUAlgB!{sokwr8PRp6)YetaKM%~e@{%rU)VrjD% zGr^U5ta0`p60MZkegjxd!>QBRxCwgi45QMX*ornUq5fp^_ztI2EYGwjgf@cgaBo06G&u)0R4SiCv zDEV)5-rE7Svi6QgMMn~sQGPVt8tdEJ_}&>tpQ7Y;+;|G_=aTsLmcBHDJLJIT$ETr)S~RJ?ru?o*_FNbcv0v^P#08|mOSPYu=Q5;~l?fluL`+zs&v z-=v4?xHH2$siBk{b~#H+y`HstN{-3WBzhbTi1$OXLF9DqCQ{VH{~Gwv($fp+Qx!BF zO@9{Au}v((ktFyH^521MCDYF+^huVSUZf}SFv%R!5?5D)A^Uif^mM-wrlHnz9=8qc zKJ~KzOv$M65)5*NJ>K_3p07ip`rsT7vh~I4y|n+a6^DC&3$8pu#?};a+ZGSwmHiZ& zeFnBxGsEj2o9w#NsMl@YZKlO|vwwxk z^+-=n98>N5V0w5HTXPQ^^&H-OWb{vUnXRNc{xeUa^Svk<+uVyK>VgYxy?F^PcGAX$ z#@S@K9PYhoB%~(DvhEzi0(|d!y5DSuA2#x8l)qu&Tjf| zSj;^-qJ3&jweeh{Lo31WeSLjlj7pv0U2OF`sPr6NzluanM!)1Oi)U6VkfnlBPSY+! z>s6$)F6@#?aS^Ti8Ptc7p46xNh~^}(=1_gDrind0{~k=z!U(WL`Y&ObwxL@Gp@$CvBrU+DZh+>dAWHg@xVtyV(CA!Zx#6sd-Osc@T` zJ%83)@@%aq1x?*eJV9qXe+}1@g)Y9A$vu^L<||Pq6@)L<+aOjnde_ji@1j=5o9(19 znHT!tM)n7;ELM>ZgDa5=4b4L;;pa6VnPfk2lHRkzovQE6Ao>J+sk@dn=HNQ78PS`Y zQ}2&5&!E6uZ6v$WW!{-ePP0dl_1pLQNv)LwaN%0t>f_yJ92#86>?l%v3~QAR+BMD2nnAA2)d#B*L4 zKk-|&-^^_9E;MWo4zBQ~2}X7THJ*gp$l| z@xizOEZGT8B=R)ca1H#EK_)y(~tP zw)Ev;pDuKNC2H4VrM7`KbvXxv?EtvXB)NY9TTOa29c`AAmh-$9-{LLk|0W&H9I7EY zCI&N}CW*0K!mi(li;t7I^XSf3Y)5GneTYrYD*H}+$P6ofC$E9zpaSA|NXjI#{|0KN zQg~|p+~tX7B<2lT`y21vjQ_*6H4wbnOOGd2FYv~ub%V{PG^`28a_X15F@bY7DkQr3 z1lDpg>Lf4k0r1Q$^lNvLAz%$S?;%}H+2d*;+{>DO4&Kc;Jpj$_0oia8aEHEU)6GFX ze}WVDvlJ`v=zesn&YovnGt-v-*M zice?2a|kY+t=F1bxtffx1n(*^E(d$^H0Put`x;pxzYnhTxt{#=!-om5Ux(Unf-{vF zr+PbHrR8Wq9pmXCu5^NRc2;x3|Ge=gnUi~4fp>SeWM5}8iyMtjW}nn~ZB08$qi9Ri zTm}2AJ+t4?z!QgfKM`!-8>@Z;w|FJR4*ty?=Px+?d$_#eiGRTSSmSUi1bxh!l%Wst zUPz2Uszg2HjVtI>{PG&(-H|L&vS21V$^&{GPP@v{f-bHcgVRqLg>&vXmb6@p&(l$V z4BOj@9XnlbZ{c4u%w7hb!?b@g*b}>OFidOd_16f6OG#_?A+j>gwOaal$ov2FKVB85 
z6|4U{+{t=%1S*!)#mO46j5q3_`J>v{;9JhpbE5tQ3h&V71eR(i8e~lWf`olgShuXt zhvM34+8=^HIgvll*tMKZu1fxo)mPT1XStSMlEFA9{ttQnEYR;?K=@!myVSf+)XOzS zlkASv!kOQAV?5nhMe>eA=@GDRj21_EswFv0<)RtvM_+Fq2eRMb5W{2U)VX)1&wmVHD}_>gmvP2foBd;m@cQnq?Kg5B$IOx26#`a=H&~?8(-B%pSc1x&zTPnQltK zqht?YZ}B-R|6>at#KUAu0q67o**7SK2esh4UvWQe-sSh#JTdwHzkl{CWaNKe`~Uk^ zvXgKy={rK-dGf!sa1Hu~o2jhz|Gr(zl~V5gg++YB{I?UXPGH@V6{DdU&BH9<-MD(a zQFcC>_Cuw`q^&M`Cckkhnzn{Kv~;~49gJ0~W~O!kdm6vs%=4GI`#9}RrqWa+8HDz+ ze(?#7|M7QdntWu9a4(**W7yGkuBMvuBr=m#^b2(C5l?0Osr{c3y$lPPGv70dCl-NZ zDl6RCh_PPV$CJ_cg5S@!XKol66MY*`vF_UVo)u5l)CnL@mEeJ3Tfk-~C(L&wWty>e zvv%r%DcLD1884Cn_9(4pZ!vW|&Lr)#eM_#9cniI&pVQ%DRJD!t^TR|TWi8DOk5lyyhBUSQJv+{C$-odu-@-4achJd93 zT&ut-qeA8^H?n2(tq&5DaFjdqTzMEQsni{h#6$pnXSR?v=rb%vvZrK6W~v@j`!Jb% zkHYg);n4y{GqsmG+CPCi*;+<{V+6|gAvHI#reCvOv(YEjm(F5KQWZ37zw>af5qOge zC)EX7v8Cm`naaO8eUFECeA^e8c@8lj9Hh6^>}EBPz0Sf_h2`xe=wA5eyzEq?*Nx`1 z$^O{LEF!rKk_j`>5yz8;SJD1lv`MT?JNSO4x0(9?R%?HTaXoFW_EbiuiXa@sg7xt~ z*_N-;OXi2|y_Y;W^>F7jce7SXEbSKdXf}!72$uuFk=%i+ik^D8wt&u@3!nHtp9jt+ zdhXBKeFEpi(^Me0skal)nc8Ul2?j(3q3YV)jw56Am)OL`ev;kx0$8Qm@}aEAWMfNJqiB4(llh_^3ifwzI*ABBXKHbP zmcC->z8PA01b`DWqb&h!)YQ6+K|vw%{G#i<6Io;r@gGIFT&fc?q15KEOciS z=-)I3_jY%LwiESyxLz0N<#1PX;+O5{n_LGfrw?0V{AjR3_s zmZB0$C)zkUTM`vjjw~g=;xKZT>{0QOx&_VpX(1I9x|k^>qB%JraxPlg`@{8;bCK`C z_W~+TLh*U}xr($5LAAPMES^1oh3i87wxl%+QTKIPks3|e@kw6A#4Trse6lx3YwuB# z7@z-DxR^-V#E<-otZgXTh!5LaG-3z{v$OCdh~lZ3Y9g7{#}7X~HfNEyWRyD?*2(3W znEH&qnd{`;jP!~9pNBK$JzcWTliik|jM&Gx_AKfoSHM(S8(-RiD7zdE$zdD%9)<_; zW1a5*(Pp=|`IJbhfB7H3`)b}uw8l63TW&tJ20v5Ja2TD4Rwu@!EQ)q^Z@aN=9@y^! 
zdsBS45k6y$PA&8ui$0BPr}A$s%5&gYOTPc$?QVs3BvH?(#YI6~*tm0n28Zqe6DvNM21n&bagS3U*R zVR*KKo+pz?s^wk}ljIXE1IO3dltdw2riJ)CW$!%u&0)vt*-rQP(iRng*N z{5aamWnr<4kUjOBHD9m4%s7Wxqi6m7mhp2Zxqb(1%}H!3Dg1=zsgab^g=DWDfiiQ9 zsBfd{tN7Gjn@52%u?lDD>usCOU7RH6P@SEe)$+SL|h$gau*Vz-o#o2BL4E z4$R5Pa|K!q^4lK0HlxBCdiV|cuLJvT{Ausr4!9&*WQ*iDq?W(BQRo#gdO_j*VZmRU={9rxWZgFoMz0W>E_E675&6|t|8&P}_`~H)x+zvq9 zg$3Tp54a8AQ+YY{bxr|CPL%2wnjSBnf$Ygpuyn!if1%6{tCelqE03Cq)M!VSI(q&R zRF6mJhpz81KkfqF#%MnQr?ZpQ1wT@)XE+Yu1EZ1b-CM<|aGzP~n?{eE_rFQ|AE6Bw zdT*S$(Q2!f`1~B9_2lMw5WSQ2qZw+wgQG1-)O=j{f@W30rxWz@PgG1~Nb-Y>XNyk9 z?VQY?jz_O*?_khfY~}h}BmBYmeZKG4S*KfTu(&wDv=J#M0 zrY@`66U7q|aHPKvc_KM9s^ak9S;Q~Q1tF_(}eQzmN?R{}9 zwR(OJkHk%soTNVwyVP^Jyufpd_YXjgp6FknOgu%hj&lEcwAxx=Hp`5?s`pc4X$0+g z$J3+H`>#I7D|d^vX$hvu8+!@Kn_FP}H7$G_O>ZP2^T^hLr224NDn;9pC1byWA6ICv z6$k@;MGz#uAal4*p8wd0@gTdK44Q-f^GsIvwNN#&AIX?{rgrW?fql>`Stz=qMD|U( z`JdH!p8gsh4ZxCBdkrmq&kE*bKQ+u(p>O!N5A942(noQjB%kRiWb^9c_Xx0Lf1sqg zP)VMML`7u%-?Ml(?Vg#0@GA-x=at3 z`+M1c?)>_G{wKC0{tu-{VIqkpd%h*D2>yv-tyZkT55tjb@G{c+XV>qbsr#~0iKQFH zcC2ARj>h?EXuZ;E;ZWS{M?YF=I~jn7e;7(7Ssi#wE#zMNB^Ptbnx8{I#6)TZCBGHGT#)D*A91Hfv ztnPZ(+8G^cn?03>LE?@l;$>YDHj8c!)YsjfJjYuXfOv)H;!V1P7QExliDr(8+Ug3j zJ3!ll^u`uu=i_(eYrU}|HRa}zyX5b>0i2Ck%0vcart}GXnxWcU(mt9-_I34ab}s9d zds({pI5t9y)!tZ*it&I-z1N(*XU%*nNlpd4vBr@LKpJ0;WJ1r0?sib+H26lEni>YF z^KcFxorsg0*{jy9QYuWOid!#`?J-iHY_7k>`c~^hQOP_gLGSVlT*au*} z%cuMO&#G*&w&MX?U4OmUsBk8k0mXm3~utQ-p<+# zVLRu7b2$3nMCM-dsShsJGID1%n%XWENZ^~`xWM&P#rWCWG!Y@mg8DdG{Rozcpjb;L z7NAJtSq??H>@U1PB9mXZBzMRX@IRo{%uEt#UYDMB@bte);?H>4l5A}Q!R!9siSqx1 z=L~mCsz7I#BWssGnzv;~rWLri(7gEjzYfPo^mi`I=ddcZjZVpV_ZRJt)mkDGPx1UL zJ&)5@YEuqnE0Y6vC;Z|Qd>bju4)(BMkYyrHc>;cDYzwf&I-ywhNH(HEa(g`nmdu^Ufj!v}Pp13p zS?J7e&xccLGiP1d3s#BWji<;c^8Yv5xCrIqecBe3ce~#PG>H{BL@TLNJcDN61&YS{ z%laW%Q@&8VTubDG{-n-}584eVR8LNtI;bJp;gVh)iN-CX?*@OJ!XCu%oY zIM)@N-%Y}@qQ8~Ri5E|5Yb|3*s-t=%ZQYIw$zO97?H=IHy|Db6Elgg~%W3$0ZLf#l zAWu(6%e|gY?ZE5&AIR=5K+&_v!-uH0kiEZi49yq$`rSJH>qVV3xZ)LczYoz%T=3ybRtolBrXI*z_>2r4BXisN@`2JeCHJ+7tgdcLpF 
zpj0bKKA7ZS*sHCTc+=76Sc6Jrqk=xK)_>O4?`rokZS4i?paP=U@4>Wtz1G*la5rsD z_KU=SEq8AdsT>H_74V2RURib_>&@&2R0Dr%RII|&&V}6GNa9wze!DSdjAyet&n|kh zH)b{c4D9bF^}WcA@&4wdcMaO@BtO|pn&RDz*VAxlmmX(==vjB-Ir9#VWM443O1?t3>@eOyVsxxY%Sbi^b)GerFM(ALZ~c>IH6v-^(sY{Fi7s!~ z>tGV_uGT*S&orM`8X5W-P2OM)E1T=4I#NzrlW8^MTl{T4@vVGe|5j?hrO_~Qa|TJB zXD*z)Ht~I%hX>hlc&3oC3b@ppP0mU$QId%q&Pna9=vo;ylk1=z+c&~YFS?wQ=j4&k zc$etU4@uI8<_wAEh}Ys*;7eA=+u)u(^Nba@daDgu?Vy!Q!SNmZ;?vmCe6JsUJjX0! z8hSNAo8-+HMAp`N_cc_{sxH;QQ*UAke&tMibTPMX1Cs4zCTsT*s1UESPM%05>cmU* zC#$;(S|+nVJp6LvpH~^n1GYncaFE$Q0=8sOX6}e zqyL?8w3{`{G8m61%ZUf7j5kfuF_CVcqfJRx-PpW(^!jUWT!8MMqv*M~ll5vn)XzwK zBZ}noJ-Ki~`}2>}~iE|G-3{+-6iQ zM?$|MqbtBSl;o%K-zB8DIob^bMfOmWOL>M~8=LJ9WRn}|AvJq9qEjll-VW=vWUn(@ zQyr$MOSqr5mZ5L*IxIJ?Pa(g5WnVuuhNbG^7BV7P^~J=wj)u&idkFGPtZ+APV=m@)~oxyg!XUKM>1>v zx*98m&lB+ZWh<-Q`k3qgH+c3vtteToW-V0MtkvVj|(QYw(#n*IUPK1X(Bc zF6W&4TU#cR)c)=^X7}Q=Hjx#1-drOm!&$GV_ry0`TdXAyU}LZL?NYxdfcF?rj6~Bv zyPI6+sjQM-vs;k}faEPW9hH9eH?VX--G0^vrAS&AvM`IDXK$##pFh!r?zHK0-082y zG9)6AnO$(QAK7k*;=^%zi#6I=Y}emC|24Z(4h&gKUx$vZi>t{M`!mO$ClPmUZ@WwZ{lyyQPFMjgGJ9=nLYkI=% zd*6Gy^MhyZ@IPJ`*|9mzyV+$pmE49>`N`@!HI!fWQ*tHi>VMLLl5g3~`Stq${GV#f z;rjo)md~jlP^-8aKZ3LWbANBq`hjf3Xjk*x{m1S;ACJ@@zJ#0}r=Jtp%6;+YX>>Y( zJ$r^OCx^nBo=)^mvR2*69(BTj3jUYX@?yPxZY~!;tz@D~+*mx~<8_pkPvW~%WuY9Y z+oh$Qc-YSSS#PI0;BalEO76zO8eR&H*wf*lP0f_#a=e#j@6~R+1{Q$kXfk&g8A;}b zoNvUJAT#|>=|p2NXN8owx>d%c2aMXQ!F7^1la-)`xyYG5RiuNNEma^TSJ8w6NaM|H z=*1xFr={eh|DGLq-FrvEC8tsYK$3`(X{h%a4VcW{#78q;x7WjIE(`cRd~f&uQ~u|q zKAzvHxHgp?OSZQb1uqgOoci|bV3i84sX(5%%K4xkgKihWArT(+U5mxZ=)Re5w?n(C z_5vrvek?BR0&gm(C3F60y4xA0KGb4c);5(B;-@g3mL3k8tYaplTTZs)2b{C(*q`gY z9sl8W1^r55VUt5|E6dl+)7$7yL#?H9NPKeIfG_Lj#IBwS;`{OZR`g2+k7x9pD3!$0 zx5br2%J!ni@V+z_t|sxfv+O^TllI<99ffO+_{kTwn3S)A zb-m1vjBiDv7LuvyAnhiKViH-YP{5epvNu%@mMhTU8m;bzb)wEw^)0dGso~NL zSAN#=VW?y(Ec0Bv!D?&qM>Z=ul_>dm@z)?3ndNRF*Slbv zJP7m6QHOvfIm`Q_WCJ5vDl{Y`$o(WM+x} z;EYFaMeoG3W2zCe1>VmjIon~hM$088?=Wk=}EkdDWT6nXNw&a9w;z~E1I#YjZ*v2}Z{0K(vwAKs{dVp)C 
zb}F#;ec0|RaG(YYmDt(YdYGoQ><{EjCvi48(N2bgf03VLMQ!cP+l_0<_5Vu2rDXBA z+xtUUp+tIohu5FGe~Es!(%I|?CF zRjw&X&na_jznjSLZ8-H&AqyRFYYIJi$ekW=?n=fw;9WcfqD84YFukDpyXc?O?8Krs zM5!TowMiSZQDy+a_vfq0bXR@PIM$UOJoA&_X;P<|mQo>-Dx0 zWs?yieku2nqtpt?N+hxWRms{G@TFes27D{)`vh35A@>7Nd>;8aLCbCQS{@~n?c{3O zJ<$mM3v6B}Y)Y!Jr{?*quuI1I*I@o9xGZM<&ja-?@|akhLHb>y?aT2lv;GC%zDV2s z^pPCkRp{Mzx}KF*=p4WB?A9*T<{<4oh7y-*{d~P7A}w@0$E>0T+GO`?7|hn<$v;Th zC@t0X+;H5x)&1lPnyKxa>1S6tHsd^4eF&SxpYP{R;;U0fa0}cc$;q{v%zcOA-R)po zRoJ!d+MZ#)k#on7+&Nc2wY5JFe@`TPCz;DEgmruUBqQTs?IfbQj(Oe*ByIrYNNLgBF6xtOjS&sMfy`OQ+yzhHF$MZbD-+f=pxnJ9Po#zH} zG!!S3pC`MV*TZ**pTzULm4v+jhgM|zJ9r$!J|;Rs&Qb4%a}_eN15J~~WgTqJW=%2* z=EmQA?XA~)_Hd@@a~?RSqslO}yTRNm5pz3%FZY9Tzh$tNlSN@3T^voq>f_(_<}?`_ zPWE;;j@G0R*$Yo3y^^!s$8onFTbB5kRnQ{02I`TfF=Xa-Ql2O+YqkC+sy_|iwkZ2I zWm|*n6mlJEbs~2^s+FCWYBJm+=?_LlI0@s zT}C!eM4vUNdy=xDRCfQ)RBMNsU^2WnBJH`ym*2<)C?? zfS?wvPGleEp49Gv%jQfG=s>5u~Ov zo;PJVYO;vKak`gwemW<*jwa^bU(SP%L$6X`P2B9<)?Z?ldZS(vQKXfT;XL#>2PAtu z9Zc%4hEqJJV^NZ$;eKQHcy!4+I9Ur?Ywdn(j;dtwcjVL;BIUQBWb$0krLpl? 
z9nQ9NS8j&Uu@}0ngIV?>xf`v%-z>;L{@MG_D0 zk{i|W1-QFdQ)m6viQeT&DORhD=lyZDlFxtFXJ>Mfd-+-Sw}el9ZN$4Ux8{e@ocLIO z>eF*pgjpkgk5=(yzl{~FR6sEiTsPrD&fIec{7f}VR&-g3Uum?=8F9(#@>{Z9#z?b( z?RuqH?=`^Xx+Ej_^*>; z>T@jDC%E3iyHL3b%aE1zCunn#vhfx=-@6lwRlsd{^c~8K(Dsk;Kg##ydW{7M4evmu zKCq8>!!fYPor`#gZPZg%-CuzD4&^Qeb5Eb|Lce!#IuRZpHjexr1$OBzcLmM|>zRJX z`8E!Ha*F?Re@lLv7BU$BhwO^vS#nJz%4BZN|5WOy|NT8FxzYdAlmGAktoIXT;w;c@ zMB&iAsh{{^q-<}`8~9CJv19+kmCFmL4z_+ziL8VI#hYqmtv^obA6c^F(C!TyFagFF zl7p-`n$Vuvz9+(HWp;QPdvYyJyA~%WYjvXEv~ea2(o~OqJ}fp1jM+!dG@B*sU(so~zPXxA_=r_iv7xkVD0c%11IenjOoY?G1?%5_< zb|16(%g`_ z$K%hYE%Ek})Gl*6791VZ(-Jwm09jrCYIfPoJ~aL>;TkN`zvh@RjLmAK3PlS z^|KB}Yfz(&XB|K?P`fz?`kEEpq|fW%bFQ)BZ0|F#+D^-opKpn?ww>RU?4*bcPSMKg+1g)cfM#$VAn1`$JSXgP-pn^Ul=aJ(j$|rMa zR^st=$Qn1Z+C?Zek(4Hz#OEaV82pci&JJ=IUz(gLCHhz*iB1FWd1Ni;Q}aku{6b#W z=ICP1-%W{6l}K*4tgTPQorlTg^+x2}9*Knh8MMjt(n|m9;g@?DU!%v`LT>J*$=kt_ z9fm&I?LiZ>e|NHPy+OAXXGf?TpWehwkCfa6$DDfa$Hg^pZ0CFY2)Fz6Jzmv?aaQWP zz;u$&LyUx3AGOuity*k>*Q04+bR#RQc#cd{;}Mt+CuMC>H|J~*u({JcT@9PJ)NICL zCPG0~^t~On$>O+&T^|g)Av7WJ6C>BP{ePV#JCc^riN?^Jj`T%`L}31u1*om{I!g5eMPw;^wY5Q>7*xwq_7#*^rME@2 zAtyR#`wd6y_}0g0HUf-^Ae~#s*;Pm`!mj>)tX$%O?C?Gr{g(JXh=%0`#%OcAc+Tf^ zdNzJ_Canv>o861ky*ZAICsId7m9?x{6I5!W$JoqyxNxK1F2|GH#)`#y6}7s6a31>@ z9owai`RF$s&E}E8*pZ=Vag{duk^hfL+fq9HYoD61PW@2k1a0Kj@hngbN1=M2<_1SE z64RLkY+{K*8reA%4ZgE2D*NWBQFEs~2Ca+rZj{KDReB$%r=}MJn%CH#@hJ)&3By z2WaOHzDL_~Yd(GqKV?U(;7M(KOC;#*jVD89X7P>Lkepsmq)WB6k^R+m`2R3?HlS5j z0vF-WOq5FwmPEoyT@UV+m{GW%f;fu1xOg9R!+Z;E)luIZP^`-F9?ZNaEsk z-VBv4q06}=@fiHR!GlpOOj%q^w1tn+|F2}FE8ah-<=5DTTWQ8gxYPwaKi7NCo~M!d zvc^k%j#z|JBxnc8JQ@_Q zv*>60Jz1^emCNn1yS0|+X;)hX$ItsuD4!?-$M~&;7gv$XU%~1TJ2d;rzz*_On+-hK z416J&K4LNBi8+@pd}HjG;(2lxJ#2ltgw71pPtMw^laIf{y_)qzX_7lx&2fH4(aHm9 z*|=hjn!IKsglA|xa)Q1&3Z{+)U-;Ot0KkV}Wm}RY(6U5^eq!Pn)61#h%r}^W3$+5BD~Mqz;Odqj8BqcL|&J7xcRpReP)5P;bA5 z-&ix(0ZNQO`SIG%?>N6DE3({DPo~(M8Rw>b=>nUqM)M@AyKSBxsju9>PhNr&AMyuT z+{D+*9r=sd<>bfBen)xm=1z3tXp|~i{JMx8w-Quo^OJ%eITQW`Nu3SD4aKw4B=${D 
zL+fOg$lt`KEA4GFZ<3pGn4d&n%qev;H|FknPPa?$%pa<~XMG+=H{LMIJ`VnmfFmc= zIsHwBgJX+21C$9@2Nc}Pnl`K9JZa$fe}CeqQ1U%D?*I4A|9(DF&ykAcYbeQqcruG{ z2^#6Rj8OPI=Wn}gp)Oy9|cp?u1(;M_UcO2uzJc@>u z>`7)Vxl}*ddCiG%ZoYg5`eZ>$G|WEU&t{9Wf?1}-k4C#z?7%}b@lJMd4%?DbzT9Rk zN9J-TDRFUnpu}>&YrILsm_)!Rhj;D3ai&qqtt%sXvfpMsT%C+P=l?(S+}b!$m(9!^ zWEFVk;`U``A4^HZk$Q+9WnzI|W(1k;ZHeb-@-OZK%R2wZ!!8+NdyvIjig{SF7WeR^ z6MbsSzHahu9xGCXwM^ujmioR5eogSGGd)a9*&mEa-;?M?`by5~p)gO>+4*`|z)Dw9 zFZ-O^)IC{GwX{^j?^ycVSr4z#)y$tW!^+OzSk~twXwHzIrn4e?^ z>qj;J1%p#rr==|5BWTcHAHO2MYtSHfOqYW=r`|nDXX0EXQh)qx_rq@^Z5c*OJJXOG zP$#kWZ>5zRSk0U)O~up5+B2Zbncf$uv5_=a^e)j80#i2>=wPI2<9~EBvEZxvy&Z*f zYal-OTl5^?w;m)a{_cHgTYGYL8V$}~(RQ2~uD4IUtBr@ryxP{t75~#jGsyaP0g7j3 zvD>$6%-U`Q#hq$rEuQ<>7lXVjEsTfb8d~)*iabL)67`@3?j`?XqF&`J@jaC94ys-- z&AsjHnkLua95wG$Vu0SCQR-`NJL_pI4sHeA`})eb@rJs8E@s*bl#5@&K+q*#R(mDq z8EN|{(_QTfB)mWAxK&@dFVofMztOzh@I6h5VaA^IDOrXstgwCyLG)+HD2Whm9Mb!KEb%Tt|vWgYaR!B(CyWkPh-KJGRx;x{zhf8flld zp43)i&4)TAaq|9-&ppi=5}$RJpXgZPCdRAba~!XX;yYnC8<*mPI+t{2U3M!e9_??* zUGv0z>&52e9!Wnsn#f?exjBx8=k)MamUISb{glnC()}jqKqO> z)BInpL^Q86dcEYgJ-vxHO$T$CM^SZ$GSkr~d5$M*f200$t1Z4R@kAI(qeh`_R;KgJ z4&pC-63!&vNh|Wx6~Bhk#Kg<|v5=u%=$T08EzxwBKE5&%)C6_>!wv?^1d@=53(L^6 zH|sdKQ70pMUuA~EI5`ax>Fa%5TVZs{-o;=Pd4=TMqQBe?A5Ho?`gW@|LE?@6N!^;L z@udDIlgg>E>BSZe0zqPN$HO-{WA4!R1h{u08%^m@XjL5y%USq%L*?fG2Y3?Clo`gT zoJ(h%f1j*(WEYPyp5~U*b6{Mp{`dH_2}FDF<``pfYnXTNy#|XujWqm`wI2`vtg*){ z*$V{cX`v0;yh*kbAu#?!HT~X)sv}9?p|tY?Z8ax*i3W2KoOgOZ3S=j<^i_>C$p|si zZcA=rb;O0_Qj1SSVx+aUZ_o>;r^DRw%2^NJ-FV0-Ps^aERU^P$?Uz1 za;?>tnZMq9@4#dNTC{Ry(UFq`S zD3Q2XZSnOoFwRo{4U~_M$H(ZBUBPSVRZUWTFG#8G29mRe!<61@ z_A%PKQQ+CY`oDr_zagbljVIT_u!mj-qsPr^w9#4{a+*``_(88Xmg!%&+B88TI*=kCGgA% z@R?SO8_D!pEI>^>J`E=uu}TLQ<7pRTTthVf=JIdH|W3lr3X(g7mR&u%}(pV|2=LGd7((|UeSF4$5 z?eWD=wDxcGn6<^*+8L%q&UgE>74b=r*Wf4qCl1|-K79d?L*! 
z2IdIQ;{O(iH-g|f|35*Ij&z|i$R~Mr2YYiCek4kNX^=EfJ6RNS_Pz<8%ROzt-jphS za?YJSh|#p_KoI60^QrXz70g_bN^Y(=tU%x$*r|%(eIe zFGIdSS~)vyJ8Gn9JS@A*c>fmYoc&?5I}6JPNm9FBL=G!`bYT6WXKc-SPie7t$z z)l>3bBtr8Z7PmIK4#U6S($v_vL}Kdz;vQNVYesgck#VW{;XABKZTcPG%0!!7s_hd! z>qr9cVRvsZ;?5xTEj_zV&zU)YVZODlQ7a?*M>Kqh(fBjE-;-U97204XJjzder$)1= zi9VT3V7F^|3n@QF&ox+x86@&Wquez3o}ll}}jYOTd*x~u;yUPeS1!w+7cH;5A+bEORAC0t@dCd=WJXt=@CSS|J zHw>JKKhw*nLydolCOej%B|6Q&@M0|bU8IMn&~OdR?pN-3G`Iou57DcejRMukWK|S< z!E9w!F(%aqSF-r#YR8mR|5K8OCydZk=A*do~O#rxbG0-v7xcK8I91imRJZ zH-2O}(Y%etsR@#PucH<5F@8d?IUnc&x?XB$ZS*c}YC(SY;ALj` z&*EHfZO>Ld|LYoe>a$Y(R~C)0egWwn6OE^?6dNmiY&cv~Nwvp{hQSmI%w*ldaAd=W`W)Ui%z z67TlYeai{uR=N@j<$l7Yq`i~Y>(H9`mETG(yTLb6q!XF!DHf!z@5u_CjE?adPj0~o-bu~JasF>8Fq@s)qC~v3A49vGh9`2;GPSeDf5N-uojD6fj)%h*BiUB? z&CvEjpVn*dBG&bGkX;Ff6WN`I*@pM&)Iz;HhuZgQs}~D-AzCI1&=sd z(f8M2YX^&;;Yeb*zf{Z_lcSPdVxA?Jbw#)* z+I(WBbfW>UvtP*}`lCLFdL9pl7t#MVy4ssm#xG%_+M)hV+^On$=9qs(&lk}08{cB> z|3dd;?c>9oJ(xrBI+~d*VH?nIDf-5nxhpE>{%|XLl6>sXX(2cKZ_@6kD3$EdIn_U? z(A#(MryrV*)_(S;&(=mx*_Rd){y3O(ue1|P9z^fNQ?HDc**(q9(jYxQLr;>w=>#_7 z2LET$@ik^rJlW}J-gt>Zv0Lp zvYg-COV)?doOs|S@_G|=I7$6UT90StLU{Bg(}_5~9WRc-!^DZ$3Y$p6a{o_6qkM~B zLT;Qs#S&&`VzM^I(9FaTi?+68`Qr=qKJFyqT_c;dn_fHJY7 z=HtcrYR*)06>aYW!te1ScSd@9G628t)9!m9hz=iUL|Ej1I~HZK5jkswwJdODbh?#A zX<_DanA*wCme}bzcTJpuocASWT=I7ONK#HDmtEnX`1<{n`G`e-+>>~Kw1ZF8qUD_L zwuN7Fb|L5Bulk(*s$>pHrhxYPi9dEV^h?&2hqRdWXHEc5)aocuAF9QRSeT4A*)gmI z-=|4-4|K1EH?vUVVK{xO_vAJlKr*u*`%APsjb#4`B_?@(F3Z)@=z9{pKY&f5ysTE{ zV>anAD>HWrH74S5g^-+3)x*sdE znq+*VZah5?B#9sEH}`Clfh-x&o+5wA0iHF$FY!Otwqa4TthZIQv>TTDmDz!6IlIi6 z;E`HsO4br{FwvTGb2#@@cf+z|T@a7f2B`4UI^w(kJkPD+?@3`Ikl#hGE8+gv-W{U1 zaxhIC?%Yp3$e7bj4@Z#MoL>Gw9uqgAtS33?EaPwDNc8i+Gq~RaY5GXcg1bGfj`|JQ z#PRsqgro+8BS=T$5G9}bB4rOQ_;x9}mO{y_-Zr39WjfmcG{H2!!pUw{pN=#&#s~6B zzQ?b+0XmeNQI}?w&t!|XX?vgg-SO$=0-N3_o0Y<5SR_8sEpSPM)>62*5j@#V%&PBu z*ah!UqU0xe`m$n7RLVe81s_W?kY`nQ6y5#+{YqkfCEtCXl|a_3_;ePHEKk0E$%cow zCA$K-L!8L$>7yxr)zfm$*tcPn+e5i^on6K32F159{w>*sOw6VlcwP#3!C)<-cMF9 
zx22BO*JgV65baoQ;FOWGQn0Y&)xaC2O%=?c_Tv$vwRpbraXBIV+jj zPyD}D=rz%xmKg(1VJ+6N|L7wB_}lylc7Na<6a-@}>>_v~jZ;}AWZO-gS8!CP!k z$!XA(Vl2Oa1sI4@W!d7zW-fE})!gr=_&FVv*{yy|iQH>@ist0}Ij6rB(YO`Nl5>2k zk>?arm`}6J1t+rc$wSus$6DNEEPR2z$P99^r(^tF0-tlxwhPQ+2WO#mS=`ACC!S6@W$U4CJZ^JKaw)lu z2Vxs-EznxVwy!;}i?cmg$;_0;8fyl5p4hj0$xQ5IqPBEn16KQ#JI14onaL};Q~9ev zbR(`l3g>Ei?rTKPDn6$z`;=VG9%Pn271dtBq3`{sPCPef(bj7C7MVGVJWd90?(8ju zcW#v2fcL@t0Tv=Ll6D(|YQlK05iZ{EQ&1vvow6X$d}0DQid-!Nb$7H()||OT57`5o z$KHHmJiJ{CU*TI5@3(k+9yoWP*lR}UM~s}U{GI^*-18ks#;@|^4P$Fp+K_CwJ3%sp z)!PZIkHvTiY|JKE`D?NP+|B}BYYt-yR z$6szF*r$RpF_j+y&j@n8LWx8=dmG%7K$KYE&+9j*FUgp@2!!|GQf>l%sdNn#?o5Wd zqDR)ygN^9dlEvj9jelKW>ITPd@b8P_yX-B*V|Ft6Uddk0(Ocp+&4%YNJ#GQzE?i0G z)wv){6ql1pY#%an9$n82A*<5F7rl+FUO`)jdJ|9bfB1H-+PS;ZlH?^<`3b21JH6)~ zZZ#G)>yey&jVAFmU{lY`rmPn8_94_d!rQ*Z`Y92@n&@qo5$Pc?b^`BapOPtQ0x3C^ z%&o%dOVwI!)Z2hMi6H(WUY!8;O=!9hjPHZ$Z2I{;4No@R%5XWx+#vU{M;CMC)~Gxh z1=f(5nk1tuiT%A^qq%joxKUgE(EXCa8a}P3F04#Wm{%J^-eBtnpnJ~vE+8c%)STh{ zX|P*GpZ|gi%W$+i$~?p}uB8JP!D~Am%c(#alp710c(r#f;7;80i}5vc#AKk$E=<;^ z-3$27@OCjME5a`xd1FAb9n6`_P1HtaXOrPu1NQ&+J+aQqd%qj6lXI#s9*@Vd)mqDp z?_5$A&)NCvC2RfTr1}~XG7>(yrI&0xX)7^`5-GVBI^=CPFw}t8^>A7PmL29q^9mcZ z$lRkn8F>#}1Ia;rD|2#Mm(0amy$*TXsYYlR$*7J)529(#m9q-WX=ZjJr;?^boWByR znRjJ1y~vw|ev_>*`RLvRbt3G|LCN@@HbSLDj!Le?Un!B8{a@nPYQ23$r`C{UwWSXf}Jc< z?#i#HxrsN_3G9oFRvB+s!6>VT_pK#bqRcwnc?`Y2vU1Em;3RXM#C*$r%2jZ0ti&>w zbUnVUB3}oAcd>HU7@v|MdpRjChZC98RM*xl^P!Kl(gV!heaorSUT+hP=sr4^`(H5z@Q{r)H$&ysuaCo#3Qpk7A1%`DPdcn`zxGD`1&{|*v;kb0F_ zsS=O5tWVmZNn&6;K#q2!*8&o^8pX4=&D>%*DY}P+|47M2D7%2(FGt&vT0fcOO`%Q6 zk<<*n_n}>6h1nnauhbi@r-ZJ`af#XZcj$+F$1YKfVzO7t$@YSS~@8SMI zEZ+!xp9%NeQ_a1!j<^s$AB~1(Q2o%tQkC|<4t{I{OKH$##y#Kcb{Kk|2G1+?ox6K) z(xH>o%_-EM%tmsG`A@Tyv3mKXPb;;vlzu0_W^MS7gx?+-@CF^tEy|(f=4!LCrh3hN zy5GSrw*?P@%OBx#ptqxn^~6qgdyGU`&qRdZ>d(#?0T5 z&ZbHvFIGHYa}#JFJvt5_{|4)7gl6pU` zXMLMhRcFvv(r)gq9*J8=qVW~pUSJd}XWTxDbbkVhd)29I4*#UKUO}CBOee2UPXE4F z_9#~42Wy;UbBZs+C^oDtdiDpw5OR4x&in!mev6*DQ&0-!7PC?1eg7p4b6%Ny!UMcX 
zmgsoS=A<{i<1_R)2Muy&T@t$~R-+P|Q;Veh#%wT=yR(WIM0R#5*Uz5f1Xl1P((wyC zd9a}FFg^au=kt{e&A+5+}4@#ly&u}C+!_$BAr2Up8mwe9OoN|s+ zE4>H4Py7vBB~VS{W~}eCFQyI*@tvIfRCZ^NMl6yiC2=HcrNrKL7$>b-fbkqSFi)| zJ&GlmPbL?g^q zi`J{r?@4AgD@a=l{Un}ZGc}VP=R$UQ8Qv!k<~yJoiU%*D`hIe?3Oc4e);=d!Fbl(|#8TUo%wY?-9g!%B4{?-yx%73;Rp=X(0c+~8x> zf1edfj?Ag7-!JIIpYVPdTbhX8J=n%w=rx>dWnR#M^~>D21?leY+dtXlTUo2G(7%Gd zGIK~a#l(deNJkoavdfGmk#BmV$^Gp4Xd19guQ#J)Ije<4dYPYElU;=Dp3eg5sbn(#DffXPSz!~KK3RCPnysqtqqtN_zd4`2MIXs^Jpv7LRuXUA zjYgHo)P-j3Z9(1)ZDtwAa`$_P5?`Rf3u>}Y)wVL=xC+Kf7h*}tR zRw;csT4i0XLPqQ#Q-3@qah74NQ= zDED@;;@!t4PS$SZV3Qg@D!rfn{~uei9*sZK^I6*2i)z^ikLS)!+RV=LI+Wc~Eqq8+qQ(Uu5+id0E!bz=+FVd1 zIi4ScXJQ7Ocsq(OhtE#->Uv|$W^`VO#;rjWPs*;a8EDRs+(J3= zPJW#J#+5`;no4#PHSIMJe5UPMU|r#Ns{WE&?OHk&Ie!dY5|`;*QgS{%)K|6*`phwQ zUJ1VVkUxad6G_5KP{)Heeylku%Is(|%e|Y-&7`UEk4!Yn73NZ3k@i+>%|oC(%)76R z+RtmF2kOT2JaKfsqrc;_?DxJ($IfG54y~jaU4PDM9sqs?FecfZgnz&9MqG7yfZbIn} zdhdo_FZ=WjO10M8Qy}}NKBu8gKXAvI=5|^l6E8K+#83BAaueT(E$nOV(#P*=wbCuM z{4hPpP283RkIwZZxxdD+5*rJC4Af(89n~i}SJ0|u#WOij-FZD;KCMkCYuwB6>N5=&m$SnvfGJmmNV<@dDH~s2JPiO))VMCk38NA z;#x4uEy=8Sa|d84XcB8}zxirnxGq!nU2lHW`c>rOY4C>rb=ale%Kr^y_xeA=T>b1Lu*=5;;DQa%}wsK56sJwp{_efI~2BbEj@V|RJYQC#$>x-JW;v0(AB6Fe$jU6U`rIm? 
zh_)G-29lWAzIY|H$L%h7(NcdO;Bb7f5({cK>RqC@CV0|AEA{l2ljW5tnBBk9Ry6Ob z-wf_EajBBk$zytNr=88D>I)M2A{kGluaz)77ro+VwUKp7%;ns}&2H9o^m-RwSq<(f zd>7gldN&4sjXl|Ijk`h%oAs3&RkifKnm!H(%M2QpTTt2WdP4iz%Z&d-cQj1=s_X-H zg7H+J-_XkQAib1ynjmR(JWWQt0CIIGd;@i7Wpa z*zX|mm(q*ejrr#wm9*3MC8JIv6Fs^%-n;E8IvnklW0*8>*MR&IL*wqU;8ra(;iY@%ljexZIO@ zXmJNhwKHnXMU&au$^C*l=yolu6HkYnh7Ew}<$Bnotx0sOlOD<$(O1Db*_k_lExr>^ z71s1#QrE~zDf<`k%sLt+>J+k+NEscy%Z>B0M$zx}keDBTK(Cp&-3&+mfVcDTuOljF zkL5EIoQzuM!Da`lKda1dNPIcHpQg^So+je@2qltNyPdiF0k+Y3@N+izXE3~5tXcPCg zr8XLo`m4bCA>AoU&Yo4H`+wF)4b?~lNd71 zZ52=ADriw#O9%RY1#QpjC_N-&PohWVe6(qyn`e5Tw`Y=+-k#-zJ07?bNlJh4<$Sh2 zI+jGU&S_!dU^MVFF_wCo8P=yiKl+|0)6u!FPl@|9Nbfn7J`@b4>DC$QJO4EryEED0cpqnPAS>xI#_*C%QoYqUn6+uD z?UcTo9OoYJ70P5Uc(;keRoN)yIB#4N)^UuzsKo3$(CW z--)`C+igGkpPpvX*OJzs^Zy1T;(31K4LM6YD~mEiaWcE?tBnSyYAb#WU(<^>VR$kA ze{TdC369Kg`k9+$=`IuW%yvTEJ6{|8){!XmXVtmj3n%K0d`8nvE4qmngC8Smz4bpfl}-pKt7xL-7W zoWb_yX75`?fA4wU2zH;cpo_gNi6S-0xIe=Ji`wSmEP#(_j{K2^K&iPp+svO{k=AJc$bx7c9OEX&s_B#P*w$he-bvUn2pCnJKnQhjOYtln(Wnl zj$%!HYK;T6wUZUf>7+4nn_FnRJX=x|1B#$sA#VmKIOh@SFIe+CXXgTC1;~m(6t5`+pp(S%^H-?C-DT% z{5&nZg|_#T)WlgU$?E=&-V-k?djW6JpTwi$Y|OHcRwZyu@;j@rax+16uQoS>V3yfXsFJhuwQOp9 zB(Bz54OTAs{x+jxRnnLEwO?ts9l6~@!gE8e3TsmV+~3lwL~RJalKFKViHtY-2-=c) z(7|Lj-soBPB>wz%*v6NlAL`$QV<+L&lWJDS!QSd@Qx;1i8gcwTP!w;_B}}nfMuJ zN4&8*i8$66mWk@rg#GOY?>S&Q2)PzoUd%)jHLfj5&I-DQR&GU!XYn9W(puog8uENJS;;!F0ce6xNe+pHT1%FN z(w_DqEiJ&*ORXwK*5tg2O>JAq&*Lo2A;n6vWbGbHdy5%<&RVnc6Ca`qg?uC@#q}t= z*I0L-7VlRpRy*F!i8wJvZ#ScFH`pgvRtrBlrOyqi_fRNNw-XaGJK4D_om*kkVbF}+ zC8E(Bus>DU;lGgA8f2jfyeg2zM95BLf%2#xulkz$SU^tBC5y?anW*mHv0w4MN~G@c z>I{L$?dX3xnEr(_FS4HPNKR$_*Ca8yZ`8`Ctn71pFi}T$!(q5L_n4*Tj$w4Ml-bAC zzTM+pMu=K$#}#0{1uc?ctUBnfBn4Zwnfqc{&rCI1w%2!x zL6-P#J#HkSmwUU(v!>{>m@IWbhc>ifkoG>psdnBZmhw11S?k?zH635u{@}~`ZSJ{F zN5QOJ#w+nN@|So;4cY5aG$$5zMIr6uj5=A@dq> zOXJT+to>^wHsj1a_<6F{2a>A;$iPo4#Y8v-6Q#zdL;o6|}!-E=bMaW|2CVtg9PX9LgJ}aM(YhIdNJz0L#HOObu;j< zGmYubH!YHJu~95@xG#LVLc6u`CbN#ne=}xXq`l1Q 
za;NMI?Qf-_`&qqYQ%PLMw7-ZJ&&AJb+OCD>nK9-5+E`libx~rUo>uB%Sn=&w>}VhD zcV(fH+kOUF%UWp<+qwZ48|m*mJ;$?ry{9YSycVSC>oR&ZolfK~VCJY(jVi6++PirB zd(eKZOx6kUq@G}WYC$VHlhWF>{~OkNsHYQIh0BZ_RmoZW(&DAN1B_$Q=UTX54ud}Q zZX!(?uH0yr`&#d_dT5KfzXD}$l=lH~_VMbG^hRLVKqImW7-%H95Jz&Ve-*xsplQ$R z`5SAqjMZbj`Gaw_D#<9X_AE1o_kF*XZXd2p?mgx1QDRrMFW&zZ?S`sZM~~f%zpY^3 z1)t`FBJovL(ZWXL{&?+WX1~=uEuO)-2bq(>YAjU~vec4=987!PV^tIBI48C@usEnb4Z($}d*mYm@wZ)u_dCGThr9L+v* zO=U-FHTRo`;@7JnPk<8uozZzNpgq}IExfy3F>hD?f-0Ddk?dR6iRmGa2COnp- z>pLvR1X7y3Sjkj*3S8q&H(g78_0X3+O@^E7GPf~4 zV7<-LDduJ$;_znrWJCq|YIQb~&b6pCnJy=K#eP(rqTk#JdLLeiLiQR<)Lj3A&3BXQ z`fHS`N=q)qz1#rJiQ!Ok7r9MV-_KC35o?m$1G#ONxLJQjt(+xH!{^=Lonb^TQ}FOp z{dH8ku{SO8dmT=`L?YuOpRAb==x05OWshl;&;NqO17@ae$!5+v+tTN$AlgCtPoc46 z@qY(RX#}hI8xCU0vfsZC1e-mr1(tWM11{Iz+o*F28@d|CeZlZK-Mt(QCTsN$T7N0( zEJfd8;OI(z5}PuaNVC^5McE7Cat;l90+zW8pU7BiNZ(8{mFNNST25A^4(!bW{WN0Z zx{`|BdW;DapCsTs^HM^?zdtD$;>uEn5!a z>?LIH@?<3s2Jdj(O$_P8<+=@jzA}cyc4q%-4Eu5&DvyS3;(Yx}tv)1RzEWqSUnf+_ zoIBC8M*AKQ+F3NJns$ycPBfwOi3$EXnd(Q&y0RIW*|)}*8tB?Wjhw6xp{KR9RtL3n zKjw7ukgU4NW0Y@GX>~k|pD^yUS7!^k$ebfSER#{MG2O4B@9*hOXVhu}^6SABFaK9z ze6yLx`>a~-jOOnCLNI1toUCy1@W`3QQ&>` zZt*vF0upU8qwH;Zn62+QEaW`jayxoEea|Uf^3s=~mATvcjqm&E=PGa{3dDyfeGg2t ze$9H~Q+Tgnw{vS^DEjxLhgm)EE2O;!z6~PbS$!lb$wlVcIVYU1$LG+a84YX!v)f_2 z30*JKLSm>_FZg!0F+AL zD|@hwXkA8%S@7&aVoQT1IZzU_VFH?GXCXI99``odw}$?eGq)QI!|Z6@$ud5rzQm8fuMlk-T_Kr+~qK7Q^~aQOpn$NpCbtT1*2@Q??gMo}zv& zbjTRC9Y^2xW{7sH`8ytkx1-yor14?g`;g6wF2+V4fDezL!fD3XR-nvnh3p2k*J>hE zoU7+|$WF3jXUAfwdBAdGZFcn&<1D^Z50QbKZD+5jyqfQ#9m&ci%60bU zGCk$=IwOA~Y;*-v@~FIlHk18L^pTO~HYd{HRYvrY27bl-)*4|IeomWUd1Ud%<@AswD^XoyLUwwUzVGtP0PB zM~PSZ1Y`99WIQpRl0WD$BU{ci<8fRPW$O@_mUu3g{H+JC`2VE-9J*K@R#{EPTdEXT zaN5l=P}UI3QlSBgjX!FpTJbZFH^V2W(2xd|+?N0U z_tl^%UqY=!ERTF;=jU>t4@3Kx>B4s`WgYT6*QlJ>kco!01qQq5>3*Z=HK??Mo?JwN zvT9J;7@TY?Rb0wOwY$Mo?ir$aa z^Ql_d3aZ=@oe%zGN@_*xSCW|c_&xy3Wmbe2!!KS!x3h>VNoZCnyI6xqJb6^P(j-3m zl@)ukbl+>HdKgOo1|6o+?hY(`6E**7B^(dof zY#hva!xF9Kp6?K!b{RvTGMdJx?-%HB7Z_fI{a$#t_H-NS#A`NjiPzA^{+`xi!A7Fr 
zE2QEut#>sFY}9Mw(KevHW3`w)^W=btztJ^nX7*E8k4uZTM&L&Fq7&=60)C&1iv!8P z3iQkEn_1=r@iBg%oFt!7SJ1R$DSNP3Q;lYIaG|PR_OX|VA@vzei}%BwG&{5M;rbt` z&SM~5quo0|8@XwPf{6zEkun$fzK-rLsMXO0;vj~OZ7HT=?}Gj zXn{c;(pgT;1N3+id>f(br|8(tI8zneZT=s&QrXcoGW`!TV(!E9H^^^U{7($? zWLM~ok2e@wb6R{M*uOxr`ubl0^GD#8%u9()miwdGJ8Xk$AJL3;EO`9<=V@<*dOdKi zFOFnysWBOAq}|uF+r-nv=I)`_WYx%OCp!eW@%Vdk`93^z`g@Y{iK3f4+Cxdt1npPV z&edpG6%{@)-^sm$J!CaA*qm}MrB#_B?FPkEQd7Y=cA-(PH=A%DD;_`d?~Lf#IZ4E- zJv8zHBVoxZ{~WW->{C=hlj90KkC){Or1u;eeKuLoKF=h$eMa(526ac2o$%@+#%-^sEA_4N5GeCpl+Wrrnlc=s>=5?v=CT6`-x<%_Vxz z31IVLRv$g;iZ_qpd^`5xMOdGsbaGC|PqeO)ubL9m%o2{q@5~$fvV9%#sTv3>;@ns8 zP4xUP%)8%wlTfx1Y*(OF?l>gd+kvDdanWY88HuZvlj}qd zJjCD2;Mum=YiUMiHYqt9bnlp}t|v9|uuk-w{RN*}dV3EIPQ2^vWex)A$LwVCa(%7* zbzn>+p|+rV2Cuu(vF)Hstp5%E&mvD<>ECszl6iL-G93@JN9g_r@^KhxI|BxjQSduh zPhk%e3u}fFtw53UpTo#U3-v0%Di-f@Z~u<^dr`L~`AtUG#GhTG=VN_NB-!K0cd~@# zj$}QORn-h(E9%BWe>V>14)GK)ZP0G+Kb?eo3uwX>+I$aBHhc3DnaL`4y_tUUy>HS- zusD_c9Z5<@qsw)LP0l$@MzxDrjo+hg6D1#}PoqE*pU?SViyuZ#p6j9T$>2pd*T$K;_}vPPK8H;c-{xv{gc-o%s9kq=vv)SC!NquMY-TB7#%0|AOdQ2?k z>&V)fBs{n363O`m81zBOWwhcP)T)XO$#9uHufZhjuljj_1SS?s?wq{@&XIU`5jnXJ zh2ufgS38F*If^cqqMx7OYa(4u#m_Nh=Z`E?Ptx~KG|!&ydD^=hm69u9k-p}cU7x4N zjIxQ96D+pDJty^B@S_(Qi8uZ^C=?BthR0W--NnX=n&f&kuDAC0b5y9SPIC7&2kFUL z_}2GlSk~;xmtkWPfh5sObK~bk@0+tIU*Ss5tS5N4*!(qq7!^@!G#)MkZ)I}cQro!= zIE==}<7}s1%An6SxQ(+m$c@LRS&QVqEkj~5JO7f!D5u3GpeTj5@%5?zuj93Lota5B zFl`}ob8&kaI>mSYX&QAun|h^liYv)nTcw|eZ)u!50tc#+rDR%7tmdvyWj<51R zwbj>eyk`&bWT#p5nP$2hJ-L=0_{6uDNbEReviklKK0S>)Sv%%-_uaT!4lVEUw_y5aA#$h^;BIw z4AA=w?Yvv?YzG+QWuJYJ4ZzKt;dN@!Q_jY})XOWV zc`z!Jq8o{TmYC2@*@T>4-&F8)fOm;z9bfa$VODYnI{xa%!~XC3FVD6mT0|L?_(7X5 zXz>%Z>KiB0`iH&^!0onLspEOP2NJis4oHJ@e6UNYyI7eM^^;X=PW-cqzJY|~mUPZ+ zn;UIjg<;m)iLP>qPw}gM6IJu+6m4d;`9<;f<>J{v>|QDK&zq8y>ijSH$+>mvr0qQU z@2d1DZRW%%tI&8?4ERrtpS;EY+d|2wiexQu&|bxbN5Fd&=o4+REd9Gf3yDu=mjUVv$Q1Z8)y#2m-o7sNOm2)#>ulFl(DX|7t zYAyE=6Gv+(dyp36%m1x0YLz-~k%3V(>jCZLj4U(#Z;i|~=<>y^#b4Dg%i8_M=Tq6E 
z>Ui86{~GyzBW}l|`A$%eArYBXW;gU^I#p50_t^GCW9_P^v2^Hb-=~AHExWu3*71p1 zMavTjCj0GK3*`3g$8_m&7;J@QPO^8icg^U@4z>OP`hStc_}hK2?(MX>Cpv5c_XL`h z`yI`-(~ztuBl|IGKLm!F`u!N?b0!;W7;nI2RBB-?I9!=UWO;#+X(Bte*No$KwGxdu zKHItJbrO0ea(n#Evb(+tHCL0{Xm!;B^2@;c4O+G~hQEnZEy4AWQDKsH-$j)cY~n5C z{CFjr)9dx9)D|>ZBh@o5Walxl-EtBV|EtgSmrQ!UqPg*V%q%72T;lZPq^zG2Y&m*= z;qPLQEcHCIxSSQn`@FKYy5VZ>d1ZevJe^|Py&fH#d6t`Wf6+_MUUFu1Zn4&!1LChp zW-ol7=h-?nbDsSGeoZg~$gQ)>wB3PptRb)5Gv0F<2+#!&ll(jIwy!rmuMWCl5^%eecl92;3p7aC=A-|+(N zzky5fW6ddfe7)m6Itwoo(QP~Kp8=zp=$Mnt5oncrn(<5joF9qDh^|$Ytd0co-)J+$2yeC6S5O+nU5?Uul^B5(B3f zOI3nxX&P_@e6wS+8s8JYstyLC>|(~_I=f6C>ghA1%6|PdK%HO1t1-*FO3Pi%;#Pn$tH>cn z^u)2LtLE3Z^n^FL$JYuxmyx0)eM;u>>;h#*TT>fD@in*Llj$Tk&X?iwCU#&cnLULT z&a+w^G;RUG6N_M8~Y1KgG#-Ks<;l zZ{f>)l>Y;M=4|97&~HS i$&yT>diFC9gOUc%fQ{O$ccSV%TUe)6sFs=M zPVhIwldo{FY!$Zl@BCl6pe zKN3N1C(TRLx!k|Vnd=^$&aKEN@VGN>GbC;Zz3GmB-4Xgvg1){0vdHQpNX!1$iioJGM_ciUF7=4ziMjr(t08s zjRr;bN8SQMA~_!px`(vX2ELKm$Zt*{!@-_tv;u^QGyU(vYHc(ZPW*y7dYkBPA`py5 zix*g(BlPmHy7$^atx5h$?D=*rBqrr`By|X_Netgrdiq?yJ6WK{xc5E@ow|FlA-(4g&zmHFs4+16(o4wXg-V~P?yv&l z$j{lN_!(3WH+%RSXmSVUP4$N1NTO6EI?!f6jq$F9d0@OII+BOn=AQ@R&$OFNUt<|esKp**Cu;i z;CiecA62Ff$P+`LpZ;P=2gCLfG@o6J{I|k0-f8{RT?G2&zPD2&USsjko~P!`p0*~B zdq~;|AWbCnZY*qa+HS*}PW~s>{S9!P1+IUiy^An5wlk$P{8KDh@Gdf!Hq5(D;aJ;om;H}P`PKa95Bh@yW5 zX?;(sv4>yi`6aDnp7t>OUPX^GuuBxI=gHTY0*iQuUr4V?ylE;GtHoyO)FbPkknmRY zd^btEkLD(}%O8!~@s_!TMoeJ^;z1J6z#ZC)@5rGr=>>{)h5YCIHF2tTvdr;($gTNg zHX48;JGF2*T9!xEhtTpm7GN2e2kR|S(Z-?a4qQq0rVn8q&+**wZH1;e!)oa7(PZWx zW$yMY@l@|p?_Bb`1`Lnsqjko}Xyk zuY}&)=xzfeMBQQqcMywQ%loEA@k-kGJNQcZoZXEDAUFv;FVpwzzx>IQd0=^4`>(*~ z5Lkq7iP?3V&)H|Vgq>(WrthIOSr2Con|s>_s9oCn=M$fIgP>F)$rqsS5n%n&_lhh~ zM=cNVIX@-ON=}cnPm-VbZ|6*~gm1@cDbc->@9C^UXAdAZSKcUe$lpFUyHU4{som|Vg zA@PZ~=WF*=)K8x4XhGJ>wOP3Besjy_5~Z?^d<)kTnJKv_2a}vlBxI*Dng8cp^*uFz z$A0uO!hMD#cNVp-Bs05N(Ts2-NW&v6&)2jjdwMO{yfX@VZ=$Q2CnitmQ8>TJ9H=&F zO0=QmgH2qh4G6 
z0R8EDyb+fk_xb;f-*@=F%^3Ct8Z0FNjp3d9YV&dYN!I-jey=S?o;p{W5lKCOp@eA2A(HP+G#sQWFd#9!+S1{FvFC}7o19J4^t}n^x1iQEbIioD9FEtC2~~@=iJ$odl-;78)M$er zm%=95Ku3Y>45QjRN+w(MD$wO*=5YP=(Q16Ub4z$En*G_Ek3lvTlsR3>P2|m_?Nrh_ zn7o{#TyAY=MV}GpbYtNty7vQEw|d$Y1#*tK8J_X)--3G4q`6v5tjY!O>ZV3NG)~0# zKcZ_N*knGn-+ZBq8n=__#Gs6S_dnUlk)HJ|DD)5rMtXX$_Pgk@5ecYc_L5k=)9`+? zPl>B?BR$=!pYAN@9M-QZS-8wd)(6L$`t(;ak?700DfSj#XI{40lk1EQr{hw*E6*j> zT}a|@{Kg;qRloJLmixp{!e_WSTjH$__AEKR)|03sVVGU(#AE$dJGIgJOFCB>d^xX3 zRIB7-UP``KfFc?s=jF zXGWBG+UK#5neFDpxd*sEq@9_E#M3)o*LzTIyw*mV-~FJM%gNBq=O@bf;{Pv==!sLEy|33$ zqaJB4Z?=;2=|N;PCuL7+sSAnEOg$0g&jH0|@PCSvy+~e7+^p+!@_t0JR+6N6*7Rjx zy5i&pH4-8C4Lwv*_HF%aF+)qMiRG71*}d;iw(7tk`Me{^OW28_IG^2>i*dLUTQ!(g zE>p50Da$(kG$l&T0W!aTTo32K?kN0Os>C%~Z>hEXoTJ6uMjgRU%}1e`TI+?2?`mhT zKC{cZ4_|Wj9GE{Q2@A>6^{nDxRIO(GOYEc8=$jbd%e~tTg71`Hfqwrm+wKeIe`)m; zoS6>Zc&XH9{j$rp7F`o1>^!m=%!l}!7zS(f(%UFqPdgXm(M@1oM_+qtYXv;_l96Qb z&28d_taJQ`rhp)}{@1AfEBsl4a*6ym5HznBcKIgQ#J{F9oBy%8xm%iOxy$vHHGF>( zuvFOrp!)-vhz*T|SMzx$=}&})XxByLGPhC=_jHK1vT{0CJGlcjM_bp!=B1*q+#Y_5 z1z1iqvjaI5<&$+fImfT3VaJ1XGX5OOdY)oVI?w2rxK~SP%PUHU)APyP!^XP%K@mUH zDq5S4HhoD)^0;Q7BCF`%(#%%oCC3{pUL+5>UEc|BvzMF*bo=po7CU$sYA?mhj8Wg4 z7sTu1R2uj$Os)h$d@O?NH8`2sz&x|K#5|v&ZbqQ^yf)F|=U}_sh?o6{&v2)zIzvz< z_g`A-y|>T(v^f$~b$z(lP3eKDo=hUY$2v@*x41DU~<0?=; zr$jwkmG~Qjt?bqrL%PB%nb|+4JMn6ZJZ5({qrnSsO+2_2?0s9koUBx0Jif?Qtwqn= z;eUV>_B3wg#&8c5jAYCw8*TjL+;ajLFZMh;!*|kx+hI15Y(C}f@iafW@;qrc9E90R zFNyq|O!w#!vjrO;YmSa(~tXWiU$v#!Aoo0~D9pI{>(QSU@GjNBbOD8aCOwV4UYz*dLmw(iAtA|+r6k+L0_43R`;$W-To~I%aWvpI5C~& z$W8g2l-!9LFKb~Ax!OW9+o^qn5vc|ly$YNQ)Gw{q14&Y5-jBg7XR^5+8DFv!aAH0E z&F$5X+14*T??~FKDY=LSg=vyen9N|c!fXt51^@#oF_IV-;7 z^qw=Ue-1Ht;zov{|G4vamJZ(!!kYeE`awuVocd z%1;>_yv>RycZ_p?_-=5l0l|?44}L?2vliV5qDrLTeQoC4I1WZ3tsj=>#KSwk?hZF*}jj-ZyBRZ za!>vg(fJO5WseB5jrZ56qE^JWHh7 zABuJJL-^N#_C>OC-kmJO9gW_f7LxG;Da#$S#GO8srmv?bxet__#anRjU2g^&<8y1T z4Lg;LHQB2lsIBZA_w}ql9o|#e+Lv&mBzIDLn5JvFFIoGX75!MfAGCV~jqS%aWY=vL zJ9{w;_kr5EF`Tw&-2X$^oq+vZrv2l$WLmVRgh)-P5F<&VqM4c+ 
zZK5Pgi$Ru1LRzIg?F$i-LRz#4Au=sgqD7lRHK+(lw3u}KU+>R%{O5U|-}5_u$9>%Q z{r!GE*K+RHc3$VXfIQ`fT@?_X!K&m2Q0A?zNn!kppJgj%8t4r$7nSNsIC zzdZ%Nb1x+)$W{HuQ?w~-yvW;Htky=>V-}4#(bIqV&ToSWf`j&|B<%d-s0UB-!_}=TmkNz%s&2w0@=M!tc!Z&v+}+;h+-4p7lc-m_d2=mpW`BJvIVr^rTdJMp<~)WS&PiH7ysQQK zN3_<2bhJ{lF3d;JutYV>8GEvs9H5UYVKy8^y5r09=o(+yhf!l0h{Wkc1l-4pw%WS%qZ{6|vuOxga z`8fiFpMzl@3ccg)VtnkS{At?Go#ptMe^o&75S-iLXLlIZ^Y(ERS*zdsNlW~oYol!~ zB~Ag^V|dZtPH=VyezFeB2~c+3e*x(|#lByA{77t*eNkW(8joaA;%7DmlvRqkU{+7@ zgo=#JVH0cPcA_3!uBF7NxeqU$esA_N!`gf_ASaM6H)F3)YHQ` zdKjnnEc{P?-_Ba=N#=9ne4PFhajqx*%gxoEY{5CWnX`+;Lhq~0My-u83dGy@d-gA$ z)oZ=YuFhxv);A(d#f80b>=C@ljqJn=%9%+uqkUq(e!&$EU8^=?k(YupaAaPHW;fc&9cm)aGascRU_sr*{##&dK=@ zZSSi_J$jKC%x|L9Oc2Dc@MoBfW+xldHwiBxZGd9kWBU*0WoDo(ZmNNycWB8=yww^j3$_ z2)yY^4yV(=R{BV;@2hblt^bTB!Z z<%I1o+Fq>3`raO^^}fnION)<2&ET2chRESR3wS1?_RpX^35}1?YT}0{Qou;Ro5<)> zTC730w=ZNSJA>`fc9Qx1Ia)lOtc^s&L}1KqrgC7NrGD&QC%8lhhG}6XF0JwG9eBQ? zjqo&dk1tIu&H(iOfQ)oO(PZ5@hqb!i+jr5nn=&`UXr@|OQ|tnQw+cG;V=uG%{fLyW z^dzJEC(0#q-!JgW?V^{w%Wadaq7$!qzVDN@Fig4r-e;FFxwZzgmmSb9QL^IOF`PbE z*Lyr?X8V2%8+0M)rjz=_>%SXi&cTi3xy@PjVYvPR98X2-?O+yJPe#X0R=E3ya7I2z*~*oK8Ph5gZ{Z8 z)dC*L5;(ljg5+!*O(GJdsy%C+JKIO%Ry;x5>7xf&tI)dhyekKqTIOUct)tiAOtR%> z zz`|rd<#C*j@6^fq{yVPqhr!-{S`{mg7UX6}^3jUE)FnGvBPXLxe0gi)eQv^y*UyR8 zI_JVQ8GCZ;E~l=EjT^7voXchhEs+Cj8tXH28H5V|(9-TjU(Hdnw6e--=~YsZRruLt zzNP;1W;g%K((^=s?&13$eoL{2%di7)Dw(X%@mPwFLQgjB#$x^U06C}tf=iX(A2oI* z@hi#y#eQ=8GkJ^;EMTn(sz1XrQ9eSs^2%ioB9X1)U7eNv4xl=|Xes+NrL|(t-j4LS zj9yD?;6eUpy`5Fb;Nnd@mlGd(2yJRcE8YZa_O9O5_D-NGPlj&sX@BK6kk<7|Ci=j@ zg0jiTl@;SoMO)?RW_+KoBMqOD_G*6L1Mykj=7caYmP@P41IhL-AQ|gTyw#J7x(rG* zFFxmcR??T@^KQj@>k4I0_C2e}>@AE%+3S^#cX1^Wu$}jb2%eSljx5c!{_jwviYL7lW5^85-^|L>EzihY+BA>m-;?i zt6j+LYH-xT(@tb5>!_?SldF6x?8>urJ++xzbc@-l?le8wKyxk@zsXwW4Y^PMJ(}jm zX>LmP)9Xq8{>t{=XjahI_>(*d->G#V>zy&+T~_ueBfQW@ZY5i9RiEEQ?|$ZHgIK@Q@u?pgJ;1K!gl?W*GNNUz zywLN^LElCFznhySzgc@TlFn!|su;0;#I>a`zenqz(4-^rVHn!HjEW6u$*)@Y6Rg^4 zrv}SX1EiOL;v=^6S=`8dm!@=e4Qn$KM91qRaT8kkNo1UUtn`U^Hp#p?vA1gIeTnBI 
zizmlv@eq(@wL01J7qy$%nQe_<*^T&9Vf~`hKdGO&>>RCjM6-8Da_+|@_fVoUw!pV| z2qy;TaIGb4M4}2;La+Jw+fDsM2^@}B?Z{kinBIlD=du}z9<{+3-3P7XVR$UM=A18n z#XrFJR9KCsNtr9;?&A$OT^9y9Gt7Bt{C*$Cn=jzB7_Ach?=^qpK@bW1-RQfBWjI*r zKBTQP>CG%9_$0O`5P<{&fy6Cf6i(_cbuHFvN@6qV8 z2&^Z;^-I<&9=KnlT}5@1e=^a#hMU2itGDCu=t@#IoNT@avvwrth{AGxOeW)ZeGd*M zi$T_kISHRg>T)9YJL;YT@0obhN*nF8o2VHtvo3FIbqft!P0wq>YX*#R!|ND34AKA1 zq%do*s{X$VuFJrk^;uU?j$xl(1zqBa&jwj3C+}jsxJVn9vO_tS{{ep4&pKP((WH7d z-pBKzCECZQAih<}&XF>wfGa24kLdFez2`Kv8GV{Yx06pNv1}8Aw;A~ED{#E3mF%#NH8}sPEZ^q*CFxi#H#Lw?Lu$+O4ng2ha>{sM-D@n|$^C`x@~$tcsC zeeaFR^FiN4kBJn!1$@uL@)X=k_Vr6WPhPNOQc5P}L{bVLv#Xd}+R0NEul?8bkXvMl ziyd!?tX~otx}j2$q~1Q2?lioO53AK&R`8&&I?3^q7|NT-@OrJkrr)p7e;K=yJNn5U zlFR{X=t%ApW>;glcOBTW1x_(?V)HdBU(1GN%*c#9^YKBT9}fR>(J3*~Ymm|8VI8Bd zoVh>m?OtYyYrO4M&@!Rw0f>sa$=*_TPU)!+Z@tMr3QqQgz3`|+hOQ}lK}+CHqc6}UMMhY}zE4|>?F zY!h`G(9lGD-5KOZvPS2j_Chq9MsB;JRiHantAp_6VDq2nQ8u>mP-TCn)%&r4kFmr_(B+2sms(qc>YL$|thqO#XZ-zJvMgm_6R+G4$-xv*k3orp)jJmU z?`r2qQ0Lz5rDXgX())#eaz?kGHm(3u;v8g+HI+5WNU@JHE7+yP2yVaK)eeHNp?HnM;Gy7_i2K~7_z0sqI!e@#j{z^@%j z+y%bF*~Jd{Fogs^46_9+cXlPlpv?tj@;swO1N2HHIDCtaAp+w8R746Hhm*%JVr`0*ABD|wO`k>GH4f{iYr04 zgoZriw<)|5k36>`*P+_IXxE5jg@!Lr;xN$@S&nUe*kgf;D(nOeH(*mCSDw*mr|ypR%Wj^ znAz`6*T41VV_d%1y6#?(?PHd85F6Ks44jN=Pvg}9(9|YRCCh?ia50vjuEYis9gmj5WFRMwS)*oGpbfn*S8z9d?gYlhXuPB6v(0@f_&gM5 z$$ymn{hP>7{FKXSE2n2~7Cj{*T)bM3N1N6tm3w&4>7_GUm|MCVw6PMsORL5*WNoh2 zR;bY&4dRPBn~e_KIqy%Dsqivq(pk@*U(kIV4*lx$o2*JQ7Qc=@bIHQ`LQ7lVS56JG zdQFz2!9K;i_g+1O7F)>AV>ERd{mL2b6tq1MTv@Asi-PB?@q_kuFV^YXlZO`SRYUjO z?A;y|mDKwdOjF^J`?8@{@Oe+U^fnMAS{V?0*>-Z9_3#jqv6J6mo%cDJ-d1kg|C`yBv9M2x8u&lBC^3ib zKF{7XVNc&8J8S9B0(!HRWtwh|dL#;cgOcryvv+AX@oo}LbOZ~wh%M^NUd4YX*^}28 zjnBf{0qlQnN+tr#7QAg@zICkc$xWIp@-vkEjJ(L7X+*yd-!EX{H{s7T{T|D@y}50hqAy_-YC!PYyjGQ&! 
zW8h1s(s;T40gvOibGw>tjS)}L?wt3<6EZo>=8?LL8&i$BkC4I+;7&Y=#1MRpjMpn> z2s!W1DNFKa#_#lF)JgQA?ckNWVW-fx$4SXpy<`QPn~irU*BVVOB`f{e?3Y;ecq?`G z`BdL>^YSd3a%0irWi0noX7Hb~SXoJARn<;AKNLHqLr|(7jAycRm03oC7_@!KZ%(7e(2;oZy$@@lyMMB^(2i_{JxiP;M5^Lcc^e>qUl= zF=+tl&VAA;ap~7rjm1%H)*KUAbiUt_7P5 z%?fgN^igl_)=Khuf?uiw z9gJPr@SM-Az}=knCUeJn)cc_5yDlrZy&B1#dR(#QS;AK zVB1Q<>%eCjeh){r1&b^APu6ESYN9yL(;~8xy3ru_ql69u<8jC4&97yTF~u>}GP1ymXo4RKeqRsC5-iUIpW>X#SKI5-lUKjs}{`CceUG z^vmcm91I=2IRdojusK(H+ZfeyC-WPn;$MA-T9<)8Bl$sOII-yyz33IAM!d3mXlFkD z4?W# z>07{*dvza^jG-v=0lbo{H-1T7@MtpYl)Oy$qVp1~g{A0zEgX7jC%3u3XUDQ;Y2n=x zGV~LA-HBIA*y7wW&k5EtE1Ik6)dgm5-C2U<6k4ROxn_o2;Clxu+>Iks!Tc^Zc$#{UWQ zto%k=t)Ny{|>zdeIlklmkHY>3E$twLvcnpEX5!x-CAZ5m#)5g8ElryG%l+TerA=Hl}S$3D(kyFnzcZa+i^DD zv6Z!vbIUDkRB6ppAC=>&dbXKl_+J(`vWgi?J9GDWCw8nKz8vpe)*54t-dBOL3^*s@ zMN?%DB)?}Wk=0r^wqZM~>oZ7iPViqbZ#$HoiFd^DYF**`I8P4n`BUwL_GPsD9^7-j zT!Q{f|4S#Vxz)J=#jnAaoL64sO?9>}xh~6~LZUidr$j{<zbIzA@QkyoP0MB6mORL1&i)VSh?Pq)3 zxS;r!C`kW#dc%MD_8;HhZSMowwx4{?srwEr)q@~769m~?*!KQE|93B*RRB@o+8azQ}-+wJSC7Mt?qLMEp zGoO>l=Wfbh&E`Fg3mtvRjsLFdzD80%C#4g})Tgw)H+e{GpQpk6J}tfoH!}MCgip`W z^ZRfucRHHu^-&|kcq79qwkf_Lx!3Uk>zF&Q7i%&3O%l&_A^x4pz9pO7B}zWYZX}Od zTP+=nMu&j42dh~Z<}-X6fc}|ToQ4yL2hxJ=OkVCaFzJsHiJyNTOSO{iJ5T%ZJ|l`b+Igthk{f15@+Q&i|?Ckkbc$Zn^c__RW z*b@KmOF9%E*3PWz3jZ@=WS%y@&`38tjp*@wo`%a?@hBXhfe!yP_PkikAP&>Q5N*r z+r0!H$vCi!G2>oxdA7H?`?@a)P85-e<}KsZUqgc%vU%^o`d9Ge)>iWSB&KBOcAHO` zA8cervI0C7mLKYM6bk>WrFo<%w?-1rYXq8fQ+qV-$DeK$IavVK)uiq;^)7+wY9*@Z zqrDb)MWgRfa{;|bl$~azEE$|~X0je-{s{N@lgGdMI-2-TwX)+sk~L0*nX^b*yogFO zuf%S;2gkqgTufi}lYgT=>gSZ@W0ra{Ixp4g5vY@OOwR1GZq2TLKh#g8>(R!7{qQGq z(3}FrkM=`7#6$iS5>p*-e_#oD>JLzo#!q1#x_l4ruG(#k3*Q$q6~F8A$oh5W0)1%a1L(XTX<1A9GH)L5|42N^D&tAjD8%3smLyI`>d_vYyFB%@UeGZCUD6+FnD@YuXh@nInesDcmI zv8;)x^S&B6n|c5*o zi?LQK@uc~y9*-aw?TvS%!SjN0(@--}sjJ{@C6+XF$i2ALC>+^Z#wrbk#c}xZt`RT( zb6X4RoOsytY2GTbzm=Y}(aJP()=jTdXDVtT{9AjrE zSUju0AGDu6w#1Vg==-~B?M$u{8M!@~yvyHsKUM}`AG(;6)}h*(PKK8jGM-gg;u_{W zwms;6gw?m6+(K3_q&L&`)f1#=c()8sj;8^mlR_ZD 
zS-@3=1qo!&`1zd{+*3f6eZSYq$#(R2A9kw~om+q&xea19{vW?6Ir6fh{@^8B2xP@YdV$G zPeAVqm?^!uorC5G^iXmJ4g#}~IX z+!LL5J^K;=**E=uha%gXcYj{2k9LJ^>5TDyZ6wxDITouji77?T833}aS zrAn`7cRG`*bu=t{U%BVfhrS$wrc>CUw3&QK+o`#MW}Ih)?uZ+=D|Lc#A>N)l>gQiD zP4P)AG(ZkH%E+tjH@hI61Yt!dTX!m>=?gx%J>U?h&aWt7p{`RSK z`5hx(Vw&W3U809R!+t!V%@;`0bH<73#q4Riv3nisFPATt_rH$U_5ySo;$0*z&Lgm8R*YG%=UQzEBgZp z&7F#t>}1Yqva5a;&4>ru0z7FzeshnYKH4PLXixP%Q#R+3xBE?t88Ien@pN!JhCcmJ zJUdCbLD~UsFQC&R`aQ-R>w7#*Ouj{0UrVkM5iWTJv&QMl+GTJ3a~!!775mZCKcaME z%k)U~7k6GP{BZ+=iVd+~{a{{s4~AWeZJSvp=up~m3okEh9}G971b zMBBvK>1b5A8%&E?mHA|2CfwJ+B)eD_!*WLy7>WC*!94ebvr?a}oy&1?4Ql-n|F2Xk z<3#BmUKKTl>pfW`_6FyJq;k04rqY0q_1h07OGrj1^yuV$d^Qi%VNIm^X)gxv`6a6H5_+zByYO<3t`C7V` zbD>M%lB|T;JIGx8S~55SMZ1FU9*~?2pJDL(OdqZAJYLW7^FBv!$=H!B4vCVq5?!zL zB(WyyqudfH&y8bZ9C4_!M8uR5YcTa;-6?m_rB%`D?o@JvMEW;k;dD^ZP9era|d z-@W88|AFnki}e1EI=RRDA};KSuKmeq;)2vKq-H+)Ro2#KeUBuiHAv=kebqIC$i71S zusX5w@ndVNrDUB*-_N7aNSZVc-lwC=G3am&tMLz%ir;Q^kY*j2ctFSEOXgpz(PIsW zdKeMcdzw2BFQeY1f^L1l^cd(8Woj`w%Uq<2mZy<`jq04rKID#hMYMc^<=KMEtM!=~ zVIPwCyf=%@e{$Nm8O3JPi95WxT!}4kY3ExgJOGw=lZb|>o(wc~@$m?n6Cc#f*^@Q( zFfC^_by`8gcI2;xcH@=t9Ihn}%_g?-6jFUZ{=};-zW1H=8H;+CdQ-?*Vp;8r4)Gpc z&ju!=@)62>0*1e$%LAnGD|B34$a67SL$16Az z)z}W&yC`%Cc{qY*XLUVBt>kUVIY<>X$AGjm%IBuoQls;OVVW;DpFqQiBx{UMaBhaz3^>N;au z7o$uvKjutpxL((?aF?Oga1wkC2|k}?X%C}#|K{Xv4>%REF-|!?6b?fRq5yax1Is@O5xGsu2l5Y!{tiNN|OU8+E*PlVeXYmV$4uP}Gm7p7-h9kgLN z8tHR8Fy9ZN&uMRa6rYHHT~YRNbV~%RGyLpAF0r0kL-4R6ISXgHsGmH4xj!?suxk%_e+6j9qQu{{@h|-!iO0=oTw-VDY@jqdtLFc% zpx@3Yb|UK@|H1462e#Wm^qe*_E6fi6?>?=RM+WwFq%B+``)-RSeYF> zFU>0}qVk#e5@?!xmcBDjyA4%JbJ%2qX|2V#QTuFfbCY6jQ8p(tgFQKv<%mDmT)g|z z_oL~-^*&WFH!WvoG0wN-9eP+#&3sE-na22^H9~q!w9Dk0tO?6RO!?H4m;BF6HF-t; zVwN}X_*Tv8 zL*2mv^8QHx}Q*zg^YX=J%KH|F56i-Ycyya_ccC zL8+1WL8TRHBBut9FN>8%O>HLz=(ZmIXY1P+E3e(*l1Q-WKO8Gb%1*HSQmeOuc_KP| zTlAFM!w=zO#@7?rzo(2eUmJ^O`W(Ca71_(lVY1n+HctJC*1tg-PB#`NpYldDi^pDX zKgk*ye~iB5{|og7vWzp-dtaLm8yQP+qFS@*-|H=YCW$X{Gsm|069e z^d>o$mXgKXn9r>FKr~9UkgOycu{M{}$^N7%UMI)nLi~Nkk()V2kI8IGfA6znGz?q| 
zV3gP`ll6NdoOWcHKW9txc9!wxRC-IB zj~Nj%K4!PQ9L-J4s@$}_+4K2ICl1&PZ1MuybCS_*9oltcaSn&^YpC)V%ekI)$W6b# zEL&GF9ff8sLAfv49t)~?23$cCa;7o|Ho5C`7ffzfE<4M)zmjNWIhD^{sN6+=jlSe& z%TxNzo_#ZvdlO!x)IOb-edF&4Jj&glnjq{$D!Q1vz5$9kdi)q2KLlN}XZ2>Cvv;;y zuelAH$m~n;GvP9oYjVk5+Cc<52XV0VM=RQ9SzxeeX z%qqNCj3vKwWy4hTwcJ@{t==SApgwB_2ebFHkDy zmEXgm1&Stqdnh&oFEek8S50!uJqhIl(&%my@F=ksA`PsxvNhRWDwW@x>gjrM~70qv&yBcxDE+fIVIU@4bw}Ye-?d zx-QegE#TQq!n4v%jJ*D&=UAT)w+e}0%HiyNb}hrP?C%8Q!{L`08i|tlSN5zYs$}oA zr*>~NmiM3&x#jwIzwxv9qrS5za6kI3gYls_(+(8#P$joy9z)M|`cCeUx<=_s*n}@& z-U9_rMeT;D`;0zwhwmHI9>E6AL*Xhol2`!UVf&+gbN;j*1#0TAzkcEi^%GiV-}(dn zb@uLE<$LSnuf~eE3wp-WwY}QeTWI24V-#D8GFh8uS1o>liIw+-Rua>-9u5ov&B0{x zAsl_)_e3SAM+!3YT19h~gSbA~IYFPj>DK);A^B?0K$&PqS8{Nd-q)gTa>b?gQLv4d zUsLmqc=jeA&Kwpl>*;q{i3bX3>fmH_@-2MIn&C7Wbu^vGU6-#wlBgB?>){gPd~(HP zwspTgOR-;uv+?a{`$4>C4JDB|)=seg?UC66X?4Vh(N{M1mLLNNbw9 zQSD^9$=%fK{4XRe+4)N3-mCHVX?Vp$Dl4>6D7uPVo`F&qX}PhP&;)QNg6Qv{tKr*b zR2^evX@F|E)w8qtRXlr-0AccMG*q&&)>aiWsfOw<#rrN~rYA0S*ZKrK{t-o2ux+7C z_F%4N`*SP519(26V>u1{ntWZ#o+L`gu_UQJI^XKu2Y3`7x2Ac=k+c)Rw4S|aOqw^N zOrm_uM#Y`!)%GMc=ftm$5W+^xdOay*z+M>rix%QVq0~ z_$kMLrzJ?%w7kxU{yR#oCXInktOnC~<#&bAOZusg>qp>z?#RAF zEB?&VltbSoTCb|@_)1+&OV9Iv0w}XT*9rX=u#uON%f>j9IbmY&exStlEJPy`v6{sz zMZ0PWuVgVylZfh za`;U}!OQXLJ{Tm5U~_YeqhNawSf|m1Z@fDT#{Egut?1Pf))#~H8PJtZFQ?G>@+^Kl z9bN%f^t?BoHCOf&y0?*y>IY~ zfUTffMmo+?qBVLY(q(c+WnXIvihO7VP`j|DM}oT&>cy~ z92-bLc8z1RatG!obiS{6e=j<2(nsJw(hO}V_zt1Z@mIT6-K+;Hz#yJLWl--7w8<_1 z?1`6lw-YBmktzS~X=dli=)N~7GJ~uSy2^O`XEJgR+FlHbx!$a0!+rqQakQ~Cm;8xb zZD6~bk)4%FyhN+EN8!)Z*#d$UpnjE{Y-OjjintvFnOXm$PAgR19lc-G->Rb3xoVt_ z2c;;7S?%ry-WqyHyv%CWu-Aj3wz})#Q-+K!Q(~<0%~5hM64{w#{R0&@d0WoUBgM{k z)_d8{y9#W{%)N-jwJ-zBZv0hBEz&|WW&5#y*MsLol+KzY*&iP!70K3^D8bto?_L2z zd78O@p_%b)eU}85PM6}BRTd73shg;b8`!1Rbg~som-+Mt@IHfzyMgd1kVb~HYB&nk zxwn+H_)+9DlK!0EfBDVl!TLVHyX2_PdBYDR@l)_8%U@0hYG`{R2rp2g8A|2MutA&#%e9UIa|x=L>XM!q4-~-*-f7OpFdeA{m-XandT?3?^eJYzqsrh4FX+R z7Hn`KPm#r}EwZvm*-$C+{Xg6L@83(2O2q3dC~&$SE+wz8lKlF(l$Gv$(z`b)eMw94 
z0Nj)P?$2uXR%Z~7U1kItZMGJ#h2xDPcd-(K3ah%Dt=>e3Gs0}}vz*0A6poyn4^VCy zYQ18N7-?kbj++@%;^8qDk2>Rfyb)^PY4(c;>f<5S{48=Y4et}ZrN0s2ZKGXl?Oq0g zYmARS;bB)bJ~wj2TQ&KrYN>Y)C@Wb@#1DNYj5?E+Sn(_L`$zwW7ZlB^=3SV#(f%w{ zE@Q0ktn>`{j`!_&v89T9`cgZW))bp#o--*;LU?CD$KWq3=_%$C!xxJrV z_m*%TiaLw+GmE~zPIKa`w;Elh*c-WwHCdouqR=NN&^nfH7D??@^fkfL+45Ub($Do^ zj0g64_H!00cLCZbRz+_d%??&)REv-NAUG_6LvEhMb0^+#%Y43vR)2%jm(t0sBip09 zjDSY;**<5NKey{wp}|k|Am>aS;WiR46EnMQFH#?eXhMjL|7*k`?Mc z>MR36?jkgSd;GVT;cf>qf4*-$j3@80va?{b!hB-{y43Q<>yrtD&$Z>LGJ*U<0%yu3r=6#8Ruod02lAH*?(_xl% zadHwS&SQIj2clJSdS6*+>O2(L70$`Pl*j^iXfIw79q1oO+y;HE0Bz<={e3zWeIM{F`KYET6@TsTXkzYYwN<_|>a`=I529(} zi+ss0d<2f1$gPLN+a$5E64{CArq3UYeLMNQ0!0%=VTy6}CYWda+K)v#NIN~X*%S8P zgL-?CnCOo2HB6kprRMF4z222Beqp{d#k){#yn2aS_&uBTF|M_T=`xa^wSPZvvZC)s zYn}pAC%nuK#tIxfSBZJ3`aLM7(&$9BsE$^>VRH{ne-I60KjZzB2y)4Fk-KM)gKN4v z)xcgKmELFT7NN}PV6R8!jwK7fX=Q~z?;sOR^qy0oE;y6Dwu-r->#+Zd2<9~dea-(V> z7@NQ#^X;q^FC!h5aX)t&laswZ%IrttA4SbX7U-|-C*Xf8e6Gif-0lc>_5tJPpm|@d z(_uB&-{i$Ph30p}(=K%WJy;zL|5;`(iIuXJ4mAPYRj8Y+lcV*J(d)HBrk%m13sybM9KL`{S-5P*@kAa<)YikjYk&t=qj2I>9idz$l)hZ8M6gNr z*mxS;huX`s)tq@QM!}cNtM0^s=4Nr-K{1w1%(-#y{{8^xL?`+PMJ`0a znxyVBoJf?(>bO6cgoGo>xii_^X*3C`jRI$q#`}uBh#lZDk9HrSWb(dWtJgW8&b`>! 
zfQ(RUmFdaCY}8j)Z;9}78C-LZ=wRP2gHwDKb4#ibseB0ir+GRU^utJA@^M~ewzoT7 zI}Maw$oUlYvQ{}wxtvBM%SmOD{x$8+ZH1v)=#7?%yD@`KWZu6Dj*sHkkKpTta|e;L zU+{LmdDek6B7T%tTR~)fHh@%&qfrgvp7?50mCo#Fy0uYu*pF1BJD4lu_Fj5;o1Vo7 zf0$WO;@n@N#7r`_1f3JFAghhcnQsSKB0~IKd-=W#_{V}S^YHk^d_(gN!rAhA{f%|n z4sCK9D3J=9qWz(u?1b;}s`{c)&CYSNmS6=QKzrAbE_q5g`9!p z3?MVYtdWv+>{8!07!}Ks!DM1d=8nq7w%K&>F?M)+ydJ3hwdQ@Vu!jfIj6ikIzhlOw zwO?^|gB_ITNObo3OWD41vpzR?DwBq-;MqbdFVN?MEObSdJ04RxcgUH|ndG>V+Sh9< z{$z(}Bl|?9sC&6N@;+-DNHhB_Yi_!qG4fgbeZ-STy-S3GO5o2NxvKBE{ZdBBGUR$J z{TXO1_zg5?lEv+fv|qD_^TCxUh8s|6jymI&91Gfi8oPD}O=Z^TOQYCWJ-n#g)mqE> zR826nL5W-7Qq!~jJl(>Il_5J9fFN-xLYEJUPX~CLn?t+O%uwp?jm$f6s9%wHTi+gF2;mGxG{=+_DAur3yU7BcuX-mEI`$T zbTwmt4Z79O2rxpKKHfgA?as!`2XQr4?_VUjJ_}UayTo>1Sj?dQPNS0hIvFE3(&Z;` z{!4QIDXDMCT8yMs4O#C2EZ@^~Y^YK1N!Dv`edXqKe-=2q!7Y5t?e)ZwYfNu**I@)p z^{LrIAN^lRoZNNb{NaGE7@+Ky>?)E0iFA*ms|FW zSi9_tOe*Fu$2-5pEwhtrZ+fQ&%j!0aKAbV0qGu587F-%xKZ?YNj`& zx8wC?>R)a=iAVT}=(E7cFoQ*1>G_p#k1tH-V!5Ha82{#ZdJYUmtA7}&%WQKYO4Knn zWv?T1fRoH$7SYY@%5TM|%+7OfJGbdh!OOwMtoEMfHqdh8#<}|HtnWAQJ?CDD&7M){ z8alT{D~ZD}#n|#0YRyn;7JPH#U=CPw=iy3a<9U3D+F2E4uXu{SzoOlZ&?C{A6JzLM z(B@r3WqaZ02PEfb{l0*^XP|5%d)>yivsc!=ce>SSZ(8f|4w*h{%eC^p*I77y^`DDO-b z7NBMPntPI`{b_D))aT~@qjX|6+w-}S?XA1}c(R4YA8#aRTWDya-^B;A z20R;qXBX6(sGY<_ILmA~cWkay;&&3hS{sQKk!T3d8$UAR|44r`VG?P3314zsKC!UB zB4ekLnM8AmUsr2^)egJS=@Ex0VB&{;qUlW@X2C6x@=bZM~Kf$07HS27)QKH8$z-T-wnXTy06}vDWCxXuVa- z@fS=s#nGtpCuKA5eoVP_Y9$_4?tXj^o+f@i!p+#*cq_-VZUIQL8i7npMAO`}y%=_#Sjmip@hbfr$dA&(Td+tL$ntb0=Mf8w*=SCb#wn4ABqx#e zi}W%9hOsKi_w^hM&mr9pX*m!drk#35uOo`pTjFmWu3Vyu{RK`}z~^R=cGBPB`12>D z-bgfG3%VBU`(k#tS1}Loj&Chtd*wRya3g;;_}{@64J5(IeDFQ$>wc%@!$?eS ziuLd=exyC=^oz!kYC3f6Ue0TwWlMlX; zwmWG1V>*8Ys*gaQ=gryXz&y{$}6lUcAeY{r;5o(h|uN|#mkaZn@@NmoC) z>yopqXJP*`8YS9NbNtFZ$M3!A2>ME(7^LT!@Qof-^nYd{OYvGv)UKX-xtiq1mo9PR zx_Lj6Jk&CR?0{DFQTY(7^NxDI-M2&v+m0){@%AcxkE64{74@?Zf9AhG#k(jm!z&ok za!daJu#{GE)#<{y|1MR7><*?iPx_t+mY341pTPaIdN~WoeW}=r?TT+hwVtQRTK%5? 
zhmws}#_imLsid!}l``1Bm;-UP{5wA#IRlhs2h z(s?=06R)8R_{J7b|7&acoY)M5X=^@btzFgcXzd;9`9$MI3%dGQLHW6!-&B-qhI(aa z$x(Rrf%iWf8K(Na4||^xXCHDnn>D!H{409co!uE=%y^O(zNXeP_T+JToIRCAc;1b* z8-;Hdv$*v^kc^*GN$D0P-lCJ4O@G5G9Lt8COjZ(=@&P)Q7{#$Fx8nHoUxW+DFISx`C7o_3Rh0eGIw@ zMvIYnb}8F?KIuwS)nxaok2;?gn${P!P6qiM+HGUZ=?VY#M)d4F9tP({`W(&1^ng*K zuANWrGtXWHpXlo%a3xk_Ett_J~)Z+Ddpgfc5iAbkoKwIM~M+*ALulX~cAGE-+>fFoQf*|H&2AwXm=EgZ^MJ zW@Yjo_^#GQe6MpeVHqyZ@Mf6tG!cRC#{b+GNQRGTddkjg4;sA6_ouDGCa_a?fi|&{ z=8^uK_xE5^zrl-Z&@%CLa}tp#r3V%;EoKpOpW`=n=`s|^-gt7u9*$2{Jj)%XZ&=JL z_1Y58E1=dn#mZoRk%h1$-(<^{%+53E4<1@%@mdKV>b@+>j2a*om7 zIB*UO;tBJVR?Y*_*EsnC+9XnFZmd;?e{TQQ@&7BmCwk0i7A5!7XTyA{c|{`A%=aWw z)O*4{Cu&RK@ihr~%=dlZmfHqx(cv)|odKuZLHfihubZA*klD*X)L8%bTQ4=xS3Ff# zg1Re;CF|`L&?WEE_TZ||^3MXrVl6a4l}<+ZY51Q!Hi;0Q7=kaN_pxw}&rI%>WTsF9T*(HPczaiZY7e-U^7PK88MzUZwQYTN zAaS1d#NYYa%06oHegA+uH9)vdyOq&yE__O{6Hfujcorow?q5Z%JJIK7bgBlPQ$1e@ z_AMmw0njFP*UMl%9(9(Z;zIBw`d?O<@#Cp&X1fKq5@Xk13lEL5*mLQqfT9Cr(sB<2fZHV%d3kx;Hr#s=akafxV+e8?@VHKP; z*OO@02^IGR`A6g~k#HCLyV#S&_vo$NMBe>Oi@9OZNMDDeS!kcwMIVyIh2G{CSYnqq zR4!3JUIAnLv5vsSCrC=x7+dJzP*3hdtKKMj9GYZ(Jq=H@hD$WS%*Nl-W+x*|pqZ}a z4(OVb-~nbty-CZ<k3V_~=22wO*wn@P?pecVZ!#~D`_k)3(Qh>w-}6_+a0_iw!KhG+lqKOSSd z(yZ)Y#Fi!U-Yk}Mdrv3gf7T7j2{h1LA(4-d1H)@ZpLrm9fz+N2^JG(c8NCLfN^jcm zkvF-U_b@1v10Zn|W}|;{092z5$z(PY^>TA?5gbPvMfw;q5(}=LUcR6;9g98^Ss zGD;neZ}H{2(%-D=XOrshK-3fkk`3$xxXm;Mm9pqPjJ}Cx7H_L3lnviU=r18lSoUP6IM&<#pxp|W1|)5@dB-7W^tgKa;a*0f!Fcf%dAo-6 zR7cxWScVp)YCD+EQfe*Q$J3}KNIuZ(^Rz$l_=eHPoHj2<>k-(J&t^hj>RZ}gUXPRTEI zpD`|)`4XOf27}QkdOuZfG>f0@UF`{=MbqA5m=-G*AmOIIB`WuU! 
z`@n73!A5kizIj+;S;r?cF?+7ka%ruReADqc9tOKz{9j}oNR+J;3p!*!eg|(7uPgD5 z<0o~6^4WPF%L=SiDl3B){+?~dUQM65BU}S7G7GO^9x~CBy|jNn9`*;v_xjn3=4P&1 z6J%eaQWHFxOOAdbuUXq`i)E+8xhICBDk1;P4ZAdxZ@>Py62%HOtf9 zcv9tVc-BN!i#1PX(0#~2KXm<0TZyQ3VzI&)%gU9`kIInFDI{hvsr`vwmIFmkZSGjK zo7)5174!eB0G>Bm{i6LhN!Cp;A6m?vx1(RlmV6~0d$?HN<}BwQAiNwUcf+kA1rOdK zc{y9i9r((mt)Hi*dp%cqzP+E&zdTtF1rqrxXBth-3BN?qtcgOS^8cQ%=gcPG;}=%D zkgR-8?6sQy#;~xHTA$!-4I!m2#!~AtkBG_y7Krwdl4drS(X@mDgS(yOq4C z{y@g^G^Y%?*FTq?oa=K7cIQ^KzFIe)YEO_YbcNt^eeY|a^TQ?LT- zmKARz#XSYqtj9|dlC{no?CKq66<=u~=ifa^Un1lsGfFddCVu+swA77#iC0M@pR$_C z?tSvt+)qNEBu9Nn;@fooZJPRaF?YNj4`0^X1BEqgXIwl74{s+yA9}u*Pw_|1O^Gc& zO~;+w2VKB2j)!&p;d9q3`!8+D)IMPT$PBFk`tPaV2WabKEL6^Gr|B=TeO|Q&dYcyA zrA!lW)X`%iEOurcaue!ZxaIC{H~ef|*qRTN*+&bR;UzlnK(_B=R_r9Qei(@Fq-)op zTW)0}`ebriB!*^uDU#Qsjv5abO&ZXM4e(kHlH3=_8fYMyd>f6DwdNgo)iK(=WK>^9 zt`i+5ahnr^Bewy1Yb`YsDQYTs>lj}Wu_y6lA2d=YulYi_90Zd2T23^stuPz}qpZCW zt8EO8?1Xl2kD6&X#ibZY{XK@$Fl*ABJAv<8^LC z{7&YsA*0#hY+#Je&C$eXNmPnac(VXx?L6yWjKTv@bsfkaLZv^mGE>;brF6C$>5E_c z-_YP$?dM*6&Y*{IQ5{mh~_E17j{Ryi-3C*)>MB3jimA9zKHWVV>+ zXRW8ZgS;cXS%|ZHYON{xZjUvVYXhc!TGjujjwBRY47599_R;=*D;0Mz3qDkI8?BD?xS>k zgrcW=q5b`MIM4`k0l0oaxiMxVtH2z;&W4^f@iy`hJwMyrxVoBI=bu3$lT9^|;wO^u z2}-Y^SDW!9w{Vj!B6pG!EB82KWZvINj&gz(&hJ4oCxKvRaQ;(YozORTRg&qiJ*?Xo z7HWyP%a>@JxYF56ct&sUfbS~%BS(SYaFlPPPWI=X)anxQ__=S4TIg^P*bm)u?PYrhOHlDV{&y0cJf zKT~4wubt!lfN%1QBkX3rlY}))= z@~3IHA$hoymL=9;&7#~ywP%r(WE%Jb>V8IxhNIm*#?D3X{{iIJ(*D1Y^NnD*l8h`T zf!W3C$L3BzxkG8|RIQ$@rJNz00?Vhs|1#TiBCT10$0PAOk;;Z1C9*{9r_n?)5|evy|zj>|*Z@Bnyj4`*d`CT0bL=&SgP5mQ@?4_vD^zPOciOyM{el z%z9Np!_UakGFtJ9lF_w6zIDXMAtdHwtv*3w*P`t-ve$^UEyo(&L&_%^f!|c;K5ah? 
zy3h2JI|ldT*-FxX0<8XEB~-d$u{Rp*2-4@2E=QuXCtB6_zo7hXW|f(JXQ%mObCI!1 zBqvI;0<6)0Ri(F*>?vff6nFP9cDNHxCBx&5I~YE>8BVn|s#|r+UFNapYgs-&pphx;aODjdH8_A0+feb|Nu}hxS zg{Ju)KlF-dm-FW*eZRPPI@bF>Xj!_kFjZSmd%gz>$0y`Vnq8h8UT<7U8;N6;{r5BA zG1mV?kP3!xDv{W+ud?trfjOQ{xu@F+?%5mvR2!M8cGKeC`iSS%{-_trmiglKezSI( zOY(E4V;4{VKoXbwRFNf5MA)2;R5c#{2HVzpDU0Gmd_Dsd+3P-%G(F}2iTX&~-?H=} zSSMoE4n}}m3XYZL!dY2;>dE#vGRWs^Sf(X<|J>&#u!&b;>Be+U210{xaphO-WsT7c z>}61(4Ct~7I$n=u)z}>`o8x17T+BU(a>Xj5lK0aZ;&o2gd(yz<%19owHJeXTFj&t`ThWB5lT zET=ZfHc}n#Z_(*$V9MB#Q-Sx@dk*bFg)o^;KdQrgqLyAmqj-62W!tg> zUSssygo{Iz>WrpcXzLQXkg|(G-CmpTt6fX!<@B;EompnRwI0`gEM{-NlD`f(m-sqW zK{Fk{D}wg}bLkc|AUBQ=)MjD;T%e6a>Zz}XCVD^D_wPJCU5R{-FHu)?=!%|Aw3rM| z=jd;YQLHU3{G$@T;N>E1w+2B@7OIo;Ei^9ooX<8>ydJ;Wf@C;c&t;i&cl{ll8AFcl zN4Z|4bc|3VIzqGhF!ESSfDrh&0wxz7{m z!x>;t%+_)!`4;SxM`DsEH9Y%{tXBcuCZ96bk59+f>OJhI6$-sz?92_F%fXuXpxLz> z3WlAPj;~DgKQZpE@HDH=x5$1|)@23@P~XhBmvRqzej8h~hQ%DN#nV9COP$1ZKZLyJ zeop-M=jbI7hsGN(&qu*m;4}r_o@Q}M`70{uu_xVpi2e>BAyu_>OEFJh;dw1myg=>I z-u7ni6K`t)_)j9id+Ifsx5l%Tq#-$DlJ~Nu9j9}R3-QaViz<^@^Ha!oPOy$d{e5Zf z_jEBcyBE>r3HZ*#yX00$1gBAE1j(kfip<1ga|tS3q_2ATa)r`~%+?Qo-qQ9zyjy@{ zGil)pn$rUvKftGWUQa1zHZzP6xg}YO#QK8I4}*3uG}uw;MQ~1J-`XrfcE-*I-=5$* z(dWvre1ML2M61dmZ3L%*aEqtsqhOm#u98b4JH7v6F|Wqcj_8+LWnU}Z00pKRfs^&` zO+5_3yGOM?LAjJI0N%ng#Eej!)#>_aq_>=s zZpM>Q==uTvXLh*Lzkh4%AvqPUr!Tp;_@2Lq`Q3mMH>1SsX7gLkc0L8=VA8e|ipS4n zFuErH-rgwuFXLo(!gC)c^Ud?wuk(v?rD#FPCoxyAHPy+SIrEjoB`u5UbA8Gj=TkN? 
z{^OYg+z!9M7vGby{(joBnGrqZs;OVolh3sDgq})X=A{-cfJgQ%(^^(D>HAC)FoO<0 zT)c_b|Cye*L8ZhcidRNvX{A|iqW#SEB(tkZ#XKy&>EHU6`EJ&{nV03p_(rz;0^|Kv zXfmh3A#q_>foBiviR>X|9hA9Yc-GvfWO+OjZpRkBD3O)?+^oGb@vQW1H(FTeK_Cz|NpzIXDxgSt+En0!`}nJn%_JLg|AnB8{~PCnel&q z{_AXXTRUkpnH&fImvZ6qU@gU$Juu{U$A2P}{-?d15M=exoSY~A(wF*c_U~0q8JZAy zni#2a->U{}AH|nYrxc^Mj1jLHS&e7hk!DDl!Aw^B;KJ7R_9?ywY4Hlyp-wSA9?mw# zMfo=Ve?pb@VsS>CKB>-*e7 zJlLnqSZ2`3?22U`l=zWfYqg{C=kLW@zrWgxwKbf6wIM^<70Udurj~O0{TbVnapNAN zO>1>lD)q8fPGKWYH-gRs$wC@3NguJDiR+v>Y6Z_T=gO_N$KkvVzQgEh=7Y~0lkPD_ z+>HW>sGoRj?S0C4+!F7*qw0BNF&Phf8^aSJU`O+?D@gtudWqM@WaX00E`GZuZ}RLc z*GIX2EaQo08apaei#}#HvJZ+RThUJW;qg@`h2v>&R66+byvbR8UK!KMxN(>X|k)tw`dGqPOQdwD8B&z8|hkodKh1cp;lDc zU23R@*St&2(?xLJ5eE`s{b*~MML5`9`ya7tr}>ur4mtP#i)ZUltqn+`y}2Vez>}e1 zy-y2$QG6imj|BI3sJYI_ny9nMkoXiHB#%`!d`skso&EnvKOduLBL0nFrMl9d_!pf; z(-Hyj5xC^`K+dlhqQmW^Vy?Dpp?$I*B)U{tmUt80$l1~+Gk_+>rqfAFyz^)2cdN0X z28~KK>QhO`^PsrAn59H_m*HS196bnMVmo?}+drdTZS$jt;Bbncqv%TZV<(`*JY3ER zRV7q=6DLMGotWlL#*{|Ne@{2Bh4U)dj;3arj6`uW0)E4idQpuuhQvOL-iAfrENdokO% zr=H4!tF*h=ywLGQX!RY5=!7B{pkyzcTm#oRN*-%0?qbGtvsuSr^!yvz->v^INZYyo z@2OnoT+5X`01uwg@*Q~c1g)#8L|HZ0&WaNdq{(eGMP)1 zq6QAqZ(2$Y;}i8g-{%@AmVKnmp}&NC_9k;fvk9n@p=vwpknBB{BD&lLf^+FW=4Lzm zn^MhD^8mHCqSkxbjaT(G${miTi6Vah-p(v|{X6T^1D(VzB-Od+7YRuAwA0v;(hRkx zb?tlV1QuA_Z%~e%`7q==mR~?4sk=av?*DLA75@! 
z%{PvX_T*E2Cs)=&T+NM&+-7P5y4=%R$nqS8PPzN~G`&pB-*_#KV!=D3Y8|>aLb=3> zjkn^Htn0g4>!9tKN+f3a@pv&oD|fJMKRa#8`E&vt#nw~!VkGVgNs<}DWQEHZHyh-14?WSO#aotW_P>i7hn@vmCO`kc->C35#S+UUSm z^w3W7f+k~qJeJQ>FFQT=(wU`fSJsv*wXhvrlHV|%bt~ZB)U(4$On0^4(@XAPe6NL^ z*XLBRH4J}L`x)(IhSVOG=Yp#qtCtgzCE7Wc+{Z@@F;F!)94+U>M zGp5`RoM`mf!#wk!Z1cm#jB7PoWQ^{DI`OZ$kLG;E)_;b&cPXE7>329zMw`sBGD?oY zgSI4RD)j?X=tB zO#xFVy9)F((WJU}PpT99x*cgqT)6QlHP-CqVPoDBwKo{?6X|bJLE#OgdcL+>f`6V- zE&fzvQ6W2Z7qJQ#Y5Q^(;|1f)4QkHxJ+aUdziTc^Cu;I1-ku7d+%8K-l)iMa7ENyH z@8c+*JgqG}omTYpl<_3F#1EnWv+=MJNje-qvNt-_^VeWCsGwpe6l&^8&PZM}`}mYD zX0Io=(rPP}IDh*Vlsy`S=hKb$@QVLO;yAWKvm@Z0=;&Y3*~tA|{Cky5)HTiz1NB>| z&=E9a@wA@z4UO-Ko;OmBMYQ~TylhP#5{D-Hq*aXQuhM|*qqPP}R+q_FGn<9@j3y>~ zK}#*qz=dPz)zQ8s{$PJ4O0)20=yd>19)>1$y^AMAJicd;jK@%N674PLTk<%4N-C1I zdJs9vO}r;Sy{{@bAP!F%2%L6w<(=Fk!xtqcrukWOX4x!28KD_%|e&kd_M`@;>DA@J;$K< zS8#5ENA;A6wnz8+7#%jE^Fd^HEXcdkg6t|!CK-uz(_NdF!S{$l7W);_bUPd>qW3wh zK^I)lzR?YOIF2SII@~fX*GAK5*3;^J=1EU5p3Kr^R7!`i?#fYR z+E_h*2FK5Qe#N?|g7>EvJ5YzhuOg{VUd`K-eH{PZGYh(&9UDxlzQoaZb!IHiUG}eF z5x=kItWGVm^C~`PbRKG4I|%-5^gqYv_;VhL0)O@{`+~Ro8z0k*(wV=NcB@kAFwkY5 zmRJ^p3S3INQt`?=K;M}QB$sF_l&xfblom4Q+f(VB-PT0yN3@XWBm1LzGV1(-@?VgE z9hDyk+Mc9pG$NO8=x99L9vH5DCUgFtR zE#~5Hp>d*5wxAOyXd_T1qTWCs)1x|D5CCU=Bx=Hx*ARw;HC2D7Qyw#H*vwbQ9B!=%P}1U3w@bA{EVyTpr|F zil~sw(R6W{O1e;^Ix3~f<&2Dk`v1S*y*(Yx%=`I&{-5Xb-1cu@)?RDvwbovH@3pg$ z0eT;eH|V&EKEw`;#T`(8BC@?FeRv2y&~ND4p?8t?=64F1^r31A{Y35Bh1$W(sjHY# z(~&2pyVC6{v*nA)wVw11*hX4m+Fk3APeSHCT5n!(N2O=y^twqWPa%3%Puj~31>|@W zoM=5}g|~cWv}p7Cn#u2ipZ{f2{f-{f<5S{3f~rtC8EiG5d5+G#*(rV->n(DU_NOiW z=U3{u_TBHR=XVg?Cck-7uQ>ESN!{K|H5hHD#gd>@Me=oquU>)H?p~Zu|GbMS#xyn# zi*b(EBAebu3RNYaJn!nlROFj}=O>YieUQw{kzxmc{d#(G5)$ciN?d@PejYhJ73!`I zTFi3;mmpm|XLdFt=|ZS_5%NyoR~K460o=RGZab8(37>ff#6U)MN952YaD`EepF;W= zd-xa7Y+^1C+JU0N=B6=_m8XCoSFRh$M$^~HCC;?|s;?LCN&QqfU zWF@ zI#Pc%Joh;e?LaDcTIeP4upP?0PI=?lT?}46Y z=oMg1ozN8ST#o$kUXwED7_H-*pz8!IM{oI92iIN*ZrnTOZiBA$#<+Hy7_rZSXU{nO 
z2I|hBCT|d{1dY03r5d@sCGQeOMl&R7e`?itI24Mmpf>lhEPpf51@7$A2zq}Kbs10ZEVx+T=6jTw4%YQH{hEB2@!Ply z+rXKTiK{cZ)=`f>TyK}@2bVZAs|#gjl6N}z?L&L(ki?$iFAFVa!{6?(t_=m7Q`gD# z{qNw-+tZqY2`zB%OtsYpl-iD7)q7t;N=~ZJe>CD)*I8U(6r)_r(dc)#6pjRtvS)}=N%}~oguTR-vA9cgVnM_Rw zk#W_){76Z~TV#%$`=qmtsE>&E?P<$K`m zN!?YEN;9dY1leY^>&4Wit!YfO+DJhCXJctwi&#IGyW|$~uUeqTQ*gL2CrIfJZGDmm z_SAmllhhP<@1oC)D>n_E&}yp!CkR0=o*qUHy^b6Z+FEevO^n*d8Ph9+B~=zEjFj9w zSlQl{Sq@G&rd%I>YfXL!kB*=oqeDkJRcCa(1>9o9GjX0X+SNG^jc)XT-RK0Vsa@-; zYC~$YCTpk|(sKgkwDvpDWBYBad2b+f=Y}Pt^{Rd8zyJ5E3{=aGXy-r&=fW?5%VK!zC-~b) zcHR|x4jRm}j2FU9-oRzV)2(QUf#_q;ByQvPM{x5_q{0Sd(FE%GmR6QhV@W{w7&$22IV8aucQ95jVmzhhh(O-=pyYwQl^)~_IwPoc!O;KDn|z4v53yt#0q#M=sDN1-$rhC zAjkgzSv7dI2W?KLcRi7CcQ9Vw05(r;Ea0`6zPkoIlzJwR)4MNcVsWm8-dmu`GxYHl z+B}7}7obD(z_9zz#?ZrG!vRkq8<$YS%jlU#^j54It@i_O&Qg{^tEb?WhP2^MmR#`F5G|{vaX0+G3@8Ue>l5IYJK&{d zl-YnbIROrS1-|;0{+$a28v|hvMu}&Zy=QGD^u7a5y^~h&qEFiB?SZ!g<&DC0FIaev z(d9n8G-JVY7`FpoHz;}oIGWDuM_?R|y`x`yC{o-L3*F(Gdr2Pz{yu{?#;W_8eT41uYf@`_#w>?kjy39<5Ccw}JIKVAF`)TDWHbvwGDt56&Aqld5$-h5RSe z?ild@B;2+X+jL)`Uvoz1uaO?-Lg9^YcLS(o)a0}2kvk7uH~j#9x*U4ELYk*@HUrr* zM%ZnPqeV#m*O4w+)T`w@0C*?S8*kj`fgNBh17kIABkcun{4vxzlYV{(pX~snkAdw1 zT6z&m@1X8)fu<=~$wBXZ2;csV9=ON!8OnL0Ur+jedg)1St-mM8JBq%#bMGEtyqXqX zhx=~i$qVFtnHk6`zNE~Z4F`W{{*agNG`Xq2(BSxd=IrdVo z(ZiG2#y9bA4L$Idw2K*?N%*D#FgK&eoq*RUl`Elx8vRUa^;AI{#`pVRDGl_WLn&{b zaJN%7^z{_7+S-Us?o;!`>*C;3(ch!b!Zp3W)4z|QukCt&g!_4pQp0XWVm>^VM}1F# zz1Eaemg=XupCM7>8uN zo|>02->yWC0igwNV@QHeSHr4;8$CJuk-`yHQZKRqA5^T38$H8F1m&}R5AU{#m@bR^c=Y6bTI!F zIqJg=#>aAhS#fAT88ozaxC@OdfwSF(Oef$j*Kx;#WmkjW0%O|HX`WAD zRCo$`A++9&gx^NZo+)?dNjz7g4sT6qJ@IkJmwqBKFaQ}5t&Qt|*HuoR_vYDHez^x+ znWNR(c#^3FK9Z zx<`@{>F9c|E3PTNtAinBtax%yp4@&J^=1VeWbvJO-OI1&f6b)CZ>`k-{L*f9zh5D( zIg5~Xu|Js;3ky~td@5xwl+=w)B1->eE zwgiitkzhs_PJ^3vVBRRo`U29(7h|R5BUKJEM!ZXO2mD!+k@6-s%3F-OC(-Of(2ylT z4o`qT=0V?0JiQVw@(zk;;687=Sb+{{jTLnwW9}|emk00Q8BnDznzko(j;D+(zDnrr zNE`RlyHeBxcwZy+MA9Dvj%}n~fkfYp^qLM28*$bZGxrpIgYNS#Yv+==$nr(J+LEUU 
za_I&}(=$ji&oDR+&ZU2~uvA>@`GFqxgs;}q#vhUQ-_WLhzOhK*^TEb^axTC^Sw(%D z(5{|J*^iET8T{M{AMZl~cBG9ZNZQ+jWcBXx-;?SpW>=)Lac%p+J3Gm74zr7z^x$Sj z`WocjSVmr7S~Tj_W6(vc+=kq4g(T5)vjS)j!5u@SAT;MY3fdKQ`LE`L2AO`+Ul@X__yI!k!Al9Jv!YScy}@wzwb z4&eJ9c*ld=+jx3_@_&R6P6GGd6YehFlaP4lgIRCKIUCODNo$As-VT*tr0;8i(>)#o z!IIYDEA-|gB;udZ9dp6>2jH#&PdsB%3v2BOc+NZ9Uq$B5gO?hkX|DwrOUXYL&Gi)! zIEU;9KNrFO`Z#U`N56$KMzl4$@?W6nZfafym7J4yA&NDb;QBPi#U+fE$KllvfqW`ye**3Yfx*}wZ@?+fF)DV0e^*Vfg!;EIhR%iy*U|U( zV8fHQ4dD}S@!Ln=Tf!CRgA=7vKWbMpTnU!lq5C;ybAiT4o3)_I$@C}@@U#JV?q-an z;n*oa(UIP+0*X3}|7BKEOTeo0b?=;*M9y+>^G5n- z^o|7mO~G$v8O?gFjNjp!nf8rqiJ$Vc7W6n3*}s53EvLMv0fvDQc}4$EbLt(=Ieo^fHZwB`op8Pvz?63K*+7jzUoWzEJS@^8yzf1cXZzh?-Vf}za%|D|B9)x z8(63Vt$Of#D;$vqi-W^F&NHZAGZwsW?jo#naqoIyI`{yzdOR1|9vdpfvn*;a2CDmj z!e}XaU-bTcL>~3q97g9}o~)yP=GFtVpSD*4Wj9($gRgPacmcerCD;O~d@-Ei{y;s5 zji6whh_1hO!~9`ECIex(hR$EUWPHjy^0 zIkjYC)cuO+h#31+FOl9VYtj2=2|wdLGynbXujc>q>`35k?`=+o8ZHIeI<#3@L&Iag z-SrsXrO)Cot;!gksC?*KW~ZAT5pB{V@WnRp(H$IJPRcT5jaF?62|1mfXrH=g#+#s9 zFs3K)^fMqjfDF%pn})$BoAF-!Ua zY4;5h_G#qDCdT7`!5`k$;XOB#;rh$?Y|JS95W2q3czc1|YSj~wqjS*c)xd}G9#7`; z13X7YH+zXOp_cU)T%)P9B9Cz%Zl>0b@SZ17@1{=e8uj=pYP$d(G@E+2BLk#!Kjg?f zWX)=1??HMy7??+ahe`0|PgoG$(676Y!%g6repm=7Yk=o;Jp;N%StD=3d#_Q4>cVypA&H-Ry zCw0xHt}}tHKb(CElKFC|;HjZS)N})Ka3N2h0XCysw*>#U^Xm!5#9KhwhaPy^>lH@r zdP;OaJN*D|o}^Fi&+)9?LPp+8;7lzwgZ{OrNAsZKo8YVlno9qcG3_RTnIqtL3a!3J zjVs}yv%v3i;PIxLZ1QU%z5s+h(4(FXyO=&*h78DJT)MN&n1K(`L-($?r3LQ;I|Rg+ z^2>V>j6M2i;QWxZQ{cr{1Lb~;Y%~hod&p)r_&!>^j+)=c28b~lmw}s;k?yW3ZiUA_ zKpK4r7Z`tYF!-oIo!(bE3FthVd?tO?8@3zlUPvz|Qz{SYy#%*BNUocacT?f1JK&U6 z&?*ObCZWmqfHUvEv1P~7yI@Tn&<{ZYeZN}+-5~wPI3s3JQ866)zJ6KZ})mB zp_1TiCRAujUkd1p>w|ZrRgJZlhYnhZuIvHbJqb`34(kS2SEIK^|4}9w<=Hcvqo7Kf zHV+~9)eZ~jdk(#HZKD9~?+rZa4}BCd#+f(JB*Ffd)ZH4~Zv@H*!KY9)1OoTuuLlPG zPG^JLqqH!c)^?Lt4}7U<%Fvrvsihs<;d){=+~K|uZz>-W+VZ}jzaZBVK==cY##=Vm zp+mGEv@5js613^=wQAtTwH8O6H<&or9>J)uizGW6i|;J*p2YKUU}qGt>qFcN1x7=e zH9#Ew7XzUG=fK*QatE;p-9Ke--4~->)(eUa0$XEg^L_fToZcEcy)FE6kbWdd+l34% 
zrX{@*eZh_=SGR+wBGU9MxpzXSU3U~GZK+qSZlszlcvOkp7z_@BdhQtTtmp>hq-$Fb z0_#OQ?G4oRp#5rk?QFObwYygR190eh)H8S;=|&Fu7z`LkX+0zLEyn+9EO^ga_NHt$ z^-4A4zZ9Uiv{1F*^sF?62iMXYZCZEDyRK#rS|Ux}2HyQ4m;X)GwrE1j^Pr4&TiKAB zMXj-nUe3OOv(lvK_k4h!C;9D3WV!8lPm%lEc7>9b&b(qvnSa_*(Q1vKrn_=jopgOd zWf>>#^2>+b8M{diFMaE4idjwu&Gz9IB;lL=-}zaSJ920&~ew> zU*WxsP30(b915lJaXd#{1NnO`+_8&M__v_%evj-J4sU)1ttTVpys5-{8mEG(ousyg zKfNo)J^zy!9j(w>eUO7IDE~U}Oh$5ULJC|Pr1?%@>;u;tiP$*9v(ZSdDBi|sI6yt; zB0Kv-*C=V{(ibg`b4cq5N54T1_uIS`Wc|)y4^M^PZ$c^`#in=~==CnoMb>D2PNDY) zgAO^5QG68~Yh_*&D%bhsxD&qm9Q+JG z4&DVRdPBxl(B}j& z`V;caXyiS?;n%da5Dw}=yXOGUQD88_ZD**{6Dgt8a9@@)R{>8U%oH$c7Xsb>IlhjZY7E8(q|;hV<5<2r|N$~-A} zZ{XM7z-1gC@1ix%%TR0p&m6kj(8$Eg$oC9S+9L^n1wUT{W^My}?zP!Se?JAjrO;SufBJDJ{Ny^-9i$rNZwdTA9N1UV_C9zj1vcsf?`W_$iyqdcr&rJy{mf&L1=<+P z;fw}AVU!kcy?-3M813UDAop&%=}`Fw>=H3H0t&wfE|YNG_3+S(^i2PL1@!e|MvAdw zPbYs7lF)v*qsm*-djLlTAh?v)wFmNnb0)gXdsbcmDsO;q0*rMT1Cx+l?q;q|S!e6l zqEF;9<6@i~a{ZAp^$4`+N#D)`t~cPVcj;Xp(yeC}TsI3Cy=^j!{xl@-W-Q9t)U^;8 zJJHSrYP=YpXa{Z=^30n!H^2oQkSUkZu6JUVMW&61=NeEliL}w?PSKY8k(R&}?SXI@ zG#m^r`@ye`;l4uBwZPmRUWo28`d2pP^-|_gyY|?0zO7xD3(?uGR7GnomnUkIY#`9n zQU|^`4SF^~YOW2MOE1k9+Drh=TjWVoTFY)fP`K;K6Q1sm8jW^X3eJkat+6?b;2}ku zBb|~+)dSGd6$j_qqxnA&PFzfiJDt=^`Z{wdbuSXb`$#$j4QgzWg`ubADbXJ3v7CI3 zf+gi`McdE^-acCgTzJk^ugD16s{xi>AM>7`zSLp7nP#D%-Uic+(K`%k#ycg;_0Wu!6q0&?pLW(I!Uj$f!T7_WCZ7OJnPOg*ISHI zS06aEKRo3)9}JJ@*O$O%> z7$~NW)$mmzC91&34^iVS(9He$lR~ZT)rwJy%QGh2U1fB%Ce)}sQk$IaOxKQbuDXF= z?#<5!Lt56Aq3Q%$86N8L)TACHXBB0T>6aq0*HE9(i>r9kiQXsI&7y^)y_)7Vj=c8K zwKUI0nk%z^$ud*-7TDUtOS>wQYRv7_&;Nu~`!Nd+D;v^X8+13xyrVxHuB3XXdN>h^*^50PsRT5va9 z(+Yh!5DVi}xn2WlJO{3M4?D!W8jJ*I1llW* z-Rl@rpCYGTgI~NY_8Ro&S@dolT(pew^xsIp4(LShQTZp(Ih*bV%+Df!jq-ODpQEVh zR{pPH96SP-H)3owM2~oO%!r$=Vi}9}5nB43_Zj3liGEZ>M-71+2O`nbD+7@qS9jim zMtA1@J*5w#B}c)X{lonCO8B-iyte?Xy$GK3!ICl5+#5EQei~D0F7mWJt$hqm`UBe< zu$=(!57NiC(2TBebwQ@jq(2A1hOwa6fD@yJ^r0U{n!b}74p91)V8gW_*FLcGG$p== zwnh(eZ~F6K;jcikmcE_~R5v3-X5l9p1XjJ->RdEvA>;58b1i9(ivP+SDvuNoX;B5*Q{2KlA3S4y_wajG{ogeGclp 
z2o$-{(AxpNMy7h>#2{M#Z)jeVloWk4a)mQSPg0Jfh4aBkEo6jVt(rjKDL7B6T@OU= zHTr@ljcMT;T23QTJ*VoeW3HNd6W5#AAvLgZjh1i@ZQMr5%5e8o>ivTJi{NG>+B>&= z9DebJV{c>Y#fbO{T4^Pogrs>IdW-_ISAp%}IHO->|XHt8W=j6zAb_l zPciP2P)#l2{iDVca!p;|!5p4>I@%U;u?M%)w}U)gLE1ccqLGthF>aXvQj)s!=ecSq`8 z0t9*USnn)BixUq`#ab{zU zEF#q#-L#TFM3*#$L(Q2VY>qUv=+CI~1fAZEZ+Mo&)9Cv!BEONQKZaMfz)|_w9NnPR zVPv2uLyST4AQo#EIBGC0=;d$@b2^Z}7;KGS!KJ%s;W8wew;%O{E)(G_*9g^c#_w+e zA9sX~Q-Ry{#)XU*`|N$cm(Zr?lapX(a)7NreMp8LbcJ48R{A-#Nur)@35E*z*M{1~ zjWJ}kQM^HI3w+WPJ>-10J)C01uR?g@8PeQ!qcv~=`KrKI|D+CY7?{HBz?e^EXlFaP z8BU7l*s3A1hr?kH&`!6YM<)VBOX#iFDq1A+?tJL2cD9b(ph?^vJeV2^;HzHrv^6$-DeMCuI!tqoJf!wf5q#{q&}`sugJf9?R8<*e zUqEBeMK`AJS{v(-QZwMDqm(^@M7xJyInd}8=r$V4%k`(yiX+!qY-^q*DBXsU=S>1t z;ISH{{{vY!3Muyxsag#O7`Mh0Xv?ox7zgwD+zDrO0~_1m6VEDsNqJArRS%!Tktwds zxu3oTQ2zu~%V62KZgm>}JaHL0Wh;_GsioCeKy6WHr)k|4zulx7T`P;R823_-4mrwz zC3ETx)WhhyofbhQ`=^j)o@!HNersv8r{;_;vY$3n)ExO* z>Xqiyn@C% zpaCP)JCL0DJ%wKR44&x(*J|kuq;@?k|3IT(fz-JJ?w*H4aQ(zw?*Qi@Qu@F#?o5oQ z1RDXF(Xc!f^&?z68Gank81S}Q&$}6a_5~!Gv-vaO0ndP(jgD6TYCjbrU((cneZcpV zNUaahw;w@`t&D~j!A@Hs_Uym*l^X8` z9r^4>Ex$$%x-;NBXtN$V8m-ut=3>#f9`h7>qauBJ0aipyAT^;4!zyZDATUo3V+N4;}%3TUjR05kmv$6 zZ>F8U!`V~lg{zd_i+mF}|2sHuie$=y{tu&RTY{}mprrSeyMp-*w9JR^jhN?t*+TlW z0-R4oE*ROrE480P$%VlV7!$an4))e^T2@Ex2^!`qdR`6gUJb+pkQS%F0Z*WO+s8w1{jf2V-Wee}(<@SDMN)UJI4PZ&*M8_(R!Qy2Pp5^6PC&$t)X z*^hT??58g9*EIOq+m%x=*(9^znGqtv^1S>tkO-*>ui1IyvY8o8d6d9Mz|1RX* z3f$*W&uZZIJm3gu(itvzpWn8i#ZeBNY$NBr&~habWEyElf>kjZx(x%e&gfM=DW0XO z$v9{Y2Nxny4?=hSEbjbU1|(YC5!S0I?_Smfv~n(P^c(extA=yniY<&qPv7UlGw#C6 z23jL(_G3&pf&L@O<%rUSa&&csGYb$Za;-2`eSZu~?IxUQ!k(3$^_MU)u zUCT%#H;j$$4BwT7D)9Wfz^l#wF?^>CZ3_=X{jeJxIAg5_@4Nq{TzdEDdC6}q+p&+U+wMk!1|l{7V9#Bcp5jlp?&dhS|xUuv?~ zY6l~qmPIbXQ$A^G*VR6^ZEl?o~tiQ}SN&?An`qP#aXT0uWZwS5P zY}pay8buDAvYziOYEW+#gAw<44~Bk~d7@6#YAT@Sv-$7Iub0u?Z-VuyN97UX)9(dKQWdIatRb2*HrPk7QPV66*1@SOey{P(QdGTL4MMV$AvKthxN z$J0D99_Jwby@$401Wk7!kB`C)+Mi|6NS*1Yt5Et9HZwAo&{_#NJ8~?~Mdb6PZPXG@ 
z*3KD5Uh_9bt7OsIbm~yE8l|Zs6rak|cai1=@I^Z)9r2{z)GCM+w}c)Jzgr)Y@h^QGnYE#~R;4Z54QW)9Z+jD8(H_nG^Sd>D=>XQ;K`s}yfwNxkTosM( z3}qZNJ3hB`_AcT+NV~>xi+4zRXOesL^;LAj3iu<`^<>01@VoKkUqj~ZLEfGUH(nLa zC_IQa=_~2GX&4u*K z-6Zzo03+-bv?@kPWKs4q5t;)R~IDYY;^hh&!elz3Gv#w|It2|{rXV@>ud3VeF6^(Nj{+>^%V-c;C z$9`yvOm%Pc1~}q+YVgFf`;uLiKZW*k!d%Ml^T_!!(^fl^_GLxl~^OE zF~ZKH?_-Z?7j@E5%H19)I}twgo}zhRc{4iy6LOsbPUC6yK0xw4eIUDHiWd1M+n3uzMM(i95zbyg|<8f-G4TRQbfw?l^c_01ol;8)< zg7mB#KjL}#JP9!I;<^z}cXqH8$qpwv~!_uqhvJE4wPsKxUG^wAq(;-2sK zD7lxE!BD0yy}B7b`VBoRN6vfU7}p4%qrE$zt7oJ7(XyVN-_VOz^y50l%%9-(H-OF? z$-XA<3hKO;J}jXp?=ccKf#nzBfIlD;yoq)y6kAG9z5%ZA%6L z_a|#r1J+aG91?Jbe^xhIX_& z5^iY(?%FX!+XnOtfqw<2jyyH>t?J@*IK-9{0&BXlG?o6elDe(@bXm86zq?R z0g8)hserbfgWe2Z&J42yYneqkW%fLtTmo0>iRr@smDtlwkrgB1swcpNH+gwxwIc9u zqb_H91L29?VB{hA(H*7gW3{*^g7r77fWPJ-Z`&|F-={B2;odyx>^?@LwTz*)Xz_T5 zVlC432mFB&&QaY5JD(bdfW0`=yb3{ zOjQ}T^{Go3^tO~xUlQy&3$Dgkc%M>w8)EJ5{uoSK?qG1gj3+_#Qk+V^%r_tSMgem} ztn6Yin9&2#n{PyNXVmMdVJ*MyV}knB(?$k52TFTq&1#@lzqkUEU{t%ixexGozsG87 zcV%k@MRnW)aP=I&o4}FozRiQ5f6K^k4!pJLVTC}CBst^$R(If7G6$a0 zKUD*mw4dfP#wU=c1T1G!%W1T!z2JIB3BO!fbhV-dJ@lWR4|fI^@h^um2dJe0%BaDU zygTrDBDJrlt|jD`Y8f4Uyxtb~TDFk}@pM%DUHT~nBB!~EBh53pT@fjZMmNIL7ifJIJKs$k}K|6iO;NEzfXI5q@|LCBDUZj zqa^>khvzYG=F_+Y?u9A~HT0f&5BV^p`8UYI9q`|8kV~h)X>aiKEj$N};G86S=xMmH zA8EbdxFhJ$J1FryT5%6_K8pPP2lmWCQpzGd`T)myNN#W0YmL999Al+3KDKdiQZH&e z0GH??Q;yZfO7T2r32p3QlzOY}8l?3a{90E6(| z6yWrxULkHp`#$%k?iTRk+i=Dv_-q(s<0D|$94v@JIP7^On>Y3}A+PgDcMUWow_fSl zNXSXx!aXYkkTI_!Rc0{qwN<Ug`&IW_6&_u>D&uV;1A+L#XBo{XR;5LTf$A?+K9#jE)bWLo1|ACwSs@r1h`Rq?a+$&q8+eCTATe z-yh1l7S)X&Tp6_bP4sRPGE@s~F?QhxVD@4xKzGXi3P|n)(o4y4I(qtQ?7pS}nmgcv zIpF_8{_lpSvuVrtM(#abi^kc74Z4xmWJGE{#|dz4E()b}?@X zoU6l)&Ejlbx~bcLOCQou;$5Kk7V|TX(Jh}=tjVZ8SwNxgjr%FIJgU<}zq9G*WNZ`n zJmdjw75`*1Iy-PbvOg= zi-gi&>#WGrq)DJrVi~h~JTNMKoHvdG)50fjHFA^`avk`b5E6c6S1+d#k+TD8FP9v*gunAw`7#2fN2&pup(M;?(E6w@^IzD zNCu-Y&V}mU8h1+YJgBED!14At1;;!^9V5X|C(;Y3y$O9P3N~Dfcj5`4SLv~>O6R}QR|oK{ zm&sE^F*S4FU5fhLRU&7#MT@jXlX^m=cYwJXy7mK}{ulh%3_Gw8snD8HQ5Oz*9ckko 
zylbJmx4n8=;8~=aw=W+?7wy2_J^=T%4LsEm+4%w5e=TEZEi&BE^bHzmDe^PUUgx0= zYA~}}#Z2V@spp0Cc}Vz8=n>DyxzlO_IQ}QH;CV{#V-9i<33m|fo6l%9qLFb+V?Aro zqq}HPTV)p-Z4I!@K{7pxME@M#T@PGqfpi!;{89L{HJCduV`HjMhI6 z7C;-Ya5nuqK#B8!uoa)%fNUld{sFx)8$G!M&W=`$-sUA}^WD_YmFKnL3UAjl^2Vq1 z<2vx+UB2fr+kBi_TcM*qr`6xl){o?Q0x4;P{KG);GGnrY+UEeNcVTr#O0`CUy$TNg z#PgTP<-F$6(EgRcP#dWxR8K&S-Q;XckKYUR4kt}rWDn1SKfeQe&FJwq>bQ^ckJ4H_ z%6!3l26R3G=5C~}*0jGHtHJX{_d$tA(WJ44VrtP3--{Nmr;OCM?8!h^3ti=m;8Uo0 z7&wMgPMfSXRB}J$oBY27OuZB|eQoF@H+aU&JygcY_y<&NN6A+Fms+ht4Xvr+be>J8 zjmpTn@zk}J)@y@>hmWPZn`}XNBIRmR&j_@1FLdem$iltYM_mHHz64!%23g~M+Epmg zif?b7mg6plAOFH!W*7QUYW1Z3M|d)rwT z-m;hZ_c0v&dqz%8>a0QyPhyD}D|-*TRSTNl0;a0LV_#9rY|38;4n1qo?pWHx;QT(y z?VwiAG7kqE((DLhcN=AvQ2qift%r=o)e|}HEgQxXac9M-;PG#$wq_$}HL$ z!*?1g6vC%j$XZW~%*TQYc{o!TPa4EyR1IJV(>i9sc`Q{aBn{($0)G5nEs4{j?IJ4E03P@ejV??>yIRN zMs1`H&s}Jhs27t#qaQ?$9RkuJyvtMP-AL(qU}ZSz`-2AY{_Q_g;vs6wLejW4Vr1`4 zjQ9cA>y?puS`V&&^k(Ee3LZTZ{1wz0OrN5LzX+Zz4_3P|GL3F`KmBuc!r8vq*3)I* zjS1|2tz6{bct%ALXH|Dg)h!cFW{|Ru7ASy9&7S5 z$$#+?y&{=9<=xEh%(s4v%55brf``PrsrDV~zDkqK#qxbBNY{BCQ+}cRYMi0B1_2 z%aBmNCe>(p&LzJ_3Vg}4s*J!~xbzkzOdd4rk32G$t*ZeNwCtU~o_Q-9WZHPF6y?yn z=#)AEHf2Xca_C>z@2yn4mNdPrdfaou_X7CwH^`v(kh3F^FP;>O`>jVI=aoT5)-;-| z`!?MV=!&_!${&O8*HC*O`1@kyu%6hS;KSA&;pWN*mnVadfz(uo90|sqF%MRf*FC7t z50&KZ%ilyE<*ciZ3&3bQ#*`5YT;p~|wuZWmyI{1o;n*Q1z_2~w`95T}8e$#gYA`O> zk?xL@4QTXRsHsmV{d;Ox7Yspq8A-b#u#ba3ox3_W*5)w!y|ZTL#RGqq6GnE>+qnmw zIu86!q5KFmm^Xd8mM{om@a)Fcn)%@w=UA$J7c$_G4_DzszFY= zN^OKi=^uTL4Qb!?R{2?Pu5%EjegasWVHgEMndAs|-kn+dcFvRy4)qFNN$NOS@}z_# z(Q#-_$7)q<3;THdYc)O7kMG#uf=y9Odk-h}QJ1+YjTfwFb5tV8&Tx znqNj_*v@Zd?{~ma9vn9b>&@BldJ(0JK;#MELr_nT@LFuErAX)LaKw#pvo^Jwsx@!- z@!OJn__p!w9)MavC9aFX@H%A52>$&9$9_kT%V1X@KxT-gLDbLys#-eUOW&2>ac$b! 
zm~(6AIxXp&^o&n(Xu(-ynzY8~leX}SKHM0KHA!lGsf9ZpYbleK`Buuik`Q0zphAKg ztvQn>{26&2^-N<%cVD>C*|VPiiNUVQBVQ_D!!`NiPgZ7XlJA7QVyHXvqFO#$YPLCw zmrvt&gyVQf6f-&r zum!JstfwwAIvd@bsa0D=%x7MSFiw=XIam|!F`G)tC`O>=9z)w#1h&hdL<4w1pUF00 zP(xI}2Aqs6aoyB>aV^*~?sHMn_5`=e`i`M?chz{-L=V|o;7wwi%?0D03(mq8n?p*c z0Nr^0zfEsO1mEb%jNHZK@_vZ+^i*kXv`;06XTQq;?}PNoez_W12Fu#hirGN-K5$f{ zeZ7tD8JdC&-3>Gqu$t9M+Tmx=zTPTZP_L`=+9Ks$Oz=!%7ZO2pTdTVS?KbqFOc8I@gkgB)H_0WtrF2yrneQ7%n z3G`B^CrV1oR0Ym;t>G|xSf6xnoT$e*brjgMVkF+iP*WxFG6p$Q9AJbO8P6NZ zRS#Ha&^tAbd%fF1p|^me1uc3HuMxOw2Ax+H8RhKQNETY|#lY--8Es$VOb^APD2w!J z1IOedNj^r>dWUy&XgCyK+CJLsh$YzoiC6|1`w?_f!p^`(7>TATBF7u>m++27!xvHh z9qOq`J9=`B%2E$~mIlrPaLy~}MR$$)eFN>+rVoRFTmN7M`eDq2?Sb+=;2&ow^U!F< z=1Cv}XH(y9c&!d=gYl-}Bv9T8WW9lYTbM-+gCdUZ|KYs>zTS#Y(wlmg(PvK-L>eiZ zi>T3mdXn7%6<6mfhP2_>e6Bv!ObAb9(+Lc3nUkXS2eembJ$G0>;q-x!%5A@`D z!r~@Ut3fmQ-6x}dE}O}Dd>vWjNRdOUQdyM>JwHCj*zi<&?d(GSkQb++ktZUHiUY2{ zgd4=Db3kLPFN9iM!Momfxmvj&@mgAQ#)BE>hdpRT%h?kpDX^1bMClE%lw+|D@?;rp zbO04u2yyYCf&R9hA<>-N_>) zN*6Vk=lK3kPG{#@nnvV!l(Fc3i%DRz8r=9ip2c(+-LB|a(%G)owD!Bw>ttH182at3 zz$e;bzOB=-W_>AAo!J)O&-&@d5kb`K}u1( z_o=w*{uOzqVb2>MYzp${O`cZ9zmf(D*P(3tM9RfBj1dczXms~Z{Rr-JX?q>A^8$Vu zuV+1_HZ!I?$8a{dJ`{3nKs!$lHO(ML14r-hq$;hRXN>6~>Pri?krzh(-A&u_XA-^R?X4C0ew#AGDIINTODGAo zpx;DmX&J?vpLz$LGz#3iq~ww(&iE_iCznrWA}iuvOXF@ECppT#$ea4;0fC{xE$M&RdM20e@nm>0B0l*Ty$KcnDn zcjL&-NwBq_T=sKHz;P~|#5cy|l`^XqQx^m)igC4OgegH;yAUTSI;uLv1}z(){jAKBafWyx!d0;L}v1>My8@ z71x)1N69e_8_Q9y&dh=XEaO<)NT1{!eV0@DH2{gwn|@yeMk_JqT?KB81^7I+RUUH( z$6q&avm)3%-GJsAdR_#C6_9z3wa1+}U<1?T)}RmxT^Uv*P96EG6A%(ARk5JE(ah`p=k5o*Y+>>fLb7KMOh>0LmY! 
z+nME;fqJe`{D>|tqAs;^ny1cjjM?SwX7ZGf=D-yxXyQNjT_~At*~kX|Dibt#iW!EI z-6-KoMR{7Eq%_ut)Pb>+;D2MrgEO!Wy!E3fAHyrm(bMEV_CIKh%c5ho^VjpK^*P@=?^X%Iql52f_VHjqC>!0&$WqalVUHuPlxJk!L+%RHWc+@u%~*CAtt$1sd-NZ` z;#uN4yyhci?#CKDgjHpFMz?ImcY8G>_3L0kd`B&{X>|^@s#|Yiq`YfKPoX|X{w`A8)!zaPbtPpd zeKewoTILR*aAdd>&Rd30Ko-qlRC+eMb{Mxqs7vl#jXV%X7bCr#4OXLsxHtBpx3I`l z_d(;u!ISbCIVz(sowyWd9kl5PP?t|UL-ht7) zexml4;PeMD_9i&~3{34ItvtGL7#J@DA4x|gy8ivz;5Nl)pTJqsUtI{6>eG_FsRBgq zoYQJN5d8Z?z{`cm`xL!0mv(;@O6%QygIrp;Rp>)TrsR_DZg*d0>7Cw&`Mifgo4A52 zPG$vNqkWU)?aWWBCR0Zz@Z)cN9>>3qm%~cYQr~Q;6xB|z53aPvq*fL%I6qrUOC#W| zT0Bt`?MA!QfV#1M(|-AivOJTfY*m{2U6z_M>Hq$1k7ADFHHD+4G_SC`=UkqQQ3muN zMc-7+73Hn-8vjQ>dL3wx6ELL(C(fc~aLuN)?m9fvu1@h(3$Y?CKgYWTQqPg^xp#Aj zmnf~BjjzP|tj5y>^irhaij?Q?h1>L*Fl{@hT9mP8@Bau{btjE$bbkLQYxKAKdp1LNAG zdO}}IFLG75ySo9!e;c+Na)@_ z?LJUBejYfeKp(%Qt|QQ-5I!{?*F8wP>&U0YPz8SQY`j*u5ij%rdm>d2up{cO&^zy8 znG8?XL(09)SoCz{YUF8(68!>oT_tRb2yjL>g1+5Ee~jMLgwm(MM``Lwf!pfHUT0CBl57qhyn#qB&>hsX zjlL^=jnls_=o4XTK`t@w`tm8%nq<^H2o{@@dI4$XaZV<0=YWOjl#Xj}KFOn1zZb$u zY4A4&dS~&S=CeL6$KAktpkcQ#4z=~-ZV~0J7QN-8*E!Z?ZU6ex@0uy;vkW#f`5cS- z6t&=Ig}h(DM~=y^P-s5S61*Eg>lEKgJ;zf?`0fA9OPLWRhkJrDssHI)FR}idObKU2 zF<&}-kK$QrIb(dIiu56j zWSnOOtUKo^ZA*`6R`3I5R*#hP$JP~rS^1U2_|b+_^6E{{YZSc2Z{MDN6b}i}eik-zC^%SJPHK9ZRdqJ1B2><<~``mVE(TMg-jlJgdx z8c9bE7#==NjqlCW`Ze~(Xt3kV*ZKAjq1Q%t@s5HxH_T!^d^=Bt>Kpo80Oa~nn=nRq z@?VO$KS66-kAs$}-0mugR!QW{JjV3``r~@OeS0wM0_jH`O_8Q*LG4j#ZA)s1>!>?& z$4cc_XFu={1V=wybEZ>S*u#0m~shZMiHaEkesfpB~Z`hwRIYIm31%FyZujNWglD}lsw&;Fxu@qR3O z?~)ZG_FCV{VSc4~YtQPDZv&)1@w*)*v<22s+W6uXk(bJ?nb4pfa$0MyHL#5cbtQoF zcC4z}j9p>##J!%}-uylVsI<{M$uJ#AO2Yn^0el`rzqqDn#C`9zFk-^HX!J{&JGw(c zE#-|^Z-L2j)Y}|L9aCAn)OGGDbGDZiB(gVY^rp3zKstk8?fAa|D80vCSe^ft4^;Bj z+idz#8H%K-$Z|>5NrwT?{0(s8M@OxmXhL`~nfbb-?p)@IdDQExFD~~>Dqm)XA)UkX~?t4G8<^1RD=4<3+OC->&P)=WvYw&7| zU3^xB>P6I+4r!)Gi%fkJ>1c|vjbgg|srOPmXJm~ty>#ePhW1g?b^ya#&Czm>)^7*u zbhZ0nxh<=iv=$;eJjPdiuiA=swzMaH+e19gV%{t!wEK zejznxeRnuM2R)Vw>DoV1>I-W6JjnigkeKl<4tIJ~3bVaJEMv!nC*R}xm3CJ{cp(K{ 
z)tqq!ZcM0CJ*5{hwrjkO-r;vkIJHgqC0y~T_HyF@nsf_LhwAK#Ga}48NY_&ngYz~&$EU-45(J4)Z@=p5dNtwma zye0J(0-NcsA}N=h0Uw~<6!jQW!g;Q%UG6Q3{Q3}47u;*Cgg%W?+lzkbDNs*)VtOca z%^^n?nm>sJxsqD7r|#yTJ5(cPk5XqJ`sUf!O#xHcaJf6LXHiFYUfy}SUQ8`P_k4IJyfVG2 zhHdl&JYZztcKAoL@tk{}q8HX^T)ov--2$sa7+vucNByZo9V16o=Iq7afT28mEJoFn z`VEwhUj~1s7Pz`KL7v{h6VaQ0M$FTLe*$&)<$ru7D5btt`Z(58yc0Z|6`s3F73V$b zHnsm4zN0kJ)=3_#MGjUjN9h=ARyHVmI~>bnsu=OR;JCC{Mtm*?E0d}7|M2u8FycOS zcd@HIwYK68Ge=gE|6Rd`68#YETu-i3;8P`c`nbB~?_a*0TPwkp!k+sR%C@|e{K@b? z=2x4?U**x@N~2=_YvDVC6t~WoU2nS?tT=wBQJ=e=)IVuHqsEJMXg`jn%q*Uj3pya1 zy&igzlgA`oTqR!sbe7O^REjGJ3j&_?#$!4jS+W3?XE(^^An z8O%&JdS#Uf38?Sh_xijqLmD{utsmNR-`6BcY6H5CUJ2~41m1yg^gwc)5zaY?g@({k zPEq&9oei;lck~*EA{W_epXL8F)7l(j5n$lz0@!nDkuTnU9p&wi@}QHJ-*beN#1dl z^e|M<2^1@}>!X}ak;k#1Eu}`9&)d1Ra4Kg#v3MQr)#JH)>GMNh#YlIcE27T%{C1tF zF0kf;MKzwbhq;wg^_Y>l#%|=Jc!TPh(4i}%a{#icAz1hcS?$d;#tC<>uU}6~*>TX0 zr`gPvwOvQjU&nQ{F|)yyQd5uMbwI40C0rU@U$p0sh{nhE;bpF?)=R#c)q^bKqD)ZmH46cG4H@A%C&a1Dkr(ZqeO6S=5GCH zX*xSF&03T{rT$Ir!C$_-_b-kpN0qsw1W5ABv7mgngfnq>vHi$c8&A#!U{XnW89oMg z&AbM7a=>&U){MBe#GxRgoAcW>KJBKX!4j4Fo_$Gx8P}Q1AUV>cmA;H``WjCnj$Bz> z0d^jshyMU)j*cqu;j6SK7e+f#sx~}UPn=~}2=d+a>nuum_lz9uT94L(arm`2j98b4 zWm8OF+LO~=-KCHSf#XgrJ3mhS;+fnk-W<`E$$_8T_aH__LF3L)VgPNrvp*fiV;^`z zo6b3)GgP@%J6&zQkGHkx_qYrgli^z|_;Qa$8c3YCdYYjj_4fd?rY{VhRcGJpu)YVv zLtP64AcC^H7!)Zx>UL;7VBeEN#DEkH`> z%Uc|3bbcdGx~6ptJyBO#Z+q%UK&yM;@4ety@24?>Pe77)rQhyDdLA338nYPJsn>%M z{g++%K9yWKKzNv%<48=wF&|N@(%Si>@geUb|7gk0F9kmo2Hhf^)vfM{Qci9QIrI!jUH5z_@150k081(KY_vdP>+UUZUvA+s zPN)NCRiqzs-f39>`XB})hn#~}L4OpJcLrK1&UlO^>P;Qr(UpDE|U!(~vYqIPOYM%kwH`+_~Q7$w6DxU+S)j2K1{4js6uST+0~~u#kha zRvIPA*MeT24t1Ks1?r38)a?C?N<=*)6|gzKByV%5>**Z5!h^x$Vp^UL{ua^qdEl@$ z@amDVZ(5}Gs(Tpw*+5#3T^A?7N%D(lJJuqp;%)Lu)OhY+$>|D{+UO?UVkJ#KQv6a5 zrNN$>({-|uNLb?`7Env|P+|@czDrBs4D1e|<^G4xzL= z1){IYSlkZ)`$Tg2Ox!2|yRn}578a(|(-ni6V{a=|*vv(xBazJ;SYP{w+X|1`~UydOSIgx{Y5 zm$LCh#@ws$@<^~&9f~_Ml`r%Xx@XbZ%#gqv;<_vUkFPgM$N4{}NnP!2%Q}lW3yr9! 
z#?@Ny8EFisTtYopkw-g8+h`7O$xq!wElL%my1FoI5mBfcbUHQ3cwJQ%d?Za7m< z@+x}Nm{RS*e2TPM8lb1*MU(uow-1Y=fkK^SsbOd zJsZYbAD9yT7O3a`)dGGiW1nX%8SC^9%ooIjD@=XS)X{<{!M|b@lU#Z;4hkOT-w4W? z(~;^c4acqI+gaZhYI8q>p4lA%%Z}=A_@%9PDmlhtIqstVPv~RxccmFu2l*Gh^+Ki3 zeHxIBC#^QS4%`PWUk?OJ#t?UH{0RA6Wmif^9ilx|i?1kg`GSb9@JJ=8^`1wFysAyNjUXYFyJcNPjJZ+YCn^D+{r!{)z|<`M`_x$LVtXvIt2>zXtxl+M%+IFeC?>_-(?|Hs$QSw3)RORN-O6aa z%#OS|ymQFmD2}pO86D-dnk>GYc?|+ut-)<*fEa7V^Td`G>Zw?J zLqfhqPvh7(4xzj7s|T2rl?VClzNgD+DS>t4JtdccL46IU0+TXp9?&HigT&xQqkoCS zdX+Ty)gHnR+LtHJ{gzP1J?TkG93*W7xO27Ec~CXH^!Y)nzlUX$V%$68l`gL5Oo!|2 z^W&5=;)3Tfi%2&fK92I&vPM5T2(I;;)ThtV(tGVDVTry*t)9w@GCx7?Jl^WS`i#b3 zYM40Nu+L+8Zl67~>iy=u;It$3%RR%R=wT68KSMC5U zhcr|Jw?aP@@q7et*P!-LYCTvO3v`zL~ixg`}+<7%87w_AV`k$YSqTT4V zuE_Y*`f!vk2>P|ObP}p+jl`5DU_;3@i(NGvST}Z-=T7H{GpX$^?0hg<4x6AMW$m@N zazBA~MQ?gE5lOX)eAd($sqU^%?+4T?8_&$tWsL5IC-(-quXW{EWP%c=4C#XR^Y(G(DbH$-z&u_*m{XVA(PH0GvE4VEi#^a&JO;#!L%x zjl;f7FuETC3hAAPgm&Sn-l}R{3?sx#m z(}BKfg%s`mfX+J%HHc=6MjwgCkf#-(cmF`q z7C^8#aOOIs%tWA+Uv`Dk`tOaD+zlJf*?_$7$znYK_An1N|6T!XI*+;y=$DhL0aAA_-}04vd!37% z4J_5LQrtTw@9T-OtoO|)k!i^=wxs!Qfa*-jRKdnEj*s(17e7(s=M?6r&& zC0FgBS+fyQ@}!yCst1-;gbybn^V{nK-#Tov<a6dtcz!qbYQ*?kjPDmS9>fI@pL>?MjN~i}>hxXHa;FJE& zKH5^U9V6SlGt1GL+61#`S4-MHs4x81GpatzXMW|(BR|;ASv>LO%BLqB5Gz-xFz14~F-_sbw6Q8~HVr`P5!$>uHR4d0HF%l!eFk<8@F%l%>VWeC8szMkAk( zQg_#Yzk^`BEF(aC<6Ag5YVn`Htx>J2S=*xmt@MM1`zL(AFkvgixH zm9@$6-JMUp?vDBRic(xlGyZptHuLP4-wE1xv`6`=gqGj6u$~ zR9#1q_NK5t4QFW^xEim$YdP1C+)?gWl%`t5nYIhSw_4^=cwDb={eT6nWI5nIdaYbu z%%>7Nj~xlA0Ob(8svG&Wni<(}_+B{E#a{a4UeU2Z@QnwwzAQqVk17Q#J|yDyr_dB9^(^hh^gU>VaN{UiKJB8ZHQg@YL z5qW-eKIwV9X8nvQy_j;L6c+~1brc{AYVpt0E9NyOr#-08Gw-C@L%m9MC5bbt=b>yC z68HCv@B}aA{g;f}h{HVk_dRg9i#5hZm~`w(J~m~B(t4SrRpq{iJ%KlkV(huA#%Q7< zaJCbeN{}g^KtX*H+NWvivRwB-L!(~1OG2xvEo0nynX&4;Ir@C4qZD)Z+C;c*7#3gI zFjmxq?h_r$)3Nle3UC#Y>rQ@6qs+Ph*JTh3y=zWOkYO+cn@)! 
z)-FMc?S|7GdwG;S6^y6g9b=NY*1Z*&EUgZe!_Cu|Cwkblfb{l_0;UxGEe1M$T4L0) zZzJzp!GSMR-!^1oN3`DnU@%8zdhWRX4s0(4dUvM0g)Gg1rsc6wr0!?*&Hc^=*uag+ zCrsJkd@p@c>qmP@30RLhm(ix)o~wa1#V=)Xw7%ZqxqA6cHm%WT>(olbpSg6l$apP{q0&@I4xckouQrx_KeZaAdi)?`s?C2OjEuX{&ZGpZ*47F zrdh{H;spKhoJt$GTzV@r8U%bO_r+pH>sa2GZL4d3`HB|5>%K|qaK)&I9JkUB^C|IC z;g>ME@8=eh#s_nJ5%)1 zGh6mnUQjQn{qw*`pWpsy}Lxp+(8NXCiSv&Lwx zE63B}f843CFw8nPlQI_mHB!_NGR7}n=^ z!q2V@8@;+Fwyhy9avbY9NCas)l(uRDpZq6>Eu{ue zG8i*bi>^NwNLi%!qvX;4)DoAQ_hYkt$*1cbcOy3|(fca+ExaFSEqrl0(93Unxo?9L zib<;uh4dTft^Js1%7Ct5K;N4u)^<^saiPRQg12jO?%Pd4ug&B;h}_Vhvk|%O{6B}b zJmKaJL(AyB%|ZsKt36ef3aLIReL0qVo_V>`uzvVwJz^n#{;Q3Y$^W0e;|MFJWHOXD zk31koJ-_Q3n{&^&=cyn(jXR2**&0nTC&;ft^rI)hm(%$-ZH%&%~1xs z{(7FeFVQ)jn(ZU7=WWQ!*Q(Gc;>&&Kjx%T9KGCc1eoJwbpv8I6L=U7oz# z^W5kAfB(<-;UTT#Y>(xPz0}~CK1V6iaW(MCl3bf4Z&DNYU9U`zoNrl+lx44-;gUzY z{yw#pd8CzR-a{>Z+4DrW%%137;`4#v-*TSn2@Ul! zT0fmN5M8uuH5%oiq*{S4S0#@l;dLGUujGAkycJ^`;_g|(X8A*d>^nC zH>>cw65rIBzeAW-y=UXB06uf!gRZib zO`TG0(miK42Bvy0r9NtP=RWFK#F{t1tvnI0forY(?$mAM6=&aMq=iC9crO1R>i;5q z^&~uJAE%BN;Z1V$g>W}lm`?@MJHgH^F#@+x@9UJ|w_yK3X`TF>sgXx2Z%y|R`0|tN zFu8%Yq^D{;DNL=G(S4+S(HFg0YW!lh^7>?8`w=u}M=EKn!ib82b3%gU+K5scwdN)K zT$O0QJYmsUch8OTotlv7rcetvBrm@V?g@2r1oirN1pOK(4ro2Dr<~gM^p;d?e{)Wz zLcCgImgoNhsLon5v^j^6@2+cCe@g4MPCaXYfiglp$J6&u3QxA{FIv$-#T(btSh8z# z?*?A-vX2vXNO)qLRdyuQL$VKJ$oUFm^@Zq{XC}^vZ#oZeDgNsXTORo1c@7sa7OucE zR&Ri|hWh_1{yXT8yU~|LnBiXHqkNc)_|K`;JqfOS zah`Jp={$e+VW3>4@85+Qs_@x@Jo*{c*HQzW9_1+M?fF@}T?2#;BYqO^N&xd;2etJA zuc#?HLkj>cJ~2)L6=W>BTc|ByI8htua9}V+or9uiZjBoR#?^PtI5^qAixA9B%-E*+7_6DQR zgJ`%Z6dl1? 
z@$bo4T8H})V`yGKjr$Vx?7L`Rl{dL^Y9;xUGPb!}FD-(hKx}Ey^ip_-Gm!FYziRF7 zST(p_+Tdwvo+|0;u?{$){kn(Pml*#%C1)`3JsVusyD%J1{IkG-1E8tF(EFA2))|2s zF#ReJUJ?9HAFg(`)VPJ#s_keE>als1``(n{dgvD@Tb{ZVEbSw0FFaQNOKKD9IC4!_ z3asTxea`iqLam(VIE``H%J>Tx`KFp~+V0japI%GqX)Z+am z{*(5!Y^xDgKCO?q40WBK`2JC9Rvx-LSf8Ul@+aUR`myZmsr<_otVdkkh=<2mt0A!A>ZVDQnUCcBp2|MF`3p}sqyKD_gBV_ z;4QhK{JaI6%NS=;PJ>c%mze9WYs5U=;Hg!f>-^TluOL;iy)Bu)`JbiBDdi*I39N8; zmNM#PAh3}#hsAo>xxA~>Okft}R!a0;jox)e?qQUI)XCjx;3!RKv|uu63Z5MUWyxLW zH_lhPK4C32r4-r^e$vJ&ae1_-VCCxCyD3?C)Cir&FJUMDcg#w;GcCOC%|5k8eL~z_ zZ(_46U%mr(bzhaM4D@Tt2VN$AEFp5AcBr)rZk20V{(})d2)dpJr1#?aWqRq!WtO3> zFdYsicfSl@qbo5##aT9ArG0-7B(A}`eHyi21^3Xup#1e)=^q8VS_s*f`2^1!IGbNP zyh1s*GhAg?;Oy-0SHn8c&F9`mudarNFj#zf+4lQ zl#}4oo=u?sTmxQ0FwY%1gHcl6U4{nnQ_2`XjTchtR7xMm{WRWtfrtD-kAsryBKn?C z)wi$LL5g?p_MNn0Gj~@V*O3dZMpquX`?xPg=q#ZBd!)Q8RXn-j3s{53yo3by>kP{H zxlmS(mP#R$D90}YYxlni4;l}&7Do@4P_k#bbP{(tvHHia;P`e_!V~ZmF$FM6eYL{<7-E)%9P;Z2YN6{G%cH>r z?f)vjYsoJ@Yx8=pL;*xr@ID~8im=gp*$Ox84W42Ro_ubMm-e5jYjMvo|Ev6z{{^&W ze&um$YNhs)cXiFfqX$xX@*U5_(sC#WnH*Bw@{%Y1G2b+uJy4Ub@h_JxIjuCgkQx>c z;w;r-%9E2%gIae*_Q{FTGjITDw3zf(sld zwk-5!^84dU|7!BBKaQ*KklwYK!Z^SEC~vVHBN{leI^gJO)V-u}_T*9U+%HB<20url z#hnCwcuI^X+G)*|m-28Aa<`G!d3m|&?s&+wx7=5>CG3Z(;ctA7IuE23td+$ z<=LKgco4kpvFNQHzB!bWE9a^qp~Zwc*wP*0-qTmGA9)i`h7wbaRJJXj{H~Ha4jp~* zWtSMGBWUH9pq5s!{2Kh2?~aj~g{|rM`gPAG1<7@lEa!VreHC9qj+{ve>W|KD-w_;Vf9{)Oj9v#G3#s!<@KhF165);b7lJc()p#vr#EAmQ$L0e<#r*>pmPCLK>EG2Y{7>>2>|kqmbx5 zgh-jftpC+Q57ovEv+b_sb*GrRg`7IMqOrK9|?Y+n?6(v<3{+a_$aFl`qv85oan&eqZKma#VYt zZR#NPJhYL2LT8cFkHIrI6k2fx>K-ttmst1He3P?GrQ1FzUt7T41AXL`*H;?tF6-Ix zUZp=((!BLb>81QC_;m%Oy8|tE+BlE4xYx3e5-evh)bH2K3u-74Pe@~?c+YAS6GQ|Fohb*Q!Eyaw#;r8G|p)XvfZ@+|8|p-FvouCD2Xx>rKK|3coi zq2akw*p(2i;LPzr!hMx~iL3^GYPYphw_1^$NGMtFVrn`DTi$V<%<99VfUGMB=JW4} z!utt#D&!R_+ED9Bci$L*UeR!Fi`*p=x6eWnG7B zKGl)$p{!pcPhIKi%FoG^{wDHA>6|;p{Bon-$h*lohtPd_ngpGn3>?*}T~qudobfLD zH3}_J?&8|7X^iPQ(w)xJ7;0!C>>Rj(cD}RTkgm>3S z*@LdTnNm;Y8b_X1xU%bEPNbdx2(=7m1fQUE*Z4ikn?6cs;XEl<3GIBYbJvcC`}~du 
zBMKlZWws!DTqoayoj(@d=o&+P1y9i1nNW{h#XUqXF(MZL5BdE9+I&i2hn}_fgLQh0 z3h?y7z}Z(RyH5Cv;JUKR6^dHi&gp*(xK2dV5xd-{yN#Il^Q`{hX->{e^-H>63F!>Y~ENcR7XpH~IRXV>u*uHHWhz zix_iP0OqfoUQ{XLNMO)G8MXLX#t3jwev|VJuD_6P)p^${t?{2+*_Fta-T#tOY}22< zOkumEt^0WLG}H7brq=tXbE>iL7^_D^{<50Z<@zDD1ocnTsC(qMusnmiwnJ%X;gTdb6R9 zTa~(!WA51h(Zo#yXSKEU_}K3IXooA99CM|nWeg=%SB$DN8?{KcFk+0&87I94Rq)Fi zg@Er9k?0SB7jlqSp!z+rp#BwJG?eQqgx(W)Y6Ve6%& z=HUd0w1}Z7QP4At)#V8 zTKy`R@+SQlft7tI@VyHi^f~5=_GL8v_VYi-c&J%x`QFCAeObx;{k@bp1m1BC@OO+p z1iz~w6XsIOXX)JnG`SO@)Z8zlBs~_n=)T!I>BVil9fikt89eQ2>QGL904e-$aNDOS z;}W!yNpQMeB4Ij(*Dze>MvpeL@Ia$nMqNFnq+!{rs={w8TE=+JV_iQwQ|pz(8h z?>hTO==Ux3&@VQ43SlpObY zi?}hNjruhYS0{cGi)mu;ph>}dq-kfpJqhRuC|7@hR`}BaF?HCUXs_oOZsbYZTdP@3 z(42{D(p~wSUI6#zbv9zv1(RY^Uy=LCYlXOSd^kF|a}h}e@=2{<*O|ClSFK3@=8=@w zP8zAjyQ!y>vd&e+d$y(C@43perDZPP^2#uurMo&mX({6jS>Jee?_n*TchpNWrT_2j zS9e;-V`!`6ovVn_OC^=R{go$?Wt}lFlXy*g)P@&?tH(J27{PN97 zjK}VbM;CDF08VKW*}kJWOlAkzoJp*%tU zb;1^d7kfZct~{Ggtz)1w*I@WvHR-`Kxr=~wY7oxjI43g%Thm>J(tIb-@uX%;mv4)~ zp6C5>a{nYoUMt$sb?r(A5U+9fmurl?Yk8!MQq!G`p0ky(%ivpGSil7>Ra)E|DNl}n z^Qgl)&*F(G*13$Qne@qZNPQ7kjd)MzcLZ`j#|^~2jI@yM)C&D#!b#AieHl&Id%zCQ z-}6*&zuVnQyAP#@Kf=cC;r{P%+b*n}hv1a1Y#M|=v_{x+(mzL!zXaEDug`-($k;^;e9Y7jFj8lN8?KN)2CUKcR`!qKUrX$la$BnL0kXS_6!SAQ)tHN`*n zc$8`T->bA~JuNT!dUEIfDc<_5Z}D}~=ROl9LlwzXcWs<^bRS_$_-T1m z@Vr`58(cs?@0)mkcPx2cBi=a~e>=vOZ$hJar3Inn$}m^4{(ZzO^NgDc-0MYTNluubE?tWj)orbXP9cJ=8_0E=sI<#!V2ej7(( z85A~?`nF)1ZbkdPi!$V|`ckC58d&7YdE|Fx-C)``4IcW=fWU=8Ri5Uk1^Oa3@r#UM zFI0R8B@PWv*u~SjhP|3!>GG_=Cuzy=p?m!%3(=hJgLhp;nBN`o`&)NWvpY;(4RIX& z_lLyQvFNw(tHCLs#j-z_SkHNO#?Jl7`a*=Re&p4@A)Cu5{nW+UOor2H_?Tdx9YRZpD8Po2rN9GOP$ zo7W`5lm?|lNWH6!b-vSH36azkwQrrfbsvY;uKK6@pOx>ff!=`3oW-;7%-Kcfh5vyz zTf&<7$^X=a{VL-;LOS3ykC3Y!iLOTLNDdDSRsy*GVHgng#FinnBx&OVwCbnS;`w&Y zCp`+^bYFz)rfz`G6qGpvs=bDvcCa2l_50bB{Q>kNWnCX-Ik(W);K`1{I&e_AtnMm? 
z?aGsW(}EV)(R^2t^8o5~Cfy!2@lC3)fVT^QV(Kjq)9}7Lg z^S_P_esci5aK)msPMhtwP@7-J{yx|=8rsjfKp@LZv-{4WBEj<|kA z_Xs}*k9Nj;1Mur1(0$S?w-vm*BT$K(7tzlfwRMom}_<2iB$!5yVF1C&NmZ!N8~)6@U$z$4%Z>5FSui= z8u%%Nrmi$8((8f#CMm2{j=ej-l)tGp>VMx#&D!~nLSDA`Y2YlicvblgrIlO2uH;xb z7oF7Uc|D#8F^$W)UcG=rv988&y}+|vAj^5X|0ca#OP$o#)lBKTS3Ul#@w8)D#{cR6 zbLdINR=Eq&3y^P~43PeRC61$)SNbP>mb-Wt3UUXvA-xC-z|T+dthdio)clf;-^tMm zP+naC_P!L_nb!2!Xu0#H`n8XxcB%h4Vk}!)|_9B-DjirW@h9n;Av1e<0&2tqK!oR_AkH z!cYBn0(CgA>rBA+pgy^#9(4I>2_x?*xpMEDtE|?8y=Ori1-wa_=}DVkr;P=_3u(El zIFj0C;c1a)ucqD2v2rD^+*Ezm6<|Hsr)t`Moq0L%A3<9#q}+KxqZahyH%KPK$8M$P zT8_WtcL364BYjcdmY%yQ?N~UAGe{lO@Fwq5sNFfLO+ZB5Sncxy-X@Z-Mm=&>cih;U zi)g`A`0rCdLoHD2Q{SN9!_|Ls3!~xsPFD`76Dq@`@pF-tA0lovCA>SNs&f!(;jP$z zgOLUDO;3+m7#vKfJ&Hv5Pq?3+b9LgEXu%6ex;Ln~4?Q!fU}s>nl~L*?#VeG!1#9_0 z#x!@LDNVDiOW@JpCzX`Ap4PheXcgrM8NbriLw+UNkw}JI_2Q^%c`I4Zr;gL8aXN8m z0Z4TB=_*4Vc|BIHi7F^P*L{{Vlg7(uA8OpsY}||Bh0;s8f6H|G&k|Rx)73Zn1l3I4 zL!>;c5L)G5X=n~{q{$e#N0*zrogsPvs0bs6h948kG#^#|Y(&Ja(7%cXvu*61qUoO70YE@UJofj1)w zQ`22WtsCQ6o6glzatM9LYDbQpxRnsn7O;Igj#D*qJ5p83_e|tZQcI26=7xS?Jz4|a z--z})6S;z#@=V;TxqI%>!?fO2p4-Sf3mBYF3*HCCsJ%G1+)JCbLA`nbrGGUtwMV}i z<(#FfUwUYr-@CM&=V_~TI;tI%S>w5BiTLG*KB&vt8dGVLOU}d zq#wjGEH~%&2h$q81)j~}xx8vT+H@sWx+D4(TI=4z4b04boLYL|zLuOjh9*Ly`c9SB3g3!3{+POz$VGf?jtueA`p0A7@m0iC5CgC&e}8kMpJS zrxy5SA8qNNUBkhp_k!*6Ilp=334qSQ^^$T6jZi3r#wPE zyq8p7+GELy@?8k}#R2D;>eTx*txJn&BsEqePd8(bl;^yPe6V?qp(m~z%1|Mi+G?gq zO7%H$N&eT&`=_4l-LyZMPFlT^`s7BIKa!M=Tv?hJ(f^WGZGFlPGHpzM6E7 z7{9@hW8X_m&g%4UmAu3Kj8a+|-XF5^K%0bo&20f zRg&sasq)Lc30ui6gm!^fs(&c8{B0&|4ZjK)Sdd;_zRq)<5OdeT6M1B|+;yz^=64sz zgm=!oiJw{<)6ismPkTpcsMhG$=QdKx3R=I668g}nrJ)4b5Bj9Jo38fk>R1YZ_!B@f#y_7Vb9#ZA*nSOFl@$ALW28XkL##J#w(%RC- z=%ZZ~O7y>rYcS*80Y~pdPf-tcWx8t}N73U0;ThitN@|Ut4K4a|dbp7OFAC1>tb8A( zt36$a>^K~J%(-S)=xOH<2CtP075Z@!czXxX*J50cmAr`b&r+^mS#dtdwNK~4pu|3tDdDC#ozrvM6e(a9S+5~i6d$W!I zL6mnLVMD>a|Kwdv{x(|WgUmdypeL>cave?&Z|=l*rG2(UO<5mELA&o~40_4C7yo(# zjty$d(kkSy(CZ@Z`6frUz(BA_qwR?`mb43SbcWuskgm0fipZ&4Do^*BhSjMo 
z>q5(PU(|k)UJtBed>GWf3T(QL6sEa9sP32K_q6Mekk(o+L;`#XfBkqMGLBN*L;erI z+U8oAT5holCxzcFD&jM1T8I>1++bh&Cj=pK3t|R?vaPv8&^-Hb=p1eF^_j6$* z>zC*Sruzf2^U%+~AAXx7!13P%Crl5DT9dm~`^c+@@tZ)xSzT@KjXZg>m%Hh;8$Jak zXg9mBsm9ZJSdOQGW6y!BRoYmiU-L+L06$L$yepnZKr3>JOX%~wx<~Bl!n^VcEvj;S9q|zeWnfOZMk_#%4YTHAM$zC+I zA^DSEMaooaY(BN}e-Cg;er5{!pWec2BVH>^|C8L#JZY6#{@Y!NdvckisJBb=wqY@k z{-Vtw(|>lMY9aQZ1GYfM9D5sUqFc8Wp~|nD{b@)lix?4 zHfk~NJK-9xVRaV3J#BY_A$I~H`JJaDcw&pQs|OO-7o~NEZt@#2u1c;oz^m~6&iQ8!ZQ^< zKzVusU+1nKrjFqJh(1X%!m+Id*A-8t+Vs`Ph5TZoDYSDcNP_#IyjAF036~<^F&W6X z63aD$O2uw??De$v4D=vZgZoX}tC%4=gc4ly|M%SGG7HgMUw}8KH7%Zwq&%U0YiM(y zq@AyWdxxVtxth9@c52mL1NS_Fe%!-P56JM~(t5D`Mq3|P=-CXeLeVpjXG-e>^+b<5 zDf3kLTRY>SA8UW`@*_~eGr;;3P=kNhN0% z6R)M0&P+Z^$vH21d%(o+m{_KIiM-$M$yC5KIY*T-dH`$_zm(giaAojH;O$#ILZnv1 zTj{?vmb&e4880=UZ9s4ZVQSRI{lB~9aw%0jx8JWI>-8T>e`}1=rO{&b87WB(&$MBO zZI@g7fsGj>O{g!t$Vlsjoz#$Pey!LY6!%izVx;~+IKvY_(34cRLJiO1+kZEGtYI(A z0|Wdj%;C`cVrtWt%)FBrLoK8VGYE$xTh)rC1wD>)LtEF9sg#KmdK$D zYKJFlGrKbL`WUA}2uaQJhwuT<%5yGC`&AxrRFqhy=5GQ&EiS(xQI5ITZCmskswZ6t zR=aCuKd{pJQ_^@ki#gQd#R~QJjXd1}wHyIHcM#$#Ol={d+(8Ycwyh^Fdn<2pMWv@T zIm+^h?*ZXUc~SyRr{?!yV@f-J!D{DDLfwV8o|;|DFE#E7HjY4|JJXbq^b6{}V3hMP zX=NYEh-&$IPR3~gz3VB-uLxgF9<`a>>ET!~`CMqm8IosdwL8y`MJLvu<#*KmLfa-% zzCigGk9Ke@0A|z7&Ky?rH;a?``mFt=c|9 zg|~Li@7P)S0HNJHAH~y1FjV@!fLL4G%YUx6{Qz3#t+ZNC?Tf(vtMqRoeKyxXAn+-8 zf@k~rT~KB1-v!pFT@Ry&2LVC9=z12su>fy=61`sx)Te?GitK0II_$}c1QK0y*NQSe3YbP*!6ffp6JfBMZH>vwos7X$KF(Wt# zD1MnSnnxQZQ2M>JbS~xJPMTdMOwXgFCr&u_i`8j7 zcnFdW8HvdJAmIKW&x+Sd+YbqC8it3VO%@&(`m3X0N$Bm-AFe2(A^-Uar}?v z+6}aO7l3#tVSmMk4ECqu+1&RhqC5y|NsB`L!i?xAW1vyShvLh zpfwp7c@Uyx?f)Mzv@~uLszNp|q59UVg`5Kz(18qb=GzarIs&@${OR7y{{f^l^^l0Q z#z`RMUl4L6rqmaK%nI7l1`D_rwP-ZUtp+k4b6|gCP(mJKBwhk?bAfgjBK9|kf}=pL zlTpotv2EUsx|-Kp+giStZO%NCqt9gzg@>1YWxM}?vQh!N z%#n~3j=5<2O%$ddBXEuZ+I5g#p>x=wUxAtJ%hOG;7u~P2^?MYD)*SObH~jZNw3 z@_>PvP{JylzC*zAsjz@)Fp^%VLI+!grwMU)>Od2|<9^aoOIx zU?%UPh0Exn!oq6{uu#!mMlGr&UdvIr-6mh7tI7kyiZBk>tXh z%nx7oBB~;awMrFwV=# 
zkLEQj(&xL%>yi+Kr3xgoE$HPS%24RN12%scVSB=o4~kM2ZqHu}cv(^_X`E#7b9TBl z6{S_+qA9KkmC9iOj%_5QPU$Pjdm1I>bfS4@kgETsk>Pv(y9Hp$4)K;*fH^frW*AP| z>)CwlhX*gF^~WO;%bJCBofcYF6`nV}QA$GFQE@R#6%5hj)0nSQ@)YjI?0AVEuLa&4 zE+(k(_P_2#b-f4-cQIC~B=%LPIId2^WWN4+#QAh2>Kv&fJ~krSRlS7j`hZ`7OZY;> z%3AvB61l-B85(hKB82)FC84*Jez@@M3Gh+Z_G}E4t0`Oe)FQB4MW@Dp1>9IlNg92+ zS$e^AX}FtlQHA*urMRKE*+!RuuH=10t@_NCjp!D8T}&=g3_htk~yW7n^0}#1RlgH?->j{bY`!M#l#+qyf5zc zr@+o4IEYKm%)1+H8i=_w1&sJtIMh~z?Ft0<<<#e4@dMybBcRF_;JK7b7u5!2Vh`mU zkCA&n@GangPEj6AN%!-+g?9S!=QEkU6#ieJ2g@n#F1Yk(X~Fr_w~V@Pgk#qjo$<7d zsYsN$u26aW(2el4ePW#)Nj2um2yp>*FTK=>J`w);3ZtdtwIsSYgChnbzTJr_sJkUX>-3RH> zR$~01V;>`L>YN~$LMqjv8via?(c~ITFYaTken-8R5;_oNOP8*{YpFr`*g;AU0@ggQ z1a62?F2$Gn_KH%iCCAjePLd39sg6&YjYk5-1>m#O3x;%)do{m0@5$>j&op9J@bmum zm#V$jT3RQa+F$Wgo|C!iyz5GJ3@oY2e>c}qpd(~!+$($>%6vbFf1geU^VV&D^C$@U z`+m{qJ@`MFT#mPg1|LG*S+AqegTifzxqQyM{LLwfq7j~9YATg5H04?LzZ)&?QAX_% z!jBDF7A`Bus{|3kmBy&oc)A-b?`rVNuktPy$}Kf}+L74PNk25+EIaEgaVFnVDR#k# zRGycMO*KglYH-Hnoob^(SH8B8f2mel9vf0Xo>N3fmZ1bLN81pc`*((X~o>=$Q_@*rX#~W`g0lmcrkQ4d--Z`!egK#Kajl)$m(*K6{FV$ zb?5QjE`&;Km!~~PIgW^OTRe24`C$4u8vZ1tocy^yAiXho*D;hc4!EyvV69v8uB677 zNV^ML1FGAwUOL9_xjLTJgR3lb@NUesaZN20`Iwp zQ5%j_D4=G|)Lq~-8htHnO#5fJKo?TrTCmJdq&i)o6Ricg;IWOm@$A86K=PjmeV*9A zL5^s&Jwi#R!2NXmda&dFj$YMBbrz$STWk*sNI3>i@RP%)9=#7Gdw|#L(Z<~PvkAP^ zsnSDz1Nq+@xHw^>jj@pT)j`4PW=``g1rk$f`w^5P{BBycaVF1vWz>OnGkMmYha-e#)gQ)Px3krKxbS}A3e0OhY6iGDNO)vc(h6}pJ6S&F)w`d2Pr zNWI%K1_2{Kb>cC78~N`EEVuw!-iC}T$niQHCb^H6*{9F~ri7DI-fW*P=RdClp#SNV zcveWGHgxP>a=0u&j=2+5p7kCK1qvnmw1pbo-svG<*V5uy@QN1r$-O)efmW-eYDKDC zO7E_uwr6Oshn*w`ce2a6M<53`VDa_BqmDqH4Ppd@)Dq&R;D!;a-${+adS`ILnNw+EsAk+_O}X+s{jc_r?eb+{(i0{2mH zwuc#&D}cc&c-_VDM-M%jg3k0V>KTtlb`AH@a9EGG9fG#&VKI*q_6l;~AHbDWv_SbF zWYsr&BH#XCw@2J8<4t)z5(qh(N7L>F+=s)D+Tb)}4*f8YdL16A&LYOk*X!`*@jQGDE#GJjjczM#xsO!SDDU{dlh1IN?@nk5elB7op%pGh2+!AHo*IVly@X%P zyZlQ2p{7xebaF>Au|{ZlDJx4((?8t!3vsNgC0f6fu_>TpiS@`oetRh{1-z+Wx$2cW zUR6q21$+-9d<|_^AG;dqXezCJCsu8llzb%b3nTPM;Qmpp44ta4Mfu7at&LJEGQW$w 
zwQjxwCcMV`tQhg-JV|9ol3&eeS6VQKvFQM&Kc){W8P|^i;XCQS)`6+jUUq_Kmg8oa z8f7khwZAX(%S(CVqJd5EUThCnZ_77r7WtC=WGk3szO)ji@U#hepC!qyZwwmQLhBBq z#@>i2;asK?W-hYvy^Xf+IFjaIHMGOci!Q=7u0adMgsLxvWI3L0B6`6u3%g*^<;S+ zwD@|;{1?KVlB$EbGiZ+=_$+9rNBpk~J~N;(ib|4#aVw>YN1=>_TWNFH(hhp$u}9By z7Yp>R&89}JN+(Qz7P|UK;s#;0UIKS*3Cy?#tf~=uCO;498VHB7j~@=2O=>gVgXA89 ziu-u}J>?4ZW5MDg@Z@%Q^@)T%MBX>Ss20ZdVz~UU#@6Kmhxg&|=YwhQEy#f@ur-ec z0}lk7&!v<*$bT4RFThXuH$ZMCFqlQjH_;FM5Upr$55>ge$H@6R%0C+pmHNI*(v}f2 zm7Y5Bwt3VV)rZU7H1BF61v$}M@%aQgCE$o}P&y@(o|aOni|AECWsc)_STK{Ail zN(XPy%BOhB2_8S0S@0&^R{8A@rBxc&O~Gwv(^r>Ssxfr(@2BK@;i>KLD7jE?BXxS+ z#N5N-mpyRgEJtn1r9B@3Kl*rfL|u&N*5qmNtUs-!jLfs+{}NK|ohjdD35D8RH_*yF zD;v}(*3`jLr*6}mC6#J%d$rOctvU~n_C39J^?1${`QH*S@mXEW_$oi4>a$PU{>qkl zym`2~7VizT!5UH{adWnIrR|X_XOdFvZ+=cjc_r4XQR*>xjavLX(2w9Z!u?6)yS%Rx z*lnU6xfE88*2mA9EwhXA9c|lX@2hc78nwlaLdMB$$|rj?IiyP3>W16aNeyn4>Ew+A z@k|{=P1-+IN*Dre*Lc!9CIpM^WvVd?8)aEStR5j*R`WmQn@9h3Q(m1MLxJBQN^PY! zOZ5_ZYPrg{IZ%s`&`+WL;YQd_?k-sBhGw-eGuNNN{q?wRAlAHoG*gZ4cCu zXk-*AW)qqiFGuxyGmfA0Q;Ax*74!mac0FgtdbFpX3e(>7U7yc$^fTWKX$5PW5SF?} z(}rULQ}!g?e5TL-oqo2$Jy$bc>J(p~hEB?wg=O~!yl)P*|0Og$nm!M}9^V)3^4)NS zq39x$;6K`)b*S4TtP5!G6tL@X%DI}#WrVvqLOJfzOMAT; z8@Lx3Ukux z&mrs%#!)W+-a|hfqdSEWyB#ajaZ~RbfR24=P)Q3~x^41k3AHXMrg^o7Wt{jaRG(!u z>PV?Eq*2=Dhk}wf=#5YUez`H0X*aYtaI15BsWXAAQdN4)OMSA=vphm6BV-Pu_tx@S z(5{EpYO}P01J$_af~B$qTa)&1Go<;{#DsiuboyU&w$r*)CQexMR~XNdi>d9{-pRCc z1#-L+(pNq%1uCIWK??P9mpV4l8b@dw_@rJk5)8T%$oiRW^#?gn8yGr(zR0zDB44&> zPVi=>(H~#(KfTw+Bn^x4`8n@Qt-r5A8;3L&;cK^y2;EdIEcu1L9rHQTX$|ET--WzO z>qXpsc6)NZwa1%b%`5q(Fj3#?j(e4Ro$=`kEcOGPo5-OZ^VVw=d6c&;z_kvg3~RJl zJ?1$=7m&jPDy5H-N1lz`RYzz+H@EHgQ*(1HzX8T?A!I2%eMd+uKYaGv;QzM5Ik~|N z>E_(j?|65yn`1E$ij#u6Bg_$zGp9EwTkhOmEyA3K{w&g9cmCVMruE>H>BQX#4$P+I zwcwHiY3W}=i?!eb`Y0`F5Up0P_cIJjY0bY=>-lIbpP_Ajd}=gUGn5&hJ>Y>WL$~C} zVI=wl(rF(~fQ$VYy`_&fuAnwOQOCn`M$)52@Qizi{Vt&+fXsW~R5RgF!-L`uLYwt7 zQS0FlHF7*dk6x!mj{8I4uba*GhugSJ>eJYlbxPNde+j)A5qx-NjOVWCd7lKr9!OKb z5eiCh-eM>g?SFv<$5OiUH>2Sl%Y(4<0ZyU0UuIlS_%%T z=Qx4`fwR5*HatdYY5L?j#;PlT}Iw 
z(A4W{q_`3%SgS{fgYO9=m&KTWP5k?ub*eWu z%heL{;0!-yoK#@kf_^E|Ln-X_!mBN9qQ!FCHfq)9vx(G8kYNW!Nn>fhyxbaZglhcE zrPQ5uDwp$VM=h=lv$e^!atW@-8N2$SLe1%ew{Rh0DY)!Rqq!RN4#9+o}0jn;#TX%UOcQ`c>}M|9<*8 zfyL`9dF~0jxdRMJ&&Fg@Y@(&g*;Z)plcf1K?EK#Ysgx4k+;2xu_JiNH;xEvg#JvHO zmomUZ_56_1pP?`M*-NR>1)s-L^3bqsrJ5S$KMO|?OUIJm5^jPT4y6Yb^r;1bgMP}c z0w4MzFg=1N4-R#MnrSw`?GB~|>KS{YZ4Ja$?1IvdMb~UYvl<1*cqm%X4vmk757k2B z8VqNt1}8X*{wXu}M(gRMT$h77%POt^hMJEGOK~i@-F(plr*%1|{tJ(*{|uq>_`&dH zx3~|YMe=~IYA46iigIqaZrS+h7q@gL-C}ja)^n z!;xQJM{Taqc$C%_p$)eH4L`ft8|~MJdvwU24^f_eMLn>;pmj@mIx;jPso)d%1Dr8g z->6|C&#lzl5_5PjzybX{te;l+1ytBU9nOlrH^!y{#!jI$y%bvDTKU6hf%7eD!r!82 zEBHSl(zIib%_8McsPsDgGj1BHG0wur<12){YdnOT^RRo7+F6jp;XPLXCD$RW0!pKq z75W@7afQe$)O8}InZwb{&n!sUUxgF)MYu-+bb^bIllRc*i~fhFq23z2<7i@>y_*as z^-|--V2LvsHK3p$$iva~JnjPLd>WV^3+L`5bUyu)drjcC7d^O*(tiPDs^MQ*M1K3= zjHmL&mq(xI&5kAR#X49G_z^3<;P!n=SO}jmpKC=jpEVVcC)X(GTeo~^*fHsiw}hlRku@4Wg;Fw|IIBkKc&aPu zXBLK9e)6td^O9V{9JTQCj$mBXm(&5(3tinJg!Sr_{kJ94_!Yzy+?!)lrCdkRbvtV7 zSwgNWF`aEPha<0S*S6I2J1{)3j=^4N9li8o7_A%verMgeCM(xiY@wwdX}C4)wA%w- z(){H>X4i&3Wv`Y*`PQ)?8iTUTd9$1yG1N;PNv%LF^iJ+6JBx@bHPaz1e6I%9&%%!T zDRKJ-v}Z#-a~XTNgL;ELbGIsWQOXM1(!y$s?*k_f{aJ*RKZ2(kJ=;Wj+oo*)0ebc@ zV3qo)QuYdR9~nGsBGk8kbPN4o8ZF~ezQ767J*Gc^x%W^euxyqqN%6)*z zB>3J!#$4(h6|+h!V{7QXw7Lf>5WCjE|57{Ji56|8J(CC>M~)h`>SInS7=R6$BX&FE z;nqrz8P^{%ivGB6>;h_cgk}EQ@rOBx*JJ(LRfpvILlI3_%!6xd;(SDg! 
z@{qOk%ebM8a)Gz=a4eYx#As1S&)H99j~_U9?NNM?%iRfmT?xb;yF1`z@(*_#Y$3#J7uIYly@mG-+HfoPqe&%a7j{x+ zE!yY?7(XEF(Gpt3?Ub!uxFzKE9rVQGnn%I655Q;?VO8g@9Ahy9yaQX=M=`G7bt(J(N2$=6(+9n8wv!6 z^S&NE?K0{a2MkW&H;dK^t>2ULy>PNR<+@JIIf@UUR~F!tHp*<;x-fKUj|{3&);dD_ zNT=01hUX{9o1PP)G@qDH(KkOOqkpvrh^?TX2}4&gI4k)C<4~vH>sY~24Ow8%97FN= zqQD!m%o0nfRC21U)fjsy1sEeIa@;+f#DnZRc|JBYBPIH^Kx`!O`N;$G=yB1)Ul{rP zSVETQ+6r;1O3C`BYCO;2r>x5ry{40j7Z4`4_P2M8TR^&kdwJP9dy-*3dFiXqCwp-S z&~R;><20RCyOMZ!LLGhe=&k%ax6#Sn`u2^PK=Y?hdSt}766SeYtcKy|?e(8~%|OSh z5PBOuk|H|z)f#;>UJg@U>Pu?UVsVw96s+7S;;t9Sk=&Wg1s$ZGz^` z=l&9$aWyqv$g}e~>f25KvE_P|<7?hIQAQaf9Z6$y)-|*}dAbl<-FVKF>JkYlS1are z{q&N?me+yvt>AN6;+OEf6r?>B`sqO{tw1|DL!&LIM)NJW(0ggO@Gkgw<(u2ivVIPH>3awB!hp5wsxIz2upXhdoKGQOUN8>TeZo*l@Jgf;P6NQ#&!bt64Q=IMY&7Zi7hrcM z)Z9sXuLs&4l>QL(=t@>U4EQ{ueoFA0wCr4bLi?Z_J&QKg2H%(gY<@=hv(RtGVWUeu z>LjgjmOq1U4#V<3jQ38sj5`_f_|I0#-5Dq!0Jp2auUdFNk*9@dh~m&$#CgQJ$6(ZG zt9<1r^l2&FN~!4ltUB8F;35MEYl9Zt)M+2PplgrWSj|}K8`BdaKPkk`%p2gB%6%$ET5uzH4>lVTrZrb6Nrf)vKj|yZN6UI;HTJ z?h~7|YO*vTk*X5rhg|Hv$2k;{ zD!HY7^7ke!63=JC*)3xbIGgp2ptM%}t5;Ia2-@N?Hl}DnvhN9BahIO^>0FaO0PEq= zs82p6$GnN$AE4~i(yN5&=SiL4c|c{f(o|eD)rqvF4es|i|IV-12+R8&&_adlz0`PB zlrxjJ_IOh7bM@=VGpWs@0 zGA_fTwgjGd6Fu+a`Fd&)%I=(SHu&c}y_=Xe!ViIh7vcNe2)C(Y2Y((oT!>aSGU{;e z(+8<@9_>2`PBsdyGJOrMclLn64+UKHxjhfgsg1g;;5OPS-Kk3#w6Ke^b5GHSdH*w> zwO7AGTi$>>T~0gvz}Ju9O*LBoUUJB9#OR^$sF9>dsuy+@xPtnXYpCnI`3Zr)j&OC^ zwOoEcRwz17d3n^MPvhcW&D1w_ET!}3Y4I5B=_(-wPp=bJM2cM+{!UX1k@@`AM2sy| z0?PZ+Yjhepb6tfR&9`ZXx#SxQ!)~)jRo>QfpG;0AU~(Gufi+;N(#U@1I>8S&D19g) z+B(uk!QI(POHLU&oxDobtkEr@*jg3=#k z3(M6bmZd(DeuS;G%ly)*eaSi(lE$CC@*_w&H?c3z1NnJKd#+^Khxqc!RvbfohL~Ty zN`1F8!jw)zWgVrcACTT3pua-SGAFU~=>; zM@J1w=+?QcxxCcc29V#DsEK*l#MeUOalKR(T3C-CrBHZvXS!DTnDi-s)v75J{FiRcUi#Cd2~xi-j%%8^&$nZ)ej>-+$YAs9u%Y$ z<7YJHe;fx_vy{H9ZQ-$~A`?oRel(>drHJYFP)ZV?>*39_Zmlp6@|%dXewdz%b;+IG zA^m>F+tFS~KXM*rMsSF+w0>u}$8qQozbD_B(3?lv%tm^BgnmASPI4if>{hgk4}lfE z2pP3H5~hE}Rv9So-{=uu}QPR%v>!(!SaWh-S|4LP0g^@NE<)|S^H zpap}(dN*0@$`*7DAK#_6NAoGL%p 
z(Tt%c_rls9d6hBA%Y@G~@|xPaq3TV!Z&9s9Dn62*At|NpU4=QzO@t+XN&aB_^WCST zkUK@BG-**ZcF372ls>kOOJUj1NYn0|`18IfICq_nl z&f`D1yqZ<=F*%pqq|9ge$&Y->H91MaB~%Qb7Z{T%{sl}uE?}0NOPCd;anDDMySV4O zS7+mHeQI*9qjy|KHhd?hNxqcng@^|q9TNG(omS-iPr*B7N$~uxl;QGsNF-?)quX;Xs7ddw!fEDTI<%}|M_S@o8d$IL2a!( z&7~LP;0X_retyt`E0#)6iX)(wv@5x@ zxmVCRme29;VV^bfE~mtqjCP&4PEzfLA8!&dbNIOycudsfoZiW_W*}!LjKEG^&C^Tp zYdy&)<6pUgT+YldMpM*>^$;9K4)tOFJK*w5uo8P25xIh&yS8<`)OJ0-%_+3t$(W^E zLXT?Ej(dQwD;=(a+t)~OD|~k|t=vqC30Pl;(3@i#7Q9}&&`OUGuk$XxxJSp&GDyFcTwo8#8-{^@X$vo*y`4PQXyrCqsia#$ zA2-qaIxUz$&nj@>&M0v#|9Z)Oz_?q3c(|5-kC}ch?(e1LbxK)E|K`y@{mp7`Cs2dw zXK+uBQ;$};O*HeQ#OkLBf z$8*Ok%Q7r^jEAOmGs?oS2x$4_ubaBlD_ojm{&SYjn7qH357EJc-}9G9BrXT zeHh8T94Yt&p#}XH0*8Si;)X|LZo;w{fCP8^{rI}FN*=fuG~F5Yl%q79nABI)vpTo; zeja7$Th#mCLM>u?UiTtz+%F-ed#$AuKOIQ@D6ku`SC1Zc^&+D08b;)BHf+#8sV41l|1Im zqvPEJT98M5Q7i2XsA~>1x)){SiXLS~4`aFwKJo~oih~)u^S9(3gl;ZcB6SfBXnxDV4?8J%FIcxQhkiv)H?UpK}o6qFDJgbg?UW z%WEcFqczrQI%-XCrMvCx$d-o!3ujZVdd@?k1-g63SW8f|`5;&z&C2zZA?o7GNRhhy z8sND%Z7WFK!fH(U>q26+Q`F)1V7;62eq=j(hTo z0(vQUb5)b!;=L4?dRLZJ{tJUva8R1m@16WFHKk*NZn7>V`Fhe?)*7znOqcDLMr>K4 zbY1Xf+sdoncuI|<4E3JWUSs@9|4ogjfnaHw>Yeo3XT3tXCQ?hS8hX6B)qWgvz2JXO zxY|M|(#rl^&zp3lzA+q{TgIE~3p=Pmxc?ZKYXy3_N-3YRFFAVlIK5o+BUb&w6~E6w zwXVZ$XN0e7j79dlEW>>LuXbRhcB|}>2g&P%t#FbvBu_MlW1@!s6R>t)S~?G_K)ERW z+0VP7qo;XtHK;4-w#4(Bohb|0%2t+w)kIynr;8ItlKk1n%YEgI4$B?lm5pNA1ZvylVX9Z+n7c z>OQ{fnK+eFhA^V0k=DEm2YqZlIWy@RE|1)l(>@UvhIZ=3(3>;nby|HXIrl@)*+{;> z3p#d=WIQA6PPy5@=0y03dvK@XFZWXySHl4&vgV{hP|iOs1s)p-FW1dUJ;KsfoE36&A#lx+klL>Y z>(pT!AMw^0ZC3Rcb+(TIh0dV;>A<-b{@%5~PQB)C(seU_=J^=#D6K2kbUMyzg<`3+ zrZ39p5-X(z=Mu(1W$E`xe}l7ae$-qKpylcja^}U>*eaz&2hX(#kw;rCb0Om;UzZ;n?p5FZ-?wwcqiiJt#_5V_nUYzJM~{ zj7W1f^r{SAjov1g9so8v3-cN@=PaKm4k-m(XHx_6t0VPb{)P9AP-Q`i6M54%lLNXM zFl*`K%`5kj4GEsEzMeW@`r$5)6s<8o=Z^0i!a`e33GLJ&He?yr?nhvI8t*w9?mBC! z=^5HO7TUasHn38%hhc{hCh1~^K(xCn-{PS$AC-! 
z2&d7@;@K~9z+Hi>D>^H*c5lk~G`hsYJatmnJ;ZvJgR!=1HRV6d{9o>XzdyqD-jvl4l zb^I%dKSX}{=32%*W$w1%LIrX6($<}kKORNzT8&T8lHB*0BPKuDM7kO|#mDo(sUF^3 z1)CO}^B!Y?tJatlpT029XAn>I0NQ(b$4EjP|MY{DG(U)S8>X?2F<_PYoKQ&ZN*<*R zY`J+&CT0zMw@%ASFPJgA&=c>LrLB=`t<@2D$ zFVn^@+ATlGwT79dY)gNAvMgViHZN^FYur**s>O-TcD zuPm=d$W9S%tQ^0f^%?7oWbQpxc6ns2RzfcrTBrVYO1dCxznMOGw!^KI=ot)&C0e+%;S<+2_)1%7 z0*}Fm&Vom(Ps?{N2Evb0x^whv;nOv0tI^XnqYw0%y_Ee5c@Cm3_t-3fn|_Fz z)l*%``&;Nm&ympalq%2fR5Dx*z3)m3^dY!zy&zYWG$+GLgoBvyI6B*#)GX~k#%!Jz zfS$`4z|eg+&Zpl_8IHjQ;`+cR_Y(YlNZfAHf08(70R8CePpIPppgaaXUyviOE%EG# z>33AJ%2B+;C-1IwoDL?rvm)&^X-j#jE^m7hrtW%_#+_Rb$K8GWGV#h5OR%h?!MJCF z)A`o_D(B)|-=vjT0|(T7e0Rp7N{L0}s5bb@5L#0{H%m%RlWB!gUVYo2@?DLq`j#-q z$B!=d1-EKPwpN2vxQA8WT>7$Vq$=sdvKI1_gJiio{>xRg1O4THa|YWO*A=FRH}lP) z_9bZe;zzmmS?+QG5H-%esYl3}Dm!V*&qV>%32) zU(R7EJ?3riqx5QPdgDHm|I^?x&dx0co0n0q^66^0v+2r_bFEX+2(k9`LU?9`pKuc* zpT-;GUh{>tIZu~O$hj)W`Of6I!?1^L#uMGsC~H=5z4^S!L&X0&)UyhjdWom&DP3)Q zL3j{{MQ?@SCg?`bLm$uj^Q+v2m}_IFGtN~~sQvWt<{CNoF!eB2+D$9L?>~Z=;NiK?O#hfC{SL+ZeK3~f z%g7j4R(Ar4zR=ZQgZDg6D(Ts=_|Jyy-wjAPU!ia2c)~}Z>AXnUTKDdZ_WYLg?D^r% z=ZJBfJ-g47afN3cF8vU-+E zCJ*qEH+=>CFSuLVQ0{XA>wnHD-o~5tB|lQr8OTrSR6lVYyI6K9|5B~6-nXN4s4ux|VWkjWn6EtY;IU?kvJCLZYX?#kG0ri^Ce6Mz4_GD*L_# z_-)%>l5?-_R;YpbN+CyfUO>3i=;V6|v-H4AJINhN&NGR4wftst5#lN{&x5lp>&P;P zQtJk4ngM;(iM^U0wxSCh9OpNgW+tI^M(r}{kfNv2s!zlix1$jb;B4Y$jOaIr_Y-4B zVaNBPZ>^#}Pv;m1{ndEC5bh;@wqZ5u4LE>W2cVl(Y11kEyP8|Ruz;8qq}xQkf?pS< zROzvH>9=Uv3gTTae+KQ|hJ^cD!hQ|hZp3fY2PZt8QMm%HC_h&MZlsOYDz|dxOnEt* zk$ec~9L?P|sYk#WJ;CszsNX)%VEmPYo)qvLZJUGV-`UnfnLF<#-rf9`t8{*epQo|O zahepEoLZhBAE|-?-=zfCu6C2s(e9>wX$Q)w+-*~#R^z-jZGSR-ofIjJGFE!ldti+B z)VrE~B|SmP+d3_p)+i~{mF53;_p%lJ`IR##cQ#5Y^QJ#l%=0vSKTs_HtVJrhsyr$& zF*TsVzpmSL{BXfyJXO%1q;Ey?9P9bynX z!4kFTrg2TC#^=ao%^my;<>~xVQ?MsW--K+AZq~F5b1l}^fwxocWR0$}G`4I_rp!Lo z2(LluA8qiJk`KvwOsB_Lxtc#qdo1wDn6E*n(~-uH@#MP@ZF+-Vj((~O$^BT}#IFOc zI~+LK*IcRPiuej%-BU?D6dB!%2GIq+8^1rb*}hU5R00cI!J<}t8_y7)C7%;%Oy|`W zxUDt5k=D(i#46Mv^~?hr^0l0`+ncn@XiF7~X;}EnJoURb%9s)VYl3R#QKIu>V(Hz~ 
z=xJ?#4h?^lR!o7P{%_*lTkDBA74)wz{OZ?)gcj#>#$nQUE_@0#PXDo-%>d)~T2A`@Cawl-Ti&#H6^DAOqQ#y-MAB7Vj<7^24*-? zw$cx5^^&7Tn|xo-e?d+!L*l71kDm0Barv)TNb68a$tzbLii@srR*O#_ReoLktYr;QlLpOM@|@IognjWgC083^lPMurW@W9p z*2U-7f%?cuwIiqcJX~gN=;(ujYp8XN#H#N_5~`Eh9)BsDj;D=lfXRBa>J_BS_SJxk zuyc)yb(Pk$GNV00vNb}s9`6PJo+!QJ?7vW#yPMjcSaUT>P@nNqA1vTk^QmuP*fHIN zN@Zfw3~+gKjFG&QEitVeaXvpU_v-xvc;}j#9^M~fbcc}gCVD@PH|6rL zDM6X;JnFIN05xi{ztEI0q#p|oYB+&4y2i7Nm0|76N9*Txk= z8GBGeu7Rm;A79T{(Wz<2zBZf9WR1(Yo8*OFR|!5z7B z$vmsy)jz8^)BJ@v1N5q}$%nI-xSbrkQqw1~7+sBfI`lg%EOXcA<;sMcNi&fBA#K3_ zgVC=?;@{cZ;~BFWv2z)fJmq&_Z~@P5aaU(S+0OCGBc((oji0{S%#)v4EU4u~dZF!8 z3pwa#qyHta?F7p9#G|Xi`Y`vw=w3NuV%xHiBuaKqhA1MYr0Pu7|KAUJH|1aI1q%sL z0;g=NFs`{u+_{rFaotg-b&2n58owsy_b)%6YmwuR!!u+@uW6a8N#z5uI(wr8^>^$`ERKAGjn(X{9|IiMPb8L9Ck+P!yJ~$<%VGH)4-&+)e8_R$4Y*S|<7A ze|l)L%+y~qw!S@$Tvtftm}Knnfxq&w)HK|OS;4PZqx1^+xCc1zhk`3yXVNm;l^Ejs zmKVSpbsOi=+%Ke}%Y~k&^H=bYo(?XHqf$wY792$z2g0%B(?VrBe4&q6 zSNu+)tS6A?XG0xwKYMOT%688px`ebF2-%N1#9v3=u^0&Lwc`izw3NY({_5SW10PRd z8_!rzLH9d_cs~+)1scl3@b8o0FY0)5{XNhQE+d^M=Bmfmz*Kdd4u0xE9U;*#qpl-j z%=AzV1bb>B2gcFb8l|5|9<_{B+$}?^+>>KGm0WJ;PNp&N#Sz@^ftwAZZ3D64*3xga zmnUeE=OxOAk0R$YK*G~JQ}>kL%gb!9vP>%aDJ6Y^c&(~gG28VqYVgc>Y0EVhYAwQX zKEE0`|&?s)y*=`!Y(19)E>JXqVppL6_aU8pzIB88l0b39eRCiC>a

s9XTQ6MFM>jH zmtdK0IVmjN`8d~iemP`L#)=WI5$|ddM^JmidwOHMOAk_pd~giErlnSvuT3N5ipZCt z^08IWObw1HXL1f`PQQ5R&2{ujTCen(%40{8!?~Cm^jF^=D?H{!%kGRYXVZ;!U%o5x zHZqRSlh0JnB2Ei_l^$?Ew>d0ka;1Cy2)+F4dzJqsOx2|(LR;^GQ%%Q2P4|%-0rbtpWF&fXD=%&!MkN zc-JT52Q!z#JC5U{>bbuCXlxdt_cGVip8c#?w#rLwjDrP}q#Y4$`;c-1vK;d_Q*U z*v*CJwVQ^~qJw!#%i=;pT%T0ZiW=!8Ao6MSnfqz=417%aortWb8oesL{u$X5H+0>ET0jMwC6>K%a?2|OS3l@R%Q0jL2WJ;Z*wSgb<+^oH<)soz4cQm;V z4+^jGO@<}`H=RYLT9ZVzu)6w>Eu3)cD+bh?!s|JV2!Zc*V0LyhhfplE5$!o!F-N* zEgEH@RjFlT@X1L1bp5jGP(qFnW(?4G@?zP>!PkIwMtv=XAsSxH@t`B`c-T$x{wbP>13L$DxUe1fCB zn^zmV+>NSaa8BGEJ}Y=ut5;%~vaBKR*6x$KpW~S2${QRP&!}}qqLdA$G`*$N_?b`1 z<1an33%DC@jK3rKAIO!dT~StWbwdkPV*IJE)Un5p3%lRdTBGUVG-5jWsqa>zzuHN1 zSIbowv(L80ueogqXm6y{?-8OD7)y8y6w(C`sgqJ)g|mi!(_%3A(hC*eNyr*f?MWST zuqmubo+AgD#alh%tCX=8sy>FdA}kzrBulgHz5J(_BSW-;UL|!KwC4BW+F5hsxXlRa zUkKH^zxV{?_W&gNGeIF$(&;sD_T*QT_yx*!wE74+I$G(fxhL`4uV-Fo2(tGK^6Tpz z#Co36Cp8#NReRetD z^~5}hoh4VkmjA_YlRW2tDKYnfdCrzU!OwFD-VKiz!hU^m4sd=Pea!tjYvCVT$g8fb zJ~bGw>0DUyhK+wI@5=Y`uEsYcpe;91&Rz&EOao56yyu*$BfWuEn5U0lHJ*Iyig%y>zg*4MOnIC9 zW5J{sy6Uz7HS%)lkEI zaE2Agw6B0Ai#bdyZF*}9uuYyk(QS!ssv5!;lU(xRWLIeH<9PUR* zvyVeT?~WF%Mbi7>_xCZI@$b;~v&g^05B&xb-PNjxQ>ORVN%JXkU4@nWeZHo-2d$YM zF&7{DzvBKp*l{-aejs@DHxcq{C}cD(m*xSbaYSAAZQZt&jXor zDCa(UyqLNVq(|?@`@1`sZ~9+iKb?j~^DrULgwN9vJpk?FGD3b!Kkvr&+6Md%WUSUx z|2EqBAT2(SHN7{G<9B$Jf5Rv~L+Tr&%x{p+RedMY-jgEa0ow6ZTKNv7@E!E&6zbap zp79#J{XCR=2KtSyxdN=%pRu@~8twqT=R%$CIA0jCx8ch<4r=@<{|}Jv9CV>`f!yQB z^l#CVzoz_urtRmW58TWM&ZK?!gO_7T@BI4Ptv5}5cqDauDF+XP%A{A%fL#qWyED`o z6XQGixi?FGt7Y5DD7Aw_LqqQ|Me{26A1Ol&bvAT zrgVv~KctY?#SC4-Oyt0L#z_+ahbSD4GH2?Kqq{gSW(w(bXvTBTeu}ZD* z|1fqZ@Lx{p|M+P%W1W#CyCx)AlC&8SQ$)q2k|LR!iK3zfSz|11wxP74iAsf#ra@>d zWm2|OWDTJ(lx39k`#)dz-ACs8dwd`N-{bvwzu)(A&ULQ+y3V=JId_=>r4R8-dKG!s zWKVbSSj%B{O99~`p84$pUSOtf#i&Mlc0b^CGMchS`5U%yT~VT(`I0>22r`*S?xDTh z{ref*$P@f&zWqULuEe3@O!_|^jhoHGuLhz>P5fI?K+7n6C0e;B7{8^tSrr?8|B6<< z@Bflb5j$62alR*u%P4SqA$QTZ;Jm%>WibAfFT38oeMOdVsu|z-IrzDEwksJm&HT~9 
zbT3bHGt;ILE$+@H%KcHYH*0UDl0!A~pJ)<~JOSVGT)hhaGS|Jm&5XYxdJ|1;;DymH)xI)U>j>Eh~Aw)x7>Gc>1yb%yhbz zZ@-NW|6OJ=i9ejdqy65|K77e3=FJ^SAFkrny5nD8o~1jzxrtx@lnV(3!QY-Q0_V!$cjPjmG>9)E)f zH?Pp@0~}RjvDu$&Yyq`8n7zc>AGAIP#-q@CATQTiiK?RX$7*Cp&m)V{AA(s&WX}$6 zjc{)%KIUz5W5v%gu<3x3Q~82#wDB(8n+>-EKt7YFZh*RW=pV1e3*}K`w0CiV$K7PG zid5Fib9=G*SxT1Cq67M#K-GM7edy`~jyfANsxQVKg->fm*4p^}7Vhof-fH;WB&tng zWpAms5k_s`_JqjMR-W=Bo1b4$a+)?f6-Tv2^!2D-uhzFDIs=Cj<2pJnBk<8ExRh*P z7Tr3sl6`RgQIdLI9B#{2$GLME4VZ#|C*aA=zTbivd?UFZMbP{&Q@dWFAzu|S{99h; zDkbaDhEL#gj2O4x?|QtuAD%yeyO*R`+B_o4>2k6J@Kkcm`RvC%whE)|J1Squnm7$(dI6!^8rvFpgWHi{N9g+t_1%=et)BK9ZBpl{Hm|z;Gg#!J^=S#T8Mn^ z!jmV-77no$CjIbSU5PJj>|e_!lTh-Iu`|?|T*RemSi?s<^rj zX&=h&4O{XVXeD}4sD zvFZ;geutuPKbm?DNjy%n9SiOFQSH?<>}K+->s~9kwNvsPeqk0)gg-}teH-psOIA==!;U{Pn zBRJ`+)%e-fiq>lAJ7%8z@4?Bnig`)xe^xdlFKxkX4yWu~^?VWgQ#$dq924CFC3Up^ zwm$CMO_4n#ka^GhDQvAd9v=%zTU^Y3FE^6R`M44&S*1FJChi22tS!puX2q!W?yNi` zZe8RP$yYM|(1Lv4cb4aY|AA9Ra?3G+WOQ}Vkf*B~@HN@cEzVvC;UdQwe@dC`oSdi3 z$#otguer)(hTjDCdot>B=Ql7q0(UYO<|g)#CpG2#uH=K|7)i3FKlzrUo#_+TkY8jZ zz7h=ylyYa!%y$aa3;m8~o3%u(qm;`CK&bjtA>m|T$&Qm(^kY{Wi+uM(T0FR*vMHU~ zhfg|=Piu&}e=7Hb?=86T8`*TfVpQ{E`qooBgS3#0ez4!n;lCQ69}^SHF_>^>S5WiL zj4lOl%DH@dlh)ZHbAP+w>@3?wQS}WJZR|7OzJaP_xZ}~ZD!A3V2~$zM-{z} zJ=^kmocHGEa&B1_D(qOE-gvzSMC{mjO#Lkcxq$rFd2+#9BDB$KB{8Cfec z(~8m0-21;jo5#{}?-g(Z?|;^bWHZW^Cy?b>aoQTDbn?mn{gWqif%boY{(ELsd4JP$ z+2T3+&udgXEy~z+PyTEepVJKgm-C#{ z3`=}kgzE}QJnwIL$%(FRU7YPlzh>%Jzh0dEl~?Tu>jSkt-^`Q0@TP{Z#=lsH* z&Nk?ITm8Sbj)%I*>vu7LYfRzeEo^g438bjiiq!X|;zs zcha0h>47X}nSM)O)XYK4dpyT9e*SiPJqf>_<(I#uO*6qcg{RqGoH`QJM3B|EJj!>v zx?hVD^;zqi=>I$YUJBY?@OzQXP2|s;!>0%v#J;u#KL@~LI=bqb! 
z(K}s;NAp=hFHxeYa!c?!tA01|bW6ZLQ6xNAskgvxz~c4=qpAF6HRuCjJQJ=}#oOnU zIgVyMKC*7#BDTXxNDb7*!| zNBwE-bEMixy+yo1b8nVgsVckqgco@UhZm!HvfA^=EpILD zL3ihYKMwAV!8{b4Hn7w+E4m{xQGLr|?oG2^Z0O#NOuS;$p(5k#C_X`qS zjgzsj$8qHZoa#)U?o@Lfd0vYnccb%RlKdTwuTTC<@!(w}9apgDyV=GAc$K{i9;fGL zp!-RBdyLwBaQGC*E!pLVIJ1wsH?yIt^3geH?xR#&yn4>v<;vG1)yC+|E{z*O`di^6 zPNhxvIC=-wq2)FBo*|CBj>mn_bR+IOr}j6nZvcagWxPrX2jIe>0%kI<$zt0DBsodR zmtYa^wVIqyhug<=wQ14rI(VI-jm0qhnk-h5#FymS9Y1fv<9%@bO7I^LpE8!&6ueJm zQF*uEWK!tP+FRqy2z9&poK0`CFGR))wxXx;xOu0?;Nqx;-<`ByX;Z(y9IH5rPNjU- zJfvKnO8oqDtX!qyIZS!IBL6CLrL&c;7+KF4Nm~2`+pJ0AHMCY{MHwH<^T!fG{&S^b z{H?s2-+j<9%qLH@TNap=GetgfuSSu>lx4Gf&WWhWdzhXop#DY8ihF4{bY{2wiZ+r@ zCFe;b9=xe!Fn_3se=UnEiIlsGLz%Z$PkV`TdCPX5CJ!z6S_9|vPQ(4=85K_{(`t(% zqG!ZzcNX@iV$>x&J?+QF25TWJx$-`P9ob^ui&a_uJS)1C1)W`FUdgTAcRe}K8)Pw> zevhKh$Kge0wS}#<(**Z$!5)G-lm#$ z06F`?-Ov3;MtS~pvd=u$n;9_8(3Ypt-#Z%s;|sL1Lb<$Aa9ehlRrQ%!lN@PVW!I8h z=7?lGq=qXk;Fqj6bEUF->TP6No6S7T9-=L8!Xi@KnI2q=H*c$z96ER>`zq(SWpwWS z0#2@{Mn-_I7X3>Kb9M8l5}r5v*?GmC^aFM-uBTtJJqVd0_NV_*^5sy3+exWdl&MP zP08qce5%ie){0{p{rCva8uB|0WCis$dEhC$%T!)wwGwsRS*FGZJoS@BOj-xZda%0k z!5jJBd-&Z&yw62+W^b(|E4!QT>&9Ci>34`yXR(0C$!8CEOb7c)_+^akJtgOfZ%cW{ zZT9$dAc-@z! zOvKAJ+U@7=IFYEe9PDmf=)e+(%Si4nq}orbby-lXMa)Pb#i%;W-VY3k=SCox0+5~3eHfN z4@FZeuzpl~3430QM~g}0xnc}$6{s1{OaJOy)Lx1^@1bW3jmgu+i+G<&eBFyMNG>@; zB)XD)^rqP@U~?6|+~K?@PFw`;ojA9El_$kJPz^B3s}qWQTl>wGn&L;rOu`9t zDm%`X<$_o- za-?RsSlOM%?6fJ{+JYCU+-j?z`shdS|Cxi8JhI|b#?fDB^ZiZNk`c%A3R&y!rThh#nBUUmf8hL6sA*Tdv67q~mv@dfT4Ab;7C-L+8qbL|Yq z{}qLfWpt%$F%I>FmNKeX+xGxklo7Crq_-uDIhOo_Ph}opoLG1qT^dH;GqdhE&@RG* zNI4_Hk!4FzG9r{ZdD2+QJjdtyCVx!+luyQik{SJ>INK3^R~LBYUZ|_cWx~7n!A%BM zp}!eBENvv~x+eJ4giE<*tzy31X1xl;Qn$*M|M%agHepjyq8zJCUKC9zXRlV``PPaV z?O+(&o+ZN=T=;@9JmNHdCHq!qP3lNouSf15@gmu;<67sRIzEE8pQgXGlCE6B^VK5z zlwZQ559A?N;B*)N8_0b<|1yxbS&8BqeAJJ;#4)_a5}2-`*;Q%MBBd|oj~*8hA8_1? 
z{R|=f8Di8*mb=(zz8v8uxPGhtF8tJvJaqE2uX)WbeB*<(HhV(O6DePkU+oF{4UT$& z`Ux!0(Q*%(k+Gh{-B0DgH%G)T++ zVbvNI=dy#$y_kw8Gx@?rv}z-aKG4FK;^2B1A1q#VgvH6~bmnQF25oPYJ_f&k&esN4Pc^RP18kk9~{ zb*0~rwYk6Y850^OAKicldFoQ11vS984RX_D9#3n(Pr>&G+#gQ|*VEh6P?volk}E%^ z*56T_ymSq1Yl+tOIC*}NZ+zkC09?rWitChnm~Ic!Minso<5XKMj#K6{p1P}1p>5n5 zq{T$VmyA;Fi(3uIayK@=jr#Rjz$_7a3~jii$kwuN{Rq^(1I{z#(h{D#qH9-ib`e~T zhUbm$J_g%Q@w7L-E$21r;&3c8)Mb&tF3s5eQ4t&T>+?PFYB@9g`zoG1&o*~c_5?7x(9Jb`TT>jLR8W39P5uU~n(%wl^^ATGAiHJw z`G1OtezWU`v7M&4v<~zeSbR0`s=D$$Tkip*(e4}%?z4356P{&+xOo^`ikHqh`w7K= z4Zn%YS&NoC$trf#-moHHDxWCjdOn%CQqBdcxRU>j9PLMMn-yh_En>lbjuVM?EUs15 zEuZ@3$zMj3D&B~ZCsTWi6hq)p8z$Mk;(gR)56g-itbAL1#(!$d>dU!XHNmb}ZC2v5 z9Z!F{K3XDVYw)sf%Ut@L^^=*yo@ZIvv$A5o@-FNv6lMhJNB7IQX$_0~AR`m66m?42 z$;C2HC{NdF_#WZ6F3gWsCLrhLsC$;SlMN=aUsFWhAu!Kbo*Ir-KRYAr zt#&Z`o7`bv|21jSKh;dBw45hL$xD&}KCAvV>?w1c+tJccw;laC7IhEHQ<4*ehP`Rr z1le*OKIKu@n({+);nk4#W!_-mR_9$Sa1WfU0$t$u5!RQn=B?RQc(SLX%C!AZ5PwN> zzo2EQpB-qEA(hWacP#L1R%lNp+WnR5duS;mG4E?@`KBzOvU{agWQ?aeubMtvxyw&x z6t8sr7+dV3hYsNFhe<{sK5?xp2+2~W zshd%idTL~B<@cmCMaFTr*mN8}m1on5(~t8f1O1MokAD;oGV1f4^1JX%dy@Z?B0SfvqTnSk zK10>Fcs-xb?*aCEIGvH9+u=P`jSpDJ$*^dluQ5wZdr&mK+3~IXV{0YvDI(#~eBaw( zE-J=gF2?mdzgol|GdA+7w#R@y4wmPDx*V6fX=j+S)L!hhSm#lVFu}qa^+?o^eZtjBMo1{ z@p$-;fcHLhcp9m1PZm4l)^d_K8u!zaZ>Y^BxKR_lzq6g2#NDnky*%N|8t{vh%AUz1 z*lj4ACHntEbiGo|Q9RCFAk`CnA4kc4GL}R5p50*lchujm-dfmIbFVq?IZye~=$Kr@ z{k!y&uA|SBQ5W7;<6&OJpXGEQ^2yWWRxDsX4&FqrZDF1#bQu@B9tY~;-ZYxCKYBCv zI9)b#f?RzX2{)nZJyG^YPXMRl&_A?06P9_>^*d!}>pzaAGb`y=o?sqHe;ea@8~i>D z-W}naK0uxq)l%bgx^opQ4w4hpqK9WH(U^StqkTuT&c>G^xN|cI?a(|OUJLN7HSIYB zu1#6$S1?P4@rsB(Uu?Yvq#wolo;+`!kiJLvrolDRz6V#gLUT>ur})X{;NHWw4#b1M zka>HyncT9Tdb9bCZglmpGSgFG(I2Jn$^oKrr@;9Fa^E1!nhf9ZbfPo;WmB$>(1GSZ(wN&Vu=>t?Ptoku}T$P;ruz}sSgrbOX%I~w=jbrpy+y6J9AAkSonX8>SscfvCXw%lJYVv` zJh?uMWXorqnM*$vuQIdh#X|BUX+#IQa6HW#C|8()7muso#I*s+je*rf`gtGxE+m1r za9+X(o>b&=e}(S|9Jp5t(e0@?7-`I*3xCFsI*z{cJ4~j13A|@1-;n-g?Lp?k)yMB1 zEcOI(tt+W!F2uRuJVwqvVYrg-_y7exK%Rxh55OKt%ITx5_kRaXji+plugP1V^m_!( 
zY<||bR$JfV^eVBUhlrcmkQ?dnVltfOcLm6cd7cdgJ!|;ZkI{SrDVJ+V_QjoejpxP0 zZ^3$+Hod}2EG#rXocx+ZXQ6u$e8!T&_b6V$OWt3^)xl^_OwO#5?9f%NkIpm8y#I4s zys3zkd9Ih^%z7?Yzx~gZ_GJdQ6`qv4uul-PQ=^Rafi$>?{bd#~@n^X}uFT#Pm%GBN znX<3=Zp$xc2gDrJbUkmr%TvxkO|DR4lzHb_|8*A)ZYD<>gp$yeIq4bwn&~VfV`ZKE zRwIGii}MoF(PF_zO69Cvx7xTkPHvg!TG>k?YecqSRWJChnB9M__L3omgWHGzp>Y_S z+=+dh&Bt5-=6LvK1$&;&X59Q57V-zS{0&((Q@+ef%eIpvXZ$T=VtM0ODEM6N5h{A1 zupTcNIx-%)75{qyA0JwO#k#wwH;bb`{So@zAG;RsbBw7;;lh>uq?5UL2Gbb)IZ|`2l&9V|8yn9R^KC4ha z<`c6*Fq!>QIcWBVE#Hz+2b7EnCpX*9CwoV?$D{3dneb|X7Pf+QM#t_2X|Q%O#=I3u zoB3TYD&`&9=?AWX4;-sno0-|G#`noeiYqv}t!jQ7`N*eur$w&hNz6!5YJu2ySdsH4FX|^UoXNNK zR65!3r!dTWiZ7;Ljrgs;MdZ0%i3fPkvF?80I|z0Q$o_A<+7j5b(o$9T`-llvF^K5( zdBW*v>BefNuD>duM#1Sb~H!~uc_Td zGtulj%U7JH;C{z?BgwNFH*B_8OsXKHX1_(@VL_SoHw1d|5_!k+Q8s^6y#A z!E|7rmUlzta@e-Ti(Sd)I@i|Fqv3FTf_K=+FZGB2Xsx`@+#>RZ6wNiK%q7n&$DVU-0Vt+j*@e>;}J&CohNW%XSSM=&0X-Mvo>3( z@gKBzz^xv z+ej-b)VLNdOW`sEhYrE9hxxZrq_LChFUW}Nfs?1eFBMw8hGo`*-PP`gn{`QM37&q) zZ!Hu9lIvUu(mn3qhOX@GlJ%)4f;vd4>@52P&YuP2_tcrsr!Eo^yOQG8ywpj&MmyMK zbbS=-$rIlD`JIMzWr0uLKGg}d!D{@~_X(D@2z9-1;S1R29hW1E^V8{VMkGfoJ*}|8 zuUx-}l|Dlv^~opRwVsiXJkhv|gzKR#BcEA``fC(tME(tUzfQ*|ljdi5crh6@#*4$? zl-YETxYHEXb>aL3hz*oY)-{O?_M%@8vZhh2=urV9PXjD#7`cl2? 
zspyzQE8Dnsju`OiCVcXI;`KtS{_TGijPjJYDL!0{uXlqxiH>B|SM;+7-5R6CGu^L( z%S&;!8JrKJO}`?&>?V};#~G*D;it&FIl7i3d*xmpo3EBf<>=EjKSl1rMf@-CWelm@ z*QJc|nZ=ZG!*YG=FG&7%+S$IiQr6gpZf%7#x66yV;lhLhr=O2lW&gMxmG}!ATf{r& zomd&W%dTQqlVC4i=ppT-t-RAQ`v&EyT-MWG!=7F#TFdODOB`i1v;wOdj?+?dw0wS{ z)&#XPPTU@>u_su+(!zJj zCTq=}1lg-HPmde>Pi|Yg$ddP?zw!tz@>h_mVC;7(@k90>Y4 z+Br}gE9m1`5SzmLQS~xksiAgO7Z?pfX*Zht5>K3wqhquYT!)KU`_R#Du+9wNyi@iE zwzHTAdleS7w7H4}o+1h?7cDbJoF}b)c+=zzzs19iG;$nGS?H4$165?FS7|FV{n~*( zLW!*B$~w~?+D;5gM)5USb}X{Dk3l}qsLkKq&y)Mb>^iabSzOwMl>39b53TIWGhGb7 zHsWbk{&WXB?}DAJ{(xd^q6t5G4jMDhXN>mxi^O}uI2pi`&Oa9|lJB0vH}}MiJKbx* z4-MoE`;+BRRAsjR;eOx3(<(3+?P!W?Sw)_wduNelD;T!H#oclF8+wsZ$(_ZJ@s4&8 z|3;B>2Xx%z`e_BXZsem{h(~MX1TW%FW)!Y*meI?F`1KZu{hVb4(MaJ&%=4u#bR z+Bp|S-xfNXeCjjM&eO)>G@y&9e-}Yh0pU}uhT{#Tw=UG@I+Ikf1 zwkst29LhuKS1c>5V^@;UXcRON+dk29#sU|CJQIyKyL&n5&4cY6-k}o>ZV9i?!0rH- zLC$8={^#%~y1r9k8U1BVcc63%ZOGoyuhWoI=wnt}yeUR^)7p!8I*0U5gYk5Cvhw9~ z$I0lk+GPxmEv6+S=v79F8q&e6!)`?$Ey$~%I(gF36L!(oU2wcVkJgI4Tc@*m$&j){|enV|$wa7bKjvqgjzW-V29`x)8Rx=Ban!>#UKD5H$ zMfBnj`kr3d8z|_4wzu)OGaGL}+iJo#vsSvI`*OB-4=YHPpON$J3tqm=N?#z8Y6aB0 z;N7Fp*Jp6Mi>nKbbuGu^%*m`PLJid3VJvGe+9$v{qFYry?oGZKdCO^y?;1}RH zw>ZnSWN5Y6-3jo?h)S6`?rv20*Qm=Zz|5k|T#e+iSF^fDx z4OMfM9Oxz42f1xsaJI?t-%GB|no#vzD&ANRX$rwb*taR$rH2SSs=*U%X2` zm=zCs!t!tat!Vuy3>;%c#wXcMvKqX%Aqvo zNinDqm@UM+jO|{j?FPJe75Pd%ekb!57b|f)Ob#$Ik|#PD6YkGOvKDt-;io%-@SQuE zZI-_I7hny;@jUGtE_P)MGgQG8*@Z23MDrxRD65(Ctgsdc zQykr?PnJ1YnZ=*ElEL$2o@A(8d^QhpEn z%skuyR>@VfLq#3mjF`?;dp>BtD95qM8CWd+M@k!T5i_Qshzd6^mT_z0%Ak}Gnp8+{l6??L>_<4R^u z^c96ix(Eets*x2XUBOSU{6Egd`hOgh&+v5x{wE7dc6Sc0O((fOk$X>iaIvV8z0rn{ z)vn6-DRlpH{CtybPe)}-(8G`M@TiK4F=9=g(Rag>w^3O|nf}^+n4~{I?Lt2L43Jyv zN3LQmJ#Z%T8N%_b7MntfHbfY<|ejnAD8+0^@?uFhi zMdo}zer4aH0sMUrelSnLzQvjEVA!SL+Ip>@skNbcN|`&eh@>-bD`V%U;>T(H_G=`y z8|}zkh(##9guY}w-j(>99eaAJ`6$h6%70|Hhpy;&(Q$T+>VmtO$J26UDnODtVtR!l5K{TXR?*#1Y48dVD;Pol-ukl z!)Q`K%u3rlCC}T;-&c1_l4>cZ+Ml$O_XLaN_P^kRPIo15l-X80<=AUE0(y)7@zzEA 
z$w0Too4HCR_udibOT3Q*r47v;%(H*w+QCK1tj^BbsEQHG+em9JNXPo!8YZ91GsDF; zS{TK1W`sJhQzPsB@`m@UNGWsVj6CN}se`qUOe$lT#i!eRsK(KH*=UP^e(-E@=jK;=cw_f^NM=^J%b_G2ojlb}k~-J|fE9u#fl7E@YR{{fzC56M5c- z{r>#qAw?FqsL<_rXct(I@adSu^yyk_XEN>Z51` z*cG_-;S0V1y#o)q5QP`1Ta!j*Zf5qB3ExKIMrN(9MCC+&eg*8_fWdOoe*mYy7uS=m z)+34Q)gM82Ran6r;N~fHW^cYGE4qnhOd|7f%1m{(8(d!^k+EpY9N$m0J5KCa1lO#n zZvda`S$gJzbk@?&q+1=ovyyBW2w6>D6>XzYnsrIP1Mdnp_zaC3kDlgolX6^lq4;!# z*s~1v-C)}kO;c&irD8@nzd+Wo0v=Dp^A%98(%Og4GD4f@1@mb0X|7c#_fhVAD6{SE zC^J8oYUw$?qoF+e6YWhz&1|@|Mq4e?tb{M^>27`AW2I~NfiaJt`qK5UlKH@bU{l2!SXUGOeD1-8+4Jw7wiUc*B^4$pSZ^6XH1VxBJVo=Y#lbN11Vw5$or{yb$I)7Zz0y zpAG}1x{SZKma^VE`QCbxT*l9o`*3IP^oMCuc2COOh70J*pK!UR9JB#je-!2~^C`!` z=3D$qA9by0u?t<8OoHRc@l3d6x9-fhZ!NQ{Np7`h;~3N~=dXSw(af{H1B}Tq`Ueb7 zq?gG{R=9EwdRmhAIGmVC)+^xi0bC!!?c~b$lUp=sjkCXiHc?xBU|f&4n#h*3jxKMW z>B>$!lXOjZZ-C)2G|ZuQ&nUAT4%zQ}1o~Q_wYQr6oXw;oAE2lQFLkz~XW+dYX4w_A zqj-C;^E~rBmd?eGTqlC=gx(Qo$n%&3NZ_w@^k^C9eXQhj_}2czI?^DlSyTULrRqyPQ?gza@>r0rdB?c zXz6(}i2LL*S+$*D}c zjEn$>!>5yS)~xj-#n6SWzl zB|6>!%#8MDE!O8eNBS~($ITBYpQ}X4p>JHh&T)1ABy-f#6R1NAo53*qH3rABx3UvE z-n?IT_M(qgW>mOg!K*E_l-Vuixbxo%?A|Z*uDnu0`tRTKH|5ov$DrG@*vbV}(Z=%K zXa8NN;;h`evEuCK|M#gG`m#DMS@;CU*-L7oqx)bu!RIdcf8l#8kI;>!9!SHQvfl^9 z=iOO%FaG8S6fJcp``jf0_f_I59wymKR^6RN;_XGgjx75skjL;Jd1{;|-iUJS%E(^>_>9w9AEmPI+K$R4XUuBgS?*lmdSC4(KORuz zFipgPRe0SS{LDvOqQrUp;*EUa9Tn)l$WP2T&deX|C@Ew9*+mlu=CD#=3HqWQ4v(VGw*M**@ zm-Df+%ryVPS@u-8kWb1k?Q0zUL%HZ+_RPs_jOa-xk~j&6Qg=B#?#A`Z-^nOwKQJ@; zc^4YrR6C=x=YXHN6piR!^8D-)mvPpycveqVxk{Pev6nm%NYO2B`RB2$^#_6wTw+KZYgN(|(>I+zPML;JQJX-@)z!{Qg+| z$iFq2ouvLey`z7Lsbfhoxp#JvIF9y>ps9UX$}0MFIsaEvY}yl*%|-OfNd9cN94S-D zp5iBxbZ1sH00xKJS-c{7b}}$x2shX*qn;k6Qr0ycsDg1-^G)PwmWM zdKZ_npJ#eeoyoqr-#?MlOuVUu#vIRc{YieN4s5ey(GKb~q49mlupe6Y#fSOiUtQau zp?4cHI#@sCAM9%(j=hEF4IO2?_k=>rR~S#YlW&=X?u-QNq?PPGohO8ckYwgA)Taw) z;#pRHW{25b+0>P|w1;~&Zaoy1FGh$T#C&n`d2a9?7c$*JEWC$<;Q0l1FF% z$;^(w#P2Ib-mo<}{{pP!AJhD%rHagS4|k3y@w0IzyEoP$_sMd&<9O?=@5=5$6O>Qh zk({plB-i@|3(6>8%b%{6b4rp?X6|w`mNG%TZD~Q;%x^N8tZN(MKQjgf`>jMj|5)U| 
zRh><7wGw{s2*Uv~j$=W}eyG*;JJ001w<5KSN}mCGV4UlF#HJiIJJDX_yd0Gc?8dO@ zv}sg4PxJ#bv$!j>u!m4kQ^c6y{)hOy4^6&;FL;Rm|CP^7Q6=y6Y039z|FKh*%NXT9 zwV!eISJcU@?x~wH>=SsM%zKy%w~9F|@BAdy_F~aso~JS^cwY}Mt0m@wlR3UKLCGxL zt-!0rE)IrMxSo;d%syTr?&J-+JCabc@%>Ph`OV3dx1%4^w0xsl+1X&eHj=}>P~cFB zA4wZqknpvSJu_lz;6dIpUBhoO%SwK0x}F`)(~l^h668t=C%?^pc3iG; zD|6)M5gLAJLA{!Z4(Mj z%r1w|@)BwDBCX{~^+(!E?>6sl$zHo~QO*MB*n|bWPs^1KKL-z=Mb{sA=Dh7ED<#TV#-|i8X7grwigqFSeuIV{ zj(#JOWqtqe_`Ydy`IxL<7mar2LB@$A$FS3faW_wi4-tJ^;ZQA>oL#VzXJ&SE{B3rC zsIAP6@Y+Mp(-~FSBexT%XNpNF`!wv6E1jdv-m--A+*`wgJms5NG96%*cdpFhA=i>` z_Bd$4GmKKJFR$1Wr?cMgIkepd&#ZPk9`4!q@Ozq*e$Zn4Zh-OuezP_$tMN1Hmv{U| z?z_YOLNdszs(ro0H=(x|Iy@J2uyW6szKBo^XClo0_<`fD}%Gb#+jZ zEiR&8yKAWq3|iCJ8EpPVZGKG`*5k?)l-(-6T*Yb=&%OZl9(tISREyzxE^3#HI5%pi zL4nOdD0q!NACdTG3|}eMnhOFp{v*9X!3)d;M|Agb|tsWt$vd{ z<|zL*N;6|Kqo&za>0MIDEYA)wKMd4i1z+~0XNR)>`7ALb$0K2oF}Gzr@d_EuG_vns z)Jd+{qi7{#;K`)A6c7@-Gsh)!ea;6zJ%r5mO@90>uh(Af^b2kRe+}OCE_8Va$XOHh z4qgAy^n0XH*e^*{V~B75E*a3k+Nle~K1Wm`0z%F6P#p+;z|im&IvD7#_yWjUGY zdpRxJ0UcxICV47wl5b}^bOvv;2OQF`Jq@(W-Ce1*?BV}+e9YYZSX?7ovlA}wsZ92F z&y1>D<-ystZY=%C$opqxxKS>(JuU>>Hsa81+I)z!3&n{0UB3vvedMp{J)VZrqqRSt zv<_xdSF?`nY;YxrkHX_DqaztB?SuX~&d$cOC)v_SkUtdfmKk>$==uiHdnc40?&?vn ze;3Xh;FR&aG3d=K-^F}RcE!!w`jL2C6U@xC$*5~qUJlo*yH<_*a6ADAcZA^yU^PT@ zo};W%wili!J9~H8gyS5TG*V(k;d>q-r>v*=5#;PH`ZQ>H|IzQ^ z(S|Lb?bz8>B=+o?KE7OOWt`~{7Gm`|2^{k zm^O7PVpDaxn(>~@;~s&sj7Mko$v?eR(4t4pVoYW|2;{)BW;?GcYBdW_Nb@^mj$@i6;E$f`gGru zSWJ5HLwS+cNpgR_J0nGzGo6(@H~YGSVqfpDlx9pI1Hz>#8$uTI% z?j<+5avE-&#*03oW<8j+!J(Ptvx~f;1rI)$<|b?Y$hZ9D%|7L<(TXd{)RT`L!6PP< z-h93GrhG4ByP3z4nwvkz7WraUxYfj^Jnw%)t7XogT(t&!zmqOaaeZi!6IM|wW1%=RkE^6gafsG-BEkKkw7dBQm9{ zm8h)j)olJ6Hj;VE?fr%``|>`IDA%XRq%+#OA8dpDmZbF$zlVur+5IB9adu)i$G7%?r!Qp0cg9@>}p

fyKzqUMEIm>)+FtXFx^Wfxd^67s(1j0i~^l`QY zJT)-0EU!@Kg5oTB^)Po1q;r{Do_I?j1A@vGsV zo>#>vc3CoTDxQD@ukvcevxk(AX6H`0*bKcD^|BM*|KDdz*~Fcsn@{rY?0=Dd;)U{= ztO6J=+x?1czr(>*pjCFSA8hj;i+#nXfw=i<5jFQ?`3+e1YIdE_d(Agx(NEEhHMqP?OL;oHtEiqmS_hKUCp=3v?JZ{O$Gdg{{dtS0I$7DNd_#6q z9LOKEhu014d<GJbX*-&Irj`5^2q z3V#5{FXZnl`J<_h7Zg+tDf(3zdLx4|O8Ja^&ko5VQBdovvrja7K{7AiY(h zSmH!G@#)|A@eT}Hz$c@TJK#*7W?!q-Ike$15vd7$s*vt)(f%NA_Q0=hc=WZF{wyEL z9GC2+^&Ea4gu_`|o_UOyqG=?0GShSzxc7k3S$xet<(X%`2%J^oTHf$-I}8#NvqR=T z)XtuqSvj=|_roytzNcazEmIFMX9bHg*oAbrZdVoi1RtD&=s zl6BCUSsQxXsvBu-_JqpH&FrW+1LW(`n|+8g zuQQmeEpnfS@Fg=-Uc#rmuku3ukAAfF2poHm4ks%=ob_cNlFLQqoA{pVLGH;OW+|WD zR5E6In;P?QD?3mQ(tZOpt@Xbr9qECV?eX$$oJe2kYTEI=QrTxLx!rp>JCF_KseE?7 z>Ht=rDm8_9w5$osZA}7Sf|MOp{zKC<2dS_Btg*`)fxJhmrtiLTq-1i>X{!yLeV;|= z8GH6Nn2B$#NhT{_Ut-bSjL;qg!`S1cC`nJUCOupT!mGI40e16cnxp+M)be9~)8}31 zzbg;EBfJ-(zIWkOz9@9zR@nZMG!}zb3lI8%_yPZ(`5GtS!mF^Fh%=ch5G=F%?|YlfBOfSfv_BW;p=b>b9!%=+Ya*!oZrJ#VJ(2#XFfnbf)qr=NuTtN2+@oduFEJjTJO2d-9zoUz|rz(nEWYJ*NlQ3H^=ferDg_$|F3g z+^0CUP!I22?cEDn^?PZ^UFuHNdM#Le zNwSGdZ^PqU<6Mq3GyI~)S9|xfwYJ(at21RSw5GN*?x+8r$GR zMXSl@UvcmEv*`I&;6hE5bPIkqpMuuIry`^!dU-6`Q=UTGY!?cnS{q#g?C>M;^;L0flb?I|w z_q)^8F2~llq~{s!53akrmOOc`TJQKa_8s8rp1f={&y5U>-+2yCBoIkYbZ1AT4^o;U6xsFdccAt4JDOt)VM-`vYzWGx= zpQy~^XP1wx@!1OR(@WU`ug~C}ZiZQ9HhQ}&NAs;U`0VVeeK6`@V{I#WitMbGsPLp# zf6vlir=QvH;V^zC8RaKztC5)UoU++%a;*I3WpcjI{Zqu1j0ER>)AjhSkvvP@mYlV5 zSBct-#ni*elXUp+%C$bPCl(R!UcFgt2oss1PIVja2vQEU6s>Uwl6yO(7o zBfBWg=TE1}9*&mn1kYoN7V=i9=hfYfms@FS=FBTg+sti6iI%VwPnji z^y+X3wjH$)n({`8JYT(=HfB%Kf50$1ARWruvd=+s>+|^FE-dC#ve+G0X1co4Z^fJgvyHOAp-X3=KcH9JD}(cWs@+K5lz zfRQIAc_YK;MU2eo_KWheRWQu_`HY1(l{ehuH+v#n?r0P)UC2+jVYf?ZVaAXelU~NQ z^Ip;HL|#plNk1cVVGiN7va4Pdd^n4L$r}ao-tzOoPS$l80JoqokWSWpdR+vs=nOQ54{WsF5&04r8 zNGf|9zf4nWqGu8>mff`%(XNTEG={_RMfTEzra#OVo`Or&*+RzgM&nj%TKG#kG@TDx zq2_z;btb#)aWkIu&mxVFP;eaRYt`NkH~y&oTgi2qvoW&X>{*t6&j=8<0-D!U} z9x`+EXDhXvtYKJTPp^^hKIj0{b^hF4D5!lM=A3KKbsX0;c4EJk=aut>2%gDGzM*f zYY)R?HA;rMlU0W=;_~z4l)YyriV&IIoiX@H?oOt~zrwA&6R#dRll^w!xif=nG)`u} 
ziM&HEZ~ME`y(LS9nz%7?sq_v1Fg8na?WmXMAc&11?}m>Dgv<|6%x!F79S`l>u-) zhJFv#U-%^1U0L1uAgLY3 zO0)M|xx@3tXvll!Zu6VB>BaZ|i@c5_@2&mrOzV5$L+C$R-8?n!CNDgYU(XtsL&?0k z_^=D<41v=Ock0U#G9#uw-MbN2I?{w)Xy91fdWP2KeJgcgG+$=6JMZ^_vxx;K_Lg=3 zl1=2zFpFt;)>NL3m)WhOovb12$n!3hHu#wx61(Gl`80GiT{sI)*MixN&&qqOGdnE1 zvwuY*%i!Ng1UiWq-asOGx6uTc$1l$UH=eW#Zk-RKp{%bK&FqNsIhvm5?u zQaVRuxJln;1fYKqqtWB%C#GP4fuV|Rm#@>%`ZjLxt|~wIirV*Sx0e@e!5a+rNuTH$)b2y42g0PXd?|al zOoei4Xy22hblXI@g? zfqS-cS=X6JP|G5 zqJEPYkzH=GhyFrxy&VsFi#`{s@hu8_^ZChU*0_Ec?=_qsTfpnC<0ms~Cp!V}%{Pu^ zElqGHZ+zOE+zL2vefbynw9R&NN7WpB9yL77I@O-Oty=$UQ%wc0n}X;wexsrD4uEQ7;U zFwZ>o)7e$trQFK>A+XHsp3Fdg6~y$lGe^A{ZLg-zAfA6G?RNlqNs*HtQONLF^e!%X zM{lEX8LY)W97Ik~}a>C14K{6~AM-EFT<_IvqU)T~45+q2>Qz`R6nF#8hr!<)Fvt)~p!~SV>WEaKEwZB}%SOX%vc_Afj{3%WN3wpkha15JDb^cR$B4VT~1nl)t9m%Q`d!MgIXgYjhp z51)P2PZaYr8)$d*uT?JZWv+&UhbWs2H*?aGgS94ub5WiZ6dnAhA5;~D>|&9a5HlA=1jm}6+_S9@cOR@vjAHmA=+pHXXmGpbVE~^MGfzN+v!f~Q?b_K{v?L(AX z2Y&Y2pGuk;H_vK=tTKrm^u)ohdAx;qk&&s{q;fo8+e@ie*1l05a>f`zKB=%v!%Y#{8 zJ2+-d%XdXDqYX`&iVN9wbc2?50bvh%{8u{ij_V_7#I>N+q9d)vjO^RCn`^yL(TQiw z^UVb`p$%DNbyZb*^DOwSw3E4VnY|br&vWwN^%Xlj09HNFQIo&SE}-9oml1!mvf#UD&ScrcR1h;h^)%WW z(}=u*`*^&cLu!?AYZlwi3ay3k-iem&2D7(ey)6xl-|iy2yMxA@Q&2O66tl~BEA(Cu z_Qxz|Up2#_@k;!m(Dbue-em1R1+&xXZ)etb3OT+`-v^?9TA@oB4Or#4X8~uTGM#Ae zSiIjBl$%{$&r`jQqnXL}ZxJ?QlNrOw^Q4!^`8M_AE$_g~?B%!`v{j^+{`e)h-4E^; ztNSJT_7ayLKwoBlwLtTiB%1wn?tx21{+0h^lNrhV?@#uBNp@9Uo$%9f)_9gZlH9Mc z>HY;cJV5F6BlhBl${F#Qvy=MS!=*Ais8}ViCFwPDJvnIF$l8Ig98IMSbBpXWwJK(7 zw{(ASasDfL?%d+2T=7}1U95@1jDe@dWxkb2u3`kchVv#gda0}P>D^ra6KKQ)*V7X^ z*8jW$dN3^4r{>i2NV{jdlXoRnakQnemGEOvHZs9k?v*o~vYzyG zx_K?Qt^Ho@{?2OcM~Z2CE>Ckc4|R>BJSnLTW@hAP&CzAb-^K=N6j^k#@jyM7_8-Cq z$2vZMERW-{ay`2M{~46V#o3OqewEHuk{>?_a=GVB%8eJxl9QhZ=WAedsk6-a9!uxm zU>~n&XClqJ!nL`wh4d1xU|Y4ErG7q{LDWj^YhXOy@wPO&ldIXEwH+?ES0+8n{b@rb zWwsE@FI4W4!saK_^7JM$+pFA%CH>93rTZDBuTiG9-~04K(pRf!GykWnQ}zIUe)YeP zOD?5Hcc1Tsf~yrJ_H?EEl&eL`<2-w~Ku%hNtp3G64Ceo`Q%w5RZ}2DU(DpvLCsW#) 
zuULx4+ln4RIB_MuWaaG}>Yb13ooU^^@Se<~>azX4_<~9NMRpF~z132Occj5TxR#tP`yOO3@_pI-YwW)^ zDNcfC`XYJS&;`sVNhpl4V9!F|ugUyyo+Yb|^5)}qJU~bAvxoE5H1KgyGP0K^<O%Nur{-CxZz}q9<;#avGqeyZPom3LvBA7a?sU0JKOQjq^`58Q;qK&ZK;<5FC(ypkQ_5WJDvrjn+f!lQ zizmNVnd|fq&g8}OUgP2DJ&e5fkfEI?ZjDhs@4gucyQ=uPT7;R84;_4tR5QEdWH!`+ zJWA%+#@A-Igv`GGz1A*4!DV{G$Gg%)iQzC`f|uP;{wx?vXhPmuQt9b}{B$LWpeXc=GW>M7FV%|yKPP{%IhTUQI zJX@KHkJ+7OklZ43o_2#r`u(XrR6BW&kX`pq(#{ZhbS<38I^}I}b$X!(N8$6ns2xp0 z9b{MM;%}bJ^jBsLEHg`O7XH;{x5ugdD0yvD$aFTSPoOa?ckfnetZ&9VpFrVDbU15c z&q7J?coDa=QuYUJ2q+LZDgVdSo=uh~) zga=KZF*C8=q}fA2d4uOV5;a%BJ1cv#LS>28hO+Hd=#aLe1gw#47; zkkte~(tF65Yfid~ z6|or1>Td|+hn`sQeUREv4ue5G3BXvC=~tmAwr%nxG$r^4$-b<$&f6h8gQ z<2*h!J1}It>Nr`(auUpX@$l_%EoOGu0bVD<=InR8{z@2~28%aQ*U)!8c|1uY;}Kuihs&=2*;6<( zH#2X4A`kT~*ze)NUkbe+0;`kxzsGSddlA1&6KZLD2X}Wv&mdf$E!zJb9O z;Z+51mMi%=Kh}x2c#OBn)28T0Z&>E3@R6Y2!DD52lToO%zD`7c2v#4{&;O6GH-Yo9 zY9IfH@QBD5OQe$Q4VorvXzHJI@dYpx~^j{HjC*oxa5fydw91q_>V!G zEeETQp~`Zed9rR47_6lqmO%oi(;phcZDSFB0}rE^w;~5?z`r){oP?eufpQ4iwjz>j zpEYW4KUkfEPbaxPip4huXyYki{^UgH?5XmP!r6yn_kZj@J?g`=yKC}44lVR=DC_LtEodJHRm|b&oWuM#*4Bfs zna92Up0~f_{+C$RLxZvz(evRdxz9u2qR>Cy%!(1pYUde`j@#Hh17}3i`>hGw65ou8HcB!HH=3n3*KdsJ+lmrV-EQp zQndixu^U@*7~KdUg8J{FCHF1-rUH*# zP5-!0k^%ja{`>#yzo&&3=IflZG`5UaVQpoh$N%|O9<=@n@XClrr~r*#MssDsGH?fC zCx6ZNkRQ6vM1CfLLp=1&3a>A4KQ}Y28sOaktu`E5%W2O_)7NG!TJL!PDVJ!U?4b_K|p6~qyZMg4z1D(_xeNqfP z6N^^Ni{`5j*-d&)d=#e+kAXB;8N6Fb(L}r#Ux+RI0F4GhB_{j&{W@~K;-Cm`oNP6D@IOTk4 z0Pp_*9^3i2R_|PG2jj8l%%A1@7MipSbgKlH{$j?N7yIQodbMM9S)R0~_vM3Ddgp83 zl>)bpNU8f~Z!)fa4$j_RMGfPT>3X#0X`^QKF#_EElO%fc*U+yrEia-(1EgCXw}N)& zs6C5*3bSJ80*V$;Gm04)!S@&gD(i_f#t?L`@kTyNl>eH*Ecy<_W82^z7Rm>@{ zk2^(x{T}W|!NbW&puRfpYuW!E0p|H|X(2k!Q>xB_jh-Tj(C1?y=>`l3DLaahM^DSj z!PsJMlex&%lSp<>plX1;jD))>j9l)X#qzcNOF%TA9{2{-=>t}cDbXC9-2w451Y<>w z?Vmx}LhvXC4!M*0675D)${1=-HPzNK4`K?nJ^&YTp)uUc>`pJ5h%Tr{?=KD|K4D~R z1nu?Z`3k!Agf4#o%@M|w!@zu;T1U`#)UxxC(XvQ|=X7QQZ+&6rQ`-#ek9+8$!;t!) 
zfvpVkvj?t)_PlYiccax4z&aaLb7!eEy}kyYLG=3T^tTmgkVGJU1j$iG^{z~y?O%cB zIF#>2t;Y8{lToubfNI;}@N%HjYv~2x{|dR$m+oG$GuC1iGUNWKwpan^;`&E!u%E)6 z&h%nqat2cG3N+XjG^}Ntpi^Rzt4GlXV|Zg1^41VdRGR6!ydKDC{63V? z+c2Cvw1@I90fn(e55P^c$F&VKs%LscA;)RppPk+_4OqVC?K8CYCq3(Kp6HXg3Vm^$ zw%-6Fb78qtHw3wkpr(oq_m^&4JFJsW}O0woi` zuOxC{{<{YBnPTvyInv;-ri?Vv@M-|P)AJQB(7O&ol`pXx#-j_CA}xRMEGP1=PB$8I z4HQ}(^pOX6wjJ)zLH8Jmm7CvD!1E9=dV;lc6SH;Ar?!#o zhW^@v!_fkl11oVZ@F&B`@yND5FIvC9-mz^Xpiv2=QOmSHGG$()m!b62j5S%|Z7!bZ zSM*{~YiQjY?Vw-NbnfRt51HjX-I~?@H163PIsZVH0Shr1iit%tZ2x?`_a^_7&~(v2 znaurJydl0_kT&-c)>B6(>Z%7Nv!M+qLiM_g-e$eDp74_%^Xr*U;fbf>ZallQm0`TA zjjS(}7P#+aqMy1aSDH~h7rZiRqY=N&auCM8)I<`Tu{qCi_vK;qNE}@BMA{;K`Hgp{E%8y&qa; zqO>!Y!hB~1PdT`D&!HzQK zY@k>4q`Y^-*^TGte1k4JgVbE0wt0M`7$bI}!@8q8j$vgMK@N|gKbs?o+ZcyV@Yh_* zzo51H@K%^vvN!O$Z{83++?qGcwSNK)`6cg8gl6XG9>v{L=$rA-BNDw7o@JPqdN%`w z@q}iB@ie5YK)Vep>ksTK>H*q1jCMXmjb?Z@C&kS`!+QS70ctX8(;fC{Xu9wDZbsWR zMyoDkEZ3%TL_0&78qmlS-?dzgFX+$ALMyldvk(2a&mhTqsFZ>Rjd`xm&~YTxy!B?J zsR10tkP}CUcFe1q(6(!|uIGDxs4=syZ5r#)kL!i>ojOplIMG6#e8J6rD$_J?X`rk%qnCvl~pz=VP{xhv@UGkQ`@qgP~Y< zbkhPT-HEdGDdDVc4yC&zU;6PEqW-*G#qj>`!Q5&Fecd@tOJH!P!gV`6(%ywW6QGNp zF70W(7ILP_fAah!E!JR+-3(8SLcTAs1(f*B)V&BEr9s~mXfU3- zT@&v|U7nhG3@SRC*$wTg09n4k23-TB6_A8JwEqFU%CmS*B2|^3o7rU7!2x~!%uQCA znZL7ylkheXsLUwZ8EUkn@0Ef&=ji80>EHD!RT3FljEox(ABm(^!Me5I+CO3fyK6Vp zSWMa17&k@$Sqb3yfH%#fqv!iZ`b~eZpHBVm-86)w&46VpckCfCwBLgE-3@RhL;nhQ zzhaTK!_ZE-FNZ9dgV?izZ!=4~1ZH!vMcvH^V-;etdz4Q7A=lcnDJ*Ya*kDweK}}sKn-7Ya2XQa`u{Dx42Kg~gZ7coUNGKQQM)~*9IX^I-zs?x=CYpEyQsLgB6zq*Wde!!Wlf;hC}hqvAwHnJ-rUbiIoH76r_JV2%Q@(DV2`GTkC2DD^tqkTb~$o5 z8mT{kOz(iQ-@(;4;rYlr{a0R>g#s0k{Z_o?iO@Hp+uxLkq@^h2!2O>sV7dtmvj=kK z`9g`nwH6+F9>AB3W6S8rKLJC1^x-E!n*}K=9&mOz(0zo2T%vE7d+t5xVt%*G)ND!T zdkui>8SWkqR`I^YifMsdI7gTdyt$Fw6<9=dp<+YmW?t@PaHTvwdo&|(bzonOuFgXZ zFH`#u!T8sjduFaNk5UyVmlG&F_wY9T|1g zfb#L^scrD+&p_Mj|Ld6@&eHV6+KlAS;=2Y4mV)DYEzF1FCy>iqj0XA=I`$8sr|sd| z{r25J+W`*Dp-$s0Pcn|}L=qmvmhDOXo*exy<%|-ljx-okWnP3m)PIZKaFYIi5?Z=P 
z>RHU{1@mB^rjMM5R<#+S8==eM`9Fi6kQnr$_PjfrI@&^|Av||C-htV@& zX9X==BK^}PxXQt}+>W~B@_oopK_sjLQXAU9xzM`(k+i3IT7*(spPqeTL`4QB~ws!Lu%%XG&Y%#MqjiayJjSL^96=%e4=!tu%I~sTgA=43DmE`U0P$?2T zt^><6K%N8mo}yfObchrz3buuTI0BvW92#1mAWMYjKvd!?V}PCeM9%ljJv$7GY9LQM z-AzBZ9LSh+6nWzgO+zqu@9jH&%?4H@)1OC5voIo=-Khc;P365&e5@lC&bNi)TK0N% z#ZksNuqB;`l;W3rVoKAez^wnE5>}Fu&N2!Ib(N$xsrCQ&asK2Un6lxV`)_KH?_p$+ z&uW70D5Sm-lqtadL)^OyY$wzc{bD zi#8sko)_SFGXHHi8&nLV6U-#C9G*Q3kM?3axmThcQUGeX13eJC$Grdj(IM}kd$cdA zpu-lT+jpQp528a>qd&sE6!#Er!hc807-+umAB5Bf@d{xjd*!1X0_6ys^#w>=x!4N35M2J)<*m8-CG!L}67YodWh zQ6%VN#CRVP(6fw_O$3MXU^ti&FpVB;cA6#NWEAg7YB!sRYld}b-AtR6kP>H|`Y2qb zjcJSnossmZ)T*y;J0Ni^>JKgSayOQ+4U{ksXgBmzO|X5CtCc)|2n_eqMq4=b3s<+X z?b<-QT4?{)j86j?zp9`Ew=lNsg$Bpb*L`WN416?y{xRrKhPFo`4W+@aDkH=;D6lz@ zsgIytB>K*JdNay;W|Lmvp4+t>3+qLwaudt+QQ&pQrWP={M`wJ#bBAh_-ojYWj-HbS zCUHO#PfOisy$qw3k^8Q|zlDStA5ex-)3asu#xw7tS!X_jV&{PBE#7QKz18?T6~5O8 zLU+aVW2}M9K8OvWTu(!W_He%!w3ch0xI7*z8|SaZqt~-}kz%2TdC$)AzGvXN4`yDe z;?z4DY0zWm6z#kUUG#6(qIK6xOUe_<5~%MC){G}cN!Y21&03HCsZ@A#iUbx*;PSEaVxY`2RY-7}LFS0n4Oru{rGI^5hQRozhRiafY5A=pLDt{X@^ZYcHkk-;M-kgWd+zz%0;H5YILU8GU3}2zu-_h(P;LNdr`kkRf8lzS- zpm~?k)Yb6r&`W+3fGD9+GQIry}I`Id7%vvj%MKZ(+w zRndWGPXOBnC|j5|J!4`C-w9}ocab>fR@1;D2YP-Vno+OmD^Sbxdq>mv3t$1>P0bmh z^loVCYTF2EYk?eWplJ3!8Ft6gS6mhG?5c1_ z&zuRbK~-b^%%tavhUXF{p~)9gyIB?VN!Hik63}I!w%tfXZLnEL575de2@c=$)+VTD zKGjBOx^XRBuN4n%qxB zrpiKK$Wz)z^i!?}R79Y4le|MyNrcz0Z2 zJN}!|vm|96fD6xIZ|vdPIm6q$RXBJ%0{Pc}Yz_C_1t^K$8OLwGkMj-vd6Ib+&WcJS zw^}% z{+9;l`Fe_mGUQ!j`D^kqg6KuYfI{>Qsqz|keq!wF!GBLo9uQErIl9x_pb>DqIV@?%XH$MeDKwX4x?9hK6Ha}?w*HH6iM_;HKKdndbXRhvSFaRJ^jel@0Oa&R%` zrv7S!UA8N?X=-R1hpeUZK0?&!GkR#9)1JMk5fWVW2*E0vv zjxK^-?fDEj&~QdRn1#0%qeE_>)Q-{D-E-!@L=(LP48|~cHkqeMrt;30aBdP>%QgPr z;YclN^BlD_@T(SE3>L;IAT&qJc6$4EU?>V)`{AMQxuPSyWIwM`8xt>JJ7wIFH=#55~H${g$&-m~<_2dA`GhnW-k(t)bO!X#I zh(vNmqAB#udKRg)%(Q?9W|S#T4UVSU;EN~a4u@W8Xop_FVy>QCz_5%_;xN>Cf&ZP6 z$yLzWor8Cw$URUj3fzA|9%sO9BbA+d>$Rf?Pam|HzdbWy3!hTptT(uMou*^`eMJ9? 
zrEmQJ-R_6`OQ}I$uHx8_=7`je{scMHZ~X(Ps{dLkaIjQuXgUz6M^j@ja5fKaWH7F$ zLIeFWjI(hL{RNPZhO=h;GTus`qjTJUixOqf*oUcYAQZZcgm!{X+LxXl)_{B351z^T zF831QdKI8-ht!#uVIlUAyD=T8&6v}?{2fF2Z?PSnwHa44pZ@X!clCbP`@)k576Q%J zjB_)feFbPVn)iwW`!wj}3Uza8>MgW9-ewmMpbm?a*91WMF_ zPy3)>d8oOO-{voVHkf%C>)(qb|bYoIt1@hrc~BQ?v=Q_(>k`o>&@ zmS=!!3b2^R{0+uswe}QR?*zw_krpH0JvYd6HGT!t^3)kiEAE`V2YvH#wV65!(r-6| z>-~(2kHRJM78IZ-?+9dhDx-P^dYI)^SE#AY2W5x-G?r4 zMZ-0IBRCq+_gla%`>5+XAJF&b!clWFdEUf2>X?hf=o@v45&jCg$1^?LCD0z!6ICBy zb*8h0N?6zpk*urq+}hxL6}XIqyg^TK9cwufZNA;^;5z{PuJNr-pZXVc{SZt%YulBH zE6AU5MfyfI2YN>pXS<$Y)QMlEfxy}QDM}liwT3&R7|GUB`)ANE323fUyKT-y0*j&% zj2sls>+t>z5Zt5(%JVp$yh1w-(MxrZ(J!z$GcaPlM!g4s!jp1-V?>!mPwxR7+Vz*H z_cg{ebNw1EQI(P_fFg{q*n#v-M7JHI)%o-WbKXvaYV*;RuRxuHaJoC1aT?>BacNcH zTxa-M2z+9Ym~-%KGv!?SX~C#toMjpC>PA1%KkEpZ_b=YMLZ6-kr8*(obD-9{0ail- ztNklVc<#zmaKuQ;3-s{8P%IV&nmcof4z-ff3=c~+z3Z4Sp3jX!soq}lC zFsE#~r4)grdhP18=X`k`7uZ!=SX!pu3-9|Z{A7ZH=EO9HJOjN&TP>_<0l)Rcx}Tn5 z*)Rv{!{Dr?{|&Mnft04#6TaiByVh$HLGt<{`$sNz6*jYe`&Q>xbw>#xW<%}qemE(5zW}~0X@L%snSGCb1Z-3(7F1J zS*8vD-$v&c2Vw-7mT@=_T7$-3&o3=Z*C678{e@U)^9OgL&{*riDib5)_dp$Hu+7dp z-=Yuoqgsu2DaN>9E-KHEPX-ES56{A7@qUMXDF)i$$-MD4e_eYofHb>D@;Cj&_A~IV zxytlTa(Ct~`c@|N$Isl+$7(h`A~Sr@qIXV_L|HNLTY-_M@*OF77KKEjf2HpMhC&d%#g_gb(T z7Ts)FDfBpW^vsg2-0@V2EZCPl821Zj1-o6WsRfJNx&_>VmzdqwtH{RE8$g_D) z(UaD4JqA7ZHo9>#mTEp)2+w@`lY3Yr)PIC$=7`ZkDoYvnROSGe`I5}?xd5$x0$Dvl zi;J)Tzel@iiS+_9eJ8ZbYr+wI*UO=?)5sw+21-H$AT<5PZtt0qDq|kbx$&?aHh^xgTS> z9;fH#21Czfo)EM=1c+j&)ibR=r8h2uZf587bPHEq-Bme*WpMzw-JdW=%?bE$5^VNU z_eRPe<30Up{)8TdpwTB#=pEkk#I=r$O6@71gltbm548uHJ&X*k=>D5$0@SXL2}Mb?!AhRQ!$q^~d;~m7gK_;upk^#> z>7l+IS!)iAyLi`=vnGQ34s^)7Ts34A)RQ=dC)1Jt&#CViG+%}7JsEj2)5~dKGgFUy zw;Q1QBqaNJ^xuG1T2;ZY3oz!a85P_2`&MI zt$@T8^G!gI4^F2cxBA|AX5=#DNtrZH!BRAieRVl+iOE`^GxMw~wb$T^yLUCwMxGL> zg&c$JkVM~KOP{KaP2{+I7*1`%5@<$k9e`{sHMx?w5V=T0!k5w)oP8{Sy4#V_=G6TI z?QN$wc~-i%eQ{*8BmHqDALE6UzJ~PJoJc}GsN4h1p=ZrWeih}p9{vrW*Aghw243X> zzD~5QMNxne!L!@V(|Lt9^P;hC2Y6gS`kHaS3>@;DiUi3r^;1cHIU^9qvL0eJN_X~ge(P};o_GZS=U9D5Hgt)#!0 
zNz`2ZW-UEVtHv;#qhIQ6xdG~J1ba`a98IbENK<7v5qd=y02ZT=pQTTB0yZO#Zy_J; zfT;&mK1W-Nu`eD0BWGB)Yz(36Z*942+=gTBOt5q(qdq-l1T@c!6txD9g<$(DvJ(lc zf56>7{O$(MJ?USbxMF^V60}+r+_O=${UV!}j{JdsyM_j zJ~L)qqn|ntD1)@PqgI)o(-d0QKr)SGQ8x~T-sPchXX;6UV@H59iq_nrT!MtSvOa}6 zQ|oPGL@xoih9HeXa{>M5`AL89^awDyQfgk?ne^{>;gx=tI~aHOf=fYU`$^!dNsI2; zcZ1^F;L=1!@h{-KzH=17GvjXaa^8rt(3a{M7YTL_05BU7)@$2TDZ|MR0|r|tGcV4Y^Z z^E?wg{`k(5e=NIj7VtM%%!0<%plP_mp8@S`q{wsp9T(K#igiY;v9F-x1@44%mM0ib z%riS1y%ySrddq|#SLY7}c3t|N^#54`E7nJUsUdv!V2+^^6{w&>XXADJ#-43%%uP|5WUv2~gz;dR3XAPh6z^3(PmiP{VhO z+Oudun_mcf!L^V1uTNEWEGD6kKzdg4cQ@}?N^h*A$htOgVZMF}Qd+y-Tuf3v0?l?C zm|Q7;kp4J<9u%I?T>)!;7;rngGjd%|u4Kx+h9uXa^&8kC<~g2(qcel|M zom^bCL>ueD+7y1LQFlFHN~E8kq<*dOwe-wuz&8s0Z|>A1$erH)SHR*3b)H7gjt#!= zAWKcrFy>Eu2mPc!$w{}L5VgCrqV~KC2=vKnM17veJe#^lQ`Wrv7bw>bnzn^@ zm!VI0WMTzxcVu2uk1`$U{V%{}EvYh$5XaH9#$3B!?z*nNO0UqiS)D2|2K1($?|Dz3 zAT!r=glY%jR|?cJ^0g~(GzPMdkiq7(yBu7jDbHM$?`$9*Nz2+ldb@O{#(4UAYxt(; zh9i=B7t0{8odX;D8ZMiBol0F-Ik=%M%9SM|bO53h;>Y*?Yy)d4?hk)D+ zu&sb@K9V(+Iu=vHsPibwc802{v^oZyqoA=?MlohhZE4e!-7*1TQK-0+F=j!~t7}nX zTk0zUzxM`mSrr=J4IIT7;~$2;THV{=lRh4&;m&;eo6%uS_`8#*<}EOvm}k-$8QujO z@H&!WbVnp~o)q*O&yVSa_4FR{a1u#qhE8}C>O2O_FVV^-@c5c{?g!fCP`C!1cP8I~ zRu-XC^f>i2jJv@#GaS(;@gp=^Q);gO-^cR17Z7{mU;?s~04MWMJ{B0md66dr8W-~^ zke8w54M?B+7e+l7g z)3du#%N*pk75yXy9drXe6o-GI=csEfaqz_0?bFDmvfZU z;5QVxs0!C|Lz(xX%X7TxDZaOzG~1W&bN$^&N3= zM#=gD)4+sEqm#wLfjS#cn&__K48W2VdL;+XxdVCM(j#g;cj#++u!(ZCLg|MF1P+}z1FcLRxL;swxeEPmZ z`mYG&DYU)K)TD2F9P;L@Kzlz2kd@@Wr#Eb+u5!R)ow?BzUvWR257Y#%p_Cm)`&w#U zz*g&4Ujbv}l9A+OJ`2I>NA76<+Jc|#aC8*UvO>T0-2EFZp8*Z+Gh-8;;9DFXBvNxP zBqsts7+p}AC!PZLG&SBs>sf)#%(A;_>po~z8feO*TRe68b?W~byk_vZLOOiu|COnd^sYRN z01?4kuJlLHvu1JM9ihqG6$bZqoPm_3e$TXf5j^rSqRa7rqc3J6Z+a?u&SFvi|A_R+ z_qTyAJ3Y@c#-HZdReECzPjUmX@+NjO&?0Skdk7LShZbj|1I!jHxPw$Q-%0fUMnso@bUgDL2c(acd^0n%@v-W7U#DjH3XC-cf| zMoy1YDhX@9E3$2F+Cq$JeHnF5V(&x))8pt%Ps4Eis5jK#fpm|iZy!fSjF`?wA2K31 z3HW+5lFs1CGmO6R;M)|ezW|F)fu-?26xomNO9l7a*av#h>*sB}p;49l;Xrv>TFl7N 
zosmml;w#8=Z#Z>{{x=frJ)iFg{4ttBpO;(c`QGS+-e8#q#7WS|bvJFTBh-+HmRJGR zUC-89O{48I;P1S^9F66Hrz4OiGBz}3w3q~^ivUq$>X`!7%kt6hN>496VHbnpDIhi5 z)MfhQJ=Av!Su<8y&x5|)F?*oitvk@?o<{E32A+JZbvy(bD2aQxzn>9v54_xh=9z$O z)B*C%w7eKtwQZXqQ~C_J238MT8Za9)>u@X*xs=g=E*u?1t*MNJOOXq+_#3t73j8i8 zz6p96v2qK%%ntKzAcKd&XdnGxE`O&X8Olp@YM#WX7mc=A2?bK1OMh^j4vu=&x(o0= zZJU`iE2F-18#Ab<0^vxgprr4I+q>v9^^vikp~wzuy$(gp0^{z`NXFO+JbwrLCL`Zw zDRCva2Xzjj&U`>Sl~&Bqa|xU5OWwQA?-t1MHsm2c<9iHTszA-Bk%(<@z%$26!2Pk* zT@86iL=#M<+$3Ns#2DHMOd5bk5mrn*8~J(qy74=eXm>C*c|Kkmco@<02RNI(Dwg`p zThNStP!HX49~d=)8c9&p-e(Ta6<}HzO=29W>+$t~_c$CZhb|p~72tVizoHw>?Ys~k ztVOds2Teg%?xsHF)YG8%Kpj`~R>P|{^xqRaD-Km$llTb8KH@EPQC|AvBfvQci$?!% z&kuBl9E-H+)m@XfOVjp#=u?+EKZUbrz_JFk-hoE*Wb}<_idZl;(%~cCEfB!j4w$Y7 zeNK5T4IX;eoBg_IVB6FN+hNev6Y54lE$1OSps>E%8^EC#P%Z@9ew672rtbPUPdWq? zYiZ9l{P(%?WEIaQGoOMU9Ik^r4&0}~ryTIrgd2&Jc@sJLfHsH2-(rkzf6}INr;dzD zrI3{qNXmMieG5;Gt$rVgy$2aRgASYwOh!mwp_cHf6(##4CFaOq41cpwr*Y!%gGUPT zP!xI`qiy$fT0ygqkfI}Cs#W|MPg}#~Nyu6>?X3i}B=nT_`XNdhg}oA)b1&Bs<2@ug z0lM}Dt|FAY2(H@J=2R|$H0Gz&Jgo7x;OQ>OH{92veGWM|g%&ga&wI$Zu^8rwDh?HP zz_+$QXRPl|#z*rZZJ=g%ehx5JHiODFpl~$WbSmxDMTdB*h0(iXkxx%G^sMmvsp~H4 z92Jb4o+NAjX7!EnBy(x|Jmb<;`sW6E&@iaxX~^y{^`}Ht#^@SQPcKO`7Zrgo_LP&* zRF66%er^KAQJ#mjHUjfk(E``#GvCr{%EAAx@Xb>#+zoRd-JF4*m$(Z4?SQVo2b##J z|5H%VxaE~lXdE1?1jn|)i{13ur_pp1dA=2HWkQbj@Y%*UC)&(hM5Tdb3OaHmv^fs` z)4jf}M#X`Qxvk*fO!W74B*~SyZ_zZKIQ0PVn|WOS|8qcg8oBESeNNI#%(j?r8HJXv z_GtP?zjD|6${`6_W8wYK_SP1g%^k~!YvtatL^fn6EIA6R-FZM}q;x%YVHd{QWPV)* z&M-PxtJ{+ZlIag|+;dz7(2!Oj}#IYs7RGFOe16ss!NTGO}*(na*ry{$~m$b?Y(Q|XdfzrrMR%#7>d1h12^>sbhvQl3XW^OYA)V!WjsM7__vCK+#~3ZIOY(J2mOYq_hSvUXNTh2q z!-9Q?tY}uV9qk78YFuTY--NLWg%}}bfwL|B&EG<3RWnVE13PCl?qE#hZ~E!lnV{xo ztQoVVR%F!bjKmE_n>J?5?vEDz0K8_>8-Jk0S*+7#lzIV4GB(&1x_bPc0sVSHjb6-7 z^noo$KQ#_m|IsK$oP%hRc|be@tuY0CGnd}5{mf}& zu6F6S>{%192YAKsq&0P1qCTma3YYcH zc2BiEGCCXB+%3_sKzq9ztr=@kk6EhzMlXRy7}aY;o0(ud22iPDjQm2R zBY}E$^4$ERW08#3^bd1n+#6Uo=3gDl8+wgZhKIL+r!O4H1MDr}v*#9GMxvV_DXz|K 
zq#ww`cuKef(7Wjf@fR6X$ka+rc5U!sy@(}JE$>mucR(L;LZG)2e@6~4)bq_V{ZDW zUKUM&%bBIV{XfB{DDe229$}8s>+nopyy*cY*MQXyK8@geKkEL5*0hE_(clp98lUqy z_R|8Oj6vtdAu0Ciw}QE+nOYsmTOuK`=$k6&efOd~&Epx$*Qe}d`q6b-cm|#JG1A`w z$cMwPHOSv_xNEL7V>nhq$xooLJO0h!TW8vLZkfpVbBgwsBAbQaw>xy=<{7QdX3b5X z54|{zdW=$S13$I`S6ymaf&@w552&RF82$l7t-0%5-`q~-=hI@jPMg&jY0}`hIfgu! z_-^1gCgCG!vKUxaLoa={zJw-Po#JiWtEcvvopJ+QT0yNFcvnxB<@AS6==!o~imXUe z2RJpFPe%H!K3_$t!zlWZ%pzXk&6?D2MCcMqmIhM&>CDQ!9ej>Kt?Nk3^`O;1dA<+| znFVeh{ckN)pM>7q2j`r>UuTr6PEX9uINhAS^b%NQrS`Q*mL5L8Qu0%1ZN?-|WU5d7 zu5xr|EGvrS=K(`A-o5~CW{Qi2?=_IxuKaolNzMVqlq_cP^|kz@gP` z&K|YqDn_1XnMuA**++q@C{mb4{nL;Ez3#eD&N*xYAld=#CV=q^NbG#-yFpD__yvGw z8rQ{nw=^8MhOX0B-t#uTg<^@kZ=~;F_}#0Z20jv~I|+>d1Eq zAn!uID@r*}+IW^0%LG{GqBVWo%r@peR!1mjX6BJdLR+rY$dO2?=Qb7zKAu+6fHw5? z*UQi>WM=PkbUX=Xj{{LRATW!BI>a2ao-F(@Zy5_Qknw&3s};uVo8`P1{+tU+I7PXQx|} zuF!X*pM^f(C6Pf_vtNX#owp$F~+D`>uS)1?>pAC&D%6 zuPeH?ZBG3f;H+()H?S5nFfRHu#N9V@BEug7S7qvQ1^*@fTK-4wna^he5^p_uz$779 zi4h`w{+%;KP(EzS^Ti4vH7`gk^;D3bx)xBZ95`KUjYmS#bBZFa3~SI|q) z_wELRJZOfI!F$7?!XEm7v?+b3F3<1qGzFuTlhNkx99F=#_%5JmI+p`~E=Kfp82zvP z{eS<2yFAap!Mm^w++lW)!ZX!NBahD^GqbR!t;amQQl>um=b$f6fZq>7v7+?*H;~+D z_~rSBQr5O}2GSQo@I5%pp{^Iu2G!yEBJ}G)O6X7B4LhO+I$=Ki$&2Xwc^ynHgROqw zozT=;ohyNKH{(b(^s!dfQQnb4RgjkEKwN{~aT+O~$H?V+^tVP z^Du@iV+8j^i2d}<#>^7kXXyzYCqRW>(9mIN{{WUgST6^m*EwpRgY~@@Sa)*wFt8fOtEbijNT;Va zjDotJv!XHKvDvrzjW?U*~ZL|`+S;m}xZ&l0Kz zzIuEan-&ADi-BudB;H(5W;AaPmyPywPbd|tzedRt@OdLGcp9QndG0kS3+>^uUPQZ* znCa9R13ia8+lKtDL|-v4;Yvm|*PLD1^xUKRaHl18xLcJ%o!5ZIco=g{nKAY}BW5#X zZ3(oPMn9PXTp#gqb+R%P>JN0|piH5Ln%Bzxkf}iQ3Rgxf zHh~N7+Z3bE7_(j`=x_Sk8A)CrN{pfP8r0v2b}PWg8(?M@rjl@4?-(PqhtP-hr}mWk z!$`;sI5CQv*U>Ky0+(59pXBK*Aj*e+nFehl(HAeE?;1dPcSq*aZ=6dMV*D{)^;Jq6 z<=X*jjG)!7j53}g&;pp0r*d$26E%!PHmU>4Zd;g=b@sl-)>HZc4YKRWll|2A3tQL1~?)fPwKzt7xSs65~GY z-IVJNcIDv0)AYi(8Rg9SSsKZY<#Pi_BH>jsI>j@~E70N+czPBXn?Y$;!u2p%74Xm* zhx;svP<9_J?W2cer#EKu=GDErR^HgSZk4#Ay28!O+Y? 
zbD_^(N*a-To!LrdMvJ=0r`c7vQX&RAOaUVE%$u3wBG2m4yN$$t9q5dZdK@a8;qQK0 zFrUf0=!|pV;n{{OpvNVsdxTczQ|DDURUN(?PvGuOBz@)tv~d?}9er^ZCHm5n%~Fv7 zl}=Me606BuP(!cxd()Ejnj9&M3AP zEi{H2T_cF1NA5+}7;)!{QCZ4e2TsrKQQ|$vp&AnYE%;qS_b7FpkkuVvz81{irM*`e z<727Q8PO5?ayhPI_^l2o0u+Oh{l#dr_Q?BAe!U0R+|hZI(ay2lydZ7RQMY)12s+!0 zuT{Y<^jqo!9p=GT?P&K9hS3+pc}gp;PV@DoHnY%WyEA9eM%AkQf7_V)Y8R%TZ{>di zkY_~3w5;QpZ>;D3SIA=mcN!zn$^3Wi_$T#P5QtI=X z7G8L6h>>Iwe7k~)H9c}?Zr_~i@6e~x*xaXj7U|J>3!CFGbaF(!iJTlqpPXlvZ7{Zt zXNcG{s|MD9{?XdIm+A4IYp72x4c>i#Dl@uNVB>Dq@{ttQRRI` zOyi}`qL=!hPs;?*6oN{-z_uH2uK=rB)YymKp+CSJ-g}$&(xA;gxMQA4vt)b5*=XL^ zw!g-^=94swXj9mII{5G6*=WX!a*W4jwseM=%Ddf> zw=2}DPn8yQ5g_+WZ@pAMK-TBMaZhW`gPY(e-jE%VPoDzLY%%gu2YF6QiD~?l< z%mYB;Y^@>nw&uCC)XPTSBja30BBf>$X~*?BusH~3zk}mx@Z0mpSJ0~G z+vNejhoO=_Z)OMnkgq!@a;hoy7;VyX{+uY5hFcBZ1N!q#FWxHR5+5n(}dEsX4N3T;pnZ*9?4XVdwPY4OgzUUNuBq7x^!Sx1LNsPB<-g=8xenN_ji1~)TYR>2iw3Gtou4i3^ zUiFa+^Tz6rH4TmPB6M(XN7pwtKS~S|dIoQpFqwW!yMPeiH4uFp3fOtILuHfW(-Fl-n29j~e zl35hD(vL>N@9p4W_Tx#=$sA*@Fdc(;datUJ4@2wq*hR+2UE+VCfNJI+zd~0Q^c zWhMg425PMiww~E-ez`?R_(Wu5KCn7dHI~Jd$M)#L_rOybgwUL^JX!L>LP!hST4MmF6H@YKXi@3iRd@@0!vH`E=ac?H{ znv2dG0+r0r)`<17J;Bp=;Aj#&Nu^GAFZ-Yc(%`eFc-F-%HcV*pO1V3Tc)J({>`!5eOqOV6oXb&0* zVSI}-8KYG_t6(T?mc^R3grArB70z_t!hSWIb7k&igF74Vln5N_5aGf352zQu-IlW}IJ0*)Coxf6VE;NMExtWlS9LSOpz{c}t z#XN)c>D%^@GB(rF8V|?{YN|8cMAqp-w!REOG&+qz-6B{PrLg#|Kek|>k{H*5$h|*z zw~W0o$|2-HX1JRZ?dt5IHsk(gBsH1V%rH1QIaM-nBvXg8nvVnC zj0$SZ#ovi^oQ>oU$YFX^#k>OSf{Z3-##Vbt5m9sY=rf@Q#FHL=j`e# zUuk77v5+paxZ{pxR^Cg%MtA=F@|{+GgtFm$IfD0h(ziT`?IHB$4?tvYG*3sa$~fJC zo;ZPCFbFMS*0*w52Y%he@F3VRr;GN_Z87Ujx?U*@HV5s4o3Q>-1VG=DD9T*!Q&$Jd<%p-k?79U=E*!-^7jFuCq%e@y^z0O0JEO{ z(~uk^w9kW^yD@d3&o0Ka{#;+A7e%9`VtFzZ$)8S-DG$ZxA+O7!ogUOPfhZgGnw|9z zYSK&f30n8maaXplfah=2--ud`T%UyOYDG3d&;15%9nlNHh5M*c&TgUHbo%pNYPktk z?%i*K4`!-e#9epY+)e5YB<_}bUd9zDTa^!NDt3)n9N1J}Bo-%O_L zB{bInIIWe^13Hc6YIOjI+}%n2)vz!I^ZzM+{YBef)6PQP@m!%xaAq>Uj0<@Lc#Jn} z1}4M7hrr(5otGJnnnN2Sr++~gyZh7ydi@2>TTn)CqL+DQHr1TS`v>T+!APSK 
zL+6m?Q%K5|;GOMIx*Bj#X1sZnzw>!&cJbaob`3~MKxZ=yPeL*yq0>Nc%SY{NfN(dG zy&b5YhX&63dqJb_Q2rfSY6Inqe~g+r$CSS zNZUbb&5Q1;N3A!gV+rrA1`20&ThN;=kuUQg6=OzG2RKesr#S{L(NbTiqZRF`aZ$)p zW$KKDPRrqEW29v;C9VOzeoRVUD*C}#d_7Ap)3P}>KY?TJCk%uV46BhH<;n}OemGS|5?!uP{K z;%O3{(P#SX+-4-w8}1^IyEpYd)Nr5F^_D8|$hd%3ffW5pi9x~WGXV-_L9+9q^UND< z{H?n(o{3}hMNev8#rsX5?E_$EZl9;HDi3fc8oHmR_R~o3PhfEqy_XmHo(guW&=%LJ z*}OyM2Q!P?F7&{8p!$%SrvS4t&x?`o`rJ8A>q?b5iQfW8&r{Q1RV|d4mR(ztN9L@Y zLf?K18Vsh)XGq=}-oJ>xU4+bfMnVEK_f(`uq0vNYok{;#hb8w6y5T*pwGBNp&qx~M zEpuS67~O47X(39_`1e7_? zbG2SPwcg_YJ4lZbS&4q&X$(&R*GgLZ0cx&AB8;|n6)_hxJ>xxp;h8y9N+27x(S&oT zCx(x^xJ~KXVTSVt)Kiw$^*eh9T5Acl#IiD@tNSQ^6ZocoG9XED{0d{mKMSt5ATdT# zWas-c6fOd{|3yvR;Gv!f?oFHk8vSiNEhK__32-G`F?atVTFB_4pm- z)8{+kI=Z&0wY^E(ji7H1YKfyqZ9p&RxmKHJ3#sh}RMVROi4oQ_13aJpRpeaCm>=pE zs}tk+Rgj0N|9M$pa+#FJR~6*dY5@loux{Gr!yHj=C3-yvYs05qfB97@ic04ReXiC zS$}@?{dfE?2i_z27Nl%OdR%Tw>%Zpb5vZ*{pK}sV+YawKm+;KG1+?-{S-BiV&q(IK zvt4JlVfpaytpED;-x&nLr-lFXv=VSRyUYmOuTZ~p7$XJ4*b{T5xNGzWV@CyaKqgu# zNB?)V$z7c~@Gcu1%nk3>^Zf?>|8P*ZQtc-L-?NOqX~>Z=K&Q}VJsC;nU~R;sD+ghL z?uH_tV-aIF zlsOz|o>*j6`^g;UdO1g;!MmbwLd(}DX6G4<^k`#@L~cE6>^$}BDXx{P9kdNye2SiR zp3&nXQjrr4(Hyy-!5FQds@dSoDfBbrPj%W@&A7gfarHq;bOL9i*#^^3H*wEdOF!=C zrOsCLmfrC129W23mK6dh%+~G+b20okzx(l^p1J(aLw_63yX(+!UoehVhf1Yr-K^uA zkpGhW@5tz;ABHFCx^CYU$*zY?HKv|v)cYV@>H@_ZLdinVp%m~mq?frO_#AMq;<;yU z)#qKTs)Aec{+IjCame&4T_V^v;WfJVb39q1kdUFsIzSV1$`N2~R8BP8n^^ zRIqfFaRM6YTWk*3nSTprwDYLLEJl~1jCuYBqTRIdFC$%pY0cBeUGcBR_~B}}dnVJs z*mK{TqVY;Wg-OV`9sxhYm8)=kA8&1@Ts)Gy0_m;>cU|e8M!Tbd$g}$DP}3vOr8Q@U-Ktos72bWlcx-_6;alALu>P zeLTez{WB8I#{f|zaK$sWdjdu;YF8@mp>zjo)pDE! 
z)@FzIL+?9&YXHxlpuVEKlbzPqQ$qg5P>W}% z)CQ`}V7rl-%rxX=IeMuUVJ?iZUWr82Yu_VMFn_JmfvY; zy29XbftHRT^~NzdyVpmvG*CP5)q~?bxE)0eLl|$ifMXhv-;Mm40d08DBl}S@HH9D(@79lV3n>Z9enSyaTqg8CUgu zF{4H!q$kpE`xum^c^n_9R&^>5_wCf@!LT)U#RHqt+8LOFM)?+5Cm z=o8P~Tu&S3mD`F$wnHyHMeFtWI`8WPY`LkiCVI-~c=Ourfv>fYfyd~zSJB1m;L!ly z^h|5_jat+8Iwaftv@bEDr=Zy~AwA9Dpiy!CvGbM#+i$dN4IQcJ4`^k6#u4!HINIuc zuys~%-o@`}e={u%LZ_K0_BL1d@T4u*GoV04?i5GLoAS{L{RY

)Q?0^ljX0Ut!`c%Zu^_;Sd(0?dT??GSR;@8hW z(g(UF5-I_?VJ1^^zKy9=vl!=39_XE{? z)HD=L<2l#0C{Z2Ua#El9!M>oD1(a%pw0;HW-7Cq-|Ax>s8@$t((lzl+aO*r=)(TaR z7w0NJEg8YEke-(T%3Wt%(97vl*4W*_a0ldXq|@w7Mz=o%1n+QH52q49v7XT&1Drb- z=(UTqEVkpJRxV_<0M|94?!V~UCAe$sPhp^T53mN$w4SwxGjhl1k7#=GFgO&!C(Krw z1A29%ca?s`+g+h zaU{@8vEKoekr?y9!F`b|^m3t3;BN%|PZ+|FI~jg*&^xU|UplS%XShn{n;FXb+&PHz z6@3By>s-d!WBQJnWy4QMrSxC_{I`!NE9rNhqs|gQ=RTUA*E52ir4L&+WN~WHdNTAa z%p1wfnKGjN)QR$2E)EIk<=o5}rn4!j9gPk&$4w37%gl=Ev};JuD)8FI)lz7rudbu* z+uVN*+0{RCFWPMo{eC}ic>0vFk9`@(Joob#uFo)9cSEY&1+UES`Sgyuw78OIS z284OR((INoz+p_$4J@omyxp1crXyu9qL0m<(-zC}L&om2K>s$}n9nFNoW6Y${=4Sm z+WdZ?U4|5QfKsKY>vQTld&kx^V%B`6Kfu2O!0M`*QGP~iuA-jW;8q%~H3&Pm1(@yz zwzVe~_#FHr36dEi73eP5T%~~%IXAzzyT3zOp)c|+TG)dP{Ry@8hDI%T{y3c12PYa{Jw`t|3RT-s z`!hfti4NqjRyK(z(sF|Zj=yiJugzJD<3T`f-l=BzmS&n68W2ZXBp8w zlf#@s&jZJ9q}N%kXS$hlb}leFhv`Lc*dOpK5w4cvjoQ$#chI_L9}Iwp#+52DX7X)I zn`e=#d(n&W;4>D;nnHIoH`S-L*N~5SaL%ky&(We5#7*dJ&+7vG<_7Y_4f8W~rr)=x z)Gt7u7Yuqs2XoVP1k;MhNGy``2XyNU?yiCAkUH}5^_Ab0-F0CSPMQ>8INNH$#8rlzG z#661rC-ei+-ZhXWb7+~vOMj(H++7503Dj=nl`9GQjTwpH{a$dh5p}rd6y~VC0X7TZ zsB^wDz|#}jx*y|tsOIVSj7QgQ6QO)Z=E_%r$i2}N?zM$ZRiMM8(8TjX_2REbdjqlm ztMG<7Bh0Vm40be<_7*&w0hbo?IG)D;+W{nP!FfKE_O#D@U|}4_PiXhk!7Mxp9u;L& zZjEGW-)y6mSHX8MP`@1L$iYZ%b9h*uk=9f5J`Bc(mGI|HY?UVT$1?Q6NP1ph#-J@c zKMmE+0()+3Ff)7^ou%YmM0XVgZ!_}w>}tTf`}l9%`zt`L)m$F@x>84D%9#7@q~}8w1v<|FKZUHiwt5QMyC!e$vhc)mPZ=u07`}nB^#V&T6Vx+X z_BiItrvmP=Khs4g^`J3!AkBSIJ$w6x;VOOJXgl-J_dc>hl<6p?8`8wM?#l0 zB)SMRO9K=AyCV4}fWNtKW007#$e*5ko9Sy=d3yv-uG6E%y zwwj+i^8i-NQZea{fPb-3i>+ zQ~E#t7JJ4>lxRp&NsA^_D4|kHq{UWAh$59jmf>bZ3)#t5O-Y-wBqU3dvX!OMD2hZ8 z@>Q0h!Q=mYy+32-_xnG-zsLQ!@B8z4zt3{*=Q`(H=Q>%Z&&qvHrTMcg$zw|HVE?|%M zAO)Y3g2ptZHXXa2oyyO6lKG?dm*K}@YMjgpb|4A!!4a?gX#Dxc^S^*EaQ9(VKOyCP zyj#ErujzL`oQl8KhAe&svZ>lQ1V6fwm0=|IQdAmmPW>(@Qe#Ig_9OqnV^2M{^S>KS z&l+{|D&#KV++vn}klb@|I=QY1@|s%u-6KuxZuGV#?j$SMy?pV1fwh$qhZ<3GYiuPD zo~+!lxc4)vCTepXId4N3lL7aS?CM5VY=Jsg!)+pduhwre)7K`yYtichI&c`d{+?eR 
zjW`0m;@jsWM)HH*tmIE1Xzk}ST9`HR+%{K-bhg&kkD#syoB8m#(^%bJUmaNZ-1(I# z>3HSAcn!McG($zDa^u{oAkW^%Z(vx6kN1Eg^R(Go&7M*@a{h}a6;N!VdVO#(Z7tAF zc3g6=WL)1C)>%54<;7tkidRCY<)lH&IIEu(LL z*G4E*&i}IFe-$te_vtXdk+R^N(E8&3}6GR`I~jz)Ag8~BrWmcB`usr-gEQobR2ku<$8e?&IyRE zTF%=4=SHjE#)#aTx)IgSQZwg`>nQOK9nGx_Tgb_m;O>Dg^?6%Qu$S2fPyF)g0U7&ZvE-{w9DwHx1_mM()_nPEJ;_N~_!bz_-Sm7jZr@FNrLP z=bMbC8Aak**D<=}?&$20C+kP%e~A^oyYN-A=TVupNxraOjHfyA@wGCyf+)MvSs_~q zx7>i+z^4%?-_ZMe;c~KGzBEQJHB#pk<6BxemxW7=e7v@){LXufcSq{`W4Ps{!!7JX z;)ZheaT>S=<6O?mBqC=G-{fezlKH_2a7m7y#KraV_aU}qe^5Qj-%8wlW@m|AKOB{F z-t!7{=}xYffwH^S;)_2Ghs(hCDqA}m2gZS^Hwo#FLx-T(99lM();^6I7wPRt7|+1J zuh`?tusp%)>KSU~2K4!A?hA_Crj|R;rsH1D7-fxbrZ(cWR{{Ab{>91sutaLVr)+km z&SfVy`<4~PCVbufyv;60c07(oy{1NkW~i21RkD}15iHN(;!=2JcOp5Ea|hdBNd6CG z`CO$FDV;czQ(-U=hLN$YO0^@4-QgPFxQEsbQ0H|NPcGD_NamvdgRv2*+EnOE_8^kYE;-!d=XWQekAOUT9G$d1P)j|u6c7Gj(y_9b6Lj^dKm3!q zu&q&HB|kMgT6Osz_vvdAuKx)S4pt_)6cT4q8Gq-4bC{ND;d@rUbEbW@QjckEGzgaz z^Q}bECo5R)U7Uo=--0BOH2=^?_PU~LE4B8SGMkjxQs_z>+ z^gGCAqOLyl|3lc%RX;Iy&5f!1uruL!3nN}Xb~RB_7qdJAX=ftQ&gIV}2hva;Me+pH zHRc@z{su~aXKa{>rmN`QJ%t1$!{{;a8O_S9)cX+y-;@2UrMA9QFZVxhAd9u&8yoZ~ zu3m*|*tnH^c1U5Wl$vvL2dt#rHa}K7%Ka zy@;mtE<36-&>~rmhZ;Et!0iGw-%cOvL6DrVW4%eXjgQd!&tSe0rK*w8iR3POL0PxI zgKVFH@+Y&ge*^bj+FQgTjU&CcknKc+Cc9s0P5mu;m8`MdVV?Um@7Hf~StlRW6g|y_ zZ+!G?Vfie+4Ki}>rHwvdoe9I7u|FLQk^I-xO62PCM)XnooC@Dzbn+mQ^&4rp5`{81 zd5(NWyKcazgHY>IvXvVI){^HtwG;U`OF!ZBHGY!k_@5-W30jTO*8MP_?CEvv-$U@o zJ*Rtxl{cDp6WzB>yDu#rP0I&(p7WtEDDkT?c@>)cLgUW`@1-b^ zS=3ygFGRm{Xx43L@Mm1!Q?I-8;U@WeK04$Eq2tI;^zu;hSyRjZ(9`{BnVIPlv^-wD z(?OFt)Cz5$U+kOikB6Votei>TjfFo3E^GODPpa3Bl;)n+m&n*hFw0zQsBz|N)OrV` z&w%GfbjjJHt7%|ooUF};oJc}*ny9z;%h<^cQmRFk6(nT5@xEEw!hj_~+#D747_n@-L&@CcK#dv&s7C%ma=WsFJ)DVr`%SH$;K7Ga(C7+_sk>{>jy@RCb+XB8_~hD zGVSNA#~xtYj@%yS`Fawtqjx#AopW z@_&kwyMti_S*Zh(?t0w^412NxLrK&3q`4owa}#epvUwf~X5Q9c$-(}&RW9cpe#DCz zD0Hjdt}is^1a&8}I)h=}!VKtk96UukZT$aZ0oyWm`VZ{e>*^thhf5{={GA-n0$cyMY})8U+$Hm|d~+w6k6x$*gq* zY3qq1$+Wsa?d?HuGAa3*lw>dRhC&9au{p_bQGv{~$Bk9IiPGuvie$Q(GRZo8IL@v@ 
z%j~niex6?b`;)oP|NAF%l?(kQ!Y=d5)V~4jxi=-V(-wM9CXL)=_!-->01eYx zIJC_uwGLFv3y$aH&srQQE8hRN$9gP6V(`1LVp&ajh#!$tean42oGpJJUq4~#>KeE2 zN2wX;oVAyaS=GTj;8>XKI6Pa-_Om*28QNV7)~tvRVM9jHkACb&3;Gq`aTIG*hx}~8 z$H%?Txy9GWW%7OP;AwsDmXO@9z%X8qpL#c6ds%(XJZ6y6t&AQyxlz}foN`Y-uOrp_ zk=6U4ke1%0;Wtw9EbI#d#7wVKGYcvp+?;8pxM5cX#o?_UA$Qc^bn zUdgCD5glqP)d`F(;r2f`T8d;lM}H5%B5`R$SiFP$P4B0IsRha{!P_hJ^tcvBgTGGU zY2}RCSGbhBr4tcZU!R*v%wj&p$65+zIqiHb{m9K6eT}Dg7o+w&xHyh=X#(P>)LV*{ z)xnvJ)Z>)fW?aiHHd&9qAH{R(rE4J_$uzx6yI=Z#l3w>>VRkgaKL^71(5$a`=ycj2X-?NL=Tay?~8fL zABvgHA)cH-=ikMJ)09nqhM(Ycd4ca;bmBF5XXf~z(I>0E^I(%X@eZW_Em*Yiwi=jv zfjIe*dx4?1aV@#Va++i@O6894%#-gV$+NU_71_R)pYyghXQ`cR?(c*3Tbh5OlJTm3 z#iKLOViH_upyB;mN<`LNa@LtV{6_M|v+0TOUW^Y_Nm@6prq2C^e4U7kYvD2gjF}f^ zrf`&RxiPmXia&}c+CYg@^B)&VPflT*&gm#`Y%eYGCmU!%pGJL*E3l2z|8pC^&$Zg_tg zJ9RR-J`rq*5j={H--gaP1(tox%$+_3#pk5?IMz5>n{vixrIxa365)S-rXiMRICtA?Q{X|My4Z z)5%3LZzLbyo3Q;9Brh5VtI~;_OHBOIW90Qs@JuGVmGyBV?U@hiX?XD+-kgCqSAwe> zIvs>od0vPSkrKu1|Vvk9t#3e9l3(swSSK=aqtCrN~>|`D?s;HIH!F?YzDu?H{!!RE7udH}tsj?SS+NX%mw-J?h!J~yN+v~=L zRmvXh`PJy%l62Q)y(`hW4?0#eqHk|b)LaYM(b^H@yP!`+7I`8%G$;Fm*sZKRXKwWS ziGn?FKR$h;c<#_%{%_VsHGIjM&ubt|9Kwa3zkzP)?`UffuNCuwZ#*5zYG!_yy`0zd z7=ON&yE;}&Gyq{RmDN$aR<5BAZmvv?`J;5{` zv~Ox_tu|VNZ7xpO3Y2Iu+T6CJPBdi?R)IYoN9LPDQTeZ=B=gunQ9ApPosWFZ z&tQF*6`!&Tv%60@iE_W5e}YHp%|8F`@laIjLmTR#P9&lpE{}v+pQ3(w@|G6!vm-cH z8PgZ|e5e_HA1zm44JxyEk;K-_1O)TYF94Vp|%~rsNfS1YMrjcL!8x@A=+JR8(gb-0$%>`}5bcQ3n`5a&{#d z8xni)nQxijr>D~2a&-S-vT!~vOV-;4%6&}te;`ZoJ-QloI1sIdA{{!y_3oDd;QlZL&@}aDap*b!*Kf09WOeuBbO`x z2tOrx+=s&>E7aH0-W%A7554JbG#Q9rS18fXm^c-5N2r%^>0#s9%SM*9BrsXgFV|Df zj^}o#`$t1 zS~R`Jh}0NG+rcMUeV*ilAHyt%Y$ zDe|0t>#MH^_)}TSoJvcMW@~2Q@dq$%NTd3q%&n|_BB4J=tJl2Ceo$9-BEHu?bnay% zS#mZ##Q)5wu?2pAF1(KC(R?GQvM;b)TU$W(HZFHA#^isXV_SU4Dg1*_Y!j;H#^|-A zZoP6X`F+ED9#z2l7}#r&t(@%%_jdzb2e2m>@Eo+vZpHxh8yb;^kcTla-=OzWjM0{2 zbtW0iF2KLrScnbU%UQeJgFT--ByZX=>W(Qa{Bi=mC2CAT$EIYtccBFr;bQioa$3C_Z>%vZ|ByF(kcX~XOQd&h zsZ2awyv&>oZj6hawJ`;pvrsAXoMig_fTrF|z9xg}sDEP}zvymnlI?9SYg7;3Q+X5H 
zJiQio_Q9pqXxkL!9w)6GjcYl_-&lzw(R~`4{fpPVWOaO zZ+cU5+}7KVz|l(iWX-R|M&?fQYOrsDs_}ml(LS3LCR6EFda@KP-XJSg$=Wl#o!kl- z&A;5|UEvX5Kj)tNYq@@*Iq}}Yfk7}IgG1eMKGN_gi0jeRKa+>?^lv3eoWLV_3mOST%=F zGW|t6*P!j2+L({#z12#*R8vn*^ESJSOAEdD1T`LJk8%zq_k3?eiOidNu>J8#cSn;z zagBCXqy8SgRrfb{WWVlnb~d-io7}S$$vyW?eZpk$Ub2MeSf955qOZYfHp`t^@YUMPJTqFoMd=dk zBgy26pl!h>$I_ibdpeMWu}0JFadacvZ8nnp73{mywJ{)WKmu_ng_xjOaL)FJ4miZIcIh6zN|I)-&j8*8g|WY8U0sCI_dW_yVPCuy(mg?s)x` z14T6y$e!H&e62mmz(VUicfu+$LhVUQ$^dO|R(Cy!Z>5dHaBLXcyn=(lsv`v z?@88tp2{|3)g;y}84$mr5AS%MTeL2<&awmAB_b>FFPVwte|{^FpKE-|Eahx4B?hbm zp2sIo7LM!H{=&DBO4RUn2U0Ma9V+v=0zPHG<~pBCKRHuyy5~P>qoq1m(b;9BV*%~H zRGIC```=Oaew?bN!~(KdQRzQ>Tc)>UAAj!OZBNwB*UFV67x4>w(YcAZc9`#j|J`4D z&7D{Eyt$m;zCDiQ-jPPMCh<9=VECgpa%al%Xq;8~WL{`Zs@9-f?%gb9K1%OdH@{Tt zGnL5x#eu9>ZhQI0^ApHX&bn;S-_H1Ztah`Ly9>Rkfs>QSawSqaMa}Holw*s^j5no| z8g=RT3T zsPSYltbSG}`Pc`6GAH17MY9EPKZWM!4!1tO4!}DmS9^ zN3~~tsz3uu(vcG>(+g?Y0Ut`wO7VkxqwenTs6(PZK&kfVk$s1jYX6Po|DAQo`JbF5 z+FcLXyI)7cPATlU`hns6XY;-e%{-Cv=r>#^|w5^3$FPo z&6*<7kAb_MCz-uX^z-}f$@VB)S$ip+dF228lM^gQg- z%nv@M>&bMI{Av5){)Z&`WELtn0`}B$EpnVStY7gfUgg~jkIaV<2{Vn(5+c)gY$B&sEAuaD}l20V@- zhi}rBR;*r5FMO-tCfdoG?VTuDI`KaRtodm`D~2ea__bMl_ng^0o-XIsq2!3IL-t=c z%I5xr#8W3Od?mYFM~kD$N={G4?^{jMKP3x^$&Ftyo@L2u&`0d~x1ehXgOlKpn{5ZO zFc^=m0J#<=(?=yGoQK9D@3y+oPaN@jD{*FkC}_U0fnnB?NgY2`Vn z6n}6ZRGG%I%woL`)80(9PTqj*gO6DXPPz4u~q?*-F^I3fp8L$X?G|aF_w> z+_g7$Uy6r|!G51-*%dnz6_=yo1#n94-}~YFI@lh04?^1aa@QJ7P~m^thFD3n_;D;sM=r+17Tcj@s= zmaG;TfTNVp4WCWW`#rpSiA+C0 z`*PQ3DH5+L>D-}mZeN4uk*;$WR&YqYtGa2 zdUBPU1Sa8KV>anH5QUeSxy)C-8d_(bQrov<@iBbOsiUW~ncfECR|7cLL%$pG?;Y|} z3-w2m^@S+)DhQtgK`CqRUHI1!Y$I_Xp7-HsG=lctfG*L@L~17MK~7>MLh=;S8*NSu zVdmzq(v$enIWM`V|NXR^pDWqCScr?kpL``fwbvY+Z<^;wCUk0)~%`3cX;ngOWS5S6|~?6|{{F%gSB$a@&yhx8a-H)U%`Z4VaT5)6PCg zcvYX1>B~@5UP*RtfkmkH4cZ?`WpMnuh7clFkKA7nOeOM zofAb^pEM++8{(X9G=T!XPz40ERkQiKQ9rHnX63Xj}Osy*0ax}XVuIA%drJja5y)I zFYuf5z>koIA6SDf{F~ft-jTOCT#LIK6ZRyx)qLJYVh4I(5!E|_@J2Py;g2*bR_&uX 
zk*f|M${pO-q4sAiLV6fn^k0>HDnEfTURDAEu>{9Q~R`(~;qI}MYsz%xfmK*#{G|;ECp$ybpixTN48l0VSk@}gFwPstnK*OQ-o-ito&4BFgol{~-O z|62|N;as+*AA6S>NOB7w!Vjqlwi|tm%zO>Q?C;bfA-kdN&TR05xZPXt$(ehg(WMnQ z0$KJ6c0+?pS%#J*WHKqpjzCr4ccPc&jWEZeL`^@hgDoc}azAJCk7Y;bEu+8y`Y@GN zEz`#o^p8auP9wtk-DuxBC9|KG?8_ZMca%Drj~?y&KB!(L1ByELW+I(76x1 zR{iIDa!+*8Q_itu2PE_7-^*r}dWvzU^?#m}+9~BRE0rqk0OdB&Rjkg1e70myuSPEp zQ|mnVSETu^a4DyTYHNKfUOiA?eL6}HH_yw7#Ox2AM|ZwO=ZQ49>_2_}4oy?kjXm1L zMr^@}@pL&+?$^QYN>A6L&^&hLe~fj%;OT4@I+k)Ny&r1S-Ug0wv^23)Bh}uD*WaV; zx#Z(goZ3c)f5OLRY}v!4HM4<4bmfM}<;J2nyvt1zXBGZYZj%0zOm)@Ey-MCsU*4eW znb~~BR&@i<4lH5LVs`iUOnev(&R2ZjMqcL9ln!j}c+kXCev0;gXdHP){oLC82?(BM zug}EqcI-#|&h?~sH9kMf9wdYL9DSa^mRyY*UC}%dV#|13FXF~;o@Y-zXZeprx!jlW z7jLrn@NO~pr~<~7EdMmRk&*sfFw9{S8fxojKE`l%yDz*lQ%lCAmy8j)IpI6J8)qa> z{Og5m!rKK`l8b8!uHUQwQnXQKHVaX658O`V%os4e!^=uewK`g#Lx1*$YeV*S63i;Y zV1sfGgE6OE9z>}sEM{ZvO+bfmFSCK{X4D1e!#KZ-x372>Pd;Z-uhM(=5pq-ZPau7x zfUTh#$HO9L@{>7WHrkFWxc8+JIpLEXzsFGMO)~Kl&L?wYqIgaN>)&Ab10KFtK=8Dd z=929X>BXtw+#6*5I(}tD??JP2r|_=0*+naT3LM%QnX>+$h@@v|`~-G*7j%du@2&Jl zxYt6Di7Xg|v)LycM`oL&Ry^q3GMt+vZpF)+jm(KZnnwn6ZX~C1@AAI|NE>T=xbNF= zs2w;GnLpo~uHIK>&!)g7_w^k>7KfnF!KjrypWmsMRe;HCe&W@xpc{!JdYT&&eS@hLO2%!RI0Kapbp)Y1$*8HoZ@Jvp?1e=X=% zs@a*gzXz6_D|wQ%Mutbx?sN27AAhssms7E?kn>ZG1y>gM-oj#R!l~p{UPu#q(Utyi z%k9mt;^&^8btR)8lAh$`t7e{AAHstP9cjN^-lH zmHvWOY(T>^*~HB7+Olw8D*Y#V6pkh8;YlS!jVU-DT^VVl%p5i6f3otwme$-&?iv}J zYoWoHXtIR-HN=x=P^&LK=gh_!eVvB-Q*irclHoh{UZw>bw0RqOYV3U<|4hF zZ)~pu--Ud|H^BCdH__Aev_CiQ?T#|9fp{8ta+-anwi1V*Ocj@d{S11RTvJ!%*F~=@^fwd*G72?C zy%{V;VpL0OUpaBG4@nzT*z2lzm#k42>8VX29mCONxu?lSms__!)Beuznp3P)e4=&- zt!7SihcRg*9Pa`5JLKR~ZA9byfTx-9ZyX#R^X(zl{di;fQ)pDXDDgPnwAE^MQ6C|v zQ}8*lj0;FePUzk3`I~-{MWYAr<4m;O50&m`vvX5hqQA~U%fxK;D=2n|mOs=_9hN4U z4VP)F3?DCo^=ec}=Fg+hGak?%P%E>9w%U2R7!R6~#O$HxwDUSXLTNN^4Z??v?8$U_ zHCUVbKLW=q(uVqEvl+}kpdZ(RGbhehvr6S~F%e)3$aARLqVRRz(_dl~a*uA-GjpD$ zGw<+lv^}5Zt%P50y-DWcrZAcgl8&%G0PfkdI*^rk6MoHLxVF&e$>{M7sY<5bed*B6 zdP>f*hj~=z7gB#Rjx+<$d!+h7JlliJ5AmeEI-PhsV^R5Sc4azEUZ~ztyr60Pie+ei 
zGh6gMcz!0kUz5i!Y{n=wFWnrHvtReHLPxR@Z{u@#zMTE}k>qSc)0bJcqxJC$$(yIu zd8jds<&EDJ%J$?l<|d1pp?a(c&&?B@L@2x!zO{COQo+Sc)4bL<)+MEvWukF&x zPrS#{ZpIXvp1qT-Lzhm+@l|i=xXp;(m`pUF zTiLOChy6RrC|HReb%kw4>q;c=I+RYHp`3E~m459)p6?@>H=^rVxRd)NYokN(-i`!h zzb)SPE#zt<$c{JK%^^?i$w78719>T5)pImB@k?Kju4IQBf?|oa>g3r(I+v3#+3DGl zMC8=`2k4N!o*$Kum!G>6bE54rT9~M@9bvTqE`Nnz?r=;re0Ei`|By5De?rk@SI$|g z%rUFel5Mzum^SZ4#rXcAXCJaL8Rc^4Vx~@jU1)q|Nax|*Z1@~lGGPQv4#B$ z2)okOkCmGW&K3O7|Iyzj<)*NkV~k3X7-(#dw?Ok%8=Z*0GYwHJL#Z7jhD7 zafbi*(A*thk(mEP6J>UA8l2i`Wj`F4>2GfkB{S|`XnA@8=bvEnAib%NtMRtl^7^yB zcpyx&HlIvaxvO##ulNY#(;5^@j+Mcn>2A#WkkoY5Ywj*g+)h@$vVQd;`~H}AA4ant zAWl45u-UBi;XKadDY^-6o#2~Tz%5#wX%t%HN%F8>X=M2vuRG#pQ}T8MNy<#{0usBF zb~U0?d!Rt}$Tr}>D}|?!c<@pLQ=*_x1;IA*mE3Jj_0!kS#pLxeRx)P<&eBR#R0~~x zhF|WKsi*Y*o-ZcHThVnfx!Kz||0#*N4Ak3vo6YXkP;O7~Ohc{AYO=Q21D$4(iktP9 z8wyq!6BZc>+p{y5;%7s;_$KUzB**D0|{tn94W;G?WM-& zbRl<1&tPHirW2>)%icaE5Apw?#seheCVywJN6Fz7tNkKM=d|g3+A~CNKjHBMh4$ti z)NQbR3j~Ru7)j~|@MMEw8y@6Co+fVKd(@1V-b8DOnQ5lSVd~|6xXL6dUd1T9%k7Ri zbNd=jq$~Sl z16jKjG&<2QpTR7v5q0rqXhE^@Z1q!G>c);GUNSp2!%;9fqIc6rvN)^&Pg5Een{o+g z_fRVGmCV48pkw6ebA1g~XEq%=mu$X@bIGtgkT;n8WIubGRgUAlNmjnx6P}Yq{XE^o zwyZ+wpV4U^9yT_P>;nI;#?05ytsyM3I^7hVa+_bWgyn2eU!2M*iyPF+{>5(kuW1ZS zo~nEGpS&M$!aF&S7Qp*R{13&)u(flwnZ38HHqRiP&x7JH92?zr~$OF0eR556L>V6*SAp|C{fbf^{jI?8Y@zKe@*&g=eXviK)C0oi# ztawf{y$iyx%@Y2qx11+gq;xxAr;&o#o8@GF zTrrZY_IElvHUND$!={uod7C=R*t=w$$T&94m^y%mcLqrwN#?W4KDF@UMv=JrBsIAN zzW~D^{CKO7{>Hc;uX+<|j-;)dNWf+zPxR(B7Jan;$FNZo&?I@Q63Lss#9;jsWa7We{O+Np#!gzx*@YuX zO=%^$3h4G^12Y#%)c&I+_qf9UikH?EZ~MYKnc604u@pg-+ukz2$ok?JIGVXd<`MPK zIybGSr|gTI2G)EoMY7fOKkL$o;F?PFv-9(%a)|@01@;@&Jqe}Cp6v3>#bOr{wGZk)OXi-oN^w40koiF){A|feOki_5yAds|v{B<>}K{8^v=hh?qH2Ij_&huOLs_I~50YM9-PV%I9$3*@=c`wP}_85tf!rvFINa>n-} z^vHSIpUBW=7SkQcM)a9@vRJ!0TV4en64{hH4DZ!m)_1f29lB+mJef0c(_QjnRx+xL zr@7gyx*1P{L3^XsKs3Eo>&-z>ignKoE1NvoP(U#w zp2xXc^xVeJtGJrT?_|a8Qmpx`HGcJl*`fIJs}UyN+by_}oF6Z;)R%dd?4((xeg$1V zh4YPY`Urpa)>GDgtAYJbFiMW*o9ONYxaGu6<}9`KI0rR4pw7w4Ro7B48arCKtefU` 
zyiY)v^C`(l&|OO%Sd83WRT2G?TRHIFhbB4E-d3B9z?R*Q39QCqGq=RUP4zdY?q7z* z9Gq?hujC{df=Z>CURJJeV9|e4w+ei72I?d{=%$V24E=$;g|0u4o@9W|&C0V$Wuo}j znuqO2!tORgbVAke=5D_EU&z5g<=(;F3sCD@5FA1Rex)T}`Id4yCEo;=ZD9Qnc{mro znHR3tUe>k`FjpH0>myMlF=C~>RBeqFseLM{pQzPu_3#Vreg#La!Q;8${#HvXl?$|s z;JPc%>_n2hk{&&0WJ|C1_Q$td0qJlo)#`)IPdUWE^E6F(wz zk)iDWCt%%Ex!favE9z!uG{>CkFg0eNX&#fqH_;JIv z-`;OkttU=#k)GQsyMkwtJP!A2;VrR4S$Aj#zcwJw39Rh+T|?(5mHO+$-u#-!KO%Q@)nQ0gb|GH1-*!E;8l?6(YMy^aLi;)0(e^*a)! z5|6ew`#7ESe5aSyU|I|BCfZnw`u*UMJ>RaNyA^zi^v~RHz0#k6v#3Z7NBINs)W(vn&+%Z9T8q)@auPS4-5LX??&>{9SCaoKk!T(8vZuc{l87H+wgb5O zlY}ZTsEHxrhiKyRqw&jxqWlqqq35uafh((J8lok1`&e zkJ6ve_uO|<4&B9-Stm?qIrg3Qy%S)ZB!_ zy$W4gjN0E9R`NRSXSZn~+;SeN397upN8X9#LsMNBTjuwLObhun7*IXZq{j@ zBp;ENFU zCzdt#?KaqC=A4{S$;|hV&m-x2?gdHarJCsVf+vZ?+1-NC3n@j>HtX4j*<5q%f-VGIsT z0Wx+tN5*i7B#e)n?-Mc=XtDi z$@j}i<$vlg>*PD*T=v_JfNh!gf8^`$3%B^bx&7h>Qgke>-wBq(3!02q`fj#l8qUlk zUH?!oE!XsIl@>?Q-poXk(|Ne(e`XyffNd9$KIZuXu;;$PN#ybc9DWWJ+IjM*XUCJs z?O4d1YyTdPPa-YZbJ&5j+{Bkn9_M4V*#bPDfjfCE7r_5m9DNsD`|%FGMyJdS&La~Y zmA!~G<=p-O_?t}O?Rg8??K+gL%9)L}ps23h?`d=knpugTa2OdZ`M<%ZQ`au@F=dOHvwSApbi|63Vn+n`F$5}gOr$Xw6jZ)s;KnTGQ$xej;4i_^iATOz~L z`1ln(JM7<|Gxs{a@b~9~V|$n;Cv;|-$C90LY}@I@{$Yi1>;C3 zb~ss*zT^RAeK+f2m$J|+&?YfY7s0nFEKe=Gv*b^?gf!$Hp)G7;GMpyURX_5X2&F_I zwb1HPl=`ahs%Eh#$vKm}PKoUKSl_3y9AjzAVDk9`2o@THzf-!ecS}*~BR2Lot;D;o z%I3vS$=c&ZTFH6m+;layut6hnx3b^E_bmG1M}zt%iKVhTvV~dEzu#f zhTdd;0C~BLwkGl-w}G`GTP?}-{pdLXKaSQ`;*KuV=TMT<+voV5`-1a5b||asS)0ts zm7Edy-fu?4lfac5fvf60xnDCc7=^~;*|VXbJIRQf{AIa&Be%=1G`>Hjy(;h=h0C9? 
zX~`qi0(~ZsuD_6gXK*+B8(*@;kKs%|wyZZT&D~^!luH(a>{EQ_b4~)EssD@cVy-u* zk*7piTt`;A``t#9s;HR^?a8{EGhMl{ZG+Z_c^f~jS;2$c;II|6%V2r}nVLYFYAC-u z-sZ-R=|;cY`kN@2#K^6s3E^ELv*u{^EHE}=36qU?8pzhOCXuM_DEuH=hwq8oYXgTy zz8?(loXh=(w>huV68x>e(p_uGcbrJUBf&Bn{zuT1>?FK`&W9V_CV=`=wB6JHeax&9 z)AFJ=7L&W&qj;wlli?}xZHZ9HNz|Em{j^@@kcEug9ZB*n{I2zdWOXOC51`^tIGer3 zqm`=$k5A##kyJhk|MhUpU82d2lAN$PWpJ3eYFiX&Su$9e1GJHw)DHl4@+Q2DJ~@5(qS~3&zGEy}tf$j(?M)oWO$iNHgy)sM z8ID<7eT|jq1hZPO?9AiMDWR)L#9ivIK#yQH8CIFuzM#es(wq|)xl<+mJjW+{5M@6n zO+R^?IsFGk**W;J5bPW2%;B`|Nzi5PGg9k^YV#pPgk ze&;Ct--v5_z-$x>-^6OZf-BYZGn)?Nwwk*7s^@*qKtB!o>y23V8iBJ5oAWDKZ5XMw ze&Bx#EPH@qXLM^o4qM{Td|cnG^@Yafqv3R;8o6El1Eb1~@P6IjWQjO~gnvrUYHIxj z{8@?4Pvby!803u5(`FdM^^+WG2avbYN?|hE=Pb(vxM$TfYsuApTCH|&ok*UlL^Eth zs$Mr557AcgeIEhF*UbY)uqx&CK8wcI1j$qCPloO5Xq?U{4bFHWqd>d`kwijoNF2%9~iOXHn-vlGj`Lws<-K z=a!JR?1K(Mw`rhy6QsS=F0H8~PhQS=)m1B_&#&y*RN7nvMvMIHq}}W!jX}L{`3|R& zj!Nw3*<}19KR42l<|K2q=c7>S*ndO031*w|Blacd#hYm-`~NjS(?kEcrE**`;wDSO zNET}>TamSp#Kb(I-urO>6^$3MYPpH@XQiL<_hmev!S-Is`rk~O9|X%EK;4~`tV7-8 z#Ogwxs?v`A3rdeDXuTbZ?2a>CP%GYWc9<$CoBN*23g78>U#y#Nr5%8*JSVEDjWw9~ zkBQf>>1`sqayMX3ew5}2^9tWDw~8c^Fwrs7iZ*v-O%oB4n+{VpCp5B0k_-hkif_r` z6fbk%!Y7=EhUup~uI|F8YfZk+V;iz2z0yn~bLD#Yo*NXG`?Q_%H)#0)QWy_0dm0mc zKTivpnN-C4zZFlC)wTJ*F+Bn&%bUmFqf9c@UQ9N(1J6y~{|JMgrIJ$r_dmYIDoG7l%U@t@Ku`oK1_fkWhA)t)M^M&zgcYQ>grs?ljaDVInek5&2 zZI<(MyE^5q{U!TuV#iJ|W?^{~pZFkc=TmM&{koV>l%dX^=2f{*&YTep6=_}O8l^fz z>;-;N=>H?^!UZT=Q6G6%Q_H8|OELxQ4!dwUvUMTJ$$9Xc+xd*V-s02EpxTM-zw_@_ zvjdZiQ>9#2>8lDHb7m%)w=x?l_5PRQS;|HlbG9OxuCnKly^O5*r!` zR_pN@IPY9=_6~Nt0(di5imhzV8Z0m#57v5aUQQfSR#Lw4zBDIF`++MpLj7n(_8iiG z=J44oYh-M=9Ob(g<6ZXA8?tm)urj%C;P7I+Z^=^iv@$W+81j9`=|=S2ik9`?{(P#1 zEJW_I$n0#kajGlZurCX8A8CkRl<1^ZZ2BP2-e(<>7jqbG>`xxY@)v($p{BDBi3DC; zcxv(DpXB{6AVH@YQ4(A85;|-E#cwRqS{AtE6LkW2b+VS*3!CtLzCwpuc(}P34bD*G z7H}?N&2mHdZM=)lM!m#ktWzd&P2FMp2ui*L$Nfo1azZ_-|D0|A1+T|@yHxFDqfe&8 zeNbVRPfbW>=#<;I;+1xTK_edb$@)AW&QFogHfR<N{cT#U5+wz04H=|_} zxU?#~*UVAs!g4KaM&RjMeO(BbHQLR&tTT&pm5kFPz?zfNxu2tHfoXLfM)I?LsojHd 
z;$;#zRH@u5nK?=BiO=55l?Cn3qMa|1&z!u-KGjEba*p<9Knc6MQ zNOGol5$d1KP9)D(M|?gL$Hy6SPO!q0sEQh%)F`y|Rdl)>jd%d z#;X)*Gn-ruHIgUdaV($WeSCk4{w3Bvw|wWMK~AG5m(qMN=M2jvGQNnMo~G;zX!kgN z^&&ly?@zRI2;8@kzFv6T(DxNuN%Uc6*%zbw=RU^|Po&lHMH|UkwOosd{f=LFGOp)5 z(}zatXnpS9$h`F&*1C?d7;Xo{<`zTTC%XY*L*Y$sLqNGRdXQePgdUt^!}x?{q*-a+TDu&GsyI=IDIDD zm8j0^VEwhHcY)&QqE<7l#RAkeVz)I?jstH-v2w8a*eozNeKcnc_R?=oQ9iHKCVb9K zwDH$Jf=%*aWyi0I(Ii<7vu3^lR}Ntpmg7ZYpC|K#a_>%VPx*$`$i1RX;a(k0a~EDy zwa0mOwU!Q6BffjQy;-<d#@R z6CbpWwQLXaM0T8^?G3Es(|DQDF1eff74w%9N!mqtupYINE9V39)(T9A>hBUbG$37r zj9^RIrtFSn4PuC&z43d1`Nuh)RwwJ9nO)uJS$136>#GYs=XQ`weA^E64M4a6?zN2L z$pUs0D6$TbJQlm7cIK&7^xTeK#HQu!b>iyhz(hUf^>r z5>_AlOY~KXem$kPt?(>^+j^g4%X5eD)jY5A`bpH+P;g}T>WyNCRaRJ&MC^s`*=5Y! zunWqr^*86&G8@YcsSkoG`%^g$mMqFS2c3L~J-kVb*$gr_NBLwRdy>ZVht-92C;114 zkbq?D_zkSd$vCONav8W@<(E9~TTWoLGomNIRTET7R_0`q$j;?^O69Cm7qUDR?e{Xr zOjOesdYr+|Z9<6`J-7qkodUXQ=W6}8H#HwR_2gV2)={gGrW zQ2TOyC0FjV;QLg6rEIKku<41nYDB6#u@Ga}=eJSnWE%fG?!-^%)dR^RoY4p zrp#wg!J+3A@Ej1r9|Bk)m??$(%m9nFhmS zj&j-GPe$Iq;qTwPPaBC|Uq>HqVS6gU_9n0-hjz|5ixiZd9U_PRGM?#k0l*-EZ?f?^~-K&%a_Jr>BsS9r3aqy(mMSwDh!3=jo@S)(4W1 z++di+R~hA&urk$f z|6-Ofw`}CxZq5xiVST6SubzGn!IAmOzhi8vs@Fs|Wqosy+Ea^BsD%+Z{_;p(K|d0j z8`4{{&_8N9aSgd4={uaw+-PrBU<+J3YbiS$4;1!$lK+FWob!|~d-8+vCwMhh?n1U; z5sP^*oEnnl2_PBEk{+n1oFQ4nR#jzHk|F*QWsZem&P67t?PO&J(1M)moQ#*rn==NC z4bU<1tmAo#o3)(m#PJay(C__ZXg29f+|MYb`{H6E)z8zzEYF`&=4lHly&Km zp2ut5h~~*+l#xG~PM3h?E4&${wd28g1l>N1e7{}z#!EpK-#-3!fAAco{cH6!o&CKQ zjsr-+(FGlHk9iyYCllZ|aDHEJiRwx0PEHah+Nde}v&b0V*_)gkJy37!jd`P4*$GC- z2faB!yIBFx3Er+W{8aS31)Q1RhR>xNJ6`5}pAMHbq&RbxCRRd{-8JV7a!W$)gU?P@ z?j>wW79UYIx27yp{v$91qM80@#O;TNi2*v3pY@?Wd!ow+N!iD3=)H3k$a{S>6S>EYm7I^ikr3BH9mdn zP3}X>soTtHx|6$2BsVvB9gb^xSb^l{pHpmZRgz`W}g@iI3RT-#6)AWzsZVPuKHl za-)4UTzm~BwyE8g#n?!n>*HCXGL{%;-yu1PWSa@wIrOn=#xpF{o9DT+|;5fVS1 z2fLAc48_$>=vkS?d4_lMp#Ep!`Z~}ia^PYRPKIw~^cdprk=kBE7C*(W>{;~X@3#l{ zFmk&>%|<@wrl;H)b|KmR9v2R#&*POFK<_J~@hTFOD1npw{)#V|RZdrCC2qGuxnyOp ztF1&8Y~%r+N!OCuY%E*Vh6O3zztS3SIu;&RytZy|%xxaEU|fMu79S^m@nNh)eEjTG 
z$M<``1C4sQjFr2zQSlF<-0K zp^)6*@G&X`>gz$>jeQ&C?@~P36CQu(*NicL*oDpdR;lcc?u*{rk&eN5J>0tdQQnp? zs!PtkB_U<_zZHfx!MK~gKZ8$sz22brtUK=r%RLL2lI=J1g>q<^7_&-b^{+lGJw{QG`ZsZigAMx*B zV2Urg%Ku7cxbcaHk@X7XAhAUU;(J<*r?i_AnKk5I>f9sPfQ>u8;C%Q#f{oaw{;A~R zGaf@CI{vEH6-qWVmUTjj^z=s>5bk7+U^@OM57atk@-F^$EpLWvr;?|+1@*ZmC*&Y>wS;hp(OcB{(JHD}YaH8xMZ^glF7e-n%Ur9GGmg>IIfPvmeeO-;aNBo8=45? z#ApvCdjnaL`)FR>!jHKh#j~b4$-A5Z{LH9-0DIY-Jy1Td{3lu;KmHt%9eCP5Oz|KN=0*XZfq(We@hS8=U`w^2-Vc&NI$! z(AQ8t<5WHT#^xMKXP1L)3#nUTR63ED_==X^)q2h+4btq zwX!=~xDwV^fON1nV>c5E(jN_9fp0Cp$B@Hh{Qn zBXx3@OLurYM~)_G{Xp8?g}fa@N@(nw%d_0^`MG z&iOJM-53>eOWk{1QQbT#YC3#B(P~B6s?c!Q6^68LtP>m7IsniI!d@{c(M)=Tp4s z)9x(LZ(5s9(h^IPv+|i=e22@)F_ zD3chdsi1!wm#-$xdwTXh{a&J6=9q~PTnUccC({d+5>@dpqyAU)eLt8!iz{_$_*ydj z6aKZpzb}eU$;p}=x5M#aD9W6Mx}TG)=W6@dMf|N3}o0 zxdWO$0+NB`=2O~~Y-Ncf%xZsEE!5Fo);O1gA#uF*wJ{t0qG2DfR#jN7+~1sdl$@td z&W6*Jip_b5TxFLuF-Idw**-D10r|~tbwaJQ3#AMP* z_QH}nji&illI#3MhSSJ2l;|0i6{d?FG5nUTLj7P4cL+y}kA-x<8gL-!==zYX8S zwKUUO?h@&atC#8FYZCbi$|h&(x$5LB(?)&eMEYn@KC9%}_&*LkTd*X##X6^H*Ld<2 z{MPW$uQoOgK-1*w`VM60c>k07yQp(FZEQr3{{`~I6ee?PYuKI&n{njpO_H{l);vr8 z7U`=$&a5qTAh*7BC;y@6t8kv@>1_6+uKuzfu>_7&VDTWG84a#`Jv+=el}tzn8*LKF zHxFmF(v6RJwuzW%N=E0ROK$$2NuQ3wzjuu2yOEr0Jap8V_)bE!EDcs+@CcLKos;<_4Up zY{76;xdnx~;LOfs{T#jA35s)l%ZzCrc;@QkB|WtvX)Q@ZvL^n*6X@Z6VgzDsyVI7h zjV%*!d#L&Y(7P5pxRibkBauz@+f)l@p<8YN%1Xe2TCU;Q^V(ekx1(A9%lH>3sC_Q` z@|969qkK*QUkc}YX<$|wPGcc5mOai2ByuDxJM-!HZ~DrrUv`R<4LrU@3%w;uwLSh^ z4bxfh%#9PbkicW{bFlsrn^E#%vY(oW)9h5N`_J>pLsn<6S30ZvIe`$bIsfw;uPL6< z=UT}r+<2UM(v3WRjTX86?`$;B8TPCS=LWpS>|D+#e8TI<>AKuCoj&K`{&P5#NaO9` z_OAcAMS2=blgO=P((Q`Z^+@G*de6#myt#e+-+&({!|HY=mXoy1RnI4d$wn3*HCSE+ z`<(ax8YETCFS7Ptoh&`5)Zaa;h?c=3XAHhpBiV}*O*5NJ-APIleUuZ&{lMND4C(tU z@>~P|OOf{3oA@J(QQe!>q+t*$3{v7!WghbEM!4*wcJc&YuFXWxt|NKDJ}b>v8z)

FJ<2Uk)5cGwwbGBudBUx8O{Feea=+$RN`y360`ONnua@jsj(-G zNJMM)mG?*OoX5CWds&4q$9m_}&LBy~+&5r#iqWwT>yi28Km3kUe?(EMoE~dxbq{5) zVx4pDI{QvJ$&{$~!_jO9bU2@`%qZSeAUm10gc`{RyAX|Xa-<P+!3(B-`=z$_rL5^)Hvn;s8{B5Vz&PK?DsYsvP#Ls^cNUEf$lX5yVHoCWTmPr zi<7&&lI`X$I=(-j{wmUPA8U7lF=jXRU^YvCB)gGQrOCaK_1CPzhTEr;&D+?;L{VK0 z`Uzl6EXj1Awy8hXDER}tlaKCPR;8PszGroF3Lz(hS7`AXbu(%vgZ&bAEjMAjL&K*T zQMM?16(8Y2P**l8HA07NaLE~w;UqrUNpt$Mxmu5dv?-l^Q%^6DgooLy#Ik&&m1KYI zfpa5S_tEJ4n^F5JkmuyU*dw$R)}&1_L_ij|!w^^tY{Zws1TgdWLA zm3Y@i%4c=^I=J>_?JlR6Q&HfI0+++oi+7y(l*izCHY(>d#Y!5T*x~pmhq9b&^gP{I z6Yus~w&XH!KHyt!plIM(vd&(|hBrgk)gb#EWJ~>Q)XxlMpZ57kmboXoUV=t@=^=U8 zvZp>B4zra13Llc5^_#9(d&U`LJ^0Riwu%P@PRM=VF+}x3wRvVw+CP62|;5EK{FEq%#&3#$5Zr-m}dK22$@_w!V z>kB_NInqX=<9Fap4D>7XBKsYm(zs;sX#$>m!EhUmd75mtM1|a9aWovNlE!6Vxe4{# zs<8(AD_O^HNL+mWC9t|e?^!Wlr1b~cqp@(Ah87KJWU@3o54%w$Hu*BL*1HWCHj=!9 z@jmPQzvyWW4f)RZWCRQBN5g9}{!~?O7>ODHx?54_a&L2@DG>lGK>h&S9%6+DX(xM$ ze}ZvV7n4zJ82u~dlg`@WhwNrQ6wAD*vroxVkn;(r7CMx@rgLGL*+^4mb2_RqiF}(p zT&tx-rX5oF7N6kn$0)XyT;+`aW_G%i^*1+8yalpL*z*-+w>#U@KC$!~S5zP9kH=K9TP;Y5wMrE`xPZM))3LnG1v6#Pl+$!oL(b!LG*dqv5l z)E%AoX0!W&bt5|6jjA7`ZZkB<9a-6_>qM#&rIZ-7x3!wRn7ioOBocFrGVOi(J6qAZ zpj_4|59LEuNB^sN5tT^X&RWU+E3wJBk?1{nJHa}+mW}|y!T2-}-u=nb z`J^PiS9VKJMz{OG+Z$Cz=`mT~bEACLbH*6+4npyU>OF=!O<088w3FOs$u8QHd}N*D z6%^~O*10JE91LI4YqtX5-^?u@2kS^}RRqUaoJmwjaz1^+D>$x@zq)9B90__7&LimA z`!q2T@-_H%_n^Qq<%jaxE0WiP)XO=DJv?7RJA0Gv86@s`vfh#0XFp?ZF<g-lirE; zNRHjvMXi)+2C|%$ex79HL_qG+_{{IFXpw!hx51T&p#zKY{z~;DO<$6sX?S%CYUd92 z#DF9p?x`@_-RDF@bVH})=18vGPuAjvj)F0P--j_*pcCi&l* zWjKhYDHMDi4HuEcU6ji$7mu?ohiE5W-=50k zB!0Y~tVL&LQB#?TWH_s=JNcIV;c4JlP}InrBQ?wGr?jS@Gw!>7_3{AcX-G0JDfJrUEn`7dkw zIkkKVT8=MP-GAii2JR1VYd!C-lX!AAdz2IAUFx~}=fut#SW9R0H%tZdi zz6R6syzrX}x~}7$_94Ry^i}33acQ@!{d(amccv@l^^%Mb*{iLoUR(8R;%wlE*Pk0! 
z%agu4^pZHtvkJT)C-un@obwFXBRE03;X->-Q^s;-X0nr#*>&j&sxsbC?k1jAc+b0% z-!kt?KbepHsAh8OUR=!i_9uUNw>_F?kKuM>-Cto^qj;J)*3t@mY4;^}+8tAHv~n?z z+JT*pMCE+VmGH^_(60Q8b6BPHx&yv^2JQ-Mb9;E);MpJ8hU>{_W>@j=GaJY*M*jXj zqb=o;&6BGCma4r-WF#P%mokS%ViJuM3IF{keE{l7eIy_=SQhUSCVt!VkRw0jJj^)l;w6A2mw*R$Ay-Ppz1DD@?N zbtYkhN%B9~tZk%gEzN06n_93AjbYW9Wvt7FJ>WO{3-6=KB_N%tPAfKhraqDzBzp~w zSdXs8fU)e;b-cX?aQZN|aRsSbq3^7T9-;qzXzb5MxGjZleE`ynj6R87NM`;kK{N?; zo3!!{9jy$i`K-lU7IzGq4E6SLqt_5Nv9}T@kdd7I{eava0>7Co=Qqa0vsjPuEMU%x z4KR}Z%9B_N@9dk*VS7rqLVO3_%m^B)mA!zs`Id>J+N`(TeLl^|I|Ed!{640BV^nws z?gy~P_2D}MRkQzbxc(CpeYTRqy6M+IvciqY5%p})zaS8 z{F)75%o^*{N^gVX6~?M$0Zn9P57hdRcb3z5{n+(TGkZDlD090~vU)aF>Q6?12T}PQ zw7CdP5-~HMzE?$wZ}1^`ypnY**>_WaCHOX=%h5)L%V1g=Y>BQt82%0M;RRGk9Q@^K zoebXzzMYNN*`;{NsFgeaPWJsWEzW~+FW8->%mngx87d9}#R;S>yXfnU7Eka|wt@Z~ zKZDq;>^mj4XEaGV9Ne$L`dXuAZ#p;?T$vvw%3}by!vALa$W4;V@u3-so($HUEbj}_ zmFRmS-an=G_h|SWie1Exw7~slsM3yJB?Dy-cJz31y@K?8X{2rn@?@H=1Bb&|s@&r7 zmhp3jCpmeXlN#O8FmuMbBs(XMX7He9gS9Vt8?BWWVEGy9B|Gl^H18JUYTo30{Cx17 zs&uj{4I{Vx$^Vgf`Ll1wkdO_=_9e!b*YN6KZJnvre(b_v_@AKnJ;Aw~x!4%=SqYoO z$UTeet$aU?z9&P}3|_}4;Ms?Sb<<-q?!JHqm!tCuU}{qE>;o2c1Nc8To{UE4WJ}H2 z;^ZvouIEMU;yd8X>CbU2erB~_vh~U1+YfHZ{hU*!S?7;0zY&hNXsb&>qg6_rOoz@^ z>oi!7BR{{vZNBy{EcCp#9$qQD%q_Uo1KqPXbcGVPYO6M0zhDH-ec9)*M!8WWcTFx; zYa^Gm0bM_*!R!xlo$$pbuY$I6j@o1Wu`OTo)sPzX}@vM%1rpNB^&;0BHbX!F_ z``~a_9&2(n?W_MDAem|;oQ;Qx49tx4Od2&87EhC#ocA7K#2sHi)tH@Kg*unPHY?qU zsc5a&`J^p#wcCX!S_lvGrR@MVXRnN&(rL>V#^N@&wS0}|UzBC{x&hwHrF&qu%Qoc}rh z>vMgk=UKx&uY0ZMUiU(=#DSeiTC+zYv4uG)GP}gD(Muw)vX7!up@lV3p;I9_AF>mx zN&DsC>#f(x@W@Ht$M~J~^^;&RhjiRZ>as_Atd?^Q^))2xXpkj_VHpl)zwv9Z&uaD% zzfOQLd9fmZ9%yw6GF?;B0nq4WBO~>5a4{ znJLHXKiLZV75zVqDl1XHDR{4i=Z9Ka&T@_d_j=r&jn~;H^AZXt0_-2CeFf`XnhOpE z+22twXVi|--dGT?P(OS4E28CS(l86n^4#nyJ!OYvp0gx6ChIg~)ymVLAK8z6NN8s@ zN*NX2hwldXbY&;^FruwA!?+jzS(7|ex$L%lw6IU@Nlfwqb@p1WRuy{s8(g!W;~X-# zPOUt1t6seN`@cWF#zO93G*662IVBHgosw}rd)8LK;(1)povHZqw%(3}T|2bA4)+(q zXj^a%1a%GESb~;$=JYL&T**R@!NW!&7tygq 
zktUbdek{bH^fKq1CNoR2RR8Gz)5VB%Ck>gwf`4Z88V9dD8MzX!c`njc`8oLfg*w~n zHS>;AY;mOY`$E5$6rYcyQZM#v0qd4?U9T%F%)TfzT#e)PUxNg%(e6l^u-xx4U|R#D zJYRSmU016806Jx7O04QI{CO2uvK~H!%MiDW}C7e0D9$=YOKv^QvfDLLmZhtSa!NaUsGn>}6>zh}{o0_Yt(Silxb>uSl}S-m z7G!5v17l`9!J#c2vQnM>-lw^joXnH_%Q>W(HSa+K1M#Iw?ttp?*prR51?pZ*I$kEh z@i?>Z;cW6$f-SR*@?QTmre5fGq&^vEGuQeX?RnGpJiO@&rcOAPxymtmno8!f+LIYn zcI@`U*E3kHL{lAtlR3BWE?SUN&gyD8o_A&(iwlpo0ysvxk|!tI`OXOs9~BU8^gAo> zXOOeRT2<3t{O&DDYxct@M?zg!rh3;#m$dq+&rROhEuH61dCD??7QR52bAOG`lINYw zNSW^iw+d+XaPd3;e=aZ&PZRH%p8KHrB3LHk@u7l$nGgNH75@kL4Bavl*&G*C zn$1ov;QVi$@-(IN?*iZDNl8BUBn@qSPt|LlH&y1XUjhE|v?V94q^4KR*mo$)7JsftTq zuyG%=`@@YXBYC9BL%J6%bGAbwIT9Cgf_pg|{8f6if$W^Yo+W2fH>1Mc+RLe5$$Wh~ zDLqE(i*D0SfFGt zi3j=wYm>FxW<1+>)yV0@$-Z+|BMbr~R2honSP`KJx79j)$1}#uBxzgIP`=jrZ7` zUL1?AUC`%s7H1M%%D|mf=R}xghiPBj%1-l~;JU29?Jd`a;BKN)vfFzwpCK#Mx3WX0 zqCPc1GZ4XgQi(G*e@Nc2^lCl9Mu#VQ1q+_>}ynS(TcFexnOXNdEuC zV2ml?$fx9NV?rvpu5rLrSA=hWo{ z=ricUEG6f`uGLPGHDWx)Zf@HwtE7Qg$;ZTUZk?uQul$L?=gs zeE_(V;q%twZbLYK!V0tpU*e&+^DAr1gK>IidQo4E1;&N_cn3Z4XdoY>gPL<#&5OXZ z9ISIme3{Vl5+;z`E{QjbH;HOT+Z2fEpaL5l8+{@U$b~e6ue(i za4K0X*Bb{CWtCXA2jP~SvQP1W??Tbes4?1W4DQrNm)A&AY|R21oA!4>hYs+o1+T=E zpTsKs6VEO-YK#TTj@ploy}<%}Nb_R%m01Vq;CeOG-Ah|}a#;_C z>-ow9XwM=Ru%)Y);%5&svlICZZppcovo^9n?P z>p^7vDC1f(Ap4gBOEgN1eUk#4E46h$ z&Db9e--W?PFnJ%PJL2^vBzq6#EAtBo`~uB6vVll zB&)Zgz?mpe59G<4)RpA@)1AyVrs8ZBlu2|+;_GI@U{4VChQUX^ZzkhUYiANDlYOHc zsm_kz+OA#1-Xx+XnPVrxzZZS4sNNKm%}VrE=>8Gz#Um}#a(xn$bLU2*SWd>w>UU1> zuA^*s_O&xA?h1;WA~J)_B&zLGGVmMvM^+ZHscSixEl*_D_{{F)pWVGq&s!RQZqU;|*sRSvvCC`uWdEwcvmA?) 
zNT7>~*~1njCset{XI1qQqZG)p3p&5fpts>bb~q<$D4DhDDVy0tKC^~jp02g^s|qgW z3Bq536P1K1z2p>)L#!o7Td;KfhZ07YQqQ_PKL0_;c2IMw~6^*fJb@ zgU01v$*XT${Qh$*|KHz@(P3MhyAN-EW;dI$l-sf^8&I_)?l&cazZ72INcJsJ0>9(S zE=Gs!b)10f$s_s!J|&myGg>*GCN77~Dg1`ne|R6`+4Dz@#(QZkr%N4(rcGcl&{4FLtHzx~Yqn#*4m1Yl zQ@`^IuPy#xcC5dq_b%YgUdR*lomo%j zRLKc-3D0CW2&x%Za*|t~@n56eWO%v+1;-hM>!C(<*k!e^ztYpdl+)@y2Hjd$+xwko zMtQdXfcFZX!wOjLPs3B)C8xb#z{5BbzQ6e2pS8`&4Ewf~8oauRJ|DYJKFR-se-xR&*wxAbuh4EKP` z5TA)sK8hvCzSAAxk<7vyLH4UQ^VH-4RLaw-EA{sjdF+n@i5JhA1jq8)e^UE2@J_

?|g7q|MK z(+?gYDTm=zXY$vNOpON5NIl*On^I<(b>tv>UK77@3cQYXrw9CF6DJxG(o!wHLC#oS z2&x83yovtFtJIvPOesdm=kQ}ip<}y~glf26nZ>+C$($V4Tq`|Tp?a|RH{FaEei`|> zMIX=O^no~26L+)I|4uL_y8b3KYoygt#=^`Z-b9nVjKzCuy%&0~0MBT5ZX#vX{Cg0b z)s57hm9Ob~UHG+Sg%UlJ^8tRL#noA=M@e#^-j1v^fZ0Oan1=4zVRElA@J%q~IeGlX zit5MX%1H<_(f>2nCeKgS(4)!t*#+&|lgnu5@9HMXbUQu}JH zBdl`rd%Wv|tWT7(j@BUkn@GxGuJ=RVt* z72=6F(iXi|u>rRfxQ{~bt<_t|n`(!%t+kNIf)7~GacUica!;V+x%#Y4gIWc*v}S7rx^lRiV8r}1rJ0ri*o5c_u`+*4wR68%A62}Ie$ zoL##Wd~Wu85=-(l4fwbEja+T$?&dZ6aF#> z*|j>_ouxR`RlD&6N0Hy&gZqgu=tg;lYc7ujQnI5Y+csmu;N~#?sAS;HBwmS z*UlumfwEuu{f+TE{%=*im*y7bNJKe&P5YZ)-T&w%|Nl=P2osboG`(*8utCwAt~ zzI66jZ|B;a|7o=nS{;B!$!%1j;7!^8+}qsq|NDCj{^MXa%f3c~>sYz5@bBwNo(|48R^(jS&)BV5c+`tM zT1}p78WrOG?ZWr{1^>VHe}x`8SXXGK#B4Qw*3T{^tAmL> zQ8=9^=X;UQPf+10d`Luoc8508#`Ux|Ip2cc8L;`#2$`qXUxDi!)^OWm-aQd!_p=5$ zV{oOC_Xv*fFbJ`>aW6PmO^rP2LSHyMbs53!k;NaPB;<2LGMFnH_8Ipu=jGbS-PK z3tA=$yv)AMxR%+&dn7p0kXb?_(2oOW$%}2} z>OCyOPawDrHIjKX^SGQtdN7U-1>t-=`WYk{H-dL#eXWGu4d|cjOF45Zr#U2wWF|RJ zMvLdP^}9ZA(|Sg%b?7n{1zsbKmx3nyOp{M&Jf4fA>c`m#gG!x)Y)b$u0kbq|_k zjrl&5Y>!%5YfZMMBlO$PE6;@HgC?`afyT&#jeK3upgD^%+h_JWXAkvb?Cng~a%S}d zN;Ok=KfX>fW!$XYt{WQDYuaJ=bt}R8q?7*y{_A(_)yZkz!e^*@TOWH=mt_eOS`lu-x>`~Bn2mNIg zWg}?185_FjEm?8<@g0(dzZCud6&b3Jf{7RWot$?8?{%(Zzw&=b>=K;K>9vRYoqaXq z*|1aT{n`4<&i5uDyOzd&#ad@CKx8I+6>^Gnbv@RBK{643gxgs^-jf_vz>%D+Fc*KD zsdtu=Tfn~-&B%GxiJ!>13(0(P8>t+}8%eZL_Qs!z^3Bn5s4~eQ^)C3vf+qgq7$xsv z>mS$881QGUGcnUY^38MpS=L?$lf1-k*HnKOP~3yIpTZ{@alX)3qQEaDD=T3W^swnpWv`O=ff=bOgF z2k|wlO<8%n1!tSF(R0Dt2@Ipi+O4E3(jIssbNBh5y!SZ^xS!we(&mZy)f_x^aOuGU zif>T46gxizj5%+)qCWef(nw=^PQgfipyc7ag>-bmwLQ%Ij_}?RMjibds?UiaXw0tN z2$m*Bh(7KB84*9Dk2~R4W_j6V(gTbOw0s`^RiW*{?soU9DBTm5eMsI=e5%en z8;!1gN$aCpP0okQ$Z{*ZY3cWWz}nXDSfB9l-{`m}N?b_R25YY>PP8Grx9RI{JpD}z zbK&|=E&om?&NTiF(As%s7LmtYls%bDj5Wse*VAoA*x2RyWMHY%wNR^vR&D^@EG6To zRCM*X!cL6hr6kYPRy?UUX?bIou;i=fEVewmc$0?IVC%BibrAly1j+NL7d=WuL}$AB zENZ-=*F1Zwi?ca@EBOVk@^2b!>uDn=Hl3|bvedlmei@Fuf>wzaS*QMa?j#<5fs!l8 zQ{%#uyv&tO^yWO}9x1SRLcMR$ZykM~58rs!7vk_2kF8O+6`Py<2bq6f 
zUX17q$!(t1BvSkd-`N@SSV6t4-2d+p)PMHzcGq$z<8)|n6%PK39c@cz5nSmcMyPM)BvX}LVgmC>?o=whN&lIQDk)EZvgeHkSy zkn61G|JAG}aUk)3r_$}@Lrw0P(6xnMe^Ih+p~-1CPc)LlGdV%);!-j??N86*lP8A0 zk)F!PS|9Lbo%=}R<8<{avYg4CoEY~Z?qs&oR@>3_a&U?))i18)UMN%M^GxtxR$!7f zhte~W()~YwhT=IfE;EJX-pL9{j% zrPed2YVgPJKliu!cSSaBjFER1`}QtLSjTgH5Zz8?R|d0PZyODod1X)jZKUNv7$$;x z1NvQ6_?5rA7f-B)5%y~KXBC_OEFC$XWr_cmd_3RiC3DlP(_LUZf2kM`?qkCqcPD#e zb8>oe1HP+F*FukOa;-P5JI3hqsqv|Up7+$_$>gr1R(JM4F~dW(cs>bAe8*gtG&|e! zR4&i^d%)v*6uQ*ioNBZ{EBBC*j%2-*QS33$e!}}X9j2S~k~3rDnI}*C^Wf~wPJ95j zc-YCIcqH%s6FhwzHR35(XRYJ0oJA|LR(&1Mb0i9VLXNT@_cmiy_Rh{>69$pDob&ZD z=;M9PV6hf!;ShevL3*8T+)e(C>%h6%)g$P^6urF1XLyoUe#}Z{XF)w)?^pW$fW{@} z#znbaU~h= znxbxRl)A~Uj4}Ix@N-zq#<5nY`?k?AyO0ON@(I{%foB&Q1Aj;B#NAJzozrM`R7Gtb-h%JLq$ECda+@3v-^35pVNV4y_?Z>8K}?J%GGcy%|jl;v78!}bI0bBvu-HUgJkXRJ%xTZ;&*g2O62_Q z%x-h%q8wb@C(TdFaF%q12mEC0`EH(pA1R!-30!=x?eMtw~};zLf*_hAL4 zV+R(H(yd{c3^hHqoTow+VgI(?PGJ4|z$B-zCEmO>$;v*k7krlb%bBQI0nIrFncwB) z)mEhbT{OELHRj;LbGWu|;rE|TVn2s*GuN{2v^C5I7y(wI$P_j-&#CP3W*4sG(cY-c z2e8jBz}{fby7y3YKdKlHk`Z?&n!OLmKhp2jv^hKN`k_T6uswP`O}A&^U(V6Vp8WRi zZYccjScSuJ`$eU1b}#edxu|?H34M;X<(W)kFy_NyBFb(=+m@s;xpg0a|6jqCJr;jA zqR#@=4y0fvjCUtRIXQO~*pj(2>#SAT>2p!&23RG&|2gGvWw-9d;lohqW+TRUoUEm< zN!t00-M<%R7pd2fgeHH;QWR_lq94(H9x3^YS{c7<>9HM-MuQU_GER#J7T>es`6w$~ zmmK88=*)21xpNeFed?CXXf-(*ZY=tmO1855rke6?d1`m#|0iai*>yJ-#*wnVg^;i}oc-$At@$NS`ANsL_9CTo$G zNJnO_S-TuV*TRVwO8tXRG_~lnf*R?&sTM1c(#fpJzm@*Tzixi_#Fts*aWB$Vf&EIv z$(i2!gQLv&^C>J#k?FtCk+%BSh%|XXzWI87bx}XFkL>47 zzRldL3$HwRdWS_>hsKG7dD-OK8U?q+>P&_4mE`C&R_AObZw75k zete!g-IACLRYKd#3z{ki=A`*(NNX(^eCchQ;VUWBK;j2g3P z-_JOdlZKNQJaHx6S>$7AL&m7}Y|#oMWAX=Q-8lP4lC2<__Ij`=8(4}9N#y{e&09v> zLs*9KV4aPN_ZvHYM~lPMO?KT&SMuN%pdfx1M+ zW~^u%wYEd6uVFLZ>jAX5(DyBLCh^)Aqv#wIT#CkvQF9k~bVtoMjihIStE!ftpj$^6 zvvacVOct<}`b}AeMIcC&?QMSD#v*n>i-;<^!`{&7MGg*zakQ1d7|MH+;IScqBkjF2ajrQ}5Br}Wk=fUVR zR~u7!+nZukjO-kKr@@&7k#@wQ>&5HigS_nC$OL2bd-6=It>0LaBX>R$0mF z4bQG5bqPJ11)?rmT#0wtq5K2uvXqP_3(f=>b_d_LN-xs-y~eHVa=6C5mT;d-hJPWU 
zb=-^BoZV#ixe~s%G0NSerk%Z)wP26`krLyMsn%+JzK76aj zTzy?jZYJ_SuEn(-)te9ZWE4EPknBj*c-A~mS7sVhP6XxY#=68C%pm_)f_JiZDv_dU zaLrTs?8?}Lm+Oo6TJj0T6;@*t?WjeMhU0%9{hUAsdxB^d`8pqt8`REIi?`HFMEpF| z%lzV5W5iN6XE6RHSN(*-KY9vvp5O`H52K#mjnOqRL0Z2DN24LgnWWFGeeEqqW5_tMz5B<1yD#K?(C-AL_3+#d^$N+|P` zc5+(S0b0nMZw6eC#*f?4cn9Oo6nBzS<`_JjXOvrH96F%jNlt=G1b++Hrqi7xa6Or2 zP6yGQa9M6_dY=z=6uM;nwG)c|rmf2OwvEv?r|V{3o1L0B!L%|f8UL>WJ~h`r8~nfSql?=(cI`)30EG)u^!;8XMBiHolGG|7rm5+(H{6a zkX2bmUfxmaEc`F8)#SB$nS5Quqy7cFSMzb619?s!T*a=uMW!n#GY%9(@c&i4UZAIw z;Pp?Cb~n22u2kkO^-*U4uHL1Wv3@u7`I557!Y}&^j-)3OS=Je3V=G*`U(0Whk@9qZ zwDR$>5{tjVz4}JLky2iCYCBY7Q$=3T(hgtcr$-?A%WA;=S}bQ=FF^RJ)P zsAen+uV2Ke5B!RB{nr=jq_v%SPRF^HNR@ghR9CHq-c?;`O}g{M_3wC+b^1HVY5b&j z`R0Aht5%ck2l-t&?JBeQsrWz7NHz(~-|Oip)@LKVyH4pgIG2^zC)CeOwks*Di>l>F zQ}&z8)JE28r;^cgcuRBL$w_l-2y*1>w3=GK~za1zuJF9`J$&ea+ zr>dJ7N?w6<$3K2=^qm;^%#+GUe5pieu=&@YzeB-_=uiQb^Et+6xVZThDGrvImz4QV z?#y$255(h!tVd+6J@`uH^L(H*gWX)e^j(^b<#%Zh!xrq{vqha+v~HuZ>~R+Tb2@M^ zK7YqPt}G;TH<(<(#-7LOwBZR1BaM;RoM)RXW;2vaELZlXWvAoz+G(lOeMPBzaPnRK zJcYM&jXtS)rF+TW_dcKPL3;TZyq_fbEwu50(PteSJ%Nq+$@n*1n_b;|3$~xomRq#l zj4U^#mm_Fea+YKz>_zq}=W<^Sl9So6nkbZWe`0l;(6`J4nvtxmmtW+5PCU&hzqRqC z2VN&~J-bOdu}kmZ@V8)1mXNCCel{phrN<}1yb;La)h63rPR7dmM#j-aEXOeAXR^Yr zShxcVYujAk@mX&N*(z<`qjaJMa|&WLbgQmp@<&%Ci7btro8D%f z+0UAtgLjbf^TBy9%Rdhk9ndXtKfUQ}cGD(eInR}|GMm-ahWgv!-znY)xc)WkIi;Y} zfAp{pMb0vQ+)7)r=9sA9#Fmfn|2um4lQuK%4uf@`axX9jo&|^Ny&uJmX0$a=VP9gY zh8O-zb5#3*tqGprqUu_`4gld?{K%P z$n5UBgQjQowv&>{v^S9?{^rgXBr>~auQrP01g_P5w!_?cK%22oqro>{D~XA21iC%7zDaBUaV3#5m(qx9 zP^>rnW}s&y<&*2|RSE-(&{u`!nJTnoO|+-e5mi?;62EdVIcbyefQT=dypkgd4bn( zuisJZ1MS>XK$bliXR)4{!G1@oZ`S87V4Xv%r^6>+c2}j3Cpp*u`J4P{ zq0GKU_|CA(I`lNu{HytSW2F;w{v@kD1y*0M4&TFhvH$V%qxDA^;o>nw?#|QdeXPx7 zEiA>0?5{n*-OP<2W@pBsc;ft%iRCk`zlWmF>gRs+%~^Ln)ycUF{{neWz05%4oDP=r zk(=W{Ul1e{#&ERC?0TL)Uxd|DaBMS z9AQMyUd!faR!`ZV)Ee*l0RLXcx3zRG>&_XYCa~&R+wF%gnE?!OzZOn!EcC8|u{3AI ztk%}^_;EtPvmUsYMJ_v_}_@8#9uuUKD(mdS~SXz_GM_eqh9)&H;qAyW~Agq 
zqek-5W}f<@k^K!Ec|ytf=cnLpV=W&F)_9XSud0vP%)dca8HeigHx5OyjqY~h18k%* z9nhq^mWLUKj@R0UD0DRXG{K{+3zXqrvQZ9zU!DbQU?n^9HL9R)T|GBp7xra$ZY4o) zuw9+pyOlhgPb&6RVryQ-o%*i^zAw<>@9-YzT6a{w)won1CdrL>1)9%CjRma4E@+tS zEp5sFv!w1WJuikuc2Fhbce2+#j-n^JekCuYmeKzga`Xl$8>3uTRyg@>)`00s_%-x9 z8C{#wrkt>t5psk6-k@o>vE?U$>~|8AX9eSFWFpW`c6EgQo^dS^EC-`|UmD&Q_r3w` zD({m(*TCy)Fl?odH+Vi^Z`=MXl$F6*&Q+kZ1YTm*ur z(XcaW_of3meJok7C*fz#0R0DPh@ILN|FWu_9PxY8v-opo>nUFHk)TZW$3)+3gDYi) zC0hv6>hx)ZHWHQ7ThHt9btKI_jRYnG%9jQAy5dDL@nwJD<9_G#+C(00fqGeq{kdqp z96Dx}u#I;GqgBq}sZn5@HSFH%Pg3_w_fLa$1!KY?v~nAG4ne^$>EdkA<|Lz>I`kw? zBws@IWelO|*?U^1_upI{r~hO(oZ{-d0+;t-@)b<7KKv1Ejv|XU6kKetwRrCp$#fSj zlukwsCBmTt)Y$y0Qv6CxPIx{;f2o_;k|sEQ1bG?`o=8cu;a*L~lXdbdttQH4E{b2q z{vANaZYWx61IB|34>u<}R6)J$Yse~lsmJX=nyB-{+LYI0t}gWJd*AV!vp+UIb8^|& zEMzJB2ZHe?Ev*6B5dZ$_J9{c`B*)1d`6cKYxbhbe$Lr6Twhh5qrls$Bwb?bB8N?=7 z4n+MjluIO4uD*fJ!8gD1fAcQUJeNzH%Ut)i!<&W0-IUn;nqSoFT2M7{E;-%yIR7Gl zm1#wz0;aM8%bX%SRn7AHP5tDFsfbQfmC34p+u~|@G|v33Tv2m#KbyaE210V;Z~mW1 znd^)3;u(5&H_6KhKJ7_nP4FfHVkxVi-Ow4sr_;($Xh-5Ou4fez#l00eOhnPysFK}V zIY)Y!E6ZJPNgJLZhwJ=$Qtka&^HD5r&JIf+xkPa0dFc=IZWue4?1l^J?l2biH}*KW znkSIkjO>>f^O9fUck*+Kk?IMSsfW?MuQJ(_nLHk=+2L)C3wfGdk9=kSXA9ET4_?Xk zwFk+cqE2?xO*1;>x!`&#Id-MPIj6UilE1lowS#HIgyHFT|Hn3|42~oiidZw{-+zQrs+Fz z+=uHY@hF*-?TunNpZ7(-_9~>N4cf+!nCJS1uxm#0lB4-B{|*Gt4DyxPOd>TuMuidl zmh61UlZIdQ+*&I+1L1a3l`NvINzh&}J{RXY!tF3Hwp4aCs$5`%Ue6-T1W|I`v;p%h zl>ZoBIeqUC_0ESu>0IHOsG8>#SV~t7IkcGv5wW2-!@L&W!jnu+0^lAu} zFW9tXx$X(4_{XQZJKe~*38XVo^&i?g0OZM>naJqf{QJ;om&rmmpS!p{jY*kDI@DRFg~rH`>hhZLWibk zZ7=lyk|ZY5IXf_N>h{jY#O|p31zG&S2(lP_+oI+GTCrMd-N9cO7uUh|ZoOyy{Uf?^ zxc8By`w{dznpg1!Jeu*<>XXVb_>t^lYe0}VkLtYrChlY$Y^j%7`n`{K?SMxe*q48i zr-^LAJL(;4gt^A+F6HXcsR???)2MjTP2szvkvL~$KS;V_8I~J+y1}l7Ym=2sKJP?} zB{$E}aK8eDa%%P%)Jt64C+t90=O^iFBwLzn;b+6Ox!1X#P$d#z54esXl{cX5k!(($ z%uJv^we(qAoy3=S#oeapR2|jxjI_G`YWn;K%y;CUB}>}zq+%FfZ@w1VfOr%d^y9~# z3-^oo#J?zWGb;20;gf85zB|L}5t6VqYq~r9_C@uFjeu3Pz9;zZRpVeIRq{rjjE)D< z;Hofq2bX*CWLBVK9W<&>8xlK|XtzOrHB<9=(DdRFbl30g?Y@QU#IPw 
zdQMF4;i!^5FJGcjZ2ku{t|gqClFQtUcJ?eZz6!0`9*!;WqB8nqj?@ZW`hc^8|1;2M z7xJHV^gHn&nMHH9Ogns^j$&WYiJXVg#i*71EbZB`oW=DZ%4elNKIBg*^%EL}Dl@l6aLzYkvp4?grmJDA5>SwxOrVd9jdBGM%P0Ab(v8YLw%7#Oq5g zk$5)cm5o0&7Nwu%W5ln_Udv-#{fl1qK=;}p%nIpT-_PUwWvKZi-fv!K%<68|FWa)w zuPImZE6Z#3BNi!nv(_4K{>CCc%nr@v{k(}9yZG*<%-wL`SMTo_0g{mv%)#c zFVQ=@tKS8m%e$H!gPEPIfp7fNW^CIqWwI->3d+?h#Iffb*{D!P`0%CewhD)w_*+PwPNOU zClyX$J=(LqD`~{FXn!q!onc%`HmE!FnW&S++PluboRNMHDUBz3h<0+O@;jveZ1;CG zMka!05LzCCH*>(c8|}-wZ{j1~r(1smP2#H0Vj~B$u&ZIw%xki7z89OHn2vrZ6$@}3 zO+2djb-jMFQk>E))vTx0@7desu4FG!E#vX2H0npCma*sA2X`M?>g9f(@h6reSqu}K zk$FjWOKpXJ^}(HJkhwgjoB_EMozq?$wtF^AcGTkeWHb?+nYDFixpzjz#q=oIBXVy2 znQA`C4!o$Xtg+2xAMa4}WFAL6z~)Ni%-du;PhO6GEY}31efH&k3y&j7*4t>`)BE7U z6L?*nU5atxXw>=)CnkdM9Cl=q5o-!sCGY*O#?UdWTvKDo0NAdB$*J&2yv-ov#R>Wv zfD$i(tq!<8F{WL|;_Qp(t?+*a-Xv?vI#wbc^Eg=LX~9c8ulab_1Js}EFPR;$MZ?xc znjvK2C2b}?JaH=FQccwTRgazcZA;jR_u0eW@h0d09tq|L!It=uJ^jBO*Q%43WT(GW zkICD4BEMiFNl0GJtfxO`49z;|4>V_~8Dg?K#*4}mmS45^BvT%1JgXiN; zqEfCNuo;452h!s8Ajk4Yd;oZdz4CCZ(mx_7*E&1d;s~_ z%D;EWdVf~yI(^?yzFWd!JsPg3d;7C&?Ts$gj8cQuugeY&@|n?f3{A@Fay=T*f_7J+ z^NANv9MqvK#O^R#Lh5qnZ%g)Wnf5bZNY0UE>iwObd`U-hLRKQxA1Ea2Mtz;ZF6H!~ z*6uY2Lk+Z#ul))O*Bo`TS3c2$lfg6sy;d4w_l9$_(AHD`;zFA8R4;oRlNCFwo4KDn z{C%~M`0r%Qyo)yGx!pb}krlryae5Lep2V&UH#RqeQ+*Ul&ir1kjwKfpwRJqama#4$ zmdU(kF2FC+)?=2?nymHb%%YraloPNLIljBGAkQUFr;X?Nz0gQJNzKf? 
z>KWNvz;-nmzZK?tlkvIi;3HaWLl2Ufbu#}ed5sb|xEhw1skL`;Wr$LBL9#+Gc`8uK zu~QlS7K5Zatm4-skIkVZCs|f=I%~2oCa-lV-nAlm_=tZ~6TUE}$J z!9p1kQ-vT9GF3t1)Zjk?#9 zn+sr>b^o?{dcim~6(9SdN_jSBOaGEDy$KB*3e$LW6-dXMg@k8UZ=Ph7@ne(0C0F-k zvleUV6E-lXQY8=F6uo3W^zOKoXq6g#>8Z-@>iNRojkAgRR`|m4uq4clC|8{lLpZ?ve#gv)GqUVWHL$oAH!;Wqs7tmBpKefqqE7}cPz_w9xZF5ZZJDmOK%z@lT-b3{T`@vPP$1J z;!Q9<1e6Q(x*B(Dv8*|dw5f4AJ3lYwp~PCopUOFed0sF=OM9ZiMe0pPfzE7WFT6`{ zf#z8wQ?g7fW!F1tHRr4*MrJ=)u0!W!?mJq))nQd%%TIu7HR;T3VIr^QW|ICkIFfBU zdFN(<^!oyemB!+=`ncV9PH0Sqh^)0w*4kLqydB3{@X2ys;IAl{tZR?*GO~m5aXlr^ zc6R=s=uT_p?t#fBy#4y92}bncJf@y(c4FnTi!`w~O_liB_wOmadOfzzn{1#tmeO58f2HMQ3 zc4c$sXzfJ0Hc@-Y#J?XKm@}QuB#ZsL`jVpe)z1#SwMMX<6ZHV-4%b>_=5e17qTa4B z&Xbb6l=&R>&moOD(KfjnM)=MSv>E8q5zeEu^&uIZ=Dh?~jns&2CT=gjP8+@KO!hB9 z*+uFPLY)IavYM3Vl+5|$Z6jQAC9zy9{2D>duLD`UhQwfXr*Ff^`Wx==jBC%}`gQbh zIes0ejXyP z=4pt#4=FziOx<0XgAR>JO#{8`K*rXSn^Rbo%mEtVcy(4IEBwd8sSybO&g$hM_U-mrKF;*nIG&_)&(DDV+dnX^Kvb%%mW%BtwuAO8%U(DLw45qAX zj@8QxWbZLhKEYLXit zIoUP0McXF$m2<9ou@;kzG5hlW#-i^Uen1W5SF%xO&T_o|`tc&OKjRdmLZXn8ljl!1P!Q*aYL!Ndu#>uRaXGiZ(s4xJ%R`OV`U~%(|q@nh5Hc_(f+(b*N6c`^$8$+qn z@cmVIOoR7FS~{0ax>=hqpiwQY*41M2y(j}B!NYs4gRu(>AMXe<6wvwL9w4RI~wftU2 zyBmP;PUG*PXqpi%c}QA$CyR4uaQ)2)+}&u|m^9bbLRNgvEWDsZ3*S%2M$+DC-qZ17 z9~f+bOL@w<6fNIn9j>9riKp8D&q3(EK#vP>>;QVbi3Cj2XEGL-o_P$7n6On?-UfuGlQ%#W;f)T zKdvNtI6m4*DDjQ&&K}xn3Tnhj?A=*T|x`CrMaU?R>;PcnF{R{bg8|W>0x?GD6#BO6BeiEPSX~dgYYR=v)V% zS>cXE<)omt%Krr={>J`4#1b5hTQ9&owRUI0Dv-+Oz#p7j827R^pS{Uh%T7E^U7mf; zO3i9^1vpNli&@Pc3d4V+S~NKsVcNoLYqb)|lX+e{*D|vSm&<%sWr14x-ux;>*>C>0 z`8%?hIepfCl7ThXx7A+O{c6*^NL3RYOO$+deV5{5M*Dvh&3_Ot5(n7@=KC5svVxun zsV&t@-N4<}XN#i6tf!~Fa>|t?HvjWQtZxl=QP5zaXo(ZVAf^1SNp=U zHQ*yBt4j;LUchoL(%YVJngq^jtX?y)=1JmkHg+8z%>-w%O1?$|UjacflTD@xIju3) z^B8rKgXI;JzQ^k@qebGWd(n&Jux`(DOGdZbz%jzOdMS;bg@V&aUSedrfweEl;|(t0 zcQt@(@-pUm{fT^vDa!1p)R}61uZ0h_dXF)(HF!pVCmz##Aovs{=NJd_q~HU%J&4bV zaPJAjoZR;XShE8%ncaWan?5#2-UT_byi7F2$*#eh*hOD^ERe zY>F}JWf(QoVxEb-#3=C6ZS>lJYcIjED@=0oVorC>{?XP2eFoFkpTL}1-a4ANjvqGDX#b#k+r#f$ 
z7$l2!Eo~0Ri`5`Z#+Mm>|FfXSYCQSc{hW)s3B@mh_vSo}e*?#h=+Tpxa+|i3&*fFv zXAi}JZ0%Ck>NQdkj0V%+PsnI?r>-G`v)S=W)I6vd^OC1;In7J9{VsGQ85NUf`#O?3 zOMeY%+oC$rNc zy+`5?M&0d|e~WZZ0{60F7JHW3Imtepc?|bDq3q%AFTt7lTF40iS+~6pq^(f6C5$q+ zNZ$O+md0wYK91$A+#00%UUW$&qn5^(?B%!sf2XV66_&L@F%V5Rq5KSzoc!s@(zuO# z`?;1AjStXH_SYt2xs>^B1lV3HsGPmRIe+R~nz$2r3x1Kt=uz^%Co_3}b+dz}qqaW= z%PBPCKI7mQYs;$&UT}fYBmUfv z^x$MT=N#MEuiLz1TPB03%&)iTc@37aE%-hH@i-FktC{9;tXShh&-%72n z{Gh}KZ1IWuI+X++f5@x2K#5~ums2Y1qvJ}YZZry?tZa6X z45s5X3-A0s_Y&KGI&R&^_PnmY?1`O#x<|nOUocLj{&?Ss0DHfnCppP@)PM$F zkNX4klhYT@BK42sR6B4k(_dEa_vUF;K=t#D08i`T5i)%$_^XoMeaXTL%EX6%gA6SO z&!hfLLf>Sb{s44=d5#f%9XYAwN-``~B_(ww+-Nji z;`c4?AECu~MD@@%Cz0Mo3mW2LGNM-}+nK3n-nkJ4zJWyrstp&%a zV9R;7IYoRo{pKvNZ$Oz-Y~EG&D>iF1Zmx&d2WlUHMtgz$S1mq_XW6~}FbnVm84a|v zS+>Br!ANzXepisAkaZ?co|8FuzQJl~Zq%3hwgKy(88p4>@^lXfaD z>v;}~HyKq%=qr0~?giO>g)j7qc6-ycIymw@=^L)rx9*;abIC(;ISZORp|$b*Wf<<~ zT2A?Dz>_=@)k?3DpB&F4dG6X4SL4~nkI(92VxN=SF6T;?Jg(E>oT!JU1z*eSw<2k4 zr-%6Zeb6@=TTzXgWPK-Pr_$Cp3m%pFOf*hn-Y1jt{dpgWV>l89=aQQF;C~+Ot>Jno zk2#SzU!rYCQrR6vE+f&2b=!|sKUA;DN&b`ac~(~mB}>l&i{XquH>>^qs<}UXo)6W3+xY3e;p6!CZ%prKD$>h zAZafc+13@UeL~xgCV9_mVKl3}k;P92;`R8G9is2Lz9k(T<<3rI>>|>%3z{XzWX?5v zgoUm|PBULRT2F}@`q?tf(K1 zUdgz)h0)+qR;>@epp};QN0-aAnf2?O>r@`TWwfC+ISo&KRAY3}$1E1QGkHv%tjZtm zGjS@1(b&upuB0jPi}T9<*ebpgJ2p~_iMl`2=sF5j9|uMDiMIWtY^lb7KTH1q%YWQ0 z#oTYc+m)o&)Lt^zXJ_p{a4YdErM7D7v$TdE*{;gc|3v1qcV;H6Q!~$rwnDR9*_jL_ zm(u38Gq29?#@;&?(3kEU_eY7u<7BP=IMC;bd3m4b;oznCJdC$gm3Cjk;@?&Hku^~? 
zapPanl3(3lOEyLtSuRnoKRSQHGvASA*g%StD=Ir-t|rTaK$)lht4UO{8e|nX>+`eq z_^k0|MNy_1TE0NC#waz(c$!t*dyP@2YqOtUi^=9ZH8-)C(e*_v%}#jw9XK9#Eo;)V zaql4gxOh=tOyGT~d6VY$A`rq^F-lQYD!u&g>dKejQ)_2x-YNB_jnGAES zVck>7|Insopt{|aw^^KIufE2JJ_y%t_s$7Qc~U)7iw$7hj7+QpLZ zc!|67z?z%^x4=1P&$fi+ViaqkWJmNE1fo^sbR<~DYPDN23(R@iXQTdIRNyYdPDhZ^7;9>UX8vzo6|)>`YC4 zW>$9)xNlYSN@bHBG-m~L0`n>o6&W5$V(xaOJv?iJ`Tnguo>1&JJ552~|ZmrT*muzGLd&`dl`R7`F5IpgJC zU|dGevTH5&cs%~cw>%RBeew4!a*^j&=d*p?d5PB>iK=L0n&0PW?_9F|FEHnn`-e&I zN+V1e3s%j@elBe}AH`EHnPtO;7vT<;IAg^tZkK zmV;rmaq+=FG&k8eM#1+dZJv$iu~6}ecSoBwuo|P+J8-Qv$|s^NyA6+knt>|^%hJ)FpyTvgE^`H&J(dlJt7f^(h8=R`Oxqxs3A*%&6R zw9<+8KS0xeAT`PIF_VO4pHH~97<^Zgp_=Yzr8be$eLPd{!$%7a z$0oi8%Zj9=Jo|oxG2#f;u&egxvxQHh;%H;~SUji*(pvr}gZ1By`?vTvixuud5>6~U zfH(abh{o;Br!I3hyVJ_Mmi?&7EWL<5%vhIb!sE1YG0$TvnBRtNcFra9=^C{Mq2<+j zTFyVIM?aD`{uHk&q@t1${#|@Z2BmWPZ%#JW;$_Y^PG+Dy**=fN_n_-vxte&S2|S)_ zX>XohB(|>_{i%q~Rg^o{zpQS@Q_NaZ4}Hha@2&Q$@XwsG2L7%ynr3F3ObFBPvNnx~ zpV$ls=D7Y9jZPM>oDO^jm=f!qwWIyPl{`m}xY`nY-{EElEpDmgsd%td?KWiX2mcSH z8NJZ64}25#)Sdh!-_im6twaZ(hnnTpdIfX~N##yte+Kx!*TZb>ZilJ^@x3x#%Btd| zg4*%+;>DL%7)vXPiC5|4%GE`Ql0TB1)tmPl)?gD#tLKRmD36Xi`R;>i+u~3IkR=wm zl>6{FT2T%}yYuNvp8q+dD>JilaJYg*)zv~>6v>I%&zOTWC27gWbS_H1jbEpt%{QPP zPA6_a{0ebX9q#z{2zr);%2@xy6$3J_*4(y^W&?!vXGrC@oroF)o{HM*q68# ze>s`#OZj5*#5?i*$;@>KNZXQ>exy4)l-DTvVbS)&qOGiim(j^`N<|u8Hi}Ly?zCV< z@-I4iwf3^+ld`Qq9j(j`kTNt)#Lz=1_Xc_ZthXk!dK-4|10^n{Ws}I&Fxr2Kk?B-+WIru5q9xnn zZ=S{WWWhFh4^wj;`d>{~ayCOt(w+$T$8n^C^4Fs86mt148}uVD;8!#0{q*!Wtg|Yf z=;G|D+L`XopkHH+HCq`apW@x+%#Y?gkDlbQFG%CvEM|E|gDx|Q%mR9-^$NR}728{j zvG{LBqJ505@rRRR=`FUdzSqw1{}wFYg5)_g&aS>j{@+fwrmLItKC+`N zXDH@8o%h)K@t}X2NLlV-`yq< zETya2o7$gz&d_eMk<2#M)Q9UFo=54N`0OXh8d*;E%c^BB^vW}jw# zYNMy=sG8lOiD5a0q>gv3J6KjJpFN~Gr*#*$BRdf`fI2ZKZ_=Dw`52Sc*u)}VOTK!5 zZ0~|Xv+2rkHX{2q6A|?~ThmjmoO`oLt4*}Af%kU}E_Bw%Oz%X0Cu7YicpFLo2sZ7# zhq7Y(Dc1q#8tE^w&FSe|?H`Xfqtv~h{4Pg_P4i zCs|i+;)}n6Ldo&+J_sHzCO(CY9NgbIq4`L;F^+`?u?})O`pXGr@K(Zrn;* z5{vsLTY5H5{2S#CS9%S)T%pf(h4-|jcB=ZH{hXD&t^xmQ@FzwnvH4jo&-0{Y<(UcJ 
zU2$V4*FPaG{m`zGett5dw=v38WZx?k-2IDs`{VDSaLrk#*=aeX7=OB;H7U?S?l`lH8nh|C#cS z>*YW#PuEIvNw&hvlkqtgHD_Av>-RBe^c4)pqTqSTEjQ|PDXe>E?d;5MS0#@*t!EzX z7>-Jxs+)Cz@*tRDRQj1+y_-e6UW+Z1iB)@%bX*04>{ZGW?`CwUJWh6k!5S3Js$G1j zoE?x{-&wiN8hSEx%tzHkb);0XHok&_7vu7DGW~ZFa4-+CzdomtoOokZQ8_E#AGn)w zdK>hM=URGC_1zO~O1?p&rf<&OTV7cch<@iahsBvX_dU~_$h5+6+h;qVs&(rF?O|Um2st}@2~Lhc(kev=47P* z-nGVPaFZ*S;c8->oA~`z;ZuF7*GSnp#)__F_6NN9ik{WcdJW_LC=_}goU4@B-IcNW zjVIfbm8gWqy;<9iq^1&mYOO>WZvBc@@z4@akt=y#e=!U;`aRWmsPZ#Pl>FH)G;S)6 zl|z{_uum`KqZG~2+toaGsYa(S)JJ07JAtA+7#6}Rdm8GZ(D``K6yI{vQ1C%y8O84)BiZ@dj$S0%a1Zca&Ym<@rlpqS_pWCZ%lUl>ol4I5ax^f$ zXEXg|B|KTK6LFrk>x;ZEBmsGH@mKm#=ISQzlBf9>x_TKNWRTJby+%M>|VtbAz= zy^?$Jp%XVBSpLsf;LOv`|MPn3e;d~`qua8$|0JrVWM=4z%gM_7UA{}t5XxLn>|A!6 zA63Y4IWq7qNE2gsC!PMMaUo9;Fpql}C53WzRO!Jc`?w@M>abekj)X5>cD%ZHarlk36g;k9Xq17-d?b{HtVOHG44= zjlQ6@Q`pyC@Nt83bBzOesyr9JBTb3yeM_w-XdTg@ONNMc zM%9j_^cJu+_b+iRZDEpD63Z8h_Ku#CkM=^+In215%rLW*$*NXQR{M3B)-w9P2<9`u zJ`3cp7M|2SV6Kmjby(P(y7MYtz7Eo%t|v}-hO$Fh^Y8RB9d5~8GRKJUN#R3HM9<`- z`HBrq9+2cQ+sWq=HvAb>ScTRDl$d}6`_j)7Q2ETl`@B;9cyTLStIl3_<|&@T_dCJs zS9U8SL(W>7NaoH{t~qMOZx{>zl{EWPJ=7kRdJabs0)%(!! zpGJo~2|a~(oBWB{C!00D#Nf}QLmjk~Gd^nKb8O+eYJUi-#Lb5~=Nff7>wh>&%KLNt zieG*sJG&Y^#=$acL654FjM4MKn+TwIrSUjEq?PBOee(3j&pa2uGB4OdZ{iw^=#Sw1 zfwoIrUEy6?xPC+||?xwB$Bovo@%$}jT! z4w|rB*&gWmxc>8`K4+NkO;*2GuP)h64uB^?ax9+5N4%D7T#U}gYx5vdG#{*M-FY3| zN1#gNeiUfV2G2RP@dT@{9rb*db|$gayU~VgNoGA9Xo_Z6YHK3tXp9ou=%t1FzpxY! 
zqIGqWm3^yw(9?Te&;H=U_4bK!$sC_Ji$vIG-+y?_jo;Buh+zBB!Z%mwsNlLdt^6HF6t}W zk7jR0ieAuL_I1uDn|G;ut~zt{a}n?MaJVLFY=!zG@$7!GJC3&BtnQY$6%Y4K@_vWw z`{QVM_9!eaAoKTO`3z(i4x9J=kC6%;_SVo zzqNYR^w0{VtH{a>UgRRzmeAnO)JztF{xC_VRn z`Yoj=lCkA@lz8XtV@*cB<|uYJNlEtP?6LcvzggX#x?p*k{mGMw8LVS6m=85poP>kR zw3N(dyVJHkT`i-@eTq4C{G&3|%DVT#C|0o;O>=tc0`QJ)R!8tjH18OIOB!)?Ho z(Kx%ia&pf;=#=?-&YDlo_HFdOf^6h@)F?LP0HbN*u(Pu+S)#6DE%z+SwxCNtqjNGD zZ40LZ@a7gJ#JE4XAGt%lcp(%gu|?t^0SPqWf^KP<9-*+9E5YHd$; zYd`w>5n0_5huapuP-16$!z;Nov&y@d{$Iq?M7}3?TSoponU4Rp=8v&Exj&QT;w~_3 z4cao?DftwUx^mjD>+`QZ?}kSx{*nLjbP`z{nA>VKu?nSF+tG!GI9luV@Zd7EXj_!u zuc((;f&FnkF|avnIcu!t*rn`dxtYdf{kbaKYU(F@d~-fpo`+OM+nngR1?kEOnH|Z` z5LY*`Cv{zoR@G()vNqa=gpH+H&!hEi_N|L zL1QYI*&aq~#*meNgZv>^enpLg=uuA5X#tbWa1MZdZM`NMKAf(=zP>@ua@tMSJ|n#y zY4BK<;T9Gn5hdHuzfr70W+++t-ir_P4vL*aCXUj2<}ejeB{|(w=3`KoXN~K)lii{5 z0Z%I^xC$+k1@L*-vNv!tpCtERF8)vT`7;@8N~SX|^}&Vb)!Yg;Q$UlGH518^`@<2`i7@xgMAj(~-~r zzH)*VfO8h^gf(sRC(ZS82fY4^M_H@aWV z7?_jam(%J8(YO}8R?xbfR6L5$G>pC-g3fms#iy`9{rx|a9A_nN1KSnf>K0aGs5VxU zlMC>9laV7)x>;Ra58_2E-5gX}&+c`>tHceTp@pX4PDI3=EM;=HB^OC{-?!G%7*=ph zp`CetdY3kGw)4Kgw(d3*=vlgC- zbe)L~OGtl1(6z$5LqMOid%DrA_C}sD^y@(-vj_D$FmC5g8``mnHYBd(4lQK7-5c+7 z+HNwoC8G2h{GW+3ebHw!OscO?vIkK#L2H#FF62rHo{Cp_m)M?v-Ohm!p9gb9)M#`WIsZ?D~)XD zYwsHTAK{gKl*w!JI^KVdTFcm%lvv=lrIxLHMD*Z(%2YBM}Q& zss-k}N0Rog$}KZeRY#AU9&{->Tw&WWHm;=nz|s&;684mrLEef}3+cLKNNnEnmCm6D}}Qks!Us%eps zh>0npqO=g9iBd_TY?I}w38f?|N|uCFwx}p;QkE7ZguyhHFqWaAv47t0@4BaV=Kn6A z`*T0fbzj%H9Q%1(=XspRIT|isvdTG?=@5JgJtw>SPxliw+!xI6`?~<{+u#@PERh&F z`zEU;bLq|v;49HAd(@vH1CJLJt%1%n^fXlYH^A~dd2NM^9Db{% z>_Crx-izKZ;NYL}DcOT_D*BsryfcdvANX4k=G^tn1}CBI&n3>;_AhP|C$Jk ztMz(=F>e^EbpvTjZH~v`3FNRQs`LiMJqU206mQ&1Vwc1rk zQqCva*BeMOhg~FdTB4HFXZ5b;&Ya3Dyf>dnJq1hLlbu<-54v1#Iilpq^6M z^)VNpKGkxv#b>_vc39-SdzdyRX!%p+e`O0^hv(h+`y(s4r}>j)h~10Fb|XCE>+gpj zJz1aRZEJwjc}M8z|9IGRrWd92DVveD-C6#(yIyWYZ$M-F)8LA@cc)Tq+;459sz{Qy z;p{4Po70dn?!Jl>PojT&TuCHF&Xcb1zR8XsaXJo4A9#A9O9A?EPGk#y8%Z&3Q@x 
zr_;;E#+2+}eW`dm_{8(|Xi*!s8ILyUsRE6$DPP;=665FyX!qoOk?UkTTr(_4A4RxIr5W_Ob-q=Fu(c~Ub$HT}wNCUNgR_i8Dt7td!k~!#e^cPNMr%iI)?Fzm} z(eel|3@oJPZ|wOW*_F(s_n;GR>8F9-^RAaSv(!#BL-sI~!QmxT${XOe;>qRgOm^7j zlR5C5B~2Cg(WG%UJO83%lvLIf7D#5LEH~V2LkhZ)p=DlEL@l7tm z26SK=naR4q3R;$oQjz4u`0qyh18ariEi1nD|MaF`{demrlbEi#sFJtVImP`7H2ND9 zdJRuUqxEiV$G7A$a}cBbjUW6xU#Aoy7*F|A5}PwF?lU5u#(u`%JsejO!?>6BTcddL zXXTWQF(hI#ec!}mItix}d9)SBhOs_3wMJcUh6=7+>XWWU-7<$vXsWQS&A2a;2F6zN}%FQ=;6jzwNXz8kRw z$;yzJeIBbGFE2BfU!l+%KFm{kN%ro_ zr13Ep=VJ7l#J}IgRr13PhUw$V9qQ_QcIIZ%cehrOeIa|^+JW^JFx}#LBQUkn*Cl@6 z?E4Jp64{a6Y6Cq>4A)=a+})GCK)4?`cJh3aJ0EECQ}jLrw#mPeXn^PVKP^Z~azJOc zD^V>6kc1Yzk9zR=J-HeT&!54NdGfc>x}{z&hxaCB2C0?U&d*@-7`zf$I$4b!X!Rw$ z-^~7ORQ^6x*n^$<0p(sngP|J=963$-hH!3!!jGd|a$RgyKK}N->`rP8g~vS9 z$$4~b(XF{Bzo$PR!X(k1S&PU?A=}8*HKged62HOq5>V!xp}y)hq6Y_&u?I=&?I2H- z#|B!n7%dW$vJSl_u$hkBl>uCHi9NuHZnWh5FR<{yCfQGRx$$bmPe`u$erO#J z>Pa$qpL$)$YbQ?+=5r)BYW4%x_w0GU>cH#<^vv4QN^s7E$B){}`p!0Eb5mGnhW!)Q z4T?7JVNV;vat(<}#_XLuUuCQssNJQoOV*FCwbGAO*;o71S@4Tk*#_i&KW(;RtDn$f zWqKWZk$0au_%Pf^_zODJMu#g<2#GW6W5=%EiOY+*Rom*ZA${gQ7J$?8+WYz{@t; z{StqUOL2{oojy;S|%gAAMy=1rEK(=BNth<2eCK7lQ znb<)7CX%yPj0B&e>-XfY4=JoqKG)Ke?9q4(1{>%@FV}O~sDnV8+}L{<2VeB74r_5E zdgs)`e(Xr{JZH?ULt=h10w-g|IqGCxJ(*sTk0#j;x}bVi?Q<@1Rx2_~wM>aO)a`}m z%Sgwy;7V-Gel)8-$!?Ca(beoA>qJ}Ef@F8UYpb)3XY!Ws2GX5OAjvXZ-TlniZJ(t( znY~R^?=AZ2jtV#Wojq{bGZh~pkrrv=RraOiA(Yp1A`CLNmyx$|a4Y@J2}C259iT>K zwr!|>=HPqQwl7dWJ@3M=X{hyNU3kmy)+iFcdIft~X6(z`%!;Dt`0Z0*7%3~KzZbMz zMXkS*;PvQwf_^KZ+88jGv8Tzn)KPB-YvWm5ZA|JDyS15JZKB2&Qm{hpoD94QhhM_= zabzPWw*8qdO{HTEaq&%1G%mFFINT1`W@tHY07FQ4c@n$Oz4&Iy#1YT_Mb|m^H~4KK zGE~a>N@d!-MhoHKN2qtEJIQ~Snfp%shvh6%sja<0ey#Qs@sKrw)NBft zKa!lCVNm*=_}_Bg2&drqX0UxqH&%O|-FlJFl0REc-Oa8e2a(OvRpcXU>G@mwY@cB+ z{n{SeUd7XRyRE=@4lR#28gDnH5;4&O6_a&5r}s<+^=uG4M~kk3S6%nYSfCXwTSxk{ z2RbD8Y$Mbki4O4?lO3eAn{ap6o$)Pit2z0mqu>AeBui!9GRt{Zx*J@wXLk?0JCJT} zF@D6a=ts}Huq-+I{Au36Cw^zeyQk4;D_hu#MxO8YOq#w->6~GGc_9(~J$<{7kE@Lz zud$K?egDWZ_r{Hf@Mr^!5;b@w+>%oy^P=CQ_l0QRix2WUBg(rh-5snaIyv+Pvq99K6`y0@{ 
z>K*$ex{`b@*{68B-#zj2K;z*jzDxZ|c8ROe=R%Y#WzqYTHTjSn57BQMwEhD*N;L7s z;2h18^dfo5L6KP0VJze^uzX5Ozmld}u)7ddl2NKNPGx`Kp17Y~h{-`QUfVyiXFnDm zab|W8#pU?Z8`aMlE{7JFWVSxuYWCzMzr<#E%+~AP@O}?99|XrnG|Y_QC;FHS;{E-~ zI9E|$i9lRHE?y|4q>nbTGSC9-vyGm~(wNwRM2-B8MQMwJ@wU=iO?LAna`G!k62Xz& z+u6O}22YYNqa6uIWN+dDW}!*4U@Y(~S)_9^eN|pXe9P?D%wD~m|C3#zhZ%9Qo)KU3 z4AvvrLUMLtcFG^DzwF@4zOjo)!5e;mi0^H+k<-58?Nq~s=3q}gfXqT|(AS|#CN4JT zkuOzdrt!K1z0c|O$t!s){KvAfAK>jOe!`wS&HKoE)~Qbcb#iUQbITsoAH2^EW831H zXXY&@iVuMM`RdFkAq&V<;y736B^enqx0h_X3-L00Uz>qzXHt^6%`a$GBQ$svcBi83 z7wROZ?~9TLc70bEABzd+Ps8h);!Mh?{O+SjrJ#TiOc$n-`(`~8h))O zY4`Ab29V}>Lo1DFzk^5C)e|N9rM^B**6xvyNO}iW}2#X|whdH{25U zl09G;sLoItJ6)j zp3KUzD$Qt3L!X?uU5Soo$Lq1IV<%MF7gWjKUC)#Fo1I9(SoJom`68^3ASGSUrKj;c zu?}yN*QO*mCw@%P%2x823~jF&=dVTM+hN}oB@cEl=V>PIw$>I=B+57zt9Jh^VS=<8D7Gs$UU9(utlv8LmF6T6Xo=xsdDK8DA5 zO~;VRVOn^cbe(M+m`AfGlh(zq=4)dukL^MnTtQ2}WgQYNJQPkRu)-tJDjA}0*TRo< zVoPU5IbX;F6pcFZw-K`QNEo28P5}m*&<+qQ=u; z&Q93mLd>qi%)14%a!LeB7?z>WjechZU`s|}&lmVxhxF&Xz`U{EfNqDm^Q`)b8XiU3N9yIT z?q^T&Hz3Fwe`|969BnU8|8K^bbB*X1YBT3fCB`l}<$96f#{T9M%ct3sHd^ZGdG3Pk2Vj z$$pkmWV5?Dg`+2a4k0_Ake#%-tNvHeiIy=PHKq<*#V6{0#P}m zbfR+hCbik;7SDKsyVX1&;D2c*G%>()Xms*yXLs(!sF6${fg#xllU+K|237oj2~}76 zAKTCjr)Fv^>uMFWoOg@={%n`h(!B%!$Fu*Gs|Tt9|9-}nvoQ~i8Ss^8r!pVi4=Mmx0Yyet?}drJi3;gbRuh`Sfe*s zgRCrF%!cKJ!NXB3-oyFI+=g3+`~SMp=1SvEFH&;7t4~PeYToQbt-e6YkK;!SSEGI5 zH^+f~JMoSsmZ@0q;W{ZKq7FC0`zdi=@R_@C_kIfv*VTC<6?wbxqK-^bxd zW|QZE`6gb>??JGTf0*-NlCv~tA|3*V?9t0!+hm*Wsg>?%(VZVtU-?+{M5VN36Go%L z31Ev~vV|<>Os^AImYf&<6fC;K>;t1i-p_NYd13?eHoBF5y^C^p(6H|8%~+mPPL9r* zhwBQDBhO#Azr8UOe{{YYp|G4HCiX`nmUvw6m>r(o3t z-48_DM3QdQ$`-b6D6b*0&F9mwQWQtd+PRWk-9oRElm9fhKA>Js>)vQCqCZKT;i?Z$ zbu!3?q53NHONO|tjki@lCuLksem@0W-nG8uM_&koM<8y(%DB z4fesXUQ0TY|DZbyR#(k~+}aI8n2!u5fi|en3iUADly9y z{OSje%(I@M#2OYkzVUqhZD1#kEIh$k?l$c@--zCrOyq5DHL7Hv*=uMLnY@tQ-Ic!g zF&17zf^LP+aVS+Cg-=sAySb|9yD@3%MRIn>g+J=`QFiKDn029<<49l zF`xn2$Zo6}@XMZ`oBXY&<}CjcJD<1eWc8^5;$dK342EPHuELfjlJjAGB_iV^{hrPe 
z?oiC@&Uf!#xOO4|d!g3^Qg$@TC!gCcxK_hBnpyWmdk;Z_>Eve}2!8{U>P_B2vvjkJq#vwSh8_W)IIw9aW4 z!;E-&ql*8x(47Wo&{55t1(Imn&-J_)`Zq3zg{PPJE%vu4d+Jjlc8BWtrdQ(Q+W3GzE)_)BrQ#a!@KDCupX09=nK-c-e)Ad z67lgg%qpVSx$uA0sMmxXFTvfcai5Ogc~eMa@7**f9!)&SQ%Fu?jk0$Bt=>B*6Y4#R zCrfauDLo%YgQu$bmYy=(8?OAQ&eXzdPjtnz%IBoP#5yOw;1ZT;1<9Yvx@SH<`!&ko z_o-h~)&7JImZRgltCzkKW0X8@iA_oLby_J;6BBa~=}81q8GD?Q#s_$k9KM_ALf#zW zC021SQ5!k&GPA2w^bskVMhbUVFFt)oC6B|` zOfonR{WsE-Ehv|aX{%VPp89P zIiTe9%_&<)lke>DkCZQU_d}BKuu(H-9Cn}`kKy2c+Ss5>{Ni=my`PR8t87loI*^pl zW(^aGoZVR88+C6pt~_aMJ)ci-g}$4p+YpCm(3Dvu=^m~3A~F3*M&e?7qCitE-=(db zY|)*~dY2_lq~Gggb+(e7i#F!NdlX8X1Lk#f@=_zt1Qhrqn|T*%o{M|Sz>+zI#b~oo z-NVqmiB_+q&smetId_@eIEyx%2!f5IZ?K*R;`ch(Cc1D-5sPpx>Q_gVjmCp`1DOq3 z0LN4K^Y!)hvK|&IKTqB40UQsf&aCVR6gY!+9mrd2X(Y&c-ytBlq_D&%pz-B2w+5Kc zq_gYk--UR&7<7r`$nL=B(R{J?{_1_XF8i0L=862O_$SFp`Uov;tKE}8+N6-8 z)6><-`QM}b8j5>wf&NzYhk!7d8fKu$dFkvB#-p>1l3n56 z7R|CYx>%b(;^%GTD!B!3VF|B+UrvM{sP&B`J|~cWsD3S2uVvMeV=FVZ%UHgfN!&6u zj|SDwU^&MKo3V2qY4{lC&+w@~(9-dGu7m^ein3CFh`P;TxfjnWu^|tlOHK~Gmt~yC zhHg{mG~Viw=#ZJ|#Kff4O~&cNS)%T+Dn+snV`;wE|3S)4K*vz^A{v>!?kD3{Z*XMx zb1C}nglikX@Bl9(u>k|%H_o>$KVY~PlD}T~V zk6zZ_lju~N{A6W1+0t|B=6G~_7*-eH??4zOk~rBHW*Il1^>;l^pF#r9rdMCWaT0va zCHZ&jE%_4OLDyaJ?>qIA^)4~i*-@D@JCkAI7SP6Lv z-ydP~_d(GcacLj2u`3vw>;GP*D$?+2tigJCB+qlQCsfjQTfM)oL_^qw2FYcRvsvP` zRfA35NV}nVGxl~e%GD-06a21=woe;_Zq#2pGeW;%Rr;~h$;eR!?5VMozKkaoSKxEj z3oas&4=P_7-j9O2Dhj=$M6%qDQ+|mu9o_i|{>iO+Cp$iZZ0D5P!LXhOgQL)W4KJ!H znj}IrG|%3`rKEK%di@=SCzAKjX&9J~Vns!&fa?PH5)a!zou9Q`6O@VVxKz2E8Ms8h z)$k~@ySwutK7!v!?VbSNtZ2A`b9lsKro*Di(o9RH`GLN_DHiRha>h#T0x zr)cgxpN62x{)F2=xzWAkbLge-oUHyjYMxhsrLEGssah z{>fw(ajbi_w)X#9b zmpoTFeJ&noJe#MXA z7uAZ_HWmyOXlHV5k0&=1&^o!#l5=w)NXp|@kB{O=ZSe*FAU&F8pd2WbjFO$x! 
zCUi%?$(}UCoz84mL-z6yH186etIJN-Wgl|>_}e5R+LQ>AE7|XZNoE%k^sLg!M>vc8 zJwkSx(WvuS#bh6g2bBG3|8iB0c6HHT<~(CB-_ie8crM2O{Ymn!ph&z@vUTKam-@=L z@wBYqLJc)b^vitWTX4LLr8}&+S7rq04BJbz){%}TLr3x!u5g{%o6z?o?YE{CpW%8B zzq6+-aVp8fwF{glxk`?QpM2lZdIvC;p;hKo6A_Tp_x3~E#H8%yn+Sw@WTYdCR98Du zFMs#691ksV5b=7qfBy8puM*o5?l1CpnQ!6`wwJ1?*7oa?JmuGBrMJt<_NRNe*Vo_lmr02IdC-ZvPxjDw1*OpM{_G0{$hob|@~~%valsbR9$Qo*{ji%}nO- zDJ1W&#-h#u z1NC(vd|yDvcfs)+Ry>(MCmLnG2YXhYpXamWoPu$%Nz}rvV6KfKV_3$V~718xxQPyE5Ve7Rc1Y}#?;d?E?U9{6v02AyH>w%#+hl-#!2 zd+~de>*T7f--${(1J&!mJFD7tjeYl^=Y6y@(GLsUN#21jJb~=jP7B-gpLWNfPrQz{ zypfzuoE)i%XPe0XOAE>Hd;;9F1MMIdw!JZEo+nww&Px7qN^EugJ}i%g!5S2Q7_508 z`2q#9vn<(PGe@$8w(pJd50a(paVY0dgdWM#kXAE`n)u+v{O+OLr`l~pMke4(=6Z74 zY~o~((9WO#jq5}_c}Oq+MB!W2{Q(B080lnXPLADv#ar20zdDnhn~MI=W`SoHe8@Xn zJiD{`a`%v{%Z%GO3$JEDi_A;DMwWZAk%_uZ)ZBi2??hx@2FLGEGbeT@pYCPwY=i>6 zK$w}KQWmx3>CXGhL{Mh`d_P=Frno!twiPcqxhD?yKT=%Fc<_!kP9j53=^wXeO@3ul*T5v+;Ji!3)Lj}85g-AUhFVVpVf zDay4dG%XpVKO(zR$Wddo&93v8K(kPUMx^2=jELRx@3x5s*kMHX6#L*X-2}V zfG0}#d|vfCplz%4G8E6c>0`#=+w?sf1(RW?3W=D(4wwS+`k>nrhSij)fd;eLtg~rJ zBC~JB)9iXmj9pe*wxLtb*XyX|=h?zmB&8AmtP#HUg=0sSWC`u;YP89$`HlKb&Z}n1 zCX#7Y!L4Nf%M44+LKAzSStZ`gaILff#bVSr!PCFu|3NT0m&}hMqsjL74QjL{)$QP% zdEBRP?lo36=Leju-&G_tQA2O}6@T($*k`v!a@n8C;^r)`w`tp>MuD#j33vt7bJomM zGCKk6*++O23-uf=y}>y80oqK~PBg5JJHN%jjfGupM<4fLgSOzxkKp;)IGOB*kHaw$ zTZb#XnXDvF-|OlgiTW*2yG-9Nv0vGbmh6>hkljQr?^AefrFejHWUryocs9Cc)-3U5 zJ?KU=mg`4cei2@aS-8rq$#9x;a@Unc^vn}xpVeWwT*uWQ{5%gWrr^hVyMXOh$c{3Eo!qVS3`58F|H9kg{P9`5QokqRr^@1aKgf%dL5=a;=ZBi+fTJdUn} ze*@Pg{~>d$+3QmS9glQ3CqvKicSTX693IBoEO9!1#t`3RCVopBrFL@STU9mwrJc<* zz8qht)1@_607>D5~#W+JZjChdvg zUx8oO;6N3S_9k0{3d#JN*0PiA26$$yJ_!!lqjexDiZ@&hRIT)pvjfki71LOYFZA*4D6?(SbtA<;_qe$WwQP4;Jxev@UI{=BfZ5~a{oO39fGe3OM|h_N_px>STH>8f5l*4ongWehtm9Yy|ioA5(vhZ>g71Z=7GHh?>0b z%`b3^mz)7{GS+it~e}kWXFlaJImz-Vq z@+hxhb^ij+ecA2vU>Dyf^Syt9+eTDQR>q+ya-Jt^S+@9}t9U=7$$qS4B532A4AW9? 
z^>gmsp@nZW(si=q&PSnRj2~x%>QFTK(s=qPJG+>CoWhD?of!hzQ6HACksw6G2Je#EKn z#d|)+%s}(O*tEPG+Z{ z$jTcyRtNlN;BWTw4fFd-Ha4^B$&mCHV_vc$pAMq>%Dn-u%%uNb+3D{7OitEnVYQxn z!udCF_>D0wZwZ-i%URuvwbcfnl5uSwF7HTA=HqSRPx^xV~%iz?azJL^K`^w(Qr-eDCvGoy&u`9A%11!*Y z?hY(kPEPaW2S+Fyqr>Y3+Q_9Rj=7I9A@BJHVPb{p0up|ML72 zHZhszAFhBN++^D)`t)Gi~jPC*#dOy*`-@N z%Q{Z{?6XN*)>Rr9{qm+df=4%#Rjg@5pGW791?fPop8&cOaPfB5Zz-v7?rGwfGSBxY z*k+^KDV|nhQF1;{Rs6~+UyEqjRwL9(9#vJgzbS~mwW$UeG(Is zth1d_Xa&hBj|;Qmdo{_(8v87k=K<|}qKCGyAFtJD*B9uX{Z)h5<)2;GBRh$GjpsC; zX3Ry2L*Q1s@RDnQCntL>!Qtk7w)2g+#S2^qtE?R7 zq_$4l8b*JXtJ~AlnkajM-YcP9dH3Ez%fG=gqj;i|YoqGzS}YHalhJG@`itY5t>}7kJ34)W6=5(w7nZF z2hho>sL@f~uI|K($T4_Um@)QrZDdT|iQa7We=dl-xc`W6_7~M-kuxjT6h?_v z9R%B~2>%&Z7U5i%qWzqCR?6Ve0uGNVxeO(8a^O{PuZkk^fq#M94aWSma2(y+T=*@C zh+G8!>=P_wKj(lWesf|ib2qbnfu-b8?hTgAg53t6_$P@6>RgnFM^J`i4UE@o3LI)^ zw<63JsS`ixu zku9xINWn9-FniZ$s8dTzpRkZWvwM+(9Y|1iuGU7)*Nr73!J8=Uku+;6iF<%nb~H8| zz&0PD|LvYv=^Thf+IR%Sn+wk|USv)?+d=<%N1hCa%sDS|_YdShd-}32{}J8IY;N)q zOd= zy@QddG^g7DMRR821N`@nr0cDMFK2?KgvXeF|1ZVH${KMtC{m_MGMN55B!OSzCoL5ExYf^ihH@A zD28Z3M>^Kq|H`!PwSPaY;QvlAj8C2!>^$vBYc{eChiU61)-V~a5=YseZ0t&ox~ZSh zZ*S6*bIT4f@=s!)F4y{HEYu(Q#PdD*)t%4P&pUCl`eaXgXRRlfe|*to_S>TERb=K# zHs?Zkr{&rBxr%+t4)C$=-@?Cm2}Idhe++Ks4Cj~G?PMOWiGRuQpE=5p__!}9m9@2u zW2Mu)9_ACwCEabca4SjsoMavjlH>^+1=D!UIlDPKm9m?6mQiOk>+uA7B$IFQ6E_1( zV(;=Re$hMZXZx-n8qu?6{IwR|H8z&^;Pml4F|_@`mkcQj;IoPZ%|yL7zd0Y~XMzRplS!8@pP3xD_Q6$WoUAs3MD6cacariLeR|NG^bk}{*5~)Z z(~|YeiPXssoptBzhdLIXd6QhFw=eW}mVT2>Yn6VIlOUNb+Tre4-stTlbrg!cj!!S5 zW!^cyqTz>Y>!G5?2ry-qH+-3l3iFNaPtdC8jR5=7$ecUZlQ7-3>Uu(2~ zhqZXS!1YYheGV?(rT^6^kXYA5d1TjnGx|D8ok`&7#%H>n1nsGX?7SMR$GoE)LYkTv z?*cg;emF1S9ojGmKMsfAj6y>WW4j(!?o{-hLbnFc>_^m1gnM#6XWgec8R`V0L>{yS zOK;aTQ21PY&CdA5dM0OLcKv6qqOs?LS)B*qmbcIE)gR1m4g%4EAUPCYKgQv+$lQZy z*c|O=gCQ|=ZE-bk*?o8~b?MPNe2JVRk*oyok*#L>e1t6ZV`T^Fy$)TTLavh$BV%;V zt>~cKYa}F*(~0BQz-l$8OPBE{=ApvB@E|8Je4&Nq=y^QKC8BB#-aVp~bG3iFK0Bhx zWH5f?*I*E|Cet~$GN;O|^d$D?SeEz%_AxLX?dmf)EQ!?7}46J6r3K 
zuwzKwG*G>4%uD?HZ7B5wech(adB&9*;2A>Ot~C-ymj~c&a-X+lw{tp2*6V_FOJNp}5YZJGoBW@y(19-ZA8 zil2eC1?fHpM<;+|1Ikol`Boaaaxzh7p!=~DlW0)hs0JvNy^*((=c~X`3FRLoiKAHJ zoN5>weK@Y4hbA}Egq&7XgM{9thc#qp3@qB~s~Jhz;Cd}NZUgF^hCR_J+koCJ!Mo~O z@2ubKhRTdzR=q1J*OWcV9-iua;LXOcrjM?8n=`f2awVAW1dASI?HdyCF)K9>Stw~?MQ^r?>%IXNy-H-9hQG?IsANA}|<`0O`*Bb9I8M*H&Mv)0DU^139 zbtmVL?9PWz)I+2%Qjyu$K#dgiF}t+s)4`x{AW&gM?IGAmHzesS#;w+_u_&6 z!8fr8<(01Pe^vsTp;IOD@m4Xr{V;n~9W)oPUi&GP713~DH!!p)}-l*~>THZ}#w&?nI+{xK94WK$8gRIsbl^5}Qd< zng1=w`AcZ^AltK$waq+mAEV*|`j#2oTS&w6#`WbSxe+VUjqE>P%-8+uedT3xemt9) zQ`MA;5qi;6c}X2bqlQ+4d#tTs>Bp@16Af$ zKEbPrdaK1l%t-$y}Zj2re{=CKFd(W7l7m`W{w? zlD460KVbaHDt~=_zHEG5qW44`oUdMH3~KNu64iTzQK1_w5<{1>mu}MLGIYpXb56?I zthML;e!}l$PdW&9vSVl^Z+{EE&P4O&?i|Wj`j8Kwe6Vf(y;wUVadU4iKTQ(%B?l8h z*at+(R%h1Eu^ha!th{JxsA7cDNcm{m(##x_c&ddn*P6u5{X@2#^0Og z>ET*g4bPtFk&{H8ATgWRh|ZoQhelSVa}M4D7UX@`IpKE_3_9`nvwnLo%3t8_Ia+%{ z3ws;IzE)!;+P6pdjAN&fpKjotNopQaW?o@Qvzzf;b&|2HEedR8{d$h#Fpk%j`qq;N1v?QC%fx8 zg_N`*x8-m*(b9R(`9+UA(351RybvxA(~V0}tuN>wK$}h|^f9?hM99rJoHw-0PoGML ztKt1}HvK~VCt^Fz=vXM z$%ej^4Ao*`yYm$iiITbFs(kidMcrDcc{)z7LYY3G=mz6kN$G1SzYe_)MXkhWB<>pviWa89>SQC_*(~A|T+2?me`Qiv-H8vMf#x3Ymce-jn)FrbEH!Gu|0QiLL8YUOYG>*HJmvBhzX}vW!|Br@g=9C`Bp`%dz7%)}`(P?;>lj+YSA>nIIxqqJYcVGpQVZ0^H9RZHy zJGvBQUO?pqdN_fFkA+Qs+Inn2_6a4Y=%cJ_ovz)D==Xy(K4WsOU8{v(P~mwz$X<#0 zpqQkMUG&zC9P~!HtUaDhE)GJ6%of)KU3|ciEWrmb|H&KXBgUYsNNKWeBy0O@@^K{j zJQLlr!u%c!*&Q4&)6BK(#T97MnWk*RpRD=iROy`USeyQLM2EYIv2>?me9cT$Q+E^V zP#x{!fgVHCcGB+*t#okrH04jEJMsG}qjEBVb#_0qJyl5CKA@-!n}4BBE3%xk>#{HI zx5X%)kuSbt_M&CwHrZQ~VPO*aoQD>7gRwJBZbZiJ(!(fvnS5F0K|ccx5(}G^?(M!n zQxIoW^(8dw!7^so)d?gfvuZi_AYN)dSK>=z6HCAHw>+D)eNXzUe&@{K)6}o&y7vDr zR|5=L@y(jUUFedUwaG>D7S^T1Ie#nhCqI&u_zXw0SQ*Wi;L{v9XXR<4QR!Q~C(B`0 zd7f9RA|4)3Hv7A}(TH3IkN7<+NW&poA3#5{%YFxv*Tu76P&3}r?j-A2rNX0;WGK74 zGJ7@@Oqpd%#dd6aviTl}Y|*`+z!8R>4$@J;N%QqtI!b;xYv&Yp}Y%3a4w zl*5e)peRT7Uv~di9&B<@L?c$f`j4b-Hci^3wQmdgyh5v|8tV>4h0MFwhgTvF5?k<* z`sb)UkIZzS2Z>AWNQPR2Eo&V+>oIfK$>f%3i6Jn}4#9ACIs1~cZR2hBC_M2+dZ@sP 
z2isG%JkC|30piIfL+?{)*;$z**~FKX$?n23)JoRFa4(-wEUhPMA(70<6q4V^c#|k) zk4w}+q8So#(Hk^Fd;@ttJDLs2|N7u8-6{P&>)*fPZ+7%ed~vj5yEmGvM8V{5_+9p2 z@|ZJdx`TQr5RJo&?3rn$&*;vN?0lk<+LMHjwVa(B$&QwciF06<6`er!Dl5G^It(m& zNH56`oHJ8Oa+{1E@i_D3zdxZ|ekUtqbvE%1GMH8U7ub)S0hMf5LqW8SL@eiC*R8^@)1a< zx_X(6O&mZIV=zYjcCcRyAlTvRdOvF?qv+mT*}`- z7)~You}cY23nl`a>+1xo)LYy_F}dB!et{V-JqYr-rN(rv=94#nO6IQ zAS3%VaE&}4&7vnCSWB?|L+dY)+{j>781fdAID-v(_`6z>=*Y$^Mu`#XEWx?8g;Xp@ zrCvs~oOe-+FPsdf*RkI{X@4hmvZ8r6nJBFroQi4_(e@aY^#%OA+PIz^b!+vt5rna) zQ;dM8!8bXg>XFbB$XGnnjRgm;fzvZeXP#~g4ZDwA-@HAbDLl&}kP0(l%I9t<>+URnd--#g3 ze!h;{N3Rg2u1IH+J@-<0)bYKGL}st)DmZ=#`^+H6_iF?07PRj` zR7o^L6}>j%Z{A3Er-OKvp4Q+^-aOv}!JD|6v0x~Pi5|Bk_c?`lTLEX*EgNWSXSBP7 z%-x6z{b3k6Y(iRd_Vk-z8}CWx$|67ANaS^3{GPN_MU$*TUQfDelhV2ceO8g$H)!d@ z=pJA2Yu`4sV5~B=artl*ji<9~VQX_zcGiFw8i!kwj#Ej&fyEo^;cD&4!X-yd_J?Lw zcNV+a7i96nj{|e&ieFT(js89_Y}_QWc|7}fw!brs%pcK~tW@u-m1jKrlwNFQ-v_y$ z`TZK&d4WC*1aoiH%4}2xQqqy6#T$rq40eh6uHkPfPB8y7=e`Ur?^dU@hM#k|?j{+X z@Gh~BRnfX5|8}bS$?f$P%G72V=BSx=8~DzLdB)WFBszEM(9fnYT7+YXTe$+?n$m*y zWPU%Ct60>Ws;x@oD>I8ZNvQ#^DD6B0kE6gn+$fS&?=t1PlAx^m$M>$y8fW!)IEyh~ zt-V>d%KXHvLF|scKY;TOcpHuRie#?P!`pi9;!fu0CMa3*W%exYB;#qG3{fU4w7Y7j zDp)eFmnhG(!I|d`Xg^y{Mi+8ANom)5E$tmasxphXiVi)f z&mBLOs?WRC5uz`_8&|KSGkIJy@Q%NEAu>9 z8?z_zps)0;z@B`9>T}qImE>$Xm@=!pgSNw?9Z+Q%Ih?Lu6*6)y?Mn{l=+dO!$$X6d?!uC9=JB4T zWu>rX`D1fb zxyqqm%Kpa>&Q+=Q|M@rf_tej|=#z{HEwpztUUy{I`jP$@&@9oyv+tE#OiO zH50EhuYhIeqV>G5C7VQ65neT-|DSy&^1F2Jf5Ma5>(Jp^-;>ya3zgeS`kuzyNx0dL zC4G(EU(2Sq1;bD7&F7Q+tX}4ezT}T&*L*UOf5Lwm!e((`mNb0U4n`+nX8vsb1w zjj!uoBA*ib`Ji@pCx5kZ@;!dV<#c2zS<2jIB6EioviuFcO(Ju>QDUj~68l&Kg+3?u zGtsD(5j8VmIkEUJBx!H5EC*^IEH-{QGD$nGH=e;9+p>#hxd7ejTHGcHeE%&WY@O;#X^vt;CV8E%Z3i zq{-tjn?&b~<9o?YP6)k^_x&x{mZHdX+)4(BoR!zd_X%EH3-sTpTtgb!1P1%N9)((? zQL+#Gu7GW3O<$ok2N@rCgW1t)oDTj)>h6jzr=a{K&>oLg55fIs-d1?I5p0JjF^tv? 
zBFC?2btb-?f)Ckuc0FAF#QuH6@_fj1Iuad{A83DVtm7|?2f-i|PHe>lfBy)F>{E=K z=0xFSB7YFQE1rGMjyY_kH3096Hnk%S7XOA`X@- z`vvI6lI>?`#9DOS3zpg4_c%!MM)xxft)utH@#I!=eoi5UiQ0OfEU!Y9N6~N;uHOaL z%){38JGnv=CHWj(*xOifI10wEPo}sIAUg+^iM*T$&SVz2nwIQre&PVOe4Vz2!FfMc zGABg0^{MZF%fgzUMVD$8)H;BCELJwBkNwfThNv|}?P+8zkvMtJy~F$Ra?qFd2MzOk zjq=HDF_(@_f=kY+-jfFGKnl(?R+Mvwb)p($ zX0E@6tLfUV1cw{Q+ETiiQyA|>^^?&z89lyt=O$cDoO0e$V-<3yTrw+gTeaQVDtoU2Jmv{Uhi!RmlQ3I)W_0&>)$jbg7 z!XFzL8a`(>~mr`7lYnW1fpZbyP)3i|wn z&RMa|nHOd7deoDwQs<1o_*RR2FY;s#J;|D7c~YA5lXmhQPo5g6xjPwq$+LL(E8zKc z(NEUgSD9m5QTPEB*|Efy-lMJLy-G%f3ciDLVnk2I zvx=VXKoZKKMGNqkq71X2r!w9g%PwUWtuZ^9d}WCRZLeIsgAVM_Sa2MS3Kd}zXpZ)L zH%}Y8(--$Mw>Ogo&RMsK`gx3uPd6{Nf_`Rg>PeE@1yji%)aAUQ}}14c^|zGB<+LfPvUix1-U&eDwD4Uq&j;hhu~dh@^P{Izmmrb-2c3= z2U)v20w;e3N7iCzfhOzfd*g3;S5rwwB?m`|^&z0`!A>ME*g5)cp#FHYnL{3Wkgddc^gxflvHr=dyEm+wE7QE-)mduHan%r9 zwbAS$SD8f}3b%KR6_2n>FQQl8Eh4e?$#C*{RsmmmV^K#KmeX!x00QAybXl&=;rdLL zjFN0bJ}&m%EJ4@bR;`9IKlanG6;SgUh?=1g$@;|RCl8c;*RvuKz6qhr#lk@CS zf8z^Q<2Stq>&^IG50qKq&Dr#6^>xq7!7s7E+v8iRk+s~-=@I!v-%9^Wd&o+6qmK>T zZwU6`{_nz`m-07UYP`$I3lAFc9@5w8tkVo~cr9sq1azB>gM--OF}_)mOoU#t`Y(3< zBmQ28%ZHM|$s}kb3)>pKo8nortvtqZETvQTx!0Tw-bYG@tJelr>x_J7vu1fiPt4;G zQjs^Gek|WbdVG^kWQOt+lJGb3*HEdOjVVpM4Nlhf0=*^oLGl=6-^_TW69e-wX-xkA zMEvz;%X*^bNgzy?)on`MuFXW%zJjlJ`5Xx6>=CKM6k5i+Jbd9udzjn`8^JyhJfa+xB zvo~!N-Kqzl>~0>T=kiJ~A)%{yg6+sh9a50@+8&Cgp=B~dbOmuwy-i|G6Txr-%6#SdM59s4>~72*LGlu{@EHokN6gN<=ap%T z6Y);FdDelREk)N$aefaN3}luGa}{mq3xj)5FtH}L;eRqR zTtW)=@cd^jH9>)WmG~SdGtv!(Ve`WKn^^FCDB9eHitW_+&ePBE{dm^;8*W_(yc(~|9J8ybBCmdV}x zvh2J&pz!e$VbBhbR)ZnCbk0E6?8olH0^9`G79gq(=DT6@77H=Bc$2QBzdPW)fcExf zH-~9$2AFQpSKfj)qQ~E~*at-~0Ouf5bt4`f#J-JEZYA#i9z}A3Sx%q+kd5dI`qQ-< z?<;Q;In%kP&v^w_{k8VC>jv!NAS25v|IbC^eT@k{SkHgZ@5CWwo%V4u)!4XnqtdhW zd6u?^u(zL~Y&E~Lk}-?+%)oS;)t)A1J|(BKSy?5y4CPXGK2Im77-xOq9Cjitl!J2>5-}faby4Fq z{YBo7Cf$kQdkU?u^YnPqH5}AgaT@`W^KiKf>8+-f*<`6FER)~rZr11m{|DpiyEsy{ zkjke>aNbgS<5!}C8|n2V5}#8mvijZ#ZDXCY611~gi9Wr6-A=r7-bVH_l8jaJYIeIC 
zNt&_{mNR<-vvchQ<8A!j3T(*!g|z3*C2t0k>C2yda<)cx20W%t8@%jCmK&4I zrZJRvqM?CPd?8}L9&8W=5*9}(D6ai$_)(D*w?On%MeYrO0lJk{#xk7uj+0UGumD3(t zf^8pjxS4&;dG-fuvkpBeosHW-|M!uXyS0A+s6REPex{eKNAKuPIelf;=VNlz$^FFk z?WM(>H89ezdvGjyBs#-?37QAy9ccSCyqS*fCQE5Y@MNcbe4?g!GL95v{%|h1<0+LV zvwPF6oN%0934A{r3_Fmfir&0()>s8I%+*Lx_VW)Ztp8WW*J(=iNB8pddv9DA0?U7}CwXhQ zRZn}tJ?Fvfi>7%)%)ZfNU)=E|t~Ex=Z3$#*oI-ac!i!muulgecp}BbJ)tG zTz!BqNBSl^Yy&l~W5IG(Sp46%Y{j!$eI9*}#qHz;yBm(l-_i${7vTCaWVbR7H>D-n z)0O6FsGN_IRl)brD*MMfsdt0+I)mq1HOtUv6P?d|ejS=r zi;hnK=}pQ!0gfstF%ut;q-XIx8{$Qo--+RfJlCX=Q?-`JkYC|k8IGAp-BJC9BqSc! zFElYTsnzvxG`tcSn=`JHU!aF_Ls4OOQ10dK&IO&{K>G{fHb%=?H$6tr74&~KixJKbo{Hx(={fS;5ZH)b#|CBts*Rp5}we$g~XRrwm=yfd3 zJ6%7eUEqmuJc-PW(DKi)Zms2^WM&F`nw_v){eA@|%k_C0$%*IvP2t%*1>%Ju_@Nlj zlHcoOWmbcrD$ilE_SWOomne{V+?;vUiF}QOeNT0UYCUllTWHR~B)YFL@i1=$?ZGy{ zvu9{%-VT99%h{DQD$0G0yA>+MLpmxzS`HSZHKzk0S9g;#nW2hn&f^-iV%< z!bE3IgnedM5{LQ|ERzf9GyPnT7jMv~%oS{8;h*_g$KzQ}shCxPV# z?I$8D`Iv^Xy797~Fq$NHaDOEZLD%egds3aQv~M+;+Jzh(i8CLtnAxqq6_%^CoslHD zRR?PQUHy&%-_JgW(eS_H>KCZ+E}mu;_ZV7w5bWR5^AmnQr?-{*%(yZIF4^&N0ZE<6 zW+dCuI&T{L;6w6whK4zND<|qSHx8Z#-<+I#z1+S-ARw^QzMZD%$47*aL}ykiT! zn+3zpC^3`0-^LOp_H!TzH{r+;S{bUfw`kh&#aNa2hh#rI6*M=q7Ppg|y7)Iht)BEN zQ9b`c^AqtYt8x$1-B{m6O6C15d0NlY{z~O{A$c`Obt3*IqSt$HYJ|sw;eNON+vqbh zpKr5QBeYP-+r}u`+yGwhgWy_PISZBtX}2wntb*S+;d4$@O@8Iq^}L@p-v(0^^s9?9 zi6TCOeB^9~!%*x(R`z)Rk2j9qfop%l!NcIuM^DKN)dFq$Xlp$`E$bY)pWFg%K=T4D zvZ`H!40l4s3cbue zm?iL-qqW4;CH5|>v}fp{GO6!K#>?aTugaVa$HdokRJI&#n**NVaLSpgnTu-7UcFfK zQq%AHDDzv?$SJ2gDs^`O#kq7ewX=4bjG7~Tvy=X1_#`6oS6uC+#j9z5IZxxkrCfHr z?S&G%!LY60IZe10iu7>3FM8bso94Y zbCnFTt@M`c3^@fW`CM~qM6v;&pww>kuY)$fB3BMZ$n(KK!c}TqRE^tc*zd@eeuI=#bH8r!ZaESl0nEU!) z!~ZFu8}GUTtnVUCKPlhR|IO|^kA9i=KO7vHpZtB-ImN8|GS?^jCSD|KdI!3?!1w?5 zZOg$Yvzt}477SA7|MjWvZcX1xxSF$gGY3DIJ*q&zDxi1HYE1@^>Tpbq%v_(b;Qa#r zX)o_mRnRyQI5o*kY)5Iv^*_DET9n@5O82tj@SmrdZ$6U7KB157)?7p;o-9V6_7m|-eWoCC! 
zPDwq8WF(*C%`ALta^4!wuNU?`9(;EgeeLd<`n-ePk7sx-@9JmvFl+H2lBj2tzuG91 z6~mnLlz6^mxEuqfTxB2mx3HMbwsqroB+LIq^!-@`&|?a)2z{fCg~r%^Vi4b?)MoY35p^sZr} z59Ytz=shd*h{+eR7+s#D--!fW%0C(jkM~gFT4hJkrq!rAgG_C~jd>)p7CI;2LZZO` z3GRcynw(r!(Qq~F8^b?d-#U7-d*P9NU&wklbZ_E~?^1NhJa6LOXBSW2)52&HkqmUh zX!2|=CA-*in!Z6B>&ZsmGCKR*io5YFG9#P0;)B8b9vRJkwVWrig??o%@n2+WF&@l@ z&p5grZ)pR%tir>IddWGw$;CFu8{h*x-{gY&0k=NFp+n(&G#=buaN~L2&vW>Ycgs!M zPlke)@J!B*=Saniq%gBWvBFu8yM%2?Zn8Fdigx7G;z(p&l9>3?tX(IwLn2e!ki+c0 zPi$p!XuXKK%}D1=GO-x6@%I;jDA_abqyyDqoYSzcfZi zWZ%(IddQB_zv9A7n2*Q9#2+MXW;JTv&i*G`!x$FpHPSec9@TX(D+V3dyHm(z;!Tpn zewMcO#Gjcw%|M*dtsOq~!_&)fpth^Lm309{*0m?V(H6!H{*1JJnqR{+rp#>x@{P%pUJa7CU<0%CkgxtTX~<&HfmF zGYZZpH_3_L-zTHh9&|Du+mRs2rvq+ef67iu9*@7-9ex977b!EBoUSZJt&Fs3FXQ$s zQnLZ=_5y28E3OQ~rKD|dyr_$A@jWsVmD!>A8TovILfPg1Qr8Mb^k2x*O1&go++yuE zB)Q#`DsSv42d@U?u>yFjsy9s!*^^U??c7h-^3*^EBffdK6PYE{!AlwWOuUP zvMQ`IXPli{iAKn%_ggS#F7j$N>v+B746Ed#JCOvJxii4`V{*3>NlBj9>)^kFhV`er z%h5S`oDM#GjSRsnV@cF}{A8y4AE=5ud)+clJ}&*Uzr%A52Sb0rz3# zcz2SS8QiV9KITN59xozNY&fjf>erLIUTlvhEPsGPBls-ob$rQ4u zXM2#3dfLgUw0U(6vN--g?oKy0)}v43jQ?3Fo`NE*GH2Yw)iz^R=5eQi zE!p>~fTE0dv{adz{FrD)dGeH*%^sd+g`*NWM}sPn_Hjy2Qzp>>fgv(q86;D2=MU}% z-uR0H$Y#9Vic00q_Lcf8QTpAYT=rv@70;q&@g7s||NS{qooM%p;(7enl*?P*KIoLw z##ex+GEH3uwzQULod4-BmuUH)_M88Y9xJiV+n?oaXO1?iuo1bJtVgTa#Ox=@*>>Yy zUB%}8=#xDTIYA~lR}!;&Ju8tJxD)Vt0ekWpNqZBovxmDr{fHmamPXBGVUKV>d1<%N zl8tD48oTujY0uuOdsw$O)c%c4;nWyjXHm$XO=d5!YR9~ zUf@a1WDzsd^b2j<3hJB;P!3Gl)04QgD`Al{WP764B$P{>?r2!O;O_5Offs4+ZLCCQ zw_h;YzXIO{dh3olHz?CjnZL4r_tC7(Je4z+<*eAlQSBI-{V1!NOi@?z`ZBNElqP2V zIhiiUq1!nH502K?YFyYCUf<}eIU09|?|pood8nTC_1_n^wjo>a4XmC7>tr-(hl@?{ ztB1NWo3c`zPKPjdQdvKc)` z0(Ql#KE}r?dTWMvIk$NW{pty_S5Ut(+_SS}FH}xT$E%fypyVCO&h)H0#Vd<{KCz%q zvMI%Dn8goGM$I?0zdI`K3b(FobRr_2DWoU6xZi<6q$L@76Hj~uD2CwdM6hjTBNBP? 
zKK&e~pL6w}x5S*8b{YN1+J9!s9wB|F;Bs~f zJfK|SZ@1CkF*r~OcWwmJ0=)hwDSCuN^ry+S>Dj~h&|k0XQ7~)&O>iJ9D#^eaXt$tz zGQH-!)67BS?d~BMG%y*GlU;cdK3xpDoH+P^(e4!OX3ytE;7DKlC>NS0k9FRV zCZI*&YDQ#J^B*#O4<$@V+A zk{x1++Q~k>x+t(*`&GOR%r{2ko+zHx))W2BEb(r* z(1Z242;DM+mi=*Gsg=3GDe@5DE}>TfHu(Ft}dJV`F8bG1-`KG!pvynv56C84^p zHE(j`SeOs~Jq8cZPj=3f;cucotNY)cc4waPRXEh(0RUP3x6@4nObBQ}#fHUPhO~mb$JXB<_UR3KiRL+TUwa_XUSHt7YaLc*PDSIsb9PiFWh37RA6gl}fxsF~1e;Fv& z`#YSpX9jx?o3yJZ=g^cgoP4(MIkL0+dYYFVG06lr1r4%eD%fTB*fw&$i@tZ!OLA}w zH-46Zd>c4|%Qo6Did1B8exe^Qa&;?9-T(yGutJg7#46NxKRbxllbFnM$9o%~#+m%^ z%#UA2R`w(F$ugTvQbRzL^51|mzF-4%$?3BTQLY2szli~=NZyObNl#0-Bm+wJrA{f{%x=&_ z_SaQJuO3R}RER}nEaxfQgGx7edLAmop3m`jIjwqC?dd+1aru9-btYgsmTlu7W9%tg zB+4|&7TKjY%MeBO?X`um?-DJR`NVw%Rmo zy{oepP)|X=u6}4h+iu+LEJ&Zxk)tz_yV4eSpqyoVa*{Tg(pF-Vngib=aNkD`ud@KB zajl{Vudb}M@1a+IfdAXURN3~qI|q$jF%>L*7DxW*U)lffyX#N?@4utR&dA5Z&qT`F z&Yc?G0?3pO;<@%B$+Z7bEW^Ue4Rn- zf5wvO%evOAHriJNOFj_Uh)yPR^xYjZRgv18uoREurIf&TXv;`%O`e=Z7SCe+^jaNf zxWvD!2fI_^Uw9VpP-as!*%>eQSD#0o3Ski!!qvsWqr1qHH^9`n_p4ajDWrcDOHv&U zwgt=OjjGe4cg23s!~)p^>VT(UM0}$t*zzodBOCC~CXr@2y5-nbG$LCbxi!9~zP0;% zjD}>3>}aDq7}TR?SbS@#jn=QAl*M>7Z*l!9RDKxT9EW}aZWw#DEBdDO83~>~`wGmz zM{cHaKOIZ&=?-a>W?Zj3!Chb0-SfiWJo?A9b*|K0PD?WnNS5*TMP&B^STzLqx3Tj> zX?vXg-$<@c!NnG10Q9U&sVA{MzlT=bl?_>`>rt#zS88~g^o}R*gK|>>#9u&zwaDWO z$hLd+oK5#ksU<*f?D&&d*>|zR-T3{0r}f~g=TA9rnICvsqJw%~?{Vj>=n`snY|aRt z<)EBr9xTCPjfI+eNCUxc5zk#`@hOh?rINn2xmSSVR1OD7l z(lyJ@k@tmeeM82*ROWv}`(J?gVJK~6UDq&w0ETY^nPZhJuyNxk`BP+O4Nq1e z4acD19Js0PS(dsyeQIKW#x>U4p~DWIwx^{qlJR@cBnOhxnRdlJj$dIfT%Z0uC3ir6 zW?+T-llE4)C55c*0GfJ=+Ut?Rcr7c)*#pbJh%%hV?}b<4iiTT)=j2(udc5P1hXe3k z==C-CA?ahOFE{D8!tLB}uqsyR7-fG1^ujU`*|05GgM6JNm!sp8@#?C<&%yX^*42}8 zd*VZQn(#Bg=*i4)L$SZPUyP0%AoUC==e}20E=>oQ3Gl(QR|i1jOz_zg;vI$d%#|v% z9xZ}r>T3A4q|JjJF)n^Dc>DxDgflnRok*K80y#9w@G+=37i;9|nzG>D32kTy&8{NT z)6tN;NaY*I$3Sd>F^b(0=}y68NQW)FadE#R?HXu*1iF-_-pOczdzXyH8V9w@!#VT2 zuKshZNj9W30o!qickXR;RZnj?-5D!mB=;py-f{ZLVBz}SWn2wKD$kSGwYfjy+j?U0 
zdfr_HR`)=tX$Rr{GUQ(<`@vJ!RF9=)?L~brKz&C(%5b+48a|1I{Q@eyO{tC?8%^3> zUX$SdSg2YGy&i+jNd&VDYRXG0SDm_lbw((m9I_yG?SQEU)?*WQj%3Brg61NR_9i=i zRd)QeB)FamkADI?ccwV@J`-$Q&1boHlHOf%t!al+DeqU>zf(XnjQ8#!GeT=8xVi{k zPQ(T*1Y09B_XSFKPM;)AJXDMWbA8)Bp?$TrOy-&WE8ALOn}(d&j>a)RpTYZ*$p2}i zQ=h<@>l6Im36!qbbo{Xh&*G83?Z{dQczX|Uz7FPPsiiP!JxzQLPhAV_y4aC$tQ}=F z0JiMp)#7{3^iRmyCrD?1ERv&Ywcx3dLT&v$pS3uW?HR1M!M9J54dHeD=y~49A(t=G z(@O*&}tSGdk{eq~L?|7`c;|B1?Q7UI3 zw_#fyKRg3%h2(R7Zwx(b9t}{G+5E^?Df~#4LsJe@1M#6Gs+-}Nvin?A$ zud*Z0!?80xfb~-(U^1Vx)ZqEmU7$-*s9ga%r2@@+l+hi@AAJqdfM#djU?fs)>UE^k3! zqY^tiG%3((K5!WSYzdGW;kXLl=5X&QoJ&AL+yNIua+U>n^lUvVJ(XM$gyI@q=_QT| zT)6&b6!^uiy-fhSWUlk@uXm~R8}%;vnpRxTbN%0Q4N}kteP|CMP7!=<`B9lPFWwW@*uPB>6MaMGJO#&I=Dj-%JtwIo?*?NX9V_n4v&WI_OXOOP&+Fdz{Ls_+&?-D{ z1r4sR_Zx{t>W+*$9(s=7mT0AZ=M^M;Y49Xm4fi!(qGQZ@(P{M8en1oSBd&n;Q%JVE zryRq43GOaIKQEGFHtmv;$0h`uwgFvugA|@{k^z)P6~6>r6R^?~k!hpx{TyoO3nrhU z^N#UM;P(kEu%}P;2XaqEc!}~nZ~YS}(*ww-aJL++zs0`vhDvvl!_j0%Sn|_KbfPRz z*eMF{+hEsJn_O*#d2PC&zC zbf+Sa&ckPEkA3m<=2yub(QO;l{*jW>fU5`C7luYHfM5fy;P2SDZ)uNLgLO;te?#r= z$-96J&w^!G4n_2D*TSXg;P1FmPtqL(qH{>u*5Jdt>a`TGcumg4nXUu`AJH))h^2WU07Ckn%AO^iD=Az`0a@o#?rWvC!Hv_ zBLA&JyWRsHc89O|(Ph`Vri0%Z^8B0n3j^`1Xy5`U(hjRxiC=w!CU|6J(7jyL=UTCb zNYR5xs&k)x!S_FK)ZKs$&{5Y0IQ!j?I%fjA>(9$zvpp%U9ks1O+ulS=XP_BIQ+=1Z zjQjZpGT9wo%|^cGV0~YLGK*;E?gfqnG^ZYK?;wrwvpm&y8L%6x?jpL7pY$C_Uk*4@ zkg1h{cSeshhT{vAJsiHdnsF)?eHgji&FM-4&!xEso=iado!fVpq+_`6KzsKOy}+CA zk%867_G+$6klvY&inJS3!D=axMV5C7)aeNxEh%>n<#vPrWobRX#PT?Z$eEJOzRC&1a2NK7H@lY3Gh2H%U6OB(usFzP-O%XSd4O?2JcF6*_>59U5AkPjh4{_I-lB~sZ zN?QP}?VU5SiBQ=qKZp1vlg?4A9QYj>@U9Ec)I}PzhZ-DZ&4dk1;zTogJnges3B>up#842=>|^v;_q_Zkx@N&S2d(1{SOi` z2ELXEEtoUcr@_W`3`;40HJ_9KZyeexP7Q$L0M_ST__&WJ&f9zoA51l=wHUyT1+04^i|b3uMP zVnrq)g_V(~MDRI^yeu5~p_WAyeYHf=a%#^^J6&bT>UuPq$*B9ww;Yy|mzVzMeKjz0Z?2wrGFoiUEH{?k_zF$2(fYY+! 
zHJHN39sc#uu$$0({ihGGD-YA+dFqtsiqiYeU-aK zq&o}UZh!_W;8G{JUle=%2UvKbQ)eu}_h8b4H@$)94{+L#CdvJCV7ZJM)*)ZUFft-# z2{`#B^~FO^R~qy|5*L8)v+!**{K$=Foj^xDVQC^1dm70%0%#2Ajacy{CA6jWN#|>% zk&*DIA5a?6S1L{ZZ0pF^JxeQslzED1FFp@Ii;L9f zKCP4ZVhe%eX-ZfMhBe?@Rcxz!e3l~xo;T_&vZwa7$DTQ6(+gcGPN@}xKh*}@$A-Jt z;Ph;u*Qf3e|L(?jH9FR0G|Q1%&#Lfw4dC&ld7~m!C-+6?{99qGKf=}=fm@}K#3o>K2phbcr)tL%pnMwct;I4JS-^dhg+u%H6>{ua z<-=My6;JC6_A;<*_Yh+MlX(J{3}{M(ve;VTS_;6pNyzXFtc93z0+Q;PE9qbEcE z7tktKTpJn8QKT1uxH+xOhsezVU`?eqvCV?aOn~OeP$q^>eTPQcQ^*dbNpL#^l!K9UM^y*# zB!lN8Dc||=R3zXiJaIivGA(B>^yF_SwjJKSOp2F*-?g}jNah$|I8ABJFl;8J@fq~C z9zr&5#F{*d+zDAcSZj&0BB{mGtn@9^9R1dhtZfIbaQ z#NbICp4*$dfbCFPHjugtX>yS>1_lp+neay+dw$C1&E)h+D{2@C|Cf>08FbeU+=ce} z*51z`@Ni6c9XJ^W!;^zI@MZ$isT28k9dIRD-Rto6fgfts>*zyKD87d`)2_YE44?MEV1jY zT(4{I!Fh-a|MT{GxtW4|`2Ky~^+n2(LmH*;EYP}VOfP#HR%s(|tHI|g@S;Maob=J3 zN5-U@x+MLaDG>MTrC3o`+TGCD}r2KhU*T>Ofci+2r>uJjVi_&hR zw07j31O%_p`ZvL@tbw9`0D*gyI#GuwxqH%RdEP!luA#i&O@8P5t-lX4)s}jW0_ttB|ePJ*FQpmO^7N^np@Z=l z>Y+!@2PR=FT<6$<>-Ny(Md(l=`0hL5-a^XSMr|jsrS;LnW$?i+y_$>>8 zw^i`($3XRaz$G7;8!LWisCy{<9YVgz$gpvDi_kU?2Fh{Nw+88|jc#t?$zJ4s7ZAED z<6&Sv!`&=+`!jUi50n$AsTcHFjgN8!JL@d2@wkRT?IY0X8?4M+YD&QjE7T-dwNl=eu^A*kcNy_bMvD*mSXe)^KXGZJQGql$3Z9@J=LeHuJ@ zkvz?S_as(kC-5n?o~2X?JM|%wIGC0t1u65?d)EMa;%5&a@eIYsksQxqSPm?n4RIlK--y7^n8|JiF%<|`gZpR`S}Vs z=U|PD$gvna=0b@(LmwcOl;0u8U8(Fz?DA0Nqa}%)okv>*|FsIW9E7%m6dPAS$=!^1hY^Z4M#RNPD zV_I*awY~>g{D@ljL9-j-{LMgl0$nN%rlQ_FIB!SxT+QHFObci+7C<4-tbHHZ zFm}UHc#(uu9EUG!pmQGTZ-rJ(B3DoJ-*c55_dZ7}_6%C`AgMhk@HEy&zCdWn+&OI1i^GAmHCVX5uo%AsC^-fM`fqwfdLoYT+45fE*-g-O7yd-%Mu%w87gCPr z%iqUa+x?&TEeB+o;nY1?D#w)TP-7jQd7^_Snq{Kx>`OgGN$bA(T*1~HM&h$kQhW0C zLE?=6xe+Sd3BQ`+eTn~Z>UX|5Kh(5Wu|BkfTi~uSzmEc&=fKCIY2Cr$0n(&W%Mv8m zC{~`g7sIE!gQu=KI1YVM(2bF_TzY_$`E}IrMt*OEQtkoDA8PoPcSfz)2&~3y9UaQ3 z2A9{9Dm${~xde_UKS-V?+^1u|ok1RetQae!E_Ws2^8J(@52qRe%|UqN3e~H?Y0sfO z@AL;VL*Wb{_>q)iaxc$v!-ES*_!=lv6B>3W{j*5HN-X+;AbY83)GSgi0P@$-;5aPx zFnnR--cBdwAu!bUzngNpAPs%+X$F(Z)3}UVr%zFux9OB$n7qYFH4Y!ZxVD3V^*oll 
z5>n>+Z0ED@1%ejP-P4_01G6h{jM7|}=lZ|$eiZfSF{XmOb!Dflx|D4%FOhpcuBUa3 z*q(&<@w7rQ+FE-a((?g$0W(1&;07d$Sj5 zv;u4KbY%6nKEHnRa2@%j=KSx^Dl9_b@T6ME!ru6818Sy{P^+@C_9Z4|0 zZ3=YH2Nhj6Z|TvvyZK$Cdz5eHAZ0!=e&RCzjWr$P-;e88qAMZQUvC}m3nHc4$-5iv zHga`+>_=6&{R5xY@b@O#wzl{+Z_*;_7d6JWD2DAkK^yiNn!E?wZ#3Id=tvtlRTjuj zQJP~QRnXjW$dTT|My%y+*rX}ssf+$QzwAoOiST+kNe79nRl z@HO(kNbNNF!}?gNLYZSchTw~>zoavlQfSwY&I6_|%!Jxe=djAEl~MKR*Gp~(-DW-0aD zPaX4UX`cdvi}1~L>T{r0e>l|vt+#LUDv-KUtTky`0sVZe-m`H38+toykg?_9co%ec zKiVYTxr1^jvgxU-wy9BXbtCp=HfdjlEA}&zfVm75ayMOf?$gkf9~u>)MV|uN%@Ov-Kv{(pw{Sn26tzQdq&hU& zh}?I9mi9~bAfIo6v(ZeNVUe3rqcXgmI{O1@BWf-VBpcv-2e9jpY|MsMtC5Cs=$;b+?6v$2#XZfsFzxkF zs470=$?+=m^<-FMtNb23&lK`|V#}*QnIF089(3<#Ap06C_5!w2oA?2g*h6|p_b&n0 zP*UqXm4zOj6LN+chtk@5ns^%U4CLA!WHphHuaT|+$d)m2YD2v>aOrt?v;hsYMrEv1>okGvL~Q6ru2QlY}~~6;m_OPpB-EZ0!x25{4}Mn!=ES*w5|_{ za;@NPHOlHni{#9qntT%M9N+R><~Jx)-ERk-K1S0o0_|WpoR`l8xaH1|n$X6`1#^)k zqpZ71aXq{>2Afp57aBH1(z79Tvwv)%m;(Y_V%E zH^8IcpsO)^^I{)8p{z0OWh(Ydf3ga)o`)J-6Yp7FrSN{zDYY}H-Dg=hP|F$pBqS#j z?SEdV>iQJd7WBj~GDd?V_0buTdyo{@DUU>RJ&|)79Psq8;m{zSvJ%nMV&F9aD%9jE z8UMHm>4!tZmT=fr!5NgM|FsPHNJYvVx4i`m91Ae(<@-qlKYK%-aPeB`_c*F+jEhY` z;Ov3xLmYMVTtMRkROKrKw`1!~F$&68p@uPNtLF;jru;Ez=MkuufPT4(!x2Gy2hW1{ zaBw+J`Hs{5hHMOgFGig(a>QwB`iM5el^mt0&2!?d+x>{I(iXVd^A%uCrI+?89BhdG zPavoJPmLt}D$+ZRa{G{D2d!T^@V$Y*T#%=^(Fa$Ul!xPgVj~^_hHT(xEN)NW&4)f2 zRY6$Ga{W6{xX!mMAJ>c&px&|MJ_}b0Ajgj(mG2;76Yw*Q^I46O^!^%B{$}b+20Q0I zJcZ&%a9fRb<>A?FXhbGby+CQ6+L#h_U@cZ<3B1!E-^|-o+LD98gWZ74Rw9R^jrM`o zLnT+^u0+l@LQB`ubOeWa(D^jD8VPU|v}uJt7+FS-_#Qs)eY*)bUHjpx5a&r85iSTN z@20Pn3AmkkPNo)P119snGZ5EDyQlHoou$#vy_>Mvj&vGX(3z;s;NYs;D35*Fc1%KrY^W=MSkkQ-v+=MOST%Kk;$`f3D)5g1 z7Ef7oOkp?@;|_rvkR8vYa26#Q40dq!2GnWCf7CaK{BZB|)$QxpfAmyn*VRNTO7ys0 zHB+2tPokkQBy|;a{uANCE6<^*5p>B?uE)`{WTVvCXU-bR@l054fNva4$bfEMS^p+z zW86d|%(TY_Cxcg%&fikt7DErcc8#*XUfYxS=X3M5@_drW*GPI};9wg3UV#Pd4voG; zJJ(~4TnXEY)~_aQay4vJIsCV-$hA=}YN5$3uwt$s$dAs=#m;pk{g*)cI_=aI($<_s;tykiIs&P?qgZ$%;i7N{`6ZrB6Yfsd!|Ypu$`% 
zf}`+_DK8~Jd=c7S#B*FuDGvdMqxwJL+x#1f-w!62`0W4(zk^N}z{%4$q_sUpeL82; z9p`Qgz3n-Ax{R|8B`E80`VK{qAWxxoC%Y>F&LcIu(Wc{+T7tJrC}}>hkH$7Eg=^p7 z4P3#ScmTTZgi5o)V=6Y_8L)6)WJ7R04h={1@5)pAJrgNuGFXg4cFzEdBZl_>P65vq zu-r`x)drmyfb@I5##-oJ7Je7Q;*O@of=I|=^#3aKJV!ZOz+wh6&>6XQ7R+Fs|aq^?Kaa^!A8`o6&U0kZKOW&Xg|9*_RbX|VhWz4<+q zaDuYVLlrf&cX)D)+?9Ct5LB_Y7GQrBdS0Q;e4ML!Kx$N3dkLz9UFM zEo5Lcct4DV`81U9IvTkKzI;#W1h8_vyB7JH!h=gdbqSe%5sZGL&Y!SF#{K@8=bh0b z=iXd9a*lsbgFKJr5ULSK-vpkv;@L5BeaXMiS0VxaU&hw`2yOFH<5j5r7jpLyP>n`L z9Q~fpQ@y-m(CI1?vxgF{BGX-hZyCe(oP#otpjY|H(;QA4Z`hI@sWzsiYgY5}?nN+Z z2=8tLuS>LXUtHtOE997pESu*zrQeO$aU111Gtrvg#^CuRuy2GX)hVwa7_^}TqdXh+ z;cfU~ObzpY4fSRLk=GAmjpicn`;d(^-gx@oBJgVw-fqMiZv?82l+*|b^wgt2fo&o8 zyMgU@TD>d4Tm$)<8hQ`Tf!qxzcB6CA2|8a<+Z4Fy3ZQ1t{c%$64-h;S>R1Nlegn!f z*oeW%Ydy;82Tk0=avvNUjU{;<>R*DkKR}rRJaPWQNZrD-980kkJvVN^W~6Emc#cMs zra+&oz_*_Aj)28((tH55o~0HscfNcf_Hz@kD3yb-0(+5^hu};l=<_zMmFbGoa^6So zQ&6`ykWK^(@v8_$jRX>D(Fr&(3d%i><(wGuxf*UrxVjm5E{D-u&+7b~9!frB$rwT- zv8%>AS^|9>^_vPcs{rpH?7$vIDsDsq^K(~%Hsv#TGmaD&;Zt=eREn#UwCJ~?qsAu5 z3!k+TW$Dw+r)WJdyTkG40O7&{&*Tp%3Tq z8JyFx)csf_d#-Ds!p*?34Tzj!stLVa7g--`^Z=G3Gxq8tEtxY?&(J~{PupEEweaQg zkz+PA%?IrEJ1awzOh`~ItnQaU{0DXJ3s4Nej@RMY_t-*b&n5z;<6Vw#c*5xiNY*gQ zJ{0VnJ9odNwC{n+C{XUMp8%IHViDZ^?2Of&@Yk6%=gZb&4+>(dk5Xz&^nW*AxO*?+ z;DI}TCR1`7((R%xa~{q0v2C$6FJM2WQ)YRjU2F9azUUxQEJQvVK%I}!fhy$x78#$6 zcQ&08TR@w-*i605l}Lj-_c9@OjthH+#cck+!k^O1D~(=frkrL-N-{Kg9l9BHTA!{C zw3)`WGeOaaZAY-!4#$#^h9$INk*8c1|4|R^HA?=0{{;9yl$Q2cV7HgxNfg8I3tZjq zOpB|hl4xI>^SJ`9)4=LIY~2YUxQSYYPe0Pxs2bpK-TOfH{>g^VuRtK(8ynT;S+-c}4_Et!Udpi3Axv`1j(YqLZ zq9iQU2{ddL_UJHjR2{qza_?T;>Ugn7@GVADR@6txKsy`oxt({91busqck5_x-HSMv zG9y}Dfpf}yBigb&XolFP@Fv znoe)=4(!Y>^uY6+CSzr$BUO!PJDvfqTGUwt-L{wP8S%G3uSV46nw-YSro9SJHZdN` zP3YG_^lmKH_AxXe0T{=l8K;5&Cv5CEY-tUsl^gpx1kSj=wJ3ObhWV&4AL3l6=U^qn z3D-6?r&Q-Y#O`HE_z*carpqGimpzs&JgHCW4ctXN3g-^plWfnaIXvHuRdJpFIO@xU z6uL4^&bTh<3)*~l$&92maW+m~xUbdsu7axs1oN=eA7Uwmr4T*9#n7Z0P@Lj76F8nn zUd*)vj_#oRR5T`yRC~blAa9(pS%ro;l29s8H{#Z1-c{!t2Th$XaX)ot%63wWt_$ 
zkM^WSGh^=Mb#KC|&<}Ipt8myCj%uopzDZr5Mtfo9k+E=uC)(>G{H~&ng=aoJjlIYS z{PGmcqrsBphkhfl{bjIv`;axGW3)w!8X%j0p>xZislNP=_#gX_x>3;SWv*u z6l#-_BUyrsy5W6Om^m$ax$4$ky^EHf0=^q$KqJ5h-)NpgV1` z`%{ww3@d@*5ZtVdrE&Ie2zVEu#=Jp_Z>J4yf$lZKVtr1lw+SC}IrugU^y&lcXN0<) z-P#7NY9I$j>8pkh)D$UvF1+1MX+n4$=w1ecOW?54m@f4~=N z!kbHA~4154Z3ZNqs@jV?c*$@9#lHvp$eKNG6UqOMd zNbfqUHspE_u8PZcQohao*YuOxqEAoZVNHZyu0ttFjqlJ>jK;1RQ?wIybRq3X)Sg|W z#9!ddB(S+ct&OnD)q(4FokG4#PkQ--V9OY;8^GPD|<@wBK`v}KP`ZZE9I zW1*}r$U#52zaOZ64rS>(6ob}RD6JW^{^$8wxV8a3FeZ%A(yj9jXykexdp&Q!m;T}D zF-llLsYj^06S`(f_xyc<{c|VwMeLM2*rVRa4(Ml!WkApjxm`gGtI6q^O+O>qbD-`I zaMu-_?iw|cQWerw#^(JiwxY|X#b0oe<#+w0REfttISsp3?~3dCH#n8NVzsv zI+w<7wuZLf2H9*C?(-urMq&CF^8GN^Z&Uvt)bIfocRls@1?xY82V)8D=P_3NUBKTL zyc5CUYxM40B;b`$M@OW=lz-y=76TqfBf3%gTJpIo|18$21-LKa>UMJf4CRge)e&mH zOBo%2uN6{x7)pJD)!PUaT2rs_QEvyfUr2o~Pa5-kmDH|?-3ed6gvy>T>Tc+q@brG3 zw5Ifj;YvkPY~a2Uxtkz;)1js(=B&f2>_n&5K-u9y=_pwTIPeja{SE$l4sb;z?h`c5 zwSR|0FThp4y#kIegSQfs1PvaAiucl6>PxKkG(cWx%x=E%xW&u@Q}MhPNx4cqd$+gbdV0 zVqc-PavybB=+l`00chDs+K4|X^*B^ir0tZru!>LQoXx~n6!kSXwMJ<@)e7Tq386WW0$$j@<}RzOl+$yA>Dk}w^AHbcxXnA=kxP{vM=p(wuU*EI<9-`+RID0W6U=xE|o`0Pg=<;JpoT)fK z?hKx$BMq*YyB`UiPp%~D&>uet967)soojcv`~^<)uv*TtY!7M22E6RC4ZtSUqE*cV z{<+YgkFdzu=>xZh^OMlUFL;*+39@BRhwGj(E4;a}!7I@O*O|>k{&G=A5u_=Dwr>Iw zkd7W^LjDhfNj&Aqp?i=CcYJ0>3RYr6?M1nh+0Q6yi-)2al%IopJ?SL$(?~bT@c&L^ z*z*r2Qd23o^c#6gBUi?JKhJe8VEz$UbCCW|^tT^Sdq$?w4oXv}XWSaau{E@8gtnDM zHtW%PjRLMQ(0(>Gq=3tOB+(g7_a3;WW;^uQMQzzAdlb1oMAOuvqtsXe|Mw^~)4Jt? 
z6P{jmBdyB?xNpo>cOg#&zI&056nI`3i<1|xPoBO2M-tHCa_D_&FnAF79h>}{-xw6q zyS$N9%duYBk?KSsa9>dY{H1!N$v_Lo0d;Gnp~y8^a-T^=&CJNU+#i7c#X_$wdfp97 z=sx%uDeXUY<<~x#5g4MrT$NBlR^%WK**@`X3{T%$j|}3!c_!ZA{_6-VjqA?o}1SN52pvbGTyms+SU4I$dzYZZ|8qFclD97Td^X?Xc6n9 zf$k}}LQbRDI7aFE4$oM76}#$L)?KllpOD*`pMUW*2a?zd$&A`==l7jQd7qLeg4=F1 z*>Sgp(B^yW=M`Wvs`^~KjPH@s7m=MKSfMwN^btsbqtN-0#*4`CJCx=4Mn#^dq2G=0 z%`Syj%k}raQqC|iuuVS+Y)h!^PFjqO*vh{H6dw9r590|lHdco`gwm^qua*8QwigLl&y{eGT2KcOe*pvoKI+> zXv1c7>@O_%?NHz+Jdw|_hf=2~xK-oX1ag+9$MXkeT|)MaI8+6{G>tkw0IH7A_ct{4 z9CqD4(^;tDoRqEj?Uelxx!$Fw8Bi!cd6oeG-_UtKly|OfOu%(9v~w1w5BBO8Qannj zOZgpw{7USTJ{tBAJljwDhk#q(cPUy~8D9O2 z1V0QNr^AQJXz61>^A_-Up64R+J%eOUfQx$Sm4j8ax6}rFuJAQhlJk`3XrmiLt>xG% zM`ApW(l%l{ICvgyS)}J2Hm(a+U_aXC&QR(9tcG~zB*pyH3EnI!!oa86y^J*mYKFT=)obE4b z2DZO~voO5`4E5ndDX45reevssEbgP$`N&RFS|v{%DF8Rzsq#6cHHIR_Sv6lS=w?)$ z&q#k2Oi#n(H}gy)8{3C!u23GL!Td!fm%?00ru=75G@AcbL25iG&4OPFTVhEbk$Z{-X24r zPQ!)%)MMsMWj%Y%Hhl=Boxwc`sA>S^hHJcO9B}l!t<+zs}1X6(<_7Z4?yw+9?C`NrGNDu_cf5qH;@|V^Vj0_ z?*NL1aOwp#UAr@n|L&CkDRh?xg@CCyDZhnR_rROm!R01uEg$4tT046_3w##@I?v#J zJt*kTZ+Ff;&hz=us~fc@Aqz&BuZnEHi)7pi{`Qpg!FOMy#ce=66HW}Fge}l}BIUos z`}eT|SCN?CsN3^D3LxVJkwH(5E`}8`Hqs|()%6jjF3|sV;Pz}?=UZrTRO%t|TrE^f<$>`+qKikYBO?YOtp=2Fc5Vq@^Q0cOzl?+0BuZHgI(h zl+zMrg=sqI*ZPKP7mNyZcoQbLoT%P)qLf-L|T!s|(U_3$BJs3vrTmW6%Q`HmL z94}qY|2*u&NNkhq;+FF79;Li+%9yy-f-bpd?OAMDR?79%T*vyWAfIty>Mn^z;5ruU z++SkcVdFM!N2a2=CC|C`94^);VJAIhz-SEibPka2Cu%XOQ#NRR6sdIQbwNHCdFQ%h zWA%E%=tA&XM*Dvcm>H+kGnwMZvmHtp?duFYbbZ5gp4SeO_Rp|4n*XUY6Kp08i*OqxDzG+X2IJmebzT{8obDf{xa z5hcdJG4eJa^=uJ?k9_*&9F7>0z40Zr4h^W zlgdcArGde<)Qtd%mWw_OyyDo42qP|#SW1)z8e zR>-%mFiqsY4_`UuEJz;yg)!P6=A5FEX5nfNwEUeGraN>f4&^!{hXbhP zC3?ddSUmT~Pb6(&BrT0ruLCxGM6eLX|N4V=_ffp_PWZlips8!$pTWjoMQ)7Uc@kZ{ z8(V5W)adQ*9CY7mMWDFE8|VHXK?aYI#?rJrhp;W48o3u4`J3`OQFr8%ALh>al5f$x zO=x~c>ZwI;N51Cq#&e3sQSU?eM8JYvbTS zl>!>aosImxoc3C3<{TL)xwe zlQcN}99F9Wt;ak3H$$)O(YSMVH&lHb4xIvX&-9u|i{*Nof|UF%H5%#XUF`gAc;0`* zg};I8EcDzBCNH8tMoIjGa=W0jO}KY{$@mGYk(buwtxYQ9F;v3pxrV3_l3o>wG|K)A 
z>TOL4&ro`8YI&Tp?U{Cj{sVdT8r&X_$7M{7@%U~ZLxV!p{s%2UQ~2X4jcdr!0?8c) zj(xG$GteVf6&M@w5WL6*?bh?`DdfeGW>>!%DR&q=bS2$Yp6Ne3vptdDVx%gKALVI0 z^Pv7#p8O6>MznQy^C|eLym?B~TjA;y{Bs|Nr$|Keou5(aeL!#rbcoj5OoDR82v|Z} z_b+Py8<}ZDD)+@-3N@<*f58R!c^#wXcd7Yis9c3qyMg%2P*-76cSIJAN%S-_{uXq5 zo%(C?{|jlxK_SNg+_~@?()bP7JD0H!8qb2-^WjJ>o-7Kb-Hh}Vqt2#K^#^G1829$8 zjOm*JzI*XKhl9s_%86!nJiDY9*Nws9OQ>`JZ|w>?vXD~r?WIgZxDxf9?xF2^2ktykp5xl=6bVxuGy3?q16S_jD}a8u!qjz9)yx5z|wtHqmk{W zf!mb?M&Z4JUAe^nc~UooHWPWW5KGk!Dp%%dw@{0Ds`Dlr6c9@Hh%AM+wSdDOP}PheKa_Mw@aw6EYM%(t{r(?WnpECD5js@jyK=ZUSA5nQzBe*B!51b7~}smWhpzZc5*?8_~NIJapG+d)=xt^sC-}=;Bh|~{2O(SMyL0hD8N8~vL zZhOvDJoNi1P{#gVI;H2Km)M$mnt+S(TE~N5Mc~ar&MM^e^r4l&t-nRyg@Kxbr=1o-hpz=J~a#c?j(%iyzRj&3!9pjBU%29waA4ftT#Wwwfys~qjW8SG9GX3L1++zs>hL$47{#kNKFc!mNVG1 zu)7Jsv;~Vbo$D;{|8p>M#4tbhpb#{06v0(^SK(Gx_`8Ez7Xfzxq*!m-GgWT|`u^m0 z?@mW(zZ|&wAV2P^G2TfZN}EluqAWW0DKcn0obtSxj`gVy1pDF7RIHKl(ndn(H%Q}b zz!qvK20r?fgRmHjkOOxeEC9PisMCY1I{aq`Q^&AVLodpa=i}t+2dt5on?N0j)E7^? z9)tJS$B?79(eDWG&Wr|MpnRWPp99Jrc6R(JYCJ%>Bk=rtW4T;&*o9Q7JhumDx&9_1 zqt2ptr!Cw=`|t#KUBv?GEk483S(G}AmSQvQgr^>zM>elwIga5`T!jkmuw96>xH`ee zj+e1uMqV9>q*lehdk5?A1lm6rP5zrwULnVDq}(;Y=g_amdGDUhKk)yWqJy6AT8vap zsp}%;*FeH;MYmCN8EWmt6IbvrLgI_zo!Ae$8BI0KQ($ha=cc480k*G!(^dT9rQk9V zyQCL39Bui7e2zjIW$;d*IENfI2qk=jG>wJ|Wzf<)q399l743pp8~mQL+!?{&If|dr zP0z!%nR|#_)%Omi{SG$zz{Wzj zN~$M=43?qo(0jM9)(2h9heh~{6i0dMjK&wVCeMKPGBkS=HIBvtKaO|lNnCoXsg(33 z)_XGf7b8E?=SC#?iAEi1(buC}Bk-jfBP#X$9lVxO@F-!xqbq}yp(i`cgC8U z2WO>UcIq&ORVU!`Bw<&ZR3nw+Plu_g7~ocTapLY>!Z#4gK)K z_&42_-cuNoGx|3!XPk!OuGgzP>;o{?@Uj*%pca;@*W}sn%$Wa0dHx&DS0nK_6o4FTCxO?GM zF!`KRS%7sXd7mVYGXQ<~|CV>2vg+=7pWgsHyRePlA}Pg@?n+>7x|1 zQY6w=7si5n7OQJdW|1ZXN_C?}zZF`4M!lXW>*{K~yLPlNiPW~0Qoo?gX7F|gtz%K1 zr+`rlBxV-)RYE5#@+<>sHA<;5mfKOrHtKZzZ#;ZBOS|H!(}(bI9NC$LpYrMXG!?D2)p9x!;J#v zTVl#O}c75XI5Yb`~6jj8PbZ?o`i57))^FqF>D2*JygRF7+8YnGsH zz0(wUQ5|ZRg+95VTOQ;hAH0~rbpmi_M~cP(vAYmrl=T)VTtD&~G`*LS^h)f>XO23Wc6(9E?rj?WrT;61FF zI~blLmoPtvoaKi*IiS4z=UqK}nUb54rZ_M;<~xKt<7MRlN+Yp4hQE_v^(c`%d&xPQ 
zvfZoVD7NQ`l%ef#9fUCsjJf7&|E}nNMapqcR5~?(x#7O2!sQ7ZH1`?st4j*!yQJ9$WF?CKs)Z~BHByn#L)grd$Wv=L9>q9w z7A6zr#bX&3Q;xo>pLo2PWNJRmZ>!*yE2+-#WXD!J?wlv2+eo=_f!Cf=p{H%6r{+9! z&A-^Ck#Fp^eUDq8{bB3!y$dPex+Kf#+OW z)H;-ViFaC+G03fTL_ORD-Z(mG58`~FpOF_1U_+;o<|uEXJqC`{yA~_PuNV&iZsTnn zrPOqCBnD`-70W{k=ZI3NyAV*DgZThjbmzfR!<{E|Orl+M`!Vqi;d( z8e(OhM9bSF(Fu5?)36q|f?YlC-{Wm9sPr=LThbnTzOa3um$|BooIQ*NJ96S0|8=zY zOQ`jJaCW485p7^sr1=IU?=CQI#B(9vg~pV{SIG}|qW#`cd*>NZZ@>XtTt|S0AtSc! z1>u=1I-f*Z@1oxAl-+|icNJVQwtOwD%R#>PL!A<#{BN+ni;(QfP;UqDMr~p_q{=q6 z5nOSE#0Ylc&@MdP4M$(5U9;tKl&CPjww<1FZkyho7RwpP$>jYEn(yX~zJ~Yxc+wxq zy(Q#11}`#rI~Q(<{Q=7VcfjN>+HiZ!162G~2>+_ALRp5bggo)=_WI0U)!JfabNovT(Z zT}fcHfvy{Z zm6ucPz~VDf$J72KV)2|)_4IQir?@8Vb~vXcaRe)c+TG8Wh1S6y@kUa)D$QQH)+j#M z`8c@$5qC+vD+;#pU@{r|sE6bF$)#wN_9PB&IalQh@=uYen<&?F+MQ+YLmu^hD3o_5 zbscbL0Xt9m>qx8hJ}q84WxJm}D>9sow$qi-w!D>SCs*)pB5l@o-nobELwtmLu$42Y z=>Rou!87+PMp<%}@il8|Un(Ix- z*%`{Z3^y#Z4=taw(|Lf%8B6W0yiW}O+xZ;;4SeP&8nwB8mqB?kFmBG9o1oD3=Z1RwW=`uS&! zCpAEC%`x(%@=fE{Q7~JqJ<^&NRh7%Y*G`Tso5Z57g?TD^8;)o;#m_E(w)yNA;r>a_6=DDT_DJ5zAsb(SbBE6iq!NY?G0FVTdz0KAKQ^{&~sO5+As5zlL1hm6_m(< z3>~EHbVZ1#Fs`G-ocsz)Inri^mhK{4$H$neyRc(x$ZMOF13l~qrCqOY)RgzQZcnc7 zsnr(9-C;&+vYbTHlq03{KlbY0;+bda7NE3t=ujK1SR8GA2KAigO>Roa%2QYJ51@^3 zG|n@8lKAfn4F`bJLE7Wh=uRF|7p3&~NOhWAFCnv@y1Ei;&>2`=<#7X7_Dejs2k5C2ama>#ppxas^&d^iuhhL3MmxrkqZ{uh@x=3zwE=ngx`)WQ z54EBu6xfKwyY7B)DBJSXr+WNbi)VOPeh&C1TseSr6;wMwU7jYofOhDukl$2KfvJ-H zJoSFfy{F73BlAatT}cSirv=gPH5%;@u9fv0Nu!lOKCL*Q(iL9h69PMK{^nu!a8|M?Mw6*n?i{R=y7{Eb!+fbrjRQg z8X8a6{SiBX(TKwf;r8qFdncj|G5F#MLbM*+s$NDe`aqk^wDQTM%N=U9WpgNB4Qq^6BBZHM+8@EL2TKRwv>c<%60}A{Wk_bR(tfjgm>_%%%T& z5-;=2@#ltUqLDc=fYUwq&hgcugw~`gix%~Tay^mghQM-=*OG_w(P@1SO*Kc3>P?LpL|*^i#2py~D-79-<(=q2_q_ zAF3Y(I-|n725~FYNrxuus51pkuEl*cTfGyH+Vf3vA)WTUJyF3EZLQV-h+` zd@pduu;Yu7O8Z`$X<_uZVDa(w7 z%TD=qz*?N914|L^d%@At@ctn1XQvcnZn(lYMxVo7!L#7RBa~K$XtkPHJ(@_($|qa?l#4ktP#uMj~|7-(Cl%_Umdw-Sd=G z6uh;Wj`HXMBqLu%>EHB6H}uwb16>^ac@*v}pe|QuyZXW?gbiu&kMqe#Y0IE{A}}?= 
zAFdmC-y5&bm6O(ymvRzl&m5^6fQ(2bPYbUBv_?Gt06A^|T%MEF5_p}z`xB2$?YK8|G}zi!)I1hy z-Hy#^2h^_3wpF@79>@LE|D2?Dd}}YyT+8A7{Q&sxi8$M#*>+?uoidCEIWN%uD4L+` zw)IOBoEXZXO zuoxR;BN|_vM4F=4(rIV1;2DjhcGu{pL8k>k;MnUN4TJCWWTV9^k5^@QzoXi@_kG8b)m7o8{tCHA3{_MYlfS{7`;R?6E- zo8u|r)9I-;g?st9?uc#ZO3JlxzbpEXj(_89le5~MtGyMir~s6%650#Q?a(9JD@O{e zfx#~F*=|(}HqJfJYUWm+Rt2(`so9fg9I+CHnY_!z$1$?3z~-KDdp>2M{W!|7Rb7ho zI^WiwRy2+>EZ-idqh$rLST{h6@nH5KEwk&6+ruH(Ze4Hxno-AC(#BItDwbevkoZ}o ze~BlVkoint>S`y)t6Z_=NXj14IwJPZcQMcPEPKF{3~ExpoqtNg9&YD;J`!hSr!3&$ zIh((d*Vw$yl{gPIguXyDr`HDBI3Cyz%sOJpjfm5T_9!0Pc{lB50<{zlUYM(!mSGKJ z(0&6pz#UPp<92p$Fp}Y3;>XDI8YPy+I$S?@*%(l zs8z5o$^6cM6Glu{vprL31+_YlQV9E^mpKXe#t~ecUZjYZbKjZ0rG0C|j^x?tI+`#5-tH3X=Oz zXR7X z1fF-rFR71g)`Ny4NMm1QFEZlj!)4%V4re{b(Z0)EXdk2eR3tkF*3GzgUe8e?$ILt< zHJK;NDCZ|~>CxM>Il$E;;FgnX-nZr*!+;k)&<=TBJPh7*8c8T$(BsM z9Z>ZO)Y0FM@@FTNwWRVjs)}R9^(ki#ys|w@!-H!8-|e4%$`g0uR6|~)zD;Q)^lS9g zJ@Ssxry+ky$hz}Rt`O(|_ZpMuuE5u5C4sxodLi56Nq+;jc_8?$qsDo_l?AHj z3Uc}!RM>=M8inUIs5^o>wqd6-sJSy#eGzKk!Sk}A#hL(|b+Cx`NH5b$XTcWThu2mC z9<;~Pw4e21;8BsFtxwTXmH`4+FRmj^ef;>6z|fqvM>(|I7xrJ&$WT zG>Ji_`_WSEa#qS6h%^*s1ZD*}^N?==5az=A970F?<7c0ThYi8S_$T>j^NcEd0i29( zd!F(gA>TnuI*n4@wQW4?m(i)Iq&o&AH(_I{(#|+fG7h;p2Iixw!EY)v!M zvnc@ldPlB;xew|dMv?|&wKf1*5j5aFv{x&=6E0LhqH{t8+q3RyQ7xVyfZEv@&2=we zR;0yI>ReFHRj)7c&Xp?du#^jdzaiz#fFnllD2jJmm-!k!%yrm6EYZ?Ug>e@06%y=yoIY+z|3*0@FC8&l#Sx;fYaG zqCQj&U>S^TC4)mM>9a6SRR{~9Z&Waprrbs4Dnq-t?i9u`2SSBGl=&QzIv9LEMHXv< z<#K2N#=PJy3eAwwv&6n4gS3sHaC6H=3**m>&C4*ebfQ!)~P#`9|9fX6iy zo;;_YVSgtpsb*jm>_Z%+X2;|5BZ(1=(P-fH>*zlk&-&lbv_?Mhj0fjdEjbAf(H@P@ zqk3)#yp^Y&0%VSNTYoF;o0dXB(HrAGMV+@|+;e)7Yca(5DiuZ38UkYsjP& zaST(x%9{PSYRZ34GW-;4)EFvDch7HdUh8|(CV-bz=!-3DjV|@1lpf%dozlyK(*(W` z(_U2u=W>)~ujftvw}VG2?-EI|C$vWPs~k1!fY!LS$TLS5fL|Tj`va6#kNh#pwiOzW z1}1R-J?Wi$DS+lXThbE=&ra>maz!&B&e=GhmY3QJQ)VtkW6I%Q)&V*rb*Ys@&`{@& z9O3JVRMw%DY(U#9ZCg|BUNpc~bpm!{GgNoSLlSMBo_twq%7xASmTO1TsPZ(%QsAO}x^y)C)+&N-^K_{;XD#`5+z?1R2&F7i&MEO*vaps$#KJsgKd 
zxFW$lxvr(RCsdjh#)GjRi^-++^ZeIW$>$hKZ~8UO!6cc!d=?TX)-OFI+;O^LGWz?{Bg|l&;z;hVS9FNUMTXKebBLnASz3gwuu;WSHN$0MmrC@B#zMfdmQncP`n*6bC ziy@II)bbP@sDeb~Af0P2{=*wRdHwT6T)T$xGG%3=jyQg6BM(V%D>vHZ*p_3s(N4rjhk|VgT+rAv6eh%3;9z_bhM_Y*O{M!HVbG>JGecmVz{vAax+k(GyVzF!K zqSVo^G_)7sd7+k<9Jpj3%sVB{b5>28NV|{$U)_(M5uO+a%y!Ur$N4i`k4eD33SMcY zuEJ;6oqk5m(liU4t;xSIyC=obyIA;5#dc^%8=z;#)pAXSQOK$VdfPJfgirC;ICUeu zZo&6@pH2Ei>nU7g@D;RQg&Zeh;qCvphs<`&aYRQ96Zxj{bxbafPdC!L;x`|9>2B&R zaL%?uAFDTTT_L5TdG?k()3O9uwLmuypch8*a7BB6ESw{4rm^qhjQ0w>#c0JsR?@!? zHWPR{A(Ui0JqNluj}wiL3Hc0KB>VNQeS3toiP-g(aJmuj>;#hS@X@vRmx0&_Lt0ly zw)Cz&H%Xi1btP=lYsi$d`;)o1x9!dzPwJK4(X5O66T6~cvyc(}n>eVGNXi(z6Wh96 z&4quiR?7gIvB2(lTWNTqw`k8)op-gf_&R>6@0kpAw#Kd~asIm|5IUar45d^F98g~3 z$gj7Ui5w02eE0$CCerJ+Y+F_^b&u`kYX%6eZN(Q!X(fA{dJ%HoU`{{ zdkx?E);FzhP3}R~LT$A4oe6OdcsZW)i|LVoUNc>;8FZQw)9iki1&(|Z$X%~@7Sgkk@1EgMhh4msytFIML2fsog)^i{3uSyAx@-f2G^ge- z#MNf_H&S+khW4NyPti7YK&EZ9j=DJKMqbs=(u%~@DmfHQ?byH^+Wb;DvKi=3A&<20 zgYBnfQpe@Yklx7S#XumRmI2YfQG}nmv?%C&ykQobzbE z60PF+a|>bP_$f7O_~}0>r>swwP9yF%+>SSoz^wmh=)zs2>d2o;suO6DPw{Dmg z;y&R=TL9b{waAh0bTC(h0<^V-Y!l&|@M%@i3YCQK?5s&i1}g)*v{cIX@Bb^v)zH+k zLrAd~nkP?)a}#<&Rl=7+-R`B55l;zS0XHktKH%T?-;ts+-ckwTy_Bc(zvOh~VFmxr z&z=?~xDw?!LO$k~tT9i;)$`IA?-`UKDM91@$la3x})ja?&rnS^*1(IuX8~NfMMM_<1-xYWIWt5rr1Lu)$1T;}jE{{Nc zwaCgJ!B2M*zqGPs4N^MaS^jwPc%GX63T}2UC+9jXfL4@%%FHE%Y@;5>(aX7!`?Xm4 zf1v%Ep+)z^tcZE>Bf*j<;I@Sgegyi(IRK7?H@8-@Yj48FQ@=zE=Q z;l4JvM2ahkUyp=YO`K9Y*%J`odY#nl8E`#LjRvF9XMz>s6$|bp)&LG%30xWS@pky4 z5MiFb@*a7lS($m{m^S+k$~ZUdBl8~vE=F*_07-BO&@ZQD-a+ye0>erX=i8JKJ{3Cd ziN>uB4~Zjbe?Z#G(UsK08UD9Ni?0C#V+bFF1164-K;PfyCajW|=m zw{xI2k=r%2{xImNg}BancmrOkpe_DNJkP^8F?3%BWgXT98cx6o_#rC@$?VnQL^`X0P(5O{*LAkA%;6UDM5$98^g%k{pijibPlbj{>g$z zO|RAXbK0Pcev7T&&Qo`Ep69pwT|a-3yD~E)Z`U&a8NBoiy>$}b+$Z-czPj2(4`p|J zRQhc#%NYXabmk@PNB+ z7l2+{t2@-MwW0)k3MfwJiz&SZ?s{@9rusR3Q&yW+i%kzeNeVmHUA>j39Zatcm5(Rq zN;qRoSlp*z0}u?tE_jT7zPl>OjfWvg^_1QWeX46rpHe4nZEC|D^VWK>r#B9Kr5QJ^ zXiBNJncs3GuqSrC!8bkjy|Iw(A!*s&3SZ~pl{^&s0(niMmTmN~U6t@EIXk;ZUyQr4 
z6@l|H%IHa3>%&xg>`(sn@tokP#pas{T-G?PQc#{>4fVO|U>cfT+q3j@29o3FjxwHz zye`nnH&N&QlyOSLJ%V6kv6&q>#>RW+Bio8(NoZ?`(YeM zoYTS@n@>7>9b%)I5~|^md#I^auxI1hYMw};$3VVujL5O%4&EAgc5IkNq;d>o z0J0*-_nUxaI5b~{m7_%Q%<4SxToENsYN=Hnd3l0WEoG)Tg8R~M^!btSNI1&W#&>7h zxTe_NW-a*Fo;0+I9O`;B zWlLK%ytNRk9*LGbp(jGCc|v9b$!l1d$3QW?X)V`SjidF)pxMRgpiUlfhR{adTA=&A zqb5B`nd8Y{85D6g{6fFgRL@wqDHllgZuo z_!j!!z4M(bxC3mpquDHDKEFM&95U`*SD(PE)>U7iS3~=~FRf^tKF~su(o+16xI5~n z?JeCj@^24TYtuOF6oE!}e()s`!_$(LR?drQix~RT|JJ{p@=cPm46DHr?Os@rXYK=Q}l23q~K^8%x{ z!w)|om;PwLCxYrW*&OLocFQ{oi_p+z6{Q_x2cU= z6);qFO67V&9clF>?oHX2#Zv$VqIt8Uw14+`RG15YNCO(%}iRqmN#uk zrB|{Q6I4ZF90e!qmCr(-r|`f3SGS2ZHR-LC>l_yG>3UH2Dcr=DTu``U!CU#F7TXJm z+`D`L(4NM7TlA*;fTb;bnTm|iYvW2gPdC<@)1I3_{DVogiJII4W`;xghtkXbI+lta z>Iadg`-6`Qd0xgBJuedpJqsFdLB^H5E6<1JYJFeAI+G7RhW6e;9_FZR@+6jOsw9}q zZz6d)OH66&x*ah)WjE_Wd0s*4kU+zLH8E5sSA=L7~eQO3!9|L>5O zr{HT?7h2(Kl;KWC*OF=jE%X%F(c>$})KJg;!PTCGyN}}rTII@+Jf1-|1AVRJs6o$o zUYXu=&vkkd>^}wU&d(5|$s22aZAd=7Bt`zcGN5;?^Kk0rjB#5jU7q9BKpvhc?o4kr z?H%M?00R52GteRGXl4IhVI}Q}7I_P}dqi$pujX`N+>_tHcfAhjs_DEh+>LAf;`n4K zPCby8M<3l8zlG?3IC~=pKGbYm!LXWD4Y(QzwMd01*~rfANjW8Nw-n}klFicNcx1bD zoMeAB`1R~N_Y0X$&YuSC_6GDy=z-9Bbp%|#H5b>QISxMpUB4I9xQs8&Kx|+2Bc5jx z>vAaJ1gwM%T)s~oZsV)1mgnEgam@X>U()%WJTvY*P?`bMz>{-GlEtY1H6Q7<)YQ*X zW$Es7Z}2lVB&1SOuekrIG%s8tE*Hdec zbr9EjT4@RT{%xQi2@Ty!8Ok0dtRsuKw{teYo+Yps~8vSvH~M}X%+l;-Xd&a|>_{b}9PkbOlc zZz1yb6~gQs+ zs14jNCwwvGcF^9p5O+J=;D}tFRIZoZNS;qpA7|Hm#CNS*Th zw%bVFsaKmcb5G_6`cy;ilgZDMJM`-GqqeTSl5Q8$rqX#m68AZ{;cc*>7vdw@L(lg} zG;R@2ABi?|2DSbMy>7jcq>gw$M;`N#S)-}RBUmOIsn-vX_e-I&xrDmbCxh#5Ctf#Z zx|XCuSG&li@`L*X$TQVwW~HdJ?Oa7F&YOAPMorZO&evW}J*&a+v0&swz7>GJgH&GH zfl|^w_%8auMH`UL>Nw{D^oSVAhgX7Na)jmLzZ9menBu08G>^d1q*4rf4@nAk4;l0& z^iLV}khaxN=Ad@7*9Y=jtzSLT&b+D;dy(H zH@&$9=)v$RaII&d6h~No2xT=ARzZ6lhTrowyo4jsfUY=oT1##>0Rnkbf}6i2$o285|* zwp5PyWQECvXHm1h;t2TGkS@;g+K&`-z^ZcdPTKDUYU|FM1JD4^ z((74I4HgnRNi@B{&iE)2;%bE=xi|CgdYq@p{Tle;AoM^T&+g!O8uWY?&(9J6RqCc6 z!#?$o;PFRDIiEh&caY8lk$O)M&(^8I#yFS!+zGgfwsMT~+mx>cv&ApL&pQvCrr7p9 
zrUOA4mf>rR_>=?RdAy}rw~kj_N&d%^(%p`d)-lxGONQZr9Yon`5B-|gg{GND>I+7N%T;q|<-V4CF7;8dKqGb%T2qI@{J3t^=Me$adGXyK2z#Jv(43ZB>qa zm*`Nx>2*Nq4oFgkr)LP`9{gN|qs^JTkCkC5S498aIuwEE%D|$l&6Ed@gDESzF8cw6 z7)RF^@Ord&>WwFhWN_eNF?C73%^yM`Gr)t=RBkXQcWAfoE5_vTBaxVgfo-XtfrlI1-mQTlHD z(p3nkN^hUP*U`q+#FSf1pWBtnx%s_-tsQs{3W;$7b*@HE?MW%CsD~#>9}jKaK$@Ce z>9na*FVWF{p-$HH%+MX$Zdy`XNjrhIsO4UO{?v5#804_DOOlheft4JX-wH3-H&8Y?{-BrGxjT;W9S6@`O}VZHb#A;?QXBTc ze5m0f(p6FKO47QbVNGYgX{_FHt{Z99#*nQUnE44b@)jj6An#Yn+u2xmLw6aqb4`nF z^ZB23ZlW4$TEJEtu)1H{T=dE`ERru1!!bndrZwQP37)T^PS=37 zBG`EqTg(}cD~MgdIvYoOc#8W?hXe6}}|MqO1rL`cWu6{Ga7E-Tm)FO*FVUpV{ zyGS}u`0!0Q+y!9@&w3aPOX;O5zO?k;&GUxpAzzH+whPUOwWls1x*+`*$ddbsOJW69FfDELGfdN;k#*YMUWtDhs@ zlkj0JaUP@|C!wwFLpo-64%qt%5cDB@DUdPKj6VA9q?!jlo(1Y>;lF-Z3gS+`Za(0* zzH@j|(^+7e;p`I=MKO%*oHqv>dpjnUjgU(8TVsF$?Z|s!PI;zqakNd{t>js z`Eavi@18L60O?;R#~Z+?ZSBdOQ^4^^c=lFgn|@E%+^LBUqWl>=Ni_|;Z$v{mzgBC1 z0+zhkR#P0NC@}Z@_V}4xSt9BN_{(1yB~4sdwV=39U@mhN^L!DwUKb8={wLv9-f!F z?E9XGJd*a8#<+mgZ%*!{qKtPvrlWB)Xdx+bBsr#48l~B?IYLddoxD<7`zDSt42qJZ zN4+mz>KHjjYInBBqvY*gRWE_FN^H$r&@lES*Pzp-psyk;}Xy$hhysNDG4qRAC zyBviUky_GK1^?;wc4w8Au((TKy6#E`v6*^0`Q7e@l(3fKJ99l3c@O44=^?+^X7Zo2 zklG_$3FzvrJYkdg(iVN58Yo(6tyYoaJ?@ z%t6Ti(X>}ae63bzQrTlvXA0fjgnik@~OTToM4}1A(p7Kpx)v6 zZDfFZjk%gapRDs=--W95gcte!7Jhan;?O8Tzi31B#7jCeX}&JMe~JH;vFWmO-;%^l zwtrg9GnP<$DaIQR+%K-xGi`+C&{27)@OH}A!#XbNZ9X~TFU0FuAiNJP_d2b#FHqT& z(I4w7(DTVzk4%x8To!iAA*4DxG?CWw&(T`?WtI0eU{kBQCVDdMVCPxv$~>|}&9aEt zz2RWTXZ@?9RS^JB2@ZZI>@T%IR!2jzQ8@Bx(OY=R@W=izGcs_vf3yC*2P z+j#}KeL#FiA?z=X1&S)9qPlbe`eQyZH&Bl<`r>-m`o$PjE%|kW3Wi0?EF9Ev&?3h#hYKP1HRwfIkgD!zn#UrF3m#QTiz-Kg)cNc}Tvy%b6HDw47a zJtX$Ezn1~Ga>|kKgTrdH#fI}!`ul9FEGEttVy1Whm4DJ{|IV<5UEk2?eZuw{0=H<3 zIhNiJ%J)?5pAaucy?aM$=TfFTJ@4CnhJ17WgR;X*8`a)-B~aWEA=T7Mne4SWbWJr{ zwa8mFFgFs@-d8ifTO-b~$iFhEMz4-*#XKANVIbOzc4~}wPqY0uLfjoQPcBarUq7aO zdr!6hA~LBr95_Fuk27_Z&N*OfMQdfme2bs6=7niq{QH!oXL|blzy3l)N^w(t$!Et%9$$0s^RJL@wKh&pLA&ti&b0*Ou&e3y4+y-Q@>lW7|2}hI8y(7MdY#YP8rt2u>g=oj|wDoPotwevG 
zkM3!KqB6WWqo9Uoso&vI=YK(x_GUfy1!&J{NP!;&7E>#J(uq-VosmN6>8`_@km|`U zD_I<=-ThDF=w0tViBa!TyWT!$Lb|&1t4PXCw4zpgY0T$NFeu%WGbZL7WlM0zPx|f- zm1$n(_K0Jyu0q>D-G@`(jCAdUIf9!HyU&?Y>!_)Hi`2S#?%IL+*&GG^3BA-QXvU0K z1+e=jSgj0AR6$RpiT2l%xe!Q)!#VGe&Rr;f0NuILumH7XP`DnQ-KEn;LelIq%l-RH z8cJ8mie9uU#6C|~UQ(ReIZ<9o;-<7(HXZyNb4}y4j{esGzayR9LUJXZO){udW~n8T zzjZma$jC)oXFfP_{KK7R+ymM@U&Qg!D1RpL1_a$XPV@v+S!5u6-@+!#p7 z4b;t*RtZ0rsI8go)RSV}LPlxQ-@?G2wM{)L<(;z_4hwF$kI>Qz8oi9J|xlB9N()(lE?4Xd8Z_DE}bFKPI4dZU)(jFBfve+5$DKsfsr(mf9^{Y%801O%TV znO!q#&%+&VTp#bqUIENG`n-`a`zz`tZ7t_C<%#_iFe(Q4*mjY@DN~?ste>ReXplSZ~75x2{#kw@l& zV@ot=XG^m9J?b!#^Mw^b#Q63D2F5S^B@rTf4kwPv{m`wiGNxXS!d;f*< z^L+2k-PK+0;-9|Cr%9@~7q(%R-^^7J<@E)=Cy}tu=w1<{o$u4q%2fN~_Bt2S!t;sY z3DDv~|GWA8g;v|hUH{6r!)NYHgE5_W*b|P(QMPsO%dcxCYl5pk^7mO%=J`_1??&2A zs?UHv#c1KK#3}pSn42pawOQOlzK!%*#IO`$TS#7dSAQCOV%SnzQCi7^`Q}d8a}it0 zc5L2Q=~52m<2lLYG|qG=Wz>-KJS4yZG>fMYrQWH1l}%7WN@ISGftPZ#W%B$=J}=uY z5AR3u)w-MyyVi^2IB{|fZ(3EkM)j*cL2f+L*s=^mxRujfM>T8l(Y|Fw{N zd(z@<(8=r6c@mIcOL^B)&ML}tFY8Zvdw}>Wq3#8waAmxsE~PhvqXIUiYkZV>)r9pT#Jlm8^J4T){`IF$2lI_^lXk#fe47ha zir^~sDZ1e&nMdeNaCvj2c2vRE8A#e4gl(YjULLbk_C>DrroEq}EM?3K;5^a%`=sd) z45yQey8+a|)5@MV`MKhAC-+~I&xN$xjLuRL@9FW>?#Us?nt^#{=y~V6{S@pjp}h96 zpFRfSLt~DmyyvP-azjX@hoSgu3GIQrEGP9k zTDyp}`;>OGmF;C~zdlY*`i@+8IDm5QC02i|>{aB|2i|;za<EXyo!{Ea?kUYPHib!2TBhm^gdu=DD99%3TK>VgdHBRn@75o$8s&L9$t4(X}B5- zJXuX?#G*Ys0ghtk;rj}%_eiaTY7RTc@k}jGDZ&|p=BJF^z`N8lAt=rA9CfdNtFRwv zKYb2&qQ$gy9ch;y?T4uuTH@KUWoHJ7d2?68`rkXen9b>c7rP=lrJOsm+!_PNtNirv5AohXYyo0S%*-DXHge` zefK_Ef~OaC$pRmT1AVeL^VGJLcE6dtToEN)N}lu+m)6XlTSaGymMUH|{`-NK#R0!# zzMg!mhSc|L9A_t`RN8XZFWEs#J#+BgIgCm6_6xb9HR0^4IxMOG;dw2!UQ15)w0!FU zy^9OeO5GWFZH;F=@?!rk{vCO!BEK|(au9j##OhcE?l#A)k5ZrKQs~S+s<4{7-pc|{ zuHe`gd+H(bQ^u``yRYG`iLdTRsz$h=9$2T5_5+|(-mQY)%7N>7tgBxk^SV*$ zHNfF6hVGRA&*c3E9DP3dT@UT9#=3E~^e zlYyMMs>jy-@1?S`&{CU#bPM5qpnOji7(%!+zMTs)lUS1jwk5z{q?8>YlPf~jpC6X& zHn=rM%`T)}X3-WoXr~8dd`epTV(M1s#lA-lqsdF(p0lH+`+I1swCl%2TF{f*TpjUK 
z;{Av>_wvverv`BhXpbm6wTAz$Qg@_hF_``(C3f)E&QA+*a9Aw^Bg~f16J}W@Ti0H| zLZsOi=*x8z={zGbslHvozZ~e>SiWgFHc_*T8r}~y6W{@PTG{ANTR|RGvONwYMefx> z_l0 z$KuB0F=!z#`=L_2{oM*$q>PXQfLT~?gF04F)Az~iVOpUT$ygWoR95API~k5uXLPR<%AlH*HbcufCZE;>B`GzLZ^|DrmwJ3+pg@|8ccGmC z-juS&bDx16-;Jv_yC1Ej9aGkMbv-rg3Nt@v+j{Q{AIarf>@C!39ynT#mV1yEtb<>i zJ)}>u10TQR@F(xeCHa{>^UN7j%JFn*$Ld|x1FA&U>TvEv1^kmVt=hL2m^~+a_3Aw# zL1v@T)A&ppg_%bRdmhb%Iup*Ze=RmWpR=(G-Ji>Tj}p~Akk%jpoEMn!-SfBa=j{eU z)0+Orz>cechvV58$rr~B6Lyj(wG5ebTc^ykCe}->bR>0l52#A8?VPtW$^TOD@fBR;k8Zx8xKu$we$F525Np8&&E z$V6qS@99$W*!%D;we3Lay`Fk|hJA^e`UmAD3nrygjx0qwj7DPQDYX~xwebDDKspB* zd2!goj_6&I7`V9>Lf9ksfJec>?v+e`I^jA?FR?=?6gUoK4q2R?@QK>2fU7Khtu0T+bpz zI!XS4YA9YWr!)P9L0jwB{CoO(qU{TLa|FrWXFFjzo~(uQH5-7dowV+wZ2uzt94B@b zaC5YbwfRqQtiO6WDNF77G4gK*Ja9Plc@nwWYT7;WUm6X+GcfDSTW8s8$&{nd)v1q# z)V9_uiNBEV$%+#CJh<7Lm;)%$mMmTFce$GIxzKbOcyN~F7HaI6xGSU`!6=8KZJPnK zox4FcQgg?j)<747pxumGs}nv5y_ciLuF-SHE7$R7e9Hs7^J?=v3(d)tp@sQ6Z!N@6 z(5Q3rQKtAz>)_It^e>$a058&A2Cv-;uIovYxIn$(W$r)fEa{qPmmILR1%~zEX}j7^ z;!o`9JM(`wALns8}(W>_=o zKx5D1XU#T~k9(Ck_Ei%QNEIt0j($7i+VbwVr##lqR9ZSe(XZWU5m(Z>%aIx`PguH= zwXCdCcIX}QcVzU_rU6BcFZ1ckn!*#NW|Q+-$fh;iJ+ZB)-U?oSkX-(Z-R3A-Z&IC0ycYawe*`FA~ZD_YXsQoamjo=cinDD~@5Q;weaSY(5_dM$>d$J1_}I$;T(eC%4W!Q_%w zv}z|NZ=qbVojhI9^ENVTCpC2}(~y}V*@h8g7`bKW%q#d;8hL754j!LJi?|ke55h0t zX)%x=jcjxlu&1BC6<)ytDTeUmuG>o5ZhY$jOrBq%f8{sGJ9*gt-1~gjr!b2r`{DYQ z-lcx}%o2|Mz9?e(l;o)N)wYYb4c!xs-u^eSGO*-)r@GJwg~&zfb0)$RetOwvLK!=# zyB4LqwvqockwOz`tpdM$k>`2hYDu`deQ#Ro2q2$B`9hIaY`Axp{J5#JWhG zllQ($J8RQ6Qp>A?h8jrWh{Rm-`Wmg~seEb(^=bw;u7#>H-j?%|PwwQad#e_JWCqm# zTf(mZbI%a|7>fBG)xD+U>jqgHU7P@m36#Ni4eU*3pC+SSvzRQtd+iniG9 zfJ&n}-ga-@N+@2R_iWhG zb+)28pM}a!1@}sgo2lu1B(xAt0LM9G%}vnzMqqiGTHYEobt`f$&9s$n%SnARI4ix8UU!ZStu)7_vJd0XvLw^@3wI3xkBCGW?IG4SiunhV?5vY5U$B)6(0yCB@g|=;%i7(4w?bP*@^!DIOTcZ8(w>SjuJ3}ARqV;xNL;z?3_Ka)r9h1r zMbFSvd(!BL5Vg_6&Jp88sOiS=JbX-BH_@AVc+L39eG5t15+R04xzdb25G|pJ zV5|u|G!Xi6d^e8sL5sljeDuE8Gly*+G~j8tn~`g$gvNc4T%BiRf5@Ezve1vpLS;mE 
zu%98HQknJ^)}ncS!k5*AC%$*o#oodAfTlH8n9YH<^=@qpx#g~N&Ivw%)N*tMG*lX; z7h*B4U9Ww#H(wgC5DK(kvX2(i4=93DwPNZk+AmX5u0RtekA!|z$9Zz(a4xZ=)(_Ak z8-Yv>tiN*_7*g9F9JC-tpP^myr07duGf$0jTxs^DryRaVZLLEy8t#&?BTHCto!l$b zYyfgg{g5omJ%C0TnKg|6+nx{Pr_chD~9SnwQe?&@QaCT!gEvMa^=Up#=7J#s|QitupeFG(H>8*utP6XdI zwD-xR?glM6Li=^{_Kd7S#JdWPaqmC-^BHMc_-RMzZPYf@TU7)O^+>nQzLl#}h0(nO ze#X;r(C5p<($jPckc~o1e4AYFB89UQJhj46p#8bap)=7F-{*Y*c%4Igj3nlXP@DGf zPV9>{l=lT7ydJ#1hh}Wt^keASzI+R%jRUGtgzZoL)dhzjlgfeUcwls1$eGBqN#uJM z7S@fx*hURsB!(?g8|6CwS%4XCV%Cuqt{~0K8qY_zc<-DrIBR6N`FViU3nzcxT|4Xge1?ZG4ep0 zV!LmBqHMLfz5JoH#USwDc}hyV)X#Kw`#+JJbI!_<#d@I|!Sr!xyJBY?tzRXTGlWY1 zDrwI(XpYh;W3*L!p|Nj+6Dq>LtZ!3#7Kiq~UIkmqR=M~I{vU-~jzW{wpf{40t4(Lk zu8H=T1x%hD^=Gg+7QVTjlH9S|^UyZ{k-LO9^X#4rKcH;;y$i@eEKC8jTDiS=&!Mqj zYQQ7t&z~$dSW_*ZXLNflW=a- ztBe*nIczcg3WEYm&MrBS{QIMwH%7kJ*zuh*aOd zNX+>r9jDeR#VbksuJ@U=&JDzxLELd6AH{W!Jj8+4`>2?!n4^Bq#&G7x!PILUxof95 z$3tFq6e!2t%eKgX&e2fc_3*g{cWCs-$55AtkwZ0Tp;hFzFYTj6rI*`lR%bqWp5=7q zsx+4Bl;Q}P`}EkmNqeX0CCL*icgpV>Eo6^NOD|wSkE0MOzlQsq}sD~2s^c*~q z!EsdgQPMU#0*Q4ba5$HA206HfPyL#t)JX0#Y3XLDN}b*teCY3UJhPJLWc%nZfn1k^hT{dlr;%0&hPDx{+ApgNW;jl!xH={-k%z zY6!Gt@1rTkhg^#`fuFk%>q8PpE&N_2w}h{m+^5k4zkpKf`IRFx(zj~bygCjM^$cWDmP8DP_WtIi8aQbQZ}0GI7xX*HJ0 z9n}2g&i;rp_XJv`K<-P)&%I&AhF13m(hUK^y`rQ^^e=O0bo)U0sL3ig!hZNoVL|t% zb>EM0=TIhl&fGI%Od@0!dASDNv9FtG$2xem5D+<<;Q1{HJNCf_AcxXh>HmXxU&_CE zb@e6(pcx7gqVM4hFz$%UJN&EBwAU^{g1j1>IuD$rUSKKLp94uFao6(e;$PD`=Hu>P zt|xPk9VKRJOK~%joSnfUPm4Rpv#QCZKxww1vZ)LXb*w{+BF}UBZckay!lm?GR{(J< z;o9S~BZt*tyEoJRXMmxKkW$G{-XCr0stiw#P`AwDsVr)^ff^*q)RU(ZY2S|MA-Ga{ zHBYYLZ$cJtN6O_XwP$DD?uIYtQ--sRr$E^iwCic$Zve1pHw~sd^-~jRW{_t_%6ZUL zf9(FT0l^W(yPw+H>ci19Z=-p(@_am$=D6@^Wa#&y*(T=aOhvjB!Q4i^tMO0ZuFaTM zoG!->v)BC)7*&3GIR=)VcK=Va{F6_!dRNTdW0ani6V8)!|9j_HNejx-YHXDr^dD*{ zdnxp{0`JOoon_^ag&m|A2%Nc~3UzrY$9y~`IV!by{#2ezxu-`)OY&$i<2m&sq9x@6 zrI6C4o~tT2v=ttiEUHn0BhZTydkd=X12bAaeW>eDYHpp%fNLWZ_X4?C^M#?8><13wTLv^*jAGSu=bZ)h z@4#3FrHJGGcsBoEQ0KR>f7~%|5I-qW488Y`+@fNK@mv3ee#;+GR6#tuvq8Qzi#| 
z`+=FSf}1he3MWu+Q%?x4m<=sE7uU8w47$G#x}HFO&QKb~+<-jLd7@AgdF=#BPa5>x zPe#BI#{K9{tn24@yvBd&%ez!@VCR#zsy2D+uJ;r}ud_5>VfkqE6l&RGJ83lUOQ1ld zUk$akUsp-1I$FAwT5Hpcg;JFc?wpub(B=5z9Dpamx3i0F73GVkH|_%!$q|nB+z$?1 zpS?KhX8A|()qeC}iL2E(jg(UMBrvlK*)p53G*2aIJx7a#$hzJS{nr7PwQ`PBsXkqI zy@lQ2xkkR-fG(^Kir02)C8a%tBzHW)D9zoSLL276h<-tPNcFUBBbJ3a#hE5|ApP5c z>1A*u<{m~`W`x>ncc02fuq?}=sTackGMIQ5@!d6tY3$h+y^@s5`uAt9BX`=6Oxlq!&J;pOdd6(d!8D96T|03ex^mDE(S!^Dd-Z z2N>K+&aWb=`vLW9=N#k`j?SgNk=6; z<*s95HsfbLqp_W|zC6`J1KUxlL?{rKNNrrds;x~36<9^_cgEWti# z0N2u4UM~LkC9YcHs;G_a>}*umEtJzY)!wSXZ=DYfV!N3#%@cKv|N5XQc}i}AoLf+S28y96Yd^R@3GK8vR{8j`?+(7 z5Xj;85?5|;Y{%U-^yjXn9hBHg(iYOb2j=bl%d^@oVkM1VN|&dDF@32uV0$J{HBnYI z*uEJJm(C^DkE>`+>5C7R^*j8Jj#gojr7`nCI z>JQgCIKJsxFD1lDSd?c&!9}oPzpxiyE`n#Y0kqWCa!+Sk^x|zeKl3PE8+Sb!V*c7i zwcxwJf8ldqQ<}dGQ~D|Y^o3W1<&fwmPnaC>&tM_XQ|i@N=K})9EI3ZjlV1mcC-YDG zw-z1nCKwS8L(^$s6M1T;{hXsd4*Sy{cE(R%%Qf7uB1J3QD8Hu}SV|E4Bx%itI6r@*|e~><);Dlew5-Gl^cPq7X9;|!NLE9)P9K6Ili`p&TEUd7)O};r9ghO zXzx64&hAVw&qO;H{)PIZjq(wG^8~fYW z5pM<~L*m6eOzHZJHQ;Mer?snh-E%-qJCy$&;8Xsa1CE-&zHQ+?8SFELMpwtU4)_CN zv=VOuE&m*~olC5lw9#V9y`7dy+FP1*e#ep2IrSrEL1FSoe_D3~aofY*cjoA7aB%|` z+Gz5A4Os3V5BbCy7`>sgjORqBYS+ENwj&u!$tMr}%nr-p$&mZjv<%Ir^(h`J;JYY+4!9D|T+L9?mZ>ad#!EXSP=QC_J+t{6_6V+>HEhiu6 z)Y%&_-9YMmVeo`LY-jc7f+K2yNG*~BE_>tJ2=ej$@WgoT%E1f3)eLZu@opd9Rq@}1 z!gAEf`do?Z65n~GR+@{Tm%y>DXGl2*sf1ciCb@>lMe(#Rk2^H^85g3`3mr@ zfA>gIv=H|^q+~htaXC4sbIqM;)`ynp9h%v3$G$+J9j68LC3LE-w1HMgd%!+O-%ClR ze$i`U%#P?^O`-(t39WZ8`vvx=^k1Zq|NFl=X2kUO`K#BicS|nArhA5eTdZN%7cDfyYtMt~9K*oPmq9JC~YQNV6!{UPO-H?pHN$)zP-vG1lGsc-hh=sjaQ$7iFiE zs6@`sR60>7w~A#+6&5+Jh{9NpPh^3Py>5* zR)HRrPuc1;8C&b7`4(_{i%egC*6#oQ2i@?c~ffG z2XT(Zc*>a;xtN>thCIVkpYpBb;V4}J95^a49t-^}+HL=^iaqCM4shmam&?HQEPi?F zIhB~wL^b(%8rZ%;iCRh5#Q2MnUg>-Zu@9n*jZo9kyzdP@=WzG>0yr+oA4dmk@eE%G z{x(L=-|Z|R#b{?V?t9e8`Gj-1s~OTflgY&MWUP#`Tod;e)G!e1NBN>Xqz1|Jzlh(w z(6&-|4Xw6;yA;|JYa!!Of~MW@7xJv8Qky#S!_+zG%ASy8&Od`fEwzkVI|rk8)IQ0P zWJ|539*yMILL6r(O9y(fzKp(919-NE@SGSj@1&5LN`A+J?UT=oudfC?daSiFl?M8e 
zg{y)8DkyeETs=r-54{@vo(SDlBEwucu#>0ZJn5O{e3-|dTzb!UR#dm%ZRTGozGo)=?vA>zv&5F zLZBC6Hd5|e&~q+)_wHilZu!E{4DH;CzSC|#3~UYpiaho8MA8$1W+cC_29=H^#2IEM z!4IA)FqP6Kz)xuw)oAijdfHQU*A}e@R|vdEde3SRSL30oJaF6rqzCi+cWCN%%9E@9 z9T@UFbr7#Nl(v%G>&fR&-0g)crI#QP@8!*P&EG)|y2D~CRA|c#hadGBUc>C#T$H7! zL|bQnX!sCtDAt{4Dd$+CydvNDCdc`-im%i0EjZW9Ty2q;(V`E6L*sev-FbKa?K6p2 zK<{fkI`5_r66!D{X+Gk-2^>dK;#uTbPmNu%cmb*UAWxnk=XOHcBIYbwS*!1Gu(}2r zHy3QUo9;An{oiopYD!s!Je~qxUy1dto;30@U$rG~B#$}p?=b;&7ILecZ_VVX=ly)X zyZ4zgAz9k`5cF6&w?Ip405)&p&g+QLK#kQ-w)P&-^mVk{EZX{ed{dXCUhz8MSL^uS zO{F+r=Xl^usg0#iaV?>8Xn|PnEa1$*l3drHqh8A*jkDd%X)Kg^4DctZ=h`Z{YYi=0 z1(j_9rw#O8^y)fNc0QDA4{ao^&=~2R6{qdHIc!Wd@itoGa@y8)J=R!HhM2F0Mv~9B zDWt$d;N2bB^2BH$hHJx}&9|UaXL}lL8F)2^e|$;XmMFJNyGiE%qqwQv=Mf`Eyj5_o zJKUCa;=mMXbj7C*VhQhD%Jx(qC6zX$S0lgQ#=r8vhBss9<6DYh54Rgsd`o;Y>|=6q z<-5}53i5RhQeWtz5&wlWGmBrkv{_OwM=L{HqZV#|CCX6>`c?0M_uVfkOqCs zxBY&#=sSVOdCY8=e${i%F1;xBb8?I)ADrljoB4}T=ib?m{1I*LT>Pt$A>)wj-FY7m zm)ToA0^WEL*|{EEJ{;b$ZN&8arun3H7E&K_us@P`LteG7mR9ARjWxFjALT$ zTRX)a16-H5ncS|7JrW#&+X6P!rM2K9=>hFJHSA^ln@hrs*jWn}#Hm`rbKA7V?7Pb! z_J@?Der*nVDg&eTTiq9-k?_C0Tzyv$%%0n7stvT2)X~mQ-X2KbGR;A%!g3LcE5Vj~ zjM?+<%1s&O93nkIW6^$>Q=0R?^Tbgmh}9%FwGmQ_t_^$AJhWMr9_g~L+z160N%uB* zUknWo2F@biwFT`zX#rore+&0L$x&}X25s1vDC2%I|6XZcm+PRkh1)wrlRVzYTR(hS zS!ZZvJlPJWOjmW_w3e`px+m%_eUsCDPLwO|t7$E52IH4ed+qxY&1;2O-Uc9iDDaZ! 
zT`8wDs({+0KCdG5ml3PL*JMSBq2#woF_M%@&y83&j?N6G1qx8&Pmos+z*}AOK9p|_ z*h(3BOo!HzcguR3kKUT(AxZiDqkpZ|S;X?v7iT(qkoI}q$zvg*jq&YR>b@M>aMa`5 z;M@LaTS$TVc+A|*?kYGagW{HfgF}gH&s*R040P5h(N}QC`$|IIf?~Fj>k#Y*?Sf_W zvWw*FZWejMtyK+@QJ-J(Xen8Ru@5DFpI9TPuk+^5fhMy~`g1RrFG5oT(A~~ybajq% zucwgDVszmM(#G7VlP2byNk*IYIhTw>kN9;H$Z)(Xtf-;E|5!(-$crh zM;+^|q!s@RZ+(|ppl^ii*Xy9vQ5%<|mrvwrIuav8Pd^EC&PjLHN)`Gl4_4Iz$CE;@ zf}=`ffujJH9H~4SpHmeQ*7ZSyz~-y)ud|+te1C_pjtFS+tOQ;$ZM)4yulEOP{Q+tI zV_V2ZcP4a=zgTv)!b4z1eyXJwy#m&Qq1s;{!ADbLXLPE0cM#_j(oY2{WwR&B&%@d| zfU%d~ASGJJFQX*6%()?Z0-x0XotTSRN6HSqR7W3WC_Qs=auELwF=K9D_oi!;ObsCMzlj>pY=yvWU>os}qQf}FgV@5 z>WU|qCaJFUa<>kpSyk9IU9on5QhKM^0Ip7%NGj)ot_*mropanu@tfgS$A>n;AM4PC z8^MR;^2LbZZ2FRvTtbN9|@(as#}Y^lQM<3CBDCpd_0!cSq=xyp;u?$;7`;=57o)R$BUu; za=1k8Glnod29{X?J?5~uHqk~}MZe?cNhHTmvO7QO`)Hug=**9`l&u%hbZ%1)e7~;-@XIrsb)KG?82)r3-4-W{Ofm9^iy@8b-`n9x# zYLN>$An`nmq@!0*U)MXj4!sJgrEkG>Q?Soop@nB{!Ge1T^Q%6x-&j)oq;0S8@1F&6*RC15#GKnSpUFboH%0Nc*zMKG6=zDEnTdkgIzpBSUR-xzF}+ z{)a7*))2f8ZMdRGj4$QqDjzji>XF-jbuKX=pwg?C zzW6VuMn~(X7BQ?GiLxjC9(P=;30|)R4-IH|_2b!LuNdzoc+dNx(7YZ!vEqu~0q9&~dCBUJ0*a&xPM?sD8yA5>~dJ}G;j$;vfWqK}NH zq}jdOYAq4sTj@%0XyGmZ;#}lClz82u2W9W#Wo(IO0^S;6bWgT9k&h6%t8oWu$Ki!} zniQS|pm*e-sB=4IOhRK%gO)}{tJ>e3L8x*gPv4-OoIBvsdcwMan@6dAGrCY;fIIRo z32xL==?sV!(25v(mKeu_w=^PKL^4^@kzgwAIONW1X%3h4=KABFF`CX%8^+C=%p{^zd>> z%Crkx7Nu^cR;~cox6?*zoQW0gnY+vRca31(k#~mI-QAkD(t=Xn9x>+bxx8A6X=L~U zXinK*25fS5Pkwq728X9>DlIgbZ#%%aeUxW{%e3%3fp-+;l;N3Zryk2`tujV4&qbcQ zg1scslEtJPw@+^Gzny1y+)k%(h(W^(+?5ipj4*6G6tIdi)p?%sX_$R8dxFDwQo^T8 zY2_HBTsQ$N?MHquL(S>yP}=@udKLN5_SK!C(t_WeN2GjAb45xycFA(T`E>!K9>b`SG8501o=Y0Dx%Jz9G79JeUN&huV+6E6*vFBVfO)9Le9 zhRhB6Ea@S=Tg6?uxMr!2w}tTUP;yZ2Y410ax4(=q*KRH*P94}=yDOzFFe2Jnd-d_o zaAyc;DZbDNo2!XkrE><~cLLM)pqF;A;drq95!*xDEr!N@TEQ>Jua#fA{@#CJ{HJz( zPqFmYIqLW_|9TxtEnmU2{$^v_TBbmpN3h9Uzw>MO*0Dr2`ygs9j-*!i zS&+Uw!BQ$;gp?dhPqH_%ZXi&(E6MwWswex?s&~Q7#w-BcSTN!)a+}Gyov_!C?s8C$ zdbsmL6|(JBV&6&MwMZU)u-=s&8L{2J`DJn`<2s32-H*QM1@^uZR_<2#=W?vfcC6Q) 
z=(ttr^>Xxt`?Ylo3Uz|S_1AZbTIyvH>kN9QGe44YN+MlPbxbHzes_2lli%Wmil=Ba2yMNCw|Hk zW?DUZTlv2fs#+gm!c?Nv5{H(gPqI}!`AMIhIe01kcE6lat>#PX8kC=|nM*NTmE)5= zZ12u~a%SogAs>4~2Pt0qmw%K|B{<|4%l`b8dS*2dYCkWng!7({H?digU8d{{#xim( z^8EQLQIGzWJne3OSa0TB(pLrEQW>95W6qv2VyHVo$=ij_e5>L9QfxKBtj;$jP4Z#i z2G&iXZ+8o&S(3i+QmQ1~^yI3!wd~~v^|M@+*8G>w&`W8Aq_j(yA&wlo!ahkgN9>i3 z`VjVpV=m-PJ>$QtKZ?XmcA>PW7U>$xN%?pECmmxOYEx-9dbc)tEH+Va|%#6E9HMtb4RG$Tg2Uu z-=iP?2o}~*%F4*)4stNJt+b_m!efE+25PR?Y$TqJ^N_%RcCdq=Lm zDFE}R=NW*ts&T@XS2M5u4D_dgBhm9zGpk;t- z3eTU`hh;DM8~oIA_Eud%Xzw@Knx=9!ST53!=gA$p+X=IWwT#fwP(Yq{W&2j3a)jx3 z*eP|`Px_EY0=s9s*r!n%+K;j|)ql$d zq}17@u4Ifvt*|}P%>q9?(fwMn5FvvXDowfM4Uifh21W&RR!lwMZam4IYg=9#1=ZhVcHt>iN5_D_uy5?mzQS z(TDyT_>tB#Ffk0iao2YD@9j_hKcvLZfcy@==8;>jg&G9E4*;|7>fp%u4C*qL+InuU z_Uz8EyId1f#_wOq=Nz7#Q>b>^hP~bgx=}(6$D&d@o=xqf9@nLfhq7D^cq^C~fz`VJ zE%Qafn~`7NCEgHZ-Lr7%Mm(IqBF6WCXEX4;1HJ4-lI_G|dxiF01`oeOy*{M;|Ddh( zRK5wvxYFtu!2A|rGq|*#U90R2s`KE1=YUL3J_jnSAy2uyd#9fGeZ;JU*X7V0Ioozh zz6#)MpuJByatj)L1bEm^E7_|!h5U8|{q0cvuW6GH!Ji|( zM}dQ+GbZCBQs2dVSD^!C4uW!8;$`7QX+7N|uB&Y^x^ zCB^2`PB>>o9S@uMwUa>&BzZVM9L{rTgGoh~zcS+dfq%D&ssa2xK%OrC8t#4gIg!7H zhT9v`AHx42uDz-9z8EF{MA%mV|EYX0`aFq z)~BO%j;2COspbL*|9A-gbegaPiQDWtps_co+#6CK!Zm^Kx~Qgc|9^k|AK1?>O*Pl+ zFiR0+JItrR=*LsH`2Rl$fHavPVbwS#{EFG#FOU6lL-Z=hv?)rOV-F zFXKD)&sH*JKS=OP5LG33IR|W>jBs*uzFOQzH=yPoB;SFwibGH@K_DY(k8R-Z6xiv% zXx$~Wmjc5B#vke|-{G9nwL7eze@^9>>T2y%9fj)iF-vc@pDss-u3eoCN zjC;W;RkXxqs23-=EauDPP$3g}(iAzEdmjv>hWMiMF4f2FY`;YLPe5ZHP!m0)vUQXdO$JS?aj2u=Z>w+T5G#oq`%95QZ(m*$Xm11TmVfTshwj^LX~ z^CTE>bGQX4f$PED{ZL>9xoRFv0p3?)XzD8nzawawVVFx!(CWY;;?Spuf1D0CXtFr5 zNNTbHs0LOCHTCH1CK>VtetCWxgekT>G^6wFuau_I;&+Y3AJSSHI)+KfUWvv%@L7{B zby=4KtyFBc)u#f_Yq_`6;&x4r3*$qh$wO#_ARR`dE68+zdr8sX?+i^CEtO@yPF%f@ zdfV|$opEt3Wb%>NFY<3kA>}(VLeq2lUmmTY!I>jYD=jb~oHnhUDaxTMIiR-leKqmK zx~V(Bl@v7^mAn`FWjZuc4*goMKGbYGvc|W*U{!fCm!}4(Xg=6Tlb>FpwOTMrb?85i z7WzJ^oKm*{`Q8roD|6c6CnscD;)ndY1y8s&p3YgP$u~nYqe%G;B<=a|r-$_1f(et3 zzS%$LTOsX0-h%G`~SaQB5$!Ee5JcvOdIlhV9@0t z)?Fw)4+&EPj5>JQgS+*yWlEjAj8+lj 
z%CxQIUku6jK5vf`dNFS~?z-X3V<#NegbSb$Z^MDaVfmp*#}v0G#%w($E<$@;3Oy)$ z?fAdQyHe-JwD3se*v+(r+a$>+x)xnN)DaTX$pzwl2-v(IsLllr51IQeA>G65{s2jF zCeks-WQ82xKEsHUJLH6Oppes{z6wlAxBjdk?*eIcj}IZP?(z3|9)>Zu5A9Zqe7O;7 z%#hoc07*L#?|>repiDPPdxIPt=+<Kl3YDLr;fZwQl?W_b>Cgh+s8ch zhfY^uV9Kp)NvD&#f~QrqO$G*)gI8e*EayqLg40`Un}_(;1TOZXlr~B{6}olsh{~tCCTHn*V0|I?8Mm;z4~0=`O6b)BGAnb&1@|Skuu3fJEiKG^Wh^I`h&P9w^ZaIzdS(o5d<-SryE*^koxC-bUd#r#Y%H|wA`q8wsxwznD~}hpwZb=KM<*8X0!(lI>Qa(hMSMe)T2 z4LTJrrp%S#P8rw%o-X8Gjn?u=IH%v%hs$6ZuDBPVsU-|V4hx5Lk$6jxi*AzfJaT#z zl2eMihFZK&%)`m~e$t#yz4oFG*8|5oB+3t*3P9aQ=pZZ*j|$;{}sx6 zmKJ;m=pMknc0%zQXloWyNE_!fD8j?z-Bf-h`46Bk*)>FXR zC@emw<<^0d8MM!6pnaUOcY@~u*r!fd*Ciq>9vv?|eh@P4$5@dLohsv=1mcW5`r)Q< zL9UBzUm)!UzT8bY1EG=^;VqA|TLIS%r}Tl;;wZ4yf@WfP6OH~vaL12nDYv(7-X)`) zKy4o>M+_a>^1hoN4Fn6)@Lr)Ibhy>gx=Nml1A3{&C1KJ+7LYrMy9xL&1~(2O=J+NR zxcz0KJ7YJ1mnUW5;&e1S{2X4HwTc0!yQ#n{=xj5BBAcdp+h~S zrim6%Pxc0r*1&phN2YXx)KBTNXG^{J}5_*&KfT00yd;?wK zq@tNf_M79LC-z_YmaIgrFg*rC(LYKQ^}H>SEC_XreAEWj-UF2@y{w`7xP~y7?xpvn zTT0KV$LU)xXhfXY=k!3utDxZuWQ4s!H@lEhofdCwkI}HVmyycel(eKQt)ML{!F4U) z+-PYjSjiLqIx%&t&fs}Z+RP0}bh&rnf@=XrH{ZDuyT{g?Nt|o>t~cRvYU0F7+oFkK zrM2XtU2D&y822=+dJ*xvN2?8^W$aBz!}8raaAHqez2X66Luj8od0xVxS~J|L<@Eu1 zx@7R1KyfZ`_e2VO6Z&dFi!MTH*2Hv?+Q@4(vatc_yO_MCxC->}z0|{rMN5#@>j_y8 zbPj{>!2fV1^7UQv_b9-1{C~qAR=!2k%J1FA_rX{#E z=1*X=20L{fxp<6v3(tE)B_GkIuY<#{@THyFxOG4g4U(tcO8GC6?ha`3G*a&iz1Y*f z5bVB!WGw=#{qR%CLu=m+0>tWU;){zjfag9+8UVc>2R2vIrcS8642m009?Da})A@DbM^mux_abya%N< z!j;=nP_H7H~-krLb;!Q$3sE73A`&E&mj^06n)q(Xwe9>mE4W0jOXhJ(! 
zPO9a*PySn5Q`H828rlttw&XmJX(gu~gVwHAjC`XOGN$&c({XC}Z=eOlq*Ec}zBI8Y z^;43c!QO|r=hDEA3%;BXBPUwo4xY`ueG1bpL|Kv`*XhS+Z#chP-acz4&6>jAy zJVL4j6KAi`8hX8^@Y8zh2F*CNxzLG?Mda&5+X2`ut7s)9WyW0#ejOV9P-yvLLgeoi zwD<@lz4qKK(9JD@{UylH8u;L9-iCHgc`B#PR+45IRJ#y(9^q*i@3YYEJ%IU#K}$ux zbYR!yS)kJmDiIHEYw;;DUB+601fBu6xuo)U#C?JE`vA`a0dYN0RZ-)#LG@7JyNk3A zu6GdkFzVYuS}i0g+^s4PCft^t2lZssO4)q_+Q8|MF2Wa=+BJEwFq!9cX)o;oMmEs4 z4x9IZy34_<_HXad+9fSrN?|*Gej;D#OX+UzBl%5%oA-=z{c;r2N*MHs^p7^Ro$dcP z0b?R?E4PLJ!)V`=u;*Of+(0X7*Y$xWv<#H#N|){2m%v-<95)iy&#PvbfIfR3KGe#R zZuA_j2D=5`v}(Sh2eh2Y116^N%w9LYjXS#CucVB2@ zBA72CzbfdHd2r2usK+MckiC_~#5|0U!9aQ!Gf)k-hOJB;7 z@8?kFbHH{GEwL{t9gIB>Oy;T0#n46vw5ZG-jxJL3e?!PpQ;!43UBGuZ`6qAbWctyK%T7g0V>x$2e^!OlqTn@Q_rtmJ>M2Fe#{=VoGQWoX-8NhueRm)2S%^)4gLh1A|@ z(H-FC6KZU%b>MLWUz0szzuBwEuRlL6r7S{HxKyf)zb|5#cbD~3is3X-Z5l0BZRupo zF9NsPea0#e4CW})w~V;zAJZuHbHL@7wY;y`Rk{+#*rn_9cJ@WkOEFSAoiB&}mphfU za*cAKA)d4NKOp{l#D6J2mvq{j9)QeH%2h%WO6O#Q^^4Y0yCey%XXPV)(?bfYv$Pry z0Ai0inas0tBjM*w-t@L837n*+T|E%m)Q&Yjrx7^8(4J@pwYAZ&^Y|<`e*HQ0a0NBL zl05S8_#dd75HA8VXOd4F&(>84j-y9C3X58Acm+Iq6;{j_$bTxdni2Zb=$#%4)l4Ub zvfU*=9!}si2#0b%3VX4PQj3)1h|Y6ZDbK_GR}nv9%4wP%(JK08d~(s`aY#F5h%(JK z>yKSm&$9=Y|DJX)+09t}N(@ zHaijesRuv4sRxuf*D|e3&$Lt}ADY9T;!P-x`wBWN**?R8$c^yIh+j#KGPr&ius_eB zdqzkeJ~h?rwB0shU4slc^T=Lk^zOjXABlJ~v~vtrcNx(1#skqx>n2U8cVJ`WISSu@ zUu2sb5zN6h&4R9`)24cLI`CO-BQytvypC4UOLrX7!8)FVW$(Y+tse&c-VGL~aQzQ7 zoNOh>1El9YDX9mvn)KdC>bL`%Jc_y;LXN}2+j9-Itpz?eNa&5OT1VTs?MyZA!DM3f+? 
z&kP=LjKb3n74`4r2Lrjy)LouF{mbQ6q*6>wOsp|6mDI>QpZ5Fvt$Q8^%-;Qd?|XgM z^`Mow`KRpe#J2p8owXoiA zrEx#*E+-t!^Bl_4((MG)@`?U|3gBCj-VAYjF}K~_E48(jOdLuypTbVg{fPP38_@Fl zxQi*yq;y|TtK{2#S0H7bJXoT2GVWQDy>h0{7xNbKd=;_!r#$50Joo&r##2UW*BGY1 z_EGMRaus>@mppo(wPG(!F(PH?Klw=sFaP>1KPs1dCF@CCl?d+vgx>&%MV_X@b9xqQ zh%1HbdnzQE)t9FQ#BmdL{ZZQAj&NDR18s8UC}4Y;R=brp(a$lDytFenp#L{eg0gTX zczX(%N0QI0*nETW>N!c=O=uiZokQ-Mcvf;gmD*_x@;M`h8;Q=QE^EkR84&J44$H8N zv;o}!&+)%C;Nt^m&jqWG!guy2#f-fD)j{E< zGFA=v_FhTbw(w@F)+L!}sE57XV@WlIXP2%VK(3~f>S_|-9}UFYfi9Nr7O|*u`(`$r z(K(ge!qtghF+yyv-E%DM`c-n6LXUek?biUdmjcIK=ua2XzfXwv_jY2cQSN~noc6X4p(WsU zl;a7?sORl^6P>U;o^7lZXAGrY;IGOhEjGpNT zB>EeULe3VUJD2LN0J8n5^{wF0{@!Zz)CW*#KgwvPSMhW3d?QqO5vgX7%D&=Pp*wZw z*=dKjxk z?C%ToYrujV@wqLn@=ls=r8d7vc6A?G*3Fb{$!6qe3$aZhy)Gq8`?U&ueVKQC=TcxV z@Qg-5$0mbt4LEMhXScPF%L?LSN}UQIXU`i zXmrD*Jf)VjG&_YT0Y~vv3k;U%woLMxa{%OqptP(lZt&Hayy-c~PpxU6oo(cVSbx{3 z-Yp~^Xh~`97nIUd{G7IeSn_dvt>DSGwOlJxEUDN~y}XWi71Ns#wWj)si#Qx%@Tmyw z_BE_umhSQ0vDkQO;x2#WZ;mGGb!dSmz zbl7cJTH??iiN4b{XafC~_8RqPy$0>CBD^o@j)5=sCuCZZLymR~!-{r-{I&3jEu&=I zjgp-xP8+&(32>b3wB>wmJBg)rXC_lWEmqPo81FaiXMarG8TAguh zg9nx;%2)O~AvEYu%9nO*<*O-G4^#!vxXFT+!Y_bViqNwdJuqoW|HezCXy$9kHuKp4 zr`no}f%=f-#n{T*Az*U~c$1R+ao}m>lVSOFfD;^9T#F=@()yG7TeR*oJa#`ShuOf%e$lz9T{5^Qqa*eGks*w)iquGSi8AoeBPe?cXr}9-ssphgz z(gXe=+6@W(6_VcOSC9zyFh&EBIqs3{Rc-KA(v3pWWk7HKTd^eTk?t`%wUP7(fxY!m z>9OGDKID+wAbcK)XboIC_91yK!ge15E}RZO0Vy(qn61d?3FNr}>G%pZ^Utvq_6P1& zP~-z(;YYMbSK8j$b}xdV{fRl3oVHM3E&EoW-^AOaP-8u1IXb21(T#Sx6Jk$H&%o!C zty=*cH<9CDT1VSfJ?t-J^xjGD(TD60rsj};jLmKVj}^e7zw;GxEhE`qNx5~$Lm}@; zKU0BL8{QEnq1X$myok^YsE6|21;}qpvTHJ>Jb=Zj);6tvcIO%np=GovGxFD0qTfV| zO>gO+iPH-AZlz63vxKuVnAZDH0`vNnTuxC#J2*$`Wh8c|#5=~<3otmP+bz+a+X4(u z(!u#H;gN8bW0vChOTG-OZQ)Igr@T<#R3m@w$yt7Wgye}ge=rli65pNiSIu26%aT0W zjVCwg_G=Si&D`$K8u|H&2<- zf*IfXCVt98!u@9AxXoY)Na>+#mQ z!G^zae2#oJ0hv9S0pQRvm6#nijJvq;a2t0qF0JXEkVnsg`<{V%)+1N@Ad{-#Dt%o3 z_{&_o(|(P#cSe|UvO6-_R-6qbv?iN&59oIrly@xcs26fHlv9EISVGG5r*tXu0AMeX 
z;|0_-G{ke{HvtP;-kO0n@yBwG>081=u2O`vWjup7rT~u{rldKPXmtBVpG8~0mgp>W zTL(Bn&We_|&hZ>SA_(h4+r=;Er>|vVM0+T+o`c&*6Rtjdnig`ztcv$NNTV*&vK^kV zt6c31w-)&N4wNHRs4vX@*5r@$+23h9L)BQOa5IGUXr>E*+gTv>lxM73+9>F58$3NS zNquvA98G5`k7KbKGfIhfrH8gxNAPQHorN(OS^83v7A{6n*Sdi2b@~D74}bA;2GTgA zRl&~?FblU21a+I52rz{s$Y@@y2rqu>PyY}cD z8-9d)58~LeZvl^9U;S!>=#8yI3Y<@<9-w`Ixi9ssr#*|<%S$;nQJ0zcjkn-W(dKY; zS*xZiSXL)LhCIC*S$Y`QelgM5A(Xc|_4xKhqTEZ(R%5eB1IlwPXXl@nD6KybJGLoh zY9ZMhRR%b(#y*CAp}?}TE+f?jzQ^oV!|feQ`FTG}OEBg|*fUTP1h;oU??(w4_dW1_ zS3~b3dG1d?GPGihO1tp=PC~R-{E1eP=hr#s60;BWzc5jyG%*Q^P{!-0tmIh_#x|b( z-O)JkJDT2I@XafPI_lJ&@x&$MAq>_!EVEkfVF6e0-i8rpafcqYMX z_SwG#y!NuTCJ8wjTsX?wiQZi^CH@!R-U23VycQ_bpVUtVyKWBPnvMnJ(+f^^Bl9xP zQ@A=4WfhiDD``BOKk8jSoIerF!24H#|9ga)XT+*+yE``g;K$EFa}h($+X3ibhK^zj zf(V!X#n-*q9uI-h@CJ+D8xyw0uaYJxPiZ_8KT;k26Vp||yF0i#D&b6t0?%rDXJZW{ zy{&X(ilKEZC0w2?7*j0ceKO~K^r!PPa>*Y$CVEc#gp{kRIF0))V7JffOxIP&w$9i` zCBpmjU1?C0o};8MVre~%l-`PjnTGbbf>v=u_-lx#$I!f8B78TcyZOX4Ftm{8Db(d9 z`1xhvKMVN#Oi&8N4}6X~eO{zKaNwd6Nm2wdC2-9FIQ17J~~gmXfj(L4dIv78W> z+R6VTk=hkW`W}JxQXux7w2CrTKfB|6YJU4m&TY%!8sls8>+#tH?T+Bx))#+D)e`=g z2RKS-bmv81Kz^;zvzu=yv(IJq%`8$lm&zsZu7}aTruTF?_YRzOV8&d};EYyRVrVrv z)(}1E=;is739VrJO{@6YkT#OrK`v55%zv%mNgXHOOJC;@E=B6E7>?aJlM+Y3ZL^a! 
zac+E;BvdWwTpJ<|YN+`E@V8#-4s8^>h#sCjU8yRp5~Y-~AxpW3x7&J%pSWxJ=mW4m z+fvxbbajNn@vDgC=$FtfPVY)5&n$&{txj*o)edgq9L~8W@wt7`5^u)J%haxwm&`RJ zwJ^E_*57Ljbslr)yU>_I%&#I{_HnEx(l$c_Zla^TJCQGSoEVwSaINer!urEwPr(Ca`1B0CfeWxm21ET@(CjO*S~ftx z=IFf2uW@&7Sd3Yp3XH3$vCG1h^;JM}6c9MW-VFn0Be%PQgO4clB5ZdU-f2TDz|v>2W_CwCE|EwY)t`xApD_4`*!{h_FdORZI9p zAjqiU;MC$P$!}uHUz^rhr}nk9(xQZNy34)e!TrJ%L*B|#_!zFvY-%E}zSLqI={#4E ztE~`}y@mT0YQL4Z)A<&bk8N3QqhZH{ja>&#WC<(6?7Mv&XHWSiN4!C69tL%UNAKyR z=jK2I%5=Zm!t-g#>&fR*xN{!X)&ppy8@M}DZZ4QD&|m&ANu(8Mnjz4Q`lkj?adpaZ z$l?c(m2TT{7ZkD{?X0vG!aqPY?<3!DCZDcIjbpL*ucj6~kZSs0{N=!D=v&8^w3VJA z=Lz6rD7DZYaWv5Ro81$RCm`kYc}^vUed!DvQB9yF<9LH}g1RMcbq4dM)S6luBZ=wQ zzr8-W@0|4HoaoNrrULtBD|iVlC&ejKoQHKNUmbH(LbaycHt@V1*jq^z_R|RNdhlPT z+;7qv@}WnE6vuIo448^3ObIeN!8H;H?o85e5wzi0uwEr;qYAG10%^Jc+XBL$fJ&Ul z?lur1+Z-?Vw~JEZq4*cJkmflwpX&nNA)Yg9LU#BQPiF$0ik-R+zj_O`@|%ZV&5oq1 zPJV?5b32x>WH)fPZRHld4@KUJz@F#Z3Hjq@73Czys(B6x82L02ul=csHdNkLvqI-? z!tL$Es9Zi4rxhGlj=f-|tLx!%DL*{r71vx2yA5(ixPFaBo=PcBUg{_BlYuYz#V^u@ zcI;rN&U#iOZS(@W4s;!Pbs*Qi+z%nN6SaSjyK8*9U{fd&BKH{SJ19Zh6=<#nj=tpW zmIE7+=DTArEQ8vQq0ZWbA&qOGosE1clFm(T=fDNhon!Yx;V-q8ehjPaI-SraWOX6w zr9MFY7?g7(y_gS^Kfu-*OMY4;bCMJ+a1ZZlnKbvn8`74WGC4|oC3Wfw$GO3vR(ApE z_O&E?a?0J48Z1v5TFsi*);T3|=x0fBD$mMZsZEJ&ENy(tur|ssrCOvl7fbSO5;e8I z@@}#X_DoXUZ5iASc#mY4?uJZoEv%yk*4gnpskKB2>iI^F-NC{_aIKDX%*v~{%ecmv z>`An&v+?XJI2yH{FM3-tB#@fz8|1ho**U{_>dV)sfOsP%hW56BcMX5Wli}?{SZX#Si?r3Us2DzH!4)||vk}9*2B+8)&fUh(4y^)v`;N{o& zUO=y`33yp_Jhq$HT;jJUkj(KwY2rv(6&SmJW-z8$36d+B*ii4{4=I$EF?se4UW~ zDR);S-pV(Bg<6sBVrMRW3niI0QO7htPwIA6y5*gd{1eUvZ%t|BJLzAVj$gbgX_bN( zAU_J^#)x-4-Q9^Bxrn7$;c+j}Tu!JPudb!XFahja%NGbc8LBFh*8b4*#B!BhE4gUF z_aLkbbaVpxF2k1Aes_lNMc5p!1CdMJQhOjdIbO7$SWVapQ@A_+eI>bST}>cIw^-4W z>^MVL%5$4vR}xiFD_5)bf`YcfUCO{#>b#j2981hrs9**;I!op?T3^clZsL6F=jJOL zp|VD>{T5&RoxD(ghdx*p0gn=R7*@$}N<9F4>x~Kji2V!mb!@LQlBkfhO-EpJmEyt# zLy>O}a!vtSbxIuD(z80LJ5p4SMVV_@MB2J?qsdo7CQw8Pc*F=s17;4lJ~lK(-x#d5@(oP()TEwmdFaR9ZjKk^XQn7!rrqZ)89DaGX0 
z~x`x%qu$@4LySXiYNwaO9VgU+ErJWq+i+bG6Enh<(bl2B89gq!PfhMsIe7|@XiMj*t#=%Q z36B|2Lg32kxOXLYe<1D*HfJOXcZ_rS_yx&;l9IDE1!mSBb_dW7To7)61K*pKynfB zTz~D1noiW-nLB!xtH`53PPb6fTYPb*o*1*f!Xw80Wul|&9gvRhKVhrbf?0}TEG6mp z-~g#*1+v{A5_iRS*^<6#J=dZwx{{_F-W1pF+Vk^AoV}5)vw_Cliek;Zwz&SHVv6C2 zqqFWa|u@tJk6{xXM4C)R2f*DuR4)0laUk+;JS!3f|HE5_<juqC(K*VJOS zw<$lb#-HLSem&QFfyCBrgkvkgk-bFMCpi~v9J-;B5yV@;;uWbD1xnpL^|p7TcFu`= z5nNOP*)`CEcApfhKcEE7_ohx&)a^9tRe`J(*JI#{nZ(o!>MvSv0$#1hq2#kR`P=Q$ z&nIRa?nmYfi2)yn>_FOwq;&MoUmjjX4O_`sSx`q!U3p@OYN|T&?@j(|$h#8kHS+G5 z#~NU`GwBa!&exGvnsW=j4)BXws7U?Y1w_A%u()BrGYic@Z6x(wkXmvW?^@HgY!3Hi zo~}<^FQ;kuxDu*?P$_IrH2MObx1{*Cjrz$P+7D;iImg!!=hwxT7fCPQl)2XMTtl29 z&-Nw48zCKKgjl0aoF2w6w#C%)5~dS)_qQx^ymIF-?A!;^w=r#yC@(6>L#d*TB8|Fn zqVpl5H^F=(xRAHXly+w-V;&eBgrvDO`6tSR zok^ZW=wT426shL#N&fQ^ot{J3^GM9S$VYi&%xkbOc97C_cdnA_KxO2&L5BUW^3DSQpA`x%5fHw-UMlKKK1{Qdjs~ND_I&SLyMtfs?mN4TB%r1Mt;}R zF*dcCc9r}r6uEp}4!+Idx>P6qzRzIi9F$-$^Ti|fxNtOJB^0HUx|fzzmiJHArkvvD z@yZ=%xB2Z>`Y4@=tEXZ)QeI2RbqjMz{W@~xXOw3RAK<&t2QHGSTlHyxY2Gf;nt`gC5J*k0xXays#LKYG~u%kadpO9tPCTARn7@cVDqGxjGA2sc4N|q32EzU6ZAC8+;q{ zEA1&3q1h)Xzc*UD21rgP)xp%co|?Prb02bd2c#9=0lKSde;`XIve;xTa zS3MZ|zwJF#a9lu)K4{cl>CF%i$6wq8yZ+-)<+G>zC7zH%wOk^0`%}Ibf|ge&%J92h z);wMzJvEVwwe3udV%xW|=`Me%djiTq+;@W(v`?gyjp$YP(%FYPG$)y=6}3Ma(tPz< zZAkW9Q+j(XwWgBKz5Ub^jsf>VkJKPD9I4Z%U~gh+vclcPP3x^d=vkB?ZED3j-?~ib z`A9+M|4kswqX0x#!57z22kn5~i5gq@G9KRQ4Bh{bGEYSNccqN2NM_~Xgj5%IX)95) zvw;3Cc-1lAYmvhRIBx@WI*JtPoGSW?4U9ZrPFbIC)72+K@va6slyZ9`gGZ8j-_-K* z;j5(g=aAn5=0(7sL7BF}7TQh!Op%-%33>(0iq)8d<2o}pIImCAOkM71NGI&&p7a*N z2OM)}9D%H$g-TGBqf@q*)@U>5TCR=M>Su%(cq^0pYG!%KX=(fijeZW__5BnH+mll& znMl8_O!`0K4L^jt`d@FFz0q0Jp`Mn~v#17fj=DL-JZB-@kMChCZ{p1mEfY1AD`M>N znxm@@%bZ!_UuWqkFV(ZTH}hE9LB5yLu1I|COk`z?zspq4bp`H?y!{#Ig~~jSf~S=g zZ-Wgz7rVoE$56^=uyp;gq3gI@ZQL*MWgjHuR}*}1(Qeb&;Qa@rcKya$+U-E7;UQ$v zhj6pHT3^(0d_N1?v!~$vw#E1=I?(6)4ry*kSnY?b6z+vcnb#>tzs3c;3x!&tGgymO zJ~UxPZ^;B8?g;HHp~ak4>3$vBJR>-@&*may>q!;fxxT3-w37Osn`@EW(M;L$4%c_nbENB@Y}&AA-4kXtcyZ3TlvYn2eosxD 
z)oI^PdsFQ`8v0lFJI}1hQ@7Oe=GT>$(jHMyv|AICz^oRa$r~n&Ex-xiB)*CN8 zi1pmXQ`c0_MLdlsw;F2c8P6eEwSL5(Z+oP?cAaZg%EZ-&>P}$xHKKJZ zsJl3fbx6@um-;fkKgy}zvS00!(s>MJ>C1>67FVe;o(+mK7KzIAWv;Jl3s z;hG9~UwtW*YTqKXt{pp$mJ(0FHMX_T*~=SFKC{U`dT*QHOL zEv45sm>gY)cRzIL7XSL!Izn~sOj`!t8YG}I%tt_lFD5JE9x$Z+Cr_=yN*)EA_Mu~^ z8^;!;&@rja3SdjW#1`xUx3=E`O*<3lx4>1Gq>TH3ybgv|A?Y*ZuXfvRq}6_LZtR)7 zyStD-cfAT5!*QF|b?cMZXyjDD^1N1wIVJWSiJd`j}f%EK~C3SdevDK+9 z&Vc7KFglu)y@;jNpzIk#ZY$EWJ*@6fk`M^{ThL5h`eHq`#XEr6Gr4Z(c?mpr*6~Zw z=oxQ)QoP*Hx}26a)Wg*x@52+;^t8lx5#CCBN@dDIE!BSL;Z0yf+fW_fkf^C29C!|4 zu`a1ZZ0EtP!zOo6q#9bQGcX1>$yJ+)VLv>|(7LQ@d$u1F(1_Q3j)VRfsWzzB{&f>? zAsNjh{B4n2e$97~&fnuM@7b0c(mh}Gk;k!L7SF|W9}1pb#iEt%tdBwpErMGmz(l?c zro{`imX`Z>2v^FMfXo@HS0w9a6lsnG+Ky<**|b$ds_kHMtVP~s;KF$V-N?y3%VM{TO#}+&}T@CMaxgw8CuK>@d=K?F1*d+UgqKm84DFXG6ic z25`(<%q>V3Lr9&#r2BB`AJC(F0XEZSWQSbV3%h0%_nt|s*dEF(do+4#ovZ!`)b=27 zWk9N(?HcT!w214kgfLc&?@e8V@f6s-4I=J|Qf z_QRP*@{fH;v8~^#*!EprYD_KR;Isly>ydiGo*zOfho})UuCD0{%@(u&J0sNw09zIK zy_?#pGu^}Yb71#cp7i0Y^L+mR_{WQ{9lT>tBuex=hyP9@&CV942ElSp$aEpZ{x zYtK$4-&08I(~p5}Eo&w3qOI+RHXyI>04`@pY~hHp*&ijSJ5!RQj;o*w*HP++SOB)= z@&|$1{=}Zt=mnlO(S8R*RjuH{y*9Vf!mehY48-mxJ&ZU*u?>ra`vZ4vbJyMULdqAY zYqaS$VvR`>GUxvskM;uc=eRp6=?Gm3Es()q8FZKtX9zL%4ah^qkJD5^T}AlPaC`GB z+gfi@f}uz)cH_(Cz*)`RU7YM^#LO1ur(Ws#N?1y2*UP&uTP!$+<=z&~J*vbXHz;Y5YI2GZWi9*?^kD8bS{x2D)v~0bKfVy zFFk0(Ip49w^;Y7%MDE%?yK&k_@~5>=gE@2Q!8fTuSoWdaMpLg=a+`wXpe~rpF&T=B z+Bza;|II&O=ux>63Z9?j$~xLTWd4ERLp!395b<*jvaSrxPvCI2y!tqLt?oVOPS@4I z=~&|uFmM)!9(nuMr=$6WYYHP_Q^twF6glq)%l@uWxC8zQ=K7Qj+_AWp! zz79{2h?5smN-3(9_N^rzauSX1-9wo*mGrgHma@VhzbS2<3!v=LMx09OA{N8dNxC|E zbrRtjwQ@&ib-}GbF*JEO^jMep>O3E3f;rE7IB_o~FY!|#{N^+&cmx)NqrC2OymuR9 z&i-g6jTUM#VaU9sV_U}YmCn@v*XW@d@=-4N)nhSo+HvFxO7n{rViiw!rf(Tn^?)%e z!SP{e#Ekct9q39=_Y2nYxR^YgyS*G4r*wLk&_mIDJ&+&%cHFhUkKnD5?k|R3SJ8U< zdi$WKZv|6N!L^T(u8eHC7g(IbRmagvit9Pu!)#O1=H0QYU4!UMnY}nhV8wofH7Py! 
zLxMY7p+LT;&>J}n{InuBia=i^7w3vNcIPh7%7xj4^hU~Gh>WeGrHW{IEl7J_*4e|F z#2hcj<_wW4J?wSr*|c>cM2hN6%fwq;_v9^X0pImCI;%In*$%-|uI9;j%PCJ?YDpVX zOueDk@LkEN1=gQB#>yX8(^sdRl-yCeNIjl0XvwW+h_d1jNy-8MH3wN!@_KBUSm2u9l^w48e!F!h^ha+SKA%Urk zC;bSvfu1Y%yL=P2gt7GCxW+(9s?y zao+8C(umngcs8%AJfQs>M-^>bi;Q$UEFd#r_4f9ux`zKh+c zT^Va$1CX8XC2Xjb?!=Be7D(%nbTK+!OtJP#kO)mHaUC6Z^uRR~mg<<;cWLVwsdYtx zRb!&Y}CsWjeLH1h%yEk{cfQ7YS21VVP2_2rLTUcqD-)GRnjGqStR6iIRix6Y zcBZG$i!s|!`gF#%6k-cFvtb(e?oK@SHWP1yz_j}eY3olVhO>b#OB&j~>7$8zUFB1N z+kXHJHIw#mU@4-t?a8R|I#QmiCrikJ>r*}TCA~;q%1nQ<>ll$EQy+nY8hHK(^scO% zeT4$jpt6l}feo~sXmR@olFD2i^ z*_ZIo0=Z)~;?r4Uw%>ikbY+74<$pi|!^)pM5~USrd4Ce#0ShQ3r~6t-1=87baQC6S zO@nhQfY!YTU2CY9z|qYs;Nktyz>dSzq&a3<7a4CA$mZG1Oj90K@g}@|z^{_ZG0Ym? z-MQM`cUGkoAu-kLdZ|uJ++>f(nZ_~F<4nF1sWKq!Pc79a_Ng7&E0W3)PD@sDsUf4M zZ68ou-5vd6RBcpSJY!8uDYQh&P_%O|l;)R{=25=5OP9T;e&{iGUH2!Yj*V%b3)KzS zb!%vK{TDqMW$&FR@LV`?0xA6!dwrt2{h)z;z}BBg;|d4&u00Cr@4sEVZ$los*6pkG z43w4c5a&>0YDMgU<@E|}bq3`+N?0adk$mJP$432s!(SLLFHo0NaPaATolf|O6i51) zPilEe?pw%Rj%hz~TTgD%pQWaB4q9!BV>^k_x)koSvM2U=@U7#=d~3=VIOE7?aOCgq zoj)mk({iw73e-_8w>zz<26Tt30{r3JtZKrAOS{&3Oy+cEzw;~1gK4Cc{;Z?=LkzmZfTNhbpn@G~?xRv)>Z+vu%EXjXAGt7;I^20g(B{m zP*ZyYVU4RVz4)k1w zm8z{SRa}=^^bnqdex!xanIj1aP43G5{JS=;yPLZ!Zz+X}@z}k19$MR312f@5M@o7D zMTye(1M1;u2Q6CHeeZ@n^&~b=Bb>Yj+57<#%o&-NqcQA@geU$Icyk>kEk~p4KX9f= zc=v4^SGG9^OBfyR^{)qI%93{HU~l>F(<8u#6uSV9n8(vEp`?Ler3neXfp|)O>!n=H zxJHjX+Sb0%pfuvAo~$Hyx!nG%ePPF|l%Q&xR=y}7^z^#thkRnp$l1P_C4G?+Vm99O ze2sks>?hWMk29(18Prc7ZD-o@#$=5Sr7ioC=4>$PY$WH4I!-$gJ?hLmXU4cv%YXOO zpe@UAuKs}GSa%JqXS$X{%5yh|e#k@D39d>0DR)$xgME^J43p}Lgv%4{hmxTPOhJXp z1zXmux+wb2A&*Nyqo2<7y=dijz22UiPp(-ggM$`g{0vHx7wuIYOg%4y=G?($5@l*- zI!Yk76^Uc40$8Z$DaOq#X(X-dx8RT2X_ZMeZz~cd*^-yiCS^jDcaCv=0SVcewrPSt zoRx0-*bCAt*$2o+CW)kmxtMs`cdin+lc!SKnNy`T&Ju0{0_pX= z2YovJP(;#b?>b`OeEY|^p9wvl!+jAWm9^ZJQsS*EG_Ji>;Iu9Ff-5EuBGI;#99Y-o3MADfEND-_D+T3wB4TO+l!?AR)VaGHot}U4PebZpB{mF^_bSc zUmbzzH-tND^H`vs3OzVBT0;&eB;7v|o|^?^y@0g}NG}9NXJFn*jBd$#E%VgwhYXo% 
z`)^08>7^f^aOJ3=6s%uz89b@AB$dud`P$E}Pa)%puVowjd@bGqP*IbaNFATf19a6 z4Os1-<_E{WhsAF#@wB^Y(Pv_#76`1*rya3YCp7Foe($&o7s~Z=*zAvU(iVxs7O>@a zwM}Dfcf`nhc(%QM^p_HThEdDiIOlWRPmiI%H~+nqTS{}O?-Jm@3+*$G?_*OL(F@gn z^t7cy?`=EUUkh$B4>0D1=BFwRoQp&}gc0K}QhRjLfwpCB; z+wgMj95bH%{(7p1{dXm-Yxkwx;K^u%>!^o4DEpwMb4RIGpmBeiPROMKkSo>2+nrb{ zazus{DiCKNJe?(ImXk{j(7V^d3^=6{K0g&%P@DQRlYnzN&(0m`3b&oW(`LfN=>?>@ ziudQ?vw^h77T{2#db9$An$=ZE?$3EKr8bhras7dOtwFXO!AhzPlzcDzPCZ+vt>|HH~=`QQt#It-P{^HKcZL&sIEL+GOre>nys8#20dl^{eGw%U_?K zZQ_`WJ9tl|R6P_*BV!x}RXGmj*nuk#97k9FtV-UDCCI?@sn3~7&Nd@ct*tWV9Zq*R zl6%}SRmtL?gBzGm&x508PH8qXx88ei$@GQeadGFQLw-b3%k2x2p8#(B^2wv94 z8Nq3fK+f|2-sWb=Sm3T8)dtQ-;IeAwnT&+<!P23Bm<(~vdNk1e9^uA%lqXwT2nxjFXYteq0i`zMdwiuz=@6kxte zTEF=eVR=mHG@gxVxy6(w@0qyPUk^h|`l_@Lk3*?ti4ez0w*!A~;Mc=!+sl@1Q73so1XW!Hqd!Z*MCG5+CBjL{ip;|4&I5Tpnr;a6_&1DByCEh%GlLJ%9dLW8e zMmIvmu0hR6cNARfIH2}rKT5H6=EDb$E@z}Sr`7n0{NskJG@N(q&YgN+=K=rq$UJAS zT?RKg_g78l?uYhe){wXBZiTR4@(4NtTFoJZ9`?DS2eN{?Yr9#d9D8@_!TMj@@L{YZ z?n^1ngd_)SS9PnpHvV*VFYngY{(-F*^lK{?Xrm7FNz?_=vuTCW%-I!J_EP*mhn`aR zk2ZW8aE?lHSNvX{;EMi&?XBPUM_{Cpm>&BkIP4kPQ&30785DFAF za5-@fr}XKcRIEV;CL8#wS2FVNfJScx(jxDBB9Zn(Yr8v_GteDf{*c(erw-a;Pr^4! zvsJuFJpEcRunJ7}|0Hiy#IHFHNZjMzTKZa=h}jvv zo^cO7HHI%*GY#bJ{H_&gmHKpWs~&!rzH%A+zS6+6<_LTOM-PGWq8~ zEU`DaccRa&&8l9hAg>)jPLQ*K0;=we_6DD)0*PwMeB&9sYo%sa zTDt`-`&TLZi>`ePic#WJlTNB}eacNpx7n2M9tWQR26t=e%d_+5e?d;}r$2+7djb7h zNYgo_cgBslvG4vm_c&ZVrxog3#?#AuYf4mN@3q|)ma;;2Yc06COWB#{lIN8X5_uWY zH+_$?_QgwkT+*41(8aLCXHvf2bnVL$SGifq8@a_*O=s<8t;3q)mHEMZR@Pdb@@;8%tiU zt13a+?g*}~vahc8a~JL*gf{cM5SzU#bm1Q&4kIjlV*dBHhOe%3>`d(!^K4HiJZ+uZ zuo>8~$D4NhNic>Q zTLWc-(` z%E=9c$M}$Iwxv$(mJ*>Y;6@L&K39Aue06^;eXz^O`xRPSYqXH)Wd*U%K=w)X#uo;q zc<9TpHhQFzGGbZ--#n`5UF*NA#_|$IZJx(S>EHG4q@J#>an5sBFmpF>Uqd-cz`qk? 
z9x2bmd-6akX8^UWPqXn{S06op``MGIqpMk`aYpIxyr(VD1&t@geNy7=)kalnap$<( z;}2+ee#%4s`8|5|N{EnPJI8d*$BK`0-T8}s{M@^yzr~qm$|oh&gP+LBzQi2pNJuTm zbkd*uc{>KHRd35pLj$-sydI+9irv8IhD3+XdfrAXceizBrt)PpsSicsNQZ+{e*R6` zGF&%zE-|Ojf-R)F0Zh8D&bfs9x10u`k@AeINBeDf$ay9w0jc)?&E$UoVSAH)CORna z>l!sZ%0YS3PDml?ai8REb7#nyQ?N6x+Ml%x22qCpr*>>g|4{q>6}Dmfp?@sz|0`qt zA{;SeP@Z?KMDCMwWmiaFHEEuUw5tBgy%F26gNt{A$@4y^v1Fr>W`{z(k(9^+`Vee)OXU3^D+W_A5e$@b>GhV-gmA4)|H1NFu z%wyqA`@Q}@tP!p^Um>+$*~;B~Y_}rmwP8Cl7Fop?M?X6y+E)^(cN}Gqhr*YpFQx`u zdo@pMd_t;2Yr>Cq&JIGg@+?b_WyD(1W*5YKP4}_2Cbn@#YgDjGS>NGiDBaayXC+DL z{D>@3vVN5t!G|{U2jsmMIQus5&YIf+q(>2&C3pj>{92tbD_&z1Psnora_M6)bRIkI z!-*Y7&P~3R$UhJFUcbv*`xjG->HpPXt%GLvpz;(rsL2Q$6V z=oe61?SZ*`b4`F!xH`eIJFQm*=jz8dZ+B2}mX`VLhpqGmci*C)wH4{rlTl=OwgXb7 zxs68b)wzb*nIqDvf76Mzx7KMP)`N4Mr2qH}{S0VrJg2i7A4a3xhHmReZsxEG+EIeb z+4aB{qfXW?#@FkJ^Jzznc6rsN4qrftdTKYx&&6F*h4e9>A~_d1oL}zjJ8gZjGaL)N zh>eutF`NbE_eeL*FtQEH!mG5G%hjUqfS<`pax6%;K!+r)CQ&2DnLFaY*wn^> zT25Nb7f@3rbzOqP9jN_-k3u>CWV;Pf~B8`S_kw zpHJ?e)dA})Xk8zS|E(>M`Z&IRi*WsA!ec+$HT3TIelel$t@U4oe~osrf4qcKe0K)U zso;NTg1w$F^;`w_B2*pJdJ_`f9ePm6iI?ZG&R^)~zju<6%lF zWtIJz3BYqQS7XH}fivfvW4sZ_?jv+`@=(jwebFQ*lCu=&%5MJ@Tm^sKmn_6J@QGM! 
z0&o6_J<1K*vAlmg^yO>GIQz)2uK1SnJUT(I6LzUvSXx?6c@I-tNXuAij6H+mOI+K_ z^Sf)5cD{M)~N@+r8^(1_d0lcyHy=i0(X@RsYyCZKHLu&?1NV%LX;gO;F03G;tm&RlR5)74XVaxL?1H{SJQ%_rnAC{zEx^O%0!hLaxHy2ezF zJJ(5RRt@e8P>FvboS1ZwV+#>Wicv}{YcuYjb_A{Yr9jCyqJM)X9q%$!n07tXiR!b9 z2v?4Dpms;4IQFAmqwfr}LDZx_@Y_R>XSMI01#atCr}lH#N&oIU6Wi6*von!vj?oR~ z>U_$3(TWY^;Qq0V&}ZP@xhn@z{|?0102Jxn#W1f$=unCI5pqe{;5-cd@L5W0NO%y!gQ^^Q2^w>+ z4z0S_pL|53hE*w@YgONY2FiSUKXH(&#AgAM^9fv+Qh?sKqWiSs{f|lk>K3-V&(o<5 zOC4xAdHPgRU3dI{Vz1Z-lIx8vb^=2=?MD5SxSLZ9-^-l#Gu7k1Yqw?TNz2)g{1^o7 zH4#rQog;yH3Z;8kX=B?`s$JbVeh@OU0o;jq*CUNhF*Cv&$s@Q#33?+kU@|eB+js%+ zD<$8COSJ=}Yo(cf6#pdTU!2B6gENyoHH{X0EAghY07eo2Y07iHLLWL*Lx~wB^o25oPQTPLY+d>2o+RDeoh&%d z9Aj+D)XrY;@LwK3tIEBgXk+P$6S<`U(VQts|Z<)5eQ3wBBNsuKKL zSWj}kJ(wkY(X%F{#S9&v_f1^f6R4#+@#T(d{&(nUC`8Z3ja&~EAf8aeIVxgK;Iv2v=?cDpR`lAhqIL=kFHp>bD*QhM?&EHdAn zJ=4n7^^EQ<;rOtY&=4qGNp5TpIXrxWt-Otanm*vWe=00cvzHP@%q8V?zWVo=ENMs6 zxr(pMySYS5T4UexBlncVyAT@^{YGiox~j{wk5fcULn_Hj?`?e3KW5+3@9{+&qmsHu zoyCFS(z~bxDxkaeD_q^``(=5S&toUPf>P$ts!pyoZq9peY4G^XLm;u5+%1j zC2@HoPX+veYq(oSM~_;;xV=OBAZj+f#s2eUGkhUEze%e&GgVupje0o*T4EiV5llXI!>TO3>4?jiC0+hlEm zFaPd6$3qGMlO-(QOPS8DLx|qeB8>_p5m~H{*ZWT z<_y2Pdlu}@Tj!*yy3<#KYSHLQ)Ot==PFucvqhAgL+pud!0>SQZulBHG`p)zn&fyOK zgNVP3FJjV}kc*(+cfnS7!rU3585uF2Ty|i~Y5`PXv&q}BlkQ?1}@| z2h5^WwO;giHX!kKCmHk8q~Stz^Jp6W3RV|kybm}bg`vROz;GR z!n5;{huV%>PI|`FPI^$(80t`MmavozHJ*pI<<4;;g%?4`z9|8u zp$SloqfS+{kG-85beI2jsbus|iR}Du`KX5Wf0`#}`n-tOzT@QErSYaM_XS}L& z-LoimWQqMMmBq%jEhE2t4taZSj==hNcx6>DD9AbE+kkX$%Df5*-3vTj)>gZr$ZBV{ zKT4>7c+~>Bgi|c-gC*|@%YM|SGu{+ek2n`l>(7~+j+UNH%)#V$A^AH?X%c56c}*mx zyf7P?q3nfVa~Yu0fX7 zqiK{qKL^r~L1K9|mJzGO1o8u)Lh+~P$ zO)UYvCR&{K>srZ6{1w~y&=lHH_1qoj5b_~lw{FP@o3N%FDUt zy^!`ewS?wJVACGrn3PvwvyL{=f(wo4$bz*J`i{ zTXngsKiwljDjozr=Tk!|+Zm<9utT(?4&l2yYw6FskDP;6rVxJ}IJ_BJ-ivQq`Tnaw z+HzFWexWNaSCPkg*gY$ur62ZFMyg9Fy@XVd4!G>oI)6pk<+{9eq!8jpuD0^} zL|rAGw{kb=%e4X*2ywmn{&W)>C?JX-e{z$l*R$34@!sf_ZtObdFNz}Ir zPUrwVtMlyb`3KUB__+mT8#batZ25)ggfh9WNE{RC_44}EZfOPY*O0DAxbyTXD8c7A 
zq;Fx%-}Aex4dOYhm5?zZOSD#UDPf%A$F+;msP?q+^&fN3OZmrp#Rub+FCUU z8$QdUo;#;BR@jB&_S23^+Aw6TyS@5PHL=i|uD!A2m#|*UxqfFrTk@5&+@lY?JJahDMhn+-v7duws7xWyLMXXQ>0vLE#@3&b>2b=c&qRsIs>Up&D}jNxbjTmIgWK4bsEUM5&n%F z>{qz{sGRaMPml0@`&VAyJ_}U#6m6-C!IOP^{VzkY74^$ZCwvPsq(JBrTGkcV?n~vk zg8g>whM*I7Y4PtH;a8rYo+`+fHro7QCsa{}wp8pmbpm4@(v~|N+jlJSCVulH zcIq#F{I2Is>T=cf;ZD)y;m-flK_G4{Z z**i-gdtK7vuJOah*)`s-F?>_w7C({lld*CMDkg13doQ8OeE0mvBX~u5n?D&Rm&*Cw z*{tP+l(Zx|t!q;Th%%Jj9KFUl*|M-3#+_C(Pc6fD2&T%*{TyeWkSO~hAup*qF%&k}hG zF|Nk&U$T$zUHTBhh%0~hpxk_)!N8yK9qrNEF#%exg@dOh`t1T87O1_sI|p0MZx7Y? z8YrxW`%E}WyTLG9-H~D~59^U30gA*Q#27#`bUm5xjtm#L*Yjk`<=8f{w_iU>n+^Pq zkgL_~TSjlV3E8%Z^!H-PXhTdu8oA5nv$R7!vgmwpxD7hi(<6?QVZ!6Q07pLO6J`r4 z4IH)m0WEbC&x<(qt#_wQi+tI{yE;TU=_rFzD0U9FX6}#vYb*=*gqzvc>kzwZuocfI zowL=Xfz}k(1zAx#rSyxo|%E5hvI#vx_o z&;QTv_Ac|7!sc^zs-s*HXJA{qB99sOYTmUQcRqIGt-x88=sIX`=kJG7p8p|pR^~%( z(tbC}^RF~o|F2Q15=&UBlI&SXZhNEo63UXgpfr zyfg<#zt*fIC#3i}nN5*XT(hHG$I<^8~3tyebQh+z4CPz^s z-%7&vC0_LBWB#`Dpw)<5lQeP1Dtm>anXM$9O+rRVo!a(KL4iLaCuOyzID`8wp0A~> z@P=2={_bGspR`RYg#24dfw z1?IxqbDx(**rU?FzLb6T%1XSsj!wuf;p?@;ABcte7t;1m zejEKy^3P7=%Soe)RmXeAAbZ?1O}klXRD)GCo>tTcqvY!fHMpAhF1~frSp^>g@V9>3A3z zAAsH(l%8!*`|^$&goGGR$Xh^EBnS8XE%4q!gxE$_97LH|^tjIw7_UoO(X^}6ke0UgJeK>J#4R>@_Prc5m)KyNoVzr5Ox{0T*X+%>E)DItaJ*BK5 zRDJ7=KV^|KasQOsQ%>mkMLe3a5#fvZo|^msDvN*u?5*7JM}a7mvP91RHu zsY+Sv&gxC+sh+FzKU@Qyk<#x{X(Mv9LmF3eBvXIZGQQgbaDShrJUch(4PuMuZeVA8 zY6JZmd%-cxdj5r8-nSitHlIQ}3y)T) z{|s;k;VSZSRO7>h#nvSI`q6tUQ$kn5yokK?|t4MN&cM;$>v!I#Pw-bn{Dwr zHFdP@BI=;j_ZZ5Y%Am@@2|L?>Gq9-b)RA#0|8gJ={QybQ7Z~3pejO`x>*(pe4^$PL z+WxLN7EkWms|P@vr5EqEfRf|{?8D%5L*k4%ZIRx+c78a87^N3_cMvZgHR^q~&I(Qj}kH;7qyud)oFXLLK9GHf)J! 
z_b}hcliZTgF8XU;M}JOEt>T_-wu1h(6>x&9hli!IdO`I z=qRxw<+HGj&&IWd{yo_=t}f}v_X046w?i96-*HB~MTuIS6Llt3bQ59C;30bTY9n=* zGoos#$v0>@#|Ny5vSb=h(t?sii$UwkSpg-&Yzd`+wX_#Ch`b8CsR`Go60~bBAwMOC z<(We7TIiM*?$I-dF$Lc|q80PEQcBmDYNBnO)uZsn;nD>xi{q{Wtm?IdpEiK{-EqwtqI%ffL~`#M#4 z96F}C0n)iPz%tJ0cNDmfOmA}V5=x8z-83SNw16&2{J)%0DD`JP`l0S{&af7YImzo4 zNg`xz_Xv&a!3je(d^rV<-IF@aOfB^yHT;lot_QSt^b25g?Y^mAPuKWo$#81(U2?Bu z1l}3V?%Ef-xR;3~RoSO3kb zkSRbC6dzcd51hM73Ta>%VgLI_yF6%rYR|1e&ZS*)|Fl@f-V_l{5Y)o zZ$J&R;Eg3{h`sPxo{4O}gI@3ytU*U0dQ@UWCdH0@l0X zGCf}ZH$Ks3IJt#-ehqrbux`Eq97i*Uw9-C~g_cop zd|g|JeF^(JVKW5;Ufr#EBKJ7tnfzJKir$EOwb_5NJaw^gg1bTnwDPWX;OselX>+(b z%9SP9o!{f`nx|rg+51ugxu=?Q)gI;9;~jFY;H%c6YwYD*t-R<53B_hkG11C5YgS4A zT4jN^eqi?u;*3lAXtQaRxMxrU_xVXzw?GZf0xc59{+zHEk*SSHX0dBOOWW4{@!YN7 zKZ9o}e@8vPmD=4JE+V!^D>e4+?vg{HoOsh(GBj4KYjtE;+V+HwB6JI z+Z<&uMLpN!u)_<4w?dh7!TWUdpgWxGfga6xa&+=l;OX3ExtX7`)zN(Y=!d4Brma6Z zNlQH}SCFF;qlp}xb!3>ci#mfLrL~w2?22LA-NW8l_!*>{F~pGqLK^yAO0cKp{Edmp zixbxTByemAS!#LDTKJ>5-iaQ)x_%iX`Ip!|new~)=g8YAz`uq!4&U^Tq zR??sFzg-VAWB3CgX$B7;BFVcm$NMkG|2uN~Y{KqF5>F%NgQ)Mbl>cv>haho(2ju?& zjhx4qo1y5F;P_(*KN2i%r^Lm;a2%!n4$S@;N`ErJd>c}A9XVVD)JId+a`@y!xV;Go zpG6}q2S>-E$Ib&MN74@8L>7-E#X9uW5aPT_nwQZ!XL0?AxKH6fodP7kB=?{3^eEiZ z3j9Zakw?%|gW#0!(I(#qrb}tjN#Ja8^8cKJN8&)F%H8016tMq_yjq~m(~#I-qQCf4 zaBy!RokvOMQp(?;x8+d73f}IetgETd z&uPJX$bT#{VFsmt6}@&MQsN_GUrf#4pvJ?%%6!`QduZ7|f#C)CuD(R-OTpn0(Az}t z)k5v3C%U@_430w{O(j47ruZi9cO;O!f$p3G#HW+~Q6Tsnuumt>XQ9Mt@XIyO=A+mW zi-BP(C7ujseg$4nrbgEz6HPM#YX1qfnFY+Qzd4cXsg!X)c)5Y|=jq>TM}qM;pn*S= z+amHm8`?Pw=nth<&(bQl1N~^){KsJYk5Jyfg5f*B;O~Ly5UlW@L9=gC@3Z*YW zDnCrA<4Acnc=;um{WUe|#SW*}!&^h3+pj}u1Nbt5cng4e6dcuuJlB!p8epD<^!*b3 zg1gB1IND?saaMuPkHF*Oz`G#9|12e52Ato8M}B?$@o@b;)bV)YK1NDsZN7+%pGW*x zNp%xtuB5~P)a68SI)-|ljIA&eO}QB8Uj)O8DDPZCwo}_Rzif`$J5p1y^@-(qJEc?_dSHa z2eqFHguhMnc{Oc*JE?n9-d~~QFN4Wea=DxQ*OKRIX-yaxsI|MwZ5LbPNzL?fNQ2<)4Wai%RsV~c&pL% zhXV7Zl=D1rtb-!Xpbej)R*#bFEl8$1=*m9?{0}JZJ*4VtsCq7xmeHyQBG;}1w=Y4X zlc-HS)Ubl^1MoBF_Gzv$cfGf3(w)P5()!xDl^i!v>tV^O9Nr#xjjW@7S|UZ_bW9di 
zSV~^|5OM|K+S~ftM)2$rG5kldv!#b~&A7%lJ)2>x#=BR`ZfyJRd&CIeP&ME5BUN$k z4}7k*GQMTzIaTpykHEC<3x66ID)amfskEp@r(UwP(2g$>_EEBg^_i~#->$cHHHx;4 zJK#(s{+`IJu)6ibnR7L#81^~ibYGY7wL1$rEH1x!I2+A-M{*wqo!rf}nYO!|sI%Sdef%CAPbAFu2JVOQJ{F$wkDz&ct#hr-_)Dh!C~dpXky^TpUcZ)Z%%c|y z{Q}yvmJq8};#sdygqwqykC^&y9ETGdUAgy2wX(h4d8Ur==aSS8_`O=|IP}X5Om)x( z_ltJ8y`BH{A?!Qbv7a7Wh}tdLcrsOlnAfw@EhdOWR)!1>S?SJs*jDIPG{3DMw+GI6Lee zH1`*PY(6O-qi^;Vp#6H%SvLc<`Q3s9c^mlb`9F+|f0KM}hTGnQ2mi?1d*E4!e*{fG z36%Si_miaQjlMY->9!o2{sVaqp&hSA3O@mko<;($M6>n+Q`6AK+7fR8*ZIgyJyH)- zv(u=VmTMg~`!#JlpFF=o_?O5}?t75@ZXw-==!~1v+n{89-;DHM1TLDO_xq{I!ASHv z^u$HT$NS0Uuaqzqm{$YmH^|}hQ?WUeth`#V`Dl#N&)+fNuE$Hg+!_QY! z_XnZt$Ef*QLR-*iXAt`VTBJ8R;n(R{iuSz+z4$m-5RZeP_0{O08wj09Zj->$)#%2@ zIkfivf>zy5d4~hf6X5x$@ZT%&(0NeCrM&-+(r1!-Irhwl97hxODBALmK)Ek)-HBHG z02(@(JfA@K>l2y^eI5-?{tT`D6^wliIEK<%zk@dCf{A;erXK^z*+?<<^fc(`L3H?k z0HM8>DM*&RY56*8rZm_NJxN!er|i{Wvj-)8mE%Ox{{~(9Jk)*^QcudN1Daow+pExB zN9g)?>iMsvT19@>a6b&%Ig1>Jky4B7Exz}MI^HAn7r?nTg+7Ud{uR&HlGn@N{ntP{ z4*B+X@XxgCiPYp6%6tLr{U^EE`xy<~FQ6qZ z0k)Sow$sA*67yh6I2yWYrahMd(-7h;1C}lDTQhkMr~K()?P&1)FQjS)>!Wz?2bSlP zkM^Q{)@RA-11!V4XuGYn;;(6=Gr;CxaCJN7JKt{>^*IKs?sigL3-?V2qB>&UOwJFI z?ojl+y~7(4#%g(gf>xbDt!I(`V0g^F*w4V*PmzDhpp#PXz`Zl6vEzWvK=ezVz6W;P_v%^N+&;s%smFZe_m_b5X*lC0 za{fA&#kI(Y`-!uKJl`kY-o(6_Htq>FHbZZ-z~Z@3{%SCPGj;oZ!p}F*sb`}JuEFMc zhDB&Tr;}cM_67EEB)(5+K zKDi%^Rr)I^>@Q&RBVb(zom@kUjfDnYK(lP8gcp&Ct10_2sOmWQpb3lQ2B7N+^-Uz_ z_md`hkJ!`cM;(+T!=Gr)<7NA`Uqf2R4`blguOx}~ z4KVX0G&Gz%-=LoMTAxKv>!Dcp3v>K6ZwzkW$9Dt9_XEF-4!Xf{n2jo7-7| zhB;d2T|cc?|H9NvTO9yC^oV!jbltM6IIWLk;AJ$hGY9nfZzIgv3-Lb_djxt4oVOEu z;D*nC1*JJo^&*tu?iMFe#-+&Ou+7ipYm7=|q_hlYUw)VR4V_`dVuu|=_Bsmfp*SH?-fvDnHsrLr9M*B~`Mq~^o|2jW)hPEp5HE{G}u^zRii`WsaU=DZh7t(i`b~D>k=h;qUaO z18dyN9NK#ItYb?vQrM-o7nR$}jy<6_xka11)INg6dy_@nKrdT?w3(V}v35^tZK0mC zJE>XV%9B3(^FrRliLRusAFJ2}r7vUu+S8D}@Y=}kW{$9h-X}BppQ`63(A}t|#KNxC z3*M)_*~N&UD-F!7=PYertfxGkP0wm$b~Y_xdnd!-SuNke-v7e?Z);3|~vidN| zy3!22(5En(rLU_^S{sp}v(fBIbZo87+Nk=b{$yLxH&tTC-q6NMv|0(PqYAI)M769_ 
z+d_Wj5FSJyP#=KnSA({L9x$`j(>(bANg0XH-PC$6ew3q@McRKs>jO}~Ii6&`{S6ur znzSu^qgtpm3hbA_Vl_Bw;mK&FS8Ml4{z4a=K0=GjV3)b_QM9}+92df7k$crxx)arD zUKv+6dcBCY*OIaQ+^GtxS}4?v1mC0oXh2RcB2V$k2jOJ{Ff8&4Z|0-=8c$y4e-o4+ zrnU5Yb=ZxaWMU0R`lplstd%^1sJ{rx!u24ns#sI_l*TdatUO2dDdh`!*NzIx|rhkBsFw#b-mxYdmM<*0NHZ%i=R@l@X4|& zJJr>eXmoZVO$TUUr5ZDe>_$TSX|)OpOj6Hi{M$tuDuJUl8pcY@rkP98Inw?tJeHtt z_Bm};|1dUrwbruEf1bM;%}qqzVPLxkE&G$7=u|3D49$ZUVsr6R6X zWwS@(SH^xHZ z1ts6nODyr!hb;dxHD_n};pi|HpAP|H*4H-DyvTTa)M19zl&GfmD;_EqW{?s^Z>GFz{75?SeiD;ZPv z!IM9e=5GWj3I)kt; z53M_1Y}Q^~aD0X?EekroLyo$j*@@t}mV6w74%z+r4LqNPN6p+#EXOpu&=?P1){FN> z({7%q5863oeGc7uo;)9q(${ODo0__UxHlfvL7S_!{XQJWvnDy?xS1M8}UxNB^^nT5gPZhHI0;!&fBA21vm;8W?&@yJZ5_C;aEqd}I zT8{+9sW|uVUdX@uBJuw+E1g$nuaEJ>_xw(T;y&m(&$Yw@Rxhq)#(z*zHvZ^`BqwL- z#lKI6#CYp}axLc?BuXT8=GxIT_HU#znFz|fHW_o|mGMgA%o=|6zr0$`wwCj}xsv(i ze5Z!J@Hbiaf9ro4AM>xHp=4Ot1Eu3dmMbJ@Y2#_ql-z|geCDaNno&fV`(TiNiM}pJ z28kT{sXF2V9{tswcta!w6P_PgU~?cOvz zxofg#wF2AlgwL$MBo{?h^R-nyx4L*TQ5?x0bf%uY#6Fo4E5J5cvIBe80NyITzeT?D z{~KD*T>lSQiBt7z`}sYY@AFG?-To^)l5H{+&wh%G0pdYtPiO^F^<#E(IQ~>uCbi`Z z*b<+NEB3_A>@RNW-tizE=Kd68z+V>Q$xr|3L%*!=309MR=DUJAjwXwV{Mm#4zEw~q zBdP3_$%)v%(Q{-B^>-!r08?VbZX{Eii_t{<_-|=D-hG~_?j4C7>Qw>d_Vi9p@ZGCS z8CS;h&)(q4evLMBkS9xQ2YKe}D7135ss^15zE6ARPVaK1;yl-~9+jOgX=7hKdd7Ug zK9P*a`xQ8Db??@%S_pjqyH|DP#ua71{*{qx`SiHrSGbeA|NV6$YG$tbeC!6n~_zoDQc?Ls<@0d&W73i{t9Pg=v6kD| z(K>pW+VsAqHa=kupVZDYbXbTEQ+;RFXB`Ndsec0AXD)K9I!4j>ns6AUr#Md=ud~mI zmU)b=ewA;W9wad)U!dXlQ7*pdc%?Esz1VY~)7Eyb#w*L{dIi`f^OwJ+e0-DDKF6}l zE#29r=2@;?$_~zCr(ade9q60*iSXOMl}xcx=U+`5t}W_VLh0pDpf<@*A|rVT{^P~Jz+mp%kjt` zb2YP|wc(d&ml`Or7_aB!_Lb~ZeK2J0rd;EDR694~&p`a14BwaO+q)?EDtJHCa?bf_ z0lR8SCDLLGDpewz2jSxYus#Ta88BI;-{=m4`#_pVor}RVf}}hSlG{Lbbh z+ZVJqL&^1IrU}T}f_M{pCeHY;DDzj4wW0m@`F#W~Bs%R;oV$o-o&eH0qfB@r*o0S zo7A`uNgqhATA}58tV68JYjl6CHfs1h9nJ1AVmLy}Hc$v}L-CB9ftC3bhn`95$ z4?R19Yd&n66h2vEjRxwW7vt?lG91|-f)Dl4v=QAtmo;DQ>BeaABB~9BeP!6ZLn?aX zMXW(1626F>zYVvK;CcZ$yq#_w55wtd%*^odg-q1(`7m6kq1aN-RxZ5n%&>d{>ZgkF 
z$Rzw8qn_l#ISgM{>bu)}u9~`H9eTp!GqRi+jyib%D(d|bcjuGz#q3f$(%Tv@zDw3C zYUxq@yaWY?(8b&EIgvWoqwT3$h`ye#j&7)4SJ~|38U?b8Kz#zboTU6w;5o~9w>FI&yIY&Sd#1M)FiSxv5>~*xHB3(PSx@PPhh%x5(|>nX*EIOOO``5~J(g-Bd7FTH z_mZ5xL{P*u3Rq5?#1hN{3-d-?{wQj7VzxEPdwqw7rX2{H(6T z^W2Z$SHrb&KKZ-kx?GpJ!uP%fG|ko0#t1%{p|Wc=*@1Rv!rVuX5&w6PdII%zp2_*m z!8!XF|I3~D;K!oJDV|MUiSoqOl*q}0$;C6q^+5BW&x{>*zmg5}cf6OZLOowx%^tGc zO?=rn`gALwC7DYrc_-4S+%I0PA|(DkyFmNuSI)q%WZcZjh-vpNpVxWfc~EAB=@?v1 z&cR>0emI@q*Q!HK{m5?Iw3pFseswQKP7T@KtfAz5fWNwOTj8;PN+Mc=<{(ceduO8d zntCQY$haY&<@li-jqD!JWXEA(${NUzVbTRBlU1PHHzy6Bu|gBslgH@(IZAiOi=5wco4K3JB*s!E+9t6{_v2>v9<+3)FKb?tm$P2&BlXm$ zuqQh_n+Wp1urxJUu^*|kGmmSC>${9>H|c#Fvvf23TdRM~ns3Ggi9yIoa?92HB^!Sz z>;ICrTKL~fd-0a1c={A}>jmCQeRW)3sUI1@gBa*_v35^VXXXXtl_V$WaVVQUV=6ytgc2RKJBCl! z3(b3>$HrnLvQ%xA;4xL15h(vUDrR*fv6;`|c_KNj6e{#8cb%R;_%h{7O7eRQz6Kyp`InVd*OBGsn=P zyU=Yr8~U1miKLo}o^zGG!@V(T%NYE995?_b)4-Kk;lZp|k3wt5qSZXye+so4)3QdO zSP7=qXco_G7#Xey&;3d6VDx(!7Jcz)b)iRNQ1{PDCsSnBLbGx)vhaC_pvl}<&*D6%BJx_s(@&u_I?Pvc{C)Ej`1nW|Hr{Ak)E;tTliq( zJ=<8z6KLpWGL`*4`?-_(&3CmNjoU!GA4lo@9-^gwY~=m4_5zmaS0G6AUPdR!k*;oR z*CG(y!K!Tp(e3IE2NKD(iY_GvvOY;aOs%ugs0H{Ikg7y89f~Kd!BEY3IKK5~xUrKC zy$AnVo~Vr<*;|<16J6CZk`K30s}tOa*SHQsHc*BRZ8 z#ft|(8XK2bz{qu1rLtF|3+;`~sH&EJ?mf+R@5I65LE8@gQ(4-pc;M~eIE`F=2IhKN zP4vfPS5DQNtb$)X)PGaE-AK%QJ<#2Bp|w^T=r51f-YB%X2GoC0av5v5g!E=~^ePNz z!g-jsc9QbBB<(D%T;39d-=QgWGjur|uySa7i?*}W1>z3JVm!jEr&!sFF>GRQM` zc09;mQ`7rw+71%a5)2vjoPe|aQTGBo>w<${=fQk(F82Wk2PNx2JU$CAKUj}Ek;ovW9y!*_rtc4Gxt@ApI>wD}P# z98`F*Uwd%h0avmYe0}9Db(Ognb3SqA&-O>d%*tn65f6V4aAj`(7kat)#7Amp5Wdtz z^=jyx^`bk_MeYVYdP7_s=lAZlsblghqKkh-2P{M59+dqrwDEEyZ*?3T>)@4lAr z_`BsuWlwedwD{LhZ;};`te)>v-bw||U%2*` znlqBhIN)HlWS35|wI1zv_K7qsp3J_FhPaj5^2)ewpRa!ZNWB?3CRgyFudb!FKl&Xi z{TUrXr;;)?V0^saiHt~|*at{tVz086midUv^f$ZPGnTj>uEDUxIb-QW5GGT}T2h$Z zDZ>j2pQ-k`Ub{~Of7cU7v!i{KI!CXTHN(8#@XpA$m6{UWcAGi&-C0p8v&&_vjM+*| z%duj{a~aX(UB(Yq1AleT?`z~4YK7u?m1V-yj7Z8{vf<0EURmu*nP49J4Z?*)h6SE* zIO{r(FW$~?mV?iFqL*H_ja8~)psf4!UE 
zd(!=r^d*TiPSnwjH2Q6pH~#Enw_*Q1KUYP{MU}K&)j;y!p>yo z^<1*Ejpbd(mRzE}qfl>@8sZgaW_+ibE3-_Ws_k4l{~$;bchgl5a-rT}qSB9`e;N7T zPp_~+s{@sNQU7=)OSg*0uvGh5qsXey2DLQght2UXF;MH#p$9EK%KdU~a|~a%l3M;o zj}!ZqaY`rFc_cjcZFH+vKO-9eg&qigs~_p{YIJ>3D+kAtYGex?JfzC|608?UVw$PV+D)RfhN zVJLMDYmnIFH`LWhudp6P`hanwTG#8jwF61`>5{&br1Sn6TGvO z$jVZpf39+WBKhdfIwUjSCwiA9=#xw+6VjL?6XKTXGd^!z>)M; z?LqxBz0o(use3Rf+7l&?(~~#Hv8_r^F&1Bj z(%&Zy(VLgy+?GW=79`K{P2;hQ_ZkhOiD+>e?o3wW+uEO@Y<+Fj1M58cI~9)EOYja_ zp6TiW?K}?t=+-jQ(3Pw#LC20-%Q|o(bAF;%D?TG(-DnL^8oVPX{xuNQ6Wk zo_pqo+Z3}g@A63!BUX*o%l!3HP*m}q9rqWYKt^v}J(<|74WPOQ%-OXwSX-6VHbRen z4;{!#$kXVWNV+k2`3{>-Oti)=28w-;!rjJ z1xD*p`4duHm*t6<)n0vVwE1@wtKx2NHohNj^(3ET@M#-eS*hhV{NCE^@+_K?b%2ji zxsnn$>JPKVmT15UywPo1ZUVcMjg~Dk`_x}su_l|z)nG7XCvra!Yy?5#?HZx`x3s(s zU%Gj2EQzg%D>Yr;jQYcH?o={ymNsjX+V%Q@d7gcRRr>>qzwe2R8s@^`RCX*Q{H`EP zJZW+srX|3RGI{`9Gj})*0ieUx*%UK$85d$qi8vWty=0 zJ=Bm0&{*GL{ynVk%U+*KsCSm%hr?o&_wUGE=A=HxkA>Q*&(4g&_m^0@zUbTpgqcJ8 zn5~-4w~R+R9%PA$JdMsw^L+@Z+e#YNfP4;V__Tofew>>}a;`$L@O^}q57+BPht4#f z$m-W~pi4Y*nV+Mc)_xAg)-b9K*M4YP3oJ_u?mta4vu0e|6V;T;9?q9>b0BLyUrV)V zMptE9pu{U+3SL<=?1G|MM>-81v-_~O^0jI14?vk7CRENicq?u;qnlZut>tQ88k_y` znGenC-d);09W}a=rgO~%EJx4UAbPK`2g^_d^!sdP)>jizH^R7cEL;|_3CZDcn5)TLlDr@- z^rE9c-4(~rL9@Q@UCp!jkSEq&{cBNsENp*`uFX*)YfkMz)IeS5(xIF1V6IxyyC1K` z%$ld)$?m(m3)wqLEq_JD_tllTp=PW`=2T;wr@40npXp%sc($J86PRZ8zde{Hp=NsH z!%;f(0gGwp-L7mWDTxhk2CGB@Y|`^48aOj*nPZ&m$_CGkVO>rxpzA;~v+CcAzpxIj zNAkpHqT2mv^Q?O6`#qcFCrb6x_@nt)QFPJ(sxk7idW(EiCi=Vlwz5s;>lZGZdZg znc6V88q;B3A{@#vM!jfQP ze`4qJ`BpL7s*YZj_=x4+u0%DYjossvJL&c>^yoPszL7FnY3mEhJJI=2EvBxltG&f? 
z-wEEt20loOk}>R1eb)Y@F#U8xGM@OxDn9SjyM2cY^`o1K8hneC40ClqlKn^0mh+LH zcBd(?IOjtiz$(2C(;u@Jt@sSL7LZg{YuekhD7`2B*^TS*>Ijc-!^fW#T-%+aE7&&$ zOL+oMl>*o76572Rsm$heAbpMmmTQVR&7h1uJid_6M2Oz4{={y6owcBp!*Yfp(UswD zr12xRrW|#5$Ny^n4f;w3%C*3&{TrlY+Bmj&W~k4|P`>~DRsEm;$0_~q_x^YPGF!cb z{l5t3p3ze#TKQErH{-1ls5=rB-(v%QfzDr`U-q@n1I>8QB-6??R{J}yPb8ad)blBQ z?}NXsJfHQbO}vC@?BFKeaaZ>1c<-y&g&WxX>`rf>scwRQAkyGnxD)T*9FlKmUIo-OEg`cxZjnGf0v{7C;Ge;Mo ztt@)D>{U3E<Mhttj1tIfuuKK>+j)9%?4>ksgIDF7HGOmt!JWp zU(~At%d9UoV4s(3G3z?7gX2t8NMvNPLS-!Ux-u2{6y1Do)E~rC9_{%ypcss+Tl9~! z)qW*9XD{+5t!>rzR{dGl;C=w6z&X~n#`>K^(`9V2gZD8&-SOxX{q;H=D;BTJf&PXL zt$^8wq~bPox=YCxT6&%YJ))igpctmss^q9DOh)rBJM#Hf=_fNP&Hl9bZdGBQtEZ@A zr&`W0Mg_@r*iJ8!y#>|S(2gk5(&y)Do$hz`65q&DWc_yl+Fk?0-YD>M(C?rxTimGw zzKXod{c&OoO)ht#WKMe|Xs*M>p5SkX&hg1>X=e!zp9|9gYTb&;S=Z`=`ibE?2YjES z*l^_%BbEKk@oFMnJL&x{v`*aQv+6ob-+B#6O%&=>C8Hk)gCV2ltOI1!(TKF4prxL$ zxEGGWw3 z`%P+C4~Jwnc^e+tJ<}g9j~6~iZ*{js`-<)~B3Ea*_Zs}-6Spc#Wh}gkrY2VD0JS8) zQ)YS&*C%9Vp)FZjfhWhIc~*w{vF({T=|`7-gJT2KJRawhA$4rg(lA$^)zV~8w@@-= z-_+i5a5_hs#ONk#)M38IyPLJYD(L!sEnSK}$-Z(JoQLtBvL?0_hUd_d?%;k@=~rp) zQDDugabifjleeLye}-Cz!DDZ9ZtH4x5RG;<A3mIKF4Jlc=WDnb?BF?%l|TxRjLaB5~OTSIzUC;qwDjI9-q4 zgnc|y-+P$4UT4?;u8qu;WS*lF3@WNMvoz}p4m6>S&%w5#yIF&aFaHdSl6A=AacHI5 zPe8p%`1pkyvvaXJ%4J6GGyLg+g6j&N{!IC-**v3P$-c=%<7R(#vg>r>kt|a-tMi*k zN-c7@$gjK6DQjnmVY^%{XVa%u>P?Fqty*OE$GhvBt_H=Y_c zqw937Oz_Mp=#uz~WW>Fe#zy-4xYkU$L$%rr7ar0RXRMfB?kDtjL7~}KDsdlejn%#j z9RCIXg0gp!z7^WO2p(B$Tk6%BR34+wtXXcsub;7?H>oSTKBEP_Vzhfkom1Q$4&U2YpATvFJ!I?z6sgK@8_8!*4w-7SC2Q;P z(QC1bx8mCk;H<9?yqeTzoS*t{DP%R$^dqvFxWvr2&GLUg+`Jg?GN1Tsb&kY~w+jBB z!IJbd>VAkc&d_dR7gn%eSp}R4(n%PZ<4@7-e>+ZGI6#R+K_|JuJ6SCtn^L5%Z=*F zh<_03u0@~I;Jc#GxTA1nFRSCBtLlR30dx z8OXL@t>x@f+Y5f#!`sdG9D3Tw^NBn;)&DBdeuL`G zS&Gcje@e!8@u!Z^&X;Q3jt-9(Ho6+Pk3i3fWG2z!$@p}xT9%{FNV?Mw-LuQ^8=k2` z*E9QE)6>_u_Ofez;F~$az?SvCX(0bDE}cRqx8qD}^|#jk>tv}WyRcP1mfhS9(C8_a zcCF6?^oE(oug9v_g2f^{+*z!Xr5E`vN?i_<=HR>xX5~)RAt1~yy_eOQ9EL9`)eL15 zk+YekcILg7zHCkro{Ccq)iqRKy{9@R)1r95rzjJjcwf5vw7ZE`%bvz~x;LU=SwqHU 
zDPQK5zaF>qdmkLmiq6w&J{s1Cn~AHgo&Qqy=Y_wWQt{8@cbC7v{>)id`+jvbQIvu0 zN@JEkdpfI2&I)O{in$L8@2(}a zmgD9tXxQ$nyT4QCfwU#-hgpl-sg;am&LU@x)&07bn)`hkS}%dm@$@pM1s|+@R?YId zML+Vi@1~ynlY6`C{-3@x5-(plZ6g`kTB$j!Y+5<<%p+KbR z_^)U;*p-s5S%b}dK;|pP7r%Co{4)A0%k2KF{@?!ZzWy)Ydh~z!_iw*v`gc1HT+EWB zmz}CF{UI*SVB`P5er2b2xv%s(_n%;I*BTRKCOG+GlS%hloXhKJs)?koz@=!?M8m&4z_kO`_Ti$^St^gIgpf{qNm(K`?|5NQ`y?C`hyOz>(08} z4(9Qo`5pV)-mjWU)nm~<=X)m~*A8_L#IMY!Hv#WSaP6nnj06@ad%ox6{UkTRVDIa} z*&N(cL7Mo`8(m3Wtush+efD8E|0(l~S^xPisvJtcGsb=jT@uaMT*>5HNMustY?8b6 zQy9I&BBy5`2)_X^S_kXJel1t>b<`T*)e?{|UgN~=rrL6vq1wmgeU50+gqCguK`$O$zg=n#>x3I@O^&gp+{ZNhZ z8@p;fyh)GPjR%qa|1(fAIj^!auqLl5&%ez>&K|zR#q|bLBb=#?SHocXs{VEjdz3kW zTKcYGr06n~n!uK2rMeyKSGAz-L!yuAZlmFS8_tc%UOxzE0xGdvn3v7Py1~CNme~7YV$!_mUyNr zV63Rs8YDOIKGWgVn!Gk4i(}yzFKZ3RTNp28PW)MvxR%zHE9fKCvPCVmabNPJfX!{vdQYnII1O`$sBhgxQ}F$c4_HuK3Z?}HzIviNoqqaWxZ%C zx+R`%e-`Q!^7xqcLZis->1f`AEFD18vxYiL-T%PPi%|XpZD&oi1AT0TdRh1RK3Zm0 zKC7bbefI`wVyH*bieIB?FWk+%;wly;D}<|+TTI@zlC&u#tP?uFtL;mRmf9%!7#Zn8 zeqMlgcHG5se?o4LG*;#Ng&uAA8F`2&e<0kJXg53F#-Kq{G|yW1NUud;`xyT7JfAq& zl{7WBE+gPE`pzY4oe#=sC~yl&%Ly(M&}Vo-!OrA+DM%0XnR6=^vVh6{)I`tKg;z05 zuhJ5JiKA}i{SFDK$lFQ2Mz%nYekZ-mj9^ySv&Zo$t)2#sRrvD`s2aGJSV6wnehQDlpvdgh z&uP+&o=gvUrE4S6;=Asw@yvN(zLackF09V6q(1AnM}TOV7ES`=WhCNOwq*kv|4F%= zo6v)m?*)fw_Z>#D&!Fg+p1lv1wxdW@-rEn%2ek5Z7hK57RkG{mRYUQNa6g83a_V1%6Mw{y1?r1lUqrKK(znb#Ji}gp29643S5e)6fGtPk6z7hL04^m2^n57b^yc+YYDRhW+iOR|+* z$l|oYl~-7bB{bts^y!P1EpcU-S`*Kd6OwlFL*v~f1I;IeACx)3A?TV2{qK>rjPI)| z*{raAnV)T~&$?Re57FHTsPup<8~GRyXe}A6{(|o}fMygZbM99^ZST-xWfZ7FvX`Rk z-?e-XdS>N1@v<+dX}nT@AU8t`ZOcxEX6Vuky^ zMB@vz@DH+lKAe7pj@A8|sP<^oYaqy8jk-8D($mSRHcJij*_+e!Qg@NjS*Vib|CgmdM1GGo~NJeSzdjK*%_M+`!rj$|+^S&75? 
zr7IQiCM#|kQ6}c{J9@(J_}&xjlYAeEmVebk)@`%e_mSEXmAjvJUK#slq?b1G%Gzml z??GSvFUKH*l^LqejKJ>nS*}a&Wu%w&)rMXfOJ$GY?pW2Vc)kUKdG4gHvi^*YD*Vf< zTmvqz>|Qy|>Sf?4$Laf`!L9VKH<&W!p6YrDW3bCBW6b}2B}dEGXMrF5&r;Rc%t30% zC~lnJ8NDUu^aj>DqrRL&m3^h<$g(mU6P&YK{~Kuje`;q%F;CkX#|IVnd*b0ZzkZJP zIS20h{vGUhaz~!ZBK)zqmpkEl6SgDktI0o;Q;9nicxMKrHwYsc54pFeUr#F+>Snb( zG(8M)K+~w3Yk02nxj+Me5MEP!}nOF-P`Hux$14p@}H<@$o`~cM#w2M{n@t6f_GH%a(3)- z7HSPizhBQVj4j!r?Mqpl%RQgj)Q`cF_>Q@r?aMkPquDw=OH50T9ZGwMh1Ts=dtkf5*Tk?{f;a_rVXVqt? z8WPho7L_s=|5N`K`PGrFT+B9H$CCAAv*(~#E45Yge0sg?Mh^`$cJHHf2UhKDGCxIs zGfH357|z{T;bnY|#wfC!jZMwVQRFUHXVI)cxemn150hL3^>FrPcBZ#~$KgO%Jw&{x z&Zx7B>0^qx5OGy4B=DXi+@>#{RD;2*WwH#g~!#L z{%|D=+5&HrjkTliC8G7xF?D+n8c|VNSlZM*r zN{;z^@gy0mvvyfS8`*0$0e<(9&ojweGO%O>SQ+iJs=1gq*A^D%fG<8}W7ij}JF%`i zLE2plUG%^GJ((;KJo=g&kdS<&e%lzLX zK$J|F$#|XB_^V*F1XViX$J#=^7NJIU7;Q9i%<6qIh4g|~G9+ySNl);sMvJVpk0(jl z!+4zcnAJ* zT*jNM>6M&J@l9S-Q{s7_aW|{}$>fu{mF1*yE^e;?`zX-uP1joCI#a%RD zXhG@vAjo-XS;7B3dS9({q`ANL_V#Q>pP7Y7w(R${m24C}dDPi8TDh=4Ipg95_-(<> zRk%4>xo#{?W3-}E`K)FOUG;zwqX+dwd-pi$y$lVztinq(J2xxzW! 
z{~;(eR87e{mDRIkVGl(zs_X{tUlrEx8uxPoNLO&??1A)Y!^vaTp0a-YHt7F|2JPuV z<}#B<^H>rz%`=zyJ`qHjPum~FL+He>Y5OcCrlI3ZQhuR&o>x=$D7Mu`vh`#>Zz#Kv ztb-%ele3ymBM~{JrMViqsqtHdg=tHg*0Y!A`9FdLcO(19k>cj?$y{#@lzEl~Sc?`- z{HqD3>^JSJrBR?AsP4>A4Fh}TTXXI}D?NH0H2oI4RmHvZ%dwD8fFLmt)o8(~<|k5P zdr*(ZvqM387f6SY(+2#XCTv($yTb@w+XB~QlF6## zJ*br&LDBc<+*7WW8q~n3X8;*W2Htohi{Q5a&WCy;qmL!v&&uy!wCP4X&nX01XG~tD zE-=j+^s}_*C2h~u=e^-GePW_olhbpxS7Lmsz-c1M_`(wp@ZY~px|72#`j>nO$w=DR zv%|>i9qi))D3u*PnGGAJ|2VPW!o{$BtAJ<;eaeXzhr%uA*Cc)*v*EYmL!zZ~LPl@U zC+47wnzI6tHT~b?@!#N*wTj@ED5vBQ_!wT1(yLl*xHC=S?A7P zmTl;rne>}cAagE>XdZ}yRoIg!!IP{FZInuWhpZ^>GK+T+47})EAyBnJQMNj&deUwbG6+DMn`G=GP=8>Xzwo| zi=<^eCpiOplCD34v;j-Jn9Qx$QhoAryOH)L(smqqPX3FW9`~s`oy-+9gL4z3nI}BC zC+V&Z_x=3OdgITFab0rt3?i+ANL)sYM|vLsvh1v!Ov5sMIg@nfS9u~pMsY*cv>)r4 zv0;9VF+UAi4>Z}mnN!b_+vV46 zNk%t0`#CFy-=)(_i+alZsbv)K(Vp4E`&P1(+(Q|!jt52h$O`7)b91(GLvf7&Z~PiW9N+B za!Nv;Jsy8DKb{Ed)Gj>weZX)Sg1SLk<8CcGP{r& z^3gQcbfC;M6}Hnp~TUessgeA=hv|a#V7q)b*c9P>cryAt^j+NR@o`!g6>y^(bQwv$Mv%w|C_atQ)rKHH@SYULft2nDfiGW z;WN%D`mHCm_JkfbD@aedo1Jgn+2Ujw`6K!yC+1Xb^ikt2XtN9j&!>Gav%t@>x5;*z zJ(L+CW_9DSVs5q{DqW2p7vVuSPi4HFotepLzXZ040soR*+z76WuIA%kBK&SC?#zSP zJtQQ16q83_HtYR6)cz|@+^1A?cx8`JMr(;*t>)?EzKHjktVvmg&HThie!uJAweabK z&r|(sr4MQiroZ9Ncgg-x&tF0ka#~o{I0oa(Ia<9{>65|V3fF3BGiwZqfzBG>Mc%Ol z&0v!=R>zXp_(qxg&uU~&|9lLUG9!^5avcgRN2^xkpex)y!O{4yE7g}hc3F?R4)yNv z>oA!8&GnTioA|w~2{&FQq}ek1mrkdZxZ>sg}rP-y@?dPNUB9v&Bh|1MY8qtQzA zc!@UjL$_;DZzfzile|VgPiE2Ps3j-ZCc5@696AP^!R-v39YS7mYV|<{{8{s=3d+~q z?L@xEYON-SGgpyaN;xa)$271ah%!=bgl5?@oRj62(uBmUz6O>9$wtOJXLvd*BFUmt z7sTHovjcdWISuJ?@|9gr4M5WjeI_a2U-ZIdB>sGO^(^G*A{1W^v!`6U5T%~=KjVq% z+Ui7Z=ewJe+_EPtr*LK0`r+D5gzZ+iW?WJcH23dq!z_z`R$%n!I0K0_+7F3T1me{bwOV%e-$rCdR9TIV#UE=40q&JA-kH77?VM;#2Z<Fi2G8Z;ec2X{X}MCq{wy85cF&{1lioMgT8UJxqyO2Nyg%ID_WS~Ry-oQ~d9hj1e~l%~3}i-1f&8Ar5B!w;#6JvF z-Sk*FReBScW`VaaDOs=eN6{dAS~sg}WdY@4)VfZ)IcH%N>yUjanLSyAV#BqWefj-Z z%3a{g2>Ckmv&rLjvQo9tpru+~0%7)qCzdF)pOduzARBSEHWKGE%+(Dr9jV>e_=9~$ 
z!#8Mkv^v+jmI$Zp=Rbr^OU{6tcb+|vJ@v;qLm|6aUxUq4I9C~Tt?V5@`f)9LdJTDO))<=l!z_;DcV4L@dp>L_s3Rp0rf<3e}dG~>}; zshuPt>w}wpF2;{#TFhx;8HtW170Iwu&ifoklCwu6Ck!2fzRUPS_kt_>bqq;713ax^ zdMbFc)?H28Q^>-lsPPkWR+SCdL!I@slFZ^+<$nOC*Qj$0IAVP&(uJqN+Z~jPmA@E- z8SRaP%_2`%)vF}$PdEA-J94r5UZwdX)Kiz&`7rukg!YH3u?5T$^{|(J7wgem;6{J& zWW|3o43j(I&!EdVdW(OvVfPEAvX+~Y7b9lE-x;nR#647eV(hkL+3X2L8+|OhTD>ERlhVv?y`&In$-0da@?E86>^not(H$ zVY^IAwb1AhxCNU!X!8OJq#yaK=Q4ko@!f0~U*XxOP~|p|XVm^f9&Yj}ruGP0oNU|9SNEhqcGg>HmP_4!WmSrQ4-m&|09ne)HqTe$T~A!iKghn@^o})+GH%dYFJ^nMExj>fO(ZsL9Sf=hXNcIE|kpH%V|&gZ1SK6I^ue&lH~`ve+g4&XuM zTcT}4GMW)!^8S=NjnDEgYrX#{#(=U{xtCSpuk%l3jFuUItO_?Q$~N@-H2kW7I@hTu z_p@s^!QS7dpZ^&foE6uMH1AaF@4kX7 zk;(tMF8UVCQ|ez|yy8?P-e4AC<%xRP>v(4Z=9`FC`UgDd_ z`M#<6l^Kk(zTFY~<-cSBE@ukL|DnV~?Bx`7lo8(7=~=SBdr$nFi2}*kJI?3u0<+(n zRrv;r9cA2pg>na@{;g`r7(X>OWEbBe?M?m8`%3zLE3RIlhUUsYU6d-&g0*9pDv6e`anf_$=f3Gs4f#?}j8al*oB7yD8DoujIo@KIYKnc`YBKm2wrkvHGfj zG&37{WtF^)I}a`EYA^NVUCtszUQ#X)1(uZB-DcTtc7lhm|L1k^>=!Skt8DLo^Z88_ zsloQO*RN)FvMnikmnA+IM_#5~GufN1wDCxmF5|fP0B5rv$;NsF`_jVG$)B41=+V~; z;G6iy)D=Iv4GXcIjcBFcTusByVD~a=TFL4x)z7Two%CWO7Ald^Yex9ljWdRSml(RS zN_c=lmeVy>;6Z$_&sc)3 zEcH_MX@9;)&ev#4ms_yQbwJkWZ+FiUORUGeURtO zsV%Hy_KOXN=ZC&8Lg{3INRQM;nar>p$rd;8bn*};Vm;@3Wn`ZWA3uXdW40?R?%VLQ zgBB8xo>Nz*sUdOU$#fpO+W_=mYA3s?`+@Evwa-`IJEVVs-mRDRZqyIYC5N5au*GI2 za+*gxk3+ zDtyigNiF?t_HkuDXR=@%u6%NeC0l+ow6CYn_!CaPLTWOCJsWpUK!uD!k5FrJWgJXS z)AuIUB&UEcS6lM$)YQk_h(3p-drmE1jOOdLK8Jk^1+#{@LaWJ2nyitR)3v$UPek}4 zZRHG(?9ti|^2^m5dUoaeoe!q0S)PRs+08kOoX^3t+J!Vl+V9jt8)f6Ec4HSm@V^I0 z2P^vm?Ydvx$(1t-Wn1CdGhmyF^I!Tr2B)7#$rqK%tZwE55;K_<>t-xL_J!Pxf5|yA zTrF8~N`Ic2g6zY6OMNThwMxso+}(*XHq)LV!Ww=3BijkCh_j2<*QKS~ca9^+CJE3af9V|I|9NS>#H zE_)A-WKlDJ5P9s!BJM}l?jSjjgR7qr$Q*QTtoE$2#V^X(Gtsw;;Fog@j{{Yr5i)x) z-&4`2d358YLLw4zUCFgBEMhyfodo7H{MybUCGXlqR`+Ev<(#7h#g*)iNp|#Z!s>Xso z57KzokAf=aEMzZrBt1JUeyIFPZI9BzY~#qbc$+cG3_Qr$N{@jpBmL|e>7C(PKWuoshz&eJ+K8U_=>ldd%G68im63z?gZ#_w}^dU)%|w${DugJpX_!#nvZMC#Op$lAtB2 zOuzbl{~qQku0gHD4Gn}vW_2=WJRVM&MgJ0xH`AC7D4A%JWw8F7bhLp<`lR+CxQg#N 
zlUDzlpWGPkSE1l|?Hop?p23UtO2^hrr{8DN+g5NKNdgk1bhEbq3!U4utu2jsJA)y% zcWQn$>#WChv6f2>GK*Rv;Wz>FnP|XU^V&5oKz*WNQYH2{>o1ATPpc!*`%&fN zi8RN<>&pDWjF%caC>H9cdiFL?+whv%>hj(&n9m^tTUvthD%dF@zu#JXhNB44^&D?8S6wF?_ z0k~R5_B;aaM2+<$uaVzMWa^Ja-`KX$+w>{xm01Uhp6aPwl=(oNIjMKDcH5ARnVg zS}$=j*;)tg8F=ys6kG)IA9`XUD6>!GdNsd}zKI|?9qnquFZ%>9@%^~(<7iZ7kB%`q zY>oe8XyzT@`~a=igDATkf6JOqMvWD0%L3FL=jsDEu?i0o?VikqS=&D!MBdfUIfb}51uJivF-uq}X8I*tQ_q*P^Ul}{*cjgby^ZscustL%%bhka2D5 z$^SAYJLAuCO!=?hWwy0)Y+BZox_;`Ze}6q){6CQOdK#}PkcUXfCaq)~u&4I27M*9R z0|6Z~*YZRnE`sl%f)@D>H}hTAmvU(#-&yC+ z??Jwgb>)A0?fXC8`OhczQqvG}T?6(0q2I~Ay|(nI9*uv5-c>}AoRgKzwK=mhGk=>{ z+zIHmowg^(@d8$73Q2y1h0Jcqek{pc{a$8?cd^>HtK$K7VxgWZ=RWsfbtb9pcwD-H zE*#EN_z_Fc-Q69mNoQDQ*M${RJ^D)iM^AQSpi;@-HN~Cj>ez*!TltW$y0?ogE^*v=mMfb7BD!PEPla;->zR$8i}d)!2(@oy)v<^NH7fV0`g?7Qp4vSx47=`cBm z1@Er4tjlLL{67BeN$lTJFtv8)Q!URRFNxlMgWb>W*sNQ>gUa{1pS{{Si=jS?opbLd zf$cUHe+?^Z@_YWjr#E~K53@HT`wW-r zP5OfT19h%5!YrdpGG?v~r_5n=gy;I=*GX#n2(*vlW}?=g1@G_RlQG7%>g+;VUP0gN zQwS8*SeNXhPJg$E4CbWGIXHc(nedz(pDZ+wfjQZ-Zc|tGJSUnr`RL!rp+At+sXWO; zaV|Oaa9*2nCk2*PL9v4k7jNoyZ@5^`f|R; zWw?K(`i~+n@eb?ZY%3a6-l>Qp?b(C3mHTx;{m4$TxX;(lN_5TM&*6CgDVmOC3o>8- zG%Cic${fqhC^buO*90v;BEJ)HaVH6>jY{!vGkdU^UC*x6WA)-W3-FgT_BGO-M-e0t0IOqIM&f%h(VhLWvhi)f^^tWIQA^DY#w>pdBS*;kNJ*x`O9!^G{NP1b^S zTFwej_F1I;$W2%8+A!(N@*NKLMcT@WRK}jm@S!D{UrQpp`kxbJX8834ThF%L;U4bRuhVt}qoUz(m3 z%bd~j0GUZ1>CB1r^S%^cYvFiel;Znjee_&%nB3Z(!F3-lgw7+;aTi(nl17iXEv< z3i}k&GZ5B~dA6Q+W)QNYd^z|Ru)oPszMLIA9k09kKZ;k^3x453Cvx$G`^m7Jv3{Gv z&Sm_(n4EqOo{0p@oL9y|D{lDbXly-N|0GFA*@K zNqGl+swl2GQw<{CXG*hj|vKtLu%}o3UD6xe^wNTTGg+(2I;YvOF z8~Ur{)O#A8%CoX6^0>2pGZe*RUlU=lN_|5?K8nw_z2Is)IQ6707wGkpB`+tX{k@nu z$*!cQiR8(3kP`MZ6wD zdlJ9Zq0rFt`JU0wDW18K1oU(LWz@({^$~ip><$03YqQ8jEaMBF9fJZ};PeanoJ?Dp z#|Spb%a;gm zPR`iPPOh=EC#wmuq?yHh0o9Y0VFk(^Q1pe1)RTM+!TDA?a)zg?lDOu|G(eF=QJje` ztBfYPfVwXkf8G0dnFcyL*Z z??sc2qj9kc&0%^0d7Z|VH`fz1W9f2kLn7(VDeUI#LWeJ8nJ2K5!|^xu^ufdAN9e2W zg>diX$@fXyk?_iCEy;DBF;o1S%nv>ap6zHbQ-2-Xw2O>gkFPbr(icn}XlD&A9bvq` 
z9p4wxrtG~R?z634yEE)38tcTbuE{bc8-5~|YNGcLdh}}&mmKIJ^!GI{?1Y#HruLeJz8c!`Gp6}NNh-c9<8mY{8cUn6MR3y32lS#qTekVFSRA`!jZV~j5W{r=iS}wxw}jM_wV4B72mX8 zuFvm|7*CBQrWv(oJvjTv%gFS`{*|)`S%XeIde)lDdNQtW=*f(a|2MD9g?vNFZxo;Z z`osp@Ojh;J)O-{R)tqH2bDJk>p@QqD(Y)*{&uLV33SO1-2Fcj|O;@tl;tDd5)rFh{ zm8YAtS*QAy=!ayN*-u}c5&9?KSViAIay=vY%m*|jK|goz3%Z?s``P9GEp=8S_u2VB zPRZ~i?}NR|JnzXUnEe?UU+?EvM)$|y=peeCxthGDusO+xcLg|G6mpb&;GuNRv^t=W zhtJ(>Sd>1CX0mX^xjk)3k1p3(hKuQFDnZ1F!oGY=5>s(b&x zUVE^TEzx-?$S1LEv3jqN*LHM$0~>P@IyGlKj$);T>aBT$(u-cS7MoE@y|5 zW9$SHT(z(|+er5*s2dAZoz;3ykA5j#O{~&Xn9ZjB-_t+TReyFO&7kqwyPch~u`mO0 zc00S)uBhQe<&#A+*)?tgZ(IKtv3Awa?ft?MOy{A+HfC?t@vPc57$vs#T)auX>}~pi z>`=@4TuvRW#G0-1d>69uG6*u4(UkSg%tk!k8Mu*ba>?_(5Z>ERrjh5KLy^o}E;9Cd zgGSFU;A;uyYx&3{Sg?~>Q+MJ8endu+^EBg+%&eEQIFGP&caW}u;Lphy_p{k^ z^pHzYyBpmY1hdR;X$XH;KRUqB|$Ts#XCLk@skHfe5}EA5$sfA_ zzv5+0)5G1V2U*Al=JdvP_;P!}*NO!s-PND7x2A)zj@~R$1(_-9;fd_BJr`XUxVsC) zeQz+tyDs(5p*K);5BhKt0DW+fElbu4}7{7KeI}n+`~P!kQmhEVA_S-cjH%9 zROitDS*V(*=Qgw~E1t`EmV0}00F3A2SqoI&=6?2(boX6VeaZBD2^?m*dmPytNc-3G z972ypcz7z?^lkKt@0@XCeRXaGY4(^W7P5h-lFu)rfh}HDS&4Nt^mO%P1}`~?rr_^j zEjJ;RZCLR8*rm*8eL}Ocj`AkxE3;h59e$C%>t>iI$NpS*vetW@GL^OWj^1Lj+E2&x zesuFkAh`*A6;V3zO*z3JS=z3K>kuv91lFALo%2WMf;jo+4?~yD;7q>r8YF2LN~~3? zuQoc8vrpkX%QXbBh}v|bq(5*_** z7V9-7KJ-j)&(6TP?eNLy=z71BDY_C$oJ-T+FXU)_QNFJ+#8V_F7P1bB*g#%BRPT*! z+G5tEiSO*8&pPU=qP3S$`8(+JB&r{8_^?WWQ#1z9w&2MrHSFzq#5!AT^WmAbZSX z4-Z7Qjq1p0W!b%51845Sq3_~JMp}PFi%W5ODk}5=YcH+rhYN|ep6Tf!WIJ&$TVVPd z+*(fZ69d0kO;>1dn(`~vl)V*=@T4_3vm)74iP5OCoTYdXN3sL{JawfXpRI=GBtyf1OKQ79ZM8|qBr=t8Kz0P}4Z|&3_z*75^+CTUAD3uzfajC{`fgmU4BqN! 
zpS=thT{vHlekf@kqK?~WJPB{FS^X3ald8t?!+^ITyn?Bkawf9tpya&~ZG9 zRz#UZDz#F>d@yZ+V=!8SQs35Q=0>y1eKLv5e19*!#|5lWqVYN_bGkMY*VNPRwne%p=>GRq9&2_HZ@lpNCFe!QKT%D6*>Xcl@8L zH@ga_GO|p}O7`|GBVSjMh0I5`W-Z5nZIF^(@M|lJaR}PJLY8WP$gUg}Zp@lWxAN{XW<6ty8Outcs{5-AJn^F8{=o&p8rM=po zO-#sPEXS{1{U*u`$D3>MBoPIjw7PdO|1pZZXPz$pO`=D3((lZmPD8omOsP%#Taut} zsz3XCR-;DdL^pY2hPz*ouO=Y4%@Z>{xmAnRaD6S_mnXzMiq3}?b@oO1oM(|W_rH;t z%onuL-Wg;p**%k!A-NV-ux-QP(S}y-S6G!qR-Ea2W@mnk&!hM;594TGG(HW-E=GmZ z)pny2ZCpQ#6;5Wuwn}z(C6+!}>AKt4R{HT%|WL`Hgv=BGE+kvRLWdqVi$9ESk5_%l}HAm z^l~>UeH=>N;rBsBFPZ!gng4ztB-wwLK4=E1jwJsC_XmS9+Qn|Gxp7jO(*UU=RwVOl44%HN-!=hy1UXgPj% zPE0Om2mVpioiXXBs8fOUs9`SWcodxI+Pk>+Qy68I63@8%Kd=Aau9VpeulVOvgZ}v|eRiJwR)Nv( z>qn4)Khcw4!|gP^Yevz%@o^9-{t2E8$CJI#<9U$pa`$RFu*g_28S^q@QAR%`bHo_7 zq5_+rHgbCPzAQj;jAZWNLG!e)80RG}KWBL5q`9k6XYYc-*SVM2{mcr4>)+uKx51xq z_aGK=oRSm4k?aSFN61(|vlE#gD05YmkpL-I;+4!3yXSB+qZ6M0SnWUXtFpfSY2Uea z-h~zP=!sou<6l~j#AV*8p_Y?FF*7;YL6GwQ)2oDZHzoe3QswpR(n~CI7rH$O1+$`= zRoUZw56}nJr5U4PTSpIc8QuDaUT&3ASv$YVXGZL!)sXSg7`!{*wa1i9EawcL)9Ghc zfO6_fb|+_yF|^RcH(BHNXy1E!k;k-_Of)&GIXi3Ds5__noJdQu7LXHh@2BUNyPJ%! z4b)yA$CfMGhOJF>(E{3^o#N5*pJ*jx@zL5`NPzJi9ohG}*}&qcxRAc$A} zzS{0!G3T&XnS;JXiOwupTHwlQTN!1g$H+e5i`e_N=rkDYP25e^!0f3i@{-;7+gxv^hZ{?ZPT+4otImz^=;O&jaL>%~wInHg%n6&3MTne|i4|Hs&y!1-9V4gW}cstJ`X;*KoEG)9(0HE6LUV_z!DFfy_i4O2>X zb&D7Vp~29|zD#J*kY%K?WJ}39mP(eapZEK{uI7K=&-1_E=XpQ#`OW<1y3Xa;&+|Oa z<2+7JG(G^YjVNqRFW!*57Ff?fg|m2f{)v2S3f>)~Zs-U4iI&Y{F%o)Dpp+|pGY!oS zR)L=2cFpT7Cj;0vBE16xPPr-?j2zWsL`(yV_KZ@FUV3t>0mB?-eBLVLF1qXPNzmdd zb8hcXG8e-z^uqg+j-mmcMK$uL7IHrpnzW&`8RZ&4r35Hv=E6eE61G5Nv--9J!Zp-+ zGSJ8FaH3qWzIhDnT=O*o&eOo*&bm4FQt53b`n&}_?;hyX!eBnRlK;zK2Ob9_Glax~ zhw<9x8|(n40~mpi)1Rk<7r?h0X>kO!&>yH?rSUN$r5dz7hFo}-#Ow%ZQ2lG_EQWg< z`0iaf?U4y%iJt=kar15fb^0=NtwtL|kjj6r9*dZ z&6~)oT*wP0+W}=k`kI0S3X40?uH*ai(-ay<(@F=lY!J}u16T>=63~fzp{-}UpXHe= z<5)P_6zrpr_o`6D-5X>!5XJlWfbFv>b|! zU}n`9YU)4f2e+O3zryTh3!FMm|6AZ=ANbG$T~J%EP}i(u8>wafxY9gxO=2dkcIct! 
zvpuu?CgbIIXtg`~?hnQD!pM%I zP_Zrbv}#SEW)jk|5ZH{p_0+&@emp7L4k*SU*PhDoe2`f#I)j@OZbOYnX)BWQcd~wX z14!B-zbqt`<{Rpj2(&x^P2Q;-v3cp`!0bz ztfAy_N<5AQ*p2oXvsIfuy+w5$Qgs6RUlOUD1hjhWqLA=pM)b{4rY$tq+ctw<5-E|! zEXJ&P`nAnB-w_CncRP$F*b8^|f>%0{dk{R{1|qG6<6<%`#GzR|k))|m$q0fJo*n^a z&sC*iAKXKjfQCmRaq)1kJ(kD#AMc9Zi|kIKyk3}!K-nmeDAylm#T^7R&3V%uThs&2 zSE0=o^w%hmeRtorysePgZ_(4?Q1VWwYz_%;^E67JDl?lj`Z`4~A2Y@pljtgY0MfM( z`uqdF>nDm}l%7kk<$EqIGmIyZ9>)sGr7RRRi#@~VT8HWxfPur*qK1$wX)J!r>&o?om9 zZWHO%TYZgZ8jehP)7ep;cnVyW=<77n(e8#9o)R>!w4C3LNrBU9zu4vFq0|G z$nYYNe@Myi!1qwV)jN^VwQ$D0fm-x%5l)(G_ZnmOZXlb*`*A?B8LaeOO@n@ODDg7X z+)Nvu8Pt1S2>i5!p0)C9Vqf4$qn-3%rRUkSn&4qw8PY0l4NlglMSP8O-v)vnaH}o5hTG%0%<;MaGS6 zGI!2y?43LJuGIDLtiUeR17dRpR6!ae@Rfdq{#QdY??D15f!p2Ce<3(M1P=4)nYTSnt;TrLBr`e3ZH}dj@|%9<3=NBxi3%@hWnq> zitzyLpyJ>Fg1fOa4dM8Bpd88!X9}~O5m0C*bl(E+mV#SH+P+FVQr+mNf6{+ZsOJrT zdMUp}8>c{Tvn-c|i{9XL3}~l-MPaO0dA_N^9f5i`RJcN!2qdEw7`B1}-uLth)UOHc zoY%YowC0@HO05F$=QNPSGRvxo9xbKarL<)JscOtrjZZcE>D-`wZRB#ss!>qHsF8wb z;G5Lk0|ktLtcnHjOr4(hbI3q6RyvYd(q5o2R;DYwFhgu9%B2Rj&RN+Mc$N>&&ZWdD zo|^T{EXh+Sd6Mzd{2!U{_-*WcxaRO|Rwrm?tnewYi-iAOux-7WEtyg4b!ae++9k1% z-nO?9T|A6dx%2iHlz5wG-ZF4&P+}=m&Ve#UZ?t0lxd1KqOuKO!JHgvC+>I!|6^<+d ztFrurD>_d-%t7*9ujfX$PyhSfQ2N&E5T0qT$~X7)y%)oqAfCXAf8PVy};68kgnZw^+Yb)e)u;No3=X>iE(dJ?}!@V_;~pIkV?|E32|!~AxZGOJ<| z_GlTcnql9#2mKeWzw7<|w{8HF*%iM+&dft#3`7!gc9P!@Gk2^ECSlx%y@c5gO7YDV zeiDDfpZ|2uA$*e?jgnhZIwkS>na^$i)&lQ?HG1gNaLw;26iZeKzWaap<9${Cz~03o z(NBWMA>{84sQVaNcLe>nR4I7qoeXuc+V1K$g!|tIZ@OYb^-L5*%5Frengo4(2i`Xj zw_(OxqaUsX^!b2s=?>Pfx&DTv;G5AKxgHyDJD84FXF9d&!lUq1i@pX=HpsIIz~S8l zW?0bsa|`y|zeZ+!$zN{~`1ST_8b9{l~+|GCe@zj@)6Gqe}5<(r|^87Q5m4sY%VS{RQ#y##i)7LVnTM)R>(MxoV4w#aO#q7KXqTT-t*ZEAN)V{dPu&5yB`#-5n>Tt7r6?-H>ryXa*N$U0PY*vb-*rda8;_!)^eiA-%q+zC>2C0{1pgJLZSzlg!=F)?g^`DT@GTfnjt{258_b%GJLCa`}?jlRL?xdZx-KoSb`z8&MfcLKPF)R|eUevEATzX)8b z;C*Xswi%zzMs^H7AAle4Q%}!%CQ^AH{dv#KAwI3pi*&d?mDYu(J+Rk@PFJzD??cT4 z)K7+9#wqH<_g?#P^zVtA!?Zh(zPzXI8sqi?c<4Qe-rJDPY`_yV=B+S~o!KN}X>}EN 
zxUQeTc;bBv=9t&_<-HBgqnp7UGpcW)rS({CV#AXHc-hgCUXaU?tRgcA57#riw80M-^W{&U`I6LnsgUpx}+_}V3 zT4@XPwJ7D8)CRzNH|;tbGS0dnGXrnwJ&)9ziPU=!S}`y9hIvX3BWDJhtHK%gAyZjXOvTIO zInRo~X$(O&@Na}#^MR`tv={=E-iS2`3ZLXhPf}G{6x+tggm+pvch3aF?r^3mR4vVj zC{{7SZjU(!Hp5pVxb~r^Q-H;+DDJXWr*222%RJDh!M`_H9fyl^(Vp{^cgE-}y$n)V znDY8t=Fpy!Hy^&u;Qu(*_V^nz|;4@yJF8XYAfO*9{1@Zv2n@=0=d~QXX z-{r6ISQ`QyE>QY?XzPh7SF^t}q93J)C`PVt_&b6Yv}~R>@$8K2xIdWj?1D?~^~;sz z^pVEX0?32w(-z>cg4x1-=vg1eh~Ze#sg|nvplxsJyasPxhd1VvFw0w2+AwBJsrnX}roxwX%nz#4Vr#6P@eJL7Wg`?R0*1@L zWIfbWQ@ri*DkIByAkU=L+o}B{RJ=k>DKH3nX3(cOSNeltA+$IXUb%;6?5>&IqM_;$ zdi#~y32@bWbMAuI#%5o^ns$W>g@Dc#Y9VC(VS1RysNlUEuE1lUZhfew1Qvi_u1d}8 zp#R6chMx>@HdS-0R z6G(t@*lO$3$iQGUxji)T%*0bjn$f9dNicF?3Q&2IjAt4z(bgE=M^g7o@NwieW8}R^ z-vMxrL+TRwT^}tt2i>xfqLW}>9Y{P=m<1NzYj4JhsX$hm|J$*`eGp2hA$@7zoQC^> z|9)htBlf5iQsEB!Smu%&v2Z=1#XfrU4%4TR#R6Ciee(0c;vdk^b*i}@&C@awUUWlG z8$(5JJ#bv93Ag0=`d|z-WAb43fl7m87HyA$8lCw&18r;uF^jS4E=ELe>iin%^d6Bnz+@3~;)ZZk&xQ9K&j63pz`KX`iXww2pgNS8KGfAGu5)g*Y-gE{pR*qVLN*oY2egtcRG(Q$Sj zvf|yZM)-C>u8e%tmy`oEVGidCK>Rp->Ir9iQm!ejwFG)cedl}LF=#&b$AGagR<|AU z)E}B;qWdpm!#p)ropyI1<5Q8c_<+x=C}%9CS~U?&+wk8xU|tHxGT@T&DVqcSevTzc zr;o<;wI7MB2&ejidpTNv0-TpXMdQyxZ$T_H?1J`XP{ufcYEUA9XRkrAWY(0oA<^F> zB`*gfjk(gA^ND8eKb*DDaxB6kXyo}pSNi57T@%=|-jr9DtD+6gE;a$@Gtl3R*6xfL zzxFeuPHpCaJDCwWhcmP7!{8kWUh}E525Rd`&?dK|=0jjy9iAkD?Tg^m3oPn@X-8_D zz-He<|K@kpmvfwz*C6al6D)5SQQ@3(HahBwLgOjStoaCl(3T#;$2Sr9X_{X~@2FjQmhr-&0bsMlZ$q z`XMdoO}HMM-VNHjjrXoXO9lUhedYdkMeMrT$p7$LIV_BK7ygVk-HIK}3-3$u?&;wD zoZz=5pAMe%1m?WKdic=a?dyFo#vt^I7$Hy=t5}J*w*+{(LVpmR8*vcs^_w@M?%(bH zyYIqY{5J0&;Q z<6_JI`YFZt>499|5iW>;o5Hq_VWVsnB(Z0dG`)s3reGd1+fDMu$#NE zg?+FB`rcPSm$B5Fjpkp&o){M%j}GdeaFnyXFR-7kTaWTQ7LD|DdVOkpuks$WB!`*C zO!}{lj!a{GEJ2B>wCz6XF2;d4?27BzeYEbb(m9@c6L&hJ^ik~bKzcKe^lgmiV<@p7 zSl@&5ZGox{eS3ziIrhzn`@VcD$?VB^;KOKQHrAv)&^6^xD*uuU*Y5cvh#h!@k z0!&X}f#+eLogcc79)+yd3ufc4+?{0^NpI%qx~mG!Z=};9W&S`pe>h}t$Vw1E#8`c;6*ShL<{?Y z>l`#25%AQ=IwRZMzjCLuAaERp?=9i3=Yu)}#bRokMKKGye?*I`v4_S%?gGjU(AKqf 
z7o_SEFq_SCHGJL$tu2$nXf+dyd4iF^8Cwo*&Sm6m3>A{G^#d4Dsxqe=#K_^UK}*_m zO%;XI90S%y!1)j~UP6lr;F<|+&gPx~gYfji7_crunf}nRA#lz_9*uc614DYijhR88 z-WrmPB(Fy9%_h1ZIqC_=>R=yw0fo^t-$IMVluN*Zq`;jfLCc;2mbfGM-(SoT%uhK2ttbtJ5|HCr*q88(zvuav1~@~1$w0RS?f2feT|D0e4wtaGGr-Edcz5X{XmJ9X?cGz~p)bw`^f%_v ztJxU_(pE|O+8fZ|L-6R!)00rN4()ntUQ49oC&s}0kn3zDCmy=Ap>0?F_rW2f6?Q@S zSX%D{-e+m=3Hnc_?b5WCg+{jrgEqjHNbd>YJpmYNQTiMbqlTBD<-O=sI`(NEtrex8 z1gKh%|0Xgs%3>67zBU7>e+k+N?~e1V`h#FM04g4#PCvBmGWB9vNv+{|r{`{CM0ZX) z40t**rWosGi{AwPzi3+R$UI7}je;hwP7a`L-n3{O=tOLm>%0PtVBWC!6A)ITtrW)e zmI2?5c-PD7P3fSWY z1e_{{rP_h*(X-*La95zY_Hr{VM}cESU>k@%$YkDl9GbPqGESrJi$Ga}{?ou~3bTL% zlsV7)=O__PfBJW_;mlHcGs8;`KTDC2rf_HvD=*Jv&*aC5GtX`1@XmFK5!*G9^kii6 zAU*749n}FD8y-k@Qz+9P+E>Sp6$Rr0%tVcC>qZOj(Y_}+KV#izP7H6_%?zMv0zVv& zGLh?3$YEJ%G!aW{Ab496TBma5WO>yzj>~oOeUCLw8$) zpSL~irT@;sDA$9Le-&d$dtlC{pAx)tcVj7a9>+pC!}31AlRz?`k;phwJ!-YEm(S8h zSNivc&TOD<%37febV|V*PllJSXw2Ig0giQ;oee{t%OMNJ;jms%y}^CqL`~XBhK{G< zwI}LQq1DImLC=geo@Pec3fz|M%7(G@5;jE&d@>WDTica_{WYPvF&X!u z584aup_v4FLm%(eFs3FA43qh6hT>)%(KDpK=P&HGcX7H~pnkta+f|^vHwqXr zlqNlzNzi6v;Nfz&pcS^}80F1|Jsw(`W2G5o%t!G$J>&%kvrKLZG{GE}2PvCL+eP6- zBT8lPMnA8Ta05JllM$v8{4i_lRG{!AV+t14dp(7q4Y--R=UF6wJ3VivHKR;E;%{E$ z>{Fda zGQ0~s1~7IRyJxQQKd=VwxtUkWJmJpH#?Z<&bg2-OJ%D_fPdbIaiAaxeq?zzw41IY& z%SB+hj`qty+mmR3dubieG~v?ovzs!9uoy-`763VeA#poYg0G8evn~el`7wkkN_yp}$VnlBnXp7l?B7pvPq^LI<>aC!T$F=Bl z2^{(iN*aeX99=T%HythO318d`^6b(W`qf|fBDB>T8;Q(~0)`skulI5u{g;H7O?f{8 znwi;H?{g;bC1GPcIi=6Tdz-Y1&!APV!FmPy)06iZNYVl7R10)S@BaJn)QFNqYCZ#{ zdLXqwA~&z|bRD#gq&06}b4^x*(%sRf@EqEb0H%M!5%b0@2Xk}2G=-AKf;X-Bb{1)L z_NPzy3v{G1<-)gNT;hyiX0ZSa%-(qsn(c*t>ZTb&H!uzv#}fl*&jEW&?CAm8^W2Z8 zWcEUzD%AOvHV?qvUChMvZJ5R6Go)_#Ka=<`AFakBgE5RfcO$c}2YS(r`M~ek znt#9V&>JcBl=w;dzpSBMS=+llq*CxUEH7mF1CP&3!M6?2upAQp4)Fhs#QX#OEdrS0ni3zG-THt_5d zBvwC`Z_H7V#NY78$PP~k81w<#RKEe(q{ zjsBNH%s0Q}TX>$z{2b?lH(`DgWydo}%F}dcGo7}lBPFFMg1k> z3UAn`3pAH#-`hUDrPF?uh%my*dY(VZ&DZL?dVE4pQf^k&u+G1J3w_v9h{wO){MT>p zIuC))UD4kh?A|&0N=4>e7aH>&i@a~f{&;(}t8Q&nbJiVa;mZW9)%ZZCJ28KG3yrJ? 
z_2*zQk5P9gTIdY3KPCP`?jD2g-Y*f2zMsa@n)k6b@>vrdKfpL~7K>~2bTnnE@N6eN zC&I&L&~JBxy$@?Edglsw0am;kG>gaLZbt@MQQ8|6&(nk1?BcOuje+IYV7`@zG@S#c z)!2&W*t;pT*piWNCjBi#qt+v9u98l`?bEdWJeK1saOp`%a?Kzdsu?e!1y}Q z>i05|p*M6t1D@Wdcn=s(L1vAji>5En$GG1c2c83f{1Vc&5Qw~s&g}5rXwSLaQS{{# zbl*LR9&jNRyp6Lw63o>~L(@00=$=~G&a*iBTY$!#r(|NlKW}he3S#xANDWFwZAk#RjC&GCL`Z=r2K^ZOr`oQgaXQ-T7X|lR~s$UW#<^ z=|tPt)4TZ<;{re7Ij9{4jswvfJutlk?Whg@?=Xkf820X)7w!VP{b-zfcEy4@=34kw0jSHsy`IRi)L8<2ckr|lHVGd7fGs%%Evf}Gj8r&kZ2nlN_9<<6+stX6 zea5WLeSfp@z5%BiL;ucL&2Q;Hh4-HAnS{)DMH7a@;RTF`CxECQJdH&<#=|YwI1f?Y z+g2xmoA+s)fNTAbjNM4tr+oVpDtBfcR~6})4D|1!&1U*Ig)E%Iu6Wvi5tP0JmTl42 z6m-FCgRWbeK-JH|dM*4P#WU|BG+xR*yYFblmF{BZ1ChX_k8TMR-wo}?u!`sjB*nm1 z`P++T*5a@0JEb573AIi$WZ@t(upNBT=qDMvrXbxF18l4_9;s-|lRtr@8nyLKj06AE zU}}_|zEh)j^>d6wPt1Zj99W;F4bN1317)A1uXb1keMCKx_}bvz4@v2Z_AcaqqmLhf z-z}hzbIdN3TL*1F1I|WpU2TQ`8ij#90%;OAPgWgAuWKWN z%YZ>S^Rz^1uqw*TWh*Uz5$K4kjAgX1%{D)K16uK$r$8#g32mG=v@3H>v5tjk%Uyii zd6E{qxw0aC>&X$?KjE4ly7`Q=i=k5`IHh%P{ac-PYF|%a)W_y(w--|93dnq&o^L1w zO`f9Gc_g6}^w*0r2fOQNaF7|r$Iv_6D>e7@W^CIQq^lD(Jl&m+Eip3Q_`Bn1SbJ#K z4T(+z15Y8iD)sj=U@#|unN6~2(LGgf4I9q)o#3FS%xtDgw)YmAw{SoD^&_P{gK;XL zX+i1_gC0%zG579BN_pdO4sFck&2E0oP&^s@Je}%EC!>{KMhDE1lz>b*a=G7f1}aVs zMjPXtQsK@ZxVs6O77IqK(@e9wg+o2X-)VPVR6KVSkhaA%`9)Gq^6 zk%4zC8M=?94SfVPz-m2jqruo{3~$oR298Bwyc9d8H^G<+&oXJV`-0VR@QH!$^TEn9 zxwk=q@Lb6I=IP%U@H)Nc3~K6ij_cO zf5R9`N^!j$MmaTxAKsIhj@(T|7R*=}?qqj`4)f6WDR5N_q0hB5wr>cdq<7@Vl|-y$ zDt&sw=?{1v0rkoP%>W>Nk@Du?oq?r)7yjzy+rxixl-BEN1n;lVu|5>^bhLRhqoAUB zHN5e&88k@Y{}|xd#kUrGzJjicc`}ij?}6P(pxzFzj`Q7!ymbCELsbpr%>1EdZE@9S z7M0JjcV>8K35I&fv!FwBxO5xbE5>X*iYJq?QTqS4(ZhP)bznsA99SXG!R0W2HgY8n zOCAT!^w};(W6if@MDk3^ECCYl=oF(YH2DP9&FqT$p^V9Pt@kF>d;w{3)HZAHE5LAw zca?yB7QK7>>rYVeLGac}tViPaBOm6psX(uhtW{INVjVb*pqG<;-T{(ntX{mOF?=u+Pa@OXY@fPIzM<`dH)pI*~F|My1`d6Cw-q&f&svgN7;s1Oz z_5vI!Na=aV;TC9RtXeG8-c66@nA2w!MUTc*c{5!@o}24RZ$(~Y!h7AKgIUXW&@hTV zhM)yS;A$lFQ!_@f%2~m@$T&p%-UuH0R=s)DoWRqOPW`@)aNa>O9QiUgYBpGO;%PEi 
zR0Xb`^k<}CEY$O~-m|=kroQ8x=geCm7oJV5!vE&)>I%;Kyb98Wx2YS8W>i*9^vd&H zU6Bv(4Ech!Ml@E~GjXX{oD5oWFV=a8-tP54*$)~o0In`jJsN85WvyKt>--z-CxB-P zmhBkyOQv;K1)kdX2CK{HmHDJSU-%|^?=J8v`nZH9Ee2-ulHP<(?~j!>i)(mWX(W7I zOUdy--kkEEp@Wt9tET-#tM@Ppc>=E$KWRMe4p!CZ

gYFZ?bEOoy2zoq$$m4j+$} z90tZ-^l6?V$JWu{Z3d=T_z@4@`sRND%OY5O&#&*GXKyRW!I~R&w~c4cUQa{C$KkPi z_r^ju=b3Y#^Yq>UD7PRp-s^rZy_!}25br$U6$xKvQmPo#?+SKW=i~G_g;kn7G>h65 z#>6XFGSBzk&3q<{)!9~RnX}B?3)|5hPieWve41~rPJ5%LdBD3FWvc^O4BxslyNpAw zf97{Heo|PWhfyd)_?{QL^*wbOB8#u^H;j8R9>ViuKO+rh&NR9rJWurRYvA0Br>?N; z2CHr}X_!T015oQZ2+t>_@z--I_ru-$_`8PpPotan2cCzE^jVJYVXoGHqT^Y}jAhL} zZBF6Ayc+-(Z}HbmV|n1(lZ+>3=ZMGF7+>@n6#9yGy-(G+&U{dGI%VF2+wbwbICi`) zwcn-{;VOw$(Myw`njgZUH<3nh3vW%$3!OgTzsLCKfADP3T-x`U?`j}xb>W>J34Jgz zaMiP-&m)bN(aYe?Ek>;51-tJ2Z|u+A=#J4f7x{$IES}B!2X6{d{sW-ai=p>LZ&7&X zsJUk>-3%GBT$ou#ABve~OE7kBz^fac2Fr`ZNdng}0?8HX_slJ;1})yt@A14jgf@#$ zCu;x7jQe$-?cr00XV+mHi{r=q5{Z>&=3n^`y}lu!UI9uOb@MzHArCz21+Vj29I!5bwSIcxtN&i@S z)4O#R%Mr?&-XtYb=?pXOh7#J8_e}$;hh7}_5~QC$Qc|jiln;N0Ps8{B{kMLk|LLa) zmd3SC6r*hfmdz6cW<|V))!c!ld=Bb2=Y2Ht;r^OCRWCAHnU^`8Sw$!8$r5ZtvtY!y z9(*?Q%xwM3dE-g*#aJ3|n;V5Ln#toA-h9eu7&gi|Ku;v8IeLDQ_S!OES{-OSt0zYE zRAzxy;Qk=&)2C4R6Ew%W6bB;(-_eUF>cd*?_r4(iv&A(vge_>6Cd0nVXBHH`CM{dNH5f!)Wvv zEbK5=(^au~8?klnhI^+&cS?-ny=&jA*hO=Rc#D~*7gL!XY@q!FYQ!=7ca9nl?>jL% z?uHhT(BctjW5nw+=S` zTuHQ}=4i^g@3My)1^F>bU?Vi12Stt`5ynh!hU0qkl%c|CoH@vhe%%KLdZ71}v0Pi> zM;0TCClfpuGy?hS2@D6(g0t|~`xjHNO?`o|DX{c_!jIC|i_|M0!1N^4&jdf`x&@%b zL0Z*=HwPGN1#^hXP{Uj~8PLl(BXiq0Lb`7;03BTbE^B{ zjD0xExZMdIX$F?&D)8L>b$px5%5ykxhk{=T_-Z7Cc{}@okGU7B0{Jjn_z6pO2szln z4A`?QUFq!w=4U@6t!9PIrbci2Urt+B;ha7{Pl1<#>jj{B=#NZAo+iMd{J`}VrOq&O zstpda;YAvwX*BiHk)vZ!$an+y()1viGtvCUt_Zs!CtvVxJ-lBHyn5O^|In4vGno_l zOwZ2`-;7jwcE>Z! zW?^!6dk`Dj9Vm<$b6>JI@a_l4>w&`gQ53pqe%^YtxQ6$y(2Bm^hiN6u%8{3+9l^oO zQ+f-^!IOr8tQzTIqr=#2T6dLiQv z!R;TQTYpNP=3PggZDK{Cg*9hNJ=)RYxo&+Ao%H6~*ZFJaqG%{S0z5X-zCMs^(8_Z> z_36c#rrwryP;L*HM8VBS_*wyM)qN>%)I@6}G94J)MV$xaufs{{uCJp!b)p%c^xm0+ z^C~^*rJO9c%sCQ6|7OQ-hBg;u z44=su?!IAzAYNcIP)2~+HlDfTI}@wpS+)MO;!h1Iy@UCMXEb&Jr&jF~`rAWkZ_ru< zZA*f8JwEf`gHayWumIXsOAVx_teKrP?6ZK}z2F7(e1R6S>2DX>un(HQflOzCWf{s|3ZU`S%LizC8ocj^EiyuOAuSxF zRlT-PLrEh@8=>dvw4sl_D|$2<{q2UN!~uDA=ywi1??>xtv|ItK%w9PX=`hCNDi)<1 zFsA|iXl6pO*bB!1&t3Pi6(HF={Z${7W=xI7! 
ziGX^|c$W{DM$?~nJ`aLo^^uou)R>Q4E}(rs>Oxk6#;JOK&p6~Oz?cnQG2m4he&#@hv9#uV$}%%) zDTf}-Ow|C%J&xrW1-JIYxq-BAq_&ZJ-sIvwl$k`b>A_JXnwIoyZ$dst18sFU=FM3X z>G>~cc9L(Q3>JkpKe7ILiIVOj6=!6N0Xk1$I0tKu)J;To<%oGZ_R+p)pC(XC3#Io& z9c#*LECLD}LoI!-=gEE4Y(+oo0t%%-)BMP{F?7nbUg6WwI0~9{hCX*;DdM5w>sW}P zKpY1Qo?QC?c)dx^({vk=Nd5Uk=*hd+M^Snqkp2ObBY5Us{MSg5Q9fC;Hx4^c9}nBM zfCA}2^J_3`x|N=dLtKpY)Rv5(&qU^JpH9dRKKaVauKt+kI~j5VA>DLdFR=Ba9c^us_^?&B-opg z%bXPT{+vA&Mhf-G zBm{c8i`oV0B>_E@qfLNp9xZf3mL1QB^EnJP#6OXekH9r=^+^L_V_#p#ZaFXT{R7nW zhLF?LH;&em17<6XMpsG$QzNj;rmtTDIZA=6-UsY%o|e~a{tMxbF_8-?m=~(n4>javDsHdTk1n-f6Up64zmo76&x-1Zs6C zvy4`@^4yF8B^iD8Fdy)2@nkGqnDw|M{O}IHLDZbccR$9&4aK%qh8I1ktG(!th8k$XuS9 zVRXpvgX!TNc0x+3FKA0W5HZ6sHT?dVt0jGY6%E-brzJG$Q>Dl@MPUmAC;k`p& z(TZ7eK4AoVkoAT6vl`LLXxhC8&gvEV2e`Wu$j6Aj4yrZ`py`DK=+QX@%y%KB`mV-d zxsv{dAPH@VMIUZpEYVZ;6%gb@4s)Ys%u`z&A6ag6mOdBH#T5$dZXRUjds;Tr&fUNx zj?yH&F<2a9p`pL^LYcirs+#dQ4}FN;B)*xiSBi$QLEbO*JyLxum>L76f6coMyR+Xq z1kNYJ(Y9rO|q*M(ot)2cCP;_6R$-;%zf z7kDqd&8Vbz%D-V$moa8WUcJkcn}FDeuh;4A^MJ~s&q|4q2gZBN0N}8LhAsWqpX;HVC%-!~W*LRr63bvb#9-`c z4l;NO9Wzq;4@SWT(6KLbt1Vctp>R-d!3${Vv&izdXzeKEt0--GN614wX@ypMcW+6s zI}EQpcWHLhSy(bp#TwyUAG#bw|MS9uG0@t(W_BUF4dC$r_~0tpUDZqU;x49<)wg1+ z%x#r}cgs_5E$GKp;{xU`?p7Mty%ec91J{j}xCBIf8Ev#Y9k94D^gE2!_XFkJzn@kB zr?ODMh>(%k&;&Hn-On^I*av22VYq@-E5_(2HqP42g6Pa33k<%XW%Hxi-;7{X){k{N z)R_UEX6MqUR2uC2GK=1Y6kiU;KQm!I%hRd!aXe@-5=inQy;bRRQ!saF2sFDGIr?H{ z;-S}4sPHS4OXK?jG-EI$bumUE^V5w4q7K+}_f`%vYZw46(ixLVux2TTJh}IGnx0PZ z`zjRwCw$WXW8C#2W@+xwnb9iQTJIwrG zJhjb5(1SL|FxtfN)Vrcef|cunsbx$`o?0JOK~MnZ>D4g#_fT8oLm2ZZ*>vE4vAUoN#=G?Vf)TD+ULW3kG+;M%j$#1jJ(k-dNNzAC)B zfG(r~3_3&_D34bQnVVbW?^9mBH3E7vEb_o3hz~JLGLG#`3J#6FJT8Hq9JhSfi~u~ znhl+fu&!8(1@t~Oy&5g)!5M-pd*fZ+he`vH+I?W{dFU-zJTt&fp(XFYJ3za8z|PsM z{?7|wf1VlkowRoCxo;WK6R{QZ_3WhUqY+pQvsP?CgS|C<3^*)f_FyFEtJvss zJV`~u)38nYq0F>mZXKn#G+OsCFpT5>pTO0O17nbVJqF&E6|NU&@GX;BuaRSsaQ9(a zJw;DX({6d%+=RV7$N#Zt*F^ek0aWg*-i#eL)+W3~%u|=1wQB)|Jn8BwDCdtwpieEN 
zZ74YY7)ZWcE6x*VuuUkT<<`@akJ;olU@~*?2h4X5L&;|JucyoyW$;NdaKv*Y&9)q0-JY$_ouZT$dh@U=VQb7K^r~U z_oMOpgQDPU2K3mB+`UG>cffFBqD=|#-_yhG;p-?M{*u4e9Drq54s83; zcW-p7gT?t1_-=>d{b|=5I~t=^)xqg9ly(ffM9W{Z8t6f5sYp*YS{p%)fwWYXcb-x) zhQN$mW??a2X)io7+TjwAE6<5wGze`E zN@$*tjbiQbEHY@GeWOXthaBb|HF`P?UXMqg_JUy{jDrW`8K^Uu{x1~nFRkLjU0-rj=NpE54OSu)a&=W|?GTLoM2aFXMjO7jY*REsc zrmj|li`{8g-_k;8X4kg44u>~ze zBXe3(HLfi3oB>rofHOUyWHi4Wi?t&|p`d#d%F{p~hz(?}03&2PvaC%g3e!;c(E*IBzks_k|kH5cCw+M#{VetSaT4pKnD*TGGGs33t_6 zFmfd#2aiD4Y*xUVu|X38y{Hb1p2FQssa3$;3VVDR{N=1I)_|tf;8s6cTn>Q@r7-Wbw?xgLjQ(%e|MSqu zO=zpWb@j3p)YD(L1FAjAdd&5*x9N33nwv1YmVdqIhE^qIRFN=C-h(XbX+ zlUB5_2EFh!p{GrX((cQ=EyG(gM$SM_F2GsOESIL9IdXPTb_8!+&Bdbw`ddfw&VBgt zP%Dc5D^h1aH0%ad;?ahF@XK7E=JYTtn>P%2Q}*w0(39xKna;)zL}FK5%dEg|7^5BD zN*B(bHe($;8Q%?i=L)Ml@C^=1A4MlGus%GBym&HXA26!xHTiy??|b1|J=)Zl>R9{* z*jJ=q?W*VBMW(s(0U>^dIPPUhA#d1 zuQ;uxK|P}!^%MTr75D$ke=`DaLJ~D^3TnTJCRpz-dbkB1_T~F^^zOHn6znH~ZKvXiP;$C4CYv@%s$z<%iy$Un|XwOaEB~EP&nx&+y*@zN`3dEY0VX zc!#$o`R?sS`S~v|I+>3iKBu1W=r>B@O+KiQ1$E9~wSJ(z>zVE8^STwArPsn7PrsvI zb7*Zh^NPvzl^N z@O0o4;8g>v=-qpd@7GiMSl|KEBd9+~pQG}q$I7^<{BXD~5*Wr>36baRawDef@n1jx z`RTvj=fPV2_;2MyKK=Xq&{J{*yH^I-%?!K-i}@{<)%=R)7p#sfuEruX0P^p#0a=te zj+J=}Yw!ghPojLmc(5Jc!Y9n>w?j4UVpd?khJm%W=M|@ZK4z_Y_uVJbk{FS+8>?Z2 zdn^{+Ok?`%*J3Zd|8YGwJdL+&;HvQ*W`NI&6!oRYM%Zw3m@KEPyBqE_8#iGbmUH+^ zNJIzfH=&ffzWcEF-bMQ$w()V=-iDs646IBqq)!i%H&9h#EL+P6dX4e@N1$=P_uBx9 zsnjbB$IN+K9oQ>EtGU4T1$J{MQaqIZ%>QI|+BjzK?t8a^qRGIU2EN9LTVoKV#?WSA z;EBfW8BH5c4Kq$2q}TS09A=5~M#K};^)!Sdb|=QZ>sTXu`_QkA#g&EO{ zGhW0phq;E7b%tN=@_449Kl1b~oSsUbtI(v=tntiO>b}excz*%;>47d9tL%R4N${!; zl^y}^2hk`!4~u}-F=j0DZ|9n3JMo@i;}|^KV6IyIJr6^X6MVDupkPjR7;ao;q)BG1 z+z(#Iv7M8cRhNL0<)N1OXub9G257b!T3v>=neg}|@?st$=S<$l@>swReT?&f&V1wV z0qtUD48zb1N4&C2F%k|| zWuE8#pA(?5G02@L?-`~cwB!u}r>JB0)H>jI3Mg7*LFZ#1oiB&^&dO7NKrq_pAb&qW zMoQJd8$Yr1{}DZ>AQ#=BtnIFnhQ8S?cD>Mc8o%y_f*0OVo~wbugK zEdD;ueBd0^)}Pc8>*H>v5lubmy%`uM((gyW=&JD%o_p(@r;pnNJsWjzbjpv&>Q>q_ zDy=*1E#cDw4NRw%GvH)={q4{^6{&L`@2zDgp=vakbw?gO8{?U-15j)UT-yX)mw``N 
zo}1ChNUsR!Y5ah92Tcw*J|9`m0QRG_J%C!q?4N?G-$Q>r4b20(u7d7Ousk0j6OY2F zJ9*O$s6Iv82BFCl`Oj6shrs_2DCo%#_eAa4kP-AMBYbZLqDVLWhiU@ubKRv&8(z3J?e)kNZ;mI zE)0fQ%pK1n2P?qW6=!dLTA=}*u^Re-CqcVRFyD?2T&2`(Xca^M>llYuG1v75nl{Ya zzDABc%i>*f%aP){k^A95_ZD1U4xje&ZX>#y4pib`#PAfjwi^AL1q89swKWpk4vF`4 z(l^Y)a*)6{?EXsVXU3#eK)nfWFNH_ma*#@ElY!Q(L*_l+2Br1~G#|tNzaeddp;-@V z_6Mp=__`U`ja2cZd`mcyPWcGhG5)bQR>2z~T(g_OI0cDZfX124rwg!@299sBIzs|V znGwjfwD>t8n4zV}f%ig^;WJ~A?o8SewkFTHK(*T8DL0r&r4 z7Ait8QOb>u`vto*TQ;EgqF`ks|C=?Q&paFeU35XJTvOK;QbOX zRpjFtLu07DsoQ9YC|bXWU7i7F-AOQVZXc^*{e}mjUJaf^QGPTOzD%7NVC+f2ml>Nf zkP7Yavv>h|f^qkNN>|XJyOD}c&@vI+CWGrlXlmA_JpL(`t%HH{YfBd_J5 z*&3*r#rNG<)REB1ybC3e?HDZ7ia;6;2fv2`tD}v1m5jtW1zxi$`y*qlH@X=W*A-5i z8}EB)UyV0rTlpC3j6~v$#rK@nWFYv9{;Hq>i|M}w__}|f6d6_I{#kqMva9)3JU>A% z_ae`?LJfDYdei$wXf_p@+Y8rsK>2NOeH*&z{db-~|Ap4H9Zje|8_bTuXHP!5PY_Rk z-lcho)^g~12RdY2?FFcQBhVW6vKu*A1dq+e*&g2LYkms3UkVrZgOzhWcRRPzx*lD} zpU7ZD@TySTol#p+PY zoO(y8`8&Kgj3teOUkj1m-=XTS)GJ84Z?hgck2Nw={CxUyzS14KdrnY!sgItOhG%+} zKZA-Dpr(;r6QTNj*#5r!=N-q!U>d>w5fYJ%ZR^MwVFYX>vhfUd$q1SiXrnpN>M&ZK zr0uhmFNdVQMs4#VcID$aDDNs;LT`_Qr{2!`&|(|?Zlj!gUpt_H{?yCNRW?H7H?cTX zfFptW&Ir7JFagc*1l+sOI)$E$92`p9?s%_;s+mxvIxA0ao45@);;5Mr;FS$0f1};P zNMH=DjRR-*4Wppy7Up7RlKK!zyCYQ`8l41I&z0^(dOK3C1HFy`N_|z{X!a5mp9LSP zAbWZ(-BVab8S}s$LFO8wHRfM@i`wmwp{bM^jo#_k84l&DAu(nla3=Q+H0ptT8!2r| z#$7(nI&vfZ?tp`%d3q-{LVw9_YV|~FzNG#HATJH9uQFz21LtPSR)+>jl%2-NYMz4H z$i`xzkEE@`@WFd5cT=Y`mfdsQ1>t!XZ=&g4t+;@ksBfLXvjn_Rnu<`vJh$d&^+p5t z!e0jBKK!2m_j2gzEKfYg*#oLM!WH80RWLCI+4jtU~gP-%lj?AThj zv}u&tUdF$Bscm0>!L{vZgeyXGmY6jxk@;o>WiKQ7XOZ@7c-EP5dq3~2W8`5QsOn8R zo{{QHFGjT*FFS-$cQ)GaB)mO{Esem=)a1<>B<$b)2D#tEs2#Hg<;M4f`GfR}=`)&6 zEzhIngLa;*`v98h(ekWVA87Ivw#RHpMmBlEO>dPs16DG>x*0kh=84|8>S#b-;J6VV z&mmUAqbX-g`Qc4?4(&zs$TmV>Qz3LaFJqHWj4QGQPpRnvt4EJtQaijU-1kOa83R{{ z5~Y#!>w&BuGUd1;j?i zy$?Sw^X?^NMDN!CZ1gzrEJZut(X%c0K<*Q1?Phw@i)pLHke}S#Vil0uTXgvC22{TfCEKU2mc`XF8PBU+NjeThN!A_}_QA`NQRBUhIi6Un6*v zM5+6cio$&Nb~Gck>Og^5dh&mx5(~l4+$>c$1GjSU|Nja7Z_odSH%8>;Mv?v3djB31 
zc0H2oy~!0A(KkW8OK8w(bktj$)I6hw&2VxUojix7GONZotiCZD)sgvj%ty8&MeZW> zVx8-qy59J+3oX1Jo7A3B zO+aHtn(l#(b>GH2LT4k1X+Y;~m98RZLvu5_I(zH^g`BNbgKq9UyNYkf$g>96PSAdM z*2FthcOYAPf$K81->g^OZ{cZl{a4eNam?o9X-GYir32gg7M8s=wxk{7`vOLRa@dF~ zl<>@h`6bRVK1qc>P}D3JQIz*S(8ZKDpO}#j=~(14%mW^SAMZc|&+4axy_pd*(C0)j z_<L*-L=7n}Dh;{CkL5&!3Fn*MQSo&5SCVO*?z2dlt$#g5;o2 z#zH7(pV5=EfXBf0Q|QtN&U@l;FmQVhaS}Dk!QWo|*B(BEYmW-lG1GASV0~}I?G9-4 zJ*{R#QBM^2p)b#SnYrnGyc(5xuAeF^n6G~TG%rFi<0Pu`W-K!r?@9R3CbO3Dw2*>idhX!|hv0O-C!R@1@9mQMlyFHlqegqb>~pfw#B zEBXW5DR}LP)wWoOH9!{yjLu`0f^8>iWb!c|l~FA3!}%ug+1xt%mE+OUU%>r4AU2+4 zIZrz>^D+00{~K9$623?cPpnNs>N_H@O{fzMb(;gJ`I~MG>ich9m$V4_W>&#{|)@u{AJN!4ldNU?_TER%uJ-quC44$Q@Ch(*@ob=W-XG7*abGF@? zzJ{SMMy{O>G|&~hIc{$NhO4|+kCm7uSj@$N9=js;zVCrjLr@P+S7U@MhMby- zY6kjhzA>|Qd_-&Eob5TV+JRI(4s`bd_ekjMX)$k5FrQR1v%S$+JMWzICLS|zJWBsv zu^97sQ<{;?HJn=19KP294&|dGZ)(xjc_e-p{OL&hWMgGSUrC9c;Oyx~E%6WVXA&PH z8FsQ-zY0HEGFx4T6)C{`=Yi0@f+O%le}yBBzS1+um)V<}Lg#kCmVu2jZ=AO|r$Gbn zar^_TsqaFmdjKqdL?YdJhyvoKwAq<8*)H1fWK1fY@m3?xFg$_Y|3r(msPFzvXC%EJ z@@;kuqsnSf{$uF-8d4YsC3`~A;cija}ddIK#)0yi0hD@6CIGkI=tY#P0`@%u zL`FSBXilGo%mHgbxwWi%rhw}olplvY8ZWt&=f>XE59V!ip}iw(6xeTv4he86l{Ozi zmv_Jqec<8gr7QH~EaL&{Jp>Ki7id9ylYrklmG+>`#&tKx#tcU`o6y5>IAd1555U@$ zo+D=?}a@rcKJr$RRR5S#_X1v*iMsMZ!PV~WfwYL`(L_QY7x!$x?l(kwHtb-Yeia}H3 zpf4dA-!PtpF*-HSZ0WFyzA}LLHMp6I+zdmu;-RCuSc1An7yW{5@+@Zpt(&326}cm0 z3h;QFto{OXE_$k7$<{aLT~)=9j?O@{A6=*jHXVUOPf8oO-++DK@#nYJpzkJhyMDC5v>r0EjRy%E&0+sIFMNKOO8 zVsJ4+z9qU*ogSST=VRR1!*e;S4|+Uu?cHV_fzzlk{U%b|Gvz%QJ2wJ#eWc4<%EKAT zFTC4AFIm`yEarfY?)L)MF?g-bxB%_6D?cEuQfM+iofvJ~p}7s=%yQm^{p9IA}qsO!0xW;`>en2g4Vaurfcxu;jUwH4S4N}*jxvbD5sz1E;wTL#E-GE70_lS z$6a(YcZ~znMQ}V1&!VA7Rd!&WqP_Q!4Nr|f0u_xg9?v(==Az>`LJ7Nx^~G z_Mzahf-;k#-$H0m6g#jF2#mINcBfZLiMmQF?UA@I`8| z^Mdr94!q~UyDT)EguFe(`+tIg9su+3IU_SiVmjZ=TxfK|3FPcX}Ld;fnS(oHizPK==~C$Dh;e+VPCBX&;dY3%|0=bouC{41fPaT7nl z8zbc0!|DMHo3R&;EM^Rz0MzDZPUdePdTkGEXZX*P>%Y@aEF9CyR-q3gS=@E=&ZB;e zaVbdkd2na~)m+I&Azd}#%}7dab zb{=6)sfWpUx$qOmH>LX~v@zGuk`I|NzD;jmUZ8&kix+0BH{v0Qn#Nu|h>Rst#_Sw< 
zpw>!c>=5IfIggWh(wP3c@pmM!e~rw(LLcS}(dLz>l{~as7y9Yb^Ya0^{|EmUfhIed zo!&z&XLPwxO{b0UdnxMaocZSSP*$7wE1&X6@vy(?^ECfQ zV2|^H{hi3DKFG#s(e(i}f99=u#ljmDU;i6}YxoxK;He%xF)`O$=x;R6C&aA|(E05) zXkM3*N({nw`Z8_|1?~#S+`HKS@MP(Av{4-HCDHp+=%QHMh75M1b?+*i3f}G)#RIz$ zk;Y=?#h(03eJS`hnzn(mWs$jF;PwnQ<`bxq2S|?rL0(!tgg%&IEkF2lglDCA8baHY z)?Y&A{LiSoFiTk@MjmTE5xnuO-eLX8cQP92Ni_1#xV*656O^sXNMc@nZ>0(2^7JC> zvyKhwgW7Gre`RfqA|Lu3(|5mODx)`3kyPYxEd)_Kx3DUvc zTXIVw(VGKl_z;U=Ou<#8sWH@X?&-~9MqfDRpC4GGO-Pt?-5m6EFE*(MI(3wmIxt&r zz`8Gor|$DOFEt9U5H+`8quhtdz)EFcm7J4=kqy_dB{O;E>faMsgQ;->4NSv2m;-+g z);|TCxQ8)7tM>|4vntR#zwtip7^v(1#9-hxi_v!8N1zisz@sa6p(pIOdMjBkbk~t08aem&c;{T3sTF~C4uLL9881AU)Yyacjt&WHDCORi!)H-W{SR!dNwF>b|(=He1q% zQ6D9OUEWmMoD^89i$KwW_C|t_*_lT(o|ui@NQqVGwls1AHLVXO2W?nGV& zz~?lcyU*+iyLW-iv$6@$-*aBW>DOJsOK`pq)Vl`GrLkaUA@RoLw-`GQ(|!&uL^6K< z$+sc+8kPop^Y*y0SO7D?d+uX5PhHt8q}}t_z&B{UHn3e|)M^IZYB6(IKv{DGwWY23 za791V<7ls$x8j%?c;DC_r0WOjv;(J2XqB)JL`qy|SgtZs*^v?2vz}&tO{1?Z@HP$_ zWl;_Tqn4>32bIk)9Ct4z=ZDz!t9du3L8um2b zjdC(tv?J2}Je)P!zY)AU##`^k{2CcgfohMS+iIw10W+Z6Vz@N`cn6{}M$qk~^>?Aj zVCI-3sO#Q*eIT3*1SM#vF)des)A|mLyK}W`uAm9D;c1jas8Ap1deGMqtb`ID1)fji z@i>d_r19R^l3~bZ9QxKAx}T)hQ)r1dOaFzoeGHCgnZZ2{2VJ+ysmP$OgV>Tssd*eJ z^BlGwTz!jrX(k~9=CQ0E=&YxRyw$WfHefMj%*^;$fa_FxGA_c?zGlGRhPCm&DNmK> zfXUiGJ{lq!o*7*Rw4<`=)r+QrzbWH@^kiZ~bC z1H~)S_NRgTw?O}zQ{N0*FN4>!@WK^X2_(gRWPK1_d6!MyrC{Yfshgp2A*9)i`VD~1 z3^TRiP93=DN!w2F$@z9XZ-u1XWe>uC6DUgI3(!kDCzNbhgZW;XCTA1=gLVm6=IwDBG=Ze&zx2lYn-mD$eE zL0c)`oHFK0xxi-#G|qvdC(x|z0bK6FdSWLUsjdao=4M<#`^%wUJZ%@`+XIvjt;R~) zpTd(q)Q<)e&&l-XGlClC*qnr96@|wO;HEhY6Tn=_*vpeQz`l4e&Rv5lP0;AG)UOF; zKB7cRdix9t8|(TaHl!t1B^`cy^W8eIYl{6{ha`H6ZU%iBH+lwqW+OdD{TNR%7|NlV`}eeADq5yr zz?h?K@Qq}J;N2j`nU;Xg1CXbK;FwCO`)TO{?;KC{pg+!2t#Tt~6IYm(coN$D%I3}z z7td_3hqkr&ei>>ur5EqKbRE?R{rL>O8I2po+hyq72yiS1^;4Mv?nUa?B0)yLrD4|^ zP~tN_-tqPudesG9dK+K@F!kpD+pwx;O#X|u%szD(yQU5F>}gRxo|!5K?>vdw0bAG^ z`s#;ziGDT#Su_$X+{fY9UbHd_4s6B}eTp@VhZF6Q)b;!>gA}Lpa~f$q0XOtVF2HWg z1FnHQH}ALj4Mg)s(d#VA?**dD(6%6)@+7~Ke}vw?VXT`(y|&1fwUGHrSu 
z-PNJdf_KrPx&O`d7YWt{urp`jmQm%gv|ArKm`l0=Z$E~^?_!0KywvkeZLWnwsE^80YzTRUUnv?s`xgY9xS`ZAm~pIB>p^3+8Zt8O+3@@_z{#gk5rfuz7v>xTEkJuS)y43PX=-xX1|Nz{RTAFxzZ@4 z_i1`>fqYDW(nh|7nQGIJn@NFG3QA(OBQzZVy*<5VJpUf-#0!-0M!dy*qOmBAkt}l)7-_D>$YMUWA6yfOxOphxdV9t* z<2KvT+gSR~fQMy~=8N=ee(i2(tGg$Cp+h?S7{q^XB3my9)@dL;XF{i4@Om{ivK6rO zga0w~?j0DO;QS1`tk=OPwtH!{F8Y}XmaEXT!(ipDn$82vm#cMD^NO<0oChb?!t)HM zH-^7@VIx^>73c4C+BnWwX2hSdB+h2ttvkxI@6e(bg7sQCI6jIwe+2kWf^rei^Axlw zNh$LwWFf(a&=>tvm1s-8ln3IAz-8_bb5hJiKH7laKY;&3-o;@L^75`1J$?_|W0|M7 zXWkW_OxXk68Aw6_o^FDVmC(eKtS&!9>u1n@9M6mg?9ZC=2Q=;mWYIglMo~`B$avto zMvr4?dmOE#pbb00JQ)n~1J8Nr8V6J_1T)~DvDt56Z9J`(i5?e3G9H2dYvFBQ+U^M+ zyrJPwxPlYv^u4Xm&I0w}js-fowKb z=u@cQ8J>1VON}e;jBQOplloA$7k&IetuKJsyVRXawS&fe(6Xc0=^Ui-PkJ%Ryg&M? zM_Ip91S|N1NS8ON4nxnqv*Rj~t#`^uW%oYkqRG{fyk@lSeFejK(;MA!*E9kw8bd|% zj8=jtk>IkQnbi>5@V_O{#aB1#pBo~^GWtWm*1p(H`b}{ zQHK%EqbQ|Mu_nFhO*%@ei)g7BzeAdrhB7VCo0HIP6`Hj&kmz;L;a)U(71-85tLI^3 zx>MhLxt>5bwq!PZU5*?zz+SY0LNUO)nAW^0E*8F<`=$iFc&GbRs67QZyq#b*d}>NN z4bl9SfrZ@!U!B=RGe7YL(&0#u+pu?4(AST^qZe&#WuywDqwfW#RJb;r-)W5HSD^4IATgio zFHkWN$;tswbHd2$<50@`67-zc#DW+{{xvkZgk>}DNe1nt^1CloXa?gHs55?{CkI*2YFxLo;L0-n?j+cR4ntJ)cYpe(Q?~eIGTL=jBK9FES_5m!dyx z1Fb$t$uj&kH=CY6PdABmLo7-T_};+pq@ax(sCOsjj0Joi?al*L@<8o8SW9m{*I#J9 zN^{+ttJi+5D_B=rK3T3XtW_`(j-qidqT$v6nf8mKf zyEoyF=c@JGy~XnwTDT*?@o}EzL3Z@@9il()v)l)L_XU04&W!kjphPa5cR*MDiec<# z?!6FskZ5B$7L3w?HCSePefBPhkY2Wj2UoZx$DIPgwDi=jXl`t(B2pS6{kuqERJ zjgj0qngG0~I}AXifIBEv?n)(zh4$@BBS{oMKAN)(}uqG-)nZ2vhZQX;SuYthe6 zj0pqLod3hun}GRPwhi2@6b8SQ;4& zjbzIV@?_1L#)QV$cZSLgMI7JncmH#|@9}-#@g8&hXZ~~F*K+RHb)WZno##oc{7$UQ zZltOOa&CSxBf7ev5p|h!2zpussa?T(U}bo7=6YhNGZGYoy)!q{n!r|cgThuA+<_gi zGQIE4`auUX;xu9uGuH2e%f7$JfdcxB%(W1X9Bha0Nl?-gd2M*k)b{wcZ7dc^I7?u& zJUQ#mrwg_~`xg(if6;d_eQF?CR?~7%xEAaFCo4oTRtTTC_90Ei!MSUEm3ApuJ=ZI} zIM$(m2nefVZ~DR0ZP23+?NV3=kFXZ_Cb~Y*S^F}LRt>O?o~3Dzc6df{I3s;cE6)`s zGQxwbL)Pr=z&JOs2<8V`hSeJg?SBTIPg(!EgVoo;##+FFRRu% zW}5>i&Vr}W5wp>;XVE!#a|7XqZ}r0IY2=l0mgi}CJJ>@%0ycwzWDd}O46dcXbpX8k 
z321uLe*m<+&3e=cYImXc5ylz9_)joX33yeLT}LOhD2LhS20MYmtiQeucTW|=rx`RI z%4qI1>e2QHy)&SBEqWxvtrF1HY#}~{zlD_5LY9opdj@KaK!<%M|BGGih+vhu%2-FS z_GH$A!)eH2C*<%N>)8R|UkpdAnwZJR+4S&TW>F|)E_qLnw`9FP4}9N2&t!P>7SOF= z)ik4AVaAFApA2Rj^bd^|Guqcs&njAG2MU7{ZzBQqSP$C;kd1?%9a(|D21B#{CIXY5 z%4JCPnP3ky6#QE=zL}2(2TzkmAiaRZS%2EIsLF0MIJKpR_0P@X;fb3%;N%mDF$$h? zwCX}8lJWK~A(ir&)y4(TkU8CIy;4aNr zbJ3$5IJJnLo(EpToR@hPFwed5Z?&-F)>zR8+6k`AM4$gaCMvOe5`ItnNWmUF8SRHQ z=IAv#%^1(xXtc4sNl@RKc304I&x#m#vY%_l8t!AnlSqWS52z(uycI*so+VTNzr;M;grZcMcf;LdMLeGz8k1Gf@BY3GR+%f3*Nkn$N8; zGL*rUU(m!ljBHeVYCr}3*vjKxR;mG9oe9kDarK-32MYffT-nW)MD_vNl4LL&%kM71 zPgPo00xxq*nH8fD`duFG{TtilQ=YXN^U-=3x}{Dp28O5MiW##`@EH!}S2DUj+y5Z* z#-p!B`t;gZYfWF%FRa@a`Mnr9yAeF$8IyMat2-ANj|Ok^KxDHUGh3Z;G1lU#547g( zSP5==o+{9?CwrQmNYEgz^+CE;q8sY?1A%tTgofsqY6U(aZ)6WB<@wW=fsHE&%Ot7KhzGaeCPEN->o0uT2T(DqL4_>@Ttc`+57r* z;L~h>D0={U8B=c-QvLYmAk+`J9h+eV&gY@sATVAF|6XI1TJRu0Zm{>ff+w|IvA<$H+=|Sac_g$K$;BA1#-q_ZV~v(_e=X~c(Le2xO;4kb zWE3NT!olMUG_WDGu7wP)fT!oN33^H~#jflUE)wlpm$n%dqq80!)`V4k$n;J2L-%>*_I3s9Tc`vGY~G}w28-vNo<)o&R2a$v7rX)dGH zW)#?jRxb(c)|vokecYaAD^4FH;bueE-Jjz%Ip9pZa6YxJ0RbPSm5$^QyW z&j8;-?sz&oFE(Z~ysFKASxCp9!1Odc>WaMRk*dO&?qz#Ib-h9Tu@FA-ZAU|wL;Lp7 z$MvBL+&Rm>qhr8R*J1Mkw*mTSC{Pg?&71G;c@tD^LZ6PT%pI{BHFy$w80tqs#h1}P zz5JJGc@FvAgM>{3j?>^!6KO5YUF&pw0Q9rzT|Zc@wM6H6)|mz7Z_~CPl$!#kW0~_f zkgWx?t;lOr)~3h7ArJR@F^X9W!nksQc13vt$q!fOu&dDKjb)tj*yrb%`5o5AMNp{> zYj`9p(WIcIp4jbRRt*{Oov|6?3ZZRp1a@#H^AvRSGF_Gqr1K7Zd66ruYjX*XLHb@`AAL?=u(Yd*3UL0-bH9`^@)@GR|xp5M^FH0J9AY! zJ%g+-hL;ziOckzui=3=OPfCHudr)&eHr00>=EKV1UL4o|0#u ztw!&!z^BT=9@2c7Ynh=D(lMTqI1owlnRxNZf*eWW`p;;$mBpUPlZn7!SexNNMWb*ePAQRQ!nI`SSvGT zbwRFkkTs)Cc7m(9*prb#t39YU7opj`jCC75GJDHVR`FH9b{#(Hg|31Gyv6F_n;_54 zHh@z5kRR8ji_F`DaR#$9b>A@#DT@X7`Hbe7o}NIK9N34i!Nq)@)&_qH84JVe`<8bY z8fjh6Thm{(e)?zhr8Ea`#~Fz}dv-MBtj~t4UeL;! 
z-VUzc6-ZhHy`!;|hqz|;2r~$1Ip1c!-+jimF$ zYE8z;=DYPYh5(5^t>#z-y``Q?Zb1uA&AtRBBf&(E-Uk7W)=@Pg&RDYuAh)i;F~&6d z(Nn}5(W_9D*(q$Yk%m&r6J`0}bv1Y@Zr1uK4nND#BAjQ%x6!&+1BoaJq*<(>A2V() zIAq4YYG`;b{tn}lm!9|1<~Dj|ZHWIcMtwLwj4@8KXa0uKN73$KtYTRx;Lp=Qpy%>4 z?225x3#`ZSdn(%UB6qZeLOK=O7eRhoKo5vGgdDk_!%yRP6dnI zIPNGJ#^ae)&pd%4Phmc&?6beV=jP1pSRsF&r-MUOe-LgcU*;OG&$o~#S@=U|hwTx{ z(AR(&a??8G1N@(J#kTmre*U))(5QJUYfoe@cl_2xXo;>)z=Cu}Z$CxHW4L~epAE>8 z+8N85oJcQs*+zmr32z=`XH*l*ei@vKLd6zHp?={5*r|zFi{9AV@z_5jf;R_y+jZE8 z-mDVFzYWGVOv2VIU_I)G)}QA}5AGP3Q;7Y}4fwy6->Vqq3KlAj|JDQHAtXjCs7Iv( za~tP#87p2J%l!i@Llp4pH@bp-JV)<7NM#f-Tg|ZucIpP}yKfW*0K2jE{g`bm)~_>i zV;*WFq|J&l=Jj!`=$^BRLYiJ>eDl>9d(soDEX`&Cll}!yuztWdPm6eh+1xy>Srvz4 z9j7thFn-dYNhEzf1kVFVjwk4Huwmu-yBa*qyI&7Vm|x5DG(I7FBIznCo)uohkc2(J z+mLnP8n)gjh?=Zo0~qCV@U05%UPKDJ^4D`vGoaKc#?D69Mg)*qt27bW&0(HXKwOUh zJg@1~+B)pSabSu>x<3apZLwP+x=+ZRx*+m24Lj*%GW^ zJ+UI%{||sU9!p;nN<74WR+_qtAUzGGS$#a2{1^7^2=ZnHg84}O zWoS?ph~sJPn@OK3Ju9BZOuoZ*W%n)PBqZW6^61mOIgMv?wIwoo9BCLtpIY!c6)1eK zXFOIW7N!QPZWLVgY1ByjFPO*dlEvZ68^G5in7c5&H{dl(2OA?V^}M?SZNcojnN_S$ zK)1Tk-|BKp7}vNw{iZ9ybspTx=4o;Tzpc$_WP{JPMxR(ADw#Xy(WzTdr4{Xc|6c}b zzY6b+{M7524pv73I_|`7G~jK>2+{V0jB+or0>Y;vu^^a zbb11cnEiJRJT#_q4#FSHxA|?uvtTQ4{aACJNme1BTucF!At>R# z%>g&#IIRmf4(!SVy8AP`pk(ZVCywU8=MNe2Fh8E@s|Hq0xN1b8=f8TuVb7M8f~Q01 z}FTn1s3BWT5IKqHV-+McH4unUWpp_?7^m!fN z_jk}JjPb3jXk4{5E%k+XwHDZ{!3MY=j0&E1e8clS{8+TX{)D(Emd~+@t-;juRpYS*TbbD`n%j|Jqa@7KV2*!1I0smhn=xZY zu6#)=W6pX)IV&HGVK&bW)j+?@Fw=`wzdRIJ3kA$TV-~e8(Bwg`C1cHu>#z#(1t`^m z{;|xZXU+F8=F6)Jy+>d_5@_q`*%-KQW~fYhd1`tEqZy}QPVL8X$9EVG^hyRVR~n)K0cdlyzk$%|&KrQSI9urnCe#^S`Y{u-aP13VLeU^p1Q6Yyai zb5ww41(AROjM$R(+sFi?fXkp|##cYURnHhLfjg_gdMZ>;V*Q*7mwjWn5xKHLhVwhF6=Fq~4u=?bJTm9mCMDoK zWW<~t34xxxjTAMY&lEVd6UbLWDdWF<>T3$VRw;08_!#-m2R;ko$sgc-9r@F*v{Xm|_U_XDQpE(#y2e=gi z-yfj8`4{#07REXa0{;|#hSFvY`;Rm1#tOmLOUz>Sg=&oGn~zn<+121lc`p*#hM9~^ z`;B?^*ZzWbeGDAu7`+oZq}OpQy)U5QxsfX038+oS!N9Ymio`=kjB-VifW_g_qCZ8~a>!tC5deb72%oo|+cZN$J0zqA@fv4d;4e9=C2k@(h zov@NzBkxDSr}m)Z3}Hm8-97<_VfkNhsryB}k( 
zjd|OF_FQG|(4MF#eW$W(F%w-F&v-qs9-fJf51^Zd1a|`GlSt5qz_k^+6+rg%%~S!l z6ReM(P&viE;azOEQM(b$(jV;K;W^3^pHqM-4EXM0X5YO1!JWLgsvxiba`rLMcL zb>Q|RoST82`4>GF!3iTM&3ktV{BDC;0dzSY-Kz%`_95+_XKcy{zU?sgkCBU~=@p07 zYsfzCD%@+xEJhKy%kU{GKRxwczDz6qk{Qr?KKgDg7T-{KQhPb$DM>xB1S7ewpP>`- z)C&o=n!NAKKE(<&Vb8LMALF@VfHNLjrjK01k_H8-d4MdvWnQfL(g zeNHoT7~@$xCJ*D6L7z^8aa^F!A7P;?!YjRV=9Y5>n83Ug-(P}T_F>(I3w z6o~{gHw?Qj9SH`WOXnjakoa`Mj4FNXI1X!d9RvC2EZ!U*PZfpkYAH+LhgzVY^)YaGw(z1a_Z z57m4--;Zl2p?WJUzmaD}xH=uU&!Oqnp@KtcL@{?I(sxsdwMj8-8113Jz_BkyCs`Y!o^SE+jeN#7+tgSq4hmaqkBz|tx?!FGYm%r{+@;Ocn!%+0^7S; z(T%k7bnm@ba;qhXpOsxJ@wXnjJq0L@RkM{@O^nU+jIZ@A#`DnwX7)e74Ft-*+z~5l zt3C-0I`Q`|qOWj8_2o@*q7v^=bGye&^%=nq037E%g&U$T)Wch5Smu(uLslBxwY+nMzN6 zJL;r&JQ3>Y&``ZlW77OCM;%R1R1x&;E}*k@uE4`p678=^&j|hx#iU7*&dgZgH0 zsmQkle7_Fd%dwJc7-bQ?USy5@fLY90vX_<2=qD@oh0adIAr0CIYgP`!)=tM3xI37F zwV1&Et^`+%+R9?5Z?(Z}Y+D(0<5BFyY%EPF?1&c5ce~~@^ZbKR&ONbDJLqrTZg)S4 z%$JOu`ZV$+{dWR?Gc0fqY}rCSV;SpZERg#apYD9WVtmR#G{ZLnRk5mlphzgch_$(A zg59t5AI!>-#X2$=i#hNg+&tf%g9H`=A~ViK0moGA>MC?=8g_I&u)fF+<1kk931q(x zqv^v+gI4AT@=RevFuBB!XM8+I69??Q87Ul%IK#fi+{1q`t2_ARNJ$4~OaNxzTYeF& z-E$bn^8gvF>jUBE7_bYyTV2c8kMrzMnDt}={d|HpGeaMG_s05O0Q+P7_U*jY*M7%t zr=Tqhp_~~I$^%Oby$7>e7-!)=zdz8Y2hY9cQCfwT8e4o67?iCnpzzJ@79=MUn5F>1 zH_ZM4Ixr#Ncw1V#>+b>`tSnv>p5!1uZD^y1YBV(J7Et^ev>1+rmd4h2s?j_mWsncg z34aacpMbvgfb{^<(;K+Ufk|&D(T?>jfw9zThPGg-b&1IgjN1D)XomZ%cS-1 z^j``8^#dM)9_}F{;lOPqM^A#;g12GGjPklnuZw80@pPeVFeRWy6z$Ep_zm*i8YmX? 
zxq_x|WhMI+ZP#Z8CZX}&&P?|vJb8vk zW;4s1;GqBJ6x1}oMlohB$bSQouO`4b9typHeq2GvW&yLAy1qu9lE88?R2d80TLXzS z^GqqeH9@b8#as@JdNWQdTJ3{Iy|E@K*iUOad<_kKLuppw?|}9Q^BC#iNpS0L8AW}Q zSq5UCVwpPziFulpSgx6|zA(F()$G>RGTTn>jsTCbaPlnwpQoobwKo5K0wvo4M+_sG zb)^^b5+JQE@pK~vK4Ejt2_5sj%51$;MP6*~UOR=#P=NRKQ zD|!dU9|&a=80AUU%ahQc5VIN!(HH8ZfmayxmKP3uS&R|7%Vu4bNFNMCJe z*Au0pRqu5pICOwn%y*}sBpvBE4CS-14zAbgXFc?)2RdPNMN_zb11ej!)Hs06%xiYSK5*WQ zFrJui3T4xIrp(61nK?cb!4^*2SJD3Uz~yP9Mex#hG~1BNO<1xej30~D(O>L&u!3AS zr|cqN_f2#%eD*C$H#DLJn4Sn?O0(dQE79lBQO~j~xltymXkrCub_w{Rn8&vkU69x1 z$kY2=>k7AqqYF)t3w;v44KS~~=cvA+=V18R2W@kG^`uM^RO=e7ka}saK<}kcbvnJQ zun^*lPb#CK)IuozH6vJOpy(X9yd1sXht}T;Rt$Z6o+vL4c3F&F6j&no z>nW!gEJ+G5O{cvXGR?lZm$4GL9|kWxi)>6t0(y#btr1|%V^-x z2YZR9tQqk5H2Ps(&qK&gf8^AfVHw<+%wE3$^tZZ~F;Lssd({E%#f(%BSu&1c2VB&P zR2@08{`yypYMs}^jN^N+f3Z3gLC#*}s*y@7pkFw+{tEpz!$qq|T0u4%De;_}XW?cc zXZrTca1jXxS#Tf${Nu2C_2E_ql06(M>_#*6K5fGO`UayKyfqhu^{0CUntl1nZ5Z&Ou`BEEk z)%v5tFcnHHhHsvnj6f!rvm*}QGW zJ6(edk3gw@P}aI^6XBUz>Wo+l$2BMp^?SxkLgUZ^hpTOuxs1b{_ZNj?brcF~kWjTy>94Yj~qTbE% z=vyIp;M+vsnwYCChi{&6%%r^;L|#TxmoZ8~a8*7$zo{QrPFh7YAGrJO=UZr4ll9>! 
zaNfuM`6r|-kv8Aa_X%e2M7t{NCajdufbpZ@e;D{LhU&#%ugHpS-nlez8VTI`L%(9a zVq7Tz@7z7CgY&nMI-g5CYkU?<-H}n%pML{`(MZ1USjpP;4A*{OKA%NS!0R@^;(Oay zq0kXv_oVq?wBj%l6A8Sn(Dmo(GmKG&veMSWvNYpkUPpJbThZ2P!Fm(Q%&ModKVv+G z7J0@b3c45@s;4L)`m_wVqro~B$^DTT(vZ@tz-ImDOU#oUtYm4}F6&TuU-w{b;Q@&Rnt6(c>j(Z6Z36jh!}<_->?aF+f4lo;z=b(OpE_1OKU%kgyLc%87XJcluAqObQL3wmet#pOdj%wZ>O{|g`PLx=8oc!c*}TD`;QcLA@s zm;vuN^86$7ZA4PcrTh#SNDsB{LFA++>!Ni_t1!|!aGZ`DenksCPsNe1FVT~E=-`vI zGEQnv@V~WN^I=EL@?vhZ+<|-;4d}`FYUuY!dc4dWzi}@&P(<+aSKzaX;@+=}907%y5wm}*~{EOV(eXTYR2L#m35`ws6p=r;Y*tQdlEzvJ0}3 z^e>E(z-V)sBZBd)aJ2@_J_jZHAOk%Zr6a4G8PUz99Xcr*1yu*I<1rfOEk^L1jd62Z zu*aScnZ#B7S#sbPs2l^8jjQ&|WCnUN1pa!Wbw5~HsWl7!l!vxQSb^T6-$Hm|eiyww zF<|CN!S3MsGxWU@teR($f&TP72Q}NVgRQG`T5_28wZ_?Kc;Z_&v1@(pl>5iaxZQ5>Nq=}4BmtK3JaveBr z>~aj$n@FD;SgekWa|5|Zg+`vKa6Q`rwR|tti#wh9&um&3fTBGu%+FGt`4d=q)4@8F zsaubAcPv*Fn9pSeUWO%D!(Z#CreV!?(S89EWkt;ne0QBMgT3}0Y(wbVk+HI&ihHye zXyDr?39-8TbZDWUzdBsKg}$DrrP)m9 zf{%Rq0*-h}@pW38aX$$ior`TO4>ZODJ^?%jkrA^-orIsBu(q0xF)>%rg`a@1Dp#%0 zvK>kt1;&3fQe~h&jKov|*MiV6^u%@vIV;K>=5YHJ?6TP@7NYeZ$p2B+0e28q?rTqr zH2Op$0Y>jz$9*q-zJ@NI#mo*QvOQ3xq1kcR_1_t}7MAEFl+-`59xPLU(|EGJNdHsF z+A?I%Jx&f#je#n92Ujt^J_e&#TGD?ZJLIm6k;vHT(0DOASs01ZFKN!X!`w;Xzx8OO z=XKpz{fBikhbMwHwD3HMPh#bu_d({;*V-5UdOEZ}S3R|t0AY3r*I^4Cpf+jv_`2Hr(Jt= zzZ}=h-qsoHJ@4~8k~9an^i7pRJ`$ntSvYRolpf4UaH9ZsjCd*oE%vd0G8Wa0KI0hC zvo=du4K}gXR)zZi0F1u-mE~`EeFe&S-n%o{+y~w9CSN6b}B6B-Us+9GU^dW+7^UHuR-+43x-%3SU4$&(MJ`X#+Hi;iTF3qgYuQL7xNk&Vb*&kq^Bz(~*e!z|oZz;V$mo2J0kbY6c_t z#=Z={t!wMK67AP+#xjHHaHv}k{;h{z?!&X-iFvoYSB*V`v7|;WSd~(%xq%tefy@Xk z>!lS#o2LPR9;(`CX13O==g zmwGp?U~Apq7myPpy=H;8KI98<&GST+u$_MZ|7xVu43nWs=kpn(6x5!?%5wne@;r($ zLVKWc6SPWg(i^`4={^eWk6`mQuV&t%uGGjo*N5w%wnXcyAD6rkzRx3Xj>L?_b3*BE#G}F zaSQm01KSctHR7ZMQo9E|sf@k!9bF+>8$({3UGgc~INQVUycFLLAwl0m#cH%LE1vn% zJT15oxbs4pSY#m%KJGzEwj!5hSl_B4t&M<7|4$Tp6Ap!}#$#41y|TYCsy<6I2rj@j z8R_9E=|%9$Jwy$#PC>HLfWkV<=24yT`XN^I)mYd_`2GM~G@5KCSLCGcLtCN86PdRX z*c+kYy7~oB9Dyh1ku@^j$cPy5xEtx13Vlx_SN|IJOsT`kV^gc 
zMbVohwCqM3{W{Ok&n$7*u*TYD_nq%yXI_KaC6H$GjJ06KbmV6aT%8X@k?2!CC^{5* zECt=}2Pe<4>Sf5}ik^aJfh`KE{D7>#2OLwOOh@j;(>Dy7FM>Aiw|g>TFEH^eTq088 zTMfOErQz%jByk{8W);#*#xbTX2j12KDx>a$4 zEEG>=&4_78saZk~q66cgT4Q9)82WDXx&XHOpzgC!RZpcSw|@ZU2%zwUX8~w1nWrFS z(0r^JjI;_|zonQxF(Gps??->R@Gg!AnnZz0g zPfG7%{omv1@*boz4O#g&^w^87Im3tr;cs3jzl3M!Bf#wQ{3om?pYXK!Dp2o0@{Eqo z&*)m_gXqJvw9x`e(dCS0rNz6TXcSitF_&>%+tC5NQHgNY{apdBTZMEpJJ%>^WKD-* zXr7gw3n6{Y;QB$lh=Y+I>tu%`x2`qy!KD*$nMuwJbhDYS89001d_G*7id^X59m{v4 z1dLOg$bX+AJDaiNnLzEkfoz~UL%%&x_Cr>$m0++G96Iv11u|G44xV792zCptq3%7* z+Z`W7JUUsKnVYcMm|xV)X&=$AGx$EoPQ}c(uh83P#gRyfnJk+?kzT;C7y4}F=`1s_ zPEC>SIq=iWlmmd|emHDiHskQZXx%ZuYDPf0eOLfbS1*LGKF3+#?Ky17ICOdm5PIG@ z8SSykjJEFxJio>Wz7a91MxU_t;O1e`eg=Od)cRq6K4YaeN^Tt-^<=5Ozs|s6H99?? z(*rx5hh3amYldKfo<*)kAU)O|(u3$5i$~C#8R%#j9M;EJ8wxc8%k%skz%nkN-4^y8 zIed(VF!#V#BxMtJxEXx$ZB;Zqt(=#ROq@iIIy8hG12f@8jc)H`K9G5HX}$Qq-gkd0T^=Ujk84z*2UW(Ku*aQuULHZtC4tmqp8 z_;Rp3=BbQeriX#xU1X*(R<;)HbD?un&;z5xr0^*)`8Rq#3VPiG73&1Dq3_y=xb5r! zjWN{2n~N*j)D&o3nLD8~o=*oDmPIy0{E=CL(n`{mky&LYvYofc>YF2;Gv8JB{Lj7f) zso2YvC9Hb6u^!fHnS|sSeeoF5XT5<)R>j8|-<@y;u-J@6NyZ}j{4yQ7Okl1P@Ol{* zf3_((xU{7wq)g*ofLbuk$#S$i!^NqVR-=7b{P8;vi0a{f=-rDoOIlq^2e-b;LLEsk;{F}h_JlAe9 z%2Y6J2gXGi%V(L%aI!D`p66~VHrA|Eo`Ld&xKB14&D@AVwaBwD|m)hCC&K*Dz-65I}hu zS`T0b&lvR#Rvb@ZdFpm8&`m<(J^-U1nbmlL9KM^4ejO{uHE^xP*q%x~2+aC{r=zte zkJZ~~fI`B%h3cywkIX5PKZh%Q9_^X}16OIK!vt-N#=ckxC zhw(q)ZW8@MIh7~Vat~B7AJtFX_jH_aCLkL#!D}b*{mqCMn8&?Y7`$_*VHGvA@tB$W zdnk7`y= z33!r0+sg3J)1GD6X?SkndGz2z=&+W4(kzqqQP_(4%(E6r@u}Es7ct!TIp_p(WH!L| zjDLig&8&X_i8Je51L$I&@P3T34PKRm4wnP`jV9B-=t;ZA?5_qR$I6pW9A+`v$#rw` z>#xaSer3vYKI70@qb$FJFT0pK42dYqsR&C9Add1iwOBz6MWijyZ^YKJg4krNGu^LX#L^x7J!+ z?0{6P&2H^cdR<}<(HdwnDt(u?L+@-{55}D45Rgc8p?Pb;3`2oW1_CJ@0kEimSFqMCEA8Dwlact@EmxZ z;!_Qnhp_MIkBnuqj$Pr(DqwmPUaHRvuqqwk@GW`|=T1|2_d8a^(=ai}QY6}Si9K0< zXrh1cefBn2p!09=*34^>$WuK1S|F1fu;CAan{h09$*n)L4J|ghs2Oyc1QtGl?SxZS zjMVGs2^iZg#2%UBq&qU`x;-1{{|yx`z*lXlu?ES3W`y3#Z{mulOj7C72T1i78;@?4 
zqZ{CL9!{197b|jZ0$x`XD=TWneA958nVta3TF9bN3n$rc^kMZWh(x3#)mv$0ruiGl zLmDF+d%Oy(Uw*zjTqrLkW^9>ecKd=)$yMPv2L*9C~Z$aCeXv%NsTN2zo4}AZI z78Q{ua}F3OI1Veb9_}QeCFVew1r+U|s3-5u3Dp~U@}yHNT9L%cu9s~p)NH_9P5EyA zrYb;hF1h~vo&`trT1 zUfdOwqHR_Dq?eFpD}{^(ci(3Fwod9+gJR|ZFh0)6)Eq|d1I*TQHS0}#T2+FwT5vNr zcn0wo^kETj88J8jc+1c$3TfT|L@}%)p}b*M{yU8fP3MX+#|8QBdFcJY>bU}rj|Z}< zP}Mx83z<7~hOj<-D9m_9#(5&~7@AT8`ep%PW5)Jm*aWaNsy#W_cN)Vs2Z)8uC&;?k z_7do0J9I_PmY~O2^spViu({B|cew6;(x8x@v$nuy{f+B`;e1)Scihel{w*78hKk(mb|&9#t8eNTET8v)5GG<_m>&BodtId8!4 zwP51fauIp0idGuI?>lsDaXh*bi~njqPZpCI(<~3>3pU@oC(f*RG#h+hqsK1pS}$o4 ztA`P8&x64vWw z9W!F46eFMF`NjC0`=M2D*2|^vuPA)+otW32Ag z2nWN!H=a>kAvfb6$dBAS1MT&(`P5zph-O25tD*nQjOnyqK~Li%jjnc9-=_|Ti;dCO zepr!iSj2ebaywjXh(!608%+;9ZP&6G-+#eLa9s44@qk#&*!^y0_HSthX zhevWs@AJEiQ4kCAJvh}u;wPfJPlJW)%2D95hIv_JSI<~2G|}4G16YgevU9SgjVstY z(8SZPd6>gIJICRimCO?9)07s`$m&(({1!6eyTEAV&Agg3u-evfxIxeM%;^5Xw_dA| z$pPTwTZq|=xt#BH7|}ORzE3flt~)T9qjNfvei`aN4m1;y5TA*ApgX=9c@b#7g_hEy z5cZ;Tuq)UMEa!mbE`FO2{1)x@!}~-iKNF~|*i?q^{lKyuGgu#PN-&qFrNWSrI>B@H zr`+{fxGdUo8f##zsXL9Gcp_Q?XTNAL6^hbO)-&68VydD8tq*x+@}rY?d@=v~Z!{y@-uSDBX<`1Xhk+;LWAcAb z8bOxa}1NUG#%hAg$Uf03%D4K8&7=**o!cgg9zL_(vE+aVh zLyU75l+@4k4fJfxw;t$@adLWr4g-(AqdLfv-n5ZOxu;e?XEdV@qZ#34R+1O^Z49jy zaGF4o63jaS`i=@#+Rqu|LFPHk@9Ok8$J`IXw;g=Ti!>R z?x*=18bL3i@tL9jY%2^Q$U^`9@85D%Z_7Ar&zHe-d_(L`Q)t)~3uBazQCcb7weE}2 zx{si>J+T(%0C|@6I24IBk$ugp(AC(gR@lc=co_~LU31~H8GU^JKb;k>B_p}dnathZ z$m9ujq{_!`bjRoISy+cOq_h_Jj;GDfaLi1}uLo;|Z{OW7|3WYG_ddhjNl2La?b|@B zLBNxYTwX-$l{Rbn=hx$qX-2Dk&y5r6Y=9tFyo3yfm-W zqOXMqgOTG?>=BG(=?NrhtVGMPD-HR50f_HsEM-Ywh`wImjaZdx11Cz(uWVTDWM@hifmHy9%ZO8tR_Gxys{;8H%mfYMeW)sIyJD`G@cC$9aB zP1qjL+Gp89w6RifEu_TL6)AXJ$^nNbtiNaGc7a?bvJ1}$pzDqlr1768x1I!-g+QN; z?mfkbp0t|Hw>N>*Z2l#%&CQr^UqAuRGF8ReZo|^Y(&r-2GMAZs5Y)R2kA3T#&eh>u z*#bYakw*6#a?dxB^I4OOo7Dfj9~hFse>hagqMg3>ds%heA9~i#+`reD>krlz^MHxB z^6)X%yhk9X`mp~C#|F^90X;(zKT+IIN3xBt^K`3kp^Xbkg-#QpTit-uKEFmI&030Y zk^eSe(gU3Pu*Wpg*i$1~s~krE8IC5fZ}GHK4?dm2Ux_o4G?JfAKot#7eJaVpc60*v 
z=YXd@Il?yHcL3`|{XW$#rYcKM)Dv&j^Tos3!`roZMu@D>8 z6Y=R}N?YzbDm z@yBFfug$t=H8D?2pTr*Rf-Xb(6b6H9j9D(w-+n;j&bup=Z$z&*fyg>-uE^DZygu-Z zL!!-q)0TVsWd3H~^9$1cH=K#(nWO@?JQ*5Q)J68sY*d-o&fT2R=0F|#G|R3xS9^#%)NJpam_CihIRHWiRaLL0tuaSvhu7I zC(QVFKdV;)5F6t=9cm^aKfR%p_g8|^8T2L$ikjKSI=KC@sm8eV4D@IfGuA>&%?WAL zT1|A{c!u%Fy_RZVm8s-jQfvH2i7Es$(88Phop%OmAE( zaBBb!%{J799{T#e#nPMUvq&^oq zorOQ0`QNv|3A8WBwZ80bjd`yFMiY@~p9hvf)w8Tfd7+N=A9e$y-p}LUbRB%7fZMF2 zpQG=da5R>p8-jhJk^Zm20dtqs297aEp?Sk87_{F{e`AG>U|o(p4Q9+E;Pn!8bhp_TO6%M6-Dgc`t;Dwn!&GoH9!C#( zQz$eRp2wrZ#sp8sqIN{Xi!p8^=(nBmoTE25mP4Xu1}oOTXg?Gk3%##+miZGH?)bv=aLo3XD{$5$F9F3W2}R2yTIETye*;G$LxLf(|;{}x^w4QzFp$uspXA~5Ci_6 zIvLLh=4mk3!l!7-b)>!pT)#<=8_ZaY7Vq+8r`PQ)oalk|33;rS@pL)`sVR>nKguY3 zd7|5aw7iTBu(GeY=d5%3Jkp+vnIqwCC1k7s6sd=-)Zi(!Es(#7?7qvDrEud%=DV9w zPh(lCBOSS+)uYg1BskWBx36%;c&meqXuU%v_+z+Umi7@)^&-5mviK^j!E_`g1#Vco zpenZ5GrwlIybLcdz!xjTKgyLs><(PzjPHG(-^GA5GLYTD!15M!GEU7?wiTGuGdcR- zr=tzefVnY(bJ4)1U{ZmZ3iA6~`dJQurJ#iv$!s1hhsl z9A%_2T=l&DeE7K+>hEEM*N~5K(9gG#W}}?X$a+StTx`bCUi2Ksh`YhWv1&l!2xu}H z%}4{&hVX9@@NEa5N!X1yv8h(KoP!Mclw}008h9U)vW2-lAzu+V%sy;B*kRy*9>`|~ zJZg(AvU;hxu76++F&j@7n(%&L#ag2`iO{7g*5TP;l}KSk-=JaCtU`o7J36Aq2X zp67?&%7@;$O7x3FlN$5wCUp4>iJu40K80WJVATfjttvAnz|(tZxs|btga1!(tPJga zyIKi6jW{ifT$SWn4ceV!6!W6|#P!FS@pEYKIe(|X&lZgNEmuc_WnIRr&9|{&G7e15 zer9ZyF?J>SJ1@XC4|IBhS(Ct2?DGP@e?1whAMlf)eM2xds!bo7afUwuu|Ai=NQ$}B z%$=Xfojd0+eOquh<9Y&9T#xhZWuS||?&<||d^7GhLC!}ZZ$AWh<${H2thLmyV)H1w2E~U_wxaJ z8MwHdmCfk5a&Y7!^v2dz==TYKt!7%95zS|opXZ>^8QJ&2;1#4&+Uhs8&SV5*hUU2s zE^DogoV*RD`c?G^8z&bLvnKIfTk!U6 zy&ZVgXryPvgfhwzQ^pt z%sqmg)5DD88OXP2zkqN0@LZ7^0g)MOZUTuuji1rFdhk1jzCD5ZTQtt9rDp0ftDAW= z^*b$Mw$K}yblOTi-~1G0#1+sv4az44JIi!znof$-tPze6!FueU!${>&2XlEu9SAXTp?iGlG>)pU61ib?;aQxfqK5JPBQYK@Pei-*2&wn4L9)wImao zX2CJnYU3&Q0N-w&52hktvw*`l$a=$Kkb`2Z)Wd<;*tZ+#?{GBTTs&1-KU*+DHJ%s7 zA>(oE7~4R3PctTgNqIiz^cV(|=BAB>=j(yC8@ybIk__CTF?k;DsR-47y ziiUS%iJoHba)W11PuiF{O@GKPB<=;MZpI4h?iu$skdckaN&`22TH#z7PVaMEZ3`u> z%&uR-j9S(nG#Ze^%r 
zyn%6AsnEAkpdtEz!?0PI=+_?P#dG30Kot)C3bK2ek7YM%uQXKi-9Z{W@|@f)bBdF2;ZofpEnh(he7ph_~;prvCwTf zR;mZP7k50fxEI4b`WYI+)4h!G1038BcI)|gUQqq@wBd)ya2=#lud`fO!ixSDkk{v# z(dqlRXZTY_~k~AOM8&7HNYrSMAcnY;X=}~5?hr}KP zLoKHqz|(UGq}e# zcd6BKQdk3LgIgIm)Ek~!P1Fb>F)>rY32fx!aAFSs`F8yf*op5^c8%rWooAoYSPS%1 zneoTjn<9DUviKVqJ-=25t2GUr)M-!hTCdPZ;sK1c5D3>{7q7uNb>Iw7c4yGchXZQL zdGnun^0+HnY%HPGKPteXnzZoQ!jsa*VZY1R?YQcRKr_o3_moPTs$6Xih7s_~C@Qm@ z%wg|&7=D7_B%km;#N=!nqK+aea;g;`P`Usn!~f=HY*1>=FWs zt!5Q8lF}GvW0GoNzxB57gDzEpr89OWi@WF1URPl=z@>7n4HWThN^#`vM$p6Pv-(gk z1zH#b+aC!s`(7_*brttrfR$>@g^LgpvCo8#}>aJUE!M zvJu?60bhNVt^qBF2iEfn*e1iDaOf}&E9WWkW!!rfeYL7ZQ~LT&-nS;^<+4t!QLw(< z^ZcY05u<>~2*r4K6b&7WEv$~L6@iBOFs(qT&Flit%tCUKot5!z<{8`ty^MMHbag>m zN-ZNr`(QEmu_Mse;hMl9Fe<48ae=4=Zz-N367)O`vHErwdL{HK)7WTY9a5Iup~ zJb0&}RVLaH4NdFw1Yy;KaOA5HJZ}n&JHg6oK~`)ih^4ClP2CGcvjg!g(igy9jJx~L zCav=%cxNQ~Wnj_!c7_$!bG^@i^J%WRzJ$X8qu1OW?VH5d+#0K630{7P5NFzMr-n)Z+euAoy;5`P+K>o$-C86~)Ms36laY%?Ui>DbW8VlACS`~#q?$@gU(>ZKVdFX03NB0)JfGH0% zbU`kRBPvXb70{~~hWbRaco* ztX?>})P|9>z|FTvIq0POYa_CZ>FWag-?PiFfwCf3tqAoSBiQV9B3 z`BcARLFTaXpwXWvv3Q;U-VX#nVXL3VmiR7X74YaWN(Nse^Yxf7;Mys;eT<*2=#zO1 zBAMA0^%zq34OjKF7;T@M`5!@-x`J~AEse#f%IIbb97wBD{20GzUR5J>&oXN`E20%o zen3AqB7erDW@DqDrDau~6gI%-CDQ>_|}#7 zb)m>KFzN(7HZoRT^mr-v;=!*R5*WcqVfZ_~1%{c7rN3 zQ=}xFdp&5Sr%X>{Ix<~>z1}Y7uFik@?7G53_jIm+R&nnMZz9p% zL4HGf;GwkWz#37QS;f$um6-{BTeTYeeLnN7@M@&midPF5)k@7Lz$y~_tc~P3vMl)R z8S!ngJ80U zKKc!}1Jf&dVkiExWbK4oU zbqwC#gJd>g4`w}0<8xnyQgJ}#JG_#twTqz5kI-`@(9WaXcreWkAIuC_9Zu%Ps`R0a zrxN}I&c_(@Ztmw}o)`IE9iBf#tCiUFA;9@Icb(lTq)|X{8=8B<^dhZ3g`U?LQ_of* zR*-iXoF`~#MbMW62Bi?rboiM(NFW- z>B)OfTztWG-}(DSz-YV}#AIjxcacd{E96dOv^Mz zU5@S1e|4Sv)~s3%Jyvr)FV?~sOZRe#z}5*`Z37mc)IWgwj_F&M3BdC#GnNRTU4|~q zhq8JY&0VsL9ZV;1-3EQ)n6p0L<|ENF`6&-on()`SbMvE{J5E20CsKz)r=RJ2h&kd} z(QiRbPXix;`gNG!r|%_Tb{hKXv-33RDXhf%jPD7nM&Q{6+WFpgIxug-{@tKwb#T^W zXr}wWS*bR1tq(ZNqg@jFJNJOTPd7vA>(Fy1I(3V6_b3pZ2mAHV-TgxeV2Fe+nP{$Y zH^$T#0_R7WYbG+7&MZdc2!qc$C0H9g>v5DheYToP@2x?f>cE)zdGE}n|F~JeXneM`*Nhi)wU7RaxZJ7ac@=wzr_`OHPLW5 
zopH?B@fozVB8AZ(?rc3>?#Zr+@FN2VLotCvki#3uSQ}>R1COk|HWOVmk~9fjUj=95 zk?lU%*JK4b0D%cA)2A4C~xM<{UI#>(?-pTY(A3arM3{@R$Zzdc&2?WNoor1bY z_(_4Uv*>3n)D~!F1`wp6ql=Iovn6H1zer|2fzDeiE`$F&L9?c`+70E_u_l?xBa;6f zgGQN9)r!T9nbAygExH0V`{5%)94L#&|cVh7r! zb6@|W=eyLsw&1XZJ0CEkmEC^jT5ez)z*=cuD^KLMLmG^!sR{4LfZ1vw_dLCR32od$ ze$Rkq`f7)=E3H?TGO0WN@FG*`_LbeU>Q()E|TDTkp@6ih8e%-zOiXLf%GsRhr2pJ$Jf z0{Op+wpsnvC;M)EAH(=@@P0SDFyFBoL-`hS=xwuRg*h6Y;HvL4=3 zqr0i-N_lL6Sw`BTQx|xO)eDpX-G;E*tpuAR;HB?&1rT3kOi%H+qxJNnC$*miwhQQ0 z5)>T*1*DR>aMb02K;4Yys?f6=SWZG-^jNurH(p4sSc-f`LkDX< zjzK2Ya=ih(^c+`Lo`|g6W@V6FK>Hy$_%^|6%WaWBD_Cgdtj$(}nK#h71zcYpte}a= zk*9d~uw$D9j}nlrOr+2#gY(F90Vs4oRNoJm;=s8oGnqa7bO3{XooPse(q(*n7j}+) zvH6L?2(GQ>6DWv84P(vHCvSA@u0TugL9UDy%E6<07@7JL4%G)wvzRr3PU_x4Xf=+p zjY{%lVQH|xzn}R;B!mi9fK8-TD z%uak@AS;8BXd|J%2D($hb9F=dHK3&tQ5V6t3>;j?m0CbE4Z4_1Q0Z;Mn(!9X7|(iJ zgi$>0t<`Rd6g1`u@*+~|X^!dOaUY|X=Bk-#&BWf2UamRc@z;}xRcK|Eke9JoW=+ec zt(iuxJ@Emv7DA$m0m}mRrRHaz$;f(Ej9~KJUNzcS!_jwmZIGSIV5wipvjvxdWG!-c z5MHlm%yrE91G?3grzq=i&gJ(2^fVrxSSzz27#rcz8CZ>(*0{ z^|2q{p!ut5UkaWXH{BE7S3{n{8Rsm23j&2P`NOb|(}BGxlDd#}xID9UVg^^m<&1n8 z`lVxm&O!+@IqKV;ha}d8-)5-M>PfXQ;P?{Q-h`LWv&MAciqZDwPV5CnX2YxxL{Z3w z5&r9;R5-K!4K;mN@C-U>zLqXf^(?4`EkJr&oG+jG(CZ^1ljjow5Q08G0%b` z3COWhVMdAL^zRJzcLjJ^+u#G_&nmG`aLw!%7nt3A2jhUXTCjo?XUulM>$w}>CFo-w zgOsiTq87+vb9%=FmOLK1TSH+GTup)-z7v~{gjo-=7vuJZF0L!Jkhlks$uh{lb!F>d zXP!e=8pG!)tc3;X@e$T_3X)P5sT@noozNi(Y(h`%5j$>B z=mX4PM&xQh*dJK8!B@{p7+-yy>((Uef!^zPvbKTwM#eIuyK#3hKR^#DRsgu#0bN&U z>t4AwGP4kR8tJ=+J9`5vgrSrB;jsINmcTTKu|DFOQR_LtU~Z9%e9z>*&jEgn^7;{2 zJv00Q^IWCH3M^tIZP(&gY0VgB`|vdMdU#q6F8RK?2Xp3vBXL-!y^Q4bH_%nHYpz6J ztXMV;DRYgAN7g#hY7TN8kGy|~Um_hZSr?vzrU0dH7q^045)xPrDojW3ttGUCZ_|*H zabTGZcQ+z6ecmi&w~4r3=% z0)FLX7Hh6dfd*B9aWg$VZI%Vx-#|n4&&&?)K4P(VnP9jcT+_hbtQvdJ@3Y)97E*cm znjJv~xS31ccf13^xf<}K^SOo;SbOgzy8!Vq3x67#>$~x0NW(H#>+fmjJ5e*{uYikY z9sC&T`VPkTmP?`D$3U=zS<67F93WT>Z*~IvDyXpqN_yfVcOVy50qcRh>3eR4J+W@n zb5LMA5HCjxeuPUofp2X;5IlpPdHzvvPXbb7bh4EFEhuR&&2OOfQ0UT( zk)zP5Yrr>^b@wpwXlB@J%{W%)Y~X}lLo-h+IY 
zv9LLiJjMuSqPv^5@EEg>`p5e^k~R4r^y4vPG&c~vg8b#dLTQsuarHT*JQOW?3~C#9 zIUGI;?Q*QfFs!r@m3lIt1JaGm7YnV9q2rzr-p~Iv(1mO4eDVQlS*&zEY}HSUT@z?t z1Vbex7cy-%q5`Y|qv2o*(r?_N`55yuc4V+K$c=1T`}YCpF_PX#_^&^3kK)Q&#?+Jg zDmXulY}5qjYxI5q{5+em5ZJH%i_?5y}y;Q2u2tAo_`^D z@4-4u#Zvf`R0hiZhh4AF#^!2DMY>kf_7oaEo-4-mdA8sdHrE($-_LHq$|l20HLoG` zZ;4eg?xQIdWGJIrOCofRY8X5=2S_9`;EA`jz;GQt>N#{Jei$BFsr(mavWja3B(e#u z+`q=MTPRLDpSlJx=5=gi8akT_9ou}n#;X3ob1793y}H-z@lHnNT*OftfgRR2Jmx0 z(T5S!8M_jF-KDyx(C=D@mBKTq9axD|*)c3+Z(+=q@8o^+?0R5is8JACSrMbKIF+#5 zM#&gC;pq_b0Ec1S^dW`;si&b9L$%`UOS1X7#fmtbF?|0T+WFmwb(ss?Q{mCa(8(x^ zzo4YgX`L6|-7(1u`%3b+yw7QK3X7T?9sA1N8-;(Op4ChIIACTxJxdFyy zpvO$;k-})6>+T1{N1?5&$`W>lf59muJ+qMv^Jz?mqf@|tGb4=vx_Zzo1^h0R?Ud;X|G``3zJK*utU~!(lrP$xOmyKrD zYG`;Gvw2SP1~za7)L07rzoTtc=rD}A>-|5By$5tw)z+vD2!bF8C!iDyg3_cILT|#M zdJN4FniM7U5;`afL_H!UARa^s5a56&B!PGUDFH!5nh-#YNEeVAsvuZE825ShtN;1# zf5*7vj*KMlyZ72_&A!&2YtFf7VVv3%P~A2Et4Ko@cP_#WBUGC~KdVTtW%baPF}tFV zH?drYp}+Az(Ok(ybr|;CfOFw!& zUg!(7UUD7Y7~H9z#MnmROvUr?1Jtd@&j5I!hAEluK3h@Dypwdk8D-|_JTGDALwmyC zunyXb6;cn20F`gHN9o}^MtAk54vhuxgN$!}*eN&>ie)lCQ+rXKXTOJbAMnI1u<8#q zpYiL-tCQ(*9Cp(ak&WQ3f&4!V{N}-T4!lmYfMp5P)RKzPPk7251z?A_p-47AoQaLsJp zJZm8gL3%x-rVEli8eYUe&qeUYx?IK35Pha*Bbyz092kwVd7M%7bb6lBRC;hu(BJ8K zo#nvo1+XzL|2!D~6KZ*Cba`f}7obo!pNwZK22@6B87EZ?d?qt%6hrFte|vsIG<{zQ zq)^;Fsl|-y%iy$`Rh@pC4|pnqR#80hBX49W(9DwoW(PVrkDfg3X&5Vl{aB+g;590> z6D`b#T8zZKLyv2)m(8J;9!GPhFF}K9v^&bx2%ho84XeUMqZN84%R+xW^PUB^7_9Z^ zdd`-y;|t+)Kfbwh@Hy|I&CyC@I->Yg54kf-c|G!PCcByygG8-oCcO-ejr_=fSN9k- ziN1b@3WebQN?ct-%xGSuJpWw`O{rQ;|3!G%`1fEHACPiBOCvx$Dk(n_VC>kP(58V z2cJB*MvwhY)*$ET`+J~qA1e$SHwinimtM<3y^-LT1!p`hxGb|mejqo0WFGcvJI{t{ zC0f(Ws-*{+sh*|p6s~VV2P^VDqziYD$_2onXD}KnMDwFY=$|kzp;$mAV*)*Kqb9w1 z2J8+rxiG!9hThpoof%pegPG?$?2r5s{_gx7VWehQ!g_F`Ke)M@+7;ef;cg&u?Wr`Q zcw71rxQw*yhr~s~i%c%N~w&u2g-Ze!47CQmES~0GS2@U=v9xW zUDfGV3Ww%;Z71?nBD&WMj%F~9Il~76-;q4>tc6mvzlc2CxHZSDcmUQo0Z5}(AUXBo@bu@2DC%;x_vud21h zfTka7{Ca4!*2C-yBgUIBW;ix$2qRX6n(iTb!jCJ4qTsAIP+z7teGO1g$C9L?d3sY9 
zqBHK&{ed;Shb6Xd+Z*(L5Zme=lUXO`x|9Z5*F|pxy?l*MoZ#Lh##qB_u^8%lqQQKg z@dVt1jGllD>K*zSD71yH<@H`!^}7X}9M7EKS;^&q%zDMU89OKZQV;4vi!|U{fyDfX z&)_KI?_x$D2qcH$urXV|!DCOYXp4sZ6Nta!z0c_D55d-4%t2slrhF0xGb`q^t&^C?LOgwfX1`2rIuC)oR-Dt#BSMKZi>5F!Z0zRYcPh%B~ zc~|G%-_skbRjmUZ@-SjJ(r7M?@s5MBF>mwtIQ&tYr1(PUd>l@01_r%&39MiHam^DZ zv>5B)(v!$U9gi9ffY*M1MV(tt>Fqg2%~V^ab)$9}8x7Kmn|e`tN-A3bdNVwHDCv zIpn2Mu>MJ5OtaX^aMv8`OQ_SA(TAf0TalS8pt0uMVP=vs{Pcle89?wF)P99gI{};g zJc8EF21~8igZzCJJk3|}i6Y45awH&(k-mfn)9Eb}JMc8Ph0%X*q@oQy6o+2#K+jCD z?nB=L(8O@8nX82Ad^e`K9$c!37O4Biee0{M$y51&);H26P^2%?{|=vx7JU||%tSc_ z6y?yvEj;l8bE&5$S+ipbR%ixX^dvQRoj+ssV%6eTz$=mQ@*p)rXH}0_@Y+ESM`*3q z8&~dr+F1HF8_0Nf<8A7rv%P3xeI={c%GHj@vh_{$NsoXp5m>==EPpoIIv@F~NZ--G z(hn%EVMm>zr$U8yfYP0t@1d{tLC!$&wp_Prxbyc{*j8!r2+uUf2DL^SJ^;Hpz%hUo z>l;whoTu`@5W%%e%mv#RyzN8(?*m73DE?10{VhB`u6jZ- zmL+&=*u`jOB)kZ&#!7lt_b#AueH8<>Pk`~mv}+9)a?rCChs-<8#fY1sr&;?2kb@79 zJ>Qtjn{E<#AB}L!4XyRcc%p=UP&2w6yEL5gihj==XmfjTHDB1=Q_r0(0MD#~(VHu0 z=y?TKJ^$k*|AlTR64DYavI6(Nxx1M@w$tZ&U>Jd4G90Kp{Wb;7zlesvf*g95a1)-d ziniCTyIR@);u!Dw*a4q(dz}IVkteYMcZrfy%}`(yr$pKnwfCl zil4_AZ!;_DeDv;5$lDdiCw+@yj5iEf?ZLBFV$|nlZg6?V)eC9%Z-^4@p+{q$_AI0} zz?ui1h5TJ3@Jm(*+8ISzl66NMJT@~rKk&x^p;4~|=r=FltV5Xx>5W25o6=hy=EB;* zy_bI7`N+X2-C5O^p{*yU9tsuU+rN>4IQXhpuMYAi_MX%dhh&NKcZ~UEAls+8(wuuIf$b3EyXH>`q~}Ta zl7rT{c*@vFPZ@b5=(RuBsLRS-;~?|##8RMN#QjOZNQ)TRs7(Frx%i|f+4_k&1DO;O zrECj#twM?jKt?HlA?cqS;Ze>GedSkwl_`8xm z-beF%Gq-ow(Br`WEOX2bMlt&<47t}Xh0~)Ik&eKN`*?aBD-q*Q?$Bd1Ebwt=gvChG zlSs}&p0;XrO~y7KtrGq8WR({>^}&oLb6eb7b6xivQ0htWP5&}aSywNa&z`JdmFU4} zi1WJMz3#~Mx5#QV&{%P6GC%Kv!%f;(Vy>x;jP?yI{UvyO87Rs@-BH*~Po6!_Q`X(+ z1?~^AYVoX)mRxI2f9@6fe$)oLU@o^2SW#H6t*kbpph6uit(LkyRIG#ry$QVfRgL(1 zjVC%`m80m*6CYyHeWMbLUC08@P^8TS>~|QW3}CfsBu)g{{|3-iUC6&31JLbq&7@y*2)2FWPnxSnC4k zA>cVj>vQyPh2zJt^PXdufu2Tj{TMtnHn2SXR>KOr^V*54$63XjyJn_WKcG%!o(#SH z7Kcl|KOA7BT2Ox%Gx>1FKMKzlvR;0d^}%CM=N5CDHAIfH4!Vw9ScTA&MZl&bR?3WqjREGK z(lwe-%b4X?(VJ&>c4fXWEjM!;x3HYR!OPhpU5;9sPmVSeFbzUV4CiBdFeo+3kC%XLZOS0Y}EdAG7P{WAh53 
zn~TxL8T7mxJvoTv8ke*eNIznAGZBg#jhzkE^}K|9R%5v0X+>ihPrqj$;P?UDmod6g z+M$b)z z^BsX{5SI8bTJaU5=n1Syn`CCv-SnCbRYrr88T%`u{-0c=t_Dd0>O#2P&9KPMkM7%MK_NXH5zIHyP(UBl zyp~Y1!`7HEk3O)Cvo##jBtDlS2 z^PG~YXpZ^8J$VNjio`F4KC7^g`r*`Hb?gSX+@`OIv=v$-EaSoEA~s@YFp3`i4!|D| zWye98RGz&FJZ3+Xp{E+igERX@DQtt#w&~!n#TgE5 z+$GSvnGsNAJ6cvbu#)a!d=7SIpts=pRIt^O8L>JTt?)f^IsI)z2aRjBN_-upWHG&^ z!KJf2yByy5wsso|SSzDHGWQztq*Q&tlh&Cv#wjllS*OI>Yp>Bh8hPCfO}oJNfl%rQ zI2=P~uG7Mk?)zbf%^!G}|L)d22kf5QGz!RnfD=U+-E)CcfY}P5u3+}UvqErY4R+Ql zsCSUHfx#NVh|tk!nzOi>=hpG<#uZO5=?DG=87msiux@o9aEzx-4f>4WuNlF<1sT`Y z7k<=$2aSPfB&)u$U{IUYaX;+nc2>v3X?+!ndFF~423D~t1qN0vS-=XiN7dAbZlo_E%Y)<%i$fGWLd=gwJewB1U-*6DFyMV!874R7r}^XJ2PI~tF6>ccc+ z*61iRcsmAqG8@jhOY$3>k3)8?9$Fu2)MuQNj8_$EcL8U!ek*g|YG7NT{ZyW*g4AsZ za61Z&ad61E&B65K`50#dAE^Fr>nS}1wZpI|8)(x8*vg_ILy*zuv0w9> z`(8lw`4!yt?J$v^+wp7s z?mq6s@mzC!X$g$-3v0PoXUT+PeMWSv+4p&HXmW$DT@pkB^OR#R^RmetNVfCGMe}jw%D2HNwmv|?;qiaf6IW&{pk8Sp79M( z?{YdkUVs;%0FYXHr75zO&C~iSzef(Nck>_?w>S7{sdn&8Wu(RRopnX~)0aN$K2Rr_ z9#i4J@2lZx#un(%8Z9mjE}opz3`}mo%Oq^f9dx`8((ng1)N}O8vIa6YUq7Yu#VF?6 z!&r-=LEAC(9f4&yz>IDzi&-lx(A&%ClRGgB;a^WESprUEAg8W@tPNu>%CktQ5w{zF zcq=w&2i)JubMAYYTOCVZeUTGAsNb;OxJfH3Pm4oczK!RJ-&h?yPwUq7cx{s z9zdR~iIwN<=&%#ZVbdSPQzxl zWoFCG%us`sf_-*Czurc6Vwq=*K^z0jddPMGkLMwO%&!$AUI#NhczR#rklSl~bCs8e znY}QO=`o+lXjaC{NuM+6zYspDMvPD_xb7(&ivsLI5pnw5^aSOm@3(?GNytiBuxQE% zXOOWtEZ0b$O+lB;1+EIjabWrl@S0H&I&Wk&5Z=U|m8O*$S9&+?AqTX$9|_+A4u3=Q zlAwxl$xV=(*1StpXEf)anY1^J&v z0+V~3uF)St9?j-98`=1&vGCb7wC4sJH!zPCh?Tpaqc>yqe2+|LOn2I}&nx(`Hd!in z)&gr^u!#rP(SNQg-9?!U_A_baStv)bL1&o(tV!GkNix>#aUeD~(Y@7G%wm^-vl39R zxWp?nJzdIl>nGp~)nT#tNA zfgZ)NZ;AAhM&HKZSi9Y`$!?-IKLW>kp!c1q7W}V@hAyLBIQ@>|Dc`7Fi6uZEqiW|O zndX`KwqzdPaA@;0vhV4n(!Lm&T3Omye=9dPW6kLsoDo1~q-24G5r<~=y%#`w0qbd0 z#ZstYhEo=EzPsta({exXnu%W@tgH=F9)5W`O*S^71@fi;IF;v(6&nuKZ*VP>tM%Yf zCjW z>aGl0z8uL|ON-^$kC=cr!rqpV{zZ?k15*a0%|&(}N5b?qSu52%ao;(O3|kA$tS94p znpIa5(Qj+#j(~>$;L0tY(H|8LEZeXd-2)E(2G^bj8*7dBMAAIRB0Z3fwa~8toV*Hm 
zJX^r^th*2!!6XR|7=1T@Z{47n`8x|3;cM>b*SAWd(RY!o`+I`%OJH>yI=aFdiwrys zq`s+F0BSP~w=zOk{#FLNYT%uRKJ*n@?Xf=B`>>*GLi>4K>kE&JdTEOem!S6xjHwU5 zHoRdLJf_midbBVdxIK^6(+e)axwny$3?PW2m$vZGlc8#WQBiPw4QuRv z?ro@Els4Vy-JHy$*s^O}{R9d45O|hALF4~Y1G!1%yFU5d$ds}@m9}bAC*ZO&eOajH z{y`?z()ZK-tnbVQRi?)9zF|c!tCFq){?Axxe2S+10sih_Gy$u}f_EU->(OAWH{~LH zPKOeukk*yZb^~py(c1{*?HY7`9|`%A{>{AVgba*CD#Ms9>I2;Z`Zqg8Y1j_tzID_G z6W45Z^A6pL+pMC-@$! z>OTpKbOJ4}&-FLp9aDm`p2K8C$y-xl2bG3w0J8yWh zdMdOx2UtJ(7C2CgD^}4o5BxfEVO7FB@Tvp2TgS_)AXX393-!-p=@Pi!hIa+)Ba}po zDgxIi;Ht}LchDR?)UA;QqoGFgy9tdmyK^nLx#~GfZ^p}=zz!!c?rKIHi>_Wls;)q{ zvW%Y>sCPpFcS94fa@&Des_9?Nj|^_Wf-1R2byP%;ItMtV(^E9MQyiV_#+pAJ%6$SX zLS_A~DagLoLyk7bO8Qp&E+gOK{bLgn(3W=Bpn5kx4MwAuaLpX=v%&l6YM|2g{RquB zLR<4QTreeTra>rI?w22e45OL;EJcoQkaM2h!8Dw9dyd#_tLg)?2m; zU@s)L9W#0?Ek{BBdC2Q*MsP1jp19xJo-0vY*~Q;wU>C*A(~x(KHE=MB-=Xj;7N`@T zO0nn2Xfk@~l4M`J`m%RWFQ; zX2L7$yf(zL+D9(NxeXp>5Vk-cdV#;$3eO^QW6;^{eBR8n*Wq0}+OO9@pGa$APXX^}fflJfSw*gpJt9dtNl((=6o9H==cZFU7J@)cOG8DdD2&k}^_p>kH zS|pM%o3YYqYc-^@;8KRaR_ExA)wCYoWhB+=bT6V&*83>VGghHpfV9>F@&v|g&g@W} zw}_oszM0I!;aKR|@VPLYYz4KDHdul#8*>@z!}1{j&b=Q|Qzp zdfy5yji7pl9wS*_6o7}$q@MM%n|Z|=ii44xKcIwX?zMn+@4$!JP{bLq25a>@jBEXf ztx)IVfEvb`enl_Viu62Y&(F;R4lzjezko3h9JZ2xHF>N$y8}4Q8rQ37?ob=}W2M0D zJpDY7B{ zk*K%eZx6fsjE^*!H*EjSN^32^Eha3_F`SYk7t|}0?A8-6#7P>T!5r?yK8;I0dbt98e zv*>FJ5_THwKcw9(#*Bd;k3#jD^lkO3)4}@f8n&xUunSQUc#JwZ^XFTmk%JeZY6@4o zVFCI8gEs74q@p@B^=+>?lzfpf%kZffT+j#B9tkyWOh1PwR)5PZcaqPg`8~!JBigKh zSOA&Oi)5T}ek^tZbZZOc7C|w+d}eJ|0Mm9zc*w7}3mRKBtT9jdqc278n&+fwGt8a! 
z$GqS}@TMtaE(U`9!BxFKf5nv5W#lM4Br~CU8LnpV`v|mo08UzAa0e1(y}ZKw)k~5B z-o|m|fkPMQu>cq~Wpp!$&vU0D@^Y5v+}q8C40z^AZ6GyYWINDU?P?a|b;C-&9ayI6 zVAq9K1^J1m&w=!5{L)>hdmpl6SKuFkR?Y_wW4H8U<)KY3xRjd_DnN}l=_^Dj{Yg3Dz*%~0Osko}_1}4qEzQjO zDH@U!O^JgN=}5sWV2|fYNqA$-buOs#EOci+<2n72alrHx&q}jL`FR4HTRK?1zKKO0 z#+<$qC>w&iQI6H=VFa=lhaQ$hgF7*Poxqkhp`Ul)q1n1WK#!sPj{#5j9*qQi0DAXA zhrDZaTv7V4-sK@6KgkICzK#V_7r`6Pdwk9XMXlN>)H%Wb4gQwK3N7M!BL^R6?)E1v zpr_gGC%F@<4r#3BX5f@2#&kbYNyMl!q&nm|C{S|D<2{fe_pECGq0Vb}JjaV}R{w9>SW@A32(h5e^ zkQOmW0w#Ah&tb#NR@QEP!Ik4^yfv&YFjHo6)pJVbFb51^4L=22l*;`g^m>WAr4-{q&Wj06}@$UW7*afW|QYpQg{+%(xqY)mWRm(A*5{5jr*|bEND8|>>1uKt73Rs)%oH&?6$tFPBsn_q=%o-*4N`ql(H&yX03 zj@qB~T+SmcW^#He>>dF<)so#*GezY6ra75)qY(~y^=IkYf)b`#dz_eZnx8-mxmU|x;^k4WB# zsx!ZDfFkZDev0l`9W)GEW6dP}B1`%2X}~p+6LV~epoLo*?`g1Ui>p;(Au*OC&t!DHEKskuHEtFHe2i{rr zFcJ;Ef%Gm1{<~18BO^}4(hsMHXPK#nA|3jjcLJ$?HS<2(KvQkgEOcZ&{jP-4uFb4t z7dkQa5g@dpsqcEdupD~mUIeG6^eR7UK%HuQmxEQfW<1$iXz~kVm>c+QK*vvM@dDWC z#T`TsbAa~)W|p_$Rt@lo4rDzUS+5IctXAe*td;f6Lhgel)35mpvS7|r8~EE4ez;~b zKhX$=;$Z#(a@G(@(C;+`?2Yj9oM(3&%xVjHgsoHM2^ah6%~PU{x6Oh!M(OE|8_hR! 
zGoHW(f5*46-0{5L-t=b080TV7QvQ{8#y!ttHLtIy3>v0SXbL(STIbE<*)s4ujGs=- zwL^fxx}EO_w7Ubgx4`8m-jZU`@w>pd3fuKNTzd+>l|!&`=8- zNk7)E9)ldV58iGffaDph`$ZsG3VwR67cU>#8o)^VHR@z-L8p_kj|4?o-B2XO;Fn z@;#K^zCfNjL)%DZ->2x|4`7~wZ8je5IqcYeAeqLDJ_`wYAIOrKS6$6t!%}uaZiXT` zSwMIPO{)P8&5>07IYqH_-|)#(9U@blDxNbu2f=xMxVplmWLl!jbw z&ixTUmIYozpw?o3^ks~JdY&|Ejm-wMPeFRDUe^E{e-*3`U>RnB`6YO!HFdu#ls#$w zr1jeeG5$5=dMv%wjo^rU4CwC#$R#-Gkt~z*Mf@XW@#VAcrd%X`1Drb?5@GI89szeE3Z@j!&#v`~k zgsXkQbvrh}lVY>!eHovtGTJ=a^@8s$`92(}+QtY&;oHNs$>t{w8YZKy(acvn;r=G1 z=m4BOfONgam=n>ny8%@T!V$BcP@xNJT5}xE z^U3H_BNT`8HadDaL}p1Xi;nae1i?p2Yi zqZ!|KUOnB*u>zm7Ca~f~1nr6<&x?`di%_j1v%*VgUpRO5-x^nQ68f2;u8*`Z-)r%1 z>$wY$^0ZNS?SlW!fTSc^X+A>g|fz+ zH{*#caOuoSY9c-8vvnRXz>|qwjYg^-M4GIMmIbz+9rHjSueAb;XQf-c0-i={?PGmW z&!8t)kmzN!5F_U@&+gK*Yn6};IOJ)ZC4&2{;pT@>?JDp_B4^#vfJkPSeDJ#ja@C0W z#*+-51yavYPgP&)K=>>d zGJ|!a_R${o0Odr!24m^0ljf?#ink@`QOo4J(I(b*b9krD1dhRQ;eKGxi5|}9N`G)# zNMB|m<^X1Qy7UtnX{wz!!rKZa2cb+}ytUKe!Ez|-xhQ2AZymPaU-UK`7~M5Xfv5U) zjhitd=U>o7-%NRWHUq|5ac0Ts(cS?hp5*DdKe?fpzKh!Mq#~GXW{wE$tR!NM5hpkz|2OZ7Nbk^7X zXx9U}4q^;r5}Y^7!S6!MhB0upG}Lge=!ML6<5}xzna!C6T(Mj&gLSe7;TT%aht6xk z`$zhk4rh{rIiGV@cy=3*EJSzv@V^e<8ZbM%vh^KRf9gS=(aRab9Fz(7MNI#|_u74Yo}B&s~rx&z!Ru;!J)U>T!&mVI0J_6+SkA-ypWX*k7TY3atdpc`np?B7-YK^@zSL##Ne$#_B?l$y38@pg`?GtEw zS^7JSq!)wEVZh#(Rq|YVwE~hWPOaBk^z0Mn534vQV#^~#&xOF%nD9`XWMSwyo4)nx^`_4={N#d@`dbGhPZ?PIqhRnbl+MR!g}GmhS*<#7 zm!MTY!^wXH@MQEP`$Ug{ZkrRk!@^c@$R2WRpaL)|jmxKSF z>*Cw36$@4%bs_&}UMQ0SzpR112+Z^pH${)FF8Bbr?}S(OVNWw@QHq|Y!Mz9IzpL|s zVB*Ou-_Z9l#(WMKY5+kywBt?i%ZDv&K;OBsj&Jhce#HMBW*g6fdJgE~p<+(PmamT^ z&;J4cTzo6V|5D(YgBe9%s1l~9P2XxE;Pi|a;}P?tRgcnQ7}uBbYyR<*frKpzzA3YD ze6o!)F^SRIA?b0*%wL|sQhXbL_A7NK`N{FW{g?;+zh4Q(6PksTo8F8h44ql5XVNo^ zoBjE}@&E6+|MsG<{$coV3|Z21bAgqwZ(1X`UA)4tBe+ zP{tm4B8oWa4r6|r4Cb3y*^J@uWoD@j{GMhW8;eEtjba7; zJr8VVcUklNBCDfHNVu75<~B`%HeJBGB%e~TbDmcc0UTCJxdB!ifj}R1Gi-ego<7X# z$hrmQKIuy+0>vgWJEU<(pOO2>J)!mi=B_Mei7D9QSe|qL+nQ35z;0FS(opYRu2|K{ zw?#c7*J&M(l{p7If9HP(xHOxdjBx5kA69vr4SzkIH=TZuG3zXc3OCToU0CQ8pzQz* 
zb%3M~W37QZw`tiADC^LEFtn7y)~H_vjXg)MAu_ZIS*eeOeV%olCo$@2-39loS2&$9 z%!XZ#^{fwlGLZpSStS^KF*d(bAg$+s+{!}6sqNre7@V;(P+8VOXX#}wGT}R^5gVy5 zyu*yH-!Pl`XB+deHLs6DkCy@)=^NNxATYDkcjd*5UIGc&0~Zd$_cxFzC7~^|e0xTH z9BVKh7(MZ}E0}MB6VtH*g?K&*j@O3969ddW0U?&BF3{@*{_89Znd%$8OSPq4njhI36GDh@+(!$fPmoj<}_@!UU zm_*}6ZgMS=ajnYJh*^Ict&9$ImFWqr`Q9bwc3;H|~@L@K6 zTb+0s;~L{;&ioUM;wd!Y{JjMxW*r-=`v*F2j+Zson)9^l2IomLr*`vvHhR$vx{6aG z?Tj+CI_F(_UCk3|z^lh~BAl_>pI#BGN8b&kt|oLalIJ$?8Q(aK=gkmV3^cD`)5-(q zWvJoqc1^5}zF=cT?y&xF&0P{0ThMz$@W@~^<8bBx^C<2a>v9u5`L5?1_ZV>S^y!eV zYATk&SQ}4jZ3~t~Vm-8IRe}6> zM$pr*WxdP{mJWg;#=U?-d_&r?TPXEfYjCXIW%AtkVVsXRX9EvUVX&> z*3ikE$F}gVJ92juxOxDWIr%yGzL6F)c$Y{<7xYl3K_g=}&LL;X^l0|`XlM~l-*>oD zm)@HL%~tf_ZRX5WG;%arkP1h9_nFRL&*Khf2Hrw{O9QxDG4rpa)epc@8OY>nJp9Pu zSx=do3$A+W-QW3w7WL>aR6%hp&l#f=%E+0_^_v0RhX!=-&g|lue)qu1liZbo&!Auw z95@FZ57Ku}tfzj42|%R}Y%6fgM1owiU*$btuYD1Gh*|J{GyNgk#k+hSkF2?qTAsgcm}4{e-wzCzu{QCXB&#{) zrDyGqR?bI9qbz{0v(-O7xvk7ohYy&n!)*FxXVVWeK~$8B&j($+nOXnGAtb`sGq zPafET#a;n@tOYOwX{(D?tfXg8gf7P1uDpldIE+ao6_}6p-6ykh?>)6g^b(N&tiAxDtJAL=2^|t z6C1MN@(axNZCE?#3$&W5nGEi7SYzXP==Cct_W)r!bB9?C!x_6LwrUl2VHH-!x^C`d zb!3LT4-8sD*`tAdd;qFE$}IO4J@sKkqr46=a}EHz?^wASv;0heNhHrZ7isY(aOEO6 zyX#@?N4+yOx%&^+RSkGoG@JKl-q5xO+zf-d1HoMXlJ6~r@h2VziY&&Mj=j=?PlrEa zp~`hWndhdbZYUPvKp9}g|V`bQzL?`Ct{Rp0u(C`t@J3F!7C3d z2b$9cJv_`Px1doLw!}IrQE)yV-#VaOeXt@g^KCX7I~b^j(pNlErk}AGwCzhT-{Kpu zh)fw%+XpI?zzP(HJ6(A85N#vj)?#GYN|w8Ud^;NO3N4JgOQu)P8F>$QE?^rD1HV4T zHdum8;INj!Y5w*Ddi}=B7%?3R=3!-RbYKr;)l6Xh&;R89B)DZ|N7ocqzIlaj1CWU< zV6z&vH3n}1G@1@st^5F$n zlKU{jMIbv{krqAK2~f5oR~7^3FM*vmn?TL&kEE=H%EQs4Q&^2qHGi|W-1+eIANQ|4 zh5UXj>|P{r3P0b&A>-6b!Ive_Z#H)_f<0i*2z!B9OJBlmuE|sP94;}&ZX_(4UW{FK z<@PePwQ|8A*6Jg;n?ld-r|FIT1Pd^d@uQ%Fvyg9M-RRGiywCr{YAPkL*2RH3hWqb> zpOtTnDE58wIeOWMo!r2zuU}&zmT3)78UyopBzp?|-G(CiG1|d7eKMY~@eZ&pf_L+g zBE5v3+^rYJSh+>PUt^$iAuBmp%ji`*1 zi@`X9@jX{WADSmMw*v>Scp|1=&m7ErrRdo_XX#i9+=|okJFJnuzkzTh7qap)c>30O zUtoRoV(3wR5Gkm}|Iyg(5kQa%$MvLo{bio6L2p8%H&7ay`)uEhu@S~R=EfpF0?l*t 
zteM7n`Rs|*`B+1~f=p!4-y(YU`lCQvf*ud?q&`72ls$(!hAW}(R;_Hu9e*C+ehwri zHxkx_t3_EE3C~}3DfPk1L4Wil>#sfpKaFTM{_#HOtF$P|))jPosXL#m`T6}Xr9*v4 zcjG1=UN7w>_x7LZyBC>i|e7D{^#Ggpk?SvzF=H;cCyjP zDM+H{qrHuU-^QA?Wu_hnHMN%C!cXn6nZK{GhH$l0k!RdTu~wgF($-^U9L=n3WVgAv zX6~6w=gz_co^S`46IDsC*aBq;QkKlq<^3|7Sa=N52M)~0t@sj zu&e;qWJWB7J}*I6F7nOwto2~3@oazAdmk{0d0z{$X~ubVp>1pWvu2z%4_slN1&<_V zCZlP3puJZ0ih-iruvh(nD=*LL6+8+prZbxF?k_OHQt(M)&iEcEJ^RuO=yK@Wd#tVo z1C49v5y91w0euqT@)~sXH7H?D+;nCwR}zh}!+OlRvuga2?|W%If^VO4Eu58bP4s;w zeUIZgJyapDhZcT4Pv|N_an5&W-ic+*qn=ScM}w}YK~Q>J*DCgX38GS zd@a!l>j#cN?rJk{u4evlC(E<30r{0R9ndg8g@ zee?>Frhj(^6xd4-p6|LADO-%D^aTsgLcYc)BQEu($1wlQfU};;;XB=TTv^FXrCeF# zxg|InL1&dKYrB^O_mYfJ5SmWmS_wwCR#Gz5H_I<8cmuh@`td3~&PVok^Zx?#>I9%R zm$eLbCLCN`56+<9J6wx`vYw%|ojFvSejbRVzNfP+f#SyWSXso#lb@hQfBFoa)B1Zb z-pRwmPc@N^ib$5RubI$d z2cM_&L=$AI7j%0ct?!4_X2IP_T)%-dF9$yJ%9}G*5?lnG4?unM(BqjMYD435K%4*;tB@t%p3J)#1kH-j;$)frPT+H@MH?t z`+>WejnlBJ={#A5eyq~a5dAy~Ev)*p0L}EY9pigjFqi3XZHs-hB34Q8-ww`Sqg7IM zHheWI^a9UBpu6*stk%p@x4AwS+&dvlMtQnFwH>MOd}w|DgV3vm;3OZ6u`)x#Ge@L< zT{LAnvUm`w-3B!4k^NEhWbDg2V7d=Emx61#qhB(XwXd-d)oIrVob>X)z&J)VCqgs# z9>yVG&jmA2H+tyDHP1D##=7qk(%UnzO~&6F|B=MH;0Bs~7ryA7KFHNf{u(#b5;&|h zI~2Jv>R)f4?~$KD+mT@68K>uvdVLI8v@6GVPk1pWGa5+K0$FGX-$t>LNdb;ndPqlq zJo{+^5UxiattFX?v<~3O(ok?UeJFWW<~Q2E2h{7tIOg3OS5ghRn-8s`z&{OKq^fU9 z<!C7lV>gHi~{aVwcB)ai@R+&Eb~s^1f}g;)zknvZtl|63hpBq}rer4}s~g$Ut8> z*cQFg({Pag>1awNEZBO+uY|-ULj@}@9OZfI6%<9Dwd>3It-}b`R&$?U_y#flV7vpb zgQ;8CJzpPaCkNHI{Nay1pNH zjRji}ya8Kn@?)-U1zx#62AY=*{I1rcx&$pYK(9$izghc5z-T3Q#oFx&jM6&L!mYeH zdxZ4UN_1tQJPoBf1axy>s~9cZL0QJ=d$F7`j9&#<8`0-FK6_5E@$e&&z!)sj5cqbP zd+vi+hc64dj08esbRLDeW~E*Ni=}Y#8z`&oPGdD52_IK;Z4^>jo|g5HxF66T>*sC2 zF1*e&#vipuBF_bFPr+%Ul-_|y#gQbVJv%c0f56|7=&RngBGAs0lGFGxSE?qx{Z2dM zj@iMre$aPur}Wma4@b-+6_R0K7eQLrkJBEls;7h zz0%KZe&~;I#dX(lWZHT#`&r41;rUvuhw378R^P~l?9?ExLfJS29rX)T2H)Y(IwvqD z^7i8@ZaDTb4jNs+KI*mhl+8;>MQ3oS00k3yGwFa-&4Z6C)F0$JZW#Am-;9D2W}B8}l*){H0;tTqyTO&Jyluuni64RdUEnw8`YO*f3Gkl^ 
zw#FA(v)~=z*Yjdc!UK%rjBVZ2kMT*Q!k=TnHWyyF2W8ybA>PFMq51AUtYB_3GE{$G zEIM49eqIi2VHi3Q4*#v9H-t|KwEHLhCqda_z)}wyKLxc?pwkb~wmR@GqX)gb`YPvw zrM3CYF$`m@#kAedxPyVP4b)skPv_7vb0uD(=Vm-@9#BF0J^)uo!M(pjuWmpT4iwJM zgRx?-2CJ1a%;-ipt_p19dq|6W$w&BJ3eK31>mET8bhJ)g2PESVeY<8ai40pEK+RkZ zg}Z}eeXfl{CL2Q!-)p+icO9hMJqkU551=h!^qP$%dWy?vpl$|@-E-RlJPA;&7g!r5 z76E?CxNluOBf8^xzbp=(2cfHRYS}W zqY@JLCK_><`Pa37Z+O2KzBtcmBaAWiO>I5&Sqt5*dN3DjA5Ocb(9LQ-R@s}5B=*I+ zmw?N@bD1$>9o0s>&qQ!-4o?`T8xBsMy)qYQzURq2a4-{kKg}vDg^~O6cPkR+Y3B!k zz=(@hjM<+aj)P5K+V_F_Mkl(H>^z^&TXZ31A6}q@75nPsxipaL+4PSLB6p1Yc{f{%O9#kgNC zu-<)v>(_vORnVf(;Qafv>;z=ay|KuFJ2bVxIRWmK2bYhb$1>(FYt!#%zVv;>2;d6z zd=^PD8_m6j^Vp;P!Af8nxK5=vEBTo1@iSCu#i;HQe!!eO~as0g>p6-Db zMd`!(`S#(-C{ftAGxTnLu9?w=uog|Ardh^2k*ab?@ly6l%s2F$jnecrjK3A(O$qG$ zeb|)`k(8QT*YDH;Z1U6RX5g5BWLwv9AMHQpoH{>!CRu|vjCNI_ zp}tA8PO1UpO~$E4OTBY>;citV#C?=?^l+Zh4$-0s(qiW3o8Z+NNUMWw8oYgu>u=IV zU)@fgH-B6|*CIao^E!R#36|#N;E@%*-(tKe*tn%!vu31zGAq}ff%;Z6e2eEg1mAv# zhn|^Il+|cCXkt9!g`nU2q3klA{2N-@h~6JV8~#qKN=R4+a$|gCLp1LOpEko=vzqhL zwDF8+9E?+%Pg2*6`zUn3E-i-fg#JgpefJ^R{xxgcn9SKcyP8j-xLPTe4}2O!DL;q! 
zT@0I}Jf94%OhzJ#GMX)wB!3(5EpKpF8Yz>`FO{HXsFy{2b3D&z>A;U(PJN{Lu=2Lh zyPXq?H0DR|r=#lwHs4;2%FSo9POL;4>i@6Pmc8R|=(_hp?fjGj8f}JSuKc5s!&oG~ z7M9csKgNn|LV7)W&GSO6l5^24Kj1|SAdreF0JN+AQ#;W-0h2g zm1Rb6$z5a9l6ZPFHg!KeSed2}a^)L^9*vLasR$Y{ADdGW8*8R%G+Mllwb7@@$g4<( zUWCKUpIZJYSe@atdmmeT4SKns(H?t}0TkUC%~L9j`wV$J`ONnY}1ZB`)Z^x+8($Kk-Y zJhuV9#6YRzjJ*-dtf6i)By=%-4}^AW&{BPz>)@NSl<|6F>3KPr-eErT+{rUsH`6nmp3MSOUWUM%75uK@ zZ)ji83^w1Ny2Dw0E}3BaBr=c%j#GKIelXA8LAR_jRUfTh2AtivdmPFwgq{bnV;iw@ z#*6;KJ#!9gpnvPZpa}HXj^;l`3$tBXgT)h2V;MR+7&-dt&(>D?^Zov3?tjRt+nj+T zaKzQyZ;X7M`C<+5SS`!kiXyBA3i7En5Ke@z8~EQ1xhe|1Q?OOf1wC{RWL-~$8AuDk z$xNnf=;Qlcdnl{-&G)f^-1SVaLEvbOu&?=D3+zYuwZd~tq|ei=Z?Q(YjpP_<)gL}Y zAi>ob?IMs)gpU)T;Oq2iPN9)`AK)D@W7Zld#@6fyM!g^rv^kAEFh;Eq5}nK(xdKf# z*Wq=(ouI#czg!14(%rXkTKKpJ*`!>huK?y=g|Q z4B@kxv31~s8IN`7A)24PtQ9;9xE?s`AMVZw5m<&u=8NO>+=c7LMJgYeNW(*{oN8iS zrt;UA<5pPPPT*sVw_dxO*ogYGrQ{SDr$umg8k>)!<_&K9R zfrFGbEBaQTKX<@zFRQ;Oev{Cw$xvoG{e4EOQNZvlJbXB?AtSLk#w*J!NoBFZ&fBJ%&dmlnomN?w@0S%0ipG=#=s5 z{b{R(Hy$dS_C{xpM%%8#+xA@7dwUt)U19d9i*)vczvjVTLgvpvn;J-rlzRa>_6=x| zftB$z`bD&!NG}5dyJ&RCaz^d~UI&;zM?+hE8|Jd_MTd-?ss)$c4W3xQ8sY%olYwv# zcJvr)hoQ*d)5xp!ElWY`Cy-Qqd)=T?6l463JUfR=kr;691wLn?it+2dS6V@#7N7L) z8jCw3kl7TZ!}?}=w>@2LC^#86ZIlJ_PzNpuhI$MKdV;5z^8Yd3Y7>e8l}bK)4@{ z#Ph!U*7tc7LN(6B(N20%mWYHR@8^-wy2mM>x7jv`6=hI1m* z-!XzYwb4*;2@s`#)js%bbq(XxBaw@`^g4o8b&#mS!F$m(G;(I_I)KR-+V^?A?VWxtNc7+W`wo6RwD_%r#Itzc`()^ z{StJu65l~Sn~OOJeM|>>R~5s74$Q_`hN`f+Kj&IzIdZ1- z_Mp`R^m+lE`2%@VsV@Ek^=#N95AFj9=o{>xZtq ziF`f}C$0SzhwimTQV;UqxMAb)J&nm&7V8}r=ezl_YRyXfZHYFT3-! 
zn;t?Z{F$9J0$j`x?!%lH1NP=6BnLBrG3mK@PMcwzwN(J#qx~Q#5{FI8g2JDm!9T+z z_sCWzQ|KpD@n;vBm>-_M25sZ9v1e%aCiJUL zOEa!srsW)LMtwN_Ay5{9!=5W|Eyj3oY{z(mp-6F_^n~gufv&~y*Ewx7Ix3V|yq}bX z2I=U-bRf63+kB{&4ZM@--KZk7EE=F^^O228KwcS6#2~4sz`QXu?SY(j2AB0f)|S3= z1$;}yGNc0G75MT!qkjs&tW4>-rp}-%k+%^%;~K9Xa-t9N1{`rWy)+#13`BDP-r`fq zV6Rh;-37GdD0<>(RuYIsZW38{mju%kFz}S(XteV&sBET>I~60aBHyyUT}*!q(VC5L zt032FGFSP&kOd5%fUl>&T|%$jCm0RY+*cR_wq23sF~GeLd8>l0GUh`Mvh{qjk=dW2 zUSA~nd2FxeuOwn|p60*NHZPwz7(!0fzj;9mxLj0!N=4i&zE)3=~qLv&#}I2-A_1lc*vTGAc; z+fc7RYmQP_qh$KN1ie-u!DYE(rSV^(=Tb1O1^#A6-$GK1hcN%DGurk6GUR^RTS!S6 zIJFFkc^PT*JiPYw{~cWO9sdRr(}`!R({mO6_e4&jk(RA-(r{k3rq5tQR!Z(qm?oe6J#n-F}-^Slk5K4#Qs=kH>#$@eh zyc~>?o3?RS8RPeM^6V^hgB8S$t%7N9j;z3<- zY6p&HM73vJeSc$u@%1-Xcd!h9g?ABu^V9Y<`VCcUe8U;5AJ#0){Gl}Eix$-3ZEqI~={~v|NIqCf&Xfq7Xm?>&rctP&F z57(H_Mjd+gvLod9^KKlU9z??z29$||DrPQvH5Ah*d`eyD$E;%^jAN|OmBLUjROzq- zcf3{!y8HjHm`Ga>V+M$WhK;%Ina}Nl7Pboc$h>2=xzUOtkC@-|REGR^)}XZYe|=(p zGQzM}@Ra=(4*E4t)E?~VVJKUO73Xs7h4BnGkzH%u?SbxHk==KJsvd3Pu{?DHUGVgI z_w7nxL%JgI?#cTOV?Dt2tO}+=J@?2@VJGe~%VuNczC|XiX1|2>dnIgWBXssF#yJL` zdm{7hGi_nsorKkOHNTcOh*W09EcC=Ylu*o>Q5z@d-+jIacxk4T*%bG%A%C!P@U+5y z%rKtTaE>RK& zgxCu_&7e#cW4jOONyVu=;W_H9c>@~`ouXI^+~&u4M}0l+Q5Zx05mI-Mk!wKFj9}$w z41HTP-%JKiC-an&RG_GY%oqioPLIZztqmyYj<@e3)@97V=3WMptx(76LhG5+eA`>V z)AO+l&G5E_L2omky9a%n5tm6@tAlt(z%%q}&8W%X5ee2s_&WgD%vYJf6J|?zwqpzC z3@fRX3oKa;+L_U`2by@U@(A9^l;9|^yn)UQr00sPydwCS!-_(G-mBa*)@md&=XnAv z808K&`7tbG1J>9pfY{oIQShrfQs^q;5oU~n;396rpsvwk*Wger&y)uSJsy3K(OZnM z9@}A5P#ANu-qPx5wfnp@I4Xi7nr(Oo% z@RU2Fm40BfFM`>0JMfmovh4$g2(Y~hZWGam(MZLiU_D?~w9;#C+HG{q^H{8^Aq*xMD1%-j1QrCA2$n49*__il%7d zMplwX7_}dE*tob*MA&Yih-bV`K%NM+7l8L?M%qJLGx?2DjX+ZN0c$orN1=T;u!_~8 zu5q)A11p`v|2fcd43IrSODj-lbIfBbgAQz_2P=JaV%*{I);TEwtgLx3o+q?3W<&Od z%B_*wc%UvyZ~87L2KuBmGCD8?o*L7wFLNw7+yq{Cw6t5s40?uxC(4e7Hd+BIIjZ$# zkhsfy)|26#y5O}O3LA^Blp0TA7VvR2tR}rh(npcNBi;!43{|^3jtqs~0y2U2Fuj_C z)<2+Y8Z>t&=s3KH3Mg(|aTGH45_+YNI|(YB1M<4qkO|P~VP@@KNWyq5!)jW52n{#z 
z`~f)l3%~WiNQg!Sw4DkC3&EoeNP121_#J!c3A~=7)hVz|M#33^oDx{f{jB|*@w4H= zTI@+UI`|VZI*qZ~LxJCc-*dT*A9n}PimpreZ_W1hJmb6mak!(;c|R7zN`%IL9tB$~ zIp}Yz4aR1q?xpuC=`FGeZneEb*t=*}fUb$IA!%EXdcBdJ zf!PxtjN|8$jdE3jeuM8*KuzTn#vQ0i%5_dMo(z-A`4`-q;fF*}&; zyF8a`C>dZs|*#1QpGX90zPA!BqNeWM#hu+OL87%jm5$ zJ-FU!#*9!Bm=oduYxH5=t-ZitypX=hoLIem&{S=X=h+nQrLyAMhO|VW8>jI^IbVMT z2Cl8rSi4@vmX$=R;*q@hSgBRqJ5M|PBGIf+7GpD>L^d1ZXY;hjpIH67AFX#p@28qz zt;d+aZ_ysA7zN|mYU7dJ>#Ts1khP^q-ef3P9%e0yTsSnJ1D$6EcK#Up zr@!iSpwZSNcfU@5wr3J}>YO_{`bPReRWllmchR!fhU4ndd1yBe+E?V-DCk_BYo2f6 zN#En)*knfZ{2$}c&7w_b)nd)pN0{@cvwF^l{B~#7Sq46b!N!RByV#*JNTDax_C@cM zsO`|d02FNxq#uL1n!1}&%@EE7_GV_g{!0h;GVr=LqYBLnSh;J!xRUITGW4UIHefQ2D4;H2l}7@M~f&KsK^4n01D4#km5Pi8yJCr`dv zinhAf{2^l;pvT!z^$gs#W~#n%J(7t?+&9p62vAsK-IM37|I(e&ra)cK*E1@>{fCQK zhPzNMj1guqPAq-gglooM`+h5*+HlW|m~qhiM=aLkXw0X`YZ^GkBLn6tyZ~Oxx;A+| zQZ)|xn|aZJx6-w&uj(-?Yz4y7NNg2gS`Y1t@NFklTZ)!W`$Tf`(SJy>{KLvQE)d7DKo-N?}z8&{)c;33#`VkIgIA1>H~TTeDod-<5~{1 zLT{bkxi`610PX)4Op9U%-l3QIv@wg_Q`qwHvjOe1s-RIw4S3FX7|$lpg@&7_S(nx) z(af6M&x8DFH`~Fzb#ULBNsVcr1m90GcYe*0BwzNR5GR3;*u}BddkaMhbGEXETwt+}P+oe5(QEJ%A?}*zzM$(klbF^{UxNE3P(1 z7IOSaH|si?)tMLE&2R1r{B3yJI*L+Kx)p#^Z}Q|}FgZh;lR(uOJKr36d6TuJRf}wE zq~qU$XRINZ!EYEckQdB;3i?^ZJp2||=~c|Z?5B4vj$iq;6HVB{(*?i9W{xIMF-x9F6m{lAmnoUhjNZzMSwQ4GXFzGn?04auCiv3`Kp~#U4 zXbs11^Zsds)lwv61J|tQ^*V4hhv&X`j^cMN`q72a%((s$3I75*xfY3pg04A@zDoh4 z(76p~xn{oAb>8cn0;#bDpF%hNM>pxUQZN%Z+HcI3rGR)pPaXjNp0s_2{&&*;-_Um; zcF(oc3FK-VGx2RS`4la`1p_@s6`<&K;4;%=BclvqZR<);*8Hh~hPlI5lV9I)tV0n)o0pgY z>jSfSG48w_r?(6+n8IB%R;L6Kx0-f~u)Y<5XC3^h1Wx~pt1|)fv3ei=ph!_PC`%G$ zr^eVP#V@3+lYL9b*p+=tCT&b4l#IPmgRwP@l2P&tqsAZ&WgBE4YuO{N_w#wadH?_S zz2=&k@ArAmbI!e=^W5jY?_*87#c*H$&1Yax2?(w*`&#dPF>o1gmI92f1Z%)IJ+;t}H z&9FgMQf&lpP6NHs3!a}F!;{02x{XlY8k%}gCWB=??w#P%ZhAEC!)&8NP{B;^_OvtN z!485T(Lrvaq61NJBhM;rLBXOmXC-tqH9k;(kk3FP?F3jWyRv&La)62WrL&o;X3j4R~e> zHYPt3+6f$emr{qnr>Ek`*!NH=w3ojCn_LVllZGW=ca<4^BhOijd^Oy%O5Ff-{yyl@ zp6^BvsG$p>b}g`~!nNvr{tJD14LhyZ&a=LauyC))NGq#wbmF%uI=>UX?WT`)Xzenv 
zj-=NSz_&a+{&3l7)B2RZxnvVlhbNVof^KocvY(-h-IqRe51AqCN4$s`dXuPhG+wFkrh7OXF|*~zs+dWZ^&nN=xm+5p3IiV=*LqVt=={SJ{T`F z8CaZItp;L6W~-Sj0jgx4X~xL0(9jrqXWo81S01dlA@y~EaxC=Fv$hFnJk`Z`>TINF zJ|nh9hCQ3L3)1rg^y`ddwq_Q{V0`_L=Fuh4Nr;ieB_s~ zO1~guIfFOIQSir88;lc+L;^}c(dEFbZzdAD?EqI-lo4=u3oyG6s)dc>?i{$%oRRlo zxeEgG0Qj+#e)<5_P*#H{{eDbtWW#}gI?*5QF^xdKT{XpmG7FJ#~QqVdouFft({dL@X0gd!z*y6}h zPpnxMQg;`=Z6m5$(Dz$N+^4`f_z(2{4#^SPN8ys^EPFPm5N{7` z>%5>JYc^B_Gc!xw_q9r5CQ?=eo9M1@bDr{C8RPO^2G;&SY8`dGMFo+G5sa&cHXG`O z&TY*=Q_{h557(>%WTvDw&x!-v>)4GKp=nF#=pL5u3!WLT|LzakE{Bdraas}QD=4CU z{17=nz(XDfE1c+3*qW zj-;LW#?7%M)}U!eKRxKN9vE3!(^$dn^fMjl8xFp{1O15(SbNfVa$}~R$BKJ)VJdtb zfvzk9$9C}bEV7~xc`j5N$#}-2Zv=*EK%wt_IsKXITNWHW<1U6Cf5I9+&sDQwJ&mat z&%5i800!<|RfTfP_s@zoo=Le zYY|)j{co;JMPIUzI#+Jz0(o(jRVTo`GM3OxD`&D)ppdpj8D%1r>dA_(A5!0jKCYvg zUqF?!NLQhzmzdG5pp+l0zY=KDk;vWj=xG7x(IX=OFJm*a`CEt)jRvqz&)?|rqd@B@ zP0`lhX=g3_FKA_^?ipq|BkEmcyvk}T($jD0gvl5wW ziwwSqJ|)rfcl7cUn(6&XSiXCilPdDp7{b4h3!|6Pk-L+8)<2*>AskYK6@)LhgQOHVV$SLG}+I%M0Pd7Os>BMt6bpvHro3*(i9tDaurIxg5-OOlqX%j z4jmfsZ3CS36q{1my0_>z7n<-B8sNFg3(;gFFqJFMaWTR%@o-kdUesVMVLL_!9E#6_AFZrw=^)2pKM&j!R-cxrV^sOqP%oA#n*%O zdVCiKS8bOAz4uBf`i=%lVwfdo+ZX+k9i~TAGhlBqgf$$`v<@`yuO!%D~<|6U_L87&ESpEHI zga3~LOC4m`b#g^u`3q?KqVJdB?sw3;UtIyf;~Sa4q(@DNnfn>AOrDF#DU; ziDwyIL&wd3vc8yehp}kmxNFr3t-%E(aU3nol52s4G-WPY5%gsCMJ98Nr=qN8-L)A= z+_`c6S6WPcnyy(Z(5oCXMyeKfjb+t;d700i3wQylS|doWd@tnW zGIDU5r`FJiQ4@pVhH+o*nZHNCjh1kz9`rZ|)J8x20u z=)w(V^r1+|3C8LHFf?lq#MYFU10T&~)Q@d;z<%i5h<=R~YKN9u0jVc*o>5Beu;`~) zWmm;O=+qbeSjzS7P~Zt*t4FVD#{GOB!>Ffu(sPJfW3Qis zmbYk;%GGULSqNm8k>TdS+Mox$n2Gv6ba@U9F_O-Vwomx4_e39u8L)%EFA8qYrB@^2 zJmGF8RQ~|2HlySg(xE5+p@5QCv0%|W^$#unVYCHE>IYC*kA{2E4}k9tAn;vxJJ?lW zw8BV0GkP-uXjcH+W3-uvCK%7`xeK0=a}t@|h~%BZDt-?~4cV^nD*@IT>1kl1l>T1h-gZ<%nrESROy zQym~$i~L)6Pt9ox*R3h%+sS+=;A%fV5_AULxB*1fdCGX{_vrOWu4PdT|hm$cDO4 zG5&Hipc0g^KE>U<0aai{qo(&~JS&JzgFk1`zbGIzw!(Uk)>>T^*nx)F1y>VufX9g9 z!ARm$^e_|(-asx!G9L^>t9obARLVw|{(ugRkaR7Z`w3Y{ zfF3+g28v+B^MS2d4Rj5lvpYQN=pl{2`tCgIZ9M&)0!~jl*$6ZT7|qjXe9zfNZ&t9d 
zHodi!wQ$-vPXTqmV=)?948GL>b1N?Fr++>5#i62pL1QexV{O_Filw35RxA{CW5tW1ijzEsRIi z!|EPXZ+bb-o0=I9?ot~O9tS6PLhnRm`y*&#&R#THV>I(sR)}kWlBy z#?i7FvTSXOCV}77`E)Imh{3C14pSNA-MF7>P;&^j*NoKMw2-cwXpw>C_dL$0kfyij z!92`ZsO!G?Jf0p6y^_)IcvfK5pw(>fRa?zp?T^hWFtlWX9a|ytMTB$HDjqLYN%P8QI<3MPI_`*RL*E4 z?qbxnX!noEPBr*pO}VGgpVC0|CNg@C=RXYQ1oN)N{2cn;1v?N9@2jK9VwVgRq@LMk z?!r4?c`C^~EY2*r;>t=t<4|ayO9ojH~r(N|n2MfK)FQN&LaL4*@@1T9lxe^9_?s)#J-KWQ(0Y66kyvyAK zTxo}Q!8%RWpDY!4NA#dAM7wf84gF6!f+vi;dx`JvqrSt{2Y6qJLb?xg?HMp@PwNNh z*^E|wa3_C0&9P0sdwgE?1X2hKL`xg zDEu!Y=mYwhQEJiSchKa&JZJv!5$JRvfbk<>G3&P~)Skl624>sQP^boQn7eEBy!)Vw zyIk6ReKp2M>WB5TidTR(63&+b0y)^4XS%?*`aG%sX$pU>RNaWS`gZi&{uhds2g5(P z;!jTKGKVWUfG(CNLvelP}RS%VD*>_ zm`eb|b$C{rJ|1NBj_88XiJnJ&8$0A1VtptqF8Vk1c6r*c)dh_;&4cV50h%yoPoX## zv^Vc~7u3v!z0vEczsx#+_s~NBT|vHwd|o+`nOR)%uFv;tYGj;VGGjJ#Kv63d#)8Q& zaPl6;`-m$C_?-$hU*Z4%{&AF6w8)DUjO9*m{+eBVr%ev-w8D-Qg{GcKHJUqjs_X6g zx0N1Kuj-*I9Q+RpVrv&9~mGE81sxkMdj!Pa!@X%+$M~;Y8-H_u>6`?9NS~PGjE89?eX&6b)zveic~tpGVF-<0=8EE=jM6*y(eDJ&!`ePVjjSE%sx7ufp$4V9LOn zd7_gx)pI#6Hi)&bil71Nz;Dci)4yL)KxrN3tS3NniKVcRW~* z!0LM{fz{rP{mG<9BS)PXVzGr)k)97(VIIdayRWWq@ntBJ&F^|JF+0GRsME{=lY#0U z#+k_c;pwAG7-20o^INWa*4cPu?lcmk|H_!t#(b-ab+z_YJM6D(t(II#2lLWE-47Xh zp67>w&lq}H1ucJNen|!^{i>(X=rOD~Bk4&mixs-ZLTxMlbcP~&%*>V4Z*v;xtpM-^ zc6mRv+fC0OA|nZWDg;HWdsF~EpT^=R26#P#6k9KA3tDw4So_U_HZLF_Zvw6LHs%GG zSZC-L=An?sEP@f9fiKrs9drgKy#Tkt#_DtXpubg%tjBQx88Q0E%mjCBegj`KxXo`k z0F|x((-(?-3wON#H9cNH3S1qxMnbKoq2HoCko5v4PiuG-+f*8!84Gfqo_nyuc$vB6 zcWl%!Xu1SA-JQQmn|a8D-m(%vZzjiWwDl3BKyR(lIwyeF>@&|i91F!CqP;cz`!Q?p zqPI-+|92owX4EM>u?e^q!_$YLU|Xp16Juu4*I=G14!k|#{z&@lh7L4B9ttAGZQ;Y; zVBuR@E#%-NQkTGr?lk;0BiqU*2l)FuqYYr5Rd(D7bRXK7jy(MB&)Q`z5b1X|vbF`V zd0Idg5^Ch;Hu_US%@msgt(T%Z#wXQ8uFJwd*Z%F9@oq7D_o2mFdes9ph#n&Olm%8r zp-x9&T^rCb6Kq^D>Q(8EY;EJtv)pe4ZGC%bNV_qNlFS(9M}%^FH$dgejN+*`gMn}( zE22WQ?#X?lTPD$8@jy?+qB-ByETg09FiJNt9FOf@1CLS}-LpbHZ)j|Q+cJ8z&hAm> z538Q(ht@yW2%2t#6JPU`R_Z!>aRYyXQMXsH)@6ai_if*(h9OaAUYe`i4eZDA?N`>M 
z6QReG(5MYhxQZJIhi36q3NzA1MzFq>J~_{2j;FO5(F=fEFS)rOap;_@hZW$bZ{=ZR zts(FhU?%jmk2>hV4BnqyJzB}S3-&dE=cA!sUua;QmcDv3((k8l-{{Ifho6HnBhhB- zlbZc9AL-5v-ehNiPXl^0rpeukos2S-evQ4dBESb+QKvdWW%J|B`LVk8Fz^hWv9Snl zO`+Gh%-EiKWJQD#NZ2oY)-O1kr}PC*jkN;^D{6`T6%8o4w4{Q+zB zD41BG+uD)F3_k%)lAy)c(6l#RxyES0??^~NT4+IzF^?U92jze{8GOuWtOeh)!RJY+ zroCv!=W0A<9KC&e9;eZH))UzY#kJvYLK7=)TKOWB|EE8|$W61!jT|!H!b$?p|9Zra z0*evZjp2DeuzCfpF2cAs(Vv1?!3enR`83Aed&a6&Gkm`-23PN*J^%=xx$-lLu-_o#beXU$q3%9{TKGr&$@HItF8%6NpjpWPi*?w^nLKL*NHh52 zc`=V*fj+?so(!aNGIpW@J-*I2Yf;RDcHc8syapGp0Yw+q1DR0TI$(+PpzIpMl#Bk& zuc!oOp<3}1(XdWHvJ5QsAL#Y^iYKZwW+GavC!-NI(No?Nuswyq&lAjAgZ&t=TE)}J zf$c5GXe-bgpXp579!Ov|{nkOMjqr1(xBg5Fd_0R}HG&?VmgO42h#qUkIja`tsR-z2 z#=X*M-PQ$gt}SmVda}PouJ_Tqdu~PO%^k6ad9Eomy@3`^W zo=l+iaA@nksM(CZvDcxWFeH08blibOJH}tD%zN_IHLOcVdQ8UtjRxCDT7J*n6xN6O zEz__9deDo*jYagS_oF)Y=s1`R14`p=jbP3n%&sSCbq(!qfR)(JZ*%&{ht+-oUz__3 zljz&pX&KPtKDcd$)(PaGApA8##i|+34NU?G-HVQRo~fLPK_;cc3gCW^Pwp}oqWw=u z!Eo$`F^8*==Y~jMezeFFrp)!Mj$fi7yqpSG7cp_~HS`iNKL%GOqPK&g zd@ppu6QMn?w-LI3jQ%Er^-y?~f!+<|&H$(pk0sPPn_-?4nuf#0Ho)ZgiNHD=ZWxg| zpEYu4a7%=@Wx(PUs1U>bjjTuiMpmudX2rw^FpHp7SuobqC|=fctODJ8qnFma(A#cB zN2~L>yYM$MrYm8 zFB&roZ$wMYmh26ejYV9KRv6`33z^i)JkW(2R!jeD^G*sX*g7oK}1qLI2%(vK-Q4WlFO?YC?&GV8zoLjU5EHlHfyc_+ib2 z;c(sxEcsY}SUDvHF4P1scj>1vqmQD0qjl9+>wapfK7k5xP;&^HZEYXl?Ts2W1Nt1g zxDuPsu|7ouraUd(1bJd9m(wf7?Mlw=*`oYb7&)`Fus zGe+zt(#vpUw@AP-F;<`59Xx;@)jcoR^7jvTn;U$#@M$JeVFft9hfew!La%LT{{*zxtJMNZ6$0`P;M+oe za-x0F_&vHHTW0$Ih_uyX%u=+{8(AMr^qk~}PkL*txL=<;dZ0Xo&sJg3su1`$LV}wk z14;1qF#Ok(V^(e~Hli4mwTg!Sk76ktU&81AKU|2lj$l59MXIJXr=;%;M|ukf^|o4iU3Bl!O2$%fF~ zdZ#mZ@&p#z=)QNMiQ^n&&dLppR=+I3D*7_^qdEM`2|OQw{R7}NiLu6DH#~c!58AvE zO!cW5H~$8-3P*}>gr{T<;uH2WgQZ0vw9lK+dP+tZzUKtH#n4|H{%9h2Nri8jqz-N z{hf;?st6?A0=`*iT~DOh-}$)m4YV_^Tu-E)aI=z?LC-Y|(L?{a6%h|})fS-~_nOE= z3tD}|J!@cEpU{XxsZbx9?LvNb@ofXYdNA|+d)Gev@oX5|-uZqfKm5_)iV`dS#d*@U z@BjP#7*smOs8fP5jnsWOxTBXp)YG1S|A(G8VlWhO*@o+PgINnWVFcm-o2MM|Y=*As 
z^}OS=)U#>!vYE^JG9Bj*Rjl4wfVT&424|TX*Y2S{h~VTs{e%{us_w#QvMPY1HI8wA45BvA{DqcyBq4&P1ZQ z2N-=MvT4mj&+0Ie?as9M|xgZ9bRRP+ijP@5e zb)r9GQhm1?7+AnYjIs%g%z^8OJPzRhC7@_dTfMfSD)F-dJ2D;Uogv*#?u8Asig$a) z*Xw?gKFwbAypeXm<@(P&A#3SWgvMq^8DZv{)I5zF*qpV@P({Jq9Ma*?x)G3?x9Gd@ zNhoK9M&H*XSOd71SC)DF5Rl!#I*kERV?_r8M|0+XtJawBIR?CjVMkq+L zE&+|J@`TYLDa;LJ=tHlO)tkof9&MD1=UiBU_$u_$lP53EGhS`_)VtXmZOjJpM$pYw z=REK<6T1_i&8%Aq4XxDs6uldHwHr8$HQJ0Fw))i+a5u-m_xM!CG%McSf$fYCTDjB% zwlPScZ_HK`oeOWmp`PCNX<*?=m`BjU3H0$ZG@Zw zv_uy5C-i}aS?FID@Hl2NlIRJSg~9h4--jcm=37pK+5_O9Z{_Au>)AHOZK}AnkMBjAQP&83ky->&DOX;+|u=Xz$L^KZO-zIK5| zGzJb&H@pbF&2je(4$m&grvFvY${OhA+m%Ir_5$@R>fT=IfuVY418o$TtG)F=@)Av5?kE=AL?Hc$DLsz5m zHO}Ih72xe`?05$m!=Rh<8hDWeiQS@e2J}W5} zMjoF*)9VDie1o+)0)LD!GUli`HrLfe3-HLKA)#$>b zSmEE14gLA|0izlGg`jMAbmkEE3!!VSx6jkch=&TuiSu50XweHDu7uR0mrCDgfNOx{RK`{rN@F$ z)q02S!(RgO=UD@q(_a^OZo>!bdsvy#h%2*5JJMd?$ZWKy4Bv%i1MfMVwtECPKgQ-2I#}tq|3c@jRis1M^-e=J59f4IpMFU3EYgdD(GnqAw46KTr_%0ecNx}R!Pa2qllt)9kT);3Jsk7Q%8R*;# zi(*8F)>My<=P1pgzs*6P?p1_C!Q?=4)czsI4x zwL;Dzx9$jJ0J(8dRtRp#d*ub5)e~X7Y87NmySoi~$MadeZ;PaPI_G(KKZSO)uv`a$ zs5kTYW*}P1Shax7xczMUTY+up1`frbR}rwcQdmu}p2C~YMCj8R+4U60%FK|S391!} zfw%9Y&wA)n&}8%MD*;0zu$J0cSf*Gg8xg#t8v(fwDK-Bh6FyqW-#soP|1P7W=g=)P zh_m6uAGElN#<{L>&vq`Op5|R5k>1<&=@IYJ))ufe;EFl4ZRwEZ9}$=hBnj3yn~s*d$ZCme&n5@nx)l(C&x- zNm#ht$mYbU=ONbn$@+zWjoxjr9mREH~<0*B3mbO^HwOL4OKiVcUo*A5*fO<08 zrN?^_El1JvX)v=MVFjq@>b5rHmVt|U9m@pX1@{ztadm5;6UK_WYyJaVI)~oXL1tUQ z3A1(PV#Cb3n@yjdO!fq0crMsD=A5!TW$nT{eyJL8Yc1HN(a(EGlmWZhWiuo=u4Mq`$yZ{Nf|LVp&sw$j^{!H@BX@wEAg@%8ZQjd%T%%(~)xAh&wp zO(^O5vL*6anD3K$x-c?Og=a27k@BqQY9loxkplNZPa-GQ8P#j&sNGm=zX(N**M688 zX+N;K7e0zB6VbPmSO|T3FTt~&NMwCvC5`qOU}QDE&G0xKs+L46tc*~LHBbuQ(iua~ zxa-oHVBl#^;ZVFNIGB^*esDp4mUG7)p6m2(t-%i98fp!JYdKU5p#{Pk)vzaG` zaorjlv(Q}6tiQ@N=izVY$K9omkY}syCIF*ZB>T~ymwEmi66@K>QP9Ry0S80(UC4+z z()zviQ2huetc}|R?A~UKCBeKl7ku1@`47@`9qdw}aUw7cL?e=+`_J%q3-kW>K=>>= zydCO=GiEJh;0FDz;eIDZ{2I+^k5)#Z3z=-LTSnpGdIT<9B@>zZ{~N$XIb zp@*{E8^=#4XlK!O+?Wv$2KKyP*RRH$D8JKCG;m4T}*Jy~_yh@W^?5$+P{2b#te 
z*JMgu8T9ctxLylM`4G6jK(kyezrm=r;o#@UNpEbRwc3-R;d|&t2PAAKGvioBi9nWK zK_Xw|*<#?ep6Bkt)_jGQO~7mLCGe%=hp=kdOK3_Fq%{<4RT14U4aC>62g%s?!$9;A zJ>{US=h}V(l)qz7at6;dVor2*tAC{;GHpEGM0i}Dca6K5=Re_ZeV#M!awM~$u_}jY zrHANlVAMmYzs|FD$HK7140_K2#q`Fx(`e1YpXf0^w0r^He#8?-<>hBx z-kyI4FD=-dzOB_wj@jN0Z(M-qNgmOF_{kTnWQBBdkR0F~Wb~`B~oJ zzGM}lUrC?qJoKzNJ+?t3%R&F{Tzi}uO#123isipv!0Yh;b)Fr@h{L)56yH1>CK(J$ zV0k_1lfl)uQ@_ym?G5lf+!^L^aPJ)Rz)#GK{(>AGMftNU5yuK?>D zA=hJdFTZo?t2C6b2B7#DM?aWPt@w=&sHvCi7{7YJY9ej=iT!jpZS;`tg~IOuW$1*G zJkU87$i^d0OPF`YAQz89w_8YMN%UHO+#v4uhW|pKCrX^mM>e0?$jMspyBK}E1%6@l z_X@l>*3k&plfj&4FF9EYn#U1~CU_>W*Zj#v3*}@z9DI+vuVOpzcoiSy{;NSNrSvdd zQoi28#)+@BWVdkbTRxdH?G>@{zciDp+u`RBM)uqP|06w11y}tviV=*Clzw_rLqGNZ z{r@g^ydqT_{a;U{@D!%C+52}ve|Zp!T@F31_tGExv-bv2`Sn1*yP?&mu(oI5PDd~Vl(mnK|a&479FvoHIUaU^kdvfL+qk)TE^_}XY77h;Y!G*(Jr2@>)t|AFn4>N zw;8(Sfo=g3I}h#fTo!jYoCPLhM|aYzJ}v8w=`EYhtkRF(Q=#hDj9Cn+Hqs{pS+Xin z*8q~%d^<)zdU&>g-P??~nO?f`Tn{Af1-NI9WeigECeT}WOPpeMjR zajP|&g%}UsN5NqVV`uUvu%9`_2%U++Y@s#$md^)iX@;uNH)i|JVDAP&Bm;Pq_or9$nU}&tv^oxub2Q30ORMjEV;X;t_6;JJ>`ZQl_(4uckRfz=o&>j{3s zy_b>7;*91xKhoJ*kjIi8#a4{ySAJ!+>I`7iLe$T*7`RW82?U9+HPb1=QkD4ut7 zp0Qifk0;2C1c3WdW>ckw#wj9Fb$pj zS|QMkNGxPmMtg+S$4z>+c7L%zW_|B9{zOJ`xnf<)b0^@T6}$Adn6qrgo#*CM1}@*{jI#5DRQF1ZE3^JpHe(w{ zwh5lS&9&Re`IE?%GgV(?Y$rObXY4rdll_sLF4!RJAnB<(iEbL3GKR4{VK)W|@_nHV zTyD=?dLQtOhuej?K8(9_u(^xi?>X#&mE|fTf8~Lw8}CND7+VYJDqnAvdipb5UIQg+ zLMtPk)vAL)BV6LCew+^MrruKXx~=`4gtUwXvwlFhj5UPOTfKR986$3I#j_ZSMquId zLI05V#nZNrBKga}?hABnI-fq_iGKVDi`K3)l7A46dTz0HO`Fgco_exkS*TMO4lQDx zc?5}zW&DxAIT7fjuQB1i8CkWn29SHo^g%Gw-aieF$ABY>88{r;hw4BWtL>-dJo6NE zF%n@X@2}DD%BnQZA@VzdpU0qQphRzZRr?2gdV%C!tsrLr9Ee<&_zHB$}>;zZO zwz>svFQ9Km-}vU527Qk)aujyee1~6Y-wvJ7tJsG=ha;bdz~lf=bmg}^cVcMs8*Syw z5umRGSFH!O1B&;DGrkEwhdg-3_GiFd6$s4d{TS@pF#1BS)Ic7K0c~FH41ilj&5v;q9fG2$|}TD6ms*F`#kQzEyJQ-0~>un zdTY01v(35Io4AdB8Z&R7VODDgK2~tC3hWM6RVR=F=X?EEGXk65jC<|SvnV9PXztoz zw;ilELiv4x?V82&Mq(L{-=981^d5+$mEp-qey!hjl-|vY?L%uLea%MS4;1x)qb(9) 
z%{KiUW1;B|EJir^CD88)Xkr%mR_>VB`6=`G6=;8!+1E31*D?2%=Lyfr+5*>S1EptE zoDTS}H~BB6-G-wD{$m}NJiiB|Ei1|O2SACJD}K-R49VWqeo+%w0w9nxo3 z{6hQ()&;P>wK=DrzhVW&WF(;sQr!x>=xSsSBO8aL_0peg#l)HDbpv253*=0|_w?`U)-q_;9K{ffm|1IGrT-Gjl$jGO|@4^~~@4zBFt*{V0~ms`rgp(|P8}k64*h zdYFZrJs8ZfWw~CPo{D0JT(A6%tkmUgc0UlUhx(5(QyC>(5PR>gVmYAm43lTDl--c{ zEsRnPY8WN^EReNfCVHP4{RytPp0ACDB=E^uFC03U=WnJ>d*r}%qB|q?z~&w9Y+}5k zNT=3y8Lbz>;qU1G7!*xGW*p!7>?!n3i8jhK4GyQkk8#Mb-a)IRhGN3rLNcn;DjWy~ zf%XS2EINYZN9m*`e*d+PJv$2X?+_^zT=*?6^f%7X^hy6 zUh@HMe>gP`xKGgD_-18nE^<+j_giBD!Wnx2Jr_rg(!fxT4raAd3T+#VT@~}kfqDy9 zcEFu7aQ9~*$U^@n()&T)?bL#~T&oOJ*O9uDeDkE@bf|I$ysZUw7@1ab{zR`kK~??3 zZMl*L-mVdjv#MB$giJ(Zr^351o=&2*QOwS5F<5h>TAxGSj4!axsnL92fn`OcJ__1( zM8^8TefOaf=|7U635~Vl#qiypjc_);*XW1k=-NV7X4<*d@GlaW zXCPtLMbPePdrR_ZIIH|gjG!laE>8;cTwu)yb<@z&uNm9(#rGpW*Wkrgw8~jq`{XV_ zKW4?#`%hziz6dlV8m{Q|If#7CM$Y%K&hU&Gt2p4Md8$Qb(Gr=gX@gL%6H z9*C{jhe}wc$%ZenuqtmubkM>IY`O`xU~%FY8=dz`j7N)=}XijCpD z;D>ektP%Yd-@XB=chH)_3!0Ib~&&A{4E0g{T?!e@ZzEWPGHTb`y(48IS8 zvmPL$+q!e7IXw9UYUQI%4ZfKt9ooA)#JpFBtKotF&`L|C7&D%0dOOV!zJk4TKfsE6 zk0K={S!L-rI)|=WP10QM^V~5j*S%i%uydfzLxJ%p`n^dn#t@q2o0Fb5AOThoOu@Q8 z27d}cfnmT_2Y5ZT?M?o-1bQnso@Ol1&xr+leQ*ouMNeNp^#2vGYl~zI1cGKz-kI|X zZS|u~gi^M=7ivC+o_x!-KWSM3I4d!4hBi9=1vt?kiCD&1Y#3F|!f5c2}P+2CduH-Z0VOckQN zk%x8obc_*0XE=wTXu_JXK3D$acPzj1r97jDJ}ISP=&Ka-nh~FQ=qKbc&&U4~fp@ks zkhK8c97v$27+DeWbnw(qTr*RBA>aPz$7||QsPdz|m*U&Kv`gS`Y|ye|@OL5Cyrb{( zb#%l&?(|mi-%m@L=43`Ut~rh$+lMmPL;V<08+u-yeg_F%0Utc~#LU9Z$X#((w?>lO zgZ@2<-AREnPhxAVE4Lc`Ov4^-fvT0zUvqRqzK}Rq`i6%Kd7z70TPXCm>QEPR{4 z#aP|!M(S2`*Ihx+yDG+CvtwLe_GSDeq{W(2FJJ-YV9AX#^jrc@6)DTN%SdkuG`0%9 zr_J|9dgmh-t_!UPm%)1Z8G5(2iM9KZfNd6*-7F_F5xXMaaage|U~0-tRGxl}_;M$w zFLOXOFzbpW9tF;c%vQ}<7ddCP0A4+k+vt4*`1jyRbFSPi9f7rf0{*QMXb|V5OhH+>yeQk8r;Pcw3YC7T<0$ z2hCuGV7`oJ(wzo6x#f=I$NWD8gzJIq5>RD>!CGiol6BX9cv^Bjvv zpqjv(>l^7!Fu4sSwQYVs7p#fDgO29O`L1m}u)fSAhv?l}k4O0Id+-e2ita~Zc5Uekb^BMJ@NZn;~b)ML{SL ziB(C*Qh&+{ClfeM(!L-v@;(yz9GJU<+n(7j0XrMd47ZaR(i0fW?=b2$izlwYlgGLL 
zI~aTq{sVz$GSInF_7t18jM5KIc~12iu(+RR^^)jgHcHgEKC8N$9bmPUp}^3H)sOvu z$~r~49!5VOv&PdudKlX04_b9W? z)??sa=xBPWhPJPO>ZhQdCvoUIFvd|2^f#Z)1k%_d%1h zzf|jCn2Y8f$ zTkk&P=QY!LBWu45q{1B$t4~Q?V>^e_S1shd5x5)mItfkJ%kdg?OQfAUACcJA7C@s_ ztO@-+>Dconw*s$q7mcnpKI8}zwGn%G916FCV~Jqud68S_Aq?#q0dAh55Qok9hI@yQ zsuBSn+KbgtK0kDG-%}kJ1I?w`B;G`xg#%-u{1iCffYo<6P%UOY$OMWcELb{l7;RdY z5ng8H;7NwP7;gpiF)N`L^06ndMC$Vq`ce*?!Fk^hx&?av0WqRTZ5~6Qc>``C=?%tUJYf;{e13@ z{`*$75V)@fKkoh|VBNd&n~t3NzOo#uMnmzYV0aF`SLb^qlJ+w*x;ebt0~>jOCoaLw z_FS=!^mba_kJa&Q+8FEN$V+SJZ%ruUP4{9iqroMLpCj~OHmCK~%zU$cTwSPQTw71p zW%`yzarG_m&B>}Q9NwOSCK3E!4v(vWvmUjR{I>?Umi-&%?X!Fu4y|qj+f3-)2Yjq- z`ycphw9RV98cVOMz%rXwzTeJ7;!@$4R_#%!dmA0pgPj^s;s}z|9vSp)(w(llP-P}| z!T1cl-}e+~zd zz1Tp#=f5FqMUmKFfyXt)Vkne=%t`r7=zSYXNAam3^7seRr6qY7tZyN?YoN|naJ&+% z1@v{gJFp5%(3ak#q2K4U*#f)`;iolTJX>@ta36;wWf@@ueJ`a=CbNO|(>1Md54`Be@^Bk-xLy=`2Pwy3O40#!z7q_@~3+ir_GXb=FPz70&8EFI?7# zGN0AVZ_w~vW~=;+`3{^O2lfZhy_(Q41~|XQPPkV#5u84P%5g|X67q2fT=eR7LN}HL zE2X>WCzG|b@2%N9l@lxa6}a7l%v)8`?7d}tJH+VGXh3D8R4X?LUK*J-K3LhAGt(1T zzXtvjSnf7xNmt;T0zad%atG1dCD>3aHdg@pdPu|O0G4`a(kW=9->VMr>LJJjEj-iK zOxIrE`&huoy6{p-ei*9egz~<>>uGL>#5SZwZt&ZVeotd{(GFR+vfKb9`z#QDLErk{ zoiU3rmV3QbfN~4Z#RRnRq$;z#%t=gyo}LBL97?oXh_*dh0g z8Y4kxxpFIzJ$Iyin{fwC%c=(?9cbMfGWW;vOF*}s^x)dtQ&Q5gVd+>Xqs8xMga!09 zHn2@?f#n1|@Pv|1NTqc^o@JIa6S^k!SQ6-}v6Ux~iripkwm}q67X|+{NSfT)f^ASL zXMxcXsFRnm%?pj>`g)!$2W{&Hu>6P&Z9(?m1|Q=i7|;c832K;c#izctvREi6ZVC zE)V!>^mP{2*DR_~{lHZ~;%OxfSj%SwZ}}4gt+J}06|K~6Gc$UD?<%zK3|7OPR9Cj1 zCG`|?I2a4NhO6#{O#-iYTI<8gL^j+B`T`pJUZm$S3-~f=v6rV>AOUGm)VGGx*y?C( zb{BAOMOz`v#`5X&3J5T8iBp&##njbm^J02(Ot8tty0hiT>C=(F|0{{ z0Fqtw-VaQxATe#B$XD3&1SDI%zZ}f{+0Y_8kcw%HkO36S8TB%-PGFqpv2O$5OB58h zGHW~@f^LXrcfg8v;9F1NoPf6fht`in z<0V+3Ff4L;xThEFG2Z>!G;PI<{upymSTH~A;(sZ|*nlqI2Uq6PrWZEsXZjh$72n-G zO`##b=5mJeUOhGBH||)?G>P6rZ}=6l^Lp8i0Kr?xg7x$!U^%*@r4z7|Lx9shOJV17 zB2CR0C7sb?u&5hoJ)IWj(3V4@js@483pb!Ei}|03#d!yb+XY>6F+Vi`hn;9+t)Pvb zkK91#na8nc*mKY>KTvMxN;%#^asi7zptAfu05os#za{;>1l4k)RexcJtln4}?)}NN 
z8AzIbApJzO!RtqGGRiOP-=C6j-&%myWXywXJk0f{={F9k`35N0K|eh?6L=~woV|!7 zhJmv_mcy)qav?9Y&JMMwc1eC6)D_{L24pjC=_=ThaSF_)jVT>vU!`y`1`g z+&%vT{cZ&h--ZUpqSc2+Ij~99ncJV?PFpBa4JaRDonfWTqq|BB0IT(>|J>JC$eGW;kWdx zA8I-rj71CbBlDri!`o=R`2Ww39y=ot{1&CRorV!PTe$J2CMD;oAJq|L>Uf_05^AB`Xis;VPnf-+5W&LUd8(K=iY6spw-D- zIXA>=-sYYju5fIzaUE;u+jzmMSY6*jGH6|wexhmbuDbEu%jmQ=#tvW|!tAky z)z};;T^cy7fq0aWrZf8vWIobgQ4zU#oe_s3i&5YWJM1!f%v_j~1g^3XhM`stbI2 z3YuE+uLAgAhsQI3VSK=kMm*=6iuq6*k=$+EosA5f=Dr!v$ygPu|GUmG0&qT7`w>QH z&I~Xd_{}4GA4nd;TCc_OO#WuA~ky&72HJrNeegP8D0_jTtp9kr)6W5l}{u=Z)+Uf-$&{ts$j_+kgUq&ycj3sraf=@Vo9D%}h2mxf6XnE=K8Sn1C${FwdFb{YGt(_(%zRkiO}8-j zcrr&F@H9tNO|jPJC?u~n_gjMJ`*2NEi|-YASo?@#2DCm@UJwQ`aPJtWeX-Y>d+rD7RPJ=*j_1Lq!2MBB!4G*T+3Lh&Nli!7jUB^~lLj6o;A;G4C`&B87O z&pkhTGFWW|&O>0L<*EhUb_Ou(2b;^RRf)UhfvN~p9moH~;H@PRJ}+d1GwAOg{tjZj z>M6^$=rfU(z*n^H36{n!i7%g$`blZ1b$;l zMxlq{XhL@&D+OhYdRU3wF>2-n@}m#Kx66jWYrR-!A2T#P*TL#&zE5tZ_m#-MGg&?O zWPPav(A`sHJbz&$lF%N!qxtQ^$mZ-?sp=xQZ^d%FK>wG)q8D@;id^WQZ5g~PokAX3 z^0ZMxp}a43xhtbyfKso5tLut*EaVVI@UyvG*0>K@g35uW8zo#2?5DuP-HbkhA9H{& zL$My%^d3-h5tbsFemx=7tl`;cA^g|owe{hu(LMW16Z4{7|RM>L!o;?WFifk{SD8@G5798(#9bHze25kaHI}eRSO!L z5846l`i?#qzP&@gTfluF)+37^^Fuc!Wf}dH1GcO{(+@%A&h+;+ZL0ufSE%OP;=bWt zY<_iM%wV>E7)XCY?o;W-b>R#AO$*i{VZ6-o@D{_{job09r$@@M4A=qRJr zVxUbreOqO~sGoA!@B{R%B=_NW6B_y|^0EdjcEdAISWe-)yfRvIHSkS@r|$v#AfR=A zEe=#}(-59?AgAVCi-H0r|ft=_$ilmJmWTUFgB2X6Nq3m#U zFB;zJ(KGif61mCEc<%KYt8TV?2DDwoxJp+toX^9j%C$!v!1SaaYo*t$kGZmKK2)&Q%Ifn^0PcOu#1VtvTcfh{l+%TtDW zA<3>@h5}Ds?v4b)ujnCy-na0nA8o1wv-|V$*q>tHWfjN0(8)Z{fk@0?u3kj0mH?|O zV)Hb2Ln%FX)_^oRE(tq4i*>^uaFl+{z+7Ga1ZWyEnrqPZP)OZu4zwSk8J~cU_4%Y< z3^bjJB=^M5L3Y)hEi8zcU8Va7ynzQL46X|XrtYeJQ zs0l2dn6{GtR)U%d6&KLf8EYtdbdj06F0=AU4-AzhoHxjhgy}u1;^kAMn)^O@2ZyrKqPO-v#}Q;|fC~%W>6x z>q5{e4jBL8N*-k06K=w>C^h+{hK2#R5ytv~e&N|SxRwu%y$c!@Lo;?U>ICeFo~WZ&*1z+NNF*+@Fdt(0FE!Y`yZ(I26`+7q@O45 z-o(lpJvfV=^a|a9rs1?aeb8j}5nbaKK!~Cs|m3#}_>hn1VE%Z?7 z&npFQ8eo6w^Lz$(7IHld%FAzm99d6i=$jPww|LNx*W__YR_U>Xt- 
zua;>9SIh`Kg&lm6`;OC`K5C#%=a7b_!JC15h}Qb*jD9p`q!0P@COue-t1UBjC3<)g zNje9<*O__l4c=y|)3cR%t*cXp6-Zj(^U)V#UXXLkr_6S@pvfs<`-@eZdz#@$-F9s0 zw`g4!_S5QYN$6X9dN%9Ay|u|uG#Tsd3VID%y8=(nExgu7wV0df`;}gmeq1qgD;xT{ zn$R=oTeyCgbx{8XmdfnO80I5)jy~h<<4zP|DmLCKTxPGO@^bNkw7zb+oALUBIJfTR&LIZaEmh>D`KW`33i3gZ_4 z;)-vM!|314(sPXK8NHqkV4OI`$?#9)O1K(fQ6(2RO}RxeSD! zPZNp7H`<^p9P%wR9FCrYUZd&#ztCJC&8NtP)d8(8HJhUaGUoN*CWF?*VEt^Z~Q zTRldY0+l`m!a-2@2=soHr|)O(SqkOj;Di+rD=;g2j==-Wp4Mh_cg~6s$#BlIxz&b! zv@MDBT;}dDID8sfEMeSH%m>A3XTPj6z>TyQm{3Z0v?9^GiIj5^Roa5cQSTiWM?f}yqVEk^Hc;Qeac#* zA!CN3`d#&Qq;)c_4gtMBr7Os*Q4iC=qBqYO59+&a64#RWRujwj8~jv%zd#n&!#C@X z8AG!JXw2(6Lm$_GcO^Ys#sbwt!>r*FkFJ@4xRf5sAt}#tKZCy4!iOzbB^I##e&S!zSO>aEVaPB;O zJrC!rAyuAL-42OZfLwS+(NZ{mn$_B`P@y+#f&ujGDl~y{ckuHDvK7tKouPkC^uqV? zztFI8{FXxp>#{0w#b#|SqZHI|?deQrtp zP+DI|7Ei?htEW#C<>~#{$R=Pskh@<1mA-*9{59IKGBW2IvtC3aKFk7ZgS_eUT^FpQ zjY>8ur8P2gmOiw@?!P2LsbR=l8!VFb8LPmHs*JUYXNxk-u}D^OSL7##aU!y-e!Nct5c&Hu@ zedCaQPk%OR$YV#KxPbLL%>O>%P>}KF@tiR*Ef^&qdacE`3VsItTJO+#!YX+BuOeujgO!Al z%##BCYlTDa$Ay8~)rIkq!;q%DK6l1l6o)5yN-SfxU_j zHA4Dc%Sb%b>*%XRDFe2K`~aZrx?{Z{-N}k z%+pp2%K;td0bS()663#|FFjjx7yOhe{b*Yq{VIXY=nWT}L2p-I=F9DbdfCi5-LP5f zz`;DZ;=o}v?p)@+NPc|ZOk|uLz<&mfh~c{@$XM%cDScW;!8lec5p04+?(9TD^*YeU z3<2ZV=D_odaI6zOTj|vuGvi!5r=m4a8BenlOK2pNC$VV-jZ!bpooj&?s1jV+3LS=l zb4BP{gHfFMtXy&eh|ON`T$Ik>QwePw#JX=KG&0+;4iq>9oSV_&qsY;GR?3OYNaohN zALtx058h`1jr}RRp5tNN58svlLg(h8BUYRE3n@-VD+>UHr)u4X&L;v+%w<%)I_6rW zKxcDubQQ3eHE9ejj4!h`iKl=qqQCRdxjB?ths?Vgtp=5Ef~Pg}YSBjpI4}k6 zEWx#%z`7carh~0}l7BOLI(WM4{0~yqnzmP>v9P*+q ztAj>GBM-%}3a-A)WbTAcP69$t!7>+B|HKLS{}tEusJPc z6FiqmuUD`=lYyWH)T_kZNGS3KJsZ=#n_i~CxeI968s3bmfvvu=E#POqo3(C_1M5(r zaz3gAzdTFV6Jb}tg#z5q2Sjt3)m>ZGZ!m<$*k&QH=Wg017!Pj%D-`NSpF> zi_i9{59lj!GFRBodFlIgV9S7_t|ZMOZwg&p3#?$g7TD&O!NXIQ-3h8pf4z~U!TjF= z1|xvONREcUbXUNo|6&8yp|e(!nZ&#LVt(A=`-ItRG5vRBO>N~frP*3@ZGkis7_S5U zF=WJ@?ut;qApAXmmCa%_YX|>HD^JEV#`HnB(4YSLAy;n#btT}pnxxspz39RDJ02d@ zg?b6#bsUXX22N zU0lxen{wxjut;3;* 
z@eM})1sfc}b90fjDuLzQjQ^uGoV*0@e4pqC=26Voy?{!L%-(1LzrF`+_fM?rG>mI{ z`M=?l=TsVD@7scTTv5#2--ChGCz6A9zFSVB-=F9$8xG_Jd*dRqps%N+^~LX6g5E9w zy{n$;aN}qApme&^YSeB~=)4u^i-1cJ_}iUTe-nD}9F8L3*aUd>$C+2?2@YZW|Bmsd z;=_u?f22p_9%OF{ZC|Fp20&{Bj-Ii0aG^e05{leu$c%0r)sJY`eOxn|cp0Pq1PnEp zwT+h2@1SqZsGc_5Gb-nf7tSa3Im80ZS7^roEc9F`X;#*&^b+zU=!vq%=RzQH&oUM+ zKFIwrtpC$|v-;TUK#&s+TLo6l;O-K>4@4T{nK#x1{gj3F=8q2K4fizEybFldBGHcn zp*gSh`TxSd-VEO&%kb0?X6XjNTN@4(fC2}h#;dG|>wwo!j5`EgOr*tG-dHL^%Zl*p zzvx9Ol9LF`o*z{U`iuv@n&|C0?1rZ}=(%bE#m9iVr+AK{f6wyOM{*m8F4IeKcu*CX z-r)Wc;IMYQ@l&>GgVfeXQ_NfZ2b_#glvYcTi4A4^ZXqfP$mcC<^`^Yz_C7UYH;Tm+O?4X#;w`e<34fkdBQE+oM7#_J(*(x@CgJiz}qxTcTs(V)G4T46HQ?HfLYP7@g&aOZGv z-Cq4M6T-+iPeh&@$Z3;*?|64%@VV6gx*}ZO`QJZENcsQgN^babr^UT|dL76@-}Th_ zNe|vTa9oZwLt;H4+PsCmQ0{Hud=6^n24CNT-S;|*9oddexer)9VboZUwpiE2SQ<~^ zv{rmSs8IqtYBa8v>209!*+Eds3`c9DYG+&0)^l$o;r9nfjB&z7%PhxsJ%zn{4Gnz& zt7BwbZ=TE!Rx^{3Cc2t792N>P=E3*+U zv$nDBQ5F<&ce4ii5YK&g_4V|G^Qka%VG{H(cO@m5gRBl#7>FWhS&?UaE8K~_usVow zB-W?viv2$wz|)g{Q^24#?d#I-ea!Ez;l1a>d5Yxh&Hfgf@f)V&v|z6SdgMjr;>Mlh~#cjjTv$CjEES_#@Z$N4T( z5B>j!_MT}Nj>S4cABo^}n!iRrnIn0bHQb}{q8s$e!UB5wqZw_HP<0X1HkQQ`IjnGE zM3(0yE`)3Iv1Oh+YFwZDU)JTEMCD|n^8YkX55;d#3%G~Cg=pwg1ODl$G*-oGM$gmIGe=^;c`#CNk^6^& zS=jf?7FfwN^e=_J`q5(o(D42n+#k!mX2{=JFdK)Yr7~MZvNr0%%4jU(PDNim`PFym z|3lW9fcadtjeqQf5XzP`ijpwK*q7-E>B-s{DmyXu-Pnprk)4u42H~*`ktTbxWP2DS zCd&{))}dsJxc;Bd_gC-xUf2JcYi54G@AoYCcJ6b|eeQGL&r?qi{lZZE7#gn?GIfwX zjlQ90#!9667j&Oz8z=cZi23~?bUg|eVrVsAK(8ojOhEr?Sv6{$5tp>B>MhoslFN{Z zaC(>o4#k<#KVvmH7)=-l40vwjsrR<+cE)BDqu-1X1K?LFw9GzMC&y@C8{`dE{GL`fMt8V2TLn$$ zKvg|U;=$_(ee?wX0%!oeSrYiR8aldGmWy)&YJP^4n++@0d1J1t&o!p0+ zVa9X#5x`TB{=IYBkLP-1wxpMyK=de5pbhmH{U*_h*;Cfjmr0e;Cy@m1@=xHxp z_cmV|W5wG{>Q3d;eXX`My>i+@)%wsXg>`ib*q)+2N2ge8v31wkAIt{wTMeCa9?V{# zZFkn@8R25Mq9+Ml)8(I6l83CK^;Buj0X1>>&$zUGiRrM z2^9j`c)PSXyfs!~jeyT_$c4A+2lGZ+E&vB}V|S#@$>7ich+hCRTh7S$>g;yRw=o$W zdILC_aUBOu(pdX+r@wT@(RApw8Yw-rjD^Iz2P*Ibz61l;~X^+%v=vnW?Yp5x@vGL75QF-)M?SsYvM0B 
zm>KD5L0$8U%|}v>BaPl))2fihPb?hMyF8RrR=bp$D{9fNk%)4Dlb&*4GBR87X((@d z(6(0k{H$R&A+?vF$P6TKGNX0`bk^_9DCT;t)Py2!_;iVS+Peq9^>}EQgjV#9<$mg$ zKTDs{(~Kb_PF@0=uYvVv)fUSC18xP+w7hzwL45TNiJ<0f#@JeD)dNbZ zwY1i60Y0PGx~}qWSU3>n2mVN&nSH4z@5>@(X5UadUuKMs<(r3FoQcfQJ)whl_fqMv2mJXHO{@RCXFTy}w*x@vS(g0OyKFQ6jX2?L`8T1S z_L#ij8QKRL9nYMb-je?gl853V-xS)gCqi z>HCnLjjCm?`n~j}g~uC{4cRmNgv@w)v6dOVF+D7&gr3;rXtO#H82Ml@H44%CEaX7n zZto-hNelW-4G6I52tCYQ&=|?R$d8uq4fL#rd6G57RYu8b+E9NQljIlZtxa@4t?7qo zHnt_uJTAJy0i!@_O7}LdvuV-a(%70k12$YEYyNoKZqtzjvdiH&eau*qV~9 zfn87a|IkB8@ZZkfZzmM;wpdP5^;%N(Drv=bo|t31qe4`*7OPdL9oJZ?YCPhuC;@k6H5cqw{|0 zH~eo-?ZQaYHO65_M!GBL&WzE#Xv7W3-vd{ve zt(&LQJEW&sxn1R(7ARXavf6H-c4u}KtoML%0{7_f;;kL z8s)V%g*dkroQDN4L^CGB7$JIad2{y*DB1~5oI@hd@kU=p?=Ea%yk%mIrvG9|pwqs2 z8ko&StS!x-yv!nlk>O_a=iT0#{H@PO*-A@!;F;9XR;E-Hr>1#irGs=5r{B=vitS% z?f10nTgJzJIBy&&Zy20F)0bgf7-vlznRzx{;VfcR(hKeLCEEQyC5-c-EnzWpODgMz z`)F!!A}yuW)9Awt$jI}-{H?dqLB@u$1==vSKI8jZKAnNH52$sK-wnw2W!m13mh6M{ zOs4J3uN`1UPov*%!08RKP5d?T^k2|^OfZ@jp?_i+o2kfQ6u<5?yd_W+jaClcI~QE1 ztT_NZFPMaV&`gulptJWaa-&&2&F;j`)7?}zpy&lud(f>V;l3G&h9O;g9C@;`6MbPs zlUB6t%37`Ky1E5D_d#rM$ArWWQ^g=ho^!SR}=JdB|fbO>yx}QE5i#= zICM7T4X1Bu$$L6lUCba+ViZ&OlD?#}J;GdtruI%(LG8V*jW{+FY^8vGB z!C^eW^5k@8FtZsGZ7wo)nYyujUP0;M^m7OrdZ)Ptcpc@bzGrb@ zevsD9p1g%;o^pEA)4YRKu(}=rdQXp2Y2CBMWzh9Dt@VS--hHf0d&c;E0T|32@GCSJ zNqz5x89~UnEAGI4gToWRz+L^%tTVM@^`^Whj|C`M8>}9{i#kYAN3c+)uky*$eKQ0< zPMt@=)w{cIgNe5Vlzju7{auM{?r@jrX#XtOxucNiSr$*+6jp~%CR&{S{1A<$AQ_IltsNL${ejH0jZz}gJh zyoECoUj9K(C#aDEoQnhd)pN9ZG8jMA!M_M=jfcRRk2RZih3}xJD}rx;^dsh+LiGC- zoI8zdni)f%4D+^*qvr|8%>ndXGCEF7SAj`&eug7?#y#8!*Nl{;?Rqz>KF9KUDBTwM zJ`Y6(^VeHk`p#(2&j?l1f$R)xTm1qC)2?1Sr+C)@edx`k>VceBp_TPObrvo97F^bY z=_j}~4o-(WACl!@?XncNm~9K ztj&p{&stIHE?|Z+j>`db(kaHs16Etw1NB~-%8cKP|LH;N#xyoUOe)kiUZD0GqYFGv zZKK63Vm$2U$$8#7Z@dOi%{Vw8s(2^H5vHBUJIrcmt#2E^LjN3ZNMGWuyMQ@h<(|+8 zgtwW4HbS3$d^b~~8GzL4M!JnMrN;8)Cn+9QskOT29zh0b+% z5rZ^bg{Pjo>pNN$dAkiv;mpLRXzO$KlO+QyZb$0>La%MX>l9R}Lkq3ZY4g!BUjk(s 
zupWdrE$HuQC~c-rqx{SFowQ)&yf%~?#BUPwf>GI@gnLgw8{-We^e#53x6CD+!uUfgfhx{tgJv($`cVyoA)`1hack-H7Zhd9G!^mI1k8=+(vhk%K|d zP`gqr{pN>X7a8$MXcq6RnX{-ClA{&QxJ7NK5e471-z=0{qqGtdSb;wDt_WE||ASV!oL6_6 z^CX3F?MdQ9zE@zhMghZ1jNW$i@+aD_26Xxz9Bxs&HFf0za`rB>PH4QGK+bxCbu2vm9$t7~G8t~lFntRL9!WtMCQ2P^U2`^-j~ zxCi~s8n%`8x}g&bQDZKcoAs$T^{U0E6glJN+9QR14TQ z!1q0noM6;ywW;ulyu~>HP%U_RiudVCoj= zEmyh5g)uIwk&|nIb6qI$8GRS}dyPF9$?r;QwZNzbBOx)s@hEP3unPMn)TF|30iSMy~*nnGZOdERua2_V!)plPFvoYH3PD49*OmQ z*Skq8U{>b)X0(t8T5VE8glab`gyIyoz!6EA%r8g7N&_OLu^)d3k>l-Cuy(C4xHo0`&#f zrbwPXS$=Ac_R9i?OVDFNp!2-pmH{cv#`m_A%RsAnnfpp0Io^HyjlV;wmjU_KLR1g_ zA4O|@!zXP=dbypz22~XpU#5ppcFsINmH`;A)51+S`6DIMkOp%PWIzs$zh&!NUz^E{ zew3Q6;aoAe@ix1fmw?;cv31d<8R1VZ=y(VC3NTN6!H>DUs?nqVJClIZ>;{kUtpg>z zX`hL;LCBvj3fQJlt3FRp)4q}7=Alc>jhY4cM$>}#6!io1rzz5*wWSI08(%D%Ct>K< z1Jsr>*`fRvEDIkZf!bP{g1K3;GBakvsPQ(RW1!3#-sJ+L$9UQYdEG(}3&DCSl$?Zw z%b_9Axis~|ke|c+ji$bKw|QvYGe8%`=gvIw-rekg6JG#%d-}=)we$x20M3MN)RSs@9SXq-;PL+LfB7cgU*&%> zv|UZgnito2bK13p`!rN54*u58&HGF|*C#6nQ0re-oL(NK<}5zR5pR=^;+g!@=9&k1 z^mo(#HXjX@jlV{x^U0E+Ra=e@KAEAwZ~y;~T+;%p?KJfBT=3uTeoBA>A!}I)zS(a^ z=G>`ZH-vulKRScVJ%W^I1snsHYr?gwKvEm+T@e|~$Z5zSFrE#!U4=w2>LUYvpC9dW z0qwM#-(7Gj9euHqF|-Pe(}P)P6Jz2R^lmuc-w(8H1W)xiSBfstLuiHQJ_!Q(sX=Qc37L2H4!KuB47Tu_0)Htl;9U!iQkCpB0t&%shvQLIRELZsu`s zOL}8%Z6IqGz~B6pjnS;edtb(zRn+lBG8U?M;&lrQSD~$qIJASg$oM#BMbZMdnf^O7 zi}YZwDa&ujHr;z2 zp#^ONdcX8WH_bv*{tOjEIpWMu{U|V+Suu3W!3b2xpoHw4W-2m(9sxFvyd6ro%z81-_WZz zCCBr21CZZ`AJd`RA-KN)9_U?gkT!lq{wDJ2N$S+6Up>}-WWTrwdDsj+yy-Fy?9WkF z@7T-8#0c6r3{A|-H<{JmVJO>z)p0{4+Kke5C{vs@jcqK0PW@@`FJw(0$RXfv?gAqP z8UrpCsB5tL`5M}Oh#c)i(`AC2dcN;~Ql)^&OoV25@^00SNJlQD{yRpVUPorJ+yQ0u zkFSGHc@`V5cB1^$`Ve@$^{Gs2F?BDf&DPs|`mC;}myyhe|AEgtq0cV3@Nb~@o=Hh$ zq8XCg2q_o~9nD+oeldy>bBKCt(XFX`OG830Gjh_i(p`;ylt-bF@@gis0&uMWmcf^-4aOUObsbl!35$m!34`7C-_&&N`%61|ZU3IDvwR)aT@ zaK|%5@7Q~0zmxfVEifdZ`SoLYlA52wcO@zU=*~jHKPaCXSnEBLR>sVMJ(+pV$nafg z*WLeXP(oh;d6Ys4BOe)+d;!+K3$#<6k)Use_SF7Jf>b{a#-)J15`8uU!fart?0O$} 
zDU^t!wM6J=UWbkF+xXj;;pTlL#NCN`?A)hKqyBne?1Y>*V2;=eoP&X=GxLK!d|KPP z!=pyf*DTrY>zdG`en@(^>5noVt>S4&Cq`!%zI_SI8^P&QBv?;6@3?gW@(pOrM(7Ex zuAT-}0k5^tO&h2 zjgL44OhzMJ#+7OeFN}6_2|c%#C(ekW`1hZInNbwY-#VJle;^G9fXg$VZcxySsKRfg zEWK==rr$``blx8N6$wq|%>v+=iT?96OD_ZONcM!T)xlVMjrkNZ2O9S*deFE*`_brH zP}L8f3hrg}?uSNN{ai1^!awJ_6!b*WVs57O5x}Fz znm64ZV~3^ZXjin<>wK>Zue{;pX;dNb-$N^&&}4)ft&qWcf#&%Xo@&3*D@Wg7{lH@R zR2zz~gfCjn(i!3XfKpEwGpl%pcLZtL2wrP|r7CzC+h+lvA{cEEaAYC$k4MUqp{pAB z87MrTRj_uoEbwA4vT_d&WCaRi!Miile)Ao_T`2ACB6CaV&!dj{6Y4IY#4$>kMOkZE z8toXv%7_Nq7TW-yT0TEAZZ_a?jDW4s)VtLmK#R^mlZ?#efD1;HdKDVhg6RnSqAvNPA{34QC9Ffwqg_mU?^vQsRx?M7ZUN zMJHM-0lpb&xgk5kW9UAi^c_jj6?`Gg_FR0~t zYdBD(Gn#)z%g$nc+Q~|I7ZmiCpnh_nfc>|O;vBU7DAei?hqMZLWAb0{xCYSlVU~-9 zntF8Aq@`K3zXG0p&(qI=yanU(An)M2%@6OH^aI&C1jT>rL?$W#h=yaFBrp+{Z# zqE-xN1Z{@v+TIoc=YDvTigcO5ekZ#T{isS)CS(KjX5;{7TeC%7p||DKcO`ZoX>lwW zk!uY2_vUkNBv)^z?!Z4k(0(@V8C@%@=Z^R2sTO0a3s3Y|nS%@+M0%$&c0XXp zG#Wj92FhlE@7cB85d6&BrJc#%LG?J+#YT1@9=n7vS?Jy!V`0 zX)McUZ_era^936GIey$lwue@a@V+xze<8gVpsrC0)t5Q>$&Nf3HD0@bk-+q&eL$=F z0coYsb`i3Mybrg8#QpaR@TCOf@q5N^MI`JCW=JCuh4z18!0(RF+gJ1Wbsw7$d^VS$ z{zxI8BX1fl;ko`z-tWqY#3s?CG=g4r9Im z6fqaeZFm^X9AsVt&xHF1)HiEpO>ppz{BCNLN7}~nZ9DyDxFig zYUqTcaJm9ziU;-^X=1)ntz_Ctq>ORAybG8G+{7*uG)mxIsK0}}$pqK+dGd)-v)co$ zIBnst-f8xy|B}x#^H*B*0n(~LttwE;zI@xB`oB?Q3w`)RY0b+B_hzLYQXwqEkT-KF zgyhQ-0kn{n`R_U$@kV!M-h0==SYz(3v_$P^bT49V@KmfD z?@GZnWA%}0&EP#c+vn9F7is$ZBH-TDUv1@ftF2H18A47qx(V z2U@T!I&Xb2`t||$c4Y1%RChl38SM7JQ=`zArKI;CwgY!z=F)qNF|&be1m-6gX(>Pv z%4q%sco|*7Tuw#6wLAEEL$)(;#Y3|Z)Tqy_WyFyc%&aG%otgPs(~~nx>tL^J#KRTA zD(oP%bcVhT^!=f#yCu)(E--Q)KvN?)Ip+-HnO+e_;h%%N>YY;!`n?5Ry8z7&U>*)E z_EQR}oC+V0BF8O}-h<4bLU$Po8tLI7HRG9s3I-O0rS$8q=qu>_2T;YRtedGb2p+`) zixGCE%voe@F`V52#l1yuJ_7FWZa;!^UfLq3GpM_3XOYM=&nMRCw2`ak-H?EISUJAoW*FF(QX*v3e1!8xO zS1D(7s4t;>=p?-#&>L0C=q>vF)Q2Lj)N=&*8bLb?EqPyIJ$N4iyNA>??p7iY^rUU0 zq!?qT30&4n;p%%jE0#@QP!HI~^J~tq^~lg#sN04AHCctk^XyOPY_7K?s9=Qgy727+ 
z>-CGTZDL06PcQGnmlj~VpRu!vrr%>~NQ=Te+hn}uVt48YZq}ThIamCvuTaW5@?N4s~Rm72<(`YUL>bx?MGutIlNeFV%13^T3X4V&OxGNtCyzdoq(z|seqS_Mt# zBcTW3owu6xhIJRAmz4R-N}_jeLk)M0XVF}vfwU5Id4h3s28wEz^iFsqq|w)M4KMi!l;*GHx0`0cV9iKt2{3{1)ka1_^%> zi8J$6XLR|yf&E|uj&QT;rN@B z^sL)^3$MeaSbidap*}FU<2eNE-pnWfkJUIGkO?K%d=+b;-!N82<9Xf%DR~v@cA{)K zbex$>^y`@g9Ntj88o-wy49$ab5h)vl-ZS!|I{GAVy0ecZ zBWV+@8KG)26w663Q>f!YJBD(` z1uF+c-X^R8eD2YWt?V6{eDJ+Cba@wQ=`+^>iEJ5AuM0DUyDOtA=SQmab4aCrC~j#Q z{gvgrSu*ZHUHv_}AVn3Cyg2YQzFSWyWF$``MVnv!E1r7)VLx>iLY4hcen0?WUFyf7 zso$s1#z348Sd;17Inw*lsf;mCuHIlg6a>CCP$?0-7r?20P(FbYdLA1Z z`9L5`eJJl~$V|qkaaFxH7D*q*+pf;q-&sBuh)xG%@fiIafL{Zl|n?->v^#K0ASkJzXR^AVeW*J=zr?hc?$$zby+u=i1-nwzpJ`u-_1_ zr9sW-p-U0?aS-@pkU;Be>3$xVrQh$=d;<)OlH>`du@hfJ!)eP+;hlcaD*`T>x1la{ zT7mA~!^j*;{pFO=I-{-K75EA;GHa|fa>nY#SlS648wE0X3aItaJ_r}?1U#G(%+zxk z_1n=zMhN-{Xv;F%yz$hG5$5PL4)q6Uq|s=iY(VifZ^IeqS{e#c);xONgwo&S4a$uL zQ!O3l7meU~IX)SEaS43T&Y>sD4fuKu=(6$oG%aOE_dWrIn=)QXLWM1qoXVc53~MF# z=m~t^4Ar&;Z{|RO&|d5j;CU5_df%!jQZt`1o}0LI8%Rsj z#=O7lhg#7h6HYxX7h#lgU+UcyPad*Kk$g}lm93y8}fhtk7_Ako50cCSDZyB{(--~c0VG>*_L5uEuhlA5J z__~hI-kWhptXG4hBM(#=g_LVSS_g)nKY5eHD1>W(CL1u#qzCUMne{CLynH91WC;G> zXgh(o^Y}Xtt@I23eG}TtnqxR4eY6jrwT8nlQGO+5a-nOrSJY=7GVaDF(5Mj<&C4kA zR?%}nwGyab<(tuS^cX5bol(ezv5R(7TZ@LVc0xUBJ9q+E8$qjiL4RS4RkI1WV(?~_ zw{=3dnT*b!o6!}D6qOOUrRvkjQ%@k!K9CP;WMdQ=xo9dBo5qhfn@-bncm6-cfBAO< zt>}HFjOaIS5S^yJJ{nEo++a%Rd83s?T;1n~@sj}z{IBJ}`?hmwdkZ`WJu@=5ejOn{ zitON%3B!`ec-i}Ef$p2pU9jf34F`Lh?bttsrs-~32Y*H z(7uVhuLz!D;8`3i+B#~#hlUNKZZyzOp-uTv49W6S$aYbfPg-`G(qh>E`T5`PV%iif zRu#yj|4#|6KL7W7sCI!sIzsoe>QUlR=qI1Fr`+SSF%;UPT@Um9G_=f&9KMWxc^PVi zp__i8&HBhrTR6M{Ej@*iWGt6-G-_|g=t(5P+ko!`I`d6rxC-(ex^Md|`ff5hvn`rp zKXtWI7~jEFX))%6=FsYAr0x?&o%ww3p?%^R6W!@$Agzpm!voNz=|S5cGb6siY|sH& z8c#nD=*6?ak?4y4v|Ss0eiIp38kC-kMxKbitPjO&qlbGVx8I`o|IMg#zk3FJx>92R zGih6Xjd41k_qTx{mA*a0)XU`?{%?amMv*i6nfE+WSQ~kxstWU>eiu9Vw3&U>0O)Ge z#F^{`1~9WUp{;>P-$5{418kpySu*;^oo^?gFyH$d(DMmk_13&TqUFJN5EMOy<~9bw zDYzhaZU0ad=3s~^%jhmj6mH2?i>U|{VH4Ywjvs3 z6K$7yPJ>#y4-|5Tx!E=obs$ 
z-LHBgUkvG5&v#=pdPn^Pntn6T&jGt@;Cle6F?w~QfP-d1Ggtf)`ty#TJLyeOy)mo2 z&yeJJdij-A?LcOl&(Q-HC}B@ikuPHgH-rUDvw1jv|2WUG83U`OQ2{$Yx+Rz+kKzaU>{aYW+iL{9^t(2 z4pwhcqYZGFrz$u9W08+HDDUduNG81kSvXEDGjiW%?WHf=GN9E$d<)1YBJ0Lt-$v_K zp}RJvO2}O&+H#K&&nVgmby=g%YV3u>dW2|enh5VIAeYAef1X}u@{@@6F9PKE z=uoUEb(hu`b|YM_Ty{T(m4?1LM7LKg2Wh zBN`|DI8vp@(0wpn17E^vXDMwMY26t^n@T}s*jp%CUz3sM74+h|*ce3jkk2tZYYk*t zBL`D{1oSHi-p_%neDwxq$d@}8bC)~CLA2Kdsz$-f{Lr~QpTC2u>Od`5XBoGTp&5)B zl#|s`Y1Vw*z+opm%1l|~dz?Ubd82VYZ)SqQYUtgM5mglqo6CL?(&nA28sM>$(V@rP zE=OINF0$<)g=u%uCClmHD+!qHp4WUR#njf#KBB-fU{0|3Z4s zgJBeKPN0noQ1Jq?^$dMyhWepsH`r<^MjJsQo4HIcDVgHkcbT3aZ&8JZd~X97}c?%m1g!G>QyuOreDSsWBdWK;H7p()%{(sS?ckr}j{>9pND-fSWL;V;~x+~at!{-R& zdN?{nOW$^+sv20Oqd`hBqc|gZV>g+xqMw1cy9P0~TJc1`O0#2JLw?#Zw)J((L@RnQ zYD+QBN)K==3m#pNl{-k+Qe@&hcvPe>^U$OOR+36U@(Vo3g9IJ|dwqtUWo*P#FPt5# zdF*q-t)7gDr+7L5Eaa!BHu_$94lxCZ>j14cGdlG^`U{D1|MMi!cvo^iR6hZyhXLUj zbn8j9=pZCp{zigjRYvc5#!+c#QixI9jB;z?Xd?7A8h7C4dvuV!hV$tK_!<%5 z?M>Nl(Wz$iy@kY@*I*>O7qfHrf;(gA&p6EufJ{5j8_+iq-snf8MaVl{cX+>$ws#=C2M`%`vnX?gaSXhFRUA280iDXi&FawKIoUf-sc_>m*m}nCfD!4r#7W-e zV=U;U(T`ShFiQOV66If}1)~#pV3z-i9gsHtzKnv?V3w0w_o=@H>8Z=_X?8??8D*{c zKMF0=mz_yYpf|g~JjxfP?0fW+op=4vJMGZrdWX2r%1aNQ0vX{T_PRfbiO?R*IK;YfthrHs7u zA)|W~RP0IFskE{ST)cr}w6i6&^B(QH%R3YFV@n13u~+YUgnmMK&_cfdW>WV)M+|@6 zDMtf`ZDfX@Kd=j($=}y0vpX0)7Xl1RA?Ic2Z6=aalmFscIKU!-(Ndjn*?2!2eN+^P zZi81TY$@JmbMDIxytU9i#^}|DB?FwxiX4vt6K{9qhhI&Qit$K&0^iGmZ&zg8bN@1Y z%ZQ|CTP?@C?9jg&t$LQ9i@#bq#s=_6&G9^GkG>A$-92PWZI}ajEKf_B7}NEDNZZcK z;2he8o5d*tc)b5s03CaRl19ST7H}4P+5`P$-sl;S00y~{n6Ajib*S)EU^U1CPPyQc zafOW_P>K3l6sAIxo;>@J{(hzPQ0$zL&BfK~Zm3Zhdj7<_9IOb>F_z?m(RQ?Ze9h-D zJ`bmjj7Y<9^v+z`&kKz1?!9l+gi$dVo$zQt5q&1!N6OEGXB1d1fm&J7ACEBmX{&e{ zDenbWgf0WM8lyXm{^Bj0VvIa3EP9G)@sOh>cvFJUpYvn{lA_0qb_gT#glr$@fZSW^ z`XXhdtpsKRb6wAaHd`n&9E?s=W*Tqw>)6Vh42Zr#yZjn)rS~Io2k>9k#G^3njK{s8v03iwt;uX zY(9Y|{SvL1$nSZiZ94B$X}K42VP0f2&Uu620T?u6w2p@ZdPH<%ghjyZ8%RreW~?M; z^b|@A1J)VHRt-vovZfsdj>Giy8T>ehj=0OOzQpEwdH^mL!MPr?t{k0UZb=01-{|)g 
z^y)~ReXLuwOB+F2yGnKBY!Q8GsV>hbGw;L@+PlIV_80HOAJEe4;^yK$+%Wjq=0%Uc%rY0J#S z>*+(!msyP9^}z0V+kAA5Q8qmB917(NA|adMc_=Gs40a{0Shwgo8Ok`GXMp{7+HZw) z={XsN*6Tq#b?{cA0FpEF+bZec$b3ww1h6+j`mi|9;iP9DSQ#E4%4&u zkCfiY$YBkh9iZ*X)HlYuXP_~Zs7;MAa7OPMPoB*Lk_L~*0*$$Z?;*XR%n?SEb$2-h z{!FB{*@BJeeH=M#2E86<4%?2d8_HVTbMfV1I1`@gksgo!@x<{@q-+ngp8<5<$1r-! zOrReMEgGU_yooiBlKMY>3LO2wQfs8RxMI>aV}z#R$k%ANS(;IGgwcDGzP$@Kj&ao) z+B5`4y_%;mhLy}T{{Bw8o_}5n)_B9=h&K}A85QAB|3#>w7q*eru0bL1)1>e%Lw0;e4-9h#p6%V2NWm?@7=qGnW>Z!NDFdtGgoz`jtVJXUf=;24G<2m9aAkyy8ou2h|X@p$vLQmd-6SL@TJ4(st`l-dh0poAQG8epqOlhMOM!j=#AZN|sS9w-==9`IuCdQXA z55_C}9HPfK=u-x%9*R7-hNAixqyU3C<(5&}?7@ejOFS*xbTh4{LIeHX zjaAeWy?7t#aYgLee*|labMW|Uw9!mjo5I*W3jHUtYAD6pNZ)%sk@RP%0Pe|wPSbV} z$_}r#f1iFnK+lb1#8rb&Mr){z9DTwwW6~97M%MyocJZPR)-RvVV?y570~Ni z(34r(OF?5ZPTWBwE`kOn16a*gwjP+w(Wb}FU9`@A^jAN6>IjENF!Dd5b$#>}F)F$+ zdtG59h2o}1^1RC5BRq7M&6wfdnmhu9N5FaSv>EYa6|0$UJdI$bHWW$hf}TjBkFs!P z8C>WCPxM>T&uKH`{9WY8s1Q5B_rJ6g1)Pgnlj}+08PH{@rR-DGDM>jk!A2ZwPWkc3l$JF0=qy$gAM))9uo{9CIO|V?quOAaQFc9Y z)*RY@3{Uix(#PQ{@APfDiKWM!wbf`lAF{F$O{(?RH^##n57eHRX;;w$@Gvs%9Bo8D zeIUH+`WGvn&|82@<}67KoVaCE@}=%@XCHazg|oKbN{KnwjejzgO$_#MG4{w$+Y`wo!Mr&Osa=ND#xd4>mV(ak&fZO)7`6LzIP)YLb|QG0!`rjT z#n9}}zt0_yLo03jGm!8&xcUXu`+`xTCEgXzE-34n!KL8T??2FU1S9pI$R9(%r!0Jl z<>_67fGUG6wb%tt<(YHGb%Pst+ThWxC$4_Wi_FV8^i^|i`?+ZAVgMRx%)eXq` z^GIAYJ$TC?D=mlY9HE+_zgeJOT`+6prHsn>G zfzUa87BpfODAES{hN1lnP`Uu$A3`B*9RCZg_g^vu{Ps3l1?ElTNX&+V7vYN3(N^N# zT)&%-*rn=IFcnEObJBdcX}+Z_@Kg(lH)QmxTS}`N!PRIjQ~AvSHNEZq2cO46<>JtJ z0(CxuGWr&2uL)xgSi)bu5Vj(z!ZC_6;%26CEghlkRaMZh?z-Or&LwENOvr50V5h97 zMHu~iXQUAP@>WPDs60Esp(wQU_Rv+zh|xO6p;m;u;N36KO0hB9x|H zNnohV|0-axjnUSZ(%uUKUuN{0d}|-HU@MP;cVB)Jf+y9$OZqkja^LzReYJqp<*%b4 znqQwZ9{~dGJC9Mb1Z9+7f9=Z{a7)0UDtgUZFOjU)FEB@#A3c#3 zYAezGfw@OdM8-rVl7d^gut zMId^DaqB!33tU6NYb>*<=LT&k=Q^$-qj4brQ@}eXITtOe!wwQX>bcWvMho85z02q_5`a;qf8uF>zL{O#d%5Y%i{5|t9?e=fIF3^J zz`p}Elc39WR;!lRqu??`$ndxj@%__hN|+=b&i;q@=n{G9Q!9-PbZP9J@5 z;dP-8vtdS{&x-QCHxMP$#%fl9;YfW~YPDp%84bZ~$|u3|5bJ+$t{X$8O8{x9pse;( 
zV|2S;`4OJz`BDcyl|f>5B1ISJw-K86Cu%hXFD-e8d1f9e<9A*K%1x9Q2vxO6#L-GK z-VXxyEX3KW8Gy&YCW`BC=LqF!X*aSXb)6=iCV{WHH{B^I=0IA&1 zXf@+WC!V&0dUer}e*v|3x#OXl_boizJpn8u!EG)r_hy~GjD5p5v|vt^ve3elcVk0W zVtr8yedcNAXx`xlD);y8XB=f{9xFf&MgK3{@IT4weE zr#Ba8qO)E^BE|=rcR#J`x8r%T_YJP|$rzBH2=oK0%fR4%%#r#j-+I%tn%Ue|=29|` z#aJZZY38OS(C%AGUxMfLp`bo!L#btks7Zm0Z3lnt8%e<79Zh?7`t5zK4Kw;6Xy$#X$?&m5@J7iW1x(sI z2h!V%%r@%aX7Jl=+*Q#8#@aG&N9az3Ipo&E!>^#FzEIl#Yr^&1&@msgZZfsqFQl+8 zn~uybq?JnGV15bDs~ZE;tzce0%PDC(8qO%8r@_hjM>-usVqQdI+@0wypw03Cn7spp zPco{^tmX}`Dq!D}e!ZdTO||EMt2xh~riJd*{~U<4F~^{nPNFw|2jU8>173sHFT&;0 zK$t=+-Wodz4cwa<$uS@8zRG*!Wtl}v2+T-h_V0_VIaUDwAHeAdZ^%f~$HB8^z5Mm} z+Ka4I2Vd{0B|!nt6wWXr()oS>>6nc^tV@q(@i23dQMm^4vxYL>J1k9mxdK@_OiRXd zT?t(uMc=Fj#`&yUI?-!GXgL<%-e4SSfj3U1WAFvESqhX`#!ow_;5yx1W=B>-*XYw3 zNvLiGnsz9?drp}Kp6=KGLochKu=aJixs5h!Fs{ea^Ec486{E|up!%%9<|0>FsclS$ z>9lDy^<cBo8 zjPx-vpOuj|4lrw(k;w?Xf1)9cCuM%f0bte_EcErd3RPM_Q*F8{>F-H?Zlj$JF(MWO zJh}vZXCt@TIy}kPN!fv5RTye-gKOp#*O%%La;LsGNQYDg9e`{srv=yVS_F(f z(Ey$qkF6b)$ia%hn@!z=zQ$Ab9=Lm26FT?r9*pio%rGaZu?U#DBNKWU)P%n)f#npq z{0hbYqQ6p%l3Umz`p`-X`1cfai-gYKgWp6(XGca~UFfRyaz~&S@-YjnV;q*D&I@SZ zylDDujQ=gPrLV(Iq-`R4(z_v{r;oGV&&eBOu9u+f8EP~@9}S{C?PGdzXn%Mc*xaY) zWKUoguuf3*2_VnG4ou5LW+bsY(0JZpR-Q^w)Ht7><@aC?D}+RpqJ<%7koHJvEM>fx zPyud5@i!wJNucID#>;cOI}JUJGw>*w$Me=*jC&nV4x2F>HRt_N-g+WGI@q@u(eY2}7v&3> z?E`o_o+le<&;8XwaPUUQLSS77jmq;kJkXZY7;7Off@q|59^-C4dff;=DQK7Z(5pRt z>$AQU$q&V~FmCNGS}4khG8>Y+?)kuLq*mW~k7O&HsstQb63or>1CZYUFLg-fTsZKXMGi-gIV*Zk|B%o?wg`oyQ0o zdfAx!`!pqP@zw|=Md7u2sxVk+=kQL7whV8igxd3O~I{x_Jt0nDxr z=dwod6!;LjaS@!%i%#|wr6o_j<@y>@?Vf!&^m`_dG`*rd5q7_0#+>izXCtG>2y0QS zu=OmjkDmFG`QS-P_NQfU7ykxT?$KWpp!SYKc_eEQ<-Esf#A$Ow8y((=`(}q2#wx%J zy=DUZlQ)~ew*=7W?Pqr8RkUh!&lAioM*q0U3ZNHlTxYdU{})#0CNLCA4l;urb$+vu&;5t$r0T^wpFVfZi6ys~!$c{aGW% zvo=U)zgL0rW#+-f;Q1QbS5KUqv|+S^POSVhAWQGk+9F!$$gb=XP`i#=M2R?B@Fqwp z=9`=BaXtdFFW~nPdTj($-botBs??03BcOrtW;Y=l#yyIM`-h;D_m{n^X>6AR{I&@+ z*m!s*eY!C-c>-Mvc{SFSr`)x8Rs^2Op?cKW2K;6Q+>OqP0r!wEYAo&R+iqUaT>R=| 
zuCKrXxERMfJ=)LHmZwGYfzmuOu8*{=ycev_8q?c+c=t3s+sGVc)T8ehEyk^F1T6<8Xwc1dCtF`1g4^_wI30Tk#(J$Dn@zXnjFENx8Yrwg3b&w3go zUmN^gcu|0Lh*DAxD(`@zcd4HTsGJFw0!=>XR23OJ!Z`Ygr+S9!li^fd=BwWrmn+ea?jz0mY}`O^JJqDlO5krx z$urPEJCAf?+6k)eM#9{KwSq(5)gKMr&7Qgu z?3W{h+6<~OI@eOC1(b+Dhdm2b8nfq_1;(eqwmj<=W$QfcO`?_GnEN(R-xI!Uz+)WK z0zj%2ur~dvEtWxZC9M#&iKk36_+K44c5nF}(DVV%B-%WSta$gd617A232&7c^DY8f zHiU=O;MgJf9)a|0kIlzBlg$3J8QFURv&?k*sH8LYdLv!a!AP%&0Z8qA;5x;~ee&8bgBjEEo+R{D{ zONpw~`yO6D&Wvt!R_(F*pjBm_No8-v4rHb<`j$468_>sSAHUPvTS(ekAl(3cv}YGY zYI?#8ccpJK3TFiSynE0~FKPXVjL!4~80s(8Ft8WKA}hye7?)agLKmFTe#byJ|0QC8Kl1Ay;a zM%Z}DC&F!`AbW%IacV85o!QJ`#?ML$#`z?)&M~Mxp4RFk6%A;)7n1xUZ__Aktjbz^ z^H!a+esL(h2CV8ZJ|@D|B>MEef%#Ik(R>6wonO6A)BtGoqVXm`68*Ggys7nKkUt~4 z>eU&8e3)-K8EPrhzW`-AFup>c&U*!DQ%mIms9lNqA)OuXB`|iSxg5NQ&}%DtbLXa> zJ_~g&2Q;1pPmR&Bg+9IMsx;@|uU_uQ0{$BpVj(SbfHp6p+YZ3DI<$M1cCP?KK7JzU zdm{SGylP(pk9tHK)?j#l5UQ1;hX(L(1F##tGaS6i0+F61QSi?k$wp>~2TS+x=P9dQ zYa{JPqSxnuMGxMm@WkCsCB}!gTxaDGNMRfhZld-IIILFpzS;A%aF7ymCW==71e+DW z;cciLK%z9S4aRXb#^!ux{G8A+hHS zOG~p>ucEcoK(nhe+W_xjG{7SCOgOTprKT$|E8U0CqQ+Ml0Tu)4uQoUiXLi=!=a}_W zW+ydPApK@wGfr7U_~(7*32?R&>%kq2T0ODO!;!|!kJ*B?T0NxcGTKEiDkBUx4C;8( zICM**5p}hMnqysirT!WFz(AY&RHSMzl+TOQ%KtgMEd}-7L-)Ds_H;ip@XbKFJ@H?K zzVY_N7P#4${}1Uc6EmjwAUtz;oc23H`wFya4v}5dNTAFkaPbk=))m>wBp?@+z@a%D zss{`ig3)jnDrNve&)~AN1JLtf5M$gs03CU5K91}_ksm$&3E%(s$5Yam__2(8r7%kQ zwmF;)<)3vgoELlx@x1^obcUv`AQ}SQjG(=Yj4mT;cpJI^?VO~&Jm77{-P}+oD|$AJ zGM+P+V{GS#OV7}PyQii=X}?+$+*#?5Z~t0YLMOJbGnW4g@4X}RPbqI^Xq&hi?2F8w zn?T!P(5D0aR)==sP;3aCG>U|$!Y%0a1?XzDPtT>zW11J)ie^;x1e&K<4UPh?WsC~X zc)y?x>A8f}pAkDUAc2*@&ie@2d7~|+86$lvpS0MR1H=>gt@PWTej6}nz5q|W3BCJ-V-L3Qa~)cPz4EdD?f%i5W-apOGIz`E>mh&1dn|Mij=VvV@T>-EDi~UKD-f?)?y>`M>l~6i#Rh z(1N7YWQE3C`0S52PR!@~Xdo3CfL*F9A;#&jgwkF+_;fCKQh?9VP-RQ-%o3J0dUQg- z?=Y}=FOUh}lmN%+z;X`0DG@L8sWuYo6I*r+xkFb@qM4<>g4br1Gxkbj+zuALvJ}>yJ%_2mbRgBSXJbM;tc^p0)xBfHq{UK`2 zh5keM-Um*ff=}kiS&Tg0XMT4s{+=;X3wpJobaABh9eCl&*=WY@aJ{$iDtfLP`-SI` zqbPW03@kGZ#G~)c+;EQ;+Ats0XMAYS9>BQ22Ojm9ZQtV4Fh-2=^=B|+{sA1ym#5HU 
zfvz?4UN1(b`GDPvuVv4Dj~Q$!^(V2;^2VPx_D3>SSHk_;>|>V#fqVOfVDveCbYl5@?osx@y%X5@?+Bg7CRls}%v`l3U;{E^pXbkVLUgmcZqo*b9M+Owx!|3kC z3^WHgdZ6EX)An3ieLJAflfW4RW>VNYA!nhOxB5JL(|aHV+AgMrvrygp>LdB)`GnDr z9zd_l%pVt!{l?TVA4U(xaWy_`6VW@MFqCj-{Vl(G7xoR-;Gz74?wS`sk?EA|h~#+Q zQH^=J7#dzbiYR2%GbLlWWerxf&3JYRT#aRKzKxq`3-h+Qc2{Sgg-Xuw?@~+uQ=?qu z=iNYHt&85@1S~1cq6MLG4j^=orcJ{$Qs=)e=-4NrvUx7O0b=&$i%@uGuu4cq8jY?U z2S=K)QrrtQjiwkn<#vC*8v5Ko?`MOmZ^Qq&@a7slM^Y;tuH5C>PsrV`;P^OPtIsn} z<~9PMd&};ODznAKfT#JF+cKN(MIS!|r-s1yEk7Hy$WQLk*=R8D}Qf8&B}}qy?M<8*n@FZglBzeV+GXI=Ab9s0QlY(S%_y1ppQ=^ z+WryRa;+cBPYo#Moy;%kaRDu?LDqicw+DTcLOVGsmO=d$(4s6*Cjngyt;vy*ftBtu zy!P}*k2y2tx%)KANiMi;HqhD7&RYPkrRN}P+vxKb+V*s53KTNyb{e!zMe4pn-i#Kb zEo>frzJa`rV z0PM7XR-*;!?3rCESgxf#*D{BAk`3NO(*JR!tOo7OfO-ume}whIU|=`b(Fm-dJt&=% z5qXKyde4{**;@_2QgbA-RGCj5(dTJMWC~FH0BZdGHuWJ9_@y4))pZM)YX+Y&SP>c(cZg>~-N_EMrXj z+e4m@Lw?fOh3SKRkP?mH>Leuc5#%kEAAN3&O|k-MOa&sNZcbtKH32PP#3E0Dr|`zn zH;Xc{w4yJ8XUosQDQ}I3{1x?lZwbYWf~fU#Hh6x3q%36S+{cXVZp(d@URK67GXH`0 zDD#JU%flP=dSrVpXeOMekS8t4M(oueV?T0W{N=Xr#vBw{HQZe$)0g?y2l2+$SYu?( zn5lYR$HUqEP)ZM|C1Cvq_~^qrmyszfZy_snnbnP6w~D^ZV$qX2>C7GVq5f)mE)4w- zQ|iC)$Y_YqGe7qS`kT--IiObrHH@j#j((Fl2iCr%?b8g$+9WrFgBrCT(DkOwVsLy3 zou$kqG79ua8AI!q*?{DD$Db4I};=BG61RE`l~K7-ew zgjNE_)*tj?%+Afsy^bBTZ!O~cbw1@p_6h=TE&5%6#%};$%pmT(5)0;O$bepL6Zx)h ziMPMrM`PXMoAC$=P(rQhyp$V!t^$wIME8KnVMg#RDAO2RW01bd{N7?D8c8-AQri-l z^48Mx{CdxCJJRXN`!{?$1CRe;m8g%FHumPo-4MGif4pAV&DfmYuMv#s2r$C-?ucUdpr0>*LBYCA2w0R0+JdNyzN z(85)CvkwfkwMHVF%>ua;$KycoIMBKFT7n*10k7ZYHxJ|P6cBcV#usTxPg0{a6rqjE z(8vh2QPfxje(s&WqMt#m^516;F;?tY;CHvK|Gqo>tbD42ZjVD^(;3m7f|9?ntI|4f z8(iMxv(l&?{1`LyKX(D9pcy}6 zb|k|{3&v8&3YKO^9g9>Kq-1-ZJp$AnSkdnUuX1412K{X=pRa4Fv?|6|2%P*)0fd;ga|WK*mqty5gqKv`ct+z z{5;R>l7OUX$4CI;D*QDnK^W5FZOwSfw?i{Lhupn~to4U`X97JspU<5c64p^frLp!rDj3)d>?I!yTMLF4K0Dv z+1BzPZxdJ*(4~J0j(%ZS~v#s*G%bpob+dOXM@t2 z7%}T;<@vu$Wux8xz-3;~_D~^o{;pqwd2Y3V`R47Q#VW|Fu`~4%@cciNA)_+mR;}~T zJ~{(3r!?ww@fg2S%wC=X&Zod}Db&f$GcgU>SpK<{&=ly-P$LWP6ZkLxGV}L`;F}f% 
z<33Jh>_t;@YEU8<@3NsOLgn>L2w4HNg!pX~<8@C^GDL0PH-HWy8;3WC%u!r|65fBe z&kW2paxQ^>9B);5pNCa>dp--zR7!-QNt?kX%X^c^F)=S_MM{}D!r0~iq30Ko1Wy9b zBbm+70*}#dZN|VAX6S#R%c9V^*O+77SzTtfFq-=Ta zpgm*I{;sQB7v=(g*SnroR)Q0&&6yFDbssW|4>gp3-SI* z=AB{8ww@}ODPHe1Grb!V|4pzR0G#Hing(5Rqr)BoznObS({n2I5(4hlg6^@<(0G7m z9(QkVMmTRQ7KDPogVo2hI2pbiWX!MS(@7vQdcp*_e1Ur26g7f$1a)Kh+Z?#oQojyV zH+Mv1o_QMkA>{_~d*F@58&Z_3YIwqID_dc|-~_UxQUpE$FJm8q;zCG_6DX=1$bBS1i4a=q(H3 zzRlAjP{B;4@su(q)_nR>ADC_-`+t3AEwnfdR|6Zy(k8Oe;O2Q3El6r z(wPF5<7n6Ysk(nIu(hRk{W?6G-$6rSBba8OUd*oS>%!9z$pZU|6>@(49 z7lAkh{m_Wnqc&9XK8AkOp8BOw+uV;?7}JfRp|@;*q^+y8yqU3RJ`%k^P6YhZUXji^ zv>cK&p87lJNqc=|pe@J_pa8!w(2sXty!T>!Q!Q-T49&|h5`3yL7UIy0=}`1*M%D`E z+&FYV8|V}cM{7WH?Gn9#(;aYU-Z(1E)#G|p-{G&|SUhxFi6)rA+GY(9dP4p!G;PUv zkAfog(ME-#uCEVQwt634i&8A5-C+!d8bg7y1HF3VgQ4^%jMf#5%yB%CUq**r!uQpTnm9PHmfG#;r6V+JgY3P+DD+N$ zBH#7^;SyRN1Kqzv&Ibo_Xhz1Tsp&bcRupf)Os8~b##uKo>qPC3nQtFKN4sD5rj=)} zGtow&+c|nf%z~%8phgWK^ak;E#+rBIy}hoF`D&;Vi}Y$E$-@3apW{x@F`SV$2`=S^ zZe}wxzNz+!2>5mZn$DzM>+5yA09ovSj@9<7C4D!aMu3yMQhlh-!$a?j-DNH^I;S@f zoYz{T1Nzf)9r`kw+!{Er76|kSy3g-KWHN!#q{f&+DZP5EkwpCtP{-`;M!vig&^sqB zRiVFz;JA}xZ>3(1RXmpvk z(7fK+{)u)x>(y(jIayNJywBpYOil69j z8SvMDAN?uSj{oMjNd`0XT7Hdu>p#<<6~GB5?5H5_?`G4n1X z!_`G?=7oOf_`^WzefRB9yE51x2ZA!lP)Q_Do8@5s8{xiso_1xkflP3oq*QMc=M3H4+qpUK28#l zdB^7t)IAlU5 zN{~RkZ1t*W`xPzK1icujA}SWFS`RfYMdTtXQbq6gJTr;*{?6~5^Iv9p=bd+(cix%d za}95df`6Xz%VKntd(JpIuZChDrp8g|tG-B*ZQ<=?Abt`4ac@&kk9?c&d&ufxV9|BK zXVUAg+;2(!j^_ITQ*nSJj_n$nEcRf5v z`z{Mm?g48fz}nfAUras5_uWCeQ$fQ75HNJ5Usi%^*9W+k ztTFbvD-2w{ypsAF(8JF_O=<4kzpxUjI|@`^0?9j!yh_OHn9Pxhxhs$i3xHLhfKQDc za7Nsbviom)S4F?RMm@c_E&)dKR`F}(xuY>xvKw*zEa?_80jYHO1le5(Hsj}g?wm{*?>TSMFN#VMe3AE9f|M9%No0Jb@V;JeY zhG&Je?KPxb9&)S}%1~!I<{SZkRD$IeaMVa@$)FuNkfRbReVY2FLuu|*vNW{)MXufD zu8b6^fGzi3T#Ob@Tv)4n<6GQO!3I@224QEnDc7_%}T zC<}nPg0xG>=NPsWx^X?LBl;B7a5et}s70^Rm!vl4FvB;+eTI^ZZ9m9eTxWI-P&ymk zfbvg=O1scE2cV+mNCiip3(LBG-fn|T4h0l;S!RG{R z)CW*SkEQ9)s(=^A=0*fKMs~h@Aky1-^=8QO+5E>bt@HG?P^G7R76VV3>j9)m^Q4BF 
z+*@=r-)gj^W8*b^+`qux4jfCzl^gE3Am*!h;tB+7&A>_P!Cp;hLw%fnF1%?x$AXZ` z*?OZ~j0K2iT?(t-mISG`hn(x-_BO#EPzxs9uR_n5=X}=ECL>uY;R|J0j3ZD2L<)1n zz5_fZdG88|=y6Def1zqz2bndRHpa7fogJ)()@Y9;8t%`?um6TV`y)4uhTMQYyb>wr z4gt>5#C;F{0A^+d9o-qZ^Cg^lI-K?fTyi(u{s2<-YWn;`c;9^r-EZh!WWdKr$iC2Z zZzTJ2o*0ic5v;9-6R+U+LNw3;B%dDm|AITd=Iv^z*oeZozn`9H%YTd~6OivG`2Gsr z^cWicOEmmFly79**HHLTMqB5jo6xc-7GXt%o;{0V3#kzxkapl!Pr-o2E!pHq5I zY=9>z_gw0E93A}z^nVeh@1T!9p?hd=qY!l`U1>s#VY*`l;A0Po+0Px+_d> z2f(WH6c>=+J)YhI7i}p2I%@wT((of{IGgq#puU&raZfF&2~y};=y?n6c%PD;eK7L$ zPw>p2==B3YsinLeo_HTBcKyDy9(%a=e3F;pC1XK8gwk76&SC0xT=Wtxx{VLf;wX;fmWnVDwfn@EH({q7|-+cbCZD(JTKC{H&uUd+J(p z-3fOY8DYHV|3N=p0_^_{Mz0HW?P`DUA@&Dp@@ znuPAi?P+NA3i@&qaQqcK{f)l96x=u;Ek1{0V;&AP@icXB1>*OC#mMWckQBecUUEIK zT(^PTo)YgI`)GK>XpHB9(x?vOmz@)T49;+l`3)fJiZs>|eH$veHt^rsyfrRpN8p(k z&?(Z5vDxm!I+;H6?3vHN=S!s8Nxd6*K7qWhLm3McuhZ&FpsiE%r6Y@UelB{^O7G#xy+1DQL@zD=4R z(nDLo`T%M<0G=*HgI`05tEsOIc`twumeM;9BE?R}OYqA;%R7)h7aCBOd(c0UHfJAu}f z1~-t}*o3Qs41b=|A3(D2g6eJtw)c=jXP`;nr2a>tp{L=X!@RqgUevDH!&^@))ZTfO z-Z7rs9Z0_bPyN7uFG|&4x(vzp0h(_Kyy?!s=YWlmsk1ll)_{R;pyEd0M<3ib)O;1x z;rfzKpd0;0?lbCG(0TjYXu0#?p5HVBNcNKEQ!v*C*dBr-TJXkoY(|B|(+)2J zx9=aK!;iIfL@v0gLVvyAIwp z;$R{c(GZ?%xy%LA?o@n){(K8gb7u+nZ+nU{YaeKGIJTj4yJsTF_S1r=Xklxtt3E*G zDL<|~Hl3^H*Fs6FkvDz7)ok!~9xd97<>Ywx6TXgi9TWc>x!weudDxAY@&0*gvFs-( zr5Y_b8+zXv%G`=}sGoIihh!KEykpTRQ+R(RcpN}!aXjqKmkr_lg;m47BY!FBg>N+QJ4UF)R&i!p>$@G2 z+r8l2lgv4y;>j|*qT>D#;XC7woK zwWVgQ_STFF9N{~|9(RH_uY1=u0qwP-q`oQm4~Ei9;?7f;GXPpU{&SvaoM8RiJ>0c;sQb^$62Q`pG+hXCbvHVr*If-h9(MEHT?c_XsI`4-bRWsXsCODCWCdT{_I5E#w-68J^rU)6TN~a`ZY9YWcSBt z3QHM;Wph1P)gpQao#MFhGw$3++Szf(-hZHnH}mT`zn(1Qj%`n&=eJP9GPvH6{kv%U zTcNstAP+tUFE4|saoEhy!{j#@J6p zZ@$F!i{QTvl&-(PQv5bW-aG(QC$WX@3{O4*vZtXbTVaZh!M^DW?|e?)N5I-$)H4v8 z{VVWZ#diy|asGkrFSvDoJ$F017QT5NSR$;=k#{e_(aXsBOX&R!uzMRaZ30krCWp~@?&I%loKWwh zZ`P65ofTc#Hi_R?kx#41e=U@-m^OMkf&KX@w%s`Lu0|q0OmB_@2BT3sgWWaMVid4) zY6^X$-SQ@RTo-XZ{po3yo;cvjfrZf7ELvqe1z=gZz4JS<#BNkbeX4yINo`I{j9juYp##0q;$L 
zJ{*17dRGzrmG+#1IA5rktm-F<*c?q*YfY$2EPgLjleg9H#tlX!$t$_bXCg z7;ygYq?$+D^1)08=;>zO&!b0w9{S4F8IJu%V&i>52}fzwV{k)%Fz^*x`T%tKGB$+g zHon8#CSd44$@ycRNpbl+^tdsy!@#ujI5TMJGxX5k&@E2_>jU8YT;99Z*uC%y!|5cFUJnn3}Cu$9cE3O7hH={MWlWRWtejPk%ub@fXY2wG^9!lAd zK|w`O=RLGy3ludNEPJx}d$eUT*x3&!45y8IfM7cB?hTk8iw!l1I_HqnJt!Vx6z|Sb zyTOY)V-Mqr5x$Sn!$unzQ+@@#UIbTPjYd7fsOUEM^>y%MB!h8=f8}>JCEib2?(sJa z*{83gKkYKg^*_Mo8(?o6P~8GdKSHWHzqJV*^=0(zJ{zAw>#oM_ihO+s-2NGyZwEV5 z;1p*!or8W3+A$V$HMqM3`nv{c@Ov=+ODu1LXdrnJ ze%uTGTw&|XxOVHu!0-?Hv@e)5vS9+4{RcUWmw6X{c!qkrBJJMg-8;N{nI1Wta{o>X zk5FG*vx}?y!13y*2|X9|p}#LrT{`k?!I75hb6){iEPf?)aGAJQ}FzZs__K+_~#mKR(r5 zJBrvt?;Q@Khc={GMBlzode3WcjB+jgRYS_v@PaWI+KA@rPH(=9{B?}72#GWlj6BcV z{@}cRw$HNuOwYJN?|EdSGITeo9Vf-{&v2-_anr-y+mEh3`$@+1SnBjPS# zk5Q^~k)`xm9zA{zWelUmeb8xVlhSD8>GZh~z`{}nKKjA=_STKi)8lAKvFMDVJ74$V z*K_CdptrobTx9_^XVP3J=vXYK)Gl6%7LI4E9pYavS{uIZlJX2CxYD)4C(*gYL+^b-^YbLtR_p#t-R5S0A^6+r9vsi=g(&v^p1RSw{OkfvXmHT9ZmE z%UK!if_2EE?expNlslaE4?_#w0sm%E=FOC|5KOwucPrlfg>M?_asK0NN;}Rwt;15T zv*^|FaNQi*Vfnk@s7i7=(^wgvINMl8PTxAq(~$ohN=VSB@ABISu9yebU7MSrr*{K! zHCXBi@7U&1^xJGS+cEU(2+G<52k%4TrRk3?^s}+7&b7^;2N%Ex?gY1$J4e3S=Z+xU zDf}8rY{GX3ta8^utO{vk9xam6F%%T)Wtv$$RO`ia5bEI22 z*4x2Q&TY^@-0k}mQpT~NyOL`;J74QQVXx5gM#wI|5A$X>H8^%Mu3j6w5!i4~d@bPC z)H4%o7J{z~@UEcG#v|(;A-6!USHp+y5oX=alkI}`rISK>Cl9{6n9|d%Xxsvxmhg2y@N%9s9x$Pu2dSAz4-JvpfkWWMT z{%#JnPUBqyYUu=ZR?s&|(l3GU-F@FTc~F&{=5;=Fa$30Z&f4mu4ZPD|a|qZjBj*uv zG=V}LF`Wx_I>KMcZ&z$8*D#+8%w4ImDg1LMHuGv^Weqy6C)ZjB-O$Cu(Z!XNvI%N! zNy*ER-9JTE?n26_y{|DCrR!t<@jmuXP@ z5NbaPUVi|mpA6#~J?)+=w26_p2HxeCZ(M z#pjec2>8E6cWQ88ak%D9R7NOVOPkIXWnzq8# zfhSJ~zF~YP($3qFc&EYR+sMD1Ct4RLvE@EQ{~e{C$B;Pd=-=V=@LWpXK>OV*xf@c? z-JTxCI(3fsX6iAvxDzz$nByYsW^EE<%Aiev0|CV?EK`T!oJ7Rp1zFg}T z-tN@lc)mXnAHgR1JzV+k)G-`txteze>AwT?*G-h`8MC8+dkwwhX+)pU!pDF_y!506 z_d)}6!N)zo^$C!T16RMGhYw+0SzlY~+e%NrLklK?sdM3XvHxFC*PT2+1r8qqKck`F zdGw=W>>@DqGATRI9~@ece8lLc8TYTD z!HR(S`Y@t>8QdFFyOB0_#6r%3I-(`njxo-!X;Uup!84rCLMs^8+=})*z^^{o_rSV- zrbolOLNM*B$}53*6)klB;Q^>dKdjc$5Ps{epv!jOVE? 
z4=HvbQ?;f#@XiyR?j=PDcgINaFew`IE(Zu>?!NT0*RFtyR@#-cuHM2~4Q!p^%IJ;0 zELa=veWGP^Ww4ztKrg)rk4Ag#vY_RyL0d^{rYUc1nZ7k4cL$XWy(+xgjIO`hg~re2 z_iv=qrr8VSMvJF66kr+S>gtU1V~0X{T0~lH1>`;r?8Kc?wSa~o1NEgk8a5z*j=>3cA?>oE zmeVOY8yTq$w*$TDXemt#wgOKE4O#*k(A3(WE^U0+3#OpjT%de-hP4uG84y`Xp!vxKY3TrD7Nf8M{w zz2~A1glaq`r3WSJRUU;-NP#0)k_{l8>wC2NzU1AHu*HmmdyN#%;_VKQ=sBsyBI%6u zXa!9*4E?8-wg5>dSA(w;`?Bhb~X^+~1K%zeT~{UkQF2lEXGwRu)(FcM%?Kt~jcT_r`avM2i`1 zS;cQYHD=IuIdGSp`c|+HqvaO&a%}*7W04Kn@L^}X9w}fq=FnN3!a6F~i`x~*q=zZc z;r*2E%%RqytNkyllNgJTz)GjpU`F|V2r6*DEHzLbmT^AmtGMn(`*t!$jeY@7b?i?& z2SZae;Jk$VM);a5pVDVjy7E2B;)dk*S)5-j1)dA2-_5G|mWriOfpz&X1DJwYPWhJ2~Vj3C~>zdL}%u1xk&UZ3-Rm z3{oZ=c$PCJ&Bx{(jf}aHt9`(>2W#hP{y8jMtl!{7_w)A8(a4`WcECyHi)AEb)?l|K5mRb(cCim)^ zg`Vz=FXAg?^$WDM9A96Imo-xNB-f5Sv(Od#P>d;GioR?GUgC^&NANqF8dCf>Mp9J( zZAW@aef1yMwC+THj8Vrf-tVAihhlNKPlRW6MN8m^^vDgs>R2(s)q0>dk~sqnDT|wd zh5DJgX7qRtbWuW?bHJOU>rq&HS@8Tmu9INmLw--Ej8CXrU&R#KG?3CPDMJfNz~w^5 zXk|#Veqb+6E0eq%i6k3J>(m*K0E?044QW|h?vK+y`go^Paut{|PPrd#oI(t=-2TxL}A5+9M%PdBHfiMz zJr}j4aTSeNE2DkZysVCgy;hUedr0K)G2FR_?HYQs6B6J!v?{FnQU+6JA#e3ObfcB- zE}&i>#s4I({KOs6wAK1j+7g~b4O%ZZl0czPBttugkop2j9Y9I#c=9~9M1ql~;~7s? zSVjrEXsfX@IeeO;9p6LCC?kxa_4iEd(xzyXtx&u7yJ=AaB*!*z7|T?TObFwVHz|1> zcWM~-5AR#2|K@^2YkdhSbNr|FH44L#NYA<&o(S^08p)szn*)zHG8;$wIL3*2q{`*! 
zc~6os8f65z^lZBZ#_^0gCBwfEv;p(()(~}s_KfR7JJaIWf-RI(NXpCLpImD3{0Vi) zWME5BQYR#HCG|I@On27YPJ8Yky>WTk?#BO^eh@OLE!@y9)Lun@TnUwpM8dcizy$n0 zJ$RcBOP}9_Pfe&>U0F&_ zZQXW^T806g`#~D@AEmE!BgKykdE5(84I`~a8q%L8UAmV?L!QMw2K1!r8INh|fAyW$ zUpJs;$DpUtUZY^7y}eNMeo7hy42}TXA~mx46!EqMsQ->sYz*AbkhURq#-~uyVshU} z?>z<#?wVKyF7%5lj~DP=7`SaASc}jog&l`{igw!#%nQI!8r`LaXiiPZz<2ug)mhEq z+X9}QPr6HixMQ$)rvlkpYB~np&LibMa?c4=BL)ToM=7?q1jSFtL0OQvP*rVot+i9L2sd$IxlFtF7(hIAaIPV#L)k{5^vE* zVVq{4Y@~!dAm|pxD#n#My66wSjC}c$9DT5c%Xu<{ul|Z&U~4qFM)Ju5vyGu=PeeTu z+95VvWwadHZAO{yR^Z8(*_1OIdK-qNnFIY5fZ1{=U?_A{Mo+aM&k9QHKeo&KLzI*S zq<$J*KMuPrj%NCiqdo8x)3bSXUV>iOQd=qK6sZ@&x1L2S?ZP!9RwMakjoJkrC-nHm5{M)DTThBkHQ}c}kL#c4BwK0X# zeV!kvP<{M5(8+tQ2kXiAYu5y{ zc)bwnzmGRVNEgTHj#aJSaaR_!SPL+)9QD+%5xRCUH>A4={ z>I`_4U06QeD=I8k|4O ztD6N~ic~0|=$5|?sH2w2w6z=v&IYe>=2ZQZ8}52Qt&T}c0~Fc8Q(6{A zJNbcbnjx!dfW1MGgu{_ASJ1xpi!um(!*Cp zON>1AJhVbA>I(3_0qQaiRz1-H9qg_f%lWh+h35tp0$EX@Wi9NPp|)c3+q2P1bVXbo z^*APR-Z75lodZdSYtN^N>HqhybCZjyB~pu?2B}huNIvr$=XkWM;t4_x;c>@Mrfmt1 zdmx#Plkx*-%ySr4(nlZj_CZ=LcfJe#oD3FOU$80;+Xl-o8yiB6;<|=D$e{(`Q_lLD zx}8<5g^slp)m8)GeeLRXlrW1H868}~yN0AGhQE{?ql2xMLSninttUtAL=qd5nF&;u z1>W0Gjvb);jh=M{W3()P!?-F#>pTrddpwU)wSAUzWhCj@^j9wR#`S zuc5rjl-ekynTxzsPHT&%!D1(@!>&k+Y`g30LV7XHh{9P1S0hd(T@GdMrWNX9rEof2-x>5q z6;BeR)&_Wjl=`WQC?yF5G2KC)rs<#gq?ixCyWg@}shXB3k=+5YG1ycyd_ym5S(L-? 
z`>917T}PQ|M!;_(-70C(F>;yKS!XrOeoC^fuCLg|yOY>!b3z{vMHB6TBh@Ce>B~GI zIT&<@Fsro&Q%V~k_B3N>VW+|o`=DKSh)a;({kC)H8`Jv{tDU~Pa}{SL(C5)A>2V+J z5Ep*a$W2EKS&VOu*LK!&BrRzHJQI-^lR_FR9^h~seJ7XGu_3TOap6`@L*;nzt8@ZH=Y3Mqi zzWX!pl?9E_TTS6HJuuD)8DFD5Xh;oBgXe7&{oR3&qml8z=O|+?wnP>GrSNJRj=T*> zV*LM4p(C%BAiEj&EzlSVs4b`6FT?aIPbLa4K(B}G$-0Q9y$Sy#Cmy+MT8mqzQ z8m^qHDW#_UP(ly1$skHDgD$l?9)UhCfH%Z~y6Gik;SF5%rbMNY?Qs6gIh3xn*qHuG zp0x+}zK@?YaJ(I^AEXwaIb-NtP+q{mw_%>h9rY$cANAwmra*U;w9a#M;HmpYJVy_2 zprvYLwZt;MYCikZuQvNJAgbU$Y9CK3u*P}h{s*)p#3l5za#z1@bMlo#748nGzEShc z0Gk>3QCXklD|LI`U78lA;m{~GqwoRu;~8B&gXifL~Oa940p!Xdh=|6G-dq) zdUKEc_erDNFdC^owV09fa)CvzPw@V3awWKTHb=QD9ccM|pGTP0Tgs%ay!-x51(K)) z{B^-^qVKj48EQ0>-i)!($PxbaW9q+mKFF{2I}*_&5l3Fyj9K)~9Z+QS@GB?W2f-N! z*E=2n#%4k0X9LMSl;GIMQk)5JmRyf^IW0dF>RTA3ep9ZuKw*yOTZb`=@v)0}dN&wX z{>PZhRkZlgpfS`wj&1bWcxt;lXm$m=y{RL5g`A&A1BsG*GN~Lf=pWMK?>zeyAlgcO zo&?z*oVA17wA-D{QNrgz`?JYWOy4N4#dXvW;$SG4GliZcC2oRzQiD&`tNzE6Rc-yh zezoWuBP(4QHk|xjX}6fN!^aJCldveLenZ=;w~IUO2rjCj_VyM zq-c+${S^Iqh^t9R#pATz6sp?sPIq)uuTF*Uh3*P)mjz!mMCw;T5n|EU zwCq5|(b{mA5N$1YJaHCdF|+7hNzs5lb`Qau;EY1rJqG-oOAUGq@8kOfz1RpWI$PA1 zUR_IzoHsY}zb9?Hljp<9eK{%g*4+qarbF9r;$E9btTqF0Mu}F@LU~7yP&RZ$SIJ%4 z-a=oAMY4>MMh75m#PclpXD+|50MS|SKrRw%2=e4Qq=x&du0aYI{U3LZ|1@B2CQ(06 zp~;+2`+)xyV6-i5iTvf5%>6zSv{K&AC1z+H?Q;+C*Xs1fuaF+cNij1>so|8;59&>m z<1kP41+Aj5)UM62P+HNy?megkeh>avn-)PA1UQCQar^q+I@ z4$CPZS9fXc4?}iL<#{bR(&QO|jBs7p5GZ>oSavMzI9yrY3H_1_wN42odZJz-=`JI8 zKCRa`mIUfXNRN2lY7Y1AAiI!O35W0q(TdQMUX@ecB|y^-I!mKL^#dBk5H*Qqm2>CF zx;V7#KKjSCee&_(pnb$!iuBI;FQts$+-pH>rL1+NOaX(s^fUgA76M_NFv*nMPlhZBhbAk8!lplN55>GM*_ft0*a# zG0u-^>$g0QXXJK)d*x82v~ozTs+}!P)TLdK9hJz1G_<4jo&{c=PrVY}D1m#s({8n2 zSEx*%zxydEZx13LQ_$&j(yBKmg{QVn{2J3Pm6$SmPU>}W8%iidvZP4qS9{y|_k%>; z3Ei#qj1=T7>s!G~a53$fKo8HSrY6w0J6nyR6lb^fd8k>IV^s;q7xdLQdR53~A}=bT z!OM86Buxj5l+o%EFlIDzTk2>?n|C6GRwKJhg0=3rYb_j83+1OMza3n$A4#}?)KY~s zkwYCZ274)Y7XZmlN_3^q7_jsVvhKWq$@SoQ6_&a7{_3#8d>>TS5W0PyeAmM}*VBvZ z=$)3xL}Q$m(8jyra<%<~NSPF{E(g0}{0+*F(L~jx-VWEdhVRCOYrQTdH`-y&JJ 
zQfCvW^iVZ{6>>Ne#&(2TR>#UXVrEnUa+R*Y*h{I{pC z-sD{a{*BCj6q`uiwdFH`yaqW|4o%qRSzIZ5oAAuIyAPnA%8+(E{yJBrbV6>qw{Hz& zwo2rfT-=YR(?hB`U{HUm`aK0?TK`kgCM}?#D%w*`41?nk$KO-nxAM^Lmq@>tUg(OZ z)J|T7hSM{4J-EohH<(N5Gl5XObuFcgLMp$2RPr>qG?*@czH4}+b)nrRwl08bjzfDF zLYMkjD!@Zuu9W}!$J0PM3@Xus_dXQV2|1af)!IL0^tPudB!FM3ae&f&F3$Yq2I-~O zJq=&v!z)El!m!YL+TLThP6PQ`eyypN>$~X>M-yGbvsq!}ELJ`z|JPxjBOe%-(bvWf zjN$jiy4sdu@#r)8aln5992pbmT&1+~-$1DMPx#W5*RQU>v|g%!sxsU+!tR?1c8qti z1;xD00;*D|+-Np^{zmRM04sy}Oh7XDq>8jgw<&|Arayuq@$m3Z+Mv;08KK-WW>YLX z!&!-R%!E3W`&;OPPS8UW>=fxs$?Yg`Son30;b!_TMQ!TF@c{xUU(M@WWE?pRf##!! zte9R^ubRS-a~6&fysrOOHkEMicta~vKgM8SQQ8?@&;xw!0b}mWP{r?)lzA6z)Q5jL zJrt#~B}bde^3*`q>`YLSb}H4K!)b@CSqWF@nNjLE8(IcsyOTx}s7}i~%ID}QHX>j$ z<)oqJGH|0jk8>L1UlH6H6YFzXF!$vEjUB%v!B-8bSXdSoMc zW3q1T28DekiK}iG+;(ED!`q48cYs1XtE@bE6 zV8;&(Ev`i~_l5GbbxMIjOU0G{GvU-GVHPaOb1AqxB^qbo=r|8<)fc@6p4>`{JBKt% zyLSUNjSP3k39WHG-OfMi%XMYDmh5n>_)*kT07pE9m(Mvw=^_`&l0#{2$=L&p4gr$6 z^iU7*;GFtIaG^J~63)|7GtO5T^eQ~yPVQ;aJKEBl=8Vsc^yEOUot?cZXufsO<4O9< zxn@UD(}1p&);@>cX^JM=52haEP4r7hXa5Ac0%#}$ywYSI_1Gi*XlV`5DFLd0I}hyl zp|o>&XXHg9SX^U*l-L|{7Vs-apAJRrWK@vNZ%2Bs2|VRoN*=nQlHbkzjv=klS%djs zNFUn!+Fdh4N&2{*lSo2Qg*+dKu08^k?_uE-GUjl;$ekER1X3kZ4mHlYc3UcnGjN!nugT7pcK5?h(oG@dr9l8n*QRX@uxf%*d zWBJvfQ;o&_5dQI0Ue|qXB4ri$Uk8u3Aje#waIBe+Mmh|3PDf|&rgu(JVI1@RMUbqIG9B)@zD-i7|!UR4Q(^lU<^HATuXbNui|^0XRCmx8tQOw7j2$C z{3_p%!6l6-&3ThtcxoDS=(r!Q()j_sS#eb0yA*gxF&b(CeQSsGC-opGr-py$DCUEOY9Nj~TE_K`Lut!D z>4}Hv>jn7xYAD0gW0dx{!I`Vj3ywNGBlR<`<_AcP3DBeFTC;^gQj`Iqr*l+=}8JpnYG;N0j9Fk=1& z(sd15$k-^Uu{UpXNt+FYtEb$*(-<_~{(Vicj1tCl4Cj zO}e|FI!BSMq2qCIOM;&5h%A#%g~}0L1ueA1bREaXQBv>k%C4_NE-NFI-&j*uXv=wY z+ml42SB7$u$RTI?C!u51U)``!&Oq`O@Rr#rT z@k?4}4%ZSgay0&+RgS0$qf&Xn_(_<4`uD`4PGn+iFvn{E6k_7id zxL?8d5b&gUKANvO)^!hQ#$}~kr}-*z%aK~{E97|8ndrm7b{NRJ!ym@6Pb9?vw9b!c z*+iZzn^)5659!l9{@(^m8<4OiyeWjMs_7NwX#q8BIk}5)g1qDTb%g#c7}PhqAAD3H zuUmsd-&dpOZw2B$P~w@$kuk`hTt=@q^L7|?-k`1(dJd*i*R6c@_p6=u0*$-*Jxa}S z2Cy@^+?&T)L|d+HvXXaqgb|3bdFH*1d&hH!DPtR{{K#K&Lo$qou0`g0j>~PpVjQEe 
zrYP+}?3ix?RA2DalWN4Yww)v5SdvlF--OcMrnj}nvhh|uPVNhVCJolLgh$W^Pa)Gr zhH*|UZSf2~DPuk@YDj8*pziM@r^XY3wFs^S#^Lz$%h8CXK%mSmpv3-+CoX4Sg(~o# zPumYr#?#25hp`ov!_EWP-zlEEm(`naXcgAqMxGcia1i-aK|jo)v^;3SGmq5>Ux0=8 z>5W=O29Czw=Dj-|xoSlz(taLu%a3H4Y;gPiQvwZaNSz0M;Vf=Kfoj-=BKq)PwNFBA&%Lk4C{GuoEh)LHbT) z=1n=T-d=+2(s$unFC(NIk;9lN=P{eWBc6aU9lbZ49Ic?G(txQ--XEm|rCl6(>mTwg za942c=GV1YohUDl*19&{k@)F+6UdrQyq!s_<@~+0S&3#eVU!R`mzRKCX+49lEs)=* z)8iR9;_vjMdUX=I(UYa@rEb(_+3{oS$wMKRJ5V@2@VU`ljveY##Sw<1_y^esnKi(^fnHh4?|9z+k$iD<;u<7Rgd4{jCB;JS zUZ9ty^Ed-0)i>pd>p2?m&b_bWtmggj&aJ?m0)uyg%YOiaSok>>v)-QbXmeAfcCQ|a|mtoLf1LGYXb;PwN{b`Fb`y-%FgtYHs&5z;Q z8L1`Yw|*_sT6}DkL7%yc|K}lvsRkfX9h2*!(yLfSkLpQvK1zL11zb|cE4(%SriNA+ zu1I z!NcZU+)dnNNauQ-lTc0}*EvY918|D+RPCT$orY(%=2y^*rL^rruxh-wGf+85;|W1_ zM?0+*t-OP$a968l2_85Bf>ppguh%u9lS& zg9b>=0Q#s2Z*(X%-1cXs!t)$=$p`Uy&Jgl`8`Bk?!2kBCg#+nMh1djFGU*^~JYY!w<9+E{^nqWtX)9rBMSUd|q0~dWL zBOeI1l5#u!^flV|17!8V&@b+_lp*JRK?c7^?Zw=E%DXgeab1eu#ac#EC2+soyBl9d zI(^(KaDlkZ&HG+ z(f*!~+&T@qUQX&15KKaTdA3CXl3@}lihw{r+XCv&2V)z!&Veh`nFHae_0;SP>KOj> zgGM_*P4gByBW-;+`ok+}gXhDx2UmJ_8$qXiz>{MD z*VF8Te)PlW-_h1_uI)AMU3E8v>yf1Ez}?*7_fqb*p*_-17Fx?yjYeMGLSLSb{AorD zJPo&;Iodu@&jM09?#n0jut3d2kqD1NC+AU{I|{A|cBE^_pN0$6&qk|v2GS&X<4Md} z*b;h8?6uawxslo&7wH=^&bx*&=f0ohpwW}Gtb`KmSv~*aQ9IRn*v_Fuy*_$WM^e9| zIQNlKDx}dhN=4Vg8NYNq^!+u+AEVinPR>WWV$agO;KtxZ!wU8{h`{br5d zbQg#MaI_z+s%xsjV>dXVEp~4=a=Y8YRMHI%zN2zu>oe(C3|VEfLdk6wbh+*?W7yAMs#hFaZA**W$r&{^~Z zwe_I}rA_-#mr+N?xOukg7yRonP)d8!>V4r_Ep55$;e;ywQ@qg!YdZC|JKl(`0*3TrHul`dUuACWE%R6RFldcVAx*~D~(LV*WdJr{Epgogl%Pq9Q z*_^E*=Th)dUzQ^PwMSeh;J?~LE7SkyQ{!S8V&FqRCgBT8dv=sZ?spnqKsWK<-?Sn zMOtaM0KBwEFXbRFoI^C?ejhEhRa%hEseK^)kWU`(im*wS-?eLk9s>F0jpn_p~x|ky;(F;gW%_q^;hh(i%U4{WyzW>4BWO z1$yX=6w@jj08dRK#Yi+$5^O90u1er6r94+lE47;;6`zK;(m=1D@1L}Bc*5&Cp2 z6rBWvTad0#@aFGae;lmA0${48B(2L_#z^j+mr&g!>D&KwCY~kpOM4(tdl`5og{S* z@=!}B7n~OI^i8Ot8?B4$9(#mw!AK;s^Y?{VCTVDNCmQ!0u%V`N%+N5@Y7|NmEK9+s zapfM5?)~bH*MqUBTk-r>B&@qPc7tNGNt*&A<}+ep0zG5AynC2yM>RnsuH?y1e$(`9 
z8oF_uY1BvDNn#hWB#tOc&_~Ybwt#A~;iWkK?oIzT#>P>0z8Kb-EXB%7f&2YPbUA4u z6fqcB^LaLjk_VGE%GZN|R+7~3sM0aMBP{3NTx+Z>bw+drrS0drGyM~wLtAeAyxawgnxEeq|WGF?i zko$*4jXgWiQa)VZNZ(m$wdxRB8a-x4W=TJ(fGukt4E`r@@3mNoAN6!0&x>ix_J1=Z+4JzT(6|fWePC-WlByr=bPcN> zg?2o1Y$jx1LouFq@4DO@sq1FGGs9>hJB$?cKj{ffh7v7x9{wuzo*q>rA&&7Z9}daT z*V=mto(&EBBbBG=p>m#SaTf3|4LdI;WxBJ7+A0@G(VF*dY4bDTz3Y}8yUeG=xc`yf z_fyc!Q?${Mr8AYCNYM*>V>o#ICGV<&m!JnNb$s3${PYN8V)tP`KTvr&8X(Rvxqs*e zxxWwyo&QcU7qFZ6;-e-&_Bpwi(IR`@=u#tVmcRi<8C?TTD}eh!@{H3oea{x(51_^Yfwo8X(p3 za^zwR-8dpwln^iAT~}hZrj72sVsyt!`1iNuaK__8u;UuL*U-|QAz(kdgU9($o3yT7 z?Tn0mu_t)@3*OvM3tTyAWKkNqtNv|3%e2yxp^qn%6>##3|EJxMV1 zKa??s);s|mW%SU;Xbj_$+yU3cf{#!6ok6C?7&b>!dcgEbDN#C7;=^1WJTBuv?xf|)9!CV;+kqczm@R^a@ zO2iJquIo)d&g5+tbeINTu}sUZ|3o`npJIeVnOOf$`_UL#?}e;@ul*WFDb5sVskVbt z+~2}hyUU+DsJXKIS#b6=n%rLA4NWe9ZuDa5@w}YgJOY<=#^Tr*q-`f4K1#|9LOYaD z=a4qZ_xsPkj$PaV${KweM;o@klDkUiNPd(XpM(}okqp0H>2o*gSWi+aaOOz3uPC&{ z78d+uZ&WgKc6jUr}y8tTFC z+^yrS(01qHvyfNY==lavSU05ZcKD-+lHJWh*iV3?e5AkY+c(il?L{rVY_w1fW5XAb z2I6)N<=exT^VE3zlc9g!rcNc8z87Pr*3wG%XD))5$6{?91XLDU7SR2}mR=%*M<@BQEvIRSlrP5L90*RrlY$2yKz)L_aK{T%102JLu1n$pvpPNNN3VA2!*l0bWZz}hgnRO=l!Lb>z&)F%!s#9k&UeUt=RgBOxD@GUba*TLHpa>}2V-e^Sjp)s z4E5t>^lj`p>vA6kOPloFv$*Quf9nLPZW8IVn>|l@70}oMt<}zOmGRR*fFk9Oro3@= z|4005$E%Tc0GqQH?^E9>@|DnLVK9~m>{bDP9%Jh+)T0g8 zn_qWFyPX+|XGp&YJSu6mjmkq`jfU#`(x-7QCyfqL&J7Ouv5>9@0k4xo%J1YJuf$FIq~=upZbobVhb$;MY5$ z(qj1k5n6sc%&o3QrfB8w3~h_p(z9ZIM|Q?iD2c?Hd+a##c8IU^4b{Qh%Vz-%$?61zqis1KyoB5;wSZ2hK#C|}=?zCl}j z9x%JwuY`MJFy`|=n|^hVR(VsqT&i76DvJZmkX$IW9l>52Z#KvhiMmE>bGfnYRsa3DT>ikvc?RHRhIjOY`%c$>4 zFeH{{){W2Hu}KeAb3Uy}YfNlsdbllZo=7Ss?O^)*_dt~cT^o(>O2Z}ezAMK@1Ud9J zu*?Jpo=VUW$lZH)Eqv-8!v&O=f)|Y;eT39wfHcmaHN+-x1QEv%3+Qjx3VaAvicxzm z$&)l1#Z}yTu*L?CX#s})M%%mx?Vm=0{5K06K(wnCAdSJvG95`TGO6Oz=dbN zy8o>9_zmFr5ZoiTxC@$R)gES4A!YVL78~oWccU4+F%qpd84PZM&h`JyrTveR;zYJADDpAmu?N?{oE4&@p?^N5_GFD|UpGn<4ckaPVZ1 z{>HVxPwy7vA=6?Q87S60BDH(QQ+f;P858{c`t;Ml-YN7?XDG8Ht==A<_}u*xJA=jc 
zl=MH`Yu$-wC7He8K99>NVFP&RKzp1|eh|op*U6u@@R;L)?$qttV{`QY&NR7_K)o7j z-3RBXZ;cw&QWycRNiq6Aj4L^Z{Khipkx#kfJc#n4{aX=zY$i|gyC+J2?S>h= zna9`l6TNv-4YaPM)?@Nx+Ha)CtMHjQogs3!id^0$f>+LXLF-U2rQuYs>p%AQq+kuC zzI&D+UG(wG`H?oG|GRz^;ZwiWVKk?{%7sIc$h!Spzk}^O2Ykd8<*x780rg4Q##Jd# z`e2FNL4Vv9sKr&JU4xC)2g)|aDG!>GXU*fdS+CAH^rbSP3|XRvHpVQE|3>JWXbC4m z`!|I=_OF_GVCW5P6ZfMj=Hu8b73kq0x$8f(L!XPG3jFz-!`JnmRiv01=uCa%$g^Wz z`N|zBOHLA+N@!yz@7#6FQhi^Eq&^nzOf!#Ejj&`>{5p1U9f{F+`S7WGY3>Kw3~%+4 zw?T#;0@?&N?-t6~OusFlbXTx11J6Qg{DQnCIft1>=pWc?XpHPwtJ=yPJ7Fy?N5cCqV38Iaf^uBvx@l?7waFpOQHfY?Y57Q&(9smXIN??HMSHPwicehEZpAoETk5o6r?0wj;I z?~YJ^0fzrdyE8yP5lPzvULHv)e+3Vtc~=h2UPZsOpe$F5>CZ_cWg7yi=b+pTbo%M% zl5#3^kfzLgp=(DaQr~iNiX$V&+VMm>c0VV*6zbrw=u2gnyNoU2s#Tye$NX_kKmu73 zWy3PK+cg8maDT;HBT8MrcP)6jh4S?Ac+#sgr0x{&nD8y0Nf9^GiVSa@3;7a$`JAVY zeg{E4bI7A-)VU`2|Igx`w3LVc_7(bR7d>4Wu;J{mHn8hImhwN5ynRURJY&Z|1v9|I zL%eyGGToa(TU}48{&eRcq^TBKv}p8!0Y>-7mAC##uN@_)>%tpAiCTk>c#Ut*t;4Gx zXHO~8-(;!~Tz4kZ#g&?`U{K{p#AhLU2_`jpE$> zHe*-t% zOrBqK5GaPiSI!;X0XIHPR7?iRdx-b$5ZNb4@$LL}B4<~$?LDDH{nA=H?h_|RsadV* zb)I!0m)u$r@+6S8J-PeWkK+{OuW>`#Q%c6A0fsgq-AFJwy3QVs`3eJ$ih;N{wME~P zyZ)=!)uo+)Yy)L1!ICZ{ue3D*sMB!EVcw>pyxru@LkcS?rn7l%{{ZfE;O3YmGa`J2PNNKinjq*21CX{C`g|diqkNj&0EHmwPJ7 zt=G&rUB_fEVe>5Dojb9KWq0wgWpn9?_rRX?V{Cie@vI*u=t(g~Q?5!NqtD=e9sHmb zQ$bmt#k`AB%Yku2(3@AH3({cbNRV%x$<>TL9uC}U1~s424W4xDY{H7rKdu4ubd74# zWx#;G;#?$jN7Ck#vL&+f5;ViVfvs18uNP0Yz~i1&a&h2tBYIbX)wjWd;|1d>jD#x# zQw7+#>yh}`cwbxxbO4UOk+<6AcLS^b7Pa@Yl<$~y4!C!Z(@OgI7;<(5dZ8CE6>^sX ze%DJM12a{W=p0mQxJ3^}2Kz)!qz6B)XK`;n<-4mjV{eVXsu=_3^!S`dnZ_E-;NzG^ z>p+JDwj{RZ(N^#741-# zD|HTnbtT$K?%jbT4VO7q7=q_WPo?Wy7J-#o%DI=)#E&!H%KRzxVmZ>qNC@N1q+#co zmh*2!RHWznU&j(Do+;VIi0dM51=sFlyP|H*wZ5`MkQko(@GKtr8t&!&eQaDo)o#1lCi98-k9h6vP~O3CE`^n zev+^DmNC}0R$kU?B`5c%9h-rq3(u5r`mb-M1ZS!TfKOLbP6f}N`M8{@kk_f%y}~Ea zv+`*TrSxR9rwyo_+Ds4LK^_0Pw!w%L+mgf|*p0l)pfkPUYPLe^&^n5(iS1lU%1XYQ z>z>-SC>33&H-Pk&z_Tx4$#G02Wt5Pn2s+*hp5~#^mm#s0Os-%$hBQSEov^jDEeAcRjux_PI{4}hCU5&Ymv0*n#@En{1$~zPNXuO?!?Z+7cF~1QjDdO1O 
z-yBb=8|A4q^x)nep1D>HY{rUcn^pm(D@V@&(^8$j7kT%G^uDvLn}OB%)(mYD#+#r| zQ^}*Z!)HS3ne64@tOSe}1J63zxtA2uh*Ih(J@Xa!>#&K8h`o|l{1v*aL2g!(<{W5R z`&c{bB(3r^#1-`Z9IkHT-(5NyLc!(8%lm??H!`&i{ni4lFpU0~Og(yt7m&(5jn1Nd z)$oa)BUfYYL4T-sq~;9TbU*Lp>fezsH(*|`Vurgf8H-fXHg}^l*3b1=Y3|=8bwg&e z-5K}`(n&3)w9_4IS5odnV161Yj3amgsj4LKrDo&eH2DXj_rYq)ouF5hp3d-LguOW^lfTCe3joI6iEZvZY!NnJ)OoM|1; zy!pqZ{yNw{YeLJkK@uUhMoD@FTrdMHb;Qn{2t*~pa+J!QKQ-2L7pd}SOBy~imPX1b z4Kn$D?mVH(=mpP_GL|LI72S)(j+RG@P-hj;7>An*HC0feqY))q7vzbw>1>$0!78!!(_XIfpVE1Y92^jnR}1pa^wrIZ$W=AA}R?XQ&-v3b!_axbixF-h6wJ@!g5cs+)8b5>_Yz^Xev+)H}v(Spx`>$`v%_v7D%)KquuU^HfIwrTXF81*OD z97D2I!t2hkI{a%RIWY{tTR(PhDGhzZuWt)7S`}QRMy&#<@EK;VgmJ z|C+DG3e;Nndm|a$`6xvX9f0y@5K_ex3*5Zdlq$ z${hFB8qEK3zJ5Hbq>_{s^iG#xQJh3y+V`dOktvi&$GA79)SbeL;4yaua|PNEG=v&s zbf5;$1$+h!8)tfgQnVzD#c7S#-N+o{fLl@HDBAls?%l!bUfQ@07__F1#Bp{Z7oDbG zdKz^+!T)^pg;Lpg_WrcO7Rd?jcdF&m42XQ<$DIO{%hlwSw%ikW5&!dOq1K)|K2~vW zEWJFiHb_k6vAplt+_%!%W`3*bHSxBXUUz-7@XXflE z_febPVYzc?h{U`L>8QWfQJ(Q`OUWZ&x=uo9o&PX)O}y*fZU_vOKx*vB7+MxBHg|D~ zTEjQ-tx{?OC3%jqkvsYwib+3-Z~Q1r>#x-_@lCft3(@zI;pyM#;d0U`U$v{WMJCdw zH22xqM^Vcw4craOr+%vVIuFvB=k6MIiu+dZS$z9BrG10VZpkU0sYx1;cYNLZSd%oQ zW_v&MhmuZzg(c|2H9kDb%UGsQg~8}@zj}M(n~~J}Z`?a);aQbgU@!|@yQ)R&CKoBt z01Qrqj?1|!0%v`Id@+YC8o6O#Th0!0H-zKd2R}h; zGQ67<`X)oKoQJe~j3-eWxc+^R72-Yz~2%$>parvlNt$+9;U>hU}PTU&EkIu(0ay_R&I0p zOe%XANt2+=)j;O?M~>{a(GE4k806*0ymj}_Q{*@PP!7Hz@cA(O?d#z|`Sxv;Q?nC+8ej(r16C z)CWneB|8cHRUqSg(7rX$-~sCB0la&VQoCsPdU6_nW50R|+D5L{2RY==eMLL)ZR^;@K zv|~HCaAn>5f%cUy)1i0w?m8RDzokX4I+NpC1D~^g$HItb0oKe+=qnB6?uzviP}BhN zaB#7KG+DH85u>sU2Dmrxs=@nn#kq5gI?}J zOVdEFWYR(yO3B(OQ@Kfb)XTQyxJ zpT(iqhti&2^rPBcs*ai<(u3X=ca)8Gd@RFXM+25%1eh8}>omi^5x+(Otim7fTB}27 z4(ECc;NU~_q4OeDNEzc~wnN7=`OoKW5c3S~E71x`tuMt=lxS&q;UA<-@kV>A9ryEi z|0FGny15rvDWtcBpkAjPBK>G=yv5{?do1RW*B#Zy&`-zVX73yGT>@?rU~E*d8*1v> zDZS4pZ*TtfGBrXzd9B85OgZi(AbdjP9Oan0|59*m9F7*%E0pCK^~T4#`uq)^%Bkk? zG_%K;fpQgwEo}_3C6|SrMe+d0?vues(2#F;Xb{p1X5DgH-BptwT>#yn|Le31%Em80p*? 
zNo{F2(2|G2ojPXXu!yYHc+yo0V&s&agGYk zqn9=XdGG9+X9V3tkGONf&-m)KYk^$#Y|4qi*Z@2F4Qz(J$o^4`X#0ZC2H<@>eg)4H zE(F`(;A5M}eRo>Eo%Xh%rRM|TFnn2hW-bN82a!SU82&!9h58k1fkF#^5AAX7YXXS2 zBAqj61no(E+3pCgm!}yz^d3s`>;(5RGS<5rxRwv|XxBOL%no26om3Mu3@&r_u0_VHIzo(4lkkcJUr6c>M3Y;Gc8tGTegP+fs zEXC}_K;)x)e9WTN{{W}P#eP5!sL`8&--GbPP&CIexP3Y4?gMhYIw!!~t+e3~H9N-j z@c&cg)o-)r)Yf@kUaq%WXS8Bj5@=;YMeJJK@w<(dj8%( z@cSOMX8 zGx(fbwevOjsRd(7vS|NJ;(fQeujQ`}oTvm$a_1+-*OR1DLofzh2+Os|MkzZ}b7g*e36id~<2fMA{Y4Y;_f$`3nM$07$N>sBHaBX>NLVxvS>__8`0wz=InVG<^W8A@T`7m%E=Plh4bRB$>;r<(V?^aqT zKds{F6#701rR|2JH{egzn)m$F5paz5i;Ay&A5mjXfzh2)j`}rrr?# zdqSycq)w8%KSGUBA9RG)GQik^r?E}TkPJc^=W*P*(4BT&{}}b3UR&qU#KsV$NiQhR zu}CHQMH^xdznhSLY5wh})uD7}Or>|@Tzim5ZM6gItctwOTy&-=->DjxsW|!*SX|c%VO5-%|Iz zD zlQt8^XY*)%A0Vva>zOm#z?CbS8iMU-pkXcgUfe$cq(!_rjhsURW^1WQ3O@^bum}CW z9n9^c*B+#%Lr`ZN=`Nsm24h(m|Mhd^Q3behmT?slIfay5ik6+vowHd+{2R}tU;9^q zTl7aLHPx=pKe*mXt6?HE?Fv@s7S!r$^6P1XC%f%N^X;ZhdigzNQ)-Pfs>b6bkoxXu z^Z{4<=q)|$#^yS!b}w^BQt%>8NAm2X_d`2P0}oR{^Jy&1X}Sd(3qRY1RH|t~_Vlyb(G_XC?Hg zcC32EeNq~MH&f^ht_=Sr;n&k1;=F?TE{>0yesB*3N7wf&%YZAxcSU9}Qz$C##5$QiHGbx`H;uidTO+Kya(gFHsc z)ywQCLF?-njhs=|=LdUSId-hB9Cyj<8Kjx>Wbf0WI0D!i-uI-uBwyk009UvJeI@tt zsc`kgf~yK?TpL!2Jz{jRP$^rDurSSjuZ@DV9nK~~cr4O~YizurL{5XXc&Bum4Gu*p6R-3jmjLfB7eU9sh22PXCGYU8JKfUgGXKHe< zMo$l!##KMQ&th}EfDJJtP)QX}xY`5du z?(m$uW4=V*+u^7d*di@}xd!~MM^bel&06~4VoKE#%Ar^F*U3HAv?I+kM}_Wo)+=Dn z_UPZxBmQkj8Cw{1JQ`h#(x{Qe{7cX4$z=)plB(cr;}xY>cRPNQ{}X7F!|0P5u-p=e z9FfG&yYRZ4p@*?QygY}tzkqyy96a?$LJZ`0Qs6UpI@ZE+#jXBKEpqq4E&cy}oeOl8 zMV6?O5JEta@KBx?4fyXcQ>Qt7R;5PY6MH~ka18OaQ20&{isoho?Je)OTA zP`{obw(N3RpfCT=$vqE@X?vRmG$X**i%{0cmWC4T zr$F-&b$vwco9L0laNvWa`-Eq%vgm)|sU(N_|0#H`qvr}3*}K5HdlB@USq=>DmC_2k z8_Bl?4E!$WGWOG>lq7|mK%Y7Z)UIFro~wMZKH$Z1Wn?7vgf7(O+{b&@G_>ZqYCIK) zdy#WHJ+loMM$>ohJRe8feuli)Bi2=JCr()XB0A|bSv9E%h{tO%6PFD2Pzm;WMkpQmL3;};@keoZ|y+2d>QTopr zas%aFMhRM!p5y;KU^iB@p1kG!w#BYGJOJFz*Y$wz&DB}MT)uv)!hfx`K3^Vw&7g;~ zC_A5WlKlUYUwy%OJrM8XzCX{$hC4^#-jGL4OUl<`o`f#r_v`RZpBS|ZBmTNWAx3-{ 
z7rbMX)KNm!lGYecLe&MHouZ5s_d*swk+a^`_$$r|l2DJM-(UUV9dg>3THWo;&zLiQ zsgG-6SMyfWGv1v1M7y;1WY22QySg=alXE&Z7B^~sdSi{^y}F4Kxf1$37+P&@t|mr| zxMrCJ&oCZJIaK*eemO&Mtnx z7|3SKxMS-}@|@yVd;Yt^qDGWQU-aYZj4*?}-Qo8jc#=AfPhE@O1D|unF3o?(O7u~3 z@2ycqEW3)Q?wo%+%t|^2{-PDslYiFnPD(aTv^suQFqQ@EtHN{NIRi{WXYNqf!&43= zU1-^Mzilm??{z*e@OvXA80E(CZiH&joLk5JzjaV~nzCJKel&0m=hV_`k~GR-S8J4O_j7+ZJabLD z8R@Or^<8%(sAD)nmxmNBiBY948J}cRuA0LDAd!<=auHuY?vs!o+bU1hsAw!#Yj+o@ zSxq(zI?JcV6n6)KtvvKu%pXq@bXF8g_M9JOi4wuFnn@+~W$rUqdAZN!YR*~{@Fe+f z7cgR@BFsDF<=R|qgBbG=xl{CicnNfN5j>&* zh;}gko&`TC(iwMpC9=r4wQ4wP8DnW6I0A-*S`9`Izd}l#L2c^L_M)_;XY(xj?|Zm% zw;)$OA>X}>v=lWgf~Gyw^hKzD9Wm*%9L*6&WjgHEBYn@9PN2YPW% zwt`liqK5|}l^&*7oHMQfCn?6ym6TCT&v08Fre{e1}kv7UO=<&upH$!+RYZ_9wu&nHrV1hu|^#KRYuzgzQic zcro-xH?)!Yfsd#w7~lLiVDAy&*G_5lfA={Z1^14v1h7qIHuHEGUs_RCz}Y_osw8cg z2$b5RheDfr1kHjHnuPV4XrEj_Y1*!qK`WJ%wud~Gq)741II9Wh^bfQl37nqy@gU{3 z|R{NG*f4q=|mBuFGgi zP_t2=(>G0dV7lA*YP}87(6keOr zV&$6WH3)efRA{7_!?dXu4&p8?V`jC7D&r34CeWC5HEWzRI!~7;ZD(#)8?McuOuZsf zc)6~j<@-=U^pYaj88l<)cj3oiSj-3 zIT-GIYHM7vaZaHR@=cT#(1et1P#&^sxst(-ibHUgGufjE4E<PVk&(?0Fka}CeA@5;)&Ical`AOOywS*G-U@koP^+4&$&;;p6 zAqDM4*AjX`vz7d+1Ny^Ur}M4?ttG}rwvX0;=R9=l$-M1GyS7lri}X$!4)Zy0?xEDZ zl(rKs|2}Hh$4BkQ)m`U(`YbJiC+3r?oN;T6@ejGzLUJc9y%H#lQFB({UB>#Dz-LI{ z7LPN&_tCRMgQj-^2$YHLSd#_{p`x=W=jnhu&o(QCs~v#X7dF28SMaFxYydj@{~)|@ zFOamQFN}TaD&lf@p`O=USXW8Vrl*1GW#BJn7HI#(39A>NyZvuJrKP4Sjgx%1Y9IPivl|kF%+#9taM=y}QCqro)xeXtpDO z!#KKK!JhlLmO+2|AZy9#ij`~wznrnRK{9x@s5+~XL|JzOHr3JK-dS=na?5iwKZ0JI zDW;&6vzc|d7E>AKhDFGC^$hK#&KCBfx#@Gihq3Su5@}_i!8zy@KcNlon4E*w;(CPf zZ(Q4$OR1e`qwyDt!koD~IL2$wNExkMGl0c4 zIzRpS7E$k3(mFe+<$p79m*M_e>fQ-@oV(@cQ>GTIVNkK@7N9GnXuEo<-rRixebn{w#H-5C%Mc&x(!!uWHnys7sXzj;11sb9NiyC~x~2dXL{oxgg(8GcVnYju8*gip>x z&vl*7GnD0|`EbE?yz3M!71@l^;c)U<Stv+Q&;k(1TB0 z_3TZa%~0(JaHqC`hwTD#*DMmDrQ5>)p46CTK3xjOQM%{@q{ebHxzA?Q-9&9h0(16W zMowveD;l(`f}Mfs0Ie0z`X{YpJg9{jVLyqi_&5HG(f_0-&nMG9=`8V0S|V3ShPo#+ zO8*Qem=K}>7h9A&JQT*#b5*fZJ2pDB80Y{)Y@eF 
zf_tOwJPYrg1E11Aeicx60@gm%Rz~^r_~jnD9YJ^O2|XAmW;Sh^1+T55M~ulf25$JD zT<>EBQ5-nuS>!dYmN73pXVW$KQH*oPy4Ek(XQlFGK~vw)c+iH@1l%dVT{W`p2jQ~` zBz7g7S=l`{!0&vi0Pc4Pn&?O?JQd4m5lXDDX^Ur%rI`19LaW`~eiNL03U8;a3^rKAJU>#^u*8b1U7MLf~o zzl`_aLEoN??#hr-^9HU)gN`0fe?l(1U*1^WdjfWInHMfXcc`V_7nm10b8sKbXx^&( zr+^_ru9LLxAljGqkJqSMs(pbT$>!RL9$F3rSB05H=Rgb2A+q=kpadz_SR9_$D1UaDEzN;LbRCopdJ)yTO$XhLzddRziA#aPT-P*{c+=t-~bzmlXuvm+JU2g)9 zt3zs?&h4bM%0MfTAC4qytaC8W$50k0z=fPD$$w+JIE(2PN>%nbGZR|1U{`l-#d*}8 z#U~N|N4e*3Khbknj_SSIwpzDeBVH&sVmkl-{rsUd&!VL8eMXb-Mh_cRPs`!lSZ1^6Wf9yjfPX77B^J0qO88+K_$goFiUi3i~Y^ z_-lbwoO|ZQ63Tp;{60yc`Q+2XdSE$!9fzKu43w7zR*b~FkQ}QhQ_sK@x!RD%Q%dxG zH%gqkykq5h@)?n*2|S{dvDrCL`RnMp7gC1neETTdaw3JD&sC`ty=k-2W4F*^SDx2X zl5=ErhBWUNfHU`t>d_W?-44e5mE3osT}mWRnU+sDudc-klVH|9j1p6nf|ZHyY3V7xcDc>^S)b zBNYz70rx|9?*Ez!#Lam{UE1+ z6ne$Bp*OCeu0vpJ2>sxhI7Wb-4Sts~<1T?Fvcb@Gj0R`0BN=g8-)ximgI>BX7v?D1k=$!^ikz;*@8gp>*V1v6MagP`d8~msL(@h&sjvwWz0c6Uy}5#Jgkym`@w_SPnZvEq zyC^%f75OwK=^a4fNSsfJOG3Hoo<<<72)R20(-P{GZ**qdZXsnMZ{0;LtvFMc&YY1rS5A{& z+ucTZi{~6{20NaO=IqWjIXz-ZsjrYRnSzSjQlcKfN}3e?5Uqr+UT9^?NK?-*GdfK- zc%{3Y+@Y2V6z=}6*h<^qgYVryb_JH|9bj+~={NE8hf;6=cge1Y3lE}~e@7dh z1e*20xe0F28VX+*C|*mG>%Lm9>uKM7bUk}r`#}=!(FPt>%@|z^uh24;2gXbIxO%i1 zPWl;Twx?uQMIVIAsDbMxc{BO7RqsWjc#_otDAe6de6IE=Th4i>VA_th`Wq-gY^y6@;vmj2I^>!lzN-i4+fS7a_bxC(<+{87mV8HD(Z1Zh`t8Sf<0wS zt#&q2kE$erFDtGPDtm17S9s$O8n_*U+xact?U z{3v&88IfvBsqj=Ss+RuW$7s)?HnFeeU0YWn@a9u;Ll_~^+sX1>DVICNJ$IhvVpK%q zVmU8R^BhN?MIL*MQa|Ip5-jtn;!fC?a#fF(w|NGFI8gEe*IaZ4)r;)sO4=wc$>#Z7bz!_WE+h?qj?W(k$+rImLGBF_xh9u>_@} zTzCr~F_W2b_;w55C`0T8<0JU=9c*{XPNUxmTR++)J&p-b8Ou@)dIkSGG{R;sIjNS+ zEsV_QovKB~UEW>ztq$pWx@#jx1NWA{w*VSv(~dFM=j~O$ThNYs^F~>+ls;A3*`srz zp4Z^d!ucfckHDdfc`Ec;yrnr|xsz`JsojURl)8PdZ?HRL-0`5c_AVT+1KhJ6d?VuS z9Ij1iRXOj}PTWhkoI7nBd5oL$p=#xtvZ56HJjZn_)R@aibceInrn`YsevwD2W29Qk z*tsa!G91-QfaYrQ>0#z9%jm0{;P;-iDTiwfXV$03UDVDEPEg-2QvHhBR|H?!jzHy! 
z4sS3*^!AwueogyWkk0yVy$)3RQz~;_0ESo4>T|=0)K_mDvZoxr^Br8mHD&cncZ?;u z?*m4hYq&f7AitWy(UpR(6MO+5e-D{g1*b3uo*rux!Hy&SZQyX_)b*emRCJu!0Tz# zM#$|BY(_ED>q4ogFWCcNtqy3%QNx$CXg{>kA6S0|B(4VS4Kk%opqypAF?!P5VDdal z&_gMYH^{});7s2kZH4Y#NuY^rCU0EL(vGwh z--;fAYK<2(fc7n>Bu_Dt;#x_k?%%*Se)P+&{NY#T zX#r2LN+Z#mW6Ntn%YVw4QtNbPlFPgLzssWqJs%84`*}~^%VE66PnzFq%bDM&uX12h zdrEOnS}ph7az?u7oug-FIhs%zwMS2GXYT2M-B^NjTOIOEXADIM#lI0)GJL+Cx{SG) z2Ga*ZPkbJDPvqP7ue~p4%V@e$>y6N6G<k*img+^roe!CX7H6O zi?djQPxMW1;Qw);w%#ltbY^`lP>TMq=8ml`<*g+t>7$pPJiLOBQOq5u)~XChlY2=x zqv}`iv}EA!+BRxK37dgsJ-=Er-jdLwYfY^g539LzZOHg1X5Vv;`3-VSPqwjOYanB@9VI+ZKF`s8kut|I>h(j^V`&1dH$KT;u%=JwSTLnc z|2ppU@A);jc@Q2s1YLe3INnX!>*3}v0JU+B_k|Vnra++WX%@V&9!%v@+rrQe=}8+x zH?Wci@0dlIE6JHp&d1?{*TQY{gPqL1cdN_^V^kyD<~@XYX+v(sw!B<@>7Y=d*(yjqXrw!TNa1TLy z+T8V(8bB)#2X1y84(x2&)`<|IPg1&L*%LdwQEETPi{|>f{;O)*lPrB1e>WY^; z)l>ApJ`B5g=J`ghpE&Pc#~bG^3ApPX?r%XlzDXMQJ3DuA4xw$(nX&R`Z5UlYrCzDu zQD;<}3E)xf-W_Z!k^fb+)N$pkI7L3=wmW}NzGx4AminfU-;w98bknMdxuWJgMC#qh zb9spUDCcqBzaOWbx_>swHb9jqz*caHUXEhKHZ7SL?YPJ{5Abp$_)~ zM){%tc$_u(JRQ=R)4L!0$N^K~O)AiWy}6WUQIf6uYb z^d2xbnEAgR&Sg9sL5}%Q(sZo8yC^~I6+%}#fOs`&#H%L`w&%K(@})KFT7u@ffK;DB zy~cs6qnGBxr@N9y{@#XkGoat8l%v#frlX#!rK|)_{S@@CHa(uQvKgD(xZeeir!i_S zq?9DT=Azd;i50;!2CoJt{a0KI9f3BynDX1g@9Lm+{iKq}h9-=OkKm~dT-_;INX@QN z8)5S|aB%f_wbntD@(xnWxOYbWmKSfLwVodF2e_#I7M@q5bx5BxcRSvM_R<1O|Blq2 zcO|dbNBW)cJRZhi_!3O7rKe_q1y_^gnD>yf5c`V};#ZOT0{YSYdG1DdgdY8Z@-7Fa zW%Tzz+BTJ(YTe~L-$;sE;nUrz!`LW8si`?>JX!2>TH{%U?_%BY#EeUjGiT9L+NHX{ zhl~_42&~*d$>sF*PWovh|C>{fQQ$n?&V5x{4u-+m)*vHNaFJcmgfiKfh((Nuk0^Zy z?YNJ(8)&C3tOi2cX*89#VA4HJ3!&#u@HjmLmHtNjbS=+Ty@BlPK%HNZegjY3&0th3 zwKe^~JM%dT|MVohQ&7ofYL{#4Bj?Jr9!1KJ2f(nqc`d&UZF(5K(u4m;D5(Uz6w(WE zpWQTi-BiXsnL{g#lX@QTw4*#{!1|c~f$QzyTT7`tI!%3!RJrm}w3L?3H*C=TjkS|YS0I{KxqB)HH|sf@Pk1ED3?{jzdCz1`gVpy#*gE5K_# z*fcJaUT*uK?_&Y`o9Xi$C{b&;5%1pTy;3&%&3(nMN1z5rxNDes;H;Egz%gIufBJ{F@kt?l?7pjAz_BTmpv@)4U1lcad70IG^(&oEi?jHSgcjYE 
zm%+3gHw8voHI|+q$`Gb_eydTmHj$!)QfnL2L^)#IXr+%9`UsIV%bWEpcV1P+w4NZmm^Met&>({J5u5j;VK>JHw}hmbeQmO&;-{0x~71E2?$iMT=W0 z6ts)=Vi| z8r5b>RW{#6J$i{6Yp0rVyM^~@dh!|iQknJ|?HUCaGQN=!x3rTiN1ywW5vN^l2Aswi zBavP&Azfc+(%sSt+PNPp)|TnI<^znc$9e8NZzfpI$s{_C7Z?fEa@(YT-bn(1|B z)Pz;=rt2u}J$Q3}B>Hm3_g9QWy?u+3T}G~Y8MxIejqcJ0zC4RGCn;+zZB~z4121b0 zSJQ^Q4vC>%++7Qw@YYo}&s}x(^#l;Px2+oaqg8GWeX4)KTR?B`&VXhf1@66pUwL+2 zG5W85`H>1bA(8ZR9!#s8dAg$h46VM6Hh6CLB1&*ikEhhPg`YkN#y2B{+R^&aaQE{m zM~hP$Zu>ALKTWUA1h*x~@NcQ>O)&ByJZL1dH)BP9#G8}I{1fy~8S<=#_MhbKd|G}B zI!KCky@!;@1}C=b33|MqJcGEVfZ#CoJO#aIecu2+S5yDZL(0j;>cn7xL&O z>#B#8*qXMv!dDo0ntqCF!P-IK%7Qkv;(C^=qhfRT<@t2R#=8(2eg?cHc=J2{J4)m! z$NBFF?S)<-<=jYp`Z7Ppbqx6iP_FwQUDdbW+{3kvRyOB8$$z72i`5i(G&Z2r=?GVQ zzcJM8`laU;OTRgAy<@z0>{L^N`o#{)5$d7zg=d=TkEHb2!T*Z_qUaviy4b8x?jXKJi+8m;g#qUc8YQfopdo@zhQ`V;dS1Gzc(alVnyoi>BqAfL2* zBpcJ2TK`|Ukq{?I^Y`-#<1_}xvHD4n%CP*MX9& z_sjKj-@CRhyz&0b|JFOE@p(4z*m7qG?LryOAIw+A*BxL=?=y2-Q{?lmE5tX^8rTC& zcH`YEzy1Sp#3oHs*onIy&>H4Nty<=hZ?Ju8w*>SRd;8 z9L}Vd%x6d!p*IqWu|1AK(Z)^Afis-N3_@w=iuZ8JyOGu1LGViF^EGgg1Hh*JsyPrj zljsKDH*(!f?v09|=i>B+c`+&xIn1m*MtuFI%* zIrV)3J|_Xwh=7X@Xz^N6=h16D;C%Z7t_zTolh8%`(*N-!>Xz616D3{(SG$h9mxms6 zNB0J(=Ph`$qf<-c=iuWRW-Arc*%UnW1sk^k-$&rEA3YI0KJ`mJ2~Vs>vgxmIDOkFb z(%zu8bAa@9G*&sqrNG%h%Hh=W zE9fSlRt}}yI@)CY#)fvjdN%nN!Wrdj?qpUAGcw~ETH$VB`M7qu`Orf(Z}d;d!u!{j zZG*bxnr&$5Ch*Z5O7~om-sEu{CE@n<)Vmk1CFgR6<&KsWv?58*%e(Z)h@8*e=nI2H zl=rJmT1Hmj*V-kF2Dv{x`3byG&&T*am(izXrH+!~6MblQ(t3SI^LeHfLA#MwAtk3A zHS)Yn`Yc9jGUR@{k;nK(+lTyA{&PCzkMqIIdpW0mNfp#4h54!dA%EPL*0T}cXVG4{ zu2FpSNy?>fk|y4RmEI&Ix5W4%C6ujlWDKPj}9yOg|qy zrQv}I#>}CR<__BL&M{Nk_euJ=fS%2QQ!itc_?J-VJ5XaSl>7pvui|%i**t_cxKM7k5{oKmP()T{kvTRGJeRDoVX#Ge!lvVYGHpJN4ABeld)|%1Nmgw_Jr z>{o|eJAu@>WK;Ui=!3_o!zR+gJIKG2xkx1(K>g`1IN@u+ucq7r31$?bTyXn7 zIlch$8u<0K@I0fu{xeX@539g~whi}(%%UE>-aK7Ei%A_&B%y}o*fWOm8BV*kkUhg# zdyF>P!_7(e9Od7H20a=`TEY#FLJ#|B)mdoP&USXw;u-W*PT;o2B%BPVG1_G-WYBd$ z{vQ8b9nlhCv^}|9F8I?vbDXx80Q(B+a<6w^z~1%QqcYM*|fBV*~3;e{DZu8SAaW7#a}JDox4l!1?pOGahw*sNME>`=d&zIX+b$X 
z$&-l0Q~d;?j1a>XPl>a zBlqvYe|??Z)2+Qm-aa2l+!>Tl$?n`<1Sg2IRdwm+$kAS;$Op<4xpU~Xu6&zAS9a;Uuf2D(Zkx>kMT0`kto*m!Kfm^<_9ePNF z@gMzvYtN7ZQIA*SoD1|BzGq2G`6l@m(y|@2EAqOYT!lUzuHMH_`1e$X%mft(?nUq)L-(`MN%r0%A*W?xqFV`Gz)?6eSqnt0%Y;nr29JwO~2x zcOgfF+nJuZ&ivRW;ZUpD()gw{aF^4ml8w(#Kk-hUB`%hbJKdNUD;$C+ABVST1scXY+&%Yl;u1KO=W5haLNQ}+I+R{Z z59%Q=Hy#g0GV7rF^gGj=Nr|53=ZR;F>C;YVLJ2rmHjvx`_pYPfdnr{7(or@S86cn3 zx9T}c*h(Eom=Ae|)d0rVt8npK=#P(pT;Habfb?#9FpqSu!o3eKn8_RcVFyA7XH)Z7 z>K}aq8=+eN_~t>%PN2m`i`5UZ3d&Ww=26=Jh41N0>A5#m zaPdz3ssz@P!1E5UJJajKn=yCzd=Q*CLn>jmKbG1b0qfeKjD)ohDdg&?5sIA)9t4-3)-xP=?pm}lYPD;04eKH= zhS@@C;5dEIb1Q*)CZ&4@vOd^aWhPOAI}EM$2&3*Su>Y?>?zwyQ^uY;IKLISp&D%lW zIEy&}BnOae=Y|%~r;QWI{aezwx{BTdU{p_J#~ z_?zhAEsXR2 zdkbnYVnK|P^iC)*m$yobOT#Z~P_Ogqyws0;L>(^yzG`_ZUwodck}jsuiWBerpFZRM z^wZ3pV?az^2*p&V`zI)yykG_F{-r}t-tUwU4rpglQLyU@N|Mqxupn}DCV0ua40 z3qyMM$*03zrarIA@H2l$J9#NRvJ1@HBDtN?Df5}}!(HY{=K9xfti2jc*q36&wz;z5 z8in#t$(m-?x(+!%hquy&9N+yH(I(=GrdmUk7%4E5;!Tu=F)oySSj_k}>Sh9X#aJ~Z z8VNZBlLd5py!v|H<` z@7`lP9;coc=;QJ9g{LqWSFZ-WBp=Pqb6%fdcH{ocu|V5!&1@vPtD)cXeg$_|p__h8 zS>qX7p0KT-s&V!8+y0pMZ}WBUnl?Y@cSER<>>&s@rPo}H?;))DeI6*#!5YaF09W{E1-Xxx@u_U zPH?T4$uH;~qXD%F6bq{cP8d5G4!$579j521+xyfGr7 zHtRlI|CJUDK!15TU~L%bm09lFxq|*yD@$=-OSwyFc_Q!|cT^}9wjxCxIr^@ud3+Qw z=;~u7bo3l)T+^8eJ;=>kBPr#s&am1uO3J~y`kFlAJY9EJ4jXzF)Q zrZL6j+iLVJX>9^NGLspL+K=mgapWs49w*gLxH^mKjUFODuMHfu4V0H4wK_-|YMak< z^X2hNek?~;|6U5!>DfPqejLYaye|^55Ny3h&UNI|%i6%yOy*fjB;^>M7QkEL_gqpnrJm#u`7&w$JG+s!_$i`=akqo|cB=8I z9Bu$Tn4%r_Rt0IYz}|LxTD#E2{7N!{)Fq5SFTLwUlcc9U2v^s-9T9Oap1q`%$xl4p zHr^|n<6Zp!9>26;MLWLFlxS(%Djz>HrTsPtoMxV>$ESbzCCi_Hjb&Mx))eah1J{N|3}i4N$Ftl%j0&&DZPNE>A++-VT3G{Q-d+y+_ zs>x$Co!6MHyvVP~Kyf9oegN)k;32QUjY`4GMqn^vaCh`jIgb%)rvqmPVE+uulczkp zZn~W}u4ztWbW{NC5T31P%p0@FHJ%EnTZ!$-8QU23u9v&(^J{2jAP{*%rL)EwFnOFi z*S=5D+L`dwuJ~te0(UL0X^TWp)BbYc)1O+~^itZ^3oImfzn0$Yil&jG1XpD0$$uF9 zUyS^a?(Nm9!|Y-^c(NCYNbiomGVU7Se}jP76BMt9R`l%IkF@w-w6Ggbe+KqkwVg)} zqe030TxWTO>p;Fnp*sQw#&f@#cDNfPi+a10>S1I|HZ59D8&lv>e;(Hpx`E&J%(YVV 
zohK%$xvvZ*xL;e}v(dC&y{9X5b7_F85O}OP0R)~ln+x43QM9$W$F>NZr-C&?Yfwj? zwF)VWx*?X7P9^lF@#?B*M+?4MpOl}^P@XGt(Pu?X$B(1vU>LLBMSWlYosP6^e|TcF za<#^pq>EfH!{h41`$}lAeHew#(EA7Iv~xsWo(#3hR|kNvSf^anyU3fOjVJxXJ2~1? zex-udLC&`c%7{6mh8yqymVf$GyM))clBbOxkmQ$;JI_7IHGbS(l%T$1erq2u zB8P8cg!~-FV$`D(;IRhUNkWgF0Mwq=7>Ofd?RSju?c^FpuVhL-eRX~yrtJ;qPw`DD z?Tfjc$DB?r)Ia`FoBiuIj;AS%KrLuF(2_LZd{3=02MCpI3C8Q809`4m6Fm1S50`o@ zPYavtz{*xVg=c_|r?^k>E(eN}du5{=t$}7+Fh;c#C?l^YwGvhPRE*Q8-`1A#N8T1vehc_S zJ2W}R#zIPdklcFW*CMk&0h$_cGM)bmz|~MVu3C2u+)uyf{^a=vyt?D#95{o~L&bt# zjn4dy1$qcdP0>@s>FI^!j4{}YLppcJRe*)#@JX?wzi1QYK;G{G68A4pp)cLT@G)Gd zhSd~3Hd|8Oy|m95p8en>wP*^1kQ44QegK=&BF1!gQqKprRf`x5bB${h z@B2}5OJML+r+w6+4@(L>=pE`F6+JiJM=p8V!>!C8hSH`HP~x3f^Rx@P_iqhX&kQ*g zp6mNK4E}x}V`M(~C}Ng#Pr!@grVBZpxyUsefZ3BHq``$qM9Z5E6rK**lw1Rm9k)WI zTIL#{6nCT)k=xzr?}4Y@w9;Jz&5`I?j3n3M-k`;5W7X&%+i76~^kOtfwT&eItEojj z=3FpX$oP@#s_ADwVma~xwL;^nCArF@wG2h=ww~uH?$y5D;Ol9UcQ*2}jzH*|j6T2C zXF7MvMV_FB>Ui|yj&rTo`kd_ue8Z=ahqy0lD_B`ftMs99jn|#cTC4P+8iPbvo;c%( zaV84F?vHgiFdx; z&PelaY{0LvJDrb3EH)?ge%6kMF+NLxbxX*xnx`lDwF9iU5A7oKfpyHL9a}YU#Cqls zU6EJU(XR*LT<$A70+;k$ghznNy}~u*_FU+u!0G7~?iTT^g%7BqHTi0QqCL6wlGn=r zE+bS*^6bQ~&|0T*|2ie=PdW;U*-x47eo2y2&0Ox$pZuOb=h*{#0d0T=NAkuf)1Egb z&whkD-)EfH!3$pCS`D9(3oV9kd;W3+~9L{f|AN9%><-hboCMkzUu-|_|b<&{&rUbQtqrEczQ!?PJY6}uA@wyR|u8O|$Y){XXYl2Q%NU6>woNw%`D`Vvjy{o6%Mk7}pC99dkw3XR(^M^)eMfobuOy{M-yr$6I-&R)2sBC@dA_qT zJxJWkX%6|b(nb$HHE#8K?f+`$J-~)i#hHS6jcH;O05u}_&!{h3Mx3#DSHahQTN{QqUVAY_b7{{YeUZ;BgbMObS6+j&91Lh!6#~Y9%1Rjbur^}ELU4%e|83g?rx}T zoE5q|)YG?KViY8Jum5+3rm`68&SS?h{>9EnXmJ2^ZdA;wFt*=?3m(B{@dE8`!nhZ5 zXTcAJ-cABn2~e+L6n#sJCiBky`^LXsgap!qOkK_hKX36rfwY_qJX)(?q|ZHBdL$a3 zo)+$c)30&`a9jaznFps)W3;`uQcB;z+b;$7zaW#0Myq{aUD4er`iou%|C

85&05 zE;#fq^hQr%_S_iPRdeBJ+P;i%xjV#7T10t!=>b==6Qo=ON7@4~kp{mB)a5F2Ii-(8 z7mpFm<^r8O=zLo8U*ua)x!r-%oigsBI?i_~qrg)Yv?CeM;t;K$PZ=$l?X?8z0x;ys zex4&`q=yo?tyU=GTQnj4G9VvFefxq=|6fSA6wj{!uF>GPj?WW7kfN7bbC=Cm+oN&e zT9axDt+ZRp*2Y(Ele-6EOH#K-F#FJo9|A3p28}2V~OjS^tTC#qpYJ*1CZUSB&3N`{)aEyd! ze3xjUCRAP()haff9PSnHTt2Wsb>ecgG-9FZFEm%uZ&*@x0CIwEvJ@YP? ze0r=o%YTXz^wu$EfxIsfO4R-^oL))*p`NggeEneEOQXJQEU`kKwBQ?YnAt5QuQzs9 zHg$|6=RC@(gMzA|OL?+fSBc|C8^i+Miup5b>e1RGxAVJ&ubeT(^>07Pzk6~zMby_b zv8|MttB)lY27CAw0TyF@TekR>V~yo;W-Nh(@T948qy-M7pSPqR!sh2VebLfZt zK^nbFUhN|#!06scPc=*POsRAZU)Q>tz&YizE8!Ej(vQ7)>T1SO;Cg^Id%oH}u%#Vl zD`Ui%8vD^5w?Oa4o9#;augL!M^h*G-qld9JnVIzRD_a@ zm#jz7QF!83O4DBNE`#BW?s7h3XmMqz!_(v|!Hee&v}CMa#jkPHJQkk2lzQB$IfvG= z=7>bsE~787>sSkr6SH~qAu!INeN%xrmmHhmTnWls11FtLT6gff$L6!ZNnLF@5N1Y? z1zZ33%%Z0tA)lsR=LVkDFppLkTiV%+8rN+=JChZU1W)gzFE2qW(FW^IlP{o(%jgSb zz43seC&F#W<~>kz1$E7XUVl&hr+~gC(pk>ZoAIxHzYHv?mmdW}`Sl5U(tU!;&N}E< z?ys)V1-faV&FuoktN;h@MC-`AqonYxqyIzMdgqJ+mq(#Yeex@zgY&>?Zy@oxd)?K2 zqTf{>z2G^k#mrD@X^r}z8k%!n%Z(#DYMOEkHAo|72%+yh;U+F z9A~TA&9z;c*4@Sjpc7N79jC(6ec``5dn(a9)G{*`g|oqyX=1EnHCWds{BCN@qbHIj z`-$*mO3OU;n&O)&)8AswmMA~8Qe?w3oW#Sf^$0d) zJY~o`^|uQ8(KBH5%#FVK`Ml{sUuR|<`Zk(6v!_{`OZ@6@Y766Ab%D&C*VrE&&Uhvk z<3}4`<~~VVMp0sRXkR<1pbS3b{HzUM(zS#UC=;R>I=HroE})&AgH z=)BiUe|fmqcH>%ZlnfB80}n#WV;ob|2e(p$pW69;M}q=*VU9%jEpIOr>m{pLCcK{aXD|)wflgdoj}u% za!&x?E5K*(4521t@!n1Mm1Pvn-HUrv1zIv?IBaMyf zF@}3*m&R4~l$v~OTlu`x%2@~3It;7{@TRYA9u!(hFRiC9MnFx^!c&$pdfL-#NFcNg z+gpu(wt}w~xmUs5XXM^UOQqT8f%|E2w3_Fwn0@ra>a!>G-5&bD{eSl`zx@eV*5CLB zr0*~21=kwBCih6{HTGXM&~@k6zfq3opc}M2Z%P@=Dc|=SU8P` zt5>bDW#r!t{8y8|3Chw_TfJDGyaY%pfmo~12lQzH(tHqYT?Tzg0j7MJy3`*QLX8P> zxvTj~Mt~a6FiLxq^c%=Gn0_gzG;4C!lIDMccBOezMc->DHiqI7u=qTE)qrNV3%ybq z*^Duzbzu`VNh#~Vsb?RngO)<6?h+o#D3Ut$!zn}OJUjHOeLE!Z%1z{~XQXeUH%lo` zKIXcbdpBMI8=m{4g{+*iJ)ie!N|&>0Wjh3UL&vaH_K)-JSs#oYyWs?@`WPs0A zKYMpLJ+YX#%c;fLb_wlFfnnnZ{R--9OX&&9l-^u{)Qjy?%GLKu???5bPNZ!{DtF-z zqkP-fpR}!LwHEkw)S^YlaWVt!sZ~V_oZefp6=`fcwUiSrZ(XQ=3UDq57PS@UXbVEG 
zSMs!;aUxEwGwNbT_@x)4+Sm(>t0dPnZ7D?BX$O0X_wMJMNeWMzQQkWPwKYe8Pi@Ej zmqzh5KBFs#^|Zx3L{C9eWu#IH>pgMEc{vQtG&Jiue8S%|k zaO3kl6LeYQk!_&`#te%4k9vWJCy-5A-JhooJ!z#r2HKB|Jr^}jeX(wYF04_DY8zUt zUTb6!aWs{$?ao0G-3oL~7;_K7LplO&Hz+agv2rdxB=pcblP2BJHY+P@>Mb+xnsnQQ zDN`p@-92;itjTvzn>oF3{Pgan}hBvXb= z%M?~+%23EMttcgP$h`dD&-d5*@BiM%`@YAqkA3W8zwbVF&+$Cn_wT;0^E%J(IbHMJ zyHu%iy|Qip_9**!_<8*&URAX$zctG0m7O+xc>h-W4IMVNLfH<*!xYSi`8T8IaXotf zTZOXGW#gLbzlN_ZxWr@H6&1Z`8RX2jlm&u{HY-KV#te zLr07pK5F2|n&oBNO4S@XXxpdd6-s57msjjs;q>xKSC&`qQ(mP-sY0n*sp1voRsU6V z(8#gF&Zt>l?SzWN?A5Nkda0~b;mYzFeadULC{;S4f~OajZ`ZSD&+h;3U-75ZX=Hh= z)2}+EeESwB)b#n_p~FUv?0?>ABhD$WeM)(q7N?Z&kY@gm{yF26^1A;m2xWl%KkNqx zJN~yI{Li+D{|x{=jHcO$~eOTe(Nwnmrot*kk+cdu&&w$7@x4{H=D6Wfgn$u9X5; zHR>^;S}A(mwMA*je_v!$xKWE#Zd0pA%St^uRO<2X2-WIQwc`JKeQa03_TckQJL`<{ z-A*ZQjNRQ^l>WW+w%>K??NI7oI-+!xUx}yqDJu=|I=^Mgyz6gQzm-ZA{rtz@N?uhe zRsPrCDy8bBDt;>asZy#b!&NI)^{kfHwft=7|Ly#2U#eZ&QH%M%uHQV{p|rE#x~1Qh z>XmjXerlEKly)iA_q?9JI~LFD{TEIu6z!&+{MPWBzxB`z{d)fke*V3*3?6mKSE)1- zCa?IvLaC`|6+I1q`TOsszj?B|-{4xs?|=T}*?;|0p+GkBRE1o1@&vDK6dr<3D2Hy) z59a@Y^)}iU`7D|#^I2V=?O58Q)VkCKt>2Z7DV>I5(^5HH4k%45O)uSwezVeHrTa_I zK)X-r_0rd+Jxd3cPA?5FU0SrfbAf(?Qp3{kOM8`?ms*rsmi8{SDYY&Av2;+WU8#NP zU>5PG(&238$Wk}9)4kNUbZqIk(($E!r4vg3RywgXkRzN?IFm;A_B{epqe`Po zV@em4#+JsFCY3HLU0%APbYV`+Bjw$l94g3`j$ z?WIMfJ4%a7ca|O~Jy=>=dZ_ep>5HtEj?R$4$8|*D@rR%&zD{(y;xdZT2p$X z^k(U;(pnb&_tHD14@)1F)|WPvJ}zx6Z7O|I+Fbgyw59Y->D$tGrLCp!OFxu;Ed5ma z`M>B(mkpNxsjL5Z_1~|ry8qHwbQNt?rh#p~5lg5DgKhuEPNMT@t17K^p{;84RZeeB z3vE@WwZjUXMO!uK?LZp)n%<(V*ICKVbk~NAzb8|CCR+PXo&C?$*ropqb@qSP*#DEh z-hpGSLRZ^7FL;)fI{Ez`LJerW13Nk!HoZ#su;Tjk{}vy3l_hU1H7-qn^o&xeG!5!) 
zN*}QD&aCK}(men7ROe(^5A@kPB%f6Jf|Z_H`b+6(Iy`~xyh=BhL47&j_>Qf%D;&YT@y0i6;?0O_lwxV0r-_3Zqy7UA~{XygytjrW%(nif$+M37aHz;4Pw14Rm zelt$lfAFU={#Kcny&(29<^^w(W;Z2jmQG>iw?p9u-m_3dA0`U6#eIF0ekt0iz_K>s zz8sIsl-(H*y+}F~CK-*?V?VF4$~F+$>it6HYAd-$tIbP|VAz|7e2=%UQ9lCD-JrBg zy;>|bs64^brkf9y_xyPq_`Q3=KDe}yaLes0gn_IeBeCf=~t`*;o5m`o+K4Trrm2X84 zy?Jge(j-f*6on3f#=_FMw9yhL{}6*F;j9UZDA7*^TE5AXsW2|X`|Lu}OK7`2sSl&G z4rHxR8qdbQL*oscAIXAxpw>uht9{m*<=sF_$BI?m;9AdTA8G4%^i#j|tEl(|de5PK z@EI*oTBA-YT3$^T(b~mgL>YarCsDX)MB>qf4!-#pCWr8bZme=5N#7EUH`C_vxcQDP zwxPwYeq(J1iAOU@|C-h+DH$H7!2D>mCzE3}+v~?xnxk9Bf~K*NMP%2>=;HR$^zwo+-&nDyTEw!h@r;q0=ctbay z_7Knb2#uC#&(cB@Iy#EIRoCWvR1T2?=NIuR{4c{z8RnBs@SC{#xDDGW&R%*xU22>VH<)0NzjCklPH_5>)>ryDtn{*|`7XIXh3U(lTu$;mw0{sSOwl)a z)06tFcNNdx4gMGN*M+cno`x%yp7Q=d)V>yt#>g}$Au>&rHIg%v9XDw<(IUR zT-X%~AF`*Fcpie@WHh4HNnVvf^kKC};IymfQ#`BY|HPEuJgWu0ABOHt>|~)&CW!;x z{67xGGM=<^^@tbZ=Lj6%j=yzyuE5eKlVLC<2jF0h zUP&uabFz2)so9dR^z!^zI{b&0+G(LPtWL(y*|b)6ngZp|>98g0%k>53C=%ASL>ABy<&9-LP2l4b?f0rU$Oqgd-)aqYlay`yn zJzI>|-Pu8UJgw1dug%J0TU8v?^}Y?gRnT?~h#u?J6!ntze^$OdX%=X+Z-Gi5mT&~F z+ViJ*T zd@Af;5-%(Bu;V;$NXzfbU$2s^qnbBqXA7LCq199*s)Wu4+DmkKQjh9g_4_GV+vuQ| zOmH`kevahp>24rS*5Iq59`yO}x{efIiTm%t|8I2FR;)W)jM}Kqi#V$c!8>J(0cc+1 z$y4He9lp>OLZ7JLj|Knaw+`RoHRq#7Ap!t6)7JRH(5 z)4<{CbP?~)B2_NXrFDL0vdPO&*n@F%g zPi7u0F2mmh{I9~%AKAhY^6iPdw+3tye{U+H$s))$S8E=N+>D<-d_Vn{I#B5Z>v(It ztB>4Kk==h#WVQqG+ndczU=>fwR7%uX@oHpv=E@P9~-X}h68y`Mr9-611 zB_{P1**+2z=JJ)6#x;sCP0VJ0Izt3R^fzIe=BLPH##2CFlnd8X25dp?c6 z>ci;xWH(cV|)gU0RzYi!C;~Ud$VY()eL~rUt8@tc6-=M+2Ks_)^K;NY$>e z%rZ3_iP0_DO{BgTT1WG@hqc>Cj654^@6c3jlzP%jMm%-R5v5<+8*=T%$8J7Jo~cy2 z5Hgc-w2u~(DI2k`L;SxH?uXOfcI>=0{QAniztZ{9FzkcgL>87_?GfT|`i$?xah0gt zQ@exda~TS={GH>cH?CTX6J?}XEq-^!{T0w2psm5YEIDc`Bc}9JkIy~i8$Pm zgu9C_^|XJSJoGwlyC^xJ=x24uc{7^JXr?8!H=-U*jK+0x_W*5XBspDN9EOMGWJ`~I zPkx#n#iOLJ$cNta{u5fxIQKBm+i2|oxK1X|%>|F^dHrfyS_QKMS>Ls&uOVTT!sZsR zg3Uhb#Fx5xGK7RH^gVZ=+1{k?1i$W3s!Jc~e{}T!GV)};X&(PrqQ-$@>3!O|S4-*1 zREOFe?Hs6g@IJrq!8WF#@E%%|9{YZQ}R|HSbs5K-H)_M<~ 
zkla08g#E(kW0uw%!DSx&4iz!47K6_eZz^kJHm(jK+de$yc+uf(JRd~j4`tH+e4^y{ zI-fLW4_l#rHFTQN=>0HVDF$DTw<}7+Wc1qFUMmiF5`}ue>l8hTV?^4_qt1qWQ&Hpt zSS{znhv4Z7Hu^I@p1|!#WZWG$4~ti|_{P5Iw1?3MJamx<`r`O?IK3@D{0hI@NHdsp zQ@kIcZ`fKyzF&lX!f!RPyCupQ?Y@qFHBs;mwsHY1jzMz=lAeRpK795CxV(kZ2w41s zj6=!zuG-yMR!`cP%O|h)?qn$6M!u6taH3cH((tkD`8sHRhVH3+<7;xQWG@}H@P=nw zV0r{CJXd;}jkINHzp;=flsaCik~W8FYds0(lVcn{?u9}_H2T8eb^3k8^8;uxJ<*@} zUNX&Ox#Jf#K9-f*li@De{(3wPfzWhzQ;lt0ucg)~d_)UZ;rwP44%YsyT9}XSPI#-s z#_OQghi{$8kB9N&J!qgh?oKAlOOPKz$7|6U;#s+;&$5d3TIm9Q6o z_Zrf@L=XR97Yk|nLQ)Rb)5tv3rP|1-yCwbIMN5b3Ph9264g}Zcmm0e6)-X4`$o**+5HrYl%Wr?cB=GGqXB_R1cxJ#^2f!uw9N*)U2A)^K&vESHRhE-}V@4nQK|8(veR12J?VJVs!_?2HYF?3#kHgoY z&^Qvk`Q+V$F8blF4=YxM7N(&8>8I4KAySYlrBIzJ`xZC?3XlR^jd6 z#O`0Tya%2$Vyo`+20U>-yNVo(#faC7I6eozGf6%FUmSmhm&_g~lVr^+GsxE$x#?Z; zBYpb{@X0L9Su#*BPulZ^W&YoSyGlIsAuYX1M@wm|7AvSis~0F!fxTDpp~Nwao557)jm&lZiR!) z;*COQf*x}lJY@{tkUwprg@uK69?XualRdq#r|F?H{+`E4W{@k3Y_r9X*~P0{mD zKZndz5-wvEYrUEY>sRnl8$X%T{)>9C+cG6D*Kb?SJ~|d&oV9|%O1;Ok;$bH%^$c6< zBZkai|EG{8^UP&zs+CXDYiZzb3;36`G)%qYeEP9Sb2yALO6)+p?a3U@UVv7{CntK} z%%@NC=Hv7mPWNsId%jJrj`|OMP?$riI^u8}I{3YJA1K{|1slyW z@6{l9RU+>weU#&A>?$7o6N{UKvq?0*s<4mIGR{tH>b)ZCcThVcrrPYaD+Hf|*3&q< z5jvCfL(Zq^$LaV{8NC<0){${%vS-%rWNn?#D^7;{Q1AP(s0zh+Zz>PDNU`@%-aA;UA)VR#1h;+MQa!0@oD<%2Q6m+ePXQBp$~euS2gtZ#YfXyAIZy z`T1=;Vwle|FIGuLyUi%HiuY6WWA|1vo_qouxSuUm(83(v(i)PP@f})3zeVIAS{({b2 z^}C9E?MU*4&u>R5ef#5Za}60Qv(3zNCHu9()qSW=DMmvhMdLnN*hIFOC}vJ>5BN=` z!K_FP#N%sp(dJ)yp#m8OvaP*PY|W?M<*OOto`;t=@SXX{mNYum-}>ri9`XxxlP}L! 
zH-307R7awk9SG4##*0j>!NVR@MS!x zQW44KX{V-oS1L7=buBC6@L4QnBY%7z0`Ksb)qHr72=JYn89gVj9IgH2r3c{Amu)X9 z`X77HXy)r$k#dL_($cH3{A4fjG10L-x--4X3hrzXcmci6Ce4&0LN_PpNY5%6Tb8ss z6`reUX9uPFvhZirTkq*oaWiY0Sz+C-$Y?Ej(sgiaSJ-DVYccX&%ImUbcM0oU zNJs6J`X{TIz>+%igYS4qR>yuST6_xi#t^T_s%{l=(#L40-KYz7R`Sqk9aE1t*QN=*m6zYyb|_3{T;%N_F_dJX+1Gug_bt3jn_$)vE3n_eJ2uEgnoLN zn+tzhPV!cw^kZV+X11Pn;aXaoOyaDSPQ+ESqMw&}{=I#EljnQmaR@$s$ClDd{1bk6 z#BauoukxN5TJFGBe!|n?q0hyz{I!wze*i!ZZEP;7OZf$wN#U%MN z3hB>Y?bDrM-2|SC z65q$`?R|^)h3u=3pJ??bpJp~`C$Vy=mj0lf%muVn=A1&qGstp3J04E^-?6D(@i2_6 z2lCK`a6FkcZt`?jKKF33kKrL$S6~BY(OPyL3?%XKQ2mtOZzN|&vFmY|+|LqPsFT^b zhw+*ye-X;RK=%ULdKFjM|Jto~8vq?3v7Livxh`3LX5!{r9NHzr{{ zSY)n$7Bp+H*`qx981jSU^?yM5bI+dk{}A$QCe2i`{m7PIVJ%H~bA0|caqJSdcsMUF zhge1)$3Z9d+P0Ya7%UpbJ_q{ys{X_N;^P`+m+Je@)K)s?2KwS`uW zQZsAD$ufx;cktdhJSFksXm-;;Kj3zJG~pvPv^H2PKNoE!2F_urN3(&caGmE}a#}wn z%V@B+*PVG&0~sXi9$(-yef>nzjF+m=+AT)RH<0fRt!2fdrPA%;c7awG^0d$>lPR+P zu$oM1ExS3gnz2B=tcL7BTNQCUSUa5xJAD$pGUe+-Ei3sgSZ>yllXvThhPxK}8Ok;? zpSFS~GMkw7uiel~?C(IYFNy}Y;IW6E!A$)9$k*ENj;%gjjY3xU2Pl6iDj7Rot>>9J z>r2(TR0Pf*fJ(SZkDyF0O;oQ`M9f3H&rXV`p&YC_%X@FoLIu{45#%8JH=&2~#hr{D zvZmOYwvU4NOqSVG-ER2J40m-NG1U86IL+L`S4A#pr*1|MO<O5=m!keyap z>3f6+e@3Rm*wT}@`jnq-;1S1?DN(jF%WML>ty|lTW+-@{5RU28A%xdeR!uxil?bGNwvjTq!h*G?C#61j&36Hk$Mj&!lXV6>i$=_+~eKMS=zwQMj`}fKR_n`~`>?wGaGIXMiLlL{!c`FYPF#M2+;{TlceS0J zRgG!k8?pUy*<_wV#{E1PHwy;JdXlmELxa1ZSt%HAJ>){*4>TwMACOZf<&WBL3w zb*_173|!<#Z$&%R&`R`QU&P4i`e9kA$g}CnWgOj^ zuFH5(qQ{Bk-BXD%VnoIvL-CeX{01W9T<@ETatEqW)zg)%_6S@a&1dWJ!;C&(Kx4Ub zRai%M*km`}o@_Du1~U_swelvccvt1Vpsfb%Ww~f@5`WE%U}hFppqeq-Qog%d-MM^s zAZZ%nEtcO?pQj6CSF?#uY^WTV@BxR9S5SZSRiH!?iGtzIG7x zGP@ftPZQl+Xl0x}UV1uNsm(6rXyGDMN3*%i4h$>WI!s?FIi*>FYDcY{PJ-;eSxT}C zcvo`N3B}m`Ha5^mq&gI*2N!mcoxR`SZxcPIH&I4c4M>-)(16vvuf2Kdt}8yxzR+Do zwx&vEPx?BoB~NDl<5oJUTUc3;uY%`Wl-jPqH2NFm-4M2yeUfeP-(FOE2C^e?x;kdh1N3etjcoMnwMI{lCwH8iA~0Tv(j1x`k5t26xu4v%oH!r#=~87{k)iU zyweQP?FD4qR}0yb@GH49dvYV|S}jkE5<4ChDOR)b{aMmOX#URgp!W?qen;X@>1=oO 
zO89OgK79#|dE&*rxHy*m?CkSVBI<4WW-qhD8Blox+Uec?6`HT$yP4U#wa~nb?L0`5 zZ^htpdW_$$(cUL=%}adpeSSLJ>+k6(vvWtQl|3|LNYeo)ukq|n?4g2Z+3mIg*WY^g z4vuD%vVGCp`jO_-+Z~VJ1>}DdZ|Cx}qv&p!_xC`l5pFW8n{nz{5Uizz>%{vfw77)S zr|_Z1c)Y*pt6ffi(;(gv_u+q08y2m0-guOIoy-(d6`T)swaE)<5M{R5pHDrl=xJZ&W?=Cem;T1 zh3L$}<@cm|MBVq5o5`X+#o4oR_-eheTi{if?AvyIovFPOVKoU~*RlMJ$(s6`_1pIN zxEdFK=c8*$w7-&P`+SkiIi1C1G*FwYS)FPG`7`nO3_EJ!XEdC)WuEX*74_efKD)fL z$FHL7R))?+a7Z4g$|GmUMxUdXb*pJOIsj+ci#SH?`5o-;K_l~kz3Hk$F|tbDZb=KL zk}czeRy?D*G8ePJthdyX(PtDHtT$Wz2dUmN>d2hgDpsBdlz!l0Fl|VS-;$u7x*0cp zNV+m;=A6J#zLa&Ua+ET&m%VY@k@-4&w83TetR~i$v5RSJGo!Ke%6}1~o>nS3dN?^g z!pp-&hJRJu3}zWEbY-{Gao-vzyQr1D$xC6B$XF4VZ}8oQ+U`))*uZKA$g^wV(H+L) zN#9$&$vn2Ps5=d3S%bJ#MC-%uw$1c4XD{@L#pF>5u2WeVwvdC7}d-U z)}xIv>^gf#dTVzr+MQX%*Pf(Dk=5Eoc+W#{G|XlL|#rPi{eAtUZ8yz&6nl0C5R zkzulyAIEWjv1G3Qqtj(Y4k(jBroeBtQsw-odx6Xw_zA*U*~-qQjUw?}l4k!(Pxh64 zTGxrbZUt|o3zz}-rwXMi^Sw-@H88@dx*(b>ut1wHU-0uf0A-j`u4k0_DYr-wPq3)zwL-IE~`#nq+^O&p) zrJp%QRLB{GRqXFIvHmsh;)`9`-`j=`~ACAN=-_jO|85U+mHR+*TvPCnd|wbjJWzUoay^DY{HllHSS z@qN5J=v@<9%gLLQaXugaecAO|RNIs5JyCoP-@luzV|Y?m{luF3qEqo$O>1u!@qBLx zf1}Km^43%tWeKg%^!HBu9xO6vboqsn<8iW+HY&sYG*35FA4%4{ zjz28M@1fouOs6xA_A;(+$YXXPd(I(d_t3|%n~MAI@w*Y7>`y$~lS=A;Mf)qD-4U`! 
zix_92KM3vT@S7}uI?Js>(=RK%Cp0e-FDBF4lTgo&mDO@-LpW|hC%c07$K|iAu_biI z^V#9#x*OMb;A088KguSV+jx_`KB}cZh<=~QVfSk9bn#||JbDs7meX5T5>8VlBciox zj#BDjvGE>1k9+ktZhvBj9q{%JJ35yRpMuC$?B!#6%TC>El-ZppWrp=)Qh!PQPAugq zHglmi8}s(B(Y_Bx)7a1v`pBdCPcM8o7gK)2>yz}oZQty6N@mvUB=4({=2C6+fq4h^ zavDEqj6xkb;3s{L_fXuKX3rz<_I&Fdv{uUtd(usHo=Ql}GV(7oXXP?sI4@ zk-=8T_CvgnOfB$zHYBoN=NOUXc3z+8vQwdV6c*EyCKDmcioX-YpjGtKlZ@$4 zb=FSCpV_JSkytSn&)IdomzwWDGrRTfR;Ct;BiZMkq)1e3!x~P&Y4;*yPP3P}bFJYcF%QiMW;cV$OJF7v604cqaerg`>edU_yaRpHd|meIjjM zAb#W|Urtd?Aocd-JXDOzT)+`fznKKhNtBtLnQ~nAjb?r|G41POer_PCs*t||OtO>d zVE?}*PGx0onJCwq^x0SYH+c7<(Q)i_VKLg;mbJU_;`Am?BY$JwjKLvYZ55hHlnK!)4BjFNRRO zF0=T9U^7k4O7e6ge~;2zxCyq8^W?onq8oAEm9~=sf=1R;YkJ+CoY}2%I(nH?f4s=P z^M^{jq?O-{*e~H@nQ5Crnw(+FI`PN;&)nev+UP3QSD?Fz z{5$K;IrW-z@0IktDk{4h#4=;Qx2%wvgPn0x)zjAa&)KI2?7oh2XK8P*qG!<>-pkbL zDMEgsrFv|p0sLC(Ge4(hJE&A(b-5X!o07Ys-HVP#!|Iiyzn_yg>-kvL3mdV#8Dd9f zqwDEEX8p4|Zy8DcjJcQLJ$sDTv%4+iA4W?#U)jf#HFO`nW-ccC<*)bddD_eRaORu4 z^TaaTb|7K)|F*ylrlvhtemJ@DyBLDBpd^%8umiJL#*;rty2}DkFub^h4Lm!;^T#dUTIK zZMo=NLhpMKX0aMqXeTGfy0NCZ>~}1jmWWuFikjo$d@mb0MuguTFYQE}Pw@6KnKGM_ zwa)r#o=FSaMz2|wzDSEF;i$E^x{J0RK;aY_>Qi;||h8;!2Pv~bHL)Vkx zGM8TZX@4WzK7~x#J9-v+TSCs*+$<)J@<~I{ZylU&!C8M?{~1Pi z>Sdkj$)6#%9V9ZM-Qd~d;>7-<#0j26j^7Gxe@Lz?X!1D`<7YVjl^tD41H=8FovEMc z-H$}M5577=U^_NG2p%tyBs+KdvXANorMI;*l|4<;-t~q5rXRHn4>?1f5BTxt#nZiD z{%1TVqhzjr7Hmd9vV|7UBV9@M*g$Wa&>F-~vvW0bx&IWy8uHfaIOMyv59d(0f<)9mbM zTKY=F$V~_thdhVRg}i0keA(+>WzBXj%yMqAOh(TPMD0TA^!hTZdn78op^zQ6nXj#e z-sJ_iStEP_pS}GIV9#%9VG>=QCQD>icoS~2Gk+BS%?fdLa4tZ-CF>ng@bH$;JG0;5 zc@lli!`({t-(rWY3QYDQ!T0o(d5&pf%mNX58}?y})1mjiE%^K4d&+5QB zy{v0#Ag41jbD7ogjN+!CR9SW!kMr!T$&RkF;{P%kauEzNCZDHv&eGgS6M2^1IobR2 z6@=3R?BvNso;}JMFN5C|MJCVL&)lnWhMwgRmYyBW6`}kv4$^1a;_uC*To2RCY2*fp z&HN#=|0`j>nqJ2ha&=XD1YYxG0R)fav$w#05ApPAd`+aM7jV;q1zpXLDv2ARtiMRza9hLsGuh6rg=Wh{qb6QuWxo-x$Ot`WcBaxp8@N>Ck1yabXT~zdyFfPH zp2kvkxH_?ufh5Wb#L1A$%5(aH(QGXiu`B7bUpqS%vm5O~Qf9}*a%kMjYQ~_{P^`{u zMje@{m$t5j-YVL^NqkCAzbkoXczrd#W{FPQt9>er2Z_$PQzz$4x@m1Uk?>Hxl526< 
zmPdaeM=T}vzT)50+8VFKH?)12*tQn+AIY}{ru*an6>at~_xUP5-^AJT`ks^FxfSJ~ zlxvTpapFUE75^l%-R09a<+>Hc*q|S~$!_pHNj8q=8;Do$;^9SB`7A8n^ZXRGepOA0Mq7>LH-M8{8++Rta_pnD(Gt80B1oOFdkce*)<#dYUji~W6> zWe&l|C%o?p`uisg?jqxxo=-1gSoXX;0{NUCxmjNORmR={51qW;s^w-rU(8O2(smh? z){0bHyzff3GoaE3hd*g~JAIC7xcgi*&5Xg@eE$XiucEb4^xYW7XOQ}CR#DIM>_>S^ zzWv0L!`XgqbJ@?)(FHKNlLsD*pSJw*-6Fd`BQNi4Wp)}57$WzbLgK#i&}aN$sJL`4 zDu1S(^eg&n`vn%6nd_ZtZWyg+5AhuZZ+r8t-_-t8k84!H!+x?|_95ifj2}sK0eQ~w zt~;dWL;7j4;`5>}a-4XPld|K~$XZ+{qvdP-JWUT-?QVmg8^!Ct)9i=*dLaG20MX{+ zML*@Iu+5x0&#uhPc(?~HnRPnCv)ypN1C6c|ZTtKDLgSAuKKl^@&%$<$G7l>K7q-w- z%ZUs#e7aSyuY-2ClkeNI{G3YrrxH_qx)Y13M%%sgoF8GgYeo9+pg({GZSlS~9u~6O zvFz*y{LaJK=Wsnik9Rh|nc;8tzb$3Yjg%fIq9hk&M)@R|wP6{FrM2kgLir#w;VW^r z5c1_LxF+j3fCdt`PsK?aa%Hxx2ChGn4aTsF%x2!lw>Ge$d!SIkXLsl)3@YY#zvX-7 zs8kX)vUfT6Ow17zYIrw~XC~KG(Nb=|c^qOH|72I!VYHHbc#4`?Rq6_n?eX4?x24Z| zj=%Xl`}LctmHFfBTIo|feUb;x_N*HFY)y)u>gB9WMisd~;|B35GmGPJl2LDF(67a3 zM%Ni9_kzg;GVkVDcm3yPsAZ&2E^(zk z)Uv-e`?MY<`y-_`GJ2VxtQBT=UNZ3vZD-_EMuJ0NI}NfeWW)6$=XLC>8cRvWYN6Cy zG?@9=jDX&Qd*&7@vxmg%GMdkM-~s5QCsPaM&YqrJ%z0*4Z$))8kC{%9k7rH}XSrt%7Pi`Z8k3eU?q6fMINwY^McMA=r z%_;b}K)b`xOwVH=d&+sC)iivu+E0oVi(!)!o0_Jx(-Lo>0AJ+-zFh1kN`qGHy+;swdzJy}2n zTo3SR9nonRoiv6?J#DtZMR3|fT@WdcB5Bj%g^ECCOD2{;fvW$_EN2bVjp~@{}I3M;c0d-&MmUS!}8Y%T(puq zvr9kc5Hg#*!bssNcJv21=|`nw;X9%JDqqVwQ07oyBFVP7g~RcEpg!OzQFfqKzJ%Qp zJU%LVoeq&oELzKN&Jbe%rjYNUmFVF?%lZ%#Gz^Dez z{J;Yq7Y7bw>mM5#WHj@JjFEd1lPA9r6&~cHFNmI_SWY=Fx{N*K=C8YDimTc1Nvz^N z`h9@y{NQOzvcAPGmeAuIJY*-vQJ%dc-|V3ea<^8pn{GY3StqCEG(v3_mA$v4JsBht zCp#ZWx)VvWoE<%?r}Z@n278x#G}?;vx!qvA`tgB!esjmzR-e8K?`|xsqsa1!r$Zpy zgB|`JcAtB8E*Za+&9Bi`4?K2M`wjkg171&rLnRUpEc$NW(da+fPCv5j;Paew*}$5! 
zI`Wp58?cda{%;7~L)lR+`W(dm=NF!G9vLUHnW+$j@mcgII8eizo<6Xm{aDE@j4eCHSQTeW<8u=+Dm$gbp#>@>G2&e8TK zaP2Oy^<|GU@jno{*O4@{WbxkIi;(D;y&=c5yGdFZO50g&%s!!6e0Cwc;wMv~k;run zKC-7ez2(^~HD{y;^7-_vkKu(m&z`x}5n9f@0qt4I_G)M5YpYl^xv=xEeEI}jF7Pa; zS?|KZMKrYmt;rCotBuW~)m#>y8Qs=?R^F$uqL*W<2Dkl;oxctYok#Ul|^W^f$A-+ewp5ojuICd2eO=w$7XCL`#eX` z=4>7g&(KaWaI zF&@q$jwq}kH8Xp^n^}?iGGe^tEEF?KvnxGi4QdbE)WiQ^yq3%J-IeJqQlAaeXf%85 zz9MTqyv?QctR-YjnHht4eNTSXm0unTv-DvSZ~sx~v>tgb6g#r2owbf=KKGn9Vf9l) z(dYdp6Eo3nu?-9ZoDfO9p4i@a@*5$YLw$Cw=7M=S@vS(#7%EH-JZW63&$pO zG!>0oyq?Iiv#0kR{0@U!Zt|GN`X(xI0J)m8!g`{4=F4)*_=m!Zk0wt>sy}P1J6?im z1#P!uFBzZTh1y`c+=#!2iujy-)}OXFk?>ZQ^dZ!#}6)`;VJS!_Mv6m zF`Mn>By{FNb9yuV-ktUF$I59PNqG^QJC$U2$@cX~`5JDnkSX@{cPy{FM_js?ES+%w zF{{g3NA|CD#LfF?zoxZ=@cpJZklV-hL4BqC^>^G4_pA}>^TnH-ILh9_oIB0B^Eujo zgIw7Q-X7}rih|#Z3)jgGxexcVV%)omW*dv&7ohhZJ2@Kt0knJcW;G7>=c#j8(JtiJ*I4TV{AFkG zcC_#uo8Q}$*KxH!9DZR(8ApD>y2gtuTSc@DaJ^XUI+cY!R>Z07p~)Rmce0E*EbbMO zcO(Dfe0MGxmZFv@nbr7a5T91$n)IhHCf`n?bapZH#eLT9s=%)%OFRaxj1K#XzK77l zGli$!$(ARwzT5>j#?!%CZ6y+9{4gAv86n!F%{)3EPGe*cci4kP# zMf1r6&ES|j>)LytGa?mmJBRlqKYxmY4r-*2ksVj5o%p!D_uq-??WZ9(lRCpU_ATeC))Q6BX;S?AHrf zGY@_v95RkLKyPXRZB4+@VEV|-q<6|#-B8JX%qPXf%mn7#NzRK5^Dd+AMA1d~Uk#_h zG_Yeaa{42Cc~IT@q|GR34%(Y|QfB$Koy5qEG@tO2OYw6RJw=9&B4<|MGXI=@!#ir% zRx7K#-Qd%V}v9T_Sa4o2M7B!J=zLn*>=u!I2)gqRzZbqRco;Kk=_bp_9apuZj!pRdw z%Q+qOEvc_IlQm05Z$z4DB1uc05&KG>NZp>iv87rwQS42dIo;HWrm|u_7^-`UiW#?! 
z&`M{urr;-Qm0Q&94uRyMBj_r#zEwzmfOgk-(p8Pn&Mu#u=^}T74lJxLcWZ9dc24{> zhktegb@3z_{1Fn*gmg}|v=Psrz;z!zpLL>Lc6R3m)Ys8_LK|0z86VK_81`9}9aO;G zAmx4&uhuD%9R-;U>WxkdR`LpIn(5EwW{2$Xeb(n^`+FY=`mpod!Izt?rhA^*+GgTG zZZgUWZ42Dy9B2Cco5-0Hf*0d@wg`4GYkTK0$FE0PRVDqHdTbZQ_IiN)`zq>&(rKL%YN6-_+AZOf4_*Cn~1WLA^ULKi=}n7 zQJ;jFQ60q|hRcJSMC#1F{fYK(*G3~8XTSedo-WqsjZxVx}WEcW4Y;9UWZohIO?U$DPFxnBf0JV zEOPb{sV~yfFYM|^wH^`!YALyZC+(%>eep1yU9R>1d|Gh_7pDQEkCG#o_bl^8=>@Jc*usAB@?xm{f3A5$y#nkP}ktA9Ip#-^dA52%OcKTQTw2mJ9qZMZwu7> z^UU;h+v#;=jFp+&WwbsVhO1=weNf8I+^ox8LlcQ<6=8Z0y$|<)cB4&X>(`1zIouPUhTKWGo+dVCw(}^ovM=v?@AfQWOe+x}dzioWe1S;ug0`~0KdZ>e)7bLg zag~{gk45R+eUp>RlVH1ntlfF^diD44ceXO8K{6R}BfOH04kuR!8cE-H8q3>*1er}8 zT13W)xKG_ZP|OO?FgBR8I?uB6HEeN~K4?olhMXbFj-?63I6Jv=5kKySm!UY$is~zD ztUKEnC@cR1&CDgHZ*u@o$b9YPG_VXGIn}(HJ#=Gd?}-8@DzPn}RnS7t%wDd=%Cww) zOBee*v+monT0Au;RC1$Z#%a5<*O{=nlcq{^GY&VS@pX{bD@ajO3&)6H*?FB4_dS%o z*54+g#9CaW?qFFur#^GL)i9+l#_b9SWgc=Kgnv=4E}FxkSH{-TPpjm8C$YIZyg!h4 zvNz;W`1MA)KMGm@&7Q7pqpaK%I2$gF)Ey@qX0L5^(SAqq@_sTmBmYo%tnx|rnIDVF zE+T46C}ywF#iUCM6LHsuy;p+xJDxsNMA7t&R`A4)y!~kuGTWT<`=fAQ+}tIjujDbe z!7VqJy{3Nd-p^P!YuC9)dVyNGD=7W6TjBjYP2J)58Cuxr)1j<3UOtdMyRrId>SiBt z<}LS!=aZsOGDbY`PwHi#RBqI)q}{XiDyw?E4-4F){v7o$r;ls2+)$rpp?oowo!u>Z z^TfNNglgwkDf@W3`9$*uv;kN_CZW2>Q<7F;e zK8Z}TXml(+-OAVR7i}|dH-aWE!~FyPPbNE$4V^@$%sQ?WTeE9D_xetvo9sQwx?~fz zGCurE5$hX@!sA88Kl6<2(_GG4|IC)MmYjRVb| zqrMMvo4vaqg(WOx5ZULVd7P~9lFvr_Y0Wn?6I+JLn?_#whJxeCGmNEQiL3PMKVZRG zeOaq^?g0Cejz1L*tLZ&uoKuaR<$j=ny!DJ zaXgJahr2m?W}oTODl#f3U|hMW5*Gewe)`#TXfyI|dIJU?J6M61b zqrdD#UV!hDSjUcD-$_sN@pC`D)y98f*+V?$C0fsTel<;$qwqSZKQP+wEq*p4`I~y= zb7iC%B3eKEH{idy#brF1R^qc4Yun0>9#U>+k?uD(`52C`;_c0Rau5E}zse1TyRhEq zH+K=f3W=vs&3zyX=;@zq=`4KpF7nqzbVkATPN;67!(Zt)E08_;+x}4ap0D1(napn9Emq%&@2R|R4ISnT`ZS)Db)9OwBp#aC%Jg<}0%Q&y z%#~+*`!qe}$6z!-?X3RnjnlC>Ob+@LR?|`18R8ZA;|Nktz}L&HC+jXbd7s&${ZPq0 zqBGfY_Rr*&vZ4H^JFl(dZ)P0ZK`!4tle4Hb#e@l@&uHugHM^i!mG?C$G_^a4^G#8? 
zbE6^jvj=sY_>$hrP2lhAk1Zdh51-z`aPcFzMC`4N&g^|UtI6$hOGuU-ozc$^ zB+Z&+?$nsWzZw=?)FRnuILN-5O)#qK^C666qym)0_7IN2ww<^G;@vl~q{faz)+uP^8ZjbNU!M-_4Oej3{g7CD*pu21F_ z(qx}&&Ysp#H}kv0_)5muo1rz&PtL<9-}fPFRzWilmuPb!YfJvloraTHKdLp0$(f0vd|)uI%bAkDD*Y$+JXc?10ZWM2mDtZ&I7&Y!=%nvfk-tA(^q4B~ z`0P^6eQ!Azkp1Aftv2~5yMRuokNZWvFa7^deAf|+D)FG4xy@akJ+zhkN)Wd*$vxD{G^<2md1V=kc@LO?^IHrw@{SSy?69 zsnAwtEcO#qPG_AJX|p1FxxKuXQb*ynGfKa}r=o0_{pQIcYqXPf(yHX@MuwK2KHzUf zeC0-vapGz+Y&8hgqQ~u_vjaNGtIK#_?xzY@>1pPq(0SU)Eb}+wQf6#URi5&=>F7wUD`}N}^wGH(FG9OXjL_OMmux+%1;> z#9#l)w=R|SJ`tbJ6j3^mekFdF(9RZo+@QbvG8E>cwU)fug>)kXUJ#p3BIO~TOowF7 zR8}jj>l!$mD*D}_g$>1L7aKL6qm``Rt$}rR<(G>Y_j#7tk=(OzIyrlZX7{qX-yo6^ z`Te-ul|TK2tDG491fsXd`#H6CmsVzL?;sd8Ms*GfH$iQNS2v5onM*oRhIp2C#*uR$ z2%P|#bD;HaeD$KD*VT_CIi)<7&#q82_Zz$+*1sh#b=TS|Z9Q1@=0B#zbF?r+#8}Pl zW{^K)(0ll2zU3^pn$5%cR+hC;?aZlHq=&3#m5Go`^uiX>;qiJQi@fU24_c67PjY0Z zaU&c|VYSW4vy4@BByDci%QvOvKF0Iu;_qZXo<1%RSEk@MKGa&g`r79WNVg19$M_`Y zkDkW$A!74ObeVBOZaMf4E(^$aDvXn5k0g0+&~NDPGa~9{Ij5X$-R{}p;$8Lyjux*M z!~Y-=@g#Wc54{~(=1^9Z?=E_gPd%yaoVLwsb5(Y|gw!*&oYKQ!@f#G!@v}qp7Dkf4 zohY#rnu}TN5B#DpeElZIeO>g3Zh+K{#j9@osgBx% z*mLq&F1Ic?PN~55a?;~l;=dU5vDpFho+mW(;R-OOp=`oPppq=6BbrHey z{T4a3(_dWQjAHr~z41R1&s%udpK-erE(W1=sFI0FN9l1yC&|pw;i2qr4d2b~#_TG( zN!fGobTj^{ic>9pa*yBKYnwTOem?6B#aY_Viqm&E%3ZiGz#)4>GuxO^z;wCvJnc{7 zZ*S1fc=F5^r&?M zCh7}h)^HiyYlEBIINROp?CZ#VzBg$(_Z{5f|9!-;d?VAglbnB`_pIa|_P?0;>shnBB%3cv>5Fk^3Vv)A1!KvTwE)Trz*sl0+}yXgBXR)9cmn*rQkl znL&=sg5QL~dq*#maZnw_C$|a|$iDQ1=zFPu0d! 
zQrwBt8(2tg5~@T;$%tDD-9M)NoGi+2t3m8#jW##JG`5iQK^Yw+R;CB`6fW~ES-D|6 zqvV`P%5M5!MAqy*$_S*5comI*jQh;vJgeLQ{?y+4>7>ZHrn*qfje@)3Fz0r&LYg(8 zw?(Sl@U^#hZ?Vd-M7OwJ@GvXGJr2bF?nF$T{(MaGLYUxyLwTn|!lSXOZFJ z0;x@6+XNPpz0dh%fKM8!m3zwmURYa2NYB&S6H4sB8YZfJF5GjvXaij(I_F%(@5r3{ zM{cF-_UtONXLC@=Et9#M@e7uaT~(}FMo;g4JJENLh?7j6HK0@R6rX8Ej|Zun`(&~& zeG~7P?b!$scp-f+AjeV?PR3t)M#)J73cpzo@yym7DU)W#uM&N4)^ay-F!LSj^i%7x zh@7XpTK2C5o9zGFUYoVF+>)-wvy*pmpYLv|i+0XNKfw;KC*PZFBE9;oaXqV_{*;K^ z0+psbt`2VNLi-6Za=C1O3@>WHllznFP1-n+r%Z##cVgr!eU5P~;YO%sc6_#Sqm(+2 zSMN;Q2k5aR7j+U@mufqEG`D(Jk2FVUV>CRMq4Ny7qeaT?#jSl{w=b@8uhf(5<_%Fh z`xqvQHy?}0dur=97@mUb#Voi6G`9F`6^^p+IX4%4X0*R;2A~g5Z>8RqWV^4(0uQ3_ zmRO&=g6p%>4eDf%!zFySCceI+gWR0eO+0w3n2WE;ULS_q4Jhx&4mOD)&3rO|+@FiV zxhdyER-aM#ZG130+#ZA7=OT6=yew9uVbS)LIA6)u&x3h8cHb2T*|F7_=UqRy~90Qu2us)oQTtX zVR4+;o>M-5gXvK+*M*)OL(5~>XQW+C3!~t1CacH}?93v3rv8t#p0h#Ap|b-$4rG=2 zZnl?kF;4rdJRimX{-RVTrGDhOFS5InVLDKYztC~^nPqSKZSY?Tr4N)_=yz}UWT*Yf z{9_XYvX6Z)BDHsKs+G6bU|pSiYO%LK47Tc3$?v768u&cAD(LOm${A~``*LXvsQvD(mw8cw2ChP4L zaF~5BSv9E-`%CdSMfn4i?8!!QukWLzt3)H2Yd)2YB>F=KpIM2X#1i%qi(iFj8I9!r!ohH@Me=LtD&KGy zzMG=H1HN<9NajJyA@DfQ?jj22Zj+qC&YEw=p0D$T%-O-b;&oSdnge`~8de zy=bZdnRkO!Zb+NY!t)I`hq011WLZGgd^>0QSnsJ-L4>$pgvx%ZoEa!9B&_G@DzEb` z!1>;ooKzd`=Sb4_;L+=8JF^LmN?T;~9@@&?(3yeR(erJ4@pi+}FZ6hkmX6hO`ia>u zoBJ3uXPG-kuYh81Pn*LESBq}BrzIzZvql-eyOg#v7ar~OSG3p|JWY%w>_&pA`xo5a4DO-^*D@nQk zQvxn|JaEn?5kQfc1A%Bey4HCtc31X3t@EST|GlXl1*jky8|3c(;sg?rKdJF>`+72fe0QY^HI6$u!=$tJY7`epU!8va9=K&Mq`{qBxe* zKwlRzGqd^|M20f5^o0BIY`Hl;GS5{Fh1^}&1$u`;bv_<)dqcjN>`tSb%t_W2qbku$ zi8gjNb64(NGtqUu7<-P)o86@!icJqgcB9XSi(7ZFka9@n{`TCwaSr-9&9DX6JxTX2 zpZkGK*|E8V1!ip^JLH$E^)qR1WSghSewiz|Lr);zH?)r5+ylqV?v%864!-A$@As3f zC5pE|F*`qU&iH$IIP>sz#gQ3&`+hP#!`epB#ivT-u9bXO`#q$e%JR2^d%pMfQ}|XO zMI)Lx04nKIB&rXltDIyQO7`5V(GDJcq4YF9ZWcpxGB_u`_K;yGL;qM7_C1=Joy>e} z?r{DS{A-yP-Z zui%w)>0iR}bMfF1abgWQo?-(Jv$@;RJCEKcYGpD>n(3>R$-v8?m>jbOjog&}d(rnK zc5<9|vDtjX)Ayv!ErWf;-a4MOl6@P(_h}g|w^`oiZ=z5ya%5k{QXE!<(ohmkHzTr? 
zcRWDOrWPL}Bv93m@hpo83Ee-cmonYCUlKK@9%=b&(#zhl_V?qXNg6Y{NjKPWdC$Csft zhRj(V+<`Sc1E1{0%YFVi6>>EU7tmEF+}A@RC> zbAe|^!sJ=@R!gkfo{eA2=NjTOGe@Vxb*cJKl4+}KF^^}yKCh-)-@Ry z590q||1U(N9r>?O^9dy~gLS-i`@?BJa_!0v9%e`9u=}^fhBu1X@-)5l6XEksvK1h* zQ{hQjmHY>-gw8;emawZXqDV_I;TBlUQhz)0orlvcucEhRS`f_g9h(ggSQlvzObtf}WaRWdU*8|@J|$?DhP5dK-Oqaq8)x#+)W z;ZU#VvA%r6L~hp1dAFP#uZ@>_g-y4{XIB#5>RrC4;ZFK|8v+erko!pcq2IFLxgBfC z_kq2`hO%Cjy%H5r{Lc6{yJ&K2PEknPmOUn&w+Ta`G_unPiqX`|)$)@+e5$i&n-R8Q*7bY*w)L;}bc(cM@+I zN^2val0A1r)y@8j64}!K$u5d?kn%BH_^i!6~-jTt|Gz)q&JgzoG*cSz-XWqXn&{mgc_$?AW?xedIpS~<({RH4IsS7u8Z&36`860a`g zU32vt=P18|d^rdDB1|@mePc+UQ$hKT%%*;S^63pw-{fg#ADf_>laaZ3F}rG8vA{Wy z%XbCjo3b8(L$tMkl(`qN1x{a7BRZ#l(InWdv;+b1AV?mOQUEam`+#oIg*^I<=@Df*{S{DlSs^VKOmWYOip-A#n1Ineu*A$(?&PAHM)$FHcf!+3G=#R%Awf!wUq;D~pHV*RY5|Y0zFMNp0J?ZrZ++U#fFK9f2 zX6_N2L@yshVMj6kBl0(8`Q!EDyQ2EAl5J^lIIDb!uF{+S-m~0p-wVY9dG5K|t%v?Z znmQa}+22&VG($!|4*u0h8GKL0MQ%LLeD`2|@cY?lJDKt*sMSFKR-V3Gjfq7js731B ze=(PAAEMBPB)cj5vASQ#K2NgteYKoX%zECMwWVEH!5q>&q5XW9(WCh9O(SFJ{tMD< zg3wW@?anra(C0$BI1lCQft@UhMPk?ZBpda}N}g35}>)mky>lV zdr#n1RmnC&lsgd$89Use#S>ZO8~A(CNHF)Cq-XX5j@RJo5&hFcyvlbQwW5c3-u1B0 z9mnZweXr~mWuAfIUvLx58j5IR*;Q^S%O0zBg%xJxdxY4(4!`|S>Zm7p3EV#=TU(xb z6_36SR=HX0Fud20lds}c>qMo>Zr}IUR_N2NCA74Uk2j$Z%&;E&9 z^%UpAt0ycoifze4TKgiE9?@q}++S9!XoB6+Jx(~Ror}uy0ElN^GzDf&aB%?@5 zMM)ac5)l%Ggf#dzh!R<8C@VBnl9jJjk&LK}hKi68sT5J!l*j-1`rN<&<9^)t{TXLm z<8@u#7r$?TpL^I7KVACSvBT1!Oj zAoM2R#i8_-u|4;S=d{uoGmqS$b1WTYNBkKU|El)(RI;I#{-B>h%7y!T3$wyANiK7< zRzf3gS?d*63-`3pwmz2&b!Ypz-2qoFGehN2kjC?D(p}HVznC^gv$Gim$z=aM{&utWk^$~^_K`jIJC#nf_rs)iBz{`UU#J`0yS(3+Y#@3s$K4rS#zX?ioRCMwy>=ohW$tkQHGoK@F1PyK2YJVDj5^wcM^;g&%YQA|gdXgtE&$olrT}X1dEvx|@ ztze6ZU`@RIDimC%-ZHgElI6cNI-JdXYedgkxxTzbLmnr0dF7;aVhfTvexvav*~XsY z=N9u#b^UFnt&2%Bv zvt*;n$>iJ@UyptgOSGD|PYl}tZSISoyJ#`Dz+UIut$a^zH9wnNUQ#}HFw7^HJW1qk zsH}Cahxwedz8Jk%u#4*C_pR~=>GN}X8bMo^cwL4pawaA*?TI9M*PNsz{G8>p32Ia^ zGL+{#{y|T3n7NQvcjCE||3?{tvv#tNmR3S)Zc1oEVhh#X+nAQ|tv^owE<7jAgR^bE 
z|C2_);Fekdl+3+z}+*2S^qrsSL^G2qjYUJjfYrZ%sCT!jzjn7bkLCG_tbChG~R})zsU4C z<)^DV0nMlA=UoVW1Rq~|I>}8FHT_+y)U{gaL{1rF?=i;ZUgNr?o}JJB`pKE0NclQ- zZ=$Ul&~t;C#?KI&D7?W=T7fej{&d+_}?E`#a4(J{;KAQWeXGOPB< zd7TrRi5aAaX77mkB52CZCdY-_a0EOAD#W^o1i!At??qK=;&+1UdeRl4&SCp7!p5srSish0$uDpQm|&uVAE$_HIGnIsD|mI0@bt8Kdtf z(`l@}8W|@u+;UtrqRpF(2jhKPla)Qp`jXS-1(=(O>f~&_gl6(o(?AvQyfsX7(W zU)-xv(0vE`bE-Y3&#$1RM^N@Fxr`&X@#xNK+#bf~+-txiz0=%&}ZVfz7G zCd2u1wW{m)S{xsQ`i}TKm~?U@bW{_ z$bDalj7sL!`Gqk%bEjzhXk1=Ij@jkx#3oGV`Qe&TQV zP3E86SdsgKcKDlnFA{s7wLqsyjp%tssH9ZpG+&_qdzwhr_TA)IU)!_z(ag?DlTTKw zvkIS6ayR>|C5pc?LS(l-Crl@@tL&P0!+p*~Uq~7`)0f;|6L^Zu7s_bqb<`(1BrC{Y z@ubOznti9NE{-Y82eRKi+?*lLj(e$>`)nIR;-36c?%mETd_MeiR`**PE~%#<*yC{j zi&or|*&>+C>SJz^$o#YDBxLe^wV~M{DB0h0Pj)g!bcLlm z$(}(XOy(E#*+#9*$(qwtPLw|9yZ3NaT)#E+^e)=_l2Ba~B$Me$w3xer6PZwsHaC## za2zLp*JEUIsL`|{>E^D$#8GC4JvqP=n|YbhBbjKE-SKs85Ac0v|2@BDUCuZb#eOE6{7mI46h^{CZ5|IHd-(JaR1M{g-qu?7 zQL_K?DSB_mN#<`W{49gvB1zB1e^nM6#AoCjNn4$ayKBjR3_N}g7e}EwyDs5AyR!qd zpFBF-P!%0?P-9F%KMM;IX^XeaR}+i-BwhUiq3`lNKby5@H=wfbF2m{Xdb|=|)fD!fr{=6oC4#$?zw1fqUlQA8)apexO>upq)^fh~6u8eF&HYF-h_ax8%B41P&Jty*;%gOs`w8n=HAd{N3(w2qavfyJvScCRW7k~*RY<0$sl*Cv?BH7uR&*jGk_U)WkqtLaTqG_iXxkM}LWT zor>rA#^kxY*9~Z2srFKK_$3|1gUn!=8Bf=eX3no11Xt~Of@H0~met*)M&fX<)N!|I~!xe#4fsd4(7%Lk0-Mb@#N;y(Y@ykQq9 zC9g&w2%L-U*P@fu8!nLMOU7!3^+SEhIDCPOq~^Sz77jIXM6da$@Nl+TD+4 zb2oT%w2Z)SG69{BgQ9zVvWmT$uFf`USHkZlWO)}ZYNN2BpPZ+OUA;<<|MUL=ExxG5 zvD*Ge?_>F_Yx$o<$TvXuaps@BdC1qiA43vDw3)k+w$f5p<$4xoqHjT2dzxKEG9SQj zB0c{@8?y`Z;SKb2wE4>hntzcLb3*1fKg&tKCq!Obu&S=OT+BNA^GrDrbfvzE(eP;e z?2p#j>f1Xtq7R0$)qKd!K2N+vb|8L%{Fn6Goos$HhQyva(@CC`a{^#GdHqVB2cj%l zPX_T%HyK4gisT?xyEI=yPB4Pv)Cs)6X1h2uVInelPQ_hZb7t!kB{w5kvgtl19awttUl2^>YcHlerDZEDJU($SX4s0^k{lza0 zCFPQ&k~_J_)Y)6-3K=>p|S-oA1c^JHM5qsFw~K4 zy~HLP(a0t2JSY3xv4d;iJrVS|(|sT`=7eFeo7__+3brs7Lat(6$+a+7k58iVSlVj@ zF?q7h35_>dSaGj%qe@mqXRv`})W6EBWN&^$d%4r0=$7(Qkl&ELaz3;xer6R$wc0H7 z6v)neC^xt?V}Va&|IyfOd0ss3N`2@;4MF24WViya?wL;blCp(U&e~v7_0( 
z{fQ2eYqmHJGVatyQ*k5QDg1vo{1)xTXKgiW6j}AmliY`TX~MoosG0MRSx4%K(~UHo zEI^6dxsv{}vRf8?nRRBr{}H{FCxHd}sqDLRaQFZTY$5BCG<7IjOsuNJgy@ zJZv&6XN9*KiW8st5gRV0$1{DJQ$L^TC7DWcUMabz6K^mUW;PoGPE_Izp6N6-hT%7{ zus5=lWY$gYye(dB!BgT0yYW~#@%<|VXT3c$loI;P$=sS~k5!gL*+BlkkLRc4i^^K& zsRgfA5sJoZ|GR>H9&HxzC#~G3bk1P!OFw6#usxn9v$13eIR#GJl1=7q@i?5uG?SGc!Y3u-H&0P_ zLrB)@GA~S&&N^dRB90qsb1`1dLH|hY<|J0uo~&^V_1H=uxxZ_ycCz~NsFo9DUQLU|(DQ({ zuOZuUFqg>hw&d`O`B|P>61h}#OJy0}vlzKf$A5FZ&ouTAApfj0B`-?`IrmPc`LB)*g7xFkI`Bm2W~yQv_JpUquzFYplGM8}!)Tn~Rg z!|IXBe&*FApEco~7Sm*J9y+VO3zXdn-^u3n0E)h2F>jE;9dxxn8_KgsP89CK^DH_) z*{A12$`fRAKjs8r zA9$<<83*X2ky`WM`g_tT@Bc%jT-&(PmkfK;+EZp86JYjDqgYn^#OE5(_oJJf>iozj z_p+azciU-+W2e?d@x2-77E!ZERc)hvFSNPtboykUw8`T)3+C})#h>|<3xDh;+xE|9*2(?>G@*gUe@Ja;+OU&-$80$ zi_7bA(}ZVhY23fg_ZQ=~m2VRBaV8H|%&2>Y{z{;G5qs*2ms43lc9nA%Z}#__Dp_<} z;5k}oqfXBHC3?TP*SC^n&dMjMet|mQ(#!~0{}!6F5C0&2w$NiWn)?OX^R)8^Iuo6d zykcjfa6P#`ZNx9yA5ZS3uX)D7W^ZTn74x-T1?8(@Y^~OQBF|)ZPUg}d%)HN{iE`vR z1l?1~c_6u8ug9x-wl84nTP-Za!7O-P4m+P|>jbmJ<3#&hLh9G^Djit#YNO~1Ug>N+9tDlpkwZ&Jx==4YecuKp4fGnnn2{yt z!rs--ztGXn=O?JWf#zGY-%scw=P_IOraymll6TvTcz@7aGXB&s_H6RImN%Kp8g7T> zWZ8X}bnYe1vv{H>^^}`nW_!-+z|-DM2T#!6b1d$6d?gli4eGOt{sV8(RQu1ODf8=Z z^mYaA_9C&#M!XkDt*gHm7qIn#y2+IBthRd4%H{aXzHu}7+>P=oyuvHE@1>30jMPI{M@d?nJRhM&dk<)|KXe#lj zMYnq;+TsP4ll|)0=Qt%wdc6j=%R^62Sx(?R=i;ImeKaP$c=}`@U19`HhROR1Bil

J9Ckm=uDof#&DeViJ~}*jGtes^DO!k2aq`K zJ=MG)Le9}!MaZZEFFWYq4KtOjcjexp7kS=Mr202)oMTj|$)fTsJOi%!p(au3xtBaE zeVKKyBEjvrEl#2tt#bnVcL>`8bGO1uZhcOEvy<6dEq#2a?dp2S`gG2Sjx(MoSL2?L z_OjO(!Pxs+zLXzo1w)N-x>66l)n7^nnW5}OUvH~<9Lq`$nWEe?xtk<2$^(>~0p(TE zIss?dF)fqb!(w4*$I|-mo?dG^ml1t?5?Wk;&hUHGda7RwevmTw+HKigA2?`uTZz*<6cQ(DYg=WH5E?t>HXoH_C5EaQi>CL7VG((1ktojXf9li}BB zPgc1Zq`$g=uOZM-yr7G3ASU^|`+8qeOB+#80awq`YA|{#T+BlCOKfPBZyqd+vf2Gh z^h!=5-GTbV6?A3|D{$LMA9>D8HnrR{H?`mwo8fyPJSWFbxX8RPnVwIChure{B_s?g z*nCdCCy(4pTCJn4tVXuMLs1^VdzGB6_r9dF8vc@7tt2{fD|&X;U%^$@cN_B;$@rZJ z>{_m~7WV%UCs+N%OAg_~sh&Cj*qd>z9*0fm* zR~7aGk~RAI|MNu2V7ZEnvKF7`j5E|a9>xFC=l?O|_)5S3DzlglvrGCl zn;gxq=lj0Cv3D?xOo#dG*Ir~~$<3WvIY{JqS$0qj_aD-C9g?Z0PVxoaPcz9~{gVDh z;Ui~Is-hs7D9%M)d9-A0X^{3tp(?i$EhE+35nfWgSLyLiR5vBnZ8~roRbH{y6 zqegXYw1NKJsFt%TBH3gSbI|YIUDp5?JXsvo-AZ?ZUIhIMjXxH#xnWQy#g+s_67%;+^kO zGUw*#kC zlr)FJ@;I2TwG+_yJUt!dfA%Vp?;$OeQ{z?s;}*4=klA*l#3dxq-T(fyy#rQHLe(bp zWE9%$^-e#@9@d0?KT9UZL*l7qnN`4#NhBFF{xo)dLx&ls@4)rTWRfgMPtfWjHt;8I z&ZD^#aQhl8AK;U$)&Ho>#acO=u9IytbEq*!|D7ap7Qd3agxkQ(MqcGGwZDU*zHIVM zrAEN_^-6R%v`>b~gHA9%JzYWxTf z*_+92Sh*K4=ZEqvz6%nwUY9euJ^6~~$YedNWp(&!lsu%zhtAs1h<+bwBlj-7pC|XKuFBl8vX+5i^Ip?&FpYOn5W$rRl8;Qqam zMKir6r%z>bpH;kTSsJV6^=j*wJ;x^Sznb zK8lV+K;&Hd17wxC=_oxN$z$fcer81*3$?$)d-h-}(fhCJCL3a6KQmV;%K}67DtydQ zJL{gws(6I<%adb#Pa;Gc`Japp)5-V%vTux&tT`T!*CX^Ymke^>=ro#aYAm11v%JYe z(9w*3E5_Su~ETlvZkJx$?x=0 z3L0|)t&w&!GwZL9oC91L*WC)^GC+yh-~xMN>)}nP2DrhT^o77Ed!y1jiZkCeuw*-)~em zxwm_>%15B&Wu;a3`b28 z%IUygdDp~yC3E(HP?D^zr^0G7!cWCPPwf{|FEZ+(MB( z{fhhKR{aT0+ej*N@o)895hc&jSh6$bnfY>ZIK+szhw<>25F^{ddWX@KFa49U6X|}d4r$k;lB0$894t_ zjdRs$&hFytwkdrws&o3WlJWTf@*l0|yLinTe10Wt4Ajqjv?li9ETcoLp^-WDV?1;A z0CL~N(d56A{FArxMpkq(`z}da?PN=+$60;yd!1<1Zf#C>>G`mhrb)J5H{nH+h+4LTpS6 ziFUXFhA&X|1${R%pK8P(^eW6|GGEwbc5pu~lUL{i@9NNF6aKa|`sc8!GFqxcXLm!! 
z`98@#G$q+Vb3Ink)@QWeOWo5UIJ+^Ip*Jy+E9m`IPfm#gya$e+^!r~P=_}r<8-49C z4_l1Knr%W~Z&X~5if(#O#P>XMn{HmR zk?*(`I*OBQ_5u!Md5^Qx+{e2Ca&|z{qO-Ob(Kn&&W3qaQWLM+uAM(x4MJKJ@2o*=+ z=wg<#kS<;zo4Onqb4do_|PyGA@ zk$)(e+~wcX?bBMhNgv-re`4O7q9nIJPV?>aYCQ$D2h-`5_(+D)t;&!0svHZhYi4&d zguaHZ=gH|ltuXp$~PZ?L@M(!Br5O98z-&-fso_ zT8_r)(0(C~%F*l$5*kEKS%0pfeidG2jv8OlZeka!7c6@M&-@0DeIkTS@V`6Gl1-#M zc^<6p?b>{WOm~?*=2YZJ)Qo|#Jatuor@d)oKd=5qSuJ=>Y*z=C{TwcnZ{sGIypp~X ziJaN}63-0(lIx!k8X0eZxh}BLfQ+}{Amj8{<&ztCyKh6wU9gwivEI>V;$gC)*9=WH z)j9=c?xln4luHheSl=ZidWxCF7o?Xso9SfV+5BiW9JSK_&m^!FH_2R#CVmWS_bk-imUj(G^UYJIX}J3338iUcjc2Qa4gAXn7}nXP2DdN~_Oq(EZz5-nQ-&d0KZ>~!WH z>a2iXKlKYsI+(uVeylE6_oNvw2sezPa@14-m$VJ#@mtneas(}{#L zH*1H=+>o1nh8OjiC!u8Do(u0s!c=j(AI>W#$NpZQL7Ta&v~@w2$wPJpN}l39s4R?1|HpaCD@8E7D98 z{{8Be#K{>jw;4_nL7dgFia5{nb$$Hh$?z&#$-JvA-;(?`ozzcURLw$bxi@>HR+8T= ztL>Qw>}!-twDwGr?NC5M*4rbqWDTy5;&*uMC9s>D^~R7ycEYyM-DFstO%GY^9jeWv zaFPsGk>fFRpNvae*vh9c`V#)0RU`ZT!&%Cnw3{26+A3RFo$RM1i}7%uCaNVle4o^M zvavs}^^WKmub)1++OC(ZY2{|TWV9Hr$E;!}pGh)WXU;p5EkCNwwX7v44|4jsKI%_2 z@5sFq53!+SI$r|0xvQ)^EpE|6o`0{=@0)C;m40&~WIpU}(MFz`TJu7E^uLbBD2C(Y zu2u2&k-H)?q_YfnbPFZG)Yb|rC_=iclyzv)AOt%TsPM z99}U^hYYrdl*;;bgbY_P&X)LmOkCxgRi;8I@Pg(D1qdrgdJ{;s! 
z^bsuMASIJKKkv__(PY%WkZ;Itz#1An0*#YtHK!6vqy9OTRqUe^f*#~&JHTe3D&h9n47{tGxuWmmaBujEuvYw~i ztT$Y3KJz*pSB8k9``nK}>k74VyVNVtb1jM8ZvMiymIZ!A^+0Nl#c@~#cAFNdk-H6Bsz1pbe--{OhN2SCyNd+gRBEd> zGNXInjA9etnp@GHq=ge`_cD^1#lJj^_EzYfjoz&JE;b`sOdrR4{Ru0pfzQNle4zCs zpg-0egdSS`us77oe)#`f>03#!rCyfdA{h&QG~@dU zEq9>hUOi-;CV6cyh2yX2i<8G3X<374g?!x3*IR#z4wLg>$m+_Dd^*D;{y+aSLKxZ_Y ztgM-{mB8t>>g=WV({wZzLU*HKJQ~K6!~Z-LR6*SXP|=y@XQRCbw*>b0HkvgmSXPjoTeQB^(rNU5H~G#ak!R76tokpab0PjJ z_}|s1ogm;mTxTyUH>Tf3W*3`ZJgd#DA0&s}Gv4Jae#-(HBjJ0|Hk78S8<$>#_x;Vt zit}<``Ye7Vx4hn}<_)M!RP7l2oukM1jR4od>*Hp*#r#j){yuokZ3CIDGH4kUsNw~bWP~xXcB%`&to8_8`{geozV&(o?)vdGxg8^UdmlviCBDlFWKS)APH(3a zYHz^92}a}GrPmC`Kg09UN@g`S=M-`qYDNFs!*(2)G!fH7kBJb0VU0`F%B*=0 zeVys2J**^`!>NUp=G=aBE8XY5$i4+i{vxpwG%*v#k}D*qHcM%&&9A_M1%2d+MR+ zj)J{;y_F_%p(A%89Iw^aS=!oiExSL z-_&Aq=vG($TD(s~Ve(t9<|XdLUosL7MEiEVcR)ijzLfSe*Q_8BkY|!zH@z>xPi_F& zW+dA|&yjiwzGFNZ@?_B33?iACv)i?j+>`hCDA+m0TyZ#UmLkz7dF9L!vm2DNF*Q+} z`w6$JB?scu+DoifeJu@CBe`$#+>snLxuNnp*7ztoaz?e5 zUXv{;&#m9ka6L27+#o&#?w;0a;`&#x;F8|w*3x7qtV*xRGrhdP?L~MSg4;wRmqkNa zddeO^p53xnb1P{aO+F>@I?0n@c7l^%qYny-qO1Oem&xQ`n`bVG_QhJb3f|tNx13*| zqQ()%|KSi7AF&Ybzart0{6rIUH6x9oWcLz&vRAf=ZB)`*J^XfKcboN=>>M@pGemnM zSz;@EKA=P**WcuclS?8e2J_^UyRvf6MGtLu(NA)o<}SDrYPRB82fb9&?24yUerCh>>SwvxY0h)xM0M_LNZj}kx+%lXrjzMw z@Ldj7%lzHW{;H7fnab~i#q72&Q#&~ru0_G&MvAA+rT;XVWe4CvZ6$K6rk)m=f#t65 zt|Yg?40#W<{|9-f*HWO5ZM4q!_cu!<`F-$wI0Y4%av4O4n3&oN71 zU(?oLbIU)Cc9VI8G3>t~IsMP8tITP~lfYhdU5y@-34VbQpauNiM2A(CTdd3wl$>fT z&;1~|)1xx{?@ON9;W-+&$v%~cf))H;YZND&akA*1#;%`0=RdqmY4V&yzi-j}uS(xQ zB18F-g|N99o)RISmAKsFJ73=mSxn~5EqIMTc(+74v>~0d;QV3jUd}Q%!F?kX)j(z9 zRqEk+8#KIYL@&Dc{xb;gO!|kxM)C#R&xiiaXYTO24{EXt)00oQ&F{T%ya#%x`Rp+o z`c=>0>n-u-iI2}Z_7;-pPhzLx^EwiEkUlE0pdGL>7VeT^Cu3vg>xnG;18;Y-h*-yq z=pX3&t7#+W_{Q)o=fPH2Z8Rv%`+ha&{9D~CN$Xqlj1tOLhp3%ep3MppA>0;acad54 z4a3PGpU%|(K?RS{1ahCE*Oj!e#xrepA+jrR0uSDgo{vQBskm!}t_!^?kFLf39}Q!3 zlxqzAzo`EY%`Wlxe4m^H8ztbeAz6P1wBB_bv3e90_wE!gpR25^3BBniCn2?C+CV^KMwU5Cu8ylvT_{ 
z$|pu=COf&?_t~vHj5j^ij69ic=h9K~tn`4W%qD;ID$%q@6voT_>HTXo{K~>UBY`W` zJj1(EMuys`uL6;^a9I~Bwin)c|(eLPvatPV3%*hghP z{TQ;zNt{W>j69VVgS*_tRE!qx!a-l!Nk)qnYM)0VOL)B8u(|<_O-Z;oS!I^_I$p}c z)+Ju$_L1Z;$lUZyeO`)>%)v_e8Rfgo#Al#vHEF~;b6#qR&vO#^d*A*-5^wP?b5XK| z$Ismk)qI|KkE}!fMoza{Q(DTGWsN;gM_I+o`S4_V$?8c7dRpiwPkE6>GBlK8e?xiW zH+h%b9+Ep9&V$d}@sqn;vo@O(&Jf*>JpX zZe}Y}nCZ-**?;KxIeb4s28m+-jwC)R$h;eCIYtY~NtYW?a<4$`t zzrNc@8jq1{@=?T!axQkh9-lztb@V@z+-jkJIuvd8-Bsi|h)kMzmpRyUn&?mSy|tGt zrls*QQCrDDGRgm6v~a9ZI61+x(zTe3wrgbuDJR=e;<6rNak-J?ZXPYRTBl$`2hmO< z4DvMh7L6op#jB7RD(lg0EUKecZiKQfbd|`y*Z8*J_9wqNAJ`T?l5en{TFt!fi>u@r zE?!t?ol2W`Ku*!Q!{pxh3w8S`xkL+j`Yukct$6$)xc!9Q?$p=yUO$VrJi)Fkplwg> zPocToXu6Reg3SKjXYZpl^yHc451ed8Q?i*HOgDRz$vU>a8YLyq`rC;9obog9mz%sY zi>ZyWH~r834WYgQIqoE@#Frn;J_acL2kY2_z0KtZDiw4T?~--w+z<0S#DA)88xmNc zl|#^5)$2uiYHB`EiTB-7SikuMW_s&kZxrPe_Dp}{8>8=CK6^loca22-NU#)S*ihO zzLEA;;-v}+{zBd#E7RP}AW`v6^q9;fH<=^#=sD+)$ldQC(nrveMt+ zIO}F}p=ocLsHcysa8;e`Z-L29ILKL|eIW1#7M`poE0ue}XZ5uH8R_QiOI8iO*K3|x zhT&`{lqOaycRu9|SE9Tl_Zdd2*U2uKOKvvPSWhx%l3>mYUTq{lh4eeKy>gJ#9JeEt zdxd0Hplc6Sz6YwaTAKSEhVXzdkXN2D=J@PnoR>%4zo<$~&vKtmr=vsyUdOl9HZ#d= zY`QV;HFJb(SkyJ}m>umZKHV3}5-)ul{p1$t-F~0bOQIKEMe(VSn0u!Gi}Fi-dkda_ z)z5z9a0xG({rZz&IT_NE4QQJ_b6!1pLVM|>H5qn6*LN;yp&B!8~YAuQ!aG_HbzU2N@8(Z zM$0vN7{%*7%cAD~u#~VSj*J(4i0q1=2gFMdLXneU~&#%&CSD#!0ho{oQ=g_d9R{PLccFV_T z^$t8&r;}voOI%FuZaWDMD=Rz2tRXi|)FQc^aF9EZHkr{T)A0VV%<-Ye{$QD zQD!>yKcUWrFj~oM?;hCCPT@8Pcp42qkXKpATMki))9cE^WWDYnls=2&4P3!~=)`kblGl|KK2F55%FBvf5R zOONv@xgGa;y$+_a$pyrd`PVym_Nb-x1JINBpJtGLmR@fsxkjw@H{_ zt&98ctiOAnc@-_U>*r9f|7A@#pyz5j9>ZSFR%bd(%1vCeX{HmM91Qo%mFNTwyLq+b zL_VB0r_fDuw&o1a(>(t=TwZ{xoWhz(?q|{ViFp4EjadOXk9@P=cn)NgrTs}{@CS;j z`Je2)`x_S;LhNR}{K{|4WHX-|L;f(L4WNON^!$O(x8PtQ$&?{?ccCkrSfRUp@(m1( zLRBK;mTDz;yY$xLt!Pe0uA#JKZMBp$0rPp3 zU2MLs*1Di%3khXs{RSQ(`4y6_H#4hRq?HKRUML#k)5N)^x7>!3i2Fm>-3FXyrZxqo zL$sAB;$8e)PAe5fo*$0-oInd@xgnwyo_{8nMA|36Y|)MMH~KU?4yDNBNaJW?v0I{{ zAAXaud@yw1O6p6rlo-@vd_WaEH80f3n%(sf(%g3wpg8xaeZ>CPlVkF)y$Qc7XfQXn 
zWbb7K3!UiG$Z|evG6PJ6!54b|j?d1$9xEUtvBjtQ>CT2vq31*CtQbmfW&Mvr+IBT| z8_VjdcPQHSA=6pVlgNmy$tJ(;H8k@p{m&$YVsN#FW}m><3c!8{T??HAa zs^5$j9z@A#S{cVToIRBYFv^`nFEP7c*s(l@j77<6@i^QZjqXGE{$)z9gy!<_GnJmI z8b`B+xW(wW-Y6UN=XA~(7%8n>G9*5a;zZU>@P4%tUD-`?`7DCAoSMnLP8o7u0)^A` z`yP8)uJwU@>fP+Q3*K6xsV>w-8i(;si6);(j=jv#n&UfJeE-(+9{Nw#;SX_h7oU*P zr@NkyW;2Pm%PG(gwRLyvV#BjGwEoYYS3Xg7%M%pjjucsogv`{ly;Y zm~XYf{{r&QiMT7s@llli0xf@Q{~7eZ0pXpE679{S{;Sq=o^4Mp=zj^!bk|d!4O{R+ zIbCwNa=F){iC&(8{lue}m~C#%YP`X(9o{)3Cz=2hEBWs-ST)~Pz- zaV#{{fvfFiR(CMeaPG@9-xAm|9K&f$;xklkoVT8E~F1wS)RTjkZco!9t_49|zWQ%E5<2er0Wc+>y1<664T!S<5oSg05{6u!0`NRYD zbUGi9NXhI$WLG%n_cyY;89cmw1iyKfUXJfeah;WvoV72nY+S;$9RT=#y{gXDMqr z#B6m45BoIkbCY9sgf2sIX0%!1{D|l1h})Iq^^>u)6`2m?>t1ES{YbE?-jjW<0bb69 zq|WrS6D5q#Ve`jUNij9FJrEzH*53iL;|OJI2jA9*~zB@f*Pv^0oLvk#G-zi&w|`w08f z+WWBnDdfcq<^1t2aDB6uvm>y8{N6Q}y$pvP$@MmpI*8>=hns&`&nxJBnV!3w5zNv0 zdo(zS{SVe#b^;Uq)eTL%wYH3huS23k{N%Rr?sU~ne@FOi7+&5d@vJ&^p|@vHeF&Lu z!O`33tfBv#;B&ja9w~UR0l3Wk_XY0`L35ty7qF>NzMIC958zl{B-zssGV^+yrt)k$ z2Nx&#>}xeo##=HzB}T6_YKG!(BiRn4r~8d|OO-kXC)HSX&es2df;qTIEZzdG6*I%E zNMjkjm-xMyt^SR|PEh}yK1ZVF2iV-9%{)hDC3}Q^6Pup(;s>B^BWboL!>VL9fQ~b} zTuZmxP?elN@ABN)e_Bh&$t`l8ma^KD+2ec~&Z=P|oHy_}i8i=T>u;jEAKUJa-gWrh z4|VM!=}x21r|coKlM~thSRNuPpP5Bu9Xt`Po$2%hmVbg?o1i>-+J7^P>5Tqsl}Id9 z_GPkfQ%~*W&}oUr%}UN(Dv*cq7WEl2rC@vbR!3AK5e5 z2@84VyOh@MAiM0PG=k&I6C2?n*(u(kd==o~^3@7Fwd1%k!Da%0lJo0!1_t_0DPH*#Rur{eQ zHwQfi`Udb$#c{EbG?RNEbDQtIUa$PcP?_kiWWK4SpW@{90GuZKVxq6Jk2{gv$B|4k zV?tGW&l!W}?5T?9j+*p+j#_u%CTs0A^_AV57BusXR~3Caf=rH8JF|=t5TB>VdEO`H zBsYa!LgtCwxJ-|^D=DWQRzh`mZC%77bIW$lNcZKXav$EU%7mkH@%WaNz&C09SGw)x zf97=M`JYd{9|p1YY58z8cR*7z%w#^A^U=e}DN*;y<5_{Ov%-I#9?PIPv**@uoe210 zc+DMsxlisdb#gj4c~le4JAm&h#}|Z_?;)kXUOOAn_vAOSV*WDw--D!P@gkLdn%$se z;M-~J&duX}P_+rS|Mks%F!KkQ9m-237e&rD=QK*5!}2_ktlife=Z+|pn~IO-{P!<< zNt}JwEq_MynJ8aD0_Uogc)#rZ<^I%ieCaB7lXDn%psSLxDX6*Mx5-X56#qSyx|FVy z+3|8#b=ChNZklfs-;;a~xkEZ9qi;t=L#R!jqGScXmKQmiUT!uTvy{C^q39gX z_w>iYabBgR_-G$Rh 
zT1Y;{+?Dk^Sq^0>r?AiLi#&|dx<2_Y-RIn51tUs!LieHhcJz4yudv&wk+td|D^Ec| z@CN@!=o-WUrdH&f9(IvE8sethP=;~5! z?t-OHAwE(4L(J1&q{kj)wN2S##*U)(x!l5)c*qf`%BiEVM)(1wnN0R`^_&>Z`}8~k zp1wi z-$IJd>ZcPNWkfrT#&d&Jb#fhS6*MP0_d?exj%KPBtmtT(Ig2j3kXgF)nXPW{1iSA-#o&CG>yc?yTWy&YI`VwQ}VeDlc?~&N* z9WEccdLc zr(H4&6~yzv##l;`=2m3WxGvof2L)%DCo%i}+> z+LOrfGd7_1d=ywo)Hu>&e_x=aFHL3-B)+aOTbN%My;`6=d8cwC zb?zj;hVOpG?-k_!w)*1=Gwu&?TFSijcCtQ?zIqvVZ-DgN%aIkh4)ipKj9yUpX?6Rv zcfYj;~qP|5#~nc6pRWC6Oxj{L%u+H)~5!?CsTFtCwHfG&Tp)YGv#GmUAkv7k>Xx?>f@Sy|2If zG_hMh^DP%b?qr+Az8H-hGDd0l)Z+ec2;y8tghuQo~fiZ7?(Mp@FmYwOJ8~7$g0(4Wb!gO zJffAHc*sfboX4z4NAqc`Xh$QN_=;&|q~12+D!U(vQ%^3Jb2Q8CLBcJRjCHgj*Sch08|BF@n%QMe1mrY(Gd2G;Urnq?&MiOW zT^GEJVt;qjR@Sq!B084zGV{wVNU^f|g<0oxGR$qPkNL@rz8o|+WE*+1%hO0B+FI#< z^3CKarHM~-hBtR><_v72hpN-p!+PjMlQ}!wmP}d{^jHr?i64%{pVCU=$r5c=8AXrb zydkaTY3N+?`;c53pgb{o!QZ8@nv6G1p{b7g%kcBHmWts%clrEA)``zQk&JF8>8#c! zQ)2C#_jsHsSn#7Jw zBD0HqlW34n$aJY*6Zuubcct}Jg*;l~ASa{~)t(Fnd0zaHymGti653e<1?>uw-@_=K zn=v0nc{{Snn*L-SH(6fi=wmHBO;e(@R&s)NfAYS((8`|b%vYnn^4`&A6 zc=BtscDfoPahrQzFDlqn zQ`tw($F5;R$qAbbRdwlZHwl+Ud3GhT*YzyvKB({P$oF9lgOtzecFsy)0g?aHN_G7v z8)jlBawao3Cuc5loDw}CDku7LJN=W8^bTKilqa%&o^+Bwn+HyZcjW5?t6H+1%5MSB}j9#HyXT6_hHS{3kL zo3-Uu)HUdRkFAeE{bsE^$tH7(wH__x+|(#`_X!;&@;w<+hj}tu$1X>!{g0=%HoQP$ zm68kKIIZ6RbuG<o zq;rqn5+8QnS%(?Xv$Oga-sYlx9vX*2%S!W~rQY{38l0y0j%?&J*vqM@>=Ne|$zzN; zt?~A+5)J&sr{)IP7HGeRUhXwIoXoociSQ~wyVTY3OJ*`>DS*XubQ)@nHKa%RN)vikZ$*NoX zHn%?{Lqr$e@;%y}Y6LvWx7YiDgPpBsfg^7#PTOhoigD9&@&uV`uKr$3yWqm8*}sX=mg;kgNWO}^4~r1Fb*m$JwP zW=!Sic~2DYqMwCk42d4gy4ezvn8MOe!{^WBelkkBp)WIo6@2`KdOMG${-BKxYA2iB z98xU0<6s%APGOPnp!C0Va)|H8seLnA`>?1x)I6TPa!=_7x~~9{=ks;P@eh~azn#*x zXk!s7|Av?KxUH+jZmcr9Uw4tlR{A+Y>CteQ+ko#fN5~1hifr-_ytn67pM>D59o5`K<|BC1Pgz|elWWmK zYaCWE+pB|?9^|`J{Zk=4(Q-ML^qrE+j4wOM<48J8WWkSYWF60zIoPdwe+!LSUrN@C z{~0Hed-+|ZTawACd{p0p9BbVF{n!D`BryBfVVWMRj!u94br zO?S2RoHROg2JL>T=|COIjW)#h?~m`r+8l`YHTPo|IY1v@OwGG@~MNYd*G=bw@2 zKWNSgvqb1jW<|;6keJ(>%|^DPY!*5)!^ksEvIOrg%n0w(@^&~ZI?X$k?fis>ar|da 
zN1RR4r=jIjXegnt&uJ&wciNNQe*AAO->t#Tuk58X&z|hu$=ceR6nf$}CkI=4eHfon z%==`W$hn{6>7@ds*5^lZuW{~VnvbJ!{U2>Ng~zXGx+F<1)l2R+%?*v^+17aeBsU(d zq=gbFD^0d9YBQLx#vi<-rxAR6R*FZn^rG89^X&2f=_l$gt7QMdOI`T*MAsi`*ly=eMKE}66cvyq%D$+KMxZBJo66^!!P$7toVZD{x(5A=t= ziz1QcXtgogTA=pug4BEA;$`TXt7bCK&LW8oJZ)m*)~T0hpgny*g7=G*b0Q=8Jx}&7 zv$T>0{R~7=Pxg=}hmrhvIr7aOSDx43D$I1c`DT|I*^5ZN{Hzk^v_M_0XT2mR)RWz_ zo?f#HmKgKDp=PDAsH?sPdLRG#fY*CMMo~mgZqFM?mWkiEo$M;1dRe7-<>DPW?t>d+T0wV@2nM#g~?tpk_?4=nv>3AZCU3( zhei)I`yR#tWcWCiYI9^X$yNzZKC^MJ6->BX`B-w#2Jp-AQ@Sdptk!Y)f zs)c@wqRu+=OmDI86G$TK)jeP(t3iGAw-Od}GjwiyJj8gm$p8DanMm=RJsgO`Ewr@{ zBs|0CKCWa3++A&Kx)V)3Q1LBYy@aFpNMah~eoV62f%+WP-Hp|6kyGv=y#RH~$niO! zeujtQ{LeBTtvmaEn6&RD+eDV<_KQzp^hz>|rtVbwEA6%;`E4vK?d9Ig<~Uld$4g=4 zZqmug;HUZf2IgEP$#*sR)@7l&Q};=JtO5D=WtF*S{1Os>5tY>obHjyXks0ZJAuPKG+oULZv`@%}H)3h-jUH~gMk=E5b$D2mObNQan z$@4^d9mr4D(spL(Ipg^$Uw=O8{zT<=nwUzrHQ2)|WbhF??T?!=^!zh5mkAQsM=28h8)AYzfeq3Q7bKu zC97l%_*SdANiaFFmuMj;C==au9eGS3k>vK6&PpEl$uTT1_sl*=^B7AFY3&W)<9zCYKv%a4~B*nZ!GIpZ%QV0h&x_(aV+e`x&ZM z;45d&7V75|{ay$6xufA0vz^_fd8GdpNn{$y`~aEJ-_2xopjNB!on^^(ClubHjqFDz z|5r5}HiD%y=q&LKb>O0c*n5^c!r?=J6 znp4ds*-}me)rP3oSY%Z`FEPk{(R-vevQoB^w#Goma&jpPnT^SOg0VPj>dD(&i<}41 zemgSD4fVgNJ55_T$)9NY+)$TH?&qMmf}TDgsWEIiYigCyotbl}eboPJ{%`aw#flcf zLsmdLvcGd7>VEAee)V;n9Kp^LpVSFm2cn}FzWdYu0<|XM@hF(d9iG>aVKO_FMa_NC zQ(AACZ(NPT%vQ1*6I)%bMoxmvfQv+pkJNu||9M$&`_MoIWgFvRG6XFotM*WmyK&Ai z4*d^ylJ#&PS^vSemBUBk{%^-!XWAOa_Y6Tp;tlpPC%K!B$HP!t^4U*Ii3S?Y(iV|s zGtx@bX(FR*Yb&>$8$8S zpS^~b|KX=NOPS^KGmHbHaZ*g1Ik%dTt)%yFlg(>%dlM-pqTp@VI)rQo(p7bKa4-qy z889bxn;JcH%BvbFcR)i=w)Bwtr};E1idmCesLhFxzZQZfKvmX>564^fXA^lmi(P%h zW`1S;z2Po7^d5!$Zu-gT(rRoeG3aIal)fZ70XO@=@%ugt-t)wo`-c+UcoeDjQ!bgM za?3#pT1@VV>(PG&%Er*Zo^VuyF0xB{nzoXG{sq7N_>425azA}9SN2^x$QktHM9;cr zvIyqvM{YHmT)}_wj`w!$aqpN1x+>WE{Ky0DgRcO!Iys@P45cwpNc%*k8 zDvI$Ichh?Eye%!vw3AsR_l@P8MotUQpx0y&O3s%&QB)^`kNCqxlRt*aWIcQWrK90= zJ)Ooc%*0o2e2*u{ie8?(5><04@0V;&hmhN=YM!J1Vm2|9M?F{n-}4r$l`Vz4ceM2~ z+#F0U*&qMA;A3((S55@=hU@*kPW)tjzC9;-e`ER4eC~!#%u`OsKCjoD6FCxIbGkCm 
zxA7!P*j;O5+d>>(qNi0_N`y`|HTsiyD|PlzBJ#^qPYs;hsNdW-mR*iShFz?^zqKCD zz9q5pXdXls$ql+r4?~R*?ReoSWRVlpx02<}Jm3tH&Dn^)Y^fam4o6utcC#KW6>w8R zTZ_=p(C9Lkf6Qw1muOx?c8yr?Px#Iap%vLwPTXcT-`UvFhU9Wj!4veI6UoU6TdOc5 zjeRtO-&^&2gMKHPXPt?=V5o6R|v~f*c$k$r-!;Y$QnV;ZX`PGRdven zj9a1aExkQX@-MT`+{3W}b=l`BXXSPjAF-)0lQ@;svP+ZLyQ6Vd0>$%SbOcY_(CncD ztL{XeISo6BeJ?cLu0Y`q8Vx6F=rya;yP%{A9-qAV$-c+ecgC}s%0G+OX=qAh+%(>5 zDfwIgJ9+jx&wO(`jV$I5&eYbI&|Zud;}bgK`w;bS!)?xU%v7rkU0guFxlM7j9;cA; zY4DNfv6r=w99;`oKq7f7sCfggnQRds!gfEs^)|;mgcfqAp!DT1dO5CF z^WnLHyeRMW68K-Cx7?SO?7XeDeT{N$3S+=&5LYzA`-nVRo5x+Ne(s3B2Ciq*(`5aA z1HEf`$oI`rH?sEZwtqzyxpitI%Ffc)3|{62PYrpty$)WJ;boe7ck8_y{e7YBqv3I? zzBfWu_6*n1P*>mI;)&=cNa(MXtz`3(=a!o6=txf|iK6)d4f~UDBB&AzQcuZiJiA;2 zn-|hXN4ly+6N8O`dFGfxmt9cU1kLLUwwAoqNBRB=s7Ve1`G$>yuajb83|6zH`}nbq z^f!#e2ebG4A!>Yqo|*KrQcHia^EV2zeG9e8!+#)*KZuIojh8Qw?JOndu&~(MGPEW~ z(econybQ@)yBf~+f%V;J&zNdi5&&E$;~=XZ+Yg;ZS=XJd8~53`sOmT zk*zS)fTgeJGn3zS3hd5AcU9V&@Aboc{-4Pot*^)!(_^Hek3yX42q{dU)oZt@Czhx#W-qcQ#LrNyrN%b#fd zmX{m`aaGBww@=2H*Q}+7v!LzCLTkOXT7v|J`rQ*vC#jL!o)h8vDr>k6Mac^LB+RxZ z|7v=xL3*$F>=$xh3nzmiDJyq>;$$w~#-r{NG(4p50s4N?H_4Rq7hK-OYI0sPXFQvD zox6N;p6Dy|-_OIx*27IcYloki84m8a` z!Rh)~PwvTA6MMfOmxmf5a_?}SKa!oXBF*MRM^9cfQQYHM)n&@w?6)%=UV_5Y)o!k@ z+^n69DY+BrOq3=@Ava+DrltGQJ_U6n$TRU}CDD73zdJ~4meS?uVHEokJEC|a z4)T<7wz?;vI5XqqvoCMFU#k6|*ll+G8sDuhR?MT*&kGK<_-It7sv+969-#+jD}2 zMD%Xtw@&BBx{|<=I4`Zo+-sPX?;}}D`X363-^JgeaW~1{EapS?SiSceE0+Sx=-8ZKv{D87uV|lqv=k- za4f#}f!m@a$`&OQ-?Ee<6=kWUP+3}4w4jYP(k4l1MYg0(N_K@xk!U4Lh0;z**+NuW zwO#+u{rdec7vA@M=9xL?KKD5@&oeV;I`H+3OI{LvbHdC!F#m%*dFHR=_gnt7Tyo-~{N?ZXC53qG_}%^k&{;cQlhHL@3f zu&6)8=x_>IG6pP1y5TsRH-qyraV1XeN&561Ek%@4{w9Z3o)3nqAsMc6=2*tkuhVWH z5xbc<&=?PHVEOc}*)LIsEa}ISH*8J8@;eLMc#d_4>AP-%P6s+1POBZHf6S<0Up8K* z#X+o`b8&MLQQqn2l;g5m$;!B#40gP0$$j|)h8#+&>!6wOXWoi57ypXTE3;PL!KN-f z)7zcJ(~`mYZf)gszTL5Co7ho8Jz24~I~#08(W{IW{!+sw*inl|ZiY$r{nao&%}$*| z@OlY3GOt|}N3!-ZZ(IhmQhzvH>+_5BoJfO#N+y?UBQfA9pEGZJGQGby{>TcR8~ieY z{7o_SY8=a%qucbbnNiusbGDG?M(miu+cNuIlTD4tI>#>cjY!b%1Tzb-FeV 
zf>y9-q?UKYijrbQeRwrxkx!vM2FI7MUe4$KM%_IO3ze)n{La5!dH+Frk5N$1yl8d&`iK-cVc=Af zRy2d=>+i z8C85&=>L)lAM)phIm?lGc3R>uqxMRsUsEq**=RK1!lv-&fm{`4MKYwvx%-FQ)^4NH!P zMAlXP3BOZC$(+cyr&yG=IjdRpGcuOPt_QT)Lliol1nad_&d9r;=+u=oH)6y0qOK@kId%I1zYk#hx5)A=4YMmI`_B(hTlRP7X|^`qCL1q*M7GOVI`dAML9YkJ zCBx<5-~bGB3~dgmXAQzKAtjh&iA7|7EOM0<8tuydT?_GTzMz zO6A1ni`CiFR-E&m*+IRhhiI`P-k)tavB1_hpNzu3v;z8!+S%5%>l1 zJ9`E?DRndnC-CDxMfmS{+u5XfNH5V5786dNtVcfsXMbVm%(l+gUe5Qc0j+Q0xtZjD z@r7b!o<**sNxT`FkCStMQXI{SquFVTZ`rS%ad}G~`x5yV((-XK{D%$03jED}=In}p z37Si^@u#-3t73sxlG~sqBz_dl{-t?l;j_!BsrxJV`#uS?VrsI|^Y!Q#yZ->Te%R-M zK9AuEf3VCd*K?A}cVbBkBdBjlQU`L`W0w*WNO~XalJRG#diN*A(XQ`w-;K)ViKL~r zPN2;!*!}EVXBOBcuGNCnX0?vS`*?H-Xgvp?^F`qS82>a|loQK3VAh=&oaf&idY!&3 z^eIpN0?s+BrZuU*f>kLFS%qJgwSE_4Wo*Qmp|cR zHM$p3vMVjK+BCaz$ME9g@S&VfH`A-AT94CK9Z0tmx0;Z(yION*zzY6!3a;m^)KieS zNnOQ^uO@mjdd!vMMU;#`d-2wO>biy()S<^Ukl9Se6-GNbpFZnxv+p+J@v}&ncMx?& zk?HgtOPjn4e$MwbFgc#zEOGrSvg||p>=K(r)?yf!y||g9*^>k#aXF{w<;>PMv{y=G zN(P@BdEC8lSu92`@h!O)%Nsdl&i7he8>qH+B;JEHHfteo@1sxF+Gn0W&r(;DeXu(& zW2cOzPE_s)?Jk3P9bCN2Snq5cE3KxA*z%j-vg>}GHZE5>{p`EyuPNs3!NRYQK6%p` z;m%SK^AD`ZnbCPmo9wCK%&RQ?CHe9Uou|Z(bYDW36=XUT7K_wdnI>6r@h>)qWiJ+D zMDp(b$Vw&j73n>*erc(8|e}El>5kaKd)@2?$vC6FZ_p)V3FFx^0x|OtgJ8399L%iOVHqXZ0gTv zYUAx=G`xaD$ExW|?H6-3>&R>4Zr+mjQF1z4WN*O*xNrpUk9F6}?;5kA2yXX9} zk2gPXp{zJP&M#S0wayqSXP73-$1_Uoo^{$wnT6O|hqd$U_FVz>7f2HQ z7wVlxx#MAW`azGCQT;ve%GqnhM7*qHi%(_`&(mVr<311fw3jpPDzijRZp?f7qsWoW z{_*0hPP&N4WX$o4-+olXI4tcbs$^|YP0_Iwlu~j6c^^@t4T*C~=nB8KSMnB`Wc^g; zAhPc&GqagR*+Y*|R?UquZy%N@g#(#$dBVSWs%XYz?#8mB{9`_Je`n|8Mdpz>xfAz4 zhicZ|?GX2~8zH9!?{XziX2~Zt)-PX+YUg16Kqd36n#{ZLvh6sOm0(lF)M27VJ52uo zAF`7D2^!}tx9n0(bf{0`SmFRE*H!*5Bd471mb3pll6kvYv(lj(7G&o}&Jx&5)IW}v zQ}poJC0LaW^M>k7zm3wXt%S_WIGs~SeqpgEwD>=A703Eoy{7#z84Xjail^(q)9412JZOGqAqh9j&z(lw{cjRaMk!4e!~! 
z^kgN*(LK9=r)a67sGE7bW3W6cA(IOr`_8A)FFRK*q)WblZ}oD+`OKN( zKvAQ~UrAU*+ zwg>M&TDe_f!rLO&Lu@+D7+|gO>SH`FYw?aV8feDjPABDC@T;i4WN_-u2O45QQ+4## z%grH6+HOgM%(lO11p9%KYt`J8%z1A85YG~ya&CH7rewY9H%fKHqB5*>ky356c^{5# z!>c0;;F}$~jQm4XjAGjnV zWhD&C=q!0=3$@_?kxA!V3Dk}-lV3Q?s(GoWS)DDwH8Bgl=^d8 z#NH%07{}gcjYh^X#d&>pg7h>lc*^}*pLG@S-EJ4DcJSeDn3DPCC2Gpd@?dT5689f*E&JD!EoZYo zzp~qRqQhr;frt4+Tje@qL|@i=1v6VfBKwy{ldLf-%@?&YlD(4Ge`r*aY+uF5mL0w2 z*e*GQ#<`wRd@>|GM6>(-X^ef7)qEKy{I37c{=m(2ozGV1YW-?$jAW13pz;Q8?ZmmATXNuCZ*kCz4k}0*Z8eYZId+3xCPIBJG`C1>0IkVMuJNCSb zNB4+lBQfhAb>}SPJ4u;Q;1)P$N9;E~9Sf`E>dX^MXVHHxR9`?spG16;EA z_!_7`<&IW*gh}k#jhrRao^#hebyu?SAH$N@8wp&k-tpKn3-3!9b8TSV%r|65e{v+` zY@%CzzSyso$(i?_?bK46mb2L>IovV}_=hr6TrbD-FCpbq_|O$P1ISmOw8NDg&ck-{ z+AdmMCUX6!wclAknZjGbEbk7F^>5boo(YB5pf!)QS>Khl+UxKlQ6ahbCy}*kAtq%# zaApQF6Ll3P_odtYBwOOX>qyy_gjcY1RxRi3jbHRyTm4oYdy>6=3cTvzQBH_{jkjzO z;hvz!;mS_J*3LMxgpd5ihuZjdm^u%p+X_+_bAP zMaf`zl=|wi+Jj^-h5MCQr3#w%tbZjbSb%S9uEib{dU$mT+2X{j<=e6WnHnYSt zalDcmX3~BGmbQah6>U!BIp=71EISux#d}4%_mte=*JMr|Rv4dF^6OAGNgkn|aL<{f zbH&S|1y4VlMZ2+EvW6vJYTi@N5eIT=?G8vi!wSjOTEs7_dC(m=oe|<%7SH?dVeGrc zcq|!0GE2WNo-cy+7F^Ek-wiBMQom4(rzSUKb`2y`UB-jk*k!Z0nGEI)ar0zWe}gpb zXmB}nlha}kWgo+y*?w*BuJ!agjAd`fr=WDCD8GAjaI!Yur`s=T8Ngb}(%Fu6uF@xe zz^7LF`?@+0z_qHR&I;JE_>*&-o)KdvL#7YQY{2G8;>@EkodngsY*xjmv)Hv13ug`D z2-k-CQ$~cDAb#YOavr+JLKA8F9)exNE&b;@}Y(*n7 zWUo3m$wR-@jJb(go&+ ztv@Tfj{SQ>BD+INxYkrF$#Q%xn_L9ftQ$I1UGei9^cgkDJ3)!8d&}smAH?n=UlsbD zfdhH|uMW*a)EtW>>snSMW}Wb9?jFfP_xb!T3ua`qPJQFl*8@{t$H0XIGZ0X(+ z*!-~AQ?tOo#JOaN+Y^p$$&x&1*@c|F3oTfB2^^A*Y_i_5i8^!gSoZQ{7kBnfG$Ye; z*Qet}Y<&$xX2K*VHw^MiX1>euo6J>qr`^f)2_E6bJW}5TtC#h9CH=Y`eyi!0-Q>y8 zIv%b?vA?}hL*6GO!&u&JJOizRU^#;~jl-9lA)n{sIU;QZ^>kCJFHNsvp{Dwx>^aX) zz4Ykyac8D0Imxj$S;PBhNSJZ_JB9dAo>%4kyg&H=*(A;r+9m3Lh;@T5F@6%y|W;{-lkR2+U3cFA8bnN5>9WfOMnP4;`WQJO_6>o>Ay z<5hRuin*`*ehBOqXf3-AvkU7__8h6@jHMnkzAdlpDw2*errVCur|S7OD8Ey`)zsMI zQW7NxYG2>Gl4u6IuNN0?#r0&!H~}UDW~auM*BCQ+?Dl{ zng28KWCNZhA66&VK37jx`#c8cue9)#(k;b=+7Mie7nx_Oq`xVlkNZx&$>-HiE$7l? 
zt!TO#dOh*!Q!*xh&ZW@En&0#ESq-rACu4-HH@jK1K9R<~{5=Y0pJUcMS07hy6Te-n z#hlNQQ@|gD>sTJ!myH)-^CFyBsI3`LugzEU#5;`j|G>pO-*@$S7~GRl;9d2N(XXx` z?{Sz_Qtc1WICE*)oA|CMo^$UH<$*2v$0XjB6U>rx?{+A()BY3g$^NA5yvP&zL+V?N z`K#4Ap52}zb><^;a?Dasi`~hU8T_o@$t>a}G`N;mw1#PRP?S{f^I}#bT&+T@7G(XA z6f5CVjz4?_)BZ5e=~n$naRFOa^XqDMIlQn6>t!;&L#BFk`_GlSyeHW@=J4obp*<7+ z1ITo{sF3|2`>|q8ev#cJv_gaUgKX#wwk=>!T3gAav%O>4cX%m zXhG|b{QHea_BWsEVVrgyw6>BZb7Q~2;aJjKgkASyUUxEoWM1mvLT@rnY)n4YoJF7W zh_X6ohd7pAWC^diSZg^$;~01>hsDd-+DyCYG3U_Zd@UagiDU-7Qgr!F9Gl3}RUwpq zuREk~XOoxsY9)6T#p^||C~0JKrKs0SnXFdIT*OIuuu!?|=j;jl{nWBXjNA(gtEwmY z!E5l%g<8oP$UL(a!|~fOv>inEhvXcxR)yWc+G<ifxyWXFO|Xw@l8$9Isp% z5v!GmmQ17PdlN8E+Z7=93N&*P;1PaJ&Vm_m`;%nPW8L|l_^!gIuVGlypEg+gvFQ5< z#-FXGzAs~?n%}cp`WqzJMPW9<*de; z;8S;F>kFZIn7fvd=^DS46(LsPTpRw~9x_+J=`LJLu9TcW@De7ihwlyujbQnlpKu5X zG7CM_J;T)S4Si3?|C+vCrtJ+-8^n+Ej%*eM9`HGsEaTjfUEUcd?;t_;ZsfiB+5C1`A@*lvP)6)bF0AA* zO}>DONb`vL8{urE$y4|FEclq{auCdO22p38aU|9xThct-Pp_X9yLs+^#J}Z4(wxA3 z7Dgu<;dXX7jCMJDyD@3n;9}O3b>|H?;`VX;qNHnCublNf8D(XyezLp$AvWc0R%dqm zQ7!4C%8Ha(f7b|lbNO{LW;{!RuG-8=uuqfjI?T;z;4}3d#}l(xw*x$KE^J-6J)}oU zKYp`zpMi8^2xP?C2j|9#{>dP^i=+?0vk|70^>2256rd~`attYNz$Ip>)%UZ8@X_jdCg1`TWeKq_th@JkV*?y4CdYGDe z$J*?EkVxGGe{+s&X11DRM6&|3E~9T3F)(@4_J(%$1ZNG$2-@C&sq3`Z5f&Ro{SVlA zD4yTS!++HFgW^G+Su>ZiSzFm3lV_^(IG)TA6X2D%JlUm^byQdDsoxY4ZX{`Oa+PMK zj6k#6Zkg7z?k3r<4)NPR?3Wy*S+kXs@%Cf2?1Pw!8TE0dnqTt%yQJE(zO$CrPozV# z0@q}*-{~-kb#v}*PJGL06Ui_=gl)3VuZY%5iXPAMgKybsAg$hZcTN~AD!%OLbJi!G z|347O%)u+_ z$vHn~E7OD@XN+7#8$(H4OpR@PDvqVE!ep^)Z3{9jAm0gE%=326`FI&JKhwND+RpIkqm{et1)|n_GJ64^#9otNF&&tgH4CAT;}0=z$Po;vP-=LHpNFW z$KF)!v*0(0OnLra=h|{uW@k*izLqP^#e)%IQ&o(}PTp!5S4)h^4C{&blKmC!-JN{B z=ej4eUhBl4iOTk(LDq(r!jj}WO=LNjhh$$`-o2bCnq@~)GFOejqpy6PpnT@kelCm@ ziWaO}1loJ@h=%N#wK+LmGw1tVC~Ax)Yxd7(FIX(`4xMHyKUnM4)Ra|28OwLjCuVe$ zGjodgUYCvkfX!WW&Qn#M**f8CGjTBI$Dhl(_p(i9?dtI0oXWCA`MY`0>cZ3Xc}oAv z4ml?>r_SdT($mhJ(FE~yOK<1lknzWrJM4ET52rKlTKiR%=X=Cj5$$z zc@H!|Y|8nwr$A?@zr*~RvuyXp`I+K(5%_1lX?A61hkKr#lGWr`thkPqW|IA8dNt5z 
z_GOvu@pu5%>2*ILO`c}2BH0eIXP4#Wh5jJty4R=CPwKve#IGp3m1VCn9(#$6bFy|X zwZBG+84z6R-){PXMy#5*ZYxQe3==J|ZD=75ozCtD;#778{lQLO^4JG?;(N+$@oQG8 zevPY*SmsCdrvDtrTfSCv&cOamTt19MSvAznox^GNvAVO9`$9hdt+C!T-?P(UIA;9n zzO&Smd_XtzsOBv8HfiordK0W$U`s=`*$DmJ>xX8m;aEPB9W3#X^*pZ%ynE@x-Y$%u zk9R$9+PfS5zNFUVrN~)(L)3eghsJ^R{zbwj&}=W-<*nYm{HTMt zK2I%qhjbHe|E7n1h!t*j_biwuPv#al9SXDTsmlnxih7>lYx7*)ov~sEpIV~_dQZK{ z>@rWC^hDayg_b$8j z5IeF*;TSC?&-qN)59M`V^4zQ<@2B_ATavl{%^4aw19pF$cweciWKZn=MGdRyvmFN# z?JMYQKh~pfBj=TL91OE1n4MJt$p!TpD{XMKF&nqWrvaX9uP!`)U#CAv@6k^GdL~9x z#elMG(y&nXVKmOJo~86%qScxH&3I`VG(T4Ud6sw*ht4KP3zCoY9G!DR-^HdO{({FGNcRPcarcjJ-TUq3GDkRvk(e-%aHldV=*#&p1;A5 zXIM44*tUy6w~=)Q&(7Y~DQ)-Uo~GNa`r_4-(Op7`)R{#}W8x3XA9-V5onSueIl z$;WulQqq?9OL3a6VZpkj8-bI_-M5JU)q?yNZ5`>bk!?!jWi!{WWBGXy zs7>bN1U!-?fEA-V!`y+}sD+kCQZKtrZi`h8tJ+)6Q7dy@i}N z=|x^p)4kf62$AbC@i}#LCdGH8SgVbpxSrg=wdnSle>2~{kQZc+;u#`L&QW-mM&F4u zMflpDaQlkpkCUjnnoc&JxCYnX(Q@AFmL+8}m^6YxTP3qLB%`Mi*ps=XSw`SLh?Tdh zwZ2w*Voh1Ly$S{`NiQ;zJt%G^Gjkc1 z=qrMqsK#o1EYH2!^`6o2H|)I>rtTQW$UvFaN@#;UtJ;T}eMXX8AG2&rQQh7x_X+)yyZ|H_5%%Ss;>?ivL5LQi*9`EQb=b-W^(UOAlsq5qm)u;IOtTOLn z>4{{{dd7@I!`FtFE$`{jy(*LE;YnoikgqhkU(QnLTHhli(UyWrb2kl*{4y zjS<-i>~f2Um&~eJS9h&8o)RsGvF9nU8{_Welg`SToS62Jc6ZXUzVCk*n9>RR-{8#` zh-f2Wa5Jqk&t931)0OM1_AA!cP9_wWbEyO>qF?h*e}`do=g+#M8cQx zFFn&`w7Hrt@32=+CY{7`LvS*y@P2dORoIcY4n>sMLE3L2bOXQ34$G{N%`C<^ZPyb+ zM;bSM&WjqeYv$7a@qe8Hy6bQ%yY#BkVjmXxm1VMLjmuqPyk1Rrqro7=ly~`9OvZgZcKq`t5RY}>M zepjm{>+))mFgdvDy7N$ICWGrm{5T`O?yeq0#wKd4$Q!c9uckjY@zDzl_bem*WSDQJ z@oQSnK9?OvP`_eF6<(DVrn|2g8MB(ZlwT)_L49e}o(x~IQfAYpljcjUB~x?8=%>*v z+0~N!c`;=3?7B|P=_gOaon&bkg?UT0oc)~rMf0x8kAxVs*QM zPqoMV8b04B%6{(-Z#!sOhb7x9l{|G5U9VU0{Z(oyPxe;q@fInM)Y_T2maKv~S2nqG zz9vKRHOBT^3*6X@WuJ<53wiY^{>`)cD%e(I^OdkkX7N7m&YF=X2Hyt z<~j5g5wttWUh}OOBwC1u*}W65&zi;TSE$E=d4s#2h4XH0gh-hY%rmU@JOpZz>SB>= zk|>gl%Hc_(SF1uyY)1ZRTB_mRthFcu*UXpBBXi!OB-iQn7*LZ;S-YAnM_FOpfmZb} zCuhB6$Ib|?EK_#4wvv^j8a}+iq8F+!YqB@;%2;MEwRd2Vll0=fl+W&{U+JE4XM37v 
zo<6IQj&^6}G;>aOC*Swb7kyIju(~4UFOc7l^-tng!`LdhNP5HUZeG3Cr$pN9#>}|5 zn0R}+*qo=GN^1X09N)<@pZJ`)`wNX{#*pzBvgGumWTlwx(~}sI(Q7yO{^n|b2q){p z^W>fk`5Q%wV~ott(8C`?!>pOx>`Fbo?df{b+2Yy)*c=b_3-m*;7Vb%w{VrIL9H6hM zJ8vS7#@_j^@2&nGTFB{~gSDA6xbn6l*%IE--WGqGVBi_*|6AGX_4#2;JNoR&vQ6PL zM*YbGm%T{2mQ18)vfFx``O}p%#n6M<@NJf;fH8OC`YMQ=g(0gUeWBR7n#X2k)no{s z!nd-DC?}cCH6GiaO^(pcweHC?%V>yaP2$=@YbUXO_8zv>LmcOGEew8w^vR04hu@ac zHrcpGYxg8Ae9m{0i{JpG)WI-2j%RiiKk`(13JdpkC7GC8(jsSjR44r*7+g_{-)m<% zPufr0Z?M+hINgz!&tY$W-q=!fdd@xNU^tj9e&&65`2RLIl~warSX}CQ&YsDd)rs_) zC;DV1z&~0)nq7P8a}QVFnd+U6vFZ1=`gI7V{eTHUID5h8;`{H$54-mVv{zepi@)uj zPsEK2+;J26r}Eef*{Q8w@^NE?jV!txB133@pKpiq$+y{Knwr|Hw={pRkB{M6b9bMi z%mlW}jQQt2-LL*HA=3{>hPmr1*!MJQDoOIqYWfxIgFzrIZMMh62 zvD~fd?8tgKL1-=3KjW@TAh!nl|MPp!^BnKK@08ra6Wa5}*L<&{M00ocVwIjGx<#zr z#`-1w*3cNDn)a8Ib1_Rd!|P+&y$nm2rBw%3_!0iEL;Vz9npL20!tzh0KQBm_lSPw( z@lic`XYyoc+!{UJ{mQ)Hm!f*@?CX4#M8`n$QGR)oHqL=mW`NRjZWSM%#0Nx=&PZUr}`_J>OK~27k6}xGWgIRp!XSgIb93ci=#diNf{ven>jkn1(woo)EMf;q8@*Ha{!j1iCH5%Vu!HTQg zv9G@)_*$I;0`F)cdBd+4dy|9oVZA|4dbt{pJK#xWT4dMU1yK1<|CxLrSrhq^`11{a zssPWop?nFx<=o1g<-SRccc|lRk{qp;4J^8vuT*BAQ^ce?EHr?{Tl$-kTyi00htMu| zy^_aVBkJXx$qRT{4ZP|`$Ej)Am{^c&Gu+oheiZG_)ym$i+J}s_$oQnF za~KKN^RORy;Tf(>;&Yc3`kd1A%o@K7lzK#4@z<;9o%z2pV(Xz~|3@3YvwhBR-5=JY z^mxe`*qqcih(Y6dUDi<(aarJF>Pst90|m@?UinQ9A4Q zhA8*GesfPe{)A8LtEOFS`vQB7qH8VpS0wMfWWQX=V`=r4cG|&a0%^-*OXkH!U|x2N z9OB-SSYjyY8$h~-$Q-}lK-0|aG=e}*7+S{O2dKR&S&~2ZIY>T*$4!*X?#5Q4PYGCd z&_?cijGsQE{!MgA{?)A4sfEq|`8*dsdGgE|rR(weI+*S0+k0fs+v#g?rH5#jw-`kW zPXG;FX^8DP6)Lj^hiKzGx~%uhK`^V#L$j_kJ$M7(PSjRrhI(SkVGvkO#uBVihu`mh z%36UlQ*n9^{ZleR)FbCn`jb!Gbqza@!rX==&lIe3QAq?A<(%*B>f|*L2@q>~=jl@?@L62H6*x(>3$vHgAG{Wx;F6Se_l1Vdh#j z9>{hdlfODmYq5S#%&e=-5q{ZU96z&c`h@znZV)T&}EC9#pV*PTU=jNj3e= zT8R6#`7|#{-r_l;@P4db)qNkUZ>O>0t1OwkdG$O!m&3z#7{60{N0GJ*$&-z;F=-x9 zV`i+E!nB^=@C0|oL(k+}MOmXej<-~%jBlCQkDvA?`#5z^@q3<(_A$N*Z~BOF*&*^j z-n0V}14)&=t;rjk%#}r8TLhnK!R$m>YAKGGQ?%%bdTN~(P&s$Mvas^t?3-gF~*^4yo_%Ew9nhEHG9 zV+7_6RPzGZwnNU3N1-3R*H$&wv{CF!5c6qESJ z8vh=zk4m8I%iGB0 
z1wP&1>@UvV!42xFh!M%YG?5P^^G%r4UbM^1PjYtWO;>4=V}E*P^l+AVn+)L1u;vUU zb0X0?GSpXcFnx+*d(KbTqNVv{{+;&8ak)?|UFgp2%$)(LoC=cp;Rb4IMBdLxa{#nQ zVNf#G{7dp}qD2$7$ZnQI>1Hs#jikl(1z8Atl_amg@q zhjL|&&CB6gdnjghYJ{tqE13tu@6?tNOI8|w28o;^au3P(5Vh`6dsFrNq=r4|b36%m z;?H4hxSGb-X|I{uvZt#z#wMR&PDXqNQ~L6hypg;JA4)*C4jqmwjP_>px98M0NqaN# z`2x&+AHQzI-{X`{PKwqzdXjtl88v3kz8CzjQ%i09c!k&Hso+WI&xY_F(0WoE*&&{% z;j?Mg%%5cKP0pVx?4D<%GNSf&dOYZFp2xFJ>T`aWOiB%W&MK|*c~uLXXb!2ldii8W zNd|?(aiNwguW0Ez?e3@59tH33A}S=$Z&?<5L+pJPhC}qa$%ua;ogY!|N&U}QOc;Xc z+r+MeSZp42%R^ux$;ZK>11<7wbTLVr;Lj#n{0ohZm~^)`7xRYh%3iB4%$u~F$&~j} z$*jJg5_{oM#=SW$tF2GBslT1px3KG6w#eFq58yY{-N~W-I0QG+c7bc%NVA6ad5e}c z?#YJviu*Q__bnJ*LDHH2{~jN*YBT#5UQzcC*peMCw_!soCHj%%LjIS%4Br>zt)j=B z&oh!;q$lZ;6K^{Y&+4g*;g-B>IZtM^-+SZDAiP<{=l)PrTN2IUbCc9@gj#nQE4_$c znP=?8zR4o@DM_18E_Ij5%uYmZ{3jyRF?N|P@#C#okaa0@cNqvScP(G$A$SYW$R(gS#OkMB7hpdy=p z?dmIfv%hKgG#kyPdHU(RYY#T%`T2Q#A^W;^uY1T@z@KU@o!D`_p5g&_zDlk|yeqR=*_rsCo^c{sNv#qoQzop;8`1hbL{|Du)!J{`wyvHZ=D&CJr`%A5I8l*x|}HUKp{5l z#g55me>E27tl-Rv_Rt%a!i2~CIZ$8oH=oIFnpwO$=R`kLxZ^}}eMqLPPG7>t$0$)+ z?fdhb3+a6?RMHcd#p|M2@uA4E1Y_Y`W65$!ytQ-0X{==W25f3ojIhGeT?&y*~6f zz0iy0ZTvJ|lQ%_&>KVI=h#x>CIb)9&`6sd4y{@dG_rqk#6X5$I$fu&*%lf3}*|vz< za(;YH>aEBBug8kD%xV%A6aR$e_hxw9ebWcKg` zy+SKGP9|$+&)VWhS=OnCb(e@>IX5pWUOzOp$k_s?i3$&qbfn0T{cb~ea@J66fkcaf z_IuGQtBZHPX?~nlSM#lZM5&!nZOw8g8g*2~^kgJ>QKTFO$DDTctr+(_eq|re^APWJFWSgjr%RAw}$cX0#HryNAabu>U*jXB*F7f$15iMDo^90bJ7TsakLX7*spRaJBoNt-o zt-#B&_BW&XTikuWS_iV;n^^yrGC9|0KAU_3=e7KC0$lReVhaqC1+6CuvhVz0{Xusq z4W{>=zUTCm8S1R($^{}pX|l{=u{=Znpj=iOX7+QV2)D`a@A!QYpWo>2r)YN_-0mam z4w~g8=oMm3eZM3p+~Zh%6y!cu$KLLq3aPWijcaJ~7pW#{vn{LUyqlfMCzE{gNDR$QIa$$lwVdJ5i^WTD5!yCH1y2mf3I^&#{h#j{32_h1_LWBKg7{t)s# z=w8zOnK#XQ;Jw7Rq2k;p{PS?$aVk61!-6|$-$1*klO*p~US#d1g{QiCJnl#`WXzmw z4&(W3brL7f#Bhv0!M*j|l~b-hfN&Emn-Bf0+f6i1zL*JOQ3uz5&}!!Gi$F2Cm@@m; zo*W&-unDZ!mK2@bd93y_tCu;uoL@Qwqn=RLAEMBDEhJw?2RuyPj@F(o8+z_ac8wyU zVFS8tgGUE7v{&{m%srG{Ht@4jtltZx#*+IT{#usyiJGU13;WRIKt0Gs?0Evqoj{jA 
z**RxV4`kblYC6}ijqpC>-K**O0{q9hmUlRb!o}geUT=^$Ay2FQRnb3B#;^0221a|k z&+M2d?&f^8tf#4tP1ovc+tE8`^t@nf_YX9SYCF5Hf5eVttA9zGO>iP-YUiEN_k}oi zmRf!h8xCTZqVE4uPn8|8IV~(FolP_D$?E?rNpKLB=RI=j{+IlD3dx?&a^gbH$;|WE z64tpDJ2NkF3+XqJG-oR$f6D7Hnnt?o@Zc8hXFq9jhGwqqMohR+{C>-~t$5MKm1Lu? zPM2Slnx)j^uJ%%(4wcl%dM+&fWSI3CF7SR+7vR z`ybX`&P>Se&g5chQ)nsctnVjT^7`)&*#@Hf7j(*uQWHq!i99*r1{sm8Q?4~`oS=`% zE|b1+FLio7&*(d_yC?>JM&is?)`DS0_Sl5WdFnh>+dc3mXO{1NUVW7xw9&Wjo-;d{ zy>Gy^iQ4*s*X?j$MXiirp?0*dCJxpYiT==2Jb{^+olj203t==ujoGbz1@?c(2TGA@ z_gkq;)vY9S{9 zBu{G237&*GBgm9oRlsqHq_s6w*AFnopg>?G-SnszN9HU!>tAk&!__JQ1)JS4gF z`a`fZWDoUAHTSJ`HCZv<6|4GV+PiF)JdD3#!e;GfrRksQPQI)9>N?!7*;kaTh{*_% zytKRgp0iCmi7C_h;|*+4kBt|S`wCvQjYswIdkJ#4bJuV5zZZ_Pd26!pbmURnpfVn} zPG!9&YFWYq8jEZXK;T_yxAaR5Xy#jl%DIJ$`H--*>qC0HU^ojzqy{rC;#BNc=D(#Io z&DEc_9xAl;EE~30U-nUS;<5E%{xjY@%7%Gj&RqXeR(=}#|FPNb^~Oy^@maLH(XTnZ z;&7P%Ch}y@%Vx~YOw`wK?Ev@eX|13yD~V-a=rg;JF_{%JuX~+nQBlkAu+$8XSOED|yRG?JpO}$FfUSuFa(N zx!SHvrfKwlk48`6=9{AG7AT)7a*R}OZ(1M63SHdymDtf71F~{)fm*&LQ8DE!tN#h| zw-p77y5lIa*YY%1RSfQ>56XVmj3669q6CS4E3|Se%g+?C`Y3t1JBlcs-MXv!Ph0X- z=XcN3{6#*O(}1M2Y|ie#w(5S{Sno&}KUd&q_7ncczq8x>5@WA_lv&4q$xuBV!?M!2 ztk{+Pl>76^tg3&CZHJR8BhhVaF~Q$*{CW;NvmbgkN#?-0A)K=-A*a?g5QF0n_vj15 zt;cccLy~28!ZvbW$5XObr?ubjz>xomNbiwhmR3I!L++}Fd>}J$o!I$jWs{vM`;_u_rcyzt*3jtz!$+W#x5A6~(ron|0FRu#_!%Ed z2HG3(XB)mIBk4pIeNGQ@Eewy=e&&`kw?ACm$gZtR$Y+|ICix_}h%P zO{C9xu3w|}{UN=AcK^~~EU9XVAlbeBAUk#P>0>b>BaQ6ZIZ~`>iM;%3r<}C$hRa{K+|Pk6_+Ftt@l* zPFLnCF$P2bgu^U7KxZ`$W5=8TUW}J*DOhqbDYNtSRzA=kqjJXa9T@xUgo;wJwmKAr9uu?h;}_<}Gsq$7!(3v-(GHn9hH)zPGu0w)_1e zBeqAC%K25h*kS-XKdsfAo_-T4=izZZtoe-|YYKhk0)6}LeDllHT@w1YV{YE6R^nxE zlI3RKo}zuunr?)tIiKKc=w$y_c^F^7qLW3?_egWA=&}&rnR%$Ix%HRm z_ZtmgC2dZex{#%=#la3FKMSgDMYdYJI;XEc4$0nP$$j*{8CO4{YjS$@6m_%r>}JyE zw1Z?zd%~wtSop6}BS}m z=>|5HGST&G==VOZ}H-&-nSBep|~Q%JA^o zS{v@u-nf=tH0#iFvS3yEEY{mTpw5z_&p3BX7IB)heNMJ1MTW|7&*@)vXcC5vSGqXA zJ_wHn;MQ`uodxIpA+y1qdFs52UnDzCZFbCCmg20F9lSI6LDp7X@7KBfYBftOg5Bp@ zNG{n^vG+s1x)fqZv0qg(mcpK)bbsCNV|>rLr~NSgJuJvm*KoEy#I?7Ty2n_qsyd61 
zFFO_=$KO1Ge+b`o@M=bmoI;+HsIppUIv(`qgZr~+_M7&k-JMX_hwMAWlwG8(;g^#j zwjar-@$u|AJsF;PyZy9lJ!$i$KilXWzp5*Q}4-kI>utllG9&CJUa?)m_y z4pC-z?uVaoH95FSuz7NmWj{++-R8vEs~~zc6xWbpr7}6qBYPk3<(1{wbDRiJmt@%+ zIuU+{;pyG}Wc}ocyfO1uW65(nKg@fcMp&5qN7s>Vu!y`=`TLZr?N4v44&SR6>KiPQ zC4XI3Xg*uu++pHsR#Oio<Y#oo=SVKdjn@ zbWMG#3-uRZo$<)g?#b@sW7+I2Y`+TfjaaBXEIZOJd+Phq>~3Y&!7}?FkJWD8t~^S{ zBYEXYGOeO<_M&CCc&wOJK}FOsUQ2tiMPu>iZ_y+BEn6$Q%csrkbEc9xYc8i<|-#j3Kg@ zc?D**Qf{I?5*DXrpmmx$bP5vamgoK!`P;jzr|gdYIL0y z(4$4}$cXS9?G41*@gldad3I4XW&caYn%QY^nf_;%&zaHB8Ftk$IOlHM$;Yw|A@lEJ^a-t4^gW-?A=Ay# zlMautQ}+L)zpf~P*H-h#*pziu$u97czwaxZv#~p|Qr0fbR<6A}vtK%KDsPhWUi@DC zSmw7p*%raYI^xQaN;ZW01)}9W{(TT{8Y+Jwi3ifUy5Eu)ZY16vDvD*j_9$)b@Z4|} zG?$YsX9(u}<77;E6AIfzpd*$4pZa@~C^OohLFjb4^kBES`oxV!cC|^rFRmR;lC1MM zMthg=jRQztAF?+?C1+R7~%c9evuRv{7u^*@$jthZUU9;{{0TZSr1;-m~knlZlYmEOxcTc8+~i2_dlNp z(=02$4{&9%mMe>jf8p_=EeVggg=ls^sgGlwtLQwXV3+KE=%Xh(0zTR8pWH}!#@+q)1H%Lu(WH z!W5-ju<@;Yq&6AyrlJ_@XPlW;k~zQmWLmA(`{zW}31UDoNR)uWEl|A^j!#2t7k|ne z-E49!SL0YMeCF=Q>9o?_`-_0R$&uNbtiP$xYRN`&okh_zF3Kz_PCdt zcF^)5zTpd%|xYcWs45_9^FCKUuF9VbV(? 
z)d1ywFIfHnEY6CmUrCl-D)q55v$t*Z&-W^sH+b0tnR(zb?3z>bZYS&QtX&Jf^^_ZC zwOMviWo6#=c$fDk<3)oz=rt5;Tav4d7KW4SVm>&NpCvQeRW!=T<}r6vhfhwn_|-45 z&#VHrXR&QExMyX>1u$F#wc%>3CQ?^ohvKm41)o`1(ukCo8Z&jnjqDfB6Zm5|-j@G2!NE>ydXXImuu#rB%Lw`}KKHs<{-b!*8jj`Yld(iCt^cF1 z{+zG1WX+e=mEGAZ@wy|s-GHAn^^Nh-CFDvhtVaIQ7?rm}ckrs7N?#*BMA}24TN@(| z;>R^a#Q{+M4NnHJ?4vY_cTd6Hr}@q>mfDJmIo*DV{^xTMGMTV)hR<|TUw|vO@r{?PM5A9(zC`z_Ov0=1ZMZPoKag9jTRUOq?;(7=qr)9G1RbuTUAzM>V z@g>W((4)T!kzI6ph0cTNa+`LtqoTStM)9lUB>znU#;aN16S zwXSBq_hEncWAUt0Ib9Djl~t;+`D<#}rlwhRzJ%A0!O-W~<~-xY?!G@EUUeYjMf90j zu;E=a-^;k*QFtcfY%(p+(hDbl&iU;0GprwmWlwbt69e)-;w};n;&~HUzKnmfej;bv zt-#AilsVV$Ii>z{T>TQ7SMZsv<2YKa^&obDm~o0RVoodHh{G%KX*$$8vq3WWbr#if zGQ$Y6uT<&?E!XlV?D!N?mH1NjQN4tH!wS!!Ga>dPCZt4H9L^qwiCWLzrLyXNn~c4U zw8|PAHpl1+WOx_Du3^ob&b|V}l4W2L4wQ%OBn;UoMr1Wg3w`k+DNz`bgwY-~~3(JbIJpqchV$-o&t)jIL`9l*WCX;j`2|Cld z4)iL}Y6yNjEW)=T^-Ef-Sr|*_X?7#bvXY}Vk2sa&jr2!3>Eb}l&)F8e{hm4J_x+NS zgck7Ge*S#w+er6LhX1)ZcL*uYBTII!eop4|Nz@sOvRnGLf~QQT%l*nN6VuNkQ%N`< z#ll%_mwYH$gHV}QZKPXPp`N4N6ZqyXt(ApLcBemtbC<(5>-n>DH77b>;P2n$&Mxlk zTuw%SDzq3u$Nr+rAmiPAAiXc?k_9xo+iu0A)mm$cr$gyDA9gpeNak9Sk$%3mlH+(I zufLKdGR7PMy~UXDBGw+s`(EXbJJpmkBTM2yGv3`t4^~-AKZ%N&MXE@@F~)U6Nik1+ zsj2Rhpwb#{dA|OIW%B%#jN0dV_ISykE`HDGGN*ptLgN9VSymKfk8C9}e$A(QlcFlQ zhGFxo*q7{Wlj#2{rnOxStIAm%%<7r>K?;?lK*Nao^23edgJ80 zf+nZnQL+s-Bu^iI?}OJ)7;F>)l3`;gWKO4D)-t@MrtH@HSbc3-Z#0>YqIY|;dx5w8V}z8<0^@l4%>UzePEyEw zfb5u^!moC**Ta1FN1We}4;ANG=R^8Q+&POjuZl)p_*ybp&D3TxeJ8We*`j%68qLuD zd#parU3>eUyv})-eLCrq;qO4#AA`+}n4k6gt>E8Le>_Z-T~E3p(A-L<624Dz^*UEi zpiOeQ?0%DSClvFvScILb;n6&JH5W5RE0atwRj?x`))!;FMdYgj*<^s;Pm9$_Q5u?g zntj+)!YcI)V6&VevO8;2{CX%E|6;ERBIeZ)t>$`m)xXb9SxGgJHFCN{GFDB5OXd^r zRAU#GP41&2O6FO=6MMWtu4iz(4_)&#IZd6HxG#IHe+J=-tpTrG(8zs$%u8SKbdUrW)8LeM49O>) zQ*grJoUhZ_bLBGLk(JtiWAI&SJ4>4fVqi`{%RYkQq~5F-&3cubiLj72Bv)JBgzbq9 zAD{lJ9zD5mC-KW<`+kwVg6mzly^St;OOey}PUX$tiH4E6UqP!G;$=z5baE){Qbl4*9XWDV>gYA*-7U=6)Bv~9X@0v{J<5*!RHq>DIXmK_zPJ>%c zu1ubloZ*s8bIG2uv><)noaC)uc?>w7En6v-^F8~LESZr<(qO*pgVde(8&`|UL;RLi 
zK%>d>JGq*W_C6LoS{?NJbsyXZV$4~{o>O|Jlm`nIay74PkFm?c}JU_0^5~*3O|>?s4j%I`P+ro z8H2rO1i6HCV{s~bqd(wZ!}!8qSTU3R?uS6;FVAvso~v&Xv-7lbkSB`d(aj0YOYrGa z{Hp3o6TSOuN@Zr?RCY?9y?I(aob*-sP6=|J{>84z1N=R&%I?#w!booMec@3Q~Izd zI;s67t<=P^&mg*yZ90o51N_Zu=`O}m88MyV+fQ&g2j0^V;2CJvqbk3Rb(1q&p#0LL~bd zE?KiN4Jxhq>U8?v&GL)buo^36-}0@nd|dn2V&F&cErX%^;8l8?=h-T2fwStd8Z8d= zC-2>JV&rzR{Le_>Hg^qoy|>nn*5X$XeM1}_&9B#B%JpQ-diX))c#hv^H~%EP&QGFZ zQ4!*4L;f0GlTr}}4Vr=`DHYxgHB=UmlZ?D;LUbE4uR49+OG zF>PNU`8S?to0GATIPeu*b4prozH}Z-ufWRd+}9ch^Yr@@`F@7NT)ooy@Jfc1WR9yr zgU)okPn(tTHzzSw#-9OXyg{_6sg2E$_`vVCYv~VC=8UGz@7HMY7L+2F@Q&v+9Jel;2xs%M&O+?9PL|6jY$-F1Lka`r=~!sxyc9WN6hyJ@wxyOIa+ zAvg?DW{A44)#4D={aQ;!+3E}bZxWC4PCr=$OSAfsG_As~F4RsBapWv`W`E-p^<~C+ zf^v5h>~*|0EAoQ9upv9cE~880?gead9$qE?#%QR1&-$4$%bT{$3eCdER(NtB##GUx zcVnAldCUduvw*iH6GeZ#SV!Bu;YxHoSgCJZO>U)81vbtkUoqCs`>BOwdS0D9MCUx? zy+eZ@@XMLDH45>fC4Dc(#AhJ$BfrS1hlBMX&G>irxm^f>JK1TB+P_rGY!UGi7RzkZ z7_oP|Z!Jkb3%Y}O=F`5P%!dbQZ3mgMC*&wK@2ABUxSEx$$^ZPAdY6;%S{zE2(U<8_ z2Tl#xGCQG*V{$XRTL#fS>dIb@Y4lAtqK@#}n;sYPnagRDr_k!Gv7S7$wD2Q;nh(Xf z*ii>>Ghdf{?$?n!+0qB$>2xEL7wG;FOBd5;WzSR2uFG@hOSIa%Fb>T#)k^+!nz2y> z?KITd3Y^azUDlpv|Ki>>f1E6*Xy;WPS`~%^NwNgDa+b_6b?1pBD|(VEwsujadj)EWHn<+o>E8D{F}xjed`39Tk!Q#=nTN& zT7`&xBB`@m;bnY$Tf|ye80Cy5c~-FfSr}_&PkgdzJ`Jz!a31daK<%6;MkQ-$-c8j} z`?W^S?bMrdYR8i%CwX_!Qf3&FWos06-OCr=6%Ui+;T){ERs!UX)nX{CF`~ToHg|5jlJxAGvkl(}_IpKOcrhdhfW2dZq&N=kC8d=#Tu%qnK;xB_Al_EbB!IkL-{o} z8V#4^CeE7mtMx2*!}$RBzD2t6T6-9Nd8hG!cCs3Hvfu8Y`_1b82g=QNU!=j(Y3u*9!ZUEk=_N$fgM3`>^T>~qM@ z+|9VyhD^WV^#QoDU2FSmzbwpN7jLdE#FiBz=}9coP(*kW7MXp{$sk7>zg9GQDlP^z zBx@bKIZs`UN%}3Ww()=7iIsKrD?ZdyYdM)EEAeNzn&>;CV8?EJ;6zyb#!l5Cu!Aq; zxjLib2QaZNZIZdI3FMM3Fni1Qg360{yaT7J(X2TfKjcN(VU+hLW!;*| zvM$?HQMQAaoM-h8bj+&lVKnICd(K3yjRQ;7`$M6gx7cYR-@2VtXKQ_{%V? 
zm-j_)7ot`bZP(Re=E}3WzmhRY-WN63-`3VE)N&7B$or3C{4P6`vkU)qUXs%j zOOd7{E=?rK2)*vxe4{a&4`St#g_??zqKP)M_TWCKuEn*_aB69xXLuVopCD@)<&T1F zPFpIXM?Y0xdy8KyXg9Iue7(?p;_{v(?2k{gXn3I#c?#~}QxPoKz!Pd0ERpqhgE45E zNY{=()+_X_@$S0%yW~7st-e9Hw~5?0=#8>A|7pHDNF7rPmMc!PtORStk49>9l~^-? zU;c*!4MmixEVhI$PtZQkdbhykO!7R+$|G3uU2HqJVC8ZUxzEVuDE-}g@Gb4r)wq3` zyK+v_cs}{C5{GLq5wjdQX2X4b;l8@?Ez7nyvsBK3E~d3l)Lj(|GS}W#FP?oP6QOes zNt4ejdq#?q;YGFP?ZvbD^&3Rki1YiINrAE<3@JXDWNs^ISheTu9E#eKF!+S|x8o7u?vW<=#e5uN3;Dob8$2_Jj2a zSx@q-dNcCPu8()2k~g~x^dcQG^;B~76xF}yb^DMsZ?zUd?{FF>;^b82Msz-qHLlZY z#s`@R$=crM$dz6>*$86$&&c->Rz6CTz2N;0EWXeq#CvXl&u?sc9Au}_cmdryiwq;^ z@H;fFW|i!3%831RsML0)0;w}9$^N}kS}w%{l1Y4>k>By|x?YK^;d^Vr^ONl;r&A^8 z(fKUe3kHwV`aa&5(~~OW{70<1RSe40Y%EvC=RMf>K(ZvyK|hwiUTn&o@CxjBgaqI4 z?`7o3zJScF97><-l+VtX^-#DLQaO=tBdxm7AUj~Y;&|pcYl$Y6@asr5WKBW^{nQe_ z|4GO5@ToCxEe_$OuK&OilSuGCD7{PG=U64{jW_Vkn&fT?{r_nG6n#&l)lIBh#E5$s zw5#L57jQbklj1R~upf*{!TKo3=V|jFHKqrj|~fyzDT*`fN83YnZ~9Mi6!Yx zlZz%<)%)|NpY%0Z%iIeFr--`Ou+@AXP~7iDdB{H`NIt92wfKbI@gTKT(^huYRMXz> zRf4Ob__C2%X7cu6g=Ci*E^;i#vIE?eTwcfE$|jmO6l3PlFmIQp=&f6dPQS6}zQzK5 zF(kd>Mx5%SmPdF&FaFm|`EKl*UM6S3^(M=&{9_&+Pt)EdJYt?c`CNW`yq@@F?M*ZO zydIYSW0^d0ug8&dp!f>U2nuECda=*RgkBz7#z5y9-kH;fheKdB3|nA8c^2pbqs)~o z74=&4*7|Jo0&FkE+4W?s@2;Q7^*a4Jh|y#4J=yn{vGKd2OD9;ruOE6x+@8;OYpCTy zkw3YbGeR9hyTpy;QQriq;_Nun=Nnn`eBVn#>UL2qHeEoX1$gwf7A}KUb`0kI%MHpu zjcHx!v4gK(1ed9}T}y;N3c`8&(H}yYefr4NoB%k@-{bheFfHCfs;61wAyVai%UtZ~ zN}HqEbfdqEp?VMxxLsSnEA=!_YfSTtV0a5#E)~ZI7b3*#?05hav!*m}%e&EezPR0w z9J9#tr&{y;+kod}ulw#D!XL0~U8AnAXm%*Mv-hhwu4Fe?CFoot8jcr-|DZ#j|FVXE zKggWMu7l{^A97g_`x53nM8f8f%^A$o`F(2Lfko9Z@=7JU@zsC*_Kgg_9&RF@i$p( zTH)1~>Km&^PtMt_Kp#QQ>~_qjCQVQa|bceueiO4f*w8@$7u;`Pp5`>CsR0 zc!zI2O0q#Pf6Av1Nb)|!bEf@e_&JA9REKVM!#wT(`}HUlarl2c>jK>Ri-cFPZ?N^8IbC#xwte`#2@DyYM-dy$3g6@~JOoZH4dq7;?E7aDy@A z6n6cA9`Et>N@To8x$Gg!&hzAh$%!dB;iLyEH7j_>EpUHId*fg_jC|R1k@>n6H0#ci zUmI=b4cXaz_CfKjfnRrNDd!*7VApYKtb;o_Pwinm7z6jLA^ckDiiNmYS8YdWZ?R~4 
zse7}s@lt4ZAms#Y9>uFp_O}b!v!`+?54%iFFZ1`K*}5Jb^0ZWs?q9Lh-R{bm^cly#TlD%vS#k*7I^aY}rEX)f zJu!P9HXepA*|++WcyOpO_GdgiYyL}M+5be_7u`JrxAxcey{tJ0PHSOT8pDp`@s}1x zJIz_B3hv&;?vL^3^1Np!R@H(`KVFc`Lr?I|^frI8QDdLJrTYrn-mcYe>3^?2<{q`K zW0(8=+d!G@Y0Nr<>u5Qe($iegY@}=B->$L1WAd?pO{h4U$Fb|i+>#Hz+DI$!TGnmkK=9?V~YfA;8=z^FTws(|AI-7`nY+u)d| z$z(8@j@cQ(?a3ma()$i*ZxVT4z|j7FKM&(u!!CPF?t$`UKAqye>y%A4r#skSEbp#B z|3{TxtEQ%KDpiR4NASJcq&XOei)p_*$+MI92QrlwYwAO?N}-?3y3}fE6oE;W;^R;~TPk;@6yE zo3}?fRk9%jcaipd*4z&J>~y#dQ&+e$SlLak&xiF$(LGPtr}Bh@wXl^mgUNf3amX5+ zY2g0N{ImfS_R(vf%;M8&+7}L)QM{Fg*Yo`tdz+N^cMT>0pmA$({wfGS$!s1 zvvM}?)sllZCtGLbV6yN&!NRwR%^SsoJj+#J`5)OPSqv6K@^~Kgx4$*j_K1jo4UKLu zMDt|gr~#krS${O!B^O#%E!AhgJk$Qg*C+CW+i06?GDVb2-uUdR$Znx3KDFc{joIc! zNS?`>J$T?H7?8b|rTxDMvvbn${c2tckH(mnlb))wLVcsOoGF)`KqaA-HS!;bl*=%u zF6m2)=E?pz88gDje)P)>>ya>c)1P+k$(yRIJ4?=stO&_|>+J0Kgtz{z|44?oQ^}Qe z0)1I2Z!n8P`Y|>*i+(*|k(s5#S)&HLvd5r4owi`k@oJjD+Ih?PGG=vThuc`aEXsAckJY;opSBd_${X>iwCKjpjmg&l*XyVur$3Ds&mM>R8RR&C zoHazlRfTeS5=`FBoM`+EiHp#0KU!o>*$JfF$M>OhJR2(?ghvnZDNU3>V`)Z=)F^ozhOu*Q7>n@oJRJ6a2)`NZ+$C{r@ONrV6G{paAtC_U22H0rL_Qgr->Ka3+$xk6;P51MWG zUVjqgc|Q5}Gqaso(h$F@((q|DXWxIu@0;n~PwYx$>+7E5pq1R8IcaV^M*O9R_(hB9 zg>uHlP@Yp98hK}v9UT?PGf^#hqn2mv_mvEiBiKB7-Exj|PEY<-pPY;bjWFd&nC9Gu zN3^>89n!ftoT#<|hL7{7+WybEz}Yu47n72kpbY8r*7yql-^{lbuvA79Ir}wvF$ZB! zPVM`DG~Ef7e-}SLpxsp5 z$^1N0P8WCPlQLuNg{<2h!WWtQTmYltM!eG?mK7hBos(zNn~WS!8zGZTqayj_NWc_*`!+dQJ=(&3eDHY}Rn@35Tm#V1V=UeCs<=^-bF2?nK zCSSn;Z-_k+-CV z>7lOpm_Llb*_kYMwpMS#gi%^-DGDCM-eq8u3}+APc^eyUqTlZ9;K^;CyqwF8F#RD? 
ziq)%%#qIR805Zw>u-?^gJLeUBWZ&Oa#?QaR#$@QY!0cutFZxNlsmL~xw`6@**5746 zZWF#$i|szfhHY9|gdsc0lXn`+T=}n_9_102;Low*Sk`sDtFH^-zeyW=!{$ZzMlWbL z&z0U5D^ja?o0bn^^*oh-h}9QEqckgZg~)cEo9B^V>!AT(Zb{?Dq#nsaL$v(9f19<| z9*aBCV>6p%^cN3$f};(x-wz zOFVCg_j^Dtb$xGO!>iciMSNK&Vze)e{mF@vT~1esn-|eKHGZm*=u<4}K=bZe|C@~m zl7ES7w!^Lk4;W|+UZ>^E@4jN8_X?v~-Z{+HLj}w^nvJsuqqQSh?S2LhTr8?B(#P-G zAHfDKUAdn-$iMyH0FOIqyN}ji5hLqk!6W3_SB&}xy47&&P?7aCsPE86HQFWezK9`D zz&3N4NBG}DHkpJu=aD!0#BLQE^K{@bR!FYsmaaI?S@)T5U*Mdfeop5j^Yl~?I}S6F z9|PaiK5Qj2{J<`K$@3jfCS!PQ*W_MxGOp!LB{g5#(zraW+8V9@6Om3P=V)`nom$O~ zn>Z9^X9T7EBvUB z+hLw}K~rE<5r;2>&s#k1d(w?1Z89Z}g>80~X4TC|@$g=VR^|bzJG}C&kB#W}yfI15lzC98OT&ZMA^W8(={q?J%WJUSUQbrI=m;z3o?71ie?tVs>Ex>`O82Hn|nzSj5DZe6l>rfu}U z*f+Jvv`X}O6dPAL+7H84kUq5|f5Y&+%{iP!v!c0F;i`$yNsZATu`1cLpE36T!|I2@ zKTiYhA<5MR=01TZc`H{72il0GHAMY?M7*Cxl4E&x_L8@TMxHcm(L-`H9mzh)XYfDv z&hGHk_kP!>%*)sDuMzw?_t&dw_8=P_>)vGxnuv3}&}Qya zi;}A|KfQ+c))vK@VeDwUdl@>(BQeu8sb$@Ptjk>4)wk2xF?l|&hVhq1gJjdVi1vH% zy7}VVbge$eW_c642o}%4Y%#q*6+s5!U=g~d&P&#cmZDQ#GM|7Y#o=^6n{?ydxj(9^ z^>OTz93Yu-=1KBAy{4$@!$dD#NQ2i*yqITll{QnqQGJyEC)zeX(8-!nj@7+Lz2X@SbDI^t@xq z_wYnvL;_HZ6O3rnA4|2DAiV??~f_xrO%176ufzY}r0sTMwFnQ!!XByEec z=^uXPT~gjEwZhUn$vz2tQfuTjm@jtiRMGAjaxC#(p2J+M-=%ba9VeePj;B)KHntq* zyE@LR>YBT8s+(9o8q#gV{+|jQsOU-1K$>61qHprqWAyf7fzhSOehF$6`zXaWb%pWJ|XCpL{js8&>{osnh7+;mIv^O96b1`Xw^Y_QwR z+{=iQx6WUQyV*T{8vb743DoVl@UoHNJMAQXY!c~HS?dI&@kA}JXQS+PF3tjvll*Y_ z3?V}Yp0wHbr#QPQ+=scM7%oqvL9(f)npB=e?-UiX?>D1zN3G<3=mjiX>-?5%vJZA{ z)kofCOomlYQ7$v9-~3BGs_ae4d$fi)o%~$=`EF%QJl^-M{XL9bmKMINNRQoX)^itC z%31yBp54qhU_)ttonib==AduLID%A}U3{ta=io3EoBv|r=;Fpyld6ODTjv+zzBqWo=CL?-UStc4} zmhu}!au0L4DAs|O45j^Hq~D_TtUdooWXg`JH^h$_1u2r-GOO}dY4aF1_>qj6`Sx+< z7BO`XSnkwjOB&u}rkOYVD`+sr+2!@U7d&&XzfNS`8#>qeW)q8LEx0xjM}mT4HCe0^KQxugBeTln=n=>}+hW>n6)@CNiqUZ`XbxD!%YbDPzHmlQ^^ z3XpD%)8FIsP5R85pNgV$D%2ezswAJoi`x8GoBe4%icXjNCb`)5)z%hvt^(&~I1%f; z35WGO<7m3o&`S0){wu=dsY~_;ehif*aBt?!;dE|a;PWhzYX$q{Ia3kezfb<;I-kv> zk`*~~hU8^BjBcr~kQMp2@%2l2WL8up^I@Ktykn$k&kC!>ybciGQQ$@k-kfS|6}8t; 
z&nkKB=6JO!*($F@;u@> zmiI|07 zx~11)vZph%Vr&&T2IKBsbV{DC-}u9P-{l$07r2{RpNSx=S$ms!bCy_{)fUN4cDepf zfpgvucGA-t5p5a^RMKB3t&A}`WDmy=e%8RW+aWkdFXdP-adVRXGZr+$pF3%u`}M3( zAA^f4`Tgs#O!kPpf$Bw`JfTXy+i!h4w=f@>D?U^ycwSfM?_Rk-1Ba3wIB(ppAzNP< zb|z5?@p-V{)#zH%(KEC@7m}q|J$0LM-+lzGHqf)3D>G^x1j$?2Z8$ccMZfCU_#|B4 z!^oMg89}7l|LE_bGlZpm(x0)pYJqdQ`yhAH|Sg@nwhpe`l$S zF}s0rDj83cgK{laUazGESTl%4Hqg8)O_~`!n;X-U4fb)7C{L!7NhAK9{ZYw0koTc^ zPj(60oezmFIR333Phpv?k(f<`5BSY?Xf&kXq0Sm{zJ+x?4OJ&l?D-pC{i8ynYW3 zEqH22Jj~O?5rzBk+@0?-ANz>*$qe#7hAh$M>mpng2xKmsn&8*t=5pxez4HX)`Z6AK zACswR`r3yi1p?xB84;hmkYy zlKb+@x6A;``<)7!my_~N5vVQn7DH-dVV-t3_TQtu6Gfxk4OA7w$BT&{!L<@zKEx(j z<6Raz{&40G*pa;I=V>oHl3u4lyy82a))8`9pZx%IT8kA~*Ki?x>hZpMnAl1z$tarp zsaay^rSyE3Mi*ma)as#jEE{0PSpjSEKA@y4vg2nQ9IIx_M6aF}e4FUG5Wpp;2udY(%1uA#u5pAp1%l7NMrW zBDL+a@ADtm-$~;Wv2ul&+?It7G~Q+RX({p^S+L4}Sa3Go&(Q8gV#+7<`bAW_p4_Qr zn>Fa~i8Cc2`Vbi|((_BWGab%9VDCg0%HF3NaB~JejfF|7zb4!Bg?#mFGNzvOt+c8D z>(uI=%6bzZbQ~KU%$JhKAy4wly7~j&l3j^UVb_Pw=>fS)uCEB4)PlQ>50<0x6|8%X zD0>`sw)I_0+TG>5n)sJDH(5{IO{**PK0}Xr%P~<)&ub^^g7d6kju!S4NhTY!hl*Iq z5!8}>uH^f9wlzR|m%%W5-;Q$DO=btHp)t)l*I?W4&d+Y^X?iSZFiTk6O9LiI@m&ly4?Ky0kZnW)*34LjEG4EMT-g*Asz&`(xFP=X-sZfZv;rzl+V&3Jf0S?*Wj>+k=tNOTFED;FagFmD#)u zY*Gg%85>{baapZ6RX-i^X1H&X@nQqnW{WI0^ZqN~ULEIWku7VK-@=G9X`bi4d9w5# ze{BZ4GZ{OFosI^h>soQO@6|aK`Q@H_4mYTMIXc%!B#BAzG<|{r#Pp zGydjxsomR?$LGDmCg{%Kl_&Azg8yvsJDc<2xCeU2>E zM6VW5$n5SIdK`(X*`;4eTslNQS&8tXzPI@FCawG8*sJ%XDBg_(xhv^6Wc9+(ECe2Bt$}Ze`zTKY`$=uS$yrC0IY-h`3*(YO5 zsvhJi>Dy4xN~JqkJA2PgDU6&m>5$CA54!e5u^_u3`trp*8QNtWTR_{*7<~`Sx{zp3 zv+oNatSSq2@jHK79EU+HK=Sl|+eK$=8TPvvE8*%--Rh z*VDQq4;h9fc_ws+@7F^=8Cb6{OK76yUXV-Gh?8)s9uIg~pQGvaC@G7OHqTVDdhjxl zc?ECED4UE0$pD&Yk*s{F6_^}*@8ZfSY##xA z9~3Y6-B1!YFN|*MegCzdQk`r+GUOhsI;LF?&68Q>VU}zPxiF;VSu>32KNTX(G5WYt zPsh4$Gue{@MfXdl<~da^%39pq;Z}%C+g*$Mi*Bf)fe$JwxP69<}YFLmPM;MUK?jj)!EQMxJ|R?MW)`6=D0ousLxp zIYQg9a`NS+j@#~MYpJI>6f=7oOFuEr6n9?sl@zCS_6Me3=~UQ`CHaYbJ^Mqmvui-X zFDg4XJ36kSXBTmDU)aP`iqY*i*ROCtvpX|hIsP)t*weo-UVdOaZ0V|6q{5Eg5ucFnSdR$72 
z|FOerEe#{r6Z|~;4f;dkH{WzKYfWrCh`@)cXM*$-(O2=r z>s^&6`Ke5otOaez(1etour}*YMvEKo8Bekn{w?y1g46M=ww}%BW6&@x>B{f&One!C zy%m1TAeeZc8UndTKE^kb%q~ib)XVYVX-vPL7nXy=>xEg~Cyrj{%JPL+u@7E8M!W2l zP4%4A!~F$=KGj0>Sg*h1%*!XyE1A-h|EQG~#P%_heb9PdrfApAZC9!cJ1Kqx!w226!=z@Z4bu91`w)Gs$~8-fJeN;URij<^+ouR6_~AAFgdU`;`Y<@9?8b_Y59mAQk`J3 zxbhCH{}%Vw^4M#%QNp)*H=0%53t8`8eLdr^y!mOM&s1^j2lX%bfFJkC{N$} z*#B-e?ycwk&Y$VLZjdM|Rt{&Q@9_K!=$65UWc&HTEN&QPo?VDG_mS{bb{|CM!$^@# z+++2XD#n?wrV8zMWY7DDC+K*ebN+Le*_ZbFVrW-dCJWy$Sl2}K`;wnL>fW~-t{h-A z$g2ARuC0#;$z=N#-u}Z<&tu3Zki4b9kL-p{UW}&?HS5c)c0Zp- z@cpbdUJK)_LBCQ&IFJWCMwbbs%H2rrsJ_FrRs8)z++I$nM7yT?3zqL2(X*bu6vPHW zFmDr{5ZQ|G&3tNvO}qT81j7SeJDx__$JWN!bw9L{iESP^GapEO=0%4Ga`99kEfH4Wn1o$%~9-Z0H*ae=dv)ngnVei#-5 z>E8zK$uE<<$XQihpQrBPBl9up7iccU)&tjW{k0v|ocr(_Vi zfsMzoM0ehijN*6e`*=sQ3UF(|qdz2l>Oke~TDbTY@4cSKt)zceZj91)NiseP(>yWx z)c+w6UnyE1%O-i}y$V8k?lKg{rJcJJ?$bzAk5*}W5L<-b-L#cFhsV(P5F=PMwi(0< z6)@s{wz;_ATRRKqCHqKsQT%k$WarnJddmK~;FS8s*`H93geS7be9SwM1wMiCE;?L| z6-+9Y(I_=SvR9%xANi9`bz%IO=<@;_U01+1HJHxe$vdDogdJ+@Yb~r^ z)PG6WWCz&`zU{%Xck`UALp~Gpw~E2j{7W6YHn15;y5nGwS|}H5@h`ai27~)}#8}?l z(DCz0`ytlu6rtY1gtDZ}o{4twc#xjIV#Wj-l=EHgr?U3{ArU64xPN2s(FKfK`}-Tm zQ;p+YGlJxv+|1*WHLZb&lfB)^0bK-&Kj~$q|9Mw?0&ZNSzdQ$c)aT?aOU|$t^q*Se z&+y7?VU#sL$(Fs2{a4fa6uy#msi{359%LnTQ<|lA_%>4Jy=@b=%Fge8qT&0_dYmlh zIV0K?jtziUQsfNo%NwJm1Q=M|8c#`|i>@V)1pEB6Ihz7~`Hx>(v zL8U4$$QtuC@I74%Wkki&P%fvvC`&q{3#Lp{OY_5n?oZic6oaaiFVfmQYHj14W zi&Lw#lQ$;=-M5S*O>#YTgUa!u^JG{5N}p6$>8FR}VrYuXuaP3V%TA+VsxxJs@i1}q zGg`0J{`+|QJiLyD+vRM2qi>S?EKei87ULU1sf+J3$G@DPls2MI^H-{KWFlpZ8~bveZPqT_;jxRrd8TE9&Q~;!|Jx?A{$RR=dk- zHi|U=gLiYITk2EH(#xLC{-2+@*BMXhUub@%V_7?pT!P)nvk#UvhD4rl->vPc!|;fxAckUPMt*z&qIQ_m>72+Ht>JSX{x)%M21#`u+yrarz+)=$A29<|tW3!*zXnP(Ei?d>OZe&00!KAA$TCWpbwuuD& z%t_MPD45mqeM_z1Ey~Q$c6S=y%O{@KQu0`I6$!58(W#4Ang`cqi&TV6?yJ=QI*>FU z7cjewKPQvn6|^X4TqvgZyya-@yS-eQT1D@>DtRWJV&Ap;eaG3IVfC-}a<7$@DSP_6 z6UN*q4h>?7YK4(_wJY-6;$o3gq zYZomR!Ktih++EFMEd8&A%|bFC&&~~bP%6xIWy4Ku^eon9PjNEkEyRL3TDt|)`a-b@ 
zPHZziv=xc6cVsxvdYXk>(4n^e%kqw_9$73(EZ1vtLuGALNu1tS8x8ePk!STX;+BP9 zc4$u0=3LjtpNIH-1DWQsZrny4mM~-5(n&fWlXDU@?Em}LV z-O67$wf5QPzf5(q`T>)?|bAn49;eRaj~%jq(J#j|em! zYigK_?5@$28g#E~Df^APllMDztB>JX`dD#$pC1cqR zy;d+!$olUa_)|G!+@-$#83Q{(>;-fFBgLjhT1b}HRGVC<&*a6=toAzIK0wcX$?yn& z_?@hs^)ks_@JskQ5_Zp$U^H(2EgEMWJ1uECy%K=f@brzU8gab?%pQJ9ivNN-`y z4&SWjV+~021?xRk(DfwprXqMf?SBZlgP`+-wlC0DGhTcK>?(^jSBc~`$-aZsR*yYVeD(0<)W_Anha{da}Z^EuT zC%B&m{b_eCPL_+l|4yKENvKb>URNBNL;20(cc_G~6u zszY=xj71N_FImV&;QBhTsEHXu=C%78w`$UOo)JA&5wb6G5Xn2zxF!~DqQMxt*2kq} zJWno^jabu0`&n_As9Ko> zY5fb{^SmgZn%JH2?h8`BN{UoLzZFMMcit+JE)=J4XXRA6zCa&Y*|-V^-(&B&M)H>J>fG=jM<&h@?foGEzxnT|1#bG z;RkJ;v5Bp(!j(yQbRbSFW^B zp0BLw+JTX0vO`woP4nGk-o6&PgYjV@_Fd1<8nN~J<|p0!l{cq9oArKU7BWmvPhm(; z=QSnM5`W)~6Y=hPJgtN)c9QUHQs=2qR{bw%A%^E^T?rnR)iZ01 zMtR$|lP_-3P8r945m8dvx+O2lnx166n^n;6P`2KWpC%_^3H{_9THoLt}g>V}!?Bi(Gf!wdJZWxqIj#n6`I*>lk9y&PpT+(I@;dnT8;V;F> zvWz|3iYdtz@deyd?IF?iA)a_F|C;4@EiLChIV**W8n3gjJx?>Ua_u+qe~~DXJIp=V zCOhr(w4nq|KGSyvdgm=?GaO!Ch|2Xuz!I$U1(aq&=_I;lM?~IxA6%FrXUB3qQTB7K zcA$Iq1&q)~Wvp()K38b7sTQ}A`b#WnM3TJyZO87n({ev^s!6om1Fl0|mo>-ZM20~H zTO?Qi?Rc8p&B;?UhE0Cbeg)?pBzmu7#Rd>dhU~06UJl(9t~AONGV2)8zr*#%#n|MS=<3DcvM~l?%y+JQ`yAQ9SmA%=qIP2z#>4EfbMf!|UGjXt)-@}~sh5uv3ls};K zxhU9yzjS1Qx)3?uzdTCr zGe$ix!ars-n@_vWY%+$NHRxQNuPqUQ+dE^Y)|a|^9PYQ$(n&DAS#M7`ejwR5VaqYD zc-9r2@#7sWq;6H}ZsyJPKv>lGZD;M)CE*9yaXBVg6Hb}Q#@P7E^VAqQVYp&awzcstEYDIhvue%`ABzNkk})e6&L_pg{u+%T&0+hlqj$0VXFmNR7Ox{&>T%wJg*{p4 zIcGoYyPEXL%FTL^=?shP*KUc!_wt)DMz=kkF@c^#+38@sz6CG;arSz#`7!-ogRL1G zk`?$uG3PhE+=KzkjGb-uP)0kSA%+;^j*;y_8okhW0q4?qai!U!k*D zWN79{Cm5!#V4iuN$GXpndW&#qo_R@Xyk5py4ft(FrK^RvF2h(RCY z`O`c$*e}Ark_E537NY+Z&dLtP=b-lyy=(K+HYCp7^9sMe!_|s(yqLF+qQwAcRmJpM ze7gX)|FFPJ(q4=OOG(w6e-6Q*AEDIGnY}Ty4(tC^7>~2-Xb6^PC%_PWJwWDYRaUDt zwb+C_c`N_ES!Uj`oQ@&&p_`qpUGcq)GfG1#wVN*0?l}JN1SxxxViL_hq2((u{1Yp_ z)=sL7-h$BsNq89!e+ZZ7X?lo=nSQ(ALPLzG&FAx;?+I6YsFyqkUPh`^_B~!7<3)$* zdL2>l-cwloI_PD^;;m3PTRUHimXFb9oxf_sB0FO{knh4mKUp6=9<#qPPa8?1-PzO! 
zitE*QdNP=77uza{sL2Wa7(Ff^`In;M4x0T=>$@SB_vm$CG6rk1r}Gxy7KhchP|hls z;&hC5jcK$mL|?(6mofi5tjv8vG9#qE!T?fT#s8AC{(km)z*$uxR)xizix#Dv^|C&) zOW-bcdd1(H#Np(N|4BT23s%VhJW+(~Mc&Lw+rsBb)|-p@sG1UGopXVQpv}%yP|AM99zw&8#($tzhB7) zd2^l>u6a7s1~PBJeTz7HF0VKNOHSfn$!&ijOV=hx8~-09>;E9O`#F88VAmF@r?Ai4 z@EnV8e`9EJaWD5NnX&%D+AH}>^2eNmA(?5F6c4ZA=dIZ+nbd}AxsrZU(WWfkT?mtT zJnLFAq;hv>(*6U#{pdOvX4~-MZfDl@X}oWb6a%{Rh8kk-r|d9KJe^3t<&aNJll{!~ z^Ctf}eLo>a)g@_dvx_{VeFEclVnf!5W^ZF=;UyhU&iFol9-{T9jnXfZIV&4E|m zOuQ+=?g8K5T+smAvYss&w31shwT+W4vI76_Azth-DnDjqEho}sSNXkSLf*h_C0Xh? zO~iw40t{()+WBL4HMwt)U;8c-0`&@^!TxGHM z5?ADoD|NzCNo8MJ&h~v)jdT?&lmDh7|4H8M1N@!*i>Wf(Tq{TNraJig5YPX@-;+r= zRt(8Iw&XEScCcjj*iORABpol-W}Ms`xA*1!N9o}{I!uCT*4yP-elmi!a@~>SZ|SOq zM#ziAnQK`fD**H6@@Bj)%9@ALIW>2lcg6is%&OI}pJCJvU+y4Pb!^NUsPSZY*?BX_b&+$DaV44C@@94p*;AE#mq@Z7 z3-$6>SIi#3i~2xs96#C05?M8VF9u}J|1Es?)M`iQeNBVxV|$Y92V;Bo;Le6$1(@cE za^4LUf$6Jo-lVntFfBVu>l8-2Q(5(3Xk0IrWe@FHSX-6+*|*+}l&N9 zBiCnG6o2Z6m3b%mv^ZAAzvNuVnvLuyE)K=K6;55a=X^e%gt;&1FMi}*P%@yjqES30 z*~!M^uVN*14%dM-kftaoh9)30XZPYU)gqWxRxk~~_i^jP-)?!}%IOS7u! zGxq7I#rH*&*;+ryT%d?=|A5l#G|!ra2c4JwBAc-{k$WHBl6@t4haX;N?{>0fU&HDx z>9ms!#i3r5B{M@U#xCJ(_CIFz>@|EQ`I$!OdjsiHqqHn}QvD=z?Jr^a8rH7YUUn!< z#<`W`UuG0Ou`r)ZUHH@xE9K0`$=4VQvd$&;Yt8Z((Dxhc>M4?(!w$bVx(sJWLUea! 
z#uBX16gSF3_7GYxr031Bu8)b^Nt9j2O$+kHZokp;PJGD9f!9Qq8tnO}Iev0nr7Fg8 zBGWPAWNP;|5$SGl-f+E6He&XqL9*KI@5;Py%-j7_9lMdtxwpGkYo&4IF1jor|3CU) zL9)C--6IsaDbLc+g-vQYw%6VRSeeN4HLD)XhIKJKPbR9d&nPUN#otTmJ1b{~ zxZ-~pGn~~@bFGND@ej_uT;N+Dq7?YWVR>$I7*n=_o7_Ys@VT5d$&lMU*SH~SoudH+W;e zJf;N6N6|P{-^a2=_7rUNvkJy^C)-zSz88KROM;(S?`BbB8SXfQO9BPDxA(Uk}7)JlhnzD)zA#?N?7N)Tt#wC^W6-+C*wnNI!>l<1FWh* zh8A=T+iqv&Y5ejg9-W=kPmt&)$MY02Yv%Jr>P0MGL&l|AnCk4`{8ik!hqL}Ft(7;1 zG{%T$wV8->82LLmvxwQwd#+1G#y??kHche)rID-i45~6KzGmD@27~>`)skN%%Slh8 zdg{MVfx>ceJiGWS;b{+eEuz^yFqwjN$+(=H5al43XTYcF=S@C%pNKQ4pj#K;9pdT> zA=8mR9O>@?ddZ%hC&`yQxv#=z4F9Z$d5_WP8eHj9h<+Vuyiz}@tM{?vukx!@#Jn0V z9oQ#Xhc2Ld9lZR61}}=$d4Af?JYy~y+h`|MMYB`16n5SUgD*wJE6mq#BIDkCl}kD2JVDAmSuyX+*YmjS z)LbmC%`vAL>1WnFRDk7H*AFN6N1|=k|1=}hvF4#woiPgQzQwazq{+IRGsy6h*59Vz z2>$#v{YSEHMb_KIk{MS@X)(_@{)FZwY?eA(jrrA9ygdXrvf8R9iC!Sv9B5w3T8sEh za-}8LS!%m&f`4y5aG&v`t2p<8c#*ZZndg5gV%+H5OGr@E*^TsiDr@%B((yc`B1Uc$ z5e_mx`Bd+@1H6~tEpb&_*F0?$pT*-EIX~GJO3)`6=~Le{yY{o&V2QX9Kc7hE4y04z ziB4HfHx8~@{gd~gli)B5R)@G#-vino8yoUY`yjrSyv;lKN}gq3J;j z1D{W3!v>gClZ*r5(uMzJCD-+&pM*J|LV2q5KXBc2_PB``?qrj^dChZ_eR=&azRCOg zs!+*2&?7#5>gt;z_K|kiz~v%REN1J?yfV)-7W(^2|I6t!b^Dh4pIsJ{X!8~MXV5Pz zaz7Wdvo35cuRgWF?I-E?09K`L)?bCYgt^+ikzXB6!wYb)nn?Ys=#wmp&BVnaJ~tE1 z-+^t`mR6!$eT*HAsmaSx$|!oAb9!lKA}!A&WkWLeVwrpRMrsdy3-|iw5zxFX><3eD6wjZ3FeI*t)oAbO4@Y&T@!Wwm_~5Os;RuI=h%9N(mk8?c}>i%;QQ8MSthwK`c6S+bwEwb9}L*jJ`c6CBw> zvh2f6ZLh4sNtWH#biaxW-LPa0JC9@ItOz|HQm?aLd1KpwFi929qRv~6w?)}4&(^C! zdmgJ_K(7D!eG^`#+T{|cyjRfZOD%rQnpyo<7IMi@(28Yhh)l`R_OgB_i&gV+BKJC} z&-`B@5{#i&GAUj{m+S~0%^wEqF+2BD|8WMcJqV3w{9nUj$MUXP>|Dew@Gm-_L+(@I z`n(>?dfIR$U0aG+hd|;)5_hHFC_a!q4yTEJ*J#Vm5QR5*jEY6>gplcPqtS5CNe7O+YE)@|z7wb#9>MHhm5^FM>It$YV z7BF3h@wuB_MVh(d%~TR(O+?;oAK^&q`XmE=auP4l&j(Q6AbzK+;7Fer`I%?A)p+fW z0(ZxYW~Ep&@8+kFs|&tdeTu42V*^nH^LKk7f6_Dzrrxj!=2fG^! 
z{&z8nKBd#g?6x-sm7q-@++9byWWziY+PS0q+L%-g*Hgc#8D1}P&H!`HXB}PQoa~HG zcD4(&w^mDcVnyo6z5vt5Sm78pOE%V8zP5#>wCM zcj3&FwVTS)$KgxX^2P@)GCJ%HjYQ0>`d;YwCpen?oT*xt{0PTrDK*~j!=pS)`h_)v z>k*_+<$+XuY|q!T2DD)znmxnvnS&3-tK1bgE!gYq0(zG@uaRTn_Mfgg-?1MyO;_6fMO&e_RA zbb~X7v18su?*XC8+CI?vdDC|*J)WR{@=atfeAcIC#m9+I2*2)t)Zsig`yD$%>>z%f zTwlr1Fa`UvANXdhZ%M%NpDj|AGdulnG@^Wok;xdJ_YYkPvZPXXa+;)$W%j)9 zVN@N4ZfF@sy;cu*@u0upm-ns3$d$bgMX}%(BgKs(_#d#F z3Z?8BNHxB^)lY@5;^NCGY?C)_+4*ueFZzkK>uA3knYWuucA$ND7pap?^c&Ysz}wID zu#5Ch<4kf5rfOPhX_X>LYPm1Nr0kDaOs~wi-WAmkB4_D>eM-}!1g0HdkUh_7{-RGZ zwSQdT`$cTo$cWdFoOc`1v(D!ajLIENbvX4S;}VwZhvTo2J8L(8g;wt5e-@vz;&7(l zQ*f@KcJuTqnesCBH57~AH;$Ebb*jWB19N7I_Zxfi{QXyFUrp1)9L@gC^5)_9<9s8Q z$U5^HbiIX69i3UstUZ|o8$xR-v{QvXt7nd8qcxBzi6d>Ga2@M>L7&t+Pe$0Ra_d~M zOgTrIXmgd=Rurmvv%5DuM$q^Medo?T_n^7cFOR>KM8;GT`!I%p`1vkawQxo5ygS1wP;14s;3n0D{o3i^LdwROke+1Mj z>-$qY$`hbzB5|H9zljTvk@g3u73b|uS+udwi}jx8UJFQk8hKlal5_pcj-D4?e;_|@ z1f%So++Ohb)FZ2)&tlGhQ5%;#w;|M1qr0ike~Wyh&G*{zuvL&t#kEvG+QGtk3Vo+| zlGRd^wUUYeADdm&&!}FdXV`Bx{cHGpqhm6Z!%XCXzhcp5IbuUJ-) zq8LQi@}2ASa0%OA55HUR?R%}h!T(cp<|u4z#&XG~kntpIv!5gR<23u9_8)dUYw(g6 z<3#p;(|0SeGkaE3J8T22C$Qq5xcW7ozQO}%((zQSr29CT8S>bNO~Q9&S3lOfv<%f^>A}aVeIVfioLb8+i099;+UcaplaX(X z-r0kc3YnSX`~a6n*l!B`F2mZB==!TT)l&;I{eOgIZiDq&ae9I=_gVc9;QNc9oVpF! 
z*|CwO=g}_jV6yvlFMS`ZhYpZUrq>|;HyJWtxtQJcfu~I+3 z&*yawSYSI$J3>19rtf8e?OObwvEfVHPTs}`G3qlTLmAp0ixr3Cz)g6v0FN%w`n^zm z9s9P5q8;gx(fA~MJ(Z6=N|R*FIN4oX>YSHh)kTiHqy5c34~F^i5L^X?;<(xbYqM6g zGYuv|;&*bL#`l`y~!j_87c}w-YpWX4`LeVyB7q9kB zD%kgfzy$WGg;C{6*NH#%gzQA)NgWK?f?wGS-JiU@T-BZ=ufu06`SxSqTMJ&7Cv(5C z{e1p+BA>X2-NK-w*yL-i?O>sjBpIlc&3eh~G!^pluHt%UXD`u(bW2pf&ipH@Z^vnG zC#(+k?NiQ9-o$F6$206U91qLlTqDtCGb^3rcye`|gn=WBr2A{_NZ$Rt7(2(<-bY;d z7kc$@;yy>d(pvTjW`E^#_>vqo+v$r*9U9iM}7PThl6P_Mx=bxIho<5 z7Wn7b)E$a>WBIc=#Zx4xf}I_Fz85=QcSbAhYwO=e(KhdYhSB(YK9xM3^;vPW^Z#I{ zJxMi?1qRTqEDO}~O*yljPjK`=@nNBO(;RNk;cs>YJjwg=o`D4RCP2NQgVO&f<8vEMRlAz zAClt>^SVtenNh2}wwF6Ed-k#hu%F&;(B{kdcfB#U1%4+}UnhD!j3vn~GQWUi_6=l* zZt^+q;On`o-U+w6c^ThM zH$vk!yu6+*t7xk;9^`#{-Xsn;;y%Cz{jsx|f4BPlv)H)`(#bNGeafk8v);MO#f|ma zOzYXlGl(45;nIoPPW{=kuFTuKwPIWqy(b@icI$3(e-W)Tw}Sj zxs+^YWBO~DcBb!h=blyVTX|a}h-bIJue3U!c3Cr(N`{NHvKV^NWq>pG$MIBTuSLt; z!PJ9u^6B5q2WMh$@_%HF%yrsMzO_^#dY%NW{jUV43TAm@wDvZR%kiQY#@)YG1mY?Ow8Vsyhu=`>XpCH0K$x5Sf zI4h;|ENMUUrafqsU5Lj!zld4Qc|KoD)7JF8#<+8zeluEjg2yLxSS{*(%^Ium=n>a9 zWwi#{&YQA$+ZZ-T^^SGMHRFL1y|I{;%>3UP`Tihv_A)Gk-52o5{pqca?M2c%u{ibV zQoHziy(9x{vK6KVZ}uCfe*BeKvlJhbeQ&lgZv?hy&+9HM+>V7u6!0$U>^v(y!MU%w zzNz2m8Xb%Ku7>{G&@}l;X5r3{eD4hy&Tz(Ibox!}y*`c3zXM3>fwR)RO#=$hHz~;kTv%uN=!Ygkh>+t%i zWT`=-c^E&6Mp>&}77pk0=05N_-}QfE@Gxz$qu4mn z$RMxJ1xnXCmut{u>lT)={yW4Pp09OdOk@neR*M4VW*x#>YlvAgFi3eG|IPm+L~wH zS()-@!Is_eJ5Rg5A=B=htS|D|?C;F1rKl?h=;eM#vqmrb59Spj`$yzXo$o0y>S<1$ zXAd3qI922=Y5`zTnPn7&*p6@C}SvB<9S9&*^6AQ}p+`?c<(Cyf~{tbNAGf2RAgc`IsMk<97$w&~1 z*3xNO?Wy-#^yr4|=ZI+)$o{41+*_QVOsYS`xa84z4PUyld9n|8c5c@755UT%DRDoZ05xqYQ?sJdaj_q5Rc|E}6 zJLBA~+8Pf3@5!HMtow_Lc{ZA?D~;JZ>k+;(=i3OcZP@vM_I@Vw2z~EGf~|DO{a_I8 zCIa7D@R8#1+ordV^!JDsvv=_bJ~9KNx5DplEK4T-f1o{whL4I`S#{6^a%C_h*#~~p z&rSTjlHU*d{zSIkLdRx2ExQ&f;LUw>&wGwH^_x8+Tgt{2$G4=VK%Fe>0x6CCyP}{}+EB z!q|PabSEiq4U7hGSy#?|0*s#pwUM zG3kNAJS=r9caY#h8l28cXF>2sj7=Se{(8@f)ZDS1L;BCeo|iG998XAQknM%YlV?l0 
z~2r}lEHNBEm~d12b+jV_qlepQTcRnGJ&uSl45<)OpXSn@S$p7o0bHduic*pX#$;G84`>qIYsEF|L)Ko}u9+{q%t9(R8gK zQkRBVEAreby1egmD$BI<=@B?3_sN%hBKM=`;P8)l(*rN|VwWvsd=V}~S*yCU`|`nu zM3lQ|{w8)Dtgi~b>xYNQUGRhRZxLI*$AEoUWee@gK&=SxPpz%2qD|eHUHV)~w`t^C z2IIq_`iaqCKbqd|ijwf34(0Bo9BR~>Z*0$drW?eRye%kKm_@vUVYlhEI~l8E)QQ>| zr{#=YP04yL&h_Sfwa8Le8+YkpB=4+(C4Fg_T@iV!)|Qu)B6D|RP^@c>4Cj7iNq+FG1jrtrd$4^O`Df8CIYQ2b zYMz2!$3K!S>RGstf$4bHX9d*75KNB0?Acw(c3Cf3M@-4?znX=(kySvISgtwdJ_y+s zMvG_a(W;^VB@g4d24btfP; z#kwU}B>O(uEQET3wI#Z9$yEDL{5>S?shZm1iu z{Q(>s%?mz)XZ9Jr2;m}n7;Y?YYXrWSeyNw6_xSJg#CPy=JPY+#hQY| zMWA^pMs(0uThTE0fG5H4Mb>=PUzr`Iw%}57X0Ll6oXsxPwvfwPuC0YRUiRXa((Y^I zYJzD?@O2N7F%^C$(5)|9B_Cc}{HW$g?o0NDbxC}FmtFsWcHTRuQvG*YO$FWKvG)I0 zv(7kilCzRex()rtV?c6-l_$?On(q=Lvu`xH(Nd>kG^@O&kF4ohgb&H2*PdVO3&oGH zCV5jX(L&Zqt`iYELO)roE~MQb&a~{& zzLX@l`n)Gzbl|DW9eu+bZz`#?qpKn>-syYtBr#)r(h zr_wfcp!Ol#sqpK;4<6r84d|i<8N=NEC|0Q^~bX97c^~Iab zyzOcTb%bMTHe}ayY7n%6-zk`w+{9}|+|-fJTl?IlWVd=BEjJel^873-WggaEc3ni8 zCa`brz0_IKaFxu-yW$0LV-Y=`*ZOkalx%X38AI!1a%zywbwvf* zW!=Cx?0FVzRdwYv;?X*1<@xl%P)KE;Dydjqks3<3-XUsRxqu ze3HFH@>%9O3-o+Gi)3ehYIGKZ>z&5@Pn`b_zBkuK@_}z6(|@EY>8kqX>90cKAAhfR z^(^CJa_7Abf`P1JZ#gls6;i|Kpf1S2c^(^bkYC2X;JgTVO_WnvPzvN-;go*d?*u6-X_rIk@ zxvDHyo<#ThlzK(Gu>LS8e`#F$4T9sG|17vh=6!Mst4gkTY6|3fXI$*qscD*^QX@ z-dk|wdN#QZ;)_Vul&wx=^C1PF{}OT~#mwCKpX7}F{EII^c|N_~SnMB<=& zB2;fB!vJ=g3-e@Fo=BUi*qXK6$^0`9JJ#qeJDqbsze_Y6OV4F=&j_~{376n+Ka78n z-P)sCAC+p(sMRO*Af%6HYraBT0vohu`C(#-V|?&i+oSBLe`775Fhr| zMy#CYSBuC~)l9M;M4PdB&jOBltMk1$k+nZ7jINu^;=ZEq>H>z3ks#~vug3i+oiz%o zjkT30EsxS{G!2uF@+f{?Oa%Cmd^I5Ox^^bG>TlP+tH-09--UHMxiVE~KV+j~TIg38 zl}2LP<8*GI--^y$%NoyPNm23UKO_2$u1%JU+x~Af2_Mc9A6l?oT~ghn)t!9cJf8Er z$X}0j+R^(B|2xqn49K(BZy=wi$yxE(Rh&7Gw=9In0X}`^S{4)wbXhW$a42OmYSKVN0Dljsb;%f3unPQcO6yq zmO4rovDzwUWOT}kvVSnY7$#)R`~rF;%fj2Nn5#~3O)?|gjEkqT)_b_O5$YEiJx9|1 zD$(pBQv9Kv$vo#4|FaV49y$(UuhjZ14*g&B`3?L0#D}vB^nCXoZ~Iiou~vMeEB0p& zGSw(C26AtTANT6v4Q#oKryS2Ca?jq-wIAS3TYghYi~mAp9*J+)_G`GgyN=S?yf-`6 
ze-`OaXP@J>oP4Y;p|o8Oi(H#KhV_t}0LxctT9x&Va-zeBs=dFrAM@bJ*@;Z5)gB55RXPglCIAhmdp< z3I2m>ajpNPzj9i84Ppx%8R%FKmK}$Esbw{S4_CnbWOQ8*lb~Ntoa3+Y7a~i63Wv@-qqYbR&7p2Vz_-dM^f~2G21( zVVX$Llnlq?fA+L5H~RI!^X$)fi|yj!|I=PF0w+s$c9QN8sj~L(5V3fobBeI-FMKL1 zZ+@p~cC_4D7$I|)Hd6E{XKWlo<_RQSfp@+5djF7s7RB!g)uUF zjq^V6a*`atYWr)qn>Kf6D()cSysg!r=-Jf0S5wzt?ut}k&-2@N@n$YR-c0MBM!;|A zkWY*0b`JbTh=+%;OpAM-}_S;w=pbh)^mI=y)(n>W{aOH!r~?|ZOW zc9UH^!fed~7zpABOoq;9KmO-2yelm4UoA^$&VFcMMJT zz?$4w=Kit>#$`@k9^*cMz)JRNO}b?AIh8~$wVpimZS_AHOEb=Y>aX0jWoh<8gB{D9%O6>@8kt5&b~9vobF-se_;GHL~{?A6+C&O&=j}d z_b=~zJMs3skIG(|r6l=+l;iO{mA1CCZE9r~EpXs`x~~-bYKUlW;Ko;ac^vnT)oa#B zbztvP0g@l;Bvf3F&#sO(d|-KZ@_~ewS5pQvpVc1oJy7Wky;-Fi4i0zuHATj z34Pb_{cVMami6DKX(jI*v(j)P%YVi8dG}fm@^7>KWLoYZ!SQ-;?YaSC+GuS&A*!U3 z!{fM@nnlUt(Fj&sNZ;LvzCj!*E1nD^aYG~XzxdHZ99xDD_Zd;Mk}`b`Bh5J0`9~ie z*l@Jj$V+nr(Ynvpf@6aQqb%-yGwZg&Rs_E_7pFsI9^udEF?T?+%W z-U3JSKL0$r%!K<3V%x2bK0^9+eB)>Fsy(z0BWrtppLO)HQi} z{U6^r0|(F2$28n|!qvBlJM&obNLU}wQp@$#ooB!3Uq7}Q%u*MUA=yZ>YkdNKKBtAf zdF9Q#JFA>7E!;1B&7X?m>^QcX>&!dYwOk?6o#U#kZ~788^UcmWz$I&qFTwaLjgexVI6kgLw;dCxnyGJEHZV^#oF^>YKS(&ip~bJS#c^I;nCuiv6w; z!;>BEcNW;6Ei*IgE-KGp`QNbp2CY5o%q_g@Bj@zShF2hPg+7L|&n{8vACaan3zZiy zvQ94k)skCq%zGM|_br>C^hpQmhiZo3`q+3M&#<+R_ z4Ig2Hy!{XQmDo7)S9k8ejtp^pAZ!jU%xqfn+nT=pTpNw(awBFXYRxle`hve6iSM2H za4Ib9jbGcGHMbDyZ^o^I*=Hjyp5lM8z%pL)wb)R`c$QpuyK~#WB69rw_YNP>E6+T#GVxgL)W_kA=>C;a%Yj@MRW`xodu56-K^isbJ2Ym@$D-0DM)Cq?4-RbNdUsf#;JhXxUe+R12hPiRsT{!DKdBN1IBlmi0$p(4(ekHb)d+ z=Tif=%G2;$-2WtZ=GkKDhoq~`Zw|wUEF?@&>%AIFY={Sq=WM^WTI+T}Nsd69+l(K%Spf!#|U&gE>t_TsmK? 
zuR-M*Og@q|yR%~|J|~~)FRb4jtJae&-kyAdSMY~Nctn5J>O;emATrh2_hQa0I``4* zoB}^i7YF|0Wxx7sch;YoydrDQPt(>)%veaPlVN@iEz5}hgT;>D*?t54dWdPCkSaK2 z_svl_+7wDf%r3L)V+0>P174S5-Oc*QK9bJPtK@%Hy**g)xKh~m4c>Jn!{uZ?frn=g z+fFl+#d=PB>TkYvn5#>egFoWup*(2~dneOg@^0Kjr&26(j2^G#13$YWdBC!^pebKY zKDAERl4|O2X}>S7{XgFBJ?hJ;>l=P)Pb3u~C1sOH$R?r^Noij;MY$vtp`x;@NJY{{ zWfRJ#WLsRMB9bVPP!vVd1|do=N^!rR@2~rQo-y9{9q(W7c*Z%-`klw`IM$l;JHKRAs4Bkfq5XerX&W8$^uND|euOp;)@w3`bjOq$+CP~u^k?&bMBINcF?&PnXz6oE ztTunl9@SKsX|BKbF<~K=%@IGgLG5b{pT{zH79!{>{Z%m9EX0)w?3`yW$!j(YpPv!0 zlGU;zOEh4qfmpn^>rzoMnFXuBAkPA?gJELdxqgm^)Y*Qo#-ILVsHUat+AYdvSHp8Q zJI$b5Q{Scf+4W>BL;h`~&6~$Vjm6`P9*6m^sqrZ9f3xO3E5xt#U2;oCgR%JgfNS1% z)o@Xwh$HcwrDnOgzj_KDS$q8_Sr+s4tSQP%mCYobjGfuna~@=p1uDA> z*J(F7o15u7YbajkLpPBmwOPlR3l$gRYGPR4H}}KqC+Iejg|mkD86KDQr&F9i4_k9b zRG;+8gkIk)qYU2FWTQW@Co4v85l@O^@)zQDMUp)L<$-WWuCOlJ%NW$z=d3?TTt0zq ztNQ*$HmHly>uGfaNjI?cd35Zfm)v!qPsi-Uo{C*rMSV5Pz2<0EgnlFjq<&^R;|8|v zgOj^=j;Cftar$P*O{&O$!#bylE_wf(8BARg!cIw?(6?NXIks5TIA0fsB!F7%Qx9i_6$k0vV4IaYSZCG@ZGd_rzm$Aet!;`rmR?5 zyn7B4&(~*TwoL`LWWHa9A79boX%_j?zZ?9YY{WgWu%G$HYotmKi}YI*s@c<>9Uj-3 z(e~3rs>@w#RLFbaiN2}G>*wRcXSB*4Nq)99WIaqH6^QYt->Mi0|ByoMQ?M+8}vS<;kh$q+g-Q$?FkPO)g^}e=hv2AkS)#qbN zaVc}@)aPzR%7@t~`=Rrc;tXdz&8t(1;Rk-1-2G>fCiv`EKzf5Sv*Ww9czB1==V6x2 z-sLaMajt;CBYY?O*7D5gXuTh$-!nzQpZsL~Up+D^9f>_^Wt?Mt+lXBbb=Jx>o`hSou}C~Az^lMiK| zMQX@=sE6~hs;(aEk~L2}Mw+L$@%JZeIE#EA;nO0qUsvePg->#|ShAmi#tlKv{1pA6^3ygqdL ziWjE3*!#Yj2)S3W@gTC5=Os1iv5JhTm(kNV!^O46G`koIsne5ua4p%R3hh^8@6oJ# z477h>@gAckY5i)3rL!yyF3!ZI64|F}{YZOO421LM1Dheq!4TwObQ=TaqVpiHYP% zo`h$}`j}qVkm3|_B{P4j(cKQK8)=#qDQD>Md0fw1p^VC1MBU_xuWHt@Rxj<$JySvV z1opZ>T+1%@)HX~d^8R?V+VP!*+5ZOP-E_aTMV*qyiCOec>q|w6wZ`YH)jI>C?dhNV z;nm5$o$SxD=f3{^h*XQ9_%9N_jGt}rH7lI6k2w_$S2;T?RcEvBjoNsQS5$NCIP&*| zYSxuaCH-4ieHF%C>-wUs-kVPMIwyDE$%A$_mLy-)5BU12RvyKXn|$9EkLHR7Py0>2 z*#)9TYPa1ePHlm}4t5^wyodOEWr*F%qHS^I7p?TgtAT9(DTI>Qvo8$)Hb!Jm*M1Oa z%63naFg3&SCcYH@oLm^2D`4PWIFjcO#p!uHYk$pBH=1u;<@)Jt-VT2=j^Ck;jaW5S 
zD+d>1Wlii$-o`vd91g>ij5t|Uk#(27U0n?mHW@YU;R@RzRA3r>~UxK{YJ#P3!CrL zU#eqw5IL@JMO!}cw>y?W&Q3pzurS$uPSV3T*X6nY-E_WO{~PrFwX0`|nkAjNx8ENM zjOaqgWE`9e_2=N9+H}9**U#9PwUagAnf0fs1AitSW_Q)CqW-7Q`2e~bpptB#KkEb77`qyO8;S(k z1(0V3Q*m~LK9XzqBJ%HTd_S7bdGnQB<@Z4|b&v?GE^2UlNlO|%-0=|#14jo({f z^`Oz=Lq82*QHzIF!`ZC5Y)IQTF)KFE3j*5gvxW{R8Px(`<2cwEnXf;+AqYjhCDZGrizfVGQ>ZI#9Z9#WQp|Z^HYzTdh^|O5XihMWtJ~foG+au#b>elwc>h5G)M4znUY6_!D z^v(PJK4Qx!7`F*8yJG78Buhqt1+H05=H`&xsK@NLJl34-X6-+tpS_HEcVpHRY`=ho zC)4wKZT$j?(RzB(*;}z`6e%wvUFx1)%ir>3E_K9G@#aC^avn)Gh;~^sll+xqot>TX zecAdml6BI~Vlg_atPT{rvY!7#yjrZU>8|>Nw%bUN%GI+Amc0w|<>3D@nT|IiUaYND z!<}95odzPxXS6A;<+F{(1K9g@^OGH9EKAyQqE@Oh97*mg$TLBFxP+{eXqgeaISKO~ zE^kLq@zX|IUpe~&-qY0gt<9MdweIl!D;Uw4Eng7jYC1D36JO_5i+E!4qNR3dGT*#L z;!ohYgG`n9OLhSM#;fuq;(I7A#>NTaOif2h;KwXu%3Tm?L5^D5*`0-U6UirH*JAPN z7R*`Z`1yKWq{XbB9tP!&JSq9E4ilX^LHZIu(_L{NPM>28`O)V*v-pivC!6nN#08dPw$n_9k zDH^RJdr@poKCF+mQ;$89v8xg*wC2e}MUaww<16P*(PMTfW@qt2?D@m_<#F&beNBSk zMs18|xpi9kSUf#WPZQ|;j7T<(t`9jk@1Ro0FBPhW@zE9fnoZkdVAURK*YW&hdpMPr zCCw`rktg}X;xlt0bC|1BU#+~qGsnM9PZf>tdx<}7@GyTR@6xYYd7Qtda=|U^UZ0Pq zZd;xiv^S#WeR*O^bt7`tLneP%Z+#_ZC&$MVkW04k-8EB18NT)q&pwS@kL%%7 zXm${rb3c`QhU?hrJZS9BU%4G?N8@dtsbuDqC*66AaggyU`+1idIos*GF~pi-YZ2!K zl_BhO2A=ffuN&vXBls#?j6-IoWBfztU}Zq5EH z`O8^!o>*YSQmrO$L1`MM`c^9lCV$u~w1342a-|+pOK%b$R-x}HIPw%7qwz42VGdu- zv#I1odkJZ4;3$2pV#QP0 zWi-B5(8g#MOy$81wE2;z)`n8yhHk*Eo}R5%UUT&tIp&<@=F)VfIvfJQa&fg!(AF$3c$R1>Ti!KwKm31PQ(mXk}n$oBw zTW0?CzVUasI8>hv^Va$}*Qc(=w~i(k#OJ&wHF}dL;4rpmOS^kTj;zsNNVlD=eLuec zgGZ}qmZvj`NV#u&7^_lSD0SR(2l}kB{e6Gc)yGWi+XvQn`>PQy>?1N{wC%)}(~WQy ze1993WSzrAICUWPY6xb&^8${w;fv!~XHPafo(#z#`lFUw@a}&bTWX6!6|m?6_#KRY zPx7tHjW^@O;1AgGRPvrdgLB30<)mB>nN%zM8LGLDezY(zNbJa}rZcc1S%*&3&Kr8K zg)6tyCmAIxvtu$QC&$AiIw$@lPr!7T9?Nni$W|Nv!*T0pxEkB$jjYX@qtbHwMj)3VFOvv1MAo+*rx2o1= z=%Wk7szRo}=yDAI%xv&v*qxxSfmpDeUhjxAsk68RDl4#JAZ^Hx9gOXvOJ(*a^z)_{FZnv=K^Zi1(ZHeXO?cWzmipm3L>U6_Ay- zbIs|KQ?3p)lIQkY-=}8fnLPO&eqKsMD2jve>6v`-Hh(4O_)YNt3bvY(J5A8QZqCEM03hG&*vX;fNDkW7-+R!3vAJ&MW 
zD@DX=tbYgQUILe*vf^ide>KxeBHpRm z&hF+sf4_kz@8g@QaLKdR_9Qw~yx9byR1bZLi_gzKQ%3CFUI`bsuB*a*8C3-grmDlMeG^y6foN23!+b+@zb$Fr>%^bj=e74>$I z{#}2irs|iX#Tf0^#IhOU$B~%72bOQcnY%^mZdmjc&EKJGRaV-hjl0>jBc!r>yN}T= zqwCQms%aj6R-ujLe*2F0+5I++9ru9p{i59nT6Bi!A-t;(%TL0Gr7Url7S5o_sh*4+ zuZ6*UtrPBMRa9@NFVWWxydXJ$vLfSh8gwT01&}&bt8cjK9jy)2_73b?##@ue_ZsrH zVUfIhpFyLHi+AwJYS1|qzu)tlI+bHFzXdk^>9;$}jS@dciCjf!S6g5If?H}K_s5K# z*#9J?*W%8RM&aae&;E~OLQ6HmAbhv6<^ZjxicdL_I`8+klkFE+T+D_yvQ{y+&nz+3 zT7TdR+5guD-v(h**+M+%jtg18QkSNOux6fBHPXXl&c49e*J^74RA!4Whxk+%!he!G zZ_ujv{$w8VA{}0L-bG}&$qejV-q;ooi#hKM?IsghvUIJc#b-3y*JzRIAH_(zA5L#` zRi3xJN{7@fYt1^f;h*Q~joGvnNzT!G36V52)2HB*oy)68*HIi@s_ly~=oceSvf)gJ z#~_wzOtS;|^&~AN17-sj+~D6*MzrzT=*LfM8LtNEJG-^^bl&2^C@>Oc*}1GXeXeBM~UYZSbOmZ_rrHO>uS@rUX7t@g_Ch66C> zOsys}+7%e}Ie*RC&q-{R+H{%W9tg4I4oIffPidTIJE7>yVKONVc1<7JC*f@JuC(GYQ~5*cH5~%YRQoDX7%4Us#?HS)hP+vyBkH84?Gw&Vw&tRw{K3Ct z{k%`g=6p3zcr%AjWty_ET3Cn+8Nsq@A}hTo!6Y;O?0jBKre^d?o%Ig-IFOu)+mWC) zCa)_*_M`FQG|}j67JZxtHRc25#PuRRXXR;_ktaCM!*aOm*K2W^Sd@FjFT~XBTS@J_ z8mv1_l(XKH8rram3AN-FCLW;kyq?v*OSdLdF^}ooE;(6w3(b-joGXji!9Q1 zc2*o#u=)^I>Br~JW3M~(aT?APb5%QDm|S$psqlgbRb0+CU0Zuu)%>s4N|Nvpr;uN9qVviUsmHLIZyad(rf z-$TeVjx5>BmQ~kNX|}gxc@xtG_SqZKiWK)ltB>~bbm%I5?XGJ(%53a+-mpXK)mXD1 zfBb~jy?(Qk5R&4O2+09PtR&f&arDydV)VHTvbN_`AH9~L=IcPzOOj)3D< z`X^IiJ(WaOo5$Se-{;A(nXGxDH=HINajraTZKK@| zbJJw=8$q)c?Dhy8R$=wOaO?@aWrsp-Gpj>jQ=4q_Xm+!flF8>>=Uq*LdgQCX!b{om zCtN6@-Aj0X@-TE`w`6*}U;C*&pQp_q>G>AXrLuTZos~}^bw-&T&Z-Ny+z%e)c=E(f zHoNYu{XgmUSRta-gIP%&yMTmmnAwb=(FRv$W!PR2o5eqd(=c}-Syyuq>644(Wo&9| z)KC7ZQ`xN;>(;~AWJbx)eyo!Hv3Zl0T;18fpUgaCMe@vtFE_gW%aQeBZzX?StgU5a zK2nc6oHfvl;3fJma@~vUafOJUiYnJ&&sjXW18*M=k9~|4FF30V$+yyMFFK@F=V^R+ zANUT3%}wU;&p6&o44^pE7RN8-Xk zERdanome;TN*`wP-C6$HljbB>-6N7-hM^B@w+-vP47t3g?Lw;G@$yy{kLP3`e|8(4 z2dR@-xIONTqTkP$n6=rN<0Rusa-G*8*?qY40yMMY<3PuTh+%#8zLRC=;QAo#WG3}1 z8E$0BmT>ui-CifbMp5xx*50N4+|Onu{!|<+1+NWcJ&ktHXyr=sjdbM!Y&=n)!!T+s 
z^j_r?my$Nm@opzmC0M_UkEumE+2}b-J9n_%OZb%M6xC_9Cq9*Zg(q$B^^4?dN}v^J!B)8~zPd6gu|V%P|~@)R`hkS0SSl>#?n?;4)+v%gZk_5%FL z8r6^R@I7q1+sHBoN4My!604>nK_`qp5sR{_?Gsl{G)rBh|8GT$t7wqb($}-y5^XJI zkD)XfFE(wYUu9@!U;ku2G?oRj>mu3WE8x&Ftu%o}Y1mJIPqLWb;QT*~M6Z$TJ7{KA z`PDo%JJ^%WU@4zkAp*>zZB~U>!1?Rw+<|VFwfM`3zFx$gW@ffYl-S$#saCm!{Zr4q zue0-{Ts`w z8_A(xNsD=>{t~oOfjv2AdGzA-2qh+ElU}%)eK%)tExu`GaH`aSo%q(Blf9}CA)Xsx! zHApL;W9#X>V>RZb((@|%q(a^SFkV8ZX(B}`2+c9Zf9qK4KpsN#dJuTq_uu=;{<151 z)t9b+fOotL<75Dy#nyS}`U9>tgiO}E?*+{p9ZO~Qyi4DcHrLVebY9nw2R+JWE&P=| z`M3JiPn(N<*I%m_6|8p>#-$d(r~bN=CR6Es4FsR?ITZ!g;q~5yQDua_QuXLd?UfZ( z@?3iaX;T*@PelII#>b*#FQ4-?u1+CtG-auioPP|w@1tQ>r?%Hp@+2ldc`~Fg;v*B- z?jllt1G6t3sm0^6#wX8vZ-9K>_V@Jnx32t%9NBTW!ndcuXqPie!9QyotnXpL(S=$fMVgfei-m1DDf2U*W@2&Frdft(>hpG0l+L;Hk z#tgRXhat(&yM@j_>TwQ4lZ*9q(M>%hawb>#bK1%~-1#Cv?(B=f?@tlqN?2rH&+awQ zH9jplbbNti-CTJdE+pG;q*_h-WJP*J{7lZjXUX^~A8pKIW7XN@AmLJZ8PdOK84=c{MZEg79JhFk7M z%kZLAxc8nIm;7SgcxO9Yxya9ExWsy4OiOrArc0^`WmeONPtKx2c5pY*Z+}+GUcIv7 z$PoHuHO14k%i5ZC&P{E_Yq9qz=cXR=K`fQ3>@`?&iFU@5u83C3ic+aN-$@*4zylwp zPeVF)f^6!!zi3W56t_+w$13sabk@wS)~qOg-*p3w>Sw|vJBQk0NDYwS>%~48f8Dt zk@%PMQ!(HN=dIuk*~vP}`FpzZ3B2Ej*QR1}Tl17%B)rWrt~MVXCJ+nb!3EZ zsu@4?uBQ`uQp+rrzOtiYch<5JM!=)Q&Bt*em^Lzd+*|z4JOAb|{e_?W>iAL4=tlOE z#+Fo=nM&U7%qu7B`?+`XLvy{tF-!_QLRJ_)mfNms%6-QSU{`%K2QIj~BO z{LDRKizmhY4!C|1->%44{=trnHbwutm+}Ss_T{C$=(k$Wd-yJy8dK9Ym=?v@{lx40 zarJ1erAk2FIc8j2ERICSl6ow|@A|OfD7<(8Mr-Ij&e$>t<`v0M)^9@?Z4nK!ej;^( zl80?J5B-xpPGd--;`HYwK^{EMd`=+MnikCVPJ2NP8B` zeDea{U8jI~R>UNG&sVH;9WAn7KlMOclQ30OvU2BsIz5U5NBbG2okQWcqrlzg&4J!G zyXnpbl}UQD^HYO7S-d`nMrxmAh2copW_8+4WOn}o`YtELR+k_A)2mo zWuDlLD@dB`HF@HYyqc+59h*GqtPLX2`C>y&HqEZp-wK?%6sOLEa%so1FaK9~l{6nX z*Jyl?)?Y7-kR3>y{f?!ekc>@R3)kjJT}#pqb5$#{EWxUGSbH0e}dH`7#`b7%Q?X_XzXMd+C~NaF4R$V?654f;Xrd&{ICslQ`0#2Cs~oXjSa?PW>!lk z%fZp`pD5zzeO(#o9IKVom)la93ytvoMKm5QhMn)MDP-uxJ{$D-J^#7J_hlUU3Oncf z_kJzc)puXz&XcE7aup-1_U<=txuu&rijnw zeJWP?_rC=0JVGtvCmV2gWBWe{=ji|b$#4@ilghS~cFPp-+TB7P>!#(j8yf`k|G|29 
z+qe5J*B9|!dEf1aqQ|30Ntm`ZAeJIqZ@8R*&|?w3G$N#+<9xWb6Iinl?nG0flrv4w z`xnqZkoz6ZrH|x7wUD!($ymzIp2`(Z!-%tyGFuFq;ZAcwqXW0h#`KE>ixiy9AbTaM zUTpyCiNZH>zJA<$fFO2@kaCwnaTrIxn;VTl_i=*e1VhM!sQoZHKWWlF#gyP_!@)BG z{&NPFR|@pbX874${dGZOL1Fs3kW+-{Nxr_EtA8%&=J8I-9)D|?SkKKia>?(x$R_Tv zRe;^b@%|%t{plsf4wLBru~*6s#h(B7pZ{&G*z^C(UgiJWUa?iIRh$KO`^N4o`}=-( z3*~)RqF}9(g)rFGng3#|Y!ZsK@_6^Gg3V&BGVFFDi)~@ISnJb*t!lE&w=A{we_8E+ z+wA{Oi{1Hu&}RQni~YaZYZV;J7HqZK^Wt|!Ot6ZuX-Z2>6b2vjdnVRQVewP_o9Wdi z7GH_e*{WX+e?D&twRs~0rIO0CM&sP1<3}Neq>b$%y0{xVu%5uXJES1lh06b_a zQWcvXcXbxEX4%EQrW-wZNEXi?ie*n5?!I?}S;OT1C_~-3LV0Ok^pZ1DZm$+5kHW(# zVsc(KR@C3^T05T~jc{In-}T^YXZZJdY;9kdc>YA2k))kKr;iLuWenI!&a-qNH{lj1G_bPrZM*3`9+ z_AHQSC}f!Q*adD*#>jiX7ylt+WV>M5@#PG^M%Q0>nO zbJ_QHz2w@V(EY-hd6;((JHL&KziIy)d`{8EH=xwqq_v3adqepym=(wKgJ}MVtFp=V zOR{HCNqK!PcU~Jl{Fc$+G*Whh&O2CtsB@oV!=ZE?s>P?Ym^3^q@TH1xi<5sMoK~~y z?JR$a5$;su%QcvKh1=*92)M|#wXib_cKc&x1>DK2g^|uk$;6Ig|30qC=K5)ljp7Z* z<5UxJUFPg_p`4e^YgjyGzCVW8qt5sVfARt{rQ;^D_yJ_w2TCason_R^d^3u5vz_oS zx~HuyfN#empDDz{q*cnM=4^oIuZ1jR`W@>(=64T6;wdd$hYKleo%vj*lCMBHj}@Bh z>sOZA?ATaWya|nLS#O~IAWOq#f8@ROD#PNfE%L?5?l9|Bx!Ta4pR(lFm2Lu(X~|_4BLfkj42olYJyz zmXhKN*u}z?VYeEum*MJC=VuwpTJ1Dv%ccA*>B2{gP@l2n4y~pfKtEUxgKlmGvq&MS z2mABr6uZb)xM}7pdF`GBS9zJ2L?CT=?@uIYe3A2CrB_mCb9YO zmPS`*m-^7IWkyzk7apXCgYkMQNlQ6XVR%YvV5 zb{j>n>uCQtJe!iWz1EXqNHPie{u=i%;3Slkq)eNq3ob9IoBN#NkQW-V-;@gZAZgycjB32y+sN zex*(3rwz6Jin;mk#?%s|y`L}Tk$p!a;%|kS)Y)2G#7|S6=sEuN23x*imX>1N<#@-n z@Ho=BPdR#zI2X)bfKOgdWQ%Y!|JToW2)*`fc#b14k|vM5@6lrry^RD(S-e!QE*0=DfyS|c$ zeH9O{!*{chy(G{3mHbbU`Z-bbE#BJK=zXpuyF`R7EV;cfuKg{dtTT#LAX&{84;ay3 z!REzmlQN1OwU^n{w8FVv`9w$8r65U4P7kN^CF0aNG7h8HV^}m7-g9C2wo$sHe>Y>` za2}g6w3Bd&Ni?;tV=4^uI zG5B?}yWESRmW?tQM{i@zopcz$)3dm85eD^y#@XzUqM_N^|C?{VG)moW?8t(o&&04} z=uwAEOW;3O-?K%gY(442=6xXi1S#HPyJNJJ7qX>zU00m{2O9GV^N8)RS;9|}TxzxP zD9e$v;pR~h@fsddn>5)Fk`j#5@Ggs+=lfJjoWFt$NlP^!G9MdnUU%IPlK%|7iLgis z)hsn05ofUYo<+gs z*e3D5Iy8%k5=kO;ItER}rPlDt!iy|F>VZvl^`7x9%M8}QpT#fwo~5bl!9kHM}{KwTgG}PLg8}fWCoXoz)i`p48PvfV-r^- 
zRZWU2XR*WRQNisZDt5vk0u{4EFa>soT z_GC=In>62P^M37TVdT@=`MS`?@o>r#$!UD*C9RzA>Y;^LpKYoaz`3lxN75r@DH@S^ zt2mTdT8h`M(Rz?LfHXz<&k9HL`t2Gq;|o!v7CYy)(C=_fDbSCc)xY2)M{6ZpQ5O3C zF!pNXjJ&*Bpv}SbS%itrNtKk*ufeK_o}>NWW-VRtAoH)h%1oiIQ=yRZTtzTvzVZDN ztV#?n%RUVXe92tJ2ba2D!Lgm_9FjWQd}_Uqi4wetNxcv93=vuU}GJ}2n=b`j-W{&fn( z|0Uwp)7Ir$U7)vvp`SbUe$JeOQ->NS4kh&@*DPj*>-Eskh`zs;%ZM;p^tfJBtmM1V ztouGamTTi#ak(-%W?;u?9KC~2bTCG&VUrX$_?Z+p`SuzJ{(uol%=fdO{h^a$1DSoL z@XJOcYKlBmHB0$~{@?J4RU*PnzxT3r7I5ZOY3>{U*5)9yxTGqrNWbH>oYAkmqgUxG zNk0dYb*?M!Cdo%EvRr=yc>PG1|L8g8R8DpME&4l| zrPn*RH7x%rc*XuOxmF+h@xIoOx`;jQH1Z^EdLr5m7?GT62Cl#&Uhapfa&eL?qNe=EdZ$Nr1NsfFaq+-j(Z^C0ww zvszwTeFmB9X>l&C*3mIrOx_{w?PA6`BFWWkRvq8-WU&thWdwgq3lH+8Ia)g$Zk0uc zQtYq=s+)1^TikyOr*^UaY*xwarlI(@5f2XMU88yO5$)O`_~A&U#QQw~23awUWh(-(vq-3`)tK zEqr&VGm|PbWlgs7uUGMLqod=EEiaH}61fvu-=RY@Uc9$H8uEorj_0}QBCI+E!fnWM zh1iz+n|W+Ch&Pqj_m2gHjxb|BU-Vmy3yUCgA&W=uTCiN;*jW(2AFAil?KVAZ6&>e^ zE|;=&w)pMfc^!@T9kC^w42L=+3+Aq(>pD!{=FB8Up9Q7$q}h(+bxAwjH{ZhG9kHYw z%~ok6&$_bUq6oH4z=KCg^DlPo3xQ_(YDJ2#9l4lQKfu2%QR@r))?^%5@T&KD+V$jq zL+gFSi37DX3aUv(mbB@SvK@q;fpaa-+qz-PLfpx+@2P&qxoRy`8;NhnXlD+oJMz|c zyg13so)rBvdXD$$DOPUGa&vjalkAbCNHvTxv-HuD{`--9B5f9#hi4PiW#o@0SHkR4 zGS?IXs^R)v=S~r$`t#`{AomYkJMxl^B;JPyUS2r=Q*B;KhB?~L;)yK!PFb^7`1cXb z_Q#W?;`)_sTCjK$)h^@-=V|vk8ouwZt!#dxUf&e&u7Y4@2+LqK4%T(G6AL~m0;TB4 zCb6`g@o6*rd`9b=vHmpw7ja}F>@p_ZXk@v`xp{J!xq6;pwWC88R;Fy`p>!S$xkZH$ zp*C);C20r8%F#Y^w63_3Im|O+OcfsUf!>cNQ_@JJus}0oThgS}f?zwif8xqnkV$%x zqYa!CJ`uYtoqh$vV?m zt{w>Q2k*(9<4!%KFi|lro`)G(zF(@vR3mx=j64Inxqo;|KX1D_ z#ds!SNbVJ;x+c%Lv&bQ-P%0E8UgOMcmw%5eSCD1|X5FK$Jt4R!U(TX{srb5shMB2n zbA1Y8tcPmu1iCr58#X0%$hmNOmK6>p$8gpe&)UIqS^@hWMv;Bkyqy*+s&pF*Do7hOp2aX9cM|t39qTdreW2 zY_}Vt_Y>XI=ZWA#c1ZHkqI6H{qdGkDQ79K9XHrKtX1%dudFDhb=#Z2?DZf*cUym1g zKEj>7**cplvvsR3-yVhK@zlJtdYraTldPN3C`;v5^62@bNMiq#0XTu5W~{ zk@7~IEUqnuO{Z!10T$^ct~|s4%JHp<{G&gw%l%o>4phL?sy;7s>{O#zifn8b$Cm3S z=?dpK|2S4ZhF?r~{fpX~46l?j9ze^K9-c(5GoAYxj!kqlsa~^mc(dLc8ZZCjCtHOQ zm;c}uEBv)w+&KUP!uRLk(BBAoGM40?FNK^|o84sb_`gXonq0f*XSL0*#?x=Jk?KHl 
zH6U{zb_sw@%u5Z%+%XKB>BVHZ|XD8NUw42FKqB8 zy{m}=Eyc9@ERlIf)LGouu6dl#}Z9cfOQMJ&6Yh?x{RZRwun-k;Jjn?#csbpmdssC{>~*e=ekgZ5;8 z_7kZa(|w$8Pt$8hmfbg@9hpOXZ!yFL%lYTghZ0*h<#CY!5g@b@L= z?mran$_``gKOF5Ue%wj_8}xY{cdLtB*zwWm=8t9a%PY3(CmKv`V$iK zAaemkYmwvk>OhUwWaUZ&R=Ubb%^-!2d>wqN48)5?b`Ej;%f5T#!_3@ z{ttGV$9qnuMHgt^AvV1rf+Z4ViS~UE`<}+x0Qr{Rq{etw+cSJ#ja_*n(7;{IXS7}< z{(Qx|7HBI^Eg#ljo&mn=NP96n$w|ub(>z(MNt-+s=t`Ph7?DlINl!79Hs`b2!{!8c z@xwLFdKQzCWan7z&%uGOw4cK2JK?>{==M4s#=&5gPg%q{1zJfV_Z6IvgyG%v{gU2; zpwf_BPZzXHluIGi=k$@3rrq)L9dfNL_`@<5&bEqd!Tp+7ZKCDpzR44%iR4_34=Zr* zOR@b=m^ERiN=Cnwu+D5J$!+)GYu`iQGx&c5%R8Xiti^Rk^gPo%sSxFcV#gz_Kg_ko zVD!AWb2U`5CBgbid6RV_j^*OARbRIkX4&oipiMdbG<1dJ{p=mEZ zdxW{Jo^5CpO%!$M;#Gtt-#xX}7ZV{f@lG(npvv+(_5Ion4H?4fQ!e)XNst zM8UZt=?z+JK%?jBmif>)sGUsyfvy;#|K-j~a=cCKI$gU>@a#wUFUHaocr7mu9_0V- zthpaKt`lh%i5)4em*Q{-u;Z&*JB3c!5;2Y}qn%eBBY)KT-(>6rk1SU{+>s_2Rs&yZ z^XSrIMqRwz2d1^0mFFI(!J-vuH;E}dG2v?(?Q&MOsh=j^?1d{QK{LzU>yqjwZC--A87t6P;PpBVq#>eovk(W%l;}JSR>8mdwplGH(xhW=7Xagx!;b zZ(vRp7-!Br1LG!>DVwE}9AO#siirn#oAN9zPc#-~r9#ppPKHjBB_t;0ZRK4!f3v>c z*6!{!F_{(S?&>1^OoFyz+Pi}dPt|e?`Q~2i5?+;*lGPopOuCH!c{ZE-niR7x;b=A? 
zWVVuGyY=YxwUNCJU;YnCSK~-ZOis~P5`A8-r&%(o0P-d%PsvXpN7Cb+L5F5+ zdp^WoC3BXCw_~BK%SyTG@35){cFxE7%;i4i6FUoY=~vtvBvt50?d93?P&S!i1n%VO zY}uJyuzzEHHRO#&pmhmfsK=LzkZ-E9Geb|IgUpAUiP?E}pLt6ocFl}F&$lYlWUX_v z&Fw6VeTM($ndoyQ&hq6XwaH40qS!dZUo8t9Xs?&dGg7$kGDu9*UlF73wLT9a*BJdg zRq&mWqF(OU$`wY^E_BH|^CTF`2LC(Vd%22JR{VaE%Ny5P}|IK*XynxDg@O_F+vrb|?^cESD&WCjysOA2#skz^|_|Q#{ zPtY^#LXtFm57&ns6`+;n*hLC{(GMzFQPzPDr#e!e@0}pBJ`!Z|WNa_7>vvqZUK@D= z(tw25^1TY;>49)L-za^YJFB<&*wwBqgOL?rv(h|mHOoGw#oSvaU1C;trL zXBkgl#E!$sUIpTZYO4tgo$lKY@v$!XlbC2SnO3?oTM<%}qMk_m8H*)xZgJQhNv4d% z@3C=8yI&K2La!==T}FF2=#yM!x%TbG-5CdA>YEKPex6 zhuKXF(duq-;2&`?qs29jjM2j_TJ4Sh{qbwC^WNrB*YmD!B2PQto#O14c%5-3^MZqM zYAh{2r~AX$unqEkNW9QIZUWop8SpPqJPww*=jlquq>M<(=7v~M*%(t@YxQB%8P`$@ zybXq2%Zge3P)@H&9Q_|xE)<7ySJRaTq=-@G5A*4mJCOGZBlwT}`a1gGjLkpj<0;tZ z*=B1vT?+MkSz&KBSWlW7bnYj*j)iJ(yF^Qfns`=qFm+54V!xf8oev%z#c??(GajDab5 zx1Llx)?~n zOX1Et_lcMCnqllZhQt-Mo^nGc(Des)sRxVlaC#P=NhtS?`1Tz=W@BVG*1MVJc^kd~ zx6i_m8-1#VvwsyNUj*@w;IvSyzd@%7=8S`8W4<~Ub1vf{fAWc}QA#?q)y{jH-{yI7 zXI}X}&h(=7Pg;EzPj;?y|AG3QfC~5G2=;>lnML8wLc`1V*7oXBmqM$T*);f zp>Z2L4`i2=5X_zHsYcXlEYJq4?#7Aow91qCJ+*s=e%E4R-cqb3_1$c=6-GbPb08c~ z!r3bzHJ`NinvFhV6#Ng(Q(mMJy^eLwMDnc1^lZ#2rl%ySKT9Nj7rWc=)7s8R+O56) zY!tDxy*g`9t|w=IGtS%SkT(#Uv2Uan!}s5`)Q1jly1oqg9>L~pu}blfWBKq4kUb98 z%S3=xj2FCq{0@@T0IIck!R&`zie6LH<9U46RvZDm2Gx z?+URoiFQ+{suPcTM9j_{^G`p$ozoP`hwC$I2U2F^LyT&~pMPWjQpV*LKHuu9&a9M_ zP#=h2`)EC@C%Y6zoa4y5fM&~Scro_((_2fkgx|@3FWt+MFmFPVC~&=Vzbi0#G@Kq3 zRa)}2V0tp|8&#Om9*Ak9A-Y40dDoW&jXP z!_XtKCU!Ym3mvg4v(jv%YXR%L-FcI@U#QKjp{mGN4tAuK7}HW8-->%ziI?}d;wIPR z?fAv8t-*T_$K4`4_Hpe^AXT=D7IRMC+%#}iC9KMtx6Wp;9kraYnMpN1p)iBW{4lBh z5{ci$q`Y@dF~#+`neuAc50eyhWnfVsmfz_mrDO8mrw8sl%U7D3>5Vm8J_Z9%)zeEj z@gw{85H)6tR5QuH#>l@F8c8GfIW$-3=V4>&2KH?R>*q-Iuo%~yMjdE-AA6nRs*$94 zf!CEM)oJh;M~l4WYG+n(EgkdhG09%`Bl8H+=|d=H#;}-P)AW2LoZ|Z@!gUDj)et9O_6}Pi8y&f)famA%{T}R$mY1UnA?ntj}*iBSzgd>@CJ+JRG>3JIo z20^H&XxfckH?a9Q5$PlU{zLjC5v%R$p<2#-;aFPr6bt^sxJ__8i_NNN>u{0vUo3DA 
zoiEkPIC{+2|3Xrqi%re^`veQ*&B{Z(Vwo0;x7g2!-iJQTU0oc;jl`HdkvfW$*-|@; zPN!hs2yHH4pCl76jxnS0b&5GiR`YHaBi6b$_Z@fW_fej+)p?iszXvRzhWHTN?B?v; zDP(onGuo)e;~M+-K8)ziT4VJ3wf`T(maNlH>d0Hzs+V?Bz#uD@i?VPnJzu7`tTiqw z1`KvxeOFH8bJ@7Ph76~|r;Yh#1y)Wm?zQ^c+m)yC&K|h%2Yjnze_mIzYp7Ijd5vqo5nkngeD2Rv$~_Jvj&r>8B2^|lgAvJ z$!DK|;1>mnZlF~~e0$fCM~!;#(fuV}@BB9k@kts z&D-0voKcKUnU@XY$p^YN#Rn3hE@h$KTF)A$t64OOD6-wQI4}8<%qea0x4!FWCne}& z&D->J11~ z>AfilC*fidKi@lVAnfkKlpkE3_3#z_mGtC!qJKR6lOnA&-Fj&6-z@m5F*0}2SJJyO zHf@7h?!wL@We*Zo(fS}-4WivB9-ljxp^m0#(sJi#Hq#O+^+#|H%Mb0wL*5lbV zw4XKc4~cL|Z=F3zMe%o~@qd@NvE7+B@tVK*`RQ!6hX`=1F{3#Peq`41jhHsj8SBK6 zog}$UFWX@=L;O6Bh3e@itGSbk=23RJ8GCz?F!y*VMKunZy_ z7vg0#OzaA)8Cts7Z%R#-flRjHmVr^`tSOS0@@IdM`c=IjSJ3uecc>Wmw+gXg z1PzYk^MgoINz{waZ_(=v1C(j`!Q1POa&AsCJUTDdnZRLvV!= zq$+&7i}~xxF&2u$NP3~MDJe>Cqi>$fUJtKBMChvKkPox*X*~aLtf|H0^3*u@Hc1zK zxTtobu_O1Y7h`Z22&Du^yf-Twch56A!1fgV2L zx`2-k#-0Vxn+}1cuK3ZvceBmOV#RjXFJ!&qBpRsKU2L1X*H?M!AN>Ael0R$)R?T=@ zMg*wgnm^g%KbSn!700sW0H2$X^ewU_)y&<_yP9-KwQ#Q4Uwy3~gmc^JuvY(xbyeXr z7stB!lsk-fNESow|bk{sn}9J$taE9jJajk-puDo~zFzCQ{xwIrs@TdkdD?n!p_3U1{|O!l%Z zA;FvM`#0<5-9z5$&(p(`&V3$INvYb7k0y;}?na)~Z(RtiV1XVay%95N(Ld`sE_19j z9(_rdudqJl01v^|5C5AfKazJ5ChWqFzwxgpNs^4Rwlk72Bs;~D-upuK{SzYhX{$Y~ zw&27NXTQu|{hg77xY@zAm483$+=k@cg;P&qK#J)t=MlrOB*mQ{#M(|6l*Gcl>9dgL zCD>pjFTQ}5&+_!r5L;xVe2l#_{~Cz}d+~+);nqhCxWct#A-T}ZZ55sNhsufU-kGe~ z&oM}ECy9_VX!1N}jE2trdYfZzleKwc;4+Ct9>B@{3!YsOOOuwqshD3@OTV*JYgW6) zh_-=;mLX41UewyW_RE4LCtzn0(j>k3Y>_JKRX#N%J3xDf;O;P3y~lEMAvPKYogH0* zQE!P@EwCf2oksg^8NV8ZDgB^X9I7c`INDi zM?Ba<%W-h}##Qn6tww_-B-yU7FW6#*vj$-9Z}hmG{7D_RO)vj}-vVPso?VT_r0I0{ z82Vph)|-%+V-B)I%bm2D#BKxl-4Z%hAlp_Rkg?=h@va^2rcg%KT|5Hoy&PR6lI`9b z@f`jC(sDWS3}LSe3(~*q*i5$Cz{2l9G0*+}!rV$CKvL1ahGVzz(xil_3E>~urV^`{ zb!0lmT|nD1Fv*jig?wS0{vQzosvDEfgouZ>)byi3UludK=~>{orbN)Pd6+awdsV){-uxZB|`n zZ0cm3jL&Q-#HiLov}AZA|zSj@Q6B z>-&ac(}QBwd9F$t?H~02D0_cN+O#-C?+eTko@c)=p?ACM`_gK6I@kwTCp#{t`ulJQ z9Y^93q&ySDZ#Dne{Zu3G8pn#jjm6ZgM)(Q$mx~i^F+T+$-xJT)z$VY&v*%?ztJcBg 
zzghVkjILaeB!$&}a`h+p+0(e3^UDghwy~9JUDkJ_4(h^5GsX77l|$vob$B)pJUV7 zyl*_aJce5jYh{=D!!0B|8Ws=pm6=-TrKKcyn#1>&(dJ`EFM@FPQXI{+#g zd3zTwr9}J&{ClQ&*#pgPs(`n1@6 zm$@1L+QL1%S=+$&5V$sk+aWw4>#xH9r2Rh8*m;&%I??YW_@>ZLc6evseO-MlqeE7! zUoFz4sKh@kmpuea`A$z~HK0>F@#Aec^l(M*7JnPjdpM^X#%Bdw_Dl}f&KUaqQ|MtT zUZtqkNHHc}d_2Ez@BewUt_P32tvZdB>hPoN1#gd4>v%wK{AgZ~HtYPm;`mrTQC%Nf zSol2Nmvn)7g1iBq`{--2h*KSh_A?Ain1mL11Qa1a z3-zMxo4wb0C)7@Bc^jIyd)0Vm?$k@_eH2TSCFy4vI*+|OKqaN9KcUlwu%83#><-Nv z$Rt>v3bAJF^s-)>h@f|or4=hqbN&u!Br(xtM*6(>OcCLy>5(LYS=*X>+>{;upH+Y< zQFXjllX7|*sT%t}>w>eFb+D2AB;0EYmzK28bHe7@TJQH(Qf8jmi#AV-3rU`s8Pr&M z_Q8jgjO{|B%={|3@>7<13zpaGE9)*Fg;o-!b<<|b4L{DupJR*BdKf^OH?@C1Y%@FR z#Db;8rKPO%9Ghkz(v2c$Pkz)we9QB+RcxD7^-YXu_e1ta7-p|XH~N1G^UbcVEfQuH zaIcwo15!+ZNOq{4%(~f6pPkCH;Ge|XwaFB}zsC4D+j&FvdZCE3k&Vvfefwd-Jadau z^nb$H|91Um9I1w-x$at#KJ3~~&U*Z`n{Q8J<=+avbBQCWe9HU)Yg44eckbM zps**s%98UKG4ct1@fjrQ(sRCuGRnw#9C_X%!FEi`{m%KU{ux`mCuTpZ-?y;6Ep!{w zXrbScKHbAI$!^e`M@7Of^_ulTD~zD!v7;8oPS)m9QQ#nK7{qf{k@`20`AQKp>oRY_ zx+H-6nxt9Tl6|jfYcuJ;C3{y9?sL((HeIGde=$3C(`x3vdE1?2^tt=}rXYE1y1z^F za*jVn?(Cb+PLN}v`MkcDLbnYjWIw_g{C*|dm4NvzMzM3uamSMDK0a|Hz4~ZB=`Jf_ z%q6bMTB!ki`apB9i_HVBF|uB(kLSspQ9V0D51@6Ve}!d7i};;slyxgfM|lKx-s|cz zESi!3FY)sbh~@m_Frpr*Q=~X$5th+EMH^GtttJM|;VmDqSaz9|HZw?yvP+<_P+P(9 zdcEBS?~}+LUbXS>DR|S^hMO-Tp0Y!>yPJI zNrd-<^Pg~Dl7T))+tqq~1e#g%pDY|Zd245W*_0oT(QowF7dE+j&&vFi7O%=G&5bbc z=xsKcTF@u^4pJ}FdnIzFD^id)5q*ej#S01TUq z5z}3peXM!Xo&DHnl0Q3du40K}q4_bXTbnceh2!%fR9?T?GqD#9?xW|2qHT7treNvs z&UzNY)%f!aV@T=-Vx&Rf{LahQ&g>saY$=+4v2O=8AP zV%Rq{cvDQs%A+&sy#r6*VPxXi8cQ=o6f40x!U=YZpnu-$oIRPk$u-$FP9{2k7ze5{jw9R z5~j|==)3%tJ=kN72#whK0r-s&1@GfIdH!%B=2mk3`yxknZ+D~lY8L%jd)vjVq^)f# zQk;#;DI;B7f7ua!8#z))X)g#pg2PFD*GhZ0!mu(HU&FHDb@p+dMgG?zG{6j`q~~_Y zNAnfCG!nJmbbJD@AHb(Fr#OJbFXC11;o2C{bDz^en<>rt6-y4$<_r>S)%s>g9Kou& zFUfj0b~FkKPoa4kT6X8JCgdLf5V~HuT1tGw9S7 zXTFDUQ(jcXRX5TwdzI!xrC%ZDe(LYfaeYvsmyP;q;_5`0!+HJ9T7R9s_Zou+(fj}r z;VUSfNxEsq^}ghLPt?u4A{i}KL1Y1Y=WTxyWnQTD_9Qr*-Ba+m6MyOeoyzcOPy4k{ 
zyGB19wfCtR*VULh2Bt64CYcv1^4ddis3Kl{?CKkM({;4Yu9<~2%sa&F7!G22CsvWY zvjd@n$nzDemgOIL7ctme<~-iAkq!ef<_a<|ht6wc`CHFNkuP)U7sc$1*2C~4>+bV5 zE9*vE;^azwZ^F*(E9&ly%zw{?N|G{v49mIvICt@Hu;Fa2*VA@Aa;7k5#@-U7o{z78 zz%+@CmuR<(^PhLLKN)7iZ}e-gmI6gjw&AZvR3IsZaWo9L+kK@8Or#Zbc#7P}_O` zcp(`Zk}@Thj^%X=F(_+9+wy>e#h#sP`~}$(7rtS|RbCmmq?k+ zKMPzfZ_MqXg(LCweL7Du1OFLvd7k?gbUNYp8QSU&yG5*!e0X^xm!j&Ua3?tD?ba;k z-;aTh`TGiNErv0d)8!qW(*&liSt-Q@Kc&$%%I9L^ zGBM~kxSmPNUA%OWwoldDW#UbH*ZhDn4dB@w4)uBeT#U#%r1lW1&yF|iH){r4`1fW^ z&aT0Xd5ItUJDODa8L4|f=M54k8EZ-zXYXk;Xk~YQnEfgnzbv+$jl)B=(L)cL;P{wG zu!$$V?z_Wy{zYPU?$WksCnfJDX|Xgf=*uf|Pnst~$y3*lML%(D0srW!txq9xqPBj} z%LKi>Ccb3OFo)DfLiTf~Gtmfl3N{?TqPG>sq}k5;N9$d9TZ(R+g)hIc z>rxsdW$Sn1P4@njWW|o6>73^vaTd;A#E#wB{}om-Z)QGciL;@mx@!=A57JU7yuT zSy%q1Hj0X3SFlc7Qa$Lqyn!#t2GhmNqnxu$t9e&mSL8|-t7IfRSTwjwi$(axe7N0( z^T}Q`pC@HCO?I_c^le$*^&P9M@l@q5^NZEEl&45?&cxLYQYCZ{C!DL+*XZ`na>QPw@lZ-2->3kR$Jj--k(7+-7cj1nncyZ&=6!ytSIh8>D;SzG?BR+C6Q<;PkXsh)7{O5WPj*>#QE`-zo< zaU?6q-qv0pa=a*Jme>0i=Hb}`wvG2BuS#;0Bz1o?cudo0Ru=EeM;?IUdyX%_lOFuF zA6af8*VC-KFRw`EvTw!3Y5Mt{^qV2^G_CSYWF?KMsP5ne43|SV2;v{ev|lRFX-jzWeR=tAz2xo@inb- zzn5Y?|KXdvpARGhX5v*f=bR$~ZNi1SjcFC>`?_BH8C~8MK_{_&_B*8T*3nS-H(oB{ z@5u*Pfd$?Yk#1%Aq-rng=S!ZDoLgCYn>Rz9oIj!v<&tl-E5yQ*Rs1+5L_RVb%otgT zhI_%MgR{p$V2$fDKg;{6tuVerA5$^z3A|XQ^^-!SIGy%i=wlWe z-NXa>(Wn+m%@LyV#my z!VPfdG(E0lh4)1M6dY|x*J`YA1u5pi?=ZS`@%QI+8L92!X25&7Y9wDdjC~%4_rEbI zB@^e;yB=(+I5yd+FxuEQ8Vg=yfh8=n2#d~jT^-yzmOfee-IWx(3iFSw(VEGsVaa`X zRGEEJzI)O>+ME?D;fb z`4rljf2X|7?yOiT0D2_v>WclDlh*RhPFHMk&MV^XhqQgo)w!>jgAv(*w4I(=P1R2i zqaioir!Dlk7f-hud&bfEJYJk4o1YtPvh!n^mXb*(YouEjxc9C2zlN>W!}577tm^31 zMw`~~+}~J{k_59MkW59FlJRE}CxhoBjwG>p*87gY$%fiI+Bqo-Hs0Ug|IB$aeO|xtJE8h&G!+Batt`^7K=gE*KqfZ)94>UF%$F4nj&Gq!` z%)WWcn0$RJupn8jr#p6+Ryr1*d8OR(2R44*QitpUfO&`-9KtK2pG!<(Nr2dY7Vv-C$#l{(R3$Z zTTR{nz@w-X2~kNxXh5bkP!Sp_nouDkDk2n>C>eTtlL{G1MWRp{8sC(RWhxrYQjt>8 zAhYZL`QH8hSJ%^XKleFj@3p?`yVl<4oW0jt$kWR@_ZPBckHr7rb)B&y*)aOxUf%0h zfKX9L)D{PlN$o@^#*^|4C(-=~V`2^3RDoMjT>p(XWDUXo*fv;PUk!t)INFQFa=-Pm 
z?~0MD3JG!O9PTs6J z#{aC@URsdk2OhT3x2;LApHX6+YlnzVwJ@inKK`fA6*%}W8&!72cl_mbY#4|um$2Z| z&f5Xs&5+p)xvzY_o>beNoj0#{nnP{JwmeCC41fR9OC7$RlA>S0@IlyoS*z+4=nk8?4jL zV66K^oS0|!k|&^>;QfmJQ{MC~_Rsuv9SdBk&BOGtOys@IIbHSmxtYLhdeza(zpl!z z?5tou-}PI?`F&t{H$9p{EOYy;Q^|OmaVQb~46QA3WoCtQAXFBf5AyjVM2|1^n!W7b z6>K>g|7O7ca_5y5eLr+<5!e2y<#0Stp}*8eBhmOJR_`Ic=B_&LYO)4o2%K_N3TU^c zdon7lcGVn5Zg75e2z&ymV|e~MEZU6^oPcHBp*s^3+rauBu_N=%0qmP+rxREpduv|P z@)&I%i<^n&bzyunZ1SdOyz{T%VOilf1PdF|?-po%q>UZUtp}l6xR@My$$D@(7LPLD zOJU;nyuS~WN3s4~UilU8{0B-E99>EJQz2c?b$R30iVSbCURj^>=IsDBo&uBFzR&v3 z;h4VxIw@Fk3SJ#lh}~_p&<8uez_wO+lXa65*lU&Ek~8KcT7Smc^U0U!nv8ZQu*4wV z7r#liw)e%JJTL#u-(!6GLGQ^hHe0W?a3JrSvoos$7XHszlwE<@^>(Q~W4-OXa-ir@ z6^l~7XoA>tE39|u`!QomGQ&=X&BaEEAGF&Ro95uxU_5+}bsloG9#2o^qjLOXx!GR@ zJto6y%K02vU_#buzr)kAhVUkSJ_`oJ^pIx*-JDSa^1r+KH{(s7?BxAl=Fk7t(|EE~ zA#E}OeAV(_Bl;8itHbu~Xf&DbkBUk4wf2Qp^8Dxh0y~G}`*vvjW>mafOD)YbXY$aA zW(WgGxsz3U`}+XCRZGh+_>@`NM3UT%F*96qCu_9FyORD+3A|UZs0zNEX-vHq!+wX) zA3Q31xtIEXC;P8pvAJU92pIkgtyv^Y=IZ1dJ%@Z*Ip2)V(|tOd55%MY)=zTcoQ@w` zSh^0|ri{;nZ2cFWZzks#q^arrwybp*tV zN}vs3!E0%EF)XIBT6Habi;oS&g)ith*RgGdGfu+34LG!zXV%c#NRi@Mz8QvX!rqs} zwU^oCKnNvkQnDleE!JGdQtid}OU;wAf;VIL@9db;KI;l&Qw!K+_gYhWpCdN5CdnCC z^e8=l5|QpT_P?mVQ|PotuUUIMmZT~1`M%G|0r!kZ+0&f6vQeRz-pZ2vcK$I9Q!aJZ zF{DiK;79pw_OQ$n59W(U_mZFmUbpktaG##1TT?BpWQVtp?%t(~myOFr1Lu6q$*s<3Ve-{eWvg*?AD3#JrVBW-8KwvojzfY%7seT%XBUwXbN|THBHE6o@91Z&tHpoyamwOh(DPiMZ9L>Tf zdzPFHvFZtWrO4iEBuZwi#XR9|HteR&`t;2jm+ZSpme7gN>+ihXqE6OiCo+CO(vl>5 zk_5+yuURe941co1Kf4KLi1m5ve;uuNk~ME`_tDRBek!>x`7^I^BzZD3=en34rO7=U z8hIj@{IuIhkTUdH%bNY(L-d^}@ECq&Pu0B;NEy#pM9(&bh(E{w(JJ%v*Vw5Q*^-5J z6f}=v>3@u3gG9s`IG4OysZKJfz}W?K4^y(wrm>zE(fetPNhN~3uR06ok~wvV?@l92 zc}Mc>t2@MR_HB0gWNptAu6~Lg^434|+X+1MJIuV*|IM*)mNO2g?O5$+A9p9m9)kQt z(l(__0}<#hQr*P{dHVB~Uj7nmk~456*4<)c+0Ngax&9{n&T7|EVq5k;o#Dtsyk@ID zFNF65*I(dRJJI${xONj0vIi|?0E2n1D{EGenc*rDy~Acz`B_WIK7oB(Nj(}fvdj5O ztt6}DTY9O?g2}l(fLC8i^5GEJ8^4Q-n;i;carwfilr;x!aU%PWf8nWF!*~Yd*U@K* 
zULT}!OOk)C)z4`3I*Z%@rK|?Ynv6^Ex+ZCpn`xP7TviL0!KJ+xmh+c~ojpc-$;gm3 zy2%E%iGOBS?UgX?0f+hc@E03r1=e(UtP@+_g!Pg5eh2nc>>(y&r`onK_pr(F`*F?wem=nWuPL{k%~a2iEyJ<<1`=$22x>0lVbG%i6mG z{QSs1WgzsemhOW}R@1Zq z$$tShcIMf(d&oVBY z2m9Nd{WnQYcRVYf=8$6V0&_lQ|1Oa3$3wc}(Rdd8v@nugZDid{hEMhV4!pN%CF{hV zz@GMWY7Nncac2cSM{VO>h;BhJE{R>M@qgPMJWYv5_)~juN zpDxmTub-v9c}j%djguF$!3^U?Rfv6xSMOt8_9bs-k<9R4Bg659_bkc%@u(S2){RZj z*R8nFmd!qN#RNRL!AP@)9NkH`#kjuF|5LQuR;y#7aTA$tb9GbKp5Rk^|MEU_1SSpF zZi@caXX(_O$=uIp>E*=Xl4)cZIXU2cxt#JsjI zuNfqsoP$9J`~GYw&Skl@Io>FC3xu=ZEaimrbfqd=B{x~>T@CeDe}0pB)c&|I-Bo{K z<8ZA!&8M@6<~`aBrP=53&i&?RB5hYPq}E+qDYl^OI=yE|2W*=&vxl*vOXE`xV&WMA0F9PlbpS8$CmNfAjF)Xt%O( z^)9--VNA*S|6{l7`ObJ=l`2Bb@h(}Kzs1z`aQj!EJ@90+Gxnota+2?a#rse=4||{X zsfx3z_&c?cQUm7`=kF3@uNMvf%V$$GwX}X}VRz~#g$sG(`W{{-14|87`^sp(g`Cx~ zHqXhs!0U29H65wtj1|Vu6SZHB>|;p14R+f}mK?|};FnAYuafN$V|FDL&3?PwVPx$< zA4vR-x!cJ*6#AoKk(w}Ph>t07{}Ghqi#5fOTs4o)zr*_;BJ7{W_}OMXsp;~pqfL#i zJt2IJHv5Vxefilk%$_V-WXHvJe+?t)S^7#5?u9sgkGN9L|Lb`4N)~$Ckt*z;x&j>w zv7roklN)ggt|oi!OJvHO{y4E~3^|h}bO#wKW67S~F?SoSv%aJcD zHrOQF>8Y+rZqls&YpRD;M)1b8+g=#oAB4zKlKn2`uXIMLfgJ<)T@cS3kL{x4I-dvf z)#TKwg{M8Vyk5-6?(vSUoLP`QIbZ*Pdj%~Fh0hEa_J-`;uw90M-@s@lWE>1mf^EI#pthjR7-vIA@BeA?RhZoCyI?H?Rb(cAbD%C{{-?* zf$3Oh-{IIA9{7rHl6|v1-Ygcy9wy1DESg;EV{v66_D|%I*`Jrp3pMq7uNnD5y+2Ks zDKN`Q`iw@GX!jS)+*^N#WB*#R+=pL(xOOEj*Jk;uyeoC&@}_1OTfNUR?fLKvj%L68 zIHOhOcbjncZzKAPuKSJk^G>IONOl74Pd7T2rPo#wB6I22T$vh{2VvhR?R>}vd1If9 z0nd?OxhS(m4BM=Qxjg1;<8g8o+|8e|9-t~TU+}LiTb3vN7?!Js(+#nAAsxEWr#~%{ zx2=)Lw-P(%(mT(v8|ZN&thbRl^R8q>>jRbS20lvsPW7IW`u>&Vxw<|~&%(^kq+V6f zH18>IGb(M<*9wvkAX)M%W+&Htnt#Fvw)vd9=1U4O>pMKVf=!Oo`Z~I7g2{t8pK7z& zVU>6C4e6hK5GQKkd6KWgsjR6yw1D_6^g5KLUFkG|q@A!Nnf-dW4@@qxTJ&uTwJTsW zU%y#TmUobO3i76SaxK0+rO#^O*fH!nPLIoZ#VRP|ZaphEvR}M3j0RxXdm{b}*7=du z2e3d^Fn$KdRQ5Vm|0z0r1q-g@VMov`PffB?I!~Qmh3i7rs$l#m&!T;CtTioCi74*@ zsXBlt~Y{=p+Q|-+z&>xT>BqVolmxmoJC>xDby>wbGS=1 z`5gi~+34E>&Z~U;vsOF9qp@o*!He(d(hWvk+2R9H>rL8bZ+B%e;9&70s~Ym;G0)JG 
zD{%)+nu%0>$o!e8RMr2-(_x6KQeCSjCM@Ok&tSseuByl@wiw~J!ulz4R3}xcg=Vz> zO*AV?zS=zCM?LNXomc2nMT|R+#$&|W{%o}j8hM{L9gp%fW~8u*OzkDwl+xA% zdQH`{fyRp?A@*>At#`A<3LZZTn;ylI&RWfz_pCJ=3&U-C{G!mxxsK#%^QEwPmE>88 zGFSY|y543G?u!k{vA@G?;&3fyN8;yLw5q^^W$Ztj4~!+>U0Tmt%MUTIuWycp^iWJ$ z$9ky-P}NAaMErhFR5=(oQfqQPX=@i`O#$}9Sta-W>%`$ZeZNjW^I*^w3)i_K(feOL z{ixlIFewMw3b4&x;J)-fpGB{SVe0J_W8cK$R`5N}*qtnHFFSh;d@y zF8?3HHpyaJ4iaTqdn9a=BO-NKdeglZmM4>8Q8Blok?T~N$(512~%4w^Tp0XBtJk0Yv>1Jo7y2-x$An*UbpxJ0G zr-nigeH~Tfep#zkPMC=!!-5FijcAxD>o9k4k6nx z@}CLGN8poHIR`;tsUA{)t}F(hPWn{1`4&2OJo`W36e}Qyx2#F{2u;R(Gtrt0rjaIE??3R7J3_8hfTP zx;FLS~~UhI!7xPLNfF6|6pk<3}&t5J|sOD z)9Q(Gk3evZ$e#UL8`$mwJov`^zcT$(i{UUWoWVMGvP5dVtr1(2RpJ5X%yR5j7QKhY zpWwz@JoP4Vv{lRAM)a&OXsoTx{;gr75n@DXJxqYa4Y+l-2v-aKgT=#3A>50O*|Rx` zrzL;*dwg=Hzq+zUW(r*k{||Sp751hA%Xj$KNq@-&awARV;6_`hXH95pBW&JoWgYP2 zymOcTx8Phq@h5hkfD^kN&)(GJE!#^A$?=}L8Od^U5Z`*ncgb>bvu~0O;4$b_cE&ws zT8F{wH;k;{oQx-RX>g%)w}^n}vCONy_X#?b=Vj|zIXhBk(C}ZQUaGtFC-YT&`XgiA zFh~E3BiFG*Mw2agIZkhflKp1K*J=HD_R5=;kH~g0G$+t9GmhjG%G|0PG#=#RSqC5P zA4ZC%aQ=fFXNzoiiOH#SGgL$vh6$}%A+@WH)$T&Q{^6VC@B2r~ze28p^VeX>CK9~F zDs7#Ww_neQ4Vmq~=-T3N$-eeQB+c8>m)QTmu*m+}F^)FF_b=(V6;h|@JFC8{VQ?xk zHf637r>@&%lx^DB>Yn)he8KynvNbV_1L1HI1K1S}(T{i{DDryJ?UvQy3eT<6jBazoYHmY*b87uR|c7)56GdGhDXOVxBf;JF7ptCDTFbt{iME zs;;M;dyrXJZQpOC!`q_YHnLA(jdCJza~QQ}k=Nk)6sfyAvnHf&Vb{z^4~AgwL*_$f zE3|6kUe*ET*~(;=O*Ws}u1!wrWdB-)Pc2D0nLz{w3uNiYc zpxbM#lU0DJ##-FJC&~7a*me~Suj7;T9J$zc>uB?=$dvp}kMZ^W^t+R9<$dd7*Xg<+an~7rXuUg#6TKl>_*`p36Tk{PmTg30Gv&9mlYd(4jOV<6Rl^=7d|b{sFm?d);A#pg|~ znQ0_TW`Sqn@rW71D$!(z>jvxLCcm@lypyYv_3uM}|M7qGw?U%KpY+ThENxu<}08UE8?ob&?6u2T2fL~PMD`&`s%B@$d-5cg*ZQk| z{$ZW07|%1Vr7(P$t|wwa*6$U^wJx+>41w&&c$j^&J}mq2zK7{l_Bh}F$#?NHd5@s$ z8NB0ASe)hByp=v1H;D>k49DB) zJaRr}r&44xpDYujlFK8z^(Sa~E)QAC;&+nlav1*3whucynOQoCzUT6W{b;Zs4STck zTjE}_qt|p@2ljtVUwN)I1`~du!)fgOJ1hLcC;x{2jd+z^j9;yyz3A$9ii>k zdixg(TZ!NQ8tZ1T<;Ipr7||Dqg?(|f3Efkz>K?M(PoJsK?gWX{nz@G#MYOdG55{3j z{3Y{{X=I*Chwl7-rf=G6;dR$8h5sO<`2fuBfd$9HatR5u3%DP-pEVci&PvVb*3Z$* 
z;9kJ0+TYP2I|e6dZ45bw=;cbjP?O!(@VkDv{1(Y?#el5fI>U&6Kg8l2<(!!u zxi^t%1DjXF-W$ZK&&hlx_Ri36?p-cqgQxLh0=yQ}>~kp2pxZoHmF7pUvBqM2>#m~n#`Q4IX^Sy%h^6}oc4p~jjVbry|WwU z|L=*cCL(;{rzR}!a7BBl6mxd=Qh@$vQEaM9DDQmhFC_%|dNXh1F7) z$)X^E8;T+4i7E48+7*NHj3BuUGqdkc+O}l=oorL+H^qoo86tQ2{ycvl#8WdS z-!Fd7!^OOlNzRvKbeJmw=l%H_jOh-s4{^6NdGdU4l&EwhluOex&&E@yaJH6a!0G^2 zeVd;pn`3PV-Q@q~WPVK#2WfK>A4x{$4`}-x*&6FBtEg|JRSD-N^X_=I&)&eXcz+Ll z@*egiay~?gld&rJ<$h_lE6S6xi_xU7>u+VVi?o{k3m1#e5BqnOajGh;lXvw*d`Pvf zRuH&YYfJHB44Wh~_$V^mg{hlGptsn%C~Z!G!X^lIq|GAMI*LCgNB<80=dLn$B*}g8 zC|2hQ&2GA7gGDMmD>b0<>GGwR$6 z*=uPr$M-eNs6U5P^2{C!>0g{V)y(ceR=EuGJ`xLyldU60Y{KG4*?bK~@5vE=yRkFb zNV2mt6?(Hfe}96Up3te{`Bvaw4X+f(An0gi4t-*`r;vZD-iDcVL{fX){xKOP6m#oR@yxg65Q+n;$ z*L1UXf6`~NeO`ejj~ILIBlQ(NrIOaeq-tZvUrEo|i@04kX3_z!KHCiSDDveVFl#!l6Jg4mk+*XFURuc-?qugorKhYP z&fKFeWK*&AZI~u+$^EV_sh!j@XuyWGAeJnHTlIC69+O*UkjU9l3#l}Av#3~tHL{92 znIQ)2tr2fI-RJBbzn%n@9o@u^A3FLjM3Y-5&qZeV|7~8A{4xv3^%G9rL#9JmIXl~0 zu+on-Z9)Fx@N0qnl}X${&uvJWdaqe$Q_c0yLOpY~ar#QN*n55Ylh5vr3t5YDuX};i zr_K75;=V1NmEsGe@pZ`B(%h9Pd?5k@(Gwd#UOy<*fj2Kh(Je4K#rto-( zW@q8?#`99Pt(IU;(QCf@C9u06fvu=@Aqv5(O?xnT1by3J|Cv-+4yv&aU?UkEA_oV zZ+j|z{8$)aUWfggq&mb%dJ)c4XUh_>ZSCj`v7-gaI$+HwIB<<~Z)U-ZojFLP&djwO zdn9}CnWAKQ_FGTei~av4WKIx+<7<8CdJ*pAN$jl!`>tT)cx1-LyfK<$wo(lHi^3)E zFjA%bKkS;fl3A77Kr2fze+sNmVxdPxo^yFsQI{F3P_hN(8pGvNYWBA-XEZdY8$>5rt6?u|g4A#jVIooU|d#vV(q`hc92L=c5 z=R5VWUh7}*VLkB}V^=pREd^)dc;`exZKI7Aidbm*}ei;I1Va|)NibrL4X7>CP z=OtM=nygi;p*&n44;IFytXS$t&#Wa&cH^?fvMuy%gg^UY*k8^+0b+Tl)!zT{+*G?= zk4KLhiARXgnJpfH@yW^hBs;z&R-Vka@>XGr_Wz{A$Lv%V+se=-&m4;sIMUGPM6_u* zQ$aLIe)yS?uL_GLxYGh>*J42zxMrVVGNQbU<+;0EkJ(wJ@H@TFHge?&=Y3kALyqjv zYys~QG~XHT*WPe_cgEMD^eV$1HH<}ja!UQJ{X5Cr(eF14cRPzov(b5ZYn?e-?!s>; zTRT$EBu^2(lFCrYq&wKZeoHKE^!?|gV4X*Bs3X6L!v9;m;D z&QI=-2739PwtvF^5PoqPrmlp^Owvq;f1X3%izS!hS=Ov~6BiOMK4r1ImFQcz`d@RU zX~y+Ur23rBm2f$`K9;dwvIx&6>w31#TfnT8-bCI`IB^3_D?xOWI22D_OM~*{_|^Gy z^qRG!-FQJN2;EPPv1HnY!N-zn3SYjAG`S0TT5R}+AAZJ94#(wBNZy_wq&eMk>m%Sc&ca%HPVf>{Xe@;%8RJkg`9$i^8c~esBdA0Lu<6ibpTmh50Snv(W 
zvSQ#n5q&Q04<=P|iKPnIS2QUCiQ#_lL-y>{sZ!9ksrHA`Xst5`LuiZshyYVwr^rsXHw+pdIM459Z_Ysv!21|KS=hiw#SOg$3gUV)_;QDHC>x~ z(&X`ZQPeq$1qV9%5*tn=Uow@AGos&)x7lSi5MPcKf3veZl|^4CUsj6NFK~XIejCyB zPFy(w_ETNen{8g=(U*#d53=E{P?$@H;t;E%_aB|{B##`xnyK2Jw|QHgzZ9~U@|~)D zCHKF{3ci87=lkYDvS&qX;&KJNImzebZu;KuRQx+g#NA5&t8m~J|C$tj`eDE@u`D|x zvuh(+`f3_uufm`?c$ar_FBB}1tXy40r3O%{&nIu-QOQ`kFC4ONyrXEdrVxXYL-qsg zO(p9*nV5xRr|^(1IMI`2V{zeHau0;pXtIwbd)~>e!~2ifw-w$kgLA0vO5W~FED#)lEbhZNt3mfjfv^3=jP?Gm$rp0JmZyHh*zYQ`KBwJY zxV?>5b^PpWj865FQS7rq%ucQLO6-+vi*4YXHNo#fDRp%=I)Ad(=E86yoo*s$R)eR4 zN%Cgi#j=}t*h%zi0PWNtOhulFqSn`JI1pDxlQd7C^V~1@wJltodhU7GG)~OSTI%UW zrLoQ`$A;U)mh42&JHJ10qMPq8(dstWCLds)Tuda>K}NLk^uL3?*U)q%#+pw7wxyRD1v0eOr7FWAsV6r`a zr`1vLIZk_L`}b4fPBU32he2eW2$1`PDkKPZ8opE?S5T4t(_wx zHPqrv?7YA?sY%vGoOqw4*&}kA){F76JM{aWpAkMy)M_6wI6HnGp=I{q_H|wrtY|{s z$Kd@GhF8&ESFCA9$MxjApO>Ge@9ebbtp9%6yUsaf9ox(Aok+qndBUIKRhXXVfRp_H zka#s;PpL%nqdrQBnc?4jcY)8ty)ldDx%o-j8IFI;jK%Yfi-+s=1TnNMJhO9Xh+bb1 zZE8C64&Qy}`X_1Eg3jak`lIBY%__lr8zlPp?rA=X6@VrHOy#Vw6fL~_Y`#HC6LGF5(-3y}SowY?YuSECAIi5Uu+Mj(BslF~2T^zNLHStLM`9V`mrPNnP1;rTAGAnm^DdPXzKFb_EvA zg5p7<+yVHU>@ii~Fof=BV?%GgmVA(_u`1ax^B(;)(YL-&MIFnNnq3765;c;=>vsqo z;Jfi6#{1eU=5u!a#m;#bJdj07kZqFj>k<|i=`>3THo{Qr{R95 zBU{+GDIJ#M)`MEyMCU&6ZRh&$X}$>84(1*I<-y%p=L)@iNrQ)=uoHW4!G(DLCYUCh z|8ADeQ|dSP>F|Orit)D}a5^=gYG^ZWFBf3SRd_cXDp~En7oIGIXQE6449m)+=Xm-4 z?3#RnI~>=3k==Y}!**DwTQo^_zL^#6@f_X;x2(Bebd{a0Mco2yjm zeuX7dIrjiQkoAiv>G?5SdRjl%VNE%nl3hCsX?cfl-o*Z1w4W^=^rdC?L8LBLvK=(% zNf$%$FkG818hom!ZvJg%^{)7zr~hlUl|5-^(<+&r@Amy^Y}*LJ$?E!#b{~Rj-aKUg z(r!Lb%=yK%`K|fMMv-8*vF!lbyg;7g^zj#_o`FN{$eq5kdht_opMb9?kS)0dZ^Qm% z+n?=7J$UD>_#oO>b!2aUWp7hWe{q)$DR-zIO_ZSMlE&&~2>k)E{i`+_AK%&_*Zo<;E=dxihkc z@NHJiDxF7hKeLDV z`aWbyF5U-y_c7!r^Q1cN2s^XpX4d}F=W9fcytz48JGsN{#4CQG`&2DW@qKBG%)8~y zBzqP*&1sS~K3$COomgp?Pd&)jx`50kT3ru^tZ>Q7vkK5zK$6c2cM4PaSgO6WBYC{? 
zL+ngein!fx`{5CQ%a z->=~nTQE5lxs$ap&o1)3{1Ck9C0eE?VX`51BK;Xgl|8H9Ylt;3n~mH--m68QUVQv0 z?F}dCyEN?TUmrS#0bju^_jdR5ghyHDEa*35?FwW#-&u7alpPGOk~f;I#s7IMeHYnI z!Jlf5Pd3|52EXaz@NnNf&#K?M>Kj_M(97}UAECYdpubKu>7}=E&TquKnz8&Ttg{zx z?O^lev`^jMjOJ&OrlPaYr27XrR6=a~+26bLzg|Sl3cT#1IY;~)MEgG+ndq$NNU?-< zYw^qW^z4V7sRZh|ap9?ZWceMp^Bg|wK`z6+V{kv~WxL?l-p-sL7A~R7A$%rpG={{4vw|J60D){?#$c*RHz5M^Enba#ahFRm9l{-a9 zT@T8sBA4n#*~4`lOJ%j{SN}IFO1W9>lvOS3jCvz@)ZgOL-p)(4-Q*1(>dMDymi3L9wOvlu3+Yso_v9^9 zR&t*1?5txQ=F034JCa7(z26+>S-<{*=-dgvvJRj!CZ+ayGT2=~rga!p#iu;iEhREl z(n@w&Ho>~w`m;|ah;^(!-v#|c zeKP_}a)&jZ-`t{&N~HKzKa=553|3in-WR*Rp}{5eN`=Ud{CJTgsb-X%VORUUG#;d~ z>KgW6Nxz|mv8{)hMhRMH{a03i{p{?ls?Ix$td1V!Q!0>m=co19W)O>>sjUvMFYfqq ztg-prwk@pMpU9+C8meX|@KU%5Ran5POX1gHvEmTyxq?xxR5P>m+X` zb05rDOXl9ZV4iU-t8TBt-8F2N=f#V8Nev9%<=o4}_tb@Xp8akWwSL6Vd&rPn)D`r5 z1+J%d+`aHm_3QY+l!7)DNx4E#bG5#P&z+7f+xhG-Fi1w5-^o7=cWiu!@&djLnRtmv*n?1y)wkqb43~=VQsEn`(T?+W#~Dd=HaOQ2Q2Z z8?pc8M#Bf#Wxs;2z31EGXm}GgzRe1~wU%8sxkFBV(k6Ntf)9DV^_nYcvFmj_;cvPf z>HijRPEG5%EcOEo9&`N@m^zsBsuJc-b_;}31Fs=m@*ZU!W)JpFXWzHh zS}*#R)y`&;?$Yi}JU(lzyFq<|n3_6vOz8SP#h9jrxcOxw}Y5i(a z9IL0xaJRfxrW#Y{YA;pO-^8wYd^Ky$uN7mSa_$&b{)KFP#mgq_o2)wv^mvnRUgjye zSIWM)2G}>(wR^IIJOz>DQ8^h;`{?-xcI=0#f4b&IzR-XN_T@7N;O};NW_(Tcj9Km~ zZ=q)@?jCAH-vOnW;?WMH>QLHcyg!GOJt2Ft9`1K`Yr3C`2}iMV=JnYLzYLOpn;Xu; z*V^RTLht+W;2f6UhiA{C-__9B3l>?A_omtKXj0wG}iSvLkHaL>!w6oc7t>`vePqPZ6+!mg6G?rzj!qfU$qQ73W zoezhiV)8Qf8^?AJ)1s$nct56}pxv{yUl!-i)MKhnWQ9ZKteLZq$MIyBsLWE=@aWBK z_`bG3g6YTnvG&9*8YyO0qosYMd#dWnef%YY$@1v5?yV&Bipk7Akdk$$qX+zTDp-b({EA6`OzKO`kez z7=Nv+zggmEo?d>T#Zl0mtJTlM+aBcTh|A+Jc?W-(!h^nY{8v)!$41@Y`Vo&vcDtdX zN@fQ;ptDr#S-<|gXq@~k$$7PeybrO#!FoTR*QTP;XLuIGf5Wql`nr=HSCZf%md>5U zM1QrSL#oQw(C$GHD~$thiXnNb+?I7VvS@eue<3<&pU-W4W~1mm%+c&Q&dll>-~5eV z$%m4h&RN@#%0+ozS%HP}bZxzFCXp!X)HB1(9*fCrH-@GISomV{?BWp*^R3*8HWoLs z_v|bh{7U}vB5hXD{!Qv%_}9_!cu!whhcLrEPqK9XW~|5)&WU6yO0RKZ$QL+&syO~U z9;c%EX*gOMIt_VL_U=wIw|NnkLs>IpB!@1${eo|0W8o9I-N2VLacJ*)Ea-0~PaxR6aMuxjp-){0MOk~g&n 
z@}95-6t888?2YS7i>&1;!XmrjGfYprX_GxnnGtt}*3Y~ptJ|g*u5Km{9*?~ZAkqX& zvJN`WX$SHA&kAwpNzpXbji?9C@0v8s+WXHEepkNLgNsgJ{{! z|H-DAXD&zSy;xyZon3|-jaI28l6G_NGz5B?quysMN}Z=~wAU0nHuHe&Tg(25iDE%R zv+Dlh#Gy1UN`j;C@-Cj1>R+i8mX%nki`^Z!hOk2`+9jfA9Jmj2*ZW=DRU6p%PhL}k zf4%EyZ}Fxt{F6_A5&cIL{Qh69d`hBJW63j_tl9n8m51st6;e-Qr)8qmE{q(&=QfaH zB-Cma-lM;!?O!qa2HG|f-SYWzQE+CwD%tf zX6MDgLQMGws-x-nrN1(Z$S&!uMj5J)-!b+hSf_r_qoi2^rv})(O8aei!4~?R$474w zZ$1$1Qg1o+v_B;4G}=B%rp=;y2|BI7zpNTf701cW9E`K6lYJ%RGe`Wv45Bpj@;>2r zXq`Z&RJ%96*oER4U?;`NY>_6?zwT;3NK?liOV<5XteC}1TrxjnV zEuQ|&c5_^H5i5?QX#-OB7l+>BD-Ymno;UtXqpBp$dyze}wEO5V3!lC-FFTW5J8|tS zE%)N(hnhz>X78HXPldmK*mEEmPs8Wm%q@n}?+7DuvPG`p`NLQ$6{${h?Kh;V2ZdBa zyaWe}!0r^+mLz*qM~=g&-85}1`pq!2%N;}R4aPt%>#aJn*AMJl(TcEX{}cQpPe@Ot zbA2|<%ES&d`MnU|lAq#Iwm6h7d8)q^t3EWw=V@ga`Xdip0NqreenYR>&3`=ECa`Z0 zTx_SoB4VJ|kEh`S41^A>qh=P>J6UiED^9Lavt;{54+ zEqyG4locVn(Di6qeG7#%*zE@6RU$M)oXzmomeX(_dL ze-ouP>+4I>XKr0mKi##x7u=gb?>#u>2}y6UwjxfyQ$W5xyL=3V4!D)9!bf0Q6;j@g z#qUDpBj4QR=okF31TQ)bS8q07n2RBqP4{%xSbq9Ejk5#fDXdA3?d-EkCat_LZ6ro! zXK^X9r##%sW8z{sWzSG%G|7BBnmk{*ax2e2$W<4JO|@BYGtZcWCt1h#qe#=m&oVOQ zy;7=@RCfL$5ZglQsd#j+Yg5Pm7zoa#)0tv>azv#rL?u3Tj`OPf$-aqX+_*>#nnI%F zmwL_h$ryZvzp|60B@X>c_Z;h2$?_;YilanykDhcu~&_u`~H)*NO03*f&}1Gsnwbqv<%^1NXB^|61H_D1y{; ze1)qgv1YPQ=V@~8ssTvp2wX?s=SqZMhs|7qfYF37ELGf*5s<1@9ZlfvYdR$Gg_6$ z)Woa5ap@)2P7d~y_(B(P^)>xWX#b06kHoHCPNGeXgn+C4g5=3YgUv>vDX#mRNA$qnWT(xFo>W>(^`xn;e3#4{$@RU_ zA-ggbi(olp8|#iE=`>OABfnGAWhySF^6_<|^3_;zj`>~IfF0;)s)!7Q{GqsY8uYrt zatH)FyW%2TE{<8

    fmQ*-(}Y|qTFBX$fbcw+Y9&S9~3{32t1p6ZPw%ix0deJZMs z)KAtur`~oQtlA*-8ijg1y~zg zyhW?ogE5uQ<*D~+dOiyqHu`S7$o#U1KN3%pAMz_`Crif1uE|@S;_TCm$EL1u@>TTY zht15zYCATICdq2o8bh<^I$063rt%tXWYtmDYc$5;?Ac0|kc@bl*Y9JVbquy8U&IF9 zocxuGwU&xkz42)Vo4&w3tIIF?-M|H&fUU3$!u(y`_#+0XkVNpr;${IV22 zCzI@SHaXBtCOZSxxew1>#6lWom(>oo?M43SG`bM}*?;^Btrm%0nIHD%e>dUNF0y~@ zjPly+s?YAe?@Zb@n4R^gE9u)0=bmGyW$+4~GkMfic%1zrS-nx8e9!XAHHCYUe)K#N zI$0ZgDJzU{ZPp2Nfk|TjU@hc{SDF92CmXDnyY!otnyE7Pi1>9gE?0-%-J*UqS3R!p zROcMw%-l;H=!`OcPl9zTM;g(*m{`BRtE%EocTD;iy4jt0HjJyn?iiT&a8>rYBo{_j ztgj|nJ$>E@$z;B7rQf{$$g{Z$_>?RX%~>b8V2kKIt3ZqR?sJGd>x{4Idb%iC!QZL# zwjL@M85uG<%+*F+(d8um`4>i{l3DWMCKKizuKtn5qvc=F%Q~8?pw|q_B`{_u+5Ujx zP%Reay{QX-vsjjucH{iZn6;aH{hd?3z}6$QbgAA)l5hnHuPwxft~4ABgRzcOgJE`( z?O>g}*{_e6?Zo>$wMiz|r!Xs?x~E!sMe;pI?@eOLe&!WMBphznie)NXWj-5B@dV%373{ zXqV~>-Jw;@+@&=+7yF$%_?5m-Cd~gC15yp=T2bp0Sij@S77*(W|1Ys|963HDXEJ0w zY(A1I4JV6CS=pZq$eF7jh#_Z?_CN4`kK}u4;a>jL6x&`9-JXQaLQF~ayJT&@4+4Ee zop)LK6F&JjD{gnyTZQ>fb60F}_RFMy)XyQ>Dr3g5P2Z`b`aG7Wj(t;K`CwefX4 zbpIiFvMXjKKvpCshf-<)B>(-HVnTVGKMm#w!#>Y^vS;ISl3b_%JhMK?=YvFp6ZD$# zud-e`wEVz`o{{qh2yB3SR#0TD%0BC@=4Z`B+q1NEw;oc%q9LSvz;4g(gI07+<=m_J zLsqmm&}ypLZh^uxm_L~94;39QG7dka*K*=w`*W-_=;#+9OCd-k-Bfy6CPDFVq^`hAyu zQknD{7(NE4V|^OOQm3-OD2jQ^DVA0tXWpq6rD;po{E8FD!Z&+5pXL>B>EQ$I)WL)U{q7=OGMk*ID>67KgbH*>=x)o%jM4aiH-9-_U*!WmzrRc$?&SP_C&FZ^jbj_PsM}PY?nL+ zqnz8C-LfYy)qUsE{Xfvj)8gdZUZB0baI=|vg|1{eRR5Ws^}_bNSLi^dPf3srvTek} zypLbOs#)cI13t}T$?dGx1p-$)_gq$A$Ob)p*H`aVaqk4&D8kmM55JV(Tmrk)bXZrI zh5kT`9+1n<*OIuF3UjxUeI}NzasGYcas7g3&-tz;30{I^ad#WJJVFzarn!Zi1< z#jtg=9?xUfgB?#r%rcNne*I#&RZ(oL3DecCIS9*IxIXVFw_-zIx-R9#?dkrgZW$lD$uhue7Y3oTJ=S^+l5}TcYXxV10QEQNe-V2HuWsV?d?;tebqTGq6JQaWG{^QcX_V@RN0P85o8%s^ z1s~j->=^-$($i<+$Ju;pBPJF1Z@89=(&v7h%ASC)abq8So`lJN!!7H8MzBZLoaA2c zW8Rg!ie%(Hqc9I_3X9zJW{+E4vK&IH+&6EcW!5hBaODR5?Rf(;o@KTcq|UR5tl4;z zKc%wNa-6R3C(k&a_tzlS$r`lW?OqPgtUyl7E&0+nV%3X!(`joBC|$)0_Hg zi!-CyWGZg0Em$+zGpg{2=dfijEqwr!i^aP^&OXcUGsMd-M#(GbGu?ed2^hEKdyn(V 
zWaOD9!hT>xijG@Eo|VS3JSn-9mZ=zC9jZf(0*8rw`(wryarG(fPIFau4SmU?|Ke^L zI!?o*Cs@0-SleAB2tKLC{XSi@1Lks`k(}x`V8D%J&J&=f@X6cz#{BOGh&0j5rN;f_ z+|O#O^$=Xe8;*DV$%R>Cd3Jh%&6l!ro^&=gy1mI(U7VG>z*BsmnN3zt?FH{&Y4D_L zmN=(w%dJNAtc4#z`}pHWaLlOtCB(ziJv(wI{@nDgcd`aRe@m2adX&X;(2py*PM*11<0EViZM_h0P& zFcvI^V|M0b^!(DX-df9^+B{RvytOnu+ehgE)m(XxBPl2^dseyMu<6Vl_#Mi;M7gL`XAdSYsTTYIILigIy`BOZ`MJk zy7}-+@NUCadG@*v^4V38cV0u_llNR3oSS_TS+|?I*FEU7g9JCQNL@(uFsIvx)}KQs zPi>xoW;e5=WJx~}N3u&}m$5(3bdy8;Nxu&_@;w31JT>a7?`b^aIDeh%s#MudmB-UX zkQ=qX51e|6!WW9KrCIAyTI4PI6?|a=jFJ`XHb}iLrhMrMY(xI`r^s;x7Nm~b-B_}U zM6029I1IHlqE=K7Yq zfVt$jnyl~Zska!hi{~a2>HyLtf6jXtv6G!!ks-TH4;Oo{G&de^R(zlHucGY@V$WzT zK1QSco%=262JybL^i`468!+J)nE%d7&BcR8(CcozzRS_&JiI2n20`m7Bia_RC_7b` z`93+lSM!u3o%u1wwQ@#Qxjy8a)NDS``9-u|m7bkQIuHtP=<5$LV4*gj^X)lss3cBh z2UioidO`-=S{R|NvaNz?=A*pzf#75$JpUX(eN*Q?S@!(y-Xo>_Dfvm z4yUAJ*;|*Lz^Q^)ghi_s^!~xQjac$fj69L==bEZG*ovi-!J@Qydkr=f!RWjT8%_2j z^_u7G8Jmau)JJ3}36B#ote>;9+OV$U*ODnK(`vHy=gxdwzonCV9SJn)WyQ9oy{Bx~Xea7w<}9V_QCydB`t(IM3`qb~J0q zlhLOo@9ZX?&c(Z;^t_rsB(qIJEGjChChvMFKAg8(wK2YvvAQwrTGM6|z4HvA2Xr6e zH_zbF-uRJ>S*z)i>aEYhY0USR(*q(2xl zN8?Bd-j{s4$<=#5|1M_!u-=(@>s_1gZV=B)v3+LC-;#a-S+?+wWF5E~LqDKpN#kNN z!S=qmeE97}0;j!&F`Qh<8+A`S18kcBM{%SMJ_R@`Ng6%O2x*Sfqm9);RZ)uHS2!BH3R%>2di4Xn+1*J?>K}PHl^!PyrUt^>NvUv*Jfd4bH}UW zRyFPX3-4BBufr-wYUfyI{b_VMlegs=K%T~pfzg!(e*MN?*|*mOex0FN&o#-Cxs7d~ zru`an&4Jiv*RSM*$vl(&IkQ;mCVFLT>SApC9(H*zmpMeTbAICLNVz9FaB0`%SyooP zET+kF*LI1hDctYkL=S+Ht13qvj%s5qHWfeZgFLED374yGG3Ytvb!L+ zk2db6<=N&{S))~vPaaFp*D(!{lTFOdkB#oHP`DQ zT0vA>4#Q-!7)IOK?3s#6L!DiPSEgQQe=N!Kho%J<9#NPLUPh9pyyHxG?(|J(Sf{GY zDt7&_V3RxbzZ*yMeDezOAl@CN_f{Bnzh0*s6`Qf_ zLNaCNdiES8vq>_9FJOxmg*&M1&i)kdvitY@f_DrNQ)|vrA)7W}S24(+EdTVc~Q1oXTFU=-EcRo~g|xqF6CZEd%Mv zyg4h>+LHQK+8k}fy^Zb97Bfy_;RTRS=G^Sno=o4o@w$L?Md+J5uClPZM$6^&v(~?4 zTF6_r?at0jrWH?2X0%&*&+}N4yZgkI?EQa}B~Rr|HTZ5#QDHB-=AGifkjprk>c*a|t=vK5|No|^vOY(!Wab@L<5E_04ii6SxHfB4 zX5!1u&>6;Ks=6{y*|U1As90K@_hf};MduG@zf~jH7qZ@qE6}88X>3(w05*&My`8W|J)MV-^^p$7(CPiN@0W8wjUT 
zRe4vu#Q(k_VH1C4-N40gevQoYFsNHW{@ioV!HfBPV=H;y!`Ge8zg0v&*H2G=y&Ic~ zvBt-cxKKY+X*nGt*;i77#Cb3CIVRt!zxTDYP!De#X_oTHVYC`RqD787!H)ge`a#+iH+RbZzSLKHhR>gYW7&Jygxq-_ zIs!5~UH373uV;t<;d)8q!c~5!=)Yj>O`=f|pX*{#HN7m- z)@^F;FveYJOG)4~($+P>>a&ojl`X`<%!G(Xs})7j$&xHZM{ zwJh31AIWMm02lY?H+h=%0G(4y`6}NXhsOK_Eazi%J&a8C)5l?SDyj3N@myYbgkC;1-$`DqHMo?V>sk4A9L*=v z{0{8QUh8&j^^v%_jMt6Eh>3L1to$|ObvxtcXjtDz_qnw1B9c6>=e_7Yl9!}r?US^8 zz_s1jubHzqIl3>svI6~Ya?Zu}tW)}w)jB!zZSm$Ktvs!d7x{2jT6bsNj6|)SmpY2m zVf46m{}LPXTo_5Iv z8HSBCe<&$td|P0`O`<}c36&Qs4q>zLu+1u~Yel!zsn1=-OZfh*sE~WWR7>iooxGh$ zo|%R25xVO?tEf_Aq%mzC`Y3H|xos=Ml**(+y8L zv0y2!r1oS7{ny5u#UgX^Lgy|a8K3Sd%*xZ&&CX0_?<&yf>}MPOC$n@~xSk~*!P(5! zszUezk~bsKPW&83n`ZvrfbBb2bRN5BHQUqJ*$sEA!gL(;vxeg^T${42N`{sl+Gxtg4Mpb5 zab*;#=J1%T(dtN>WpG@~-v8A~IkBRORzIQJ2yM*wleLhE*S}&$GmK5<-@T3MpE*_< zr?PtCaF$N?oFmxcQx+HtyPNbo(Ad&L--AWop=N?}NWYTpJJG!d=3dEu*)7l>BH5Ri zyXcWvk&IKBEoB$yVYrl<0Waur2?@LK+hpFohE}&5$u2I;BR>*TUZBMtG-y?jt|EO) zz<8co3&WC9JKU1`6z@w^T&|CPpH~hx=UDr4^kp{`9aItTa zqqch?4z6Xd$q@g6EXhIC4?dYYG+?Rh2T29QtWDWV!N8PTuhBeTS|tR|_2JH7OBF!{&QeXaK5E#EumKrBgStV%|b2BPEng}b5d z7;p^y(k9;Vq-D0r(_Im3y0}q%1m5s<(uRh?tqt@>6hH3sT-9wmC3T0 z+J&hu@|s>t`}Pgz&l3So@zaykdG_)>K79eNtUx`6)<21Km3dNjT27^HGV8a1_^Emt z$UV(49Z`p`PD}7ssUTZNg`MNX5+lm3n%bfl6$<&%%c*zK!6;wSj zs6OvcO{wHXD$2?&STjC+5qbaif2u(pMykaulpvF7v}G$@vQ8UF9PlCWyxxtyrsxfyxzPq zJI)@_W_k03*7__?irgW6CVHQ)ovekw7z2usHgBL#Cs|gQ%Np)AhyKW{zV&6k>Vu5@dbZZVbqqoQc|4Mz&$}e8pcIeBa9V zc|Nk7jvLAUyngpo>RQ5syRg9nTCJ*$8T#FhWe>XU0=B;hi?ae`wMcop&v!#^ku#3N z^*P4S(IQ9%(w$sDJi9M)uUAxGdEe7WUuT*5-H2hI73L1#VCY6t4EJpE`& z_JL=8{Fkn&Mc0#PmOW2Li9O%yac>wmV2S=voTmLe#ahSWnR9+<9^RPdso=cAS?^+C zGdP`y|HE+o5s0=U=|GV+t4TY;b*8^l!)~N=Qw{qkh`qv(y6IuP)|0C_BWp8^%Bt~8 zjjrE2`!_A*o$9fLNc51lrtpBV>~|1*?{a^4G#`5qV)sJe4-)-|VHyAPL~R3Z{09Bj z&Uzlg>k9p6O8TBUpMNA-EtWV6Dihgrf{2qhhh_NV6GrHfY`Gb4 zK47U;yz^+>NhQ2N?2>!Cctv}y*CExz&a1!z4SmktNPT+tEIh5rle*d@m_nXs#l6J4 z1?;hgcdjMn&&I@I?k|4OW&@Vk+qIQo|6le$+idO+Ub}^r4=VWPnRuBzs(E)d+x5xk zkm@tpshPDiYv9#`R~|~*N63`7 
z5i9pHI%TKZo^1aArR!)T=WsUtiXO?Go@{2Bh1^r%TNOPdkK>)tzKWMjVTbVWF7`{7 zhrCbhZ$$qUf3D^+8(HCbBh2b2aTtIHRHC%fz8X^dE74EJ>=;KTk6&e7xjj{oGEPM9}>C!T8ryB8M8CR35DHy9~vn*(K?MaG_Kte)Kob8+f8 z)>`cEjOODp?_M&uHs<8{R&V;ez&obVEl;y^)w> zGUwyqQzYxl3cu;84mnHEvkEH|A>S@iPUP)XAl*Zsdt*_ucx9fEypP#Q+`;(!2QMxM z$CLEY5BkafnKu#1MKKjmkH@6!->ZiOw}^^8`XiabwARlQ{HJ;v{33`lPE71}Qc!R(*O zT>EV?=58@2>v3vnDfiH?LnG_e=ZgE;^<6^5*-wPOpSPXwUrF|uX`GwuypOTGB&~~z zgaylPGdl0!Zd%O(V*(bb~=UmJi{%gd`+pik9vjQ6J;q-$RKk#?f9zRI(j5VLK z|0EpBSWrrYO5M=rtaqMPzT$P0#f5Twewy>P>g9H}PoCl%A^jK|-K_VldU^$d$<8|j z%ih)BWi0$2Tz}DHeKuGJg>$uLv2*+_^@Lx< ziBt5GH)gqOJDdcmqH+iyZcD<r#_`Ex|Wec_q0C)KGlr=0@R`+0079P7^~ zo58LbSr6c0i&>#PUtUD}k-W7(uZS;vUKlTuJ7R#dv#uz$wJwL*NWNH)ypz~I)zn8j zmKjF_V?%qMpHaE3-?>9e7NXP-$bPEhV4AlTncd$7yqxkrPPeQL zO$M{8jb@9;^9qeCL8!WO+Q4}`PMzaZDqG~~d@udi@_Cr!uaoN#z1``mRKEIIWL%@I z)cSZCv#QYH6w;2=b}E2ZXPIOR&yK93B2uF7>157Yh~yf~+Nos5?JrV}AxByIsy({7&E&Bd->IJA=;{vyxgdaK7y$s3=i1nFxL4R7-KN31R>*1o{QtLrB@ zG(RTso)riiebdu9sXe}l2lwO8M;kj2pmnM_wl)5~%r0wuGrZtcB_Wj+01dF>JaK1A zVQk6n`<~i;8UoE69i{JjqR3j(kEci0?(|@hzj%6G*8kTyejg7@*2J$NFjLG)ZOk)U zo@PYf#Q*XfJGG_n;EiQ*IJL;8J3Dz0hw_Fw`l?}8(F+=L#F9#oeqT(_6Zy>Vvs36} zxXjm5a#&pl)wTNH$wO21?i%v!7Rz1~wa$X!TReXhzID{^iFzADzw8D2MTGcCf3;wp zneItuWEtnS`_zNn=hH7c$);mSZ5Ui?+-y##cBGz1{sTxkSyVh! 
Af3A;BSVGl=# z`*)(j78d0BUt2PqNuRuJZcfIg*p?mGm3UTN`n7hqlC{i3 zA^W<>F%Y)NiCo6tOL^iYr2LUT^~J|ztR2QzeshOVg9X=#I=dnG6{!Z|)@E#JCzich zxEHvDee2P>l;5|*cC){>X}3SDuf_1@F#7-k$z}Kn_Fc_ivd=0Nx_01lbJ!)TUst2$ z6!MRQXWp*u;M=+TO>VFeTFVpJBU!xz>n)?(0JEYwFuPyuuH%}@K26f!>tf6fk$Mwb z=1oL$9(>ITby=^1>z`wr7tCgE#E#oBY9YzGktfgDvM(a}1~Zl{g3QzYWrjMPzn)04 z|3}h&fNeRpaR6^nMxswBS`m2{{ysrBi=h;h*hv{Jxx8_LFI)!}R=fxkT>+}NY zL!VQ_(fng(n8uT9Z#1>m{|%x2yd`CxcZK#c0?uB6Zayb_Nair>(#qAeRDmykhLn!g zLhv;ZN0I0dbsxzl>Zs#!vOY?SFQO=s+?$fmDE;(6+P}=RZ?omZoygi`Z`es+mNSUi zdA^VxUrD}i7J8ErMmLFtl76)*E9lQx=1lf<-g5)`*06v^u3W)de}t1-BvJ}`o`SV2 zVe(^0$ZUINQ(BP72>QO>v$M3Gx2@-o^VOZa zSaV1+I(`q;gL&kYWU?Ja-=X;rRyPT)nQ>c(i_;({`!HXkp*LydN3E7r^D|_%Sh>4d zUMZaIrti;uDua(D?wG&Gnl+5;AfMcM6tLS?X_@ux+i+U)*2`;$=Z*kh3YWy zke0LFToJ!dv*qbL&;Gbdj@Pz4OZKv7B_})7Ho99<4NvpQCuwILi5^UM+t|_sc62@8 zk$5VJx1G6}WK_;vXx`<|b3K-pd7@_^vl?D<+Ow&iag2IzRx*+LGG}(_zv4!G-t z&!_1=yO$HcJ$cs?jV$N9R@2U5Y&^T(p5?EX^Clhqme77*bk9U@_A6eF@9E@y85vI@ zg^Dygo3?MEpFHH_vQ^4QJWu^Rc!rfVnf#o)aubYoQ34jYEBQ=wt_6_Lwb#tKkC#|oNo&TlOwM>+<(j;CF4&YUhXcuB$r=x=-s_5DKcYV~I62!gmv1_a?$*ITcFbPknNDcmOkam7afRA%N6YE>tw1`f>G~g1 z>BYWs-gG?gnv5)oV4ppDZ{qzDHJwi)IgOiG0kxqxCuXy^E+e=5*x0eGs0VsS)67+P z8pS5E)1V(;k%;#TSWpL6kulG~TFTy_#%w4#3hIzgcE@M!FwqvW*XUbUv%cIz>st%s z%UZBGn15JY7zq{S+s?q(w`6%D@0~fKW7UzJ1ljp81(mcckgMD21GdoybcN-yI3hq9&QN34K{ zhiD-ubuyzljo-godvDNnIW5g(F&$|-S)Xqyys=)RuDs_=q4^87n>pJRXs-!n`_pI} z<(tvYQnJqq-nsrxq~8l6^<{oJyW=`&=SDnNAfN2g8R+?k)$oE(*?BP@*N2nzhcut_ zPuV|r3@JBcw^`5qj?`-5ubj`%;CGs~yDDAEwQZ1BgZxg?<|(wElXNLng3MkcqnsR^ z#v4^4o1sbv1IY`StbMJ$dnL+u5q6vNF?mBz)VHIQXbcgzvf{+K9H7qRCR8IuBvZ|`WRiHoIlWQV{W1kF)WqKnuGE72L-@Yz6mCOm z$(&OLih4jpcK%h+-V9cr)9O7)=PJEocUtU6)`>sWk_@x=rM#Md^Tg+Po~V`w3nRz* z@VSl5vnn|lP7+}*Ie$*3`zt(^C@Gs=%?wo5dV9HYn3itG#UW6&P78@HTAQSj<+v7$ zYw3UXpv^)@;(6ZBFJ|2LZ!#}Un-}R>szOIK_?*us_n>GYsb^1aWgjzPnNKbWKtZ( zgW;$;AC!G1ccCWllv$Tc6t_e9m$A+7)uSH(CEr5d7PU1jpz9Pgu3^U;=({nA2^UqVm6{MB(#n%+ZJw-_AJv~A)uVf`D(PHw3cr@Acqw5;@h%9qb 
zpqpA_onPVTb2_cgLnrP|PNZH}@T$pBUzhw=`&omkoHRULxx8I{NiHMd_c!R6T0r14 zBzG_*w$VHM;_B&aB`dO-Ye;mHL`=MtkI9Lp>h9$%*d?ByiT}6JpD5nfu!`^5>=f-y z#=+T;dyg8*wFERb(C{XSKR2 zpP%!DS?B#ozmeUHo1rf;JU%DMo7`LF`Q#^T!rsc!(OU%%*^Gp`sx!M-K10=ddi<1! zXs5S)p7#yUCqsJ9-Cn}-Y8fw;z~#{eZaeEM;#HcE&!dq2JP!NdYP>!mW9hAUYU}zu zHI`*L$D@BFul5Nmxj+qdaM4H$udueSa5|TTB@af)EBbD>>$ie_4W{XBOQHLz|$i9XI|)7D2YdZ7LH$`?bGmAmFx~6m69;m zOg+geRY^VRSGVfVy79)}=*RCtcQVY@^4w$+&AaIW60C!s_i6TCBg+{)N^hFo;OCWP^L%X=r$s1jkM7rZEGq^d;nXHA`NBW6zMdCx1RYx~}(?Uy98i$h>c)XA{v)>?d zHfNGlGCE{GYNDsLhS|&jq+c6}i$nuG44p-l*?V?lo~M%OFK46oc}VfY`;20Jj!yL(!j%bN#=)B__WLg-^QvlFWZE@7r{@~l&WYga}rZX zqB6NPp}U-DyqJHQ;eYZ6BpPXU{w97v@=uL~qO4JsqN&UbtiW#-T1xh-*R_{xpOaR` z3{}vUQ^y(o4Oh=0^f=jbhw*b=adAG0y~}3qVyD&J+o{yW+POzN*(Z~|-;?O_Jk;hq z?7{rkk$n8u{LoT;UM;*Of?CD|iFY=aBtPPZPQ^=d;AJlK7`mtgjiqsy@#XF4nhl#l z<8=i;Q-`E)R^JXF`g^O+9xQdR~V4ZV)?1nT~AW0@w1E zaH=xNFjrE~GMiqD@t=pY)5Om?m@N9?=o)q9tve?$JO{ z=Wo?^C8LE=xX=8+&-_*;66vP?vz2d*+vjL;lh57Ia2;>71twas<3s)AOkdV%qtSLy zQ=w4qbZu9JnCI1aAvtYhCy$`1B(x-w$XMQQ7l~vy$R>1^q}6%_*&ajMiJg3x64~|D zgYQnPrL8P#8s0Br?RgiyM2}vR#5a;?4`sX2OEP}EN+;Q$P|tH&CGWrkWPNlp1pUAt zyvcL^$g=ik_*ukP|ERWx5S-cjx^&su^G9eSd-%>`yX(}OIN7t%-5L+CKut3U{mQ5P z==&WK`U?+vr%B}NM0vR1=b!MAHKDa=ZU}R~!c!Ojk5O_4%W6skIej+@HnVco00mdl z%}r?EM4w+NGXr0lXBq2y2YqVJ_03~H?Qv0=G`1`C0LhQwTV7xX7eIDm87Ei7>-y^s z{Kx&C*h4Bg@f45Ml|HMnl5zfKXM1wET|pj)!b)G#pQ_F0AomBfwtuU}TYeo}HJNuBE1YheFf{ox!M zuZM>(dFSq=btpUQz%wTbXl4lBK;uN8@8<`aLG~PVO;YlAd}eR@Ffu)hZn|shN3@kA zl}n5;Yb*D*+SZZoWcIcn%u=JBQmMLGeAkVjrGQ&p{`mk!PfppWAc`iW)0a1w}O^BL0&JsR%Us7V+;Hj4o`va zktj?|t#Rb~1e%^Cukx^xKCeXT2%$ZcD-BXf&MzH)r59d70X%xh3v0o=fkzK%I#d z|0Qcq6pQ38e3o{0KuP9DvOlIhIX9r4%rR%>BXMWukw_v-XXNs+`m>wo1Clw5K5v7< z8?=)+N7<8-(>~cX_oi0YtG^o=Cer;|TFiditj$ay<(!L}YLt;4=qA{hMWXf9^aiWw zgN~dPpN#e`sQZpTUS=j{mU`1a1quJwc5SxymMbTsXb6oB(qh*ApQeSY)Kiw1d>7K6 zWUJB36Fg7sZK|^6pnpAIndmq>(enrFB;$S7R-gB&1s!K~;!g7lk7%P@!ADmlm#ors zfy#I2qP`yf6rQUtdudNrC)3Tna8i_~E6?td!{ku(AA_!mq*fiO6B|F7k+Q=stEjK| zoH@QDp}H>GdXmj0 
z64j_A>&zNoeA5r4&=WVAzdDXyr>b)(dEKG)j_O+B?|o#ylYfc#O*XAY3c@&m)Hw`iO_2dnNS!L#wyaNgFMmp-lGfbRfqm&~yqOCl;jh3MzkhHGX=8XXe6u zO_XFdJke(^;x(`FeLUWiO?L&HmLv1L!6sr%5j{%Q7#fjxU2UBSsec*2Jjx&Jm40?i z_hR$up-=E!PTU_y3yCj~6|tHmoB5PvG;2d*Ilo$)t`bdZIa!V%kM&S?Eo(?7i=5d? z{;S{ggBPLpK%U1$r`{gYaULw9*vPCTS!**qGB$?)+S^dyqb z*|fMG1&hcp_Vc>7M$^h3a$BUHv(@n?Ok@v6cih(Ty@=1hqA^jo2l_roYl)-TMXBL% za07lbn_CoRZF#`tHMtf_<`|P64t@Kwi1XFi3lHgk`|$4P6-wl^crCuWK96=Jp2v~Q z3*G zS5D_gpM{XJ?C?n(W$fLbe)2w4LG76j9YiiaxYtJc^*BG2{-40-AUggCw+(oY=V0kP z?%6B8*TG()DQOCp*Xvqo}snu5ziUEOJJfHd{3dhYPfg; z=5org5hOM6d|UqaEl*8X?+_ZRiJ!h&se;>qyuz_$FqVeKkVhrbUk6p)J(>OF$!IzO z1&MAv5;iYVwl=FymZIcyZUQaI$}kz`a&CPJezVtRA@5ZJPai1NRITG_ZWY8<)^c}# zb2`6q7=LvxFY_)Aij!pK{Kv8Cd&o5-&5Q-^rt1x4*ML=JX0|R5P@7a=LQylCC z9s9GgqiC)H3d{K(>luZ%v-A(eJ)gXqSF331iOD!DL&|THc}dhyC(Yw%BQZI< z)5mz9^4>qha}UtnM&BM#PiAp?qo64taRwRHQbVxtkQOT_F-LtpTq{CXrO2eDtI2`0 zg$$DIU^Tj4cK0lIPg6^x`&?nnxD$Q`L1jx4DMISWNO-B9_$W_yV#UezmmSnKX!m#$ z%DQSYi9Z0fM=0H!ERQ3D@0Cn$q~y_V%I=%1XJ7jNfXpfueEr{~mUo}}P<i=&Psv6g4CQ@R!=`Qt;ie z?J;O==FaQQH|o(xv4@L&?^AfUTTb_Hkm(bsO-zjBGn}puuLb{4`oE4<&&TxwHa|nH zi5igoAWz`-TofNkpC{1sY3M7buK2}QP}z&6C4=Qi?W8Bn`P5l(lRd%3(NK#tbp!g=xSs^*X7mU+H({md(*d8X{RVF^HA!${DlboSS_S`qo{ zt=b>NmlSh%7@GI4I$Vq9Dg`c{)gx5nUlQwXls4Z}XWn9lkijS({Vq>brT1ex=luzPatM9`Wh?qlII7y@(K-ahxDQlc_O)ta<#kP;V7JnzRtY<(R9-k z&hsYzILr;D-9+I(ialmuOxDY0c)NR*o-_Mf+4Uv-OvbxMs3(1HBByS}?Ljy=4az@J z!`raAM%#yz=v1<9h07B?SI?NIF7GpsKkcN>F{HN;g6k_gLko|os~sxtfrNGBnSDfq z`N-mGz6b9eQTqm*v@iJKuhIMr{oUgEyh&u|{vO`-XB6L~o(rM;I|!)jb94Rvi#*$R z=xawBU0`Q3>iXzozEr*on*TsUc0RujnQLe&b2N?hqZwb6VO3>ZKfK^u6RUl>7RP$x z3{pm2eltTKSSWK9eR8!*A#s%KEb^(*pO& z)z+Bg+i8C|&ybibuan_gG*_gXd2sizGCj$l2`_tvRtK<@OHr41&||cjxJXM_;}D3* zymcbF-lX+pdyB2bi)5e0K5)5J|MdnBJ_?qz%Cp7Y?9x7(U))KXH7E2&xV|sBWj{@Fl6B)rwrTwkwNKK=CqC}IJa|^0 z%0p`Q(Ja?)gF??zg-kLNpP917#x2FvnK{_Rn@nV;8J^1e>T0;#yZ?U@ZjYehypwM! 
zJeTPFr|?6G=y@${ZR9_u`YGiPiXIk{Kqnd+4m)?i(Gc7u0#{;w=7jo#>S(C7Ps|Qv z9`I*<@OSw8UOnHj-&^#R9eI#M2}tCn%ycLFOL9S9K@U;(jWiHu)HaKx$=vCouq;s4>y1=Bo1wJ!Uer<~+G0#8z z)DQ5I{N%}0l>HUi&)tsJ|I1sI)n8QR{||wns`{MFy~S6)<$t2D?cgt4!%lk1;`q%; zy~I#{-;*oZS=MSFV_~-#!zY$$c2Sju>c;*q(9-R2(F(o$L-axXZB8oAA?3j|klm`? zNP8Lm*J$2UkDk2M<)I*%wYwRsWd${-7KgK-mr$Es<9TaZ1%cT+w;tcw2RoJA5;ZhC zaI%B?NOC>b-_mR&d&Pcobts)AFU<&;o}lKOj=99&jV%9U^d>J^-u|*9G#LN~8?F4M z$7;*>ZsRXcrL8mY`X$+HbM+fY&uq>Uu#?l_Z^J`Q2T#*-Hy)!N{qABRIjejUotARH zXMv*~p6>veUz1oPez*%d{>0PSYejyZ*5bozP2QnCs2xah z*U;Vv?0gKQj%5LP|IRvIR`sjWLE;ES>=5POrQPhj zolA$$l2G0&Zs30^Xr~4qb{LCg?yeO?-NC2-!g^-We{;NNOux7PyUGZs6Rjqi#anbg z8}_ocyaOtCLw4d)r?1T0QU`o(gpuTMolMfJ$mwS&z7T3!lGr#%+K+TH_P-8iccG~x zD>;O%FNWZbdb)quR^EqZ@WF|AFbZw2(qXdD9H%EeSh>xvAFaxe zB?zia;?wxA^GSLv`g0O{wEF&_i_UOcztERt7t!WzMer*3W zmY1>9=SpSA`WxCC1@XyI{|oHhMj~gZr93I-e_OMPH?QuB7^_(@xj-ljP zx{RcI;(I;+kXVgRs$&|QAByrZp1xb{_maRZ>gcD$EBxAp?DhjxWG`n{l^&vrb+obt zU!S0?ADJe??$_!}nrmaO?=KSb`|i@O1<0J+yM8w!DB{{ISG&$ ze~ElPO8viSA-0w1VaKa0`?NB4Y6}15`GX>)c{7T$yCo+KTB_@6b})yI5^e7}_j>4E zZc|(0#b-CsVy$k3yvXbr@>-AY@R~Dk&6OF=Y6jBjb3T6q?-%nizoKp(9VdQIvXCe8 z{oU~N1`7MIhXdj6U1gTjP_kMrDU?hPe;%ZNPAC7vbKb!-lW{Sg6CdglbtLOX`l2p4 ztBS)5@Nu{jOL3fiK5wflGmBZHJxiaGvu%m;+>R9I6ns-n{mkEdL}?a!k>8w)-br8G zq3KxM9fF%9+0JT+IS7Xzv-8<-_%lD!7hQkSYoeYd-gqb2Pt=o~EzjIfVk~5D;#xdy zqUSrw>2`iLC*JN*YvNI!M&ilP)0gg^M|t)mzE3vC=vf-+6FQ=3CcEFt-?l1fewR|s zS;y!2OP02GU_KFVCgQd#?lWr3*_}@0o_;=a7FR-j&I9a(i2X_AQ(S#Qk2(EyeqoG~ z{h22i<*#Lx$KvS&WzT|`A86zW*t(epxBJ~6|H%w`FOE+o$@}TxL-O8MX!i%)ovK`4 zz5Oz!zvO2g#z#ZC%baXqG}d5wL+CXp$g}%we_YI`o7>f#aZC-`{gvk#frIHRBD?vr zTRw9cvsvDsN*5v7#3IRw`**cd5*4#a=>^#8!2Wjlokt?SDVw>w%V;(6udBH}m+b%6 z-lv|tMf-VgKa<=h)5N`ed(K(r6u?BNNj#x$=r4zw$}p2C%HOfkq0pCX;E(aHt6UrA zxm9$wgI0ee`-$jzn_TuGmoa*dc2J*8FV*@BNMW$(%M^%coaGadI*&N9`ED zr?9qlS7@ak|MU&NnO!^&8*}AU=vY{emrK;c>@UnLUMKapU@PN19gkB>OF2W?*9iVf zv?Q9zjjW+Ev`;17Q+eTJiW{$vL=apJQ}yX1+1;|Qx0S!2lKjEYS%iGgWk-uWKNXEx zTdob=O;DHUFNZ--5whD!HvJ1@jRtCXhE3-GdeVD<-^q;oDbhIQEYgJlT`D 
z$O+YF)SNtsMbvW#SvJR0_AO@omJz^{N>${GYvbfpRG+4FXEbET zjgBs?s*yR7PVkktrtC;8&x$It+LoTqj`#iC%^BK@QCOJ|%%1nmE(N*IvG|Mmu;jeW zw|J%O+v&rnWpt6a$5%sSMXe+oclK*$mrh?6zlHtPrS+Qh|2Tan&&XB2B`#0Smd0E6 zEO^5|AUdP?OVRu-OsyxK-Z=P~FMS*ZUCI4c_zndh^5~6eumvp4Afu~cZax2+d4{p1 zJ(avOCYu38m9&@h#$S@+hwf%|Z!^2jPPFvp3(@==Ix}y$vq0xMR1VWZW=^u-a57G^ zes?;(SM{kb>fcgN^39ahpSE{3yPgx{H(53lljjqX%}R4uy~tm_A4tL*QF=QHo8fVV zC$jIN2+EFzxL=eyfu=WV?J2&a98dih`OVet5;Tt2dUDl%it6+D!$bgSO=Hv8^uxT* zm*}XV?Wge7-<^wLeTjO1;xGG<&LUhtPu2&qkrVYZm-396H_n@L_MRMspKWxWGuCa@ zUy9{qf58~C>Wi-IZGKm47m?-xbeim^HF?&IyYi;pnyzjnm5fP_^J#C?n`A*>L#|Je z`zX>+oZ_}x8G`TR;W`UN{b;I<`jfwV5{bO#+8g9iiC%Ih_hOVZ=Ctqq0A83vaPrt^X-_g3r-VLM@r8%sS_JxQumnBe|SIZ)cR*9o2mc zz1OP+v}NsauF+QFe!PI*HE61$wVZ7!t5^IS0`fMP_?toZINW5F`z0E=OKXG3;b`?P zCg1G5tXJ?4f56dpxCy>Wkk2Q4&A$u1Mm5qsgfD-Mrnca5FfC;zuqppCKr4+r*N2}! zg?4vncai5FQob`hKdFx|fx?<B-Lca}YWDsm0Ks)7lmLv48v*_KFyHw5bz_I$| z$(XI3nR>Lc+I*O_JK^R9ws#`R<2!QZICDmqL2fxH9KvT*W(kSyb{pz5-?1Mm8neHw z?j`2$=~_IPMBXC#&iveoEUK$Awdg6MX3-4z)=u>r`uAS_T`i`zDyOaFBjXaUvyQ^GD zmegt5&A$3NMuR)hcAzq=ahUw|chKrS{uYDULB<;|KuZreUZci#Xh^om>^$mdzlbg4A&W{Ecg6wmNHq5_mfVt%_R@U z1NgtMFuHiy{iY~AMa%ChSCbyTqJ#VS;G^^rnXR48TFc|^DdU2BwZ4C$pV>}MR}?Jn zV!kA6?m2U@$o(~FN}lg&cx?lDB_XP*+CHFz#MpUEnZ4&CYrtG`O_in7A^coTnt2u0 zX0!G~P&?bt@g#UXY`m$rNye`y+0f_EGt1vZQ_60(oZuQsLJzt39(0__s+!SLqQKNv zb~hBhf%glbs5aYr7HVIFp(I}k02s@+tXvD$gfZzJ+c4A$giU#+caXq?N2 zF3?iuSMyGu`M*tMd>gGK>dp?5s*cmF?`0)9@Af(M-ijojq0!6f@B^Bkj)u%;XV+;- zJbu9M<^1O1T3<;%W60!vTK)+yrQFF*`#V_qD%PUwCMB}^zLhO! 
z&qX&rv6pg7VY{kwx4Dw@&->9$je?~Fb47TJUH49mS1<`mw#lWxE4M zIS1B&j^dS9qdY4BkMmYJt$r>KK8A&?QO`N}$trQ?b<+1%!edc<^mi|BYt_|rJiI?m z?w4pG*`5=#IU}hH*~EABcM>0zUT_)N6(^h8MuXWadr869M{BXp>~X391COHiMViSR zQFc0>=W1gAC2qk0*SeweTtBhOoc(@)l(Lf|JChPcEo(4YMXl$F$DwN~PAa$`4|{sdeN_e;Qdd;WJgxgg(Vj4no7dTFP0qfp|;B6)G#>BfBL|hQs3_=?t~zH2LFjx*C;<%wGckhiW}J-G1}@Caoss){Uh7APYQ` zmwi=hyU8mtY+4yje68fOFg6_8bD}-5datH~L-_9ZwV!&o6OF0lDUfiN3($k-P^6z-7KX8jVHI`&-l8GM>$_xvE|Gl=8Z0AJ8JU_M{9iv zJv1!H`*$|*H4FM&ukyE6#%uL1$f$zy^ZAbYC`qKB7oodNVQiXwR5`no%q2rnmR#`F zN%(mZN*tp~de<4AeiVLl=4fBs*JFuW$T=%z&->Ja)?Vf}%0l(M+L&bQbr%c_r|Cpi zPJY7YP_+~{E7X4kD$BY4AS&lTR5jy)W(6cy#9J%hD#7G2r1h}2%AjjFUwtNApX+Y= z;k9Uc&y&eow;gJ))6R6dF5`*Q)V+=;${O2r7|mS3GI&kyj27&%E}iA{(~ER}5vd;s zEu&EtTU+7zL=R0&!rzrhJmJJUoJ$5D=^-xRwSP6P>G#-TVX7l4DfwCGZ%(3*O7U} zoNoODSCf>y-2HO!Uj+(xxt1JH57AQIN{{x;L*$;^OF!@@nPC}4OEX}6A`BGw#Gm^A zyvKZ@`~q*i`=N6Lyd3ZM6LegLr;|zRT2u~YhfCPWGQXG6#Wcu0fc`Sx*^lphir&su z$3m!HPp&cIC8W_oI1;%p1fT?eXjv>drdvC@n6fozco>rluR&K2EA<=$Y2w=Wlws z26{5qc!_6AwECM0V~A}msIu$XDNs#4-D$orX|^JpBD`24;rxXEkLkJ-Uh{U73=k{P zRu0ylBAtf%(wk7-9D*BZsj^G+)~DZS7M`USA8|7)IUENwY3E{ExE3lCzbqN#+Zw;0=iC2a>~1~!ChdKV z!>p7h;z%_J%o*EWsQ&_2MNyJjsGY9lykYr5uT%ktd$VYD#?c%-`#mK80j%UaVq*3` zA7ke;`0bkTlk*|ch63_ z%!GGRe~^4QDv~>)C<*j~w?)u#s~#rtk!P^X%Zx?(sdoh~_9fA={89GBobQ=WP@l|G zwbZuC=j?O5fhIEg{E>!g>I)`$@>;cQfVM>_+nf9MW;nS^8xN85d+14KfE6gHh0m32 zvW+^@J11|~&*&e;raxvc*?D}h5ynt@n@V0|z11a#LZWf);K7G#FRLNrN$VIIoC?X` z7^x=qQD?}nfwwBSJ0CrNL)@8k`<3^nTXCB=hUDUGr|#|Klv9{9c)E;|x1**9FLgKM zwdK*rlisZ~@RMF~B?R4%_K|A*7zS2B!FH0*o~N8qN-V==ywQ=ky8vxDWpg1p#Y5+e z+dSNTNwWLV$}k*W!Z*}`jJj|H4b3ZGHL)gM;XkM6W5I_(2pwKX&-FdmnM_B}TVjmlgwu7fm^tj6 zDGK8D#^V1SH#wu8n1yvvHCK&a!(lIyO8&23j2z3LE|D_gt-9lE0i3V)Kl!ibp?@)Y z?%>IjQ@##tm+<#Yv|U1r8KM7%zWAkk*ue#``5ihQ)zW*=nkZ4n!)Q16AB67CG?D(` zAQaBV*)Y^pMOS5LEX51ggub~vQ*8WUobHXKc@AV}4XLs3b;;^vntl~s(@;5)Urjcf z;&3_}#b?0dINVpJ%V%ij6s=@Ut~f69_MiQ@S3}HVbY_3+HH9%uGyeJ^vZ#*o>?GO) zvnyS>md~Bf6WmW?Z+g2(T#%d@xsLr;Q*(QqJno6!ilFQPvt& 
zZ<5?baxLddcC?pZSDBA4%VX@#-7tawPn^I{d69qg)5YCsfU}t_;8eb>H2T}K(2Vtl z(fk5jJg?nFEo`HpMxOkVw4_A`qTM_&ic`==qXAI$xShv*7t$#Vd~k;E9NxQG*2`qk?i5$ z&)q}VcuP`_?`lsscPrbH6tnxjE$XsA?_itA7d3yBCq_`AE17Tnp zKX)6-a!TP!&r~!bO16}FtZl95a(?i52)mWs+T$fXX5YdXWj9`uhb%s`E#529Ugl`F z!&W1BFUd|XC;Q?!s0nLp(3Ms6oVH5VzOzYm6|{GQ{EVjdQ(IOf%F;^_vX58%wIJ0* zJgwxZM5}8+=PgnAG(6sj`kcYYyLm-@^hoWUTtHz?5hT;cF!zo_?}2PFyP#(AmWgHE z20w}Gl6V(qlTjJemM5dR+D|l`8@0WinE(GvMZ;cE4xiH?{yiZAMQ?HI^BlPS<05so0LWC(R|~0lHQwD zG4qQHyz5M-(~ReaXl*sbe#P(JYQ)-~4iot$arnOQQw=(M;xK!6uSDZFbiR^LZVA6@ zwLcYi&$I58xF}Z8;#H`b&8H<=%UpE+LZVNQ{}JRf4HXmln+XN(E2BJ~|3g~tS@4F* zVzNIf-hu0j3erD~&ORo!-r6og3OSS9+9>4={ZI1yCM#%8&ey|D;*lms+JS{J`@8(c zuh4M`dX{TrFe}^*)6Xc?mXuy)Sv^SMBR(KAH!q+t**DhVC3!Bk($`U>-V;iaYq&bg z+Xpu}6_j)PCz4cG*R$f49q~CQ{Fz$kqiimzFHw51ma@{&83yOO`#dSX1o1g5*a7yl z59mZ5@<-luCbZV3xxs8aD@=*Xv7J39CS3AW9p!foY%SzXKk%tJE6H1O6+K3C+Q>=O zw!B1LpIWl~PSBZk*Xd;R6bUw@k>$#*!p%Q;d4=!FD%LHeatzJ~8EgCl84dA$7K$3c zQ%((C2b0y@`!`LW2gw!5z6vUru;GeucS1qym-6U;>TP1jMQPx2C>l>f_3?flpSl)i zr{gR4SJG`xc=siZ{h;m|-#;gXV{x#UMX&R-k-eOWwu8{vl6FVpVyAIK)+@h&(js{M zj`U~oTgmBhGdrt8tJ$BIHnKK!IA7cgP4U^66ev!9j5%;|pL>_$dV(G~`NMuBq2Ew+ zDKyMgVvgSI8Z#ng;XS$RcS6$fWd1Z+KTac$(0tdz7-qMcHx=4y2NR#N+p6k|yk^t! 
zXc}mP_wS76u6O4l9Nh>hZQ&%bC*CKeFX-tN{_!Xh`ZtaaC}=NpB8dxiILfx-;zgzN zK6);hAB3i_@Klce&tv7uBX}m=4MTTr6o19S=d-hJy!OSO`G-Btq{lscLJ!n7*KhpH z>n~T+BpjE~c6oMJq2MKw`JsdMo`JrrpnIcdXYks|vXNECig43Ri^3o!CjybafA8WC>JEyX+{`&3R`j+e|y%8$1TC#>#e$uZrp~Zbj=UA9Xtd-f` zZ<6W#Zn{h6kr&n1oTXgBKGXN*gk&2wdMxy8)Nand_F{KMj40BxoccAE$8Dmhj!<(6dO5{!@&-&m!$_!l zpulklbndJ7nXR>#c*5jHJQ4Sg`BuX-$C66+1?6O0C(qSoxp%rd7)8&rw~QuED`?_p zT7T2^!#thrHpv5gr1o~uL2nqifX6$B+_L}q-(!+R(N6u`$spe92`a9n?ndGa(;C2YiS(Z1{d>{K8!+Kxyd!PPv z1I}`G>IstmjJ;$ZRibzF)!LbADx#gM-MyW}vQyw---n=omb=+Cnmkc0(OHs&e?`x7 zpGLyz1H9yDvdF5>63>?-!(^Iz0@djUYx$hms+l#>~kh) z2>srOljHgSMC?hn>%UNy_%4~Z&un)cc&w?`%#CJ-v5NoM^?nsRJPI8j<8?c9C$HOn z1zTtU3EAV`&@(gL&t8=ZekYS+GLrwo9ul*zivN>HXDI3A^ygUD8tNA=LeK4Zo=#@J z<10Iuvy;9wJIRbpZ=XxRK-Ta}qVj>lz3f0uEcN1Atk0I7MD=34oU6aheuM#}^CT&M zq<31YwKwQHX5 zU^2R9p=_y=NAu{hoSem*%I>qXXl$Ve&H385NGP)T5BV0OuP>mhI;o6c>kYm4F48kq zDA1h9)mxNVpyemkGKyrAZ7UH@j^H<%z!z7Z^6HoiOMN7#=nOUH} zd5IZ%|KKYpJ=gkuK+V7Nqu;CTK{Qw4K`+GJr%*oy|2ZL_z3W+1yIo2vUqUOB7Er>N&u9x`z@N72CTbo39ME^_T!-Y09{%W3Or z&!o=BjS8Bf{~}iZ7mMu88zwemNxVm*^Xcv*S~#0Ec>aaoc(e+} zugS7h9cGT^)7G<`yzkzr?t97nO|$p=-^UyY=*pYylNt>B$n|~NV$5)F5^I!0jvpWkR?KM6->xk9#XswL`hv6x!X?cgag&k*n zdVrt(Nv1tJ%s%d%Kg&*=7nMqEkxjT-qRpJNFNV6Dpf62ZQ+a^o&$*QExD@(MSAG*K z`a_?Tc>I+~XMpdk@tt@r8LfZ94iYag(#Q$M#15LNjSpcdy92MsVdg&{#(O+YqCU@n zftR2#`zd25mnt(6QnS|h1<4H7V{A2goC8Ta;d>|U-$L_m&~U7$b4v9jcPEg3&g&%3 z&j@`>5guVCXs!k`B%qJ(ZVfI3F_RKA$yAopGq>)4dX$K|Q^?sMOPDN1{rE8GQZZ+No zAM5Zs2S<0}`*1jI?YZYrQWM>0&{*E&Dyn%2>@O#+*VUZ-XRFk4ELo;sy`E>kfoA?F z=qhMB68#V0=m)5p!*?Fwd){N)D4CV6_Ij%u$nXx>n**DP)bb~~$`>^8rPgYr>))vQ zi1jA!&19|iXCJf4B>p}#B5UEKwK_7>d09b9*Q4TP(#=ZhBCRB!^He_jPkl{OPd~>~ zeW2}4TK$>z*5jx9!NzVpB-_mYj?>P4Mt+Hkxk9`3Y2s{c%qFj#_-R$> z$J@fk=g_v0L^215(v!W=cOV=jH(KU7XY(_! 
zk<7CY{0>=mLs8}pO3`2w$SDpl-;-I+x=m)!Un+Sx>SGDl(#kIN-RN!`QhG*P|IqSh ztg*ZMH^Ra|8eOiQ5-7VEZI5gF72IY&*WoySUF*Z?|7|raGB!9}kAEs@O=YLqn>5tV zT5@RX-X19JqRkIn&-?E%(wWPvzJ>BlB$sTr4OqyNaCHmJuI0fVfzD*I>!L(rcXY>H zM^nPpZO8waJ6=~;Wj70OQQB&8raU0 zUhet>D5^kuTh%gAsaX2YuH38cdvVvErZ&++V&Wz))k?Nm)NeH{W#rtH?0(`eyXwvN z?&8i&U9v$O%OlK(o7%j8C*CnJvj*bzIMQj(%MZrqgIb{Wf5KOCtzDswWMW@Vew$duBK5ZCS6+s= z*=T-+)+Uo))<-LAe<@98hU`7HJ_G%q;He_|CcysZB=f1#AJYGSQS}S_{sn>Av+|Sn z=CamcX!;V<^C7+tNzn~?G~ z-JekCWuH*X4*f_Iy~ANNQ4YPsl%K(OBp!6eD6^pJDAGK{I3sJl-O#v<4)^DY+p+K^ zd|^(q?1B1A;JXZ@k4N_z^teF{6IgEIYeYLU$+{NV)xvdYDCo!Q?WW&K?oEO5T+L2_ zCTwvmPO|4IZ#wf((G2=WQ^>Cw`x-?r^P%PrAMR zeblka%w@OG`b_$)Pe(bWoE_$ql+D}uR5ymlR|q~9LfF!wTW<@Xa+~qQ}z!g2iCjl zdL1s`VN+T6uBDxf|2KIiGirI8$kps#JRMpR&FwQi`d>y5N5W`l_7)3UjnbDqmB{If zlucI6^JwZTK6WHIXC3Bedfh_?b=108Jw5rn6{x90Lq8TM&AQs5>|~hxnT@K6#ujX| z4SRnzp0Y&B*@4?c=Z_cLqNO!3IYO(a zp?$J@H@a3rD=Ybu9zNv^=DFn44ifKyfy_T0Lkh_Tb-A|BM%4(C{FJ7$j+ZyFesr~s zZWjB^+11{jm`R4;tM3HX@*&D^qvOxe+=tC(FX}pY=;+zIaCSQBjZ$Y~`OIbu#c-LE z-0SG)dGtJ|)&n6oC!#9Q=MBCme&-VOtWfVjTF6q@iStv`n78{rO1`T7%E~9>z+d{FS@e)uvl0cn z&B?=Ekn%MYwuP`3o;`|;GsAvA`F#r$IV&{-l{sNu$McDJnVe9`Yy7GMq@LKFIwa5W zd?s?(@kR^D1>Tz1%K3+9^`PBhxGioM)6ttsy$dBX^f>3?@DejMvg*Vpv^9KJ~Od*l*rMXS0-LaB?Kw zWXyDx>&f(x@$_OAcpa2yraLq0$Ef)!y1(Dp;~pGcT|iCFNVHXNF}5^@ADjgv>**%> z+SAW0U=ufy@}ubg51y0d=VzX-KQ6N~@;CC&KBVUA--Mqrbdx=6Maa0R8X7`;vVzSN#?6$ipgH!lh8E5&wY4k&z7gsVjY~$rI}*r%jwL{XdhR2PijMot4MP?f0w!H zhWLMyEN^5h*?0dVZ?m0z52w3Z(fTMoJXg?rcKv2{_&R?VYpE`)c~4!Hv^Sr(xdeVj zcw&oMufoq|bbh9KJCVsO_W!ri->Ea_;IG5WI}q_-eyT2?KY<+j)6pX|vyfzO;y=$| z5pUC3P*?^|i;>?F{lGIuDZi?5Jp8PNpn$(%NX<)gB! 
z_rlC$1zldoB0BLh*>{oHDmN8$FhOmdq2W|*ZP7|*(N?-LzzE=T&*n_-5Wed=$S6i5 z$ys*`4wm99(G>cly{CSoD+_C?u4Exh%$&!d;94>qg{rJ;w}G~r6zv$raKgtXx!O_ZJ#^3DEf0grO2d!ppt%_bP?`YX|eJxu1&~fIe zlF$1=o;zNk5u4pdkN7pc&Y|l+;U>GEr?@wjt}@T?7|)%R;C=LOYf(Caug$rstQz!y zsDp8^8y?PAXZ8^!>&G(mT%knbrM(580>hPyO9M=L;3Y|um;-RA*KI% zE^~duXm6cbo5A=cTIi&`V_E6{=-?aeK1O>dsdJE4vrgC-_S=za@+kkN&B<(QkbW>@ z;Rn(7Bebl6(d-!gr_fWK47KOOK~32EndN1zzPM-C($*C1B!g-)IvuNqlkodJ8Rc|+ z`pE8h{Dlu$@9Jr;9q02v^xsQ5iO;l!El%cn2eQOpj0j8P>RJ|XjMlP;BCGP-_>yk) zx`|)jrIt!O`{T6Jjy%4E>#VqTp|d;Dowvv#1s~Xs92Tf4yJHiDHKUm6Q1TQ^-K(x6 zal0?Xi7$X9eiJm;<7BHr}pmgE$8;WLep({ z%gKz1WcUT_o$vX-$SiyAf|Dm*xd<*dyZZ|~pQ44FbWbMlk!ev27 z13$8&=hb|MHa76RZAsu)mYZGv$FYNCMA*T%C6C%hEv0{2&FaR`X(RHR1%s7HJ(0z8 zrXc>W0VHJqO)I+DyBgo0KL0_%CnRtf-3^8Gkwygzlub5(MeOT1?bpP^9Ak+!WSx=n zXdW%`$!F`$|E2cK!1jly4z7N#u0->!&N32hceXzN0@jrr7_Y-)Q?j_+y^q;Qasdv4 z&c{h5RL5t$fu3t=tt`r`qUIgc{NSk$`ii^@or~A6^e|naKI6vOD7;sh?2^vPU48f} zipT6>tmRu9vdrG1`f$5~_P_OXBEIJ&R(YekaeBhy+PRnfPjG!b%v^_>$~gL&49cQ9 zCxeG8l^x9GwRac!)K%ZbborG#jc7UXueQNqvX&IHinQISY^^+_}5_x`O1%E^=?prjJsUSoIv z@JID~CZH~JOWSDVL3pfBpVcAg8alm#u5uMbj0yy%l|((D^Qnt;ErGe(o{a&P-a~8WZI%S^FxZ zd4P64qSL&i{R#8qS?ztEd<6wdAh15lziYlqk6s#ei`7+KiB@bjZ;sg+os&bi_}rGP zZX%@{(eb{gPh?5uwD7ZW_xS~zYD`2%J$+yjk(|2IH;Ij^iask$%tm8 znmCZjbv=!>XRpUrNV^j>@4Z%D>oeR^;|!QHcls4vWt&jGU59oQmvW-=$CaLS4zO zk&H+`vVq%`=no^`kVacbdx7SX`#I}mZOHW$K5qgv-a+pFD%klD^q=6~aRQMAm?SCeFz_kMX*ebKcR0VyiVhVzM!e5 zuS~mk`1RNy0S+&>(1Gs z`ZdpbG#{`SH@}hBMf`ZN0>-wI<8(G!9$s2O=vG`M^ZZ+Iat7<3M1tep%ifRdeP~i> zcM-eH-kxQw=_J}3K_i#(PUGo%gVqy?H1pZVv8Dk^>^2TO8WoR{&@rU*KCc!uUuh&U zL~mIW&a*3NKAMK2|0fd4$<}0p8%Ew=k@zc+^%(#8xf=eW!@cuUiCmnu)kI}!gNw{! 
zr8jSbzX5EdcfmT2;q_nAZV9+3q0aFTnGEjL+3+a!e#wqr&|)`Zj%2SN&SSlZvgDgT z0YV=n#TEFf0YS5Ps6;7ypLd%7nnT&#f?eg zPxkzq78e0|bn*Z+oy0r$=cR5Wk(2fKf;H%T z2XFJ(`T){R{giK?*~CD}nDHfbA4$u}`?bTf-=S|3URFce zdKTFUtrx&}V`w~+H(QUN8f0@TgteuWWCGfQx@4FCulkd@q9YIUFimeNXykda9Y}}K zPJLsWjpUep3+;K<9`ICAt&ib-uDUX2--j5JnWus51ciC`DSo=if<)h?hQ85kBk~>;a-$4L`BY$f5WPNDD-x#jnPg= z(;l48qJf!O{@VBK{(pg9S}A`V%gW0B`!u=Kz3e_YpJZ!8{8An%-hZ4=zpFoIZ?nht zQJk)01$A&*iY2e{?It?QE|4c#?Fp_Vf>Rx~Jb-t}zPFreYD|ASwQ(P0w4vc~H1r~` zQj|4hwIq2)T4=AP=clsYU68Q}ZJQu_U-IulI(O3GGTbDB!Ff>Ag2&8$%31XM2(HgA z^kz@;&*eRHk2Y^0^LfSqbxCp{&1QYOEo&}H@>i05i-J#Ft2ax;ki@LZ>ErQQxs>J; zu{r1GpY>c9sA(Cw61{k`Ivyf{mUua_fVY3a$~0|P zHpa~f-xd&)GoWkvp)LC29lYBjBcM0PX`zJhdSw#1x_`-#xnv;o5@mG`n2BG;+cfa)cT=q5@+7kV|9n>Zld#vI@ zpZ63#-Aj*u`15lr3X20p(zGt+a zhyn4^Ez~g-KlS{)SmM4iI7t#DLDEyinL+MGl%6_q&;<{bES^e9FhWNG}YDv7}8{qm6 zPmJ*Q0k~a*-;$91HCgT={mXFINsrMNHQ8^roc@EbF6c=t@#qPPUV zlM}!9Ld9f#_^FWoI&AImy%7y2Q`44$9JZsWJ=A87JTony;c&frvr4*{=Chyr18rVG ze+{5{4LVnF5BQ3952LHDG_sNRD(+j)!puY>FeS6&Hz*p+;(jF6`RL1hcTVGf!|pEk zb1o0~DVh>X^G|j;4BvJ6+RTq(i-BjJ1aA1v(=x|O3Cp2 zHLN~Jmt!FNanf9X>da|9LldXEax_~^ynu`YUn;zPB>&dljJ)+pFDn_hvzqKqo215Z z=+1j_DSbsXk}1Wq`k~`#b=^+y<9XR#d`x9JXwMtG4q2<9V;BvuRU$T9#qTcq_#ARa zpmv@9v=IwljrQcp%Ne%^c&bF_7(;hCVKvlv=xxKXnzu#SL!FT&o1k1S=~&eo#klV3MXr6e<(Yj0=sAn45|xQjQyY@5@yxB{)RcF49tHp70l!shfIBC{LSl~pik=Vg zaGMgz#Plh|WybemeEdf*o%QM|IGIbgbI7zafAK5(-UptS((XRw*BD2?L&BqQxRET| z;AsKM4uhaY$}NGn9V�@)9k(qpP6~#f!Z6X z&YWYZP*IaAe?{U=s>N&F^O<4WG`O43M#%)}o`)ac9| zokh20jWEAd+y1QPaX5aI{oM#7TmJL{8&Jzenrw^pqGlrHm6_ zfxITZWe-_v`k9aFzUt^sCdu?Uiu5L-asnzZEcn6CV77L_7k$i=B|d3Z0dIk~#9{me zO~uqdg5>(3VJFYGnAb~IwkcZ5dA%#h_H0i!pyBIK{zgHfS(!YPPJU)b9mw}H_cF&? 
zlO)E&sJz32OEErLC$K% z5~txk8T(2ZF(vZ!M!I+eY7!wc+2FHEo!)Xas(Z1Y%xES$SmHk&K<7^=u@MeyllfL0 z)k9NO@}|L2_8biH|5nzSbKJ=Vob$FdptC0o*MqGF{N3IN=V!B!oIlQ9-mmeSXm$tC z)TcB($JLoyOD4ghB%9UkMDI+*oa15aWC(Z`HWCx)LG54ab7K8G%Kwa|t>kOVnaxDx zI!xVVmF}(O2T|IJMvf=fV`!p1d2Zx&<5{!Ibrg;s($>vt&N@Ru2X89aD?yJ><)V~;)T??pd#iRYM{>h%q9~_zYFyz zPRKA+B@S*SJg>yx1-wHgDE>yN0puUf?#5XyR-BmhU#a71ZD$|nl@POpq}tNr7Vllf z$#E2&K8NF-Ji`m5`6FyiarG+ zlN|fO5YiA+#i(L=p8L=l{2myjk7vW%Rtq zs&6sb48z;EDEK$--}AYL5)ZkO%nI4faRiONtevO$-Q;CH7Xt2dy%zZ&2_YlsCFAe| zpldAK`9!OqD?JFVzE=Cq`m);#_V*InTha3^dh`y)=RL?XxuKV6_YeKo{mO5F%Iw5_ zmX$w>i&-$V5b`Uj@vuTI4{0-JlH={(r0M^`Q=a*Vgv#in7r2^eSCdG01G+P=`_1*1 z5Hy6n7lpUejhX*}!Y|?fPGhg^vw4O@_eR7S!5W6M{p5(uUXLG9+DV^N6^EPD@;?$S z>&bY}UHHDsz2tRG+}*5=Z`1Y*G*a5rpV7xrw0Ik;YLitl7FH9biOqEZ>$!<8lZEjb zmT@Jj;+4L@e`UU=C)(ej%fIw(Ia~D{=_lKBA^|3TS5__(V{aYbd4WE2BJ}1oZ88dG zl`K0ZuR(w2VH+vg5uI~feUtQm;sc&mYv#)mMez|d|EA5W$?z$4A3z>sXgSfXIKk&=^JYCT6=VV>-GjtganGO1x!N)Fu+sWErLYsCzxe81A znmK-flf&ti=ecj|bAz__;Wb~tb^AeIL8Ut*-mmHup>oJgi}w0~H9%#+yU?aO_oQ~6dc8W+c%2EM7tH%=r`)_Znm z%`Ln&xoS5%{$zU2*M39ssz0tA3)3MuQHLy_Y4LX6If~EZYWHS#?*!S*N9SP2agaWo zrOy(zFJplpNOm@v-($1-qTLd|pA{s(M3fol_&p20HHFmKHTxtS-ox!JeC!XS*lGN% z2UOM@#}ZSHCE>-ckUALylj9=y5-ahodz^cS<1WRHvG6&ZO}-PO`oZu+<8wvY{Kopf zIA)P=Z=`K1+zm6gnI~#5r9tjujWA>B3B$2aZiyesaPhGg=EL$r7&pU*)g&41*j$Zf zlzxfipBmBc(eH;oO&z_8{3aEJv*xEEjkB7#3&|&7bZvNNhL!y#m(g^r@%J$!$o~3T z!e4H}knBhQl{e%LZFb*u@oI@aGrP@B;L@Z$mWSR%zc4BF<3IL$hIe0>-z2Bl1f$yV z_}I(;Vi;HtUh%}#y8FrJLwWE+1&LSs|1MixMD}`qx9Me>IC?B;YS6a>Y0A)ccRb6g zqbH!a9HO)6F&4jeU;Cwo$#*Q)hqO)j&g(|Y>oH^(8{Giak}&yQn_ptU`=anvzgbb0 z^)nBNRmot))8G#o|Wx$#(R8Dyk-TLp8k4io5lEwm;ts z--c=-cDluptH}B=Y+94y4d`SwP*-ub2j2m8sA; z(RmNj=2_Y$vUleL!^HBez)$6#m7?oAEVzut^Ca|h_Be!9i?Ku2aStuTkj(S@>Srt; z%!=*Cm^BK2Te8YaV%F!{-&^mGV}4289Loo5k|$VaKY!Lv=5BmPBh+;M5g+*(q6hHA zL)j_2sCx6?vLZ}nEwqEz3AoY_2A_~4cXaavG}YegXtOV?Z{rKe36#6h#rWSe8m^?n zS7b``TneSP`Cpzq?GhKV&Sf^vHm2$B+&%;JbP7wQ{=*S$^SWL#*J{W2&oa~NNs4{+ zRZFYq@uK@_Im5ZxV^L9jN-pzc&gu!1{q=hf77oLV%#%9!_6zpfC<0}rbUjuthruOi 
z*^cHP({~@=CCk@vEi9%%#-xR0m`92U@jSu)R&Tkzd?jnDl= zs@*x|_s8rG?7o7AsRdGxua+ZOs-z@4#(eLlX?=mSe}!#Ntv`qOW?}gaI2mu;zyjI3HwiAiSavVIc^I#J41Y_I`b(T{f)7o-%Qfrm zIDIQGPbPpV&Uw)5bM;dUUk7UUVAj0dt5z`F8^Xn$6<_N0H&d2f``w<#76n~Qwat#SP@yOIut|q=L z;!E9VmYHmE($y(2Cae4w>ajA0Byan8dNeG=u^Rp-pIFxYXIwu_Psx9jh?W`9a1KXs=#U$F#cHV5h zbun*z+i}VKwTdK9vCnzz)Cw<>Z8v)}vv=-Hk-8##XI0(lJZ>l-eA0;CMr`c>x#Z8u zv*N6D$u&j=c=e%O@}8!C=*`f|Dyd|r%kJ(EG5$v^D}~`JwcZ_CSs#`RC&k!xJO4eO zCV2w#m+wyZNv;daVaprh;eL)jkF~OTcN5Fpp`SY5y{4s!7?M2w+wuNQeGY==QqrYv z{6Re83H{b_MfU>DlV71dY)WV)Pb9N%HaM0hjFRLXnPz@ zcH!`7aiJGfc4{}7=&peH4ETP|FV51-i;yerlRw$64~xI*xF=X+nD-y!LPy+ukq__W zU%#5~wWrTk7R*zbJK*#hPnbcz=X~-b9QMSeoBdobrW{|0{_8}f30fN|V(x)uScr4CLi<>e)H_KjQ>IPpn`tM;xQQ4vN~m7 zHmrarE!e7(Zy#otCB~#*Sms^adP84-@_<*c^LP?GL5{z$esA8HI^F51Hgu9nwE|7Y z&_1i!kHLe?mXl*T6#?>$atzJZ!ah}s_lD^J=T2g~Jj=~`pGDZ;u@DQ#(y1c!k{301 z&9~60Hc#k?bKmP_na^?`?_Rclkf&zlWAcFAP|$L`$h%5^rP!nlEf3Xid#$bZDivPm z7>zGtt97`%3vVvb^D|=6Wm@k@mtn4Xvb%G&BNmhWAlyhr=i8lg3g4~9tH;Bw937s} zZtf61L5Ad`9ihF0XqP8*SvTLn`K5VJ)+d*Pa(2uoeswpq+Y6ub?DPX1ov*)H=7$g9 zPfxsk6rN3C(1c!-d^(5+C2QGSvfZoi8YHdY+}Xarja*r)ml}oDyc$UNyD%rU>i)-e z$sM~F2DE}Nk;ut;W=~Qnr9!9(0Ntkt}#h_MM)cl3CN75ltvO4zOfxG)?*1)YVX^7M9O6CsfUMyAu>!qP5D={ke3(1zQ?uF zcu|+fj-$m;C?x;Ek36ZSIFel_bF|Y?Z~N)*JCf$y@TWn7=|h?_(_e@}MHvw}YGIcowK8Il^8z- zdZ~Ev82J|zBpa>uD@3P8;?4|F_yk^a0Gr=P_GKdBdiwv#dYP4d#*c3@BD|=N=+$0y zyNWj17289*cfz6tzbi?T`YgL#i$~Ha)vKrY{&zTxq<>x9sLQi*-FP{vlG85ro({sj zDXf_4?tL-jJUyhIYckShpGsoqn_}~abbA&PR%_u4DCAyRSIEBL=X9}cF8v3vTCUX3 z!1sDSnJ2!rqf0W{rus@&UVo>ZEqv`xa)d*9!g>sw&NHH4Xf*D^N?DWGg!S{xcoA80 z2ehTwoE4duI`&_vJW89A+DbK>7TUQ0M@GXp*}=ErMe5SdtdPX1;zTCCqY;B%ID z?_+i{@pfU^+*?k3J{(@T`;&TMZH!1oAao;Lk~JsyOdg=k%UYT3`;2$ld6#S{VPBY8 zl})l&_zJx=AaB->WPeagTFZ zqtKB!us^wn!9O)YQ!Q*d%VfQBYY3IWlq}wpD zQ1Gi{<6bLXM(P0aVA8PL-Yml|2|A1G?9DW{D{?%U(R{er^B(KqY zvcF50&ZIvRtG>|sL{aEr9NUAXs}#I@Pv@Qu*YgUIIn^nO`}QaE-6?RsNMFecojd2{ z{OwJLWsW#hyU9E7tnuRxV_{$VRj2oDyyP>Md4-*BG#l8+vXhM88)0|{yM9fMIpWVT zv|p;7j7HnE_zFFXYBX*riK=m!6Ns? 
z%aQR+oGWfrnFx(L$#AgvkqlmWj+{|`82(<2#id#HMCTrgN8wL03ETzG)b`0U_pRc? zjd;Hgt$q+y?xOQ^Ec}C6*Uh-ykDU8p`TlxrsJ#c+VX)(7(s}~>7t!u#c(T){$tba( zv9c&!hO+OWShRxg^yhu=7FfOt2TEc) z{ZDK(lXmABC9~W37C4W`(oy{Jb9T!dWG!TFh5886PiEIMcxj$D{*EnQ(4sxd^nz@C zttBHsCrtQ)hS@E!i5>o7hdFvYQ=Ch#p(}ake6JShRnZ`HsVX8F)n*7itxC^ zv`@(S6Z{X*|6KSMr`I&>=z*IV9m{C9GoMQqhU6YfO|H&j-)dSXS5GA|^-6kXbo!F) z$=Exd#j3O2RIxYvWM+tS9a*n39t?p;?qPi5lO|%~5@_E7pLH11(v93+Ej`J#IpiAc%w^C(VVufP2wKJF;C+B6R^vOb*dT z@;e^`_1y7X!#jRaYxd1t z#jjhk>kbj^QLlzzc4m>u+wvF-41&$a{4BGstl^kW{yKEY{he8OIR*OJft{zTLt#Ht zpI_j}2vY9lti9-SrGA&=M`rFP(IQV3o+o8;Bwax7tfTmq%m>joJAzlT=bd6-GTrQl z$ET9`Wap-;{S9QAZeE)8Er*J`WqHPHe)9w|D_#ab|3i|VDM4=Idxlm)?ll4sT=<)gs+BI1-c^wu(YWH){ zc5Jpw1pdc2`_nzEM6!xHE0ZoFSz{K?6;JlW7uf=Bv!EXW?mO-))J!;|AeEe@vm^n@?XfhC6hv_plk7_wGJM(VDpry3m13$B; zx*@rD>3bfTHi?y~EY+=Gr_|$1eaHJ@Qp4i$nl>ZpN7ClNT$N%_5S7_ zMI|kD^=U@X^)Rc2d-oT#8$+|)=^n~-mcjkmH(3I5WX zSEVw*t9TL#lF>g;R!g$hFMi)*moM<(4e{V%7Jo?_zhOkY=Rgbzb5>zyve6`a(PEPQ zM)rq%c4A>{U0_adgnlmMb+zH}xYoC^&So*YC_KiB`&o&xhBfZ;S!#IvN6sOl&U!jz z9=RRM2lI!&dCEev-c6@u@hQ#oYw+KLur@WtvI-;Xn38k50d%+FPCp)8o=0YenVOh= z**hy-I&{8Bt= zjp4_V<^z%shQgcPA4uk8A3Xxk#`!K!)Uq=;`w92Js~!Bax>r|g;Zki4X3KZk{Sz_n zFJtgbQK3JXvcvZ|T9xC4$=EUqSNmehr~clk&Fr__<@?k0V9snz8seNp zkK`Rl1(Z!<=QD+PpMBFE%}n29osxLIfL6KMyT*NqpNu)@>2*ESldmp05rWMM_Bes} zv=$S3VAmuX=bq9Nj;Ld1p66d#Ly=Lvfw|Yc`W*m`NqCT_MyWvFlLea;c(6A_n!qO& zZKv|JtV3_^++6d2jlCUkE1AN|Xt$z%cd}Y`KJIj63HT(>Rux=(j?ORXaV&I`8F3N| zH#IN(ht$vD`4kq*s^d%GKR`6N4-SnqY*Hv@sK_D#2hb zjJB~!3%XuT@=rypkD#)gXH@h1BgwZwD!V4`aDH;^j)ZdV5w*hKJDs0vykBXRrvt=fIWiZsTrY|{Z6JLjjMvscQJ$*MB zrr86NycNkxUksDd*1vvcVQpJXI2Y57gZCS(v&Q*%Vdh?1N!{IrUMF`@4e0%YS4YEi z3H{DA9*xoK`%r7ctFn_md$*D+Cm9sy>T?Hq7HMxaZK^jfGn&S;C(<#i`2L5t!}U273%Bw87qvdn5y^qoUQGB2KaLTxyRks< zYQ`Iiut@GEPcTQAidp-Zg$&l_xvbZXBu6_tPrg&XwW!xm^3bj#UakS!i&i7GmwH`! 
z7M9&!$(1=v-_78CIH^A2RoQtw&*#S#bbk~_n_|K=aq$NB-xu=LXjuz`EA#8^ym}q) z{1HdW!#YnYlIiC~^7SUsWbeyj;`ffpii77!(wE(`BlRpkzNZm7tFk^Wu=zoH{>-MU zX_&m7nd!E{n(SCgEv^RQN%qif(ATec_9X4^DA@XJ4A_^=iaM$d`QFC8ZiU%f_BdsK zM0O!&+)FLEy+oPBz>f6Z4-%;?w%T`pYOkXCN%j~WrLChxfp^8H3{fAE&%{aXd^`Iwbj zpJm9FDhJ8Qc?#sRDtwmqw(#XWwDXYPitwm}d2P)$GDANW;AhZqtGnbrBj;v^Gjc=VvGK6NiEA$Lp@)V&pxhpw#J#X8^L$Xi( z9n46kvfLj}h0W}%dWi@9Nv?a~w4SG??rti|t|a9#Fg*ko4aM=VNSr5F=deUpr0*1? z4ivB6)ZfwM%@d`2weTfw-GvLyd^#D1i&_6Z@}(AWcOLN&R%GwjSGZ7#zn0>8$%Zks zFeW88^~qi*A{>q1$s%;F;~yvIzI;i3ek1x@B1skJF4M-N+F$P6A2Bi28&gH@9sM4p zr|iQV!gq4q_s-28#Z&lu_8eqi&P1Ah=eXf`^Q52hMkWON3KDV z`?s-nE;H{*h4#i!N}iCf3h{d--`c%~w{u8H@#Uhna>f%jVXAYxVvNKf? zm7esQ9KgwfnoI`MNu1c7>$s96$j+)S#j}yTAUS>Ovee-$vyER5HB(L%x3TmXO0JBu zhl|iNSnxW>rb5*QF{nKht7x|-ro5^DJD{7nX6|6mBGF{=^BPS2%lRkip{{T9gkv1O zZE)s^+V3g~=SrrH9{Y%q2azv1ihdWHH^40Qxcl+z|NJdS?kW(h#>H#aY==_lk(~Htmkajpszy+((Y)OZ9aTKYkRakAh1gQugoWzQSz%{Yn1saPLnV z{KF>W^{~hpEy#7Ax!zA&`Ni)hmS0cjtcOT_jjUNr-mmO`AEUKAJIwg_H00macXm!> zuU<{xtm7xM*(@s+vk&Vwp75tn{=@ncY4xU=%dMhg$3i@O*;)13C;O?Y@wbZXbD((e zIUB}HzlFd<-hF~0+oAJ68vNx|#^%)+dy5{n88<(n^A?2@sBr9iaYai0ob^cl{q?&T(Gh?-V18YCTSF`KBw-|7EcYaQZUbdCB|z9eD?9ybQbKbzDv2j^t^}`{&Z|Nr*g1hh|!M9@ic> z7ivYf?T&54i_2k4AJS%>XEJ&mq~&CDc&{)XFQiQ}joj~wCRt$yvB*gM?n#eS^(p1- zvqkWCc=2@pRM%*jdSR&~^aHjX?3}E8y~Y`5kR^Gwzrn=rP)?q^WrZu%Tpw@H&w4n; zKCO6KH#3HtM2&v5o61VrMV-o|!%ueO5dbDi_^v zoG6cH|B3LY(BNG*cn_2QVXJ-dY$hU-7Wgl{1Os)i0Hk@az-7@${x5x?{i3>95(B;pH;u_ zi*fg9{dWHR2RxfOe~xIJClgOVXAX;8%BF8)L>=c{3jY(?V*^g)p6X@{-G>#nL9RZ3 zcvI}%+vmB9vqNmz3oEkr{BYm*BGsRwc&^c}6nCeKmF3vyG12C04C_|F@EV`yp7MXJ z^SdkZH`uK#M6!0RvvyMp^%(MGXWT?sFLBl=D4oU=E^*FkGM*&9F2R)+@HvTfKQBl= zSR_gI(kpSjQ^C{Uz>nlOOTGK#R7vf}pWt$lqmoVOa zM4PNo&+78qwY`#V-|)$ztdZ>gy+pobtj<-zfmoHy5}%8KZJc*Ej6M<1R`T}mSmY}@ z=Z^CWdiwzn&hVU4ywGvmw7XI)D~}m%*dv*!e&t!IfL@ffMC`hO{Ci;D)kca7Fz#Px+(zoG~C#sj=- z46apS_3TGpCeqiS@kmiBJ7hcJYq0YkJ>3j(LnbAPW> z19_UbvIEyIHWCblcJ2(t3y0vqwQQI8o}KBnSm{GnXeIj8@aj1}brhXaySfW5m7q^W 
zkuK4AEH}~dOTU-cWHqLKLiZ((>I&UOsc1-4(nd*{T}>wKl$2^*6+b>t)6>`QU^n@0V(S^x)lxj z<3(~Tze&qBc$sJXNBW)Y$gDg~-Qwlq?{pqlQiFf{l7x8n4CZJY^%QRGdY;Nh6P z4n93uKYM)YvCANQ_)+WqVU%ilZN-vUFV8fxcXW_;Q}1e!=sbm&WH#H2Uf=VeN<6C{ z-^>1hJULCJ(G$owTJKrimuEQR3f}r-;cs?#&A@@<3lg+r?^O7B6ecH&fm2D_h+fIc zy41UByys=OHu94STX`<@BcD6Rc|GwVYn6+TX`DFPj+UvpHh?|aKyw3jZ5Ge^vtIJM zy-4O)>5yxSiq8BIrc=mzx9^)3M$F`a?x&BeB}ne_IvBYI>r)Y_l-cY~T+g*ouD3gj z;_Jx#jv2?BJf|C^vbN-CI$zJ$Lt(bgjA9op&hxv#XUTQ^5?0+rt2`P1lHMD&cMUn0 zimkKQawE;wvB}AFI-kVZF*uji&ZhHK?Ekg)x6(P+_D{3(Wxjufv{z{1GJ3XT_eNT& z*ywj5@(I{ybj}_2WDaZ%v*ZOyMaxZaJ<2@sb)!Oky|#i-ckkZNcIuDxg=C_0 zp7g&)`!o2^;=-)z33G%uj4WBX(UZNq@tdh)Tl^_C(x1fhM_J)C@3ZdvB>Z^5Iavih zObkv`9gOj@$Xig`D0*i;oQxD1H&ctC4O!Y~Gj%0152{VK%ttZ?jUZX>1CNAH4NR*_ zjyx;N734Y`|AFl9lJ#L0{KwgQixt(dIr}!x(ozrqhr#G#y3HkhZPF*t#&of+Zoy|d zh&`!^p7r;M09g-lH2KHS;t0nMhuB{j_?iL!^WGzzo3$^w>dC6d zWD0JDnRD2?3vDlOcIsh%ss91wxPu+H@%m##=-g*aF0YS$Q<6opI)0G$KEm=;^gf3k zb;R4$kedqOtcvL3+y9Xz*EGv{UVZwf`f{q3e8=N15|w-7Yey1}W!uhteT#ACVz&5( zbe(y1Q5e6@YMVugm3nzcpMPj!BAz6ZPfapE2=CGCu?PQ6g{al$|H*CN00)L^J@vze zKQS20F7JCKSh*ilTDX-6uCxvILFP zPOeDbW8;l<*n~M*laqUy6=`|BmXmuvSy69d+uWzD!Q;<$^Z|bNq*?BX&4FG?=QnWv z>vS7S=WZnHTk!EZI9Jvvvxct8%#i)CSy}Zu7PckL7VVX1)yMTef?aZ7cNN4RCrL5v z{Y(r`Zs@VFs9j*w+jvtH8Y_54_Om5FL{GA97XO0TXU@&K&0I_F%hL`bS6zI_xRiLB z6`#qBm!~GJ==qy|58{WFdHgI)c>|x4W$hZhXQg=)kv7>gvKlM1`qXl*OQ#yfnXDse z&JVL{@k2=D?)-1QT@UfR`v603)=x43XFp8a0>;U+mC7_(H_@8**&!1uE+P|dF%rkCh`?liW@i;lg>s(PLcXMin)gkM6 zajFu$D;dqpc>ldIYK~F+G?)(-t?!4!4aR~cFe*p-XMFmdce#dWqp#gEwde7VV8+Ez zJw&@3#DV0A{kCx22>$m!Y|0v>b)xW2^Y~qSK5JG}y(@c9GyZ>1gG2dgb@JWA`s=;B z!Ik}dM@%(>U4(sE<+)W`f9k6j#8S0+1`B<{kCJa9yCE7HWv_=>)`RWi(;wKSC6Di+ zhvYuk0>{iYe}QIl&rR`uE!~nmXKy%k!kqr*P9L%RYB>Gj(=hpR*c@t(kn8-%^!_>2 zE~V40*m{ZkCW)6Xv0U;zk98$6famTc?cunQRWhmRSdIOD!j2MRTUqfj&p)fNazlLC z$I&+xcs~n*2f#O1SO@X3+_|0X?5w7I&d)SBClbwsKy%nUX7otT$LZuLqJ=6vBUR*1 zB3nQFNGA11$zRP`6-b+V+Xu3DO~}-P%$c}(s*(N>XI@B`xA@T-(r4xS9C51*{$vg9 z2l_pfCEjGYza8qP+vm4=H49K3&61bOZ>mb;HW?5I9Ck?5E^Nse`IyX60pA|tm<9#3T 
zHgm*`aDO=dXGdg(|J8gtdHcKY)YBmL0rXR+y)z`Ta(^#9%!but!fXB} z3sCBY4q~4?Im-&*Q%RPmCQU?{rC700?{|s_AM&+x@pK3d++Ogcr+IV(ZKTHDDDu@1 zVVW7qJJM`}qp}|@>oGRsdrK|tr-!Fu@e^)rbY{lk0~}L9zxUB=6Z<@*&&BMrMO-~h z%-WwE$u+vS@7ilIYk5kYa?Yl2pC z$9}P~_*IgB%C{zC`9+SXO8#;aB>|PNoxhKw~4-8|*g$FMe~z zdH9(q{0^PM|Cx@uxJ4Bs`kAme6bcW({w$;NF|0oaC)%1r{f7lr*fQ&SQs<-wJ6$cp z{!Ok2S*xYb-^1Q>9FZ)_dl(b5UVZ{598R{gNI6lA%6_9sd?)!$vLodo7=>l$;qr^H zz7YCL{rrR(eXwLNy$;3CbMdw=zt{%xT+6g2|54s&eQA5gH{zG%y%z^Qpwl4!wpuICdc7AEii;O@q4WgrO?KUr z3ZqcQ$8s#PiBuop+Bf)HgOp9Mw+iq7iu7a1`5Xziu*%=g&wclzc%MvK$sW{I+p9>~ zMl5@iJb7-hO1!Sh_EmZEa_tVmj?6u>GUpbrGGjlLMzcl0TJ&74|0}eWT24y}{xlU5 zjUClTr22*iskEMT>e+LX`Cl@R)zIr->^26IKIZ`o*lL@0o`vaeP+g$K@o?Oizn`g} zm3rw;nkFQF0dvOF_Db`)jANa=JDM+6Wrgn?Q56f0#m?bES4#i@+K6 zQf)WSIF_?Yd(vDg8s$#y2=7K}IT;?x!)Ki1QkgrWRaaI^zL%3l$! z$)A_I;R~Fz1IM~*=UMGn5XowK)rmiLCG+Pr&4^i=ro+rfiuh@!w@qe>v*5iB>x*IH zUS#Q_NG_oxpj#NZR5pSqLDVp1L6Kk=93Gkn*G@-R+)$9JX z%oBa{yftgX9?*Vf=2ZgeEu(KtJCOhM^wa;zm2_j(dbCC zu3XK2N6KTgmYSW(9bjuiiJqNsZRzrO5M; zukp8HVJ^FlT{8Fh5f8GOAiEce^PbE$j>eh8_(m$0wqOgRMk?a7T1Muh zF!5N(rN-0loz=~peLEX%cKoC8P944$zD?!N?5Md7-`YTY2K$Wg>OHc)MaP!BsHYwp z(4ilFyXdhziH^pl)OP)gu9uAwoqW|M?^%DQ*iy~c(uAk`PA>7Lz zi8~52r%#;!iCDSAh~5OISCjcom>)*(*5;#iMDK>!xTkZ@rN?mIxm-V)fq!L2-$IWw zFld#you>i4a6R6!g#PvPxciCM;dI!_KR?B}PucHaa%cVggFNg^()PgL6SXpg=l6i` zzii%sBs1AP>sFHkt1`cRiJtY1C|$AfD9Df0%bujqnue@|dL5HyV&6a`-^m64AF0K~ zB1>|xj=_#;KFto=Tp650nyhO$Uo<*}Z5H_K6t9ngWNj8bUaOsO@KQ%~^KEgK&b^_V zMTJ?s=q|ChHh<2F&Q>hi1rn2;wXgUyPyfxumh4(9X=M4N5X&y-*@x->Qe)$@7=N5t zmOE&jv7n}JW;i!%LN?;}RX$4vlmBqy3CCTDInT3XA56(^fZUr(Ci6u!e!}s|Zk*jf z$saiimxl1fJSBb_qf*Z>*DL?QrLfqoGseebX_0%aFJe+R{bglkcic%{f^+<)0?{nEKT6*YBG0RQ@(d9m`+>3} z;!d`1u7`&H-c8P?&K>ONy-AXl)vszfdxmla9Um&|vr|~OJOoOMy4h{suW&W+RH4r{ zxUq&Lsl9c+)&^rj_C+Ks)RE%wa+=*n-|1|U%u+`?zq!^H@rcgOtH@6}^0MqR$rGLJ zKIw)ZWwnv2R9Va2k&ao(n!LF?*zi<*?_+i_93H7>@H>pAi~Mcraw=cyT(DjRmOqb| zyekf5?mEi&nOe5FR>-}C1+E1aiwTK>gCUbFrcZ0Fn^#$Bo_mHlD?4LK(qoy(aJci5 ze{>TKrutpW!%C7lneDgYaxzae(%$)4k=bN&#UEOD+H(`n8>+PuuuJZe%HqLX{5^-f 
z*RjmsVnXg|b>}s|`lP0OQk$UT0SuuAlv-kQFt#`rvRQt>I z`8wZKa$T9-X{YM7wj&SYQ_JaHOdL6%-y}!!DPsBV{RA!le@68#P7QJXCOynx-+DNG zKFjU!x0HU1@th*CyN&#*@SL@P-xys6;OuvNK06TZVCB7d>|yYDg|=t#tP${?%9Bpe z?&%~R>zK@7Ce!u;Y`P9kRrQ$hIF&K;@fdd(U^-@tS8tb70yZpW0}# zB|E1!d==h%rcbK-+gv}Xn|QHtdI2l7(oPpx9|O;~9WxAfeu7J$L1k^hInKF@FJDOe z>a@*Tg*UZQ4byU$Id=^QYNLfu-sY#*c%8i2$yc}wjz#D`k%xSS8MCqE8&PF#D6@B2Y5hCHLeRY}l564W~;VmO8}WPHgrKKi@>Ucg%K@mo(M?=0Sgw-kb87>_x~< z=U4T97E45jOW0x?SuVr8Myz@c|8GOC?hqV^m&tT^AdDB0GtcW&$0D_vGH1-%@2j!B zymN+OPxjmG%Nsw!tBPiyrL;I+#7+I}RuEpua@lj2YwNq+eQAtQ$wO3B-z}iq+fN7D zUPpuayew-qZXnTg?K}XpS0J9fVA-)TO@xmObM-O}yOZf6yV_Foq#VBAFT&idy%PBH z2PBsJw5%Sxlc$bIHNc2ooCjaXSEl;3G(LRqm`mU_pD*`=ZX?nrgZA5`OMJ|8?#6J* z(~?!B$o<%o*pv#**+Enee>20}hmHr}MxN;GfM#W$P~C`^>-~&Fo6PBk!oC)-XiSy` zd@hw<@`PVciA2W^i37TJ6OD-S?S)EsS5>=^AunrUwnr3^OQDo zkLO^q5~}?nkteQqI4b+o--br6;>XhUO;PFqJ?-iGv@}j7>OPB0Z<+NpfbsKu=mg(q z9KL}yE8$!{EU09*e7cyO3})#q5v4s|oMg=YK?K_do$Iu88Jm5>rWsv-=K+WM_Ac>o zD>Sq7I6K)E!uLnLWhd4h;>s8dI7>HR_8+GRQjAMs+V$_ zD5brH+R6^YuQB#=UiTbbOY8e#K5`r_Zh_zXIQxYjk|FdzS2M}^bBkVAK=^sc=lbCm z`XB4v63ok*=UGtfORmwhs$aMo`_T70==mY}a^GqU&ZqA(TAwE_^vA5^vRy&ghiI5RcxA|03DdJrJ!{?`)@$}R zPUI1}qOGOR+*L@H>eM^QoGNv2+hRde+)oDk>SPM5Ci`c}!l;qTQ+-L2^{!v*`+WX(HcY#*aB_qvBY##;excQE zv=~XR8En0VH+@RZ-K%%&L8h+uQWx|gmTd{c)YLl6yQY3FWv`)_mb>E_lZx?zBU!IL zj--D03!+wbC=PUHt}h-V|1o~rllWq1W*$%-J4e8MFKGQrn(UCM#j6&>dO8#3@ZB=4h{%vj5#IpP7Ajeq>d{iQ3#ujv9D7i%#uW?P|Rw&tgW|IXGG#=9Mrr zRd?TH-_PinoDt9P*#o^F&3egv`h?^2-0@cZJt-F44Vh+aoXWmM=-AA+MOf!o=iLX} zgM9v#9$OjFe-US2(8n}q{SV@Ec+T@;Nh--$S+=o%staZxQvf zucN62;c7az1!7VjVj>q>p_%wz-$)frOtM18eU;9b+swIwZroHSjzK|pr z(`F^Mu3@2`Sdkd_C6CEfes*;~ptbk3`GD4!_-sEuvd$Q=i*C6pSxmbUWIG<;5?v4E zIhWwsXEcA2CzKFv$2jwb!c4PmLDpyJbTQjxb!0Lv=E?lY&Y0=g>&cUq8b{!B?jld% zxl0SkCND+SPd0{Be>l!C(>h-p--@z%(sF=!ktcw?@L?AA-Ob~Zk9AKiAIr;ziR`(n z)yC_g<_mK~^oK>-2SklBJYb}`LY`Fb3H7be+6&i`IX`!QQ&a5PLYymuqpge_7x2V8 z;hB|D$-L1N*Rx+Ro;(b;${W~Mjh;YpEcGxei->O)2-+1P1u-~TNxu}<6{Y&Ppzr7^dDgs znLMr4jW4NQv5ZW4`g9!(7vkFk5M1ZoQOeZ^Cc-t< 
zMs=@$BtuE(%;nu3_}1O9{DJRX$r{hI^pzw(p|)8A9DM>yt;gT( zyuKBTKW4k0@IBNF=xB&^!@jI7*siZzF*dc3Ze;cCq)APhJ;;{*N~u5=pL&#Ej)K9# zSa`lyAJgba`p+ZReC;)5nWG@_iAb3po2mX#S$yq-O&9Xp=Z%g<`BGL@XEkMZu%@<5 zo{OGIgRnKb(#)f93f*#M*k;lGWR1!0!^HTCV7m z#c&2c$dWaaOCWgl@9U9t`c;P>**UR*+z>bB`-Tnept< zNRPQg@FyELgZNF(J4F8*%v_2=AkROZ)qe09!WXk)$#E@=p~a^R-bpm`NX5Ac|aQ|j>f!Jj>=B^3Gn+0@)yz}yAp4K_ba~5Gs--@ zx`jQKn&&5T-1q#TDh(boaxN%%`9ZK6!RzvT>{p*Oq*YZvPr|1=DVp<=>-fP;%pWSY zA4R6m$$c6-9}Lk8j5}BJ&y&4+P)ymzmdPxb@hn$6Cz9_cTJ&^YuE{PZ`}t;L+3E5i zDN{Y+GX9=Ond`jQ@HG3eQz2(MZ8}5a6keKIfNzUkuR-!VoNMIMo@A_sK?gvxI6k!0 zQ}TYNO6?_Z-bCk7WNnJGcYYDJ9>z8gimILTJB|L8up)QT-#5nG zK*FtfUxz-`*m10HQUP})q&i{1Z7h@ZIl23E35JxXbN1V0FTu^Qn}gAHAoIMljv&>E z&P(3Cll7fdGLOO}D-+7lXa^)$())EXy+W&P+Rn46X?mW7uh|=NgzqQfNf+|}iW8MZ zkW@62!_A1kOFXXQ`}*3OW|TUJht-GqW}b31*|VoSS>V4R?GHR8SssRyBKvBywrXF; ztTbxmsyk~%lJ#qpqv!Czo-BU4D8DydyA-Y-N@8{^N2G>QMxNIl8NV*iuATgyLXv9> z+9X4MD=qHF4#|{}{nI}U=Dn#VEIJ@Cvvs@Y0V2i70lqWNz9aC0&-Sw0;|KH%wKRkUu7RsH$M;xDc+J|3m zr|o3NAEVFg4tNm`UpPKFmXezIU#n6R$^GYR*(TXl@*M60ZEbX9Y*3ZAd_|(n!cO38$xYHyTzJda z*@C6-6@f|_hcd=D7q4pL!5W-uUl^PB37Q;{!RnB%=B5t_O<+KGN#@_i-C^Fv%CFi_z|n@Pqqnsc_7{I zb#$&=u7p8Rc7B}YniOV-oAuq9%y+~8YBDTz_jVvBOj8j zC608*rqQgs#PJ_s|2g{0v(-ekFEPBocgcU8d~VtGaVi9Rkt_EE55<#an2;4g$>G;V zlu548N8zzRJI`RnHGE}nJ%+tUIPy+>JzA_vO~byppXbq2>GL8cW-nK&*e? 
zgUv(w`y7Xp!MB4EyOeL+!zvZTQoHVI?3m3%s$glZVa}#SKeMuP@!%PAyKnTF%nGSr zPzB@OHy3SyPYVmYY+0BY{6p64J$ai2=4qv9!A6H+LPN4YVYgT}sx!w3?N;RrS=2=cX1-?&9Ye@E5q08AYxDr;&Vzv7sgD=F@BlJpL(QIuU~& zq;WA(;CbJCgdcTjw^rY|(oY7e-0>-gbM>(#5|7kF)>7>9?qSCr&(|M^%GG?a2Yr7d zSqIT2@g!Lc4&Z-p6h@rfcbZR+gUMXcczhqO+>a%#UETB%CAPBNK=H1RUUzDHU-5g3 zZ)?)$X+C+4s5O(0vGktC{Tm&1ec>AD8FJs}Zo!`z@e`Xr%CaR%v69t-$!%KSoA#+9 zJ(0i8W`)Xd$ZCfx9RDZ0)!rR_U!Nxf zbgCRqB-_=PRo!lwvXvhFJQWlk~FCoLSg2-ZiiS8^yXfaNIj z)K=L11sStq^-UhM4{qMh&tD@$Pm*1L)yV<-FDvHmY8m#~Ot(`Zcb^Efdl%6TR^7#0 zYVn%i*nA=^Nj%GGm zQZ%TKeZ!3cl}Hi{XTqgEk9kawd+4WxmizLL>`^OArV)i`^`vi)z}d?ovA?5oAGsZ7 z45Z5*{$@_Hw=pi2=x4*U3Vv4Lk+q>v36ej9oJWw3ZY$JkeyCRy{c zkQ_tEaG1ZPSp7rNR}rJL!Z0h;vJc}}UemygrYTM?g=g;dT!gnhSpH5Z4uw(f5uWb1 zf>@X}Kbv^b3!>IIoVoU>POCI3AZt6mk0N@?jIx~6*8Z${JEWV`}O$#~IQyUAgZXK$%c za*619lGw9ETd5VYlvf>t0gsTn7eC7go7&u||NTDuCQsB*sHD2uQ0HXz*Xf2U_>jQBoF4@ zj?Yet4zz28lbN}_gM<6|zKZdED4tzttUZ9fACe?_GqZnuC)>@2U$VFr<(<2qINU|= z=4@WYZ*ouOZbbG?XAN;jpRU#S4wfEEx2zQ$48z}8a3U*|cg?ng-IG&0Ys|BgV-QVy z!78geR)~vx`JcU8uZjp+<<%3C#}w?`L;Kg8i}t~;>|V*b)B{O%A;z7=LpS)FJR-?y zeLj?XlOWXuPQ&RbTFUiqB48Cvse(209N*AUTgZ^-?j!KxWS^fy+RTc&vgQFSmfE1h z&Dm1rCaad}lB2leGy6Rp2Ts@e)zHoTg%S8w0~Vznk^KQR;e8rgU&bDd9CaK!_gf;>fI8yyTIRd zY+8cdw~8MXjQ@u?JJx&xw}#@{hordQ?{$S)Q%m}mhF$K26zA0+@}d2Ce|_WBi}=09 z&qQ%!h<>v>>rvcI6y64ptS^jBw~MRO^prh0xw^}ams7}5-?6FrUIm6jVVJAtd+;kO zsI$X-tIxJNE9)8d!<#%Qxr<%D!hq#kn1a_;cwENt2Vj;Jj9m*%$_l&&_;)n4KG4hK zn7v9LUyCch@ayC`%(Z0^qe$*)WKB=5E%wH!R4;hlF}1KDYbH;ERqhT>5hafjAAQ!zv(({8#)F@ov9DgU*8IR0CmYd|uQJa; zTe5SWrj&qYayJ}A+CN3FWVFgI&n}p>&T+Xaeozl@)AJduS*PD{w==n0v*ILHINFGx zI=Z={?@RVn)=jpiN;JqS%tkct!!D^{m#XQNdD9?L zZ)RuWe+3_DLi;YFV6v&_dhtGSJe9#x!~7G{tR!D09`Ogn4#3C3ykHGq+zh?9yxSo< z+zYR)Ue3O&m&rH)FIKVKb^0BNM{m>UA#52zlEz|XMvY|4$z9c(p>rg}ldt_vzT68s zf9pAw)JJG{G7O4)KUM_!R8;HD#>o=Cly@X&YwE{dtEcMXY3l0~A=7F{_keY(4Xq*3 zMjn)%JlAV`zVFtFhsoHyQe4aa(mmnwGghu7)2n)@#_R5)MSZqw%p*II>k;g#p#RjI zOiqv%g&3BdfuBP53}BI z<~RE_;%#4$dL&C1W2fBpsUcopV}4P}(e3%#d7{BWBXJFpD;|~Wh~u5LfF?z8?OeLl 
z_S->EIXiVhlUqDb4)@h-ajd#t1nq!}!=T#U__PXQ7U;A*Roe?XJ0|r zo#IdSz+42c`|vMWu|DRZ_4)j-Wax)!dqHw5Yy8Tds~z81pUKd-9HvWH`ANOD7EO+Y zdg>5X&{h}R$$gu9w3?N@r}-vp4A#IbYYLtrNh%vBpTtx?oc-U)b(m)lmpk?nTzZ=P zSwlFQ1;5AKJe8>E)v=`em~CI8_dcxsFAkmLXE@|P#h0K{j|XS%Uu7}cKJzG%P09wXR&Y(SiFgQ&xy24 z*tI`9Ttc%|WPcfNlGk91NZT2Qo3e9VpPYs@*?sa83lwwAq0Y;bkgRoooi-Gl2!Z$c!>O#dAI%2AJ-LK39=2uU`|<4^czh$Gv=eo*vo-5w$Kc;wcIv>g z^GKUazFCETxmcK|%GvGE3#aPBw3+ta6%$g|Xqqz;cTy>Dni)veW&Dbp6CqiR%suJc ziyX-So##-0>FW{9TutlbO1lY1Ci%V@yKII~c55VST~;lnYIX8d?dv+|d!tHu%u1H! z`C3XorF~$U)qkm^d6BkW(RL+Rm)C!?9nWXG?v8(e$CjXHo|WC`+nS_%gC$mKKhKrx zlXNtX>do${u<;JtCQEJGf`sQn;dTByz7RvR5C3PqAHd%Kux!S^uVMSPSdhC+YxFXO zpEowHmxE*x$Lv9Z6ZKM(WtQSdDlg`a@im26E&4dH$-m}XXlG3ZN19=()n8MO4h7; zyGe^@`u;?SJ*L%5p}ifmHt@Z}`B~;(*N|wQws)VIT_UMPpJP6S(67$ui)rimZ*4q2 z0AKeb!EAn6i|3^7cr^%gz_?D%%1YK95U!=&^+WJcCdAW*08br&%+;agLb$ zB#cXl;hE)s<#kK)CVNaLTpIw<2Ov;Kn9R2j&0h_z5jbX?p-qnBvpcBtBeA>z$RIbX`fA zJ3$X?cOk2{r%%>}W)D+Melr)E3yhOl0X>es=d(%ntSmCOXn|?T%TpY$Zf4(>`l!pw z=Ru$kJ$GLPW=-LNKAT6U#kBbrTKn>;T<50>eRZE@-dU>P-#y_nkQMU0Xb~BzKsqa8 zbDtsAA~$QPCp)|$POpJ_YH0q0$E~qocmbPrd@gyn#=t30f-1wb2|ubSf>y`ByS4ce z#2#aX=irhV-UFh-O+2@~J~kDi(5>{ziil}ua9_hCxw9MMUoE|C)z0Jm;3L*4M*4={ z-^*tw@XTCk7ZDSyuxnQ`oGrR_bpD6V_yJNUi!8bRn+yM}X+J`{=WBILfs-3}<#CSQ zAD-)QV>X{`riDBaK9w%rFd%u~D`@FCZI6aWQ=aoC^yaeeBl@pSpL-y_j|i5Xj;YZ) z+4+@>G}#}W924_+^frBFwb3Z3X3tV;&~&70BgenYFSC2%a^95sBqiZc8@JEJl(DRG zH;GC@{Au%p)5OT^4Si9J8N)x@IOi(-O#ZrSctY;GWq;CVKJP-`_qF&oyX7uZ>T+Zq z#A&{rK(4VYUzSbY!PsP3O-7fmc-mhDpShnMbI)Xd{rA;pGV^^*mv_XHT+MZX{0?4| zH808VkSw`pKqjluzSh$)^MW(zn02$oaW!jDQxUE*jmwG?sTBT~Sf4tD_1Lzkma_Np z`a)c3sn@jEj!m{WCUfNhv|Ef@*@sw+#H%nSt3{KMvM(ezh@!c+JR7fu_?!I`$FlZ5 z*wC7^TS$6|xmQg`&Z0q{2A<9Kse{rFz9o3y-E25t44a838_2vF3YB1&-GRxkKg!=* zcxYBwWU2Q{ztcrBFTs_&EH}?_BNlFhJG^VrG81C`)1V46U_2j&Yp?- zG|1D|tgk;o#Cx+~tyeJnOrK<&--aXE196*}IY{&>=ev`zVNt+rieu-K`MesoOb z<=@DaJuLh3&*bLLJ()dyej6k+E3S+EyLY)~^m?7;&n#HJIo=P+wk}jGSlVLjygfZozbYK3#}d$s&?H5bN>e z!NNVeTr-vw=e{e*bu}N@$ev3ak!zpIJ*nD1FQ|6HcMO>CQI 
z0wqPd*R^$w^Rp9Usm~TeE>Am`v&?G9ZE((qB)`~*{*aM1xf;GA$&GlDdJS1?nRR_7 z#i}pG&_tX^{5_8yE0AR`h)&}V`}*!|NaeZ1Amei-y_IyvAx80Qu<>8tf204*sEdnn zS3x7Y#-Ancw>UkbVCAgn8;>twzd9suuXReQv&;3GiWh)Gm zv2m)O(ppbEKGn|@(!5W~tmw)$UK71t#?osH_Rxqe{T-C!ANp7 zc}9|dCA)vc;|7tcD63?){1-dC58=`@_}8~PNtHRmKyiEl_J3#${~DfuVone7>J0LK z4Tt3GeTPihwUgRklW-+#e#@E%9VBvGPm>eL)kTE)L!?@5447^F$~Aj+@|_OHL&do^ z`g~rG*B9Pr7xUq)_?j4;cK*SnPg(X``fYd2SKeiBL$a=R*V1JqUx7^@LT;FTI>C4} zS*npK{?e0$-eIW~nD>SLuBOpwyf~Q_htVZDU9*}ka*~$5!R6+7n`h)r%pbRl z$|J8C7>d$K?J4(*>q z_ol|a?7iE=Ct0~!8joHjdGaA9S5n5v)7kuT<5K)^9nX3LFGjLKa?EA+|FU<<|DT9{ zAuc4&Uk&3vEZmrekJ3H1s?5)3YWp4%{VOgfGj?Z@aE!6_gTgqJ5w528-u08KjuW*z z-SN+mWg+W4=I;#ej@5THF?xcym|R@1X+4#6=fNm<&B|)I9)xP*Ozs=Z6me3UGntou zWU+ptZ?f$+fn_h={k3=1{k@0&Khph3*jD0OH`4z((xs+&Q;a&^x7iy|9f#+Wxe+{j znpb3B{HHiJmt^}u@C4DaskT1X-va35j$1O1Pt!x58&?vUM!_|6jqEGV3Zy3N{E*Qz zng31`pB{(M2IzgxPG|bxP&}E659ZfUv=arRM{R=$0!S~4ma}GaCMfWK!E;pk8MdDPFc)+nW&29JLL;dwI zm+$0GZe_kQ9ny_Ngxm-2OyAepE9cF|&C9i#bxygr{k~5o>M2#`{-a?B{_!sxt~F1n zz4&MD52aGlHg>qm&l&p7I`EpgO< z=4gNa2a;r!{?3Kza{XroLUx0H3*|@HWI3KLH4ARcdRaL*M62y+w;zpbi-gJL(aUHv zSs&R`{SrB{6LYP3!oTAA8h*SF`)7^+$>hIeKHrvwS6 zn-%7HSQ9Zg^}^rep}F?Cmn`QxV=m8nmiB*(wKK?cnq!vn!$as3kJ+NPd$e?yvE^ZX z*AO9V>c6tQFogh5cUqEOX#)dah-xcnTKDv^D}FQ+eq?-(8B+iGo*= zwzk>mDt)IyQ=TPMAyW~|e$w1v2YjnK_Xiq&%f5%w`z!u+B?Jev+!4I@4O$Jsoy=8= zvsYbLShI2KXWt&7hpsGAT#H%pJ{gkPe^7~!yzJG7EWE<_JBto8*&E{0J%Q!ID$7`1iw1k{+%vi=$Ga}=NaFd!1p3NTqB00YHTu! z{t1PCta~DzQhoS!oSCH6JRivw&=4r>z?%D5uBOqrEQ@Bh*=^W!9{(E8gI^Y>R%vg7 zR`>HdHJM&!r=zv-gn4bU>*Xq9BBa~GwjthS?R0C$jAG|p_ucN>`Mmr{(k|EUar(%! 
z+x;Q4(YToXwAszm2olNC(h47k(7d;nlW!{PN8c&Ltjxhrz>BkSu{nM2#kh?$C@m7E zD(!1Hp7jXVVRfqM6yw|NdBNWL%+t$^2tVS*M|3{O5!ta-PfI76!IstQI-ZdyrDJ&E zab!A&H|4oid!IMtU1jN28|!z_>JR--cXT&+w!r7)pLonXU=TZ(_ghUb+v&2PF#mth z`}f7#uzDSayyNd+nB^+3qvLW%yDod|M}u`3avaHCgIX)`w>>nT5(^t+=UN)wLG~Sm z@$^;fSnllXPyO4m6AN}Of+6koT3(+!XggmV?7{x+#KBW&a=LS>dcTQoS(*9~tMAT9 z5S!LBV*Ui-%Q{rOziy4E^}_j#^$t>HZDlYDl4UWKwm9en{Ubv(1e zwXBHr#(UP92a;k(Hu?%HxCaL}srgoTJP=M2zpfK|-;pHZ`Fg^3_ABgwzRdD{=x=qX z-%DFrmAz68&(ry7S}sA>la+5xZ`F)TULwVH#r@H0ouIv(mf61BJ6XY&6y=t~Q8I8P z_j+RDX1=HjdNVtcSQ?3Jwo*-rx7b@>ydPw>!r^$W<$Ul4SowyvW-U9pKmOq#?)Us8 z?+@{YzZ4Yh#?s5P)JC*=IW1MjwOnZ=(FsRcW6XM5DN=id<&N?x(Z}YJL*@x~b2slw zuc9z}J2Iz`ot=vd>Q<;Ddr?Fk(xXp?+2|#+A&=tzSbpJV*S;q0#5Kzs!ZNnLH@*Cb zk2_dbKYsii93@8oPlaTgl0jCGlP5LNnwrt_dnEHVt_P87;+m}S+gPvAhll8o-dIX@ zi_J1h*$E;O|7smu%1)x}j_Jp99O_OP8tbF36)nFMFPZhnDPMfDyn{p7lHZ=tk22fus_9<9qjZ(!-4KY|g(ui|@DK z<2KLN^z3w!o5S)4c=mFwjPhv_jSW&$T~bKhx079FSTClXW!lM(nDi1k-+h+SyZG79 zxbiqid9@hHXK%+tq<^w@v%lvJ7@e=|8rQxe!DLl@ixdtgp+s%JK@I&;+Y${Yq2~}K z`@r@iG>~_J#=ce77u?9w>*(W_!dv2oAFE_$Fq4I~75`VC-*|`bt;Vz7s`T&V{u6H2 z!{zo-=|;7mM%E{g-4ICsL%E)$@h%OPB-aJB_cGaSfy?t*PR??t*ME<+^OoM9on#z- z2}FGg0~5896PYiO_yxt^N6_;R9y9*?5C<=j^kG`cnT6r5eZvQ=z;g|f>Wz-y)qOka zztJCU_C4{zOBfe?X%tdPJL_O*ErhJ1-~Hh5Sx+2`##ws27O-*xSxzC*llj&}+y59( zquJOR6#W-O-{!d=N$bU8B>Fb5w389j5wtOa=lV!ZO^ujp=(&69jZQ+ucmYhSnAsCCyXwV7)+BNXeSyVmv2<**?X1 zBKyp8YNs^5pV#AG5AWs4aXG0!&fjG}L@R!K`#H`2OHCf1MW~4fe*iA_^`7+=Do$29`#B$^>!xU*PJWf);Rt>%Ihng@VG86g*K*Dv z{6xw%$ue2d>J;AO8gk6a?pIKF6-$lp8pq$&cE3r{^W@BAV!s?(^ynv%_9itb}(XIS_eO;o+WQuXh=h?5nO=(b~dWe_Qkz z!y$QR==+LZPVhhHH)o^iCwj?lyu{c22A(aG8v<=zFbbib>8$!|9{o;S{&N#$mU z*&Wg@rkTuD?%`W{+S^!D^2Q`PW9IAQo%f~tME`lX7(+Iv{ToQIFUjS!>0Yj96)fxg z>(o}g@a!FEZD04FSLZE0B^pWQ|AN2FZavDj8}U;;Aa6ZPZSR8$WyF^*Qqpi51Y(xw+im0{~U|MM2@S5yUZIOi1X9Y zmp8j|eCmJj(TC<^eX-)2kU0WnSs9t-cP=f|hWn?Xpov~N=UrAAubd0@+1r~4b0ul( zFMi`e^2nKsz121as*cv1j9^)h>0?LHL9*&iWZ#R~(@Cym*X%&_ZK9Q4r1uBkdIPNW z<{Jj%DA{R}3wb@vJgU{vkWdBc>%(_#J^Ih&eY#RT`J~GsI`6V4u%wOj+a9*Es<~E= 
zaSZ!dqIAaj_p z$hqtrNGJ19pBJ<8=~`Hu>H+;%hoOhrq}J zTHMRMxiot|Dp%q+r@w~K#3)`VIT^asY9%uKhAuO1%9>Vd)Eq)qiDT87teU8A`}$St zT+4dih5yFddXDWMrCefFyo~R>IiBVBV}8B4`>oOWj2?WMIj;N^W>FJ)$E`h|s8;ZhuEdDCxHfyDtvKw4a#DuT4zFz$q z$0sj(qESpx8U^i?%sc*>^l+hGsh%F|dhKgRy_`AG?jij&UdZT!s zqsU?Cr9IA_3qCL{!R{gOWpw&_mWRflIn=I?7q&9>znYnm89}Enn)a5`JUPCRyUGO zRN(T^zFfPd$@d0*QX<0+U>A+_4X3)7v+M)OZNATWS3eaZ%d^?5mAIc&KP9WY9S+9D zw!+rVR6|p3+^JYB))%tc&Hr0z@E*_Z19!X7N-*1von;O!yEz&`bn+8bCaLUPUFpe2N+wp@ zbu^YZ=)dv}b!jqZZ;z+rwo2#h@+55~!`>n`(OrE@AnRDDs|?-s{k)++$$I>=FK2M?0siefC%iDY-9 z|L58GUo@MUhJkEvvGT{Y+gp$R4T`>H=d0=A9zJIoe7(wM65nefKi!Z&nNhqOkA#YP zF#H^^J_J{nk$5FOGTG>|i?bw~*zDQFipd-BJjlo%Ogtk<^ip=Ch`6Fu&hmFPfJ#MhC#v%lrHi1 zot}PIc3Xw#dkLPF@^a~ue$@{qM{EQ7yPVDJOyY@Ib`8sIP6B88|1Ao-qW*kzC9B-d z5OXZ9y5a3i$eG9|Y^L{S%5-HFW$=Fx`}qNaCiuNepZNhUFVapLWbWqsz@Wdz*~11AYfr+VMLV7^5Uc)aATgSyT+6qf5}0Op6cDW%kM>a`-l0 zs)W{OlE;T+m<*e9l-rvf^;2V0p1F;?pR)G>(9jW`iU0co+snR>J7D{lLZiu6Q_>U3 z{oG$)ob1Tg7rr|YR$}uN)fVqk5)I|}qBEf;dvoTpiLvB1o)?G?9)jhI@jHmeO|+Ls zjVvz2*Ny5t4L5D@Rt*+2UTDHsJ*dB#!uCqQ;3c$|TzWDaxtDAhkC9C`rCO*tXOi>Y zpPfZ7YI_DWkB7pn9PCIU?`Sz0WLA;e&+PDZ5?{;OUs2*Vy~jpqe*?W;luBHU3u&Va znm^<#l1U|_m{VctKB%0A`lm_feRp%(=oBS0*YzYz=;Yc5B=H|KUBch2PuJ95_UV4)^BufuavmjyW)1!&*)oRG-(37( zi;KKft%9$N+9t5sdtozYQ4$k5qpc~lvAXcvCzD6U0&UPzo_O zYpeA=EhSILdwkq5&+S`qnD$n)|-;*vAM_`;bv&wcVzO!R?PgGV$;iE<` zWl)&4+mdWHnGknI^^QfGLyEumu%4{+J&)VUuS@=Dw;?p zsC<5wmmI71tOM0R$Ns+E#p-h^_a;)?k8IO(PGt%I>MO>0H>m^Pt7ty)7XH;|H!`LT z(z0{uIUeOdZ0r&o_be!T2xWOgSdISAluk60x@yfCs_$Xv6!bP{8D&V~9J;Fy)g8!Y zEgjC)OAJy^TE9WBQ^)~WdAK@nFHTUus)D!{|yWnB^XiHQ37Iu(TuZ3(j=cs0(YYv&U zBL4~MK2xi`J^ei`_v8WIHWt~D7JEVRC%p4U-ft$Vl22naPtb)7dyw9(=y=}vpoALo z{yv!uk5tc3?!Bj_PIT0gR-S>1?DT%$wURz}fr;bTMt8od4n%K7^?RP0kGALN{m8-w zx6%AFo=GgOhe@g`n>$yVSsi?Xzg!H#$GaM9KUKMmn>X_ei3-w*)VD_=&s_WU`maN@ z5}8-WZws{z;G4hleAZveq3IYkJ*VVm=zmK2WRMvS2fbmb4ZVEp$u%s#G=%)2%tSTJ z)5_WWP~t`99sB?#o2vbE+*}L`9on_kqmQALm$ll@^~@h^q{|v=Om?E2)<|B%WTwsR zQevznqQSX5k8Sev{(aPy0Df0O8idO<<#608sa1V(my;-Gs%_vEqW)zX*+ZdQRX;E 
zI#i7{aCL&#KZVUil6aEbM|q+Y9d*@f)zF`0c6wh}eukWGBe|WRYC60m(oA-&9Kyav z;4YENv%7mRS$(QhfBoY~{LfcHJE8gsR9=L-%rE7{WNpaJ zzL*!(k(lVii~c(?eGY-zSJ9Tf;S={hhs}!YI<+)JMY6QtP5ux2>FN3yTyJ4ziKX!% zbY)d%D6Kq3KDBtNNIvn$PK1C&Qp;@e$684I`g8PzIg9fctuKW9mTYhqR3&0iqGxR+ zQ|b8MkNKWNH~$OP zGb^`3y&LuStw?&TdRx)aKyo~qR5P2BH`s=x`>5xygsOGg$)5CSG_VXtvY+=v_b=tK zmy&tbGCPsfGipjs!_D}ut7LMeCCX1mOryzrrk|e16Ak_U4N9uf%r%ggQ@ag4b15m+ z(N5OI)|2j(f+rEsv0aV{rw3vH;G^C;Quf-Je#aqLe0&%%L-&= zHEhQ9!7!0G@M>B=8SdXC$*kp`0V5wNGXoFx_59<>>IoV=M)_CJ(Ox^*m(kF*tbmnL zbJm6vllC=McnsOy%GQ5yx3ku^&~-e-!LC+JNNov~mOcpWxatw41ZN zP4Jg<(vS1hr}1R5n*I5uWRYr4s+najqYp@w_J2s`cr{gHhlv0)#8WNMUmen$d9ogw zc2VEekam-PjzuMKx-|$JOPhiK+YTN_!tv=%jj?!-M)+FLtOnCswcAX#ERU3ud1Z};f0Uen`di9 z#zSd4u?YLB|6*KMpvjz{PaKUzf447JkDfj7nN{k_YrKTooQQ2g?-ls%;59S+E&R6e zt)sF#q5EX@B&$j0O?sfb0SX$j+f^(nnK4cz-K}&tj6HltN)^!ZE4w^R$@)A;1^mue zM@QUd{Qn9ot6`j+{__g9b4)QeJDtq)?vs5y?etL9)s*}eJCMVnWV2BXzp}Y<=&pi} zw(dUVQ_hXAHyU^v8mr=OSEXhum7NDUJ@6Io7I}7zdds6`vv0}B_6y8hq@~kHe<&Zl zAO4!6A*)L_kXv=So~4zuX=5dd6R|pRXV%gE^DJ+x7G}al=9_x({Ch)s#lqf`?{Gd8 z^dhTqXxpNN%*9nUVm=2Y$<4MSAOE^l=@4frz2D>M^kgww3Jmo$Q~}&;#Aj4qbMWOM2JY7 z(Q>FuJno!~Nz8*0+O43TQfL{@3O*<(s#uJbGOzGH&#*5p_k-_|+PVdo`_jhge8Qc}A9};PFo$t~Z}OT@7Eb z$ILM%zHR8s`N0Qiw+-!OH8*E5+mgl!=sHWw*Pt?Ibsux(4!-bRwI@#Rscfr(mXBev zRe7zPkUE*2--Vl4>Nr^GKx(aUaT+x5N!#h;N5DWCzH1!ZWcOhD&_tj=fgfmxlDs*8 ziqFheW)I81#kbwz{|Gjp^MuJTRa(6paQ>&BqN7rK`kwtrN0D>#cPHAehP-jV+_?9$wAg>Z6H;=xu=VKHZC2!R&BzUVnB(cS_SM*vw;|xC&aWzGY z9a&Xxelc?wlgN8-dQ04*o_u;D=2gSfk^k-I2B9ivUphjci^}XU{l*yMV^aDB z=SR~2d-y4#&TT&BJa+?}B=+IgIBY`m2g1#Zs4q)Kud@AQr`Svf$;gz`6We2UOh9o~ zoT|gX(R#g{>`HvGZ~bqA?h2mDh~q{&`T*sLTr`Q-Npz(D@_n#2ejwGVyhjzjHfPme z17(>rZPKUN4I%zx(Ax8qGzN}yRo^~+_@f2Ka$Fe+Phm@c}sd+yE!e9GiLwz zzYssilW*p?>Zq}~mJWu?^No-f>CsF0bOaC6Uro;zSQ`%IXTj19@c0J}Coce79S8eClrp^5gB9F<#9e;yy&)%DTZd(HJD z=_j+gt6ASUP(l z;xbS6TZ+5OM(gc=bD$?n3ETgcrnb_4a+tJq0pT(_=(5Ny@3#WiQr_>*#I-9mR4o2y z<87{0q@0SJ&>qyBjf!D%H!lTMU9aqKrQ&+!|AUgGaM<2X+A3GHn~l`F6!lm3>Hq7W 
z|G^<9NM>2GNNVLdD9Sd15o(b|6n$ORsyG#MCEqb{x1pV>i_;J>B(?an$*gu zw_G8w?KLFad}>a;k$z-KzZw^EAmd9wzPe1Zk&fypfK8hFV<6%b#?X=@ zZVfw`V#x3^L$b4Um))3LFAnHp9o?neQy-=}h=~n0j2Qx;!#IRdOkfPS=P1QEMl+SO znXdD>9Z>J&UhgB<`#He}IHw00Oj1`pZpe`kEJ@_EjJdwZcwXl4SAu!c+9b@`+l>1i zrtltwf8gciGscuq_@6V%Y`^}haFaiA$q5+p8^`?z6V2i8?fObsl<4#S{PVxH6@C6c z>1+4@)>m{DZS6<{+ihd}lUKXHy@uW0D_v-7Ct5p^w#v}gmGqWGyf@NRWtxi4qOEfD zmW7HR=q=h>2PJ#bT_Zf^z-DYFTKnHR`+rkoBmRHX+5c5z|9ATOkQ@_=EgFmE#qLVz zICG>afxF5ZG!2K>*&Jvl26D(YOM-bB%oIs8IPK_8!sKUKA|a1=G6YGu#!ONyg{e%6 zPgna5o>~KC+5C|}`PqJ56_(o5SYB~@Lfr+ZDG%S3A#$Gf6C$lSFL$DPa`^u*S59Lo z2|1c6i!0bg4iwdf&-ExsT7)?e@f)Ph)Lu5?@9X&-3BAfd{t2y2WKZX#y*aIqXC)J0-Ue~nQl%Ri|%YNDgCna zm#sGY=wRFW{0r`q*rpX}H7q389OsSDQkfq-i8Uu!U9KFdrqPBsU!wgV$V}SJSG1S_ z{R!%`1B$OE?R1hml0y!LW^-Z!)|J*qCN+L$xl^INvc4$;(8#Zb7P9>D0A4He10VRb zg7ofnElK3BDS>xa_oV^ytEwT?~QPi!y5_AwJ+rSLYlkc{!)6{8|9gL+);a(#_vck zwpRS-V+vT1})DusLbL`CP-$IOoE3d-Fkxb*VhX)buTZq9SRC^ z9J(&+$Q17EqTU=oE#+#$DbLZ$>slGg`#zwyY@*wXbkcewPb5Ub6xTBG8gy;;+%NPs zg~_Lyg2!j_bq0MW!*Rm+k)dqv9TS?^f$X$jQ{*4X%Z^ zy10CVE{{R?ivMn*LSd~5SXc_57yLILJL)UP!*gBqWtbRAS7y`i>jfTkCBfIPgqvr0 z{JiMqkVsW`o+YpQ;bKeWK^1u*h%RM+76Cx^(|Pa z43mw}n>4Mpj4C3N0jP)DhJ~LUiBh5EcZnm0|Rw1bBkFwsRRZ0s>Mzl76d9(j z@_L`7(caMuQ5*E;$l6@=4JE-k{wDNqHl!znMjaf~@YF=Mdv_bTO*%6ge#1(Uo|Ygd|J4^CQs|jyj{Hjuy(X^rSD02jA@b zE3}o=gGr)XmDOb<`WCdWQDba28%!$WuDw>0yl;IWg)2Oj6ulMT{dM&=rHO3wt_};$ z)l`PfT%nc9Ue=z#-CKptcj9f1V`cZyVZ3;@p-xu*Z~a=f4}`7~1=1(Fm!ROM(b5%K zYQ*}t6c%`rav4qZCeQWgYedhnxatsot-9MmMI`@*elH6!b9#2!|^!>prfhR_LJ3E27RG4oA~2x698cqIW$CD!m8Dvm-Ik2Na$}P z=4=g|nH}_-(9F8ZM90S~0{~C|l9Zs_dw;nIul^CSljyRjB&9_mLMYIH_yQe_g z*GgqltqDzxL+>%3%%aE+u&`FUCE)lC+W3P;E2FfG_kkSa-4)FlxhGImWystcP1%Un z4u@UfFQfY_l-x*aiy-0{x*4hdEYOc9y97mh3U(I0N1n zX>}I5zA*t%-kAOn_}JZ3U$UH=OmbXbj858nnNMQyGil&!9zEmx13d98KAQ2+%TV#A z_Kv~DW5yeMpzSZEeum78JvW4=bFA}jlL+(N?QELiEcVnEcH7d^akxq1=gr>V>OfCp zvZ@Cqk$VmbzK)+i=9ALHk&BChX2*#r^6R}KR|h04wN>__H3)tbu)1cO3;Yo zX*IoLf`H}N@vg=zO?RRNm9*;TLlkn!9=rqZz$}96Wyv!rbgUG&fBjvy>YRT zjx(OF$OG@HuRcVX%q!pGmKJLuUeG 
zwpafGwe079Bgo13%&g>HIiY)+|94+Cn-<$^<7z0(_Qe%!Bkw**YE@ND-=Zqx#cVxY zO6D2gq*uu(H(~zoM|Z}#_4&#yo@DD$Cw^m%r?L!__tA0%5{{+qBzYV`CR+B?Th;qTqD+`O39~E6n@RjVMmI@pp^q z`c`atuKNGvUnY=1Cea(v>GrYtdnA~odwto_Xh=SX)lKs4%))24P_`QsoPnbG(6>OH zf8cDO|GU7*m7dKJyMJh;7p&D%{!vt%kLHU=D!oLuwKuGPaQ>WJCf)xC`cOEa&&VxzjO&b&BD&t{sdN#!z@(p}l4 zI~<~L?WENt zG1!H4N@?M6KH^|+DhV~5AjB6NyWLN&kHY;f>VAwD>EI^^WY?l|CB7$<#3S15NcO#9 zq%U7IO$%-Klw(nGIt|=}pMKi74!`H>(|YPf-{)Cw_q{(36K*J>9rsZ#fs?b4U56hV z0YeEf`XE|5;{Op=-<`xBg6af{NScLbSz3;Jok-jB^nX|B-!4RFHZ{IWa~1Jff!2qq zF$X@%z*pBo1_#ksKRWK>-dSdncSg%3TxJexe>U?MO0#6Qqf(j68xN1MhJ;Fg0&Urr zafoMf#JL{LWUIkO(oV3vj6?t9`mzsjtam6%A*zRhFk8Kyh=mC%n=%boV55 zH^A*XxNVQeJLvXRQhUPp@}67?^%XsPq2BJsVia{b$`kf4TTpkP$)i#52*iy;Re~(; z!^?C=&upB2h?d{j%GacvErUt>vYz#25pWw>*Mg_B=q+!}*P5fr?CB~R8;r82*~IZY z%!l~O97I>#zR$k4q4_WtxDp>PklHb%bPmeTC&wH#oUWJ80_sZRj5!cF%d^MQUn_iV z(gR~aUbho@ZH3|UP__}j*Qw(%y874nayE|drPW`2 zuMfpn(pW}ZKPmeIDi;@?tOh+yR%R}KDw9pNp_ieLcX7KDYOjEXyg{C9RF)av1ocW_ zkAzCQR2wJp1=%o^&_fCDzr`rNX-jVskgh)x>(-IorCXG4z zhw@s>tX^mGyciDlLP3J-uBFX#Xi0$6%&cW*EIu{q1RwPGNEG(Q-#DcQ(ZB<^$s&0v zQs4eQxe+y){VYR@z4?kHB)?G2eMs*}HPmD|(|CrSkbJO`WtFPwQ)U%Z~tNXR&4gBl;z103(^H|R0P=IYSM$L!*JPuC%vE76s7eOHpk z(OSxZ{k-3QP8z2fdE|iIDdd{N{7FU9*yq1-pLgvXvOSt^5}+pU#*KZhjnmBECk;xD zqh&Vm1<1|u<{aTjFPzXW_xo0Zv~w&lBf0&Q%e!^9TW)E{~Eyyayy+6%&0XtO?PWDckct6V|8-SD|qdzD#Cl37nw(HRM&r?+V_g zG_UYCOU^Oz9QwG?-R|BfZedxq*=^S3lK5vK>CHxCwnF}a?oC>LTA$mRC7!H>Y>+Mk zmz(&B@ep$&jQmQQ70_6oZ`sd?JO1W+ayvm=hmu9Yz|_{_7P3w-;>%FG1h<)ezZxGS z;Pxw0$*A!sNH~-2PN2!x{N&i@f8pv;(%e9g6X5K9ZQtSdPCPxx-m)=#I4ta?>|6*r z4^7|T@7LlD?Gr72M>4mfe7L*4wEUp^)%1SnljuWc%bwJ$EF|;H!guG3m+>Oa3TcWMjkoT{|nhJhoQ_N zXR~(>6t3|(p$x7r<{Gmt@;zR)i~sx5U)C4O>M`b%>QX4KMtA4qI@VjAKJs>-gR{RV zaSIymqW`m9-BZ8Rj-}*{egwN5?TPt#c@d9al5WBnWbJhZwBPQzEAzIa6`?hW_lCmtG+Zqr-)!XQs>i*CU%j5*GDFiG8b9&Oc;9zZC0V{DS*= z7idlMNoW`kI1~-H7cGv{>Z3S(1=Sak)yHJf8=rX(O3J-(+K|Vd31xS%_iP5so`U<0 z6|(IynMB-@kS zNx1RVC}@Ss99K!O_-q=@rsWyRpXd6cc&vlhMJ!`D`mR9L2)##1I&RH_B&_N)%1ok% zx~%F2b$o)_@-Y0N-t;N&4u7F5@1Py+TCE?@k(v 
z==5@*`@m$5hzz9Lfj(uW@hKXtK;vKWKhGDwsude;j{n#6tp~8O_T=yc6pYj-&%)t+ zSKfex`Ec4;FVa;l>mezL>AJ#nWqcmtT2@m|r^6gK4oEMZQO{!c@4(sfq?cKt%6ie| zyulUf&4^~EC(CMOsut?7?TdK(Wm-y5|IT>)Q#;uVnJuE{@?M!`d;uNV{ydny-08Uk z@R#FNSCVh$VKS5TGHUifYr;d!g}S}@<6}H~51%^~R^B7I4J7rA^6{@ZIRBnfmQ*ww;3#Q!9)htd{M{d@`VWr6 z=QAX_8P|6fJ=_dbo~@NO5P6Hbx8bf8{pN^q(vr8*!tVjhNV|b`rGL7 zVLt3l&wiq%Ex66<*FL0_v^rUJNcfxeDA<69H9jYpNY*dEA&Yftp5fc}0P=qnz50$g z{Sx}}W}MzMYloTRZp2!1NVch;tjA|{Z+qyr9B$8A%fIwB6Sp1pUuB^@Ye+lrZ;hZa z>pod?EQ#NVc&f}kl6tO_+J41T4R%w4O_t!VGB(frQ|i1MGMe+M`$N=RPmEyUN25OL z82dqZeH>)Xur3$t(b7#2oghP<_^!^7S`8}ph2X5rS0LrAaVG6eP}NZ_N%NB=etG}SJVQ;mOGu;{ zN-o7)X6rxYW49-J%Te%oP7RU=tISsM9P4M{6h4Ysp-cO71` zX1fHkN|JJpmS^=jdl~W;nB5SWecFE1D!U%Cx-i7|d2H`8IA{thm$BM^S=SQs>&MzZ zA>FE^(9N5B<}DN0q6gh({=28o?~=iK)c0olk@g$3cpfZnD#rA;=*1`N8P0>UQlk zG3P1!qaJ+^Wj^=(sR}Vc4 z+#LV`ooMPq+RO3Ka(+C(ow+BrocWh96Lu(4I}9dntsr z(`w$hYm)g&I=!&?_KZ4qf#6>Hm-Do`HxK+14eZ4WwDsJV5Ydv(;~&Q1d?3tjFv4o3 zH_V80HSaNy%y;odxe7;lr@K=v*#(nb2uo;fDu0pph9A_E5!v^6*~aQinA6y>Rg={WoAY{dkuI?QM$hYsqp3O)Vgg zBl(bwkxn74jEJ&^_zn#v`NRmGF+s#D(OQy?&7rHs{?0Krn81r2<>}@qPU_6@kd+jA zTm2^GZ$EaFx4_|~ajw#t#q18nwfL`*^po%~3;2mnbi0k_;xEd=Sl*LIqCdgJ#zVja zbZ6}@2l(s3|HJTe9dG>rdBj&Ixm(gjCSAfpI`86%n~k}iMr9k)D#4rOP*-*%-bv;O z&7PEnKc};c^|&fc8`I&ehcbO(VYAcH?uWaT+F6Byt8kaOz}rZA9_yLq-s_&d#v4q6 znoc6i%vwDSH}BGXjwmnFT9Ts#kFU{tB$K1CL1B__9?DB^)aoxZ(1YBE`~M8hui?8# zvyk6WoJ5d=S?WCh_vbTrhU9Na^-uRQgZ&|TPGPmxit$0-3KQmTE_)fv8aqMgP4s;( z9>(Yg_C-k#^;dR(2w$+Ay?jSnf9nYvtN&h}43`FEFwp9cjws>SSWMK8(30SezvEf&yJ#6u0CNDcd=d|yWpzo6A}`{hVIdP*&UO^KZwwo|&oHq&0xgo!_pj z9{q8Ae+(%Zw~oV0QjB&{Pxh^?rj7K8my&)C^_E~i1N2@=ai85ajd+u=8N|EArlD9X5M5*ye)f7}SNC&R(5WYrYbqsw!&lJGCb>1Qgj zwCr}s$UFP(*5f`$n@YQO736%**H+{!5-Mq)kwk6%#!o!ozvT7{TVFsrUD(eS_LF@P z3)FcVtIUpvq{w>#Z!gkm_H8_m>gP!AN2r_#<-hV$v)IfSSm~g>T3YSN3T`6DtBnG3 z=wUpHjv~=DTK<)FB~e@__-caJT1sU6lePZ^klj;X{u^vo;4d3N+*dqtEBDXiDGoE{ z>8bTYpfdqEvWhoOeeco8H}ugEJ$kG2TA&#ty zUkbwsW85F64@XyPUVkx3F2LQ4HSoO>H5+JTC_nlS zY<)s4xTW{4OEyJ^72j;9;)TAEMbLc3XnRhj4TqJ#@y=B4uje 
zyoqmX@O2rBUxum{3c#*=u;5#qy0Fz9W=A{V2tDsP+h=HW|OF z(D*}mEU*2fR!m}p2R%ERwdN#D<}|WTat-_IgSXY{{}26LMSFjywcyCWf%a5SFy$H*E)r_%K@s-3crwm9N(%j0c;*8>xZ?G`Hgz? zR!?mQvAK21XO%YNjJNotthglj#m~hkTOYWpT93cOx7ze#7XvcW$ieanxdtwPV^wFel)hm(@SYT`>w_mn!MKi z(sa0jJ%32L3CZ+|_BZfvHH-f9Zckkb(Hm*-Rkm4!=gOhgz2UVVT_;IPf^Cn{mn1ZM ze>%QfJ6CAEtuabMSI3raLx1*2_u?@ssk6UP)a|4{51$DPnR%6syz?9AIGfJ<@&a$5 zG~=}S-VLYGXF_&lz2H07jv|-a^j!NuVdgC_fwLrG%-*7h;cAm{(S4+x@V0OAIgffS z$<^AB(I)LCuvZCsI0F~m(eN;doreBbaNduElRoVUa`+m@m(zPS7~UUNx6xr`lCEhK zwU53te)9nOZw+yyi;-_a0(IeY*6}l$qxn}|r?Kb{=rX$~67)Kjkp6st-*&wCio&M$ z#>o^`k)!QL(9|%xdXt~s2lu<_S-S9X?MU|_<&tJO`;js`eTmktf#9VqHU}abYcC0} zYP(jokY8!Fj3NJ_#Ymz$d7R2d27CT}UST*1k9OryUh^jWT!yN8ewRY%43hrYw|(0+ z)uSimX;&p~=fBn}S56x*u!Bd*@i;u4YJ}Dpr!8qDyYBmt!l|TmoEkQfT@IaRe`4@E z9dA|0_i3mYOR~Sh*^TNc$tRCfc0FxnwsW%fucGNeI6gtixvV6!YgPH!Bsol2!6#9Z zG!fZ}+=VT+(z|}aPalVZr)as4+Kw+~Am&5dZ#+cOzKwDJT-sfT-Usovr>ld|pB0Y; z5*VcB<;r(*ttSeSTKzgQ>_i`57JYLcmUbSE%y;$sqR+{W{buk!pI(+1Be0CHhZ!|x z#c&e3GJkdwJI(vv6FA8}`&!D*hsS8LEUFT&xGq#K^*e?fD-^WH_a=~7cYXNt@RvPW z2_cgDJF&E{@llR!fAl|l@s}4|B`IR&YfjRqWF)y0>zzZ7*%6p=;$|Kxd&qYsuY`77 zuk>0PZUU2~`I5X9)`g_ZK+PuEIz?^6=zJfVKONoM{ANdd5@lu0Ge;tGj5cfFfAcBX zHUEv?>m$~iGacD!`~csY8L*^)Pne#Xc+Cv%sr=+}@=q^wuUe9fC#h>!(qi_>CoETH z$PXaB?FrM%LedbZpQl$k-IF7g+EFbzjd-(u?H;Y=h;m)6Y)>(iy}EB|E32^M(EOeH zvqo_^no8hush^}sPjI2E6=&Y@C)C|c>Xnpxoc3C?h}*TY5(i~))f0W=lzRqN=4vfH zc1DEzxSE~3&wHw$`bUuVckq+Fx(P`+828Vj`*_#0d#0T>2k=r^!O6+8aXjX8yuoeI zl^r27NID^tu6Fe-^(El{d;Dn+rTg;?53z-2bXuA%O(6HIl^#y=ef0d5AmvI@OYqm< zNTZFq=9ADGI=LQ75(Xr}MJ|MrN674QsQrpQn`=FLJDVGWK8~_8_@WV>OR$2Zgx`Sv zeQ{g@P5Z#l4P<`4D}R&DQZ)X_LVjdvSqaXW!wIh6?{78MocE3N9t~L16{Md2b0fcy z6^bXd@nM0=dTO}B6JzN5N*?ELbazz8^*qBBFqy>lz0~m}Z*VzklZ0x5er+Z?lSb`J zT=c{5$z+p2y*p?n33{I4i?Vt#k!P*uY8^iHAU>@Ne_Ef;>%d5oB=)7vrEK?Ez3OAA z8?ME-VBmgMc8iuXyLJYdAFjQuh2(rpf}!uP56>?1DeBumQa96IQb9(dukft5!p_OG z^A6c`qy0NPu?v5c6hm!%%Bn&6!WZ@@>z8=&HCpcvqwV;w#k765UakpjBwxlk_c{?&*hXq3GVxm%^9d4#4k-l z`?u(Rj8*pG@#Fm}YUv*s%X!ZP#Y_t2B)Dy)YzOlFweaYV;w=fu7vl9+R+;d5Q=#NA 
z(oI0p>{hD+CreP26Us>obPTWkC|Os6$nvz;(7mL0OiOiPJoCFbUAP6#ZZBp_-XWEW zX#ZOu`Zql!)!GnkWxr2C#b=az7jE7u`m=*bXA&;%!~YUbw$tam#zXzWr~XI-1%c_e2DiILkA55d zUBKUG7j|}Sm7$YE=)Ns^W@Vu(JE{e-KjQj9+CQGwGGji8PpXFYFUb295*QAdn_1v$ z8q4mE1i8t)=tca_zHI*w{A4z;lV`_6>(y$$K%23X>f|*6M#|#-R~Q*a%a@_?EcU)X zv?S2)DM}}eO?yAXy%j9PVFIq^ZRl+r{|D0Zb`hC%z|YHM_MZ0kSAG-h=G@zUyv41k z|C9`pLhx~1?8@6bro|cPxkoFRlRKY<)%X21_Xla`BG^ny!t5Y=!sq_H;Z)_S>%AND z>;uUAcl0I@@>8S~WDnxk2dQ@~$(~n?4yDyp7J+!OXL` zY@*yEx=UJwn@Bg~%k20{T8kHGdUyEg%M;Zz4oXPfj0HRC#}a~aS8`9v##UN?015`6 zWHpVxOj|usvK)Oc(m+l_`~#2G)sS#aeQA3MPTP{fZfxTf2J4kblCid_g16 zXybL|lRiBshPLq-uc9Q#Owx+;i~-h@T~aO7ftis;AXx+b*QbmczK7}|^tZ^U_%L>T z7s<^gf%i~*4$t2TCabdL?ICquXTcN5;APxomvI7xXEy5!wZEv|^l}+3ya6LQhnxgo z2~hMd-A{zskBd=d(k~`(-?8+#2t9k?C8vI}J3VKcI@3jF#(Me54*nZac^bN|g0~vQ z*sQ*C**&$2jN7v4Kgc_?i|t9NB+XsGiq@g5EXgGFUjoPHBv@Vckg%xZjMctXXLecT zMD(S~jU)H$m_M0}57yG(`pe4Xo3nx~pemu9SF)J5@t<>8FT>I$^p{lcnG^hO zwD+U>XB6(_UP2Q!EK2;&mUD9M6XiZ-f3g0OY^wp>d_|jc^!KlObK1d3^=W!($4h(w zcYSepEDdK|`j@iZX?LOalP;i?mM%fhMOu0gjvgnGLG;#&E*dJ^3g0h~*JIF;R1u|U zIjf}WVKG5h&g4-(*IJTb^wwfdAwNLNL-gt|)9IfqHKz;WC)s6fzoq1(zjgst?)USNNrtyk6$R4r1|{cbS5|!=Wzou#3@o zwGqu>o}EAgPw6Kr=+QnQk>lv)UUf9k@&S6bq*eF|!hX}A9?a&FdaMfGUgO&au%Nt4 zEP|Y=t|Uxpd7f)78*d4p-=pd+SnJLkj;HH{oLLUhN8`7y(raO{CL2lAfG^RyI~r%= zbrWv00zN{kHBtV$XHPEr)uZ5`nHl4@to=Rs?#tsQ_`#<(lkO#{--?8bqx zccUJCs`fK$(h-I6gAe-NLHX05>KxQxtd`eccOsjbN>26F_m~s8KS2kq3oHzciqn?w-!9=-i&JF5;0)R@$G)7beWz3ZT& zuIq4`^QuW_n?QiOqH7&1uLY5pX?G9{{aml~j=S%Y@fEPxPhHu6IS{>Ibt z3g42t??Jjb5FT%XyIDm&OK9Mhg+MUd{vf3L-uT)bK2M#5VoQd3p91;%tN1w6}Y8Kj0 zRkAkS^r6izB=Z$-G_cUZE56^(`j;v39n9oZOA`7e5=eTJ7un4Iu$I#{jmc{`pV5&G zokLG~lg=5I1Z-Qaj;c_YmeKl!Tg0W_5NTA>+q3BRlTtOdi6n6d>sh`I>_CUqn zIJp{9vTN>i^6LgIkJH{g=xq!sYqgZ~qTgxtQ66DDP9IWt&Q2|alAI=Q;rmOjoQ?aO z+-?CSHnVRTEO2tuD&nGc^TdJgY`cPueO2BwBhZ3ANG896q|LWn~YoQ@=?3fX(#e|LapyZ!og_C z8O@%Ml)2QAEN=!M*n|h!1V!0(opS}*H@lJkTGGWT-=mj=qh81NZX~Ou&sxq}{veN> 
zL(JRMgX|$;b`ut^GpQu$d=m4Ga!C@kph$^c(x$9>8rX4o`sDspQ!m-VPj72c!_=%@n%VDc`pez(0WVP+Ov_Z?p&wk|23X2O+K;Izuiq5khAfB z8f@=GR;8f$82l!#&nI+~y>BnkMG|p7LDsuM+)Nzb>TjZyj3kxd|5Q)z!xjc8vBA@G zAYy=aviES7zx&ZpW+3l^zrJ`(Y?aN*)a4_y?wxs^(Il1h^9gU6_p*~9d@h7E()Pi6 z^qhmt2q-&oX0y^ud6-o2g5~?Ws9r|{F!35%a20~hpUEU&k zqvv7hOO$~7)SF=Vv8M&(JAxe?Q`D8@w@E+yENk4%EB51evNNkSs%zpZ!LffpStU=! z_Ri-CdZ}?N-*69E&(}tFVoxKJbJ+bfR`V0D@(8ZpQtO2%-JMs+&d3QU%f6P^`Hjtf zk`(S$cphB%mSbsQ7gm@P{7=*VWl(UGpHbxaGG9{z8nRaXDxY?)KL1-@Kf9&+!CcNH z^w4Tf{_lh55fE~)?+Mr6hhJ!`h9hVuZvP&+{1#KS`$x*}*zEPO`m3 zGK#NA%%*MV&yI@8>P{k$L`J&Jn17AW*LXH({EmgiFV&h}B4Iq|k!VvWD1pL0WW53( z2kYUM;Xa=7Cp;!H%R{VWWx@G##dzXml1p!&)JpZ(UJ~ej#G}09x#RG##iw`mj=#{# zAuMnuG#sU5cCtK9N{6uP?4YX!k(Z$KO7cAm{Vhm2=YrbM+n&mfDg0elajrty4!qb> zT=z5fZv=lG*y?jyErIrOtefYbDrImt+1-MNz%(Yr+LgqDbb~sFJqQmECYO}jJ ziI?3kub}-Yw%f!AC$^My5f$+M7YU4oypf*I4%stUVa~<0!|ygevmQ-k{=5y}IL^-s z7_Sa3IT0|1Og|#6B%(izw!Y!rTGM92wzqS?9G{Usv~MwrObnP$$t0&2699as+V1n* z7G>7zDW=2vN3QRqhWm>*(Hgu!Y9EV+F}TZq;9b!BFc}>|_etz9)KeFbcoTM4i{w_r zQ=(IR%Z3)xUrT;6fQ`KQoQ8|caY)L&T2?)s|N zNGW?5GY6Ry{^QViC~UQ)w{Jb2gk#y0bSI4`J;Y>oQ2|mC2)ZrFUjqjN)Ri<=OVyZD z>)GMp2nC2(o(d7&!;L79H82b*#~Gd#&w^pK_!;M;qU$dnsvT z_2n{hd4hC4#p{7Ie<5x!VJitWn;0}tv5nWD^|6AU3-lLzu(+|TYm8EvZ_0{Hb~E4R zJ!2XjUF_RrI2sBQ*@wIyGInF{Q_1*TZ6;;rG`tP-{e5=4Ot~KF+p2~yVPG|EevhsS z`2CDTlU(Q;-ft7@%iMNSi=5(TvC=tlavk}7%c9!A)ew5yog`|j_aL%+*6+3CH4?Qu z;&{2bGM|+7+-IQl2zI=bJtVfoN>a?OzYp1KPGCF@Egjs+9>456Tf`cUWm}o2ukIS} zphw@IWI8FEmDj2Irc+T+lB_e1onSQf4Jl^sVhXP6EBOJs9`(5;ET5`pI~c-e;_hy5 zn3*%_$1C^M=2hCt36rG2=tgd@YiT2TyW{0^2-(Q%Wwk7CA^Yfs2Eo=#baaSkx4>ui zcw9>&kMd95Nb6=gD22Z6B)x=X*7I!~WM8^pel5zA zsO)Wh?XT>9PjeaBcYB8C4?{yI^}c0v_AT%71rL>&5Z`<9eXS)HNjY~~^Dm`fA~8!# zvBKVvdJ9=xfy3|Buognv(Bw{f>87|nU5T8=yoi_i5$>~py{r2z-9N*%_gPuSn;Vs# z^LK_jda_LKRuF0UShdrpzJ4CZiAh~S=m>q zyvqz$)6Z%c&JUb#>~a_@$d0n_(U6p3pJ@HR$g3W`XFmEm63L!`%SbQrJHLR}FY&$` zPgahn+{OaSyOX^TrM2^E;rq6t`$JFGLGb`r%RqikDb{BrIUoK28I2;9Ikd7n8RTU1 z0MFMYksDcNR-+OxbO`@h8a{VYSN5vh3YCcxmpST0gzIDkpON||SnbTOWfym85;z`> 
ziD;QK@tMa=q`3iwcRh{u4pUpAg{@>et#Oh_pK0$zQaIhc>_+&HI?`_^h2PiYR#qLG zVXPi(Wu;>U$z%t^Y|_oV{#CfhX{P7=jkml@>%YK6JyJan&TgWW-}D*Bs^>&DbhZ9< z8T9=}yAPo5SY?uor430u$1ne=?#uy>HOuue89fC-qY5wlJzIN+wtvD`l7$`#&&Tta z539c(J)NhO%RTWCbhfAOPvLVR-Z!z2&iu}B<CGePc-zOk)HoR_SOFcZ<5j_+6pE<5Rgq8!@LREF`LK=6W@g4kp zL54@EBO}FGJjz5IH-ofCTu%z7$4R4k;lDm0r~YW(4EY-%BX5B>ps2pqF4kH_xXQd) zV|*V@`$_71CX{@|7SE)wrF`KMa?QEsZ7jKyXMU!w?9GX{d5WI<(?fQ(CVJRAp52JH zhDHDN9n5xvjPL09E?g|YPeRjoB%?KWE}`X_{N|zLIn{G77gTgGDmb0Jci>l#Q*%nq z_cO}V`)PkBdEVmrT}UOV_-4}oxu{sB{ZSD4gC}C&i)rU6*V~iG>!e&kjX&c(=Z$80 zGCLxx=-DRG!%b`|dxh7K%xIqGBKjE2PsR(aSIeuUb}rwt4(%ryXC4PV^B{4danA+* zf5g^<>d*0gHjOMH?}ez{nZ)-%$z0Mp6#|loG~7Rdru$K~JuSo(R|c`Lm0He;!mKuQ zr-RqE^$)r?>(LXPJ3Aj5Lq-yjbkq-I*Y^Up*AO>tUHQ}focek0zqQBq_R2 zqoFSJTAkne2k-rP*n`+=du^S8)+^MTS%}@~JE_`}3Ojr9=Av}~9W-XYx3H)QkotwE z@($LPr1m7;nvgdYKc~P%qRr>5(gVC?&U7VmZg{BUTTAskOM_eB@?6io1Tnk9bH*MA zp|rPW;!~!=M#jP!{ndfPoRZp`F51B7f9Sdi-g6%D93HkS>193idbazpezzi?9#iW; z^dxHceRMZUTUn>th5Sa-$CEIgRHI24R!^;8vf9}^Z+-P8`Px`=yaH86({kQ^Mv>&N z@Sk_RiTc6K#r660|B2earq!)#Z)tSU7u}=zus><>MOsQ+o(gJ8vdcC;4WR9s&{tj! ziC|L;_3zNhxn$W(3rTx446o0i=`=M>DiApvN(Q3&cNk4FpWRV;2)~&5=NhE7Sbf)d za+|t`(_tkR+)`WBa8X9hx8QHSC(b0@O3F=v@uWD)$>TXjLC5KLrm>}*u1UI${pmU} zi1wn_H)#7H*Rvv@q?m~lH(AS>%j!kn%jo7lT78dxlh$UYw&P7_!pKBY%^TqEz9p$? 
zBU-qhAKik}>=o~Xg8lJ$DE_mnI;ZEhsw0sPrulwKF)|JQ8+!IS+Dl4~y;gq&i2JeMn9LcpG_!C3YcB__v{L|k(I?Co`=#S zp)s+i2m7B@s5|hN7-bo|f5t+llFVXwc!7Mf2J^gnvrlm^*4c-*dYL!tKxZZ0OSHM{ zYt9PrVt#M{YW9YM7kQ}cAsvkJyh9}+a#C!M#z#)IEx^UIkUkHx^1jg6)r*w*O`Tck zS*fkR-OK3WaTMHvvzjoQlc6u-_hQ`i=4e+sC^HOZ+LGcW=&gi{q;Jfg=_Aze0Xh7O?gnJuw9w>rIQ|;aI^!=f zzgKJRR^Dwa4kyCW6x5tVXJa9%16@Asf97>YvE51D%M;}@JDwJ?f>=yOIhj=$$&zZ4 z?=||Y$5HnbEM=zhWxguw|KHMES=cJac5~7*?^nmtU47*z@qgJr7|xb!xer@dO#-`+ z)-UMq!n2-)&ZXqFFEmWlR@OFt)RU}JL)Pa8vcdy!(!g($-HrEs4&Su^UVnv!Z|J28 zdmV(U%-nrXUWcM*F@!f$x}Fi+-|TmjmVd>?MLhD%g2(K8z6-(=o$dg7oy-akpv8CK z?GL&+2VFm)FiBGCyE94YoEdt%7)i7v^<#Ow??@%HH;3|8iAvIseumOgd+ldG#|jpm zIhrHT+?bbILc+(R_YB-^)@D|eE8yu%TAzZlL^;hFw6&z06tlPDHpx22lSE=J&({78 zzTe4O`mz2*Cf%g�$8|D71@q*ON#h^C$jbXXw11*UL$>#6-%<)of2ppwk)r$Q(Yu zEM5|~@dDSg=CftW$T^j-=HANiZuCgVx+b?qf>Ul)2xRO3!GoVarLDVMlW zNig4u-POWxHK@-TK!49{j|Y^9_}{5z6-11OxkO4j2uH1K<6R5b#6SuRv*wQV0&SSWFlLR_I>3-y~1j-vImuQPw&E1#0 zCzD$rG@XvGDSCmN%^c0Dlcei$a(oSi?P$1?R^BJYht>Q6D$?p>H2FPSN?h7E`1DKY zByr8=yB{>>JWV2`{f6T0aifxJ;1Tk97p-;RD*owTt@hDJjaB}5=+8N=>@TSUyEl`2 zV)eEm>7nRPbhbq2Nc!Ct%Fcx7>7L8k-_>-Oy|Ei`^BFF))2#|gOhmv7tki&X3%pilx^tOh->O%_8k$I9Q*Qc+?`GBlECuVBxVoZ_TApJ=p5p<5G z>#U5ARO(Ec*og0pAU2RxMxr<1IQ{ufC~c*uYh9GT1+6(T+n-L?X*2Jk6AJ`oWhe0% z631&e9IV98Qhjj7SBW>7s7Q~|ZX?pX2)zs8ubGk2C_Emc#>?>ZGf$f|>+jG}brxBM z2Wm`96+D^IT;jNA40bFp*aU)R@uv^N`$cSRCK+dhnN)M%vAAUQNEF~jDETi~ElV5w zDDgPNcheWNVj~%QWzWFfyyid>NR*5j5Rk~_>ASC{*C*KhO#Jp@`)grrl%K~)A*&;a z_jo9s=d3^#_v_F`M$0WdTLssNy0Qm^WgQ~_|Lxb8t%TQH_l4uT=nJ5 z((SNu5$Za!%`em!FO~h;!%65Q7V)>b$}4k(*7qjy(@}LC>5g`_53AcsyKCU~6ral| zKLt0R>S+=+^-O%USMLhQUf@m}R?8idlHHkka}bSR zqG$RNI&-G`QxZK$50n_7-}!w(TTeqXt|btU=K*1 zjPnQ7UmN9BweS-j%FuQ%TCWN-E75hdnK7+T+p_R*PAW!2x0A^=sO>-sW7N=^cdp{u!_~dC7*W+$dI-(7f&Jk|AH!Hl zd)}x4$wog{=_MXe>wa{QY*Wo?=U-gRK*eo@ah@aVmC9G0I zM-p7C{=Z1#2T#n@`cKf1)U`d(b{Fq(CVe(UVIO?v6zy0t`k>HR_oA0s#uJv~XV+@^ zYI5I*=K8RAD{1G1Phus#1kV|xet`ePIr;>OV$nCkQ{J0?aeW!YCSt%UNY8$aG3=)T zE3D#M)@0ZCyALk1w>s;qAJFihB$U_&!&%Bqp67gR9_-#&HD#2TNGJ8+a0VO>)uZ3T 
zb8Le8jBYl#+n(%7kkL~Rek*@{klr&%lrukgINHaN$7DYniavS;G-V$|JrX?ws$N6g zQ=Z?&c(pNE?L}6*pyLuc&O6;>O4KQAyPRjSRQykx?gVVhse2!PNJui3DZEkAKuU&E zyoofEMh%8kG}1uH>qU_v<&A^}4Vp(9RD^FCnuSu7WX@QI$n5xi&fEWgJV#IWb6?lq zd#!VwYwdmQYwxv3i+gqSym?+7$`)5@siwHroqs=1j+0rv2>w^4F%MfxVxhe@(Hdru&Y-#cgcxFCH|7R?mtt$wpb#U$fl#7LR#{dY1v@cvxh4-S^22T$C4G%Ny^2)=(N}%}m}fXMX-<(Qgv# z7Qv)Cm^l?M+hg_JB+ed)yur-+(Bw63DNfepAs3LOEZ@vpuFn2X7S}pH|Ac3GlfBz_ z$;w{cr}|3wh5mc`m4*<>4x8z$oLyEKhbM1-_Viq)h9%fo!t?Z*{_5hcV|h^yiO&<$ zCVucI@5}n!kyw-Wt#|XZJ-lZPyq^?HMziD-*pzcL&eir?*fJb1_H+M!+P{WheM-M0 zAo>Jc8msp&(RVB_S;QkAbI%FKAMahl?bg9Z`tJ9)BF6OOTc{OFt$`wAPx9-RH;w{Ni_L9FA8JXM! zpE7Ko72rE)m9w_4^iztqe`|3fpL`h-nSn`;svd<=_eUhyq^F-QLN+DO@WQoe81V<~ z`{Q9Va%Vrt6nZ6#Zhw4Fc9D5xTEr(0=0WfC^7?RVD`IBPMH7;Zz_9ArIZ>bYyttM1 zKzZNOPy3hhnB;TnMXp=bR0YSo7tqKI)fQHM5GFYl=|FPEXP#DL-V=3X>GsA@)!bKt z><5#eKa0+Sa`JuNCLZ;m&8d2hoamH&WKBu%I@vn0%O+A>1LH5rum+Yt^0n*<=;)ry zXqpTkKP#J6F%@tvJGo}Ne-l}rq)~Dc97po6#e~eo-Nf?AgI){kz zom_F5)6QIIc132*Te3-9Op}-3R$5(q`Re(+{Zf_~+T=k!`bu|AP2leYWG&gRGNmf`7kmxB3(0fEv?-OSTjStFR9@jHJzd-+ltRQ*XU7E>}Y5#zdmy@78Yrlp~>!JComU_51rvMhw@>TBm3Oc)ZLRMxcL&c+ov2^lYP9tee z@oxx?Pt(ek?tVywx{c>&MdeCmVyj0)iVd_Mfd7fd6Zq%#zU!*&bh0Kp;%Yjzfo0w$ z4B}0BdL2!(t8qBHr}Cb#8(+EK{iEFX4J##^T58IZf6g#C&XtpV*Oo6XQ!eX)KIXxF zSUcVJ|iZ06_p>uu+fu?-2Xfx!XpJ4JL_&PO}It`SUH@{12(n>>wG#k!Je zTv=%SS(?_+=8M`+mi(Xm%w>V>|D26G=Voa`tQutj>+KsTy+s)76yH4AA zv!AyXRe0@at^7&CE1_Kj8)~}yY-RF>`)YM$bTR<@lFgvLw(5z-m%D!-Yi2a{A(`(| zq9OhBcJWlq_y$T%Xwe>n)`%|u6=}}L>{{fxPYlkyUFPt2)9e~i>uc@)hc}!&$Pm z^Zs`pmwC+0V8-XaS64DaWGCw@?&?O8tfQ<<&nA$|iO<=eFak6GO^Yj`kX5dwSZpWz z%%k!3ET7XcT501tQf#H$ZhY;gkL&9D{cwJTyGr8!hKV;2H@Xuj%DQjil{RlQcO36=oSn{7wbXRZRZgNLyf1OC<)9Fbq z|K{2Y?#@1otoq6mW^HKoCBcuh%KrRO>P`g7yvos7Gg&>~u}3l@&11i;R?Arsvte2c z8?z_pLN(9f5gpk1I&zm)TSfdBO@hUAtm^;nh2AA^+8eM^cEM#GZyWfu=dzRl{}S?V&z5(r+a%&!?pNPTX&e>p8EZ z9ABxxA4)=}I~JV`zZdz$u^5oNb;*yCm4}UKkvBk7eU~*4gV|^-RL)|b1C31{79mFC z);aub0R58d`*#xVuf@DAZOLXAku|4=zKLtY3qE!zZ=HeXJ=B`hdLI|%rixH4_}m9t 
zJD&W9VD1O3n|-buNSsq&k_okuw%*{ESxMU*r}Do2c`TahU)DX$(Q?jaD#yOp!Rt=k z%uLMZTIoaU%tYpd*!F7w0Rl7CvBdYMLjE_r%QNViBIQZ&yhYS%qu$1LS_6u6dSh&mymr z;S5;hJxDKjK5fj9bHVbyX$Ed5W7WZIaW1qg88PL3XI*7Ck~eRmGjhoZ@+;Lb7YF*{ zRu>E&DH)BLuNnjX{oRM>69 zgx&PZIDHU}^42^nqu+pY-uLfjspNyp$o!^4iAJQmn}%nIbPeI3O#Pd+o+q=1S>b!0 zcQvm5PlQhnhK$sbv+H_)pDIS*!;>E;`y7_8RX}GF3uMJeRzA-p;RhH{m)zMO`wccv zCD{O8+ZiMG@b@L;%=(f;#EVvJa=kkeEnehdIb&dLp}+c$Y|UU*LH`ktcvJbFT7CyR ze#P1gMYOyT*@YwJ)RJ8m?`vhD@7`yXe*Ey!!iZps-;MqCG~@>G;hb8QorGCARf2X| z(U`MHhKqD}v&>ZE)ODiKBff73=??T+kB=W|Aw5dgA`RqURehd?Kg(HRKi8Z1_iDis znITIrob#`@;zi!s)*w^XCXZHcvW|q|InC)Qr8oMN{Ij2kq#fy>r^(_NmMjdN)w2UX z2B|0S`*Kp=0&)H{*j=NA>iCr%EKBrKgIFXv0al9xeSJEByjx&*vofPu_!RNwIt+YQ z>HqNJ^I5Yy9iQ;`W7>QRYm)DIw3fFh{kljq%H2O``xtWU(~~6M^0PS7R^55>&NJoz z;QN_&7DKhYp72au%YKH=;!Vx~xS{Yg@{72g(_Z?*Xa>&Bg;qJ<*jB5Lh##-9!0kBH z7^*!mIZse?wRj;r<|Ohd>fXvuFR+znWj1PB^IyuR*%5bo{{*M%S!@mH6{rtXIyU${Kbr!!HqlT&X81i=IAK%j{ z`HGV*;Tf7e;Afq--p7foM9W!k;~;q^G-mU$g`T6&BI&_mU_aVzfq4tOX~oK?ut833 zZ40&I{J#K0F2aDk-E3fVkSD$!`!j2uIzjje&0a9S0R+y-+g}a9{nIPW@T-5k|Z1Y*?j91_g#R+ZQyx}c19aVG=*1A zI{g{1@}50e-*>tEEZ9TM6IuOG{pK(Xslrx`3b8fW6;}GY z7XPe4qhi>Q^Vk0J%{X!E5^de;b50kZ#anMw%POpB#ySHF9#Dye)~g};$MZI9Cge{O z5qIcWyJPl^kWXfz*-CFG?VX~-Sh|%X*E&*sq3*=EjLp}JL&;d*jJ0QwBYAfJ|Mx06 z5i_~Eat2=q+$pQ}oXLBEUg3CgEIEyDQ(rQ^4)m0E4ByWa(>1Q`!;8HAxx$^<{heLM zGia7G4G-d*&$!aVPj#QiVRRFoya!fISRiw?nWx>OopW*jecEJ9krjHE;>`f^C%?~Y zr0*>X6e(~#o{}|6SFz&=@hSV+vYMs4K5d5b>!5uqog32TAZ*Uw%L|n#hc(InoXk^bM=kb|G7rq2G5r<|BG#=g4qnvn%jt9GwR3^vK84Fngvm%R8sgW=1Q?Et%8Y zUoKeapZwutA-+{#P>s}27+WOM@sBinSbwmO9vjKGU98RtxJSb~c>$6EsvN&RfsP$S zsc|GZ113F;5pv!`D`;gUKy@+gNYc#1p?>Z^TvW>_Ww{aTI0$T@*L1#CQwt3XG8N}H z&x@fSlBXtqzN0@FqrUmKF<@nZtHlS8N`U-_O_<;iG{ z)+fN^)!WHOEp!&!aMw)1Dm|3PuAe%Yku9g ze~Bgi$ajs+0eLAYZLu@+`SjE(FJO{01|&;KpU}P zpAxGJ*13k&mP2uxxc-TlQ51GVVD~!iW(Uk4crek=OJZ-{7+*~PA!1x}(RBiplGFZS z7RoA%jQGO_>X?N7kasw zh53oxdyLrfi))*;a~bqUx?`@s=V&-qqxoU3<(%~SV$3QLZKgYa(j#@jk!ot5L5>gE zJejig(&GV~9pZO!EP6%FbK%_tPKUwaZ5rpzQ5{hsyXl|QuN+_ShA%MS4gJwYqQyJP 
zFT&01w42$Fp4!+d!gN$_5#+Y(lULx#S-zc*lgTKY42m6a;u##AqJ@8QT(@WMdDyv| zjssYBIvKj*@%fNUe){Kdt1-Qr^NtF9rVJ~;%0AKLYgWtt!g-?h9(C8pi)vcRJY_Pv zjU&}GOdCwgd)%KaML%ofGW8w7`pG%516wY|piS!7#?E8mnrEd$#h`gOyO<`Iu*Y%a z+sb1e7t8O)kvgpP27G$a?h^_i%V9QcRpWs-;B zYVjuXvIl9cKkwhm0?E$x6s+cxXr?k9;g{IhqtGX96I~9%sp)XYO5bF?$V|l?I{&lV zw4B(~h~9aYe_E8?3&muOd5C1S=u$-M^RaX|8H$o9bNLNmdB0W$kT2&{)MV8IjjX2_ zJ+>8lmWURc3-jvTpje!pD#PhbS~t?~c2ugLpTpF1u(DZoG)2srDH>MtcV^17>!!1B z`xheFHvL+0{*cuiLlhgZYk)j`FU8m+Q;F{j)t=kfn!)JvX-ocx+qiCqfOwIWnj zsw??J4#be;*`7(W|ByZT1}|rmOZCN%k^W}<%nq7cVEl=zC9!$~?p&#cx!oV)>8HDLUG1-VN#L;Q4CR=F@aj+{CHYib5+iU2PjNl)$YH~$q&tY;8JxTHs zblK?sgV=l?TU8);-rf`?#c0wdGUdtmQ+F&C^IQ1&gZIoN^ijt0 z=A@Q4;WP~&8`5Zmf0vPHrdXG`i$;$@Xc}wgUEPNyxQOP5xa$wtjqrc=Tb=;VGsTgd7Mu0H8CfLvR>mHW z(=t7D_NAZUde-S=cIYb{S%b^TUH+lkb7sc5{5x^xEHY%R{+U>_P|3^(ed7Mk81bh5 zs)f>9eV^=c8;sEMcI_B_*BpAxq)&3eCB9|%e@XYAz&4kWsw*6$*LEIQji-*MT|<1^ zLe}-2nTZhR!{YsZpOJN&N3%uNGv}d zob7TG+YjT(Swp-|OwYRfJcZo~g*gyy%eSr(o%TZHN4WNJ-#u#RN2B?k7#@W3U!r$b zvge$g-r`k8+u4(`PBbh5|IX}}OhoO-6_3g(4)5~m#o&OfE z9wvJw_8lnpJ6Eq;bc8*yw4&EHe{YEoaW^vP^rn-3>*es%!o9sfN2)-Ck7+3#66 zaRV=&B39%LN5))ppjrhF`nzvA%lCI>uITfoNY+?1s)Q-U$Z##sxd)!ZV2~*DHCDZj z0omiz5~G~6=G#)@e_~V}(ie8c>CyMGe`6jx24{BaK^MX|J13I8>^QAmh#jTWb2DtF z>L=b11F|}5KJE9TLC%o58iuEl^eAP|!i~DL?!n><)sWK}p2C=O)LD-`YG7fWoZe&S ztnF)HCmhBfegdmO!5zcT^m zL&%txl98o|vDB-+IS5mh;9FbzCW~ZmEsytIC&=ea&16)Zrc8B`9n2eZB1ZC&tiqUy zkoeB;_K?mFmaJB+td?IPa}{sPiEg$19D&2^89|g&&T^VIeQzh>(oLok@<~ebt^;vGx^DhBFJXZupTY)blgK9k)7OG z*^oC)qeYY{g*b7Rmb+v3@vK?PH^0+$3>#eMdEpzDKb6fUvS20kO@?YUo_8{vyr{%u zTDk=Di(zNZcFlW~oFVp;c$8Dhro%6p%%Xkv;N+a=HBF71 z)fyM#nN{^H75UYf@cB~x$qD%>9&M)oQr>m99%DWXheIM+)+fRw{lJ+@{>hHXD85M@ zc_&?#)L)9FKf|IlubZomtT3IZoz6z4nICDZ{dnquunVVpL#qXwx4@&GBI91x%3ip< zy~znfD?HO=?dF$C?O*6KdWc$|k#B)Jx{KLILu)mw9zcqmx%VDR)n|d62Dcq*$&B$E z_9uIPC5Y^$dp}lgr|$SGBMa5!u7IF&-;(((+}#$v*#Ypntg*{#ko=F&3`Hhf%nk zl^>6hXeFkX#^X_HNN)4W>|I^?y5jwHyd^!*8nO5x`aI|7G;ySa-sQjUd<}+KtJ0Fb zV@12;#kI`Cee7y?C3^T~I82l4vA;g@CR{m8i;Qb9_iW$n!O(|X 
z&+3MvaLOBoi?HnxHJw3^N)YU&t($S829Ldx1T&S$xxLx3`7&NCgx5d4RTnX&D}
    {C)NMaV>tQlq0wORXN5L$9@YZL zT+Z)Lb@gz5e7S4Kv1xX%J^#{d^>_}S#^*M>UpMXsD-a4!!@-lJEw zd%JIwAGnNYn>T-Tpq_kVhqF?imy(Aw*_6^J-4BVZMx4eA_UHp|g6Bzmt`ZrVkufXw zlCNzM4)nmhqqLQLmpw(7Q`C_&Qx6xfrn6L5)Od?#k;H<@&0mn_S-outRlK37L zh`iltc#|t@*k`Z)XE&s7#FVTk%x>yK)YJzD^TukqJ|im#tMTldQ9EDB({Suk?Eley zoACXAT20pJXLvzNj2{Er1J%762RpLMMc6adJ%6gBoj#?y5kqpCWPjXx+)9?z?0Qai zjjTp`iZ71fU&;QSEQ+fjUW!Jqu);SqK1Yk$pW2dsZE-fIy`H8-_V}&BpZ40#DZp2G zu1LP~yYaTGnD7WowTJ$vTBt?h?vQKYo}C51`h&dpK=m+JbAo92mvek`?t60OJ?{GP zaLFl{6G)Zl)?M71BZ4f!-;;cLiZbgTaogtU^PtEiUUHHh^kSPlP zdHDGP8*hb0^6&P5Xafi(vL%!4VyzAEy9Q~Fg4=2_?QVREpS5R?Yemu>V${|2xK9MW z0mh~H#CxRr9^%=LeFCO55W}v9#9qj4gl%vCM&SFE#=JM!4w;!3iK@4=z#M5c0D{6j5oYxgsLK41A8StBbd78(Z}qy8?~*I6sCu+4V< zpZtXV)zg|y%kaC*I%c(3_NpY4@pKrh=2=&44w^MEd7X+-O+0DFhMo^ofNG+ME2E$AOd_Jyv$tJQ-r z;8YRvJW}Uu*_UB`Is0F#m2ST4&abwSFqyEov-og#Zihv(7Ti+saoA!Tn)3$~Vbk+n;P%5wHmq^#N=Di>x2%Av@z+O5~)&HDW|Ht>zi~KHq=JN~2-X3NuQQI)t}X%$%K5V$eD};UBu5~5GhTYUTQc}y{*Z6FL}Q6Y|+~FZ{U%vQCUA#3^N|V zfXog48(xQL_eS;PJyk<0ZvK(n1+U>qDY_lVh8JV=W;_|9 z{bb%uzOv86g!`Z{jI@*d|2)m7^3D_Cb-G@16CdrMmbXQtXXu@iR<>yKZGFK5Bq##q zoWvE5LcbU)Z{B4>G+%~5JMOkQKbtUTGl?=NAI?D`M?$LKH5W0CB~ z-Cv~o6hq39z73oQX!~(8cG07c)>_tn=M=BD(D{Qmj=`Kf>s(0l$r#oK)(bIpC^SCz zZPs>_(zj#;l`L0pl4?1P8j&hH&1S1_ARC;fw&VnS4R6Q%dmIenP2ao9m;KE)wfAf2lC5V{!Q{EFVWeg%8Htu>^2^2 z-i1(B0c15rvguwc(%ykHnJpUz+c-6AB0IyK=T}&d`!d}jtiII#SCS7xON(ktB<{B;73t0;tI1Cd1`He8DGHc zK@9kbMJBpuyt-ebTQ`XP#!{J`SVPBRWH}KN`$Dlg$+ClTKU^NH-K$w<4~DkGkn4HR zKhI%%urB)~cf#d2_tz4&U&PH#kR7AFtOZ_6o}6RYoc%8&Z{E+Y;U6VgDeK8cYPSZw zUUlz4|1w)O-t|U)KMMPg)b*m;zlM8qR_AoR^7wj->jOx3t}@NAsx5r?*H7<{yKj;t z`C)T%YF2Z;M5kn9eN2h7;MfV)$7W)!@QFV&#X;YBY7D! 
z*7%Wp$(a8-sh%fGJHDJ#r{2N&>AbE8Hec$lv*}V*yvQn~vdX1@PoB+9wsKuAz^am{0zNyekStc#`?`8 z;Z~dH{WA`3L88^{nDbN93wDKZW;bu6_g@gN1I-Uu@^I8&T8-j@M=y0kHIXK zQ)rS!=UEt3#>*SEw3xp4llVqVx{$}c2>*jgo~)XO_`5s*UIxucZ2OBg8?p2A?#Ybs zbL?M~g{!j8d=V}qlJr8$STC6x&STsDBu?g-to5zKqM35; zGyW9ssLd`{^Pt)o`8cF!&}yGZQw7fdoQ*Y2o!R%>hZi2i+ghmUceQlo16@dYi#iYF zhf4~)Ux7hct<)Z}$(}vj)7(yMD5t(YIJuB*SK{Rs{ZHNx6{Eo~WNj=C#X5cUEj`GU ze3K8k<3ZSNP{UI=lQS3E;>|y~tusHeK=~`7yV3|UIs9t#j>^hr{o5eEa5@ZgqV5Oy zlq_@Y{639^O7VozFj%4d;XJVitX9*i1Kg%SKl>GnYU>7m@GFf<^2Nc0o_QQQ-0ZKn z^dp%Q$$tLZ=~+`7rCF&V6o1vDUx%y7n=}iS@ABNmTFvRhZ@TYXk+6x@ZzIEtWUZ)l z#@fa4JnM<_R-+EPO=F9kW;+4Ww`ge!eP4pf*P{AH)>uTdYHEENzF)I&c43dlvdppf zAx+*!4|CT#*2oH>FW5SB|6kEFnI=Z?xC)~4E#$7C7tATp7eTs;pQY+rM9T7F{)0T| zQ=hYzI{UmzsJSRRY$WXt+KpnLYk0&mez{TqemiErC)T#Xi7$E7A*5P@$?JGnJILjI z=5s8ymyWGP&b+leO1piPc>*FCvuBlJ_WxYV2Xks`_D^nyT~3+U#|x8F;2Z4fT$sIm zhLyI6@$1Ewp5j+eI%S_;Z9Pb0J1cdYYI8ArB(wSRbg$^IyhqFXwA0w(EiD$; ztKA9pZ^?B!EHhrn$$pzztO6@95-ZNZq03xbj4x-?^H^GMVC(l;{9*0qbfA7}8_K?2 zXm}5QO)m2Z^nOxpgJAW5zx%k7C;Xh8@EW_E1iN9pZXfISVVyj;&G);O$du@HDeN=< zeQ<%PjeNUHExlRi6xe0NSsmuRFmO7JN@8Z-re&Q#GRb|#s{Pe*mKc%QjUv9uD*4mc zI+-i)5StEX$@{fA4GuY5B4_Z_rc(o-`|zM-7(Yn_$~;gaeNW%k!L-aK{le>i=OKqd zD`$n>=vvmI=EQ>MSYs)sCUf77+G{8}W$nYuBpm^boO)iJd?zTAyhPc>(c3*+aV&dR z9#H!;F#Z+82kKF4=y77RN%YFD>FhimtiEKS%!$F{Abyj3@1kvXmuw>2rRqLgsZW%; zS5z5A&g8q9LZjL6&vX5e>~jL-e^KwxqQWI&|A*Rn0NX0^ljIi9jMIVLMySb9w|;#A}c48 z&+KAsI?(TZh55`^*)J!^tkO>QD(oXe&TV;LbbAt4=D@3pl8=(=Bf4ZQ=u8qFU6_w7 z$M>4kyBrO7uzFU$cPGI;S{&-Hb49U^tX-Rai}8LcT)xMLiR3ShYdO_w6iZAc!xy~d z0(JB&^sz_K;#QXb)3vr_I2R)l8z0uvD%y;2UkTcs&;K$O`3mn2!ts~Iq&xHv%Q1Cz z!8-@QzkXpvkW=t>sO?I+&4yQ{f|VL-c^*4ffM3omZSC80)bTeB4`-EbIF+}7-}8mB zkbV`H>qGbzTC`X84q7C$bjBnP^5IchNe+%L$duhg^?aXmUK)tUHAr?ZyJao(7wY@i zwMo#LsQ#Q6oYi{Ou{moepVS8*1m7mU%_)7ilRCS*Dzin{o18+&kf$kp>$)fN3?2P_ zAA9t{v6=j#DMrs$cQfADUetJ0y%k_Oht+d3N<2Q;jQ6jND-+o)8Etdg#w->)2CI9E zPG!}T6ArR6d)p>V0k!mXYj)ukt(&vQE17N?5 zy*~F{-ctUKuZQ4jQTA_3%48V+mi{fpkOP$YfJ9ByyqisuQ>M53OJH_JX-A8nZ;DZG 
z({K{R(m)%63m;L=RE_0y8F)^fR-0(U;_J$&MxX**+K? z583Q^ea%zdBJCZkk8bYUgZO!J`&HL_XH;2^HFJh+PV4*_9$Dw#n!Vbw!>jb%=I-5C zvsUbTLklmE;256Pl?VKjHQ*(G=Z)Fnq)$%Ir7-WU)ErVK%j%^C-rWx6GvRzSo4r7W zHTyaGQm{_8`t&*_nw zt#k170W3-GggKJT4`x={eZe3~s03Q?h#eqxU`db`{*7(Z(E- z=RI8~48PL7m$6y)U?qmm)$3)&&Ii~t4%J8LTELu8{e zL;XD9j#~P&8`v$`b+V5A`=QJq*mvm_ad3=lOh??`;aed6t2`eB%k7&Ms8zCKOakuEpVtG`z9ApGJDTZU$R+Wq{RdA zJUIljk|*;@87FjQi$h)eLEG=qA#a*0K;e0J4&c+J$d|L#zwuX9HTH%3`A}I#g4@|; z7U?pBm|QadfmCuFzN4Ol`C@V$Rb{ak*&t)V6Gf>eG;9ag^~&b?X&x@`AZK;~?+jMM#i0@c5(-1#F6|?TYR(K*mnjWFNbl-gLVT; zAHpWx^|U!-dmI^Sveyy%@kwNARp^1|!aiAXp646o;hb}33=<#S5kvdhHP`l{4OM5UVcaB{}i%4Wp>F z`oarv_n+Kw!!RPNUWc$na_mkZ>j~I-HGNm%b=D0eb9QGfbtUmsK01I7_tAYlt%|cs zcw596bx$E?%usSF&&wXiH_3Pa+cf3PUFn%s^as!>nLeM^^4C6R-ROmM`Ux62#dbYL zuXV>Cw9byqWikOlakOUjzMA*2f=5*H^{G`{}m_2XZbzA2!^AMfa#B@iZg%foz%gJUP2357C|it0pEjr`1~Dj)eQyMkOEP!ebbDf>G)QlD#ipF2#$N zpnrvHS3@eZ;&Vx!-4DIlt|1;~wa<7zTh(_S?26)YeNyLq?-ICCg>`njvXyO;pS2z? ztbl$C+Fq~jleBaxu0Kng%%rE+O&-eU3cSwB&W7&DImFrH@G*q5aysW1jMh60B~ji` zC0k$K{$IvonQ1wceAn{K%)ONJIV)ffGm@<7Q*9`>hC~N^T|%!0c=U|+c8De?i&%+> z-So6M2Qu~CLbh*2kmj)3APO(mk5}c_-|^k7T0a*?IoWHt?=B`=3IC60wP#^{Bo8)38bqN0|OS>=ORvCinMIRFfD(Qvt1oRBNbJEaEpL${KHWBhZUh*4+Tasgu z_`eM&O3@)1dj??KpK$7+S8v9?cfqLd z2gQuve~0pJ?R`<;z&-5z2ifNPxk3Nm4m*>(w>M1gbmxg8Oc%7u{ z$k7I#tHtl4B4tkuo6e(}@coYN``qXKc~r7cXMQD&ZBpQ4cB2gSU2*X&&mQR|d*V-J z@|6~E%E7I&T0Rp=A7zuQdC4==Di&LV#l_ir65e# z`BJ>z?z=g7c&fkV;=xe%ZbgPuS^spnX1DKTad;d%EaXKMS-h$d$&Ic(i+6iPlc%AS z`N^Q!g1_V$=UNW|^)F|yR?tFLz_+5`+alHR*l`^VX7P@u zeCG;i>$tttK0Po({7nDLZ{%B-u)` zjKsy8d0wO`RPr%2*V4bBv|g{(UR!67 zaV*<+P%f*gf`8Tn?jlQBpK~tUGCscy7AKIiI9pcZ$;pFsHG6D=?z!&%KRlf(7F`Ov zJk!0Vf2oVTufTdNyVs<_d?j*jXD6Qhl3MZvR2;vu;wbq49 zGFm=cEZ9$}Dzwl0?dd#mIV6)!pf;~tZCu$J-;$Xm?}YPQ(bHW!#fueC$V$3veK(0d zqsjR=cC;7yD#B)AN|F}c!*~M2cfLivH_hX+D z^f(Qx7een(a&KnUa{7Y9Ntjv1ceIdPGRa+1+PEUCWgEgNIS@*@yBhtH&*MF{<+-ga zHoPL{r-ylv1+T^EZEDH7ua@+#M3$>~*%s`t1(9m{vg`0YBcoYLJdPtdD`{m>m~oOcAb^W!>roA<)Uu-;Xq&1ra<%X!oF 
z{_K1y+hwG(PVEi-JRzoKFTnl~U9Fa^^`3|0HN>i;MfKy!{wBNa7ODHGJ!@MI7j=`t zE$cxtbMYWq8sg}0?4NN{Rk404?l$q1-G$$dfzx95j>MqeWZz5DO|U*sTzEqQrhI{44*P3%MF_ewp?1toIHymeJ}vwOoV485ON6SnyO<&6Yq}@v$0xHdEt-X#gv~0LxbDYJjcvX*Yq# zR1~?+@_%dIbOt+Cbv?PLYG8QY5VjC^in3`|D)cZaX`=py(CH4tM_}HX+?fY2i_6Jy zyA=<5!KsdKvV!v*QL2Hq7Q$ec8oq_lgW78b*XP+avj)XUQj=`EFl7y&uENrdG4E(~ zRmIu7wSQL}&svb~Mj^df;YOc7f`9gVOe5pvtdfikJ7ILND<@(?&iPzm^tyswl9&Gg z(k&Dtk~Q{N9A3tzJ0Nl%J(7ENFfY27)s805-z-~G#QTZ0vg;tdKu*tF#y)3=DyOJ5 z&*<|=l>Bl_)sc1AIsJGqJ+_j2I~hlc2zS$ZAK%KJf}TGAk6#A6H^rqNAlgK)lAK=) zXz@D@x`_|}tN>4Dt-PZ+jcl3CD#r(N8f4CIPcHwa+NrOHUdd)dFh8>_IcIb|D<)6x z=_Jg#K_h8gm#p82A_K+YWXWjAPB|g_SWn^rcy^R*V#Hlc5)#f8sOQ`7ocJ z3+dR6Z|@{YU4HZ#JCCLNG<6IjO&3}xPx@H0oWUCV_~b#@m%RbAc|vz_ZV+~C)5DF% ztz-$`rM96gwOCYbq+d_IyPTEKiL~uWHkQX;j!BQIe}kG@@tCaNK1S{L(I*)?^S)$= z*w;)8r?_*wcCsI@t=2PRHw_QB;@#7@dJI`V5$6X&@DDzny{E^r=#Qk&ITyWrdxB5b z7REBSimkh_cOiKVKh=6{{fCy1g-JVDv?ujBYQ9PNQ{7j@ z=dal9U3ERpuNJr~S^Cc;?+sYh!szH0mYP|JB*o!*me#ju$TYh#QZ@gLC z8S`{t!xz;2m?)A{YvYpbuXy4$nVTQtaC&j)>10%qT0%U|Mad+j6( z(JB!pE1>hN(UgUghaBGu`%>jr}^%H0pe6L91KJb&Hzu0MH?gIa%Dd;*~!c=kT~J^+RLeR@o6 zPK}Rxt{8zg$@lVKS|#@V!rrre*Figv;y^<>-(TSR3?8{0-w(j#H8{~jv>ZtGaqb-{ zitVEDIyhdY%z9BHr(1Lt)lS8jl13~i`+J(`F_`~1ghS@(lGARTDE6vR-X*kuLWHiQ z=EdZHSQJmR$=>3eXO&IQQ8n2hk{z`=jEAwsR;)T7Gt02hKBX>H z=QIeP$!52cH1nF5sQ+{LW(;+tcHiK;t$0TT{+89!t=yZvC^^ULaDC8rT>F6x*-6|9 z1GYjn^CPE;k=wDi4fNKq(BBB>j}eSCXfx@l8|yw%p&%;C_OU-(wKp z3!CJ~Yfh6n2M z{dr7wOPr5MFR*k*YqzQ6*aGKo#h}@Rp7tb?^%9$EySE$fxI>?JE%rVr$|iepR@%)F zi*owXANtF8v23+C&>Oy=@sgwc9sVZ%Cn@GG8u8e8O?yys!GlRizvis6NLR!$5^9+b>1wbWB*hIcJ(|BIa$ zi3m9lW-JYJ_VijD9FGgPv14*j9ICEiY`xX*1NqvId|)$qUuUnX#;qCgy{VR;-J5fg z@`f&1D0`{@IY{oNS$0gef_@wKbtQk!9!bQQMvr%4wFgeg$@V7;XHKdQkI2*JCei#E z?Vl)uzD9@Fu%s@3PLI)?MAxeQNIl9mT5iuayWmrmJPrK&Qkfgnx}UqRcg*sW*d^`iF5^zOLnf)@!&*wr3Xp=whwqjMq4#muO1YSA$4Z5&)0U=R246bjlbl% zW65|bmgK~ukJPk`PMMR+TiPOpadKJrj8VtUIQyV+{B}0#M4DN&$y3C|>^PP!D!M)u z`zxs>tF#Z-_DZ$9tyE4ROkSY@bj}&TnGxPe%H)Jfw%)9GJ%C>}fp+r9zRG%8d;A%k 
z8k4%KHnT=G^VR=`L)NbsQO7pFGa@aaY)8I65wi})=aR~{VZ+6uL(cl@NvG9tOn?7` z=<&RJ4&%vrKItZUt$}&oQrA(cKMeMx$Ju1bipRyY*sZ0vSZkwtN~<^Lw6^4@?}-B0 z1$rRuM&L-+!d1q#C*W}|^uyC@)bJaG^EUd;g4eEbb&UEy#_8ndYl{yDD!UIFyTrF0 zuD*^F?O>2h1R0^8qx2h){+-Ng=vqy?xAWM6?0k|wZ$H@l%jb%4A54NFtg#W^ImbHl z6+?JI<_5B!;dH%o*2(R{g5<4zinUvjuQYqS#^!y=Gz`kgK=v4Rwh(1kVa~%?wFp}d zabHf(OAeAP7@D_-BgK@gddo>bAR zw#KL@_+`f1qv?}1gunW(1v#^~JQ>7xl75?-W20|a`y)}dm)|)LyBn>Oi>L_8<{Z%r z*dS~DFEzqApT{N(c1gC&Gtqf^@|?MxRfLJFz3@F}dM_6HUK0c6`kd9GN2)(7A#?W9 z+ayaSyYJP!1!m(}cMN1UlQ*lGv#xp_eUF3pG7`=u?NEI9oM)_cM=}Xt1mE3oy4k%S z(CJ&gG8lXJ&|#?SZS?QiAChO;$s+a&TzN?hp3iP?x%v*QXQ-h*$&RAc$?)u^#O0Xq z7<_AD+E)EZ&h1N<)r-srmxj|Pv|Y(|4~zdT>9-L2FKhontt2mhO*Z{be91Yr^Ym?b zmO5CAH<2crv`CRSHW9>mO-vF)8o7}BO-;Tk@`F<8Wk~MJ{sXk}9^I0-$ zzy2iAQa+H=P3jtjes9Dw0~2mjrUHDMk@VSu|754narpQnE8WWuIpuA%SdotW*=J)rR{Lq_NHSC9BWR>WZujU{c~uy9j@7Hm$f~gLit*j$bS5meC|CS^e_ty zW9i*wKAcsKhiuj!Dt{)n-gGJ1%r;p=@IyPG5%(U#X5-o$osBW zX0iVday{!@24Z0ur9a2D-fWRo6is2%2Ln1o_y9da-dBv!a`Hj9Vf%S(cMC2*0*Rcm znDyV&u%ie2euGE20R+DG{_%KM$!ki86x|nV`1u=7NOY~3U)LIla6=9N-yRU@N zJV;g}*$(mLUxm^8MZ7X^=_dGj+Vv(FHy8Skx%&WK^RyWKm%0waj}}HM%azWoTV^(g z<9ipiEW^olystJSb0TkYTeiooskF^Z;+-@;gBR{0;fG4KQQ}3C+$R?NDXx9(S~9?N z6RYx8klt>cX7_vj)L)(liIR)`4W0O84{@fjPX&x5c zt?V4KZl_UuZ7mj;kH@D|>6LfYlU!ZuYJXRgo2)EPxQl0(XTkjoeRxg}&s+2CcnO7tA|u)|tk%|u>plEW9?4{D zUsjMhnd`rS%?@o$gy=gsnJg#CXmpvFbA<0(vUmF9o7CN2*|}s`45NOu-H%rvK!*L) z(#}2mFtr=ma?;Ie-(5kf)}mJC-Di?`g0a#q{=bjxoADrL;6AL(LOe}2r?J|~nB{q~ ze;9AN7;1;&NY>=MuC}5yE02K}vwAehS=F_a9fCWbz~yL|W}j70s2qWPrP*jck}e5$GEas1hZ zE7_4;7k+z0gS_1+O`C~kF2b@u$yJMGHpA)^a%ca2Em*!Ieq6!Ubz$}{t7m5ALH;_} z_qWl18{NKD@NQ*UL&}Rc77;4JFN0`toGKSwe#3*r@H>t2|jQ&~ua*iTYrD za#SX_P4a^G#QnZ>TPEf%hFw+Y&!pF8T20eN@;!ftYek{^1=hVIs%?YJd1`8=<`4P9 z=QK=|_#a)L6azATX#|V;dW~ZA-bv4kjjnQnb$vfs$(kqoNf@^nYjX1U^?bY}OJzQ^ zIzIkwY%z+TURH>N*OOs3j&0I?i1zcW7rGv~%K9OHw3HR!5v~r%#%W&B^|gC#9^O$jYy$ zctJ~XqBxmO;hj%HZ!Ozpp1g%V@nI2aGBi)8%L=yMA)f3{gPifx4-d1e>R+PGP2|3k zwL9sb{-kl%3C?7xRiwO{$Cc+JXVD`W?Ea4?b=mv#Cgb$zY2g|6nOMN}S$)tK&_BcV 
zrh10Yc-|J@eB!$U;J;EEIRW8A^}WD8|AFv9YA@lg^g?-Ry3zGk^v>Sxn`wI&E6t=` z#sK5UmE2I*(D897Y=&+-*DBI-Fh6)22OIiyxpC|7kjYBzYUp&g;lJmQ`npl~3`pvFd!>eR(=b4vMV$&L}u5SN;$k zX2K(Huam*#0%-k3$J-%S##m}3*_s+dmnPG>P+x+PdCHhV%OWhY!nZU1U7Ais=$Dz| zw|)NzIr3bacS*_qmUAtB76}ef@@?_FDLuaATY2xFw{m&wl#|77)7P8_`6BE#UQB*U zUvj-q1N40pc*;4v_cgfO!zxc<@gkZfT855kWVX^sx!9}9USSVa524uhJweB0J#GmlZdCoOybGAsaoHk*4-UY<3YeOa{ zo4gItJl&4P&!1VSzdCcWP}YF86E)|!FKh0yyL=+F-q9!ikKdHkvrczM53QU?ujHzz zt+qjAZ-<*jFeCet$Kh1g7471y52!7tMZV?kWLSvTmL|m!*ps|ZBT1SSR>=pGG2k*- zT?fBbF#a0Hzt&@%K$7gq%=(g>+2_A3mi?PO>3M?qxK@;EMvF#xUxTM5$Kv54RVj5P zTk-8sZ7yPspm!%RC@14Y!g)Bil!P;Bakn;}X0P#N$=Sty^%w0hXp8!ns{3rQ=oUP< z8B<&1&k=M^PLv@ySA{)tZqZIW-_I!ENPOsw3E3M{8BTLq?;tGAEb@9apQfJUm8%Z* z>H74eNj?C-bE?BHIzp+pDxBacYuDG%RJCdI(r&Co`I;$M6Vv*N95pKZ5IqoXUl7B;E zp`Po%_|Sp1{{{Cv-&{=M?c~_N>i;3-3XvgmTLUm^9$hoj{}Aj-sp%M8?5KsDmCzmH zIo<3Np8p+=$~_* z^68E(JO^#&Z}%uQAF^A-!pxuLH0ICYo!P7@xKdo1PFS9ELAUYw?6=#?SK6~$PBmEI z+B>BF8p>HYc(#&n!7Xb;ra){6hAt>@x|dH)w3{cFRZx0Z+-Xam^y@#XZ8DbBGt!#v zX{-vi*46T(BH-)TJ6^37NbninWHn7P!ZxH)-jjSm`}vSxsis!YoGfazbaw~3orxc1 zS+F_Xp7(uam|f^vR_A0?bsU*1EBOLECu8w%>g?wFShB@8)0^ep&V8&>3bGfg|28O` z;-@L@4st!K0^W4bczya!Sd+JHyU8+KoP34N7V@X+eBf(*ds<7EDsjI)FS~J~Zw+X6 zg#R17eFNN@>x)o}>h}?4% zqyNVjOVX+_{0=l$SyC7gXNP0<>SsOuad1rD$lH`i{?KjYKSG=>2ifGItpUN`$odBU zW-N85mXl5MZ87==mVKXvdeHPLUUn)AJz-R{ftJU@{zdn-@cRLbZ=vpFwml4EXOett zp+}jFRYTZkGB!Nvo9r~r%A2YBubjU%6CPt&<8ZQlz@sjsQ}!CSgw$;P`AzI|7frT6 zBw77lBVTs)l~>m({(gWBllSCljG74lJYjZJI;U7(taKgq*5@gi;rOriCgbGeTFZRl zFtOtWf7dP8GOHYtzqBb|$u6Gpd|`B9JXHd&jSGCvxGZmPo9PjrgYZ$>-v+fyd^dru z=3!7qDb-n-<-SyKVsq|O4R1{Tj}~Tu6zW)gL!ICtT+VD zA3(nf<_r|Sa$3)atQvnxoq3})Qyp8_Jtv=@3+b=i@uN6&o;qq2Y;-ppKE(oc^l^EQ zJPh-)cI$h19ioN_H0g>1mEHdkJ(F$v2uQAC@0@NpgAG3BN!!)gn-w#cSWRs?@piVd zchd3~QswUo#HU|y94UUKb44-GyR6?U&`R^ zZf#|?NM_e_s!bWX?%@&T{F{#jt6!1~d28V0pSBF!2MTFfGOKR%fa20`mtmOPgo28k^>gRi8x zxJ!RF7-#OlgGXTWAO>`WV&;S|!^?Zfm34K=uW|+ie_)Ys#pj$Ao6~=@mMQC}CSYDN zFDKJ*UwG#|Va_H?Zm#L9em2c3vu(z>&tYQDig=n_^GG@q+C8+kj-?LM-ZyG~5i_4x 
zXP(U#vr9>v`Yt22_r$M1)w=-F>)305e`Rm`X0a=K&T5NhJIK?BzFVMn zJ}jGJOm?Vs^4;4!C+8G=hfmpWv))MTV57v&`kJDyY}C?d+F5(0M^DD-nvgh5t53qQ z2L8|EsrSS6I;`Hnk1LbEJk+ySEIai+B55)&=1hbu$@xB8UQ3$+N_@|+k0ePl*1k@r z7qvAKI(b)?^FhjzVj?Ex%+_Q|xKn?a6K6L;`EnZ8Rc0{TCPUUp=wy%I*Y5fb8d)81 zBi+w~LnGfbr~eo|QwthQ!MpkJuj~3!e<#koPmi0}a}w(&qkTQv4TjKScF0Ne$petH zd@@5*R%>fvxkf}Q0+kENk~QJK(lk4lAEW;u)*oH)i)6X|=M>Z_Fla#6me{&OJ6U0Q zI;Kr0TWy?7R-)=^xYkd5Qf3}A>#P6euU^VN%bMAVy^B>Qi%Mree*wSfLe8JH^)1~y zs`GBrt%lP`@nmBme(ZAR8SWij@PtELZH&uLD$!g$_p*7iSls~ChAg$5w>G8ir+n&Z ze;vnTM!`2Z0GAf#tbfAOobvD-`R3C7Jte2pWv^Bny7nI$t3~uXk7%Kq*z&jf{^hTCv^EXT9%18Xmwol8X>+dV@&InG=S@#R?4v?V z><8yTY;yqpb5=$V_$bvmgXZ?6q!8PJ- zuPQl)eOAzQ1w>kFIe9N@u<7Uge<%xojFl5f-(9U&D>nj)XR`Y#B+cmr`}3sNwbPUJ zPN&HyA=<6z_0w)}7j z1|`4xci5LXw#(SP3Y1>J$PHL9o1EDLaE&o~&PLDb$o+Zc$rw2cD?X*c0Ff?PH}<)= zJ+^Fc*LkGd2*sTLm3&Bh+2>Q*9pc*$STQ>g9$?iY)cO+rlQs6BLXUTrJM$#@E4luJ zOY*ZXWA&_wsLU$wLMi7t&DEbBquy;~?nRp9hrI`02k@f4@Xu-7S;u=dCLE}adm;4& zjq>(vExD3`VKgq?ON%?mvH&J~3i9Q&(wwoKH@Ryueh36!P-9V8jl$COJjsWcT!m-i z)4P167+E^=frjkZmu)UFg3GvTFg)fcl`&Ci_@7CKtSp|U{8FfPfI#x+?#763Nt|qY z@6mpm5nc8qo{DFcaOp{`t4P0gy#Gx1uW@H{K9W6rc{WU5wmNhvM&9gDNWPqTB4{P7 zxJYl3_m9*W2}j@b4+OW;Q-KVCpKF)tv`G>K#m} zrT)|4^$d26<+(3IBzaiBX6c;zlecr@mCIR0nN59|d|9vXQNhpNfJS>rJ_`M0g}gwY zJP$s{W6d!9P2T%t3+V%?N3nDWhJTOqO-bk}TC_Q>chy7XLI`u9vYFeX(e?#dO zpXXw3C;i0|(c+e*lV2JYPt$IK9|fk|VwXpG;h zcx|4|2WcT|F6t>W34%Fg?_BIJ3As&L$!^bEAeenpFF-N*va++}auVhY?DQ8C;580T zS^t>#9XXHgQ#xm*eO5sxU*;)VtN`om6n}!|c_UhcO@4+|_Bmds#_T7qq<_df%d>Eu zCMxC$yCa)sCFlygxl)Ze$>I&IKZ`pX{Xa?6-peMt*(m!7GM7+_+^h6c$(eQykDsH( zT4H2YXBEYu|6uA8c1Zr)O4_(pyjbRrCEofmw(M`*xQEu?YV%57o_Y6+m3#}w_QGPSeyER6Su6Lr zma5@(1M<#d*UV!!r1Mp@E^UN!B3@jpZ<)eQ$u?O{OYcGGJJy~~lbW!)g5|QqemQ$5 zm&627J3Ev1)33KwVkQ0ZX8b$Y4`P$cwcHn)$tv(9os$!%3g)~<(|di+8~F>c;AYp7 zUoLOL&ts)#Soa}&XJ-E!ew?>=w<(uQ)mi&AL2I8BbUp>Il8yW-$Q?rSvNX%eti;j^ z>YUF1ZWl#vSL3;2)oe&EV7pnQ+sOxeh!2b4oV=;K)SQ{9m&DMlp}j#1r_;QLE2H?t zDfl;)_F3uBL!^5MD@(v}0;FeX?=n~%?4G+>KdWq3Df^~4YG|7G|QJ?^1H zYu6h++yW;{s_$Nx` 
ztb+W|omXN~Z`{pZu*_$?jSZzpkzE%#`{;LZwGB*0Kyv_F<@q9g?=%s<9GyEB#-Yi< zu@Wwco#W^{kY35|lni8x;JsSiW!#am)pGT(hstgGgnA-hHD3Ii8gpXcdwie;8IqG_ z2nO|6e`_|asg|td@5w@W4^@U8O0ne0?r7uQob387yiSGgnHaIxHwTjNBvSMckN?g3 zPr_=fD>o{AGHafWlizCTVRd9)G3(C96(UJZwmX&PN0KEuhd1z)xh%33BZjizZgH(I zyF5bU`h0Ds>%-MNOf8u^sZO%2MA%9HN1>jzTZwkN#IWpve2$)HD8CD?@x1Ieou=AvJu4mU=)`?{Ya9P@>&%H`dFp~_M#ICHL?xlP(@a`x6ZT8(#e4T=+ z@rv`v@uzPyD$Z)QQARsk;l6+jMR`Umh$NDo2g8TyU&A+>;G9f+Im767%sL+9KA`Cz zqT=;zdI(86k-e^e?bWkh%-C$iHGpp(%U)SAl>Xv#EX}*Uyz^~?o5@p=XY^7=oR^D5 zP*(S-n9uhpFF+wXg|}~s~gm=XX%`8wSX_?y!s`4 z>EePvHf7WET%U{MuPFPfS}O6Id-V<36H$|%*&(E&8m$bHsKa|u1zC^lS_&8JREqG%& zn9g-AJ6*Hy{&O|2B+X21msMYJ9(gSG^~a|rBHlbU%|6mcq5m)j&&HSJ=+Aiuqv5+& zjsJ%OkMZ@K0I^REqwsNr`+AA;3+X$UWF6_774CnE8hMh>8Q~d|d*=pG?rOx-@5lTu zY&xDVwkOvy_}ao>i*P*WDxD`{Y!Yz})pjRZeX5pPJ{?8Ym8_W&^j{E24_DmPj7H{Y zD{nKhLbNl@a(da#BEn&?Kij|G`AHXyO@4>l$$bkA?lYn}xDX|tXVr)4Ig>2ajbr)pJrzc4a)xV`hmDKe0hh#@I>2%9z-WvCfES7_VKx+R0AVij_W6avHz6k7e5x;_h5rZNxHpLsJ7MatiM@*f-E(Z#q;J zC$fKPA&Vrp;AsWB-L3u4)p019^5(Y0^fT?$gApgUFH{u(L$zt!jUl6@I0CcA_4tPkWnIS(7*)5?K>-fGC}@eih|rvwt%5 z{xdVVko|kZ?*y`afGexuHjFO+hDr-9JjsUL)YQ|JacWO3IbFCNE8XQXi4i0C-2``K$6hyB=Um!xu5J`-K4bs(qTX`M zy39y#2W-FPZ5hSw)lT*kwPMph==!M|lAnF2`}0JdH(HrDKLz$b9QVE?XTjoowSr$v7%bYs({`!Z%vxam0kw#GUPg)?j_i|A1fwH{1tfK z1O99MzX8+pjxwXG{pj!AIt$GeV4*C1~LTIA&9oYR*Gm+TCOvgymDUW5llmD|cQi{sQx^;hC$|Au^a zkgb6Cp-N>Qb}yVxh3wN<+*Vx9iSb{Fo`2$OR(n5#eUG>^rvpF6IzNj~InVYhINV3K zi`XFN)g?djl`x(QiTg4B2a@$rrkBXpjLv!Mm@~4n5B_kGAmf7hFv*_#GNM2+-kmH% z;dIssmRC!6?AxThzBri~fvGIrPuaYyK0}Shp?dVbBp%5Fsw$uSLqB0g3vHYS>%&>) zQ?h4;*$OPYjXv3%TaukS!oDNM)aUCt^{fjU)%E*tY{~nFeq!*Y`i!j6nIpDsfZkGw zcEFOX&e~4uA4&0>5zQO4JQ>cv(sDJ;v-;*1Ee=uYaQDuJK~{IPBFpDEbi1-S>-{{a z|Dbed{mx`R$(8&#t6su?hrl>_f%-$Vv3T%0R(>FQe58)aw0a+cHQloVm)c-JXElzY z(M>*Q4&^A)=aj#?FwN;}=c}o*S_iRlp8A*Qr{?pe61bN&PBZjCGqJ8co@dSfJ~ega zZMU;;XYJPEg~^)x6CU4*p$G8q?7&X+tm5AC?3)wqH~W`NSKDEm6E!#EV&1pqDJ5r} zRGQFHnKB$g4wW=hl19yGCety6N@b{!%=1u&a7@SN|Gl36Kkp~+``pjI 
z_g?E-*IIku``&x)yot$4FnJgH8|&nh^0P=gS1Z}Q{R&H#rpZmD$SEMnF}M{H-C+9# zmK@45S!sPDOJ&D(c4}n~Wt%uZnk|yCs5^|3!{!_q9FCt^0W%u!Cc&dK=|6+6`G$iE)N8&lI+oq)m;Ya$75O6b0n|%e<7dew~=ggI~!eA zVEZCy=j^ztFdolRSv#NeZ!>bsj_L#P`#JiR&{`FI$$49^>$CE9?OE*|PNTtmC9}=N z`NJl-zU#iXMY4@pGLTl4VAKMS5-~D2-c37|)tdROtcS~Lym}&LPIj)z8d+fy-}>IT zV7YQv&?0N|Pp3s2k?KmmHU!tQE^Gge$+)VPK4Tz^vNxnt;hC#1Ojq)SPH;_Lt;~=g${VNg(oJGfveJB_^a@OUg+*H7 zPS%_Mg*m6O@PVw8+1~o>{tN_ntNkl=W_RSLbo&-!Kd5UyCgfde-j!rVKYQkK)>77R z)PrIv)_j_OZX|CbHIH|1c8C1N-&*;6xwf)x@gLd-x^rGv1d_QBe?Yus0N&G&AhRgWEToEMaP{#YhwH8pV zsoophF+~mQl^CzrOxBsOcdf{_#pl!MxDVTN_Gu-)UmXtj;Ph;*WHuoEcvch+BUv&b zJi&s=1JnouR`@1Q)tO-~riL=2!4CY((?Qlvm(s$!BI04N&$-jp^cttpdIa>#lOt=8 z$B4%Fif(0TvsBFL2e-u#yNIqi1^PB|?p!`Kjb@o8%i5K!eOm^D+hKZ&XPQIw{U4Di zr!DQG^JngRj6^wSc(YO6)2z3&Am6p_ZO?*DY4|raCcZB)(nv10mXN&DI47s_ZZ6n) z2{gVdj5P9W{xHl(LZ>u4OymV){mXvvI_??9vc1L3Ji*82zxe+c5h(k{D`R9^UVj$5 zd@SaEd_gxodfs59{+|lbG}(6V#lBIHZlShA)SiY z!-Fz|@DQEeq~Y1_OLnW|hpFZMI<%@QE@$>DF+V3=JSXmDv^f?}m-x4de6wMY_oUgo zdnEP@W&P~iFUx+T$-WXIbJRSFrZ0=;Re1c3;%YCxaJQJy!RJG?KSE?lZqaZ3y@uyK z;O{3e=VlS|0sc0aoky|vIwQ|2^v)XS%;{y{V|~5%#zN$7Cel4Z$Gn$b0FNv2w=#Q| zB13ZK&Zo-|?IdH|{qD_q4|Opj&kmbdF1us0M=g8H7UE>i0X|v$$O-ZFSuc6{v!Aw} zIM5jO$u{>B37_Rz6_sd0_Ut)69REsbahFe(*kB=T|A)`dlHdfEzXU(F^ZLAnO~%sZ z5K7+hoAK%|cMSIZ$@Jbtj<<2(4l-ojNLCdtga0qQ<}osF75DN?@EXe|7v^?23}xxk z^iEc!>2zr>p5__-LY}ab#oLi!wJ20seb12oE1!>Y@BT9s$7s19Jv+L0infQU;c?bp zpiE|yTfuuLJC-4DazE~0v-K>*HpApTme_yx`ly0W9^p!BUN@hnpYyr-ysjf9}IOr%9ll3EO>c-tks^m~$C1nu7;vJ$QX*zhTTX9s0tzL5M08DBq&hflFg z@*m`k@4H~RLEO!}>jgAf!k=?m%&(+N4$>3Vx?X!l*>52Hl9xJ}!}4tV3Q3>A!$19G zb~xt=?bhCC$Ruk(89LqSp3(4YOupvYIu_si;b-1|b?3QN)%&x1^426{#&cQlKDa#r zvnzQ*_E?QdORBCS$uZcGvHpLstcn_=Z&qI}W#61TmJw^Ri?@W& zT%|ITKbYtAFe=KqIp3%w>lEv{dm?O3fZ7{{vG?D2Zxt51mVEyu&207L{Z)5mFC)Vc z9*}bqZ&PpfYp)T@W)&j(E;0E?Oq>OW-!VGt-apamLp-7)+n>Yotx11H;TfT1VFb~h zuY5_$nppT1Nv=@;r7+AB@@V*6tmnAf|8w#{F}Apue$DyE5;E8EDRXttL;g}Wd|D5c zoV!IJ&`{sFk)I4yS9wVGAn93b`+{$iTlxXtX065Hc$X|Y*@bfyw&Yx)Pi3!5?rNC7 
z0{1t&C+}NV(V-`;ySlHFF>Zf-L0i!|`E2rj@G8D^pGcomt6IV?&$&aPK8i&?D$E8P z;{GA*x|4RDaAJ$v?tyP+Ek?7_1U?#e#Ls(bI~kuYV#~}=W^|vm0xQ%xkait>(?>6n znaH9(Rn|9`#j(*WaC^Z+-_SEu5ediPWF_N*^)yJXw}0w4`|;Za%3naU4WeSQXJ*A= zW71Dycg=AXjpQCgK*spX2%CzX}YP0?D^| zP!~P=X<~VHn=YWy!7TMRa%Q*5A~jx1>%_eqM7|$g&+eZk#s_2k?1k(gGE9R{AAC<1 z{28p7^=OAdC>gxBljv4)VJ0aLyM=8nPT_b+L^58z5M$h>PnV~JtD(uQYTkr*ff@&PxHoo`kLfJ$tlPeyEnN4 zhl-=Wz^#;6l{^xe(aMU)nM&`0;c z-}ya=w>0tl#pe-_&iK8G@<-5Tg*ZG8n{p20{xSRy*ms~BFUPAL@Xp$! zflNApX$o*qSw!~cu&ps=r9|r=CS#3w!2@S zoqfkkA->ml+q9D$CCNUJ70@@UKYdPp<-c=1GhGY#O;#R^)#^gbZSAkwJoy3gB~mps z3VcF)htO~h%~qnG4Lhjvp7S^|F>Gu;cp$&KybFp6quA#$E`y zxonp8P4~0>qwvoT(A}(`x!m!XopVZ7DW4T~HSjb0>Jk$M^Yp~_I^s!2Qpw`-8BK3t z`t4@n&wUb>tnLo@J z<$7!=%RZm7`Ziksp{BX+s;;)IGFe8Kd$4Pg{&gKJ-iJ#*aZ{JK24;-Tx1^setiUi2~VI_6x>tegyYM{(n{qPsOm_~0WimRhhu|6|va<0M5&2lyj@52g z-(C-yyIn2I-pT*oL4Dt``4HchQ}Pe@Z_pzhE~1@GieuF~fOR%wa~(58m-##In#y26 z_HQ=wxidW`U|eRClQAdxWscw_&nVN#eVcjZ%VOAWw)!_Fd;*`(j3H`aR(3lc>;9=^ ztE-mm+{zPOp5(@gBqu1nkVM%By|lpK8ER?3-!62uA$#`1qd96x512WOWUKiePCc;f zWB1)nvYBG(Lo8WY%pC)nA~es5%}dnsFZEq3?#&lpzUDP~YqXQ}%S6NMgq%pu-`rQ9 zEdONN8ro{bUThmWCh#;y!}gGS%&^cV|qRQFw&j#a4%;F=IMVO z|4Ae)LYL*FZcmQv|36vZu>#HqYvXIydP4c5u_~)BZlTHFee(nvlV|fsew4GNPZd?O zOZGqh%2^$~#FfOtO6qOFV(0V7%OE?Get%+2_E+v9cSeH!_-S^swTESL9{mTCGb?qn z_R7(>f;y7f@GOz*4HBNf0;38~FePaHD$P2uz-W5>s;!)2wXHCxGLOD5K*aiNJ^Ff4 zH&57^O)9CbK6H2$rX~1oa?NewB@^&E?~aoP?pL+s*)gM*NA(OvS!jZC$tf|69({Q9 zhoaRqGFF1~49Fdg1<7%rwY0~xT25AJC}O{fbI@C}$`|={l{THTp(s+%-Pr8+(bn-lp|&v>U@mny}tCWJxyr ztY7>CKG`4tUmo9xt#ZalZCdB;%xGMi1Ir@nIfEr$(*7}|nJAieB3Dj=9t@|ZpX%6Z$ffQ*?PuFO9V5^W~%?_UaDkg?A|j6F}kTE*|ILTjOKsiMx&EORBMW~X#k zQoK*Yyv>;k?d&AUdCrST*#)nj64AQp2X5vm?~2N$@uPC#&O9}oO1jKZ)@AdT3ijOV zuQ$cGTI`g4oq3o2Ad6(j$R{K#FTR`u<;0rey4QQFMDk`ZkEUp&Y=vrRDX!vZx){wmv^?_V-fcM%zKF8V@oaW+n5 z=TbdPs>=>LNWXv2^L*%h>F-_fOtn1kn-{R;Y0@3Y_HFf%d8_@Y){BT(6<9U-@m^u~ z-(9N#>G8N+htBuVcp}}#GG}I(eo>z;uoJJF!{bN!D^_cXbva zXCJaPkr6-bki`a25l#(O6wCFJ#QjK8Lo({=!aWXB=fJbuM 
zl!8iSK9OCfSw(ZaT0SiFx=X~=%x1^`li79@mTbbP%Gj0p?oTk}AXk_1k>mlplFr9K z=02J>B4O69Wi>)R=Q}Ja>Rg61K78cvv&mE2wKI)%%d=qClAcV4W7)Qc z-z}js77j~9rMF4@vAf5z${HH4hfNtRT*9yZq~mNz%!7F)?Hu9nKMH+E894sN)7G=~ z1U*I0<62Mu}u&gaxl0wVPi@J6QArH@P@8$L)Rh)lH{05MtMFs2 z=faK{e2ZAxj7E)UmW(9#U}W}`zKNsV3%%zfusDI-6UD%FBE-#b$s5E4^yshtWV-v= zDD7@`e~!j)spmxTsykoGDIG0YXs`M^^Rvu7We@ zZ}~f9vvxn4ysEZ${C-A6{1KmWK7AG6)_}*8TKvtwoYc~e2VD={?%6teegEz|Dm-y+OOeL zMj)SR>EFf$uWKvkl6wW_UT9-e~Fkl+t_@yt7X-3otBd0=`S(< zbZt+=x2ydgB8C*f`etMp&3Z3E@hOsLY|;U$57WO0{tczkHWDO1^%x~z;V)UWFjxfW z51GetsfT;BpE#qdwj|5Sfahp5Sxl)#ldJ?AtJNChZRFoOV5(jR9xOxBMsgmp9DCrihZY&{c~;@!2i(4WNn*ZS@hMeB%H z*{hm!7*~opm2frnE%p0McyAZEzQByWSamk3GRKm0iWkEt`J@)&#dg{!w?y6#4KNi2K!b3*G;L4|RBDDb^}Oj*Oi5&?2X?jn(chnq=MMYesJa z==r6XdxC2}iPA%%{wpc}=dbKd%u3reGke}R=P81M74)RPLb?K{^UYitp^|G`>bjDg)Ng|H@O}6k$AX1qO5w?E4f3D zmp$@1Rr@TzvkN$hTSKyb`Q8`PXH4Ku$vZlVVr^q@Ik1|WNf>iJL`*gT=gUy3J zKjp9OY|@g{ZAFZ8*eSb(rnu6XL|Mb%Sv(v{->q!lM!c^||G~U_D7;IEfXUC7-Jn^c zm)YlX$~Dt^W&yUUrxJ}fz$WXnlSO?3G%_QWH(Q@zRRtW*?D0B0OfR{AbeU6_zY`Px zq-%*noI8eYLuoTz9B3y_EM?EEk#36-S?^Vv)pCl>saSL{HlGOd*~%QP>`~aZhWBqR z_{matWcB?HHRiO`k0850S7KWT-N@2;T1-~ zlutQPC1c{<`ip<_-mf6PkFEEL`d_K@WbyAO@?}@X0(>0n|0P+pJNvX_k?dBWq*85^lIvxhYRD7JfDq)@GIimU{W24G08<(T-+EHo(q^XIK zC27=(-LnoPjC$T3$wbhS73YhO2a4Ls?mtM)3-to&ukNB<0~!s(&b*1ZRa|Z3Q!#qB z<~u{wcmNH4=5=eJuuK%L1;b~2`kb7z#l6&eE*!S;pQ&m)j^1TRQ9N^p`X+iOr%f{8&&Hx3`N3u$el-R>$7wAVJSF%0Lf17F6l~T-)3FINAxO5@11Zt1!HoGKzSwa)^=xQ zJ8LVaz$Gv1RB@~V>s-aY>-@hu9J3nZQQntR#r}ZD1!^fugS>(0QWzZsiLBP`D|VL0 z?5A13n|S^_ziCFgLvef)8@$YmYUAmPu0Aei*AQ1G6nfr%u2-e^0(LkQc3E?EG$v)P zVm?VL>S;zpI&Yi4WyizdHCH>?U3v|x?N+J;-TUi(a%$84yf05H*Q)JD(vA_asxT7&^=7Tk~IvQ44xttuUTOjj`eqkh@ z<~)|l+8rax9S+~NG|oKWR{nS{AKoa=MpkzhynY9Ebi&qW3h}&gVZ4;L{4c_)87VgKv8=hR zM5l?YFiC6K%bOkBEj&fN$76QV^eYTmsn%I)-9f|b_|05=#xJLfYuPd0nHF8$pNxH3 zU0=D-Bmd<}V$!ED&EERF87V=lD{x_?8afqv{+yTQ&5T{>K zLo19*K8;0M`j)R`w3^dF^33}NZWP1Wi{Rc`Z}GAAj#uYsG2{h&xs^7fM3IB>qC0No 
z+=TSV&yeOCpPqoyGumFvDmBS53vN|m-CGSgS>$fIWi~4L_L}H%UNP>d2DR)|DR)3Cr-j|3hCBILKYUB>)r*yFs-`yBGMB~fh1L~XY^q1! zf9k*|O8+i)T|%?d_}1&5+;h^-4n1ja(iG(vqp;v}HKu243h_;1=@vK)Xa8hXyO`{M zkt~^VmtseA99XGeJ&^s2;M&_TkCJR>ttx2O0JSDexyD(s~85TJLk`21!msVo`WA7 zAkmrCrm%T3W@IJA)oi$0Yu_qg+2{MjnB+vNAl{6p*$kTJyrhSqaWanNl&I`mY@)^I z{Ck01OZ7i}@#Qo<#-(bm2K63#kOy7)2nxT7Dwq1MiQcCjM$f{4tf2UZ_?J__l3D96 z-u#$r-LNGysavu8@1kyYv*%rOM;x5Tn=?DJ2zUM`(${9=5~4^}zWof1ifXz@|MfFz z*I@G1IG*zX8er#xP?$u&tVR5iKUL;u)qPWy)vkqWSG+!z6^deFGJJeSt}V*{NXyPz z8tLzi{I0G@dW-%(`*5ChM`oFx)GOZ0iaGbL}yIP7$6jEnc!M3XR^}bw zCLh5z*tT>3Xwjt-OD-he1CT$MWmd7uX=M1xzx0P2eR_#z$QmP-HD&?Vc{UMR)Ob2wg1Tq89@fWSMlM=f<^v@TYGUjE1fYIr0|#F09!F*B7CEjCTI?U9vnT$8~#Be9r%S z>BG0MPk((&=3KIRqKCFiXfLOPyut52X8B}n$UEfPP&|S42m3s%Fb*4E=%o*YTL*3A zt=WO@Ia8_WP`(&GoA5K)bdM*=P}*hh_%P$ONi0xJn>mN41fMQXlVngCgfSn(aI5Q2 zz@i+)GYfUD*pj)|oPLUcLF>E`=Boq2O%AMr@lSx~X{C&xiec%0PoVAb1i`oje zj$_{&NIRcK_b4}4jq|YQHucOG!M;#iWp%9acNG>|tk=yN{8#mW#nhiUh^llv2J1%h z@B6UhMt?VlMOBzRR2X5sDC$%cpYn{^QJb9#@$`Eow`+GJF0L=M@mwLUwr9ObSUH`1 zf0E)To|f}lb9TgLp7un67v-UG1-@i%B`Z&k#j3sH$v@TfIbTfQe5d>R;b|xL-37;S zd?NY(?$>e==;nOPE&79Zp;c4a8G5De+N}+%W1#UMj6T8s=K86jTFoBRoc-HLspR+l z6pHt_JJ0c-(y0f#Bm=@|%*dOMWcbRw^{J#EsP#9smpRU6dgY+7oKGF@dPYfucu`I_ z*h|lLBpYqikn?BOXzL{Qo4^v4)N>r~yp|7LPS-2M%CB8*&xTvnkU9Hg;F-dH@A8S6 ztTp3;>3a14;KwfZD5JK2^RataXP`D}@`{Xp4^ZQmdiyp!^RD8?tSi(xl`V2+>}H&=BF3-u{UzF|&3lpu`dsqW_f4{8x1wuS zNj$FYHW-xWjO2C5XfZ1S?{l>STPI)bv+T50gjj;M8~w?Vh%**L%d=5+8kZ=H zC|_s)2f_8f-vpxJt*8sPqc+D{&wd8BV6S}zlm%8{(D zYa^kV6>QB#jjnLIj+C7->qPaxT=1xdEKo}6>@%GQi&APhoOU@2yni7YoWYOl^MFn? 
zO23!$bDHB>Rje5W*YtqJNSj<|uaGil#MCk_Tn)vwB+H3M-|8QBuw+LOX`KIO7xj6# zkrj-QJ$ddc;A>T-^9JTZ(r2ZAdb8vfebmVNM0j2%2Hl{i&zqr#Sv=2EVfM-V=TL~e zhn363v+?3!J$g3AnNIrrR$BcR8MCIRBcvze_zw0fNy@9BoU9GW#*w{GIT2wLe73m1 zHw+e&`D7g5X5us zokyC<>{*f}m#gbMk|e|21MvCWEZ9-LNk5)71LIlnG5yGN$hQ$EN@Dm*49a;y!&vZv zf`6x1%wDo&?)^fU%+?fxY!~q~*+m}0*~8&7oISf?&KPav4RiQ>3%|S=rmJ9@4Bf-Y zIgJH7;`MfTWIfn!emS+LmCJSlStayzq>>x3JZuhEOAC_U?)$pzmP}PS zc_uTk$B0)wS#|@TpT?F)!R2W84#xBcM74IVWOS3v!JTLsf6j<8Pc1iL-MIxhyOVb~ zPUm@fwO;#QSaUF&*A`n&5G_l>DSP7@X*qpr5BL2C3g>7$tG~C>KPM_a$RRG+=5>1lXAZwP3(=aFSZXxX~@w+0_uY}Q1tS*7&-xMOz z0~qs9pYPxq)7WY{JC1;6Ms#@|oMulqLX2n z{8m}__`Z1dGwatZ#I)@G$@BkUHO|7f`m{S43fGYTHjKHAKP=MEJPC=D3!F;aX(bxP zL$bdoxi_Dueb#b}QPX$+9^|f>dWDYIe<&HVBX%%GXXQi*qs1Y>J8=lgVjlAW^;JCU@QIy;DIYh90|$Gf{dZ{FsftaQ(e@kmHyHE2#;PG+og zVDWc$FUyWOL8%YM9e`nxdkeXjK%op>GtV;BeVL8>FDd^fHoQ;wtkmj&QF+&LkJyvB z=DyH-9p~~+IN3IGmdXOWI!VpZ_#R%6EOCbxn4T=E%e3(q%(9RBq6;eO(X(#ue|r4G zXj7ZlJ%lk$J;6NYDS45rS)cimHnu_}Y4)GRLw+ za`YbO+GN%`5GrkWWp*5Ng-Uw*Q%L*{&TMnXV0K7;unm~gl6_~xI%}QIrR`K0z zI&4vE)~}W^0v^eel8vGb|D58UoCo_fI=MV}tpyh;oxu7&gh{j{4k7dfpX@3^Kz zYG8pyjWFVUmVJ*EnzG*^{PtP0Urfg9=v|ZdjfGf$JRJ{>`(5v-Y(~(@1Q-@%M`hMz zO&0@R#J&HBTFKb)f^Ud}x`PTSKM=#x}wH&T42-7?a;mf}8>Ui|+=oSFy@ zCQr*>e1C%4(zCyYm1VKySYv|hraaJJr;~REX?N%|55@LJAe~dev;KFrzy4BNId;E= zZ@ul?;>VYX-w*Sgb}W05D`T+cc5EA=)$}~cD!TxKvQ9WFvU{=eCD{Iu zIA2}KLF(+w2Y15uMc0x;u_o)xBTsU5c7@syTzijyj37&%6!KJ>EF_P?sXt8qhj&F` zcdAH}k=pBgK6%p0LhB*D!Y(pRfI!Z}%nstkqG%N*)7Ks&>SxE$b{wpL1KA7w5>!^G zw;l;E6oH$n=SfL$aRbCeteUsyq5grr_+b8Umy218SwN%W3rx9+kqTJiQ(OodqyB60egn zwI_*_Q~6aIXT4x0Hp`P#-cHrj!fxD1{@OQrQ`X>LN$&=Dly|Ag;a^*v`wk;M70I)^ z^b1#xb;n>5Zi8zw`whmNecH-O$jxee-PMf!@505rF&QjMJPEyZ+Wl9-F1H$|B$H>( zm#@bQ^EPp-XnYnnCyPfizh~|;#7u+G@*7>Ov%^pAJ;|bhr%{9d;;Rjby8F=XPz@rFHg(C%axw_3O){eW@C_UY$mJ6%67jLUC?Nir^fq2<=%!fn`*6{MrpHByg0O$^N{w2mzBoc<+yKx@DHqiS24i9)M%q+leHnu~1GHYbO35jip(e-5Ag7C|w+jvx+-827V}Tv^=lMT_8Jvz0d^594e0945olNW7Y5#-oQ=l|3TK_cNR4 
z-0IH3TKfSG$-sLd_LL^^!=yL|k5}r^2N)?gbl-Y0sJR*{VpTJK{wj|hK&~}4O)E$Q;HYx%zn`ddgNhP`5I)uV}suAPez&WuNBUpt)=ft zbO{OHB~e?}UMhN=Aa?xUE|b>ksmsq+YV}}kW^LUpShwei6V(>mWn8_A443fui)h`O z?J7fX3*?jgY97BR&qIpnsrInd5D};$zu$#3KWg(rk*gsOpRUw6eMA$oUWCob&v+hI z9q4yvS(3>mD<#e`&#$h`7*zMl@)-59cybYqRb^3EaWs$Hk-Zho3ETq(TR zBo<~ZMY512qGp|R51!b7osx^7IVO!FR}*^VS?&iTgn9H?h2J?x^k<(&u*20#Rw3`t zka>@nWS?1{CVpbQalShm?$>B}9Y1=ReUo3Or^J(!K&&?^)(Rri#hwjel9eaR8kqhnruN3*tT4^|e}w{WRSL2HUG3H+ODuG-zjInbW+5|In(@UTjH=9< znPthU|o)dSdi=hE&1*GB|InZ8J3di zInwMC)k=~hC%oiz^kQns*gvytd3SJ$dNU%c1mh{Bo#xK0E8MH~?0f$gOe+@{GTdJm z7y9t*1=$0k85s8x+&?Sy=Xp1hT_z>@!Z)PMZnu}ncdT(vGrU^lubc;!3@G=B@;P-f zyLGq0CGU{azw|N!_=_Hiw^=KlwKdt3nCyu2MZtgIb5<&^SMN&w-8A*ARH6*N?IQIY zd^idN?tt%A#tW}N^KrhB6BTM;+g;G9qV?A5s?HNi;^1fQI2(Qqpf?0Z^G-V}TXwTm z@&w%P>Ojb4W+FMZ`(W=xo=}VD6{BI|RZi8IKuAZdDWTVS$&ISx@h7psn zXE**Ow|)(=a3n3Ou=#BLWlfs=hM9H6yW?P0ht5Z;Kj)J4piODAj~9)yI`47gn3y1bzLvf>v%&TBf0lQh zfiFX`ts!QWB+o$Ou9o6So;nUMNIKP(E*I3(qra<#h2*QEMDpsFQrpG6Ib)*Cj#L$s zBYzveuhGWww7Y>9Wt>%7E#1hl7Mjz&vvmuU-X~6!aDSd`+v#JHr>Z6E_Q8+w;>L@j<)?5x z&i5Vh`zAbIjB5|mA+thH;Z*kZ3{~%Jc;7>=!`Od34X))0!^k|G44rw^Q@o@&HdkZ& z*0h`h!(wciGXzi3H$079k);M{URGQ3)l|dDti;S&a#%xyG!)W*D6u5;G?T) zv50@?1giS1(Ujbmi37>VmmJS`!8mXIauQqK4}MF^N(Fh3XT5WAsD=BIbMkb*Gq+Ph zOzgz^Gk8>b{i?XqU3?p^g-f-0zmmgQCg(Oj#{Z5`>!tX3q1I=xWin1>PyKn2DkB!u zXTQaw!i7BP4jfJHu=kW50)_M#IYYDrX|FFJ`ka^+hV~$3-m3)hy9<_Bq}En^@+H{( zmlre>^uGcU*Z{4mFHJakWky`KM+ub}l&p0LVE_)cBFZk-a#zD9EWB3-=gr%qCq<_{!RkN}|#;*qKu!zamvtyl&!~2F44s z)p{9z5jMwJq{r|<@_3))3D=vp^9~@0CfMWC+$!p8Pd=?D*k|n2y z-r`#3%O~^Pia53hmny$!lwv04lL+J{;_L!Hyr zl*n-n-9{>LAWLQ)eM=+b;W+<>s|RW$Cxf?PpYz0$>?wGNei`|dCso$pCVzQ5exI!7 z=aMliSN}o6k^0C){X8xH->&*&l*?P)4`Fg4f9l|#;v~%sMfMS7eP(Ypw8GwSy}EB} z@{3i_OirTg8oI(=d1srmr8?8Ty?B>B+Bw5|CEI27U7oV9gI3D6gnrfoC-?0!5X!jo z2^MWy;CW{fR^$(L+2aQ&WGB-O3~FB(T|7vS?3KMizqVLUQM4dyo<5R!xPg&MO;>Y1 zMoSj#?AyHic?3pP#QPRT2$Se?9KYO#NjV|rDGW-RbCh4o1Czu4LGql$-|jZ%y&M9c z(uap7&X0lo(9EZ`dy@FP7un&%7)rMQ#!uw-tP|Gd+5d z%^gX~sv(F{F+TZlV`jDuYT%;3tR92(q 
z43T&EL^}+aqMy08FcPW>sTZ`Fk-`;ZPA>60XU!MivJNIQdI!U4HJ`{T@Sc!9mPE;A zR0aa$uxPkivx+P;>Uk4#xxOm9W0O_re6~GEbjv(S&QKdxut8S8{Fg7S#=BasC4MBA z`da?j5o0gX;}#=T@|mq=hvbt!f)2?J)>S*Jv8*j=PcB4}O7748;qoTGGdtGL^{hi{2K`wgVb| zvHwlw`uY4ONj}5BeeUd{+-k94VSzOtz+p4rXe83kgi9;+pMsCo3S-5b=9lNE>1=Yl z?~dk=UtwYL7H$$1UU4Py@)bO2h>4josm(IsSIz-=2Jg1uR5EfG5f7TON9MRUh%Px3 z_ka4aYk2I7{(VZa|w?TMWK*0dDm*~@9h1UD3z6uo#6f)<_=eLPf}$K%%kGk zU3}5rC{?GJShEUvaY`E?#iS*j%-!^ zot@dIkT-cAI20RCmNnf^!z^!%hZM##m#F_2+{uVED^j!CpdLvx&Z&n{^TfoB z?9zgbZX1ZX5uuekUMQgQ0qeYmSqIQE z`#3w0ZImeUsq!a?&v(M<3ihn$-#qO2Mbz7h`7alwnJ>Cmp!qM_&AOP3kxn8@G8(ML z$+kmW6th zO-3uH)D@EeBp2OZ|(c!0q?3eyutNk zEvU=4{^jZzk*1Lr^DI{bqJQ}5zzZkfOLp_^f!0@Q%y}C{apH8fwea~?-qDdKbR+j# z`WB;oRz@#lx!Sb6R*#-H+}Wr3Sz*L>HoR&onRD*m(=Xia>KAyn&9yiA;xVNCls%GH zF>6MOKxh-*Y^7ff%=%fmjL0@9eSTp)+lB5Wc}su%+wRIkqUQlno5}`du`U_F6H|`! zDQ{qAW5uyJzmv`@VVoI~1F)t)ro3tlnH9tXu&XVtessrZ+@4OZi~?R~!2vK!$pP4r zT#?HlFjcRT6VR98?R`)U|J#ZdITv6%j~GaU8^|_T9eKZ+Y^SRswBGNOy;_OtY&4M- z^0qYlf|v2CYS7FZ?U~vy%?~^KU5kD4rs#OqnW&eYNQ#BnS=#-TSiUO1|4F+!qx){P z?J^FS#)6X}@QV_IvEeo`y(6uz6jjIax9wzjhkQA~a0dNe=PfmT-amRvZml1nR+N8k z+?YIK2dqlG*+59t41=IC9>0b^hps|1SmfaDv*m)QWwbWv=FaAS` zX{`5_n)Ci}7CU6+!0q@`SwAwEUdf_#BG&vVGTctTE$}%Mj}PXLJ0Un9zF%W-b&_Od zaCTp1HOBzDAB-hy;gjc-o9X@v{^XpYSH*^F)Rcwd zbSP0Siy?!*l zmdsgK-C1O=NwzQ9X)|l(6t^SUI4kp#?cgGxa)M`86ijeu2{oOs?X07URgQt^KuA5u zE~mLGJBk{s`FC*nwBf+{1Yx^Ps#wJvI%&d@zEmnC&$97LR2XuT8DA_Tz`>t zFNn&S&&}D6jrj7D+NofmrDIre(TtGhO+^p0_1K-Yc_gG`< zIq*4$EgFz1V~4V&@6RGtVV1d#Ra=TzmTpaU(hK|ImwNT}5-A z_)H-hHr2*d@pctG-hoh7MCYyeSh61|UaS+rI?=r%j5_hYt9|~3?_5E%oIlV`58s0? 
zyvH&PFs?ElKF){lgXdiJR&{47+H6s39FF8kF}Yzz@V(6UH^R<4@a_REb|TXTI@M*Z z?#d?v-}r)^i$L=*wdWM~4s4aVu+qNGY1?IzpSm;@-*5kGjtDW*Z=1d+q zNet=2&ew@Nd4rVwG0*eBhg{2E(rS>)bH>Xodzk23#FfXf@IqSk^7Ng&0@=AYSc%X5 zK7ven4qu};NM^)XwU2L;IlH2`xEt4ZvC%#5t)Y&LIJuW6$o_@(r{_OL=`DRNrISe6YRT9f)D7H{Ngb2eO!ic>=7UbUl&nBAOki-DJ1iK#!Rad>S*Jg~~y6 zIK%aMqUR)W;RIGZ3A&k+*{7DA=l2NgdKp<}uWBb!CXZERQVqm{>^{#7Sx$2~8#2i- zeg+TDZrlYhIzp^F7s7d4n3G4QlY2hy9bkNyyd~ST*_@T{Bl8+alwyNq=6L`fnUS2r zT37J*-7K0@ud-Js?=n~NfIaH{!(BN6Xt);lkS=?zC($gsN6!%Ndy^--``)EfQ*r27 zk{nL@JK>zui01iy6-yqg-E}m-%@`?fzw%DMwR^_uHEs}pXFw{Ws4D84Poh@P?Ma&K zv&-qR@$S|%>8AdS7BA!*Q&~N`PTuiX1v;Eh{>8jMUQh&Po1yova#i`n1nuV8xT3al zqIqZ1eISa}f!k{JwRH6xSEsw0^H1xN=0x+gJzYyir6H``T7Q&Yw--(G{JliYb=_SR zmf3-r`JT@1dzl7*XQS*O>_Ymbu)Dfo!67tgNvq_CNn6M330f=JoW9+m(b2y<@HX$l zI*E5f*eGuoH{esUh9s|RB1cZ|&a?J?u>6f(*3)AzPH!etE0OFMEa;-POQ3mzXPCxH z6g8?$uIZXIm_@#SS+)kr;d!LlWFiP_Dep${n-Jg=+z!m<9NJUK$4u5 zm#6-6)<+R9or1({TkTp2WQ53YFws%R8W) znL9;G;X_w?<&@*9BGDHlszuJ#{Gc&EC)Si7Xud&kuko=VlOWiRIW3xghJ8ceO z#X%5xUW7|d(hllchMyUu=P70=Yrd>>cRct`fB7mNB&*_lc%17>d;EEj7bFYbDI}Y( zu64X99&)-qwq;>vZz*5Pn$K!98;eKp`F1#MPxm?bb&i4PWWDzl;#eiVcC*Oy0)!gV zttUoh9m7kaR!)`c4BcLI8=&nop>dY}9ASL8N+7Mr+WWWA3aaza5elwXS*)zy~yw6TyKUKq1}tmbM^YRNn6L;6wQH1PK- z*ZN}N`FwJw*pvJso!M|bZcKJ>cIEVhPI9Ym=JnZGvX{MEVMI?lzOPgd*sQ|f>|fd?TTG{ zl|4-a&pXsHYV3+>H*0yZx`ts$_DnbT&CTNIgFGdRdL+Nu#KUw|9<3vU`S;_ey7(K4$#_X9Do#ipV8BIS_Q}*!fR7>|l z&4=PcQKO5zfqRipCR6Z{n3|FK;d;B|^GUySoBPK=;}_h@bIP;Yts;s%;dj;s<(e*dCEE7_2g#AKA>5oxR<3yiEMe(do_+` zA8JPu+{N0X+3yJVyv}p8=C&t!zk}Z_wN8gdX8m(EK_@)Tdf{E}&8bou<7apHbicC) zJS)_9`6i>Mm&Dc8*xU$yjWHp425ZrLDIb0i8H$Y=$#DP zC)7|%4LS9`tJrxH-e%M@%%^(#%8fXfOy|F;<8FOGFVSTSw0H61yjgAp^E2Fint0OJ zr_LhLGNtR&b~!6%z9mnBP55F?|YC)6P9yI@(?9Dl4^!Jj?&gyd?LHO zkA`@1MjfPwZK<7{tK1S^??bya7H{@!SA)-tWue+yuBP4eDpToLo~I}Jb%W+cl6<7@ z&y=jB&96v&C_IPz^pVe5g>^e0_!moL?&&gY-UYRsgm9#?dD5@!|F1!(6nl<>#;<&- z5`9mkRZexdM4OpG$f&U{Ir63==XIURl3$bL9ChZr*~~@d4dqGLm;J$Gc>Q~Nz902j z&pJDvWONI0CHnZ>_a#LY8Jb~`sy`qTo~a@t7Fo9tjDa~tf} 
z!+1g=GR6Cn&-Vo_W(|0Env`_UY4~^{&)DJmLb3nj!nov0-kG;jS(i4Dh4Z$mJz$RocF`%HCoKdIo!|lriOm!2_tK)J|kb&v*n#wZCKpS&yFj+6U&L| zqcHSG=--H|(@8r_%UK3U8Fp7G zoj0TlT=|oAlA$N>vyOsMF*q!P-!r6X$&QcscDwtw!h5ajTiEnP{q1eyOR$}&u(1lA}(e1UuJpJtG>h^a{BBoye;QM{fUXmefgkB+d`?Fpq@8w*?qAOI}c{J z%UN`if_jkBpac8*285KvqAK&k zbLep}>~DkobUqS{24GL#bY(x>0czV#mew>n)tK;jHEm;)M_l_uWZ9tBZwt(PAG?#w zr5J8z-FsJ>C63&PvH#>%+0A(uzGbb}Kb306TQ4VRGS`0z{f?sID}1pJ4QjJXZQq0w z8AD`L{;jqW!TtlyWGeYXD}DHNQA}!sf9Gna4eOP_*W_D2mdtNs;ye6*6dTk~<6*GK z>~b=`--sL8XWm;JnuNp6#J?^|b;g#F%D(8|dRQ;={c-GkWx+SvxHg$T90%Jb_&h<6<*KdqgnPkmb3IVoWq%jXF>?=ODGyj98-Q|CcEB%{VNj9Z(Cd)Mj3q8RFFua_eS=rbFd#d<$EbXo&MQe=spSZdRLZ{+b(ZU#Rh4^u~QdzbB z1!S6tSVwu%Ypac+yyrEzoj}vPBh3!|PPBSjD|zcRS?`%MNA}X-I3;q{)BU)A2FsNo zPjcH8mJ&y2Ce}V7aUy%{i=|9HhIH zYssH-ZhTgP6jNX3sz%fLYEoo|F7q_UYV$@G=?0NJcTB+OntJ%GV9N<^C+Usebw^!3 zG?W*Q(9&p@P3D!n)4U1Oeo{wrd~V|B-F@>D7A_Ve-Vq}@yXy+QLU%SjjBYp3H~SQ` zdu%N!W{Nk5xMMj_xydujVE$HD*|ktz@4L*g=WS$vJ<=AOPd?kssZPYP#{4m}bK6{b z7N$pPw^hM=^0s@kzp^qTZ#$nLeJ!moR?|B7wWRk$yk`QLv+`hsI6jyqlb+Z|!~z)y(N$ru?n= znKcSI6Dq4$Ur=TuHe~MS9_VxA8l-WQ|txp=Io!Ocd21RIgyQ^iriEF%CbnOMIfcl0&w%J1*l# z?bZ4weAi&-aO1QtbSfuqW^U{)p9ZsYQw;1So?J|>ay+R&q~B+w`$L^ z5_zZ5Cp$Mv!)QE4?0-8yRGl^X;adg&7_8JD^?pjW;-bM?TFw=dvi5K>27SRw|8nhn zK`7u-D_l1D&cv}jNj;vc#W= za0j0D;=KopWO+Kyx#O?Ur?GKP#xvh&{r5uTKgW~7n^1hV(4S;aWkszgD^mk3974|X z=$;c>H{j$ve9Kz)YGgc`-FvI=YTmL}E$w}qoGsV0S#_9CYiEXGn``~}NwO!st%j^L@8Fv{;#qbG6w&`>=RjlF?10NI-@Stw zhp4AI+kVGJ*$p|&ojJ)WIZ1C|+niceNgdVw-5a-7<3`>|UaXJE{<>IaBQ}ktRTC&( zhvg0RW4l=NF@05U9KH>@InCrlKSk)-ogEK%ts&3;g@hTIWE7oTzAfO9=fLcZ%v0#^ zShRx0=4&;3<@=E6an>DYl=`q9brdP8Xen zy<8b9FNRj;?CyhTC*!bsBpT@ci)oX+PD5xj1)78W`;^>Sfn49!?ElR^-kEH(gkK)6 zzo`zhHe&lXe$LnOk#wo;`U4A3V@nsmcy9Djb#}7hLQNUh1k8$=aw|z z%i=59Dfv=2i<)-*VESiQY)%At0GgBNniU~Gi=#{QO=r2c7ayJ} zHZ686DyX%a*eoi581Qo{%Ky_P?Rcy!%MO5 zJ*>{tW9>pO{wPe#D3K9+`i#1McNYnVW7Q_H{wQ)~t8pm}h4Jw! 
zoW01m^KoS)y-qahIvg8v=I;eqlRb79v*A)wmZ5L7KESnPl*zibU&;14t@GCROJ0HqEbfbS@%Qz#Yx!TNn`San^QCnHV^|2Zzi0G&4!#fq&KUA+VQ<*Q69am_nh;n<~ z|2WIM!MEnI*&^B=;P;{G4*r>$$>{4tSSN4p0l1z1=YH*EMb8Z7T8c2qpL(Ktli&7G zT*(>5$MWL#BFTJl?@)D))4L>F&I0$0hDLGt6#~!mGR9Kt>5HoGPma~_r?DIruw>QqbILF$KUH9zX6gtFDfTJw{Yh=$mPv! z&NzCd5J6t!dsnh-o+6UbemJxy!2C?id01VY#rj&x{05zz2yv!6(|`3+PgadY|BO%T zC_lQuhh&TDhh53>FpkX0zp~A@Ikz|&laHfeNtVbixnY<$h{EnaqmVsYPOmH*kI!6qZfmhRr_j&Idf+P6*g$+y#Eu*hGTRo*ln z96;afENxG>`WTXDhV1OP#b3YU^?7Wbxx?0a>kF|mGYQ|bVKTr}BTo-y-}1zFiZWS$ zlZ;>|s<#fk_G*8JF;ZmdMUFRpw}%!N@~rd~&x=K0kpBX8t|CuP-^&T^XW~E;UL`XE zzTYAiobCJUetQ+WJ{L)z)OuM+)G@mVD_Wl$s!xTtnJK+`Qqe#O6DdEX58v zO>~%RIRoft@nxvd#C1ISM&&d5$mvXdU_Dc`>jUHYBF;;s9gh!L?bZVqj&|4e{5S7r zvcJ1xVQiH*;dwt`t;~F$*N?BgM1y4ZS!C8C@2TsN^lsc9LW9f(=S+twxRAKB z7Vdw+EvMd;Az@A&I+|xqW|zm^@d%DS&GuVF-beUj4|RRSk~twDPds^w%u2W8S#%cO zB)3jmb>Hd#O?X_gYh0?u{WIJ-F*wg-->P9N^tc+rrA)$}Qe7W1*I7n$4Xm+q$f-?3vdtZTr0Ck`Agwl&q} z64Fe9)7^UgExhR;Q2s!P>6lT9kAA>H*@d%4ssF&DrfbXmu0pb7AXuGc?kFHU+CAAP z{WKPw%+hCK@=>gseC93v{~+Us>^hptyRzCNIWng6`^VMzJiT_neXH6gW8nTwaQCrs z#_j$1>B+S2P47R%TwUq!w4u@_im@*D}G<^zO0;@@6K{cWN&3M{SP5$PPlkq8`;Bf z7g@h>Pu}x9X!*VT6SCBvX zQFp>{18lPAD}B-qc7B5OvkJ1B(P&OL-HGoNwRfq{ztHvy+-ySMoZQfe-A*A*TN>n@ z)|2=MXJLwtls2a;@rvX_ykKR-X71@oNnt1F*tHKVHZRyhf_7wfDb zXFM|5bo1sX>$m6AX8_wJOVCl+`jm22*=8b6&QNB)+WKm3q+X>ei^pFch31#6aVGD` zDS;VRtW+W+whwsz4mehVW_BKbTkx3Dbj-TbeQFqsLml`@PQ5yTX5TAwB%Is%lnhky z`ED%V6n>e@`W3&EZ7TbrbMEXewZ203=IT3!OvwzsmlsV_ew`luLTKcP`!qUbM(Ay* z=6ytYjIWGoSq+%pC>ej6lRjA+A7j0|!>dfYvq_MX2d29}J8|3K*wd|qnmQxav z)jFAjvflphblT>dKSkU2THo#OpP)JvMuQ>LfRCS`58F<{16)mBraSyxMEW_HT%5i0 z4mml>lm9rU8*Cu=5$Y+b%q^s>UWj+ySS|Z^YVi3MB%ZFOI@pwVYjatvkH3m3lU$bB zt^Bdi*$LE;Ug=j`YU5k_ou!7)+2uw(bW<&K!lKXBmRwua&EWRH-QYZn_l_ayPMlay zs@CqzJj-}oc?2Je!z6nMlUb%ZFUq)QA(oHSQfam1q{m%6CTECewx=uZwO4kW`pWS2 z8%URN(8Zo8-d68grEB3rW@uiZXV$u(=G&acma}2I^T*ZX{DAKbQMx&%~1}e6t5KGa-|;*g0J+d7@If zo>p>dP$#Gi6aj7^W%`@93mnM#Yk51E%njplun&&UqET`ETYsF&`^3*7b(Gcyi+EYR 
zpQov;UhTvOy1^&or{ccNGj-04+vU#vBcPlHvyF@$S>=9mTw}zWQ!=x6c^jNJU{g;K z>{jJ|!{RgGvO?Y2i`$FESJLxgnw~+LvFw)2a^3jz2_jNf&*U77>NG4a+OB1toH}v? z&i#!IW{_mG7_gk@-Gak8tFj#lUQ^;S$bM|hlFT=QMWTKZ5khUosUq+9I z{mzP{eNddhYnQTrR?R*__UqZ~6}klLtye!h96ns6E)NjqYs7A^=x)NM&um4eel~Ul3n7jyfw=RKe^Pu@aZT1-I9F$ z#pCR6y-CeqLGmQh{+Hd}$Jygt8Snl%KN1vI;mw>YhmW9DWh zYmus@8cV8kG(6s>+hQEZT=-4&p9!aNxKI`Y!uh<(J&4|ys{bP2WJT8g>{9Q;d#IRN zA12SRP{vkID3$$5doew0z(*CB@rRZkH416SP8CTxOH65MEYZ`|sZh;cv%ercf&Hq& zva|^Oy1E~RM@~RGjTCuj+8sxlxw8`{4|QKoCjZ9u+w>;~i_+71^o3f@39VU;KE}_P zG|T>*?B^ZI9v>D^$*3&;yb1Fk;JrI}&M&NVK9ut0n?AWW)Vk2_Ic)t3Bc8*pd7Km=5zXA2mY3)*vu9FMf#~?Vr6`*W0aEFgjQlvDQJul(fgX; zoq;pS$Co@WSvOTpZ_u`Yb`K?v$Khp02_J~)eaJMI)}6(iv$cCBK9%+RDpvf1w2z3^ zSy4NIjLXQ7d5DH0!P$Nq;K9EOY-!9&YdpF1f@S*ay}UYWaR05gH}UjfG4~ui_!)wO zj2lZsr=9rvp?DI@XQgIs-{e_q5DkLbb-eUvn%)QZ`s#hT5UqknPP};>8Wk}vClOa7 z`xCC`#PP(1c`TDv2<^$!iws#?o~#k?ik;aFZQP;mh@s>0>jZs1M!h;>5M; z+RXm)hdy?^c02M}E7?mVTf;u(*? zIN51?LbV2cvu`ZB+YaUV`*19|iSAZUcJH;}d;1_eg=g(iehLPxph>cSHN%@^8`wbN zBBDTcvy}DI5F>liGnwGBeyOi-f1|AySJ?n##)!{<^b394LLEt7FDG#|%u>3QA_!YhL{;P&7JKgs;*6yg4 zWU9>Dk$`3fq@YjuO~o)kZeknRvMAeqUs13dd)?&C=}vc{k6SROBD ztMv=$oI;T*h2asW-DwgVm6AsB6{n zi@S>!m~y1}^DQohVXJ8S6p6E5=|9R9A?edbIl*j>$a1!8SMt7fVnfcRIzlW+{>ICA zVr_ABBze+L*TKfK;U=MA3qX7!?I~ zKwPS*7^8?R8Z|{BAo2Td|2EPv%bUBL`#Z}$@4kD_L7KDpv=S`8g+g=s7v-zUwGPo5P4Q7?=CY9^QpE&{_8tv%uqNN4Ws#po{Wb!8+yKE zR8P{o`&jN}yyin506tQg1kpw$;6`xoi>4pOqAr0GZ&TM5D|5+t&r`}L|aDnGLCB*=%^_Nu%T_V zGzqj1LQf~8z@5<3>9HfYnce3dEayC0*@Fa`lm2GxMP`uMCOo&^%zy5!Kg`(wqNSUe z;j0Cu9E(ipEf?4cX|4%+=BNPHa~Z zSWV*7m++zu%@_|4%zc{=Kdz=_qi_C#Y~7EP9012WbfpfYw?cD14!^@@E0(VkAfs7qsjBzwF7W{^7ppih@dNTJegjbogW=`jW zfexbxu1D*;(q{u>Za}8<(YUeDvKnM2QqLz;`+3hrABJIDwt${7v|6Ej1Gd(u&+e`J z6urEc-m|cEYZ0(2;DR3c|Fqaz2EbH4Pw++;CZjYdiadOo%!xH)^}({MnlsVuKrH{_&LK*b9 z#y>vT9pmA>kK8tM$KV?1bdY|W-cMz&*+IFnXt=w28_?2$Aa@n*d9u@-HNS#jHryE& zaOwy`DbViPgR_eJ^LC?eMkFkN_NB=6W!Umx>HmF3V&C+RKIFOL@1%anSI2 zzq8U#sQL_99f(YrhbBf(3A`>(W8MQk8$j(7c$E$PImo*)xsCF0A68;wm}`eHrV;3z 
zy$!}LKEt?nQTt=seucIlgx+iTeGC?%Bb42Ve$1!$8z|{Yn7+jk>gFI}lR$AQIPc;2 zZg6n|a$AM1xf^>K!*-Q2+ApB1J^sKYNJSpkf2Pb7TG{}Hp6+i0xxIK|(?Db%dSa9X zb2XU zUVwZVgK|C^Zw#P?!Q*UStvVaJ9-+n;jNbDHo<)2E+4O1m6u6-!GHhHs=~E7o8* zca!Ttxks4OT9Kko;P6wB#s|QDGky3xJ4TITT=mqn_g(wJY#g&_6pHm+>#-{Pkg^`2 zoeApc;5iJ7TL%}HLAhG*-gRxxi;Q+ObEk>xk1|3>WnzLU&3Jg!0) zK7Cz@Hi@Qt0^R3V!Dx+rRX{6cTs7l;3sT}~?j^L{K<~zUn~AI*qx|pa_fl$f2IWe) zAYB=>kwuSrlr`pNB^aBV;UUJ{AFnh{i{`ZI#hj9lES~^v*Cz6q8jZUODSj79 zTEO3&+1lbE%&4aMj_w;1Vl`LIUYO43RHQr~x+gG>DA#MKe*@*a1YgV* z9>+TqEvpTDv3?^xe2i$7~^Bj8#Ud?Yv&wI$O^!Gw0#=?tQW^|vp-%5$a$Zez7 zxDrD?GNDn=?K}{B7S61+gfP9=5 zJXL!FSu{GJ>uT?T>Sj1G71Map=^4uY=|`?h@P~e3G8qX^0@h`qGagm3M?vFJo=` z!dY!oJ67G$J#$ja%N0n*V31r!dr__%kE#joy%}&ZI{ZjbcXf4lz@{78BOR_`Sl`Hr zUt!HlksPIPCf8h#Uqn5hW-q6o<&08);x(lGQF`{q#+-k_;zQwD{~hhZ&zWP#^J5s26A)2HIgVy_7yn8 zZU65dh5O|}xu;Wm(PA!R|IbQLnE{m*iMcgJ)rpFlM8&O%qVnPyiSp`XZecPHGFi7M zE=WD3csen7n`@BrK=MMPpKVB@}LPB2kl!&$#gVF)1A~Qf=RVy(9ao sW)&@1oF4Iir}m5Cb$*I(#Z^^B7hhO0dmc%V(!G$v-{rh-p?D0?v^!bTt_?h6zVJLz;Q_@G5a1$=>^ zZb}S_HwlR*xz`75Q3Kj}lp0SoMl6XP!9>NhwrDh#Ek=?cmPix@$^HL1_M!^3yL0;c zZmnT&vc7o9t2!KY6@f8NYbpL@~l3(o@H zQy0`vpEvXL^Uu5B!g&{+eqsIGT$HLm?~KTuTUE?mHMe?v)oF8kES+2PrMW$ui&e$m z#pRYFPP^vhxqX|z(wF>mPrcxrnP*I&eeU$L&bVN1zmw-SG@m@Tv3b&Os*7h# z`b@v#8C>>MO?qw6q$6r3^{<~af5k!J1DdUTTcJa+)VAH>j$HN}3#fyIv(hZctw|3~o?#ZMKFEsiM`i<65d7tbutDPCN>Gy(fj zar#$)nhFnW2K|T7W}X(CiiiKl6@juRP@Z+}sb|id`>~VfejK<4H5dO<+{D>Iy~g4R z#m^VN$O*+NAP7hR^OxdpxgVd(^(lUYv3#UhPn~|nzMKaX`*VL_@ngm2;vjxM%JH${ zXBg)vi-Sq~T=9P~+R??4T#qRpT`Uy8Q2a+mUMNl|ez`c2lw*o5#dC{Od75544SdYv z98;WBoLgJ~Zq6vqr~FLPP6yh6q>4Td;P_baAkGo=JBZ)Yi^tK{aa{lZKmJl|2Os^5 zbsR4g8+hsgwuX^bO==ZS{{E%-2q_11iq-DN2Z>xoO*y4!u~)ISQu8P<8}z8=YHk&0 zkJO$|IZE2LeI82sXNu#%%GBbi;BGPb2LjQziho^`)432LRKd3C^j&_m|@H!2gj{ zCom`8a>cQn?-xtOWyLj&pad6uqWDJfG4ASsc~9{q_^AYUOe6KnaB%qs)0raXsmIN}ODrhxFY5T)%(|YJsv$ZyogM{X1}a;9&_kx9zpX z-&5|Blo-j=L&evTfNj(nS{%aY@=1z%q_N}~{Q^>kGS&w<$CCP0+TM(Gl&EW3j&W`% z?&Rt*h4TTZJBwc1Y46{QmjGdJN-NKaD#o*H@d 
z$yq#|2==c|xXcw>IhyII5Ae^U_cM!2!02N7y`(rJweC3f6rU`9hY}@pSv9$PitB*8 zKu$AbT*6gI?@O55%)PcDJ8FR|yAQhNDt*fn)x{N1qKdZqaUBbc^V1k6g0)hDte)#o zwCQHbjDf~Ew1VwbG18u-^#m_@o)>Y7OJ%8JZ%#XXEluUiT+ahjR|0V<<>%mu9Qw=} zo49+fxP!aRspWupJNJJfrvduQu~}dCkOP`;!f!cvZE<>@28Pz~dd@8mGq0b zPNSzBnpa)lmp+Gpm%cnHUG}~JNl=^Q;g(myu&}Gswr~~mIr4`fX9e0n5%?`R1c=I< zIoh~|)DEPsKUh`?cHA7O*wVj{YcGa z#ha*kH8`2bIe_u6r;owKtEeTk`yP(7Wxq5b)0nR3d^=&HlQPYeQug~oX}NGBoH8Kk zAUWwUwBzN)e+8p2fm27)pWJD{crkbKl6qdwy^~SgM|v+t+KaS+rv)t~ryfOH52P9m z3GON|s-|g7SQ&z4+gic8SgPkNP;VD{y9zI12EUtm{t9J=r+)ABq zK(}vUk;Wu_eMq9kBCS`!4^IMX4YgK7ncl!Pi;*jJo6)FRmcxPiA!^p59k)>Lk~lcrY35P;C>Lj*#1iV znmvqc1bIin8$-eF`Ow2YmnBJFMvsl~bid*sDR%^z%7a6B=4LRqJMnP?X?hN7Gd0H^ zY7Brww$s))pqa>>(r^j9xSq0VCS|zm=!q7trHm=sNPnlLYkzz?pFR51+JKZ7GIw+u ztvXptp!A9F!L$<+(1U!n(soj#jf`$NpvjY}HMk8})&fxj<1fJ}rKIbpCcZwB5}Cw^ zaV1^u$szCc$n6^Hy$n^KhcC;Fp$ctY4F`3N^93MttcNCxs;sEn&!iV2ehcsO+fXA{ z{64go4?hc4$0PqAO&A@62hd5Y3xIA5rE<`zAE$8Flm1I2B)q{f$VNMrwQ$kxXi`3olK?&?m2KpTBx5Fh9f#qaya|+lUP{E)+K@aF! z02--PL!Mf!fqvfthg*Q@T3VKNa_1Uwau&7wCyZ8As5cfE^?0gMiJzert|n!0;>cIg z+kY&6I$=>?tjwWh5$`>Kb~qaCMEY+fT}d0luT*JBkb55~-o?=X-Y=(y7AjIgmr2KM zm6TqICbt&<0GISbvkJY|X&F5%EndgCra{{r8sR5twD;0p|5WxhWTlY!=3uUG!f$HK z<&4c4h&o0pzX!~FXe*CqT!WtPz;ApQA9)CvQg*DPG`2yvBU9hKDKjqdK`pkX8rUz! zr+XBAAj{gs^w>cw6M&`I%hcca9)#evc zL;EE)I>?We(G$cM=#hCSc)gaM! 
zZSokzzdm2dZN_t(DW8@ez&RdyY=u*VP7mEV!W8In4DdN4u7{$Ql{-rODqm{KIrI^>Y#to4 zoN--;T35UKb-{g(Q=fS5AAQvHL)2 zXDU@(HzHqafh|wz5)cnf98v=FhvFez1vk_o#m9heTUAoRLarvynQ)2va{a6Hl}C3i zK(95^!)w4gnmluJ!1=W_?->pJFY~;ex#2f~7e@)7i;@`kK4}rY{@d#f+A86~= zle3iZp9HiUY2ViBIGZ_@)e=`_#Bpm)EZ>;!8&aBjzMW?+xH#6Y(5D{Fbr55*Yzxn} z5mtRPuwFu~0$$@%qh1^Z`ZIqbv!7@O`Ai*=cj~qGp5J zizs^(7_gN%YAF-^J(4jAZ#m^?eaB!S##THq$Ev1kqGw|dbHKuYRQ^)>cpTq#8g^Pt zPG*eta8(K2*+$z1dK?13d^ z7N9vFB}HwlO?eWkI@f6-t%F{3L}aF*h1!XN<$>rLc&BI58dvbNis#LV68g};qJ=5Q zseVol{%L{|zoDgpK)VF0IyZ_)!#1E_PMKWNBQw&SPx~Kojef3ruai@2x12s6OIpm4 z>!m79bx_rFSc=2UC#{;1>RCyr(LA-H4bEXq+O-}0hVQGjZvax~U1DJo9C#J9)Mprv zy(-Xx-a{v2xsVde7=<$ID4pS6ov=`*ZKe7weuHP!O`W7TvsXK0*OkceG9X<=n$qt) zpb;+7(-wlbTLiR1b1T$PyBwPMMms9}xx_mQ7+E#&E(V&*8N)hq*Q2qP;A@xQhgNb{ zC2DKK>)>{M=rXm3(o5*2w~!0HxUEpec0+^d!)Wid48a$9@E%esC(FaOa}z%0IG{hJ z(r)1Lv4o4^^xR0vY0yAFNiFTKc0&*7YOs(a${_}{L5soh_%zyTypAd0rV5R7ITn2q z(qMc^9ilJZinJJsktg);j55>#&kFc#9i;~13Cu748|6ws;!MJ`MCq=>nM_2Svb64+ zD@)5UUwRlwTA*niFwSHwzoE^b-U52Iq|sfkmjHD=61N#>wV7qcq*ggD*=Y4ifqbFv z>Z=ya)uvI~&Sp3!k48U`Ya8YMja+3kha4^hV|Srfo&n=0kP_ubB}$H_?#`5EdK_ZN zqYk~O7RsmgP0{{$9lPQ8HXzc|8wSqi!8288gf@7l2ldo3qoC2@iQ;4V9f;p^7@Vdq zxr4D@htAM%SOT7;;VC?ALVLxGra-&@jCcPFM!yu>b00W;g3{mMc{C}fGs|0(IA$n( zP=&T`rCoJ}agGtpCEBr0$AHlx@Vi{J8txoJUFW_%ivNUdszFEPQc6EGowC0IeirT? 
z&`dw(LF#M+riRqszrhU+6`MDZ+3IrWYxJdqR`f~qks7f0_aUo?z~xHJb>QT5MlJU{ z6Y9&|bM#gq0@{@HQ5B=S7kzjx9>)wYIvu@UW~9+_MAjD4m%ZDMlC2K79>~>gmw}nZ z(ED!~ogSppbThc9rQZ@GxgVZ&zOxlBREkam&TaHF1gz(h{D!|V9_ebuqOD5Ti=oGB z$;Z)??uoVN&C?fXaX5PAYBa}5^fwV(8D4B7*IXJ`j$y6k z7b52;PhuamX%R2#N$DQwni0t&j7rfwsX3H+MhQDxgVd=fm-B1nXA@U_Td6i9jjxuH zmwq+AuV*$$pp0(T>Pqkj~Z#=b5G zjvjDwHMJHqXEe6df+lbd@U1lOaCD>TJE&F$+YMl(L@cf++*rr6v{;G;s7B(PbG|}^ zPaeqwR|gQaVn?^p*FwhNs>9pdkEL8cS{+86rx@`7I8Ayw!a-ooxnrh_^+WXgJk*ME zwq8;W$i-%!Q{1<6l{S`8NA{$>MM!)TW4@YFcfuRi8%5viDz(I1M5YRqx32u6m5A6+ zcs|;=HAvl+^dVH?@yP>v^JRK#118}VCL`*@I2)m_k)Z*Umusq!+;*-)=ab&SFtEOp zay=;DlIj~H>)=jLKRb+!<%jg)%t4K8@KKW(#L0X#qjc)RVsAH@H({b}-7&wCbDz zKzr*Dy??MJX_CGdL#cw5i2Jh2U08LhlGS&loP%?fHgPan#ID-(~AtBx@Ka39aR zfnIoPd0x-TPc7PJ8(g%E8V^%$H!bwTXL%vvTU~e;EsaT@(*cw+>L3U2076H27+8oH zs(e0{zDsF*3*fWo$(OTT_jNoEQnp{JHw{b}z21~G&~ip)ELZHPZ>}r;3>`2I`|A3x ztK3FA=a45IT`dSJQw2w~gYz6XUjn?!z8t%e8WXrG70wd8(*wSRzJ+=m<;G!^q>+60 zDmq|BDr?E?*v*}Ia`xHG{bIOD`gGFbfy^vz#Zg3`=GfmrAd&`CY2E0Tv{Qp?!3vbt z`|IG|7H4r9w=+)NmhOz%7l3grn2B<2v}wCJW|5bGy)E!VCpel)xufV~Dwec2)Cw=z zJq9Ba_m)WUu%GN+Z}#9ydL|=sYB{AXNVKhmR`%Y^3}YkdYw1VMe~D4GPzTQFl#cRyBwc1DS1UW7bfro+v~PFH@XHoUM&J z=Yfu)7O4ADR$hx$H|0()9?U%Ix8&Txd{6E*_BI)Rb2B{cO5zteZGTQ`r38)-<<8X& zduoC27IW1HY=TRhxKl2o*CDi952amyvrH-J*U?ZR4~#p|iBf1p(s(x}9Lp__8w+{+IFO+Qo2eM2x59G^Oqg)z2k9$Y3zi;OG`{F2JAE*C}wH9aXe%53W&UY1SwKO_`KxxNjnN zV#ny=QCOD1nX^=Nw))KSwcw~g4ZX1C@UDKkEru64yn=Hj?I0+vjxbj{USRx9Nu#?e zVU6Y3KG!`L(uzEMB)`tWPURZdcC~FNTz&&7#@t-ZvK`|r&RrM5p<2h7k3EJy6SKx^ zwd=MztoS|_y$+mBgz5#-waCNhWd`{V!J}oKT>TyhZy!LK+E1?V=m}TpYsUR9=-~QF zEhV30q|TTNl#n0lu+8molw}LF>8fsf!nu(cHDPw1SmAs9pXI89a-V1K~1zm+qU9-`U(nWPKzUP+yMWTnesT<^LuWaNc}Z zvM&n~M#WfrqV7uUPjB==0gIIf3s;k>mN}5QoKoXDhAWQx?W^fk2>t=D+c~hjyzk-e zi^6_smXP+2N<95Neu)z7p|5%=qkaJk`~)ksSVMiC?$!h2cGL0rB-D;G!48v$0^f#iP6Q{kNVO55hY3t046y}O_Aqxms|@Y zbPdk<>HExt^-!Iy8G$^ByQ$nOs|C&)PS-GU^eOz#HQJy~?!P>gnu)YM5Kc2jTJBaW zW{vUtU$>N%Q*VB-bl2)ymgf$gLMpj3X)<-4I`1Yl_YSVB5;jVdFa}@b>P*s^zchBN 
z&XSyMYhQaZe(`@5ZGIN+m0L&C!&O+qp>RenEzeRxvy z$O&p!=a|l2@8$kI@Z{{fgI_g?Lx zcyi}atcnL_76ZW;T6I=dqMZrc#~idwX=U$9q)>Tsq)#OnTt&Jy5p)w&Z%7hxY=uXJ>cjQe?Tw1}E&|9;49j;OG+ zeL28=N+Y>`yMk5aR9mwn?WJfy{^p@`BiiMjA)}<)OiKlv&n8`L%x4ka>O%I{{gm+! z2A_{&J!Sx32W58y;Z58hn(Ee6wC^R!?hF8%gBjHclv_zWOYRSEYY&jeJXotIH{S*< zH`8`Ix~K*V@HfcdCM>%C-hw1w%H28WJ@vPHt@`7kv@<$qdky@morK?&*h%y(Z|zBY zd-HfouAMCaOSTdoY%eUt9>(B&?nGwxE77A?rN5+2_y@ZStC^`UKT{SsQA z3O72R^v?dmi)rL4ci*0_-e-q&@YuOG#V4i2tD1H@n059nhdE}ono&4+GL%y(OAbl0 z?wv*6N)6UnRppixC_d;4{P%}0| z6a5B9+!u?kHgttC=Jj%_d3rz((qkk08%x0WHqv<%y6B-c6OS>RkK>X?s|NZXE< zc!K_vW_z+$i6_@}r00ovPIKX>9{6(OQhyC_nKsLr{t>{c9UO_y-^|)`JD$R7dac1K zYt7!LA7frZVZ2r-UHQ?ATF(7$Ml96|-1khsnI4jYO0_

    x1sA*o0In$?7a+@(N_k zHfw_}~#MQva4x-CWv# zSb*0rfC>?>k!l6*)w8SUyUg=!p54o>&;BlHM*8$qLx)8B_Vd9*dC=oGT4GE3s(G%? zi|hkGO5uJh;x zmB7sk(u~c7+-zek)39kHfqW?I-R@yoLW$jAXfrKT!BuJ~S6rsSe?~0C<_39%+X+%gHRIECxm@Lhq@UrR8?esy=%tf0O=&)bY}ClJi2_!AMe z6jEbp@^d@C%aHWtKvYOG3Smt``#)!=2$>^PXah@MA^9H>1ip%)6= zxzA&6@|N^}T=Cg=wrp?Fsx(j=+KzT#+^gxL-MBMm7SEad#B-)$v%LTOx{Ed@16i4? z)Xho<#6B69`zh3|NjgT!G-A*WPK@*_`+0ud&#)aj>VLay&t3ZRuX|MGcUK9k7`u@; zSAqv8&nF_oS5eDVms*a^34(q+Zz1*MM3YAP97DZz$y?}|+HfAYnl=YeGH5si8P&3W zl6==dJIJ|@F%@`L63zx%*E5wF=hE)WdJh^HlQw!X0(y7pX;8gq(~{QOZ%Ilu~=j*p11iTmr4!QE__O$um0XzXo7wr>xfV0ZyZB zu11LEdZhGBq(?a$1vb=*%SqLCdw*=|L7w+LkQ?#QobNiyGdLe6w&*O}XvDx|-^Y;W z`yK(OcUtm;z_}d18U1Bk@@OzAbuNbc-B)-qshe0|ah0xyQU5k^=K4gFuLJ#L%Jzl7 z+Q>hhh~_A8*o!h!b92(YJJEUCPJM?_-1*klcGhz`7|V%?BC%Jwjh5UYR^WO7^C;s* zQyAra)NMr%PEK!xjV8D27?yBQ2X`xF2k~14PgkWU@#Zd`DU2#=I8#wG<~ybl%R!l7ywVLCpEJRme!Bl4!3!g;}2xsc^cfgoE9U}H5I*TPwHjq zr)4o-QGlDBK`HsmsVyfBVCvepZI%MFTD<^C|Qr>y-n%X1hx6D zb4q#VN=MOWHRBRTb==7(&U=k|7?&wfOaEJ{txfdPhi#?oTJlSjE>TBs#NY5Cx1@WY zg;)B?YoAR~cGEIAqs0EdT9uz-6~@x`V)$Y?H1d5u*M8iKd3|cZ6`fm=Y2Rf05v48# zj-lkZ$Mz-aG~kyyTQJ2v9zuIS#Xq$F!Sqn3)CK5{Cag{ln`1=L^=l*H(s&+RbTfXY zyMK1k`*jI|@1VopPgcMvm+_gWXs3+lxQQC>Txfzam(j!h;Q3+d7!BCTIh1+l0%9zC z5;fI}lgVE|PlHHXPfiE4Yp2#v$g9Er-Ur0L<88EmAa+wlnJbAycEI7U0m}@qou`~L zYPtCdXyyBfIpBE#8>|f(gWs@;6m^@JkXx^$Rdv5>7C(YBR&hTROLkY1LAfG(uj69q zEN5PpY6oqvC#I0KHM?SWjmPJ~ogT|FEV}uwY&3DFq$-~U&X82O*_gS{9<$P~R^hR| zE!&bap5~;5>-habrF7JB{USU7xy!l38p>+hwPwPi70i0gdfL~UG*ypdBiFigALYU> zcGdaM?z(ewo<7|@KLOctZK1>ne#6}mq|iBw66&nim7;#+E9bS`8y72Zj!*I60(`y> zxO$^av{cUgJ)6*O-%h;I4n5W|M&WW_?EUoTNS7Vb3ya}AQ| z?xE1vmQ@R!m?WTY!jiJElv7zM0oeq4%A+xir}}P(tE|2``Uf~$tEIJ`NSjkBIgMZK zhMv&oif&f!g)Si5_c!As-yqRGzQ+jEgUWqvvQBE->^K<7?I3+VM-!TI8uPBV&CD1eU=x}lN54<-n*$>kQ?fw5SkoeIh3+v|G)me8r*5Ff&Qv@6W%uqMjt90L zsjddvBa`mXW86Yp_v5z>K^v>p*0SS5zI}m_iH~LcevcipTe$9oj?Y0i=Ltr~3X#G5C zj^A~_@b2>DtHbQ0I&pV@_(-1F4DVh;xd!;?xRkFCsg{|~c@h|E;yLu|x6x{HSPQ(U zzip(cJzc%mS8{KTBa?H4*IkBv@QB9KyO=vA@#Of#4;PT8&+eRC+==V(ZsZc-9A3ev 
z6l)|RUryV1br(iC)s-XcEp+}uyr+napG-T;IEDeSnmCJ#1Z?|IIz`G47hS)i2VX+HWM3 zp2F3yJCyM7oDCRfaL%jOxC`1p3Z?Z#R@2jgw6}*jS0}ywmS@|3nNjIY528l+ZLX=v zk3UO#b57z(^ET7=9OBOH^k5WgY=T()iEU#NzWJv#9h6e)la`y4^5wQW$a4k11NmB9 z@c^wSmkgt$QHg+*Q8oPX2G{mgD;hAM^#Yn2)=Dln=GW^}rZf;y|#lh*UM2khlxX8JO=_7{9-Y z@$|&Mn@8^TXp5}(K9uQ?w^~b#V^_lbJld~Dqx?I+gLxlI`+Gd}x*6_00~`8}3BTI8 z0y(FFA@z$h6rq+U<$P1JFO=kgVnpJeD)?bQ!fZddGrWaS_%Cg#b~Qcj1RCcu%7-gn z4b&*4b{$d7|LfATaf10ka5wYCC}HpR{2rs(M4!%Mq(g6{;yLCH6L=B};zb`}b&BQb zOO<#EA15SAufn+79$-70T;Efe1UXHhbC1R9fROOW~2`V8*&JqD+CyV8#Amhb+w;}ZN`I|kIF)yc=& zNN+FTm5yR>x9<^6qvlJAYdXMzBXoA}TNZN6P)=j}2UF&>H0N$-RIADJ?HcE8zd*lb zvfoL{Wq4U*Sb21PRcYA`cHGZ#FuJ23c5-PNjV1L88_*$da1B4uH(-8}d`n~cu1qec z&ZF2{p(rP-7kkvyJ;wrp?`XQq@miji)0^{?{&;BaWZp`=ZawYwr$^uBXo4$trasrB z8z(Wg&r@zEqk9@n?H>FBR%$B0eSy6IOk2ql2R(sZec8$V4sy>XRrqb~0Cwcu%XLiB z@j2Ff)tz;~;#UB~bsyR}8|c*R9mxmv+C@8~5W6UyahrB1b`ERXw4KeYq%;DVQZka? zo)xZjzF<4s5?`xF8aZ1im03bHoc29--x>8VRu$H@3xa*`Vh^*exLVjk3&!lr;L1~- zR#(xub#TEZl-Q2)v@>BQLoa{H@J-N8u|BY5G7J}0_`-kj^D^~ zHKUytVx2tdXBH~_C#9!=^tV&@(gccjU;a@upK&QeKL&%;&15@#_P|ogb#AauVF;ewj-uV=gCsVt>Oq`y}oubLXLt znDJqx?E4%3kg*5P7N86+c5Tggnvt~4X(yTQ4EfEn8lptcv8O^!G7OF57ZUa)4p@x@ zKgx4IwD}$|IELB_fcO;r*H@8H=a$uJwS5e8OlO71fo(DFnv(09W$s>JRKDr%>b*4S z!AilETrWp^h`~{`{1-~kK)V@dT*4a(eTvNiqF9sHaEhR|eGFyIH$E1Km+i8($|yn-dh#!ajj_Acx8$&R2{UUI`_n z>%LrXohs-c{+#QDPLf~LWJ;*}F4arHQO4eaPouXF{?3BCx>B_+ab(em?75KIJ&N36 zVA<3K^k$E)FlMFnMYH?S=`QA*9~?#K&#dhJSI1RgjD@|eW&T*?Sl(4nHQBy(o$ahj zsd?~sc;Z|&yD_j*(ygv_x(lrUEv`gAtOMtDjK+PQ#=ez9=g^13Pp<0EN`3jwD)pz9 z_M=E!75??dvFkmkB?Mx5O}cXp-gxB@@*-NIHF8cn8Y`*in}@e@w3Km619P=4sjm7x z#$$Vyio+V7L*ea6@g0q6v>H!d<(!Vaxrd}3dwvPB$lSv(9GshI^HoMzqEtSWRC{9EYslu<|L0q)1sbMS3@;|*7n zen^u48g#-2((eWbL(&{IJVgEC;MaggS@2tqK56&&J!0+{T=zOX`G#33m2oG9HpjPN zjK}G#xIg19evd>x-7jgsb0{+(h?~%^1#*<)Vf;$%Ib78QuQ0yt_=PpJvKA~BknUeG z>g9075V+;q)ZWAN)e{&?z;O)zmb-HG5iX(iElGcT3(eI@&&C`4vW{N^Qjf_QH>VPl zXzh8pL9hFe1eq(Ud!SPdz4)%I)}Tb)MT~SMZ-SMf%KK@quz=QAkn6bg#QmzW4t 
zWpO>F+?=STr{|&m3t!Kaaf#D|^9ww8*ML#J`)_x6x~G5Gy)C)#b$vGp&6D=w$8@OC zJxA>*g>zsdn`2iq*C~(^d$6jD=->6PY*aPCE=+Cw`sPpQWWAXqNvT73Y)Wwp_X7yUZn)g_o^?rfAEC@L z`iLD0?xUze&L^f_S9b!*pWu*ZDWk`sO&>x2F<`;B{RSf6w}6{oXtsM;ExH02d}rcT zsOq;h&f#iA!#C~wpphOYLf^(H?neKtK$ABCiSN*Cr*&aJguaYK8rvUEKkm2N#dxaV z2WN=ekvrEM)wf0yjv~j{>=WtEckGVB-wGYFiPm~ESNsFGng`^<`v&?|e^B~qV=e~5 z$AIuAS{x0$&W479v4^Pn8n8?U!Vc1^@j+cpXs37IvN@Xj=aLUQr_!FDqu)Or&xrkw zqgtVp-yhPmZ$Lap{recDv&mY{6|{dPWrvg2p7_i!Ek42h2}y_hm9wDl^u$4ah0z(u zaB|g9LM6?eaYQ+Pjr7;Bvuki=pN5>!QptC1eq8d^t*71^2xQ?ERq<9ziF?cE$tk2W z+O46Sy3XELrQcR~z_`^kaAvwS_dn!1XC3M~^{*=vQ99;Lb)?IM#=5-EBrWbDN*HYp z@78wr^!}n^O7yZU^(03{Th1_Y(4!1RuLi=UP|PnRxueP*KFXpPQ(~irR-_r~ zPfcTY1+M@XXj7krFO6b%B)T4)Jf)k_bWbA{8xwBi_YOGDcR`$eb{~%^J4g{8cOA;5 z$0xq(;69&Hdjp%eQVZ!r#VW7&9{P79)17uy&$o#ke7{VZL%q!&7A$&Ls7}f$j~?AU zXvgZzHMFa7Kv&jU5>Lx}9jS&M^UcXqa8G}Uw%wCpUhHQcPfT6^X&D;Eartg-P0~6A z%0#rpo_({gnX9L3ZDQLa+Z{M7Jw*h&2d$W=r=u^a*BDT?F~U4JsRe5{AiLVEdLZ6~ zg&M>6ciPCCkA9m2K4uUfa;@_UB9;Tm(T`OptpIv`(*fup<5osA)PJ{QDQj6r_yx09 zDN@2?nv6V_Y0*f+`C!;@!8LKeh}`p;k-9@p7zU-CX73~~$o(&V&Gt%ijiAi|e|l@S zs-1NQoI5&OfX10~J6uvi6RJzDz%ScF8Q-z`EO^%19R~OG0@AUhITtLE=E{Z`Qqva5 z@e4_Q5wTxdBMO_UH#U@6`dX}?vvjSLYq@*i+x1}7nfRB{msM#Wv1?}TBc4K@lx|1X zYiQq?h5KoxNIg&2VP~!ee^ZlgEGKBx-@%PBLj9HZlcf_zd38YIa-XsYRUP;)AW>Ss z$&CCE^5hI<&b#i1KD>KU%_gBldrVZ3LfoI4I1&C*jrM?Q>-<3 z9S6?sDf2nhQNrX%qgJ%6#?AWE%Ex_pkD0E|^8f4W`OtUYQlT3nE#_fW*k&OV3fKFb z{ha_e=1K3^TRWeiZN5f^f;PODOL;aELQX||E^ z`px+1U?S#&qxNc%4AQ=Eq@H&oK3~Z(wZy44oD09{ zJ$Fz->zwU3ZUMLP{nIs2@Od=E!|15jsndr(oU@&iJS)GHHUsJZHn6Q`R{VWh{vA5% zI<(6o+Wb9fTc{WNWqptQMkr@=?7+0rS4ZnJQ@>j4AE3SbzX99PGq>_)S+L=qP8^%fW)MsKXjld-^l#h@M;vKqf>{qQHHoxnh@uE&Hyn z5M_sR!vAcJD(ARf{b5Jwt!%BKdkNvrMrL|ldM>3_9K`{Q*7;M+Y3eCE6s}*v2&CX_ zIKZfhdj%gsqRVjCp}c3~?(r#L$+u3`6J3X^h3%E+#w39SYD=LQxzWPfy?#icQc^rN zz_D7uQ&>Y(bH(>===uE9bW19C);C?VL?n&N^YnoH28F5VGt?ScV z^A@}?V`+==1^kl6YU=xa-uJ=wCOGhLDDRwLEO1-0F#VWzv(aOOl=eeq~7?C=_%%0Lro*>Bg3auDN_`~R9l z9g`hhGr>p+O_k*;wNla*i~=K9+ee&Hg{Y`-ZE(QDtSiDd|y(pq@|MA)s8fm_P?R0 
z67Z~|jPvEZD?)$7HAP^HQsbM{-|&0EJTw31$Z1yKd9-~JX|T9Oi5GREps+^3+S~K zylrQl&^LLW1`kcZ`bBhH8J+h|!n>xuF_3dhHL;8WD$99N^zr4y;M9D2 zG9`{Kna(V}8X>eoRz`_a^SCQoS=3_W;ZA$0eHgm~}II0s!_d6lZ)~i`n-;7KcgDAjLQr|r|wfqOZtYvq7 zs#Vl?fe$r77l!4vHI>qNu0eOdM_x#&!9r666G2=IvD0d09oz0z-c-pndt0{Lc=T@|D?4a}O+l4fWW4QYM z$5Je&(vk;a``1q?F%oU*98x~)a4j#Q1nzc|SNa2E(7`>FzSU#=B&=dzWVn=`hhPW3 z$~<5aGwpu?dwyYlH!F0m5xR$K3UbwnNAe*0sFn6gJbwYW+#`20u&LvWA^8SEeBHbw z$@ofo@C$nrDW{L*JBB56!98HDmKx1?G6yCtaSQq(JWkgxjY~g;FQ!*`8D-rk*pal$ zP{uzGoVlm#K(w20hPV#%9J9bH(4Fr5x&@8Z6YX{(^Eu@}Z>uM*E}@lmlzEexpBU^U zwSm10o1lWb@O;_<6z|f$KAw2;?fT2%zOMu8C#mlj`kZCEZ}q3l;l`uejKmmm{5ogPrLFoxARQ)+ZOf z{f{i#yMMaR&c;^JmzuK`_yX&%_@MG3K>9!g2(Ygr+x0h49mJ6>= zoxOjp`jp~(A+E}+YkrvE(Bpg=9Nv$N6jJ0gMw5AF>fO+SWgyR`I$iL_e(%myrhAWa z%3}H|kfR+5N!|a>=&B>omy>H_Pr;6)<%}oG+y39CE4aG)*LBE;bsRA&q0sX7Wo}Vo z{kWX!Hc~!$XIS*`&)m!8n$!-S{mQ7jp`uOoaU8L-l|99^P{FHn1aV;PQo3rJefwtx zy>G!!(DU)Xrn-LJ#7?F!B9p#lc?WsUK7{O5a@J8l)^m+6yRUL%@>RaaSoL}Qc1}BU zpSREl#--OuuaGBQ>y$wpbEr}(o3--c{i7Q+LsjYK5TnjYb? 
zIif5jyQ-l!SJH!~p)GPyq70Xp!M?P~WHHtdGEQ*i+LrusiE%1dbv{LJZ)4N^+DSXQ zVoqhuTIsA;Tk}7-=8)p?K+x-j=sra$cP(9le%OKz`*(E3CUlTq#~gav!rjO;Kl~O@sR#YSzwa#Tqv*XE!POJ- zf5=FY62CDgk{A3E?G8L7cbEEqAzZ1bA@cDqzbAtmOTR)d{`ZCT_+-9KAa13ZS|DQ8 zKLe6>Mlc5|xjGW>zSM(VdF5c}zYbhp1?*yZ4!^b=tAbb2MjI4yCtNEzTfwCfu<+@- zd-gTtS~F~b{^!249=}%g!6RaQA52$k=z$uaSF?1cx)EP;{$TuI0%<291rbS5TJ)?w z*w+79N{l{auCz~UdML}frR?kT3`1(LHM$@mmh_|G?S){l> z`VZ7_o@VO<;3Y>bf4X}hc8FzT^8Z_9_uZ|PBkbT?UR`;iDYOq-ghkjcdzQnTUARwV z9OE%=<<%LI^e%xT-$u^^?=a+WGJe8iVE0BMHErZyfULOIHxFI2k!R(w894Klew-Yo z*2u>??v2`tn`0Qm!sJG+Bu`dr<12W^Z)0`GkQFJW6n4HbId7@Wp?Y=%KUE#?n@KLe&^r+K5KlZ1%29x zZxvcw4`NZm(g^Cilevk=(I0``HyZ9hyXoVG-t&J2ZU(jjb>8FjJyES=76}o_M~=eskGtuzs--BfOkrL77h0PYR-^z zEpAxdu3q%HyPmr4>OM5JD?jd(j=%nB1TG`hU3$j3l`^xcTPxume0ZzbNaWZ;w>nWe z{Hl=lkbCKB9r56ZLO=VzeU`w14};B^(e68r*pH?6QBO@R#lD9g+5l?$rchYLhv7)I ze8iKqgJrlbG{e5WSNEuHFLt9XqNKi%Upw&ol}n+Aa}NLOjPY4v-V6r`fwLPuk=@+a zVa>C5nGa66@f`#8PX?_q=Mj{OSWKj@Ava0{Uzuto+!>tvxq~`uxVDnB8a%xX=U$si zOk<5{E1F2(S&echr?H?iT<%WS%%j(Ly%d>tFO747w-~|ikYeW)vyhn)jKjT${@2Pr z+@C|P|4(K*V>NcOlxWVmV8%$LGPj#DM}uLhIhg*{weFtwKeV_P!5vk8&1OCN>bl~u z(FDuWnwR@)Ri{yN$%lViWW3-d@3S)KIlw-QWJa6TiCUH7h>8115>Umm2=}7abdzs*xYD!!W+K*ILrclI&g`U*KBhZ>rF|2!;{Tv(=f2A;a_;3CRQLX! 
z-*z3P%Dq=TE}^eZE87L`Gd(DGYAb!$Zk(i<$k-s8G!yv_AOJtQY$p-A4~gRl-+4CA3gs}lFZ=Vr_c@8(~IwyQ%>n`eUw(ZNNV{g&I5Q&;N3H3!c=2?17p= zJ3j&z|HJqTTqkk*e`@}PGEE5^d(v*#-5RXd$}PHNM^8_zXs21homj&0Unu z>>79qC|%*&4qg^h#{I7w`JIHFP)9ANWUQQTNaLwOi{66f*KZt63x1beALP0;E@K^s zBv_n94hK)J?yDurjB_#le~Hvf(0^eGCou9+z~_9{IaYPTwzN}6D0qoI)^_k#+H-5x|~|x_e#kF9RZXsC^%M6Q#wD z_Bv)|&d(#B=nPFQ>a$SDIgzJ-?cSwQ%GtjMz5T8cWLuJj;)(x+cG0TI7`XORY=Zt$9Y>{Fb!$-P_i04$PQS0Lnr) zI+rTr)8mZOQtD^D8RvFBdvuLH-aM1KraMm0e0owpxx=!=lQS{7q@7mUxNCss?z)^0 zJm#3<3?l0Ly*1O~JL{3JUo0Fx^?|#s=d8b${eMrUn(7sIk;z?-&Zoe;yzI_v`7 z`d`z4=WH;dkE8Dx-{_i+G+jeaev5wxveO&cTShOg@;t^^rZJ;A2U#^naR4)m;nen9 z39io%l2K7B}AFeL=TNazVsW6{BCE|u178B=~ATDy^hY2j7S?H9!i{P z7%^ezK(D71W6pQrSGY?!WQnE8j@fP;`E>2# z`*=GpxHU>h&no)plHNk9V}wS}Z#_@-w5&f+;#bY)lSj6m5>N5#zV8y$y1!B;G(m}{ zu&PE0ENN`acSKh3mS_P^kiWFD?uyo;mnnZZcBhX1x=T#d&U)-jZN*c}Ioc~Jh04_( zW0C(sPb|{?&B|n2JfGw;R=s8cr?Hdl%SR!Za(Y*NvDf`M%h}bfOqTXxSG7l^J6F=4 z4ubagV6p9O3~vOqv%;yAP2k!5C@oF&ik%&rGBkM-^^_~{^ctKS_>SEOmS{%Gvi(s$ zFGgx&o|nb_j3nsuxO(b4D0f#pu472k<0wG8hts`$7@m{5t&v*Jr2UUSIV{a1K+%#o zv_PtLvm<=ycye~=yyM}$7>M-#=!HCbVHq01Sg|mvm4tO7Jn`UO7zUyXe7E{>e#_)K z_Lov$Bhuc32}q~^ozZp&Crke_a8E~q*HL~J(%%Pt(;Gf?ol9=du`<&W9i<#rqt|Xg za~wkIZItzYIp&GGH>K6Z1~iXr0WYL@%>Zv3Ji@r&!eXqv^9rb>(HE1^bI`g+lWS&fMs!d zt3yvxKV&re)%VV&w|u03u#wh{-e_-JCD4xD0o3kmxef?@GqQ~w<1s<~6H+~EXNTMy zwkM=KFrH^D>uF`p-g1@?zIMK$9w?JiPwmh=&J*-V#Ex%^T@GAba+Gnic^|H)3eZu@ zVF@Ev9vzALaj(zhd^gh$nf_V^ezI7oC9^e~sAX@PNL9m)gIDdTl#Kg1@1vn+RKdTv6B{?XyZ~FdEEqmeU zn`Y?;0R0TKRA1r|C9YNUeigrdWw8x+?d%(46KT<&C(zci6diIj`XqjnET6c@FZFAmjUI&_*8kT6`i`d_ zJ0}+X=JYiYp%Y8uxRLWjpzfgeUX@sIp1Y@*S(L!GyY^!&?y1)9#1Z4rXH`>;!?##P zc~@G;CvWX%z-{#E5AcPOGaPN~^6ACA6yf%1OM zH_v}4+lIDkL6+YEF6UuW*#Gx8=sY7rTfx9e=+J~5`(N`yp2so^oJiEl)%X!$;cdJd zzYo8ec4Ka8#J!n3zbx!G#Pk>aQothK?Qq|RZ#Wqf_sdW2^>i=wC+O_~AnHwf)2TCs z`*FmzT`xNTj%;D%?#VV{BHs?epM9TtO3K~fNN>XknBOLLSCH?QxNffp5uTbd(UNc} zz9YUBJ5|NscHk;=lz{vs;FBYQF8WOV$B>T7bLNAHKTD|byY5W!Inx>&$seB5O!xZ* zQm({dy;+GDUFmp?Ja;4rNhYbvxRmxAK+$gZIfRk;wnVe=l0tj!hGGRu`xfx^T*Xx! 
zSyXy-;wx>fUZr|jehdi|Lu$kDPd-b$RBo29!;1;;B5TpBV>4EO`x2+ zmQqN5*x{5YrFNv;C^X1o=3&l!)>3<9g-iUB^4;KU1nbtTu*Cxs4^4yb>yUy{8s#lX z^9Yx_{+09F_Rhq8XEWf-IAuH%?>NI^`8{CgapFt9&_f@dMyMw9tMkL)>D9CnRyofI z)PG;#sxKeAKHWFF1%7>*oPQ)Pul4>OTlZ^8f zJdifD!(-@!Ln%3$7}a7*HX%n3rrkHbQFcOF-`avk^qtk8pbdO;;s=PxtQ-GV$ASytn=Jq@_BvZjy(_P~w&I?tY+lkxsz(EU?fa?Tb6_UVsI)y=4_6zajPra)6UQmw z{88|c=lPH{GQFB_fj@i8O8NC7^{6`}G7Y$s+ybd`r2S7zdrSN#Z<(Ew*OTgOa2#i> zRoj+Wizt7{s1eVs53emi)4>(bBt_l3=Y80{xY6!JETyZ28bUmk`Sp!NV}$;PtW|L7 ztEBDQ>%LF>&Ir5O^vl?YlqZ#hBo;y{=P_&j|EuNwk zBb>|k;#N!ro*m&t@SFKW`tttA2j4Q(ds5c*q~d?CMvMOXU7rFl7-x6%u?Mxp-2-TZ zhv;h>Z8j&K?y6;5nHFki|^zODJi1pcK=2V^hkAV&F=sZf@k#w4~N#nN4uX<;2CF13O{M$ghcPg{BS6Ns0 z-F)YgpJm=R1*siQTsCu^4E{}RvCF)MeS42U~QyHt(y{+7tN* z?%l?d+@$^zekoT*)7WR`Y4xO5msXIYgERcqQTXe2fBBIyjz9rWq?Xygl(9Qiz4G=uYaAT)}&o?6aBjwasX+V}WG{3v~m14_?{9CCidwx=QoMm1`Z z@9#V=PwiElp*O=mbZM%9sX6s&KNr)Yy(;U``=&~seFubMMavSai(*xrD&6L5ow9QO z^|kzgvhGEZcco=R`t1))?p<38XBJ59vT<6b#ob!iUgPw z9Qvdh5 zP0QxJGNHvAMTuT`g7)gV&7J&em4eSpM7dp+IuSkd7JlYQVEiz6@Tw9E%?j`xMRsj+MYyqQ7;tw+3zOmz}`^P-M@T_hDe?8uVK=t@!^fb%N|_me>|7Nh3I0mbs1mC)rW%P2Gf3zp_Wm#)0PYneeNg2MqN{$VtVA2mJp9iiyO~=$hwll; z_+QxNM0ap3>1KiZg(c-#9L zO5{`j{>O#ZGzRr?i9E;iVMnYKn-j5~79dp)gBG_`>U5^75+Zf<*oL7WVy@Lp)$Qc_Z3%US`s#T0RJWz_LCes+a;kH;ZR9T z*h@A~%px4t5e6|B_c0bNN)5+Bv@cxyuEJ`=dNR5)6kQ5+-S;bpm4GE8815ETUYyY^ zO&-c_t{vzJzYKIe+Up2fH3q(e(>VG|_&xrwc;6(RLX`YD^oP4Z_7uO1_v8Nv_iNNe zEXrD-d5<}uzLf9MJL`X$*`*Y7FZjq*b~pNT1!?`!m<8%=K^}ZxekpC~jo*#b`5zx= zFu!(R!61CMcKUujLHib3@g;PoFsd7ypu>3h-|r3_PRs5uolD%oJzUl3q<=x~+yi?G zQl+1CB)y5#0?>RZ=_aG*JCLFKNY{sO&t3e-BmY-+A*E`+{ciaKrCbylz5-w2!~O6i|`@Q5f@_-xat?HjRDJzJyEr?d4&(js1v?OCzSXfIEm z`^}rvv(a7kj(k2Ud0%Rg5_nxsTdu0+D0^MvUPrYl#k-#+P5W(ZeGzuo6^?iDkOqO( zo57)c_!eacK!Xz~yPcWH0Jz~Qa{RA@rNnc_2lX2K-z?rcQ!zzvUq5f5@2=-_Ny#nz=7K_Kj5@nSOIzXL0uo*7HuT-us1D<=34Q zN{jI=rL+vBT8{#9o4ws^t09ANr{C6fCr#uV=b2h?d8bTDl#njUpX?cRFVldG1YTS9m1ca>AwQsS7E z*ly!b%dzNwd)HQM-ESloxITlNxB~31BJ-|AJn{e|}0p}=zVxOLqlQ3ikCqUUz@f#fDY$&8n-L5CDPL!d2meQM4OqM^!8i!t_pXG;e#nde 
z?fc}^)*$eI1NgX$Q90+%b{y)3v?OR8mDart=r89kzT>hI&FPm4^~>V_X5>?O_4ul! z8+}XQghaE6_!Rem{W{=Q9}lGWr=j6g^x#>}owvjp= zZoQ7$e&uu!U&Qzkbp8Zo$8!I1`0obn_Oq-!O)q|iTHnT=-p&{c%%EKwYomPJUzuu0Gex1ERzeh^n#H#$1vLCP1 zZUVN`fpI8nV$Xo#C*h~xz?r{FC0D21MM&{O*xMznu=xd`qiOYWTKh7%ev6t1!>O0k zS8;EfZyqWr%<@EF(-s%RZaSe0AXVI>|2lihwmj8vM+QUw8|0c)j zXuMxBq6hF@UPHIe#8$rzRKJFL2Q%im;O@J$wH`fi9{ezx9$p0YKQXSiQcsJ((YaXe zBfy;WyB!WW3to@^1b8T8x`|f*Z=m^CxOO)Yo1wtbMjwA*6q~{A2;}^aK>0uDX)JS= zEtD4<4>6WksWpUFr@)8L(f&UG#|hx!Bz&ixz`p?w90i}8j_m#ki&iGib~3Vd9QtGp zZR~%<{wqE6$kC>{au_?VveaxhuIuZO8}DQ3EsUddBekdPZ9DcX?%z}QXWO+-cpF*H zHgFvGt`IoOHr^T9Ppi|Oa)*)L%DF6Qp*(5YGS@Jyt6lav+R-AoO1XvS0{n0y5*|6R zquR9#eI&i2QOvU++nek6xgJjzc`bbAyGnW~dJN9a{dz1b=~(oL!WfGcCTB`&#yUK- ztRMFaW@Cu|5oW~4T)znKHD((Vcv{7idne1Z?|%r-r6{Q$fDt^`uZ(96N0aR)&DsAB z`i=jiU|Yt8;=eFD^6l8V-@;jg(S_30uh4u4Ez-Rm+q%=C&bHewDG0ZG^237GLl|g z4!n%MEy9OaP;wo7<38KKq4PuS(>hWyow#rL;qQdYLf?` zV@u@Jkg|YLm66Uu!f-}E+csanDDHzQ`d8Z3W-wXJHP4f>=e~+kDw)Ys@U-iiwlkfQ z*D?-U%vzT3@8hYEQcQIh*AJoEnZP~*ERDoU`31F6{LY6XI=TKFkL`I{8IA68Z`C@U zw~_Y>yVQn2WB;>NJEfb6L_LUBoPv#O$2ai3mdl||2N-w?t#UuTpMn~dKb10HSVL;vdt!{lx5-^6Jc2fIShZ8YTVKX-25D2UEgR|c zHsJZc(6Vn8KbG2k8cp_Npu05rQs=-I7h%aUrn&ZXVQ zvBmRg`A_KgH^J#Z+W0rP@hR?Kga0l^TfaxFdO@0v4kp1JScRwwk4{tpLBrB-( zS>TlKkH^LgBBco^AA%G9K=03Bh2F;kjp6PU^!2gy@kLr+Nx6IJ_fatF7xNxL+iNjz zVr(~2`Wx8kwO9{Z83fcnr2I+rb1tK9$143jHcjjQ0NVY}jOayry#sl9h7$jdUb-79 zJ<0DY&}$4j?P;ER)Aj_Y{e7(R21a=aWBX?~_%rxi4?yAj=pP)~1Y8x&d1xPA#8{Nn1Jl>BFE zT?)^?LaXz^>mR_>;iO-J*Z*fM=WJ;GE|9+swcbOUoWs};17Dv78{dU@=0oAj>F-^5 zcQv%!iX}cD+1m|8-vZh_VB;t_vWe0Ug2|T{#gjn$Pqg(4c$-4WrC|9T@;}GZGiaH= zP5ksC?L7(%r=y2{3|6dlCUw4vANLLVoe37cfe*5W5kC!Yoe9Lh2e1Dd{q{Mq_a3?b z8^~S(g14dIb||4I_Cu)l3@P)$=KE;GFL7QEPQ-zHyc~>q{Vs4{MjxXX$)(`y4s5}j zNs^97Zr+EJKSzxfaO&Gg#@{izRlqa}NYCNAj9S-{@-SBXNLtH|VHN#WN?J5a^{Q6Q zR$^T1HEj+0$2;@XkJ+<()XaA;#b0wc`%&L$>DSTXa4e1QJX_aU@&0Snt{Qc(9qNH6?4k9B8IMfx{BVZRE-Cu(lx8XT$wXaG3wA;ug60k8qstJh}R- zg&4zF-vOR}NK_td9Sy|0IR6po;=esULN8jAZ!)qHF!;u|`-W%HqxSE0uu%gP4{=uo z^_DX>S6&^@esoBt|sUEsHDMV~o2% 
zEx*B0$Gz+K#unvQcjowgpfWUH$JPJE@htSb7VGdDdi@{3`XaQtI`kQ=!LxA9KsTiUoE&KiL|z8|}NA^HD|gl)&R_+N>)GuqkM@Xvsed$C_j z;fiyyLq~&==aO#z9vJ=&Jf&}Z3!}UVTn|c`@+hcwE-{7q;Ok(#(R=CdWw>WHHSPT< zuy9fF|E1*#jQBjfoF~A7*0;z=Ze;`?C+{xGJc#VvNonnQJ+b3gf${ILTHBIbZRXyw zosVsq03OZ(t}_|?ZQMTy=08n4Q^5Am;P`py`O9ekCbanJ^!hT^;?vlJUxDFY0^y_d zI+4`gaA=ZT^nm(~-Y+VEjA!9D)CS0~FiJ*e_yyZ_>&!l$l4nlfbEK z!ZXR)ge4orNFJxgZAfPaZU30_8{lmsUeHz~WCcFqq=e%KnYEn0{`Wd_m4j@W9(*ZV@tO|mekOYw3-=n-?lF2kH zb%%oI^>L^Hi~bgsy_M~bJGX_>qnz#38m*gX)h=8Z3hpud$ab(NgY>ZoWG+~3+3#a)dzI(!gH#8XN+Ufd!={hA z-}0R?p~PIhL9LIjh6PQ}=z|UfX$sB!tiYi&E({jCtz*%n>Gz_d+;N`nrq5&1@}U^_ zMchxU(aP_%z@smo3Zj_ zLigseqRm+=TaFJcV^!y(;kN&dD_$rt%g9JQzY<~W8A&P4Se4?Bqrgc`rPllu(Nr&5 zo?d+VpQSm4k@a|F%E>_va~&S51}c#fQ} zn3>0I^!4Flyf>WuO~Hw*1+AiYmJx`ICj6BD{0_ENX?u8(oo2q^yY^#6iE^@1EThU< zDe)0MlAN*9nc;~O9^zv==t*8xj2`D4kDN(xA>PkL&+&X`cKykyQ57-Jz3Mwa%^eFI zA9Q`JdT&$jTfP&yEcYbQVJRQ59Q(c-E?Gg)t>9utyRw^1Jv@4-;7`sp+{4G@l0>JH zg;<)wtoOsSXkcbVOFEv>hRCEG|94p-huBNxaTo5!67DLll9!g;(u|Jp8Eq)ll?Wtp z*GK-}kJqKUrB=H^fkS3nqbJ$@bT^CcirX*ad1|Pt9X}V{2e8-uNIWM{j=-sm*o>l+ z;rVWK?!b-o)iTQP548Qw_+;XT*ZmI1Dkzz;tBvZ)EMy|BwZ~=q~N;^CVq)%3D9>cUvld9`8Jv7r9oQxAUmodE$Av-%3l_y@&+{@@@;2UIO-1 zWmk(H=E9~l7QKT8wj;B|Chd(ZyyI%M8a|;FQ_%G|t*WMtoGEswskY|YpWVtNbGor| zi)j8oUH^eU`=K|!li}y{ASTJ ze-6jikl;N9PEWZy8tw0ZcrKiWp!gAQoFQ6&Lak{;p5 z=8AtN;eN(EZpWR|*!YdO|DvnbYCNlGfj;zBW1P(?ccaj=g=~6=a9%80${OxHhCgfY zV49XNkyu*E(ds;&=C7q`UA@r;yyM|CS4-VozhyjrZ*jd9k80y>OWxxV*Bju~v8cB# zdVV9~szpa?dD@oGs{p&*C?9|W)A9Z|FvhT~>_I-2mVHkbzDCtD&?n>JLiAO^hrUHS z+m}>Mp%G`f*TSzF^mc{%Tf+JnSj7@QSJQOV-UQ}b{NqfJ-zK>}q_j?}T!XKz2CFlR z);PNf4rKSOSR#0i^4z>Uc{gpLz zalHsX=NOIbfHU3jbOp*6d1tE9-;(I}ET*w&>3L-)(TG;G8B4@3P5=K<}afirFN)X5w#`8DRV!) 
z&HwA||NCCr_C#HG79Kvc7HdF$1f+YkT^Dj%*^lE%jW;a_J7~aSWQ~3 zQierEH6~;1Blq@nVICiN@YxAt6H-=c^Qovj+Vw?hn9?s zj0gWV(Zx=)=gID`r;a;|R(~GMX7fioL7b(Q`$^$LCFh_#yFz52_g%292hJ>Il5tgm z=g)p%R~9iq=AS-NHc|Bn>N<_3r!O^0{n3V5p2Y$hdNU(bZPgd9ZzTUjyqVF3 zF{?Zy{jGRW8oSw(vvQRv{~%@eL|Q-o%G(*E3)GC3B$m0$?~J!}74IfGztAUoTGF+Q z@|5%=wd{j~DO0o1&lBOF6`_es%N3vKZYg%pJVADtP4xF?(hL51c8PebH>=M)*=`>5 zZtYQauKo!Rou0y2zA^bB*?sXX?MS@yRL|?-$1FG{-hLRQj1KvJA>Yl=24`4wI0z-3@&J;(=Y_zWkP@`snv_Az3i@%(l_(aq_+r1w($?iZmA%G zJ0GPF$ggNl-mTG@9VE7>!{)`&m{ z;_?03=Am@IIc=Ix=A*=Zb=co%HMat>rT-IA6dc=n>t)7z94gaTX_1q;Qog8&4$kfYW)LSXvdDGlFBC{vz}N?fM)GCW!jVh_ChjP^~!@%Lf# z2tD6PUQIm9Ohry7nIW2*1I8$PoInb-*!dtOR`7PuiZ8d2Swl86j`cU9PgUsm7@G1j zx{gP6E!Sr#bCVJcVZ6crtoUujqpZWbaY|H0=X!klf*!1d`S);|M}wEDe>6FyuaVfk zK1idyo$>QG;k1Ph`j)mo?(L~+YsY)6N8xv%--?T+-p&^Oc804(Y8#Cw-z#|up1*`2 z!`a_<7L^gdk=708CuIGDi zGA`ZU^FNu^7x*h>2Om=KB5`Ks12eana>=O9YU|SP(wm8aN-(q2d-hh#D9+(@Y_Pwq z8qCVc$G~gDdu8-8?`5~2(ZLNR1+ptp?B4rll-6G1#{?FMr z?Lj3o7d4C4CSpwFov5un`DLHR>;sZho6|o?u2}Z^&EEX0VDlLszQ%u+@t5J<<6t#| z&2_9J`#qNWERp?mo-NVRELNV}>GUN|E2zn=X6oC&klcRCzTqmpl81ljZ5;}Om%UYz zM?H0C)Gje~GJX;{2Crytv|xnK6MiS}H+GWm)BMWQK>d_lN`0N2_F3j$w4emLoU3yD z=qxoRf-mK*?Zw`s>4)QFb~5RP*I6_8zS^@tX@4H;Pq;mdo>XA-_W0MLZ$(LRw4R{x zB`wd$;lAYf7Rq+{-J2vnD!5zD?+N6Y*fV3IvGZhiWu`qDzvV!m!iVpT!@*<#9_`PI zJ;0y*9lY&+Z(`GZ@p&k2lt%Gdv&KF}Z29*hdT2*m>hp_r)zJe6@o?uVyQ?gK1IhTpBIejkGm=rZn7{v8E0?^I zy-1`cUiH)F^+b0S_r@yq9*(>Tw`4vg11T+ieR0SHeEb_eW^}c-HxqY%sm=Nn-f17_ zl5JQ2=g_EDcsmn5*YGwc`K(auE#5nc4BsGwZ*cF|WHn6*1VHvv*)U4Q4l!>_T!nSZ#B?+k}p1M6H9r8t88=j{aQB zdcQYzYp<%SaW=fBlT$`qnroAn`WwI=`oQozQfj4DJX;;{SqG9}I~-l?t&W~wss&%> zt+U{F8Mp)J@Kn^*KwswB+L7v(A|k&At{1s#$|Ggea}c&(pPM!E35@{a#%+ zkm(A1y;4g)8$SoorjC4PQ=FORPAS9wY1VK8DqbX&v1)A07MiNP1udCN^Co$t8D8(} zfBFWq$?z<+o`NR}>Ff}-HG^g5H0!FPJ>KEc&*5}(X}W+rtx{-yOCg8r}YEOGwvp#IxQwV!t$1-C@cF7ob{%QaG5^T;u&++N;H)r&?%FJ&zRe;&%4* zxH{7F7^~cRSTw=iE9l8o+*?8WnzDhnK*uBHY z&1HMi~o6_$8GF0$CE~Ct)})>JVh52 zETf%|s-q$7+R%|#Mca#TJ6VEnfxgLKR+6tNI367zB)+}h^W>%UQ$GF1*);tuUaY&4 
z-@v)A@7VicYJ0)mZ*jgYjIJfq(P+#_`Yw3yqW#H~ybbmz@Bk~(w_4fEC}p-I`yzb{ z)+O%WfiMIo{ViDKusUc%crB4|f&1~hI zB66tbcVd;&itfPwzdogPsEL%y{_T0L@A~2IZ}c|xW<+p=zrU$JJ-v*62gAhHhD z(6a9j9EvXg`=>O=ol(>Dx3X7Dq?%00M7oi3;^ll!h2>DVmU@|)$;jQrzS;jItJcfV z{j^dU&wYX(om!MgseCH__tjp!TSm4Hz}flmi#@)ecaxYuW5lT|kUt{xjL&Dc(`2(| z@5E##R#z@rdP8xeA--nbqcYBxq2Xeb_M=0k(_?mdQd(_Lm1bmD$fGY%YPrjr_zTljMX-R-8&+& z;V_vFRwM1i+q7dn|Fnju9?FZ|%-^0z{(Wh4BFEh{cn1%@jt?G(gIQs9taf$?dA8!S za!P7z&mZ8$j^Z%~_j zx&V)|n(s+%Q*$`H#6s7IYo?Lz*rH{79$f?R_aYvnuBS8I|1*5%lGJjeYc=3~87wy9 zZ6d{-()Oa77PHe)_}U!5Gv=KA(lhe99kq#hlV6zG?ZJ39isUkX)5rbxyvJX7i+=Fz z!Ah^x5?rbc%vfo%$Qs~Zve_?X-8ZTCJzD%GxHrSE15CPkr>455fRmoj!CI=ta9-o{ z3<@4nd*Y*kJjU_(oRP4Mt)>O5tc`sY&zq>d9&h!8T9T`uS%e|J8|%NEqn7EkBs<@} z46ADByg|(H7WkcT`#IW~On_DJ>4v`1YWhZrUc7B5+BXD#oAna1qVqCs{H0{HP>snk zI*>+mA@O6>RTIb3Uhh`^8s$zaVuDSueTp8;gVXtF{7UJAXkqdWd%Jfd?53dUQ50Ok z5)z?>vZKW)$qd+z(qx8Q!q$EQ`Y4#438(3JJwbizUEkx^@i<$--F7s!H@u#}&Ax@K z*Pw1Gf8HIgJ@9Bg?SG!!SCK$0b{pE`i>ssiN%C5bmJ?{;uVA$p4IA+HWHRreMXv|@ zM2GKy-BV3t*;rNIFN1l!`bX1&F`i`n_+(z8HTf+l{7^?6xK?Dp35E^uri$78i}4}5 z08R#VlK$>OlAl0JH;~kEsJH@UQ%I#NJ3kO-G7mJ6)TV(v34hK~dH{Hr(31yI(8#YN zaCs1}-NrvPLvQGAsBAYlzNdyo;D`Ik^!+uPecO}l1Ut^%M(7_cl2`(#Az&`TtA?Ot zwM62iK5Xgs!Y0PUbQF*9F;6icjbHFk=i_#3lx3v+N;3OG`SaR?vcXshCSsikiSYk2BG>6A~EbUv`u!D}KH_!neKEeAN z@O_9Hlg+V(Zu}hux6{~WuzrvKs0rR-r1K#@cf!N{_^_2)@yBp7W5uoc(eb#j87}?Y zpT<8Y&*DXNy#{70b-xOOzmn^f-We^P%{cy-EcGaD*kZ8H1hF~EpTw@h{f@lLDe7AT zm-q0w9~!HWWpnZMS`vMoq>iSAS^YkY?q5&qA7QH->HKzdF5$sD!nGK5|oglowdF0Fk%6wHijf2pn|aOjV|Ua*{|olkC8;2q=H z*YtEmp_Rv}zcWm4W#MPSSu@m1xkz@Z&tr5q zvt}2e>N_^l3{J^f*(8?AI^41JcRk6i^WqT!4<;%XtrFhS^bmm2$G5pFS zbf<$l5{;DkD}6H7z9+uPD10KGivI5MNr|WbyX2lXO5gi|UglZOz_`44=Wra!n#DuJ zqA8OTk1pp&za*VAUFZDJ^bzvxLVt;`ld+iVd=By~`G$M0v&JQ{+n@YDK`nXfAYSL6 z#jmW|4TQvZ>C^13#jIHT&Q)nFG<}X_LoWT{e(<`d_?=x#OZ$YRbUkrXX2DA}XGf2W z#;5j-!zNpF50+bhC{v0iGmc)dD3g54LqMEaJTL9;ld;|GG4Xqx*oG&i9YAs(+-N>0 z(R0?@o&Z8}amyR)$&T9Z>J!|-j+1?vGkUI~qv@6HO{4mWWCA7hY~i7EGW9G^UT~dV z{5sOW>@1reWfvvR^Q${buEP1u^>k5pN4htRR#wG`jNW 
z>hs|ro~%`WR;pjbKV>ya_CkA@EKYazG|V@F^_6;}SsiG2Muh*wYh`TwSe`AT@}I(? zGcNsBojDC{H>#fD3p(@2yGZqGSbpT*&VtsAaUMd~mwK~O!HEv)I7n?7N$!Qp2em$l zkpF?dhpV@=qf^dG8;Z_IvOE|iiROPjWqvQGPEZMyWK=w0wYZjvrVf{I4qRdR&=|$M4PWMQ82? z=K%gbvjsV;Dtn`4U!ClXc|VSnqema}D}(SmqwdLc%?S=q(3$Lbm$`y()mfq-`}eAeYWNO!np&iy3RUFf}E^F_%!O^(UOIMAOJKI(fOiC!VDdW-HnBXXI>_xIDf zXH>JB7GN;`(fiVi);hh)7@ zGo`*H^AADDK48<}yVYAG=~d#r&7zRc)mH`V^T0ht#Bep(={t6WZ51By4tE=pZq}_e z0p~a#a~FC~=2M&T5ZP&=KmXf?=Nd%+2J_>?wT#Po;aX~6p~Za=-ldtV!?nBzXfOXI zo*9Hs)xD9mC#%u?7FjecBIdQC$S>%@U)az0FdU>rXOw2`eb(5#jM`B!${MAITpg%o zts`a_h|*txl~pH4X~Fs`^&;MP0PSRS&iCFCsCr6N(8YT<`TaUxJx-pFKouS>aEGy$Ry_VwX!$d_&S#Cu zNO(=`x0+-&Y0u|)K9aP0i#5MvqtD|+Z4pf$6sK?brO#lx|1nKI(pxv7wGZqT@<&I~ z!cja~_DI-~lN_@$)V4^>yIb9xcEZ^5nXVEnarlaZEnV}ra?3$)#=KA9pr(4BepjyS!H7uXG} z%-Up4{Xlj9nslBa+iqY)i<^V_rTgd5lw(OItHf{j&Q3kMNhDAi{;3sSxf7+yr#lS}<9v>$A1isJD)_sat+hr;4_YwX`_u4dw!14){d=?y z_kLS?b`hvkKx;*+3s`L%8uPVMAJc#}I6D|0{>U1WBYrndeFx55UO#gp@8ED|=++nh zW-r)f?)Fu+W!H+nupg$zEBU=+;F|u*8TkGM*=MBXT))TRboHW7xtlgE(zayGrzM^E z8BJJA^Qwa~6`T)g&(Z903mv%=#9^eL8GtS@TuUnk>s?KU-&=71BU@Pjr%tF(CS3Y6 z**7D5gY3|5cVK~;xv7c=pVOsx*llvY8EpDw- zx;_8!Hcgrc+CmgB1vz^feF?u_BDqQE_&d$7%jVLDcvOp?9PLgj=mz^C+$3OObIEzU3&x!)aYCJ##I+l5bU94(68=C0JOfZ)7 z3n$T$M%tMZ+4*H!(^2z$ zZ~h}WL;K=*)_xtsXZ}{Z@v)k&5{ZSkBiT?1&qQLSob^P4>E$FM%Q$bMwM4G@q@L1W zX;xskh#=ASzxPY_^f7X`9&HIf(u>FlZ3#*uvQJ&#N&oH^r{!+;LQnQ*Pgam#Luntu ztT)foGHpV#VvZ40Ho*6dFdB!h-$2|vo*>yN@jpSjNY1z*zBo@DeVUZ@u9nW>h8>G8RqcZI4J93@}-;W9|r6-fqJDR|zjrVi1{l|PjefQJT z%zlyo^lm+{y29pQPwV*};V*TkXBB-ZU)1)G;#uaxe*EkW^Q?!s{NL`S|B(GYlkN87 z{)~}yc~br8-%6HtUJm?yNc#jW-7Ifr)+?t_XC`Wz_Tw8?9U2dIuLAFvbI|T6tnNsh zN-lRfoVZMF|G>>DVAXKva_=ogb#SdiPv7>P?46vncNd+=3{>WDVq>TBOQoKDR-m>4 zA$|Dk)j5YIWR!3yKiJ5BR5oZT9CG%~xkW;a2v&P&@pTb}{?39nF87M$YrKh}mgyixREX{;}^z zeb;pFZGLnK`#;nB&1gzn@3-UOF4qFg6a!rC>Mvkj3cs#mvaAhD%i7dc;`VoV*zCL3 z1|Aoo<>q39t)+K=Me{O0oW9a}IGm~ehWLLIdZ&YumS8mu=W5^D<4smLZ{VxDY72Vt z;`3bfQ|flqULr2Y+OJ-$yAue#*we>awv5Z4rfo{xy#&Ua;B$l+r@K2BX@?r&@itgx 
z{3to&bMdMsN|%6;Q^`JHYgLp@X23C8jDE_jrdi|BpE>FU@aV|WYN#za@ts8;!#ur) zb|m}#bZt)u*iX^Qtf4z&^)PPqe7$(C13opUQ&(%rrjXw=MIWF8O6tQSc`~oinE|+W z9*VDndB%Om;onYrx>;Yj8b7k045k$^@E2fQujWPW<)qa!K&XzI^sqjLN5-e42gz;U z4%#Nt`a4QfezZ1q8BPBw_-)nuJPa0TSvunCGe!B9Bs2l^>)`SYI+o%47cffJcSb{- z)7*?g_Hdm&n^xo49khK84L%37=CEuJ*Spocp5&5qJC0E4%3B|j&5)wjYTSuB(J z@wHm?f$TK9?nW;CuGO=20tT`Y16w6nz&=ks4#3$|0M{f&BVWf>WJOs2|u zy_#fPB@*1G?k|;JiIy+%;yO`xH=>h9MW z=|9t zrr!(r!Q_f0&m{eeZ)x4Wywg?ZW;uTU4*yrO%?G_x9cE4N zH`Hflr#UFEky};*kHNz!W^y<9-vXp_aBMP}cfgmvc=e>-Lw4W%7cI#QL0^2Cu54D# zH0C!d_|6KFmr$79eV+i~LHHctt>2K;CjKY$Or884ujY(+WQHqy_oNq+J-qtS?%QZ! z17(w6-wbwV<923}+p(py-C4#~uT}pT^n8j-$AA)UB^U8#C6fD69$uMK-i9Zc0E zG^QVoaO+cCnukkMaPSKtL0wfbQvFb4hOG+dA#90zO#b+BepnJeb3@STbR$rr5n6`q3C&=@=eh7 zmHMaC$jL0=T3Frg*D{~>YR+2WdzAf*Rt&|_?VzXcx80M*U@p+UK8(kY!mX`RpNWXp z;mAkyB$kmm$?I@{ zw>LJE_yS(JRQ^reIG=_LqB94wkgPt=da2Wj??L4I<4WJVluPc_k8x?n$4j4sj0=xo zoymVH{f&ZgR+83ZJ=r}pCvxNj%~Sm^ooUm?cit{#o~OT5nlIS{Gc!5qGyVH`d-^4b zgikG^$XUyPW4^pZgO{WZ_jFr?EIPQ5>M)Y7#Q2!*~6Oy3LL(2C+EIf9B%QyZCmGZkt3Q{|EA7Pg zxsqL#Scq$h@IU9rlIfQ92Q}3_LHXO<|C8(Nu(>x2YN)QI{?9_=K44`u^G`VU3Vcpg zE_*JtVyhp~nL}_ry_p$G=OoDn?0p1E_vLqA*E7lPC1uLY2jMFB?!$@Dz8J1quX_Qj ze->|+@jzKM^S*nRvCML;ba&wia#HHPu+K@Y@53lNu&ySXA*lK^*nCpwaS+em)D4Wc#4*sVfSNddM)6$+IrR$Q8l=QbWA9*=y z4h1Xoewj~+e(mpnY^IEcX1({GK3saxIobV3=(GGTo#K#8@$6vtGCcP}pW=c!F%j_D|n?_{S&@FJO zP-t^SzcOlhCD~+O>o@3SYhL{evKUCejuL}BNfNz^d+&pqy-6mJ^++06i;sGqH~hO& zS&i`^*hkWU0owLh?uj^b6;Jn)7-OX9_Da64ISF1$Yg+K#8PU5*3s4{aC#o@XCDjZ6 zemp1>c=$#*cy|#8Hf6=PE8C1et_Noh*!UYc`sh?0~BTb!&W*x4F?Zk zH`x_qGp*>TZEEGyjvkb|?c{OJmA z_D5R9lce|3!SAVBf)j9kF|048C)r^!dzLR4*Rz1^rpEd06 z0i0Wg%cp1$r_+_;D4&CxmFP(SGx>n)>C!XwZCj!36~IcLp;OVCR-;iT#C-c z-rP>NU)BEBROi*ScqWN0bH5R;osRQ4;j0T-o=hXAx|i(4tO!37gsyb*3iW0cLn$YJ znS1w-k z?fMXI%|Xfu0Eygd;qwsIaf$Nhk>^!#zP;!0}CjNH%n-U?c> zPW*SW_$m3GH!9bNC1q!X^i7|G&0sch2>8=c(}tAW@)3R5>o;UF4D{QSn9UzIr$eJi z{gZ+lFM#_dSv(I~3mVgbTr&o=8cn-NZ=E+^#HFl}c!cj7S8!wjUw;ied%-m8QkH>w 
z68`Vx@rIE{ZIRW-;GXYkc0%Y|MDpR@e(a^bR;8nsD?79dhkaAu=}(?S8ABp^;Ok03g$I% zwi_w+B+rZ(Ede8Y3uI>KVB8$SB7YBJ@&&W1s1v(9M2QV#y#bwH^3z;B$x{ws!>eiCyR_hS8Z<#oneXh1?$Iph-a;dvMZq6Q ze+@ny$zydDzYO-h4(3&9L<73th`;*`zt)1CRaXo7i97kV`lO$oZ-%kc?9

    oy}=j z8zsJj<2dykul{E0IFwg7)7!~mTS~^6BhAh>4PbhH!OK1Ofwol(msKYj1IW7i zo_N27FF1n4O1m#)l)t8>}*?H-KD+FDy7Ww zok4#isjNY-1@H7QXW(4M=F)fXj@RYg&8nQH?BN%_W0_;gYyn8)ajkT}OrJKZ zwKWN!hQ_zN)di(%=s;U>$2=N*G0Y!wbv>*)<5zm=^Wd?NX3Y}`45nLGs9~b3^VIM- zoo!1qGOCsHv74c5Z!*j1(`W4CG0(Dc=wzHutnf=b%NR*^c*;0PbJ~>_eYI)?`zVO6xkE%MF^ zv^)kvvMZj!!OSl|SZK*{{KgWW_jutJ-hahk*8Id9CAYU0`1iP%-9f%rc0KyC4@N_N zD)a2IiZ1T7QAhR zulX%$GpG`WUIU||Z(3?+0 z8Vx)!)B`XR36 zyr!J?aiKfuV|}Te%u4#qTFI-68VQM;D*)o64S#PeBftPu&cX1%+1(rU0PkL{*+59IMGA9jnJ?CPWks~RZHB-E?MbO&+>nV zzkl#P86#+i=gH$d#GM}A&q*AS$??3yIezD4xbXW8nAO6$^jZ%? zZ9?gx-n^MkW$*K+QNKUDMzG)ey_5d)A;y4}&#&@qwH}-=@IMxkUFx$l)+er>E=ot+Q?m5=jql9bm-+rm^pZ2dBDZZMn;jaa zqH?7FFVUg$d}VDVk0@laiwu52A9DiZ7x>wfU1t>ILfi#g0%H*loyEh=5m7#= z-*YwZxf(yR;_4#s>u8PNp+iR)=g(f<)ye8C{qQPkJP9r7bzLhyoakxxHs8kE=3I;| zywgrJG-3xG;PEMsupI_j7cvvCGgJGhcb+Drth#Ssv@Pj7XNS$3d^+%!=fG?#KcDqN zUui9Gf!&!X`2e&n+MDb!ei5FWgRg_VxgXigfb}tGUqf%RXL<6JvXglebEf0yV>38s z=bGOSxpBDZVQNVem zcM&g}UQqTye^?7PTMbvitTQMdvyH51SU~_2{KzdRei7{Sjgm!NnI@(O)CM*m zz&W%1PoU&gQAUHpYAVBcxRM#Y-|YVhaLoFf>`s@=)v@kZ!KK9O6-6)W7q4* zWWE|tV!4xXv97pyk++tpe+&HQqb%bIvuX2Ns5`&Vuyr)$RW39f1H$h(w~cXt@*4Zs~kgjTiVg1u#y^u*XV@jx8P!OIH#jM zHvbKH{lSo9twpZ}^X#L$MVs3b#x-eD<`Oe6o~++>xDtPLiDyrfQ+HN0SpAXG^V)$L z>}fH2K7&at>s*|@w0PPV-Puj;CN;M6{Sv9wQpeu-_#%$o=iQy2CjVhAe8%{0 zuTU=gAUvXl{|4tXN1Xh+?Cg=#9FsMjy$(OepPX@VA#Ptwx5Kfm{%3uDbx>aR&UW%! 
zL?g1-!kavPbv3;X^Q?|aRB|AhmCp0&gXi14Q5~%9c>9pn?l9Ju9py3;-i5T@2D2Z2 z*Pw05rOht0S>06&$L{d!cARXWrA&^&MDolY9~UW=xyY7k-kU|M@z~ zlN*!y&>rk4`R$cpT^*J=$6*K?{;78|dNI;_EtE-r=btcHQFzSOB#=FI?!==`Z2CDh zf5!g5R6_$$d*j*r%A7$uS?Qc?gb8Tw4D+mT&AAe1dfozOFLgbL_1+5q>12NxxMK?* zWhb6n(LCEI^K4qTk+wePyF3rwldpc0T^&axmHHUMQWN*)KTh# z4#%sFd}E@ker#(8s96s^iLd`H-sJ3+oAK~gH1+bmyqN7xrtHD48W;;(?A@Fu^bAYt zLWk2b+=buo>8rIVZ0{7fE8wSUX5emGq+}$$gP-@~Le9llPrARP zHOZ3Q;4_=t`|(HFO+5Qpu2dp9H8=5#m1%1Ynvr!K4~Qe*A<2*79*y}=d`l~{36{yx zxQ@mwVYSDprMBx9BsLXCE=Kja?zCY+TTq{o+CQ?~P3~rXHyPDS@Gfinvbs6B?xWRz zBg*H}mgInEw~_$Ul| z1t5Nn({*rkm8*ESPr;nw>7`^loW)hezlYHKI-QAiKgc7Ef>CGCx}oiRJ&_GXYjks= zZPm4dGuYY}>P&7yTH*in$^J3f&7_UGXOKbKjMO(4&dFSVvY;l}#W^`_1BpGyg0eSY zTRgvk?ymIR4v!XtdIkue!6_LFSAnv(eoOkjiNE(M#=Nsx*8?|sz1i_%ProPq zhKl}v>UnzR=^IxP4V89OcmfwTvaRe>oW4vlFAw$qBh)SAt^VZRkDp`}=RB$Ox60Hw z!e7pj$jZXZS>1t)C#gSK&*_n*Pn>;PB=Ia5G2xyf(cYS-(pE%1G>99{5n&nA!D)bfN;;)PmieaIXbIR-e7WhinEV zBLt%huGV4`WoXW+Q-8wEG8i1|*$a3ygsii(MRej}8hJbI{~NwdDWc4rs_}QvKc}57 zV7i-^KhLj2X_?bRwCH(Oi#CMXoXfd4uHMG8Jc}PKJgMldQr2|x*!R&A-K(C1K^h2l zr1TQ5RfOG91+O-XXtGbqf2g4xI=U2=abEH4OM1AJ#FAOOypToq?aP_q9r5C?-ij}; zix=6m?ssS@&nGQl@jFQEUS%&(b}|Y}83YyR#a`@Y2Ols^iF#zS3vVLJocH;A64|pV z@JD^fuk^%Aedd49QOJ}1`RP)LWH^*g?1*+As-|RZe5s!3#)$=e{|>d3{}9d22tamk zxtoN_!|1j`?wNbfD9FS9J{6sPN|#pPNaFa;qR(!ush3Z(m=n|f4*%=ZGuOWZuVW=rp-wfhXz&Ak3AIB_CuFc^n2W0Repo3Me# zTDNcfu0w04lEL#Lp0%Qf+1j=`C~N25Zv5V|I11il=}u$s{L-6qVABd8GoSPv z?>h;etJN}8t zjfpr`UwtcJ_JaCy#zi~N`r>jjSciH4ar9>#pdU;hCzoNc97cQA!@4s4uc7=(CHi=? 
z8x2oZW_nBe)9yLyegnrM&rNhF>(aV-rz4N}3a#j0#DmFL%x=+RSy?MJZB_nWT-}Ik ziPeUHTw9$zeRAsJKjB^4(eRh3x=#(2XvBN4&J0g&brj*#!?Ss*PTtMVkqtp?>VD?V=i_NRwwYZ>y3&E1viPwW zet@2K=7CR!)ie^xZictfx;w~HzO)v728i240B7?I$+3=acw5=#+V1R_(g7x!EzU0S zzju{Az{iqq@+&qM(RK$Kl&J9tZ;#?@vsX|%T$`rmKH${iSEs5anXWJ5YrIF~IRH1a zqu_AuUu7Dy5fxYCwDtZUpik+U$BsrKA4U?U%a_Y4F}PKsq8*y?`5w2brD6@%qL$hyVq|; zf7V0HChPl^jb?7w*0uvD8Qtsnp0zA_UlLw~1IfehtHd4X&$!OZzBBh7inA|5EAW%) zm3)U}0*yp(b|z2W?Rh*%dz2>QJ#p3q_Bl%3({a7Gcjw_^d-!aGab37|CcW>`cY~{& zl^zX`%t2Ns%^~EO-A|78m+`7H6z5#Sy7VhO-{qvYm{#9_vYh3f{ec2+F8JX_`oY0D z<82K|_hi(r#Leu7-ibuB_9DH7oGZS7R_`iG*TYH0{wPx6_L!cV+&Jjsoq@yU_O zeD+!qL{?zWR!?`-_fYqfuvz2DAH11$G1;doyG#56{y)W=>{GkL8_noVTUe%_Kc1!B zsGgkbb_iJSp?nG2N~7?nllW-R+mhtH-dhXeWY}gOn4Cg!HinrsU1cP_CG30QVm#3i-dsT=GBTQ-wYtD-E7~&(m&~KeN^Jta4@qS;#2w8vmw{@G8=)M*11~ybG5fLrdw@$km=*&#PwK`#qFjM&mLn zoR*>^Sl{XE9t4+7aLZnP=hECuaCRAQIacp^5YC=VD{iApmx7)hdUN{cP?|RZ1{u-0 z!8XYKmO>}z*DKyp&AImPDW`$B-0AN4n0?r? z#&`vYN3xVoIGYjNg|1)s9k24OI%<+aYDvsBmQ8HOxr6cO1LcQ^NE-d%C$cl)ax~8c zF&Q+OAAXtkJnB0UaB_Et@~D}&&uaP1eauI5_SCG47dMG4zkpd?R36M%WyMIw#4_T0 z0xyxd^2wlojTh_G)nDxT4oj*BPBivW+)qw#dNA2tXE$5lRp6Z&uYFL`*LV7HIaej~ zXUX`??jL0+d&T|qBg=gM`1zaYJ^ixWEl)?wXk2=`>1FQO+cQ1xBa}<->|uDC^@UmG zH_QLa)OVNf?7W@c#-6=Cvx{ZwOYb9dg6TK^71uteBk3b;buV*=yVzec`n&p_IlVKL z&#vm3%dDlI%{X)?ZyIO^xj(ePG5!0jN=)wcaMD{SX8FCkYl=U1peA`q<c+Qzj zZ#J`pPx$Uuj368#BB)17P3ddqzq3O1+M?IE11+g-7wmf$dNBgzIv}QhlZ@;1*!Kdx zGyk`b>&y!#%e@^9&Kb3wgs^52x|F+1*?l=2s|VjR$oO%#RG+PUDOUOv#H>3# z(qBegB9W7L{z!4(LK8B2bEqfTK`1(r{E3wM_s^b_nzEku3HI_Z*q!M)Y+%MRcZa9|%iTEVm34r+2=lfT*l)Lwj8 zdSf^Ee}^Yoqnp{MA3+-xkI8368nJLMf;+K5-**N#Md!^poF7`H@?v~c5 ztw85ISS*9j7qq4lzBJ>5Ci1}XRZZY<7(LH;?lc@Z+CkbP%%>+5}RW4bPtJycM5a$D8fF*`9@8gePO@=dB>L$DOn2bjE@^6+D{&^JEyN ze{dOG+TnG7&?n((&K%0#itXt6b6VK!dC?7~m5RvmZ|t}uc+=Qj)-DfF(|qq_bxyL* zPw}f4C_U)cHQ;>i?iWg@r%;<*a^iPg_ufQl_Szh${y8kDy%KZSTXwHZZ{uwg^>8n{ z6SmflW%t&V%H^br>|WgqU%R-Q$fk>WtALse@vPmfp-lD*S)rYpEe3qqugG(`R%tu9 zXR4td>V|`V9m$SDZF1&v+WJt|x*YE|srMb;Ca0=qeeooAbS2K*L?^OlHCdmnXlC}& 
z%SqF_(Or$4&VtwaLh4_m{u$cvOVCELh)r~KIx zQk;YKsbD2HbR^u91wK*j3u$69=uW`Ro&G95eLrICPja*MeR#uGDaOWSl)J& z&p^+3WwM83JAPvWYI~w&9I5}xuS6I}($Xn#+yTZiHZYs)OC$Uj@LO4Z{5@Q=8&J-f zf0b1{PH)cuB|FEbCq54M=HqG3IIp4JDzL8VD(it;>C>+x`$>G+S>8+rz#1IRZc2$s zR?_#rKG`KR-r+>GXRP8OcxArm8L-!i06VEAV_sRy-Vq0{qhVR|_XgUM@mpVs!{Bx% zu2n;EHI|cAkqvh7Ji zFDW0b`rIcYM%&5k2-=y<)9f_*F>HUYw(O~tdHu}K=iJ)#Jo}=zC21u0ehvM3jNEei z?Iivydn#Q*XR|UP5nxVy`U;2piV%N}$~TJzn<+Pz_s{5QE3NTkcv>0$=g`D(bulfzTD#j3MuXJ7j4fw9$7n5jP8P_n zerNgqmY;usZAQbY)7*!=wHe=f(yxQjKNPfP@Q-Al!}W~j1pU%u;XbO4_l2KiUH8WCn~z*@I}_ z&BI2=9%VyEz~~CE$COEy#u!gBN1i@OKlew$c9F8x*>*Pv|((gv^&~LG02L-M=eo%Z2pft&<@y_a-?G976O)1KZ*Hro5X+rjuE znDnDT7pte5_mXd$ag)-lXG=1CSh-zjoxqPqA}^81Xjo+bijJtxjuu1cKuhw-iO;XG zrz*T-);5jt?o3>H9pvox)|C9_iY0Hum1s;Q^bMllIk9=1w=Y5e^I(+nT)t65Rudmf zPyc}nFM~W=$*SZufnH{3vT5kc$o&N{8xK~_ziCFJGDg`5H#UR5iQZ(4<0be`f^l+x zx59595?#htvQB$D+@c+aDV-diTH2Dk@%b`d_&eOos`LA3&0PAInaZpH9Ezd?;F=zM zKU^6O*5tyD4u{`wy`9mJNv!#7TJ)t~iF=X((30-$#R~d(Cpi>j3XVQh;GYvSljFQt zxvWl3e>1uHX(NuJt+i1*6PK&A;UTEHndGvE#0C6fEIOkO)6_Ftgu8^U9ZNg^!cMcx z>(%t;MzT4Q4dra<^sJLF9nW?c+e;sD8OT@S(Agr(L}eLi&1&!O$f^?GeGgy-rbkkaDBGeg^nh3s8PBfa?;!U1Pi;do_tVc#)?@aK&dG88LCL*%kPYrFQ0sv-EN6C? 
z_TH^TGqdAuUv`xKYi@vmfDC&MQ0Abyh!rs;0eY;9;JY z=EuK<=@9p`chW03v=2N3D?6pNQE$f7a(Zqrm}kDYR7+?(fzQf0v**)|!6^F9z05Rc zo%L|ge+TNz1^@2x>mb^)3r*>X-%-?_7B_wFO*ru~svpMNJ4rS>LuF^%$Z2ohBK^QIm{xrAPb#9n1L~Khx@c2mS~x=SUvrI~dKPKgq?I3Et27i}Ez+ zemYoBnR0X`C*5t((wC>Bjc_h2;Hzo}la-uRH1C7ilm1jiSI$-L?XQf#%4*+zL2RgH zIfKsU)VR|~v>*8&2A8esD96VPR!=hWv%)Q74!Gd3aLv z+|4X~MoPNEJXinz*^7s%=T4p+Uf4)_<^TT4I6!v9`0?JpEH(2Jd9SoTTy(7koh+k+ zi$Ib8&jylFatB))#~<${>KdkfxR`WHXPaav>(viguO`ih06O&qLbC3e@^lgE_Keh-VhpE>Ny)cYh)xtu4xmBnN?$noy4E?Tv* zJl(yX)Thxa=)w8?&mz1X&huTxBaT<@Fj|~*4bp#0TQ-T3J(s&sjIa~531)h-tA5A9*tK|@%@{Wep!r{c;hm*pLw+x z!N^IvlYN)Y=cuDCY>59EBmNTSGt-scLR&3L6}6wucRvW5e(K8Vs1LMXub{mp2(8r8 z+mmKIe)6?4U%mp}nPdM-E7uffXRA9gaMoF$$$F1qPsy+D0cvxlKO=#a?5?5DFgn*= zTa>)A7f|+$_AaYX>u8I%(6AeDaTR>VX>-rRkM2dZu!db6P41i2G6(kU*!J^$M9#r$ zTR^X-=H$*aC9So%oLQt~0(aAHokus*ugSi|>3L)p+{|H%J2y#Z)x8Y#RqK=bMIuM=Ry`Qmyg}8BN(O-Lqw6jW}J572@G|*A4 ziIsB_TxBuYC-57E`q?-*6;8>xeG%Q8+41{0UPV3GU+F5?clT^Gn#YiA=B(di7b`$q z=J#pZ%B&l1qL!Qumc6xpLsEf|la15U$T-PqtbL*QVjg;X)A_pi(wEPi3y>!EmxVR2)GRjgJr$+g1 zho1vca*Fy6p$!e#X;mD4*6%u~>4u8wYRxEOTU4Av7AxsO_SUJ5_T#{vp>zYX8paxD zu!7E>XGXLMui6x~U$VMOMTB$oT9%=D89p>+Nm+rIGnMCnw2l5X*P>pA+gbmUGxkOm zy4nSPkF%bvZxP zi#Pv)bLrLQB{GH!GIs7FwUx6`ASpCNjv0r>#-`p4NMdjZbB%IoDUm?~70 zH_(GQcs7?m+mFw>1y{2p!#8TW-Cs4o{-8y_O{s@y*a1f6vIZchAFai&5Akxfd)48c zY?Z7ENM=PXSS*2evivjBn(@G#KN4FXiIcUJ$hwr5yq_J1(z70o<8yH06p{#xjN&dt zO9fi~G%d@BXMgbCbT_ePX3R(Oqd8gZ8qhB$|Lk%#14q74|2(CW5AZp9+Ue!BR!{o4 z872Lhw_DStx^!ZZG9`FDVAg_0JX&aXMwK(_nZ1;v4OuNSO1?D%`QX5WF<-g(oz$({H@iL4b$j!$bc$Qa#YYWpME-vN)EYHFo_*&h7N zP9MX%rqHpPD9Jqk@t#)4xy5uX7#>UyvZG4Y%JwX*VkTU&qs|-Y&24z>~r zAETCwL8{h^R8EKeO~JZLaJ*z=W za-#Y)+(--AoYeMHOHID0HNz|5Z#sAeNi?_+I2m6RNu10BEpY3Fpyu6%) zlU+6Y$~E6E>RboHOCVhdxAy9v28*o2$oNSgH1>CuU1DzYWGFndH`;I5&lpfMil32) ziJ)XQKc|j8&eQ!0cSaX@E~m*k{b?c?19-p>$z?iB2jcpPMT@hy`?rF58Qx|+-`nb3 zp{}c3WhKvdo@9@M>^qRL_G8GcEs32Ahn65u^>$lQx*5G4$?g7v(@(OetXWJJW_A_G z+12ZKqvkwTP8!Jk_e)~dMQ}|VmYvKm!kw+&Sg4jr>PD6@i4J6Dpg-C>)4HszZvv|- 
zFnY`9W4gQm?>5n}8}TzE*V$L30scIQzA2tR#DBlXGF#xz^Z0b8U!}F$*+(#uMeO`` zyxBy5Ud4kC^uOQqMsg2-r?$HIe+OE#(@ZBlh@Pm;Ze+V)p7Hpc>ac- zz5k`Snth{>AJuS+v!5QyqP4?+Q?yIG% z%3D={^TS}|%(`-Puc3RH=}auXH{bULiF_oY9-;J|YR)J`Pm~|-e(JwU=_}N~Ssi!b z`=4>>LUm>gWS7_~>y;OPP|o}5seZ|#%JE4HQ8h#j$>7Xd>FjI0g?B0A>;41H$rsql z6J-`;BdHzWe|dhE9p;%XV!$5XHAc?iS))#FUlVxwkbndR=8)kx7D>eETpZYH9-(^PWrC&;Xw5DZd8NsPv&Xipi55 zmo+L?w0hZl=PP%=QBw;#(HBjn6-M`x_iMaVTllwDBH2XKaiF(0;cRwR6;&A>Y$!6= zRp@cDd2Ygizw%!>HRU{SP9yiM2FuvuwQ!yX{z@8`k;trdU8&YNcwLF^C0nwEdc zuf%$@v@;8|d3A8Mvo@d;J)BGXG9RAo$ISC3e*M(1n(Xxf@3!}-zlb}~cMglvAM z)mevY-|*pQDwmAI#50+3d>6jeajhzyY^Xh`4Ep)5o1(m$>+ENhdA6L7v>88+#<}bh zbBnu)tZRDj&-_DmcaJXGknGizjQ7cK>_=j0-(Ta;vW~H(x1K{|W44*oK(eazGp$s{ z;$P4%yoo~Msa5=us$T?@4cJ+oJ1SLwXM@gBWMucj9q61^ouV72nsfH;W;WOU;|?Q?JMBv)XhF}=)f&cfe*_>-CQ zS}4iLNDCH}oz)I3BorBB?O16=YeTU9jBh94`8HVBcYi(UH{!vvr%h(BW`OV#o?VTn zopEWS@66#G%yK_P*#@xpXA{*xNDP)e_%g;nzG(lG$=Q-jvJ+{}L>W%r7tw)}=-OGN zFd1F#3VX?DeKQ=Wt)?a@&l>0W&^y4HO={2au9?AIsGf}c0Gi8GQZVIRC5`hkvW7`Xr4p^lC7A$ zjqE)ey83E?`r>wXPuI|@W>>;YBgZgwF575zyj?Q5Q&joQp^WUOJK>noJb?A!u)*CpLwqJO8BX);>oX&bWt zU)D#Y#r?#ctkpZr8;M_6;9qnhx!pN+=&yL%#Pw@M8~9TiGr=b-MCa0rjbxEiiPk8Y z2($@rd=2cbA+nd& zNOXUU?`iwys4=@8#a7=Ur=$7%ls<_byvBwm(BSNpH{JDO@NQ7Y8Mu`_p^|MMs~_dv zwm7pGo!KK{m)5B|tIWREnaiK!c{OsFSa`Dg$#t!=Q}8(c?pUzS#iav$SBKFb{py2L zvvKtn)QyML#b&+Up?worLh{uAldU8ZBzbCC)ly0PQeU}50+;evgZQ1BNUSj({|=q$ zOC|H=eY*J!>&}jLQ)IFQlKXlii5*KL z$CKk~cut4c-6Zq>v31|kR#w;k@X^?NjS>~TCSpT~5=5j(it#D}RuCy_6i5U?dK1NH zG$sPZLXjdD3nEP*^k#(!0!Erhvmv32fC38R{d}LT_x}EP#~J6Gy`N{5*;ku$uG}7w z*s{oL_EDZzx(Z8JAAHk5b1u0`#M49iI|EPGTC)YF0@dtyb1KhUL!i^xv?&~xX zca{}hb{pEje<`>Iu~E6R_IhnBrrFscice;4BI=8t$*9l-y>c&g*7TohJtI!6{zu-w zQ&^(ps2c^Z$@;tkrf0&aJ1#CTKYPWefm$0w9?NL=ZdN#XEKbAkg{;R)QW{_Q++Lha z1sUlY)6w^h|Icb8c|Hax`2l*Z@sl&^+*KEwb&K}iL&YlE%ss}v&5&{@MJ04ebozE} zbRmoP8xJe`pZlmg!+Ro}p3z@5)JqIVS@d6^)brY$uB8gfWWVSq3 z9U9EmW3qT;M=m}Z1LMZ(YiTVd%|ou=d&Mbp^Xvkc;oRr zoSHzIu0{Ea$X&bvlNG!=J(*bSG^|F`LC${Sh5jfQA~~ngo!ol0fsKp?PBf04>F<0J zcq!a#YHcPhPn>-#{kH? 
z?pQs%rtd_&$2-3w8~U6v?G-%vslVBG-(>!k5xW!0chG)fD&i5JY;6_j#cFc04efr; z4mC&nl+8(ya($>g0+yGscq2JjsZbWY4!Pj43xv?Ss@W9+T?`Ig$N>DqK7&{*%|DG@u`MNi3gnM?=CG?%!IwSK?Hm(fP5(t73s zvYGSq__1H)Q`Y-sSi~2tW3!@8MEGy@o_x-Cs`CoD$l9|i4CD2_UX8BGo#SclbId9> zUfuom8;|dt$seKQUw`;b+;gIFOCG{mJ>^VrzS;Nq;?tc<57S!wc5hKCtz7_yGJf(o zzxl~tLwwQ$^9b~)4g0LSzR-3`=lu=fYF#{w|NCPw%I%xIK$6Vaclx%KK0gG51N^`{ zqWslLFDL!47dsod&$BK``c#SfBxqW(V$7XQ+4&fzY*yU4BVmxw*<*!pWy6J$4Y7|7m1*OB=2XISNy@~_Y%d6drqL+}eN zk`p`q-$3@V!mgyRLkmkSH}=I*4Ni)Y=xP5taeKTJkfl8^@ED&F+3lDxM_?8CTK zPdg{m;)A_QZlCPIzoGUv?Z!j>BeL0w96ba2?1tP-N|q}9xcNoSjgCXV#OfZ7nuq)R zqW%*hoPCHys2`4!Zxr?7QCY)ypImt#)j8UT{uS9ff=t$cW%WX;_gd3VFV?)@lho{7 z{2iTp;m$r-=3cmFD0+ua2dVKY*mk2&qO@PoN6z|tk=us&wTn!SRVvbUv-QT0tNWZ@ z=Qgm?K2!KniUH53>7JQ8p$@-Oi3_oX`Tfz1xdOeplPb7DI8DBE`tW@_E zG<@D1CG)4UB;qL&y||beByzVW%GF~p?_(dU<4ZDg-l5Gg1+Sh|KD&&wS=z^Mb;~`)0wBlGp&oZk7L!_=r8_7J$axO)qY zUR3OM&+_SL^|F4-Z2D1Um*D)9C~*Z#+=f)1q=)3*Jcg}Ld{OdQ-KfSIbz8%xryf2x z_GZnTn|QMu@d+(nZG3ME>O>B-Wc>&E$$d^)O~(eU2796}bGA6qn@wOy+|#uzS4Enb z`F*_kvwyQpy$yO8hpG+Ko`C-=$y4HH+Uv0*3AqUub}Apwfl;Jsm>TgK9f6v&X>u|= zjW?clL%HOoeHK4XHER3}RrhK4Q=bRuZ31p(M)?cSz2|$@rOBif3%x{N8B;C?W$q)r z$D7;K+NGDh+K4Z6vMWEJmtHJjymNE9`UvPcYbiI}uOn^AAwN>BW7w<2W!E7;-+^_$ zPtPe65B2PKWX*Gz&)LK2gfF?HCSLB>XzPBn*Ro_J)_I6lFU74ll}x69Fo6OupHnSej>V6Dt29ou5;QE&JX-o=66q1_# zicVmT4}N#$az|WNji1wty{yIO^yLb4OMbLuUmQY)?;>L}P-+QSlI3nbDkk63NKd<( zFAg^P>?WA`S-$MHnXOx}eI@-;- zUlWvBK(B8C|Mz$?986gO#6PDLrT8;cNVf9R!TTP}a;MvQ@SFjjMBeoAn~2!2NXZ)i ze?m@j2W@{EKT^p-sFL%q&E6(b^c8R{L4{Y*?6ratIbW{}inCz!clP8h99-ac6o|53 ztW7&^MZv!8VP@bLf;Ka}&girkZiDf644QmLW=a{PFE<)oL*H`i)ng>$v|`rY8P3yD zV*xr(!^LCt@d=9U)bltPp1~Tm)S#=oI$HwfU>vV;wNxB*xG}%K3YAb&O;!tLjOXq9eA&z zXMgZr%L06f2Gt69Cy=1r(2yHxpMh;lvXXr2OVInj$--(q#aH1HqvCRP6OoX;*k{?! 
zwvNjdYF>{wSur)%k3*60}n>K8C>S8hweiR?;0463Z3;z8dIH7b$V zNwh0o``h8!46mN_IXjdm6>l1_EXhqBkFng)7TKE2nq;5gJaqaH1#(k;54ddb>^XMz zdOS(g&Ukao$Yf4xzbkmM77iJy>Y({(Ke<8dPjHHT>#c=mtl}bCdN9hqN$Pf>VNP4q zb8@+@0DJa+KiA@uD0PprEsFoS&v*cP*I(&9=6AUvqr4T=8gREI@%3o?A^4qfDZ4Qh z%*GahI(u3>)as4@8`#p^Ie#%I!o_VY;sM-A9`y-@e2!8(zNxE~s_j_?v%t^LJkcht z$!+qYG}eAivbzBv8^hsqmOOh#FM0ATxW6_Z+JtkTlBs^E-W()b)p@~hZYUdX?!87^ z$v>W5>Uf{c)zZfW<=T7pCLP?S*Z1)#dkKrY%gM)ltxW*Sv*2IMmW^Z^>gX+gM%hdL z2IU%(j-}dbS&U@4ORFvFpUa*P2Gw2Y@wO7V-}!c!KZI_#!Jw~pM$pN($yj}oJ{onG zYwtz-0TV%+{n?AbILzlg=zcv|CL0CgH&Gdf8he^M&=YaAPT7&hlBZxc)NfU_vKLUr zD3BWka!c7LRKA8}^hBHf+DvrDWVYuav^WFB6U#MN|1(I~23*hnd^dk@!pr1NyoF6o zT-@h$=NS@^99a#Oeu>;Shf6tlXXos0lv@FdoIk8qH@7vuNgkhpRV>14QV{>KE$mA2 z+CN6F2eIVIQZtk+bVl#lY)Uona+h0T|K{R$efD+;4u6Bnj})!%WiN6wMk4EL;zfLt z;-T08wBzl6)y2UVQ0pJ0WksQp7vMu~C~j?R`HXHZAaOaLZfg`;jb>*Umy_E+E7>=3 zc_BL(ADds1pDlidnnRSbyKh0q(n)4kx21iAURp~8eb)N!;JmY))Hi|4=fkr_?EV5oDEqPVqe_w^^fYpfT~w&t6Cc zwc-JBsQF-bt(Dq{ze85|*)>TVd)C*tfckK}|BYH@3c8e_u1@QpR`x=6Ah8HvuHhMnUczh}t6WRQw_w9>v z_v6jq>wIBEKcRs75v8*;@+R5Ij!1UJ<7<$!`l%rQfXvkZ+k>FZdOx>(UI3Ov>$mm) zQ}wH;bw7DcPJ}&rs^NL;_BmkrExp?3d3GSr1WE4ZO1{86zZ1;KQ;=AVVK{USEnTm_ zznMvGAjfYgncavoo|GYfwaxfzC>g)<7f|zNB|?q=_a~?4H~9V0NB&fO*CtyjQwhe| zHORU?yHH>F6wi#vQ!{@{I{;;V;L?w==E37|<4Ib$0j1)7vYj1Drm%mIqJ}uQgm>^|I}JpP^An^fBnDEoXg(b&N}5z+4JfDUi9t;qDf#G$KH$uYdJkl zLZR0Rn=}ZdSFl;hH}o1Vd}sIRIg~qwPG*%~-K=0Yp08jTGb_4P|CLa$om$s}_)KHO zbhakZXE)P@+>DhI#=SU`IQg6kK5HDzyz!@Mbn^T-I+$Jl+~k!p=p)Z|lCeL)Io`C( z*sp)+;qPQ2_s=9-(m%~L7ocm-Vpp*si5pIw>TXs%^Zp&^Ia-^4DP$zG@+BxSkCZQF z-I9SkF@){)7HVJO^HMf%i1|zQ#*^o$4;TicLo$Qs-qXn1nfmz`iQUe|t%Lu)Fev5N zJY9*CjL%~{t*7>IvU;BWztMJPOOsHtD?7cGek6LVpXbx)XFU?q7A}8hZF2_KRQ<>N z=4S2WN=}Bc^jub7$+|lMZ1JM}Ilhgg#XF68xl8O@7$oxWOmy3X+qKZ^buE`e%@rVP zqwSnHwggpf9oa{=bG|!=-Fw`~RHNwcHz0ml-`^;g9ICkycu>**e<{@%%y(IvB<|~S zbw3A5B8~4Oi4WrccCaRw&tCN1uT)ReI+|2fA`|h^Z()~X2;BO?^r>RT8h?WiLDL_6 zi_}Sunyd^T15aWgO8yVYd7dmNO^h+salJDNW>0)5UbSWs>u7Jbac>N|pN_U=w3itC zx;`I`Hp#&L9ZCGLY1`C~?ytn)ro_n#n)cWNi?x4?t=)81_S 
z+JN%Ql*&FwRv+_y%g#$8ILqq$bmM8_PWo%FEb4uNs%PR`{X){a($!6<5qWzKG~?l! z>`&d`oE??R{LjwQBgXu0#e8)?2s7`$z0kvHIG5Y8BZ--d_t!#hxXkRfA-hq^Y&;9k z*DLWBem+eu;wRiq%_`(_IjG*(>&d7UPmWFGJNG;9K;uqe+y|=6MzSWW;Y~bVe(&u% z{C*VGMz9UZ8#dMFie@5%wVPeBoRroeH-CWp*Ql@+ou2kRYwU~R+Rtk9dX(v)#yF$R zVzrjzc|25Wkb}NP*xYE7Nb2}N56Jmv-x=6MY+`P9s$LzOYuSL9~u# zUI5QTyZ(^`Rn%X+xzE$m-Bxk&;))bjrB(0X_e}C|hIgCUy2eJItfI04zLe!$;7RVg z$|>!AdP%H%>9n*BI(&l86-dJn)@PBG+^5zOQ$RfvPYy%N`Dpn|nz$G&ncEE|Bax`N zpikD3sq9d0@%qw}@?iL?SUuj%a>X;pYRZV7d`E5R$@@x8@hx%Mm*`==-eTQn(y`3< z8>`dK_q*Bmf8u85_ES)GxxTVWxD-wc$#!mu%bGv;&L!?Sdv@8`dk`$Uz;P1Je2Jo6 zS(8ob&H{TqBhrm{nb~mS0Lj{wB@E7l*_(7L z8j~2_WXen&?AIX3?GC$fbAKUm@1l7|pp|BY*A^Hjf-QMv;+57F4v(Y4Gbl2Plx3&l zTU@>l1+K%r>~5}y`*d{APW2Z3Bu`z&qGZW$q|cn*u24TG;gea#xv-0_Jw=1F_mlh^ z!EXe)e*pLUvq4z{4E8B;BOes-tfr~fch7sTc}rAFYy=p#QXcm|7O%1rH9QVVGIt`gHfVWmTM(4dI^|6BCkuy zz#Aom1cK`z>n%m?(=M{CX5kA5g$hm=i&!C2pJ zqhC|aajwFt#PDUGI%~4-=$_eDGBJMZTPb#}D;iHAUCY2R9aowuksBi~_3TM<8C*-b z16Q)=*@x^8%Va&NsFhprAbyIyK~oz0JFDLZbnV#Cc=RtK_ti-0Y7#U_xpmgV^T8E3 z#<1GCDKX=He6hY|XIhY=IojO~>qJTBR+N9~>k9O*2a=m%Um2#klP8%zGD40wdrxde zZlIimU&-Y2B-=6{?f-_Ft6-MQcF9t_4YuvAiI?MaymfM?z+e#mLa(x3+Z~w09t6U1 zWa%RIvIb5}(@|;+#_{;EK5WiX5mfPL z`-N6=x8f?03`eQlxt@G~@m3qCm2dqv)c;%h|CipP@Ac7c2$}gC2VQJ=cv%KqB-W zh3WlP9NC4LivkN^eG2&=i2BJrkjy;Q=}L2@H{ zX647wGLeT(v@itaSAc#eS?LJ(E_87diHLXHeQ31VxcxYe=A`RP6iU9PR`}Z=7fN{v z2Y~oBTge5Mm>sWPvfM7vMq+Lj=zTOS<15{o4DHn7eiB#_gAg*ete!X**}Q)LK|zG zBfy*U<3ngn-u<{D{qcX|0juAb3BssZW>~C_kCHp~X2cx!c zH~7s@at&mK9Edw;?Z;uLzS-A${fDsw0q*@bSU$nvN3ojsFybR_n^8tS}Gl9tf4D?oH+!K<%~ z#Mv7Ojn1K=xvecZ9&+naFZKI?JQ?@Df<+y2mED8~eU8UQJT(Gec6oAk`4E{nnngJZ z?8#klGYgaq4O>a~{}?S!(aPf_=x8)Kw7@O1;bD5uZcNVbA1_+`tC2r6${tinl0uhK zU+FJDB|n9ud`i7uu#T@o-ej++#P@vr2dWK2>DPTumWs$h{9(e)!;K_+y({fBmH0b| zcHECzJ+$!z%W^1Mox*bdnx1AKC0Q%8i-T-HONz!Wi{j=ZnoXCnl(ZBeGK0~_` z1CY5*qEYJO_gL$hiZm~~wx`pn8_CRBG{3&ul}UX+I=6j+!sg9o zfro%Dp5K{k@4(H9?9d@>-aMLnIqjV7&1m%x@g!@Z>*@R}v^bG%xhHw2TGh$Ut*l2) zdLED5?4Be-G;snm^meoQ@fJviqulvA-bggdc$i4ScINe))VrOG{FMF7h@Bg5a-&N! 
zkR_*9qU^Vln?Rc^Ib%VVk@;%cu>xIYsMU#ljRIe?Fn`0&RY9l5EO^dg{$reZg`|yT zcVgAzuYNMweG*lcvUnRneg#g|W6S2!sjrpGP2XR!cga_{39WJy?FTGgX1$rsuU0Bl zZBfj#%Cm^^$h^d}tF@SHHY@C^BuDMJWFk?nnHT4-GHH(Ibw>PoDA7bKZE&cC za)VI+R<+N->lWIq#DZ`3EI5vbeLO~HqegOSbyKq~UB3z5e=l_KEbFfe{dUt+PV3vF z^k%$?uYT5yuaL35{s!MB+Mh(blC@yB5${tN=G<-*dcVn9&m&t~Vex_zk&>SmRO@Y= z%Ubb1f0HL@s@2mmXgk8+ZDyg(!MGVjt+mq%RMoUPTKkFZ*ykswy#xJyjgFV4>)Q&t z#LFUUzxC)ag6_69FG&>0NR%JuH@+A%U|!GIn>-G=uV#a>GWqE%l8`pIo*R%B>u-i} zC!SSVrG5>U8{uC=>nG#=46}^x=8PM_G=pWh*T|e)y776Ar(rdk(+~u`$<91Be5BF? z{C0LsqerK{&+sS7ndOj?&Co_VC-HcFl3V@te!_Rv1odNr7TWv}7i zYG&tb9$OiIfvn>eqgQfWj8MBVC`aI8JW4t$F_U)fBIi%)^-R!B(fTq}*bTe3+Kac) z19avr7!E+ARp_0SeIomhB|F(!N!I%X_&rCT^9t^Fq0?vRGu{~S<4q>RyUeksSliAh zX6>27XZNHFDB|58UzkZGxjLx~m8WX21>Baib}yn8XC1n^jRaod{WO~KA_-46 z{6%nnoQAZZOUXlZ657pVp%aslaVd8bZy}^E7zj?U@*R5TpvtYB$LCtmlYycWj5YZ*GVf>Uk>*iCYtCH3clvL#G9z$15n^= zxA85fo#P8h+k~RwRBkZ5vsiI7RqsP>B%4TfJt`VKbLyR3H`!TSOH-1QVi72l%`V>0 z{XmtBPC3yXjc>1@%4Oc<*2t9g05-6kZduJQtNK6f=wX zogI$qo4~%`n@u2{3x@dak0T#%gT1RUqr|guBrNB_@yvcf8{1GjSq~ED_OY>G3QQi* z>l-X^_FJFzJSVW#N%PyFYwu?w+nr1{HO=R$;>v8LXDJbnvZvwMnH2ORD`V(F_OQS6 zELjk956CApB)$;M^?sQrHT9aTN$+a285wFsKGuUj=P66@zT`oZJ?mK1(+WvTJZDy~ z6TGcz4$-J6-vA6N@N)@GYYeYh;Es3FLan#)=14T}W{m9)+g*6v3`9fyO%B+2Np|%n zdL2)a+_ts~*4LO>JPMYd`ki9$Zl9UOF>K#RbBIA&TVXbr7594jk-2L;O)iG}9Y%`y zaxG(r-cqXvDB|VRm6qj{X^ArN7Te}$6X<>dw$WPYPx3NG#8WRjhwc6Tfpr>+M}x>p zZ@fR9>>fi3-z6UxDES&HzwCP=6BCnEN1GFI=4Gv|M4jZadDHXcvrK-6v*3{&bJvi+ zExx5rUHYDgr?&d;rc5<0WDj=*>MS8$!+ky3ioK`&#rS_JdN)AJre;fPS?pPC z(>M4WPrV1hI?$SEuKF|RO>QD?=X6G8r0_&gjzzob zC^Q+|+3Oqvr|D=qkW9qOGPxp?)8|2OjAVb7qD686N6)^-)5+{WEx(B$sRox$B(Xbr z`xwO!kdI?vKEsSDH~LJ{%7e77BF-$q_4;~@pJ?{tYZN>_8}3a=!rS<>9~F{$DZZ*p zwU9H1c)Df8&n<)Vtd4)dA~mBwx08nKSCrO^|F{3}T>GeaY+#H|S_z~u0 z$pw*pt82AVo7~6WeG%P>Z^em5pL4B0w_e1(zAe7DR-?AE@h?cOhlBC225Bwfk)4WUBD|3F#TO*&?BvS$ zv5kv-N+$lV{O?(;o0I)5Z%a_jX0OYTrbE;_-mYv zpV>wDwbBp6I&mt=eipy~|1mCPwSTbse?_rgbiOM&&nS0_|Cu=@mrCZu$CI{g`g)bU 
z{vK|*w_peu<|~=C{TZb7cq7kjb|;xChZ-MFg+U*3pD2_4Bqq^8$qCm>-)E!93R0R3 z7mwinIVAW~Qgk1!PfSf8Qu1$7n0s9=BD=lxc|7j*&~80-zPA^8koSpj=|wW@6nhm1 zk@MqNgkKl?5y9=xU_Kw_SNq8>NolVrH~!Vu-a%+k8-Cf_$$rR>KglrB>wmxHUB10q z{HDD-)r^0~?+Qu$3E8~>1|vvOTKKV-d=4Lv@Y}O!A^9Sw`j(iH_|f&kk0a3Q7UN}Z zQWy@SP_ri)`5POYUh^itONYZHdn>sy;V3pHkuE1#dmKWq>abwhi~U-ASDE*(K>zD# z-c9hB%mQT89nMb0O4nqK>Z%c+w_BARiPAZtucyV1;CmbeA7=-1+imVXo6G_=q&WkO zE)(c$9lHCBHXFfYy^{s(BV)!1wf<8}W{0x02=PZhxn< z#VhE>T-Y|T9?bcDys%fAw|vby#OJ#?d+`@?(~7>_%la&$$rad{M9)pZ!(^dPzQx=I z*TtK0Fvz-MJ?&qs=WU?tLat8LPHR@bgI04|^b44MYtJoO!4h6Y+o}F9Rq_dMv=GSnEY)YqS5&{H4Rl-8Xc=C z)kCYvc{c;K6aW1JYkP@R&d-anZkzTl*5VWzc{jc!=J`rk*M@N;)XA*-cQEba*{!53 zSdKusZFrFQk0$z`Mt(MsgJhWh4XAS}of%5JWUAsy4|J=<8syfv+?)Fz9H)VE9En-4 z#fw3j{9zdju4RRDGF^#&WX0c;Z5dq5Y;vR1J$i49EAcn|H~3#rW)d5bIZ72=pTct7 zNgmoOlL*8bN+wt9SaaS#k>}}Xo72+{V0==`xpO2p4rGR&oa2+VmVKyXpiji&rR+hX z-RFX|zAR;v^0w z-nq$JG0CXfO1=1DCk8t6rQ{K50@hbicpr!d!X@*A?fOg9N<0;k`|S=ga7D3SH5{*( zfTb)RS5haIJl5!|la`+b(UIh| z0j|fVVIJHnf})d>$w0RiSIhaE8=U&FAJ-J`Gjpm5!Zv!$Ex=Xi+h+fB6GYC#auddt z1!Udfvj$9&uiVSjf?i~%|2}z2baEqb&hzuBmgBi|4a;NGQ^sfN0Ep8-2;pMFvwozCYEdfc%CTwJeut1 zUWdz2I6GtYaH9g~HYqViod?<7C3;9)LZFDB)@gW|+#EB&-dVYa$kh{`*TVb%fq!;B z6CYenJBjKWsI_GMNRGd^VSE@molepo$GN?9qp}`Kx08?bBtBLPl%1yT`vvDu)BjNN zkcioTpwB=xHlby1gIJ9Nm-|h7+1vZhUS_ z26FnA8wwH?wAuXcLp5^y-he_^Uhuvj>SPZsQ4!Cwo5@bv87=0r!lA@QQk>YC_1a0y zT%ybqnVP+bMczFMpEs3DwvWHif}G}O{hQV2w?4hAeEbi-#-~Jc>>)1|SkannIuJvsHWHl<_2SMPtFy#gnW%|Aqd^ z40yKo<7H6`MdFbWFPe3QZ2X&+u7gd<|FIk$yNGNh4k$kU520{uT29&<(xRa_-yPjs zv&~PaKMIHc>3KKMj?!OiZ?gh_kA~#5KUoGJE9~uq@XQ@H^HF9hdCW=X_=4)I(4;*& z<~GvBI5&w#??jjG#_Fl`pbJ>;Fjfoz$yI27iF%3K=tJ6%1VdGP%4qv2iLQ-q{m~{l zGrv`4vEH-4KLy82aXs-yT(9q^S&XA_HSwLRw3JMqS-ZziGFdm8z^58m6TREk)BWgO z&Zw0$hX;JB@7Z9O9!6K^p;QYR(na6NrrCgOT@3bkPGxT}St3`nfk*q4J>_4a;|s>w zWvs_OT5$&Z)fQ#{4BK(|-x^f8$+RU}wo~(MEq+4^_iG~(KUYsT>UE^vlD{f9)n0~+ zUFhjc`ke^sp?+I@R4C=1PDIID-rQV_Vg1c+ zw^*6PSE?R=zFx<1udUNf$X5(gAUnk&uK~((j2QjM*aA=q|&2iK)B)79uf^ZrQ65@Y!+-Yqm5 
zB!9>~X3@RT=2<_9GQGdBJ0Dr03^VTDL>gM4;W_N>Ts-;;9w(~Tne~|kqc_3)4|r4{ zhrLNwVq#X3-;v--9QShYM#<;)2U7ey_T9dmz!ZO<+dWTih0^c; z`4_|sFX+*)X|94 zgdSEWfu%dya(_)5dnC!)w4F9)+-z>vkywdDz{T5rAzRRf#pw$Eobc-&mqYhJZKu{;Gmw3mnFp zuk2TA6B*99RRipoliSDqU1!wD&gv_mt!2L51+MR1JB+V$)wo=1RoL1APF&`Akc}{& zY|!87aLQPndxHN!)7E+(f5WjLnu;PgrQ7ZfWdyIXdXGL#5z?tUYe$tF$1|3QE`tQ&r zF(;GIKl>CdNql@!PlnGpkR<0)7v=B9qwI%#Mv{{Q|9$o;83{)jQRd^*68xHk4g>Jx zRTlkhy6UXbh+gs{NcOd?XI@tB?>IFXwl|QT%e2!QeDOQpMT!y&5HFP9!DSU}Pa$pD zN2yw@l)pBn?LoJA)YPE?bF>t>-pqb)B8`8=+Xh-&sEx!ABoa9HHO|AuWlA(bn`B~6 zEW$<4^Wye3BE5Rr)v%1(W}Ofgb0Ywj0|P+Y9X`%o^~ui|Mnom5r> z%S=4DkJY&n1f#VvnI5-C>Ccr)CWaAO-$fobfW0GJ+iLS=@8flo`!f52vmQ-e3YYkK z=l0)Kc-{uY`^jQ5w|2mSBR<}f|2qsMdfm0Qi@Cx5fI7m%2KKE-<@anZ>V4O;lEVBE~9r88~uE7ww7k)l*{@OSswZ8gtj*)7<`W$Z1{9(UJqU z8J_Rg#wbv%qMw!6`+ITjQJ5XWDt(5p$#j=JfIp(aM)ZqsdG;Ev25~Zw#1nZENawMd z@98&Q``NoGi?UnwTNb6_SC^c?>uJ@~^m>%`8shIbEw8g;NZ!Kw=DgX@$?0w+>kQO5 z9c4x;{hX(XHSDEcvNEnA1Lcggp?6|Pk}2VFJ9^~wGxzO1wgjX(Z-BrbQXwnXziK5v9g zbmKYPPX>qo7)hEN0g^vqsP?y6r!>a7*9z*r!lq`6#Jo%i=W*&1rN_WvlvH?Xpvo>1K z@=i4Vw8fc7}2z1;)S5ul1E zN9^oTWTZamS}Rqzkc33mJPFRL*r5I(>`M-l@gY9$xm_oIK&4DSwY0DU-tl}{g$@Jh z$!EAWU;BsYKN%l#55TGF-G_JmwKJ)}YAB9olspf24p@QYeu(UVbcIWz@K>=or^0A5 zS-Ao17t;5^AV|iNFTsB#?o?KORUw@tarRu&e>3awEWDJ&ZzNVF{fz+w~x@^A${M3%X8^S;;B}$IF(6LZi~#l zzBw0&Z+koRT!GV_Kpk(*o5(_Pha@A*B`ETczH)POW%JW|HWV|tL?sR~`+dy+Wu7j= z_1yY%0Idh9`$wZ!8&GV6%XX47pAsO?7ABvttXIhcO<{*5t7@KyIt}XCe zsq{cnpNyZQ@gsM4)knV;xUv=(Hu}9p>&ehO1`J)b-AkPZaPV5=VPdFPS`pmM0@b4X z$=^8rFDiiST8Zw;7o;Ij?5D1f5_#3jg zfW^50rMlqivBuR+o-9V`qx_CCnv7&Q;uF)w8Cq^E$n77Ab6yG7R(Ly}UE8G1_*rDX zIWfSswOL2KzDA*=$>w9U|5}irg*!Q2|Hj|U>E0~*&3?k`;A)`87vRci-pAUSLqZ;e z%Ti~8qkSu{g_m*u*FHBeQVc}1_*ulJ9@j>*pT>^$H~=BS~yb4TS(CdIClW{ zi|E~KSY)=8=&bt2tIPD=oNUBjAh&YW!kM%5^DOLRu~w4AjAZvn@r#MEa6B zq&j@RA~V?wug|u{2K)^yS#QLH`Ca@NZhveR=`KS?rxmOAUTn*ebl^@LZu^C6^Y7OjqVM^5^ak+`&yJq3N^RdI-xdj0Thm=)eR zKYY#&p4nr_ia)tFvT82XjAuX{G`Z9F?2aVfrS$GltsPpFI!NtOY)bmg&c=4y{bT*) 
zm&oT6{`bi(BrZ<^OMXk)QL{qM9?Ri)QpVpmiZa>z$_OQ;Cb`coW&kE<^SjQY$ok zfgGmA+~ZNkS}9RgpP*NEG=}+G`su32tW`htJv(&2VLx)SOWwxkBmb|~YHPKUE9DLE z|5R`xk!#5l6JLsCnY)d?RW828=k$MQSPiVrv0#(_Hqz#8EYMR% z%h%Y98LaMmv}GIZ96}GCHXg?3D;a&_gBF{cTfCE}`g4@+sK$JAk0WT$0%h+lv^zI` z=k)S=bp9!*{)Ll!eY)4r)j0BumS>>E#dNO&IhgD312nJ`ZGX|sVw;-n^q0FK`{Hvl zYA!{yrPcvCQ%>aETH4hW4DIbLd{K-rE7+k8)>avLeg=Y#EK7XZ+j$@Vv};-4oPf^) z%U(0Fn%*TJ`H9*{cJ!=SI;ndVX{n1Icc5zS8qR$=d;G7;>OW6rTWQI;YhwoX&Bgx2nsP`X~eS`*P z6%@`rgBME*3CJBD7iy;+9J}LxvNrF5*L$E!c7-k=Pc(H#(aZU#ISEsB@O8Vh7`JOO$!uxSaifoJ?O%KQhkrDL8zN zUWcMuZt-rc)lhXVne7A5`2Xf6&)hnGx{+_ZF|-S7wu2r|D>#*Yx)?pOR~O9Zq2xp4 zYLV7*+hgL5a$-1ue(g|W2VJR#D#^Z`x&M72tq9k6DkbjqdVNGY)8{fAN{){ydap;? zDw;#r*Wcq>$;rz{#?nEaMM^i2u9`kIHmBO}UGDaLo>Vl{=MsIlQ)VORF9+9J5|eC; zxlwB~_?DB~ti=CeJ~W}Q7vsrL_IjG3ee!BlCMmgBzBc+MSIJGCxm2QQcNg-PnN>10d`>^QpwgwRN=`p#;Zl5!myx-Bq~!qK zcOcL2!(k2lGM9Z4=6}^rcC(HoO)W@cOEld_>n}sy6`oY0aZ8lj!4hV!(Z;vGm>paT z_QSNB`#EQ^D%s)43amRz*j#T7(QmQ(i9M^L>~Qrz$D^{=Fg?jc;@Wbf!+oB;LW(n& z{g0nSE#0Jk{DPYq1FDn4e}g0*uQ~I{4K4LidaE{@Xghvyf5DsFD*Gim=1$rfS{@9jG6e1wM0(PD+3&oVa0 z$GI-)d=>P`D?X}_s^nqq?58hm%d!qtyk7*n%%`ifEy=sMQ%kX_`@ox=@O?;jd97Zj zeCDj#E1v^`56lDN`8CaNqP-Iv{I!szc^- z8k)=1uDHB{_p#gr2$c z_isMsq+t&(cEatP(w&SBIX7sb#o2H^nB2!BYz@nlc$Vy>*TDH?PFh9hDkzuHEoYyJ z@R+IoaWKw3fX%^I6@L?TkzIo1M~DYRf7T^8FeT2aF7%NZLvk zIa#L0vT4J#m+`JHSURB5B)YMZwGDX93r_NBY zc1FYO%_d6kJ1t(J$J`&7-Izqv??de;wVON(BhfrwV96bw{nq3)oJOuI6%_7k4Bddw z8NpkD^w(&Tk?3jgXJ7YQ|EHnd6m5K{?}2Kr^5)t?ZfY7G=b_{*Z@&S3GE|La9h0Xy z5oft+b_OcE2e0HG+*fCW5k1=98P6*yJ&WXJ+}Vm2OADQd=lm=b*a3#*j(8s(a(7K~ zCJ%vga!tj5>N_}Jh3*fbL@(5w2-6<8QyLMnZ&i;)9;naDKroiAXo8>p$z}t*&hFZD zHOs;0Gp$}msuN9_+*)VRoon%IEK61$21`lZ4(-PiC4N_(!Sbl~dzjB9hf(gS4M!4b z8E>Slz3;+}M8@V`uPd|{t$G@-Gmk7~LmljM@(CwCIdN9;On4A<@$Ed5yd9_g-5|Y~ zjwNGHf7m8IBi_*i^-~9r>ze!K#?b|MzDdoD{2Rb@pZ}xP`kJIB2X%|uRoCL%Nt}Un%agaMchp0Ee z^VrGUuW^-m$z-3CVKuTo26R1&mcD}PWO`Z=B$>I~hX22UOQM4wCcn3UEK#?~Has2u z6B~LGOlpGYNmwP{>A`wTHuLO#)gmR)%n9I2cHANGJ*Kdad#wA$vjlgLfDcio6}W!I 
zp8XWP+o~1c(-o|Cv~U@YJObXn=#_b0JQQ*taA%lD54)spgzwLRyQMWuQ@^iSizPNNH$wf~ICP$wMzU?c?pbTURjikOub;Z;dou`@ zYB3&9)yZ;y93G2Hn`u;bNPBn}O&AE%oX01JQQ{qz!lVv)$Q?Ovkn!j7<0TZIZs+4; z^^)0R8>n*UN4#k|f^xRiQ#>4!&vi8VG@wBpSmeYhwPa;h8C3_NLe><^tm>lE@ufPA zz5F-pUXgZWHGUc{B{uf~kS40~b~T?yi?i_LKAc$XNq^LCXdcmx^erHLU6nlpKXSiN z<^}x<-AZ2g@NJydeoKd96Ou#u-&&1d?>*kkp(~qkClR7s;c&M3=>M=4$wAft4c^Dq zT1JWZspkY>7RqGTERi>hKy(v{ydHg$P5UA(#2@Jb{j5jJLHe3T7XD*QY>ESqg8YzT zRh(NXlh@!JdUXn_WsUj?sf|6%ns_Ftx`X2zPm)12YwcHy{}+(_WXya7FS6q>%+n*h z`?E5Mfyv4@z8ck)d5bhwD^|tF>*>ebB5A*r6C@rPzct<@=jc&IYpMNT=o#WH=>;Fd&engdR|cKCdh&u<=q)fSPd|q#|Ks~VvX2KF=ieY7Z`0XbBs9AMr)eX( zuyVsm=v$5jd|my6;FcBA@5t}jO2>a;A}u+NG^EWB(Crz$y^MePej41DlcV$fPh?T# zeHYnHHn&zJ`XDm$lBZixrkUSl2+gglM}X>$V&A0%X}1Eu+W(7p&S1_d`2Qm&*=;H9 zxMUwCcev#2U^rf#1LN#|L~?6;mpzs0g>00eTcur{ngmB>NKn$n|y!_B`WM@c0RZ1{gWmCr|EeimzhVhLhx(YMuawh4_?v%#x?o# z@xW=Nm)k&=`0(79mi2f}vT_gOi=dulF8vj*=Un-c0{6tcUsCYzpT_mAIG&kNM&df8 zydkOSUtpiRum1(xy5`Xpv{=eqkz8AE>nFEf=j?B@*}&2Go;lwOWHWmP$KlpyluItE z?JP}B`6{6E0X#gPCXDemPr4a>au<9&TONl?)+#;dW%5Hc!innn-M&R7PUY=N)PWB9EjJ~95bv- z(Id0u`RY&APV(E-fyc?HFrGAYQ9GVdW6(1hqb|j(MV=&o$hTx>DT|Yv0IHDNyFJSa zVUST{yY_?m27De3mc$EW-5+oK$;xaoH;X@b54gp`AIUz>17mXM{2TVWN!|w5;cY9D z`@nTE38)X7dq`c*PR61^*2l?LH^EHnL7d7CYkd-vlgGv6t%`Tak=h64hIvv;&$U1^ z19UlSZ;V#)QTm70a|+X5`FhSqvnCslqYa9=Nn-I2WxW=lO!g(~`k&aGc&sN*sws-* zRQ_U?<39Xsi_SyY#P}xU+&Yo?$zi$3r{?Ie2h6+RH(p=CsT_Xg&exx!&L&hk4dplB zd?M%Ddv~HzAH(cH_N>qUc=J^u1?_43?fP$wR$0fk*Y|4D zKGCQWUxYpM>qJnpJyeP$e5rOFyGf`HE&$$jxFko9FxLYLX}`(mXhC6QZYKL=U-4BCSBMv&!BgNbN#sW&BA?+J$ZPu$`A zCFURTg`C44BuCanGSk5^y`2tiQ$Kg)*zrD|K7;Ow$r#KcwM2ooM!XRyJhGtuc_>&x#PjZav$XS5iP%Iqd4`e`L@Px--4$2)2```Axg>y-T^h|7ZXO_W(g zi;__?abH>WW@Im&w6tMKu4I35mvJI6yTT!MXcA~vu%bVsYw<8#3CdOa{~qmg|HrrJ z7yqoB1S~dtyHd-SYAeyGpOVAb#XO@aX?=mTK8_1_k*w;VecoK66)dv1*jWAVN#qka zb6O#J!%#7^m@YK+B^*7Dl;*rDdnXs7LUJbOL@s-y$(Zq^w>K$U6~3dj@9c(bIv&?pieygeyx{LXu6vgB+Ke5?eD^`p0l?#QMu%W z83=;)*5a?@O5#*c$EC#mOvlO0Y~%Tvy{29`m^<8(PiPn1Z-&92@H9G*yX&*gD`ErTry(UIqW)m&rc^ni9U 
z{v}Rxy%XL+KA&7z+w7VAR*PSd?Bw|GhI2RKa#Qp6oG*+p!(TuyPg4JVExpMG&ezV# z&LdmUrMp>zOUUC};7BZRZU6hC?PRns2g9kPCo|7WJo_I0@lgFe%9MxaT=-`vF8kiK z^tTr5i)cqv7Na~mT@K4nm5#6bWI7W6$XZ5_+y!usktMe1uV!b7RQi-;tpn4w^rtQg zzhNCe3jE1NoOt){)`<03pto>0nJ}}iYy_V9y&qLF zQ9!Aiotd$qX-!AhvS#u0YlNrs&09Z)#Xz%)rg&JFW!i>Xxz%XK|8Av? zQ^@Q@h7rhfbSV))z=>;+5;*Q+JYDUwiN4 zTjJUq;>0e$qt(nVU{htb7QDNI&aI-A)s%{6bwP>mX-=~HT}q-SJ(6SNpERR0 z8Vn~R*ZDugy6Fbv{yFIPRI$Q6)wl6JW#1rsCwKavpXA}l&#UA;_tvIPzUSVLlJ7_9 zQz^qk?jI@ngTz-M*@TnD<0*9biNEm-iJ!|G+9>U>+(%aa<@4W)Hq&bM3Xk(G-YCcW zHr4;+cgWxPvBU@E$Gx7mH1a9We+$~9NJ}vN6c>)ycXovCQ{y8RIPj+Jv{}jmdxI8# z%v_Pad%?4{_iiLHqSyAM9`0vXCud2?(mft!K7mh8V?(2Zz=ga zxq40?A7g(XV&S?eIgc#LC3Vl6Kn=Yjp9SdpCF2pKOPXSwe0v6KOd!cP7&g_(gU=s%!Teky`BYEKOzzJL0khAiMuJq2mgg; zZU<+iqA?Cvq~&wTZ=%-6+WXt1wOPwtqU!d|@Zd9rC`&#yfQeq-LBSh*|6 zbQiLnd%@#Lll~@Y=`15dcDeq5{&nHE$KOApPO_UlXXeox1+N5gR-3PCHTg5Xg-17- z99y?5RbaUTUf+6C%ASyT_}AF4 zuaxKqf)@&^+zjf|S;6dwv_#ir7d#D~iLNQj9wc7)E!3=n3jYCHGOX4h-ye{@?uER> z&t#LHV*3*N&>e<@wU^W8U^Gz6d0WTHNpi_I5_8?m@TYNG6p}5ez}wM0KJH}+d}Yngi9xSHBR5N z!5d$p6x+1g1ox9W*x8;DeH{wLv+sbXS@%8z#_zSUo($*SrqOttc;*dSS%~i~ zv>xj{5ATm>T@v{;19wMj`E&MSBpS3ri};o$yV3pRJUki)-XWl!LpHjT>g>`T2B#kQ zSr*P&5#^5KiKJsA470=AU7bj3%J(A4%Ziq|qFnB#*$t-B{_QJtVifAWh;GR{5$}P< zB>fROkSvJ33mLiueR7xT^=$o2v`k*>^mGNg5D)2vdOI04)|!(qfk{95lQq;8dHedpCJ{pYi*an)UV_PLGX!r| z8?kf#9uKIB?8z~7dx-y;g=fSlqn|_|=JOP@`KR%>htjc{Q}Ch*ip*qb-}AICJ3HFb zcxff`Y)-w`djF}Xxj}gfYECClKZjpa)T(Cen+UqGEY&ttSP!bZU^%yt?X$>geCfKA z+fFoj0g5bwLp&cAusFw&{grTCT~Meg%JiU>IZ;mT-((GYg#IN$rHdZpr__!NeyPku z=DqFkZ3a9 zGsZlJFC$sF++W=vB+36i1cbdn5ihLl7o5SGZUSTSQ&dy$Sr(xdS?i?F_%cPO;^$cb zoL_?KB9zNG7QJh$_B{2MY4sA4GaqFWMVZWD53*sGp;iwv5RbU2+ItA}$;q`)3;#m3 zM9`$2b>#3aG|$c6;a{>bWCR_A0^O}|aw|TnmD`fC$;w^eeGBb>gK9Tg-6W^ovO+_$ z=lmIt#pipH@$5`|TcPi5MVTffyq4XBA`?GwZbGsL|7oMKVl2hnmR||0;>v z=1Fdt+JhF!c;C~EcLoXjh$O!PwnX1t?f;3W^gNtrC|O<2#qe(6Cmv~2Q0g!GS&hEq zjV?!PKlgcLhvY>Z%6`Te{Lbp~8KYQj5N}{*Cg^tn`RZ=gF-u$VNn2Ui$wWy9`s74u zg=batzJ$(9)53bCI^$Xelnou5=ri|P{{>$C)lS~0*KqDQdf5|w`)j!+O?#lukw)~4 
z;QL5aWTq0inyLH-Jh>8FIeE%$Rinv6J1u0Mmb)4*26^rgNz}#v{bseC+#uNxI248c zTC1R8&0^m#yy?7H2E?{BD)44jA4uy?)9Sk}O-B>86!r@6_qnQW^$_c;oD zTl{Tbz*LI;hzG>^W)`_s<`j196I3brzO)C$%3@XY9Gdp>?j*cE6(sA-^wLlKIC2Bb zx%l`DPQ41pEojjT?ML{04h!%`QSKo0K8NLb8x$9TD_$RE&?T8YU)Eyo@{KP`Zk@f_ zn_v4$>}4lbC3|s+zDk70FTDFV?!Lj!p6FTjVRAo8Y1ikhf<9ltWDj~KpVdC^x}oLO zBzu^#=Neko)B7*T#$v6M({FO}O-6z2n_S~>7y7XVjn4vGZt8tQ&2rX}m&5NU(Bxj+ zzU+2(VbXtEi!aPb7#>BRg4sF6-(YuCAsK)3>D)p}@-{#5GCBe!vXhm)q=U89%cmp! zb}RUJ3jUUEG|t_=rFVfKvTzU?x&>b0SsAwW4S&n9qr1pZO8(zZsh?p=enNM96}7Jd z-#G=`X`u|6kN4kSQKTnZ*^0%QgkQ;^aiTVN(uU7`Z%wY+X`=?pG$oVU(QPC;{T^4! zv!zYYwuO;4k?@PziVpg!VniQ~qK)vq0-dkSGG>HbOU9Sd|L?rbJ!S{=nZ1zy_^?S| zBjA1*393Tt;|r48@|LrPU*k-?J^TBV`DWh5>*7k*?-jiy&TOf2&v=^H{!QfQLsERc z5j61>$=~%eu;0j@E%*F3r>vjqzYR+^k`2wuKj&j7(54&p-AUUovL6djCD|qZX6EuO zi=+-%Um`ALtINbIDD-#cxr&EDd?kXq!317A0o*d5w*lN1C=O z-;!l6&)Vkfy@97S>FALxQu6-KqzB0;xfJ!2;~@Lt(_wY7r%gSnr2RjU$T}#z-sj21 zo>OeySikYkoes+Q2V~3};OS{fehv0`awIBYF|H;P=v1Hk;_d+yXiX}z3;!0}lIb*A zW3pns75qDkdGaPJm?3nmzPgDrze)LD(35A#Z&lPR#W39sf+>2*PS7wjtxn`(0;yRJ zs;sM?)O!4%D;2BV3TBhpOY4iCFN3QZTAY9)iM&`?^qSjpvftFI7!#|S5Bvk~N78_t zDdt|qcqzp*IJeDaO&mY{O{g#h4@SY^JzCxx?DO>eENbPZfQ-&HSjkA?6)1h89lY{J zgY29PAWvBrB$m6qwjWTw3oM6tlf0|b$kBdw{2FhP$F-W@?tZtT=A%k1WS^J#pPTAe zqh}&)nu2hP8rPHSw4JqfRZ?Gxw4_8lhX?8BB%Iqz<_`r~bI*j914c)ckG63JoE1XQF7M}1^otC ztt8toYd!uh_t2{BI5s7*xkE3fR-MrLL8H`VV9Xr=3ytdwwN%y9Gswwh=sFyg_u=)u zH1rkPzK(U+Z$y9BIGD({(S_8EL!V@^y;=Jo=`ZmlkHBRU-W{vOckm~7pk^=fZj?+O z)9@fU9BQEX?m{Nxxwc#z_ghajEhtqMC$o#P0Dt5m&!&?v zf#y`&c^4U5gVHylT}~C-z&NYhU$K|tjS`ol=2Dg;YrA-x~K!4zMFNK*2{|C?;zjCP4m%w15~v&jBvJ2I85 z-`US8>&@vXo_i@eqhU@p`;v&<{d6|T&#uE5T)z~@#^U7_xE-IbWAUN6*5ZMX_{@0d z#9uDE%zp%NA`22^s?uP4rtl*y06}n@^|F&T%w<78rA@ zXL3iyPE9hSx|UAQQ93?^5{m= zt|hbKes)NAu>QG2YL_+G7q@1(JnwkNHOSMcaevX}m}zNG*1v8(TVG`#ng`3V-A2^C^mMH)lvhX8a8A z^}Z39=aQJ@=UAh^ufdUczgNK20BxTFdonGJC?qkvxbgJ+)|2h%_5}MH@2Q+)*9Ak) z3u5Z6YOdu9;JZQ+-e7n{lZ1EBMC4rnj=WhiE3)y3w&6%Ex{J;nu!L9@7!qE4Zyvd(RCcy*87{>e4nsy 
zH~V}UjIs+pjl?BGQ!_PkcSuEb5=kA;rP|&mPshpRI~jX(N9S@Jji+q<%i9?3Bk_BX@q^X~f-tI-k>$`egMxysN9fImPIiz3^DC zjwH7?$g+1Hf3n}Bd~$S*@g%;6z3?jgn}27oS6D4(S70lu`#yiK1JJB7MZLv}EGD5-)sHl0hwW>n78nm6FiXDIlM2Onb~bz7(3{@I*>#|Q%Wnrkx5>8^y&enVd-bzi`^gazPqU`@pIpYxVVW^1*|HK%c?DU&mF=2l9N0!rGRKKO zRBlgR@Ar9jW|mqNQ8a#Qnd@!#d<99YKvt7wBR8?uM4x^nEThCwFnmti)*BrYL*5M} z{R=r6XB~=tn+;ddV>J@`0vQ{o#ChIa zj|15&xK|I;Ny)Op>Az8t2LVLpc({+?d^ zw_4NC<`V6kN)Laf{{?!gM)InnW_-!26mUO4PfjgHkL(cq2i7~xK<=Rf-xTYD=j;(Y zhYO>OWY;Oz2%Tzc>rq-?NxMhTyFaiI$$EE!9-mQqq%ke4s@xbqfkjAO$1yN@)Hs-v z_N>>27f=pkjdC+a19ZzCN$xKBJDV|*?5s57XiX>DXy*|Wo`EvcNXN5qe*hl8ApzIW z*S@UhWn^*_hmH-Z<14X=XAJS7+nlTyvofbg45Naw>lAXN5PBq3&Sc ztWF=C{m`f0?9m7Mz8+**vmS;o<>4F8ruYP0;5QKu$)8Eb zk6Ih_dx!BU8F4>VdTT-LtZqv;)BeLsH@A0Whu{zrcnaOFZEmoufFd3q$q$mfgb}QK+>|4u^PSVqs9&FPx?yM*4)(ln!hP~Jnir0|0(L$(sFI@ zzM?&MdU_Nn615ZWj$wZPKgRAn?#nU%AAa_IUz&y*GlPbu?#K1GuIuwT&+|Br z_kJwrdnpgY{mIs~Br|zxZ-|};nYl&7W%^(xH^(XN3%dG(_dhfHuhm(QS?D&~>$rV>Xe3(2YGtlR*Wvy}y%&ub5BRQ|KYjgQ zSjZA@E#5pN7s~$Vvzd&2uBRQ^?P9l1*0LvV=4>_R^Qmx`*rjzSJl(7`9Gy3TDb^?F zMAuqVByZpj`>k_Xr@82sDkNvpfKl)sPcEL({~!JCH2N%9cSqkv^dhzF^Cl#j`r^+& zS=$rQX*C(@!sgxW`73Zd5xN7&|M{MN2giG&Q+#2T!z}d!ll5>IUAn~m6=<>nNg5$stJ=lyqbxo*`Gbvtyp!6+@XI}fjq z)V2Z6%+~*Syv}}Q4rxk6%k${n6(p_9Pl-LxsmC=i$?m@d7Wev_bGCStt|n#QfIL3# ziK0ndL1UlSSS~s|pw_eHLm=#wmZI3|Ro@jJl!^h3&$@i9X zw_8x81icQY6!!{ZPfEJJo@pkh4flR0UWXTH;O;`y^* z8ApdZ;lluQPqv4t`rgTYgvQm-GJH!_-^f_1^=t%lSNd@TE^dVP2G`sAsfltg(6Z%5 zTfkDBg0^{Am`gXD&{^adcD*UE) z<)NN#OhX^`d4wJpk(TU1{-}rXsGaxk@dJ$=jd! 
ziL}?_?R2LPD#iCJ^@Atq_a+paTv(jrwR{Hms+skY%fB_t_D}R{z~c1>(PkJt;eHjk zC9>@AEX6W7=M7rC1m7^wfxc^hMQyMtoiSW#D^OUniht zCwEfAt4$$&^WpO(2}-4r_yN{4X4~RQIgqZls(7K0!LQ+$Z2s-ohj2A-j8iY*0B{VY zWwoqDQp?x8jp)+aByxECdwWs;x zU~?(1f30Prk;YjC#>XddWXTVk^VW~qm1u0v4@2uj^0y^%$wk=$-Qy9lo6b#iXO8D` zu2P)@bz%W-EJpiM%Vp%|ZTKYWHN6Z6->*435-Q?L)n8 zMytUf$cfIiW`NWs+GM7lN7@td6Mv6ktXh2Kzrp*4RwCztrz2b^;aqY>Gz8ZzWUe!L zj}KsK1K$b4Qr{8VWGdB%-YFyT0y^Ca4LRTrP<;upj0<&vD0Y90@sokH5K3%8mS{X z2I|}J{dK+mfZCf-avB&r++)wcnOhuk}GHoDjuj#oeNEg6yf~TvXV{258 zwr*rg>VRWG!QX4Fd}`w1b{d$Ns;5bQMc+4|MZ7+)LC=$on~F5w!Eh@WlSwT;JedPu z@aa`HzrTJHr5K57YUI4J*o6526|I%Uf%e^)UYQupUXo zSXp}e7d)AQ)9qP^oH8XA<#sdrM9-&!SSwOgg@mue_xEr&b^KburKvS&-u!0Gm$>uv z)qxhBh%X0{p!Z45Fnp;BuAG)67AkcD78Y|#Gjeb&?2}_+qSmuO5!;iD-|w=GTk&}d z3=&Z|4KI?DW4o~%>ML&}@{T+!hBNF?vW9v?FNx($7VNENfFE(;MVMcr@0W03y_Iyl zcPhd0t3pl_VUzr)@zrjE8UuamWj3r~4rqrnci{Hs0+LjB|AW3efvt}#Q()KJwIybT z&tS1x?@d6Rx&vcfTS20pCAo>Q%-Lk3EpzUWe9f;q>*&a)Tt^C?XS1hhoooUX&2)!Y z4L@i{{jBxzDJb2`?xb{AcCnV(vwTEvv&TAsek2lNnf`tc+oSZKcguN$m-TLkf+H88 z+;wK1R`zboaXxwchZl20-XF#9u`dcHRzIuMyhHqmH6H=~v1BTq!-=!o;9E66dCSle z1y-SJJZZZrB{i4kW9~Dpy*wGKp6yJ++DNO+RC$^39brVXHg!@iV!1 zn}F;+&mCdx$t33h{pW<`k1*`(Uc7#0dj2=4vZcW5P&~^?&pRlP6N!sG)deRbU-34$ zwV+jLR@R(C9AUq{3{U@$a%mbpX1fi!u*leZ{uVBUy80rl9hMC@N?ET6;{HTT68Lrk<-XpP9@?Y5^jFL zLMGN{C47E|@`->+rM?5%`{P-u%V3>TtkR$V&X1BS$?^K%=OU$e{PcA0d_wn@z&TW? 
zgM$@NHIX%;eWGB>f;`Xd#K-i$Zy^~!^>CFZFYxRhoa?W3PH1M}M(O>-3O*l3UnjGF z*=>)r`r1X8YOq?Fkut*%w|ZKRB0od3WZz6>lbj8VAQ3O%RRuP#1SQ@nRvp*U^sJF* z78WL%XY=MM`JEGE(8ry{tU>%8ZZdDQ(93G~hvH)Pk2hLL+z98awT7@y?R-zJipPwU zIE8D`Kda3uEXe2NC;6vW86kbe?&R%dcI2sob-EUBSoI8tQ(HQf?D|i@?BU|vqKZ}f zbbY6C*JY%?CQQd#_q2mq;y$W_vGW!F=E1qn?P= zR5MB)nt8^Zhh~jH*_d44RM^4G%#O`GnH|||QZa#@*^6rNvCUlH4kxp&uSRoIUoGc3 z-@~yAo%w)eeFgLrpe?`tyfg_d2+OinUNODiIeQ~j(&8{PRki_79 z%VG~iujK1WJj2h7d#fJ5q7i@9-)N)thH0`Y#ZNNz2YP^GCH}4!5&gD z*{9~L$8+p-H?XAQM&4sjGh>v{o8;I(!suU`(dU7wyT3V8iAT+2+BETh1gsO^uoI2i z7UvNSv`!Az@}wpitEO3(rvA_SAhO>#SH(9b9#r$mPiNx}#IfYpecODLlc|l`Wj(SC zHSmN_VghMd?duO4byE%m9h+=|-pKqUsK`PFAERNy7OgBfdc!NX_+d9&5yk zxHEuVOij+{Luk>6RY=v1%(dTm-Rbu@kT=%T z%Q$ftI>fUjxtrUf`<=$SjdY~`&s?x4TS6<;?4z%bNI)w%T%u)Tc4YxflAomxERtJ4 z(Gcg8rNnO!qCE$rOybe`_^OfROol6nhMe0l&S+MvWF>*}0Q z)xxo4eyQ#A4d@q7gR-nsH=nMeo5>0AMPXkTut`T7ZG+z>XczD1cw|?>zj#K~A+KMP z;ejOPQQUjWZ}JqR4p8zSWfxe&1|12;4d5My|2^s6dN2&}KhafLGpu4eQw3@WnK}Yo z$&YlLrxOjdh8F#f^u|vn`=)AIk3-KHBp^ON{mfMNv!wB>*#g7(8hi$VW&U4FPHys4 z)=+y%(Jc7nJnc8IPmZUYO?4xMb#d%vPj)d%W!(9b&tIZob^YAJu5I+Kjb4wSQ(I}o zm(D_S0+x7>)JPm|^u*O=Wtp6+FW}wfARDdM@5xJ|ODuFxW8U`9E)i!@cI1oEuhU=~5>=K2{q+9=X2{rQ@M89ra&ePu7`>nz0BM z8+R4FHx_zWSzld9*88ANR;|voBvmh7BTuPmp87lS=uT`~brxm=J?e!g7vp)VC{{<= z?8Fiev>wM(Ydg7d66G<@Gua&{sw|aMdwSvny?3XRm9%LMtBEYmkYXh`8$VN@dA;9c zQ6EbZtD;1zZT-yEyIoJtoOt-O*7r6%NOkmZJ7<$$qWZb$vczv~x|B1^lR@$%9@J)0 zKF7CR;Q1PN6Nwa`{G3Q%ho-Cb-PyY6bv$ctHZRSQvmSh}7%`*fOyVElN^a7e`pzh< zK|7-^#F=_Pr8fOY?LDq=$ z?9~4nT5^#dzOWK}6@M~z>Qvvz#y^4fGmLwAF<0i-M}WaAoY zeZP*5&xLbhH{;2fIIf%E934(o<4LHnoKB5_!7`eh$`or@-S3JMlhOF!$aF|++}T*`n9EJaUai?3WTJ&;^x~m*ZnINEy^h z-H!MY48ybJ3;f6vH)|6+bPq|ZZBLkY)bRnR?8*3q91otc=f<~1D)n9u zMu|_ra+}Y~%%QJ=DLS4yE+gDoZV%D|7O5ffSFm(5Qg-Z@;Y{*8-UsqudhQ#&=KNrm zQTdkV-uqd8e!t|BYL*rl9M#2+S+Cy5N`VI`6CkHbhuGOH&KMQXL)ONZvtrvA8g zJKaCkSY1(}TA`h(PrL;T$-vnKPvRkQ6c{p6^0elSN`3N?bDaLJU5x)daN!7$_Ot#; zE|EmP+^Fps5M6~{{n_oiQ6?T7b#Y)Hnp4M}_&*$i)_0MeGWhsILD4esdWCfELz90A 
z%VbsEY^@ra^h2A?Bs6>MIv-yT3SFsE8~5|9#4|kKPlDRsrSf==(R?pNJ;Y^bntoZlKwZ z?MSqCs{EXSHg&YUw~+P&$-#bL+=uMN!=(&N|5mj8onBMdBQY`8H4op9?)ThUC468aa*X=~}AP9_r^_Pdw*;a&g2Pqx9S%-;ORm=Tkkf=X$CH zC(CK#MRE>u552k)@51e)UA;i>hmytQ94XC6dvC$#3ZyC3iOTpEDAGC-nV&I!Q+Ieu z@noW`O8Y;OmM4a0pW=yJyT~)Cpq00i!$?A6GQVLz5|8~U9X!LmSitq>z|`4Xf^u0w zjiMJr%m|6C${t{{GyIXXxfkgfO7m0g>o1oWzwBE0Ocq`3BAlSDF`Y zG{!UJH<8hQG|FP_zhYIgXPOJTJ8TzUH2x!SS7&=S)Bi*c&htCSjGo#5LGw?dSt__Qo+bIrs$rH{ zrnc3@pK(4BE>F1A37##iiIZ6^ZzSe}tv#x2b0yKNr}#X_{P-^Fm7^)`t-3~me3IE9 zIUC*rQD*04hg;~UJE+cJ6MqKhzObkV?(bvn zUv{~PGWo*ui;A}oyV2@2(wleKU+QxWT}Te8yo26~J6{?(Sr`>A2t92))^!^H{puQ0NM})z|874hvEXY+0qo z3;RbD56rh0QjoW46XBQW_pH>jQz{4g&)JQ$z%t} zuKO#nrk3CWG|60)%7}Z=?IqIllI!1)gH^EEgA+4IOF7q*)B9txGZr@zQE6|#6RO{mUXFCc_$Wms%UkS zdXjl}KU06-gD~sZSIlC`BHT!i?ZC7dY!Bhu<3@?U>EBrYmuPz<+)h@&CFD9)Ql{fi zuVS^-jNOWr?23lp6xQQh5N+~wawE28ZARkTH#l>-+2fBS@^$pQAE#TBub+YWZdh;i zd@u5oN~ejYPL76NWOc6DqrINDqjS6<@&8wI$B6N*dc{1L)%!%Fu6F%I(zSuLAFlW85tD6vzI9G~>QiSYIpV6Z zAag*U2=`>J=x4m-b+{3Arjd?)jG3&9$zgY@)<>e;8EBsq_hgj*kt8M_E}SDV9EqT7$2U5Ka2lwSpvCc$Zzk&^u$0zUs^71JN z>7Gbj)i6|f+5hGwdxg2NH~U^4e_EsXUah_;bbYV?J84$(ZzLy4Y0Uh5Cq~z!)@0C> zkkbl2ou$`$@NG|KE1^@o=_`S&2YMxs>|5lz3fM-1U==HJDvl;*Bk$~|;7BDnmF9!Y zEYp*Yt^#$cAEh?SH2v>HyV>sbp)DWjB_0hqr}|X^T^+KrMDG{tCoxD3JR7gSL|8<_ zQ+=}^Z0E6wZx}NNTX34Y_ zsc7PVRW|J#w3y<_mPT7@^e@R+Y)Ude#q;!AJBsFL`4XPJMD7xY6pz`&UBvTnmpL## zS-*uvqC0*ChWJl5#i^6@krR%pIG=YQgIr7W+e~Ye>-=rSN-xw;{C|=QYqtGTa#EyP zLwq^@MbGo5x(!JxPq&hRwG(LGg^4=zp8Ww$yBlGW`wigrIZS?Igi53(p0+tJN*3vX zDAtP3eu=`d7%gE~LL$q!8&BAGz?N9(XZ3a}&c`3|b1UR^sQN06O6G+gu($+tJz372 zT$aQ2WZRv99;a%bmE~{It~7W1akTee#`*(oJ=}O(aU|I)-iO0jnpuw=4GHi_&l3$|%?Mmd3-1cuUlGRdFP zkj&oedQaHT_sm!pWi0DFojhOZdrLelfmRCkPXbn%Zvh6^%X3Ib372V1JO!HqtC@ zTH8OpL5uFggT&TcLQb!9Cy^xChki*mCt6ozoze{C_v7LwS1O{}#c&;OeKj7|2cXJ1 zAi0ekW$s9xna()0fc4)%8WIUt4K%3}uu0E*;l9_}WtW*W5z%e+J{#6KtG@?F8<4VX zpxeji=450Q>0ggltw6twv|M0yJCCgzW~3RY+?AwmcE16fp3v@7@b4l;lfc@?|74O) zb)ZBk-KNj_I2@n2)9`aSDO>}yt@b<(^}5*{eIL3dx8U=n=rWW|&e$W^kyl`Sq1K06 
zY_$X3V&76ly$wl@#V*}Ty=+#F7uzn9 zpR6Xs&}}@rjYOg4`cJm^L{cZl;y!DZR4Bd7-z796CkM&6*#qy^qe>f???tihc#)lK z-lMld^)YC8v013SxvdP^r|MC5;K_vXwv|lkk+y-^Y_sFVs1<*ZE;Qw4R7h@)((0-E zl9WH$p|JTS~V)i3@M|MDfIkVjtaF`#+)UlQ6#0vlGE}u+Q=B zS*ib{P$Q8Utvrzvk9Z;cQ8XXiPFA)4|9UolcbG?<(Vin+Ve9Ddfu#uo5}o}=DJ7e=sMKM>3iZf zk}Wmw0-o~B(I}Ul{9Pn?A0zZEu>3*a$62TJCtrZPBEbQ=6H1-~p`oi6K z=j?;Kll4#rUAJjj4F-vZ&zZoL`v1T)p>nDj$M55(pH$}k>2rL3e)?QmkLTdxdj;)t z{Yb5k((nKL?^C&&{`Y}(sD8Xp$9wJ*|38CgX)e-aBFTwJ>5qIcX7u{L<(y><-j-g^ z=}CVp(T&z2Z_}sLsTk%Y;^(d;!|_yhr3#zY);i~GGsOes=T&omKi{J-;b{DMQ~kRY z9jZ>_eugK#>258$m$zCI=|=W~<4DO?S1K4G)h4!*t5i~tXMEx<^Zw#V94?_};nNiO zCBxDmec#Ix?A3EBb5<^PiW~KMmNsihQ7Rb3Z!!M9m+E;CII@yUgzFyI9BL+sN9K$A zOr_EeG^QWf&79wcrAwWV(%hUmOFf-+-{9OOS&Js2#Bz2n`72Y0b|4Lpf7xUd>4hTk z*?dEbc(jbMrr6=iXmYs7wP%c)jPA)4R*o!p08b(VQ#m8=2cOX9d40@gN1ij6zQjJ( zwi?V1@kJ1h!S!;a`BgLblSYXbe)9j7L#L_mdeK--(77+Hh8gd1J#{p9tuW3>=-RkeGk5PKn}7wx|`Lmn?O>fj z60$psugG-mwxHQ!_@oX{H!J>?t{;b6S*6FH;5%GD8t>lnE$?}zd9sx@d4Jp!Rco>Z zvwX{*^gJ-1VU#J(1l|JM8dQr}h%e9&r{stv(G}bCO zC3`>@R9=f?Bk=MC&ri`q^1u-`-z9&qov5F^|<>IGWf+~r(s>_Ba8)h&4_p(NK zAIB22G=-Ee*F)Yrx7TMa^!+Pnd-%=iPAl-`P5pc$RsutE;E%A28jJeF=u@I%zV|=X zG~?$q-jluc)Y)wQCo|3mzMo8@t^wK0#U60H=MsA`h$Y{O1FiI50oTi;+103dY_ZNs z)}A4rKZaDs&*3Yw_cFSyEbPSXXcbT3WLM(T%wDA@FNZ?CKG)5^}CYNelep;SWiGwDT39Dk%ZaX1{--|A&L?sX?~*>QJa zqkqKpc-1DeMk3Ju0qbO7{sp_!4X@)(m;A&zUtMmMTY~e4qhAepl&~zN`?V>wcLkZs zyTV>t+zqSGJhu>}iArmaCMSdLSv36G|I{4(4(v5aP1gQ@(sQbuti!(}$?FFFeS`*I zphWyZdXmH%FzbmIId?kMxOdTsb-*I#*Sm%Ht`a0d{n>-c0sf8Zx{LR^HH?sUV%B;r4mZ0nhpAt08TlL}E z#7As6EnQ*c_Ieu)ig$4av6nnK4ewjq_e>_k zsU^M@jBkQGryYC9N-Cbj2QU>x!nal6I>EKnWN)UIypg|Ee`Sqc4|MSriKjtohD}1T z?8q0;#rR^?vp#;=EY=o{a@xASkdAM(jaOZ240lJ_b8){PC`PkhS(o?4vvO#ZH{vX~{WLTKsnQIEkhzDe%kgDi=fL<5T@5er2?6N-oyqJkkMbQTO*nsM(R=p9WqFvct=M?u2G)um# zLU(EJUnj+~IpFW}p4obO4&Ghp4wTEyc&ai6KT zl8jlETpve@JE3^|89pMttMpWkW+Z<`iIKmyPiVw)B+uC#PhE~fi7o4*-~C9L#@d=hEg$)e}fU>%NcHqO)fUT%yDU^x~2I>2Uv>t)a(eo%{0 zySmvr5wL%nlseX@sqfnl?mhgzijt3k_i}6gOF%M_RyQFJ=c4hmeqZvb 
zCCb01_ngf<#KP^+=5l=xF6jR%*_~;fkdys|?9n`O)dgPJTTUbW>)7?_sBr@hMcRfN z?GzMEgm|lMwSL;;Y7NT0R zWj;^3@-DM8%bHwUInQom4;#Ci%0|0jIbNH6_5QA&R>C~>fs(m>Zed6MMl!xd`FK(W z=R~dFii&+fP##_j(B@V2{ET!i^wdf}Ie%|XHy-vm*|X1Zr-@x#-ofl7S;^rv7+rH} z@C0ffK(|`KIr-!+@cbYUjD*>El$vinH(USDqSzn&-;L*O{oV?$#B#KOUGj@gU=vdb zCK08H#*MwZ!oI2u-Xw-HQEELreD{|2U+>}tHc zj@C~qzYa0CjA0FL(?_xtwnK?uk>s^#@prqwt8i%s{4U4YTi|?wc6lrQclQs*t$4+Z zuxC7&6n~F;H+p(3Eqe~F&q15nq+99_OKXap>{Dkw?1v8hKpy{;qi9Z6o{4EW z2_F4f-RXFDZ2`jua8y9)2hp*1oHLzm?&l3|8L%H@ge%#bL~iYYTT3gSygOO~&h(v}qZfOk9=Qtszt=;~TXGgu z#%QDH{y}6T8CoA8otq14^zi&C?)TPX&OyeR@m?wDo4&{CIj0S&J###~bJ9_It`+Nb zk{(8*M|hp8*@t@O9Ao^ccKd@hQXuumR(L>~W9*RAW zmdDV-M@V1J;w!O5GjL`KndrnyL>gXUg_9j{lU@d)VPjBajk$mojvvJo_UKe_HuX7c zqs44lYZ`n5ncZXd?0`%4*tAD+>P~hq`uP=YKcD^midAdO;tghF;w83?BqrYTYo7x} z2eOf9gTBUom92Y+O`3ktfHTze7pwEMzx_}SYFH@*`1$N zP;U)MNR6GxK`|Hnb?_%y^7^rH+hNgwY}Q1xnr6TI(QKT*B{;Vfm*Q^`%f8g8i7?4Y zbW?piNG9r{b|bx|V%%!fnQfo50ZubOQyB~|l8vmavp<;RZ*oOkfi`>157(g9pF#Al zF>~gTit^bT&GY+fYsX}s9ci9@Ny{@>)vS1vKfDFl%NF~tN6iBL z-}ADj>=o@J44GhBz=> z(i)Zekf3kO@A1Wdm|SPgb}spS1%=K+gBM9s_QmhQsRueVEmk+1Kt3MC@lnbNd`-Mb zwSk<4yx@ONkj-^=cOrP&!!Wur-srt?K4+XAeA*_FI)jc3wwE2@EdIDrtj!wBc7i_ zE`AN3%HTK$JflE5u8{Xu^llld&+~LmnAS5&GVJWYzs}~#ymhVM$>cbD2E5&jJs*|Z z7k2PhIJJp<4KVIhpL**fepgwiCudxuk!O+H^b%|a5L16@c-E!Bd2AFK1j}~L=0Z9O-?N1!w_3>q_&A@OAh`X>HXuR_za;f|1{piPb=_&!MEK;?JpP+CU|-uf zj~PGJ3TB|nIdn0(hpxee?Dmc-R#W%kMDmFyKi)6x-QJ@|rICYIf+Mk*-&$#X1M3EG z{4+WOSUgQvjz#YkC{&t7bQfG&`}qsn z?B}V{7?;hc`j~G`aVS1E$y$`m7&*h8k2?3G#(Cs5JNmQ1nN#g*=)c#9;a6qP4mN{E z5AQ83<8HL6s$~1037o?pV*>fnTvv@u^BCro?+BswvTDHT4k>u)HB3 zg2Krdk@s*X(fj1j?t?ew%sfxCG99&iN3W|{vE#tfslX%kD!)R*%IJSG{E{2`TC!Qg znq>`M?~f)UNLIW9QhPViC@W#vi5AoOdGA5E=ir?Hbg=}y*F@o4I--v|9qWLO7QJUjC%;C&()e@?&8xtF(O%{(1z zm+Tvv(ef@Mc~#ed>RNvr(U)m7IZ>n&NYDMaQ;RIeBP3p4jc}(I=(dsbRG9k}Y_r~4 zORiU&*W-sdQk!M|r+(**LaJJ!*+8?j;g|K_I(yUS&FAq5Tx-2) zb%eD?Y}g^77-)4+%hk8Qko8z{9cu#$^RyY^8c)87bIHXu6t%K0N}lWI@vxniby&1Pw0$y&Qz85_I99{=4u!Oq z(@Rb&+xd2`&$oiDBMnZ@p@y{M0Rze$BoAAjFO#DdNC`I 
z3hBvQ*j%f%?8TR?#cEPl-gA$bqvFZ)iBG8llRAuzjCdUg?qt`}Ya&gvn!W|r_xRmu zwm-x?os7`Opz|o0F40G25_SU$FC+VjEy-H!U)m*lY8kHPTzUr!a}xT*`=dR+-)s-J z-1YvR8)Nse5WLwH-(T$6y3nd*NxRQJ^PgnkHk!WL9Cr&V{~>7abY&(PONE*28UIeB zA8xqajGp`{7aM1?S-qA1en}4F>C+#_j%Od6kmrv`RI1`!jYIpv>ss)yV*gv?UaIG$ z7F24??1AqXc=n>j$!cOP%pJvBil*S!x}6HmLiOl)3P^zt}Y=8l(bA zZjIY>^qk#is;6G$H#_dqy=ktV>Gvt^{tDvtAl~0|$Dm#IvH$tWuKG~BnbQCL|NTEv z8+j%^1#Li?sF1&ttujUH3UuUNa!`}Ks%wmU@pNDJQZb=4gXlPIMxodkxa>pzKE|of zP-qg`CSD|`E#1k-y{Mc#uY>fIyc8F+Lw`Zr@NYgEoWaHhIM4D47x>&SJgWpDO;qMoz6c^?2tf$zal4FC{qow&$zsYrei(v-5fXR7txIX5aWL{=#!Fv0`_i zO-&TZ3HdczEHMuxL;VugEmaXFvw?|k%8F{SnSX-+jkI~%v(I~G1BxbsvlYyfEnm!>r;@V#%$Isok&Gr_{{<90NQ_~{yMoo)GXM?XwTwR zJjfIClT(lQx8=P};!IBfL9#W>bN3y6e{Sq@^zjbQK22kKqh%^G{+Y}?j|&rEpNul` zi%&({#1#RzW zvn{dpTXCZoxGR`t6Sck2^T}XW`u4mLUvQ1k%{r-YbiW?vN4IDs!5N2V3U$j?R34K4~8wUN+jAQ+>ICaC={Xd zqg6(Q8O3OgtQ~rSu{5i5IcuJe@j7)alFKJ}Ka2_=gXAT>WY4w>+&SZ!?wQMtkl3~^ zzBlwttYSTod`Siy!zXL@C8&|QddZ77gj}uCW2%9)Wq&5<%_ROd5}2wF6FnPv z)}zE-@J+kp^k1Gt??JUqo=ZKZC(-#ZBbR_O@i@C=(^Ga9-@xs6D3FuL zyt#bR^T!&cgSFbn@Jj~k#L(xo?R$`=9@anbuPvUOtj+oQIveJR&HBon?w~$QFB9qT zB6!r%J~54Vkm2pFB~w6raN@JFlg7ulrjh=dxSQCbhO}k~x$TN#b@iM&U^jttJ$aa7 z)i#i<4%Pavuo!IIlkm4a86M>6%6@BFja5XAcrt#a|I`&5jfR`Fd!KA38}AJ5FT?v( z?yhA98$>!+umUfG_b>1&(a#lVk?g1s(fH16+;pwRfhB%VbwE?1uT)GP!YXD*(b?yx z;kQMvtME5oOxMFa8HsLS722YIhr(jMLuXP6C1-ib5_t;>#XqM!T;nr2(A{yUT9b|p z(pNvd&M9cx!Yp(Qd1^;FXPY$o4R{XNS0P|li@x4(~eyWw#g zI{#?QnJCf(ZXM7m*&>s7G5&Yc$j4PMI+5g@Xs0puIhnF7|y(JyTW7DJ0|?pPC!1hWm5SC*Gth(E2;FvIghk%Np*# zP9{beX{+8+S8SGVgHbeoPO;53t(NAas>`G&NWbB+BFVy3^ zaKDUXe~6E_=r@1u}exO7S%>!9)TAymb!%;Tp zU+c-y)$YWna1>pC!0*){djt2UqgZ+UZPL>s_UvVv_B%bk3$iJ=vdx}-s2=yYvzWY0 zH(s*5jH7GuU>Z!eqZjQ_;WZQ-iX*AYoXjxsf=OMSuDE@cy94$4tg+rU=1kVIBl>kF z4e<<)kK<7GryTrSn}H>W7%)wt)EeTBpwJqcr6i*jCm1 zSF}1Yiz8u}X!fis-Z3+*r27rvRnb#J$kiu$deO))fb(+gM!J4&fk%9=#^TTGt~4dN zeQ<6HsmTg-E4br_QWNBFlF_f}%5ZkMvMXCbnk@evK|2M7&Sde=p&@%vY-K@{`2|PL zN2}zVi0x_)mcf2|keq1r@7cz_;QEAwC)0OoB76+T)B%dTCGPPhcC!(z@}Bq8;#{Xs 
zp{Kizn8=hH?6gm@zpDWHGmKk;c5kv7S(i3~=Mt-hcsv}0+R4UoHJKQWZr2sN(;?(B zRWpBw(|^#*WBx|l4n(tL?!QNyYV6`_+}!4*V7A^m=`*>F*RmRY$!hW}r7H06So;UH zd{2vH)qKW2rXv2`O5z@LzcTuaHo|^|R-O&MeVj2oqsP<;InxN0aV_=t4rcWe1NSUy z*75g$P-qki_A!Gs(8F7#<~!1mGnK(K_e^#yQFR09_%U?xd74;;JjDlOw$?|$`+rF8 zg|uR#pL@a4K%WPIC^l z^47Twt-gu|?+f$fgnkVrM!TLoq}dtO*1m$7{U6pU8Q~Pto@k;}&JUycOHOj6R^~Z^y+B~PfF@FCJ!$@Q*9FJ^RDB4<7M+03r&q>TsS@EAO}2fahjj_8~F z`@uc`|0Bmo(8vMwZYg{d3Hli;(h8U3?>U`htSn}$^)zoH=^M{#?Qs86l<0~M$&s3v z`1lZ1hd~Xi%d557V$?}=b-7RRD9u@S4f?r|olhmp#CU$Ir=k9Si-UhPR(7Q=acPD& zb4X)$>!pzZdGDG0&bL^Ft)fE*Yn^!S)J2TPZQh?QGII6eZr;_m(|Wv_HL;-&(CV>z zXvvBsCT#=Rn9jyzN1S}n3*a_`rJja1gV}&7pu3L@9j>1ZbUZOhjm>|Fcuu~88_kAW z^_%REPoUQEWU8V5f9785jgGZ%x>0|F*`#<@CqwZr97v_!$ITmW=&K?KZb0|RAni^% z7l1UCtdrylqB*2>eUTP?bFs z52hyUe>tOOwt3YI`4#ET8^gS9Pu=+BI>~#QIbh4%<;y))x?jqP)On;k*)dDVOsH4| zHfd3UKfT=9;n{e*Nn-+2!_7%#GtN*;}Y|Cgk`eWzJ*?`!IYDO%mc~FRDv1};a{<6H%fmi(69^2 z|D0^)G^Q77$15!-KjZN&ySx|CyLLhSWPGk|7jS|xI(xdC^-3d{W%P<9C1*7k(9_tB z9`1H7PV{nik{#gDBp`F!TyVC*pZ4f{1p4Q#;_WCl2dslVncTT~-~4AIe@jy0V>qDD zsCbxs<%ztFEkTF)G1Nw%3qU-UB_Dz2Pto*+xOO%QTyGAD$44JJn}~#8qiQ=8%pNLn zQ{#;?2k+Z?CY5)(Tg_%?7;aC|b}=c-EIUY>oS#lbmF$z>Q`S&8{D{^hH*scpPf>tqPiY zvJF@Vu`@Z}%}MooZL6d0B(fX-s_W2kzL{cVL75}1B(rmzhi{oRLWTHzwluRPzwP@Z zD0x%jO`iHVwajqI>`+2ZCV($1!(R#=NOVSb_gD zFgy((<5^mp23%FNO2pvRsJ95O^2VSaoVI|ZhR@f)GUrvvA)NW5IoyZ9=Ud-$8rRGB z8ssby899-THSO*D_xg+fQBS({CW-0;r#kLL!dIfi&f@+DAdlr->r7~b|34>pEsU3Q zi1e8^->0EfFC$HaRcQsR5nd1w!}ca7YMv>kzy$rhenb6YdfXmS!CoXPOI1O{XDo{I4^ zinYg0XpvLnMA&^ySGyZ+8u-KChA4j%2;zU4+!D!e{y9#C=FQBkui;ZN%1$uu6fJg< zu4Sm!TW@{w_pfkFzVRMt{|SyyJ2`LZ8W+2!B z=HzG1d#B1~iddFz?8;?E$vc<{`mRF`t9vTGBD-l#=Knvyv=3POYTpYrDxm!xzHU=)k(epNPas^nD z0pSWbEidMXHg z_i-Am#)5e;S^gU=FJ}8nNb+{8sqtV<-u<(T*~OfZ7}u-tGO>dj`8u4sJ|KAmpVWn?_>;IDw|iLA>bp5K=YtfhyOwa!`ZReH+F;X=Qu z!Z4B?|I7VOXr1Wt#>U)V`Lz>|sSdmXP1gUP9ERBO=1H=qiMjtArobm2g=W@kM= z1&U`q^?@EQapgvE#SeO_=Tiyx55@kfrxkq_)Vzo+WF`Ir`s55N?-BR&WN#xS)7c8R z{?5Feno)_zcnl?G=;13epEuLH{C(Lj_c9#lPYV{q_yzo_MyCeA??>aDWEFHGNM5&t 
zJOteJ!J3S&@hAQR+Bb3k5I84;?Y?v%)g)gj;7yj^+sJVGyZ~3@snV*@lf;j$V(Ut~6BAS0EcQ=xUrv)&k=Tz(P(QzaCly_c@j2}8#_xM^F5VBx7y2tJ z`d5m*Y%;ng7HBV660eh-gYOw3XA$w@YJ)e)OtD9+3qVs}{|Awps&qX5G6P6_VEykZ zvZWb(b2YoCoSXa$tp6kxz0mV*^0He$2hh>@*i-}eCm=o&M^EvsPXS%xcJ2aGe^B=- zPAE+vX>+DEAn6hD*y zoO2}Z={46%f6jqzPR$N2Aj^0^{cHzwut9Le8ujL)T~L3!#uwtkw5FKgZxiRs2h4$@qS>V;z$X?gTtqLy~Kd;u$#D zkNr)irY%-q+vss(<2JF!TXFe26v)msna+E##(p$M}23xyh_E%#6On?4I4u2KtvbCW%+*2Cs3XBrA|s z{~PBXcl5TC|8iDa{5i>mgGZxkYzuKJU(2kYLk)c_bmNjlx+dmct_X5`}o;r zjnoN@wNWc|=96PFId*@EDs2mkbOxK$l&<&IHkm(iW{{YH)6ni=*Y3oHczr!#zK9RX zv8?4aBr#F?BV9kwr<2*}RpkCne{aQw3*b~@Zh6JEWF&2a%fsM2UyD1eiu0DaBDrnr zUf%FkG}BZ<&04;VVmlXt_}A>i-Nt$m)o($^kKAvg$3;dvo$Y*zO}!j+S*^x<{SdRw zG`x8eTqW#Z&LNTmU@^OpX#T9?E9>vugYjxk!IERwmkg6HFTd=BY8j7{GT{)YN5F{9LFqsKS{NEPB_C&^o(U(?wd zps7eV@{YSainYMuSiS6sKc%~a;TPHJtIY^^lJDmwT)l~W)Wfsn^_Wb)QlT#O7Ji1e zr^4|tFvcsSF&f2JIJ?ih$zEZM&LBF@Xsbcr)*Kx#qi^v1A@o?|-gQQ=p#4PDzMECq z4_C)qQJ2#)Z;!u#cf9han_=4OCp1qslcQ+odeYX>Q}y(_9Zq>emzs2cL)9kexDh8l zC7V-0(+R!qfbSYww~{I))I(Me^|ua=)A4^An|m2OEUUkTm35IKX1d6sLM>!`<6k1z)yU2PqX90b{6i?pfq~UQro?zre`=_GvIb$`5ah|=dmefW^XYrfS4O)bo;cIF!Eh8xPVOggqq}csk@;`Pc6YknfZaF| zXXe7|Ci<40b9dBCKAA^~wQcHR{lLDxY|PTNeEbZPNjm42*%v2Q%yRT-?E6ig-Peqe zDD=tf{ReR0iC(E&d=vhC0{-d7>;U_`N8CgnzQ%**p1%nllI3b25GF6;SQhmqSj{m) zV#$+btE%S@atf34iBa%8+`Kc~^+YYELUZEcX2R+$*zTew$v||R9-F&bf#!e3f_+5F zuPr!|*txpo_;S5IY1WMgLA=^8Wkr&;dZ88IJQ!wwn@XqWkhE9KCOO~Eo9?_1jCK3c zXt$H1?9EaQGk)wnUE5up0Xz%K?%*6vwvx#^k*3K|@|ElD;g{7+JXw<2Gke5m+2_#qV^Fc#I~wRRS`UZh9a{BVQL9@4N!eJd?XK;E?@9>Xm&zCxBCrGBCMn6`p z9!h7_oSkg-Os9ls?S89oH3W+SOeB(KI}dsH#zU!i-W1NeXsiiXxT1fWyYQ0 ze-qq(+W6mlE`Iwjk^U|Eh~NH?X7rqgCZk3NHlaN2`Z>(*1baE~t-_1gf<#g@!>N8K zojPbUeeXxxzQlze^;yC9muchGdY(%=I?~JQ+)a+^2K-zfv(=5mugnX$-?v$_!6PI86DTrhQxpj z1IKJ|=6vE-vUsXKQ$K38@vEZ6Gvp>7YKi5VTyQb(O0uttw^Sl|Cg{Hr&ZQn*PF&W5 z{tfr5xcdq?PN#9}t*0k>Dmm~npFfGZn_WG(m^W9^@OHjGMIK}CR>L&8qK4q^3E-W~ z5~b2!s`%&8L#{;%`?d=dFu#u8UU$@sh`Bk_xH zj4b3Vwe%EyJ{ayNKdG@)7S}3+cmxb@_qi6_Hk*;wYPC+Q6>L?!Z~jMriF%zzN8UuS 
zlYHxsPRVDI2>Pr7;|ZQq`5v^UIcTyzewZA6qeb$^roKS({X7TP#9qY%ayu?(_jQ3@ zrh)iT-`5s2osBN{81V%1cDWh-H(CrNExTyS&tZ^w+xLs-Cy}>NByPK&C$V?WvK+lo zKQ%zkM&Bn%Lu+ftHYjzW{x^X<8Hj74%Y3$ZANn49`v>z-BbM`Q^vgNkHt-HL`fIKv zM)?`MS%ALh8?}XYlj(3Pd!BQwRY3s}CBrIXV3f9OqgOCDLvh{Cbe1g{(~?*OvG{9Bi!%eQn{1 zdHDBhW6jrQCY!KGZyRwUId@uFO->;f$vTts(dxMQsNQpm_kcFH7rX47N`1f~#Z&kAH^u(- z9aNcTzB`b8uA2Oy3*b!d%JQtoJji6Jzf_- z*W*HM5@VF;hUCIMiL9lbdMW^10g@-dnd&w1R(l71lGkiD&8Z9PUsmy?=%E_x7r(>Js0OMaJ9cdU9pd$H$3ed_UZZXHTji zJzP*AHFd_IZF$mkjM1{APVSEbouKrDeJ6b#3eKi1Mb15bfu60jyd5o5YbLpAKP&hW z&zH20ze1|!l(8oLm~18c>ji$2`!}Dn`_6gIHusXPJ+&%x8gZtXs6JeBe$!Xm)Unw} zre7;oys0&`Z^4b6xYV*vDb4GV$eZ}?i~_@>p1jBy`?0FYtpBmk%~9oM5|LAZ-*_gO zSW6>+>iRYamhpU-5(BR%;;GmzUF>T zHR2O;7)(m@)n3V7{R0i+n-UNIQc4GkoYs{7&)?AhzrV+iramhC=ZP`@{#@#PlXH;= zTnYE;vkWKD;@5l+H$Fp)oDZD|qWCc#94P<&GtT$$s&vFUg@vnwbFY$>>`kxnc{W{J z#I8kG&%(7-bVzQ67U)uyb!+X3yp>O8zQwF>XZGqn8k1ecG%}QxPAVGo!p%W!LqD?J zhyK?iGpV4Pby-#YwkMCnX!aoU+Aw`46Mk|_&%v!9;gnh-)7ZAur%T-ZI_=^E5ufZn zK35?(Q$RT#^dEpXG53jD=uUIz)6COYzBkPf$?dn66>CdJ-olS5WTF=h+vrNpTDy~z zi>#^ofbA{XayG1YkcCuwI-7RooF}ozVd6{JmbQdYV#2AmUFUh6;9^0O?nhBsyeXDoapk-EF4T^rVubhqo z@m`4!{mq_>M|E;d?V=_3>uo65;ynP1kd|InrqSdl9tWJufHF-0#(`?xQ#d1o2pVp1E9|EJiSsO&UZvn|})k5sm~g1ofWKCy0JqEzxTrVh|oph*4M?ld=Vn9CQs*v*`D z6*{fL_q?l1yiK$#bLBj=Oa%UuEba_XG_oRzj@_)ki^*a&u*A<`3LHN|pX5Ke%=LJH zeS|u9l8z;Q^42?R`%!w&eq|KN`~YsZyv*@LV*OFiVtD4)AxzE_cvI7LppAR zQwy{{%G0e#!E`;w(>wJ*Hk04RD3*Am_4<9Dgx1C9p2l0?>71+{fDeG{lHoh&$unV4 zAM|(8v{Z|Z1iV`4`b5;uiOvB0NS229u#5mv_R8b5y_+q|`l(#81Fit))KN&>!2zgn zI{D20w#&bHz7<`Yg8vJcJxTL_G!s{Jr6oHxloqYe)Ay5~+{>or{d9ao5+k=%uSXeqi23eCbHdXsNis(T^4nR6?27w%b`?6j?o&>1dXcp? 
zWVtPg>yPUlz&;a<$t4tfmi=L3z!LqHOr7!1FNa49+|LQ?d~2I)P+>khb8WGTpG*QX zf2E%M+wg8?l;$Xu3o-2JiDOzzKl-9n-NHg$i*FrR zn9QlkN}4#jRA zTx@UT8?7+n#}o+uz&;Lh{~lwHrImM}#2|L9Dtw-?>Zwjv@^<3Sp6Y3~eGDd-yBgc^ z2kY^8+I*Q($TL~TIXygXE4ch*#=JWQOg-({EA;hy+Lfoe7z;QIzHJE`zF z#p~-dV|<6llgPs7W|0c$`4&s^h?9YLeQE*cCG@GTQIm^*I%}|zUVf~@|(;H({Ulyv3`eJ;Y`lk^A~k=!R> z@|ux{=<#AtzDJLG!Ek@GbmBZtqj9M*lzM@Ack>b*xx=Xc^33Hho2sAeGu~zY=Hg*u zil>plJMlG{MJC}yd{pX#?|GxvARnKAaz1LkLuX=@|AOPaP~c(soo>%}D#}hE?_1FA z2oiJzIv$5($y=LD=E;1V)11Cwe~$G}Rkh=cSQ(t};7=Fwk`tZy*(FBqZd99T#O!v@ zrjPephmIooH8|9MC+d zU0JwqHcILOC%)uF_@)}th4}L)R{25Kvrjz6xARCx;-G7iHOi^l#GVrv+`c z6i~)*CHt&tbmkfK{J1zN=!k~L*-K=Hdw^>rL0(zEiC7xX?vDXgPEd|O)936uPAXtb zRhlb{ee%Ap?MtWQ^^#m0{qT67!tTaD`~Y;i4_zx;Tg4yeXjhIlqaO&5oLD9Hr}U@9 z_!pDr;>E*XRi?JTHo2wZ6B3XJ60tEhKFlP3v#u#LXOEoVWbEfObD?hl7n-rl4FrZGuU#*jV5e?~~xS z58eFOr|h5d^iOd>v(P4SE-UfA49)3p%=pY*1@8WEFYPUHME~DE;qZUo`M!*FSQ zPkd8KbGIZ{&wpZY0?QvG1%7jma}es4wI&JY4nnu|9zUD>Od>he%mEwV_5;ZtO`rPU z`?a+9YxZI~%I3XZ-pf`q12w^!-MEsI`9b8oePQ={(~u90l9+^emYj~ew|HhE%_^OL z^VV$&3y}9}$7#8hHQj+D*$Z54lw<@gt?8G{C-G~^JH1_GH7DS$>1({+69aRYv9jCl zPN%!F-l_On!FVTwYbM@g-!#<|S^YKC>Tb4V18cd%Iwn!8)y+^HXj)BLob_8vnv?lH zUMihQNU9v>+@v9CXi3h>yVF=7kDyiFQB}1oZVsx^=9}buJIDV9?B`AvcCwKYr}U*6 zB(-#sLp0tWTYOtcr&glmeP*8Ii>aXRrtVhr`JZNh4{6_4<8Fb)+vswkk*~mwiQ2|< z=LHt3xe=y-c@S9p;NdL&cL(icPb6=_L@TRQvCD3*mUY5S=(5;T*A>0L<;oBGe;;pt zt^FEwp2jYv#!@nl#jCR}*ek*FF5eTudw#*I?A4MTvLj9LB9MNokIHR(wz3B;Bjz03(D3cEi0OMMkj-8M|8Xx+=)TX znaNB&9YP`|u#^Mgc?&vZcayhu@!xEL*UA39%9Hb9moto&wBlhahl=hr)TR>(1^$Qi zeS&_@@zhwOKdzUX&0&crnTgAvqIPokCiBOEkNrcIG zuq3Z#;=F&fe!GUGCST8Yc>Nac)yC)ZU|SA-O2GOAnNF6^yj^{hJ@|k4y7RcN$GmU+ zmciKfB?*@yL&YRX3+0-@m{bU9V@O3&l%mpNAG8cn+8awD)l{@Ewkb+Aijp=-`+}56 zJnrY~bB^DAUH2cq$N4zV^ZWfQ$MN2e?Y*S3YP{Uz89dROmC-xvln z4>a>>+;HtC_r?b#IU2YSWZS(x6jy75=Y6!x$^V(&uLGBy3jOF))(Wdp`3e}N^2SY` ze*i_Yg4_VYM2@Gb@ki*tUeA&6%BAaqcQt1&@h0kw zw@uCTPSW3%+F3|8vcp~x4R^A?CzI*;_5O&v%TXhFR{FX!jkTSn)rIsqXMpSIOY)K2 z47P6MET^qgQSCEOC+}$~G)vaJ)apDDbg45DiF(L8$uOR(BH1-P4IERzIKsU%v=Z;) 
ztf*(}eWtN*5B*Dxi`p#8y{toP-0h2|Gte`+sMfptgS!n;?o{K$5|AhJNg^3q!KW$v zm^hSV9oUUy$-VM7ScT@v7+D`~Z{c-(RaO@5%z@nv>}2Z1&LNYp>FvhCPMwPK$*z5d z=dz3Y9jN1rz79sQA!};iV?@v1<~;OGrh&}K;+1zZdHa+!MDKR7tT&=|ViQ~8L8_mh zfDR+PnQY8C6H6_sWFa2Uq9wa#E9;)j+N!}ZRk@PEJre(|cT3_)vQqnq>@0CFHK&rZ zza>rUPELNtxkTp8218}AjdX9izOoWb1)*6WO^w@z-l$YS+fz^7v=T3k=U|bj6*~|# zX%H!n*He7u`rtpBnDP#tFyYkwDLlUY4ki7SD3j5nI1-C6kC zlw~aAiR2JkUijAK3@mXJIVspgMjN61&nVgepC-`czr(UKs^|PVJ_U17J)>skyfw_W z2kNB}>m4t}E3Hv_g8n2k_DOgbPmMvv2=f#P>CVDUb?re=_4R&lTGQE2G&Q?`^}LuAWloc_>^Ud{Wu)l1?HBZX+@%%pn9@Y zHucUWq^Y_Y;?RPUUE!ZfQss)>l6V(oKjc%?O&*~R)(BJRcjE3Qxs&{l3*gzE{ZD?I z4!D;*GNa*{J(_P_oj?Y&GnKf-O8VG=Dt!uE>XYHJv^bXHRMM04o9w+d1aa~b_aja5 z*!cpl%9$S}4@m|1XGLBHUa3Fusq1@;xO?BXKUJKQQ8j)cpX;F?`B|xli(#0Z(o`poA5^Syc7cv^t{i{JvvI$S zo>RNJi~Ft7__HaBngwflL*-=IF#Jd=dki0u(`?0mpJwKrEH=Di3i&Y#>WcY_dP{ZP>Tg!|2kG3*tZlI+PP6)k=PvdpJLiJkaxfVQ`j)e5ZboxWFf zC3%>tz;qfJIsx^&fq9#jGsFE6Y&ijHhAJn*J2Tc&`fEgv_H!rq-veJcvQkE?iGmvM z?jRhl?(Z_xPyNGGvy0DseS1{#t!u2;ed)$JJ0%lQe+*sE%JzBv4k~z==;fwHo;B{i zh$i#>J;7TewVP^A+3_2W>hVrr0rHJtz7LjT(K^xS>qz(7+7B7gOX*?0=k7$cg#VHD9(bzxolkWpx(vi)pF%icq~NU>g46-LSF76E2&PK)#G!dI5|>Uu|uJ1W&I|5 z-m4(`mVOCSUOkX=m^DbQDW} zS>Yz9#1Xi)(ES6AM>Szw-uLXxq(W`-M>Y2;=hh|pPCkIm2pW*eE2*>c5NxySkd<&E zcar(_0IgkShvJX)J#~^FwR7=L6q)PWxu}`7a3CJ;C%YlFjCQAiI&0~5KL6Hu@US~W z$yKs4-3yCvaN^6NpS&5!^BYg;?9&|5n##iQbf0KD*pkA|25q-#feGN+0aD6`AI?cTLD8C=WDcML4 z2kF;1T~*I>N$`=dd>%)3g1w|#^l4P_mJaMsp zzCpvE$jh;Kx*uIyQ$QI0oQ9fTfq7P;(w`d*5m|IaaFs`&! 
z6Q}!}95&hWE7^%C+1EIgMDJ&O%1%|T-&FiP9aPB-ll_ZSz%K26$y@)|YougHqr@ks z2EZ7nU7)FLX0BMP_TMKB#jdKaVFF4kulOK@{Blgf@m_Q_Y#>`%y7b}IU^3m3Au zsqk5!^*Mp8B&skGy4fM=jH@@%z%FdVA{O`~W9}MPUnnf|CxxXxlVy2~1SBebyB@ar zzlXH7GgFyuE-{pS9_`6V^f{yb7&b4N%vb955AKb{pE2ORm3HQ=dlwnYD7*$W-XhB* zY2r`%{KnnZ>`QWQ-%iSs^KlMXJ9;{~TU(LaakRZOi<((Us%hTJK8^S6H|%0!e3A{Z znQ=T`8p}b}-YhM(M0XpR5`~f~zo}LKDZ14K?Ha8NG*UcK%>6cjBJ;&$kzH$)9L0(c zb!ULycDRyRWX$8-h1^ z)hdwd^YC<#Him<9Lhb#7SHX5dwl`Ggzj4wqFF(eX*iUcqhs8S4`MHHor+(~p(^nav(b3+tmr6;Cur!;i>C74My{~+k>{nAGC zL^EALXVTVT+I=3y<4fNj*W&N8m-O^0wBZp}BBw4_u&jxaZ$J)4(c&RERDrH6HZG?^ zPqO^=$A>=jFcr1l)lO$4c-AI6NZ4Vpm`$!Szpq%VhI3Y*Q^Qo7NRFM<^Qnj{&(QQ+ zVA2$Y;+0U-QxBkRYhzahbc;951Z}N_(Ut7y^}^X> zwf`ud{(wLC<8{d%ZhbV|34(XLQ(jNmEx5;eA{2g=yj?)Ima|hE@hth%ve%gymDDHb zr>8GbAl0*yQzxgzbJ*hVv=WJrH&Q2UW{>t|^n8yTFF}QGT}vL%XJN2i@6G7hJpINu zY9fwrWOvR((d<^V zWjoLGesUA!jI$E?8>6)w&?Kk-U!vj@INQ~mkCK8<$bRxCjVQ*6!LaLSG*0%8?A9e$ z(h@d#D%t)T=i&*n8Eu~|R=6wl7Huzw);ZTrw9HbyMNgAOJFEG4mE4Q}rxg--7g|k4 ztyVa9u4k`9#q5Ju1k*{V(^iXfVR}DpI0`N2d!nv4F9&HaZzShTqGMhIZFU25nwm(4 zKasdQ$$8d8S%F_cyQkqqc4{sv=E=ADl(YI|Im)auIil9Gj2YQhnpw0$zlylk$y137 zN_FA*Uw#jp$zU8o(kr6%ay`_+y~^eR-BBdodC4=}&4{1<^JEE#Rm~1*s+ZMtKlMD4 zy*m^gL(Y=x;|)Dr2%a<1cdPkKcFdZ=G-ucG4cw!}S#YUDZc;V&QrN{SsR|o0)Z2T& zu@-#m&3+Pl)Ss>-0;;RFJEOzXxDsm+n{_eWO4j9?z9(yMMv14r5g(;Q#ttiN#tFVP zrLDh!HCdK($`K#Z<)|M&nkpcu?3q4#>Po+#fc^O>-2f*hXsx!M>yfkjeft!3Qj03o zzK?Xz!LeZ|cn-+Mu>6g!ofDN=#awzcJy{3Nv7pOtOet+-uk0NZO_uESV5#ZNcwGI> z*p>CvGCJCq_1sZNUFzpXyCNmYPhEi)$G<%Ja9VpSIc-x<IaTmgW`7<9r3} z&q?VTJ;e`h3tir6ZTKP^d@m{{Q%664w|gqyBW=L;1L%_3V+EY%ko4~EW+x}vE3&uy z40tR0pE%m&Yt7zhM*J0p1kW+wPDY>_wCh>WO=bzllEF>5R|OQYY^BlpXYWQghQK@P z^%|Z$2rcT6s*(t&coyW0GkehM3jW2r;uo#t6kr~{WQS!isK&tQK>Cr~9my(kEWX~0 zX2~@45d5l{;fw@Dyeb>I`m1-sDl3Y$R*IoQ2lt{IsTcgZ{?7LPG_#rCg5o{!J&gMc zwXs5#2kDZM;zdWqQH6u|BJ_`N=G`f|R^sY+6B6Vx8vURVjEM zXr#}+#0wyAWM(*s9xf@URF%be!zgsMzuy$vxSEyN&hjNjGtrg%!!zdy)zL8fC>c>w zm$5&5PGqaDq^Frl^`kFaVY!N3FH^|U@6m5D8tpcsmqpv}NY!U3*OpwIO5!Wfx#XDp 
zp|Feb@6HI6{2uYHYD`v>%RYO@soq%8+Zm4=qij#TjI`pYWX8VMSkjM##MALan4E+* zvq*f-h@N&e5;2Gr><{0>1|8$wvZ$3T(ciKQp~llRVGH=%dm>r;|LU1Y<%cwN8{W=y zcRl(XP8YMUeHPCC1DE4pS)Uay5AOIvKUru*qJ3wOhj9NsR-irT*0SsSX(x4w8sl=# z)Dle{kIHzsjn->FSeHfTul4bOmC`4$UId0j!z5$wGIxJO!IHiAkInBAzZsvJR6>qT zoNtZQgw;sanlDLGc5h;9a&ET*^tIWKWMS_H!sp5MT6(=euls`}`EgTO_)d1_Nqx1U zBXi6_k~y#BTwxwwHeqK{rQ&4z`>-eehI)$%UHi!_pQAx3uyzB>ak!oc^K;3`H#B)4T%NDLj-c!1?v+{` z37h_?8$YX6ICm;tN#=;;Z2bp3_Vw=RaQm};is2~o75R9VHYZ2jO(f|LI9v{O5?OvJ zjGr&4885|^XtjsDU1{b2ExlXq>8y6*dwg+W4-;uOvB3LFEo@~Odb2X`k?cd+hpdfPJP-z@N)1HNmFkDsI6OpqkTXC^87r#F8@;W?f@!3=gEnmSHzXVHg_ z#g0outDq^K&0fdZT8)K^|HgmAH!&rrv$o0M5^C+|sjrGB<1G>2l6!pnlUdJksJM^k z%7Y|(7kT)Z$@OvIn#SB&FeUC>crK z^(Vad>;BSlXdQTy9ppIuEhc3%eclScURu~%Sc(SV*_Xtxrb~;AXAc=E{)plwy9}vC zRFf9%2l85ANR(G69O>Pbha*{RoEijy1(D-|jn-%Z=K3oC-Rjxj@44U zTK?_tL1f@dqxm)7%%09R{7-J0oD{t6Zahv7^8Q1tLhA5j&m`C+`}3>*|JL{9uF193 z5Xv4_`uR1YC!b2N)u9X;IN55!*-Z)T_FpT(!s@v{`2&F)0%5ti})9^aB#dnXA=rl>kJ z^mTd@t6hp*J%ihcuieYytR~yp`;1+kL_4y|Dvws(Si!4|PFLYWYBFRGCMT>5$V6&w z-H4XiF-e}<_>68dqE)At$>^3|zSnnV8oSI%62rO6wN?0XiC&&3ADOG3MH<&=x3}lI zvV7C={t7nkDy>APYHPD2dwZ$h8@-ntX8$IQsp^%J^-kk2ZI8mj&2R4V5rf7V@eA~lshM6Wq))XQj*%o88cutYA!Bjh_$nf-(8 z-!wBWw$aK?unaaQOKqQwuvOuZ)1$R0m(%vd-EW8gcr9$zL+-amr&NMJM_<{wx`f@R zg9`CsjVJ$F9FMonJa>;a>uBhmLAaQjluPtm(RlTXt8?hU1b0#uxoPd?M)c%4ZOW#d zh(@bWd5gZ{alI0Sros0WI3?PmKkP@*o>XqkZr2{)erIM8?}R3BjAzb$@T=ji z>L+O>Ss*@yUpTT7rzhaYbs$Nm^4m#F&i_)cB)JdnAs>m?cu>2k+>#Y%@{3=uhY9*g z{fd$^&p~*V%&eJL=5(``9^Wji^E{TKu94(^Z${?M15;uv%Aj3Wmam*Pmin8yaX)X3 zL-WMDSAj!oJZTSt5jb$5XOd&MHu$s8aRCmUp|?Z{Jb>P*up1wY#(FB54J^Z-@}BMk zlgH^_6QfB3-v?{?6<8f@g!l`}HaDaAM(f%0%I?Y{tt|xSO(f%G{EHOKrCq7Tu^+BR zLQ)|xyL+kT6kFW@)(y}u*#ah>^Ot*3_+MJw;cqM4c+2YKYx9z;3*GF@1}B?m_Og)Ri1di|#>{#w_H6#cCzh&=SS6 z1&q~DY&&kme=XS`v)p4RS{R5PH{D%-iOg&ybN!9? zU2&zqYgLN>$&51?9*d0>3q1QHj4H6&i?y>!J6-ft$@i?0vrZUm^iJN6w6Uq6YqA@6 zAzdB77p=}dW>$VL>HRNsF%+1{8dQK~>MKvuS_f3heo{BM#{zFbiLtJX#IyL-B}>CC zU`{2(oLv7A$C4LuJ}M^OHYM9$ML)jzHq&gnfMX5yMhL7iIZxjfJdv>XwSFl9Snnlg? 
z`3F2do3?H8_c~n79(p_1JGedz$5KK1B(nJ`ijSbB7Zuhf`8dkrODuI+^O45>{zO)q z8(A&{>nga{(Z(P2aVWm+Fe?3m-b>9qSK!AHXk8BVWB=~g(^IINeX2jeWFv`-=Rga% zW@R^neSQPY=AdXI52wLBFfvJcMmZb>7!V<{cz0 z`k1}CRONaAmD_oDEljp)XS+ADt5qG~L*v@#8_|=SK9Mrl7b8`&z+_&Nec<2WU`|BJ;PrI&x;ZK&a^Ynw zr|fPvAsLO$BTp$tHnCpwDSEA#v*U(BfH8U1@C-sTxd6hQW4*_L4nj8hB1Z`@O6~ z_LOU*e{y!uCROEO9uK)=NM_>aQhzrQiY1YIZ-KNUS*ot@q3qHMupA5K(IhxK6FI^8 z+@}rbalf{%1!wX+w)S}j`nE8NeTXWjqvPG+PF|N(m;M-x>%FxPOTECnH)Cu+BVlF* zRjthvC)FFpPoaaK;%h5Zo$r}=P@Dm?8(E+EAWE*TaqRB)qUDU;P0jZchxe+x1N54F zHXp$v8J~93zxTD4==!P&RvTpqVgKN<&Cp4|mC!Sd|ychq?d$hS1O!0t<1&{o$g=_Y*lLcxpT)Tj{ z#3#DDcXH}=5-EKQr5<4u*3!b{2;WBgj__v2$Eu)igD->p42S!l@u$7DQD4^LU@*@s zY}?&rDOm(s(ET&5?vmeSAsWVSClQEKQT%s!y8$%0d95dr``gG(6B4$Z z1Yc^doc*RJwK9$ssYOpypKuWEPxajrZJvx`$-PpC?4}w*KTzIHZ$8ECj_gRh5t9Y) zX548|%s(rTo8j)i0D?N?xw-p^J505&humw+22>ztwOOoWi;8E?Fg+xLP<>Z7(wK5) z5&x!V?MPl~8>ZgaW^`*rGfqLnk?d6y?42UG!C(Zf`-!VOE0IEmT<>10t(&fVc!%-L#n z>)b=!8BR;H>vcJeej4wy!*T+yPbXb(`jmR!SL4gaR_Pg&Zm^o|Q&@^YdVkt@y9bry z2c8^8nL92y*ee}3nyNBaa zvJq!r<_FM~>{z6(QCfMcxU<*ak@}C`z2@_gD4!j)*2SzbJJg37egDmp9P0BEc+r*a z4I^L8$>__)E=%^nl51fpST5GWOWOWHTdDH&SJ3|5=MC`LUz>H|7Jt#h(S9TNep_(j znxfA#?*Gxq_apdDwF_3myMZtHYtA+1_CdGDwOoUor}aR7JL((>j_b8o9uDz`N+s$Z zICFYIqhy##R95J4GEDw=tknMc$({WCx-axVL7!i*mi$j;q)S-5(><36tdiZTRH060 z;{C|w5OPw>Q{ig%pOS+&*}C5@de6SoKE>`+{+ImJFghK~lB_V&e?!JI0^H|+JZ5%k z_ZFDXAjADoBvsx@dkSriRSEJD_Lkh>T$~4kux6!1z0|nb@UH?7_`+{Z{fjp|CA~V!zJ^#WOh8kKT4+ z5v!V+Zy?pdHt_}>y!!%M^*k!HV;PgnH)GlPEL}Vd+Opj_Z{EeKHPQEcaP9I`Z@RXg zU0cNVmBQWBe0iES_co%$|0wn>)--GNv0$xjjCe&$v)Qph?8z8VCr|or@Q?5_CC)m<`oN?ze{NnP!)#pzAd_8n2>m=7uk5_kGqU z=lxH^d%G*ktt%37($Q$~VL^evd+TP}k$iMrw0M%frqY!2Xuw!{m3*Evt!r|I{x~_R z=X)RWwiyL3^Qpe}GKc8ut?y9&V|TLum8uMz;XaDZNOZ#@QdXX|-j0hSwXqeC{s4#8 z^dRSh|3a7K8+jC8Z^x-`J%5$Avr<@r2g!AoIw6C})ky{9r@%IIp=9Pyg^_Y_{}Z|S zh7H|p))Vj8_yZ+>$tqepn*CgAq-*Inep!u-iOIsbiX@+GUU8pqKd^hvY5hc3KZN}W ztlGtNVi@=mW7-;(?}l+si>rd^KKdHZu2Fc>R~xUPLnT~||9NUrZy|X(m+MR7mZAN4 
zGt>`Y8t?bITB!l5x7=Icw>$hd8;94@)bXCkI=>Q*tkT+^0^{U+Ooo?d@i%#XTA;*t zxHejg8?=_wk$>Q2_Gd>K|C0?ObHw-)BtjzfD=HPU!Iegvsm7o9zdUPnYDEH%F)wb* z9^6Qewi+3h!o5wQ0XyKay?~%ME3pMNl1cc_+I!2hSNT3d3zhyeU)m4FegWTd8uh(v znGN+pnfTKsmtS^;?g!;5?oEM3vZ#$J^e8^3qdavVN~WH1I}~3+cFNGj)CX#BotV}B zFR-1Z#hzqgk>|3HID-{W2A-FFN-psD9%g@I5`2<_VV1kA$xo`BZOS( ztW4tWfm>q0>XYJ2Ks}ViZS+PgMb%<_OGb}GevG77sgzXK8}U@^j@$8rPTj2$bT7GT zhrsM?{Z1o)d$ctaB=e1x< z*64F1iH-OET`=f}zSrva=R!YUF+#*=sROz%Wixm9lpWgmPX3H4l~5=3mb;_Y9+>xG zky0sY2ux2Uk4wRxy`sqeP!cdy8-3s!Z^S92JavjPPut>|X=E;zEUV<{IFed6iGjL; z9a{&o_$fZ<=}n|85ffR%E!9(MhF+<*#IY!?{by4lONzt zSa$+*EJ!kyC;!Vxy1gFEuXuBiwz8UBR(qHcy|WSQR*+1id2`8R6}YVfPjV$?x1kSA zz95BfXlE2oEi@-f&Xh!5j70s7a88s%sGsQI`Cv&kxy0&5QcJsfr*=o;bX&G*6^y!h zyPnoA@MJ7ae>18+q&I#E$&K9Jl}qWvIQOq24cWJ>izYuCm6AE8x$8T%TZxtJ?b>c~ z&=H?fBdHC{vZvJ4x7gDUVVhAXHPXMM30Y_TQQwKdEo}r#4XviyTcw``IQuSYEX17} zu#ZpVYP`IZEL{h`-K^Dmv(N0~CEHL{@{k&?y~%jEHOsZwubeSeGSA!W>4$xbU(6bK z_9StMLE7L`JcZ-g)sZY@XKpl_lm^}VBx)nt)g{-*(%4AhEu=LOTTAia0o=)c^arFV zF_&k1G8uFq_qQu4p9Qk6+B{kB_xaR>j)o6)Xj^=r-X}AMz+e&V+QMT#98;ZXIt~w} zF$bdCLi2-WS~>zmOYkGuF9$(l$})y6As?TzG|B$jiS<~8mWei8hjYnY-VH@tnyqCV z${AX7_Ie38V)GN7G1ZJI9-d3wYfBFMfM$bt--BZ!CtB+32hhI@irDAm7k)rL z9KNP$1Ib~|i|XlN0sKp$Zy&I|td&!7{sNTEZbf}^l-QUpddQi5D%V9C;$!@yo<@3d z0*Fr~Iq`e0VH7D3<9goDioP1U9*9CUQLh7jCLUz7HhSa19&gUnd;G?RlHZ3sQ$|aP z+{(FHXPWbFVLOh;-(-6^&%24tOkU#D)0@e@yk-nv#||~28QEjaI=!>rv!iw``J4^X zWY~#s<~)>3CB-=?Uj>(v{UdcFqK}CJ*-UyTgKi>AFptGdOx9F1I2gu#wU)ig>$Kh8 zx4S(t#ymgO*~)|GPB5=$$F_iVyU8KK|YF@%QcGemfj^)4h%bB=Ha( z?%CaiUAvQvoJtC=18Z_kB!W6SNy+Co)jO%Iu?|lEZM=BPTR9)Q4|H=qw-%NA)2CD! 
z8ls;S-s@`a(}e8QAo=kV8-k-lT^Wz2{aBUM{YrMg`n0r}G2m$!9n7lDH0wH*vNOljat|uIC!?TlI)AFXOg=}!lC%l zkF>`pY@NRU0>4hMPOZ|h;7N|E)4?_r=Tc!~E#1iJXbTv<%5F_J?%jZji9t%%xBWqK z9^H9MPnV!@BHsQ&W>XO_(a8PLvcEg2TsMO*9KaH;GAI2OE^m;r=B!Y2{ie2Hpo@;2 z$Bt(Fe2#qd)Y1~v+DT6SNQXzEa%uWf4V`Xgg{IQSwMFXu+*Pk7J*Y zCt1r_xI2wEnJ2WNW!2E*2Qv2}$=HEXi^$F$o_GvA$-tAfd{*~IdMY~uhida;7UBt3 zAiF?4X!AAjNDR(nKK~hfpQHEN4)r9RR7U*Kq@-S?&uQUQ*T6*GqR^}dU$Llqi zWe$}8TeT7^(TR;4ZNz#27IpO4123PjZi%1E0e-UsQIfgkXt2gZXC_Sc`dkt>cDZK~ zlXi$T-%VhBh*ajxe30Fm&-5O@sK?pmoBif&@N;%9`IyVY^%w6YFVcyw@AYo#P5tr|H+cIm2Bok))Dcqdb+UvQTXwT=}B- zT;e5_t0g;7`IMhP`CAlErl68m{_VZg(%xI_R3)+@Js$;*U*9SDU#s}u4%c$Jod~~+ z*n`AWy^gClkctag!>RtZrVp9v$18ewF)Hq%L-E!=hka?{o$QwHC5c1G_iS8GZMR;i zy@Ni-k7O8K%*oE(+R8co2S$zO*}G(wcvidd_8(b{0jYfbu5ml@^NCoSNEdD-gNZ|0 zLlS4wmz>sRMW#d~F;{!>#ZGrP}? z$??M<;`>eD{~4rN|0eHHcWa+m(SBN6t><>2x{3u&-SEdr!ZtIv9eVprJIR}nla>e2 z@ii1n48dj?A4(F=M3G74qpCJiU$4L9*_EqQ1vR~%j9D8n8TL#^2DWPiA_Lo8<>(cus^8x zc8yrs-(!tuuY0oCIRnZ7-$Y7N{5B_e8^FjyuZeF$Pq%z5Aw7FZr= zGo6jO4G+p0&8KVQb8ojK8Pn0QB*)wjsB*ZGA}iBSaDe-%HhVlw7V5o~?|;Chs;JTy z1zw=}&wKwNFvM~;0C`&@TIy9Vr}wK+xQ*`{@H9DjRuyxItVQg9b_D!X=dNpN;l6Dglc4X?s#vp(aW(a!jh%&)s(TLrEDjJoyM z%@2){EA^C}#i3~0o(4VWxx^n`h~n*V`9*xpeB($@4%A+9o1{KbvWH|9b2iI50mT-= zvxB~3JFf!o37%+92R6Wc4^3?2TX)uV9ULBH`Nhu|(c6=eJJC7RsPDNkpo>RCAJk2J zLE^{02VpmNM)>>-%Md?|P3}(fDX~gB(J+x$+v#Iv+>gggiC0MA5G25)n}cjkDhT1oWv9l#}$aTJMIgRY`ePg733&U3^Mi zlZCFA)>2}gldG^dC^~>8aV>jDeR9M!*8A1kywdYa@nM?(Z-Mnv67>zNs_E-vtzF2{ zB@a~MUE`aUF{< zsd2j=w3*wkL$l_p2zp54-p zWH0&S7s2z5!UkQ8Z?AbP(UPfTv6NJWuM<3xXs?o4`k!2XiH_8Sa|3Plqi4z1b);6~ zrBMNliFQp*`s>*C4m9RA6p3b?Z(h5T_GMRZEjZhF>UK1G9R!)*HuT&{WIeH+8)0`I z8muL&H+Vj00hh37OSO_X-v-+H0I7&J&Fu(|jV@6zUaNzK`o^kW=q zlvMxA{@EUymwnBSWM(rC$A702X+0TKBaA4?rQa4mvKO0GTI!puB6Tz9XmZQk>HiQA z?g3YKy(Qa3GxY6UXzYK{ol33^#Nq5aXTP938YEBe&Aw+(?l`naEw_Fs)zMQsTtQxvdwnUL zIKy~yy&jXHFWz!P%wVd5C+C@Mw7HpOxw5dOjlmf&t7Hg_Z|Wbt`8C;3-q~e%n|OlQ zy~QAJ=9!GFi&5!**A^9&J5!IbQkj8_!^fN%WRJd@ev+%dDO&tOkCF@j15b|BW3me8 
zw4f)7t=zdq47j=nyQ1FP-d#R<_s`r7yA_VJCV7WaJvBX zXOf_JevBwq9gA_UHhxdi_h2$RjCI^)F1f|CImb)(r>yW2SD1Xa@q5iGr8BzifLVR_ zpDlF#HZ3NT%mg;SA=-qR$$m2qHAm8cJo^oU$X{B&(|uH!E>gnGZH#$=*WwQTluY zP3ri~{$l)8x{&BpK+AqhPh9P42A+7jmGnCMOtBOFNlvO&q}plxPZs)~bAj(j^g{Yx zM%&kFdpqg;&Qtxom3W@`e_dqF(G0ED;KJwRr#B22qjqVy>`QLyf-3vEsn3*C-GflK z9qf9e&3B|@JuaLDj>Mq%fX$^eF1ZaSlaspa+5}ROjHfw?OBCNXsIY((OoCSr@X&B0 z`bwiwVir;Vh!^NL`hSSl#PViN^f+C3(wLvwS|qJA3cpVF?ge@3 zmbV~r6G_*-bn|Fq(h6_fjNaW?lhmYIQ_S8Xk*OMf0nP>Dhrx8br?b1<7Z$gGpo`vK zL7(q%dO!S3KC0|oEh3G{GWu^VPJ~}5*HWuJaBn+GjzIU!xF@jGsoFgsZ@xm0zT_;~ zc+yj9KP~aCAIUlk#Hpp*f~4F^AF>;je3Hpbm9wSP8OoaAQncTs#cke=CtN2|o>R+I zQ)`Z1$#d{E$$L_NpTn=Ew^DyBC(4QVPmSw$J)ind4}+&$_xD7UzE2ye95l=CH9n=nP<%h0@;O;-u6Lz=v4)NhO{x@q0g~13C*G}AvE!1O zM3;N6rYBycmut*f_F=2;Bfo#rRx&KtB9p29lXZPtQXj8`##+j5Pgk1wh4)uzCHp-` z8%dAX^S<;jJ}$?TxA?0(T0od5((VE9+T;3#^tDIvbYi-$2V3f;C*td3cQ0jaK0x!C zU^*BCzt!_E=ode&V7@OZ+~NC3Pz`sd2P!?p<{d(J_vq<2gO>4B3N)$Q z6#abATgk$_mgEPUXz>Bs7>*{{Q5#}yHXPm3SAK&3%i1`d1|(nqNZ+&TatST%?&@yx z6pw}v@b^sA&TdKc?O{?_ui#pVKUE{|9fa1|Pua(tJA6wlEb2!hBC0TCOEu@?~0&&r&i2Y&IXrUe9rG-&i~md_VB1 zTk$PBP5a|Qvc&$5)%t~2W}i7;t9Rl;YA0ofX94NGmOgC3kufy!4f?kfuj6N!{K=ze zYi4~-^%?)W_{Q({(}Jx@UWrtWNu+37bImpGr?T`ZdU%-@KR}miqT0$r!*U)yi^eZy zV@~x>9b;GO?j++*H(Yz#sMdh&&Nlv~%1iuovNzpF@5|Y&OA1Tdo(}id_Ps34Nb=U- z8w2#46SC^GcbKuM8*A5u{AQi;1P+`5vR~l$8hI9^Ue;`hd(RHgY>Q@njgP-Hb-@ zwoIQG^KOn@G0kxW85p}*{i+v zJcRoayDvZ=);2w3&1MhU9Aq zyyF2@nY6ZN@8XY~43ddji62wWuH)Og1XYhP^1Tb|kF+??`&ktXqtScF#5gU@gx6vC zHW%jckEsayxggI;^k%f_OV(!f84SnYCF? 
zGhfrkV6D!7_KUFO&VnejrSZiNtu1DKtKNcGsg(Ggv2PK5MM(^9;n- zksuxIdNO!!^S7ejR?&}l!FU`w>PwdUlhn`Pl)RrmqeFHgs-Q_C9Gm&Pu+Wi-pxo|r z>K8o*riyw@B*S2Ku8IGN8k`U3)E&CV3}=VCPq;doC0WHD+yRR0%qF@s6^P;;eJ0sX zR*6dNWn*`eRXz0WMS>nQgS!&+iIhsVoId!SovXwY-l*+Fqjl2CrL08e2(@X|O|aRH zXHDSx5B>d2OP6amHCpDeL5UPhoLaa)tFUoQ9I*DG8?ZqFMO?#on)_0t=4&@Z72+D;CT%gC7x;q ziICddcq9jqYtDDebkA*oUX}ldMDWH0ka6>gY8c zEj|I~{bVZji)xVU#^6h=&jR@MGTM|S0m&_XV__GEfG)Wo6OnZknm40W)%`t3+wsX8 zZIpSYm>W+3L%i%#se2h3gw~5)tF6cE1+~$46Ql7s+Mc`}*WvUjIGot6S#ZrNxS=;T z!mf=rCZl5Fk)A+>PBiWZZ|{OhQ+Bc(3C!BOB--dxSK1Z4uY)$p>hu!0QUm*2(4B^> z@5ASQT#Y|r>OUk;*dDefG1u>Ct28Uw+&N;%NOCsoEt2;m zE@ZR~2ja{7bul6@aP3VkB$HcHdUu3R&B;oAlwC{ao`=y+@>;5}!||lbsMC~%>+G$! zwVe3WUiwd6uT`X_6{wOK=2YBDZi=3`JcT?(k}pBQL%h=xKF`7~vyUZuYmHu0$Z1O$ zGyrc-3lb%`8@%xlfEb8$6>UfBHZE|S$99OT_nqp2~dr1vDE2{yiW|#jrN;%Fc0ox8@mN$AI#3+E+?HQ_<`dlzkCQ z({Lvv-ZSQH-+`(nZQ6)F%{z~! z>xtod4v$)+U|IZ44BI4Ddktwybo14P6!a$1jZwTW-0Fc`1g{Z2IkB^M{gWBw7O*r! zxus}R#e1LV{Xo=-hg~exr>Ho>h%(5W;S-YnFLUwiT%YQlv2;1UfPLxOF=Rfmr8j7) zADpl7{`1#`rYdk1tbDaf>l)C$7d9CKZ!1qdJ*PBrnklT<>W4N7sF;&d|2WuUr1$!Q+UHpz^<`Ivm}$jBdjtBPLw{bO-^ue>1!Z=k#w0VQ@_1Ym z?9qc7=0d;G?^*0*DHdQb8fVYq6<8#muQE#SPr8nwIj?}Q2T5t}eu;nj`^LY?+FS2_ z>ZDxdjXGA7ZHuF=5naMQKlkU_6YrJiLWK}D79K?cGdi3@HmZ}9YtSjM)Wey?Kc$w} zt03*+w`4!yRo4z+o6i8n7obWOmqUy7a2;}yRr?=t`!X%BB_oO9+KCpg`#Bw_Mi|R) z0N3-dz5|AT*JD#-=o7x3K|YdQ=6KY3M@!ek@c{G*U(TlE@heFd=lxj8AG{YYiR6R5 z5JaI_@^%dZ(*daWg`WR`8~bW&5{=5<$`rD90NMKt)%O;3?&0baz8}t_b!49s(S91t zw}Spi5*3?}-IHK-g5MQj?5K@zecI!RP`YHLU$TEOgN4|i6z84zhWvxIN#&`xy%h+n z!YO%yd(e}Tc&+Sabj6$G75)c#%~QYrlth3<0*@@HpXV`U#&^-otKHSvTz z^*?w2?{nG1$#2QexvW<$FvcI{*C@9`z>s*hU!TeU_}c8_YKbSxEu>;DzV&2(OR*9C zSo8PDWW4xm;!b#!Nc@5HGJDIl*snyfoU4_4{8mHL)IY32?^m%c8K*K9Ehb$Tnv>+j zJ$d9tq1SY_Xf=D1)pIIcq#AHJJ$!6kFi5+}Mp2&*4Ap8P>sOPqb?&`c%tDg4FK28u z$XWajI_PO6n{_YPnj0U!H+DZx^4F2kWQR!}`S^;q#f4!cqym^;W*48v-x}V{nyQO_ zD*Bn?>9(XLe$aPmZ#iq2ioV%*xrnuj|6F`xIvRDpF-uKG(+*mA*6v-ZU$-N*S?M3D z&F$K5#G4>%(q#FqSi6%Ey$!v)fz;;Y@mXV3_C8`^+jzE@))SwZv+mijKF4e-Gr{c5 
z&(rq|FwEC`3z}AswNJLs>~nYWKM~ZWj5N=qK||x@Tr;@AEYkV(HtV#US-xQe8EMoV z$?DIBUqhBQdodGSsmmH??;+7jneEqy)ij(sgN>V@_s%dX<%w-*^9t%uHcy_5d#9mK z3zW?%aJUtZ>h;=41YvS#wIV}z;(9Nf*{Z*+HNT@JoxSw{YdfT{v13Seb|6yU<6xu3 zG~5|wJeUIGnPlQ@mLVtN)!ZMUx9WbTk-8oDxCiG`5$tyKSmsT?qxDq_0a3OK{ znSXZ0%kAisibctPoAcfdsFk&MJSsYpxQy~6>DCzE-p2RNF!~fFzs8NH@qR9uuL7r4 zhUA*L38&)a`*)ubeU!So zhkNHs{U-lxqK&>a_HQr-B{CwrOr^<06ZgMHEqYEJdQn(SUeABpXCd=WZrN-TkkD z4=p{tkUmvG(~jaOihGwkucC-Qtf3e^MWc#v%fHs463Yd~3 zvyDC{llA9OX_z+pp=J#j#S`ZzIA)%|6irg2w+d-EwxD%RO*g@}qypYyWIvHnZOQ#v ztm=axucNIMg{JPH*WY?~9I8wPee!}WG%D29|9!4B_IA#fbFO;{SxN@aB`CcPmi_g* z-MdY-7;pA3QMoSZ$%$jCogYFEZ)R)mVWVEan~#kCsV=jPRL6hjDRjOUMPr+LvP4~Q zp{+MB!NpG6xX?J8ESE#QH;shU1XQS8}GWA zAgMt*vg+)r{|3d}qa)g<)>IqMB|5Yc{NC5s7sja63Est4zk^CI(DkcH)MWHa-m-XR zwE|fxDCIQmGPGVrI_tnLnG_e%x}QOPIO$1!x{qj1@@fv&*J_&B!}IUKBrOaBOQio2 zmT5b>bucbpM4IQB)ubxWAW#p$^Q@Ya3%3`$w!l-FfhPCsQtOVy7Oe+UOPEda>?fpT zn>N3N_gFKPYv8nywJpy=HuE?8*nQAwxo0}UwK~4sK>tGL-{F2dv$wOG_iMEhDh@K+ zh^I(3^h<@?)a?1Q-jZi)11V_YZsG|#jz0^9L4st$tjC$2?$o1jH}PafBHszF>; z@Z@7}CX0B^3*#q#oA#TU?cIeYZy>EEVWhN~UUPC1 z-+@E`C)3MfG-*p;9`Wfy_gA8JEmmqfXj7MSAlMFs`%IGi7qHc&N7K<}IcYiHd+{ya zivDlX(afP5pkrb%<7tu@+a>J!O}O|3i94U}e93l{LeYoW*p>{EF&>vY3*;ka|`|t)mDG-W-mRSR6W5Po7NRJpVF|#Aj_#= z>c#}l*{G1{<`Y@V$*7aOya&QFv(T^HNrr&6;Jk-)HDQmdfp9Nt{yBKJ;B-GUy9Z=L zNx&WYo=rCg`1}b@+|N?1z@z)osgkEY@%)e4iLc7*sJC37iBzwx=R^eMy^UkTn-U|bIN2m}5>5C5& zV80%<-vnnWGfwq(yh_W`fZE2>LtW_v>+G!Ng#Sh3O)@9$^?oAQzBg9=25#|6iXHr$InG~j`1f!b>xqBB{5a!q zDg=GZnv|vw@m@%bK&nJN?2RkvRQ&Q9;oVUzQLi5)vbZ*SpWGR$5Ep2M|&3fL37a0pCm>GjtLg-c0m_93z|&b3p^)Wyjfv9vd8qxoan*yvl%(sQEz1q$!b<^+~%CmLi- z|AK^lP*}ID&j;h$>DnKJKRJ;<-`KUBO|DA*kJDGyr)RsCUXsB$C;kt6{%p`6P(b)l zLA%tvPSne_Mw9H0)WVtHcw;=>SVi)VN9kd(Z-f>{d#WWlOy!~(>_;DO9uKnL;NFfgzDoQLhPnQt-}p{m>A6%3 z%c;$NI6I9Ecm$2gXki5Df6&vR=MH>{kHVi(D>Tb0ejoUMzvhX__y7H~ ze<36P``Z8Cx00QNLrC9|`p%R8rG;zJH{48Rt^fD!TCVKt-d|Y6vF5*>aCHLfmaG^J z%xE5A0q@4u8;r8^(X=lrEhcSs&@=gs_oZoT$wN!m+tI;TrD|qs2ePN}3(h=$nY&NW 
z?qn)WHIe~nAL|#N(D)yJho;F#))4pN89Ry{UGHkDDNiCZSw+7@#~$@m#-FSHGoqJf zA#>(?R`J9lkW6KTI~y@JX!`^*8ej1H+4js017o6Z<0;ln8{f0y$(lL=GT`(m(MJ3}!GQb|K)$A>%j>lP~eYS7OH4<;3clC1y+!M`k3VI|) zp)VM>u{h6x{90qgpTLv##60%wM9bevY~;BUo#Ut6+-Ua#Dy_$l zuDJ9J_`mngN;bQcku4SO;(uA6&H4)%USr2|a&a5z;~{aT=R2aw*V;&xywt3`9G`cv z?Yn$SuDwBEsSnpGaLTBVIm=CK*?jAR#3UT;&OBEh0ZS@%$0IQjK;M}yWDWW(i;?Up z*^!y5$J9Pd=H8?6{4{tphtW*!rH=Mb;7+!d5#Shx^1VsT&8+EHtk-PxN%f_(*^*QR z&D!sL+-nHlKPxT>&hL4xju zf6mKJGkV=*PMhqH4b38w%OIIB6CH5^X?Pv&&qJHU%Cv*;XL_5d|8KSSXBgMj=4wx6 zWU2td0W4T=|C4R`YQ1EB*xq}|lT#OWPIotJrNq*1Wshc)=uL1r2pq{BxT@%>r)vx7 z%z5yM@ALWKY^>+!S-Vf*oOqh@`c4&`M47&)->P`_glBi5 zM@H~V$;3xAxFf2)1&iIDPTtO((#5hJ0joFl(VaYv(r10tdIVORTuIJ}`mD)1eWeCO zZ*9jDJav%9z+^KVlT)a=HowM^QTj`4;$lC^ZhHZ&Qf>J#R%EiVrK(XhKHbTDQ5OZD zcW*k02tQ|Pv7eT{V_V*bPdw|FXtN&;jmKqt2$sWXA`IG)(9_H|l9l5;9P6vStg0`< z+imV%%BC!IX9VcqGzRx_cbK*l^?Zb07wF{(S99W>T;HKXFFN%n6q-v?vieHoN!CH} zXi4mP>S7H8#WLcnU=ER7^FI%k+2_e_%TGq^V_ka=b&@M!Dy@w#ZGV(q z4u|Bh4Sf&CgZQyd_x~8P+uMCgq}0FskKcVYZzNjd8~rUepIVEbsb@HZPDHB{V^Ri1 zJG=L_v27mM?*e-he7FfdV~kGC^&N{ojclj#Z!F3S;8;h#|KRPeg?1$CN%mp}gJH0K zGbWAqbXIJsLhuy{Yy#Km`st5a@eWAzM*Imb#^tMFb+jjTp!8bLOi@lQdqgj_!B>IJT3^%)=kRG*8NT;fJbk-xE8yBZv!bLvVjq-D42YbDv~ z$0E(~f14|xg6eQQ+dOXDbf7xbv&~RAC#%)9Uya->bLBNu9%_6^d|yV4YUbV5QMWoP zaH;3()3GXAT#O&bSh*}Lb`i3tp0nl~^p_du5Nq_Tzuz)`?j+amfUOycO(lh&@H{n= za=MW0)x%I`juG{3RDB(v+H3P@P$pL4Y<;~A)&XSjL)7k#R%O}XoB|AGKkBjZ$BwgIThzlmUO@IOB(!NXg7p zmKIn|@kD?04b*`-IeD%?ivfPyqt_NxSW6GTLI3q&-;F=*-P-||M9cImM%T4;ERnRS z^>Yrs&SVqMGp9@Ts1C-%n?c@@oORK{PH(qFhj@A<4}Ety)COZ^Qk%%e!~j3)?`{-N zoK;4i|uUnzIy`F_G@p36=j*#r`$w!-rg3{zIaY2=F z?nJHs4ATo(r<|QHBom2FdlEfYYvp8ERR&4&|4aerZj|^p>0O3`tF?R{j+H8KO7_## ziI|6;iCY;2_jP1@AZX%Ma1Nf$K&K(>#}t~}Rj*Z9$>c9C$wF~5m}-&LuZ$$OpjBPp zwio*w{|&bTQFmd1ck%;n$M;lTPJNwI!I2ZCdWELPi>E((G8imf@cUmVv%_j-yY|YV zW+FA((WQ=_zXa9e(fOh4JIs%}fVUCaPr&KyWOc!hRO=av!}q{wID7Y2F)G|=mingA zBj^2Z(*8$j!-d`(XKu9GswF-@M`}H}IUYjqWc_H0TJPX!a}qTl7rvlbRq*LVz5Eju z6B&~HAmiDhGjKa6^Jn1E>)Ja6bQfE>{?-V82!2=e{d%jJPt6_bqF>@x+j{?)VlJ_h 
zZA^SxG-EnE&eL`)7Uv<>dp+3M4Ss8o~e{|PO3dafj^YO*!tbLt|;-#ryq(*A0# z_dwrUi&c9c980a9-@_wu6D23tNU80n%IwIOg&3Gcc8#N z=#?xK528f&O}hG@)p?%&3Lf>rl2v;REq>1m=43xL%vYgr__q)3Ob*h=aG@lh>8WJ% zn&S6Juw;Lrq`FW^o`^(6Wc}ZxcsK7S)}rM9uYZsFf4={EJ@FsO6r1no{zo#d)qAbt z%8mllRC2mZ510FU*?;c*`hNZ=wj=%z`;x*$5>57eOIi{96T@1qSc4yqBiG_(r1j6P z-$7INW2F*HH-zn2%Yqz(^V86JrPab=xY?I}w9=WD&UPVj$8oJ_;Mr@dQNn| z231ajZ=$KGVURix=i<>xIJt$rYRxL8!bGaL^#s`-BlRif`dht!xzCBd+vS;e^_RT# znYSe)eX@yt0M@&Fy5Ik-$_8pX9cmfcHgs(HnNj+p!7u z&*0Vo{Z*jdCmLt#f-^CjCy|-tqg#bH%TTp)VL?axG}vgKJ?F&lmxtr)`uG{OU(?d< zM$()tC$7CM*-M`Ol6)!E!I2yfvEY?l|G>Dil)j$hxiTQg+^ADQwVa^G*ZLH;dI4%B zKV4#QlV`OBYcq)LoD0sO=zlYrd&Q^TxLC``oz-Y+yObw^Z-V0j*HabaXLHj;gd_{< z6KM4#SSEsE9hq2wB8g`?4CS)F@Dhnke&LebAxpsjfL1dzNu+rldfLI$|0an)<6%p( zwH*Yb{k;?A{|V0-?v_-6&MrsRE`Ky{%Z^MdaBrn~@%JAM$4B*d9?a*kDpwhulJo8_ z+8?8}L?)i<`B{1%r?1qe9L!cG2kuVz#V7c7QkWg?z34CpR_#G`C#Z6U{v{2I*Yc@g ziSP6P`j`lTpJ+|iMTg?yhji&~GIle{CC6}LtUvK?vagguxr$`(LHPX4rl&$?vfZ7n z|9&+63C}Ddzti+q*&Ct3m7p1`tv}&vqhhuFqZaQ*%S*Jf5+^#LSoTOZp+a(dJr0)4 zoyUPa*$_{m`x{v3%x){fDYcoiuIve`#P7yaWCZ#D8*N;Ka`8TG3(C9QZv&dd3LL7H z)G3}pv+n{$BmHIlkgO?Rv7_-lc!Bg(rhz$4Z>`OXaO6hzEmAc+_-rwU$ck>R z{`z@4{<9Ocn=G8`3(oH*VOi1N#^%I}C$+Vfu_V<|y`i>R;6n1(Turgy9-eCZ1V6Ssx4$Mvw|OuYE{iqt83{IHae9m z*1T1?wAK_#w8=R%TH&8H{0xmHov}9pYPcf(anP=A5=4|jD z#6E6#G4p zc5l%7IvDPzt;v3o_^;*eZ6=ld!MXw-@y08|E@Zu#-GFM~PmPLIc-py;+nY$-YS&vB zV@7#4tMlxlCwpU7)6c^GZc^Wq%&aQL&1v3?m&i5x${AXGF|s?^nZ#7b{UxN}F+En$ zQeu!>!Ywtg9w7_Oy_->IJ!lr{Em2hijOdN^(!uOulzxZc@Kjn5pX-IV@lUdpd{FDa z*a#E@N$}I~pRV6zvl#DhPI}j(-A?k8y`(AL&3HWxhj!_4CWxMMC!RC!;7IlbldI%Q zbjuFojWjk=mVJ$0Ft0}z-__1V?qvrzbu^ABcD=r(SG$a4gHY!=WBC$Tb@$dkX;xE` z7A{Svd7bF;*LodD0^Zg7N8p*}^GYK_U!%!b*08doy?yn#^SFJ`5G& zb=JufsYIQ4iRa1cu7Z}yED#UBocL#zwg8SV>G1-PpNJa2C4Ftl*i&%08Z{>1WcHoo z?KN0?snn9VoXqHdXB_Qn&9V%}U3fhyxo6Ld_Z+vjLgl2tc0uP(j*${QD;`{yWn z9`0nlS{L;*65oU(IekwqoKX2wIJbdcB5krya=*9sCnr<9c{11=npdSR{b)AfDl$CE zmE``eQNUIUwPHtN|Cj3hJ+w+RWpwjux|k8Fn*N*9n65s}!H48Fi>Jk5`u#U-;yHUe 
zKEyvTQ7E??Rm+mlFUjZ%@C_#Usr+{dDQ<>#{XvmE)Z|j0q1Q%c`~BJEhI&ZN-c9I~ zO0F$ny^id4W^1a$G<6C0*VZ!hO#U>a zS)ccTGrN^npygugkwj)Zistdr`WFnhvbLwVml&4S?DvHz(O8=$*%?zo;#%$X0CV;z zJ8S8=V)YZsq|W^c^i1Z*y$-S6J`0N^d z=FSb)v712FiM`7?=K*+o54c8TG$phKgYka%Z z?}^|&))T|g^v~`l*Lf2ln8UW=tkL?Sc0;ACI2-2lah;`CN)w6oc+zkB{GcBL#BvX;Ic9a|SylPmUZ z5>uANdXHXZO`WKku_SG+Cype)srhj}tv}7(P|>i7Ihp@raMt zQC2>Q?@pD4vZQX8mUiM{JMU+`o$7!?wUH{hn+j`qDL7(Jhk`aWQf*R%`XZcit4raDg zo|Ifo6AmPex3HlXgQ%~Tl8^p-c3`ykj)F^0rTT#+5hc@5?+qF-nZ1dRX1s21fYV$S z@O}8U@cz^O=cGQK->JAZl^sjAx8?;e5+|Jc_UmDl3a+U@p18{SpdN*87r`MB9`#&{ z#mVTtg>JV)yQ=mAC&PXWF6;ttDyJoL{z$sp8KpkdVq4ZWl@sEpFrAhj0h+91CZk(U zw&Mq!v+LNO8@wI=;dTZ6N@8J?LvI_)*VNP7=}rT!rE*Aoa@v3|>*mC+o(AIk@%%RQ zO9hW-^_(b`#L~CLl|;(-)bo4p=De#sjqc=)WCN|{H?{EA7z~JJrnl@(m4)RBG`Lo)yJ4NE z^HhCHY{`Z`FQcy zAQ_qEZY9^dV46G#^UYBQfh9T1pGV31MzU0BNJfzRNmj<^0b1NnBI4onGA+D{o}B@X z^KdNDFNu-PS~Kw(&As&%T}$@&>`Zh(qi0b&ncAK#~+|M{8wrx=XPhHS2EgN=l!z&HqzcA6iTLrHw$S?PWZ;Ibj7K&^tYC6tnJB< zVAM`)P4S>RxK?VXJZs;Z?Y;sBYOqj=ot>?RXO=*h$GbcgeUWUK?;#X}%kl)3}c z3!1-+{yEJ~EP4Z!8iZGywJ{rI7Qk#0h)*V?t4VYXye&&IlEX4PH*L^z138&SgL)dv zT9J*azRy9Oc!qWYQO`m@E-qmG1#kKod9w;zsqJ_)oMN|aqIMSgJP`yB7$fhcvsn!- z#PcD35_!HsZ!1wY86o1AavwQLt&prl68m43tZfBf>a}jfw=%v@fW=yJ-w(y-k*^cA z+(xhEP%_z0uA$u%jo`n)=B2`>q#Ao_p1%&eWSoBk=6{0AV%GnBQ12p-iNzV9-zD0< z9PcvgU*PSFwB1)9$pK!4-hEBiv(gHk;}@RY+NIhYpuNXY;!>?w)Jr1LLdSE>Dr%rj zcCUuOY#pBbgOrWXQXS6?#l73yPrjg;+RmANc9ml@&WF{9uu1&+{_Z5cI&}oM!ab6l zT&v04cNpHa0Nbj zyIrYNNLgBF6xtOjS&sMfy`OQ+yzhHF$MZbD-+f=pxnJ9Po)`O=>AW@4xjj1f=oYJ_j6^lIvsyT|hGG zqg-MhdE8X&?9L`j85cXD?LlnR zT6}Ir8j{I!5%?}68z-XA8q_^W*-$FGe`l(-!%Q$Yyf-54iDOI#+T6_Z15UL7Yi_(t zY}Xu+-9s|tr7(+Cd>zMge?q*_cPRS?>FLWxlqUgme6B`I1!|xJ~hC2D?TIx_%)zTZqDovt|1kNX*c)b?9^^DQE8Bl(%?sM?YZQo1Lcn%$>W+z&IiCOB6dPzo+ zRz`;N(BmAC?D2FkslOUd@tlrD$sG;%8@tD&OV+`;wV<`u?ziTsN)~@dUd|-(6;Uiw zehW(Gp6j_ZHXf_P*_Q6g%`iIlLbr7=%O2#HWaUF`XQzG#Om5ZYjkI6@x!i$D4NzkM z?THun)1-ET5jXRJW6^9k$Cs#&t4%S!x8qh-#B zOIDZPlI=1^ngwjvE5&-R0WQ}i8Hv~bfX&)UpI(RckEq?i^M5P7jf~Hrd;j;JcG|Ew 
zW$4DMB<}|lzf!Buv0R_vdJFGDfDyBN$peZC9*-ofc)czoD6@_Q86 zrMJWioDbGB{f_f(9Qx!G|L6Xe{4_0OF#ZqO70I*QHIXcniJbqb)KCBWdr}h7|I?HI z@Bgg#lV##8&}~HF(7maj_+g}MZ_gX}Ov`p4q-9!)Rr8cp7_hEls-?CnsxlqTjS}CJWM3k9|GO`Z!(& z&FT8!M$3)p-)r?N613bLCpoH5B@2y=3cpwXO?@V^E?IZx>M?7v)96o4{Ct)iCl76Q zj4!7B=d&t%v^k6ZeG9S<D931HHs0OjX?#|^`%+N zc5MRNO{ zuZQFmOHP{$(Ij5m>wS*jUGg!l&|>1uX2IYERI8=!z9^pzY&Fquj88A>JvRib1@-6j zeX?<4vnPqOO}6YlX7iV!VNQ1Uk)%P!-((Pa2aj^nHy6%hm3@y!oUPU5ifMs9Ti{qS zzkV3CKh@57^sbM`pPOTk0Oc1boSVEmkVsy_P#8dR~ zE}Ib^RtNoH6xn7p?TZ#$S(#2okmexH>HbKx&sw~NSxZ)CiLXp9yZtzujLg{qsH^r@ z+8nA>9rk^)md5L69gNnXMjOvMfMlR{a}M-1E4oRa*TLspW5e0rXI{0PmgRoFMMlnA zEJ<<)JgUZ>_>ntOZ`9|V^!f)siQ0{cHmJ zP8PKfocpol+pSU8YVS$(_zIQph4l#alg~9dHBO=_*?*skOLIwNyh!@6*`MiQn|g!& zJ%apZ^_2`->tLA-Pr1EuDO-~>#+SXl8FzNzPtLp&yPFZTj`|rP6Fo2wSL3m;(yl=T zeMZhB$4ivY&81n1$I~He+{|hhq0~fDn%g8kC&9`+{ki+=W2@Hpr8M zYB|cjf)cCrwum<5MCWY3;b1OdvUrq$C2@5>d2_FmNjdFN^SHQn>h~`Zq(c5c#>$W zSgco3s|yI{v5(QQUD}wBe#6mh9vO@s8HyHHX`>(c|CqEbrPIImsR`@U4`oi!Mxu^q zfnqoc)$=qF9KA?PXA-c9B^s@@M&#o0qQ&2!Ukmhjgf=G@wTIqwJ3%x7o1)PDd-{Q#2#TZzF2Ic;#yy&*~o0D z0^06F#f>!WdT&dU^uc;vigVYPk1Sy0!}-nruE4z-FuEI^Z&vyy{Qg+I_+s4#)+Y41 zy_rcyy!j|!!`lbZexDUsQ}t2hfR(QQ31p89Xh~Vi2BWgqckukB=w@F0&T%Xi)@-FsJE4eBsg02>+ zykFhy+)h^eL$Ds8oj>>}&;9obEuqIw+-}L9oBv$Vr&^!Z&jHu0F zQUUF@qtikX7q9bXsB{TkPK?B3@cRZ2MzJtuaWUBzK1Tn)l9jG_|Dcv%V;gRz87JXV z7x4UC?>T#(M&^&x{vW+bc86q!oq}2~lHe6&tPx7j(|T#t&wgBPTuxU0ap;ycUh;Fq zB9tOQJ4oiypm?1{KilugY8|g!qQ~ymTC%5IZ513p??0h@vIrdGw-R1lMJ|5@t4Hk6 z>?Z>|$XjhT@MJUag<$%K#f&HBT)Oa$v15woxx46L>(eE4W|)3*)>fT-{2lJqtS3s7 z+{tQ=^D~N89!Sf^6>HSod$$U-Kldzm^u?F@u!84}d>`-I@kZ;1eI5X_to3q&SxT9# zan3}821=DHsFc&!Un=n%?@PRuo5HagsvifUtZoy>lXgA;QSzT9XV?n%{%YL%7I*6z zN%#7jRmNTtn3(CZ@Ol$`M=F&0Gg4F#_esYd^o^{Fbc95~ZG-VsmDkNc++SHd&44Nmh5;JUvoh ziNDXi1SLM?53;z)ua_A4i`nJekDL9D^59KObn$ z4Z{t^v(hB?O;1DX+$@p5$xT<<+h*S6ZpvYPl6^6!)VaAavFAD6E{T~xRC~|*JdSR> zVU~Ry{2u{FPN;MGn;Q;}E$R$VCR`m*a4&1xtcLTXf#3iAiJwBr_e9+P@0S%gDKN15X3|CY_?l9hPw^~|@c)oV_+kEdHFknZ^S_M#)Z{M}DNPheN7 
zvIvQWTxHhST8;Z~I2i~3MN2D~6J^DJrSbnt_HU;D=iALVo~|q-v+tnRD?Y~)c@UW1 zpx23Uj2H7L8dkC=nYH9n{bc7gC&Gz*`3&^A1u5Au`*=T_EzSyNnG!!5?OL$|57ERs z*}*w%OHTO`%~+1iB_=6(aeAP{a=&Z5NyeCDz$k}z?Z9!SQA*U65k0rxW<6Y;j6LW7 zKlI$%I8c|(%p7DDc;@2vWo92sNyL$Qh#zHgfnH_=neJ_g=VP|8jDhD1kj}@@#eERiC}iThJ>$Qi-a)P=C#7dUm6}(Nc1pPAz5!C!_e8>`qRK z{%Kb70_ZZMev_`n!}4+5d%)T+v2*=>`?Zn_VAf54TS2gkRciyHCb-=Jf06?*>$sZs zX0kqh2IZbI5_cta)s)ISIx&kI$VXzyo8nG(ljBEoz5h8AtB&`#_#B_#&$T;1pYiir zg$@(YW2bg5V#jg=d9pB%A@ALcBbh;FL^_Vteg(?hZIj#TtHM0l(~kB(=l2`*pL2+e zv9+{vo^NBdpLuO&9$n$E22DcEI@Th=IP2^ez%`!@Zi}}W(f8?l0-TePzlk2=^OrSX zd{~=kaR=F0s>T;A^(-a70O`NUdL#BW9&+*89_m>Q9C;B9o56iIdc0N0<|+ zvZ~KB$m>!Po_SJEG&8%Ig?l+~$*O!JxN@$24f;&bQoPzXpikC1@!rguy&@R0;_U_3 zg|saZ3#|(Y-K(GE?>+Kl8LgFg3FJs)q3?j$5}Z2Mu;mX1y(%Za-G#{0~w zl4EK)IyNys$qv?!YW@obr?O5kQ-1Zx%O|Rl^a;ioGeYn)5zL0pv#%w7pSq3G*|R4*%Ja&Hx%e#q-o=S zbThf&tNOhig%dRpAN(zPj_+F!5*2^O7l%DC}{x_>Ta+6$D6 zU&27pC0|y1CFdDw`zX_0?FuBkKk2wtUx}CL>hs@dULt%?Q(~C0XFd5Wi9vXnEL^P2 zH_9Fa)AvxcrIMS`@f@x7N9SbGIYYaxK>Dz8<1@IlM1ku_@n{e}te50fUJKGezGcU@ zx>^^qtXU)N($}KOqd{F0- z&aBIBCB>utEr~Tx&bMA{PU1-V(a~fEOXTJ_8lKa`TUpW>r1euaqqG0F(C%L3VuAj; zz%u@yZPm`6VS8=X*V``Nr)p(`wvsP0`wKbOY>y5X!v72y#LHqIZM=iLTu`hDvnsi{ zkey_&&WJLKJWca|wGz?1&gk`$-}dw--ZUM|WgbP<9m-5cpWJgiS^FFHpQyI@y2KM< zD2*D0x>=ddGdqaC@JTq6d?&5QPgndJN)wYW^T$GlcA;l7owr2OUHbUSNKg~h@eex~ zEE7mVGA=Ab&)%%#;6|N{=zWzL3gg^qkW63i*(99 z)&$8n`X_a3qQ;Z@pG+#J!loBnGzbLA#T^gd+!=F+z9+!F3)yH&heE6BU|7z=#~Uh< z{~zE-jQ8A#(0`2r{}=9TK(_wX%mR{;LS0{;?^+l;Cl@geHv-_ zBWph%{#j#>SF#rf&eK90w0VB!zC&r}1=?y(_L2?eA~^5#eiX<~ zX6dUMX>udPOuH?K#OjC(xl1iR5y_F(+P*688b zTfv<@!4JS$5B!thJI{D^2Wost@*>0WnTr?c!lKojH7sJi5`}+`7JBP{9NJu`-1GX_ zq}DnbbqPFk0(_D+14 z(TdZ2Du0?_PCe_ntXPUkYrrA%aK=V$)-Q1x5TO1f~j#Eurm#dpwHaC!-_%Gy? 
z^$~jTtkHP0QR26-P9C$TP%<9Z$)WK(@aJ!7ntnf=Cy>rkDDrdkdIeWF)!i1M5C@h4o%+U_W0&yB^(>!+1i)>^sKEt$qjX+0;X zFOi-%)xBEHWNVKvezLWHqsOc*-qy}AC33#opRI^bdb|cd@jrR!PW0(xc)g7Rxw){F z5=~(I3C<_ed~#rp@GSmsfp{Ybp7Z|`6zND8DuaBIXLqnSXW>V(^p^%n1GRIDV$R+- z!E?E%4cMDf#ZS(;vj;JnRvie!#4(>r|6lQZn-+3z-d*`nHeNNCpvrB^tkicGeIHGm z4kjbhSf+9;-?udIZG4LlM`ho08}=1GJqV&+pnFqG-+*SE|G~Ur!P)pvCu?5vxo_6u zFTLGJs!FSsvSqwEte|)H1L~2@>|`vW|H%iKER1(qDI{*7tlpnSl?K|oQh%@FbTBPb zw3LYNpK`9n7a(uK;c&A-@jsvc-#3wz;N<1K*E}k6>mm5h+q+}eM_b@$-*XwY6oM(L5$J$*$cM|{AiPYr2@s8#* zYv{@CbpLYlktngmV&=rDr&Yri@Xb)_WxwYe6$e^%Z$OL0(I&s*LpU7oq-iWna<%NH ziSe*WZuxlgzN@F)dyx#ydsy7s=sFDleoIqh=3jU*7X2>L!&7Lu24?pwcRU*00Q!gM z)y+nMYGkr1ioIaAvZ@%9>Vqq{_~vYR6MUB9)EzkWD{Wk;h1_K`hLo-X*@t-Z8onO} z7~7A!@zP%As6la zpN!&jNX4VLx*2ujN0t-K+gO~MAn8XQ5*Ip;6>6g855apKt%#5D6MD`0Ko8LMQafv- zcWF}#^1BBwGsAxt=Xz^x3rpZa>|(oKSA1E1^*06D}p~owQzu*2J&;R&v=5zR4n;%w$ioAa#Au zt&@QLp$(*!I?X2;i@Gkex zoP{ID!(oe&Y%Ba`XnUbg>$P_g>v}uLu7tyh?9Rh%!~1kEqBstt)D&~y2qoOq~et>dMDUo={-;>T3_9iMRKhcaQ zw+knR_Ic9Xl689nZ3n@%G3ai?(|8NMPZQ2U_X(t7Snw4Fo)uGG&8q^Z9u=J=o@e5uBe#!;a2n{_pv{xg+%z@ zq}@+ZDz``HRR5qtZ{NkAerP&c``Md5TN^oLUs_1`<6ur)X(yOGh~CMkUKuU3dzzi4 zL3)0Mp5*?f6WE9w{GUn3*O*E5K#OiH^*9_}OmgC3&;(Acd^!hr&Sg>Kq1@WMVd(bSsO}!p!9`wR1aLa;N9qHF*Yd-j|$lxwqp-l5!%s>rvw6wqEj@yD))ez|q!AuVS8nG?VhwK@va zhidU67AE6Ob_{F5_i2*d1Kn%k%`DV-7*5~nJ$D-pAeq^Z{UutRMza5e5|cbXm*r|{ z^gRjQAHXJAUREpfF`aoG*C)ZFwtg?85AXZ7lD_?bLy7Xu>hc*DI+-=EDpon&jK7JE z7z&@Rpd10h3f@jdo7-secDUs9wHH~)T4al&v9&5?wov)Oij^x;*-A7R$1*Y0b9In9>d3Ed^`@FYH2;^pgA4s1k;=h^?_Y& z6waRIOg&t09^R7u%GlRTos8@Uk;L4Hz7P#Rpmjsgu$MB~+q{I_54TD<5H{t&lDPJq zOXfzrc-_}U$(++4#4dkCrpuGCLv&)1+eg1k?Bm z=XSIDbfl>YE_jC$B|o{RFDu4mr3^Gx@Ui3u@~rBPqT3&!UrEld+;^X6C6M(h zKAlA)%agBPvf<%v$*w?Rh?AK;eKf_ddRoqTe7={Q>gLn`{`};>Uh=HX|La|1gs&*1 zrev?64|+8CPl>D%vjYC_lmBhC{jWnm0U2s=sGb zFDCn2*sK@uezJOrmO5Hro9W$4?9_YUILGrPTFvZoHV*!hrAYkQc2@UF)?&Nbx$msx z?&-~_o4i)dS;@?P;{UZmugM0r%ouPAYq5?UKV0vZ zhv?yKQhEyr-eP-7PJ^ZtWBCOvz(ABL%N8#-bD68J=6*lL&*`AdZuMhIB(Ci#nv?VA 
zoc>ls<5n=so#R`LJg1Pte41r0IFXIdJ;bfqo}8FwKm9qJslf7%MC~>-DtQxfdwWCn zIB+HVNCj3ak%8m&Ih#Cuti?^n!WYyf>@Hq!;^~xA zwjS!n<2F%}OUZ3K5Zh>Lfz~p%eeHQ&obAC%W~MyWSTo4;_b$=3dF2%3lSd8*%kfI9JniUn6=}@i}eTr{rq(AhYzTsP+mDeeXAQ;<-7CwpPQp z$jn*faWZ%lv$qi5i72@N?}PaREJSi7?KTG0gz;V@T)f|>phV_6WkH_#!~}8_xmpJ5 z?r51?bLJL3WDjf}d-I9$@OCYHg>Ox~-{S3g;M{>?uNk2qF>@jLR_6V6{K8_(!xz?KZR@t;ZjTNlr+ z^z<$K%l$1`qh=pE{&FM1J{5$?sr(3dMv&_jN+i?S+u)uAqT~X9UcWhg$&I;-KzI)> zB@*yUrE8#YXEM|kJ+g)#Y(&47EG`FW{ObZ!H#l~Ke_s^eWp5!Kvy;j9O7?P&-jcUz zHav&vaSJGS;Yw~^oeRQbaXFd9_8~Lp(e=y_vMNn}(c8%C6|{AzH}NF@hi})ao!FI@ zBrkWBpMd(m(|h7@tFfqAkL2uYG>NYPn|fw8Wwn^M524l(-u5llPstG0L~pZfKl$(+!kdi~m+$x;DRISxUy$z_74B{{1)d^tVgr*C@_&%u4rk~H#@Z6?b87{|| z8zdfkbTLP6jmo1@U=4|>Niw>U*x&0lnp;PU8@1IB-7hJu;nRBR!ph`?d9^X*4YqCo zy624V0#Y(U%^BXG2D?@C`7fxj3`e`8%tI{WS~_qMytdP^oC=gdxv`*$S9|9I?&Lkc z7+*6-%nfweg~|G~djbC$-Yy1ZMfk-dZwyGbgE@1#iQ35QY%+Xn!2aL9C)as-?|0*M z?wsn2$K!EqwbnA@JC~Hjb9TOZxwZarQhf~x83~_6>E$+_w3Qr1$&_3R9rCst7;3=l zdN{2C%MNp*d4&yHWbVSWlPgOc$*ZG=k69F@Bgf2Bln_J4_E ztM&F3omxXaLfQ0ogIQx+w491wFX3|LiF>t^J-cLBlv?^{c>M45HC^B8)4W#yQCz)9vh z$@!Le%2jZ0ti&>wbUnVUB3}oAcd>HU7@u-O_Ht5O4kt3FsjjVA=0hK8r3aY1`<7Fu zz1}7p(S39-@tHX_yb(A0kkA=m`-)cQmaj8uQ%>%OsF7@OkK*hVU~H%ECHn24W-L=x z7OcB|dxI>Ipl^_^-1hMVd~=@oPyx}axSc4@%*DY}P+|47M2D7%2( zFGt&vT0fcOO`%P>BdHmD??b!D%EK&ZJfF{3s!3r#hp}OaXUV;F$%)vQ=08eq;+d2@ zb<5CfpwHdNV6q?G44UNm>aW}))QZ>5VdSzkDh;HuxrHEiVPr;F6NlG+mw3{PMyhoH83AU4s)LS6z$JyEcs9S zf$B(5O;+o1R9d9W0`C*|+L&A|Ab(jI z?J)E_4W3u(JF$Cj(xH>o%_-EM%tmsG`A@Tyv3mKXPb;;vlz!)a&D!uE3BNrw;0-#O zD9WMa=4!LCrg}|0-S1$RXu(6^@<+HF=I+=)fu#vw42z~BXR3UG`_;y3yfmrjN3<%?oU8*uR4{@;h)sj zE2tBX>D()n)4%VPJ&M)%!5SyGImMS@6dP6+J^O=T2)Vo;XMTYOzeUf)6qG`_#cWi0 z-+u|ioL44Jcz`##B|4t7Iq8k>_zXSHL4%xGm*j4W)u_bg)FLUrF&j+g?yMpPk)2)2 z^|Plqfff9Sbo>HO9xSLkOppKa`Ftfq^Dk-IIQsN5isf%lf4@Zk+_jvT>YOgV1k=Ox zKSP=Lz`l&;$)sIL{j#X_vd>-6ED}*kjl*cx@07^7f7ybAiHpttK`C_o8IB|}JpJdM zwBM5ClF#{@Q_gW}rT4)1iNArXgew0nmrvOvD0v<)@!YhYlhFGM4(EGG>71F?_4JTJ zh9Xb@ZKY&QeMr&If$BzDld-50D3_Z-{|*jANqOQv`;d+Y@G&$VX^hA|@k+82&xL*@ 
z=u?!*yNzV{3U(mAN3jI+$;1NqB){~9W^F&JyTwc+9&wK-kxaE8vZtASPSQu_pXae6 zH+$0=eyv!8{UmvH(RwxdJ;|(Q1!-%cpX5_)reK^`E;TpW&)WQzU0JK|xh(4pW$x7ORu(WhTP7*>uu|Q~`$gJb z#kwu@xt=~UH~1L!-)Dt#N9I)4?-z99Pk29!Elo!69&F<-^cqgKGB4=B`ekn1f^>KH z?VoJ&t*q5o=wCr!nK|S(#pHz;NJkoavdfGmnQwZd$^Gp4Xd19guQ#J)Ije6CW zs@3E_IzfMvLDa&ivr6g9(JJe*#@aoZT}m#r1;)*3c$%H*%xUA>)zKQPkAY?Uq2k@u66M}5R=oSz#L3!?9BfkKN2T}E|Nmnv)}!%fdOk}#dr>X>;PKqK zNt@YuUWc;#NcJ6Cd(?=YbG-QY_9hQGA4?wV&gNs;VIFSu`UeOr!t#1lsD%&7O4PXE zLvmzJpauJkTbm1tGJLQkqJ3lQr!%5PYWXT3}t_cdGt!x7xLIDsui9x+E{t zxuoQLe5kK%8}ylD?7R|u@gaW*r6-bvm7tCXZ~RzuQk2=zWR`n3nVU&d;~$x9mMhGq zz9Q|d*qVnxd6;)!8?~R;Mi11D=Xvtzd`ExB;p0|1bs>q^57YMk#-li^lk|7I)^m%> zmw2BOzfbY29NHv~HFsLPfX_FuFSWE9U*D{z{;G6tw#pf7GVMJMx@zchE=i4-TH!ehk!OqK@j5oGWP6GIF(A=}ktr zf2&Bj`rMCaD)r3QY5+PSA@Fj{_$rpcrAASzw0c3tf(#oLA`lkv11I%S<+31qcV zCm#2A!fXMTw{KZ8SG`r+r{vmK*B2 zgS10oOV`qqmqB$a4rJ|iQ$dYHPG14%^`MN0(ms?}=>5B3egxm*Go5J5tX0RtaV}as zs`t&P`4N2I_dfd$8B;Gq)A*Aoi(WDZpwD;6+RQeSpL9!AdTu-Kcko-kRV^53SVG zTTYf&qF{FaN?Xyqt9~=M&%~ulRws|?y`6S8ld3OBFD(?ys{eHQ}`~lE%a^-{2F_*+ZuO;7B=fE5mmMHzM4J`2g?i^mMEy~cRiu~ z>}AG(qB|NUe^vGYJHdFW&u?hud5~U8x)K4fk{otKw`9(K)6BaYOFe@98%cjYh1Fet zs}#EUJkBPoN%G3S2KGBh{H63Fv5|iP|94tDTR$`LFAtW z-7?e2%?l5iX(X@6mmu4LN8^;c8HI8S*g6;`zx5excPre;Z6LRqZ8uaedz}lEI7HbE zKIQ!WV&nCJ^l`Z-^U&fBlxkE(LZqpeAFtdkzf z8PQk4I=3@-09$+~o+_;Ay`-*@l~VRE;+b_cO4KQ2DVZ`ldY6dvvPRMG^^lw&e?YI9 zxZMm#{(!gh@UJ5(XOHDG6r7A&=fP$Nsz0mDZ%BMOy`QGev7RR5`UoX*ugYblEm6D6 z^q6eMtI2rogo;0I)~KssG)Ac>!I!+B$uW|Z|1M9mCv^`wT&m|6jZ8UVJD+xxRw8E` z^^{1S$5&`a6VN8_ZA)!5BK22+^Fz8*mYhASM)&`$j~c3x49IWeW+%`dh+C)W=Tcn$ zo8GR)uWNDRB0UVnr{scYX#Mply667q!V{yHtU=gCJ#mfPm^P*rk%Q`0bmaN;8$M_%WXRl@=m@4^v7XBrt z@u{rcsrtVcrRwXgC-_TzaYMcA0pzCLQZyj#eFpl@ph^a~WT?)wBfUG59gg>L_6D+& zE@KQYxk;+G8V9pBO|_lUca!790bij^_EPR;cb_&AMah)TeoLa2JNnrDI` zP$zHNPkr5i1106M_Y+A8$A3Xqjsx#lHt{>PsuWZ^*V~M9?Z|%4yK<9P=Frt?U$meK z>y`g!vm*H)@0W8~_I~W&>16i@RNdlpa<|0t-Atw$u|&BG<8zvx98b4u^#(HYRgLok5tvty@Xx*P;rR{V7z>i-^dvrv9a#Epf# zr_}|lZhIs5GvI#F_;Ci?o5%&1Spnw8k>m_a9+PHTPnOf{ 
zf#epjnk;lL5I%uV>+v~fhu@Ku@7Ri`(6=`$l3PtaMCVP)&+@dRQKT=3vpUQweQGfl zB;W68DB8;$C1+f{Vf`V=9l=JO!tTYNb{?5)iCWVO+Qu6&v%h55%$+q=jb?3NaGh}= z*_%&Qa~J$Rx00A=th)u)IhT6Jr*SCJ8b^PxjUC=)Wtg3$tnM>ceFv0P!QY>R%_?T& z@z9RpJ+Oy1@e+Ahyl4c2OKnoEn~u^bQ9oEt9z z?R)HWa+#mNess}lJJ4)J%e5>-V_Y82TI6gmV^=%Rb0%}Tzh|=ngO$piVO!9*MnQpO zUYQM&@6q{Ua*|BFfhc#9CRb5nTyry1O})Ir62HKrZz4sTVco&s+~>UprRLJ`oOP{% zZ4a=vQl~D-9d1;(21S?Bo#A?%MrLxK>;;9kRs;XHtn{Fwy=wTBc-5|2Ii5`(O@d0! zMysG}4KlW0&!?I}WZiL#S;rM5y$xB&Iyl+trdt)CLC!j(M6#Dpz^xYSWp;kPH0FJ# zpWJ0r4P}l;!FVaeulr=!wLtObKsuDxCWfsyXnV1GWpFTaqU@H9B3;=JnT)?1*v`lC zYQ6HwbvMUounkY*37q+PT6ha>?JFQ>;=>`ZI%n7B{NV`%hW;b!=dEezL!sd*)(I9x|pTZ*yxJ9dFY9 zC=fh{=;uj%ORj?C;#*C6>yW$Y#+?hmxEj~v9}_>&&INzE;YH#vAAo!IWV7$H2@Ss> zpIz9sg?QYK*3So7*0~*g>TTwA9WHLsPgkwp3HLknozt{o$F8BRyM#ZY6FZpY~(r!C)yN84)LaqvHQvuxH(yC-_ z2)}ak>pBt{Z}JheCG((z$!xsQv+ha$`R%ZcFGWApzYE7s!mB6Mtd7f9;AO_fL7tb8N&i3gp857DkJMvvs2 z%-PL)y=VL#Oy({}yFV9l`#4E^Opj-fuu*v1hW@QZwFAs1Mu01~8jL{U`fvy@vp>Ft zY&>7kEB+_lKyocfTEJT5wDB1|KF`WL4w4s@kKaoV6wgj#?zZ294}UCl`5f@}XEoNq zV@w=S^&C+d_UGXJHO0R+1%a_gLCn%b@yrUezjt?({a>H14&|OIiwrVr+Vp-2jHCndUdu~c< z2cCP;D*04%5*+W-pX;fOS?qW`ivQk4Fu#L@=ftS5vbkOPUt~X-E&q*zgJAt<-!4S8 zti6*nY$UnA!p})$u$F$;nQt$L&5P`QU(~G3@-!i_ZzvNVyrqqfFrvSvpWSBBXVBXw z-fen7aD4t#r1 zR5!L))>~5=p1X(2qt$~f@!fjdNJ207c9UmK(Pc4N>VOVyXu}}weS}l(yh$$QaelJa zyWeU$zPA0rm-E}ixlTvHtX;+{@iX$5d_@h}>rpf(7IsA;?cj#gnQ1Nvq$$5c{qkfZvtUgvY(ty1HhNn@Sl8p+izlik2dql z=|VCgeS;69X=3bRR(=(&G>;-PSylWDHIsLFJRUDMZl8%Bxv@5W#$D7L@fI$ ziKfXTb*N9r(yCKfgDHCNSwMFt{df~?ldI}+5XK*R6xv^n2Z>qv7R`qg^j>JDQ4eez z@qfAcr=xJNPV8y!#s1Rx^AT(R8i~y~a}R!=to4E9>HsqE(@HT}&vGYHR;$_hNYw96 z-$TW$XP?!}&sea~=+J_?AFKP1qOY<(W#ygKWj$Cwtz=f`shO`i4Sgyzf1J+GjvLiPx~ED);}qp z<&?gXC)t%qy`xbuxxW85ry&(2SR~pKSJLd!L-r zcY^N*n({Ye)Yi$2%F{c;%ep?4E$%5ddIv)tEupVdQK)cqAG6H(p=#M#HIN75UCVFQiGDqx_I zGKqUi>``)8wJ+ZP z743$qSx1lEjK8g5-vyuMgChA=R?)&nqBTLTk za&Kv}0p;G&8aSGL@v48J|t4EyA{se zsFZlGy)bNvLzPe<5v;d)I>p@VLmb{rpNyy=U#-q&(zzCuCe!6)uh@@@Q}mmdp!eaG 
zEM%{-M9uX-*nBs4U4M;IRcXnkxR(gfoEQ!zcahuN>iZe0HDXN?J&aU~PjlF4!-|KMlB@!7Q`P`cMfPU7aSoWAk`TQ?fJYZ(pmTcy% zvn_p|3ZfmP{}dWK7XNq9lt!?Mzu_R3Ec^ZYK(N` zujSl|)PcQOpr1x;Tvt-DTaWQ1JVm{Dy+-op6__X1stVfmpfRE6d*tJG^q7VE$pw|0 zj<v{z>fxyYO&J}i?_uQA=Pq3`eM zPG{6=0`lv@6)*o+VSKZh#`~;VVn!3YzYvU>7w1;EczEQ@dMJ3l!sF!d&pCHrSbPT8 zUD~UNR`IeO0jE_ja38jA$trDvqKNS zWGSn7l0Aic%n zMDK(m@u_-JOR;qyDxX_<_p`q7kV;I%7#vDmYgTi~c38`JI*rEOK<+ZCz691$Xp;Dr zXyi%e2s6O983o=~?-qX(6Oe3+8D($N!)$%eVIk-Fmgwm1^gX9^xtG2StxRm|H@@$u zpR2%;ED#@}^gS@m`Zeo`PvO0S-A>fRQ1tIf53_pSS4eved>cfb51y2 zkI$h;GaA?eX1Bw36S`ieh2&7LUhwT~V|chzU4N_e9cwujf0GX@E05KMg*|G z$TESg8-fF0STi)%R`y^U(YlNjv*6i<#Fhq2?m$V-h6!k%orOe_Jnn6@Zw>t`XKptb zhS|})lVyBL$y~;PMCDj{UC=%BKpO zwU`E+rfe^iJVpIl=#VjNJC45X%@FNY^LIQ7Z%4OFN#nz~_aU1VU5t%903RMfh0~0& ztw5P*h3p2k*J?6UoU7+|$WCs@&W^=U^MK{X+U)8l$60)-9wGxd+sdSB0T{amTs&7Rfr{TrV@q#+y8C)qEn;!7{KEW2DS z=x<_3I+B%3lVV*yswT+@taa+D!I0*+)j2+nh*)R~gYqlB>ETv~01K zPfW;P^_3jl=dc91VW$b+9?vE%FjvWG_&Hh`$?Aot)ydWw=8tX2%Su>&SIAJ}#=i0F zdo)edV`XoTh0PLj`a6<$1t~wD9##j<4gQaFay%J)9j*1U-x#m2RUmp7T))*eB-V)o?) zvz?oaI6qga27bl-)*4|IeomWUd1Ud%<@As^t#o zJBdsX$;oyL81-a99zfX!p4CFBa#wW z<41p#Z^?QU&*_#vzr(t{jPfn;r;N68M)r5|`5SsZl&8*0u zA-l~%wgZeg|G$9s4j;1PajX)#lk{M{eBkdY7V%^w;RRsH{YvrZK3GppQ8sn=vJdg! 
z4_^)-FW-2c^ZiQxhQ4`Ts(_;k-W;IKlKjxg!!?1PEM^;X9^4SEN)^BHL@1?|WUjtn z-xK{$G{eJWpnEtjgsTDu- zcr$#03JqycNwoa`zpn;G`4Va+V|nB&J3p8Ed>GolOc%amDeI8mxklyWhD}v1~N1%pgU{)UzDLmebc{Kz)z(_>|4qY^3@%nO&-t{l)Az_gW@jQY+Gt5&l&0 zf38F`mTd&vQPKOcdOlSvTS1i=(fQ!dO-Ze2{YnxOAKwRHxy*|2V)(^N=yn!yB?-+c zWfyDkh$oLKSDM5}zp`S_E#3E;sUC*Xzd?s-w7UZf-$c!SS_#KPICn=^rz^R2HWA5L zLtm^$7bC_;MzZY1FE$S5ykUvf66ZU_r(MR-r;MiY>H7sb+y#agVZRsNtv%g_I`NuK zUg9;hvA?IaSg?`k_X?>vOzT~Z0vq+3e6$T{?^rEn&pdZP#NX%|H8cCEtH-59TO)8I zd(p}DTmiq&#l?YSUutppD$LLcwH%{YaS$eBZ~Cbzw2$jn@v0N9yrP^Pwp) zJAqUM&!u`BsPuQb^;2~0W}K-C?zVn^jS4xFAF1qU8kzow z88P?a`5WZ7EdD2ld2UzejgL1NTXR}`A=tk_vHJR70P{!SmYbK7n=J98**k25Y9G;z zbu4)N{O4(JgnB)2t}l*cZ>cdEYoy)RwA;kfD;T7*y&jXRvVO9NKbOkuuQux!O($x z<=iW0$tyrx%bQE|o)f_4#jHMh)D>?Y!})gX!;7#!N9o)-9Y4{!M!srFOfyS38ox7d z?929b#HVT?sEBi4!8h6SzcBBLAKWCpX9r*oXy)MgXm#cq8&5*nMzCFhR*7-QZEpvX zmgGg7&1NL8R!**yIq(pFFN0^>Vy~qcnc1Y|Y|y=9uDYJo#KSt-Z}t~_Zt3kkG&uRL zvzIvtq#v`BxtHr}<*x%{G6}T>-7|RIjgD;xU2^?z@P8J0>Pr8vLzT?C%aG}KpgltO zH;|9RNZT1On2dto!FmdNm|R#hlxPKtoc|m~K3b?(0ameimwWqn)ZdG`Ey-_gbWQ%* zHF`eQ=VX#Sj(q2qu*68#BUx3=5VoRjJoIx^#A^H=b(<*pFnt;YlK6bi2V49w za`Idcg-=H3cse|V@+(SDz4*H;Eq8SpKdjiEE}RJODBbLQ1ai}9d7SGn9wm6(E|_|i}tjp%zk6~^Gh z?WC$AKGs(EE2TDrYOT4&W>z=n)U|P@E`GN{qt9W}#J9Ox9bpDC*^^%A-+^3Q3*+3t zcoy1ahcw<@xn*iL%B_G|{MK^w&2ZR7*S^J{#%S{{e4_oG&Hb`JGO4hnBgtp{{<|5O zy5rYd>|Z(ZaS$%YBP;pkN7JGo*q@JJ+|@j*g4(@sb1u2OOq(x~-@o8tPPwytnY(N^ zgYLnCvn$C|a`fLp8}9UbC~WV6ZL%W_$DQ4vYoX=KQ9aQY&yn#^=p=mW1g-|~dJG+A z>8Y35$fw4lN=C5NtWrF~5{uMG52HX_f&6qqpJw__F181ZAi2vacXre^y3A&CPbc-A z&3ES;*DD+OcIq*?l&>ReXOi$l)g_bj3oz(|lFMktIjB_?9dg5E_Php@u)pf(0TP&8 zEQvXL37jMG?jmw>9}35VsIPVoS8^0xE=4~-!PjKEnu?!e$j%>GrkfJOS6XLfy_9y7`&S5C0l2KSuQZ^4gVWF+4B=b%tDU>Y7@g?1MkD{7MK(YW5) z-_KE@t~$B9r#VPZ*21^GKf|(SPreKrlME!uUYdxX6TNTFqI`ubIkTSN-D30C_+eB; zsnK}247`=ec}s048gLkmkH^_gy_7+pZEzcBZIFn^r&){Ke_MvcWOn`~i&0LCOF&Ty zZR6`z0ba*z?K(4)YGB$z=H}w|GIWaX{?jz-em3<==M-0xxwc9_58u)_bp#GnB}=(! 
zHMyG4GNPYZ%;amLdG2-18u}|-YEFL=6OxS0SCflF(K*?avu9ivZkLeRCsA((S;-mV zi*#!v2Hb6mTW!AR-6dy@~nCTlnJ(FP~>jVJc9;z zgXEXmKMUpJZ5&_ae`>3*-+0d+;>k|4=rhf9H+pg{JMf8bFOk@B%4GHZC471scd~X& zbobr3S`IDm^0yOPXnsg-werIc%^9m z95bw!3%Ii~%zCP>9tP-rhIZa9c(wzK@v_f8NbZCh;M2F{?h`fs4)5HckaO_npc{aj zH^b}HqNkjVf2o&OQ1f6^C`C7t0WCS9o3aTxy}qg7=>YGN%{sp3pTn#q20H%g$HV^b z`Y+G6C0j%pl=wlLFKF=-wdxxu()x$K4Z!WTTB+lCya$rEx(-N#b9}H%sk>O26ZMl- zYfk*LioSt_BuYAGw#|(;ufj0v?POQE#HaXGzlo~(bc!~!+WeyU`*QK@Aa<`5`sYo_ zNp=30{N&s^b<%d8{C8D)ls0o>lvQZFD+c_h#!uej|81eCL~Vxa+vf-wf`$-$&ax9)mV-RM&k$kJ(T>dCvU$m-e$I+ zbLB*a?Dc*HE+yB%O06XhF?qCxvIl7~zWm=BqgJW&78w{tvmVe+&d4&;|JKM{gDzjp zTKrZ0vaH>2d_I*us*cCK@vo8ZH{y0Yn(qYl7!r|LWp+bvrc)J_e2;BUHrB3s8cT=1 z_I)}C+p^1xU>%>BRkS>rV6xwywLqe8Kc-8M!(b~cbCSKAy=z8KcBu6a(Ep1h#^3IH zb#JH5J<(wsxF^u0#CJ5;PD8Su8`+Oh`ynvY)bGbApEKE5!*~PcMx_?Ug2R$IMbP z&LvMzPRjZj!Iq=<7yd2=$x_cVi_2MIyw59Zs~fH+&MW(a;pr6P?)B){%(Fz&{Y5W1 zd&!y6xy4#@4v4=ZnZ59Ro@eXS%z5?$_%*={AW>(RX}bgISVLl4>30#TCMq=Z?ys#T z=78XH&nuANm*JO)swYTQ@^$C-)5Gv0Ianv-!&ll(jX!s2Djo)`q(8XtZr{AnJl0mW+8Nb^1WVcWD zrj;ld|JuD;+*R~f*$iV{!PEUH*F*1rWCM1CFEMewXi_IKav3>09>&Q5HwhF=No4Z% zwkEOJR~n|jO&s>Nc!iHw7Q_mzR(nHCn`O8v=ebQ zh~%d2M2#i0TFLF(xi_|nr*n*O3-yEs`)i8J>gB__*#MIGE#J;Pq}$KyFi)I*3`yOd`%R5ZaPWC z`7%7-#11Sav!~F4M5yP+rg-Qj8nhn{t|JqPv73m?ITdRKn)bzPH76CjmAW4960=wn z9kX`+6er^W@gSvyqcPzY+D4JE#pjKLW=l($R`6<+hfby8N9k^&v|W zlua(Oi>upKXc3LsrTwoV^ zC#%l0g>_nmYMFWN1b;I;`3k4b1z*Yj*{xd4P2Y`i_6&bp7xR@4u$oR{b1z8z={ssa zo<6yY=pOIVPOQ)tKd}UfXxIa{9bk>;bp=mzJI@q&CPp%u;jRT|BKqSEo*3)Tz3+;W z=a9v_^!yx39SP28frz|q{~mWYkm^kL+7 z9IjqNb`$BIdjRA4kqmM>XrPRTj-8wI>eK^Z%BHQH6B)1R}v$#7hgOb=$sjz^^lXw=PoCc63|3!l}=eAYa% z$n}kX)zs>x^<+324T|iKyak42ay}e%4{50ld?T}w-<&{(gFVq`1qhR8`rn1s+GsAE z`~`FLHqqZ?AQ+7nFR(gC=;dK`@3n(kll+y~^X*zlPRi>@>JVC!9KNgc^tpa_vOtY- z?|l+Hg#=Ear9Z;2zqxU8b?gA&7Uh1Y&s*t#Nv^+mDOCeib_O2D|9Eu20j9@%dz<$D z0zIz+Z7q6L0lje7Bp-Jp8LR- z*ysW9Z-fh{upUi~*;CA#N`sRsxcOMcgY;W52Dh8sC`xEv=+>){Wi^IH1*SNzW{z763r 
z6o!v$IT=>2Gw<$7kIQRs5UFf#+&IFjG7+8wV1B$6*(+8S2U@LGRqJF=?Kok~Z`!6yD2KS!;seB#4aCWuawW(-BzBS)WPv(rd4SLPDS1|MdYpZd{KS7dXM!bsJ621{?v?wV&MI{F0CJPne5CpU zzrWO4Wuwo4qFzb;oJN=YWIrUIn#1U)XZfF%@qfQZUVFo^{KT6|D3D7r{?e2k6uQ&&v4|}_F6s0uWFiBdaw)U znTI@!uWQ(xn)*5c^r!3bMqGN_=l?T)-{JcNWx z3F3COldon2t{mZaIITDrC+_m8v6fyy-EUDP{#wtOO&x>Mi}aKzz3iILH%5G_P9h!O z_at6Y4|~>!p3Tw1tsq%iJl&|(!|>>1&yyWDcQ#Ga_a>a*f?CteF_X)3I9?|wR4vvf ze&!QUc8hjWqYZvs3Y**pItpZG7}efUGPgyq0$omK4%bf~t;VN2QNm-pZYR^pK^gzfschO@b z5>UtNCAoU1;r(Wxl2_$Mdb(9V-C52#tY25MaG8;;4~{kU>91rW*_RV3_7+}eUbfhi z>x>Sk<5IjU&n4AeNaAn&#vl7tzxA}1c;YAFGu)gl`Bn#emOH-Ilc*zMm|g4SWBpb; zwbA-ZI#(HdIj=}otK7xBlzguMMQ)6Y&+A;zauPfmXWJT6PV($_@YgW{OvQ<6ss(d6fIft{)mfVS<@fEDqiJnx& z|6dx>lczd+U$3D?JY$fN@gUD!3%AV9x7ZRVDdNRnL1B%Vy{}d;Ck-VC?S=Z;> z`w_`nNs{7O)0cheijy1ENQU4y^iW0FxAn8d3@xoDmtQ_*_r5>bsso4I=N(C2!cGju z`RuM-jKiJSs=>5!nUW1jS=RBVDN%9`koo=NdN>DmN8!&>C9ctWOReSS94#gqbp$&# zABARWtrsr7tDV96%r5Ibe975!VE&jSEF??Uvx zeeJ2O74Y0kMska9qKO-_&haCf0)p83U!(f3@Mj6iCG+1v(7ayQ<(ps=|C-Kh{>SPj zwlvvtm+LEQ`2Hkdsj>q=_Xjc&8yX3(=JQO_p9~Gru8YWJqEZg`bcnXHaynN#iGiA< zt?Oa)QqfnUhu>lWmeb7aKu$&Z+&Y~*$FHYh$Afe-{v67Bo?=cq&*+!DS4(KiD@upc z^U2)9#=84K5kJ%_TAPkGeMv{|am_wOR?)wunXSxAjyG1kNFEYh-wAKCmz)fA`|)}f zJ9rmrFU8A@QQw;v#Ovc!8u%_ut^`4REQ0GbIGNeNJhQmuJfER%Mxgk-HqqkeV7uIi zm;H#(aHpy|Lr^C1FD>=n+vk4T90{sAX!^MCB{xpBGoPsr?p{U7+JZfeX3YH%^;gQz*R-dSCZ5XxNvPT z;+#*rZo;?Zz}N)8a#|bW^B3eRQPqh6e+RwBn=gOFs$Zg|#Mq99_k%ER54M`5xF?F_ zCYYDaKIh_1JjXA=tGzfd752}O%Zf(0I_lhpM*~qk8L!SnrOU|eUev6hugp2Cd)JX} z{}zO0Nzy``n9gz}Qa&doccRA2T9`wwwvfzrYTsZ)szFAt0_OttOY8MOl9ZYEV=&8^ zY@#FMOLhWItf#+;Uj3MD{nGP}q`jJwi)c`|_JnuW7%K;p;OQ`VL2Ee+Nw%V7nQ4F) z+rSrp-prq~;yX_7Im7yAvEq7`Z5XfDLzKBfFY&KE7Mur@l>TtKi=EA>+jw-jLMu7L zIT9>cqn1SKCR25C6CDAc+_%&K4@;tYzgHr;5dPxt5^qb^5`nOR&pDyZ8s(RTt@)M~ z=6vr1Q09CstB_KD%HZH_Ry>I@PW_pmRelx&C2IUO5-6S-wLXX`u2;FV}H-tX~dNRQ!E6S6pp zm5SGGwCh;fm)n5@>4SO;EyuxP9Eod1zw)jmGPiW2#YXhpU!2w92jE*w`{IdPnkAkM zhG8h071+Trt>jHbbjz*7rTh(yw<(**@-DcQdN2E%cUb{8rK!IIdjr3rehDYCfgQ8@5j>MOo)aPA#i$`;^v6c0EU;#nNoBU0t 
zt+M}tIdYL_$yED8v2K0{{~FM~NLJ3fa|>}tqxYwUWc)zN5`&h!)2Gt(_4Fk1K)JJc z3l6^P%|K&(qW0RbQ@ODwd-Vggm7U|hp7p20dkS0o5>AxdofIFY>00hf);?!NKUVJt z?Os7+`>_q#b(_V`Ud+OMpmrjL6N}XqXSNtg){~o9kNC(fB~OX4s{q0)Sd~NoW!~D1 z6vn@J8e1{fcwfictTYGzKZM;0*w1C!KYmN5MSDsSsVNm=BuOZmsj1N>O0u*VWQj_) zR%uWBLWHD{7A-=EObeB0(WX!hDnb%1CLRCR`|}=k4wCN-qgz)@a8n*JsMmaC9+R^ zJS&n2zgy5?2VCpy({)CW%<8u&eWkZK6{@eFc&Z+w?m49RadqxM{V!n{ACBQ{NhR`` zea<tz8(EK8G~z^08?e*K=Tj3EXBuIL z6r)u|+V)R0N+iQmQ8Jl`_XXo==$(j#H`A77tYZ8}+IoA7cUyehY_>BH+&7th{0jxL zd!JYrb;-#QAj!?ALveXmV`omB?n1e{z0JAJI`$_~uXgk1THMV3`c`sMiW|06JIT#? zEIXW&w7z&*6ZDU0tug6nrDh$NkDy_RYL+whWHUKXA6LR`IEr+~m*>$nzOoOa#xfA~ z*4HC?dBSMZ8CDCykvKH*j+^T3P5MvN^5heku9f5Uo;~OqP?Bq><;{79hMWI?7IH~(tC=1zxMc%*e3g;z$i2x$)d#1 zYzip%E9Qb(J;f6$GBSrvtcBZ&dT@o75~JomyqFHM#x(FGG{_zCc<3ai#RDWTyCu1u z+Kb$e^7KBf9)*IJ>0_6Ij=#~-9nicBy1Yixv+JC*tZ=y+>h@Hsws}JC#D7ofPR6ls zQ9hZ7axb8+9?sFjIJIZtfAafw)>=<8pA+Zf^q+`xJ?UR=uJ&XL&cV%`T_hIzWy);S z+8CoiynVlC|KeG_*4ymreCBUGBf?Z%*c-OvK9XBOm5oZLv|tV#hY#<Z+Z~K(++dV?E7jP5cJ$)aHfS98Kbm z$Aj$jE+W@CIUl0!ebuN-FA{_KO_Z7mg7_8w471VfWFvaKmmZ#m!Ek*h+fG&?RdJ|= zUXl}f1nMkNVgSq+qgdwE=b*!6Wop565InA#*dJ49K$R$7B>?qY75S0yi4SQ?ACN3)BVU< zvdO%o-_mW6$H`Sr*#4sJ#d@sgZ8NQ3rrfi%_!!g-p4n}P9R9O_XCi9<49b(x_(-iL zet04UjP$#Sj6S8s>U4YiLT0iv*dA>sncttI#nZ{!NHk0Y#@uEq2i95Y$L@84OLSnE z7FOcY8qeN==PTL>Peb?k(!}EQNAC~FNEZ}M)}3=$tLwde7j3&Kb2E%)s+BdxE+BZT zpkrV5GOOQ@Ncl=nGP-}FTq5`V02o!`$8%=3@3*i)7lLj&sZYHAyHVyG+(@3=oMj)5>o36ZRJ7g>W|8$| zblhZxdjNU*l7-9d*81p}s9d>io0yn2lx(b>Ro=A+V|^G51N8=$?@E#ue}QCH%b!C^w50I1cPHP^A*Q;-zr4y4g|tl?>%a_~RY(ycXg~lv}ci8y?S~E5W!GWWCj@My5_+&2kqikp`3H@-;ekJL+a{F1Nhq zvk={3ns^u4^ZpJMnrk~gS&}j6pBqvw;E^nW!wW4)&c@LsB2lW^v(~w@eH3oR6SSQ^dVqC5T6dmz%@ zoNT3a^ctK=w%m*y*-`0@rfsyC(KViH_2@$rC8mPx7xkXQo$+93r*wQd;!&_(n~6i# zSTCO|_d5&u37iLjCi{c;pvC3-ui{-|Yb+#L8`P`l{Q#EgdA4(fk>_rzeJVRZnx`z78dFEA)KrBwJy!PnIViO+C-9%5H2&H#B`;z2woo z2^TM+U5ik)A{tlu6W z2NghYsqzP)#;zoOCHcSDPi}uEkMThTtTjOOXILi6M<`cbx$HqCvQ@mRv$Ed-RL2)B zWuK;v)~CO-*UVn_$h})w|l>2~_3D&@Db4 
zp!^2Xx?agd9~e+jHW|6HV%(`{t32I|@AGw};ZxFH)$e;CKFiyj5GKZQX?1xJ+1>>t zW4(#DdU8>hL5b$Y=X}pf`f_~UtypgjRQ6=wvx>~#!dR5OUg>xjS0Vx1d7p^jSt;+x z(p>BR4n-@aw(>V8sk{A;Xa9M-?Nip@b6b`%q6g;0`^uSVBAlh)Qr}tmCVO!d??lqFFEf`Vs*K;q6%;6sgV_^ZYj(DoBpyoB9`trA zDkmmPvI1R6wwkh+=hN7wMw*<3{$k7+K%$RTvI%Y4q-I@&mgcE#g#3Y+yh-HLZl{7?_a|DQm4Ee$`LR*WW_FM2zP7Oo-z^VywF zp54NxBKM3%CS$_g{N%BRAvJWJP8o4G}|n62tg(~}J}=VI}j ztZCkm`}E(VX>Oe6ret5ep5*VZZ10U`1(z9rk|*IiwGLvvGX}iN${uY*Sm1wr_{*!C z+?A`8i>FWGxt30x7Zjtz?%I0`9PPDoFReNYd`Hr)Wb3Ws^SkKX*W7Fn>vuXn^+lrx z*wviS&C^Rpw5*jEdY(DxyQu$nbF<_(Yi~x<8Er-tBi4_&wiM>~X#EqKbQC@eLz|aT zu>mdlRSSQDRXgoeXIZL)^b$~f#CAT58@cb&gs!e(ZDxY#czq;pLMuOsjMJBuJ`v9* znO7(FR&~8E@qA?Q^)r{9qt%XR_6|wT z{g~t)N_55+_!bZ0#NZsRwPcM*RN+eKH6MSwsh=o;!||#enad5+yHNLBHY3raHW;J( zpmjV9o1tsY`Qlgn1AI?~)o7ZOxkBzf-hk6}V30GzoQKBm_hG#G0#1w3D$)O5^EVy@ zk+9#5zKd9fLzM1A+B%co%u*JZ(>{k*nGt0jm3a93;op8_I(yIY2zv{ZpMt&)`fS$X z7+SNdxBc~d47w}=>q&6^lC_El?$>BnQQhR9O!ThdW^m`~?KnKTlGF_+oA1G_9SJ(J zuv{OL$@pF0gM-OpkhNk?!Y7itoXGu-y63=qCf>BtMmy~$YR1c~%iCJrLc>_VFo*Z&k8$q|7Pc%E|U4`g}z1ISp+}pQh36}70^@B%uTuOH0CuwbNH+Rr;;-oBB z<`Vz27J31Fy0K7i7x*tnOW&jn*1<&_4a>1G+R#_7HTKC z+P=ljzq6T{3CGVkW7#~|w1P_{YXC0AGUqHj>!`njaJ^6ak;?dV=Z5zNa*{Y)>y>P2 zek#YG5&asnGY78mLO%{gdm6zevV}Lo{U`K|=Y4i7b2IF1Pa48Ml3xo}AD~Fi&T{e@ zOWs?ZoU%>Bzk&KapX5dAUPtZgSo3Gumr1zQ&;RVJ^o2{J!%d|7@ue?Q^maemKCHDB zxH%7p5+DB$df2RNV|DA(&_sOQ8RSQ?M(3gSLNuF3Zo8sYpgUEogYe}L^PlHYHn#CF zWq+pC`?G+N&8(SopZq$d7o*$h?95is<%al|T3dtao8gqKxi_I_{QX<9EM;I5uiOvG z!4y!BL5V}uYXm?kw={|(faT`zYbcmDY&c0Wcw?%a_@aK8xmjdar7nrzDLo~=IGx^+f#9` zIp`Lmej9YTAN@{4t9Wkj=gF5hcicQ45HG0QrbjcZ5-PDZt-@v1*)YLUL&3C-!( zJ}9~yPPg!+xn3UdK5K%p+CPDYo(sYka58x{#-mc&ZViJfXq61YIRncId@mfUKsIyF zaZa(SsezBl6qNJL_;!vfdQObw_w{@xK8#g5E2ayI6;k}`fA*%5nb~2m{j>UC{<}<7 z)Z0$KxuvyAKUv+xAGe&6Wx+AH7|TyrVgreeN6TO`kQ2wOQL`)1hTfMexSKwA0%IdI z-qG{f<~|jC9tyMMKg$08P2?wj%H_0`)3Y~=o)Qr*UaiNYO>30OJ-p}i(wQyHE!_>; zSc%@HRbv^lHdkva)M$-6o(Lksn)qI+)kZV!q|>U|5Qsqn~s*-$I^yr*1x8vv55XR~%L`2g>xhm@-2_lbZ1 
zAFFJn=)eB#ubpk**ZNet@w@HG|LuSFDz^P>JGsqzcnHbZ$#1aE`!qKUao(@nOb)Yoq%PyeRAcTVq_6|H z6Hg*B1Ro>gb&DB7&iix9lKh$RJN+1S5`Abpc;#-`DYWfzQZiOASpnx};~mPiMw3g) zNShRRK%l(uY{HH8dRuWlNwbRZI#ZKuEl&TBknJir; z*!~4|dg|c?^6(K3%rXXCg?q^i^d#!nK+W9BJOrHmVEit)m!ZKN7A;<*8Fdl?Cs|tt z;^<$%-CJAn`C7#yjzodP`N*BqXOuXFB>h#pqga-`SkL&Otu!l`=V=GD{#>2h|7=K> zM$yc8x{g*m_rYHGei`15XB!gR_5~8r$L}9dHgnXcP;&te*Ee6tye%i|@on6*un%p( zSO>nuyIRRxe5sc2V(k`#_A>ID)2K0YB%XY^ z51R~_i9~Y_s@{q!`=QMbbfGgyJ^*WH(6*#ke*tIwZR7P^j(%kBe~#9(3mvbd+(?^D zDwgx`i|^iTMZKTFn^;+$$;yX#k(+4~%!Zyp)v35w7iF?P9Up*rseQl_jsZ!0;}cCN zHw$N>U!t4!B16fT)SqkuM4)fUlX`rjDd@_;$ zs_eI<;V^X@quf-z#2$4b2g#xv87xnB+o55ieP=v4(*fE~o`N5g z$a*0WV<($u%u+L_IiYs^T1socob)B;a2Ne&N1zv-m<76TS;$g0^PDDS$NyX{UQ^5@ zm!tI<7UvwcWiw6q1kZ2Kd(QOUN1w?o%g>}~7@4exzty#Ls&@O~%iqb=W^l|SgVU7% zD^0ys4~bHflkuC;GG1qi`Igns7Imjxnn)*eNgmWhZWpjjpR-}u2}P|W@Q`DhxOnb z1k&&@xyb6GO|pWHLc{Ds9FObCzVd}PiBnS@-^b`He#W!>B&O{Cto&NsyAWJQ=wpH> z@r;O%WzNzDg0dRewvzDL@L7i6!%=O4a@UZoKIFI-`khOn;^~)@@`q6-ISZ5X>`-%r ztkl;i6Z-t7*TlPwMH{8<+_g?FpqvKpi^n-@`dm+q@UJbcJORCPuOh3f9kuk4x;gcD zM%&2(6W@Yl9jjAVl25evPi@8L;5BcOIsI__ik>e*uNSqDb=6Gv^HHtNSL$52XCGsP z{u4d-E|j_j{hmdO4Y)d4-xnIq67#w)?cV5Vzk&~K(YP%s>ch%R#_^|dWiwpspvgX< z{0U#)L9h7m#@Z|dLq{0aG7pQN_!v-SPvR*hUTXP+5q%j69*)Z|g7__1)nz-DDH-pl zyFt{SW+u+~QuWuN!Z)~f7CaO0;9xy{0Ma8p%X#Yo+Bnn5xd8vxTfJOIGIKNfkLdj+ zTp!YR@`zkY$~vNDMK#y@n{0B4V887ya3?amnH(f9UFJCZ;c+|Ex(X++f^kDI6v`qP3*2o_*6xk71;e`mHs0XHK4z%w*~Po0{dxOx9pC^^;S$+{kQ>rU&`n20wl(YGkchfrUwIlSFA6 z2cz62&&n^i`kwH&jFvww+Dg=&)@14m)Opcd;UJt%W{z7(X!f|{?>QJhvr5a#B&TYX z_1zxLTA<18I2-TS%G$`eoRjdPiLwWg-!qlSYONdFu$|TQ8KgHS_^+6^9mdYYJK}h?2KqkElLLMJ zR6C)48STCY_na@6p#ReU(g|yBb#6fMYw#uKmDhMvjqOXW%QC2ts7}`@Q4vNtRc@}$ z?kK$nyeoM=Ks)2~+D0G!;qaJ|r>~xO(?&1c%L)6{$|PQJxcsyJa`SPpnmeeMl~hxs zSbTaDZ!)LIXBN)}!(oFrcluU|O>3^^%Hmxj?WTtse!|1}8E4g;^W~h>rp+h7GuZ#q zD)IK>S)OnE*&a78D83~M(tn=b@L#_D$M<*J`#`qsC!cfbz5`43APCL`LG~85z5mbu z-HT@xKoqzRfYmOfIXg+&U-~QfcVlytL-i?RU3LN!)o20B6EC6M7kH43NZg=0?0)X| 
zUyDwOCKQjT#(MDQ)jf9unK5nbv(d2=FaQIT1X*9;&^l@Al}iR9gpuVHSM9p;L+Wu@DA3 zdww+DWfplJ3hxEB#Q*z}4#kJHGpoA7|BM)!r;RT((hW}|dOV+};qq2I3dd)l!#|BZ zFBUV1!?o}@DkfjS9rW`V<4aD=p7Uv$SwSCiunOK?!IAay!6;ZCr0dAf!|Zn=h9>Uw zePDYOujhd{e$?&2m|Q6Fp?Vpu6X|jfwHF(gW}9{QhF`onpG1)*dQSGmp+3b^^>n&+ z4ILeUx^+-GqgL*AFM&rg4(wvgxR+d>?QQP9?n{CbMP#CR%XszI(BKAa-aD}V6+F4M zmHa-5DH*!m=2PYe8(ER80Gq+`L%ohd;h(iMj}+zBNaA^oK$C82kH-D@)2$*W3&6UX z)Saf@B`{sB#D4l{uf<)_=sVP0Kra$yrzt5*2B(~vtVfwY!ae@v@vpv)CjL{c?D&sl zjT2$!ERq&4qSDMOv77F}@h?0V(^viE->8TBIc52nrJjt=OSO6=>SP_0GrO!?v+Lg% z^%LoOw6S1+{K*_Nr$F(e{ZJ3_kbi~5RKwd}$>bGgM!5k#h&Aj^dK<%VC0GWk^&L6R z&cVNw>5Kmh&BOLI)opZ^)H5*Ij`D6VAxw>KuW zEOhp9(zOH+&qSBnY9~Kv?8S##I~5MgQS&yKZ>0%sKs{cM@sxWIEsxdjQw7AggZOkf zTnd|HXYGgkOGssXHTFlB@4?+wyNz(+`$DGTcYPjNzs_8s56yf4o%bg#Ye`?`&Ex$a zi6>d5T&mWao}I*=KCON=Fy>VGD|&brESBKj96g?7bc|<7;)f>>^Ve!7Le!*!2e}g- zoA)U`EF=N@;lp(-Ya;5ruSU+M9>9xdP$u*~Kx@fll5_tvz`9;54Ox{TX6$!sBe%3W z6|>^i;FzZL-?W-l=x#VR5ygjr?R=bkgLE8$DjoGcpUm~b=lIk2Zxz|!N>AEoWg0o_rd~Z#_%R!ln}i?gF%q^B{E0J{ zC@9mE&8l#Yv9l8_p4HzE+Rq+a;>iv0{av+oCfA9K+@4I{RV54A*&bCo9X)M3DPsXTZSjc(|}RR_B1!l&Qn%&Gg-y& z=++ka=1y}mm7VH;Z_r=F2ITzb9kP9z7Vc*WlIx)Af+^y*KGqB0K%0{)e8o z_r9_DM)FYf?B5wf2Wc7tgz( zPkcxcJ0;P5u0ZiS;QJ%J80gze>d!U@NN%3%VH&Uhw^1h%;NssB**+9?+vC?_E!}~8 zmw4Yn%Nese7->fqaP7x}1hQxR{7wt*DWJ-}-|OULJNmm1yVZ%#EkKXl2ECpf^;7F$ zGp5_g^CP}(($7IOEB6R<8ghl%@WI|yHWwa=F0+m3mzuHmp!fHZlbp>gWfi|b*VcFw ztv?Fa`=fGZSQ~tv>QgtoAE@1R`hOE|zM}u1u$#xis=l?sZ#1$U>sZgY+1_|j7kxXz z^bu5F3;M*N*r1PNJWYJ#(PVlqE)LLZbUG32&H`~xC@%5obkfzHEnI?!zrplGmMAe| za{97})V|D`P9^md(7OWpoM^mxOds{l^`1b%7m9gx)+sw_eGI)EYIXW38t!2}mfLj= z@#%U`vU)2^Ud}|HjWErrS9P2!1J>NfyULT~`#6Ao$XcTfoNANJoai0uCv(kOG;D=9=v!uEThz+E@B?u>tCp%t z#apqdk_Uh^+{yjTQU31)qq*edetawo-tuZU$H&=v$;s9@GLRTcJAtu@Hg?s1POUQE zucVc%L;_dxI+e3-O6;F_WF~KHPUb3-mZkb{gMV40Y^P3Qzn!Dazxk-5Z1s*F#G zA^am+9Ekq$#cd7uMCV=4e#C$FO~2ov$oA&lpBL++U13`~W4vD*iS<*C#i~qVO3`!r zgRFF4GOMNcKv@YSuX~nrzA`YHtH$-{zU>BOARFvk)@C`!&8OVVOy-v4ZJEgOhJK-B 
ze#WBTs^Uq`^#0GEoGJdFPqQvu3W6HsA~$*uBYP=TDm&Jvo$!9F=ZTP$*YN3Z_9(gKmeP=xq`wQ9+00_iWV;h@<~9G@8lhjI>%(DO zpH_8bKmNwz#lLf@((Bos&ZKG`4a?qF?z!}#FNdP(6gDVrCSTHaYHpwz=NX|p;>PVt zonTyux95)f`4>!+y&yhXYn!~<;sp42D5xkpeBZIWxWr+S|$oAb!q{iel? z7?ZVlIyfFfpS~!bouu3#?EtqI&}k9<9%GL6Jsu_|-y*HAC0B_Em%M^m<8)>1vbX*@ zj@*fged*~RQ97|@`l*o}*dMi+`TZpIJ|gRhq4T>pKPa2M_(Z{f0ex1Gra+P`9WSF$ zBXIP?)8tc`jx#r+ZQ|^7G%DN;rp2tve6leU?rUI@U95{?xg!dU#QoD?o_oSssn6EV z6}Y$twf>0zS1OfpqI3^$KQ)HyJy|362IqsMa=6~6(twZk+ZQHFNJc00=;VETHV@Kc zA|ifLj8?yrtwrdclYkXCA793OmAaG-{)Bu@M~_ob|IC70i71muJ$*^lBW6Yu{Umz$ zW2EJ3vXe3STDq2Vp-bSBtc2M+$XxtdGB^T7yMpf?kem#kVetD*AFc2_UeEFKK1XlK z*pVy_iITJuU9a^du_o%F+#L0C3+fb_bG{K`f!{gm-3h>WC6;QKm7t2jSl|vy4;0GZBTG z;>=a7L{(DxrP+CW_mac>2e$hz()&B=_9LT-3sSF;n)&EgSzDX+J(84G zCz;drRmTh>`wH>H>cq;&kFBkil64||KaWBqY0^A+pN=ZWqQf<;#y?Ohe!JB`nss2} z0X4&y%)eHn#~KjzFe0w^GpBM=J9P82*Yb50J*M&~bGk&mBQsjjaE!&UZ8=dk{ITz7NgU zl83v=Pi=A@ui!{jBRgpCqR=Je;YgaD)%6&)lD8%2Ap5B~2Be))J~zdd8l4{`XB$Bk zKk)UaeYqJ$&ZHBex0%nG`CfoaKZ0Q$8Ttg2V`#=MYERSK7_I(}<2TWp(IBsa4%gB4 zhfuOEigd-R>x^k#j55jmm@}>6dR@!HU5-}6N$@o!_yy_u_sVL5r-QzS`ZS5uCq_&MSo}fVPNel>I`Itd zPxj_rvV0Qy>`XR3LZjRlzS`fvkb_xzzL3_pqb0-rz0~+Io+kU?XJt3w=U3=9 zlXY5yCYSkigF49*6Pdc1PQQU7Ule0b744h~>ic{fNw;(B;UE_1GQYWl{xU60{Oplr zC^vPcgQOHw?jBTJMpH{WakqK@A-hvWOCK907UO32t-9&+4NnqLw~pQuK`h>>Gw}ZI z!Ww2Ld^j1s2;W~sr*$xl@8?Ev&w@$T3kQ2LgX~`hg1RI-5m+CkOBLw!iEx`^&5^z1 z73L26!t`vbgElNjLw#-s=KDePIqhwa;uG<&E6P5OZi#?(hM!%?Wp?}yA?YvUX>uhz zjv9C1$V;9-?fHY+`pVpG2p%>dXW>j2^^@l>_h*I{cI_eW2ZClSO8iY5|I+_ac-)l6 zC3a@c21>KDs{Zc^`t6KjC$jGGAIvUrV7nbe&uJsG!tC(>?$dhp<4gFS7KRiv%jAa3 zoG+&rmGR;rbI_(NV|R2&9P0;hFn8Z38OQf$iE61|QCl~V`m|gTO`lieS@IW4b)B9E zqsf!L?~N;imD$1b(!8=FDxZljfu^}<={xhZ+fb!6hfOw^)>?cUwa@l8H!0>8Wpgq! 
z*ptIpj`(xU#k((kKZYJ$?^6YH({g4O<9tirp@;R<)VIWyX@vh-Bc#VfyG*Xh8n8^n zlutc*$^Xn$lUL*~W{HDw_z_y1xm)sy{OE7)=j}?rayKyZ=)KGy0)6J@W$`EecZu0q zhQ^mM(yTgZtLNHw!VF_^4c9Pi3FSe!?BX2 z>;%g%wR$U*RP`h`-M`ax=&1 zF_}&2=Y4jJhJkAVj1s$LvVKp5(~d0j=WJ=-&NAMdN-q-6JNwF4(9wA5Z}xTqsmf@U z+{E#g$*8&je{xg&F(X38$LzM3qq&J$m7BIVdp=+3!~uJOO3gg30a5WoJ3}R}!r( zr}DWAmAmM#(U;t8c}l<8vu}!WZ^CPo+Naa9Z~PsBN4fh`1B87@MHh3|H$X8*j~}Ds zhoDRLtlq41_Rd!8HMc<%nSCjqCT~Y~c04{mHyNYWlhnR2$+N6qmgsw_QKh`!M7YcB z?0K~O+~oRYWGHCc@H{~Tdsfi8$XLb>N|S^_oLrB7$1f+?LaXPRdP$_G4yPw z@8k}tW0byxP51)lJy76O)NX*f&*(FE_`X5y5p3W*6y6U<5(}U^Y=6{m&Y#w!Kn?x% z(@%V%enQLaTYsRx&fdMNd~bdH)mZU%LC<))wpTlQ3yr;NgknokCTr8|s>LrbvGTsq zN@BX!#epH9IfN`egrm>`-MXJ9Bwy_rC=>1IN)GPQ z`&!gZuDH}b8n*HBYhu0;&)($2nZv?mJ^d~#@jwAhZJdlwzJ*U&Gn_`Fj-eB|>+%&y z61Cz0JzQd(Pp+8Ew(i$wDfY{7Ha@fC%y<*K_6_40M93MET@5AldntJlSJugMw05G^R3=}fJfnRYnpc) zNjnit>)DG&qgCT7qOv%j=BjzoXP@(wJD4$r5uaPBa2hdDO}+!JNfqZ(uZ< zWS#ICt|V4XD-hhy0<8D8qf*DwgICOJ9xG(w4%(4B){9V}pH`E5?s2opsxXaLepeX1 zq@Q}YekAVaj_fb=vie4>YeKAO% z0bS|zate(v&*I0^;T3R2&wJxpb7fDVdmGuP%V3rKtm9!-16ATJeS+0M4^$jNc89RM z9eq0(TzjBYGMZ-=uoYCxNXJ=9v_`K)x=hZf>}xGSkq@l^Y8AHhC~#Lo{rJH*)=oR$ zj{#TDsWZ*-GY=V*?4uu5oNu?!f$n&i57X??uN=`Uw0-nV}5@-=Xw5{%Y5%oAp2i z7{n8(4CTN{m&$IZEzDB0H0; zf1u(fZ_D|4q}bWcdN2EVSAi{=xfhYR7G{9ijlW8%MOtX8Y+u&zdhncx(pi%v`{To; zBH0=fC3ySd-78=yPcsiFG&7#9?~=gM=~Dc%%EBQrbrY3w1H06kPPSs{GN0Z6-e*v8 zHxM2T(#UXD4M)Q|_m;92Kbm|-(x3DDFTeRbSlEehU6%`OE1*b!|@s z;RQ-GMX8*bTwKto#HSi~z3tY}SgqwZXKOi~D1$3I6#q*!yUCOP^C#=1|M@g4)BFVX z-3oZ)7ngmbL7*$kf(I}lM%Z)&z&DP?zaJ*6EE>>btVO5v2)tl&WMwktL zma{mC!jY5n{>m*wtyhc@BaJNGaWi8|JUr&&QD=OQH$rth&3^F!eLTdPpG7XF;eDdF z^fLmyZM18x-OE96jq&j(JnX8*=SGfrt0rGnP4&(JWhHBg_@U2)QD@Q;D?U)afAoKN zLD8&g-i3J^?axBxGRFGOO0O`txCF+>(xF7Qe**-UvlIJ!m%MUKJipreok-0B79w%= zvxXmqU-My<+xyvdZwcq2sIyoHl1fN_e$|O{fF@-n1+cnbxq=->LsB zNp1y`4k+J@ey#^&Jh0ERpR-W83(!8XDthB+cCb35T72XO!C?s;a`P;nJMo5F=JP$Y z`Wu|Slul+H*&f|x1T>=0_Bp%!xm~{s4Su2rIcMq!w~=_6nAvTM(fVw1_6)4~VW9P1d-0l@Y8?9#71z+V 
ztMu~{zC{x2z&X*zb}#1GlkhSzRg;_GTsrcK*;VdAFCkl9K%VH}-@zsEQhwIFuUy!jr@K|Yt`q`ex@1RNCpg|xD{gO+g z8+tXdZb&ZD-fYDHmhmaP{Y8!JK>tAEHt1^wXft2x=hLa^`+#T3M>S2U_-lVh6LUwa zt@52wuN@hE5KR+bB08Ske= zkV~$M+&z07T+`L53if)a^gdg+2xU$OdtEZuj4b@7l@!ABC z`kq6+Ze~m3?~t93^?1=v?dA4VZ-ia$ay)PB+C)x=>Up#$$!stm_jgo2XJm=Oaw~Zl zV-|L-|M7Lojj91)Yz%|Ux3gBfoOD#i{oH9xPWE~zvpDEHj@Cm) zuh$BlTB4W4blT+qet0*JTvRk#k3iX8u;R8>wEIXU zllT2vz0Lu3?#0FiWQ1C)OivbOqrS3wON5up;hK9yhxm3moZ_>XTT%^4t&Ot7ev}&B z!CV=)_tL}L^ejI3!_0~j=l&8UW|FZb=$v>3S#4y_d^^Yz5#sOK%lBQtKNfVEhsQ7G z8=7}8&X(8fZ>-aHXp`GOiB!-8?GFQGCwz}r)o%qi6YZ}QE4?*XE(P_i{jBDNYu@(? zdw3Ad2vqm{J7!#3`xR$5*g<)YL}#DBlvMyrGHKWfo-L&E0)0NnLRVzD<1v+U zhn(4*NscS2eZ98gPj;v_vQJcsx|f?H@3XdnG_&8b=BE1_BcH|JM?87dyF@6c1pdsC ztN5PVFJ+W0L$1ftp8>{#-#~LFS=`=8`!#zwA6$uIxB->ss54&4v7r5@v1@nGRA!C7 zG>VPY!;8vYt+kv_)c`{ql(+>hH9Xtj(=Du68M1Q$2ojefborq8bfCAnIkYRyEJb+8 zI^(FK#AP_1YzjGJ*v*O~`E%I3@{Cn(9y7B9C-eiL~jXJ%vn*Onpy z;X!=FJ}T;#DeO(;`gk>KX>og_>RQxE%U?OyU4+hw!nB6ICO^>G#h5S%H%5`qeklHR zVbNn1k1a-r1*p1^u4e47PPh6R0Y)g($J@uX-PxG=Ag;#h{fi{mV}WXUm)PzLiy744 zX;gAwCu8JBy8Hyre@X5?CG{;?i;=Xd0qfnL<$Icr4K?aL$$IUruiTvO#{y?JxP@=I zy`DI7jp%LeI*eecJ~eyjqyLP&z5Jib)+MJ@cU+6_&qes&5a)KZN}Em34rjS`CENA2 z*A6T%pz~$w<(B;-)-L-ZlZrXaIIZWde=8c(nS7QfnaLoNQ;0jYJPMVDu+PKwmiro+ ziJhm!VR*k63akQ8PgrzhcgNEF;j|Fnb6jT~IHlD_im4H`JR;J1(Xfxs8&X zX1$Cl3*dAI{^fq{?Rb5;`d1iF;t_r#`YbRq%wSPhdVVF`;|r6ySZ?Sp#=m)K{((GTU5;619y@+3Uz0;3V^xMRYT}@>}sKv-8~B&TYC=@N%#*tG%bW4Yb_2ajw2P z>-!CS&$(A(vu6~#hR$u#O5!j~F}8e$S~HZI1>f8_m;=__dAL&9cpe|Bc2-5%E1sh7 zuV{Be^hk8(#29)Qw0YM+*W)?;8aUHz*-ei>dI ziYnFcXbo9Qyu_>~-$#+stUtR$XTW6v*@+Kae7qjP!!^>T`4dQ93c3?fG2E_SW5fJlR5Ha zA8#aRTWDya-^B;AIy@VKXBX6(sGY<_ILmA~cWkay;&&3hS{sQKk!T3d8$UAR|44r` zVG?P3314zsKC!UBB4ekLnM8AmUsr2^)egJS=@ExL zh{ibb1^(WxjbqVaygBL1bT6k>e^)v&v$A763U0~Jwq8q#j1qrBk(|cGzosgvYO^Ra(KPpLFNR$wRx%@Dyh{HD@}srz7A%qlvOHbMdBnnE zHkuQqaY`g2$w_4WBE3w2VXR8>eLV-mb4d3?S`NgAYp1T!>&RmDmiSvoD3_>We}U6L z_}mQAPWn3nfBt0D8;Ry?LDzzPU(D|ID(2zc@vS9{?;s74j_N3WGmInCiLd`9OsApY 
z`*^mTzEmybx~sWbC%o7nG*#f(4n7yt_2boS1@B{V{d_zdZse~D|2x>C0VFt?557m8 zU11P^-S4z~IEl$ku^!&VkF+P9e$kjU4}34to#X_&+UMA}5Akps$WDQKGm<)m4UYd- zV#f`|hZpcS`QRIByMwkrrt7*wG_gV?M@pT1Cg!*F3ctsD))#9hnjZDn$r%z~|9GsevERM_-X zx~#H~gCdbgy86jomz-ri3;UPRDAAUh<5%uEe(z03&{qP*AU)57Z}h05|1%3&iq~SI zcJyA0VYj`!CD{Mj2EiTa(hj^r^b%?7hd80R+@ zWD03)?tK%MFSF5j_HRe)=V~wblzg44gC=|C+k>J!ieKo>_1^Wv+pF|Fj?Vs8)XzTr zng9M2@1n#EuV6&WE&T(*Qd-GXqYLN$yHs_uJDAoy>3bqrUP`Ne0{74A=`-wc{=l!DX|5~Et3Du@}n|KM~({rGE z6C`8NYWLzzRu82}=jA+4yoNI18(Td6udU^CVlxb;t@)g_b``&)wRf236O9)w=;~($ z<>z{SQ&Fxd>Xo4-N8{NC-v4Z5nCkmJ?0rU@eaPW#*5C^BujpZScBj8F<4Ichnp(@) zlgH_C_EZ+(c{kQ>6uw=|;?@H}GJZ}arCXGEi%w=X{SB+oj14=PtRyPs19U7giepu7 z#qsCK)yGDxbB!K{lh0|a=zLu7pzgou+&FEW!{S}fHV*?qqPE=3Lj1wAoZlyV_a&%x z11TBm*)L%G7<3bi79;WOQnvSe(v_&H$?jDTbv`XL?K0Fl8RU0pw~aBUC;ZzR(X;b- zIGh*hb2J;%14fCuc0Re!JbM*+tu81<(BVL7SE8$%q*3T=^O&hP^U>{>#UvRIb5!1D~z?j|N4DwX{ zCs$P0!oJ=Q`a{5&mC1YHyILFZz0S>qWw<=Un_!(v{k*Oqu*0kzI4Rz_RF+f3P)^pc#f%gntNYCHRy7Z^`Iumar|)LYT$ zU6i=Vv&6*8IYxWqz&S98C(Kh?IS)i%ys4a!Z*CgaI-}ixAZX2{khsR)a2ApyS=@YBGZhCG(W-kX( zBmLiRy;NUc@l;s}>aHl3thZZ0m%K~cgR36PKMNF#wNM{bIvL@U!{kThZzJu)lzg_( z!Nghmn$^onvu3gKoJIns;eYbjBtm>*2)>Bk&EOoLncOSMOrbisk_|5L_O1lg9&jz? 
z>77k8aw8~f+j{Ik;ymq%zw@<~ebnUp{sDEWgK(X8E2G_9_>^KNo&u8bEJ|YBzlvIS zqR-FhR24j@dcF|sTS(#qpiS(qm%(~G>MTdah2Tl_zpOCh$5YG9b_;GL#=z;MW(3N1 zr85J`NJH;4S52g`#olGdx&z*xRm_)?!QfDqAeq=&kiu%Ha~_#(fbx?I3pK^3JK?mD zb;Rwe6`VEKlW5in754@CN8~P%a2NZ#*ptNf=&jvE-u+CAxna>zUq_%>XrI_c zACkp|-sToqVwX2iE>S;T0b~5Jj>N?$NJ`ciTj=0WPwqpj-Y9w;nq+-F4NtR%OEkdD z#^2LsCnHRtnXcsy=$ez@{$@nINz2RReOGTrr=II)H62c#4FhfK11QGv411mUyzBraq3-`b60RJ z0b``+HynzOTKt<^=p#3guEDEjuv=|}t*yt+BxjXA?j+6QjH`>t&OBqp$4dQ*OO@&S zH{N%{vw!#>kFi~8R(3FA%My8S7E8LlrxWo%>xSe68ep!F$j8Tl;WeYrJP^G=YR`sw zvMIfcUV~7jH*NUHo7~NN7?jBYkhlr6(LXr=s?vsJGMkBdxjDE9jw6jCeT*211y@%u zU(lM4MIVVQkW6tIrH;V2_;Ow8Z&vlQN%eOiYJvjE26h76W*UP^S@a%8-$XNux78EM zhVLWv8;`Nv$lFnCZNL=kl!$Kok-lWTZU_5V_GG7M=50UFZiP#IlD68s<4`nuT)q8q zFC)=ly!eW|T|;`Rq3tOwLkm*19n5DbwHEE;Y19%VAL#XY+MjrQ!{}p9o0p^Y2<^?H zKe=h~6MAM(W+`0>x3b=8f`Y5r?AY$T$kH%UH;MLd0o_rwcr7jYQH_uA;|%TZVb&UN zpZJu#VE%HRnOJkuQ5$qeYkv(HnWe2~m94_jxno#Z$DjYZCV;5O`FL%LVbJS?%SG&KEgWWFvFES1!%GQYm z9kL(4gExuSmH5W-lNzXecAm$w0xOlu%Ake6XPdEC)o1PqSI3LY!mFEyO!QFXAfpZmxgQ6yP#$NQU`$B)7& z^Ww~oZ}fYn-<)UUPI4{paso0I7fNrA*6!G%M6&#B@U4onW%cx`vX6i>v*P$oS1)F- zWzqdY@RWKuqZk`!lD@%gVs@!BuMItt`KCFDGt*Aq!`wN0lU|l~$Fow2uktB4{Dj_K zVMEW;{%lQWg zuRzJ&aBE1xgLguKp;&sCmp?N!kg+_?DMRk{&t)g)`rN{}^D7B&<#Q#!k)iyQuq%De->iKT~htiZZu#hXZRPk}Y-@sfmOt@8%EdWTuXS6ayVcTdun2zkkj(v+QvpZ+>6 zbz@)RRnpL>tY)%%pZqoVlh7y0Q6G}{Hl2T)roLUw9dF0Om-Y5QVGY|E7tg`N+ey%e zp6}&T{84jLVvA4HaVPgd7qE=uVI64e`H1Syym0}d$EC5+M&-xXkaDP-?2eOAy>Cddp z6t;0Govljx;@AE+Gfhe>F z4DsN56TioUyggVl>mC5-5wOoa{?mNEQoZE#+n}$C`d&eXKLknQR$a>;Rb@GngCTJO zn$oIkearpcF~*$n{(r9iP;ZaXdZI{H(^F%ZCJI(gWBT~K90iw=%igphG4L9r%|Aie z6)mn(=65ttl;&iaIKrrWEY8#+4flKcgV8z}aMr7pC{UmHl>AMJJ9nG^i9^*{{aetu zp4vZY^KQD;p0sCoHS^6mlZTcceE9aduCwH6h>aQK!6duL^!A2IR(;R~XSl-K;Wl*MC0z zUcvdjQ2$C4dj*^q(Ue2Yx88zDBDrotvv?!S*6TGm-xjX%bxkhqUHr}i{UGu>rjWb0 zeeVE=3Rcg3l#Y*3^mH$@zaI|=7(p%o*DokH#%yF2nB&*kz_Z5QMjoQ)XPX;WQ#0%Q zGe~5zsU}kVL=rwh=@s;9GoIuYZn8z>PEum!9%qcq`#Z@|PO!rHJxJyx5bO-jf9k6f 
z`sS`mGX1rOb^F3XEirfb5{(mAI(rGv=EM@(tC={@jyVT|yo|*Y7hZ z6)&X3jGYAUWDRNu-uPe7g7Nh%XLc5%lP45b=rc5ZLJ!qI`njLRC|4V=>!5K5y(JRV zT}9syc=Au}PXp8M>_8K;)Qtp9BZE0n9Bq`Vsb)*mc}S1pNi(G$#_LtMnD~9k-4(y~ z%kd(aOKYk-3#Ila)nn*#$@jh=E8ZB7j@4tbqUQ86w(V`wyxM8SGH_n2zw(9sjyA^Q zC9xQCnpReeSFk?uGVB7n<*0o!IxHl4=i$gDs9jSJPw4+LWqbHr6^D}DEwR5Y1y3R= zUPT|WO3#^1n}19GH0?GZ4|meC#2Tzol$)sbERvE;1Ajo>&uGzbw7bXHxd{G0fc#q8 z{}*z;5e!$7k>w;XyI6hM+zBXm7;T-Z)swZ9GlWxM`4sqHW_wPgH7oFVBz`ASdJ9~9 z6`Zp?&7ARdT&jRG2a<>%jOfw5zUY6JGQE^t?A<|RVKHf+j*d_3XQa`&EJ(+)YUA{t z+>_18RU>uRut$qoul>;QGjg8&Jt3Yja#-94Ng?u1jx@OUG6Td(#eR9yj|17Mlm z^vrvHWN~u4{P{`WFD{;r^}Y{UmToLe)z;IV?}5Vc3Hg#{mnVnU z8&}ds;#g(>{S0`F^*<4$g5jG=BzEkpEc{Jij%QQu>2`v9_QpTeMrNwrw79oE;(2ud z>cz5UzIeUgtexhP{M_l-#nV5K#HBt}WXTf|Hm4(1jEBF$wzXc$qWBP>&j3aCx{o4F zPx*hMJ`(r0EPV*piI}y65#W}BW2L!pR#u;SvOSIr^7$H;X^GxH_jw6y;#F9>F`biv z(BNBK`Bi&aV>AVO85Af3x~zhZ*JD{VcE`)+_*fnnbI+k%v5KhV|25#utT(3<@#)D- zxQ(CP3&`_5KV`w1I9lOzNrp1#Exr5C-(Zk=|F)mdw{**;^dxYVekv3&?#2e(Pxm7i z$pTcGpQl88hvL_kyZUK!3<&;Gtm$%^vaOZ>?SK3hD;Ky|!1d>pZCaEs$0~iw?mlgP zl4wA~jF_Fs*P%wu)t>Zam$F-u+yco(|1>*zzj1mvojO#_svvI4{^ibZ1=`z|^}mGX zo~X>@;7?qoO0>AWPp|ubD_zKKtiKqKD(Ycx)^nHkT{pG;&XI3s!_!L`S znSIIm({4WH^!^LhXbdZMELn&z`Gvm6Z{e?aofGz+G%&d`lE>^#3 z@i=^fB%a!|n~3-ml+|_J(qYbU3J5_;$IL zGZRhb(c4)48J>K?mgN-xb8>kq8H-0o;!)-_I{w(fwiCH)WTg0lw*QIcJ{kVm?>n`uWDg{k?wM%*)W{T9r6vz=cYiiW4@sEMn;D{j4sJF(Hp0eb0_;}neQ(_ z!;I+Nm0AF=)!_b!Z2ya_XE%2l+yAT9H=|WDVVp;TdmFivv2Y&!{}TvvhqNk*d`szh zM#;yGdhroXzuRkNqBg$trX6d%gpBkBY4SBJ(MryD2Ein2jpb@qfY~BXU%x=ZWnO({l{t*ewsZFwtRD=6lbh;{-GB)H?;C=O;L%UG9HjMTKSv(ta>XLk?1K5mC zTKgC-UHtu+yj={!Q`o=U(LEO@ZZr}n_vsS-ug0Mspp1Xk-$9tnL0K!^rKR`;4F^Xf z@{x7b*Pd?%+qGawzWLSQc*y)H`8HoNt~B$khtcF^)cmoS&ribFM8v%iCe!IhHJDG- z(raiGFORKkTUNkpj9!~?afni#(X#_>VgObmbvw2??X_4LqK@8|mdou{WOkWU6s(X+7@lfmg6{f#k-wWWoBRN@!BT%_&RAjru=HFCa%#^s*#*=CB@<5ydd42SEv zEOYLzzk@Sl$kF{M*Nc>n@$7E2ZBIJl(f_0ocZT-*6ytq-KklX1we>Mwuk~59tQ3+3 z^H|W-Hx?}Sc>;Yn1MG>}S`H=Of_?HxO!B05Bq6_LN6FQ 
zb3^9}uqHlecI}3OVP~b|D--=sjJtuJX4Ux?*>A$S%wPfPnHl#|?jg@_V~f_Xn8USr z8i;$Tleq4OlK0%tiQoPly(Hq$c;n^yDEJDTrr_JtEKVtZMFlBxg+WUbeL3bgpqBetC6JWio4i z3i-|n)={XxFU|d)E@o!;BDy>Q-+6eK+$xFSG|G%1*_2k1nRsk2L4}L-RTp0dDxJt| zeevflZU4i&1voa77OtQ}>Gu3BD73t_;fu=x9f@stnSGa2f!&czQkxwyESQxg@gF`!5#rYCP?Tez{fl zwbJ!bV5$)~S^wVD!w|fCRO=IzYXYL~aK6agAQ|cxlEwJpoN7$W>3;Th6FKxKqtKq( z{FPjtuAhc_%PHw*JQ;3JJeN?f9NWn`9Cn&l?i&s@&3#Ii$1~y9tnfvNtmNlr?VX8d zrEj~@%1S8RmW(!Gv6`~oFPL>yXBRSKd$TCliyS4U&=B0s)46CKjxF{10?%(?OR^9A zh4RUun6{TImEGm%eLmXn{)Ge#W*h$QdG_kV`R4!s-(5Xx;ag~xmB1PP9t76>=1C}g zz4F^2&y&oI|MT--XQSKNNt?;!IQYMm3zr9LDZcE1A-6mJ6QT4!?d60ZtB2;~Jn@&l z)L+wouWHKBguv6-NR|6u)nWT6zJxlZ7`0`LcumP_JmZcsL&^+hvf76fwyw8N@ij<` z16haK#rSvx+ZZ2}?0X%}VrC53fa*2<4pAa!HC@=4%ptD!lgw~ywf}+Vl~H&qO+S)G zysBh*vzn~$a|`hhpE6^aK_jy(mU&R(M}DoezA7i;}~YA@E-aQfAT3}sg+^TQfi z%IWuKY){6GdyF=%)mf?3%UU^wjXd25Iu9fZX~-mf#C9gGbLOZOJkOjfx7r?u^E&tr zqpO(@K5tCA#~5)p3M8U_;<2^&DdTZVyzh>x=aI!^Jm_rg0X z==?&w+RxlNBX@FH)nMg@7vpP3bg!e`vS^nX;)DLjqqDscDn698jN8vDlac*yl>F5w zlenbW7h6hFGOCPX5qqF!c6fHx?*#Pi37W*AXaMK!V0XAObF}_8E>_k??mc!xxqq zNp9x&XT5AB{|OJqkh80NUyF9R5%DQ*TnLYM*|b(>p5v9g0p%8xvyH`Qo2Z@_kc-4P zZ32VDP>rM{B3rzsE0Xn=C^yyUGZY4yzsD=J4-6W5p8KWAt`gs(F?2bx8snk-0{m~J zYxU@1d?kijQDt|jfgWD-E-_CR!FfjP-2X40twXgo zAc^+oj$nUJhJy7zEnJ4;17Lp?xW7Zqbw<`iolS|3~`y7)2BD zZv-pVmG;EH=q#F+2!M~kCASB1e!UPKZYLFUwOteKll36crOL9zo9IT)mNuCIG&VM! 
zPFmufKTE$`jSbalRI*W@N}SHX6a)&C;jvhST3PYv}Fuf0SF zZ3U;t(PX)2gSFboxRw)=(J=Ucq)mp`5xBJgwcmkRWc?Q96NzO#{9B{dRJ|nMLT&&} zK#$zxdJH`MjRX%H(f3g63{*UUjBL2;K>uTu8I<6*?^qVgaI7q)~DLIT!)c1U!tD{)I@1ezv_{DF8ov*7jbtWOVg61R|4=bm3A zAlcJSV@FCe)Ed^c@2M9&hZuo&(@#zg&h#z0+CsscV3j3_e`8k?|1#&>gNLUqT0VlZm*|NbBh);zkbQ5{A7++k%V1Q(61uf+=+DU0H47u zOz4u|oYa@f*YdyCzn`B8hn_@Xe$!fWf3wq=7?6>y|31gt)<7skYs7%q%vu$atU+@H9Kb z4cO8qWFdZhxkWYKI5ygoPxYN#SqpJBH!5D~zC5-T>|ichkx?`o}swr47lnB~Xg#RRR~!M6SEv?=G)9l#Oqz(i@E1KU&d z+MZnQXxvY<*7>a8o$TL2T9nAV%h{W^SiG~y#9bhc?LLuZ%FcCS!pE!Q6L`kIY8mTu zI_s3k-QQ@V16$EUJIM>0jP>zYK2N>u^xR8lma<)0Tdvf?c5q34!+6%MfO`|qjvz7J z)qYPexr6b&7II#nQ^nRW{88;^w38W9dsvGRMj&IR+2flANjFPt1|`(RfiYw|rKQ$*x%jW$&Yf4;s(f;QK+O<3ZzoV)3=p zaz7M$3G~tFyR|e0Orh*5(9cAZYTiAmPVDP;q#<$P#-r3&vzLdBc}vvZV8l9+qa~S%}YQVzL*s)bb2mIF?==<6Ghn_EVxX3vY^E2h!wWXi~?!ctXVEdj`pP3?(Pg z-g3SrkJG25B3Y{kk)zz?YejzIE#IKn2Z|@lTV^)NcyKRi&TZP4)wrXWKd&}AEFdj? z$V78?=6G>v9Gt=?yz^aSI{EL}#mWP0tPM1Ay* zf50=|-Ve`R@c$zcHXZb@D_IslSNqAWqvXYz!-7OcUk7cn(mm_{BJDI}>6hc~AHZ6T zWhh6sl6f#u$Kq+ZH|R%cE%E#|(%bB@4n*OBO1w^&e$oFq%3q=FSIPNY{7I~%_U0Sk z>ajD4twST9Ql}SQ?~IGdqd3mfTUh4Yq^j(EW-t!VBJs6p^fYo+56{0v+xuw4S7_2f z?>VtNh=wMb(pWY#5rS)xm+VJe;msdN*zR<-3S1kY`)Q!QiUy8kyFVp=nT^F;Dqf)@ zY1I}S+RF}R8{@?;W-;%x00)x38R!##`y2FBg&x1EmpY(q?ptX;DV~qbL7sWhy=?ET zV2g)Rb~J~8JAP|p_52weKlAw&>!J$YpI+=h9R|OOq&j&uZ&UVh{Cm$V=z4Z+Fsb?y zN8{C*u{d|xzk)^lzM8W-HObDa_?*#ssB!IJ__xvj9G~OQc^C@(*}Lou-tKRFOfyPn z{#M$pN~yy^mw8%ZSqv(0DeX$dEAK#kXD*OjqODN2lKD|u$eeFarE_*$1GOK~LZXix zfa=Mp^9#y~JEejI3flB&_!lP$Z7s?i>vIMFvvMErZx!F3 z!;i$~tB!h!XIG_|i@$}&i9XqaPMn~PK$VDk1IX|mXq(g91M#l8I`P*{w2x$^iA7A= z%%(!2KWnGC`eoUG>^>&mVDzfAhj=1g{0jH)VcR>x;TCd`eZ!m`CKu@i{~u-N0J5;dSYul!#O`Gje&5Ybl~aE=SYF zWh&`Hjq0eBCYLiZ66*i|e)sltG&Aq#|M`EO&vVaGr2%yR>mAYDCYb~YpFLa2HX@=o7Z7g{|5+`G$eJCv^ppLqwwKt^>( zg;9&2Li!kc_#{Sa4w9}ZEqOP_Z9(e9*cPtHT#eLlKixoJbnnmo)am+pWj^)B z4FPWB0p5TVtOaMC!~eQ)v-4X|A9xGnbMVUTVI*BezZ(MAa^QG}el~^D72yTr#KsuD zv*SR9VM z@ilq8(_gVV+sp0PN@nGs!o<0nQCV{VKDE$UppzY*+jT?EshEl7LzV1al 
z0KZJ2{cFI;v-Ig?aH+p5i=hOc`Q*Jx(rl5Qm>b^PDHjT82 z@Zth|dLPrLsnmQrQhzl(_c;*lKq`1z=q2#59m>2;dE?k!3sh$U&EMdz=xH1V2mT%` zUrSHkfVNj4jlB!!dB*z!^!n9MZvwDhLcR5Y&WKg|#@+_c-vXbJ)Qpx`4yrs04f|uI zJ;n3I^!r279mn!Y8|dTgz`Ih6Yj3=SRlqWZ@jC-~HyJ+Cig=dNzooyu;kAdTV=b-K zqlKo(hr7b;Wf`{U6<|%B&=l@mj{NXmlQQTSt>c@Z>jW%EZ~0gU*Io#2+&kuOgRb<( zxOSTuvCo2M&p7=C>dv4hZxE{ljk;l_8o9hB?-E8vGbCw$YSni*6pF5(HuteCfuDAv zbzLcMhRrY(sr)IW_QUs885iD2t%N@bTVe%m9|flgaJC#w+(Iu~BVle}tj{OM)AX+qetcz?qSWt24URQI9@cZAuf}$O+H#ZH7C% zKS-bUg>b_PTDg*PkAoNIG{!V8L;rix|Nj94H^6oB?Czj_G9R@*8JAz z$UF)i9Lu#}pYp58~a{}eG_B+sH`)#awZy8pBxK>u@+U#1#WEw=Wb>pv38MM&(2x8yh4YJGcxSG=*BfV_X?cP5t<5><;JFuOO38p~Sb~ z!aK;l_hdf2x{`7~A`QP}wEq)$o@8{*ffwfjg>gE)Gv-ntcC~f@R%#k)ekHwiN4~q+ z8=$LPke^=2c@N1%f~M!M{xjt0o~hk;-x-0NP5t@PnVZ0=hbKbP~ll-?f1 z3V0;wIoGk@Ms9Z?$NvCXHF&iLZBD0mJ&|yCFkapOHcxFV;I){(y9PXzdM1$5yDw*A zaju2lTcFA_^zjwiJcYIwphNP&u=~%((8FKD0Z$+smr%pY=$S_JR;(JW_XD7R8r@(N z|AItYL*I<;R)apd z))h}=+(1oD;nAIxUlPWnXSjU+0z7dS5L|_uIUqhEU{Qrr^@-Qk*hNgo9M zK7%&Ks{5LMBY<6x(|?1RXZc(!xMu*fdet)z z&KotZ`|mC9bha2V>NCg?FDfBG1NMf zetrm_?Es^Xf$aiXdJ#(Rpzd#hrYTs-LGOJC-~NprxX1Jv%6Xz+Px^j(=}B&_zbD8$ zioUvY?;c>hnigJ%`)=gP3*>#78OSQWt3vw?P^TF()$`WnkYWev=U*r>of5^^1;!To zBye6MMx*CB_EN9W!;{#?H}P)`J@A&aiy56s_@)6cH>1a$fY&IME1`oL{Y+~0R6!fY z_xoTe4fLNwDQ})|w^KIs^%Sz&+K5f=Q}e{@;^0%!-=ojMHNC&nzmK7>?RtNN`+1I1 z!)``mK0KF4eNTYB)|6D1>Vx?LU9c2gttPGkho(r=LXG?1W7N^E)VL9767WQCJvsZ4(P?`79iM7HJs-aFz=m2k zm(o~+X!IY04&BV?OCS@CVp|62wD(fr_jc?GV?;m3Nboj=S=67x4tf$Ux`sJaG5yWO z&i#n=9JuCmF#i=f>cb7j$8vvJacDmoG_-fP3ymv+Lny2F=MQ}AeSHT^C*UsEamRyY zSA*XIW7^PZo=;#@cnWzTwBC(`-$u=zDR<{dJXfL)Z%t`E@o~qOej+h202vXjjq8Bd zRZgGx=Gj<&xd&XCqt)O_w=3|ji?=`?YDF9cN@q!jf%G(b*9kIi;#A)KbaHb-jdSxnAI;PZ9h|&>#~{O(tLBkqZYWS?#WA$TWomm z$yjpSh%|8pzAAOL1dE%IU`7{CgPV3>-YClY0@BDAW2NLHRSq&nyi0Tk{8^Kc@+LOQ zTa3CV(dLElY0y%H|+4vJ^sK5x5NfevYn6?GzG?k-Z72k+n+P^B)K zwkLItr;IDUO6cuK8~4+@Qq%)@UnBKI(jNnkZKPg-MBk0{nhp;ean=|sAGe|PeFgOm*rGK@sR9x%%fgbjRuh!GXACdRp(58OAu}IUv=+S`L<_3rWClj88L`743Z~HFid(Isbka*jLcw2ITcTq-PcX2p*mL?*m8P 
z)~dzkzCpEIU7p^8jC=x0oeq~cJ91^Q5v{D?*E-;O7MbcUe?1>fq1CmOL(@D zlHNLM)J7xmx;N_%;QJnU$AjD3czS^He}oTC0{7k%?k?Vwka*{VS#QTV8_wxTYlr#X z4wYY|?`wh6JstzWlGfoX^yVWZ;-AqSbHVrr;I08rJY!M|YwZbm&O6&*Mdr?fml~sK zuLT!N$v+m&^%W2}hwKMG7s3DfIBo<-zlAbJv^Bc&U!dr2YF-7EoRfATkGHNlqxRIm zTi~@4?NtQ&TY%&fpgJF#-@w>B3;K3}zwV{CZ_#gKpEjiTj)A6(vFqWZ$xw0zlyP-<68$mS@ND{NyPg!) zCh?|4cmB19$_v2XT520pk z`nNEK&V~!u(f9UX!;`lS;S+E1+ehD9!WHL(6Qxo=YF9E`36|ZV`#EKEfyPLiwV=w$ z^e7SVv;lbTW{jla*eO8Kk>0KXiaLz{Y%HmB;G$;q{55_R0L^&bbHGe}s5lO7o=3Sc z;Py<$>LECL5gI)SXMDhz^(F-UBSyt{E;g1{joWFIL}PPme+>ZddO+O$cp_=90kx4V z#{f}pTI@l3b<$kt><)BkYIMJtd%gTNzIc-SFQ60LHTx>FIW4i}A%`b1Ytr*ez^e0g z@0^%K&T??`M*3&;js*Qp!Ea?5&3dhj-{G2>_KjK)B!(jzq%Uhw%w$gRu1>Pzx0M15cz9V|z8bl(c^ z6fqvZBt1d@im9<1Sf~T7dhmNI9FYc#gTp+|GpJuP7QAonBCK<9?|NW5_yDweJQvv> z8!E-KENU+Xs{4S#XeoMM^!|NB9`)NCM(19htfPPC)&sMjwpRgVH(E)9uW{6P0lcXt z*aE41F`VK4Ks|_!pk#xSaft`V>L(V{Kj3g1L9;s5GTY#my!6XT;T_*skm(6>__ zPL+OAM~|Otm51nE8VniDr%CA7A@H<|w!1JxYQUH{1@7BTuRP^l2Fdp|b-w~HoyPc> zNU5c8;ye7;OC~*9-ex~yZ6qy0_TpbTWM_|GuCTq=NWi*9(}rz+T(4ZQ^32cqJ6N& z>(U!{WZzEC0bpS#bsi8&GbOUm5Ay1zHHltg&1pl}5>j}oh zTR_=|9(da86-MoPN_0Rw{Qz#Bq)+b8@vPlKM&3){Of5Bo{@@pZ!0E9izqn-}Cm_A*G49H?!y0grffe+C` z_pZ041@8kp1jLu}%X<-wJ^E+h{E)O$;Kf%1<$jB7Gz#2%$YwS8K3cqvn%~C;h%p+M zft!<&?yf0rg~vWX8hr>C7=Lpx_^3dg-d8#a=scTzCVkc$wj1nTNG~T-Di7+t1h+g$ zuA7l}Q{kyQ;FML+DhGHbp~?4vGw;8#WyjLHU`-v+37uF8ob?CJO7zU#Ox_aQ6Ph%F zdur0|UG&Gbu4jP!0J)Z7yS1ms?udCEe0E09B!FWfsh%;^rdkaQWr1WKW57r?&A@C0 zpx+BDS`cl};XCPd7jm3{Ogs$^@HDlz!>)p!Mti@Dk+cY^w*qtjg+|vhFix>&sa^Z3 z38&YCfA@jIYK+U3cr(05`+X?-2+-a`8}|X(rPweJVFi?fw(jse8*A%S>eY+p9LBS4 z#_4_%s49XBSE-C+GMs#$QAZw-egnpu(62|RVF8v(S^DkTn0J|}mr<`#K@@F(XH9FEBMng^T zdlKB$oG0EK>uLGD{N77mBXax|j6O@PTA7=H?L}JJf-N)?>i0p1{tha|Q|j+SgNGP7 z?V-PR&Omf;c_92~Zaf>jqa>qqj!? 
zQ6?DW*)yA?ph}uH4qR{64b=6Z{c3vcY`7A&yH@=JaOio|Gk6{8Mh^KH3>ZggJtOrk z#{X(8c+Xn)rffF#N;TuZ6ri`XP_^ImtTcrO*U}qpT6fL6u4WHfB2C@~-u)q$|4r4l zXhO^Lpp14~*^rt=t+9+=&c1=O(xmA3e1M)O`Rz$$x$SsQk^9?rg_4%eykbk4f7(&e zYK@yGaf&ed}wATwPk{FMk;eC54_YpCnM#& zslihp6Xs`qbK4q-G09Cz_=rqzV zhicvqJRW;51(t52{JHSV256EEj9Y-X1tZe?DDDO;8-So2W4AZs`tzKFc#|43(px;qEGJJ zc>o%EL&jCm=L9hN6Y|Yy=K#-9U@*dMXQj0QkqlooHje;m9R?c*aL_inoBQ27Sz5-~Oc3cm<0lW^Vj@X(9& zO#glb^z~v!im_r(Cw~!=(0;h1%3IQV07nHNxRlnl2l9b)Cc4afR$c%qZ-8$CjCC0U zlaO8RX0A?IXY1FZPvkP=Vw@au{gE;C2(;))-_8TBH{h*z>0KYvt!EZoHwzfOZ8D4g zG$ik4EXvu`wGbFP(ar>FycnKn2W}Vg%$qnjzy%$UDVNc%cVd=Brj3W^8c;Hcw9)2H z(U$v>mcSM5fp8c!91Je|!LNe`2vhI-AGSx=hLT|E>blyec>8HcNCKy8QSE*AvNw2qo*>cuo z1m|)*>&`RRTZ~dyA2_r>Jmoka43Fp6m(YAW6twr+EUu>=K+3EPv*d}i^E~pV1o$hE zem8vgap>iB;J7>(D5j3p@Kqrts=&t&QR6Mp%>DV3LapxAicyNoGbY?!WpuPA)TljD zo1E@U*N$?ox`AKr&CdrzTGo}J>I7OD9_sScq#h(^6=jg=mm;y(P@m9?t9a9i-Y3`1 zqJ^Wqn&vf*y!O$xG|xtwE3<#el9`u#=A#!Wlk&@NcS!llr@V6nZ(+>jh-VYB;1IdW zmp8l!fmTXaMwxsrxBvS~yDF1v%qgRg zL-$Qcxqaksio{rnv}goxxC?X!qhS~CHOQ1zNQC#%wx5&R6IR#5t%s3fp5%H8j(Y>@ z_JThTk!ufHa5r4j3Vk>b3*%Jem-SprE7~<*L;u#mcqx`tCHUi9uss2}UIS@72d;S! 
zJH)#hj09%{+AEOV>ljm?BBx%1U%V~$8uaE_^llwow2ble-$=j?=tS>P`6tjho9+h8 z&mw<~@^=-Vqp0at{;y#iJOYh?}lr8H@H2TKbRR*MepEz94S^d6 zBGJ?<1CbwBciw_Vcjo;)r4OPdN5P%_!~FM3__i{weE_MQ=a4(hxJ6uHpQ+X22trh4PVAX@)#XkL?)6n!&tg)>G^QjVjA^T9|hWQ1O= znn2(wI8Ulw4@B-Y`hq8oY2g}LP9sr0r|PX^u9|ui*PGZOHL!7wmT(Sj+(yaDaQ9T| z{et|9;ASJ*JGXose({E5Z)5Ali1-RxX(gV7q8i~_S)f$ibsbpM)iy*9P#1$2$< zUhw)F7&@80ErJ$LG47I3O)cU5qs9|*O9NKTW}JE?VtV7?Q2Jed=Yxlu zNE9`=v5NF@W@C*kBGnt+w30tWmo$Y#&6yu;jx@CBYQo!*Uac$UM{==(4tzmcXt zhF7-0QTf;$-JsQBWS}QQj6w1s7HbzcYA`M6i*RrMqb1G9;R}AN7PT z6X7h^2-R@L?{5MhcZ7~pf!p=Qg^U;b?0vtN(5B~;lVE0YfUQ4$NQNGCgs=!zOqz-Qw zn8NJ9m``PBXFIqVPKxK)sv)t5!(k85PPd>(Cjv!F=&jc(S|sxBeCVxqwvODON!%Sg zm>LS;t6uc92#qigC=Qb&K^?R4mfb{NZ!R!0nQ*94)t1-7mG1D>`{2nvwM{QZimNww z@z$f!o$sx|W~+%rRL2bkx9-+=_w#ANhHy>EyZk;UWe|C)AqhR}rT+Fjr1nt}eC)Z< zY~XK$WLXPTRT*VpKx5BEH>U4e8|#o#GvKD9ls$q(yN6#n(C8KDHX6#y^{3K`BiC7M zYn~)1-G-6pO#)Tmu^OcR16el;DfbYmS`7ynx5gA`%db}$2lM&d31@W!8{6O$&nkXN zc~8w%51+%4DXz@9pS}f9{{&UbVA;5CbsGOXaTz&fE0RK~rPWwKZBb{ZY26jS-J}{_ zD~quh_fn4zIm&<~bLtJ$!|1x57C|NCsC1|gjNZx-SG}}=V!i+Sr;ufyYExx?YiYEn z=8P?}pEgs}9Qj)6mFCo&C`2)$$8@Nd@$Ho6$p*uZ2AfE@OVj68#OC&=R~wU(HuMn+bj2<+~9w zV>&ux4^JOQI=Ba*0VC8qkevBFg1tglY`7_}G&w!kbj#mF_ zKNTTg($s%_!1t3#tq;(*A3=?+jD{D%PFo=M?7#Mv8t(?=u{&&B0duELC-~|S_`Vy@ z-B)@ET1e&S`3u(U*bkr`p5F23*z1_|z z)2`eKf6N2p7c-hS0p-=S^dO`DadfqNFy2CEJdPIl9*(=2XWp44wzfmbZ2CMD%={Jz z{{&`V05)%s=mIrwrk%gT*;D9+tCZf0d=ohTJ2-EOWXgj652I;Yf~`-Wr1zD(g82=! 
z%!ludnCE`kLi)1;oKHk97}>uowVy-Dg~1LO6S$%d_SSM*R!8j#8s;i`UJdSE4a5VG z7N@`gPoS-i0QE+!xO?CVadaWj8^PTh1Kx#yr-03U^v$#Io56F`u6+Yf7)@at&)myX z7y5V-YBgHVxEI#hk9TYAr!MffcXgH?J)h(A2c2uT)^)>(7>oC zS0h{V;o$y2)HLw;9PA%YcpCey8@;SSIlT^-z?E0f)4q5!wYIJVD?PzYO?c0U@;!iR z8XVgc8JErfF67(_+~-lxYT))f;0S2a87_FA-?pH|Q4XAJBj>%)awQUE8fiy@RWTa6 z4Fj^y=v6%_o~5eEIA{$A7a~y)LU;Wv?)+N@BwE}N)~hM+Ue*M(axQN48}*8-hI8PG zEsRA^-{-dA2P2=BMK0+d(Kd14@mBC9CQg9Ihk$iqao5c)P$3JfzsxvF0?Bat z)0PxxyzXLe2)*NM*%9O#MGl;@p6@JbP;V835%+fwhJKZKqE6ImDxl`G`R~cEm(ksC zg8Loxr8_*|8SKS)OrOx}Zb-WpyUA=OF&QhqhS+O?M!VkHQVwpJmWUo$05mQ2G)!GcuOYS_wEi zaxBk9QJ*9rKutmpUTsBk>&;PMLQ@R@uc3=Du@)f zgdPsRT|sdU;!X}%x#Fq(){N}C0wuG8rISTY^>zHteBS!=e?1eWFa1E7m;9ba0;L0; z#!~Aj=;98YoY3PmyqU=>H+t{!FMS-DwV}9Hr7hbHX;hPMdlO&L9?ksoyET330M^_= zE*G_dvtI996^-r;WgIj+KDTuCF5*5&yT)*fcSw3?l6&;^Rdm7%_#@QyWW+b{yYb{- zL+0*5-ku6KUKP$LJcu{tJ8Xr{SQvk0L|zLQc?xF@ydS+F?r0c*)_4dStwxsK!LOG5 zFXYpcTlu7XLK_EY^Fu~`D_S*H`~`6OgY(w8K--Y&xy2 zqU37$;8sS>h4jnaB=+L~BkUFAjrWdx2o8(}Z7j>{d3{Sx_Zd&2|Mvh_7bN6tbZG^i zU58c}3=W^-vkRE`fciRNLB9rHdke`Vc=rtSNHchTGvm**u4nSAJY_v+*e}R=cgy@0 zjdK_No=>S`5v`QRerSqJb#L?rIO2M0@WiwGl3kTQh4ym7T*~kB$oVqmA4lfsA@!!% zkKlTF)d(30%3Os++`>Qi_HUxkuFBm^8}3uTlHT=zzsEC%Y6AQ7eBO-YaVMrDvI{Ni z*Ix|vmV?!mSRGdlkha-9NB<7xFiK=M7~ z`4VKnYrxtC{GABr9l*wXmh?S9yBNMX8|dz2{xArbm&12P>>LZfEdmbXacZRvgw|() zxia8+AN}x@;0Mft^sE~{;(7Qy2{gA;-cxexq49L|^*^DaYdGzo)K$p$-++rdp^jLn z#q$I7(Hmjnp6~Z4xtEl|P^K=ux*0zD4LvJI&U@h)*9e}Yy*r?*XQTSjvYwvb(2G{| z<2uI7pWyX3fX*Aqz9#Pq>b#adETJdwF%mX`^zHZbG7+p=_l)hQC2xM~OR2hW znU+>OeH3kmcC8~)W~@%vTD))k4MuyC+PvF- zE~T6B@>I_h?2n28ii>HffVQ23-V9&P46_4knMFBe_B@_k0$1vZ>B9e&*wams6(ixQ zC%}X^d3k2FBJgjcE@yfJ;fdW~>=@ge=~3QrUPS1&A!+3;Hy zGPWtwz+IptNIL*bRT;PSsY@92wvTCqeX5 zoJzmUHy`*$0dqsF>|!vO(F4(&Z$xru)a$8XEx+w!g8J0cMg}?uN_%I`YM@rXxB`=4 zRJ*&m5Ab-u$7*VKWorfFy*>PWD)>tQ>2Baq_pRk=ydSC)60&4ZtR%gApIytV0Jg+Pxa zIph9Tci>nu2cFVDRRfr`pXM{hCy=KEEN4;6X|$=m;Ce?1zg$^#wW0+*^q-y&cLo>n zFNZP*sHFhPsKJxGJMeiTwXdhHCFGZC86ADR-WK;-wvh($bX5FZ`Y8n>r@4wF%`>@O 
z5h;sCH^S5xXpa;Sci^88XI|OCei5p;v&0=6K6C$fsbvxW$1)i>wWh0*EACl|&#Ya) zPkVZ#rILgqw%{J4B>%gI=P_^Q)3^lgg(?d*^qzSS`7osUH^{;r@ZWEcOQ*nTZ}9Xj zJO_>7oFsbaX}GW-X}#dMBk0gODDga6aSwDpiv0Zt_RK+2$|61b0LOVqZg1IZjlZQF zW2G}bwsCM$FKRphm*^o=j@8CW@jPb zDNj+CcVm_SgYexH;Pj?mA#O$cKKG{X7VzTRaK2kLtc ztm^adRz7bkP9ytA@w*=_{08~3g*pcD>qAPvjQ(@KjJMUh*JcWmaXzqY2Yc?6{)pbr z27|58M8<7g2`2KHmwfnQa=b|7USqcsOAd&K1zN#R zmod`MLU!~fXB{ZtAIiEG)r}rp8MOLM^llR}R10k}cHswL_F^nRcgp?>NbUpDOUZFM zdirYYzNP`1JK%yj;QvGZ?}nzcY0LOV?mbF?23891V{37tEkuW5It1zIS0Ybf8Q>+BJ%2OGeZ-tHX`W;%r^IsoQ@`AJS0bU7+_C^D~aoEuU7b$*4YAK%wr9`zf?M zs?$Tiv+3t#Y!ml9fZ!iz|`lp3Kc6kF!N*6#DOtfmjv3bS**-Y)XG(1iloUyb-(( zMZ$PKP#b#=bzX{u>PXvXV&OGF+p7U~V-cJH4>;4#+;?H!YMm^`-&&sAGjFKQN7rb5 zR6urm>&hX1=OHIzZl8MI!&|uZBwOD5o4tF)-)bV?V#$^}lD|9@a{plOP}bYb>LHcf zS!?g#;hA;JM7zXtMvOU(q)XBIK`85UPnCERv9YklyLV|Bb9yq^Ka*a!WR#|WX%;iE zB3f|n?8)i!aOJ~D2BR>}h3eiKcS`U)sHZEy@%A_c$2>(HBf(H7(hI1)34JOGHe8H% z;t8Nv>9MO+Qmz{Cmw@dAFZaWELwka<-p$;OlAfq48!UtMq~`I~4)8O+;%S*QPtugr zs*0G+JTLt|<>xY)Q#A2e4sFC=dC0=}TVF;ibhv&ceLC*5HZDfx@&$U=O z8(G*DUd)E~tVtWq_>!sc)bBK@c>yxUS)`h`FCRu1?ZDnX0Qa>GJk=4|`2pI0En{dcGThPh z4H{`F@-xm}=b;U1Ftb|4OyvNn=Y{lnNcc_Y5zoiD(`o`Z{wK2Fc}nkN4ss9)cM$EH z&uBHGk#S37J!{aTyJ%5cWfvN44Y15XGChh!{~X?34_s@3bQn7PQTVhqm^(n3e?VE| zW3&eAYbiU7);|swKpU`dHvKw4iSvN46`$LHY$g=`0lhIBJ-GzVj#iA`<|SzJ-PF*P z=e6MqZ`U*O#;5e-I`H9LzUML9e4JWap`$*h)!)(9kK}m*DQSfK!$9#eW3q(W=K!g9 zVRc4IwMK%y3J(6n^Owlwyynr+{*}N`8>uE#Pe6^`s56^=?zXN;C z=b_tIEGVBo2)fdazEso{J#WDy%aQkZRjL7c*e^;RL05p2UKlG$yWTA zTCGA2t*PO3o=vBX%E-F$)U}q@YlDS{kEOetY(aP;T>`(p z1YLFpS>t`$RVdMlZ*QHJ<1U6D|H52m7y3|Y^`!krcruu^jp~$?Vu=_ldk?%-3!2^nrmDeXUs20!%3lW# zJ!{bJSlYwj{65O~lBUY$~(6_LKwbPr|pG&SM-LU6BaZ5nLpMYQ62 zNIvuNv5elW@bN{^V+(l|zb3!cxFxhTiP~QUBfH=N&jdGw``w+Kf&OA@cpoT? 
zfA=u?^>$7~O1YNOj<$1{<+v(zI`--o{yoG~eIfcI7J|$9JR5`^r9Sfnkx$%hv5Rki zc~4lzV%bc}EZP~vcN!`b!lzltT2GA3$AU6<7BJ@V9!owoU(`{ zkDl{>9q+*Fk0f_SZKMv*U1*i47n4DwA4HBF0@5M8%TwpwNa=ZCWjN{kg9h>b?LSlE zA!^G)(zrHaWbaLk_yO4Km63W{53YaoX5>8z9z7HM71S9_pQ47p2%antR=Y7Wjc#{8 z{d0A~*}mA;(`Da{3G9BYT;$+*Mnw^6T7+tGpBqyq{?Zoll?F?mXzCF7L|!QfCG<;`O)Y!ev(jAtvR~#s-lL+0FV0Ob z;H_P*f5P=1Yw|P6fAJB$BAGhn-OTUIw|FfyTO}hJz)5qF%6m+LgzoG_XjrB*OjbZ+Ch}M20 ztsD|}JbY0AXG*2ZkWjxS)o6LnCBH@re95z_jKEyD^cEyc9yIHZJTjN9s{sNr~Rq=X~UacEA*YE^6t`*{3oH9gah@7UjhO;JpH4>775(DVNEv>G;1B~qtB ztr|enm>OL7Z5=398L7FC-1U%szvI1)JZr&a0&J}ba5e|;P3ZejpveQHp5c6m*7U5~ z596=32DTbt##v>WUq)ou&TnP!cfe5|95)H;&DrpJ5v7bkLV_BtIg=**8F?M`Ok+lOU%1iPv!4Hn!LG_9Un*e3HTmODR%U9F?}WW# zs5|naT0UB8wmFKIPvdul<9JBqux#X$pY9SWmE@T=^qV{Hjh2b2_FWG`7v6h=bV*=s zkD)bh`&fq*Gdc;d1+ROor!F!&8{M3#Ra-^OXI_agPL#MgSQG9sn@Y+kMxf;$L)%vb zw#%SI19(B7$u?k6LsY;9oQy1S-PC+>E!Z;db5YXv1h>lij-hsU)p*uK57}DaO=6qP z1>>Fz&cYU(LrSLr-FW`LO>ag7-{{GV+{NVbeu(z;RB3LsPbG(EzsmscgY?OMxf)pp z%i7b5*+BO`a8#pxy^Zb}nt}}74Kx+7n$=3$;b+jk-YQ#AudDOgBIR98T+Uoi+59SP zr;uv7$YO6~*4B286)}+&zQTr5EhYv^QE1V#S<%l^jvnb{vsHasa@DB7hx!?9$5ZKgYiv(Hn%2B_h`(!)s<+7X z(2O@O#WP=hX*&-I^irrNN=nOA1=f~ODFt+`;V^qxpLB1WsK+>U6xg$3B;LkQQzh^+ z202q4V1ySL&l|~A4_If=J2j4bz1u;dw}7JsEqV{H5x8pxomUnaZwUPdUB1*QV)HW2F?R;&MW9eca8ad1MSzQ4}*YP|6m3BVa$W= zf$}}zA7?1@&}hcyNgxAfQ{QfQtqyC0@uuMBYo5^{69a-c^kwdFeS(OSsKR(CU@Kky2>_YyK z7pI|-CnAfA1FpY>8^ow{Kx3>ggj!v}yWV%XTDc$bT3U0)gBj--?>RtNoiREP>^%ubYR_*W_brSR zqf=;)`rMn?-=j}T)I#{(v71Y)O7(P*#I7A^7aBj!-J6|gFO&8Ob4BO#p8RkQep`hc zDJ?RJ_skrL?6?vv%&l?|X3i9Voo>s=ck_HObp=|p^%EdN}5et-PboWmE z2<~%fdmXa#0)82R3B z&xID#)PE2=?q+hHjy&5*Ki!E^3)*@Lw~+LG)G;~Gqc`xmH?tQ!CMER|%tWqe3ATZv zcLSfNKJ-+04_{+ORa+!e19~989N?+vPf|$L60C1^`A>nXmH4;Ni$z$k7vQ;FAE0wQ zD_z`ir-ZBqR5PH>Jo@5(j}o9i3XV5}A1z9K4DKvEK%2XuM>qO27mT`R^e9g*;JXc! 
zl9pTOK_Tt9L$wXLYr=i*tkHi^jkH6gDy^PpjOiijOAED;7e@ZwP22Kk620T?trhux zn=->G9c^h#C<(Qo-$ZL^8O58QdIz2~3f#M-D<8B)#Im*7s zoBHVFX>*qPH{!G7Kb1bEJgqI;>o^&$j_XGq&wxRtP5UsqlC)h+z59^`YF6zB>(r`` z{CRu}%HJ%gGnViAd@6MveU8w7&s|zF9cpv*_)7|xK8wE+$5OlxcpVV!$Kr7Ji;*+3 z!ShJ0sF6@WkHjm%Dn1lu0CnJecb{aepIBSm5fXoI4mIZ?(S=(mQ^!(9;OAThJ&X*P z7qmu{#yJ5$qu_0KKX^i%IkyZN z&0Al!&6IGP$=x^10Q7&3rM47LleqRJeD>3F> z1#XN5_&l~%9&-oBUpH{GBG^6MfaV%{UIc^{ka>>e-_d3^_Tl}=na_jWw3NOM4$?xe z%)5B9jO1||(&}F5A}rgGbKfG_jGFl)csEv~w=dims8SBuxRAV8^Sl-k%##J;`fD(+ zWojHeeYWoPs(_5Rfxh%YN*jxGGjO`%_BXgc7tS%}uKaFH|L^(l3hepRqhH>Wd!DY# z2{Yeu(Aqsk`un2XTm?2fDbRp6&c?%&f@TMSdI0a`IEuI z%Hmmb`q&fAnHThCRn~6Y*>)-Ij=(dQg$_uw;^p2fCEaJx*LdSQsCgs$&zMY}99NF& z-Ehr63pyMC${(rQndO&(dah9Xh%PRoF12!+r_OPV+2!qK@|2L~z!fQI;y?FYD4A{9 z$Oir@6Et{=8HSSGDB(&)d0L;OG}edIfw7a|e`CgjGq4W4^`j^s!z;|u)8s$)KWL21 z?|=PPf|pA1%u^+^FLj`{eTq-_s(DJeDCE}$igUCSsXmXd*r)irJ84E7P*b?0E?QXX zzmlKx*ZTQariN%c*)v}S$2~6zeUvwBGfmCHR|kGf@hzW^0EfN9$Z1bXlpNOL`ctg2 zEB|~|TjfwJZkpM>M6s+MwFf z&TTEDl+-_NnphhT|7um%2@+S^*?EL^WTbNK;2+5k(&C%|wO(4I(CLl7n!0w=<4QqO z^}*Kj43765Wz&*A|ElPj>#+KEQ=itxcAmvP*;oBoqp*mwj!A0yN?TT+a*8MNQPi@^ z>?qF%!Tn0)Xak$>>Yj2{4@(h!x&;r69%Rob8{NyuQqh-Tk0Y;?XJp+&?h4do{C=&? 
zSauk#D)qg4^dG?DS>igp<|Ae9#~M6@Rb_fcw`|3Cdo?5V>tI2AM=iB!bq=+vTW?{c zc`EQQE$l>6o=*8P$jixai`w1WqVmAsOsvIS!B(CHyiW&f^D3mCwo*^*Ojj;Xp*~0c zE>hjq-vSJEC1odlG@^)F<_@56WVjQ~TZT_S7R_K(dN#Xu7`H>HOYU5aJP=10BfXpr zR-=TtH};{ou*g&QLF2{2lkyokDx)u+L|Z1P=~}ej56HGW@aKuWA~<0$u)5>;0eXCQ zXnz1Y>PKcZHL1qbm> zg~VA-9cnuaebkWaf_++xTxn+Yo^;(!J~gkl`{USUJHTHBavKB3v(=v9>JHTFkh6o3 z*xJ zT?m%y(~`ZZ0z~ed(`q~r{QE<|%Z14M6umN+c7GL0>)m~WTw1tQ=tD-PBJd>tuRhs%;mYOo@|Nd={VvgfAg`=f3 zuduu4T%L?k2J|3B-&D*M<*oA?|3^Q19cYjfFr@`2&Z1^;&8D>OIy}>^PVrO=u_7%$ z$GZhm&ynxBcXNoBD6O51uf+PS#?u7!Qm*Io)Ei_TMiQKjw5zjqHkOduQdbgDs&W-*U{a^wz1rEs&(helVXZze|zpsE8e^n(BN zi|ey4i*(#b&Nxpp=D7IL{`b?n|CD&@@z{6mb9d6}CrQ)(+)#&>L?O9y7-vO{jh?}e z=ZcO#npCg@>BF70 zrDXGd=e?v%5))83l z2GgDs)t_)Cb$dr&v@(?|o=Ud8NqqJ~dOZg(8A(LF?@o|7HfjTJl|aoZtka7@ci{}C z+)cDy3>3Em!8CHY5_mcI_rA&K>8i~La7H(RzTHHBjNa6Q(x<^kY3fOV+v><(XHlM# zYz`j0fk-dV9n`apzAJr=)4wk06JcsWE-~)<@+s7sWYj$f7MqiL0cqxOP9|^XfQ9Ll zj%#l|$)i=j7s5$t@HYl}XYrlpvpy}y-N1XGVYe_2we{j|5#_BGz2&3VIo4!t|N7GJ znknhC3^p_Q9E)c2Xg<#pycwyUpjn`;#p}qV~6OUdki|<_;Y#z^n3^lWjz#31>Uqol!sXXk|?dU z>kGi7V>)_@^dXF7oM#2BJLf5FOOI$)@B?L5kCgMr))j$S`IW=?(S}p<>P^sV6uk^e zT%mP6F$?MA>Xv)oTzfhX>&+ABzhWH62$zE z(^fs?kG^?kb5^N#x8t#`OaF<9fb*dob(*=|>$+k)~=v z?NMoMOKOPgs5^4y9#XXHvlzqo1L0^9S1}s(qQgNCO+5d>%xmr57=_YrKx$;de_owN3aXT=A*)a^nEw@W9P6^_QRe zyZ=@HWO771K~KmKIJhm}Mk#eXIO=k+ht~jG8D@$#@OTakSU2{(Cq$g1y9et#a4C7vGwL!*g4wl(0ur{30 zDNTj)PWtLenZ?k&CG{2po9V71DVLoAAE4b7^%zsad9JHn?k$P@`VdhU+-t0aK8;b^ zi+<@TP)~bedMI?wAx9RPKZymol3KN=?&hC6R3m1OQfD9f=GoUx0aMvhHjy!FHoOe zOf5n8e0V0jGQFyXZS({@U}WHS_(!wxoO_<47uIN8z13IU0;@w9UGWr0{i#D8BS%%{ z?8V=Jp*(ynM%9!04U~>w27jg&xVkk#p5DO|(VKrp%+rH^0(JN0e|#k81cK{xU^VCd@cqn zld1Fn@bn@u;y!hEv8z3`w&D&mM^=*mUBQMD{SfV3Pp(toQzdu$xVq)M)ahk$=>s9)$@$8hdh15W*( zZ#~oc7yD6r#P3qL+2=|4d=9jA6*@&kp(~?v0J5qfSojH9?aedB z33slqUr$TfanO#Z+02!-T}RSi$91$Zv%!^8Q;*y$HQJ7zmv4 zm&N`|Bh8&*Iqvp@KleS%r$??0i>qt-R9g*!zi0A$74>WX=vVYCi+&z!HoD_1atviO zoWlR;Ev{$|9Ak!Q*^Z;uLUJ1)L(jUAF82la 
ziqYK4{df|jD{Xp*Veeom?xKxDygmQxPVY&`|0j_B)#=}K{@0`DLR}K1srx@F^WVJ; zu7VuoiMw)JfZZanu^ybAN=^1_J)B^aa3z{&O$(utk;SrjRSchb;Zw>w!W~7*wRW^B zC%M9-L~w5AZvAL!Iy*4UT9iMf{!Q({U%tHeFODcjmARt?Nb<|EpnSK4GjVsZ{m57w zPtFBkQb~CkJ_dKqyasl1z;q$jjJUSMp&+B1^V>B(?WUu_5|#R%eMx{B*O|*8Intz+ zzKn1B8c!mQTv=QJb{?RI{{Ux>jwAVx+(Da*stCNSwENnxP@}_W-k| zFASblXW#3vz6ZiXVqGezGZNw5mArZhy3{}h1wRx9-6EaUt?r3ZPHqc1^bAN{_k1Ytoz-;!ODXhh zv_N9(?k#X%Zs9Rbr~_wJq#ttLX;}aIAO<6coP$aK|f^s5Ms z{uL!$%NY}}kb|^V8YRisf?l2ub(+Ej>Wksj?EQ>NL_H%FusOdZZ*!>Y=^VYngTdlr zTAmO77SZ>4;IK9D>XET;TBP=>dl>uKKw6Gn7bn0;@{4CX)*`9mZSqRgcC)%}A?n9Gq!dYZL6?Q4A5 zhH^BC9PkXGP%4X^Wf%qOZzW+z$ZzL~{8|+$aINv7Y!CdCmftyMR@HtypUY7oJG|pXf)L z-0Cqu<159tqfnbG>eQofkRHsvVbqwa&FlDbf1$IQ5^9ff!8*#b)97Blg`+x9#(Izc zG|h3mA3jZl-=6`OvhhU5+^g{NNU&BNiaRrvFZ2?+XVKZrkiZ+_x-0*WuQy7^`9G&g zUF~hlI*U0Aji{%_)mrZvX$+@aLOoZJM>|Q|Xby16Pu)W;N*GT^w_yf#&JSGUYL*<=fl)Y} zxj>5N!>CVL9Hq8B8^&87m=gUKsOSFG0)8uFpJyx?>+}!I7sP}sOnuSR(Sj(!zhV@V zTzWGO3LfU)2+En$k?JcA$F1bsS>G0Fb3cQg*&PAPj_PmtrLA`=ImTi+?xOxr=wtME zr5RTT`4_$QLZ#1r8jy`Atv0(3+y^dS4+Klb5O;0-2>D!PS4u}6qCHiMw-R5jEld3s z0u5b58b%AJAtmR*cc0Ls!{9rGoje;J)K*n$Wg-2e{%pk@L1{k%@0T7F=c-xYKyLet zymQcndYZL|cS0NOsY%q?930n%|MVmq$K)L_^R<5&t9Z!m#Ab58pvPn$E!+`l5! 
zgR!_GrJQf&!5g#axhej7pE6Cr)GYq{)L83cdn~f6JDB#ZPN;Fr&!*%kCdJ*;NBGRh z7x~lFlJIHW%4ogJj=Va&bI9Q+j$NdWmnY8smQcn$=}Af)By9w^bG6oaP&K^t`9Z6{hh>vu z+&kixF0SWHhwJR~3Ta&Zcn zQsIfQm&a1iPH@sBV7($xJcrycBGX!QT_>K81gZjB`~a*NP23s^{;g)uv1h1jdwM{l zTvd}RqunRwEm(5661oTW-A4MTuD=pKj`G*CMn5_TuJxPLr_a*Td+jD+iM~dyp2~|d zKSAz1-s-^mjK*JTm^j<8&trLRpFOkc{pP*kv?KJ(J;S5uVG)>g9mqX|t^mFa*BU`( z9se$cM#ikD6ZY_bL`nyq{=_q9OvdUQOC6sEufRVT<*wPeTK!<~Fcm;aZ}=Pl)lTKR zH&5kZvD1O}jJkb4brq8qEre;*=br6Hfhn80y>Lg&s)hZvg|^xQZJZ}M^Eb{x@4)F< zJeNA%$eYnu?f@-^G*kn(LO&God<1XTp!QH|Jy;kEbeA%sRsyFQSC2(2`dS;!=H4Rj z$TC`i<6pmvrCedsujO3nD6`IG^u?QWJVEVQL*;QDo+}lMvRRQCh*F>_USwx*Rp9Qv z;OBg3S1WL^V|*l9*`9i;+#9S3^C&)DEi$a$OF&w4OYz;l4AuB^6TRYMdOmBXC*tU|huWSv+@!61|<4_}7;b zRv+y8pP!7P-RQNh$oSOyaFi|x`n9xl5~^yA#FQprL&-IZT{Rn6H+GiiPUna-sqHT8 zd@x!Lo1h_O?X|daKY?~dZ+bKlNwtZ5*3=lO?ygVo2h=MY&&hj3 zDADXmv=1Cb`A8)#2~SaGV+$#rTS6=C$&dL=@itzm=|}lD2ON0&MmK1{OpltqF~&3Y zo=N>DB^%JM6QKP-O6Unx8#_z79p!cc!oPn>toYY(Xnf8C?1u{Wt!fMH_#hRsFK{ z;MXK0%s72Y1MgFg-s*_M4m{BtW39Q!&h}t8moeyU=wzsHn9(|sRPEya)S*1v&6qk5 ziTfg{-jJv-+WV4z2aJW_=XcOHpO$xkIr(=CeNsm6z%h-QpNABqGtlHpp)KFpI6=pQAfxE~G<_QEM& z2YJ?zrxl=g|3J|eK(IJ)<~pRzM4*&kc7@XV?~Rn)4I9qcfV}U?Vm&ppkx$Oovw-0K zpbshm=Sbk_8|u~mb6;zcXNS-qZ5bK%Fb`xr_g_su7fX~{6Qr1@`v>P*U1!NxI;kMoSLfy{k~ z^TFSp!4q;S{~tgud295C;8$6m*~@-kC|?Lv`cw368^=YNQ4vhK0^q3&bx%khjCVEign2|=2!M4Y0-SSq$

    %YDtH+@1a%7M&VT`(rRweOYMwy1sYs;EAz zXvJ$Ood~s*s9!TS#ki+))Z4aXuU$<}g0*Ua#&?s$8^=2Zz1EAo4^pfB({rR;74wcQ zt#5s<))QY+$Tl1wdVBA~Li0ZHLMZVm(tQ-YH$IIN^9=n&@E_-=_EIaw^=PGS2kJGB zwDa40pi#%rLMnWU`4qLAFTTv3`K?dKe~#KDoTUFF{qs}TOOMb8BRfnZUE0*-X=B>T zqo0Y8Q(Y(zTj&2q7iDKv;8dpOQr9F#q4Hxabg53wdP2Sc8(NLM7{5mN)&^@y%g*`7 zpn*ICa1lLp1~2XOO{b5+V2a!l{j}QAS{$9I!_#$IuBS5}&`a#egA`9)DQ!y~?y!&# z`tr``?ES!GbXfU7Pqg?;^IX4sU*OlHDRi#xD}j~QX;Q9ZMkAKwvOBS_o zt_PvsdY(Rr{MUw^Pw$7&*8xb`nMj7+V6X*v(a)q+B9+{S;O_HINE=t`-6@tt9vWLp zs0UGp_S1aelm5;=+ETI|Bip?*%h8$I1hZ&YOWHoDFZ|Xssy@qSe&x&~KiJM$Jn`kq zrzafbH2p-*m^=fm-{c_Q6KPKmhWEj#WgM9s`8AdK)Lv-oX^eMyS{wY7g~#^ebx=Z- zrNzp8<|4O7BcG2_ch`WwgJ8TYBTV1oRv=G7>4}tZ25j_wrN-KjQkNaS*~*WdbZ+`v2e=*;a^7sBc?{zn@8q?L$G_S1i8sH}y-5TQ9-T{@82|oTq)gl`T+IJh;WOb-7G92&$wP9h zOOnV1S6{lq0}%t(tforAy~fvUOQ~^iTMuZ^2!3vbcf#Am--p-N^Ia6+w`cW(&olB$ zic&&5h1$weca>lfd46<0>3O_n{fsHSm~x>M7Y5ID6d(&~@z2vM<~1g#J*dw!@1)v8 zy-IZ@i8HF_p==fs_xFtO1TW?NmyFwp!#w);J#e^-HO5AmbnHn!Hf4s=dYPkD<-UhK zfj5m}?76GPXrdx;wiB32kSU))L46Y1r)lc4T=zgjqh7m9LaVARW88U}vFf}z`h2LP z6m$35M7V7j7GK#gR@8&;6CKObvGlD9a21m4PJT_Kd>(VFq5!j!u?IR(jnEZ{Qvst3 zkP3QCUq=3T4{@5-EUc#OxfUkFMU$$M|(*LSdTiF(Wc&>tARDeFJ*DGzTV-vdk>U6+8oA# zn?hYNGJBjgtBn#oa|d!U-|hlzi6r)yy^LwnP=AnEsLyY4<8H+Qa{N8yoKFwNfUi2V z6@9Oc*f`p)N%`h?=Jl`teJb}m<5fP$zj0qy+&LUq2i1wuFQGTl8%(kovF;VI2NS`q zk}baOLw0t97AaDT0xl2nqzbrEvU(;@-QzBcc(TTo0lD3sp`Hl#jM2^@kCn0d>*6>} zQ@$AfbXI9^Z7o`+S;tA@1pV-wN*lObdMh&;1bis>#bQS5Sl*Xyt80GwiWa}?zDeqE z#i)oJx6%*uDe+R_moT~S=N7PbA}@XLac=Bv&{=zISL*m%zV}<%u3lGmyX#V~swI7r z8z6m%9<+cvQ}og^TlQ97P%o(c^T0tyo~!c`NTke(uoO9R7?G~?=KyWAAw9RCuP#Bk zcuU_%#);Uo#%Qf8$J60|+^MiI%sMxdG8X<0`KGY#x>KjCdd|YKpsp5e zf3UThr_PI%MhR&0M6kZxRpkyPXKcB^`y;qD`k}i&<=b4!uMD!Kf6%1vPK>&8Exw5J z8A&k{_@9OE_C=nIfTHrZSgipx zgOL@zfpHy@-!TzKjr&LB1EsA|8>%D8rz4FXAgvI4<4th)X7H6n{_xat9P2qq1Zg>x zwrT>O{3nMkr3Ozj7&B6fu0Iw?S)}))ks?aFn%YEmLGiTpE(W~!%OL3H-#d**~ z52QN4efrK}yMp~R`P_+iJeKm1Gi@p7OAS+=Z&!(r@3Ar|?nlVTCuK&Iz_FxMiBeOo zpcg7dP4>eXz%uemXM3zd!ToT&j)~iD|o7tdq6{%%`%gx`E0rB 
z%BhXoN2y>ZG}Oyz{S4MXbkVNWXq1PLY9+c{l{}7w>xBovMJJHo`I3Y}B_w@Wi@w*X z;SC;2K6mL9v?EV?aAazfS)r9RO1cv)xD;!6PGHFO0^>x`AuuT#I0sBr7hJ*S~<1t zQ?}1QvvU9NeZW@Stita~d{bxs4q;mLo{h5-_{@V3y2@6LmwMly1E-(xwvv>N;17X- zPp10vYu@~}@+7yv=(N6?%dsidt6BPs^Y z4GETOBT8-5nwRo(Rigd!ghglFJvYjCYC@u$LM_~oy!I&00)<{U!4yRKdRDXrH!^{fE~$_VuwPv1W=JlU?l zXhjDVZ(L7f$*#@48+ggfK2F%d;fZlp*^y8W$v%uF=PQiW7ouODnYaMH={&rp_^&r? zMc|L;Ib6V4xB|~uy#d-9>i_5Q-$8%ejlMj>4EGWz<$L9O|9f?ZP$iaHoDdxXP0R@R z`Go`b#=DZ!UH4ZJIxzAHUwf463%bdx3&yYw{4_;BB#!~J} zFP<^ux&N!-8mR$$4uiYcTj9>5Xp84>jSZR^N7zhw?H1bMtjrgAa>i;gZLu6J-x_1D zhx;+=o{N387Z`mG^x6id7vR*PKxYY@$r%mzrv4g?kXw1K$`ooE&-*W8eC~mpJPk(b z`BWnRH84C3KIY8t?xE8-$GS9Z%fV2;UyFW(l>1VOJECUdG0>-SAvrE3zc6>+(*ye7%vmFiXV*K-zoWa2NEO1%x!f-h8&jJJX zho%NY?^n`WX9Q}%^s7L4W$-_JxZ2rL;}%-0wxczu$L3Y;dr^k#p0RK{g1<1bv~n`*jgyIZ?_dM&A^b4i0Y1g-0n z@hyG2+VRdh$lvwmSd#t??doB8WUs)Veh*_^CrqwsxR5L_wc#;9YnPCe+Gh3bai;Oz zHGMwk+8g^(i}#oKPukP6twvb+v_9f8)OCL1`$wr+dFbw7eUAFbpMZnt$Fi@d@Gn=e z9&vRe9v;U*4Y%N5JwZ;Y7JMdS&O}W1LAj6-vonVy?Tc5%Y9|r&f8c^IH?Yf>g!!wq*Y1 zf0iz%l#hHTu)^J0%BYutz(&d(7VBZ>@vcrYky(^mDbaT|de<4bhfxYrCwHfTqcpM6 zf+?gacyUc$xTdgvfo`q1G7^%^S%$X43^=Gy!NWi5@PcWY2WzB<^xz^^ae{?{ASQ z|4J+Nh3~jOLCsv;P}aN&j=e^So^Gtg_6XNFO1+)Lf0{tM}Z02|5bk1l3#q*=Ji~O0*I{SeL!#(VWao56>iuYJjEJ3`P>*U?LSl3 z;+|ptSNSRb3uw*!%H!13O6?`@>Y9f|52W(sJD!Q9=*`9WIAiV9d=&c^Uxs;PD=c*y0#e_Q8(jDR6(^s%Bc@s~D5>t&- zwk@Chu97L-z4zteNwo=kH^De@*TdxZa`k`Ahf|w$hsKVk_!@{1&li zB4f^`)cK^4;_^)Kk?5G7tk{Qj^(r}kjeeyz^>6THbtu0xs|D?8GbKdqvFj5ai?MqK zuA%O29rhsSvR9KvZu@ga)HS4w;d2Gj;|EaUoAhHQvvx0$=2qIg3moyA#Qr3>zg{xG zIjw$u4>;pojow0M_ML}T8*$ZqJ2pife&Y$@->2R#(mO}&p2=l!wJBiv#nHmA0cm;5 zXyT`l+m*yykT7c1XEFNvD!zmqIfD|^AD!L4BRI}}+&9M-r%_BGG#Ykurr@|Eq-_s*M|F+g;1+PBC>0IdyVHW0kAwT5>h_EgB}5k=w`< z`}@>w}x@MUB#jv?+BK#M{H<)%rGkF0YfeKds?u4H&59+#QrEU#c-8 z&Qz5AzRcI;sP;bF)IsX`Xe0lG&LXEDi)U~swBiiZJz!8TvF@k&CTE#Sw|!8)wt%|_ z`p7Gk3GB2WBc09p_rKaXxKvuVo)4Sk7Rm->;b$ z)O;kATEK(+y3|_IIpbgDU-_cdRN5S-&NTt*P;1F~4cOaDX`U3QouvijS=Nt2lltgf 
zUDFA5uY!L6g}iG+!*ivuD%cq3-MKLTbLsllr^oKKZSQcI4(RK*9Bz`_uYc;l|Fa+zl=`*7|jaz^U(o z^G?G;)y5pkx(?TTsw3Y+S-(b}y3*B^pHnFPP2`W#Id_ctD5xd}qDK!+TG|kGB$NtO!q1 zD?OYS>&xB=@2-)u2VHkFrJlq!o;+~2E;OT>bv#(Nio$wdIb!C|=6t%dW)BhH5orI<%cDYY?8!_+aS^dG& zoSd2JqpmrD_3pr|FpjVBuiQM1S|(9yFJXG+I~&xbM6q}3{@$(AC+C{fMTL#;atirx z^7TK*a!BrK4rfIcGv=-U%wIRXs8Yrez@URNYVos-k>H^GCg&Sme<9zh^R87|<3G8w zE0Ha`|0Si^rayg|!gfhp_wnRurs+{kt@lsoRAb*UR*#1KWi_qK^+Re2>Yt{|)nfT% zjXnvV((80M{ck7jV&cXk`P+ec2Q*eAjq_fkptXn5_iK!f>j%}Mz8#kG0P^%jKef^= z_fK7x_0}u(W-h?@@1YHRE9vEBF44p%Ta=1NV=7)q+H7*%IBYLRYH z#2A}1PI?Wh;FmQD0pBMf(H{aY^+zk~3d%Sc zK5{!Pd=Ic&MQf|H`c*LHP5LnsEBg@Odlx$BbIcX(!)W^L=YNjzP_x$Zy^VkSvWofp zdns`UyyF_+?-+duepf*z%%hgi(z}IdawkBkxnD*}dJJ;WeY1Dci`#fR5|8h4c-qs{ zp`88zQuyECwog&UC1@j);dH%7)W>*oT>JQcm44qqPh2gPuL^%Fc7abrUp^=lrkPW&bo)1=@*lY{q2)6RN(63`P+uKoh8@TUV}>aac0 zUe7Vy$dk6WR(<(m^3kKGuLF5uJwoYE%9l|X-7SMj#>JCPG$(Urn?NK z`A(qYNzIln-xh;C&->%#{z;6yRGkb2(45=#%S^`Xa6x@t)4_2;_c_8;E-uX(8RI75c@5 z6QN1_GKR4CfE}K{=c(R)x4V~iA3_g*gpJ$7{omoXU068}!6{wYGzfoajj$D@e~uo1 z39jQ_p9g`Gr>Pdyb~T#sZ9uLSocm8WoOAPUpieFh&1(d!;J-#0KY_>G781;F_NsAD zW}Lsr*xU09c}m&1gx1RgwnUrVH8wi<*JfJ!6roC~@A9vw`W#~Qnk*uXXQKE$f{~^5%{Nk%+wDsQfGi0u8=<(tkL#X_K)RX zOYe8|emQkqLP>tlKnf5el?$vO53l=pc$agI@?JfyIXc>b)>nyo=Q8rWV5uj~>;cRS zcYSDnIm<5^WZK-p;f$NuEkF14@^*S_`f4MUCuhjlQ$I@@%KYMD>U;UVh<2q%tA)`h z`HFg_kky{oyDnE$zgH`q&QE@(e60q*liL;i@paPYJ`*KF70FcN?%5yO8(*Mz zZJc*>A7M-RX?axeyjoHlTtGkXn|ObBEO~w--Z>e6JI0o8LZf-51)=21FjukueaJ~y zY}XlEt&joO+-kmgzNsnoX4qz-I4Yp)nLVk4E8iSNwLJH*P3OO?QEYOiMckEk_32Io zi)(P@cM|7*8%JU}6gG?cwqTiVMf<*sGUTuNQlz~aSmes&t-LAYwVI|tA3c%h7)oCg zrR({$?op(=3++vbtW}i$AY=UElz3KRM{`YsYxTwC*>IfsG&#nZZmy_#R?@~proY02-Qd;KPh(46jrcU?u8-yQM$ zTX#{jJ4{^-aV-4zhs4#f=(q5z!6~1`vOkAd&v|yn&i%;xLWHj4;z@ni5bMeUcZRD| z{*v)<_Dk6z|9O!AIrM7-PoLsH&mB6NnjQqAo_VEr&#@6>AE5p<GnWG!xtM2eJG;9Tj_s*Ao}6_oDek4^azyRQ|Db*H zfr<2@kFm|Zxo4@wAkQfDTau}HxB_z_Pt~^e?#~oueTnPJn&b)PQ!~6wDMvQ8yx#g=hX40>{d~2kU)y;(V^iB+Vl!u{ zo8AD7+-oYGRDh>nn0TE&&E>7m@0`Yt;2N+n{>xBL?lswiG14ouk#J*DewgR2R{^!E 
zCr;<5&g5E-Oe6QrYcgR{=?4t9+|3I59VNLwxf9k@1m2o~H9dMdQ$kmQSSEF?#hX)2L0bKtu42XJS%MeuTj*=> zWJh5gIH+7!cNN2S;Yq(~L5u5XzUPs1f9iE6-5xdZO{%YeyWMEQu0UuHFQ*hHwX{d- zvqRI-H!Nfy3q8W~zm5rhvp>CX#iFuKo9(wyn_tKNKG-w{+RwQ_=i^?$!|E)Alqg5= zT%oP}F9wQ^xPC zc^2iI3T&Je)Zcz1aC;9=?O>v7vZm3$TWE=VQ4a2oNPTmjv@{v6tB3GhwAFJ1Qy0UD zwAAlneEep}$@FzOBR!RzOK92I#N9-T>x8C{tWG&vRdr63Sc(3-kiO~D(Hr~*e$Qp$ zTX3fJ^KjlOsW%Yo&domVp3N%Pnh>qfzIT0rv%`*w*3zDXdkFy&J!jzo|9qf8R>Y+WC$`UbgsY;4HOx zRrw92m0Q5B`xeL(?kZ+z0kp6!qj-!`X`X_vryLcB0atE~`y$B1z&rk8J zx6f15{F08}$UR?n8z7*P-*7Ue&x$~v^wU428ssA}*EL&f6&QRCm-LUd1Ig%~W zt}m~Y5~IM-rd-h`Qd*rFJ#nO^s4_>%L5G8#Lcp)i>$SEGxoY$=Mpo_5-Afg)K4)SI zp5>jMc--W}HDLEJefLCGzxy%{Ug0{&?*Fi(R*(E#>#Er!o3|Szr)0a_^g~tk#3QXF?kVyh)kq zNt<7%jRn68X}PO7lGa)qipeqv85aR}QEXD#N7lbC8uEB5n*N zygQ_-a}a9bt=NBqkp=QiPmfs?989P^ibVKNxSyVLb>f$3!3#*bH>kM}Ju|6b1hCo4 zDD{%!6-wNKwR`|$nmf^yrdie{@aXT8N=jT$Yu$Tv9_0ubztYu1ekIxwNQPYX;;3qQ zD_PH{j#H^|261QsNObqwm#>;0PYTVCg+>79a(o4C2%XIqB z5?8F#)i?SC)lA((q&%z;TIFA9Xby3t$s>K+@fymzyr>rcJI19fkVXv`n_bsl#M@g_ z{`c={KBibhZl!tke<(waAaz-*D_82FCuCR+jB(Dl1(><#doA|CCi?GQ5T)^1)Z&Q< zj=pDaxdvoUWabuV$F|dvR{Lmo{yK zdi4TI|7v7vkA5}EIZIc+^w2uLcWF7#(^l(rR68iM#&gpW@yic=P?xhcrqU*tnq3EK zFW*5)hRXpx%T=35KZs>mZqDrwrZsvCJe$LFdDVEd=}N40NAxSS*1d%rn3?@Jwe-M! 
zEjf1#O@dxMvqc%RiIPtTE4D>jvUYbgI)kH5EyvPg*2~Ztx{Mm{+`t#tEBn2#3iVfl z8;lG-wF2HUff~;S;+``4ZF;#gaP_RK%NXee;KTL!$XCIc-cMKsz3yuGwx?1*&ZzVf zucVhxifhUr=S$^JE%3`e+R{P0hJ#D*1>5Cwe)Gx`0G)&DCFK^z-BV+)VT`m_T>q#C z{%X?vfcANIkT_x;gWwnwc+=x>Z`_q4VwRd=L5TS3*U~4`x?D+VYDdetXwTo7!@cRn zPhRHN3G&=ed4zg+FR8q=$C4A}yAbq?1I{zmsrPAGmln||YOF?{ZpI)f&v_O3VDlPF zPh2;Yp+YpZ)l89;>T}|f{I8k!Pd(YY>3=evw0b4=$&D<36e%6KvNSQG|0S*3`jma1 zAYPE$GbW3$5!C9{joi_EHR&A5!ziyE+u{=Z!gIl;U((0ffrFNEC8K^i_T9g-7EK-C zRpjw~&zL%Js)#=|Vd58NMrL;1B+QItLCV+Pp<`EyKU8c21 z(&+^e{s-PV`8kiOB-Nu*<(GRCwvt;2?F_F}|4?fA+f3LReibmVAicVLo##3s=B|S$ z^2ltt>sa&6?=FrF@0@uPKeaZdqsjQ5_Kwm}tGN4qMN=zka2V8*)xj^2r$q8{wZbk{nLrpE`sGrkX$)EYkM)6O3ZUMm$U^y5VE_70%0#kd|Tc`@mqrCh(V z;(U;6pU#CtyMA#?$e}&Z-_9jZL20R}*|#-3Z-#r;;a*o#!cB2Hr{}iIv-F0zmtZCR z*bSMr3Fx}^W*h&5DDOJLhJt(l$-9{RZM4b`)rAtvObW4 zcHhq!^pbZ^{`CeN6V#TaRmfkV*G1g(O`N`lar!J0tOxkq6+P@FR2i8(!qFB2gD6w3 zB`Gorv{)^TmM{8jur-dXa;Z~;iK9-;Sq_(}3{;~fz z&w2~w+iE0rcvo8DUAz}jwj8T}d1)mXlJX_bQn4j%T&`C3^qkdD`4oCQ6gk&T$&(mA ztt;`?ozy)@(t-3>{eNC)(TAhy{SsMP0}ECm?>8gsJYUiC$K`#!NV08=-CD|U#$N8U zAWGbkr>k9APf59I2)(?EDM}>c)Z84mIxtu7l;6pZEO#DbTS}Dz>={K#1tE2Qr;$#p z!ZmOeWT!mCT`KA{YK4wUauatNbr3FxbdJ{&;u;7&4t-#yS$Ie<)k7;rTRj zn%=KBiQj91$Y%OEgB;f)S(gIq-(%zIH7n>>&Pd9YU6uSWST9DO4W+qn&b=3EtQ}ax z1vLCDcA5O$)$T(AuFbZ|lhugoq{!Y?qb2fkSB2Oock{MK+Y@UnX=mW*483C^U278+ zkyE);p6)Xpt5aFlg_h~QsC^^79$3ftP^kYru<1HdnCAYVx?hst)2=^4T5G)!3GgNS z^%H=|cuH{(`9A=YY2cWY6MVeJ+U^8H?gT>eJ5NXO#1>~)4lXM+vpn@5Yn#r`zRH;#*@C+^znsE&J)3G!Y9eYr;_pPRv?D6KmzOeN478XW z_^>FyJG8_a5ZMew)B|6FlJu5I4bFwTcJ9U)LtEncGtOYR-+V*VrnjYr)bU(bWkG7F zHG1_|Xx!Nf&s6vT<>?80ox6INI)d{f`Xt2&$F>$+S3H$!(^n%G@{5V4(9WqK3GRdP z&O_HqxD)}8DL}@RSgsLNDt5zTucx)AqX)Sf+;7@m#SGEGl;E2GzvnKOS%l{L0=zk` zY4LOv*qGxaZ;Y;~su`K!yjG)`R6Y+WNpk&t`BH zik^WyQ(7OWCwkmTnWwirojZoY&3*pvQy z%C9rQzoDLCjDRP&-iF`e7nI`;n>N<4`_1r+XvLANtSqSM{cyWpsG*>A&(GdKPlT0{ z{Yj{DKKSoxZLTkR1FrUO^k0i*TFm?EO?d=Jbb-U3ZleEsPwFdx;pLRoNBs8~**Vm( z2n%2xbquGBA@oC8y)V4-X3|_t`yL|I066ajjF4vwC)#N 
z4ss~dc7uPoTX9a*oA~UC#l$4%v8z0<;CCjJbq`X=-I@jOVwOD3nY?!3mwD5Eo`iJK zO7T=q&$C`YDmk;5crCqjX7W)=&Uwk(115gQ#4^=OBDEUcO8=#?)NOyuc&Pzx1A;3FQ=>NS|J@~*OR3_y{eA^mum4c`TVs?i zjTWoVNJ(mVrVTr6yWH9rY|I#GLVe*yMp`fI&lAoT~r8J+-wo}{`J zYIqLc{=4a84SQie7~of74ujs8P@A@7=AFzKY9UpaK{yQAs#Yv5=y9AE+PaohmHiT) z*ZLbUz+KBjhm9T}*|m^g^BPSX2E#LQmftyDIq?nT zRs&H^4*-|9L=I(8J3LvN*_D~s$2c8KNNS!xgb#RDo^w&!ukwH+qr@sTe-rp=arp&_ za?HhU+oIo4J?TQQ+FdLAf|b^vlE%|n%%K)9R;a&kTX~Z!Dm|^qQI=194+vk%lM-kKHNOWNQ`-3pRy%hR>Mp$X)a+V*sc{dmaU>Gm znWluKUr_G_qnw9HEBg>eRLj?MGENQXT~A4VMfhs+sLkv~566MY=Rh;gkUUGP-Fbct zIu0p8cr=EcBbe|n-{-Il+^Zz0c*0>yttGMouqJAv7wcrlOW`4r;6 zN!_nPO>**!8Noq7@ym?SeA+OP((k3E^CbKBm zdM+hBar`k~oG_gS4?(gaqY#-N1l%9wS@BwlyU#xTfyPy+#B{?rjB5lxodzQS@Cb@^ zBiWckc7>3}^FNMjSJ3WV0OFm5{S_ZF*pG_maNh?(Rf7p14P)>VkAF=!-cRU4CeV)H zZ3#&584%<%fay>i1gC>W$1w;;V;p>jVJTbU2Hw$JyMhwCf*#|!-b4Ic%9_jl|NrX` zflfPvB<-+c-4+9Y))ZjmL5PyI|9`;H(zs2i3fa7b>RYcCat>fX2QtK&Z(rc*2cnQeO z1KM4P*xw)ujs&?*LNyb{ws}|TYF=+`Yx!QbIrB`3K9@Zd9$u1O-ZPhD*GdZ~M|(SH z-4xoF?fwJGN(JmPM?z9K=A!L4QJ8*=z&RRd*Fk!P&Y_2V1!lGnPdC9{bic~h?}d=g zgCVV@p4W)^6}a{;dfExTJ5iux=^~hh2j|b=Y)VI$2Mo-D63)ZvI|Lk`1`C)DBk6@Ibg)%;nizMd4m9Cg4*3Qnb0{^g zfVoT##jp(=m+id^X7VmtxSSp;EWEY=3l-hv)S^n_wE~sfZSpnR`dmCcNv(=8-DRVx zM~s;hX~hp6NiNLE+%9F21@}MDVM4^E;M=c}u9JIDJgf5iL8yV41Dh$Wp!B_oaT2R^ z(OLFb-gS`(yI*hWI?l)j3*r&3Z*Cz^LAsrp|U8NTPgTL6~s z5O0|Um{Vh9hT){Wp3TR8c<^Fce;gvQtXW9cX`y9R;dwI}r6jZ+6&JHq!4OS8jrlqy zPvvgRj+glHTHw9mVuA{9|Laav*Neb#7h|PLVqb-dsnzD6IEe6X~ zbZY!pz>Q^;q|v9Fr58+>hPxRTRhTbPiW`cXZFC9fD&9v%{*&m(I;e9TA&SG^NO>Vp zH%BWHVnZ0k&%qJ)qdiiRbuLG|ZpPhnBCt81(LWp^C|znUnNwQ13Ds6k;6beNp25IF zXZE^SOzg49``})G3hXR`gSh0(yt~q-ftWi}!H9o_Lv2Oau0(KOPJJF0KLGwT5~^$g zp3As&QEfmb_E64o7`gWY-vSQk6y-sbbU(jaXr~{4K7;8?;r|7Cu!7R=f=hpv7MxFg z%c=WDIChQEnLyi^ibR>~3YEtX-3U+HC)UZ4RAa7;5EoGQ(o3D_li;7PFj_iJ&Ii)N z`K=+eixe%Su%!8@FEwPxLF8Ub>ox+_O~{p7=v9R>9}W#-8*QG>Vms@U<1YYW;@&B! 
zHD4gk108hQeUKh)CB_dr_A&CN&Iy94q*5KK@$ZrqO|HT8;y%Xechq|+p#xF2bm{uL zmKv0g9i;RiV9oPN;D#9GQhcdz&nVSea!kGJB*_q$>iDGDcmz;f2tGT#U`RK)SM#g$ zp1dyeOec0FKksjUsoHz3rFGJ&{S`muIhm`@yRKBnz>=!`cXJH|IzqO_y~4+#%=d%% z_vs`sZ{7AckAjfD?;CyIo&S@_<#>B&@WIrb^*RbYDBPBq%jdky-<+Z-8sQnHrcwz* zQ=Vo2yV2qvWz-%a{FtC+;j)svN)RDjX^eV}r@O)Ot_HvSD(_;U+)}fr9f>`I^h4v# zva`+-XYwtTVi$}^<$1Z-RFmYu24_s(sWvKfxz$)D>3(i?+!9Zfmo zf&1D9*19$CN@{$Gv^%pkpbCE}CB0JHZC88KidD!CjV9e+8_BJ6uMeHYqk860=e&l@ z{sIt|nx&&5@Sb}Zwc$vG0&3Pw-5E}!(bvMpw10*RbRh+<1#!f=ZXCrjsc9Tco?<}}YTATf=$A5JO4Ux#8lz5Zyt zX=K%c5AH!rYlK;IOUMiFZn#m8(`>t+HVy)JE?_*Ij_ipN1^|tTyjL1|Mw2G7?^((o zNdKx(b`P+0qAPJ(DiXt8j+kx+r*$U=edZ*nEtRj@@6;)LpA8qu7&+E3a{D%dpL$50 zpFBlcHatHadwEccWtBPeUhnr_<+&bIA$9b>#II6A5*|7>E~n)KDfPa9Xq~=0NhFW% zrdQ{~xwRX+fZKG+DhRd2Tp{-Gpp)NR;zQt)rWD0HEi}qEiK};P91}}^_b)|%#`BQtb)pD;EVq9*} z28a9lc9~&1$1gQ3%l8sD$_W_tHd7^^DpTafR^M`rlsJCSN-3iRD1Y5Z^qb+XZbeP4 z&_!&`QqfM$x2pIXP6OZZJ$bU~@!3D_jHe_5uj@RKZ$$hlUK7|%AHJqIC zX8UwG|9R~X{m-DpGea7+p=0-w!({<-%$=a}toI-&P$=1_E!61tP7nFImKM*3SG2%S z?&Wz1v|1%qD^lfBdUqwYJwtmv>?Aq3lU>$55;?d5i?0_RbvW{D5F;R@mJ&A=H;h>Q zPHGg^BftrdvoCnAAhG_1GGn?^rdg#eibf$Mv&jd8GYq*br!+N~!5VTzni+Pl= zSC9k$0Ir-z3zQE+R(-Q4^6dw9d&JFh-jvs)fRLkk6zyKfeK`E64Nf!mkPic?*Wr=s zEMmNTy$)ZV!1Gujy%NODfZsPwXp0!wZL)QO8jH8@pEkErxy#Zg~ZVpHOJ+yCs=tsR?oop0s>S9c^9_;Cj z@O&NSsbTouOZdgS%dg}gY8vH8CwCMRYlN1Uva;kf{lksF5XZV&qV-D|n*u79SdaV@ zwwK~kz?=G&t6sU|Ri%{kfbXG%uc7VgV^<>`O{KN(#HuZml8@khQG^}=+&_wyp;Ps> zC|_BlwNYwC=68{|*3DPIgx7eV9V5PiC#mcR@~au`LJQ_HHXXq9$Mj(p4fReLRTL}+#syhOW>|8ff?6;RW(A-;O8M-1L08i z@xwv0No~e^klaI1aUajWr(B_a3|L$Qp4<+vK7p`@$onQ3)xy|b43{6)*t%Tc@IDOw zd=Txu1vzj9w&qb_-~nLsIh1k-`46S+h4=~o2FT3<2D2&oCi_5em&Vb9Ud4dB5=#JF8dxh3xzPG4RN*}sCa7gK{1E}dZh zBeYixFBnxPNaoX8>EI1o`4mq%!Q&@03*MyLD!=`pv`XW;DY)$%`sy-EHHJ?9{giw! 
zJhdGjB^T;#q)xA!n0pxfvImZw<)}@$wC5w>M<36QsEZNZnmj$8^{17Tk$HCfUrMUI zGUeMWp-`La23nbCRf8JEnmSnO)NOjRq*5(zuU1;5Rp;T+zNgo&9?zK~|62kkKC6ov zU*#uMefCM)U)eIBHxF0W;=O@3SVL+gZqC-Ov^`SgEK-X7&Ckgwuf%#aN<9XzQH!4k z`Vss_xIc+}m-lr7yG^tsm%_@?`uJJ1Wp+`%qiwtFeKqb$qqf*l$T+!8`DBl#gj7je z-EiAFxxtMxoxE`%o~Z+=N&BZt2}8i`8c%x1gkZ6~Of_a+^Yz ze&(AYtzc~v!czAb+HiDW${wU!!1UR_)6Z78=W51Fo#G4B&`DXdvFzS}_symDzl4Uz z(B}czs%b~T&z*1XmFZ9@h$nA%xuA-J3!0BVath;Hg9<%4jw}`uo z1JA)a&bS9Vs=IG)mk2P*Cy)y%9>lFE_?A?S}RSZgp-ibrx_{s!ES}sZZ8< zmPaUMgv^2T-dbJ@+V#*{ZI)JWpc?mFuvC^{YtkNWhBTj=n2=A7PXBA}c3PLp#0hKu z3gcOFF|{4rJB4>qLUMROrSwtq$g_~U>Ig09=C=KQYHp6@H^BHUge;?{?+9t- zhtGZ+{NGkMCpWku-JF~H9q%r7b1VizaZ*rsggGK|=JW<-%bnY+MVRx@pG6w%#(#U* zv>tpigSZ>PfjP9i7F=>5E&WSqu@-zlAEhM?qSfm4euiNgt@(FqJs*wbGqlZ*PmKX< zhBD)`J3Mez=$0Hgj6|PEI_<-WaIqhwxAf7*mDHvu>Nt4LD0;LQo^cPc-z9V;ka-WB zY8D)7cu?GdXtRDMYCRmHMviCb(d)FxaeoN>b+g%ia2uCNeH#0+PU-sbFQGRhgAdP& z@!SPH?~_2-18E94LO}`6TMWgb{V%ZK7)p2kW(>SzMeum}#26&>Ym9OQKKCGL<;hE- zqCjMHj#QMGVH)8 zX&mjBms{hFP>r9tl)AG{<#ImlsKu3Gwl=v|F2VIUV^=>^s5yP`w$6n>9&%LXYVyYH ztsbC_;McWbS*hnK)znwk#&Re5a4VFTN70I-!g>?EaGBU8SY3XeN_)Y1J2f9`^MgWa zIZN?azslYE-%md$vUq(J&plx`cYs0Z*_c9#O|(=w+Y0S{k~IH@o&S3vl~SUc`|aq- ze(>8?`~{kmxHo|EQU-Xao*z>BGxSA2dnq-#;PW_29vYUdR8yn;XWwq46>Bp;~BMgW)XI-~>m~KV{}#Xg!^j>vB+MS*7*gQ1g*tDUKtzn=g9cv@XZg zf8lZUpCMEpKN!C37WYB4NFJ~v=;Ipve;&HG2wVT__|{$^eihK@qP~}aZt4ct!!xfW zk5;N9Jdsu_?OzXnUVDQl4FJX!T36dn)1!FT8zI!x4>knu+(5~elb4p_V@Qm5kSpiw z)N=H4%MI%M??O&>lefQWK~v86o1?s(o9Rz$(pb)`6^w2zATo>`YCg`ai6e;tWeMK1 z4dP{nmb#CvsYO`x$xk`hTRCQWFQlzMb+t0io2d7CczT_hXD}Mw_#1Q6Ro`3kwey}n z8ENSt(pN}*9@yq=$9%@CgtPI^7JE=p4W8H)eQUO&wEhf{dfLG%X{gS-S9#Aj{|kP* zgB|J@%}~96YAmJwF%TR$h@YG`w*e><%qy1q&uBDkbm8KMsX8OvxV*&nBAyd+%4qXB z=j~H!J#Mh7Q}afC&Uq~-#*g9oA=#rFkP$jrW(zcTA*FthG*w#kx6n(jBsdxA{~VBO zfvaev`bnT#BUcgYaO9WQQJX6?9;LNKXu~Z)!_RK^M*H>Q9u>0ZLzJgqQ4j1dXx%cN zjtI?2D)!%fd0Tp&ohqI#Zjj^eKu~R8cFNGGk zR{k(r;Czdk@VDsMO8$?JH0{`9vq?D=D!mT>jGKmPjI;3Z_zGd~8V}*-JnWvNb{6C? 
zc+V9;$#n?l0i`j_3VjZkxI*L=>NE)q$$LojMgPOo zP;U+1aTGDm-c13Mda3bZu*8{+8c@&=AwIn)$p$@Cck}f##8y?@m=C%$`_aEf_0QLoY04O(xNG?2%qFVb7;*{;65wj+USEl z+(IkW7X~s0dSvRM^GW~CBRjKV&0gu_5f4gy*D@3jGTzZIFDcIF@~&^jM*tK zA?gEP-~Q%2;@*tR6m-ifaQi+bEP_v%&$S|%&zg$JlWP?8ty{h{?3nb%TS8Ku$Qq5G zN-3F6oK>TAJk^!-GYdm4KY3TKc}cEej#~J6M>4MJOX`5?g|2Q9!g_Ve{@ar2{0d?U z?#;2OQm&)ux*fIkEFssGn9eqt!;x3EYg_919T*;1$6!yij$V2(j8+Z-zq9UKla*^M zw$M_KG~60?+U)@^Y5sB`vr9vtvR6x^eCya3jX_!FyjjkU80w{tq*kC7dMEdkokhfz zn&}W0zE=b5XJg0xl(>BY+H;_ud5pc>LA^nrxm%UGC}kyWX<@a+_koj#{wzkyAI?*a zo^2w%ZBw@Y06lvcuu6SYDSHLEj|d(%3F_M~a=b)NS4;Fp9d3SZgMJT=CG}U4<7Q^o z7SMt(@>@&YQ_;Hif(M<#I9!8&%-URIdM)>bQ1%cgadz;6#|U2*T!!f~; zWx1NST;)(B<=((#GJJ0lV=ncMj#;Huu{CrbTHON`h+S*of2keqM2oi4p2>ucCr6E1 z^)V+E48R7>5xbr7aBHQWd{qk-(o3(ZDI}n;H@wb_$l<3V6vAYJw*yiz!OfzOr zLM=gxtVXy`|I?M)qrH_YDetlsSHR^B@tPl6fAa6e0gsPu*Hwy0@eRhJ%G1~B)2ZCo zGrH!KXFtx!b^se^k=iM}_X>l(Ifb$rTyd(&SW@oolDuz&boA z$#OiXU=wxaXunJyy7YVHM?SCFsFllT5UfQ`hPYfmVoEz0vFBWYnSZH#3Vjn!3A!taZuwT*6r z&-p}GJ!;pm=wh-dAGix@L-okq( zZMc>DQKXWy3p**Z7HxC`j2{s8XbCOicFNW++!FHo4tnBo&7sM+Vg>YaO9|q|@ph%kvZDP0xu?T0qRF=$oIC(ZAXQ#8%SJgrTb#oRxfnaj4Vp zb*$j1hAgmWj-hybQQ(bOW{IU#DmhiwYK%RU0*sLpIqn`#;z9PEJRcL9krMq{AU2Bl z{N#ao^tfo@FN%DAEFnvDZG|{hrDT0mHJ)ekQ`Y5*Ueig%3kefj``bIlEhJsRy}WFl zJ;^Yiy!6%Qlf5_?Xt*}cahgG^T}iwfp^mSb~_WIAgW};(N2)&ISNf90VYK^`bFNY~F^(8fFvAD`l3RdnEan}pvNNLB(@8zz^ zS1-bXGL5UWHbHaebAJiWxSEl)ggJY5K_ zZaim7b%}(Os}**GetJn`%j>}TR`9tj@k{t#3euhe{q&%fR-m1nq0tspqxlwG=)JUC zco+P;^382$Swa;^c%)Y9q)T{voOVeixsf_$&#~ZpogN<;G@`VA8DG!C4asV)j-m5r z+IG#7F9xPZG83XaeVS+KPkPXbUfW3b9;p9!JkO_%y~MhvK4GXNcqP&arvc#W=g}<3 zg|>1KHkx$%3$VKrYVM@H*8}YiN`DA?bS0}F27I1SKPC80T6PXTp}otB&?PxX1v)+MoqDb=t=+=-Oj8Rx_6RMx^xX z7=Kr1)QCTae>c4*&O*w`^q%`{UyyeuiEzKv|ySs`6TY>6IL(kAnihW zqE#SNOp!D)nx~`CjyH05UQ4;|oX6(yK?*ZdjtwYO(6hbXno;lD2q^jVW4??0djh z+@XT2&F>fOG2PiwW^eSQcc~a+h9#9#rG!++3bpmZ^gZn+szw_%g z!t#Cxv{2!CFEw5j<;>!(J)YG2T>bhY*yxOynD3=U@)angr}3VlY60Jl+FomfyAShN zQeOoh{v`eQC%BfLjLYz-ErloEM9=$pzMdL{vO6c74gNV#?V;heuAqM9 
z8tOW4enQ}{BV1i}EtemV6^f2iULN)6)A;yTGxbd!OX>W1T09nex=KjF)9Zv4kz(hD zzthx0WC6c55n~ILfbzcd8l6hcTvwq+^KIH;F8Riyu-oiWmACcWr;t+#n4Cs^U=5h6 zG_s$$PVmDGN*_vywvMz>aCf%Sl2eAxAg@$pf4kytyDVF3&2;uNpZ@qi`ASk@N_joa z`V8cB3nQPtp!5gX!gBS9WvP#(A7Lx)GQV_cU$V|cr1593{0LIcP3*(-Kz<(5o-3L5 zCceC~6-N`FA?8=FQs3>2Fr|}FSw|`AMfzs%j|Ha7T^90l9^Dd>cO|cNy+{FU z^~1;y_l_~J2L&m`_?eCQAIHJfETu1NTX-z0$b{0SA44fgDPp=kl#;~fdU*4!TPw_i z{3ao-AExJGU2%>OUz|XlyHI8x2Re#!E(34>68`4=sk^yW z)3Vk0vbq`7#f-#vpx!fx*_*c-{7I^xjTUnVBT*$>`L+^RxSrvs(BAIkO)a%fUwcWh zjh{z`+uupFwwD~eK;rHFRJ|$pEvmIh#YgZnB&D>yt1xG| ziLm4^$scTgzWa0}a;J!trY^74TSgTKMtPfgBs^p5MOhVR5Q$(J&{5b@xngCn1~(~7+RDR`$$DX60$ zMp&z__V82A`KbW)W_h{)`XyYknY{KiYIUwSS85*)wR`Bg8ql{J?Q|Z`_VaRk(oMnIGD-a2Der__JMQ?xgCOPs|| z%F=rMT5!mTl77Jt z26A@7NbJR(gbZop> zH=q9LZ&rIdo*GO)lY4TUdbG+NpO@0g`Gh+jN(x~kmym0#HE5x#%d|2A{w&|JFnl+u>gkaAI4&bqqEy`E>K zUX`CKMYY83Xo;zMymSM?bJT$GFQ5J?pK+7k8-PCQbJ$s;&^rbh)ywYiEe6I3J zD3syH@qS6CUjH z9HrsJq`sn_)w#X*^C?5$qTc@&Y7x`(x)*ulehDevYb~Yt8PN)9<>ZN(?y4(u*vERn zOzOOuR=Z=V8u7;f)_7=cp8Nk z>F%`G2)`7r0gVhMlF4WzC^roTbgtK<3|WfsVv6s4g@{MT`Ulv#omSd zoP!V-#mcXti(ScEUbEmDt+7_qQEPfD-ECh-wmcMAIEQl8a~=vU(A_)6T7sI*2f+eq zR<5TEQ5RoMiqz%T0MET>TS4j;R%6Ov7ZIzSq7Jt^>)n+9i%9<;NQ0D%**a~Tqy{w| zxlcDgu}x}r+>=)n&`ZIatC|cK@1?lZyRxkEUl_E4gVLma@8o}}DIF7ZlXWS{*OS(= z)^Igvx@^aEV#^Yx>w-7iR$leSQ)(P#sQ0Ay8sk^`Z)!XZ1WU_Q@1)m0>lMm1ky>ii z(BsXm_T!lA1^;`()fPIDR`%z5-lQY-jp5MTa^75D*g*}#{l~yuE6~eTO8Jz1$E&7wvFaDD_@|dtmpkYYTXlR+Xaeg zhayv2na*)v9xG*(dz%}hCTH9U34a{_Phn-)p2wo(g`~06$#6F%a4-KJw7M^MuL;;Z zYERbTRpTdr+XEa^_wilN#3__Agb_83wB}tn=wtKAnMv1hdE};?_KC1Cv{NsJ-kdS7 z)9Opfxi5OoM)Lh#(6MtQ6BuE4%FO{bC%{kKgF6j>xu3eY8V)dtH4im(t4%;qdAci_ zfj)ar+1Wz8DP&iItOgs~1KAKz8OJupuL4A@w=@;o@2F4v=y|hAW$XE|F^uR3#1(f+_Mw+vrS7q>O^ftNl0I<2wQn8M^PwUo zS{pR3ePGI6(K}{BFRuS~WsFCkZh=}o?WG{arwM%q9qof)fol++A+!}Kpg;RD{MnnB zpL+t>ynuB$7F_yAIE`Kw&wh~u?gCt0(OIFjdr`)x(Ip<{sgt_yA=a}TjI~v(DgR;S z|8fWX{SmGwe;m}-MOo81yN@AI}G;dU$gcY+7*6dyE6FT4Pdt`ocV)K|Iw1Xz%46qX=>Q(+^V8{2r8sLLs#)d6YJ=<>obom^JX-IxQ=`V8-lBPrO@}wnnav*IsbC%9LA7Xd` 
zjQAQ5T8PwdW$aoh^=PEwILZ>@^PrgHp$7H0V+hF_T0=hjydZd)PeaK&2LASB*yuIn znxP}0cJGs@XD#LHEh(U|ocCE9ahv$}fcT?nVd}}bdq3r*7^1A725!CvRt$tAc)np5 zK9@zI=j;dn+=iD&&G}=@2VM*;)vTU?l73I`H*U8frP1Ah#8BXVGkx%Ehg&JpGZ+#} zv~cIZC$4MomA23X9)k~^36E5tmhWB+gde4J=jhkMr)$(!qo->~_axfXKfwWeDf<=j z97tX6u~`Z?{SY;)r@E5&x6q58BcbCdRi59eWVjl7--Q=9@|FOVRMfk8B|E7#dd0D<`yEy-&7EuQWm-9XuFIiWNob{>=%>;XA(T+-E)mhmlX;|LTO-ws< zs4OzaLjKFvd7nzZoWoLj%-`Ne>DAWs#(ft5r@~{Lom&DnFQ;DR)75Zi)0HFVTBo8B zV(saL@XQE5;U+{rjW@=<=8I@^o-Uh^b5)S@oyl{DVGrGmC%UIm*6iSV3wV=^@J%6`pmt^h4B=XCHYUgqEwiLE3GH!&T>yXJ2~khal%s zYHDO!cxscCw_J{yJitrd^cC>G;BIY0xz7!(|2d<08*kQ^{76k_AU~;7{lsOtOZ}2paG9>TT>XUG_ zm1;)iUAoK5kXiAZoK`q^jgEh%Yxa9S?RK^3TFR+4(qziAo=t?hvk1EgiJtxz*XF4& z4r_24y+V4c?E4nrw{3e#&b_)@p$6tFg&f)W0pU`klkX+W(gQE;BzGt|&t&4&^0Qt3 zAI#4_lqsY(;iV)rz7}_7kI#nm_baM-dTNR7QL8@)zM(C61h9~Y%NaW1vstFtqOWZ_ z;bXw)c6#ax>1klohY7n8jIIESbIBtHr{?1N!2)VHp87|SOBtc{uJ-NNcxqfqE~(iT zio*k`X%+c$%w{lJ<&Kt2F+S2M`8sITSZL#FO4olpj}hp?YWQsMl~2+OA<+}*^&)K| z#8qaV2WMH9l(r67Fva`!#U85x-F%obWV8{9FmR zkv3YZ+{&3V<>eek@*$ve6nEF89u8;p1jCD>e)~L=@mCUhQowVxZ7!aFXIl?p?!1?H zck^4W()lHRp2jA}X;NTvYI%ZuqzVRnml9mN+D%GFyPNi<9Vn-Aw@rmwjq}>H{mJxo zQlvD>Sm{~sfid1w?`ry$^aLqy>$GTkqohn%mjB<~%U1N~SI(T=*(j;ZoBmWW&(rYz zK(YL@7OCW_@~Fhb)PSedYka{KeZzsLhh4Xz`hPmdU&B#kO zHQ*|?B$xAah(YiKOVpy9&NYP^pCgwwcknNiXYfl+!Ja666S6tFS<}wUwOCsR-cGrb zHM+{u*s?X5GW%2`yauIzw82+OJ|yQcogQc9YW^(kvA`!|z6PDnKpH>BlkY;b=?!{0 z`l&7?_hWSvzYe(WaNuNLbETFm;wyM{Pa*YCWOOeYL>KsO{C?DC`$}n02`p>{i(2t* zJVSVve0HQUomX4nw$}JYS~rstt5Ac~GaqQk*K*cwFVZfjEmbV0Vc{?H)bHLXV`luX z396Y-iO!FSrFT=Kr?ve#H2hIoF%^FLzlnEmttaMG(7(Fyt6vimTAa@rlii?~{jhN6 zGkf-FIB+dWdk=N5qs$xNds4EWu^d9HKgVwve5yvsoxt@jV*TLEuZVR`>1;}U6i)C2 zV>2u~EYr{l{OIBda5YEbO3H7gC0grVn|OOEG+Sx&6KIxoLT@C8FntOQO1@Sl=X3A} zA?z&NQ^dOCPMm9Je!mKj6b?P1#V*DoTuzGFjGmloExBigSH#i&47^gFb0=-|+=w3F zej`4v!+Dw&nBhp-Nkg87)Vt+1&a0W{t=yt4$@Y8MwleI0zH8f5`E~KLmNh_48Z>9gb5i3G_Ql(j zTy2C+p@dwSm9^$t7oT4T>Z2mnj-2ZAaGABCqYnzMq1H7DtG*XWs7`8o{H1I&NV95Ra(!=jP?l0)(F{pychg?qV$fl|3Y2vZfbjC&DAJDea1_D 
zuz+7JpuR<6$8-}am5E6+!R5^{M)D32%!l*_-S1ZBGOsK=lK)TqV&UW=4Iggg__cU%K-Pxu|O-288c(w38QpBPCc zs{GsklPJGj8&?En>`o222Bx}wd_7}LpGac#NkFTOe0mLq(2a0G{T@T zP_nRJOKLR+cjU??^Q?YX|D4B}<}bty{nt|*OX#@TrjD9^5|IXeX$C%ZKoyVx;DZc}Q3wU;myE+TXc8*sbDJ3dt{Pfjk zp8U*WK`kfH3vHiT$U#3F{V#!S$5XZ^9$gjIhq(_z_sS6y+m?qUQL=k7L=iD1RcEUH z|9;53DgRP0SVV{tIAvRfam`iY&YjeW>y9$5OMG9`_%$)VfBE@biyVI(o*_GWP0LhG zDj$H=*&8LOzk52F^?F9` zvR|y4k;Yxg-2!L08GmD@Hm~bYhEU7+5qnhQ zZd%W=(z5Z=GRY_Z(?gSGrv93-_3dfoxY{FR5Lrr|!!3Vy{JrB}em-NAW3 z6kOptgO=H@#1PlFya3jy+c=NretE5nRY7G-!9?3x>U^bST1D*>Sjkci-ZTSz{1mCW zp#oRktPNj_wfPO1ramU8{RFdM_tP3-d^7#HHuMQwF7!N|zk-MKG;moQl}c)~;7Hmy z5RN6E7AiB~3w^}8;&&=#J%Kzw3+j;j*>g)$wtE)QC8XUz$iCDe{yOrG#XxAU9Y2Vt zr3`NLSMP2e_;>=_1jc$Qy5Gsf`;pKq&`=(Rf1e0{QOA?(?~ZnG8RVwJvqix$>nzLWEu-!9LfD2 zxY;n;HV_+bE&Wz|d4d*sUZQ;XNOC>{Bs|SCbx--dyv+70%cP>8Qqm`g*Q%Nwvt1vf z2G5L_wp?SO)*>7i@T-9{p17`LE&?NZBd*je)S``jAnG(%Ua1X~Z-4qOwWN-#FEF*2 zl)44&ZRRtk{~rEw{kZz#j@SR4E@NIffcLe*gS9REIme&Yg?d9RQpjmG$5RDtGEe_Y zP9cYM_PdMtA}Azx36|+rkiycPk8^$JmqX@ctQhec@vate1hqH3r#HsC^dM!(2gmYj zT54ta+B8D0hyyt`aQ6rrUL_Uo^b3d(~iH|A26Or{) zqgSQZKg0YlxkR}`*>QdQOSz?-DlaXr@>xufx7C4_W7W;m8e(_kAf|q+?on(nqb$oA zf#$PMqZ<5SN4}yRndy|LWe(>F%IET~G*a$3$LpN3nr?l2s)T5zUx@j%L&`MONSj)L zad{vy^0QIH2f~`iFeUgTE#v*VYO5IMMOsR9UHt<89 zo3)p2R4*c zF7MQ1t(Qq$o)JIuy3$0+Zm4p-7Gc6&ui*^t>K;|n-AX!ld#8UUajgaJdIGiUK5o*v~I>lV&N&6a76DZ6cfWuk*3(XZtGuE?p+NeS^fa9tbmv!X4dq15I&7iR>E zh}{-k%ab4K@ve^4j;B|-qb+n^jEx-1{RgSzjYk_1hT28iKONBH#mwD${-bLn{qEc` zJhfxREW^{HkFDH+D9p6GI?)z#KBPUU&{=E8@Ar6EI=K&{T`y9WyKr0)SR*X=wRBSF zp;&bCO7YKCFrOn{i^g64TC`HEy1*Pb%;{jlK)Bs?gw2AFC|CU`yJv5!udfL0qci+D zt%N9ZR#BIyNsOSZsg#@F`78o6S5bGlr>-S*#R@HYmOg6V^`Q@_`yU#kc50MbAw&(z z%XzVUYW~*=^)30pLUJFtLz;rOv@D!sSE@`zW{3yr=U+u%q}Vn5at1*A&l1Hr?{B?? 
zkJqL@ig|0w|LMK|?9&KJ6Z@2~%^e>d5z@b$CM7mQ&Ea@7&qgfcQfshZ^ZChPEz#LQ zJxI>vNsCqR;SA!md9|_2-Ka_i=fvINvyx}EdL@=A%Np`-?LMjdIgVMbyuoqtj9OZ5rp!opEUvNYS?%YS+~GDIurRZ_P> zYknWDoi#U(+svT;MNqx_i;qWs4?v zdggV8AbU?IzrNl87XGn?yz0v8Q-k4}&V?m!*hq*n=QQ}0a#PJs`0q>4-6eD&keEfguMKJL$eu&` zIw^L%^r4rSw_{BCCPo$Xu6!@=YJ5Wi+Hw=+?1kXMbl}v>d(N3U(i>=ndHVQOjvExsn#b4p~J|KSw zGUxy-$U3-_7{!ioDr_ynlo5 zZxDLt`|#mk4K>^cXIP0$`wCdHgmZ0M{|;?Gi~Kw6kZ&N-U9EZ;WqN;|G@l~Z zRanX2=WCj~)0#OEbMYbnEAG#O9cO{>2Y^?96CuBbLdMX7Nl@I2^yB-~@>^uDFroP?b8L!dX&qKMVquoO<;SS(?4%F$6^FQad;_H1r-*G_P|1kutNWH$xqvfqSw<1}_QsohC@mIOe!PR!rb@nH4!;VrXS8o0tjI1nnY`8+WwpCWa0qwt z+5&e(4A`!Vs@-ONYMJ$T)*?{?FHa_mTPj9^c3R_jo_v@Ati&bDe9yu5<2l&Yhm!4|tu7rtDGvhAmuIlqhGu zB+od4OeT_hXfJpFeg-%41b>=ue-N81ai}V_h|YwhMzvgn9a+MSF@eGV{#{PaFw|CJw5*cx31LU zJ+$Ub_xpppQBHCoc=I+v>&y$bQ9l;aneTea*i6=jHDZ0=Yh{-9XMmmPhX-Tz!wn-yp)xE42CmN7Yzt_9q)#K&=jDZ?X0Vt#;y0e9`Ejl=MUg6CWWQw>VR|Yw7Zhux!aY?X~U}` z$%SzH)@*?jVeu|o-{0Na=;so#a5PN5+fiDO zJVV{@_~JTb+YvA7y0;OpMr-j>IlvRJI9W_t3;N~mOry_V@i5=w+ITSTqI+veyBfdM zM&ITc@wqAusw=yQEuACh_%(Vu!1x5Kd$Oa=-__`avQ=6~edAAySLG-f_4xeq@=)tzQU_Qaj`TKJ7<(E*3jmt90( zI-~w1IPB&AI!CJtsAuv}ZE5wHEa`3>f0`8cr9~~Y{+zR>@c1X541{qb);e8$UWE@E z)mqQzq|PVw_dAx;7X5e7`6=#%j@Q)MS9=va`9bNC1)kkl+H5?W0&*XakAcNlHTOZ` zc)ZGfA6LnhK7-j<^@kL{Ls7UNO+A|=9w*t3g?9X?_G%h-Gx^nZuNBMmzIg$pv=eq)ro3M-X zeERVqmlQOv;Je3Z{cy2<5}GSM86U|roO0jXj9jGGT=v&mh$rdKXD6o2z?k5u84UiU ze6D7kGbR7<6SRsEob=Ud{OoE)Yc=#8Gf)2a;N)7xyrlL&E1Qv*w%|60Q+BR;zKH!P zop@S~iEe?CI$D2QA9wDi$et0%yyyKCw$>bvj|HVIE@r=%8%gFoTnUt{QXN7QcY;aQ z7G-p^V$^zfR-O^JuJVcGD;a-iK|b$0%k#khz$qiS<(NP+x;kjc)71_5nr!G6XRm{B zk>iX%rA&5C&Qs>(IuDW8TxBxDZvy*08Fjhy8yFpdJDCe}6MM*$nsRFZo zCMK3+FyYLupyr(!T?^inbNTirt+Pbt{&<)5iT`46d%E&#*~~9^(9oTk)Mv2n^OV`! 
z%$~o%AW!s;DtaBs=`)Koy_dD<$*$>Z_^Qf9mY8QnTl)@XF^};2nT=bA6l%DVCk{Kg zH%#3~CQpwtvQ}oM6{DZI_kVvjkEP|_E8qs+|Ev?qW|S>YAj`4hv^7lWeXRxk%y!x3wv+3|1 zeCB#yskeKt^P%(E*aLL54^RG$vekL@>HNoQyx4j$pQCY!F&oKoh8pvg&6C8h$f6CY z*7yG)Js88=EmqngoL8KXa?;@?`8KKU=jgLjQ|T*032 zW*ZOSRrW4;oSvVK?kDN(F>3e4;Zq#9WS1Y}%s%Sg%!aDUN9UlquTpLC>N$6pD_@UP z8>2J3G;RdxZ-tLIl{Veu=p9stme=5Wx;XMW9`{Aljkxoi+TXyw0Sq#h@hT}CfD3~P zn8~;%i*4tVUQ@zi{4~kh>R6%MNi{#^G=V!#Ze8vJ88et zrhb1pR&f-aO8KmLNVz{#E8mXDMAVvYs)LwD=3QS(C(TXsyhO zGCr2)k0pfs=Ss!+TX{9V`=DW%Po8MEEHEo)ihSf=jUtCB%VzhS6H$}*Fg;a3{fn9v z_tI|Y%x?J=Z6u#c&XY(ycvHz>{!kJBS{7FlDR&o#GH}=O*QKPa`uC}pZkxD^8D#!pLwh|GhmvbEl;JtcQydV=WAt!a(ScRw(Kse z>NB$@InuVut|hn35y^N+4Od#gFIjEoN@e%d+sL#wn|YW$L|fj3MWnbhJ-8Nc-c~C) zbns60RnBqC=-m4SoLogG=+Ja6{1^NKs^2kcy2Prqb)5HdsTPyeIj z&$XhWX2pNzp1tz3@)f$7%y1$Z*ZJvQ#yBf#MF%HnX|VtCwD`BAlrL-hsv<{T@V0GhzH}bvr@Vkq6pNr_s-dagkb~oSGowq#F?+~TVWC4$p&mQoY4)&Gs z%NW~xO3oACmhz6@Dm_cd$HAF_j+e!vx9RdC<4?EIoKY}&SN(mp+=M^=Cye(HMIRQo z520&g;W?aLzNuUflK8;y?OOYhy)DtoLFj3Y#s(md7GHXx>U+?i5a(a#S?{2sPtdF- z^y(#Wt_QWR8q?rAUChnM-w2VTqk8AE&>i{Y_S#>LN7Z3B5EU278ro^&IWU98NO%r` z#c()ZOV@AV+uy;Hukq_|a@49QxgH;;(Ep5Po=gAgx&9A$WmI4`@03xag*^JjqV;H3 z{*9h%S>}D>bw8Fe5ii?l_d<8aiA1gCV0Ys}2bMToMsjx{)eE&+m*r=yq$LQG94{#{ z+?6z8vKD8%awuyYj+X63^%?&A;PI7eZU=*&;C(61%t3K;<3(r7(W{DX&uH;^*Cx}W zUBP^~s8=5k8i*V9(Q_rJAGnwCk1@EP%zi4_W)I_`INzUa>KXYyUKG3!G|%tcX|KJ% zvdKqXN&eS^hV>?c9OqpSop9j>mfuGk{Y29@Nau1Cv=Ht8ji(KjX^DH2X=r_XxL&T1 zv5jAoRCmYs;m76V^c{~g0p!e`Y=YkIwEGO#tJ1Df@a{*tRdM8NaW!umTSULNljU?G zud4jXavo?jnueonM=i_);}qAX(AvXr<6Y&ZqWx)QGCDLLC-U624{7bLbOStm*l%aJ zc7gwGBzF(~kH*V?I~$DyGucq~M?P0;UC?zi45k+J_Q$EwG;AHdM6WIe`%xKHExbMp zzFlEEOAAl&ryFs(Dn2|2%N|NTsMI&G{@m{X_l|wwSYZD zVvFhCYC3r-I74AR6iuzb`cdsA?0GRBEhdfUiZQfRpk_QT{i|G@F!!IlP%T^NVR3x0XZm&liRky&&VqfuoqJNeIOQp_xb%!A9_>><2`PsRzF z%H;B7-%Rdz3di%KuEMrY)`AtTd zXKc+KuLtK-+{k#}ci`Pe@BRr=OM0ELv8+oD$1*!4qk(yM%;!bMka_b1-OEa}U+G0O z;79I6cd$88z4!fpqx2=TrN0v4b)FwTPjbl~lJ$gp*%4$LK051N50k^3@9tp7=ev7= 
z{AEja*Fx#fwKEw1R}?yy(Uoq+IMfqb%BW&(-vek-M!+VL-j*!pSn>-#m3e@1V&QRg zX&8Oa%(~-1y9f^=<%|SJmMuZah*0X}NnRY)ANAUEq~_ zp{^pA3GdzqHyK!k{$}j3w2`dqn&49tF6ElFiurPz^(qic-6~uD-+!OlgiS?>a;!3W zQ8b~Py;_OqTPtR?gJEoYmJDNX;S0v_h|~C$>|32RsUvZ{9=U(Si)6o!Yn^}U_z2#9 zn*P#Cx^fB6SBvaZehH60kcV7>(_Q^(ks*N!EKnq`rgX>{@uz1xG7ALFIg{OTQ zw7pUK82tW?10SOC`2y>O;%XNhNG8=#`!C{4j$Wooi$#rgYF5YBBT!TmcCY#zz|T~H zM|TU3oQ#_R8utD9yd-0QOoVbzX2lAG;t@R zZujHQ!^&(!LIY^lm3}|g=KjiOOlX{ZbORpbsY`tp)BxW$$W4=ZJgxn{1>YZVe>@#r zPj62{UG{xQuKbuTX7bwsB{W784a;GD@{CZZ#mw-Prs#>epuhvqbDMwBeE>Tg$%nBT)AaIM0wv zOL*>zu3g31MQ}M9o;SMt7;Hbq(?0mNoY$y}!`*S`_jvv~4qXdIS1|W2#)yXTNiCH; zn2dIZ%X=WzBGH;K7^v(t(%%ziRp9mv3RmLg(Q@Xwxba7D+rj%XTGz($JlZ%4??0hC zS$EI_H$J1s4Nx_T9VL5RtgYtg>WPn!DUI{V?Fz6_c+{{gG<+=@i3Y2P!ZvI_R*FtUjhGhTKX7o`qS`B$hxO?7W03- zQMl5b#xk(vX@5k~MPxpWezc`^(@?({oLAX#bs91Sl^Kt1P4a7<&7vK*IvNePk8y1> zO=~4vx?99KNu6$Zo#$=8mjBeHE7Mu+t9bG}+uTjr6Ts+7H`nlOO>uluLHX@8`5Ulm z!tY7fGx|M%?3Ur@|0yE+&8{EDcADbSI?!)m@zubq>dN==`DErwITxtnO8zr)v>&}~R+Krm zhz0vOP9)l~xK>fOeCn4ce;G}xcq2xhOzkaF41q&!m}K{g_feBQEGu%b@@?@M|EVpj zFXwL61iNCjS&7efJbfp9v_#0(;AP*Ix%4^fCo_jV&$6;-WyO5uUD#JB%m~tt?w51Z z8W#CMMkZb<>XfjPi)Efrp03sKJ;HBYm>;c7MkdCgH}Q8X-lBa`_e^ak8%$)srii>l zV4kx)H5{vcc1GA+?O^sdxx>ExYtp2Ds+m%0IZuv~mm~vxR{d?*Q|36gqotv4JNk1h z>K>M-Bqs9|<%j0Nt0C>nyurY&&bwCN9ynPAy1?%vtS@8DTeGe3 zWKTzxY5Sod{*vT=LCaDL(4I=P`zzP?&{9TX-q+UhO<6!?_e!nE z7*BOxHGQ^nm!HfiUg`KTw%Aq6*ZA!1IBVmQcP10coZ<87N1h1=i}KU1(1T`lD7!^} z<7#-9yeFK?IM@r0xAx7dj{Mj59jsKj>NIQ1D_}*ZGM?3}xKbH6GV(M5v|&o$=acoQ zw-mhIT<3o~Dl?&qyXA;$_NLFwxr(dhe{#HLo316Jc!4$y;4234P`yNu#jJWYoxZ{6 zTb_3Y-*g{aA3)B#idv(1y_aRPGxWcUCb99xszMg+t%bQ#d zlZ-xm;#xNllBG;jH=`=`)X3P%?@4KjjN@*x={SBW&!!WnALma7`W;0d|0o`0)aN_p zcj1}#B>yKxf{X**Aht|%G?j(4(oQ^Qdw%HJHj`8fIKg%rN-xU=PFnnBqyjv56@7dG5PdI`;KUxjW0uR=VlPv zp?NyI7T{TH+H(k8o3hleV3rKy6%l>D*m?^{KZ^Cec-}lAeUI)FQr)rl-Q9KT6+~14QFaf%Ey~zCo5X8NTD` zL>K%yosHef!`=(KnLTJ_n|AeubR(P6lIi&}3vz7j7w!+3YH zIF3zCBHs^rzT|^>a(x)dmd`jdmwqZbwy%Pe5x z&vJoWnY}44cY{|mWnb~#mS4^eh&ih1dft4Or<{SBT%p7$^UkyW>nR 
zB|{1aw-EtC<1jY46Z<%ek2xRA@$k(G_B@--xcN0K!{`Y)7KD7Rdb$3;77G3!#iw(ynfRMdE&g1j)hVXxa zbUGiLcdR#~g%jxeTwc3;X1%@t?sRLg_CGCX+!CBgv?drPuS=WRQz@-yPF!f--o1>Q zWhFp(_n1C>R-t~(CuW6UGW(@+(CiIcz9pj$C>aw@Znm9I_Kt3kN89l-;ne~yYz6C# zj@=8=VC`g#c`KAQ^SfSD%saHx4|r2t8C$8l{TTk!f)s~5fccJzMy1CHR3qfAy=uJABdHS<> zx|d1dDPHnc*1Z}gucJQTLryX_SBcI}(7P*oqF(XB(1!CV} zMb4YN=t7a<48ED$B*z1IcG+kUQer z&7johvp;49bHsxi*mG^Z_%qm?Ob7DZDc-07u8crI@{l{U*9dj<>3(vY-pXCZst1te z%QCu)lx-!Ff6r`FG*xweKL4Ts|syu(I*sXzQjYvo-w)Le|5 z#s_R9y^M#R?RTIn2jbX9kT1lQWT$eZvp`%ZcJ6`)_1&$;PLl&)rBoZ7 zm`Z1FMsHs@3k&(ze$2>{B(V#?5yJ{iNfH&g&DA_}v zrY*#+?P0SRHFJ2NBehizAC7dTn)}T`I#o-Haj=!#@?xBr$)5HVy~~veZ_>(OkkPGk z!MhhH`|^m%t=iM8GO{OEJ_OuL;kO(mb+z8q?{2vCnJlC`&smGkEr#t7TsX7%A4boT z!G0kpn1YXa(tMavxXc2prk%v_CuKZ&g4_^oO?@)^JgWx7&2Dt)C^=_49$^ICc>))9 zW~&+5+yzg%XtRYH|3P~P-0I2u<(c3!JoPr{%MK6Qz%f0S*j{B&&o+j%yZ@8X@d(V5 zWwoH2W92D5;XYcsjkK~tjcehu6fQ$>=nx!xn135Z8auiEf{eHxIC%>EQlaH*SY|EQ zUG09jS(jv%;OU3_)LL-b z8!2whOP$1Pw1Z7X*GI9QJmI~c-)Tr!7Wm}tQ=LH@tj1q`pI}*wP}dt5zJP7saXGR$ z??i7iA~{;=X@w1b<@!CW^cfPVPd@Rk^^An%iN;+dTn}v-`OHexU!yo9@^8TVbvizo zG(W?`i^-@lUK|Fe%%*$9ou;U+3+E?5Y@lqiu1RFD7yWvWHH~6LpVHYu&a>xgQzhq+ zMpvbl@&+>sj30*86z8kyWJAAM4f8JSA1w0NjXc(%g5&=x{LV@~XduaCEnMcpUCMLp zhSO_!_gd(^j3s=Ehj;s)tc`ow=u*_L0CgYn`4jlgq8-C6qjHMUm+D6(N1v|w zDRK`k;(vKBV@Tz`E@hO@ET)Vbmg`%8LGrKD&i2KXvc@)aYb%_&U0&237bX-q{d~kK z`^W95#9!FhBHl6Y#LC!Rb``st1bg#B4{0ZD<(-b%Hz-f#vYz%D_Vh~8T4pC*;wYn` z63K(0k$iXZ!@XB3yv#uc5a%C-xxtgClmL+67dS5cj+fK4? z_gC_bJ;C~w7QRz9S!?zr$X=CsdfeE5a@*QPmb?f3YsQZs1#)t?^c{YIgL7%yjYVta zNMIXKDvRi!lU(w;%sI=u3`(h$>l3m!#r10DEl+ud{)TUQQfK*3ZkN@dgK;dg8h2)` zSwFBVAGwy#d;z?SQI_wx%$`~;c%o+dKIzeAPuXT_C9}Q7?<;h5Yu-PrVso_|6D^N2 zcK`lQvfEi3<@LO&q0BTh9-8@y8EyReXGdrMy}}@O|G>tE`H!{?6OD7Nq&i0xeV-h^ zU);-4dN$?%=HFZ?^U89s?`F>0f?ke?mUpu1r#o5xNlQKTjPK$z&-8l}o$4p9_U2U| z;A=9g_-NeEdZ>Hx`X(`MoIK}8KJ5y=X`yW89Cm&v%fAwg%#$DEGp*>i52KMCMaYb4 z&S3c)MCi=tdYm`NPU-b|tOrRtGvB`;D|wQE?CLeryO(Sxs&}B4M&nLj`qGbt z_RzurcQR||K+xCG&VkxkK_AD0*c9H6s+ajn4Yjknz-SOkyVKm4c;bv49ixrlI$X@! 
zhmLlKb!GtPow7f$oy9!ZtFWl0%~dS$6j5NgXqhqMJZbIAnhB}LZ7S{ zs3JSPN?V!f*ADa%N@P7()|vLyc4AO6im%DCW0Adm4Dz`~ZT{|lp4=~H*NL^y;?gdp z+#lS1Xk|a1>00SeVXdrLcpDc%>Dzp6$_xl!}R)N82M^jwOD)KztJCiJ1!LSW3?vBgf z(2I;p?kt9kceIQ6H;SA)pyMXjPb;`}BOld5JX$L!coBCpqi~J0j9xCpueU(F&{;-M zj#REAACp;w;bNZhRuzA{)4nQ>s=9YQI*w2;>pv$I`DC8^U4%!C&~T8p2BA7@S&xP3 zKKx8p3}#e3Bkti>IQ$xqnB6LpBhArHO}^k(T6H~XWdwYlw((Gb|ndoMnMy??Gr6$EN~IXGtqdnyO)#RJlM|R z9XiwCmhk!v><(}lpK;e(O=ed2TG^VhU^{vIt@96K4!JWn__f#t-Xk+ zb4c$r7*BU6D_=f$oQyuJUB=+pVp=kSUS*`HAsx&*>{jH_g1jzNCr>(h!7kdm3y$~a z(OR*WJh!e#-zL-4*xgI`-5jJ&^yL3?@9Ff0OelC|Gz0w3m-!fpxX{EqsqA z)kgf?0BwIyCfN-k>-8T;Q&!|>KEdByS+2eJ$oMT*+*r+Z;FqgQGVgK#Ib|MmFWmg1 z(BRRmDc6WyWFM&HKDW@`+IYP=R%fg&nLy%kW~lvlE?QA8Gv50slaa^F``c0J z+}ql5SteP_vA&7*dE#GDGb?Q~ldzm4IAPQE8fe@Nyvz*9du`{6fOFl;QEF!A+@|>c zhvF>r%0}^M<$JkvzeQ0ZZ{~nSuvW=6-t&z`OVsZv0~Hv^dtG! zA6-i}mN|f{e49JY7+W&oU($zWMcr5MFl(D{p|>r?gz}m0T{xO3-9o+W;gvk}eBb2F zxw4CI^24kFegTeii?duyhE|K+odBPVsFa!G?nZ@wjk?SN%q+^x)krRTHLH6>9+9!= zU&1}}6f3dL$nt&PP&HS{k&asMQs?+jepvP_vV% z$zF4u`sF8YlQ$#QS@Og)0gOGUoqi+9Nfv*IC7SpLnQ9IW-KtS94Rhx261_>z90uH&;F&|+p_y{zOycY5&y z57N)mXz>|ZIh4jcDF!tHvxRtzaH2bIm}%5Z}91RcQbl2gP#3OsdieLEna0mw2aCw*5(;9 z(zWn=4D9+a&5X5~xX_*#9f@l{D0dLb$`R0$i@bTFwh~QOsrea9>cf9LJI>B1yRgNM zXr9CuWi@l271jb_ilaOA$ub8kv-mStGI*ZMlMI!MT(9np>P&X;Blsn@JPzZXVfnh& zC&O{E`dL>z1>ZAA;uLZ9069q~5pEEVf3Z@>(2kej)7mFH`kn-f7x6v2q@0HDnL{{) zoEPzCSvNBg)QrK_1Uah{`lEeUy!%EJ%Zk^GP4~r-^azfkgTM1RfktMf(IQvIgWR3( zd6BRT4X?xS*y!Uh|DuxzV>f6fqU`B94k{&;qHv5etH zl+;!-BW}x-915f3aQsn`=}ne3+|%Hr@H*Lh=FVR2T2`&RN`f0`SH^?8;oJx- z?qwBg#(K{IX*RjFrsEyqvw_Xk$Ag|Yx!Q3f)_JP$zIgTm4x}g39WBonmLAK^s+Q|f zmC=>MUC&;9|3dLPbY=W>s+KeFV7T*TWL($%FZjaU+5bj#byNF(aMx&i04q6_XZVOO z>A;$HQ8y!L6Z!d-xcoc_d8RxBw9naHRNk6?Nm2^~+vZl9>l*qu4Lv!Ur~6Zi%{^U8d*`&4gB=V|Kn_||Hnc33}08^f3mP-cW2|;bdviM zx%Z+67mF&{8*K<#?W%m=Liaz%&o|ljbX2wkJ^UCCkE*B`Bi7^@eRn*0895^~ zN%{lSF66UM2f4L=8ZIv^YSy z-FekJ*xVKHdsTaR7BL^i_tK=Tm21bM;|V*!eTIvscOKQG8iRH=48R z_feg>K}VD5Ug+&wWX|{FSN1I$z~A@e2lEu{Tb%h0hFuG;t=IY)S{tgTl({pDNILVj 
zGIo9{ew@Z{zeZBK(T>c8ScK9`=u6h)U5UThv8R`skJ7xR{6}_s=!T9L9cRa=uDF|- z4Eqt4s_sYG)BV-27{p5dz+~WsVena~*M zx!Y@NA6WjV{fBZ$W!+rMnxx?(*=A^YCR<5P zur=uoR=@pExy^nuj3x!dthCKj^1RLbeRa1asg`o8{Yg7{Pq0XC{|i2-lPh_n%(mJo z$6m`3&|CD6w=UXG2D&ZY%vCbE_l`JU;(Z(_ZD{Ubp8X@&4lYV&b#~T9Rg75PMp|=0 zI@a&jF!@}b87{Wb!YH0IBh-PN8d>j`H@s&>N|_^P#6^c~R>-eknQf+hX#BV%Abdvpc)jRKI&JT}+0a zz4nvA9!fWwvAnzuF7Gp{4EOXCvtFv~Ic6-p8Cq(w z*E>b@{gljen?ppJ>aL%PC(+N9JZO5^3*Bo&4lkiP&o8I4^=>p`IE_fem`#SC@Y0E@ z>v@JHuD{0OlaXJ@<2B{+?{~F6-+D4naVofzMZzZd{IL>cK7FzOb?$c+lgH8ACFJxs zv>yVeJ7itcU~&;}^#ZNS?5;`VZ~;lSm*KY7{!n_FnK6y{l}q6IRgq0>BSK{Fw57cC zxx7tg?PncmkHR}8Ue&_m*Ll6F@av{zFTQ9m*T>Mt{;2z&KKFKS=OR+=Bcj|5`*`o{ zLUtM5&)Cj5k>_pL@6S&jQe<(93f+!3{z|!It?w58G-GZj$n^We_G46y=N~g3eFEQ; zHAAl}d9Zw-K8i+wU4dI)zTgYcJMfSTQFxKMHEC4lW@b;B@NFb+WY+3RR8HjQSHSKK z7%V6K2XOj(aXr~;J(9Ry{SjnWg%!L3Zk|$S_U3D{qMK;OBr+eT%v5K)!Sy8)8H={e z@%==*(KZ^TS(o%X@UCEk&(OH> z=xHuDDaUmeiceRFJ7H7+LEGKwL*6rcqq}D-S(Q)O1@E#`U>j}M<1-`eH9X|w@NDNS&pu{?e1d44 zoO>E9YiTDlO|l=wfgXMrspr@?3istgFz?ztW)CQdQYPp4Bu~ z_eUkOAI|kG@CsMf%XNCu^kuFdLH_$X?hWQ2T;FPi2p)ecH#NnBsh*7&wxvI>&|@p)-t=Ay#hWT!1WQ_POf}ExkZE4IQt7|6SdVB z#`SosiEKIR=7ZtS!RN!Nt;1{e-Q!yJ0|j55pNko~?#psxj5`>1)Lvzc_{0~Gb- zrOtBn47``aEW3hs6mJi9o@buN(z*DN>qO9<&^rPRc^-2B3H+6g9xda%kCl85-+TF~ z&sooGa5JwWZ_=qnDqYEIKite5+5NQmz1A;Kw=L?X)0&oGAC6~T*-UmW8b`uC)l0v3 zJ~=%~0=;Nz*2WLOhku~_UGiAqJTvFNSND6@7UIlk@D3t}>=?C}zHNmYqu_tH&q^5O zsnJ_X_4HZ53v@-n9KGt*TFI^)$%wOWS9XQ$FTRhVqs{SrlJ<|(N6NlrIx7_>TSb3--qG%!nR_&u!`J zC1f~*gx`VF11ReR?vwO-Xz^PQw(p9PuNA%Kf05E+&~C=*tY&+H4KH@w6NI%S9`Am! z_CAI6736X^4Y`@FRbx-t2Xv+!Gh@vewa9vmJSAG;{7h_qU(3_V; z^zq7P%z6j18H2K0ewH0%WplpC6EeSX^Eg|XIpjRqbH+Be#oOBSE7t5%oz7VX~v1uS~8xqiafHcn+!HsW%PIRCs8-q_~WjXdjeFa_gNtqx--LcYgz0+ zX=*%h^54p27hE$-^jCQM5&NpddPj=aTcPA$Bc_#U#tX1K&h^3Whsxye+3)abwdWQX z9E<*neWH_X1?PrfWCS=I?nKI2vvwgVhNi5ozZf?6iTI)5SeWL`r5E8_My9tgDwH__ z+rgm<@6-Zqx%wOWQ4NlR9OvC57by~%xHhsVtvkYq%V_q-28y@xk{8A`o`7k99QR0GDj^vfjYFX z84R;uV{j~cD?72{&HHs{FZyU@Mui&|yxKxbncY&3JO8b~?)^gV$}1(L|NcF1Q(nD! 
z47xpwtz1wQZ7kn?_TP0X&dR+TE6#rYf1jG6FRSB{g->vty`&~Ox(|jEeC~q(7rwXh z2;Eugfi$cs`+Y!s-ko*#=5KyL(NcG^&s`#LKP9f>VUoRM)!msS-d^PE$g-~jc?|!N zr^b2mo%x7+^Q&3Masn;L$m501PvWERp)nbO{EXjN#3N>B!^e5DsjM#9cV_AKC%udv z{)aAf)XL=`JkKvAKkvuG=4nx@8UB3WU8To4fpK)62t5o(~+fljXj9Cpl z%boLG@2B15#{-HSrinPP3a|TspZSPOlsK1Pypb>5Lw0c^*yGWi8Euzovx{r@i3fRi zMH@bOsq1y&mH1y5pWfk(d&^;869blm(VO>tjBmV(rae{Qx)wh&KKmfc%8HoeAvcI| zx8cm~;z?!}bYyeeka0uxU-g@{+gWpU5}fP9zKc>>m7X?wpekAN-^u(|acY^XhlqKj zWHR>{+c}<;`+;#2{?`V-J9>A)-HgKaA&GlY_W;fRg!djR;=aR{Gki_;gtcm;@)X-cBp^KjU}PM!xZyX|F; zO1ZDVK=9A!i+ABo&TxGSOkbnf`;x|N963>qt3f!8Y_CI8FIw3PzC-Y9d(!Hy%a1Y}A#+ZLqItaf$FRhD+RrnDTj6yY zTsJ85JJ@}I-yf?V`L`yslhmK5cl0kYbu1|+_s%X7$I-qKG_@Z~Sw){N=l^PoO?#rU zxrlxl$)5$6BV{VtQ~X4d?!t-&z##LnKT-NIwfdvtWjgk(qutPSnwH4Li%am>2S1GLeehpIsjrh;}SS$2clNU#ppT75M>(_YFoo!{7{9kF}t$HXI_^#FN8Q@*U=lv3le~EZAS?NkGEr)OVaSPy= zHzQ@G!1vDUshv4Y@8VMS^Gq+Q3)wgK`zLami8r;-nB#e_KgrM3fo*my+CiNrG`=qx zUWnFx@nJssSJ(Du=-q~l4%QF(2m4xxV{hSkLq{3!J)zL@6~+_pGj_36SHc$Sr)*NZy<%JD%{D;KLf&;3l4^kF%Xq5H7RxHxsq}Ov z8i$L?+1Kn4lIsZn>&a#$PnS7knMvOi500iGzkz9H>YtD1qh${#!+pB*CAc+C+5O2Q z>*}6zw3=LIkV~F!RKdHO*z|dHb~;_oNN?i&4BqU1`dCW~?cp8|o+o1a<635eb*8yl z8TkRMI^p1#wD?OkcPF3Tudqg6@9`-g>g!)^Q@(&ZKo88&58LuWR#JN_K5I%ayMPDwJ#%w2BAQYNUkEiFi!`AsI1b!|iZ zXU4!_zm@3cAB+6AsJQcVK_j>aV$vL54E~}=NWwWR-~3u>C-_EjB|XC*p#DY zC)#VAm!qjiaMFqJ#|xteFCqOc@J~pRxyX=ou8!IUMw2S^HgR9@9W`Z zwZvR-GRJo&D4C_Z6?nDS#ldh2*E15G*~cryoxDMJM-oalz8|VGzd5<`cJyPKmTy!m zI~&Z`MsnB}3LGl&BWYs`626w#NETV{44XH(XGTm7Jjh$7YxqrOS;=or*R!K}`Vr+* zf?O%#|P}Wv*Pq_4EhQBg$?Hc~4LzUBcQDUu&?Cl=`2uadM^nWbI(ZaYfdf z$W)zXXZ$+8Z9<`m+2!zAULtK?q_sS${z!Z2-R9jb*-LnyYnfg6gEHA2F?;l81=}p1 zAp0WRz!TNv0kZp32j7=Pk`DaDDfpQcJnf8zEaBPi1tRF=6Gv|yO`Jyc=W`}_a1Tr1Dq^VGhpNc<#=uF1QfOA8(V z??ZZgy1I3>GYl1j@GE<7WxY#PHrcb)+ah}A8`h@jXXD|s==uZCoVWdCr9?T)_>=<1 zY~CzS(Jmn0Z_v=w(QibutndFF-!}~|ACvX#qS4Mg$T)H279Czs(L1wUxOMUVF%Sx}Yk1Reou4K4_b`h4NyM7 zZ`P(|HGW3@@{ZrgeRtSjKn7V=wXYbG+10y?e~W2`>M}pCxAGcq9xm z=C+I{ULm8IM)v)SI>|MA7Oi9qJegG20zzVU=D1|8&w1ddhmhI6$&bI~_1deQe!)%P 
zufe-Mg)R>PIcuWcq3a*IK1BN`;AZBm{)higWY60JZsgr(l2=c>Y>TE-Sy|pT)Ci4L z@%3C7WjD-zEGIL4FQ;WYpku7uBu@oS^6f&0PUmg*fJ6GVr-634yDPPpJ^cTUkC~ev zi)%z{cEaU7mC63@nNfACJUF}7jinzMdH;+IH_D~9$Aw_qMjV<=n-6hzff#YW>leYd zulzN=$J0=HwD!l7*1>G*YSxjR4Xy<7QFxqbbR=V?ebGP1*;#n@BwHE@@`vKxGUF}- zUEd&j?}XCBT|El+@4|TloHCv_2ECc(yO__(uDDrSKN62?f|;2%8FkId%i(%;*Q!w; zjwj&Yjxam{tcGaLbCgxe_QvyMXHW4|e=R8Q4*$#5$cTHMaGVX3MoO$Ge9t4~l=T!p zf}Gt&p9U@OKl(j9+OXx5oZU)$AH?Nl?7BJK&x+X0?U)F^hrr5?7>iIb2b?wX@#9(4 zWHmlB7LT!OZK?DcLO zn4qP9xziS}*2xNzw`XMiAg~6ri9^-T2-Z;Cy~Fi)3eLU8->$-${lum>U3rGSorWuO z$vbb0KM9TLzem0w)27ZvY^qLIGv1SV+#^ty@#xGx`3L!DTqC0g^Xb$&*RLk63;2sZ zBpSW_T%CJJGOM?bfz|C=Y@qe=ym2^|HKgm*$}X$>)0$84zZa=@q)qejZZFcv9u?K# zvH;h*;pwePck(@n#iSQMloxrOB=_gLGg6c}(^<)Lv){}Hx}4XFB|VS7oz9D{=FUUHKwr{TtFyyz2Z)`LkK9GXc!yT}__@Zf`KZnEZ&e9KSX>{HGf zt+BNQVa&5SsbT8=~AUvc*UuRp;!|Z-G$hEvjI=z|9k<9FtGwA9TY$thc-hF)>4a<0LM#PhSXSPc) zen(Wz?B!1CY|oM^(dAE2m%CYGd4)RX7iY<=g>T;IU1e&C(5*L+hJ{S@6;gUidbl&8bHit5>;bs$N7!n0J<-eR_XylW@W zpSO6bla-yyH)J=(f&4*xc-_Fx$M7ZDLH~MwAR|o97ap$?)BA;XUk%{yv z<7emcT{V@T55nG}@CR`GLjJyzKbq=zK|$4!qF zrX9q?PicH&?(-sBQ~Ey{w=#CV2qw>?KdY)6YxxM;JYDv8s=9gqP-hY>N4`!3F?$c( z>3Zf2X9W2R(px2pB~G*xpZ<*>@4%o1d@>rj1J2}W_O)7_LmM6wk($7#3hDk9?GNH+ zPyFhRM_+5{&+?(namij<&*A4mIGnZRna6l3nnt20Gfjtqdk+|0#MkUoo_Xerz*!}( zl!yqv+J7oSt?d-{!l|w68TDb#7Mg_BHe|C4zo>BMEsq9pHH>sSD1Id*$H#~C; z(x>bv)>K!&8ak^eSqH6|wefd%wnlk#*c~F4HCKC~$g!3W-ws88;tR$rd6}qK)u$0! 
zv*+#UD60$myiM>FH6PPn*7b~aeJQ;9lWOJ-FNFU$D9XDSy3(t~bfg=tx{=mqPpGWi z%#MmPK)xQm*@rmuI)lmDBKLU+Uotc0C49>JDlgFgxRBN!fnyKS;bi59v%c&@a=EB{ z6W?<^$i3LZEakJCO2$lYQ)3=(We3VZ+HZiSwf^^{BR$cwJzl19l**{rKT{CmNj9ytx4cZkg|iye`tED>cjLi#g!dxU_bI%}7lkg|3fo_j#$xbl;lYI_D{#)6sIFK38LedOG_yo9-rW|(Jz;Y;jTz^-E;T$zbEcLh?~Q4 zezEqB0{b1?NC&Bbg_T>9_7hNyp3?*D zjQ+-SKeO*|uGOBMy@3Ji?x;alC1-Cs1ldciNAo8 z-LMbEkAqN_S?`r_C|*DBr8(J^!%*@&$6d9SY`zPtoI}&HBXTwV?mGAk=X<)K@By^c z)I-V&$aef&H&^1#UgMWe<|#VDBI};A8)xRoJWFeZjcstEqSfT{uekSn@~u|XD(@sK+XBU#%eDOJI4y4t$|Sk))--)BT(aIV z&l9q%NXFQIg=Z6-pHtK-sZ9SM&lFlJlhNG4&hsYoSH-g5(Av<@71iaPz)H?qiFW)& z*&SHGK-{jO)|N^?LaVcvT;667EHi^9ZyX&{{3q+no@x)HttH5r&60JH6Z~dHWu6BF z&O>Z3^HcI<;We#Q26qdT?9ab^iXY8zC%a-)Bg1am-qN_!uXxhDb1m8HVOq(EetIG` zlncgdaOISOy7alT`&}os%dzz>>3K%`gX`|DB~PBK);qq9eFwODC|I}9%VxAHbJ_B? ztn=t?CHJ;q_1CiP7qnYjR`7T2Bxi4-biC#QZP!*mZznwqq+!}h&5D^Lbz~;jptlo$ zdW);+WgIV#X2wrA)XKe#yGP%KfRz78H+(BUyZm&N^M^K_4c_#ho>87B*YPRG?laFN zB}@6_sNxgaH-F0K6P0=V?DCN{K3m~^dMR7r_36CR%`mIXMsIiJXuh=upPgN`4@UiK ztZgMvk)72N6`s`U?^*im^fUWC9LCQiqx^(zH4;;vQ#QLzj+NiMOwJd$e~P%0k>I>< zx*p#(l4r@=lCw7MDp7l}n0h#w90S9;Vt96H%WC)OG^4%<@?DWvW;c$^!M#8mhu}qT zUNWoN>+znu@ky)1jl8|@0Wy4!Us=l|X1puwt7nJ>*-y0>TCbA_W=D-H)&GQDtRwe7 zYHeRyU5}1s_p*#+WEZ9R{OL5=!_l&x;CW2ZLf$I%yt=#ba;spn1}EO&7iYpXtJ*Wa za0s}GBFPW4wrshGUL6j>wxbq8Q{E_%=c{+q#_UP@4;W?#q(fO-_BlvyeJ&r|mBoBY z7Q5rhOn3JpldPINO__1pUX7Z}^}d`3uS32|=wc_w*;V9dG389rB7Kdv_WCywVWqKo;rqr{@@lK+A7E!`jC=xkTBYJMz_d;}>ziq9)a zGAkTMkn}a6-427HT55;d@%+mRY-TSJ>^iXXR?ImtJIS4PsLWVj)?3zAV;r7l7H!5| zvm;br?XAYGjrjBp71Av??-k8X0pkBb$JRfhPga49DrU@xzh3O=g znY9AheldNWG_8w`onzTDY#UfEo2;T zG;Xz~g}HDR_*O@bdrQ=HEuvtOcNnzGeNU|*E90Q)&5v;W+eyg=Tb){UNTiQ^aJd3~R%G;_G} z>4hB+n>nngHZE4dh0G?Ys^0ltU7!Vm!Bu6>^(D4gvjjfjKNQGcQP&h6>jC7c=gbk z?6(8Yof%xCaWeZ&+uxn;Ez#oJsDBjpnO&3pW4rSz{bVkAb8ijS)uixM7lTq# zJ`_fc-TkY!U%`QeZ0AH6)r0TLt{v=3^7Izs>sa`YCHp+hd{&7+<5No-a6U^)&o-m` z55sqKaW}iG41nt~^n0lO!XNpm?9{R&{4RiTEmS01?1>*))xC@*FTsa%eeaUjXHVs~ zSW8ur^GNNVh9hN`o42ks(r#7D5AYhx$B$`uR#{ 
zJ>%Qx%Id}kN$oIJn!V@B9iA^nL*6rYo8P=mFTVd@wO)NOEx2*e@Y$9)lSxm#Trm_=WX19)ZvWBc9&%0FG;AeJ7?1A^?)6mg$;Y>JP z3ubpdEAO$+?6B<4{uPNVgMT9t=p1>hVCViU~FiQ{NR30yTwB%`QX4s_{e@PKH-`2`+)Y&K#_=exC z$oKE$H_}^p7glw_%e&>;;(XqZo!w?T$}JuvtFdI4{SeBh0PSGX5>0tSOCrfT>ZPBQ z8HripxnGeNmA}c5nic1pbEWdADPC8M`DYKv%sRM_-3>0vXZ2$mdVZtV;c}K@tI67Fla@KeDipcDi%xudTb!t{;?>nPCZ@!&U%iP}Vq^0fX z$iZ|YcQfBRv)ZeOhm~ks>gD;yH|}Jv$*=gL4cf~d+Iiw~sVlQdu`X{lgm#@!w4C`y z={4o8XFJlWt&67?=~E=z-4aE~;lIQ2X3oynMxbSHlOKzt!(H7Gm2>qSGpc_EY%=rx zA`tTopdydXyrjGX_blbIt~33P%F1T`(1gO9rEF!fVMlg&9Sx5Jd!ePawz6N&7VIJK zZ_Ai%o}K*Df9j-{HcY!I5zWhgq?OVB}u-~nG8uQ@c<{ygdxtFs4DgHxmdA&TU z&Ii>f?xbfIY@(Grz&m=9n$3#e9F?E+yUL?2;nxyJf6HGTii++ya*6sMv8m7XfkJ6| zcRiHKZj(z;HQtrM{A(RnQrq!%>~v?gHVCZj_j{Ab)0Ag96U5mxBiUhQEU#sydEy^0 zI7cK|>E7NxgTOr!=hlL}1kW>fFWKtRY&yH;kHh1~aefYaf0YmHz|OKNr6X8JiZfZ6 znB92$yY?+CvkS%jqS|6U@E%^ETzNGMPtyw-z>n|FXKwFqeSYaVSm(Lu`C3`R51mPe zo}(2#o!zFT&)}22>yBi{yYsUDB*B?HT;5cfH-}Hdopmf^F;6s2{p@=3sCb$AtA~L6 zxVDm;ccF)QB3iyh{U$LYyWC_C{e|RuJ0A2AeJ)VrTNL);^OMc2as4pfYdAl)fY)8e zPiEFmb^_j;Zyd{7n&3>{_|^fes#<@_{f4mZjTepGn}jo~;W3{~vor1zHuePx&64$e zsEu*tvb~baopr_4?0nT&)XWZrUy|X&s7*$`o!^5+tCyXBPllaDxAW=9aM9y+IyhF` zdITO^kqzFkQVc9nMw30(e-2c+z+37 zqd2@fL0{u?G2=j4>!I*Z?_{h|pFDZVTF;@p(V1exByuRDULE&m;>D$8lF`5~)C*-R z!JJ7Vzok1(oF9QJ3-JG0zH^z@7rWnw9pp_e9Z28<5%GCfv%B^XGP#vJR?+HHi~dPN z5Kg5fkwI1qR7G=UMQ_mRJ!q^!Z}x$8R_9e#ZwQWMZ@B|OnMT%4NPH^jnQi;E+Be{7 zRzK#c_7vDGgTqxY&ph=`>?-e4ZsqhbiapiG|1bUK*}!Y`YgfMTRCjC28ZM`C2h-Urd4QWmzhhvV z-gZWJFV=3d)XZ7$Ef;$qCYgP-LgqFDg&pN(TdI}!&O8jWWQ&i({%Le%7sbrAzg)yv z2YPmH>?6xbk3a9ld4~pG1lE^iGeY^Ssc5YI;iOdshS#cpr*gAewHDFSCw-Dko3iwb zKIYlQ%|-t&&o^euiXVe>_La}-ms#}oFFZsGx;F;4SsD5RO?(6N7nEuZm*3HvHDuI} zyz}0{y7IAu@nr)KpMBL&6!SA1Xm|9lRW9#ku7-n$D4Ps7bJCK7wI+jeP@WYO9sH*s zR278mVv)UOmW%e;&*XPvT(Hb&MI9Q@m{l%e3z;`QTe%j>WZ#B&mAQ(XLUH#0j*rL~ z%(b9pzE)2qwxx&JL2OUaC!?=tz$R^bDmxnP-&Q*{6W=l-ifW`DXlkm(R=& zx9tkO&K~x8hLRDBb+q|5o^TP3&PYu5)NDgbvIEv1!OHX7tQ>rm^!vars|YTE&wpsb zaiVp01;|S6LzG(we)ig*N}3rr&uW9LGKn4Z!ojb3yoGp?k*V3Fay(z#Td7ysUUr|# 
zo1n*omG@*l#usE>;tZ{=Cy_o%eB@4DJgkDd<7wA(d_{epdjxB!3Tk%t?(WkB)bs)B zmzBzwV>6aBdx`Z z?Ax}RYrRp?nP<%N%>^`}4OwJ$RaJWPEcmUoleu!4y%-zMbMoNz6+1itRz1;ClfTR^ zpx=X+5raGzzFG_M@1OY3Op)~5`^oI9qxDq!m=)`{X#FCk&XlQd1IMgPiBwy%;Jawf zWZA=15HmjYG};@}h`fROc)XuOYL#(o7TeAWt%dO3iI(jKv$tWrEe(v{?kc;xgT|a) zP&0%Sv&(lY^j;74$1G=GHN&CtO8lYF^s`vrWbHo%vrhE43+p?D9ABsJ1JOUN(4~w9 zta99|fHP5<&a`(d-fs)a&91KJsb0s?%w+qw2%E9VjN#;Y(o5ugoBHvVci?6Aa$F7C zD$+}T{1V*05bhVN`z8AJ5|Vg$Q}^CmQUsjKtp z-CX|@Xv759(-S(@|GWZvFf7-nmU~eKTKYH_ZR)w~T0AL8yJxwRcO_PFw573?@MBLl zGQnByl{1{Op0pF)ycXQney?_aXSMbt#k4(_r@5Mky2eqSlvD>ZGxD?M=rZMRV*@pc zEIQeEpq@kf4`G929UnlJ$MIOXo?U?d3`*nTY)4qXO6MxckDmm&+;b-7#*1ai$xnpy zHL$tVS>}9?rE_nvk5{xak>*|D+FaQ}dI?vsty<1fKcCDXYNhryFdpxCTN>Tj)$GsO z4wu_2lb+@Nw4stRTZrWsDECNV^OI?LdJ~!LRqn%*{$}3N{fyGrC{x?-eflBkt5vj_ z|4!NGlYaFZ z{K-1By-)7Rly>GTmZI^tqDK%;T!}APdHaTX=b?IMTDLE}C$p%!Y=1AlU=n|kox^u; zwOT~)z>DR{e)(?ajHkCIrG~6!BTYD0?9a@MJbgbysmzznPKq70k+IGlY48uOC1=aN z2ic2!UpD_5`>#!kli-=YNS-!y1@lRg%KLLO{*f3uop%_;`@PI3EaNf9^Kp3-adjDd zo}VP{k6}rf zd>oXF?Bz-Mw8AG{QTWG(y#CEHxXhut0RGvjc^2xMiay==wDc3Q^L5@0aRZIG3;qk} zeMX_L(8AHUlWZb80p#rrEyT)`=<-!;FmICUBzL)x2h4uG=W2JjJ9!&Wxkudzv@i3N zGFQ8bqjBu^RG9bX$?sL>I{kw)c=5c~csP0wBkw(AXy=MsW0cRkZ$`qdDt@jOVdmpQ z2cIL=%Lxj2_DavF*D3UIF*!Fc;(1GLovlRZ znuMN83 zcapaguaAde512jAR;J=(cBdI6x5%8Q-Qbaae`*iaPM#xV*S(XpGejO;3um%Uc^h1v zUg*J5_`EM_N0U$o+0{Atn0R!F;tuGWi8C4TxE>AToxRRyT}#udpd_QyS=W(O&xp;$-$! zI*V1Ds8n!j-jqB{}S?-+c^e zSJB2Gb*4J{6Miq@LDOfgi)=_n(aHXD#<=ma`|@8VBlYB=abm^CWPP z`oE)tU6jh2lWt-~R#x=Ya%yF#pfNZf7}>kNIhr5UVxBi&sn$3eaViSyI3Ei0!&tzn z@VZf*^jIH-Pk-__mrumLFPL{Ep1hZZ|d^=o=nH_e3STuwVEQ8et?&fLQf8dh! 
z*Qb!hG?H&761G?SbM0p5=N)N;!V^w^j%LLPtwSE z#MkxVvg?2L6wb`e%-f&HLwyVOdwB4dLhpyb>SX@!ah%Iu#P8CCTH4;h-QCbL2$yGz z_J0TYI-2pIp7l_8Rl%F(N`B6db>=M|<8AV^Df-a|mU$|CB&c`rSlQiV6zZ(66VV@n z)yMQRR-9c--yx;EvEns#d!v5@Z#ENE58=v8_wr`lswnvX2zwJaAFKB9e+Z9=j5Sh8 z_6ALpH8l0uvZk3q){qg|mqL~yl`WBEo6yKML_#7-vSr_8Eo3dS7KPXU{l0JW^!&fS z@BjaMeO{lL`*YvtKIhubb*^*HbzNr_{jf9=IFo39IBxvZ4 zNo;V>XoT>YHP@t0W2l{R?BxFId?SOu&Zyk$_?eHsJ`qTxGmcp5bA~pTZ}`z#_A1S; zEyT>ox%LqHh9@n~V{CLbV-A}xNT2X#g!)&w8i~}2$;;f!g0!yTml=mAp*4(0CR#$K8-cklTTt#eS9(RwKq}1Den0gX*D;Y&#vw@GCz&Y~rC0tE zylqtyDR`wvBb0|m zucEm!VHvmsv5mjxd&mb}rz1ZTz#$I$W`@@nxSx}mR(0^Mk5(H7t>v_5rRi%k2CerZ z7`Wz?h>q0CH!EIUN}4Zs8ML28|24AIHO+0%rWx8Y87l9f6?0Y2=dSzrlhE!xdE-~~ zUOTkOJ~%fUiSExkx6mNH(Cm$q)jTuXKprLwG((}E)p$+$) zZ=#cWp-+mUXJXKbdC+|Ifc!GgE;BYf!*3%g%&_8{uOpR>f!qCgGu0VOsh7cf$l5`) zQa>;#iFP(NL4VXpXcI}CM~Fm#Mse6+W56`e~@I&1Xb#Ngy+4``^rca`Da`#*ai` z=zuKi)jfcg!gargpp!8TJ-~hgxGbZUYhdcxjQtoZY5>)Hz+V#mW=`)nz;+ul62^ol z(&`r8HafO6?G52cb~vUl_jrx5AoMrq@k3xx71+$E zV+^4AG(UrHj`{6@#Qms1WmvY8QqddOIPTLdi>-CZnL$vfNrnT zQY5{vKb&&D)Svf%0*@_xT&s7kww3YNbLP)*eH%?$8oE`4OMfxr%!B>%0=?R?x(rX+ z(fjg3E4}lz?@EGO2c*<}v$q*nzXoUTFQu$Mc9f&D)2=Yoe5kwATQ+}E=IJqFBk;L<#FoTpTs0UJF<5}?oLK++W$ z4p4R^Bafbzm4dOw+$OV-tEZ9d>_Al?c^LtBlNq_(J&WOM` zjOtw(Pusr%%`qt7lUj}Mbta=`uMgF>z~Lo8r`OU;!2ca`qc7e4U}vnwa%9H+Q*E*Q z(8cwSoM1nRJDupo#^em3-lb@;jc8cQHb$q!AXks04@UFGcI2%Any3{2-Gz0HqbKDa zp!O%gxHJ${16H-z4Mw0k;8vLS5`ps+(5~g}+}!B`-Rx^Opky@A^y2APU|9hgG{tt9 z4EOcQ8U`0GGIDC?K252q(4{$CP^a}`jF`alIBGb`2$c((ev0&uhigX4*(;1?%SCMs z7-gNGn=#bYtj$o?KI$0)uH!ui#!Gi7?}&pow~uubu{f76Z0MKbp_heP;3`)80}P>cgw@~m!R_z9BLdxpJoKEKah@6;H=;9 zBI;QOL>DL-4}K+(1M}b2r_U6HC(V!se>GvGiGo-C>7AahaDm=+0IGb8)i4fSun1}S zi)Y!9cXhhakSn3kilC1?#Ir4Me~*BokF#xt|}>yJDf zY!(6UUT7Kma?OMSQE+M(Pqfj@8E&+$5w!o)X4_4#ok5?S5$sGDOAxM7XP~|6U>UB- z|My!NfO!*E+aTz#Jva<4a3!!3X90f_oE(R2>+_=Z``caHHUb(IM;f(E`yo^2HF_0F zKg(E?8Q$jLiGD>d2epRQz0nT(HBIGyZuF2@-qWmE?N8&L&5`pDbm_1Vlc1PbG{yGM z%X@G0KM_qA1(ZqLpTQgA+ZkzdFJTRJbfm7jP%;bJ@I$Cxhtb=tm(~-0(qeu+^C>*> 
zRNRecceXN=ceRoAh0+4|y$tkI_vA`3s^@@LMr}0Yw^KU3@cS1ddLep7dT22XP12V)R#GN{U+Lh^J=9i-Us}tZ(HgaoA5SIloDOqI z83k`7OhHP|q9;4Ma0jRX_*JCmyb8Uu(~FGqNe>*(L<&&eSdnpPnU|?I9X(Hc#5W80 zf6iO!X|XxlD}qnxiRN6z(*^XxioNSqm6JO|gZ7td=+o}@#8TE+sW{${E@lobgI?K-UO0&!n2Dw= zNk2C`rK{e?E;tG{r4O8kFYYgU3eYrYc$~K;^7edsxVe|L81=^a22G!Xex8U1Nv73} z@TV@E^GvWKj5%xR6+I~L-Eel}`8hwJi%ugo7pQGE-&~9l+tFd&&>csyG7BSzhtZ$S zki^Z5L&y1RF6Cd*TD^HI%q-apc-=Q|fF5qe8|KD-IM5>anK_Y zy%e5hn1^~d0EO{{W`prGq|89O87k`!>@4ab+B$@GK1hvbcs3`+tw6(i{>gr7GHTNu z_9(H0$dGwvyP|LlKM4o&_ zy$zwn25Oju=BWW^`h(FMaD53h+=B*ajg%YZmj|Bf%kFs6fxg{|+P;U91<*atFU+r? zr-{1}`o_&gFPjf)E+bD*c&?xEWnk+WP`)Jn8O{+cV#hs9bJ>atoOw8wFwvI>W z^UIMOXLW<1SXOk?Tqxa-aHqm` zJ3Z3ghd$$>i=Hm+XuT$Kro@g!B8-5Zg_J7?)9HWm`~)plXN=tdPmMx;Ag~3L_zl!O zA0DMb-(+Yoj=EhF?@L{tnt2o|I-A)A?J5IV-oOT338dwbgx<9OF}=#OcupWy6``Bi zWLLuhef`W$R*9LvvxF1yHUX&2DB1~Xw59KrggWQw=SS$@^(a*W8Cigg8xJ3eq*lhd zwcpx5q652Y7t~ll**6$Bh67n~;P{v~&7-5|`#SnfKd_%l{qEf~fTK-;Wiof{AaK8_xj`LLD`phx=h+pM{s zHPncbp5^o*8m12Sro;2{z;=S(a2X!frG`U{*=ynYTfnjnevX9)JK>`K`NM#vB5-9u z)0zXlG?*HB{4BIHx7lQVbwG<2Ln_SpGYGlY{+tVEQqhUuGFr62YHLR=ld(T@Lh0tT z<%wp!;9^^@dc(gx$f%iOhtsRFKzsL|mLu)C;CyPJOPxvdVs3H>Eu)9YV#buCaMDxQ zI#NE&!Q+mVu_4Yi3eehBdf=E~RGLOjohh3aikSy-5B;$fk~JFnF-~n0{oF_```%l? 
zQI+pDbWss#dmM~%(Xu_w^JYiYEy9Q{-ieIzAE0x7VmyD7G4O3P@mi$9y+tY53wihy zs;moS&`g`giLU{>8$f6lF?Y%uL$8s@oM%FOOnbe!pNDd;fqg`q&7hWZ(5b*4MGc=J z4|V8s+o0_d@cAF3<&C_MM@4*mZt{yqb>%;K2|Es>0b8~}@L zK;zD`J7?x*T}^G5z&bxt-3*!}Lk0c6PSZooCsqk8Mxae*^Q14-FawuS_QtQdhVC58 zy%ar6$3x|K>i?MXap%{5(<`t<9aR3f#S!J%iD|w z`Vu$jKLH(W_{XXT45~_wY7*l0lgx%DC zo8EAO{(k~mx<~3+%<2X6V4tFooQGDm7@-@Y%i{Pyjh>JY^rCjW`w4Zlfl7mU?rzF$ zc-st$FA1dDIg$CX{)B5<1Li8}&AaXt>x=As=0P;D(HL5a;nn}N>6|iY2A{%#Eva|Y&bv?a^9i#A#s25cvv)01v)yE+jv2_0^|;62sR~|O z<20`I14@I@Ghk-~En6b((>b`x#<<*;y5#Z$$WH+ztUXd2+Q2!`y8V!}XL(wfQd*y$ zePKjJI%GZeKeeUR>i;T~>I>!EPsqXNFUqP80gK^TeeC*{NXbQ7 z7>BIQfXYuoaU&4)URuI+dP-Zn`3q)Hx;VC&S)9hwSMEiIkI;%U;@0%U-P9cgyaSQx z2(C)-c2=kq2_83qjWPI&n7jA3lV6_ztC8t1BBhxak<9K?9*U;$-bg;y zkpk!2KyfX5y}Dv4;~dzM&O=J_%RMor=}TbNe^3c4K}lyBg@U?DP@B~H|NA(9at}<| zaL)ZVHOTicGRS8&z;+~3-w?{==l((N-2*n=fhI3~!r6wn7DStThx|y(?yS-6r2pLJ z{xx_if9x;LtL~wVC#dHoIG)6R+sy(M!{`Jvi7bI<&%>iV*iP=1Xouv7TJAs(!0s{c ze?N4{d*~kRi^}M*dFb}7=+6V_kQL~Ua4*F@gj?|6(J~s^uScVeVa6L3N!+S8c62HyjcvH(*wyYF{{yK>Uz(NHqYiHMj%&_ z2l3l?zM}T>d`F|3d-2q072}GHcHWJ?QlFpYy9>C!WsYJzt^2lT0=pp*K2JlQ^|Nvn zb{5!{ke&sqWC)W>1{^zDo|t(c9nb4`a9`+ln?6e0q;f7 zr7Cpy6wX!X?@8Qw292nM--_|6KjT+rbl^tDmOaqm82Y*ot(At4=FdM09ZJ*oNTi_@ z_*G$q*bD_W1Tys*w2MUFSx+xUSCim!!&v))n zmC_p-3)<3iQo$q^NaARzE3KDiv@&wv75I0M5aR<%GirLatloI$T{P>=S5WL6P`$&O zO{upke<#EDdO+x|n0}1ak=ci_A(ZPW$k1-?_k`AR%@dc$L1p9owRrS;HZM{P^f2$) zIo|gST=&7uD^-koMX*P)C4&04hXdTB{{LRmcZoyMB+#E6{8Tew5#DY9m! 
z9R=q4IUPe*ZfLQ@AUCYvBX#TG`SxJlVJ~`MEzTr;mjIe6TP zQC^QBeR*8fn93;NdPY2U=Lnv(L>_u#qv`wg3RvW)ZckzR5uEk)8xItxX{juHn#+94 zxt>|NT<@PiY0s)?&$Fk1Z7q~7M4O&5v5@b0w8i^KoO7!wV37?yzZcD@*Ys7W<@vp% z===Gx0Pm&d^iX;iG>`%U`;7cXqpd*yOn{Dupx79$uF-?c{%VHZ zG4vHzMLfGI+|e^8NcN5>X3m=FtPR zGD?8MkG!=W>X}cqA(}1$_~U_cA2hj%W@v|gj;CM!MLV9bXh3{3q&Kla%U(8TgmKK`u#zkzc zdQdlxH+S>?R^X`x70tJ@3MpHJW^j(Kwcu{)B4qg_v_A;Fm1XBjSD<8TYWECzPf+g8 zyx}L{bzPtkIK2^&eZweRly$pm)ad+wYTn zLw}wmo`tibQpjxyzUKDo$#d7!(>~hYNq2v_9}-2~dbnkT4(92JfM(60*)VF4<(EDq z&thMu1n;EHon8)prPZ1CU#r@)wB4ujWQ)njs1YM#Hrj!~y|iM^;v`Bsqj-=}$k_Y>{Cz68mjzyW8p({{t3RD*c>2*-$~os) zv|t8wj=8>^#pDE!aK`H%o7u13Gn)+F&QYajxM!7xyXo)pS0BVF^dcqCGn~V@Oj{!GEuvXNLc!!Fj%(qM;0V*I51FY zG&g7jT=Y~w<9CK)X=)c_q(>Pa=U&_uFmso3CvvL~Q-pokN%j-DD)bAU?07*0MFg?Fp46PeyK)uuUpS{b5=G8bhoD;xmrFD3Alh( zGb_kZAYKA4#=Ja*-Z)QdX5BL~dNp*;1T5Cmk@`ao*$G`c84Z++C(FU}Lt4ld=!yYo zhTK5lNx^HFgJ?$=#;*2!hHPj!BOlDdTa(ctCs1m~=AoJIpn6cW{d9%y`ly6{U% zI7?{)S9W2uYUR9yc6|;D<0KH8BW4S|eG4!Y0j_=U(Dz(X8AA)v=toz9;*icQ=)}Kx z{~7L?bFmV-_$tyc8;&hT>di@e0o(fsoRVJJIV-v6{{JZYh^y&2f!>TaH)*po)Ne@p z&YNyw9bODp&pds#2X9Y@%13~=FPhFsmng0$(DEhv$Ypw?(dA9hAulpMyh%OTfbuk$ z>uY4Dbu&}F4HY7hoDpaWJ+q!iDlIc5;DH%sicy23>1O!iNx8$IS1Q_}C$N~SCkHSr zW|TMtbzb6sCuDLtw07s`nXr*hNpRL1 z+`LXxvHm`zf5p(Zeu8cf!u>_mpf6W3>_>A%YDa&89O}3JF;vxmtt2>DsunaI0Mw(X zu_rj22RAYp*HfT@ei_EuIEVfQ$Vb6hGkzIwrO(kh?!QBc(rD~M)HVPLT|q)SLMQD@ zPYP z66<*{l5ZtocoT>`O+x-mMJlUszW}l_kvrz)G9IJ@e3}H5uV9m&fa*Qrc`+=SRlwC2 z7$?*7XEW>2E3hm)ewliUvpzG6o;@0!Hi2?QsB<2lFDd0N*afV$O8lQeyYp$ySwm?o zoPA)T_A-j6AlA=#?&_U)kX90SH<5epW*C$3Z=hX{zPpd7o|91pow)?;4uJprKv))S zI~5+AyCR+vwc*oV=vNMEuH(1)i=Pi>UdH;jUB(CIS)RdA#P6PB(ZDjNtY|eCym~T+;exUHAoQsSo z6X@yPfkV6gGWEW}m}ahDqa~_Paw$-R@fBNKJEP8oavF5A@GEjOP7|x31EsXF{ot z$o5RA^?rcW;J|ACjuM``@(dg?lJWvQd=M0i0Yg{d4%7A^tg=OLVlZ$TVe0uRbD)B! 
zK(~Z{y?Lhq8aB)+n`S9RAgNxvI_)`MUdIM@l@^wksrSPBJ_|n?prAQ1jUi7*Z_!o@ zYnsb%J+U67XIM7Of%+&oYw7=hEJq-vY4wEfxazL88^&U|79Fli{|k=%Q_`zU|Mgot zQ*WtBflcdv*Yi-XJ$KFOo1MSDv5D{8;L2ZG?d9+OG6$|PC1vRmMrA}Xc6>|^Fng*r z(b62l-&l06eq)wt&Hs1NImUq)L8fIK&VyE>vDffR3)3}-*kFGl2HO0=om^2 zEzw?P(Y?vLAE68Mw`dJFMG9JR4Y2`=_n^0LM`IPHR1UP!i~O(0 zc-58n^&9eRo|E*XRa}oo&%KLooQS2Gmlnb^-~Qwt)(G_<=9xKSw2;bB#yyppz-7K9 zvwY4)s~<;JkJI9OEWjVpZdzhJflS{C?eZFMMBnwYXsk8RUayX|=-7NnyB1h9(ylMa zE%cai#8GIo`f$$Zk9BC}>}cEd{F(wqN`jwj-(8_)9QCb4kNr*0YXJmb^0z-a@)u;F zF>Sjtt55FdSgyzDxjDhmbD75nEe{5wXlnILt1sz|^P!vBIX&INRabXaPGec@2X6N# z%u#b3KAZrXebl{<@<(}3f0{p`M?q-x1r&OZ_dIc}1EW$q$|oY*lhH%%fMz!%LreO4 zCpdH&T1F$ahoF^tPt5fC2-dsVlZ+#72i|e?z=>ezIRPdQ-z*e9SQpZ2nGYS>tjMu0&TF2CvkAmEIXe8 zkJ4aVe_^N@LtA>NZ$Z|Y0poVw_2jG%!F?+_WcNCg0$M&9x zJeld`6tJ18$GzLN(0u}u{UZAC6g_DYwVL}u&-oZ6Kue|+ZBKy~MwgnWCk1`|5fEiU zYdruD+(pv5HkP(N)U5>mqiJse@7DsNl2CpUZ9hy+tAXY*xIBtpX6~5KV`V%R%fqzw zC3QF_n}h@x2f~&>;)?ltAjk`+Q;}PJZ#*+{G4iBLnx|k98ppo6gtx?G70{V^)|J}p zaK+ub8fYU=iPS=l#&$@g@2{dyRl_E7Ts{P+)?*1YrMC7!Hinv9Nt}mVq$1&q=nKw1 z=0e>q$Y?X_{)zUs(3?CfUE97GGTMRuxQvhS!b)EQdTe$iAum+!j^@y_<^;cr@LUi7 z`p|136lo2wasyvS+Sa1T&xqjJ?dIvcN}GAmSa$+EE+BnPxnCL%c}_(s$-iVnA zx-yP^0GF20U(6(Gu70zY9-~!b7|ziz^|o9K_11yCCsmH3R6V4r5}XLVBJ%@_QOM8J zCp!Y05y!WYkG8+t;BWr^|*Ib6eDd5&0{&;r`QneLn(lYIZL|s4|c@oAd`bIA3WlWpr zPCST&nC;b>cPq3`JhhwKV=%qj{1B~q_c%`*QQI~o@F;ZG>&9IUHSSwTM_kb2i|}6$ zfJT&#p^x=L(^{VyGp^H5od=XgTHH~qM9*mgt*axM#x|R2v`bhdzeDp>C8nL*I`Itc?p$$?P>%(Ep97cW%)8>tP#tCgF9o=XK=xQ+PHH4mCoi-k^`KM+W}q zN6Svz?Ww>z&3N~D26+7W-6#K8cHu1GZ?KpFjjKY_aD_h|+Sy2v7x+6isKFKM^jKrx zLB|W+3FjO)8>tJON4f zRKj?qqZ*PEW~wrxA_5qS^Nu@-k5WP}acO7fYompXd(X$a#&f?)*+*%;EhUEVbr#~> z!#E8kp%k+2Y(^nyE8w25~*QKyRjP22zG5Uf83 z-xlbxH+k-EqboW&xN3nm)`PVP{7$9ry1LansfUp}z5TC( z#bN3^g`OP~eBVQsnxJ9KpZFg7Nq>?PXs4>s#XLLj^7~KPc!d$=IyL`74eIu5lvuym&?HF6#C~U==LjPLN~ODJ|o+~uq{v+ zYwmvQY{o%z>V5-PyF$xV)OHf>vw=3eR{&i8LQlt`1lKO&!KwMH-b9W1(s?vwCh1?cV|WIc@Gfilhu&=JdgPk>K;W| z^YUMyTw7?`2HIVLKHZRsrM%sNc}-o)bfEXY1edj>N;5(nL(>{_?S8rIy80@;M%!j} 
zs>m47i+XdyuBOM$W6 z?F8+OD4c*~&0~zx!tKVpW+-qM<|LZI_5YP<+-%&5Mq^9|+7`^1=1})Uxcw=-H7|j_ z65oQuL}2~_F1|!NrQy^Uz<7>U`yzpHP-O`;c^f$0h5rQUHZO{~R6WOHb=?OU(ai&0 zn|8E^azm+Qw0sEuT!!1Dki*+hV=5Z>5_GsiEm6=s2e@sd?XqaFUOc}O;NJt?wh7AV zm8fNAb{V4uHiAiCEctKHk`Ezi8>zi8?`=kV<^f8*rQBQ6XJZeNTNkS%o>EO{+jUMo z6b7Id#_{(ckedOvCD6@5vL;i<0!kQlo{O@bplS-Ojt1vk&{!*@D6^(EwCTxi8Gx_| zRNTfGGdJkfHL0-;^%aKSdjh$v0*&tlj-rh5k3wIq?#=K?ACFUTXAb?%=&;88-NsY% z7MM@WGii(r?~Dz214%KuBN94K2>OlZ$MnQ{`T%)2fh06VCp->yo&e@oXk|Tke9t=% z0__qgTpi9klW$Ke^U*1KoO&9@z2KS=j_8y485*q#wU>wQWBA<@h&^#I9$AWqlesA$ z1B~Ik$ddt$i}@1BOVjdNq|f~eqn?YvuLsb>qj;+dJtsYIZH3EKp}6O?=r^K;{wZ>> z0QkD__7d=`03K86*J+#RK`1jPB3ch zJKlH}TA0JjIm#*U8-iR^fonOT%tz4W1>W=&-#gS_5m-v0oeI(V8Tj!k?T@8abBI@j zr`j>)fHN1r^za*nB|aa{eF%hin5Vfu>3+;=S~GLB_L`V$?LPpeU5o7w-T#DA#juwP z0NDZPxDET@cckPHPqy&+gL+T%#PcOiK;0PhWZghMKZA;u&|5}%9iqj_(Bd)@o0n2o zp!`DmV@X=}M3P3xx6eny!Aj^d<)k_IEQBI^kf9`?+`t{r1If#?($J(RBTHrKGHYW4 zsMCP)>|@5YmeAWABQ@dob>8g+w63d6M1qY|bbnZ%q66SNm+O_(VI=r9;Q5wX_Rvp- zxD6EW9PD|>OF8Pf3U2-2+I(6Wh`zZ2bj$g>GQjo`pze><{LQlwNbwWAxe*&-1#&xz znxB9gWzoLw$#g|7J@H#FGc)fSB|IBB)9MI6;o7LSvA=&o_ap_@ol&;wfF(2ZN(!EJ zhT5rIB?VSfDXft~aI$Cc%#)^E&rJt~b73dw@jZ+?TA^V(X8G|Ae`yC7#Clpui4jo4 zNZhau{d26 zk%gq7L@nCCK)vzQ7K{E0YxIq-T<1Z5rbqs=Av2E#n8c$cb88a#7i$(bVGtwj9-vzp?^gPcPf0k$0=#9xd$qB^Do7hc5i?rtL!AQhRTAYp!Fk6g0 zJtz9lljh6Qqa)G1o+#mJUR|u|{*?Bds?$MFH>>#^EH=+s-il6`1l2q}V=`Ld5E{A< z)<;A7w>cA@Lfe|Bb~g20=DHI2XGJqE;=TR+|CpZ%qz12IXyEmoALv1^pSSUb zMpf>E1LbIG0V78@MlOAcuOiRA;M8UM-w3exe7?i*$7l+DUT&l3d!ZA0fn_QXCqg6F z-L$n1Q$qq;VkuO2JzHxvm9|fVzw-ifG?oLN4nUf~*wBd4Vgj5l3`C8nXA)E|!$-d> zJ-zgVT>yqBfz)hMSLl=XQQt{q%~)kU5BhM&?16f>ZbhGa8o6g1c=EB<@nC46B<|+^ zK1R&l@Ny%XXFRe|8^|}%@&aJhwrz||=`-LOSY2?b&uq}F!!bzYB1ZpNaC9KGrZ5sN zLN3hWZ`7VE@Y|vIdgx`u%5Cs6JIwom3?2fbz4U`w{GEbiC@;;Zc><$e6xwDP6i9|H z{lIZ5IOP`HL&nZRk*(Bv1B#dh#@(S2 zjIrZ+{vP;EM83^Z;!1LN>KsU&d4YB^t(c+bGB(+_ymy1&&5`5H$U{EH_h`6Oo|;b~ z5u4$FXO0zz`(voPD)NwkCYVgQ3BXj4F|;F?)CZ5kteAK<@{9C!<9903?jUONe7scf 
zFrwuTa5j5Y4E33}peg;JF1q6ZFlq=j5}~NQ&m5jh!L$&X#5hvdQzsGr2463*$@fnbP##`#5 zJoLxMfO7&CjsD-BALtA@25Hl)y9RHUqV0Xqrw(;~31?4(Wp!x16^-V}= z!)LskKY+6>Fx?3Hobp-USyHxelGHgfVYZ4QIK zMH$`xq)q2e9T=5LA}hy{lr=ot0Z)ys{s@V^4;ej;4x9)~Mo3?!mhh@2CHo;I=Ez?F ze=||1apE6=M>6tI1bQ5$ZTEFrLbK11qQhXSRs0oCTfyZC$XXQbEd#Se^py7cK}s5h zy$qRiFV_*{10*^gy7mFC!j!xOuG-e-R4$G*=A+bXtnpRg=`P6++}EOg0XaB{7Bm0P z2gtdx80Lv81{Jo#w>Cg$tnW6)NAn@ArDk`2_A^#Ch04{Ta1`2fGVRqthj^-m(Ys@i zPfs=Utnde^>mKSH8H}5rBy0X=^^Nf)vuOJ~8bEZQGU5(9%jZhm-+s1L;zk-Z@P7OI} zWd`qNWNZ#g>oeXHjTRgHZa_=5kvQKAKl*t&r_i1c{fAtoj-?*qd6n;KdQn5xX@*g2 z5mwWZfNL4wu+Pbla5 zx}N&hvKHy`nR6=T$Q_Zo+>y@4VHCQ{@l5oy@XYj(&howbf8^17ayRW`jF#6W_&O)c z8q7vRYyTn==~~RtU>_ngn$>JayMVnaSLx_CVXQ(yMu-{UY)gOhw;)>8OjBdQ&KZq6 z7$5RC?R4!7P;&#;jM-8vFlu!|;s&8j8!=}0LyLY4UeoD~KT+Zg*6CtOy@Vtg8|(^Q zU4Bo4em$T@Pi80jz?P+-8V9WZXf8&a189=jKs+3+F$sM$i{A7tu=M7CC7ycn=L$5V z+WT$VEDR0xVRl`o3DOnD_8eoBFeZ5b7!(Cty;M#?jT*e+F7@lcdlqb7L;^=pTQamY zGjTmEBO_8<(@V|EVszAb##pln=>dNZDaeUl+luCCNn7S&+K9ck9Xf4C8|slHJo*vn zy>L7j)}4}x`5^t!MOq@ z%0k&?@X}L&_cPAuQLR#^7I$v2RN^VAv#PV2BT^p48RDAI%RN)InryYyT3tcf=RyrOy1 ziaIV+pVUl&%X(+Kr`iq~{RG(DEzz$)d%G*G8Ea9OS*rdyFuAVGQ z=noyy^<~f$nUSXUaB38v^z>VOzKT$XQS>92MZCnDHK^Z+(1nyN1*H1ZnU#49_#A~= zH;|SaL92iAd>#}s3*2n_-zum+0ll{u&N+X-!6;RYo|uzyx*2`x6|l-o?W>S1J$!zn zy<>Et0T2t`1J~soDGU8Sf^8?Kku8Z+*DL8N)U8k?S=WXnOVhOx&r0*c))Hrx|pO;W3g<3E2wrBVK zf+hb2kVI0$y~uB0c;H-e89mQjKW0LUgqz?Pb;ZU8~Xa|WoQ;Mv-deVo`AE*fT$}Fm_X`M>(^F$3Uft(Cabe^dd0S zLq>b?cRsi%3zl$K&J(ZaQ|~hY@23OBS$<2;i@@y(FJGhmo(Xz}Ihv*;y}6j(9EQuD zB3cfdZa|d<^zE~Ogg=jj+P{i3I=eFDi7an`Lu=&OxlKN({5@rr(YgHgOs^M^!qAd> zf&cPHdpR8~xEh=Xn=E{)t?ii%#;oc)<41cf>F#v|dfIzkc{3JDY@*L(fl_KY@5fVj z4j}lA{-=L-nllE&J;&KI7fKQ^J{ zD9V10%y_CoW^i4@INP0%nOfUY?+c6(>6yvt&+qK(EHXC)De6wY)j}2i^ho0OP&W?{ z85N)ph^y7vd8<1#yC?X*OuJFs4L`L5NW$w_WcWSW9>rHr{|rHybl|@EKRnJ${VB9G z1G;OoYi+xNUYx#nFBs%TGmHq{8wwS6(+8wYsk?P~eut+i7_FRyHh1T+JhsKofTC$! z4*aBw!7^}%**yx+R4;`*zJSckz?!xm^YluYdf=apzBnF! 
zKMci+(C^p5g>6w;c&sBu4~A7PDTGP-0HuyzA_JsRAtyhbhi z0-f_Fm|Ou{{k}V*skJ(n0qHKrk*erpt*j%wBZVp>EzN+qI=$l*Qa+oJ%k}8@(bZev zfmyXLQhV!Q&Bn~AZGmwn+A9V^y3-D8;`@m_0(UPlGTBD17ku@`f(h;5*T;PmGc3( zeh92BxcdlKi&))xhH=^0lM--f0)50$;T>SBg_QmQEWNQ_4nVJS)IJmIdlj&5&cP5Kx7d5_kdQ+m#vUioLB_5s>4cWm*% zW-`N}vD%&`R26*n_%b#v8d?_x*D^@FxuDEw-VQDs?dP6Q3RHiClEvZkI$H2FM5FTD zYf={4!DYROb|EoSsWTdS4u-Z3_*;>_VqU^!jB2hqyRzxINA=)N3+iyUDw#U31C8-8 z=9)5N?0H7arpVetXfcI;G6}dodGY{v_F+w|Lz>*H()+0&nx`1`d>qvCEj%=@ zmHQ!+f#@}^j96?87u>fgN}n-iy>!su^tCgRydIPoP3zUEzaj0GhmSYG%q&bL;I!T` zMrIGD59?3uDfNetkZEvYBsH(5UmO4~v(`S%(-}aN7yU8?+C-u+UP9m1hw|=@%%R^n zmng{iW4!9?ls3w@J=7RZt6dmnJVl^6Fey)E;qH2B7=dh51F~fLgL!H^^==w%y81i` zYV3!<$zXO9Y+t3{zK?vL;Jqo}`ZapgU8uhqb&X*(XRY34V}UA~&s6vm#r-JYZHfk4 z3M3nmBjel5M^*vq9N|ih_eU^~dJR2QpZiA{8_&>ZrXhi|!Nzsh1@OoGu08qhuA`a1 z@<8{ms3RKPHv#C)r{5mhC!txNpvLLYq(}gnCmu|IhU?Jx<|fz=L>Z~uy|etl6rSMT zh9|$nvA#&QR#+9}c^{nBJJ6lBuGG>Ii7ErnMk6V^kgS}PZcD3XG+qHN3t5BGI!y)x zlp)(HmSrVY^URv5q56Mbz@WhZ4Xh&nAw*l7#u*uBW?rK2)VB|T<-Lv54 zoUkKWVjws=?=1rDBN+qCo~h^ROURM2urDLypE9DGcgWrGtYAG4%PW;%#~ESGij*Ha z>jQetT%a5de})Cr=#S-a2CBB_mwQnepjBJuHnZVWJ~&pB7L;@2kM~1QPlGLt{JDd; z3=70)`rpCO%tEuE&mKw|k$i*MN+m{%I>@KlRX0&08ahk@BJ<3fnc@=9>e9Q7#C{X# zjF5T~DxBu;K3XuJ$@}PxbKv3GhD)KxWvF|YR_0LWH8@obz8g>A?oK3q<~X!*7i%?r zaXTgY(38zl5f7D4QD;rC9ReLX0--soJ%f8a8tVyUw+E%R@vhkfZwK?TF;Jl;GVm3n z!WgLC3P`^S;P4cp3XDxg@|%as%(V+?$9+k&9koPS#JDTEXC4r2Lz?Q)USA|hPk}gS z_7^QWXFA3xwhApYni^dr$VHFbgRU{+&K09Fl)C|(p53FwdyYd@Bzy<>T}SsQbsdq_ ztzfJ1%c!&mKS_Yj8C7sGi{+6PNDyV1b8fj0DzNr!fdg-@=7TSgDN z&|WVeSoB)$dijimf9rrT9sKl^$yj=)*7`NmD$5jwD^%0aH`1$MW^g&-{%73Mpcg@N@JH zJ*A8Z5EJ)N%pQ7_`jg;RVfcR|z_K8}3-HD!TKOBzeHLnN0xIqMSoCrc-W7u9;bH{u z{Y>360=^i5q2JjQ=xD^mX7F?d7fTK4c(R6a`r_Y@)`{iG-$0i!kb?)gR}49DE-D9& z40g}Th+|`zogKP^rH@)D2TFo-j`uJC5a~Qhs7I&i1N)Ms+FVSvy zENoAJ{Woe>8Q_8yB3|%*?tRluI<2DZi$e2A`11fOc)zny2J7%R#$ z9-GtvI3TknFL1x;e0+%F^Zcd)w6kTg2f)7xJ(Ptw3vwQolJXr2}cSO zf^$cB|32V0JLwBh%#$y)NXnsuRs{6Eh|KB9y%#td0^wQc-i!A2)0l?rI2-X?_DA5I 
zIU4rymVW!4p@p`_Rq!|8-F*7&V@QDc&h^dcik!R$)eH04g}iKCzL(bFsd9FtQr8!8~2J&je?*KIAlgLstWZSsL74WVp_}0YE>B}3g zTx-GhM;6Rl=)R=hi|$0!0HzM;iVetaJNO@m7TS)bXDspIK>wH6$v5l7}p)%tTEpaPKe zp(S_Cra+TC^!;`4^F%QFh=gY;$hGJBc0gN9LMq3hw$3doPp z;(c)MacFUd5ktSLoY1u{aC^o~0mg=|^ulEHUvVhzPDCod-L*N6hMEe7SKwy^Jz_sR z)#GLoQlK?h37p3uuU&yLfp?8c){Czo*X6V>b7{1$x&e zJm=UQ+xE2eBXF3NeLt9PfXb7x+!82p6DdeUs;7hNO|B9dZ_T{*4z2ux6d4io1AWz; z(dB6=8O&YJx(2=KAs6P2)gNmL8s}x`;NFyediwn@M5c_!_!24?MM4MDx+m`Z1{d~2 zrv$iWOu@%saRcf33dwNSeK+qHp_R^91D>>Kw0kqKn+`|aBQT4^I^gXO9m@joIKExL z$@9APMr#BlW057ZC~l%3je_4>z{Bjv6QGkh##~`K3h(q@RVN>U)@!hfjE}p_|AGP4 z%s+mW-sJ42GjEu0pekH42D}K673a;x)Uq9#n@j5n-ux4t@e$B=M0RRZDgt;5(|3Fp z13wxdO*`maH?UP*#G7FQnIp$k6%U+uvx&_j=f z&D1^~zBC2T2Z2HFvbT|jA(R^c%=gj*jq>V-X8JcctwlB~AX~?QXb&aF0E1bMngg3> ztLkm(YC$&c=VF$wU)^KqTz6bkpukK#AhY3N9CB9+scC>l9{>&|kh@w?q!@jp8xl63`+LAA z12ifV@M<>qrbDk;=)A#D$qa1`Ss&XSJbe$2Cc={x>U8(AH(DSSK6^TzdiEeaU@(2o zs2lU*#sGCrxM)U$ztA+J;K5U9UC(ytPN~|wU5L*-eudWESM+vQ*4;($6J|}#fNZ<} z@+c$v8Z?CVppg*9w>XnATGg`(hR|jitZ7U5d6i$`Oy?c!SF<@+;!YO0v+iz*(2{iy zlMzU726rzN@lrRvM|#58};TuLl~W# z0ollgEExf8Ja1aNyx*d&6y6)fc=-j~D9+Oedh4hB9|A8wr^gQAokCEo6;BRA&s4)kBOYnPc@XNpMDTUD=zHfZZVzAZ^)cawb!t#Inlz^{NsSdIc z*6&W$GPwFCH7+`SYuHTlpEvhV$hJ-rq*w@+7uL(3?L2 zk-5=49k~kQbbWf_czVG=w1ipT-lL_fXs+Irs)+s_$(vQ^Z?4=|K>s+>c#gxni~?I3 z>6>uZa~5);58VqfVtEzizDKJz3$(;9yuASlIRq39k;uJBd^viiCux=Cs!32P3a!u) z9d!oCssfKTo3`XT=tZsX$=Kv(pmv40A=>pXp1uwqm#Ak45N<=FJ5ie_^JvLG0EC_p z;rjJF{(b|@diqa6a*WVE4{q+p)P_FW8Poc4eTiNag_erp$z&vdDm|tg6rYW}E`fG> zP)`Sb|}2yGqF3&Mp5s8P;t zq}){c^B!us1y=6euZIt2s-4eWcir4g>IEe3mU~{tRVd|YcY2x652)u(N|j*N=zd5l zRICUL{h(P_#)g_`>2An~cFid;Pk~aWsnxT?T#+)XNFB!FeBApOF7*b=>geo^ z&jSP3x}4ukr0iuhSARIImC_wLjp1rV0EgV&M*UT>Fb47e8Gijm+uzg9Jl^qKq04Y) zBEO6ac?@`rH*5+f!@y++_TwX9@9xg4j7H6%jgiy8qKn;q>I}X9g67RBqc_p3JTseW zcI5qI^w%Jy(TJgQ$nr@fWn=Kp7ARd6xF<5+JkH-aJT<#`FCe=PB*me#8HOhynUTN5$1D9#3 z57g0$_SCps$WkThjDb!|;AbPGWe_E<1HFDsN?r>3!B~7fORmteIX1t5W9}ymfD(@& zYk9e69>>GfybeeY193}ixPFZ2X6|eWou7vqcYxGY;1uX+{>>+lgv+$u7TJCsiaZWf 
z4dG)vkX#BRVky7!Lr?cC&4>RwQgD>I++AADxV3{8T0`ApP$g&GrHtri5HMrf8-ZP8 zG|TtAwH{8)rro??a0A^iBPed%I~XAGjxDCrw0dqS0AzgOzc< zlfkJ7`o03CN27x_@x6jZ%|##22vzn$iF(x7mN9A_y)Fw-wPN%!$6f`vSQ)M1NyHm~ z--t5Txzoewb0-SApQ83tNbfITaSOed z2l<{1cFWNgH>laXL*@rFi`#bez~l3q}d+D`ITIplc}7_@_; zrQrJ!{?-fZ%mwt+3bf&=r{+^}?$wS_zA&<@w{-15;%_kW<^uk8^o@l`mM4{8V9r($ z9(yiOA~;BwOF*5Ka;~dZg#ww;u|^r|lW~of*1+vg!T4ElS_(vF&3p$~rBMcGr$Ky7&g)Z88Cuux>^*3$CDa1T%8ah=qxg;IoAybEB*pS8 zj1~VXxY~%s7)gA=>b$3P2qQ{U6-)=!~m;p>)TVo0|Gqb2( zNLt&LUW4_xlk#cvop2pp+tk|LrtOB%HygFY(xcX*7xY}K#j|;&Hus(>zM(b zPyae{E@jLQb(__RvHYsR(-+~xOytA7qi6YT44U@oCCU~<7o`K%uYhDRvxsD1&5EAy z$$Vic@-+&6hx4*9_QMDdGbm+8XM92}5kP6i*Q%6t&rE*l(e^WPFc=MFi3ngfrpKs~ zH9%oZxpOI>9Os)2x;l#+2vw!X3^34x$`c+Ekqo`dJg3f5CBV~}j2!b(A$$#M8FJyUry%vu-Y} z{8Lsg=b~pM@!#35GuyCy_;<#C{rc|=0^!p_|9M&wxSU<42kzIX-#Lttf?@25xl-IU z`h&5fJUSo)t(2wzyV~TgPHlLX1rFwfcWe0mfc}3ps9UM_laB8hM&DHA$QYoLXtN%S zBr~x#;?R`?u|Ri0k*~3cy1}_qaIiw%hK%S9d9Nis*$Ct%NZQl%R=q=9d9Kgb{CADf zm*1m#2h&0+$~(#&3N%j)va0=L4s*SnBhlbp&^Mvw>l3r{3_^OeF-9P_o;7x!`t=mo z%GD0qj4nP&&pOZOaS5r&j)rK4+)rbS)=$-JaOM;`%lK1`HdZjMuV!3*m=Ybq*=V*w z^wag+bJo(A`+2CdCB3B={JRO{*`a0m01C6Ud%|2a|IP1yEU0G|zjM>y#_{fIG~73g zqt&2NNm@7S_C(v%_e6*CykTcOyzRGKTa9ipkXf zD|A~%Er+1cyVx|d81?mGD+z_A$Uth+Htf%PPw`d?npt1t2jId@^!^FPcFTr2yt~rQ zW$G`D1SE5BHcwXZao4>heAx$7N>m>>_ABF^b5*mTe+#|yAR&)X+d62r1Psh6H#-<% zW>Uh_3b#;3n==J0U1c1PM%sbR;X3ohU`9KeI?Q5p1DAT5SM{oa;6_`eTbzcpY(=rh7IJbXDPHW$7x=8 zy96o9M=ih8V;j@l{m-0ZZ$<)pqjzSapD)8x&wzXcn`{zTzt3}bM81NP%YdgXZ*60=buViw zy0=e2!FoXNneO9wwhBtr#MYMY*^q}JtbGi_qS*+h&p`ow+9I(ZoHKQ#PHP$g-#v5B z?4Q4ZNjZ2j3~sFge&_qsXxY6@W6UGoPef`wZ((@A z$z0UdjCx(kmqOoz%VmC*0E%tdM0F`#P{&+3NUr%sVB=# z2F@Sp1tVzfQ)+GtMw{TXUVGhvb`EeQAR7y?(efZq>(D?!Ii!aG@M zZ4D*lUo^FNhDt4<+5ooen8{2*PL`mTYBFAX;)|Y(FL0fVjj$9dmZ1KJ8RN#ot^3fo z?pluc1@e25KQo{W z3wmTwy`yefR#SGv{EX2Cp8^&Mv>R#uaB5?8>sIARs zKAQKy_7lccJzvbI(GWS=1PqTNS;@d^-V)D4twYN>;iNI8>7mpz>P&)rN2vW6(v(aM zo@=G2o*6~<0ZlY~>d93CGTjLJ>HX50wm;>+nLF%3p4_Guu1Edr_`9CBe+JhsXsvbh 
zj~YSA(3s-OV7}HUB1rydIMWdYOH~tGCJP8cDvzgO=RE+ zdhIoI@oIR~pEo_z+I^!|w7nY1Hb3nvjOfW|whTy5Q#fc;TtDo*CBSx(maU-!HT?mt z%+EL+ULHeReFV17>dm|OBkgaXg@Nca^Tgia>OP*d;d&YrsKA|KNO==JTA_adgQL@0 z=;5k+Xpw|}i=v&hsI?Qd@%0`u->BA9`d}O`guM5mj=H?5l^lK z;pu(o>)ZS~3nUGpmh1TCxhjOb)C9siNNHd0MgU!9TF@)DJ97I9Bg0VaSLflzWC+(> zdO$w-(+xXQi(KnW+h5BuiElpo$JT_LsF0xw#X?Evj4HBr0 zmWeX4@a#dL`hc2-plLkkx+W#6fm?R!Ge6ij)H0V+4UyLG;JkY!+4hWS+<)bAd80OLQ(m}Z!j0<`>eaRZTI~eYO{Ec*)oyq9-M}Xix?&{%G z94OW>8l;1B=K{TUiI&B79MsBztmfys2Gso*eY-e!jr}PE)b0UR=b6^C_HcUc82u4N zPaX<~BKU;aO0z+)uJo=F(0nmtl@O()%}423+7vI-P96B=%Da1i-FYt-DbV8ASLr?8 ze>0GeD1JYPBs_@(nkjZCP#K9a8ywsh$wV&~`gs0E(Eo%X{J4|hCmX%fI`pN}ntzU~ zB)%D;tk0c;IA75h(7(=QoIR%Pm{~Ubgj7oV_0NC%h_aG)=LPC44s`CL>3KaZ=vn%( zWkD7v2dyVT-$J~R#GENT+E1M*zvbfKfL_kcoMAeflG;(|Ky%zwN50Igs7||%^ehLj z&0H;lM*8YH+P=&EH;`TZBln=)2GZ~M0f(ng8T;6Wam;f+f93i#qjguL%3bhE{GLPa zs6&g(=*ec8Xhe_lKEREs4^d( znt40@#}nw>dpOUX7~s>3+<1Y+pv|?&a5TC>|Eig^q0f_1%jQ8%!B)~gqX1IX5MGsl zE={SwIjt;)!kd9G4_KPrG8#CHDY}V;Rgt$lG2V2b>?QQE*>l=pS$@jceFo^?g&T7i zC5F+rPr!fId|aE~2egZk;`UIg6m@+~U1#pvnnui;ukahJkFpg#zlx`i!B;(SJe7C?<(fc)`9R^>M6Oxu1>!8i zvxGdCBbr*iL^Ca?HS?Xyk&Qqz6Ui_$dn!0?L-tNlb|JLg%~#9y05W4-K#pMK>rb0n zY-ZJ52B&=2{Gjd7d(JML^u>XX90NT4c(qs-Tb9m{mbeX&Jfi0fejl6OtLBBC05xn0MZZ@P2 z_ng8UwKu_LE*y2vR~mSFKwI}?JP*}8{hsmY+HC@q@4#I78W6cRn#{d6(5W(XcpRE| zUZ`IDb!l$^_J3vGFlU7MwVc6@Lek!WXVc))Jf3{Y7&Q_&oDY~`>q#VD|Bh;uHwwtP zL`&c?BXe#_?19d87!^FX#QY!5AkM()7{*rXxeCWq`F|&XqzyRFfzqD#nHMaK!}ta5 zekz!SC&Hs5jLNN$OzoS^wDLOm4g%^|106XC$!!J?%Q4b=YTl>8_^=HAyp65WnEqIr zJ{U>Q>%$ndk>{tN+8JQai4A6kFQc=Ryi4e=0^n^%KA&9+c=rJRjeCC$sI{8QfnOKu zXhb!TTu`_^dhu=UjfTQTG-aUIl!Q}elRgOLlHv3M zD7yj(j9It>X5S!@Z==)CKvnImI+XN$s3Jh;8Q>?8Ro7NeLVMTb&0Q9rSner9g&D)w zQnqej>1BX=X3HMSyxX&dchGuaC~fXvIa3IkC>pHf9)zQ78L5k*tHyC<%z1%fd`S?mJ_Eg8elz>y_z z`VLxVK2V!4(p9n5P(lw#eZlQ1V6fxm7yf|GE^FGPW>(@Qe!7A_9g$p zV=q0n^}j1k&l+{|D&#KV++vn}klb@|2Dz>Y@|s%u-6KuxZuGV#?j$SMy?pV1f%O1?H~A3h;FKw6#Dx*>TCej{A~^r^sh{GQAeu<{|NZ%WAELCYmFL-I%Urk| 
zYuySIq47vPXJ@7{{yaxNZf5gpq364>`h)h@DxJIiGF#4VHgnmxawPauEu^*D#oy!y zEAwd=+WVn4yR$9xXkIBsPA#QTg{v|uA{^|bTqdzY#}FKg1bAq)aPwI!Cqz` zJn_#blJrDFRAtfYkhR1*tY$4^sgv{RdZYH8`kMg$+%%jM7`bCNJ2_d!Dy?q!1>b6K zUc~vtyd<(Do^LXmW)z8MUB~E>yQ8y1o~$34|0P!V?!s5eo<}9tCi%jCF`nkc$Jff- z3Zm>vXN7D9+;RhM1D}SYd_(W=h07^=`O+A@)JUCEjBjb>JQgl7^6}cH@;mP_-W{p$ zPvDl54rADd#0}-_<1}y$!nvH6NkqTA z%+3R+4jqDCb7zG9Cn!SY0_t7oc}8_?&gxi2Vkn_BKXn~r-qW0W<%nc9fgUK!*g`4^|~ z!xE|ezOvbsI**;$>|0hCoA7n_^ESI2+3`3U^_m(DnxR^5Rmoo3MzB1Ci%a2^-HGHt z&K+!jA^AU$<@1zIq;%p;PJ_V!7)HjnD%F-Oc7tntX%7UAWyuz2T^KpZ4?acpMq- z=`hsD9T_ud{^6i+2gXLEYEz*v*@H+nyX0_-pWlsyJ_7RWadg!704;UbQat#BNymy} zPSC}te(+D`!Zt>Q75vofXw~I=+^4Texc(do+3Sj~tRapyJmE&N}C z?km;#gx{GP(C;9diMsm8|BqllSN+7;H8-a2!_I``EsS`5+0{fzUBdDVpq+_CJC8q; z97scW6v-1%*O+$@_!}twov~piny#dK_Y@M445P=wXA~>5LhnZud{6eXmfHGKz1;u2 zfh^X7Z*0(dT)i6A#^b}?s5wOM+gQJ*sFMr_gOyy00t3);9ga`(Zid;xA%1fUWIZ(P zg70-;eI`#Jdl60PU3OGwphdDA4>5A~huei{zP&!yfgm|y$9j`&8y}wAF z6Uklng0gOZ2iZOoL8x^Z*~$$9YsmAR+KGIet)KAuT0hBi{7(|x1g%DE>wcI| z_Vjx8?;&{Pp3}WaNv#5c+^K#cPj#Xmb_0JsP+eu*n?Xmr(f#b?JP1MW#_t>~CsF%9}2a}D%+25RsA44iS zgY_VGHQ7COr-8@#^cQmRFk6(nT5@xEEw!hj_~+#D747_n@-L&@CcK#dv&s7C#Oib> zOO5DaeLPr#s#ADCFY#@1jxo1F)Fv0__|%Zca;N`y!nHQ<;tMhmPkjfHl^B{vFnfgb zXJ@3H&pYGREwGHwRQl{>xV{+ zCb+W`8`0jgGVSNA$DUx@p4=Yq`8pD?lXp3_opW@_(w5dw^j$S*Zh(ZhGAZ412KwLrBy2q`5D=a}#epvUxrVX5Q9M$wB_N zQ7-2le#DCzD0HjdZYVV7M0F>!I)h-|!VKtk96VJ!t^NOF0o!}*^dH!_*VRuZ(`afR z(sNMZKc5SRT}V%R6wYeVmmU^4mVcvCQ7HvE0F1C z$|URX;W)bzEwj)5lJ`5&rlz#)ef5%+ud)@@*oJ-4C-9ecT6QP9t$fbUE@*iGOd9Iz zB6Wg)G6~kx6D)4(b6(}tc+8=^}LlOD7D=VW;eoj zK|!zZxx8_BXE4sBsihs0os|B{=kg?R5p7RW9$B$KWlKLta$(59_z6PiNWv8ie)wBA$~+o^)2)5aJKvd zeEpQAt83i8AEjoXbJkuyVO0n5fMa2@gMcuJ>-f_Okk%dCWkik1=}W_mVypB}wM^^8{LRxx}hTllZv#>iHB~}>S&(rEs9LgPRIS-t(E4ei}c@2lt%yAkU9cGv4SePDFKs?JxeKef97Wl@m=&1JoApQ>K$zYUSt&QqruDA(>vz|T} z$A^*Zj3mhg_D`IRJ&)GaL#ymr)j@}uTF%TZ5o&Mgua|Z{*J>iq;$1DmgIDq6K-iyc zynh9}OG#aScqOCqM0BXFR7Wtjgxmk%XepBIT>U)&i^Qc3X7LX4H@%+*rWPo-1aGg@ z)8kqk1^zmPrHs!`=ycj2X-?N 
zL=Tay?~8fLABvgHA)cH_=ikMJ)0ItrhM(YcMS<^KbmBF5XXf~z(I>0E^I(%X@s6bb zEm*Ynwi=jvf;jn+dxD{taV@#Va++i@O6894%#-gV$+NU_HQBz7pYyghXQ`cR?jL~l zTbh58lJTm3#iKLPViH_upyB;mN<`LNa@L7F{6_M|v+0TOUW^Y_Nm^H}rq2C^e4T`g zYv9r!jF}f^rf`&RxiPmXia&}_T1 zmK%jTpvrOj`IOAIC-KLDZw86FmsHN6o7vZS0hBEY%u6|ymhxl|f#(G{7a8yB{h!&c zZlrM#+tv}CnuDS$@8=3wHb&dnwXI}5>wuNK$tmaLOIQW(K3Wg&uhnAC9d#y6$*Oml z&y&b=SG>QRojQeFp9HqV2p&bpZ$syt0?R&T=1%KD@i}Qeo;6O^rkt@^p{49ADB--S*1o#8P)7`3!v?2HW^j8%TA|yFRCU1DbdNntw^=j#Mvk`rT-GRH z1l@|_|Ndxv2DwP)jpW076SnI?@}hCDDxJu=#KbQ>Mqb|p&t$S&NgpTCp824jh8N%A z&6#*}6}Y;h)3IpTAMFx*m798|YQL!`lhEl|TJR-}%Pv@Dn(tO3!cC<0$%En~%~NM#)4FFZVP(CtJbxbheh7*^9|uTWA_7 z98dm6;czTc?Ehx%u7Ocwede~oiFyyj^+|6n@|l}*t}wneXX|d($BjIhcK+`Gt~IEW z{N34mNe-l({rfYhUjSi7`NsT;{qW@>y4p~0M|hvC^$o~M^2qJIw^m(Y?v%$3+ zjvr4>4#Bav_4TMXxe@R}JzNE%0qFQl@uZtFo8WP5VeK1(bd0`kVXv~+f2^80J(paR zpR0c?jFQXyS04SLu+9ikk=1=biyc9bc(mEt{t-nF0^M6Exz<>m6W8%3vyW0b&5@Pj z%RFz4R%zpRuP8oRZeGaBPUayai&`li-1kAFa(I3_4C6un%8DnJDtj@deTw*e8&P>z zJX*-Iy>4t+sqDd?UxVH)Nq1e=yCSXoutPN?`VQtq&9#snt(`!=EBaJmktd= z-OAc?=0?AtDA*JC4FI3D*7?CC=9vIFy;_Y(^`CCcjxJZe?o zverzd2biXV_DyZA(MBtrAK4x96 z68T!lf?dmlSZBmfH2*(nLGB>hXp~5Hidd3lxw?gnSEdcQZ97p^qs(jNVB8%I_e9CDcyz0FGV6Jpzi=t*P>a^? z3;G8=+raK$ho;vWks70DTlgfa&y#%cV_D|ON*t@5S7`eWO73l}Ud@L8iw-B3)hyKN zgeG%g{hnTOiae*^`snKc{#4d7r_z$6*_v5+{2@#m(x^Ttb1Q3~Na)Ye>NW4OAJm1N zi0`!zoqO3xmYhuw@jo+aY=PgO3$NpOG~WoS>iG5H_p*ajbR3jZJ! 
z+k~pQF?tQDTc=!0e&0}^M;5R?2KE|cD`$Jc{oO&=9_+~lJO?eao6%qWhDPMU3-RwZ7Gi_;a@H>QV9zHH$(wepx+Ar4iFcQvLG~5u_#J>Q$ryK{ zcE49D@m!PPe{wfcz$FS^^CWP6*- z8r6gMRNllkPp`wBeQ;?N+BQYG$4P4k<66$~H&)_Ebf1PMxlKKBkYAI|L+E_u|46te zyVYMnm?-Gno8FWhxAFF4a2%t2vgX%fBXg&DHP|;n)%d@OXrE0Alc{toJz0ttZ;+L$ zWbGN=PHqK^=3n9SZt#e&pL5Usv|PW?oOo~Hz(AOf#-VOFA8B|L#Pw+EpUJ~``nQ54 zPT-Neg%7uRx*6T-q3BjLy#zh`z-Vt2nT59}(}cY{+;2qh!z1ce)Q&%OI62SB-ahQ~ z2GV^EteV3onf{`ktI_sNZOljWUTP&?s;MWZdz;z+Qd%icKMCMK1 z+5Y&Xd!R|6xK=x>P=8O~s{5NevS0T(JDWS;P3~EWWFLZBnV%%H`zywcWEK8}tlx_- zAF7v?%{sIvw_+q>Y?0r2Ah=7t4SGz@u}jpw6xH5mEds-K{#T}Db;-{gFz;w&?5eNa z;`cl{CZpfaq#!$%j-@-5_OvGnV~wUe;OIuQ+iWEHE7V!X8PV}9U%aI3+a?e2NYcL)tY^~ItpD$-)y~SDLk>XY@tnNAx-%1;Y;@D8Oc{#h>Qt272?*yUN*W7tk&zmdw?K|K|?j31FYZ9L`5{5r&BX_2pfW}#sPv(VIq-r(F<=)Lw z=A-nUb@R)#K2wS8UmVDK<+hh^JU@{f<*dsF{q2Ik$7we^xx3Pv8aO$LELS9@Q`F4P zO*yux%y?5esZp12Gs3f0BrboGLHK;Mh{og|^b=T*mp!XP8wS(dC$;pewzDg98@Uf8 z_4KqesJ>;fh8Rx^(U z@%WqqlH?kHk*xidUhL>|@?4h#)m@$p!rR`ymG{4NZauky%fUV8ZpwRhXW^fRi&rZD zoG~>yrE()mKU90xr|oD!Njh>OWqKhkJK{s>St)*SZ`9ob9(73chbYw!J+kl6QtiKy z{J*m1`hz$JTZWuBb?rr{uZ9ZZRUdx9K>&d+6h18DM{f5pDOqlH;)`)uBDqV_MOZ})-w7jI{(oji(( zJR4BFdr?b$*spuo*;w8c?AJX;#y33635J)^XCPY;&pxNXAJ;-NEoKyIiq~uCL84l+ z_WG#)YQW=Ia`+}~Ifm8C>4k6g+eABAv%M1~ODF!PfHgl2XvJXV6Tdc#@18T8C(z~G zI+Pr-b;$nfM%mn-ka+6Eg|A?j>u7NlS;^_i_Q@p;!kUfpgmxKKtwJMSJkJ+z82ULPl&X-()hslSV zdsX7!>`(J@PULj<<~sWSRblg!N2+uu!kx-qt)1#{tLteZBodSHPp!?+*Oze3>h*81 znSg?4ld9KHJa?pgpw}&6noBCKRKJ!oqm7H}L6>vW_krXQ?Ip_WRx+Erz7A3=u{Q^q z!6X+?PAktrrTByUpvp9sWftponD%C(b@B#eFYF3qV&(!lOHmyzbwIQoo~?lWtFTQ5 zg6#FY1&0~1&dt|DaU>^OlE3&`m|v^!v0x45pJ$EY*=4QzIaX(P_2!bM?BFJ^b#l#Y zrAawKu?pNxe0r1y)dopjKHv)0By+2iK--Bl&(+u4;CP!A%$<3Wk*kaYcf&6k*Rq~F zfyX_cwaEF6xAAj1?BYMyrb*f97=$;M~>Ygk-xCYIbGWt ze#yzu6CIEB{z}koB`H(UE+>(e!Sz18XiC3wz947#pGT{MS+``F8IOmF&%e^BF^MKW zgm&{u!sVX5O6&W>FFB14#oD;uI&yxLKToL5WU zjPNOYj~(HB3+~@jcpAA+F*%oWBj;$49!17>LyJL$C*KPV5`{SxjG423fI_(ivy!nk zbb80gahD#?V##Xp1+!<8b2H80k)6eF)%ue$>KM2tvN1E?Bj8y9pKFux_=#t$ol`E^ 
zv)WBR-APv+Z_ZM`qw#J}pAXbyKW%0{ntX)aV6;R#D?G~`uE(RvTr%^a5pbmPx#6=3 zdcTi%FOlg7XkYI7EJfl~C7nC_yp_x*J9iy0y{TT#cm9jlu|15c!@Dxb+pv_$Hm96pAxId$}uHq+Yx{AvK_ zdgyl({=GwfYN7rJvc3?dUIpQEASh++y$k;uf^7s2#PdEJjfT_S8_^}2nMloKJ;+In zL`a@WdZW#WA-@shxVF-^G);oO6ugMg9Eiy zfj-4g-45)#s&_fQS2I%g(olnx@rr@V7mP zDwB}zEOcTClCiEm{arvp7aP$x)26;CeLl`-CG&5nn|oRk{hC?G+4L-T8#QH(sv1pa zdz1W>$q>^AfBuCB*ZH1rmxAFFqe4&E?5N}}?A6yeR~c<%!?JRhz1-HM{cZT>HudbN zeFNrX$h5Oh5?Xlk~ z9HxswI8&?lp>v`L>yw6Lbo?G~vuabiw=sGBzM`?`7b{OWt#=>^T0=u$z{_|q&9u9e zROc>}N0n~k-Da(y?pqz4DBX3NmBaH`?95Z+n4%#`n@IXtaYcfmADp}@h@lh{BjpS6_mF&Lnd*;vAX(ykK@;|2-a!+FR zWD=9|vFAO>$Y!M$6tlnNmWuD4nO*Ar+3Nm8T9nT@QPoHr!E%GYi3VCv8_GbvwJ4E( zqQTkunZ^z#`|Tes zPtM!}jV{N4BamgEV0SdQjAdv^LMD@f>zU)qDDwg8s26!)Qb}zdP+)t7P``l6|>7=#Ek+^U+6?Om`J?lu9J39{ZLP^~vzv2EUF{?urc0e+J{=IBwsizuuTK(rqshv_Dvr?(j4p43bUCHWP z#Ai$P^lJ3tFtyHye+8O<3@+u=P;ISm#j6Jjtj|E{Vdi-`k(m9#^XbmF=sb}Im;I;D z-=S%Wy0J%_*oZASF`h0b%KduSUFGRI6q?7*{ExBj7d)NKLdQ}rrT0UO+S|Y}j+Q2t zYJ}Qb@%np|J&$}`hEvB}2*Ju{oH*s8AJ*^wp8Sr+sMmYn$n)l9S@p#%1_b$kBlSF zsGnP#KLx?l?Dbjr-Io1`-?@(TuEOVM*@I*-pQFzc*^+BeqYIiRLhL;r*NeFEo9Ee6 z&sqK>Q7-po{KcE>J-l1YJt~871~8H=g^g_9@#gosO)T{NL zeT3YU{S!#vC}3--#tE>E+$p>pZg$p6?*fOmMy9O)CnD(?8b5&@ z-W44p$$KmPG48d{V4^(?eBvn=SrR=t&!o8wEJAW*2mxM_~lgWE9Cq%W5HDgzGGO7O*oaj z$_r^iPrA|%Zn?erRs7t`vo2)xBhr(ceAUb|>w|ccCx0XJIoXoC-fjiKVMd2ON@a&K zl64UpUqx;gv(jJCiVbLZ7MqwEUK1wnzlM173~Kek=bYIXt*_Hje+q8jg0g)*PuAy|MwLnGW+!tm9bL+!=!CMXy~zpw8LUh8Yii(7H+@zid!6;V zTz^AQAfr%I)SJOlBu2Hg_LUO{`;fGOg}ttdcgY%cv7TBN(lHE8mU)_Nbh&lwGwtsJ zuQ|m^#iwew*J|cOcNmj4!tow(ze5hzYa<%p8$8X7f8*frm~RiU?k5<_pF*SBMTy7p zrj1s!i~0yTor2GaWn4f)azgKJ&)@WuEE?T;A7`QMeyDUmo1L5568&{HS|(aaA)Z1|qG%JA`GSg%5rWd1w~J>voW0ktwqXrrB{i}9c-Nz5L4PCKvVBa}wt zRv>)H$ev7>*MPOT|HE;-0&S>IHk-lxL;7(8ICJ8B6{}PZ7ZU-tfINq)Eec=fef=dy zA@}HJJu~N7I`Iw1K^%Ls{>hyH{sU|hHDCao{S#f zkg8<*-IosCqNn5>dzeRcK_T_0;7Bv@yickh#Irrg{9sSosne0SGZvNKW>==uSs2#dx6?(Y#;=Q$_p^21w%CkhkujZL% zMw`>&{j^D-eLbvHVRgxM_pi>(YhkZyB?*JXDFu}ex+Z#lIQzK=1u5&Htys; 
z$=c`;ytgL-*>8*YJ%(IO1lb8jyE){k9XZGjW*{%+t9p(GCw}P*(v|FRgHbH8RvkT? zNau3$B|AMkk%*j{{}3Ir*Yl(D@$z$bVotO@Mhg=)wiB!tz~!&-%N>r1hR?1__8)R) z{!b{H?8-Syl{sd0TCxrI57Xwos2JZr^z2PGCZk;LT&!5Yo9qC+wR4LWk09CAU|JD$ z6-mKP#*Yf9Thp`b{ss0wvTCjTjpSrMDzvfB;Z1gTN)cjZ#nW=W)$?grzmcPq{NH~< zz1&2(D{HaY=QB_%J1!Zu%hTuVDekR}ocHeS{TjS_(*N9{IS3R_q0ylr+OFtpJ0<`B z{j@OZ<|gn&ON`{POyNiTK$cHrV|GQ2V{mtf5%UZ0eq*bW$s=nX7oo^8h4rdljEgl` zo17gzQ`w)5+_|f&uCZyh)rH)$w2qxm&Y$e6B*Xt)_|(AlKhXM%3yZReEqGWf$rYUF z+E}Ife2d#iRd$0np+s^a{$gCLNW+rlC^-?|A>-LIxYYR66u(Q^gjZ@OzWUpE`LOny zllw>bZ(X#WlaR~l>S(qv_rtwlR6Ce_twzn{Ok7XeKZ3)1=sZLHcrUpdA+zF~xUcW& z^(3hejA9G>6%clzt)D126`afYq5q@5P0CGSH%A+la&Pf_psGXWHy7h=qDKa><5|Z_ zCf8&JO zA6MgTwc+(=eepn;WNki~u5wr9Bwq0m#;4UNmK-aCK-0~b^%1G*qSxGAn7Ey+d}aOW zBli6BD)P$y0PQ+&aQHv4C5&IMXP!+LPpAy~@b)IbL_b%ckV* z2$GbU;Dsc1DeY=Rr}jjF?2&E2fmaGoA@Sg)2&P0qp9X?$ z*d3i_k&0XNmm3O}8xs~83EQzVm*Hnay7(sSlBp(H8;0Wedmu<`->2ZI0=_zAuRRH9 zYaA)X1yxbwbGnebq-U_OchiZ}@MUkGl85+zP~!m-a-sN`3oVk4sCvwtxF)uit*%Y5MM?b>YG8EOmgXw-~9L^ue%f zjt?hkFVdC$u>q{zavGiJm(O69)rh)yGo+x{c((c}Ep=tb5-*t@n_(!J9MQY$BUv1l zgQqDCi%q!{w0kNQ`ATNsN6<0y^trwUsWY1nokuob#kpiy9>5z+ezKpv%__(7-Xtqu z?g`IHqQ0JPVp~?C^v~!t4-Xp~M|OpO7h~pY=++PxS)Fc*PPxr5S;BI*s1Hu%l*Nr| zW&dJ#{ns=GCQsG9`cK}EH{qR}M+@M6B>souquJWI+RWZtR-0##&gVgTG2iQR-;%rU z$wKn$>O0xVa_V(18IQgDt-~TCdIP;*3hvX<{8fKPqDgW$#KTYQRYTuv>oxP*y6aaqPty9LVioZCI3@we|=teM^ZONk&KS5}|kuO{u8WJ-{$q zyQi>7qrp}u*qdc!eq1q=LrPfytwIUTI|T#96?>3`Ox6Tvl==4a>UOXU&=RtxMmse3X?l|#Wq zP$r_WG%tD)XEVoG=H2=K_JvFNXLlhj9o75}#Lda~rEEznPp(itXV7y?(oJf9&39=_ z4yzTKUY>-v@P4b_zr~mKN>0S7`Q-d9TqsBGa&}?|8Xk&PQ@Szx77ur=XA=t_^$DX? 
z<|?^GWIy!IoNX>yU5yG$)%pU|FOieaQM?%&I78_ZSwr1<|Xlh(fQJU_P=e`|v++-~Q8XFoa3leN#4`W{QVB4wo=zg_StnGnt~ z8Y}>JTA#??)rVp7Oh*^eYGyU1pUjOa8PhYz$cn|TBx)bjf0oQWZI$8zwjlF^M)=v1 zm6*Wh{7M=R^}jMp)X@8vS@$-y>PXy64EeJ@{SV7pmkrE~9S^g0HSqaLuzqZX3oEK?(1KXGL)ya8vioXA))M!Py3sTsJ zuCE~9KeKU(TS+`q&XYW7o|`i>@y~m(0h4g)Mbzo9#Z@r78^x|uwkODQqxToA<9lRy zG@1S*Ny{1Ei_s(JX@4R^n^{bEBpcCZ;>lv|=4^RobVy`V?l8Ppds*Mj{&(n>b@F7+ z$W3?2i&@dAGM?sUuj&>&4F>IuRs+!VR;@P&K`GWfH>_;(WJBQr?XLC!R2XWM&mD*L zecG(u<*Z9D)OsH0#^|}VpI31;k>AOR+qqctS!4X_1G7W%=T{?4ytgs9k(?hdvecJ* zmh7ZirG5on*2DQGIDL#id+RA{ztzD0Cm1D1^UZX30^D-qCUchBdYpqA?NR3x<*I9` zCygDYT-Hrf>&~q3`V8WOfM_fH?rtIsaqMoIRkYv9(2`4a)$mu-a^+ONKZ0A z=Vs;Eq%u)_Ys|y;BVl(NAv&UJcyl-3{4eBSfO7BP?uDrJEeH-F0l(6cuY60noRV(> z%hs@dh&-GJ-^>fwX)kNr2bil3fc23mk{Gd4UaB_6iqt+0)lbsuw|e-6cE5rn*W&S9 zaDS_%70LzLMR47XXLb@vUO|tZGqNROtTP`bGrc2eS9|hRi}dBh!ekV03!ZK8&3!ak zU9ZdsxS1c3xyTUq|5LE;rCjcjzZG>eGn!*gb(k76P;!KKnYC3y#g3qQlw}`FYBs{> zM&rv!t!D@1Hc$Jgb2gk00Pn9f?qf8VX#{!=Wj;2BB%jxMKgkq2n(vfcpEL04V>s?YHw=RqKgUT%_kV$}ZPk+dzAXgyUZE0ckrCiEc-1(Sg#|& zwz%Nu2>p&gsl=o0%|1>iJ>ThN6`0n*yNNc|pnhL?WY4z?=xzmHBKTc`A=;Oyi7 z2;*?(Xz$_4C7y1C@uU2Kcxq$G*5`PzNUg zn@GfuFxwGa{YXM(7}Q6f+_6{Lv*sxIx)%08&6370z|%|hm)(|}SIImtvy;p#9^>_G z)#GJw@1k~YaLMiPPZpd?E{Npj7=Tufg5?GH+ysvIjGH+Dle@9&laA5)AFa3eu&E^<-rwad*i)v_PHXlzNJNs881VqVkz~*#Vs}!#xXKT8!G?7gq9m?Ps@XA>48vsR^pQ!bjejCiE>B0_MCs+MdCqN|1K!NRl5?vP z=^j8oV!PtS-c8m{Q>VF6`yujshSshs*0Bb8UmBH`vo=G?#a90lwR$P(?SNC?lF&|i z9;)vrwVQRCC&@?TB{}fx8Uc@`ZSe*&;vd9jTts8zSw3I{`xfngtCR5J5DU3+MAhZayk$5{Liez1hDN2(#Jes0QTHBIEh@ofWyzB zLR(KB_3Q)^xjhS+bM4>b@yVnmdk#C2mYeyq$>V&SHd}z_GjJ!b z*XWd)!TDsOgR&QsrkvY90DqGyyd7^LyIqH}RXMZK1{Bq``#p_rK{G4z6AmMTCI2`0 zlsxwAowP)O0R;qa!Y3>8y$f1pKOnc0>dpN~YmFORmG6 z@Zt>cUIkxS;vw_^m8mUby|JzUI z7R{|M=Stej zZF$KB_=0hyBRiZdNni4Svc8-3u*+EJ8daJOoUP*kXmSUDN21+cvZ95ljNL9UZ+I%e4_8uS&p%^Wf1xN0R#(;!QUy}$GfE{ z^)Va!n^xjoS7r0!r)2H%Vy)!7b8fntTG;kmS-9kz{f!lyrQMtk$qi)BgSeMb=0lJT zMvtZ7O;p!x)^>4$-4r8!GZrCdM7ojN!{K_35}Ad3%nM1hNmD&11Ms0ry~V!%!&sB- 
zIm19QRmo&IPR^mn*~r}Pc(M`VC3@FP3r&>UQ(wv984v3&*pK6l%~G-oshf$C@t!{= zr`cmk%zGlzA4P|oXv`?(Uj)n3_>h~6=fE^0az7Xi(O*vIpRVVru*+SnL%}?%z%n<` zB^OmJaZ7Z_tf3c~?@wMXr>%*+$ZcS)$yQ4;eLs3mz>lM~mAIpe^f`p&^zu1==f2>4 zpB>8Tde$a$awTU3zW19E@nmr2M&PP?Pwv;u3r3>xc=l`v=uS4`CVyG(-pK9pD~#_? zX|FOoN8<9QY+CY2wLqTGPV@nbmCmo4i>OLI5bK;@FfAo~>G`J9u$ zXX*c9yqN3F>EtO<7T1%NZhp7Xq{?b0LwmCB=1f;^Y}=sqq29)iYgX_eH#lqs?Rzji zkxWe>O*NF?18;L<$8@7#Zv9OZOk(8L(1h?Vky&%JdNvpvv4qLSI}K#(Sd&OpHxzyl zt;6@k?X`wOBi|2(ch2Sh!`qzKX$k&gz|u`?$# z?E|Rz6V7I@@o43$!DBsqI*`go;lB=!xl1&;QIZojrwk4=S8aph#~2&4uhjr$lbP%V zu&*d)979mL7tbcUR(QrSh`6k##kdT~%O19n)V46E`auz@H zyqto#*NFbScYAr3yE9^el0PERe2LNNO5Sc!>r!n!k8@keU8TZ9eI5kw87anr=UY12 z90ilZZ5~W_#)a3&-KR#uoGcln%mLcSP3i}LI(ZXbMxUHMd{OPpYTq#yE!NZNxb`Lv z}SP1rwbmnkc_ax{t_Zgw}L$&!3I-KUoJoqO*WgDgQ@we`L)O6H!7`s8CR__NmD7Pqn!^`v$$L$5PPN<)%<6Kef~RX7j*A7JB` z71rZs-)~j6E5CE3{%^vyJz+Kyg>PmxU%{1X`k74!a$8MZebw_mXP}=3{S8K}dyT-^ zh0XbutTv3$T3_(L1(rR*unW31Acrk+Xg;oQ*7`zY^U-j+NsZjD{-IIjCV0Q@Z?Z(3 zNy69Dvzl6e0e@DY^V2v`9R@ig^t2hqF#RNl+5zOPv{IOi_BqQk0q$A#%vy4FpH``z zTPKpID$xwvld9K^#)Gw$eBVca@pbcn;jBt|z0abtH9_)}`jcV%IvOW8R&D?5v8kOs z%lO+@-wzjFMP*W&)97z|a-n){l*%ceJWC$82iTLuy^V$44BwK$Y@_yCqw*$}>si$K zh~)KBz73xC$GIh>E&HGY(QO*2-UMk+wM%O%$&;5eUUk*V=<_Q(HkCHlfYBm9J8L&P zNuyEkTfW0-q@yDHc@7!>*w0Ngq&dl4<@rdII_}?4Zi3ln{D^(YdGTi2$^L&0&~(>- zZmAqsjJV0tFoMMz%T{D9Br!2hsP_Tfe?{X(tXgg&{aNW}{CyeEXRy6jvHrKv<_E#@ z2T*q-C2LVPIk7sEr>eAL|ANxP3tDfFB75LWXVi)}oE@g^l+At5WrgqbyD!$&x6%$k zR-O~p)Y=+M{Kv%W*Yq|KUAY@DCqGJagn5PUms>>=Nto!EX+@hmv8IU#$xVkTn-dz@ zBT0sW8pXHdaEh0?Z{ZWpL&Nk_9#?ne)3qXB=d%r2lU`vak-2g`e9sMv%Y52i`5U!- z04a=zm_3b&zMrp!%uFia{ojfw$?Dqt-W` z+W^gTUwGnsdgEIo5FA~=n7xi<08SLwX0Vr`^;j64&wHt%&R|f+Lxk&pu!sk{5F*ZR|%L$MP3{ zVWFn84~Ya`TzG2n;-BRGE+9dt8&MKl^Ab920L5=C(i#@Hm|AN=! 
zy6!D=<$9!ok;CXG(BE_#)lQ)-qBdzoR+sG^-mS}uQ5v0LG9kS zbue6()4W7DD^p+sI33oTX-8z#mi6bt3(SVd-%_=JrSJy!7RRE za(`xR;VkWzW+XXNya@HrVJDJjs{=lth2!IlIVW1-NmNA*Pihp}`YJkI0ms=Wn;CfY zDCc6Q6qF1#6XR8iw3$t=h8W2c@i>-G@d3U+MgJ0OpIg3j(jceNlS^qnm~)0@5*c4a zPES|%1+;q{zj~6M$oHq(IRx(8NMBDpZs_}Rtt9#|v+PSy{d1q=hbPkNgrbe)tXihU z#D2#wJO$Ttp6Meab+kTrZ)DzjE^A%K*m4KUpIb%GG0x^Lfm+&Ks!q;I+=1sa^pu?P z2kZAd6rPLXt$fO=!gSKPTz}noVl7$N+-p9H22^K7hp0J#^e3zD1A6~b*}nSw9PMsJ z{~2U@H=I6;?MhVV^|1ch)4M=%bWy9B)?xu_8?oCMDaU~~qgXlEd}0=un?9Pe237Q% zQpNaz?DPTg=Kh=*yqVSq1?NZ+f%+_HFB?L zQ@B@0)7*vERPAw|U8ALg)rjvNZ*LZE-UOrEr;#1tS3P|SY-buX=j*pj+bc+4vXcj`FWi8u*Jdqt|YI_6g_%vQ-v`g-$zQz3IB$9S99;`#HUY*ThevIa5O&))dm-~8iTPpgyl&&;lF@+`Y8?ex_dpL09NrM_(s z`UW6e0QXwP@niwJ85CKENFIwlP&@O~%6e`~FJjYj_BwI(b6`J(Pm~-p!(m$+o)>v` zJNX*K3NP@v7742l{w4aVMZccX+g5m%!EK$-vE{i#_!^#9dHp2nYX~^9d-X;!!zwGR zNh0<__v|ueZrB-R*Z7=7%lE}{G`%2}k zQ)jX~7455-Vi zJ=hu7+o+xW(6>En4WsQ?;pYqg=4v+d5UnPFso*xn453scE(E0!pk{b*Y`Bfc!X<@T>*$)YAZ}*dV&%S>1^GDE}s^uHml6uM{rtV+tee%Dx z1Aj8I{aEnuM0kW(rFx}|R;ACSzu65-d{ZT`eGC0$y5zj1M$un@x4MCe(*TZP%<)myKskz$c&smT|`Aa9_?5^rnx3&=f z;%cMZ5>}=f?q9+Z=9Z0|+s(P*CamvN{ngX&AviK$`FD&hRrQ+4rmSxcRC{VM3bimI z$6p@7E9gr?b3=Md7WzjmC$1qkBz=donH%lR3T%OECoN@Xs?2dP%(=+qw4JO>e_D`J zos;o0d2>dCu>m?Jo^?D=akG|_oj5+?1Nyz649zBeiTfF;bRS$yr26@KnC1C1%6zT< zUPjx@h_Ws{!t;2o8__&jj56{k)9Dhhe1$h7wRQqHkD%LUlkc|+-*_p=;@ijH?gyTu zw11tRrnA4-!LdInIJ%%i?lEtz|6~IE2F@SoEm1v*-N{MfL>o0_e-;_zJ9(3nqX+73 zoiT3|D?7mm`JgukXg4e1IlW5AjDZTMWevEyal_Ze_mO^P#DX<{WL*mWQHK z4{g*i8aFXUZlp<{cWYRO+2(}qCGtsUu+VvniC;OjB zrJR$=ZOqq`+~~_;pgJAqjd_n7K>l(e+sQOgiekE}!&D=BZiqh=S7xi1{LC}7^f10X ztMn4>_Sa8tGU*5UPAHjG#-Fu5n0(y|zvO-Gh^CL}Bj*op14Hgmtj>P@O!IR0%^a|0 z?P0kY%7ezoL=5KU(1(>6TX4Arm~vap<;K+H`<+f6v%`{e7)SECuh;gYpj!u;cYHfr zi#c_%#gipkp2C8E&MVBl_1Dw1i}g~8jCKd#JhcCg6bFJ|v zS#h&AyV|Gq-sC>CoVv}NrW?82L~?V3*WtK!n718KxW97Wk*n;Yt=3z|!t1{ewvT{# zfzhHP{1fNi0Gti9a|}%#PTC$LXDz_go%9B~`#oQby185N6ZUanp|h8IUmq^#qEFV^ z$NHNaH5#JfNm@v@ikyL6gA<7gnd&DoU9n5AD)%*c8b%-g$=cV#k&lh{57OPzzTt0$ 
z^~t%R@#LwB8s`~Rrn4H^v-wF6i6Op~cXTm}J{|>b2j3g8nB{qHqdW!oH^AaESpK8n z?tCRLCGE?#dNb`E4Uf;wSlVkpdzD>DQX=(orfIVAF6TTGfm2@Zr{T@}MuB8f7-O7X z!_Syvw7djNr?CfhQFtA!Gbgx}TqMi+^B^kI%ihN6{y3R4|{=P}~ zDv_q?db)v6lN;@;;o@s3u}$qZEXGFqTp!O8m9fM)`wq!TB->2bCfDz5ZS3iLgJM3B zQy4Rhqc^Z{C+a2fOUc^W-uEZ;QTZu#tt z8c)M_JKt(*BiTIv$sXV3Ss!v%i3glpvyU)7PNEOlyBQ6>%%*Nb-R5L2d&0kH^EuQn zouUZk9wG7Ld9WMF#}HiYh@O>LoM(7959)swuCE1cA_p!3;biz$LXW}z9;xlsWN|%y zWzV7yf4?2LhmzanYBusYH$COfu#3p{_qcE{eIBn|e|ldDjaQPOLQV0 zFFuTwh>xG0>i9m-DRVfFG-nJhWnq$m>16ej3u!03>WG3h(J?2cduZur@I3+MpJA5M z4=cc)y^);fnZiCKRw{SnC1dXMyzkF^u8giZ^E`y-omJsew0H%rTLhPdX#YAW6P2H{ z6XEU=B<5@N+82@=96mvXKz#$KyRvU1{auPDd%@%H{F>3`54*Bi-zt^e(S6Z-d(tro zuZLNeKg!z@Ms>;Aw@SvqBlCkTwEjsrCr0OZ|Bum5R@ZYb|5X3u7nk`xqwwhm6*Hp7tl4Aw%uFLmw_Y{15yP;fqc zAI?T>Q~xw_@fnXH5gmWk>vAO<8p}GOM0)xo4G4F#Mlc=!lLu<8GIg8Ex2W^z@tZsSVO1FOepXV=1bt|B6=~MppQX?~tbHEi& zcO@yE)V~5|k?E{o*YrO$Nq-ZI|D`>c?F!v2J;~cpvIdRF%G+YnwUM3q6$f*MaxAG& zzQVI{C^j?^#);7$LiPr*B=^z0x`iKeKZ<8fbCP#C1NfOy|3q4K5V=j}yIYOG$?CC` zo&H#B`J71Tb^5y4_;fdmmf6Za${xeEeZhwJQ@1jnCN}9OR{v-;_<-fFjF;Wn!>(}t z3(CJ&KybcsZiBvt@END-;WswtP&&H|WLrqx5~I>dyu??u^sd%(K52mVrm(U9BHt5O zh};)CgZSde~b_zHY$`8}2#CgcDAc$=(ycd$~e>DXDu z=PLTyh(dd?I!i&3j4`e8J9eatF<=Or@Mtl{uVX)2leItNWKIF^;N8z!OD5cAsFLg^ z4}(70!`7i{a&CSCpX6m0T`t|=@fl$R#ZrKro`OG1AGfFa%y5W?f=&M+-{sXY@)X6 zxW8!-0r z?r3s;I0=jwlR4+hY;+FIX(SAk6)3R zE6}MP-YusO-=R!mpr(TUZCt*FH1Flv2lRW1a+zZ$LU08*a-U33R7zCEzl{1{(f9pe z`Yf*0rQvJH@K5;H0{^}!J|!n>a@-EXhao6)I_iE-u9AyAaXUvE?dO3cCl99UWhIOs z0>uw#w+z+(4CnS}`Upq{kel_iDcQ;rN0`uX~+`bEP&WUVT* zTDiYD@hCY@otzD)D;1mb5V^`OX=08>kjVG&d}cwdL1Zf#s-DKztkxzgQuYq-)q8dX zFC^XnViA)`C)o=-D6^Y!;Ck;93lfj~0$xEMBmJLnHS>u?{AWh~3R%dGO>!Uf@_rZa zCJ)_{sQ)&66W7vAYq?9L8?Iiihp$QGD=3?sspqMavrHTHl@sZsK>4ha=ivW1^lZVB zhG$~-L$b0J^mNS6H}PXt*u~t8f?aq zuQy5BVp{Vo`CFu~emJwH(1G0g(vAFwp0C1ro~N_fkGlHHdc+bqPJzXPbY>K|?)B_2 z<5V&s9c;8oB;P!o*-AG)=Gi7W$W>2_3Jh92$7?;T|A0^`E{`mBdKufw(`UdKm&+Ry7WvFe9Me!9(d;J<0U<{CTT56L$W6R!V~E3 
zePRS+ZM)HyuZ=AeaC?aQ{n5J?JGhj74JDCH_1jbnXQNwg0m@3ifm*KN+4I_60=J`C z{>%9nC#roO`|_1hFQa@;0bd5^dud=+8ct^+GL}8g3M6tQD?9V)_iy^js$X`BlMOt+ zMGL(pO0^ySTm#ct@XU=9V@Tj}_&G>_iOneaFxgK{#A$Xa*8b;t<=yR>)6mC4uJn2dvzebDP{&xwU5V|Fg*6F%j2w2Ved%b66INsbo{%^pKQ($$w63a+h=BgKv z!ek?hj~XnmhJDWae+`nV<`-FeuTGX8RO;`ZRY1#NkuwHgtC8$QiKdxNrtT!Ai9X7S z`nZU#i;JhD$+0z6$UDCnKBP~b`xCoQ9F5puh3?qXV;Rv zV4s!dYmAevJblp&W}@;Neaom7s78{i-9TK5+s~XQaZEWUlSr>y=~{0Tn`!KB2@*jT3sXx4^ zRZfpJwYsOWSF_GJcb$EwoMcMW`{8J|BRX6_S7sD%wj(>4wS*eU2)huCa&n|0IsK9L zP0&+LL4=1L$VQn@`ALLX{+Cy-^dzep)$p~PmNHYxPi7OxskO?x-%FLwnCz*~-=Czd z<4~xM)^k?uR9sB%v)mD|!QWoABKN=ST+}%A|EO2yb7Hps`|S5N8?s8t#Pk;!KY{Kw z3cJ&Yo@Aw}3yYJxyprwaE;_zHpZ;pnavy7VqA_N7_Fy(keDqhk@eTC!iL*t zkj>lJ#Y9nE1NsSIOf1QCpSGz#)+qS{ypxaaJXWQvp1x;wa|$6RgO_XZT6HsOCWHMF zb}ctyyhFpM8Bw+^7K~PsRDm6lfZE(pMkzpi0*-3Nyv${-sc@8S)G{p)Uo!H^{ zCx^0}tMxqHSQGE|I=19;a6aH$ZlGx3S+dSv&xSWc*Hs|<9Ar!VY}C&TWuNx>NS3(= zx?YM#RrHWNY}r$v4u@IFe}xavp;tWfrts@T#}ohcR)?lW^jS0~tNS@Gk~sJNs6Gb7 zALGwLJj@B0n{aoak$xzx{a!m`(fCWSbWrjmn8bI?nZU&L<{r6EaU`))iT$|`r5;vh zn6c+K_1mJuYf6=}eLjs38);)bG(Oi@mNTCVQN5><@gRG$!CBSI9@DER@)fQ4$SAkX zyN=!_7UyTweMTRXKzbKCEi5QM5EXV&H#c`=rq$Z#w@J_`FnEnG-xCdTZ*w13t*iH| zl-`8)wY*>B|GL7DO^&n?==dEt69fGUy~uvYdK#AuK25-LFBop4F;A1tmZ*?hERKdl zRnqt#SZ+rBHfpQ}{|eUe8xj|ve+jIv)O%LU7is-L_Gm0zrlCbc8ksB&&%f}?h6y$uuX@w4DujxEkW;W7P z*_@7QOd{VV57%iak!gn%zQw2b`w5C|C09A)znPsbW&O>K6K{d+Qucf~+3iNUcK7X0 zy1WAIf7WA8DP(N=9DL@6Z(?QN}QFXk?~Hi^WH zQKp?we`hON6_m?5<)M72>gazBFQOue+eIt6e>0|{mtCsaj=fiRt0d3#hFBfB{dE_gASOY5I~3 zO~b2GQ9F0ACk7<>a8HBT9zG`;qANNjH%D^iCYNr!geUPLbJ+**aU3aqg`V#T;wMNRo~yTk2j#;Qz-a48ZIJ>yDFDkE*@uF z4$)4$zP*&mN&I*}S&PohqNXwv$#7O#clIs&!_&aAps0~KM{1VWPiajz5mzshfPS8} z_B8QFtvpM_Lm!{EBVGAc<~OzS`S+iAXW2J-qIfqN47by$#&o}jPdz>T8}EM=>V-bf zYx_a8ybE5VNkdPvl)bLo{oj`ylxe>lX&vNkGJ3XzM{xh1WR+Lz4WCMD@Sm~Q$10x{ z_e4zR=D)1z=hX70XgR)Eb^np48@NBju^mX!U~nzc?)iEyqs`Z<_kwTb!FU%e29T!Q zjh&k(G86e5`x;Ek^TKZ~=(?77+M5h7&{vtC#HHP?_UnbO+=;G~*Gn=&WUscSdTrFN ziL-$tUVm;_El>LH&`aVl&o1zOoYW^raLzMikKjb@h70XTO&QCTnaR#dX4j<$sLFUl 
zxtn-e;XUt0e#^Wo{bWA&qngRBdr2|p+n@a9-41A;J%-zjb$^9vjpAwISW7GLrQMg@ zX?JYF(MrWUYDacD5|#5cSHUOyL%Z=W&SjO->yG&H8MwD&o7=(TM$i7hHrzl)GrNj^ zpV>fmG4l8K8Eq+#Y@Ss8w^UUjk&%F4UdkL6iAgk4B>eZE@csACX*V%ewP{qIWj>r3 zxtiqtMNhManptMf%>3Vy`+E!j_y6*=^=?}J8JZ7dx1#0O(eBY~*2}E#%_L|bT+e0; zc4rr7qtuu9)ro`+BFX)ft zGxd?&AlYkZ#Cmiw28?B&uIKGNfYXPujmt^Za(!n_^a%a$Lt}q7!fh#Z>qC%UZ1hR& zLNfDT1)@oy+oY9u=x8NS&1Wsiz;q&Q6-Wi}; z>Gv`98>7NAa6f=Ot`FZCsG9wc!}Xs?u`4~POUs@y%IwWD#}~|915Ihj9PKAp(>bv1 zL;J7uu9o($;n!>cW7b%oR(cy8uQXOA3uq!UyQ9{RytACf>&vc(n%T>VN15A|lGU@Z zQhzc6Jc!Egpv}c-l8Bl4^t~!de1i|klFA-@a-JD&Mw7EMy=fWcZ%azemI8Q0!uMqy_FbLzTAlDj6udv!f@F>*b{HOCxntkSEh@ z9XK4$Qsowpw~U`NJju!9oYd%shM6ALIM!^gS7(X7D;b1sWt&ONM3KuK-&Do2@S~WEWB>PQru?=Uv$D?Ut<~M_Gqt+i_#j`s4nI5~r zKl8H-(QPH^?2W@+c&y3Qw6FfVgJi0aa5f$$GB7jFvuM;@SUgQ`a^8Em5qEq6RbzH` zCF)!b+pKgarlOTz=aaU~-J1B#E#M~?dR)1~QX_f`JU-RfGlxu0L9xVvok?1AMk29= zxhXQI#K-6*kyklK(YesVDyY!8kev1G#Q(?Foq&Hit^eY$)ZS>LIWo0FDk&)vMfR@D z(nOL#f(x@W@Ht$M~J~^^;&RhjiRZ>as_Atd?^Q^))2xXpkj_VHpl) zzwv9Z&uaEyzfOQLd9fmZ9%yw6GF?;B0nq z4WBO~>5a4{nJLHXKiLZV75zVqDl1XHDR{4i=Z9Ka&T@_d_j=r&jn~;H^AZXt0_-2C zeFf`XnhOpE+22twXVi|--dGT?P(OS4E28CS(l86n^4#nyJ!OYvp0gx6ChIg~)ymVL zAK8z6NN8s@N*NX2hwldXbY&;^FruwA!?+jzS(7|ex$L%lw6IU@Nlfwqb@p1WRuy{s z8(g!WV+fgBr&gZ1RWDxs{ofy7Vmq@jR~Q&Q$z)TW?3g zt{qxlhx?0Qv@N&>g1QE7EJ4dWbNUuXu4JLd;9(;as{qfFXi#!cy`#76g-I606Ukq4 z-_65?i|AOQNR!KJKNjLpdYN-hlbI!1s(0(5>lZMP-!9O#4je}R7j9dxVJQr!J z{2YA#LY-~(nt4Ylwm4GyeWBk=iqFSUsTcdTfOX5cuGbY7W?vKXm=z{ zSnl^2u&seno-aI(t}E4k0G+ZkC02D9{=AASSq~pf=JzRPxrcx}yS0)-rw=*ZL(MAg z3_;UwG(D&Dy@a~I_^t->#Ar-)FL|Ld7l{9#ncvGit?;3=GMjx0RdFp>%4zWipIwXc z`JZU^JZs4L<)s``W$x7|%9d(`&s&ne>~>9*W$9|7oYKx7>L*)L1>7ugzcwgp>*Zes zZawK-Wl~g?1=-ovz?d0NaA*sMtW;;e_i64WC-Wr#at>)`&3n+mKzyl^JD_?z_GDvi zfw~uyj+aSrJkIQUIGa3`V9P9{yw^XCsTcYksZYk)%(ebTd*1Xt4{y4HsS}Q6u5yf? 
zrjog=_GAW?9lQPT^$b=k(Nu@vWX>(Tix%XRv$|T2=bahH;=-e?0FIHaQ@ep9{>x)5LqG=RRn@2$qR>e5l}G=0pE|orn%)#s2|5 zL$}OCHpc~(X0uZZIR9IxJWVP6yTEsOQj*U-Nkd!TQ}vqXO_h1;SAf4fZOKV1Y3sjj z{Qv)IKyor)y`POv1l0`VVa|SQNH0EPKdP|5Z?pMJS+HI-x2Z8Y>yNLJq9f`1k>uw! zvYCuE$#Zrk8H=7aHKGsV2h`))yhu9}MVr;2*;>j;5ZNKP%=Nj-{YYw(r9Tlz1B@eC zXZ#6Qs^ZcYY~07}{%~W;NFHhOknRP`ob8ZEj>LtW;9kxKf0Z6>AUkKUXUW;r&8Tp< z_Hyc1GGE_LN{`X{Vx^A9<)P>{6I?mVc{Sb-XYmrPlM|_0sBL7AVrVcHkBveP^#xGpPjd&{-KxSOby?Djr~ z&yW@BTiKyg(IGqak`q0VDEFdCJ0oy2v>Z(?nyIlsyQ_>6$w`^Wu(NR@d`f=PtV+#7 zztM#xB>#V6Fvb*c zgPL<#&5OXZ9ISIme3{Vl5+;z`E{QjbH;HOT+Z2fEpaL5l8+{@ zU$b~e6ue(ia4K0X*Bb{CWtCXA2jP~SvQP1W??Tbes4?1W4DQrNm)A&AY|R21oA!4> zhYs+o1+T=EpTsKs6VEO-YK#TTj@ploy}<%}Nb_R%m01Vq;CeOG z-Ah|}a#;_C>-ow9XwM=Ru%)Y);%5&svlICZZppcovo^9n?PLmuQq2 z`z8f8S8D5inz26`z6*noVDdgncf{*UNcJAeSLPQ!hE~(a<5(^3kB;rYIKtd?3|eFt z>U2Eo0NR`#v6A&VzVM+ofvFQ6It3MeaOYDL_yH{U7>hmw=TMTe0uAHkRw}ISViep1 z6|zUGf>APYJbU8wMBZ(AcS<9F&K3Pk-|O_UC3#Ku(E;>1K1@Y8RKvN%f92%F%~_So z+`S9^4>!-)%jjFh`!T&{1;2-@i}2-_!bV)d@5)Kwndg3m9#xG5zi275tmWERIKASbal z$;ZEm70>+Xakge0igrf5y-+DW`5e;T0v$>jK-+0IyFN?n?(I>ji8h|}T}iK#$Z^gS zj9=ESs67(3E788gL7IJ=P1Mb*TxM8_K^bJ8cQJjtR0~_<cN5=|od2&_iJpoxBZ@YvsKC^n;l5VV`3tJeK zO6?|d$WL18Nmg$~fiqE{9>|k7sVm9*r#qQ#OvTwMD3j=t#MjM)!JZ)O4TFz--%Q4z z*3KkQCi_M?Qk@;awOzZ2y-7q(GRIDWe=qu8QN1ZBo0aIT(ETIai$_|f<@zKh=gy5r zv7C&V)$g3%T}RpO>}zLK+!Yi#MPvqt&>U8$8br z_DgBsB6lX@SN!OlAH6*uKUYxYQ`d4XTb{_Q@tNJpKf8OKp0_mq+@Pm_uvwdTVwczQ z$^KP?XE_!pkw6y}vxhB6PN;H?&#LMrMk$bG7j%A|L2tu>>~K!hP%>-PQ#P}Qd}a;5 zJY8$+R~1~$6NJBzkF=4y|9uVo)9+l3e3b5H*0uRcH?4(xe}1+0FA`Sr>~rU0@aL@a zj5u4+v1K^+293+Tl2_lh`2FWr{=dH)qro@_|lSlLed`d3YXS8xWOv*(W*jrY=8PM10m zO`E`Apb@lH;SpX+?($q9CyM=0Sf#`vbOX;pbU6_=iJ*RlYy_sP_Z$e80U(&<%8T&t z0rw5cy$6l~B>X)5d`kUYS+3`KkM}B{Soj{ioL=tajM!1+BTw0q3+8$w*`>UgSB*P~ z)@;Xq9cT>9$;z-TSm7F?>dP!XMd-(qfC|2uxHZ4hXMozBF84CT_ zkY36p>ugp#vxjOL+>&SEc^J$=t@PF$R>|&~v#v9j8OTeCw-(Rx9=$H7YuPU`K{x$I?_I!~ zy^ts9JF}k5sge`w5}wI$5L7d+FjPh*%0q+$&hZV5gpOzouot+ukcW?<4e4dr0Epa6|A*mB; 
zUkB!6@nM)2;@7Xljr#g;h35yMPu62gS+|}gm5J%heun+fY-}NauhG=ESgjw~__JU= z-sd9HC2NQ|QxjU3`P^bS9=6wh9$7%E1xYYv< zW~vb?W`*?vw2VKPGaS00#GNeeZARUA$vMjURDg zgmz1rm9y71d;CiKjHmO^>ZAQ9%6{(JxBefjmz?%~0T1I$`2OO5f7Ui9GwcJ~Ghvu{ z(_hi<0i3%YowM8fCI18Aw(g!qM&cFax&67W?}u9-sFP=T$w{<KQv*W(99hCBVc4~?>)e=TZ0Ota5W|8^rtFKtyd zqUTie#i*6MmG7c-PG(7z-JUSs3cYhaT(tOfyc!FqS4j69P|haBfn<9y-;WZ>Sau)I zMV^l|Ufk+`PCs~rq#TA% zNK3W&203GSA*dQC@h18wuTpcGGNl+LpTmz8g^uk`5~|^PWft=mC3A9EbFK7Xh3diL z-*hux_+{kh7JWRA(+A>AP29~+|2x5$==z(`tdUkn84ELucoR+bG8XTt^Y{R6-x9>&IkB~7FTDf9wo_vdONbx0A>quV;Z_=hsnLh zz&F8^=j8DlE2h$ME}oNn>;^RLysooXBV_k;6w$?%-3qD{$$EkG)$~}RO=jyXI z4SESSoyh!n?bIS)@1W(eFpe)XMERp!$@sU{ugne-Cw+!GPvhId0_rdEA@=V?xTnNm zCHjND5{R;gIlFc%_}uLGB$ni98t`xR8@bxh-OX$CF#>*|j>_ouxR`RlD&6N0Hy&gZqgu=tg;lYc7ujQnI5Y+csmu;N~# z?sAS;HBwmS*UlumfwEuu{f+TE{%=*im*y7bNJKe&P5YZ)-T&w%|Nl=P2osboG` z(*8utCwAt~zI66jZ|B;a|7o=nS{;B!$!%1j;7!^8+}qsq|NDCj{^L1p$TjpZXBmFU z!e{T}Rj4uWte;&-RtGiDU{4Pz;LXmS z57b{=KwcY$qi{M;&i5jppP<51_>hSF><(?Fjq7P`a=rz>Ghp+f5i(D&zXI0~)^OWm z-aQd!_p=5$V{oOC_Xv*fFbJ`>aW6PmO^rP2LSHyMbs53!k;NaPB;<2LGMFnH_8I zpu=jGbS-PK3tA=$yv)AMxR%+&dn7p0kXb?_ z(2oOW$%}2}>OCyOPawDrHIjKX^SGQtdN7U-1>t-=`WYk{H-dL#eXWGu4d|cjOF45Z zr#U2wWF|RJMvLdP^}9ZA(|Sg%b?7n{1zsbKmx3nyOp{M&Jf4fA>c`m#gG!x)Y) zb$u0kbq|_kjrl&5Y>!%5YfZMMBlO$PE6;@HgC?`afyT&#jeK3upgD^%+h_JWXAkvb z?Cng~a%S}dN;Ok=KfX>fW!$XYt{WQDYuaJ=bt}R8q?7*y{_A(_)yZkz!e^*@TOWH=mt_eOS`lu-x z>`~Bn2mNIgWg}?185_FjEm?8<@g0(dzZCud6&b3Jf{7RWot$?8?{%(Zzw&=b>=K;K z>9vRYoqaXq*|1aT{n`4<&i5uDyOzd&#ad@CKx8I+6>^Gnbv@RBK{643gxgs^-jf_v zz>%D+Fc*KDsdtu=Tfn~-&B%GxiJ!>13(0(P8>t+}8%eZL_Qs!z^3Bn5s4~eQ^)C3v zf+qgq7$xsv>mS$881QGUGcnUY^38MpS=L_9A$f`2uBrYmptuKZKZQ>+;(Vd6M1fyQ zR#xz&_9qpIbb5d`C6;U=IFgsSmDb1TV=XB2()BvMZH>xT z^Q9+|&o_;U58`W9o3iqD3(huUqvwLP6BtI3wOdJ7q&@IN=I--9dGB);a6iA_rOgxZ zt2ub;;L?Ky6yKn7DRzD^7<1loMSb=|rIE(;oPv@3K*_^-3+d>BYkQda9pSwtj5_)^ zRG$+;(3oAj5iCuN5PjTz6eW^Hrm}YSrws>yexMO5=g)4b%n%aN06hOh*A=AcD*nKE z^x;Q5+QBR?E82NFd??yyCG|{_9?#%-HM18qQPY7qzV}2}Cd+ySIPPuay_O%-2xN1$ 
zwJQmEUwhXm^AB84*9Dk2~R4W_j6V(gTbOw0s`^RiW*{?soU9DBTm5 zeMsI=e5%en8;!1gN$aCpP0okQ$Z{*ZY3cWWz}nXDSfB9l-{`m}N?b_R&e2{~oM=OK zZ`0S^c>0?b=EC)#TK=6(oN4?UptbYNEFzD)D0?!Q7;B8_uczCLu(8YY$-q*jYoS&T zt=s^*SxUxFsp#r&g`F71OG%!ot$0#z((=YEVaZp|S!{WB@g@za!PaH3>mdAZ36keg zFM5=Sh|YBLS=4w#uX*-V7iV++R`Ls6<=-^e*3(8#Y&u(=WT|=A{W2VR1+5Yb+7Ex)qfB;;mXVm`TJ4SsKasvQWFp??X!hgUz4>JF}ycp3JlG{A1Nu>A_zOystv4VP8x&PlIsQ>Ka?XKlc#_7=DDjfV5JKC1! zBnN5Mn{R+i;$~|2%#1Tn2tv`swwE)vsf{;%{7N)PJ}bhqt?#VRUyd_@Oo@gY8XL5qp#ie9) z+Mk}qCr=E0BR!RowLaj>I`@&r$LZ=icf9`Md?}}{N7$fg2_U&Dgu#V^YAiAB(uAIYmy=^pT=9NA9 zw~>|yVVDT&4d{1O;aC3dUOcfHM%b&_pH*!BvvlNmmL>jM^6`A5m&{GGPIrOv{H0<% zxQ`8c+@0)=&B^J>4fw7yT?;+B$+h0J?ii!br^cradfroyCzHF5THV?I#0(GB;`t;f z@f~wn((G)@Q@K3v?*WhNQRq^4bE?q-t=vOKI+FELMzP00`w8#obeL|^OU{gqXP!Lm z&x5l!JMjVB;$bI;;*q@jPw@0@)QG2Cowbg~au%(~TJ?22&ygtf2|3Ds+}n&***iOj zO&CPpa?aPsppW-CgT-2`g+uru2kCXXaX0xlt^?<4SC60vQ}ps4pW#Vb`7tY*odxxH zysY_cOETY>Zd|;FZ+0D7ebwlh_3mwy zt-~6$#Fb>cYl^zPQR*hYGREu&!p~tb8^>Cq?%PJg>_R>VmQTQD3p~5f82CF{C+>a% z?VLulv!Y$fzkdvl4e`z%;FiY1qv_vJTDJie7Qms4wvWZLd(}&Zocq{>Jc~`VWcDM! zTlD`m4txyrekirTsGZ1$Jn!hQuSELKV;fd#C9}bsNz6KscO-Y;p-)awi}yc5*{n$q zg7b^o8h`@>(P$7J&C}|Qq@zxOkSv?F8Nkjb1jcnj*U)@CRDZmGryGM8AOto%Qt-FPY4Svf&}50kc>8}&5_ zi4QqJ--i{HjvZJ)O1Fk(GSu|ca-IrRg#FukJAw7<1CyM>mU#2lBrE&CUhrA!FK41= z1vKXzWPX>ES6h+#chT&2)R=<{&*9p>h2MWViTxbL&0Nd6)7CH>U<6o+B2(DVJg2h9 zn_ak$M|-0(AHY7l0DFTy>)u1r{itF*NJiY9X!brJ|46@A)8_27>xUMR!1n0%G~J$s ze>q1dd-B`6yP@#AV-*g^?H854*}cq*=c4k-B=kAjmS-}F!I%$+i72}fZCjGYcH|U`yu8tg}{Sr_V*98(@|A{^yjxmEF1*hYv%c zn~fObak7@aCTZt0cK=?OU8G(^5}N!WOHr^Lh<-%(d8FhoYGwScrN?$S8Vyc#$T%$? 
zSbWch=cBA}U2>2Uqcg*4=gv{!{gsTL3AgxKHl<>~U+a(3qIh;EpxBYS6kK1B1~@MWd8GsC^!clP!-MfI1_Ju6Z<=dcGlE@lCq08KJ?o{MT(6F;AQs)+A7 z{h}QWO9r;1;r_K!$#va=C4L5Nv-d0`aQ3Tbx9W#%;Lo^phMF0lz9{VPESi`-J3~oF zEAkOPAl`HQhH_fH7fo`GTJ3_?mFP=2l}LlqnsTy=w@1yj%4|*Ek~K4NM2X;j9Pg8d zB{6bYo2*4%A|08zX6|5o}V|GN3z6JKVL z$Gu2f1@(#ycx7D`SE$~kaK^t*8Gt6cA!tq&}TiF&YsIJ3tzO3ayy}Z*1+4ssG56;0x!pE zlofohp^by^p%j6Xdu8sfr>XI&A2Q-3J5BP;BnmyTKBLj*I976@c7tQ&D(3}fB-;hA z%d!3Ad`4rpLdgonjAE+qv{SM8{0j`)!*L6?eJME!pL4fS@tG$J|6LbPRQ@L@WhpLXOWMg4H=`>vqdY6jL9FIb>r+GNw$Jy+UvohY+xxa zB$We@P72 zy>28+2I>+Oo3Wy8)Y=ZMzJ|?suLscLLf^O0nZ#>fjG}W;a48xuM$KK|(H%A4G?JbP zuBuvof^HpQ%+ATaGg-h^>NjN>7J(p9wzv6p8;jTljn77jnz;5RnVm+bKG9Yu6d33J z{qE0X(JO=hX%xw61HXglYua&`@#ztz9i-;nDD^t1 z|Ec8raF2H~Mqg*K;5pg$N&RQ7d{3H^?4Kv2&15ywLQa%U{L6!WuIZw9TOtd$`++Y~O#VY0s~9blHMo43R0 z3#IlkT4g1xH$1zN)Ft$27KplNaV6emhw=}s%Th9$EI1Qj*d2V|D!oYS_Zqjd%i$XL zTEcxU8UBTY)^RUhb9R&6=Suk6#wd4>o@SDjPb z7H_GUi1>M^m-)rB#)zeC&N=v(T=f$Q|L7^yd4eZ&Ka6^MH%8aQkVgxCa_3e3o(PBiO z;{I50R6?1jw3E}q4$wm8d^6yBG=AKU#yc2yrnr-wGRNTIJfqwqx@s>SPKz zy6B}mjP}6afvn0h^74*SXW@T&ttPM4%jD}K9`!Hay_%2n9LRI>;3{_IEizp}nQ@>P zjQ_9d^#VPe1h0RBw7b!Dccn6SsgF7XaP=;|jP<*r&zF=v7Jk`Za3npM$g<8L8(ZPh z{aSv5jFhMQqm_@3m00`@?$tK}CZAC|mgimaT9J-N&yv~q2$I;)@9YS^n!Ln!NOsi` zpxGTHqdCI6-Sz(%)SM)`R3bb?^ zeQ)pIZ{WGh-9+7dt4`v>;;Antzu8MOfp?N!`oomHl|{<)%|$f$B6!^o*K^S0?<6|A z)$gTKxA9CnpzF;r&GW7?xb=FWwbhLdIqR{NvQ6C0PX6EYk(0B2gmvbtbI8F7Jn?q! 
z*CJbEwX;7sTk~knC$(9Z%&QbJF}3)e-H)Ynkb0xQZunK5bksrdWHY%K)e^H;1p+I zS*O2)oW@Ujmv7$3ylOSsevsdl)2=d$pNjwUjAWC*{JoxzVtqE!yX%y0gL7GVeM0@r zWV@2Wx~N)?G-bcZOl@Snb}AVi!dsf_PEML@3!6TE=T$mkX%K6X-Kg2~ITOFj&?+;V zQm&oDv^ve0bey}JG5w#_R3my~sDB~L@!&)0toUwzm7>kF?p%#+%X}yMOS9&7B0TD% z^49Lv^-kuq?znXr3QonS-CIL`%fOJ_@!Nqi zv$GngnhdGIcdELXq2v`vcl_h`M&F5n&pfG&#Ft8h2AhBV`8yP>hz=D{IiF*EhKrkD zk>X&Pc}bb??5Edf;^;*Rz|X6T9>d4u1>QWC^KC?q`GIRC;_8%o~9$UTw17K}s{YMY$P~i`hW#gCj?wNd?R{a@gIZ@P0bdo&vDn)%lm z^>aGNXGZ(%g3Ru|J7{`VZ#yZOOnVba;&1MJL2ia>Ke>yuhi@!t%?U59!E!8W9pzlOp zA6F77b199u2E}^AZw7icQa-uPUIkGXcpi)L{k4_3<|p{OGY zwFg<^loxmn_xc^hKG4oR1!UQSaTe>D8SHnY`euFZ0@gXCdOCdKWp`Ejc#@Mn)rqVM zpAz4moxjPS7Ru~vgzpTitV2&j&A*zTH&!|^=TEZgQ(*N4>+n6C7yBPCKU#l;5iTA> z`+|AthVRmL5iYLxLnOHv4`g%a1;-}zBzxcnqtqz&Ij2bPPp|G_RklXy zPIz~RQrXMdpEWuK_fFRG(^^YL)X}(g2HLMdk>n3q!FG>zHy+*yw);wOCo@mx&i~R= za(moJ=awnCNN=^ZJb^UbsYFx%;xpDQc%PFWdf@LJEO<}f-zeRbOnt3Xc7rCiFS%=` zk@Njvxd=|}(P)wTSAb?9P0krji45t_o-{134lx2YN0&EASaKoel&GGxs3lrfN45DZ zarW_^fx>Tt;0Pmn_F6VavwF(@q}F)X2l)3ozOAKmS$EDDHGx&n+HOB|$qZny`?YX- zW1)8yjHNjvX0^7S$Bz>Vp7p@>b-bU9QOneS#Q#P#CI0G>@Yxmp)}m2%v@b)$9re=J zylD(tG$SP^8a0xaHuKaMjqGpW$P-G&KR*R;8*BMcu*RFrc~yPPX8sMb$~aV?zi}vv zZFIL2A7CSm>3}BPwLHu?biCF+M4_Y6rwJZqU7!s2l8tfz{PHYd11s5)uTcec>*~1) zyRa|2b1MmYgYD|%-mT=}d{VKm5?k{s?$m!Z@O^;}e~0%#*Se$ft;VJDFiCF2E6{vC zYAj$Sc0t2rZ)r>ZpCxs7>3K0MvV$razmvW0aTGn#^(%QHwT%A1kfS$1*%;-zvck!4 zvj$98!mpv<$>`dYHsyrHjF21j_XbV7jV(V3WWSS`JS!MaBNKskva2KX_l#?aU^y7w z`_k~fxc3ccS9zZVx&~fXgJCOuyumw)M>Pn3_qw*(14$f0c~rd+f7h@J|AuoJpCeiIoy~K?0`-qJqbT^2IxOXL+sSH_?K1X?9WB(< zHYXY7)S)MFBKZ=sFJmxG&)(BAz5nLwIQ=KP;S^Wr6}Y?yldoWs_2G|Ta}-&;q2OYB zt;Kt#58C=m`FpvLA`mEu=oa>DZ&`b*u!mNdcfBgoTm@I*?I4fkp?o~)B! 
zX*E$Ub5Z;<_U`~Xc0eOFeD}(nOsn)~383b9JF#-}{c= zoc*!!nUl-DW+6-2KM;&JX=x3}2K)C{-`P`nBRNjy$S*#4!-%7|C@J-=DA$rT;{sB9o{S~?xw`%*ZiVZ*Mh2v zbIIwp$N3let4u2z6)=?*SmqSrscM$jZ|WycOht5>s!Ue(+ZI>LqjBbM<%*h{``P@R zGZ2y!fAjxD%3NQJ7theMyGdS7@M%vnYl1fs5KCG8?1s)5KAl#6LOT+NaXqV$DDJJ$ zVIqpoMwRT|$~n@*Tv_gVOWN=RIb7%0lWOnJnvY^}b9Pwr$R&a^&r5%xcf;7ZWH($$ zcZadCzp=;3)jWaRW@Nv_n3wz#zmuO^j8spsOg)V5eU-_c%;fP{%?@vCT*%YxdgLqn zKUS^)eI;Ar2o}2?%dsmR&N;oEl>E(=Wco|gb>d@kHt=e7f7Zrg5;u>%T8c+A z&~^d{HhHa8Vh1I!B>~BJ_c2-}Q}x?m+n-!N35xn!Jj30Nu>FvoI0YsTde1UvY0L^O z)B8(g^$Xu^;MWkB%!7>RHCcybrq3GrD_ZJ?s!O$XmEXzcy$Y@$@qoYOZLCJkPI{UM zpFA13A60te+SzEIQTAQ1?WDcr%ZXGZ<6=iG?#zxIW<)pufBJ&%M?L(czht29#(F=@ z54aExyBYU#xyuO@j7<@wqtH5pIWpv8A%BQRM<7^m-Oy zCWw;jrVW^9q5Q}2%ISNDsCPaLO6LmKMAba^Sj`qCUs5;t9&1dxhAb@hs}=3(hX*6@ zX`~j0p;tq&e8HwA%XLpU#XmmP-RVZgO(30#s{hc|0U%H2%tS`-=HG`#!$i2B$i{sD z>uKnjmB@ymKi25jkJg-xx6|nC(YSxV(#y!{67<;##L484Go+4H@)i7w7rFwkmcsIW zxaO%=)(x{iGC4hSI#i_h7WQKzjY=GIqS$XC7d>D+j4c1hz09d+>wguTN*P(lf$?em z+;5fO6*@FUYkQ&pmn1Qf&e?&HQ@3|ECU!^NFUaBtMv%qe+ZHtk(2CVs>kj_PxVR3s zck4at?;p{X!@Z9r-H)K((Y%T;;L(h)R-aUk!H;AYTLXf`c~s}^H*qK9U`xHs((iq= zYX>~)z`p#8JWXT^-cj#ZBg{2kcPUqoPEF8Do<_x!ZVKNWjl?-4`$5tb%dp(o(+zet zT$`+9@_8p(EV+4(hWiyLlvA_EpkCtQK4Ay4IzLHYBiYhq3qKpK&Ara`ges8$d%$%B zsk{MYk7RT5WM%^Wsin`_>Lk9rEABQ$r|PJlXQb8jSJUS|V7?>&ELqZyCl$l^dh@l= z2E?P#pdUZ>T)1DvC;mm5n^B=32%lub^W7O%kC23|S<~I&w=b$cYy_;T^*zCNuNns% zsggJHWOO`;23Li_JGk77C$j<_>!4A6+K||xM7s_0tC^a|gQgdcpu2u=Cm(&a@jja6 zoZdtsRMkpOUw)6Yk25lUgA(8Ic6u4(O6z~|;R^EVdpVRmRP@8?a_*u{4*W$uRizIuPh2#|~< z*>#*T=O!&B>q_F`;|Wz%E}rh0c%7B&hAdTLs1m1L0gbZGd`MUHvZjT;A2>7|iTs4SeIDHe=g{DU)51RZy;8F|LKr-;?JiBq2NB zPt$g7^u3C2yEkt4h0{F#$5vWMOlIbM7i(jUQvKobBe*Kk;+#`34 z_rv@@)VyL88~)Hy4UFiSd*1-oMD27TNej_1@$_}|Qh^-2h8~@9Bs)*@o~r*nacn&t z;~~~_txOx)bDbEQ)5yoyt|kjk1@cybrcA?!N=oFZ%jf#u2K5J$@y2>eE|SEyXT?3w z9m=VZHS;%V#XQiI{M3(VS4DO9fk$Fy=J||o+NSW-vl^esqCUmdwWyXg{&=-Vp!Z4= zUXG0RG2WlPuH{^)>|e=zAaLg$E%KY1rcIW%QulUVv^(QR=T}dOZMf+><>kQ*kvO(RU&qSRp*4}mg<&5-uNNGILL$s4K 
zmER%#XS=_nF)|S}gV6FAyqN>m-DqFdeG?z?KHd5oXcAX_78`jE3%eQ?&AcWX=Xw^W^2|$8t?D+Gk(xx9~WU zWW9~{J-rVuJb~BM*`*j4jz+D|aAG0|hp;1)j9639DtYgJHHMC1<(e8x2EcY5OiqPI z;%x>QFHX?c0F-zMY<0l(i81Xu7H3~PZ-xIe@FrPP*0B=tn8(2?PYYh+dCkYW9-#hQ zf6454EgH5q(hMdGFKILJ;fYfTmujN!uX^mnZ(G7nyw4u~jyE~~_ed~52)4wJ?CJmQ zxK^FKBs=}3dQ9HV6Zr)bNkZ~!W7jMCsFrEFx^fP zW}*4sJb+~LSfs6-RND_+ujnz+Ims=Q{D@VJ)^)%z6YU<*PBKP(O=fm?_hMG!Abiix z|1NawNqr@5>oct+7VRu;?Z6v&96T?9w4YiJ7Bp!MlV%{wGpi5O>w=S|43xX+aXe{R z?7OR3!D+0;S?tp|6l{k=4QWWurc5^btX|K+{hUykjCwgWHctu;fbCk8x{P$hw_8GA zl7+h)89X0%5|wg=I_pT}Zn*leJJVfDgw0?aJCGKy2SH8{ibrw|y!K-uwnwSN_4cI& zjq!9H%mjoB~DL5#go{T;l}1>aH@|&$(i5F)v@Ga zqPC8Q*D}`SV;mb`Y|NhZRqnOGo9NRrkcwyx-Y2iiMdoU?KO%D&bpFAf=DE%$T$$HEZT9dW@oLQ8!jdB83BFA?(7Ua3)>9p}Y zzZV*bC#jjaS3M(p3)rqEN$U7D!ub@#* zz&joM8Gqg)%e#Sig!(7a;3aTKtWKTJnr;(1gX?1C4@QW+S2=iAGP>GCu zifV~M%e?0o+_@f1dxQEHlJ_$XBn~E-YECm_7_5zQboOoXTOV(R;>JJpeud99`e{G{ zTH$j&8Xx#7qv{j-8R~ydf-3o~Q*mK|-&xiDoow$0^2!Bv@onG3=Q3kt8Hw(x)wGic zl7r!zmEWw!@5Gz^o%U5G&s&np&g8xge&-C?OJLN`|KD9d5BICFdWisOk0)>Xm8{y6 z=t5SBd!z34{qhCR zql^DmCMS_rq)UlTiN_l%?g+MW-jRYzM!V@+&z$aA{&u|7wmuK{9!PiN6~|j1<5!~6 zzd@C(nZE(&pV6c|d5>RxnOd3ay>1+M$d$9uuN)3+gRhBtx(v^5hgTwS@(kf+BjPR~ z%&v>Hem*mU>Mb{G;Fv-~RiGT`2u)@xNW&^rwIKYB6P|vFLfC)R1kgt)HxpZ|PmZ z^~A@FDsam_-jr_25;VfW4#vjD=v7bs$XQu&x725YKmE>?WSac%{ms7~hVwV1Y9amI zl@{m8{0VHu*R0bt{LfkOkCB~C{*6S9Htg|S|JI?|>*{PY4!uBncLzydn$d&>TcxjG z@ntE!NVb=?_&f{5HQAgG*!MA{_G|Rqn??Q(Z!adDiDnEwS-<*DOZ{*pr*bY}$#&+q zY+%Xua^)T4*mTmBlVHvx&&RM@-)M0(JxPZ5?dWVW_Z`b}okz>ss2j|V)zX{B$mCT2 zT)zh@os({og?JN;4*}%@y{^XHS}beMBW-G&&d$$Ec_^`#@uzYQVV)O^(9)i$aFKeG zQJ^#1*bDE{TcCN?$doJ-OWE~KTFp7DiILe4mg~?tnfs2`Z*^GJ*YXqKT1`4LTbRhJ zxtXND4US~nPTsj$ApO38Vx_Totv+t|of8_9AtGz-leIP$HE+kU7JRat7x*g*ChOXx zyo~H%d|Xe-vz?v)C%V&GxqD!;32(nXYJw4cIFG3(o1Iws>>^DpPE%$61?sL!O)ET^ z?EQO6FW-UwC;G8}0r%Hz)UkYmC9bS3e6!Dan4QV=Qg_Co$I+}$En`WZ;3fmkiFi8% zZ9XG2Imc>}G6%yXJ8M5vIuR|8qu3qn%|GDyqSD)<@*uv&$#5O6?PS-Cm1#zflHcMB zt$K6!&Sd`*lwG9$Ak;Y!B&$hzPRX24-ZsJ|R}#y$!mknJ{5p`uYe)=McltJrtiR#@ 
z&bamru3twFm*dxg+6eSHMZKBN_%S&}=N@z%Kt}I|d*o_2)S8E9UD$##=+fS=(4{YW z+>N901W)y=DhM{Z_8)bUk^3O@8>0X3NO2;ou43``)WZ(oPhR%>l$!1SHR>Is&R=1A z2Fhi&(U>+SGfzX@eMtFHVCwG59CT<*Y8vQe2Qs#v+?>LyWDd{>$E&j%S>ZnxPK`kL zcUCW_swH#s628qumU*gdJl9OuUu9wM}vQvD_J$mtYJ4gpZM+# zaPNi}SCicM$jPp`E!sB0ubgw$i?x_!jMNUDp`V3?6qI8}hWPF-~TUJUe=SLWKe7wUWnj1&f+h7_W^BhHJ(+vJNz%glSJKp zsjt~6kyX{?vboc!+0eK!SZmLKCmEFEJ%6KiPQOXMjy#Xu?P{PCVTPcn(7M1$tb7V+YXdO(bBFK9f1&jba_+ zJ#zgmjwCZpp0%As-&do~A*A+Ebf}7JyA<|v6)Z1A*X-gP?N?^6O||(ME0{gmvFRIG z;GM|M?O;yKbWUH%*&B%i8Vt`Zd=DcUeV0heoc`GYKN{iX z#iajJzH@w=(%R=PYP3)~K5O=%<{3sQBBQ%rZ_!3Ot!<;`JN(Tb)!axTjzGo4I^V9u zR>rKvt1l#Ln_%>JzqTeZtK7YuZ5XdcIG(lBc---h;?XRl71?)C9S0ha>jnDSgdf?{ zdYclJaV2peS>w(50EzLb0QzWXBCkIMc~5*w2AMuA=kuU?$o29lJ=p7FzeX0lJxRi< zYUd;V!9(cu0$hHlGp+d}WoVgM(cCn_0^Dr+aF`L6Y@qeLaWpH9Q8 zgJm1?pA!PsgZ9Q^)%`?P^;|fA0jf$?n5u(eI*3Yf?=Qo$G<(XElM&i3Q!00FVBtf> z(krKgM&~;C%nElTDklZCRsJt1@i+GWA(r51+hkP!R%%wWE5LCYUCe6sP#FFj)uPGC2-6l`TdS2wp3L*wxt5ttxLoG5Dht%g z_vTkA%6{{|&EJv5%;~fClMJl6zODAM?pK@UMXH+MSfb>s>$?;eGur>7X#RtEkvPaM zFyGh6krnhrNNuTJ>IUw%K3fzmW<5Ral~b-HvH72;OQp}q@Jnp@1a@;edz84vduaY3 zmY{S>!P)+wV>I}g<>-gM_wyig{U`_|7<%C($3G z&0XpwPT?~cX4P^px)jg%4OS>?f5`#Tz{rymG@tOVFG{|B;42_V zX0pjNA*VIQdLE-rai2Lpv+~pf$EFygUWQRaE#{fXYi#=jG<`?AIR_>g;Z}np@jLs2;B;k65iIr5I3E51 z;O(!K?BRS#EBC{$o&J~N(MGQgxb_kpyTT+VFXnXD>>q7i(B~Z5`V*Kl%Ueei*YU$< z8torcZ+rNC3xj0QuBFZ4c(EFU$@ntE?|&BbSdAxNyPtDWH=+1N@ZOxK@o(UG5j}eH zQf||B^0~YU`|P1OkgZ+HTD?Xpg3&qj_Y*Rj-KlHH;B0pM5;YGh#=PX|TTb(mZNCd0 zNk+xw*}jgX&eC5)Tv|o~CZJKya5)E*iK%)Cp05?{4<<_|;(nfg9A*B{m7HWhbYJk! zp?=M|xpl7f zF)z5l=n;SJM|yBFoO6zC?ALAHu`QE9ROZ)P^t=Yk*cN;rfp{E=_|;7FI99B2q38Y3 zf2gq|xpiJ(Iet*$1Ge}?eVs~zj&d(&2Sq3EQD>3sot67L*gxb|T%g1;u*)fx_0e&q zQa2ifPgXX&NY0_-H4E?jKKByae>!g6$M(FgzwC*ffVxM(|6edpr2cr{i2!@bOmHU{ zEGIe1c+`LfUXS|&^pn#U&LZ`X<5W9vF4JFD@Au|uRY3LgjQ~&U;Sn-@D)_6C-hIi! 
z3(CZYe}fDy2G67ZO+w#fp8f!Ifq9M*eH}TegUE=pG?jNDWctrKkHYbtZMGG3@VltvvC)=5+XWqFH1-^kr1>^88TJ41Q zIUnHyP(|7n8vU|Ld9<=Qm3Sq-o(`VBvbjSk9K%>1t{;L)r#`mIRX+UX&_ejoIW##U6LCRyJ}*{QVk&4NdzJ`;_TnD@z~e1G0Y;uwyE!MUVn zKKP%9duzBJ%41F>&X;K0kyLg^k;_PQV%_#*)eqHca+3d~e4f=+Ldnu|f~H`p;7%e$ z+QK7VdEz7v)k9)fvR5SMf|i~eWQH)-?|W!}d6xAb>TM)d@nowM-cc#qc@&AL44Srj zN?x4wo3or^1+xEP09%@vn#xA9J6V8oN_FB5t@HmMq${~>*JyJvI9j4f*8=-P)L+NL zxk;%Tl+W(f3rN}vMz(cDYoE}zqePN4c{T8M}xy z?Sf{>F`0AC9$}#?k<-kVj@DD6hJH4V?n~>F*}acG7Uz0w>m zIpC*gH!JD~qgOI6ZecWdlvV4)FKDIZ{n6zzZD##C=Q@>#Zy9Z9O-{p;AJrIL^f8Nt z?o1w2C#&*@`%Ik5VKg>#gez%E{NlW_KemeR#Ey;BVxsQPG`fyL)yF}ReWGpuC|j!W z-_Mf&|MDMqOELGG?{+1rHMN(__1Rhb58O)pN~x`y`Yf&CN4Bf7^gogL?46kj>(tD1 zqOH&@S9T^t$)&Wp?aZt5yRr9<1@xsm$NfD)P2BibwB%R!*OHBqMwUyI>yOT#@XU8)88(pODuh)*J83cPt8p%W^{cKOS2Q6 zeg}?+UCWyEY}`9YKRHb#XEbED>tnb*14POEyOR+o`DKryEsfZ|9%^*Z!xBAQs-=t; z$qxP(?XA+sT5_CN$z;9xnFM4#`zAO&U0C)uo2D)?^fu{fgsGuH`yIL%ou!!xo2`Muh+_c#*OTP?1h3O{X1Gat7-L0RBX>8 zeT@2JwbVwvmC7gn{~3C7DZSlUFVj&c&n}V`^F;Jpt^W7Cx;N>_t}y>jsUAj#oAsUb zoto$!Y9_;6YgqSG@;|g`8K`b|v0Cj`%mQ}>RWJoy82z| z_AhAr5<62&o0Onp!Kx|8O2^qSn6pBfJnQ}h--PVoAb72U|&sH%lfu{rFM zSENQkq0BZuH?rsCs>95zZpXoIQTRY(^laESQMMnbu1D`({qKke**7{C*Rsp*fdbnl zG;U{gv!5YPyPEs|cyYHIyl(@=6r=O|?&T@;BIEryxNdZP7K{?vxt=w60*5ox+<_NR z+N&|pm7Ev*4Vbzqbpu&TF2BBBC-c*CW@h$wlxBeqNI@cp)`RLS*dYSkc1Np58zF|2BL9$^Qp_+&F-}Fu4O-JGD|OF4>Q&!n(#PnT+H*B3g)+Ao1JsX ze7Z*ML1=lko|f}Z>d}wnjX%Y!3aO}Mgnt*Gl0m7Q{+pAHwRoBHjguKDPqxn^@jdAJ zSFR==X#$VuTH2eZ7m4kwMt>@za~0)|^)IX2@f5Sx)I;C#^LwlPD*Q92tbxDljHa2{ zCKJLmysS+l;wLu4fjO>!MWd62E2jgW0j9)yXYFWza3#;tBd)dt-*>p#L5o`|c`6<( zRl5yY`@#Q1X+|&f>;vCKJ#{BP$+vU>e=E_!=b>hKwO#?;LQ=UC*`ERa@AWWSyW64a zKzy%Em$Irjsi1bey?F7Z6~@wvV&YZ$xN>z-qU4VxXZ7a&hBerP(&~BQ1j?i1PQLq~ z+O{~<0Az_pF6BNvj#iWd(e8Y@lIK5!bY*5%4h~n4sJdFHiy}EO`x$ePrX(%-n9fDX zxAE&#wD|_q!|B8gs9Z*~bGmWzcqTJT<_LdhOB!gg3M$to^L=nLPkY{g=Tw-~RsT+R z-k{rG($(sIuP2?ONMRs}g*~*OT~@H`;OF^xkm&O#Nl#`A@z|aM-DKPd<$BZbE~KvH zzaQs&lU|eUvL1=fjG}I#F?ajD77T~^z5oYu66|HTmhAAy{NeFkhgv7=HIYAw77JC{ 
zu*PeT+Qo=I4i7KYc7K|=ggwf>=0cl&3(vNg!hw>UFKP=i_ShSUu@G?4CPN_)4 z%SO?u#hn(cNd84fuhw4Hd{VX*sH2tH0aAvhi5Pka<=y~qMN*Ny5g&Q&kH$NKvZ;~y zO*In>SB33L9LqyR4_lx^VwcK_f93x8%zQMz|L<$`fgyW$kK>QDB?q@@br34VmoHa% zn%TG8!07t|%aXkmTNf6reIXZhap7Lj-$WYQqIjhAzfZ$9L(sNpE4Y?C`CQLlQhjmuP6J4A=RXfw&8T4zcv1Tiy6QIF}=ms)%V&N{@;S-TaY}5#@W@^$p72v z)^v4q-bZ${|ZK%Kn)+~^!}BPe6QBJAStHBIep$8bgV9ZV){9V|B#B5AdHmA^MHT7&b-BPTD9p-8>sSV-g=TucP8ss!1o;zu@`>rgkuAg`dFRnY)f{g4!9aXbCG%+m4kks+6bqC8T<+F!0=d|v^c4Q~Q22dvkN3r*b5sNHzNP7&@HD6or%*qk1LlyRO?>fJP$)TG-Uq?Mg#Lmbwpt zV^{UF3DDSc``o7qUKDCef>TLjlP26SQI=@x#dQkE`@dPteu_N z?W*K4r}fOE9m7%SQ+2a0P#y#`j7mSVt9P@A*K4tbGO=nel8&ojkiANI;@yl6mB-0W zFj#}4S+$D~m9qnq>pLsgSwl~Tj`^sXsE(9M*2Y&*@M2t^PNx4(0uJUO_Sfe$k`r&N zDk^8i`vZ3~PH%&L@mx#qslI!nP02S%)buSlk@5R5?Ys=CG6zIuYktyS~W#LK2WC7k{M>Wv*`WE_s@Np{tkSK~|~T!h0vs zW=BEx@BYp%#Oo_p$V^T#jcGGrBE{`%j`;N@j+hxSXuK-{rgX z457^R#Li{6`B8-&mm>q;f;2I9chc#98W-{;@$v%Kv^uqDH+22Ky(*}8x&E)zPw9Rq zmgZI1b!A6h#KG)Q%wD#yQ1)yi&7-(|39lx0=7(a9FA=rL-j=w>`^dvu@^~j6j8Uc~ z%D+kmR_p7ov}A~AXH@M-N^b#MbN>?8(iSFZC9!<5Xz%DL`DiaBox_Z~$qX|~nXGE{WVK(1 zX)UAwi(ozj?6W}rYT-%U1LpeZSciqpsXMRYU!dYXDBe{VZ)z6g;i)hK#2)BurK{Q0hP}zyw5Auj~BPXwd(9; zXP)8^zTXL6zp`5y8FJRrL^5}ta?MdIe#2P!ucXY+Xv{aCA~Dc6&o*??akYb9rs z=9$A1t=@-*|1>(}N$4rO+vHEoKH04KB?f;c9qOQ^obgc;pJNN(Rr^CwC2l^{IoGJu zS^vXHQr@5ASN!rD+1b_TF%Fhl3wl(YWQ?8<-b4V!D~-qTA*~#O_Q}&9Kl5Dt%Di9; zy@_iuqCbN32jXToHQ&-^Dd%Hr7>xn%ayCBxT6?m67F&8a3JoHE$uja8XyTWDsAMcy za?NM;?=FycN4@$aq9W>_2-?T|PA2l4Bl#atr^?Mz~;ccTs0lFWKI&=k$C)Ye4O(HJGR z(Mt>Ue_<&eMC+KWek~uzc7KyOWzW?qh-#}Y?7(Jd-GN`_=;K<4iy5AWg3d}TX=pqv^v;pi^LkMYL!Z}6u%n=}Wn6D6Fd z&fR@i#o2pFe{1!s>7f-!SCN$&yvRkaEuq1mshKPU{b7`n)`xJBP_(x@^m38lfQLJJyn&$M>1>ha!ZdOkcHP{V3 zGL9dLhueTFqj7e3<>a1y&?)ovoHd`E?c3;m1=+~+s8MXn0Y=lrVP|JuvP50QTJBkt zZ9$iQM(1QQ+7?a+;LR;cjAxJ6fS{2&zk%yfG|jV=>&VE-AU&PTU+VoWOLUlDiObFj zWUt}xCOkXGl{{~}m0Uap(rduqUCXhHIOXNpm9-yAO)RKg~+x{jkXT zWdrTLsI@)Wt^MfhM`U$N9By0qLW!O24X@dI-quFt>vyc-^&_(%T7(@A7;U~a3`#440xZATX#;%Kec z!-LDvqHR%rzoK4Z1@_1F#K7jP<*c!mW0$g<Yc^*<3ZF8dM 
z7NjdDWOgJ!gI(Rkp44?UT2-4J$l7Qd5;m4*J&)G6;s53Oxr4SW0R2V&jYprHxqBAq z_C%FDdAh)roRxT4Aw|j0o|xX`Eid6Sifw6&Yk7*i(Dm$uJVvjH>zfGH7vOsb3e=%F zr?O&A&?Ne{2aTyPKv^Jg;HluTz_>Vpf_6) zb3HI6rX!#KedPp!Qk2o=YbBq3^S?iTm)7{rRRJn+uG4| z)9!g=ZgjtzF)$~;FQ?THqH!&Ft)O)|sdyBhX&8Mw1fB0NicevI`ul$-InGMl2DU4{ z)h(>XP;IOxCl}!JCL>3pbhEm;9>j}Sx;dz{p55z$SBV=wLkms8ors7#S<2*YOD>Y^ zzHhCiF|6R2LOb*P^e%1WZ1=Bdd`?Q<)`;=Gae5Hx%DFd(@x_wQqJwd~y8G)u(*-?W zqRBa#W-UAu={ge~mXQ92plgM9hk!n3_jIFK?TtKR=+}cvW)JFhVBF4~Hnd|CZAe_n z9a_kEyEoqFwB2NEOGN22_&*b6`l8Qdm|PF9ru1}}e)iGQc;#}&TC$)`09(#!{sbLb z7ry!z+Rq-)oR-{>WqVf(6>u+m-5(`eiIZQgUUC5LY=oJH?k$U&XX_>Bg^w{>JOIa> z$bN)&R~p&Q*WNYwKf){fD3jObb-e!^wU)6j$J58eDz;a1E2Gp3@cxIT&iPGWgYZLr z-@;D*Mj{rlR13^`k0k9~m0M<{s*WBxJ?K(&xZ3|SSfArm0~q31QO ze(QRohK~pHdp>8v{rmq#*PXy^Ii`ODZzZy{P)ajWNi{7J5-|@`L`7*KLKCHuin2|X zrzVt=5K5MWRJJITH7QFA62f2_6vi?%H1^N?{ayF;&ivoybARsVx$f&amt#MV>pYM1 zIN=xXERh&F`zEU;bLh@>;49HAd(@vL15Xqbt%=Sz>S?I*Z-V6o^4b~?+ZUd|$-K=K zVCiHG*^~8ePVS>)_kr?#&^)4@toY=lozN^9-m~i89;b3%Obhq2LpP_v57*KfpN|U~ zeF(Z_Is8^j*?}JYycfM+#KAw~Q?du=RP?v#cxM(TKJd37%(?5y*O>7%F%RFNbmD4$ z#nlT7|1}X7SL*dTW8N@S>ju(R+MIyH6UkvMROt1VjoQ^nQqCva#~Vm8hS=x3Ca7#o%F3<7_{{g-4vV~Z57Wj(Eq|)~uWZ2^@Vpy;e`F>1Fn^K^v3v5^ zu7^i_{eAJHC+m~EZ4Gfc?+9i7Pk>EldQm!`vN>tnjpcu*>!n8YhBUT64X%uPcPiD^ z{WeCb$|Pwk&aP0m1q~VN?rS*l6#93-l|)kHJh|&|v7)PF7VQD!Qg-Kvpff}zz=G_3a&q|w~An`T-eo(?j~1AB|Th* zR#icLE1HJ>@fdR+=K;8yOxbz63V)`f$vvQshmm)XhHCw+)=i#P({AJ?bI@n&FPzLy zo8-9L1$>X8<>6o$SV+y^*z-TKE15~}K_}kUPeZ-uT`zBDshwzs>|rQ}!^^0YH^8mM zlS|o|?6A!zbKp5ksPrq6dpVvj{P*wD-TW=dT5{!-?xo*kK|Bvtf?Mfs_QO^pZFyhJ zd%^PJn_Pqq>A($SChG#rXjw8!MUoTazbowztd)wltoYXd)0=+n->s)iV!Gy_O5R#$ z7xydC=xU^U{g_a@tq(3IVkTn)bDi2h8ytm3}FUVo&WC)NI3%e`3Jkz`=L4$Z_4B&Y6Z(!Ug6PEoTx zi@cD0H)aWvl_4?7tI6@JFwXpKVwtzH{mqrjTy0J&$^2S&3S|a*B&!}TFEf{4q0lNm z%+q>F_U@{r@o^UCLiC!=4R4&w^ovUA$#81gY_0L-Qsy; zFtyg#MSkDx`z+`Z*^%9913gO&*I(e=-IF~*xGy+%^n8OmA8PYc^gbB2$-k0lfam!? 
zElEmpKxeiqQ7!wEgqFOI`tbQZxf%@5pTUrM@^{d>m0m7|_XcGKsg>By&tUR6yb@VD zMUCxf^+mkj%>Jxb{ytRLot^msaaBhpjPoP|KU2IW4{`S4> zPHGK>$6VCOd35d2t%WDQr#~OTB+;E&i^xeKTglW_r0EV4zs~hyQ0APWzUnom2M3a| z2TAJfAWxLXI$E;`EfSNm2E8V-nU5K5vfnfQak80Qgp1kHzNeO3klf5&)klrQXw+bl zox|-lQ(67NDouil4dTB$FGHaR)Lw1)L(I=?09yH0ak=fx! z@W@FoPb;7NB9%e4RBJcjL{4wa8|!>rykD(k7-^*a?93d4-YxlA$)){`d&5|w4^iQ~ zf)|qukFqgYywI~PXgCS?8u|Z-Qr+0{#59b6Z_fVBzT2$$tiU{HVRG?`$%-+%S6~?N8+I*s|Y%p_H_jTy^Wxp=slyDFXH8o&KqP_#jZ zUD$((c-dCFpQ3GF^}qG|3zo94I*riqI=^;volF5ON$yj|v4`<#2|28xm+aOX$X0BC zbr&$*L;{Z_6YI#|By#quk>FEw{hr+QA%zXd=W3dgJsOY0U>$wv<$4Yqbs%Vy8+&)- z;7fkhWi75p@0@zrj~z*#=Zv{^Nz6}1;AE^gTb-<{C(}#v(IneJ7gW!xea;Ovj;AFrs5+c(jslV#=ew1go=7jgh9ska`HAFZl&Kj zfoO!X1JtO>whh(KY<$n!_Ic{3=biaAjkKPu3vc_~21VjmFJmvujeU8WSyuEMzkMnU zBV`rz_o9}osr6S9ycS)L*KZY68w=)g_B0un%Jg=SHlD-PCZs;GTN~NcrfO^^1JD^4?LS@QA9B?gnZ+ z1iGA(c{Dohhfa^7?*2s|$@P35XtLU$xZc{NHGXUd-<{aD+5W#qFVa`uA#R3I^4BGk zbWM`cm{gZeuPN=f{fQowqf*XSD%a*!S_lU}M!hrKN&dUc+;`$XEM<{OZS4v2tF@nq zhpZ8#W;3w-k>u0Gkn5^SDy=NMzXMx~(T67h>>bY0W z0xe_N%IMGT=#bpAjZuFTI>cj4c97C;!rfeV#<#q!=H#0)zyI?|mdd>A0BAWbfN6?8GymyvY4zk8Nl~PjpyL|64^y zFCe$^QyS8VC4A;NW=bZ(;#l6suQcK=lz)kqwS&z#nl?xIk37GMbX>!t{y^Fgnfbd! 
zL7d?CH=upZJNAimCHY*kPw{rYd*bB*#=}p1AN4EQC9XuD^HHvpMekGAbS3hUC98zGC+4^{^*^`(25*y($ORsyu`+d}W5FG2#Ff)dq z=wk|q_wy^`TxESF0&zaMc(IU@KHA92KufUCGI}OUV`2vqHS#+ar5z5&+e&Y>*v%8k z$*&+u1V?gjXZL~yi6+T{G2gRfk@f z)QFSyjQEOXwC+xw~ypOzRo%&=@C)Y+ix9mav z!Ta1Wwk@7{X5Mn5_yD+{tIj+UGM`K(j&qq_l93^Ed&#D|057xmwK=$UA|;vI{DM|B zMuW#-cM8gWp-yu8z6AQZdOZUk_tTfDwEKIu;vTfD!%IkJ&0{_P9;dR?Xg?B{xU9eU z-A!+=gy9FXQNG9@{*Bg399}=>WYF*&g-InnY% z@O_4}e<3+pVaQ&PWW+uMZw6{TtMuQiKiK$vH9OT)OLvjXyJ2yZo<1QpYsmG*xN!q6 zZPb3^hFjrYvIh(U)tPKcKP_YyxD}r%nVaX)fp#deTDwizy<_maiN1UB__8-{IxS7+ z&$j+&b-J0>lUX@dr8%u>w6M^vlA&8r`|?2UxM}F zq@)YF^faC)*5NJk+KdF}#E+?3*+L$Zq3w0!{MBfDJM6onH(202a4LoZk*)^vhzVmFcx zy{+fj$M86>>1a|pObbttuCt5-b7}Sz(z?jiJZ-Gzv7L{D%V_DhtV5!Ohr;Q2R(KRz zB}4S>TKJKU9EF?9Ks65~k~_5IZzd1J?dX&24GoN^3-E0TT$0n`CBNq=|2s9y!I1ds z(%jl8)OZHW*$JCmh}m_RdADFzLFoa?*EN1DL!)2FdB*m|WGZ>q61|kn$S0xOxAd<9 zDILT5my?y8SRY7g=_&KdrJ37jJc(zJweWKHN@IOx{CduHB10R3B;H;PIx?gf*~|C~ z@wa~UP3E}Niw|50Mx|NY*?u>2o!Hf!Vv-r|L}SEbs11VIa59n==Y~KHuNEq(A2c=8g3_bUW0Y=hRQs z@MzLLN-uwPKYNP50YTRI+mP$$X?sQbe>2XUV?;k+n>lwXF?Pu**NY4{@i(VfKEs~0 z)lyH-v&XS2`Mr=Fk7p&ODz#91Z^1Cpww?XoOg@t{^%UH^(YtUXIE^YW7z65LGQLci z#duW)=e!l|h@z8Va+9_*#>Lj>9qLElo+x?(t#7QY6JU3Ywh~LdO#MwPYgan8GyEzj z^Pv_$;Ta_-`yTURE|S+zsgNA`muoFRL;+sU;p{ zM>P5vL=}wEiOSiF)MlSsJmZP(R`+~>|D~DG!~oBx(aE!&-MJT{Mlyi}hGZj5cIiYL zRP+C3R9)_WY(sOLnxU<%t5wo+-Yx$7vrS4%_YU|U&;C=cKBxx#``K^)EkWC!CIUKf zMfqJ7#)+9&iHY_I$9)@D^9?n(fHrzVQ|2K>_mm7C_k&FPmrIS{7nS@(^F4>m-9E0s8$Oq8-iHH}PVA4}t~!!<+|`oTWJv@nAS)k6!lLCfjsRt#n6=?);br%EzK7 zDy0>hFa{Nl2V4A-&15lWdL7TQ-0ZbIgF?f+3aFftxkpB~5+TY*I_{ab0M?!Ooc~`C70J?)b zn`_K^8dgow{Q$I0B!G1M20p?)By( z`jf;PUG?FqP6634R9}I9$q<*d@pkIxq>L-c@28;4yVjTd=<{Kayaj8(cB|UilepHi z_@8U_u|?T;LEl2jWDriK!c8D+r_Jw_-vXBYdiw~Z8;lGqNcNTJJ^>$(rlsq(S0B~7 zX}=d3Mzc~m;pGoBDiC(1F-ODpvqBbgp7J<2&Ls=4Xm_si$=UuJWiv058OG8Hkb~iM zJKokpxizeCPPI+Gu#F^QAoqb0Fw#s<+HQ0ol_RtUIXQE-&+Xtwf-8}cguoD>m z#d0U7!!un#&<(Dz@AS;^^+Y)uc1|54y8p_0&Aj-M2wDm*&sa+Oq`?uhRVbsCXBB7)bB3xAXy= 
z$qA~_?y)4N6FYK?r?qgS558mv+F%$y!ly`vkVF+er0@N;x(P~!%QWL~^tb}2l3}_j>B-8&1Qb2Sc=a5KmA#UPqJ2 z06TK#qvZGKw`P_EKwVKA!%(^Gqdk7j#CqHXI_#2oc z|NkmIjHcoD;#7CG?Qs;_r_je}`EJ^6&w9i^{+_J;5u6oC-oN0nhmkUCq+N`g6^k*w z2dH|Zbxyk&X2i=IRs6pN?leS$GBtA+NTO{&*Ylp}-waI__&Wo(r<1+UlGXTZzOhP@~$p<$hO0%Yk^(!oB=T zR7%GA(s)~%?>h|TG6K(ICubLJ)g>>}!P1qMe@ON-ztySumd-1wNd9lbo!MsWW}|g- zC604v2l9}J)gQE0+C887v9G|CS?c)rSNJ3!&1^6{;?AFx>qNG)e!e3scocjppxtc0 zGnd?q7Zg}eN9p7pE60}?=uB3D2jD?h-1`>iD$tn4>SX5h^+GcyYiSxB-b2TS^_Yx8 zUy!D?KBM53h>vGrRvEp{f&Xhpy{6=NG45uK`!xK{n?fRc@1{BNXyQqpOmY%yl(qA3 z_1;mLQ13B3S&UQ7==pdWJWb8F^_1D(aOFpJrWIa$qAQ+LJ|_(();aM77qLvsNd7d| zJ@fI|uTc)aPyL#v_9t|(0v+E?z4VnBqvUZ*Y)Yc9(@I5}n3#h|Pa=@Y+2fovKERXY z@ZCTc^5zgPv6_2{+Q^BQnO&W#k4VuCq;NO&;?tKYc`UvrCt*(a$((FCYf@Ri2cTp* zU5PA}u?L-9DeCse=|}o>c`yayjfw3X|_B?|kJ2_UR>F zYTmE1H?C4~UFy4KA)(v(yO@nicAq7Be3m62V+7uWZ<|T&X)OIkMz#~l-V9dlSDgGi z?fjKw5Ax&;GB_9g*VB~ED3^?BD_E_|Hv$H9HIu}+!z#cQ;CKOH$n z*_@Vj04blv8YU7syRp7E>fUBtdCJ&&E}!5seK%FN5f0x-Q)ZH+d$it*#PlZ_iHq%t z0?o91m$q`UMRzvqJ(e_)es7S~SxR;;+L#CL(I{~?nAgzBi;XxFQQ(hk=3S_H4(=@h zOXd_7q0Ity4@LK;TD_b;XH7om++}v-Oxkb)2-cIn!FnEu-)mr-=)%oKEW$acUjtRv z8xP_QWHw|z98c!YH_+EBdRV0VTy?VtZ~~k|dgqC-JM|pCl*gBeb-gc25Lp z(?W`pA7dC!Pgf`Be~0>Wf!xDic8vV((R@)Z5*%evfXME^`n=XrV- z9-U>B>b8L8o;<6>hCGNa zIXU!RmT@i{x>cQ1d8&qte?(zmL6j z-E>(jL>gbm457WGICS{dulvN zUnY=>%kVkt1s9OW2bHf1@5jKs0}8#XM6%qDSAMZFW$t_o|KwJ^lN}#HwsT7DU|7$E z!I9{`iWjv5nj}IrG|%3`M@j2A^!hstPayB1(=ad}$%=|p0oQr%B_6h+IzMZ>7AOYOshCTG$4L&)IDREGFuYDBmv;DsT6}lNo zPC(!6L0rf7Jxz1x`ZNMf_9xs1%JuFgpF=Nw=VbLaQ2SI@+p;gT!Ks|pHNoBZbcr8G zc7g-Iu!?nSsJ9Am*g>hx$Sgs-?6u5k(f{%{@r0SJ>uP+NrS?{1|5PxS)3jt!?Z;B8_K5loaj-hfGsiGnJsF= zR%Gq>Dg6wmd&zT^)92!W#n(`V^vB{X|dz0AWN ziF*~i-Nm;q^F4@sJ&Zafk2Zc(vT-G%vD9`-hAyQD&SarYJ44{H!2kFTXM!%P!Y8mv z71`M7s4$-7yrfpVwsBynL_3pfdjh$ch}Ow{mYkadK~fR7BA<1QEt%0P#p1*_{u+kW z$a5n+dxdmnHK9BDP4T1=?sR6m8nKsupm`VJTs?NO9{Z5<$KN3l(WXR@T+V(UL^8XO zpy!lMKEj#g?-8=ooJNgg6_b4|9#HnD{mWH#+SNsWne&Xjd{_Tl;JFC@_an)>fFkiq z$<~pxT^cCg*355b=7qeg5>nuM*o5?l1IriErW$ww0=^*0$@CJmuF$rMJn- 
zwx_$h*Vo`*RZeD`0*}yMLw)JEG^+==dq@SHUD%HkxU(e%ED2^!TB9 z1I~`Oc%_l=WuDIS3G}&>_lmr02IhwTZu=ZXDw1*OpM{_GBK{?|b|@~~%val!bRA9a zo+W*m%}nO-sU+{O#-cGay&d}f4NTeZnvpiF6J!D)x4L^U>Z*~}E2izdqpZJ6K_;R(2*|cNH`6LpSJ@DW13_8Q$ z9ld96DYoI;z)&cUHCQ8T;-*&--X+q95kFle`06cmmn4offw0 zKkbf1pLiYZcq2KRI5|=i&o+twmll%Y`FOZz2ik!wYzJe|Tu-u!ot6A!mDu9?16Upd zgHHSxiT`Q2T)Pqo{Y zj7-Fr%=P57*~G~nuAM*s8`nvA@{nHsiNd$4`vVM0G1AG(oE*FTinp@Wesv~0Hx>P# z#RAVN_>gzDcy?#;;*%$?SfiN>er7UX6)1CL1NubRB`F^;VOmTPOZEId~a!(xQf26p!@!(x;oJfYA z)Y}Dme@eNvWMhHrsjPA$xJy#9TK$WRmx)4MZCq_lp4X7dHaK?(ee6rqZUap+N+pZU zc+}0jM<2BkUo#ip*?D*nn*7S9u7XqYKg~eP%((B%CvWO+{M5X?X1CwVV7;E~P{SlvOim{{x`!HDDh2%b2%hnk66Cw06j^@O+oN8HDOUEmFBpG>< z?|Bv+E;a+3D5CE{d<=P8gFlbr%cIKX&1ik`cY?O>BUJ~fm)YoeTE~%&nPj;Md2Hl= z>`wab3ggU)PgSmEp=rq={V~~{N{*VKZFZf%44MUcT@Iov@M69?IWO-_&?Qscqx#57 zZN}b2nr0--3V5P)&*fFW3)*%{FG2CFn?7y~zD?h=P%s&G%2BYgzWTF|-S8skC?(Ta z-U0jJ`(U&=2F*IMqM3m|n+3laC2!I~3zmC6oNA*4n@FY=1-Fv@FEcE)3Qg>RW>t7I!?n^D6pK*fcu)U|{|Cb495O$ej3(RP zH>lB$RJVt7=5e3Kxz}0QoF8zKepispL=C;|SNzFMu+MId@6fi#i~?U5 z67VXj=d77&WOgFhvybp57V3FgdYy6fL$sNqooHBHcYce5>kGTuo<8o)25rWbAHnmp zaWdHrpMYZ`whmKzBUwqFzBkl80`*&>cDcS^X1}r@E!itiC%cJS-n;PHO7Q>{$X+9( z@ho)DtXbmAdeDvLEZ2{?{1Uttv2aycli@Vyq^`@2=$R+XKC44{3Eo!tni954_l_cj@r5t4|j2$NQGtY_fRALKnK^E^UL0y zQSRhZ5l5H9zoF}r|B$)W?DeUMjz_qglc8t(yR0Zt0T1JCmN*?hV~B4u6ThvEQad^E zZ3i{}rJap5z5-vT)1@_607>DAjL zW)iOTChdvgUxr^-;XpNz_9k0{3d#JN*0PiAI(TNRJ`oPtqjdl&iZ@&xRIT-qvjfke z6*sUJOL3|TEoq{qE_}+sT8Xs0=IT!*uRZwM(AS?p7}!rivHqU!$luKl%4GZ7&U@L1 z;9o{&ULk`y^Qttz*@gEOok`y5?B~lo=m>O)FPYt06$@(5E#xk~+gzHOIp|4wwg)7>ZhO`THtiljSNJCKXWLgKsPXManF zc30|fQWl?f1B3 z-gv)C5jA<=n^)i#FF7UiX7=Bo%x#D253`1y*rCr!d3@oISluQtp3FXcZV3=bKYKcwG%1#0Lyh24kqW&ZDc&ryx+4G z@o~Oll}^*<2-2H1#CCWv8gI@r668d+J6P{6+RL~yf>eG$?w?{C*0ArVux7~`|0X~E zAkbuvE;+mIiz|s`>@+1VHe*g^Syt9+j>+^R>q+yGSZXPEL;4~6}+D@WIt9i z5w!75hH0s{`Z@RRkis__My4pC$rN}WaUj9s|)_q@i%+~wd3 zCMT=4uu{)G;rtso{KlA;w}i~M<*e>S+G>kW$+$Kbm$xS;^YAwDC;dSatCxLh$>EWG z{Mj|~y`Ihn%Vw0#s&Zzd>!3pR>^{ckt=3m|w7m|FUD%_OK(Nu@Pk19I!~HDMwgKkJ 
zx0DlMCZWYY3cOA>{(KJ4VQAmYJmyk8b;H{M;7jaqA{vhZTlVW^zV`*K&f!5^i`Nh0 z=r8o>0T$>xcLx?NC#QMxaePmXKQ6}R%&k_VQ)5Zsbhu?7avM_jxE?;olk7cgzzPij zT}~KWRp?T@jG8{jk;cEFSznZUT1&OHwNhV0V3R$U^+@*bw017&4u)MD9INQg9bnCz z{;~Xle|dfpo0v>_oAF>J$WAZ@-%j6F;8|yuV7T@>c=`bw-<1Rmr4v~n>%)ipMSpq6 zYzexx?9wfsWgRDe_F1GY>naV6etAy2JgObo{$?Ov1;Spi>!kiEAnVO) z_SMTuQo5G>4*_?l!iO)t55G%y5?gU3u2f-fTH*GouKF4w#uRdw_m#ddmIGqTh zUhe&a^+`-jvd(r!p=BheA}-8=@0BDYYwR;wo(Ht^i5}X)eu7q`U08T%+0HfMP9T?;)AQtiP3&``?bpIF^A1-k z6)$iNtg>>PliE6IYZ(1ms%}qDYoYA%dar_Z72SIqE&m3~jN*w-u7j$#Yq26YPC~O8 z>|=KJoC1rPWGQP6i)q(wG;2CvDTn(lN_`y1Bp+=dyUEPTnk#i1CCBNnLFk|d#+Q^u^BfZ(;{~Qo^asLtD>@TX% zB4<{v8H^IEItaE|5&ko-EX27kMf*APtdzl_B^(}8atTW0`yt7AttfzJ9`+fywvIFu0zsu14J$%ZJgk?%)w&b^k zKlCnawve3U<*1{j?2{Y;hdKB(k7Orui7l;ENWrtTFniZ;RHwFf|81UE=^TiK+IR%S8w<}dUSv)?+fM&^N1g(Q%sDS~_YdShd-}32 z|1sUoY;N)qOeQCnX!RInJ9)M%4XS|piFmF-3U)P)48ol|$ao?h(tiz*yrRZ0AWqCl z?o`ra4}TM>y`7P%G^g7TMRR821N`?g()D)1movap!ei{e|CeH8a<4VYE{FB+IOSOIp3-w1n@jOp{b?0;S^G=+sKH1aWS?kH=A73f{-#LdBy*t`6SU-T~f*`e!4M)a&1f31c0jE$u|IDI@%3~hh#B}2-5_^cp7Gf?kM za9>bBmNTn|!67q(Q|REWto>XqeK#5}De8(Lj7iHF%%D^HQ4SNuO7RLM?oI@^;t*(+$$iA9}BX!Esa2kYlN z6s}1^{>-0CK8D`fKHhaC;7C2k*BQzuco%hU;SYb~PNH-dxStiPxyGQmENWJ{TalI< zNl4yVl6xs9U3@?;zR<#8Z5>T}l09aYb`H`@A|03edmfto6D2bTb&lHil9>1o{n4N` ziMiHQR_zDEE4wO_9d9eRb2{O%Je=cL?pNWy9FKaD`V-M8C#&5^qW1HvJ4tz*K0Rno zdI+i}>+=WTX~p{GMCxRR&bo8J{uSB(*H^nNUUq3JhJP(Iei_i&Sda(<1^h(g7(ltc3utE zW8P5?CQU7hcY&M^Kb#lvE^QcuABVy3#zI35WxF0$?iBQ$O1B2k>_^m1gnM#6XWgd- z8R`V0L>{yQOK;aTQTQBu&CdA5dM0OLcKv6qqKW5&S)B*qmbcIE)gR1m4g%2uAUOnI zH{tM^WbQ#UY=QQ(z>pZacDS0i>^{7gdi3aBzC_NENLGUP$yRfHK0=oIv9g2oUY9OU zCD+M_kuf^wR&-SEbrO=u>BMoYW3^h)rAzn|b5Y@6c#snqzRd4Z?;g?0 zIoiKnpJiw=1&rVLH5deK$aK!F%&BtAJ&C}XPV1E^jx<|Y39Hk5jjzHU`!q;aJtc!tontBr)wbU&72G7ZWb)c~ckH}W>}d<8hF zp!|a*aWqSuQw?LI55x75XmT@6$Z172N$6dASVeZm!lHw|nv;}uu2+-ewxG^w*prO1 z4e8xtysM%0&ic)6sLc3f)w_yv&Df*t;iNjlYU>ZH9Xe+ahInV7XHL{O?0?y?Wq+|{5587n1%2~QOnd?;L4l>s7hz5xQ z=;>ZY=!$IG4yfA$7c!sK5C%thma{>!GTy^jm07~;jnK&+wXvv`c*c0nC2u41zvX_H 
zT1>0)06s1Lmwv_TFO^M>fUKFvgD%Y>S0NeY`sj+cIa4bwSAqGCu;@Y7z99jdSg9Le zQUN8uWDUz{^fyXfSG>(w`c3l#M{ijXI^+mGW$%?=L|ad9w_4T z#g}gZs@cjNMVgXLVUTev8MZzsIGA^b35AsRhhOHkr)j5L?>*?m_9QuTt?>?%k^8P< z6q(2ZCSzGMcXAHNZhZJeJw*B<6`6ev1j*do2%p25$ae*$8v2{hf972D8^ zMK}I)FCOS0d=raMQRy1~XCW-W)ENKPBf~A>*P-iG>MR& z{qJWfv5_Q|``?nBzl>H7vONn}+sp&^F)GfdZ<)cpg*3ciTwh9(8?z$a$o>n(eBH0! zS6(6K$FYeywY{S^+r>2gJNL5}{%snPb^5lj=u58V;%jCMo@XCUhCyarH`9tSU|w&e zN=!jFP-SlA6TF(Fx7s|!%r38jb7<9$H+Yw)b;;62w5BtU^HMUF%;hDGf|^kJFT?<^NkK zk=W%W{JpuJ9;TI*@a%~mIZ5P660?Dg=yXk=A7=itp}K|XMu6MiSdpc9Wj>$mr! z{CV!4t+gk$u$NKnYc-ameFt>UICd)e=?2~zq~;-I<`$MTyBW_>CmGAy;cycr(@PiS zHW^F4V|5!=beHKOpJ+j|;|zZ5JvsjDtdtO0#{ZPlbBA8I49iKF2( z$8~!?@yp;%HuOiyP;C~rJ6|D@D49FnfzRHns9PH~Ps8aIDANZN-C%qxDSaK~*Pz!S zsFfIv#9bsNsV9C-)xr(1I>`uk7K=C)*RoUYSfl+d^x^=}UkuV2V2i){7XH^}n=^;F z+SOVTcN3{Rmdv%ntDGlxfVS$hudln;vG8ghLAk+3$F68M8O}3F_x&h67VlnAYlXgg z(}tyb{}>b#l$b!~YoO>D*8Mzfb%J+YesFC(?PxrGoE{}3`&HWR1fw-*a6dc}F;<2m z*QuZRfi}k7<@)%DeaTL-JMrN&(A)#wayZ|JCViDUQ;pj2e_30LQRyh7+L`(vsa)RT zSAb#_?`03Rpb;C@4E*is|4}f_oYKQMbR>!&4aOQEIu-A7GW{7OBz(;&_b-tCj;ug3 zjJKk>Bfyb-M;D{ai>N$b5683cv9QTcTb~WcKB43keT;Rj+qJt9{eF(Ku)bhxV+OLr{B z*UUsUb2qULHP9{|=+QK7NB!QYm5%P7s{9FbCw^a5R8A(a&hBTnry6P78x&Px^DoqC zO_p$&Vx@KEqKgRz~y1_%s{NS$Udd zRQgu$$#R%go)^@rjEBdO&Hk>gHzJqABYw{^(r~cW2hfk~vfqy6b@A*M)Qq>Z8_7CG zsqknN8OrXi%$^MeQ)bzc@$5-D{vv5g9_60s7XRrPc4@YDM!B0ad=oqHC~54ma{q7lnr{YTO^izaQ*+P8&#UZ&MkjCF^gLgrl? 
zz$+04i7oh8{j=4cOJ+LKgTy75k)bwV%UZ`yddwVlGPxyMVhBvLLol3O%D&`m+jyHj z3Qv5Y9xAco!S)m_k9U=5fOxXW(EBu6c2?#HHt`i@vb(SxwUYHP+{-5vOY4bRNF;MI zh2;0q-Xu!d;}Ug{Xof^w^ajlk-$0(v_GUx!zX3Q)cS?WH{P(Z;n`ORQ!%h|b+>}biDI2&eJ(Fs(q zvC_Mt!@#14^pgC*IWwgsx5?-ck26pH`xDCLcd|0pU=!~kgIU#ok^RUSP|0>R6hvD| z#A0v4ACkrs(ds7Lp30NEnQWZyIyq{Rxh{LBCb3Wp)JtyK>G*IseaT4yE4`5=i}y8Z z)F+kgQ1J;mJ`i1!(>%Lqdg*l)Y_H_sEpUGYzV@Qir>XyY67d#2Si?RyaPM3r`ok=J z^1W;zAAxk5t5^8U*{MI8MNLN6!~rxle*96NkFyYmlbj*i?Mz#O)srN79t(X6tdgs- zi&iIjeumNeG#*3dQ|Bp{jA8S5lF9e_0O+#^{&Ds@SsF4woG9gidP&C92 zp2qOZrTh(q;Z(9u4n?WZEuL6>n+awG`;dzDdOMwTHgUC^>tr)ewDVT>@M4;Ehu)^6 z$Lp>aYiAA&`xplwNAF}&2yBTe*$RR;J$b4-XjOY{-h=n6jg2eyH<3r346Bc7e-pm821^U@Ctv?S^4tgoUMBT( z)XIGCay63){fHUis z4Yjos+FeBEu1AIbFpL~FC9OGo`Yo_c@Fa6(k)LiP@)|IHPg-_BldM8sOS2^Y3al09$)Zl-?p@1oHBKA`7jiXr?X38YjaX|)_@lnhg*@3Q%J!9#T)BkYVEQA+8;Erbc8U3}>2E1cF#j{>z634rR;RRv zpL4kGCK;XZF0qe0pmiDlcAEOh?e#Xw)L|KBtC@Bi`p$!S#?*NvI(O>Q&t@=Mh+~Oc zxeVW$(SiUgJ9_ToPUhz(Dp~Sn_9*To<7u7@ zQ6?+2yJ%+zuw-5@QJ!akGtV2*xa`l&>U;-R*)jE3+)akm+d;5|F64BQ(ysN|+B=+7 zWfpM-9ePlo+oANO$|hzZr*XZcZ2a4bq%lz|73tD&vXjV|T*Z4z7N_j(KZp*la24-* zJ2iJw<^`}eVNc>gU+!CpJ^2LHXR`~-$=P%;Wmb7RZHGtOp~@0+I9e%c_c6+^Q+9ij`8fUE znI+%I<2_T&My^7|rD|Ub>Sp?T2W?h^DZ8-qcJdOsE-$#$8bk-+Y76hR^9#Rki=K7_ zUnDtiJK;gzdGa39RKLl!-9pbfFC)2N?_k9%v1?braGJXJlGQOP}rNX_@Qnj*Bl{ z+WVH%skgmzRY1R#{f{4|;$7`kdU)K%>@1)XaqC#Nxk@q`laW4rulw*ni_{FsiOmb2#{$fiUZ=iRDaougnN` zGM;XB@0r3Y%7EG3gI4 z?kAIDW<_dywyE&a7m>CXv{p^|571}|8=7dqL*d$sJx})hx<>cxzT2Rk6WII2uht=3 zi6dQI=y9S+lgD8eiOw0v_mZ8Q5PBc)`&+O*iXzi-D;XeiR$d?9CwXx#(SN;ijc8<3 z80_bIG-{1O$v*JA47QmyeU;W6XnfoiW=E-U8u%Bgy9>IUjPjE~dmLIl1oxkLTjAw; zupO+#Fj_Z=9KWj78TfKCK4jn7wQ%_p`}Z-+^AXSK2y{q(p#8M5hQBZY1cOjGu@w{j z{UaQ*Pcd?u6NQtB{6X|O554;6=@ypsKA+5R_S62?G$FCdGmRk0Z1x4-_l5WK=ujUl zlZ@wyI9RIe7oZzQwx6XDtI>5&SY~(M6ClYO-On_%uHK)(lUvF8*@YA)YU=~CyaH7o zL&MRyeiv9X4_nXgXy!nc_Nv>}*&j@^TV5lUd+OTC$V*iT&B~ zHQE{m=Y3hpoDkj4r-A>i3Tu8QU8+@3Yk%^wNZFh|_DA;`q1F(!Zy;ld#L0W^9p0Ch zg1)prXqextluvGpIdp6?TyjqJ9yDM(QZUk3(MsL-iyHmN)oy&9=HN~KxuqzcGcK0H 
zv<9qiq(dw4KBIhe@iAQeoYbBUmSoAT%{I?Rkj z04;B?<(lNQkDjvIV*zd7o80ftioeZ!dDs7O=u%xjwb-Rzpzn;viB3&)#AGebqyYo* z^<&SDB&mZ*T%zh0(doS7O?KxTef9-&O|ATZa+6uvo*-%ogEnkKIoNjB=J}wlsLT>2 zI-%MdC{@8X5&a#&+L|7B(`OyfE%y978hs1RyNVQUg86C~AFJ+KzjCJFhcG-;&%48_ zr zp2fRg2G6gHezNAi!W`SO!Vjp-jwQDA9&IJ>RWd46^1T#f$w-_TsLX?9hH0KB8Cfp{ z{q3l^t2N%XM#ZeRXS^L-{LR{DenlqoKfBx-kh2HDn&18O7JvIlZM^FD4)iAHjb%N% zoQBsgM)YJntL*7^B%uOYv;==C$}szRs^ZNt>{4dYny{0}SC&}N4$8$l=*SL@1IIzA zP#GqH<|xm1^|XmQeQ`f?dox(zoOPS1pU27gbn{}%=x5fZo+7zj(5&2(m%+BqvvHvO z2fUJ#>=-hUUH8+;?7N;dgMU_<_tyJB(msg(BwjaJkUPMlD*0+isc5agrXUDR(N0wnVWB za4fy|Y$b8YjxvW_9}L_qZ{ovrVN>Q6wM+2o-I*-DH@5A^sO>z~ZJd%>!OGA#;T zovFrbSB=0`2hARGm08rGaC_HS@d&&05_;v`A`;tx3@4vwHSkq57L~!Uf_4)F5D3Si z%TjF)*JrY1lw>3FaiQ-v8Ol}OS@H%GpZ`CfoD`7=hxmGVXZg>Q|9MrJT;x==il9oS zxSXk-oM#vN8(*+Gzv*>YZ^ZBVpv($y&ZbYRZ+Kn-eu)L%7T;2xtmSS_kH{zbR{CGs zLsq&QeQfA{Bd`zme`of*l)vF(<6TZ(c+iOVkiJf1oo*zDSCghkK)1m-IEXDC>zftH zMCc`}|035v;_o%Mdc#$wV9~eSMl{Op9A2W9b@g>Pi}<7iYAu-k-`_8 zOp#b%uguS1MCz1i^(aB8%XuCW&G*Y3G$ zKBHtuP@SZF_NI-dTlL|S-OXe5Tv6%8By5e&zp%vYXIG8(1KZpPdZBrj15pP@i} z#O%C#L78?q5%08{XC3L;qv(1u&hHL`;cV@>;0txTqfZkuIs;ryJz2zRuAmKlVQ>!$ zCf4LO{7*)Pi%7xlp8u?+rYNwF5})H_M!KOeY*BcBlM0>>MVs4DvAr7KdHNZ?AIDmM z!_uxL4T<7jkI&E0jLhjhuf4pbbT^XUj1Ti^TCzQDMWc_w(isgOXGP=hKCEWWo2$vb zJ!&-W!h+`{u}8F5SMS-IRtsdQ(+fQ|>G>=^+k@!%QNg2iFi3{B%qB-iGrKv3-X4gv ziGxlC_MZz|mYsL|7d~Dh4BF$-N-$)X>ly{n%YtfScgj5=3>td^c>~W+4U_Z_>5( zcL%)Z)84-9<}j_@2&U`wmA9bv=-i7*oj8Q7(>_6_ zni!X^S9+E{&(!u1_VzQBt?qYLGG@}A8}T5RHa6D3LN2SJdoMb5CAlb9`&f84M8(BQ zJw*<-=PlfT3gg_plk}_y$(1A|9{dV4eopGPrN&Zr@hDd}c$x^8`lKq} z{Xo$4)_$^K+Sy@5?+UutS&HbPdp6s-bPt*)g#*>1~ zAI<@HJf(_cb}zb>6OQw%qN{%v-fn(pcB(d9TcCCJBt+9H!r)R;yVcVvMzU*Z<3idO zTJ4E%!(HW^*vxu9iSK8DVLQ@P*_&6+8mnZ6xjG5Te*Pha_5aHFdV^B^(Y+%5-U}Co z!15pLN!}W6)zhAE&v`KWplRL^vu`xnS2w_@Oq(mvrK&nvcS-!pSkibJu8mUnQZ2ls z&%1GXHd}e5s}J$z2;XFfZK&ooELhG8i~rk>t$0qWFQD%+xShOUcf&FHTl(Pgd|W@8 z>{i9$X0#-Gx{@Ea89CYr=j-uz1Yh^JBxe8%mGe=uD)>HHW&e05^{&%iXYhQhW;yz7 
zp!1o}uS=6^)A5NQy-ArT!BGt*X5ixy^enz-BfKd0J24!Q=UOy!n${8-@++LH!ZGux z+pFJ*gv8_eg(hYuwT2#!f>$DAbH;V@3-nNKC@Sm*%01oPsi5FX{&iIYgobz+^%eVegT3*E~}0OJaH+R%mT(e`WKWiW{+`+v7yQJw$3n|HdL zqg(!8cXNjD4s6?J?0@z?4aDg?NWwED>s#DkglA)UBKz@1H-ICj_~h)u#jJhSZ?2>j z=Y!@a7AsNmvvBzklJO6A_&%lYWUn(Hwt>a!i^liTyLd9|NyuFCeh*pL#HuCV=Uv*H zg63J%o=+#^k!7wVtJ9aTxKEMOLF�*JwP7e>GgKKhf)>jIp2dpOQ!SY8GvQmOcda zjcmdLdL2jePSa0m7kDBZPb70AwEQ!y+h}ZwHIfy)8~uCENSKZ6I17Q&_||wQ~_FZ&Bv_V$}LbovUDSEjs4B z*k{0Tt`Ro-@MhrN2`F-nGHumc3a*^bl(TZ08fW$ZZBA*(+-R~8EHJWmK#}+u@vINk zL(b${YeY{>VWKl9!9FuAi9`JfmdOS5nSQRti#O?0<_gxc@zqf-CkQ33J%n;ORqb%{GQ}7)fn=SChkq?JWEe~)7IM_U|5wayBx*l10M)?% zw$^^s!*HI*Kvt(Mj+})TZ)$rM%=@5KVge69hvc107P#aJOa8}MFnLp3{pio3g&%OL z(PSc+JA=AKAtTw%nUm;VFJOsweMJghWrbRil&n7FO)e)Dbp=UhZEZ)#+bj2kwzHai zG$|Vd-f@NA&4giRl$gQZZ)J%R`#BJV8*t=stqj%L+cfRCVysI1L$V*90-Bpyi`&Uf zJ^UM>R!{nssGfhJ`3d-xRk?@hZmjP@rSg83Jgp=R54x2rX3cwlSJEH-y&*Ah?=V&V=QG+HFT8tKs)e_?#0}lVAA_J@2c{ zcfeE){pz7iqKHo?A32-hP!v0#l|9b?0?B<6SN1SA)FQzf(Ki-m88|z!UYR#doYg=+ zZ*!VH9h9~Btn0{Q;(yxU((Cl08{gm$bmD5`W#;EH%b1y|%4#RpG}%uspiTFyakuN7 z_+E*IW=z_VyvzpSc=Zp)tHbgC2>j1V)`hOdfP5hdFYRBdfGY9$=FyolpJr-j=SM@f zDLa~9rI*ZPKWT7EY`nct#DPC4CPsk;j(&Y`2Jowd_s)EwoTo%FB3ClQgq;%X-?UP=2a zcp48b<+9^#Pn6gdhVA^$X~MNpq=)N$(Ca4Hv`{ut^0BM4!C4yw@nfqglNEwHKpsDJ zJ}Em_%QeY#8G6NgTk6RTM)3h8^LYH4inh!2HXHoo>2c;hN5N+?DS57VL;qT2 zxnl|oRNL4;p5%;TL%!Bpa=LyDviL$dnWCJ8T}rzn3Gp+x_@CMKyFED#+@GTS6-M3c z9IB*^(KO;mwa4SrSdh*^)6?H6c^*X%O8I!R z9#qBHJdZBM|4oER1u`>?l}gUCQF!ztotWiW3q8*x4~aM48E(noH^{TwwH=s$>0VYG{_`~R%}3DKC-srtnhVLq zQ^n|$JY1)f>U)gx->@z@H)|#<7T@+g{>8ogs6U$ti;er#|HIjFq1>Fp(V2pj+9`m05_9sF4$ka$acic_jnDM>Ovs*1hE0b~C!1 zPd*ZrncYD-CG|j(k$jFfv+!-mc^f#tQP}r*@ZDkbwYz8N^A2`Dp5fKJtDo7!ti^vs zqMlX$N~26x40F;`;`x%{ax9o~m3`#j!eTnx){WnhEdP_xca!?B@`^);_m$0_mwWY` zmA2|EaL)eC*|N#6bs-wBq*Xa*BDu-l(bmI!=DW$x*XZ**cM}IOQ~f>FeBC&n{ZQRd zEm4c}SgQ3TCT%xn3#On%CC^@E$=Abc3wv}oY!eAE6}6_aj>(5w6%TT%cd|l-n=`?j z{XN}bzJ_Kkrj3as%#(@qvOh^~sMVwKI-XIY9iPVCyr=wwwdhB3vf`h!XmbMZ7fL3E 
zEHM}Tz;HiJuL7dlWb9Tr?1BP&pzxNW|MoODCl4n!dpfwSWNI^R%q5Yv z(K-1R5(WNGa32WPFczymTcrLqqEPgxEs$RGqRa0J_yY3lhN!~%XuQ3=~vbg z|3#)2;lV8UjHlc2me!%m3Ot;omz=|!Tx_$w0Y1RaU<3NtGdE1dPXi`bUrCTpvwXh%*hjzrcYiHR@G+I2EJ zBr>HfIn3_+#8xJU)=Q|{oOI406N^9_e}5r}l0EZII#2_~ISuD zJe2K@(}Pj)VUln^nMA57|-rS6r9@^9gvE_=CjFtVFHb+5co~7|UY4P8tW&qk8UT#h@d5 zcQUz5yh(D{&(zi)_%nm28Hh8wwa2G^czOvA)Nz%!vM!*=y7ol4oJg_~1$`S$SV7Wn zba$|pN*aAG>wF3d)+9M=^|cOtI>F>iFl3$MPIVW9|7N!G8Y5OGv&Vao#WK%ZdzJ`~ zW!#=gYSy9Mo?y*s#Z_VWC~4aZFY2LNe2>gTWp*fjMn0dQPg4KX-^3*^Df;NbK9#X0 zf2I-Jvpd;uxdW^-XPli{iAKn%_ggS#F7iq?>o~pS46Ed#JAnk3yEDLd6S>=wq$JPl zHSk|X!}`(i(4#{aAo-$+j?;MUtH z6EFT!uy#X-+BlQQ@W1G-Kbvth{_Kjf3u)>xZ2DcK_6ao)09^}wnMelakhX>F&K1Vz zMJN#ojOUj3jFu>yJYnO}Ebmr5if1*z(*q@TW?^6RHz)69rtJ!F<-C)M>`NE*GH2Yw z)mCFx=5cQTTe9y}14TLS=uu^A@nfPH70FX(HhXxQ6^<(C91W^M+Q%z>gEEN*2n><= zsvwz$JAZID@Wx*pKsMv;R#qx^wyo4(j?(WH<+2~Mym%HZi}#pv|L@Nc>O{Mj70=_p zrd;0g_C}|iHogozRcY!Hu%)#`jg82?WIbBR zCT2fL&bFK2>IydRN1yC*$O$sZxssUGYgviRz#Wg*^VyTnNZMO?oju$Q=tumRb~I`Z z3wyZx$xFMHmaIqHQ`xO&NqhEI-NU-QsrJwO*q`*4RsZ=kC_A>kB{z>7m)4Qctp2Vi zm6`48z*;`8{n}`8nwIho{UaSdgrs)SLNENz?C)KmY)l>!r8tBY%gX+gqGmhz^;KpP z8Slc@Cwd{a>vQ;@s@=qhu4gI6u=A(m_k6$BlDy1^C6_{GXL4qJH?sc;IwT8Uc0}~1 zKQp!26HeJ>^&(Gd28)=PreA2=7EtG8fC^yBo}R>|T@H(!A=?wRCZk;9bjQHzMR$M4 z3cN&fZ(}7gyZxfk{#E$S*IRegxlWmW%KVk}yN_mN=Ba|QEN8_YhH6LC?8jKuWQw|+ z*Oz(SW;8MD&&hN-9^K9^cyN@yR^q}w@cKqyEzr0-eDCAy%tiIAum8TVwT;+^_qI6Ejh~3}M)hp!l34JF2K?hQkIhy1m$ohM2es^a3PvH60_B1N5rRdudZ4-5R13WgdoY~J-S^0rR)O+=Drm=ZG$z4GvGJl!V zL9^3k9XM9%p^j=G_z?PKucqB$-6-@7?Uz<@ep7v(gkA(4H zdXm#ulg;RP60i$i^)WtH(_3@A%el>)=~qvXy^8ux;GUf&d!lk$I!>)b1SRiKcBW_5 zDPCFp^GOADl1(XI!%Ti?GHSl5{oPP;7r1q0qZ1MFY#}|_#r-Y}A}z_tn|R{GK`{hp zCxLAX8Xf5I?XmHWZy zXMB5+728+6w^+4#N@kzYN-gBQXAv3rnOt<#c1|73DM5qjz`H1Spf+CzaX(b*g|EAN z)&UKUVc|;oxRUc^Jj`m6#RuRRTbRhgc!Y`lyc#6QDIDHp4JJN+Pq5^ywVYBrPwAC7 zb2~a*kB9S6Um0xN|+2=HvB0Nzo%DqCZWpL(d+@hyHq9i-KAEZ;At1 zQAq~YK)V^`lj$|*on{UqZ+8#DASaE~p@GSeob1Yz@##X)<;1}UjCLn$H+w!W07v@T zTe;9Qd93q>G!Z@O@@#>@$hK9l5%fjX4=L?g2~v!cVw`Jxhef^Xk0r 
z&N?veN4DR^mFy5p)K2#4)kA@$+OOtqV4gAZ&om}6Cnv$O7yH!7-$W!lr&jiN^hEKj zww~Z`W{G#jg&wTW1?ZL;wCs=jO0CQdCVzf*dMCc&HGf-^jZUyz=1Fo%ouh?H^try# z9Dft_S;PMhv^(>Rufd@XuPX0fmlxi|0Cy7& z5Dz*TgtCi0>;2Q*O`PZ&_oo@J5)B-DC-_^Pttx}_H1w&&&L!?}KF(C|G!eIZ(5N+f*Vo*C@fY|<{CoJ~{8aq_vs=g7|PYiV9~#3U2gR5ZwrsbH7gV_V7j z&idY2FUi3%-1u1z@~z+qE?a5CXi|~A`H6nKz}2lRc|#Cf#R^4U6RXg`{p=uGOJXw1 z9q(;`8fWmsGe3R_S=pD&C(CRyNeux}%6|jO_<{}5C8y6WK)H@|{{r@VXOwt}CY_|M zu4r|zx|PY+7~C1lzU)jQN2qlszVB!JeU^+|N@5lox$41YkY^XbBY7{LAU&<%k_;%> zmpZk0GrLX?*Qy~_Tv7D!H4=P>f=}1(FJ)iCGQd;$z+S7fi;&PRu&hFs5 zm8>T#W9Icwf!9dA#7{q+q?U3AwDJC#v(={kFSgDEOvkcq{9}wgWs5|aCfOpp^kx~N z$iBU{F!o)d#gaToS;~^g7Sm))sit9DR4kOjWq{CN#z4cqt|D9ojO|Tazbe z;l;CPKfPAR87}ef>cQ@m_!pkVJCxZJNp{A|{nh8;r$T7Ng-~^I@aQh`4ff<&COi(YvC*XQF}Z0d>GrFe1KD6tp}Gp~wdOvq_{`j%+!$6^+Q2 zhi{Fqsc-H69-|>?ksWPx2ZM6d42^FswUPQ2l(HC)<}I#Y17b=;LP~l>w^9` zhxRQFf0qD{Goa29aJ&PH*e~E?4Nx}(^WUOlUILeQfk~tIyJn;t7&;7e&xGqeaHYHS zn*zrY(hTGM*VvJ+NK=36nnStzRZmfer!xN=-2Vc^4})nV>$-;d16uetkU3Vl0v$Jw zl0SuK*6?Hn+;9vG&Vicxo@J@a)2Ai|Xk25x9X#ydX?rY%k&NF1CpqAh&e#?AIDUn` zaDDpsl-vRSnSmDSPug3dmKd_O14!yAYOhBMhX?)9}Ylwq1V^k2d9svzTBkW3bk`X!K!GbW0d_7&CO`+zUL63AGeKuh zh<6m)Ggqo$Jz50M)Yb57Nt*{fVqE-Q(D(^_2xo4rI}w{P0zNd#@G-DB7j5L~nzFRL z6VlKSoLz;drz0VG;mSAQkAdg{V-&k1(w&0G;0`UkadE#R?HX`@1iX}{-pNRTdzXyH z8V9z^Lpk%iuKshhNjA7N0o`$kckXR;RZnjy-5D)oB=;p?-f{ZLw8Hhh%eWc}SDq)Y zYjc0ZxAny0^}M?ZtnPtO(hfrXW$?dH_JgLbsUC}E?L~brfPF_k%5b+496pJL{Q@k! 
zO{tC?8%^3>UX!5ySg=|OxgLYgNu*^N)RdQ0t~z!9>WolAIe5X=wF9OaXpc?YIg%BJ z1w7my7U#I0|siiP!JxzQLPhAV_ zy4aCWtQ}=F0JiMpRpWck^iS~FCvazfG?JrgwVeq~L?|7`c z;|B1?Q7UI3x1n1dKRg3&h2(R7Zwx(bPt}{G+5FC3Ff~#4LsJe@1M#6Gs z+-}Nvin?A$uCl|=!_hN6fb~;2U^1Vx)ZqEmUBF9Guw4N>r2@@+l+hi|9|HfS(7Iam zE>h56yV0wTFzJnHHBjb;w z_mlu<&q2;V1G*BxXm8>R%5>l1RCLW)V0I4RL!{m4VS1FgD9K&<&A_#%Da6C&?s#km zuR7{4Wu%Q`pEJo_5Nqzuu+XZ`8ZwYf^DN&-H)LHAq1o^r4M>asBy# zfbS;o=be;&oabGDe-Ymp70SMw9}Qa@&u}!jJqNB{rTkxk;wdQhGVk4C=s8Ixc{dpC=vZ-Qo;?m{Un19Xd|vmy z=Let8hgRWvD{ydqz28VQQg?XN@z8VpwnQrRJFmduOM@rjYPhfQ5*=gKi%z4z_5+fj zA900NKLuyId&)7)m!R$vH}rlMJ9Vs`w?~nt+y`2u~Z0 z@8?iMUt01hGVd7A1b&}D1AF>Ze<1f%gqJAK^VUBBGd+NO3U|wC^|$Dk-eBo2ayXjo z2upsfL?_Dfgq@<$z72Y<2)u0R8{vpUj3&5h*i%FGiQU`v9ynftOx;Wg1F_5T;O7K2 zzl&9>jwHISL2GMoqaGS^JzTqqr|z>~13d}@QAbKBfi=>GzKeeOnX;DyoolKez`m74 zl1_o06W}lz*{KMm^YB^PqhCC|`Bid9c-scHf25=|;Oar^3xlH;K(GNT_&Yl8TkP>_ zTHTWT-%z`I@-Cpmv!EH4gAx7PwNPm~?RVU$C+Q9X(K)zmYw+P+^;!y8Ja@sB&W@zH z*RDD+{Tppng%(c4syZSy72aJ4-YZaM2cUJ-{3Io7wTtq87n;?b=C!C}A`-J7dVAu9 zu{3VvNhiv!$bai#*Ly(2?$9+qvg}&dblP`@JpZQt!a)2g61V`2v_orF;#Z%b2_9J) zWG@%>xmK(pT=XEE>fC2v+WQ|U>TbXW$f#=roc-=coil;m_2*^K*`5^Fj@nisZEqr_ zGms3UslH2H#{GN)p6m{-X2aie(7rE$nMK&Sdx0YX$*ITNJ4j>vEKjvv2JFVFyNE30 zCw&LfmjjLzcxq*!ozdfr;rIe&4~K59W}J#fA4V>BbGnkib7}5@CKHf;=l0zt=@{-i z;NJa1FYxAjcwjZWy_)M1q<5yHB6edcty&6Xk>*_jc6!o|mXte(a=StQvRKbA(fo}l zp)h6LOzCeTE6;L28yegXB*w9=hz<3GsCCewGC51rVx#g`pe070Du++z307yIzT?*J zQL6`@jZAkE4tCs6FU@lp22rvy{5f?z2v1Z4ij`0zoq8ve(~o!lp?Z$eG(-mZCqUVia7-celY3Ghro9&_ z$@LnpWa-^#78ZRVSTy4E5!!AXG{>}y@pLB`Xw1DMJ;n_-PK4g_E;M2;aQqq);mJuI zsAV$T=4{I!U|=Mgq!aJbz{&_Xuo&e&O}i^WWqZZ0@XJb@S_6G1c)}5+`_c59xJ#sz zpLw?mF0K!^W$@{V9@j#+XT}-CN&L1aOmYVhdlAnYYlw&U zjn~dT-nh714$frMc z-kIHGaQ7k9eU{QaV`v_Al&7`50|i{?*a@uGrwZEnq;We>jIvZ4%V2-Me`v2h+9Sw$g&^A{po{Te z3qXYgU@pjSN3_T!xUe$(lt_Dy!mkT})zc}*!pTNCK1uG9VDC7zH_m|j-W&&i4qB__ zXTdog(73Mna3!bnc1MB7c+mDtjJ21}h*UDEN>69+WLGwF=H1RKt4#L-HH6S15P7v>Cr{}vW%zY z;e{CgMW9v^?aL0_L%4VBIgxrw(k6HF$J3VWq;`#yGg|t54X7z6II_M3XwVEP*-WY& 
zwBa@QDhaG*&{pIAdUCC+SsEj^N6~4+DKkc&!E?sV>3VJ9uood0B*XV{e5AgszrD6U z^WPB+qMnwR??J2eE`8#Ntf~KhpNPY6#lF1sKkuv2BKPL%i65Z?@6dkRf zt2B%WIbs3M>|B=$_nsr@3gUn2wEk#SxLU?Ds*MHl4>YNvO$lKvZXg)dqufkAw*i6v z#3Xd$K{(7ar&pmpk0V#R(9rrg-=T#&(W7`V$Wj?FJBnV)KJ>%_H2wR?_ar3nV>G9u zclN^F-I9);nTkZ+hD7he@;IX9sQ_2`4u_vy>o9>)v|Hh|xA2~fr`m`d?#oJp>pr0d zXRkj-PyRxw&XK(X9p__X-LX{--Rpk#=b`aZat^1YlR)!3-orrbLK~=i4%_Rlv}$nU zV*Cm{oc+Lf3T-fjk30P9Az?Qm_xevCpjRHo;(6+n=ZlpC&JI|vcd5Aw7%@UZMbf=T zDNVtXQ4$<&x))pFYCk;#S5qphLn+5yOf}F}*|89f(Kwzm`Z`!}Pe?TO&<|hxXP!EO zSsLhXfgAFX+mmyOkj@hUD#P9Vcybj9?FY^rpZ*$5H3svA(LGb4S2`9&j5dafwUEme zVDC-r@2lJ`BHdZ=b^|zA0hKyI{i5jOKWK#~I(0@9d{0Yy@TNEL{6U-cBS}*K9IaeN z4eQ`9V;C8cvILa;lKSGorz;Kmz=;cJ@3YWtGxW%fWSu}pJz;4g7<(GdHv(u3=#5zM zBqg-P`lRzU(#S|?)DI|)=&Kj{G}`%Tp#6pR6hq$k0lg7J^HYYSaTRE(XF3)}Lnehj zy7Tz1oLNI{`jJL+-A4O=h9d4xYYTMx=IfDrXN5g;*Xa1}n^_IzJhQDMU-vAn2wdhV zqP_S$04^?4pZl~<;)^W=j;ASMDJ`r4-KwHn-Q%+yF7UijXOTU%uRZ$AF`HhLUnRqgfF)tHLe)) zWc#UL{Yxac4SLxTbx%ZSgOAV#x(wsmIp+0f@^*0X5_q99ZTXTqI|l9P2~dARgVV@Z z9jLMZ*uUakdAM;4Jn%1`^#hLsp;1HXGn!L3sJ0xecA;HuX_u>Xc0-dyaO9agzk$vA zTsZ^b3{4IA>o&@n2Sz*#!5#js(A6KIYmPvz(r{uET5|{;yql*=#}c4?8tSb@GZ4~MaZn4)Z2=>dxhjHeC-HIJ!}~ld zL<@=nv1fd?!Iw17|3YlQCh~Yb+Dq8;AE4h~@LCcyaTZ_;xG0Zi`;;~vhMzo}b_w{c z1l*o&VZXssUvA+3Uh;X@0j~TC4j;&qvryzV>Ro^&KM1d!#ee-5PjZ2u9Y~MSuU`jy zMt2$u&0=uAC$&z-!v2R^`XV!@py(Vd)L-E42JqwgV~!|{L4&O!M^2u{@!Od?Kfz8J zBh#|9>FMBL6wnW%C0j_lkNh)f`wr4iqEyc-XwI*5!rgf9sLAc*bdUcOV0a8{cL9Rh zKsg+1+7gYpBT!NA?{;!MjDAeySzmIwvQP@^ht}hN85r5lTjL#g-yFIe#;?&6nGAM} z(6I%bL4(w;&t z__`JU@!+>Q5@4j6GeP3Jz;mu>bmm|%SB{$&LN~c~BNQHk@N;IaEZ7NCI z4guw0INedz0X)gz`AEulemoTpI0{W%kCTk$?1h~C4aT-Z+m}i4GVr?=HxbSp0}Q7r z%^8Nxq%=N*-qu6##*Ju`XW=^`i>K9UqO5SLa}PDZn*ACo;^!e7_&mh|2nD9E1n4&K0<3B2FPeRC{Naq3zQt&HfWpZqA-4fw`r zt5KH6L-%;zEoCHZ4=LM2nR=v7*WQEk5EuUE?e%gq1^)2;`@HK5mnDZZa^G2?b#!sTvuUA=RxvvRS@p= zjKI$Lv^CMLS}8Sbb0o4D+@(M2$w$?&Sm)8ke;}ccVQZcY{?rL*|0Q~9DV*oo8%Lqq zRkZor=*;f4;ZJz_EV}R$TBvS$2Myur+U1b;U!j&OKwZ=MG&QxvPA#PTd1%+ikz#k> 
zyLanp%KnSeZlknz;rb!O4r@p+fN8cS5~|l(mi8PM}NcBZteN<8t~K z#&OtB{`>G-76Nap;NOn{>-W%>e6-wH@jFA^L!s{w@=b<^jk{X}+dLR3$5G!JxT`j@ zxrHZt;rm@c=&p>1f%yz~v!LzI;B`MxPN1e<;Ab^H$`SOev$)3N8V0tHfTwTJGIQaB zTglU#Cu_Lt1#X*Au4_N@gTXqaI}LU&(C(4wffT6hc;-SZ*!w_!BQ!Zr3TJ1Bg1bSa zJJ0o5aD4(RZ9Gz=*S?3g*ae)WsLi;rcOfb5;hsZa$9;P*0moGQP51ruC4Xl)%*aL+ zp|U-w(eU~-X!0U?ngQ=gw9HQ6lWRRosSQrNdVOgrX+i916dAeEHg!N*;x|9jw>fiyWfRGS)I>G3ffoF8)J&U;st@@EZR)H|5(+Dlt6qtI0pW?`i z{BCThXzj%WJO^W1Z^2sM1229=t^2^)jZpq(pge&rm8PYq_^j;R8=%$qVi_2JF^@dY|#k)$YB(+J!)ow$N$;cuND{#{F`2is!J! z!>6toY)HDs@ceclclWqwAf5#xcR~(?|L%h~d%-vA!8ec=cW`=6$yVUJ9o};_gJ&@< zz+x-_L!MdtKD=SL?&=N?!cM^{Th+iXy8JRf)I7+0nM7)jtRE2W&K?$-2uW^!L3STc7` zYa{c(F;{5ySn8~bZJQ3v>yX7Gw7>|Qx#7fgu2T8>>nTf#l%EC{AB2kwz)e%(g}p#r z7`!CXyUc|o4+d+-WbFgI^P%E!AZ<-6Twhp>-vN{y0|NawJt933$N03omw0v)c-@6R zk-5bZ4HP0n*Jo!QLG~qrS{p}3$0C>e%8FjfU36<}s?06{E5NHlU zBUh+i1x|Yo?Rlp^m>CRb0Kt!>v?cfQEH^Z`0Ee#uGd00schWx#7pz319|*jcibTyK z=b8BFB<&9CA>+)RxSK5!F9zDiXT5nz1DXT7J z+sjMj-jC~P9V50U;e9+-D2A=I=OI2HfJUx4=uDZO<2Hl)a#)0td=`@?KQ+7VLK!F= zxXab*$!Jm622=!>D(izxDa`n}_SjFE!_Xe^#Lp3Wq1vLLT1@2iM1_uQ8=Z=gZjs z<6Mt~&(h%pIquziX$lLIsD|y{j=UB`mlsJl1ZD~zawAFIT z)f3-_Of{oDkATC8=y4(cg6~rDx%yx;c=rTCTdu_X3;5^b)vI6r#!skDU9P1p3PhFR zk|(h|Zy@jP*O-cpC`8HqNZ}c0zu`-|AJB*^yTQ^%@a4Gt1oA!xl(~Y$HKlgrq&XKq z1pV$w?fH=BwZM}`*{-?R4_CGWA0J~~ngE%S`ZgNFdQOr*J3STSP;TXJAozb8`uC)sU>J7Axe>jr)Et>ls@K6a&GYQ9yC~f_SDuy78arxwC^->Y(+8R zwjs$6l4dFO+)o|zu(VInf{W12b?S4#R(~kf0jald^D2-;E_MygiTn$QccR!EC#z0wx6t{3c zniRD|Z=^an*$Cfv0hjhm_Q0QS(PpEWG(#gdqegjnJ9YL4(ni!=97r}m`3|(MKfEy; zT&;#1$|1iWk^f%OWNUKxoIJLpv8mRO< zG+F{B=76_PDEnUUzYUq{1-4F7#sb>rn6f9JHm3A_z--*a_o2_*v_Cs-DF`h6q43j` zz7Bt)JkYv6B+9jdx78@C8y3l#LnZknt#f?KbD7_uOl7|vc={MgzX-I0p>SS46QGtm zKWc&-BNxntlZ>+ND#i8C));JJgBtzRBz&&?BxA(ydR10{~q}6K+55i zJ{vA}{N9}mv%q7uV%;NG5MV>J7e<`w6pJa4>{t1-*L+; z;MMuO40N$;FE>D=-@vOed-I|nJ)x{I_A(Xyr9W8(Ue7}fu8H@ou2OhE>6F@;)b6va z8?fb!eiEFM3HzTHthzqMwFN!#i;U6WNPTohsOI zwHWQ102XR;m5hJfg!IF~VM{3Ns^APt)Bjoqf26`?j@#aX295=o_455B(ms1bo^bJ6 
z==V6PYmAFcK;Z0w>q8uM^jtvW161WJ1h=E>O)(10SD}V6NUP@xdk4?b?%}lMIORJ|_Zz%10J<1;!pIS)sp%tZhATNrQJd$)TetfWU&R)<+Vd4) zO{JIiDHLpo{7)dK`%jG|{3_f#jdJ^tV+YnR9r)hBUoObg+{l9~Ov*#?KhY7707Ev~ zXDn_{;LV3T8C5}8%X0lYP`J*wEFag56rkR* zQvPP@OQvUkNc94xd1_-ykb$*ml_k(le|$4bKGHpn=k6?xcJAGT&UU2J$b!y9ZKe&b zx{dPKmyM^MEVMcY7)3zv zRA|@LL@P@4xLq|5YMc zY0!HG8n8P!`VQ$_k2Z28Y%i=|O>A;CbW}O~x32KDQ7&pB$t}=gt{=#c%*{p5btL_l zK>9j%>I(dQjCL56vlblnI4PT=g|}d}tMP0owQRr=IL|#2Np(%Zr@u^xWH z+FSxx?;$@=pw*jDQdMltSvdYM8mJ?Xx;x4>8n07gWnj>M%T8TQfw(a5h5+eV@H&i< zD$j&HN_*^w?4g7&cw@AxHMC_AvKCKWo)1zLEdEH#=b&eXk=IyhgGlL0J7Y4rR@}3? zJYR1Ou(tw|U&!NXHGTH^;QVh=JP!tpJ9;>HGbNCS$>;+8vb>bC1)O9>BMhZSXB$dT*5mXYioijhLhVj=R|1@eYjz_|$0@Z0Zyft{xtox_FED-pZ+u6YKk&83 zqknUnR{n(C{2od;L0RX)iW1s8JUK@0N<4cAtXNwMT7MOMUcqKQ&ec31HL9$=1lK{| z24za1`$q!b5xAfhJTRJeKa7U?G?ef<61fJtd{62GTIG0mE%G&m2A6>95k(x9!j_hPj?BvWenYO4$M4)T;(TEb0}@RVM}(T z+L)HESvUA;(mB**wQ7{cgOD+bGYOiPro!rkzg$`$lL| zo$?COf;N<3lxL$pybV2!sbT)F!QLz&^7=uv(Omd_AH0#q8&CgRMEhEVw;R#M8-c1L zB{hNrJ@x2MU|Y!jZeaTzt9J#MYrtPqL+`;kkh`J8Ze%VxLFX%Kn*tSG0n`kargHM0G+U|m>g)f(n9J9e`K47=sSs9#Uf`e+Ib-x7SKd5tGfMNi8ybjO4M;AJKHW4Tt z?{a*@6HY&XvxZUjp`hp7x%(xheGgPdfpT~K1gLxwjo|KQXRPjozRsjMU$z!~P!L^x zlu}zF|GV+R-Fp!S4cz%NnUdR(ZWp%9c{JC@wnf*xfc}_HndRYjwbeuTqJv1W5dLfc zc0NJ|s*wL%cziP6*>p;50dDG|Gxai8!VT`+%LLy!F68L5@MhF$eY!f}W*XPd1VtmZ9cjgOD3$~_EWw6Fo^o0IM?JLHDESBe6QKW4EbX(v zZZE-;D2Cw|xVqh$7FSOtVPBi`xk6i~(W>{*btiz}CTbNv{YYn{YS4!3-d`%c4*PHJ zPSRVxfj7&LwDq*b-G8Rje~fmy8GpnP@WzxM3lQd`-frP(b>JGs+xIBNorbPrZv}_A zr?Wqh8=EK|xr@;!NzQt(Diuwo{NM{2+xAX3i zAa9THZXNd4y@+!uGs4vsC@0T1!j|PhO6=wRP3v;-dk^$V=G|m0==InvQtkrp?VZ_6 zbQhAboVs#-5S}^=Mv~ByKLGt$aO#Xo7Ru0LUjRpVHh3kVay{-8c%l!r-T^N>4!@m; za#_*Bo+|Hn@oeuKRoCf}%(6Qstr8U4-ZuI96DC7FpqO{91 z%twX!5a&8Q2P+v$xVEV|r8@Uv>t3dW58-oTx-3F}*<;DVllr9Iz+Kd%aPH7O$@ZL@ zL-XBe71#NXqrOaVp)1p*jO&8Fz~;M4W+c|c**Iz8zElLi7L^ zgOh4Naf;tewDCOrVy+!fbO+_9A~9*C+Cw`J^2QmPRY-^<38eydBWhjdU3I>3;MDmN z_fu!4YLB6Q0+jY||wJ+keji)#t8$$fJx#*zj3mcxL?j 
zUEnk)yk(zTDJlluqdlq7%$R$5-J7r~^uyfuDjfEOqnhd?Z(`S{(Oy_-WGo!viT1h( zzpH3t;h9fQqc1W7y*vf;XwYQ&!QTjUe;KsiK6uUO7;TZF2Jq%z$lNk;sxSW|{>MJJ zZWMTWnd{kb`+v~YBeBlQk%Ot^IuEVw`FsWM)%y<)G1oY3=Xv}VJ*8W*8oy#E`&}z>J^K7NIJQ6V z+Nb`SlE%SPpJEG7VhtUeox>ZW6fVTVeFH9CX_Au?EW07;KL*}oyQy8t!j4P1OXHnat@*9?vIIaY5IKIU@T z+brO#54fKZ>UMT(8@Q?g9~h;t8a_}{xbV5~b~mL7;c=jQnHF549cz(NpM6O??R}l5 z#5eJeh5_df_(Dy1bBPwY2I~U9zhlvS(wCP14c)#*1K8trJ+x!~yMWn6G>)qa-vP$jftTEObw6cX0{4%B;|t`L*IU5@AApep$jC%|PsdC4L%)@z zH~~eU3^w#DFz^-WU1!yXT<<|u+p?XMZ*%`O{iL?Y(^Gg@6Tz43P)btcJ6MX*=rv=C zc0!LX#EwMv>>?%p0%a!Ank&@W2)$e#xNhf3ggJjZg5@7WA8a&bxQe|b{|~`KtC1N` zi)w`}dyH~>p*}ui4^NL#!U{?~LfxH^HB-9h?+f&wJFzdKr`*9F^+tApKT|9Nf@bjT3TjwQ zPS0%m8P1*qc7K4nuHbansF9SakghU1?_VLO(WcIk*ZuTw(#9d+XfybzM*gpX4x}~TgQv+{+voO-`th{ey+Hl&D?dY%-^V*g8H`!7ma_7K zy_~>PgnaeEv^!O1BBfu1F*hSIPk@tL`H=)KWCiYZKzAnC|KjA|i8e2Q|7QFu^Od88 z6M&==e#9=gTpcT(OQSbigWGQdZ?+2e`QaC%F#QYuewgdGss9gZ_yCQ&p8EUJ>OX=9 zV+ro(F;@Itz~7g4C(?$mk-Kl*;8vuLLlw0#Lz zx0Cy4FmLRyj$r#;%IE-mt>DVTVCoaJ-bS#{ntF|odONWFLh5^Y(wN_?q;^H@PU!k2 zSoVBTcSGldruXxtHKjicRVtEV1NW84-30EN4mLe8XB}E)Co;7L%nk=iN69)sfserK zZ_w9sfGfgrpCECr{W~0b0j~1x725bR?UrMbz`>(n@m_jMeaSZuJ5d|_>HX$_Z;V^- zc>x-_Q|S&iyqt+f{&zyC%j*R`;wr|0)8_AVO@CQUbvVJ0;okiKnG?@orpkRSp?*mwP;=KE%D}A>TngDR_m(@iD5?3h;jgxbFq4J&?hp zV5w#BGMgZ$`hBk7cRs*a-OeJ^g)gIZS4XgrDbQ;@+8rD*2|P-ibvULFi?p*BDIh_3P1H!XmN=y?auUQ7tBi9s#T zzs?MF`OzfKR2(3822a!B23O484~NbtR}yvTj~@h%9JC;vYj?Q(MVscKwVY+y9@35t z?Xt%<0G&_^tD1@S=R$%$LL+CV58N8cPeK;I;9VX#NXwoM)jeZYcyptJS0V|nGn)zj z<)V%va8m}hZvq^UjvQu!{}0oWc*>DN_rMeG_{q$xv3 zsj&q9?@@53cFP4NJiY2htjh(cZ_HMAAx{Oqd*O~0XkHkNlNYZ~n!W%<5|H6?$bD&A z@F4IzHu*QdF)*Zec_XQoqrI}j)rmmhzM=y7OZ7;TffSAd>eg^Wk!!ruBG0N*n_G z?rursNd#r|yXSx18JFJvTC``o3AM)HSo@Kt_AdG+K~L|UdrO2DV&T2LBKJsb_4eceQRjHQqg-q}BOEFc>n@lys4h#?6n=%5(&L^V0nmYo{!z>{jj zRr4UxLYgO(rau#{CsbnzR}tD0wedbQ_5rZuxp`giaC$&1j{EMeK;KWvNW@Nja z-*+D6eM+84+jb+#j=L=cH{YW_uK5661N4+KBD6!SiU#!ae8Eab3^?`;j(xuDTDV4-i$xJ{n)%IP_mp zqGwJTJ$+~B)2*h|2f>GtpBsQl&#vD_>2IRR%Tb!S%A$Kp!b9%faeu@>xYJnPwWy&W 
zoG^y#KKLP0<$mzlm2x~U`XjJD2Z&4KZ|P-M04nz{E$6xZxsetZG+K;B|2L_neT?Vz zXhloPS9^JK)I~UK0n~N1rz_h}(%wFy{oQ%Llv?zt{sotB$Huonr+k8M;pz+LBtJo( zSHq$AQO*(IbbnDZTKg+)7N(bgp+0md1(uDeZ~J<|i~Fc`KD^TutK_L81)zpIRX(S* z#$d!atLDoE-i(U#8R@Um($i3SFTTguKx9O}ebB{|N~h4WrND3!9c{0CC+!;w23pd} zZ^3j${3UmuXQB-Qc-w@hMt|_+PM?;2Q;)HuwnE)0)blMAD?|Q6l=bX2-Si=lcBbt~Kve@MH(aAl z@(sAg`TVtb{X2l7A(VOnNmuX84WdShKt*P zdM1<@LJ3>I_e9EnhxhNJ1+Ky|zfrg6e-wbn3&Mk*8eI%6Vr-;OkgDq=N?pMJ>%i^V zy3V)WPMZq>(*|1V+PVUy_AJ%kk##w7X~4%Be%=3bkro+!YZA~J`TPp>T1U-`u~uc# zF#5CA;l3+y-?MP`T6D3yX5FQ62+f)oOrAx1^~Wy8C?SnLhAW}QK*_DtVIQI=IUIM* z1uplXKlX#+{QOS>g0j48Pm2!VS2Uo6mONjD1-=VE$cVvXC@T{X?4|Z(to=8nPom!S z*uep46HmI(^S?mpJxJvWm3S~Vm{Ny<{{y6$LK{lqPp2W*v%p3iys(T^?a>Uz;WmEB zD|k%{sK+*Ufrcf69BrlLrq{ZcfCJ8RT?SvkSWtStb1Q{GQ`$h)Jz!2vloe_v(nj|Y*>4j!1<|~Tw8@#M%E0BRKPluLPswFSVGqU= zblrntKa4~-xh`%g|L#%B3#E*STP?_vd)A&smu00~PtA3# zzY6>rM@!u$v52;grFHHvG48N&o3_JK(cF^fTzd`|>yyxvo-$xG275XONcR)97}Y5o zI6n$ky7RgqpNqV6U9z!yJz;bq?OKNYzlW9?r_?i<;>fcdOc?F!3^a6o!*rh93e+!C z+d;0~opmY5^&E7dC#7Z}4ex?8W39K~z0r{G07LoUDm2WX`c%kq=TNVuhe!L@s?G%Eu}E`d!;(t8#%#}xE?-OJ+4L?*PAJgc`| z6uBx#xd~v*~%Vb{t~f90knIpH_==o^dcC18UzpsO2Sk!x?Bi_s35pZDBYq4Xf7y9X=vx2;+bKf!%!+@4OSf?;ddK z+V^MB@mJv+V|SiJR_{ia+7C5)yE_Nnw^|V>F7d{>|3~1#Bc!o3HP0b*i>F5Jg-8CT zyiU{|`Q(SWbH3zTBySUv-;sK1k=v24dA#wQqH)yw5I&J{oF)ga@p1fhBab^q+KL?0 z$+Lu-^;Ml6aIMC-$i;Xt_82X=2#>x=8(MRxf4>)q2J-Yf@*So{!gv`!<96iFzQAZo zG4`h((P+{d-@#}auIb539#3WO4`to0?JBaFXs32~VV-2Z1*{gQUe;c0n-%#Oi;5rLFchiy=ksqTZ{z182kl7~OJHKT7gw^m%Yx346mGKxV zq4iut)Cf+m3P&1ce+Koori5oGy*9NxPTBTMJA(g#JbMjlkH_OOrp9=Dw~xU=A!`2v z3(yq$cuM0MavzU?kdmp zpPkvB$Zs)HmBx?qG@f~2e=ARZ2PPxhI=lH4^pxK`rRlA3bqe~qkHb?WqWR9xDD^%d zxC1;y>un~1Ib#GY!PfnY+W&@U8j;F<@s~o)O2J=H!F^uGsQF!Lz8Nf6A=Pdm{xZ~6 znA9ENMPm{@4UfMC-d?Bvn*9GlnsH#rF#vZiyaqRZL+hQ(*awbhf$jNFq!v#Wh0<<@ z`-)L#Q?U92ICzYE`&GvD&7i$|@jZvrj`@@m&Fpw~NiVJ&(}pj>(gD1+E6B(~O3}9$ zGYz3i)OWfE+v_gFCy;>Jl-d%>nE^(6!3*W#p$Z}W!{E*JX7^mvDPMxC3#1thtvaC( zoxwc_CQHyt_f?ICx1R=XR}vV7_X>LD694B(-4NVN+8XDagrC+B}Q43_@@8qO1z|Eb6~MN%=3* 
zREFoG^9$yZsu5IKO*`btvfyboaHesW3GLqz?iqv)SP8DHqF?hNfA1r`=KceaHb)R(%D>DEKH^DXxJQt+J;p_(K4 z^J$4O{!deuyyJ?ej!@0mMt4xNqqx)1>}M#c6nv14=Z=P?Lpx8yHM*ATS?chuPrZdm z{Q%fBVpbNUMI3j8pHrZ==S;S2j*Agh>@zx#$2QjdE z93IKQ>ly~vq~K{egFOqqn*dB(&{)&C&I0{Er$vq!=0_hC0tb#FxC-wo)XEBdcTnph z;4T0c>rH#6>a9TEpWN=<=?LzZ16LpT$6YnXJLyAdv*}foMaDjb2aShQo;TCcKGlI> zKlGW3HZoq?NbvjyX`Bt%LJh@ek3QufG{z$Mz+DFmXk8-M>A_VU{367{3Ydj(xlGK%9!VUx(5hITyxEq-=*Bx3Azn6-U*b7qb5$54h@DA3jIm z8+*Y+xYJ8^Prm;pp@5O7<=LpVG4Ql~miPcusqjwNO)pMTLq2=fdfl##j7Ik5!BV7~ z3~!~tZIifm4#|18R498HsK4NQI~=1fN#=bbU*iwBSL`+Pu)F&`U!zJ$C0^_Y#v$eI z{;__cupg7gHz$<(5J}bZ6hiwTQGDoJK;U^0y%}{(qO2G=DG3)lJN^_k9-!P2c>cZ7 zT&_9nLaJ1r+k>-Qe-q(RXVJT33-@3jo}gV<(SUl3&+v2>r4GYVY{pJ_>d|?4^D3I- z7#_t{u;32cg>Z|j6O8P584YIS)sb*&Rs6em&<;-^{d1AzzbWMvatw#dT?2d$`Ffo9 z?%DhU|F0=B==rY2NY#|OE>eCCI9w~bjhf3)YcHO-f`1VlUkvZWe$dTGs%f60<;Hq$ zN~#jH_BGmc6~A~XZJCH((hD1owERInMwm^a%Kh zc0sHSe$QF%jNtDa#m~s5=izGky9R$QCwUver{|E=+TieKD0K{+--WexhrqYI7f$zz zo~C~HEWAlg?p3#plfX9vSO($y97Ef_$x}U0XW-9++320V#5Y)>ydmWZ=rR=#+kF^E zuK7jHuvh(%Gja16HUCUr>vgZZ|52REXw^&PpIwDUyP=A^sqTey-96osyak|4YwB}V zS8L!a2d{nr-e0H8W|ViHdM;l}GY(5#gC}o+H`iJ@u2>rk^a-AQA1Hkaj(P>W-M(5MWHlce;V)7g<*hRsUtmq1q20@n>`l}- z8V&q7-lZpT>8++x(wAuO$>d)Q|A?O(;p8V8b;P2tN47@bOErd922#?WB0nZJjZI03S)QD5h;sf?Yu5W-sj1CF=X1=G_~ltb>$|gL6HN z?iTV@BZX1AQn*_O{XFNUCw8biX?F4C7v4RC2Ko^yK2G{Y;LdngS%GH;5;lk&CD3p~ z(f=2atS8Z#d%=Xe7ha_$pOY#Ju|eEov=KA86fq40iEBx0)zqrp9k)wL(HNRt7kx?$081=pWZuP4g7x?1n99Tp~$ z+O|^a7nIoy+U~$Q7Ug*gEouSB%%XjjkjaWX%Ya*rQfiFlc9gM=IvxKT4;{{8S3Gt4 z5FU;rJG0=t6Hu@wHN6H-`_hJvv_1yBMk$>GzovqjSKyoiq&?w65USh?94L)8*8-j&@AIQ+peoJpifj zWYc&`C`fBvCF6|MH+YO=DJ>p4JEQ&*xe}mzRdmWMc+9cr%G7L>%z8k%0C+C&#t81N z^Jq?;C86UY^opksHwu_@rO#D3cK{`O+Wme~Hs*a-@RLBVwG{O=rnUpT&BC`mRJXN< z!E|;;2wtY7dR!Y@vjlbPou)vG>R`Jp_{k04^1v7Qpv45P6M#EATr>uV-GvaNthY$v z`jOwj>AjS!7aK==9KGCwmV67EJwUx*QEDw{=jpkZzYyfVjleP!iUY~yv@V`sF z<0z?k$TbkjO6OffY2($Fg?G+SUIzHc6mqRYN;?CQ>)!7N&JpOjC$W@S!G1Qd-Vn+) zK+A0hXRf_*eAajZ@1f1y!SEcpg!wu6EI-uA0p{I5@9Nphl-!Io#evB&-yz%?FDnO7 
z8j000{GI$NM~URwOU~Jp?Oqi}u{}?u47S5{5XLw#=9;VhyCVM;DaSog>EK8yS%vl< z0G3>r{{gU#uCI5QjXQ|nAejJt7 zD?JMi7I2*zUecqq#GOdUG*Z+BR`(F(hWefgmnTrr+-GQCT~avTCC)a$D^dJcHDn>M zkqU<%LT8?YpISrlD8`wyFqtSX9?h_ra`aXG#N*8*Q}bzlTLrINPIZPSJG$C&=R6_Z zM#_x~wDyz=J#8aBHRqXY{%u_v`Nm%RmqQ1`?RvNRfr*rth&*1u_DvF0_N2A!v^^(K z)T2CiA=HQF$#Bd#+Bp}CT8DBk@lLHW2EMh9sE3=t8%HPYL7WfxGxEX#bm%nF9OX^4 z$H0+#*J8!^wZ#K~+jtvCDK(uOi2)jQ#qyBCIighRE(Fx(U_Jng?mSp(xbuXLN!Ue4 zzE6OY0YLi>dfb?utGIp(zhf8j*&E(52H4GvBK!_b@}VJKL@EmL?ih6sfky70n~aR~ z!s96gwSItatwoPN`WED_AzJ21q`W;Goq#tw4Q+8Nt*giVd%Ud$mR{z4OYE`d3)=^J znX9_+*~3V%BPXu$Ux&S4Laq1HW=F~wVFSCu%{Rb#chTZTJQwm^NK9FLmHbdA+V36N zJI{!E0}5zy9RV5!k7(NqLNix%J_)zpMZMc8y9YLR6;v^{d@Z!gLB98cof4t^Z_vJr z;Oxp^ZwK&3HnALBrA=)FRU9EPf}L$>7oP5hqAz3Dv|Nr773Npl=^5wR^!8XRXCNn& z_cL(5n>YFz-uL54e>nG+kmne*$l&cRb5Ue0YVKDy4$0L< ze+@6@3vxFO?)iXr8v$t@QtLeOQLdd^9Seuf!i&tp?*Zh%6QLWR@3gsNvDcpAY(1VA zq%9l*UwIzU2)@o$%a^VsFk12yq}TKD$_D+~gC}z-uL9WfR3FC>Qi5zd^RS@pAVtGWQ{maz7NzJCnK&xU$tSv*g+YObJlMQ@!2Er?n{$zoqlM0I*jFp9d+$m9e&L9<>?our9F7=2M*Z z7^mb%pf<{qvy8{VQa13f-+mOV#Yo)|JR589ATsbFK3yZ~988Uy;ow$P`dtR) z#b|MJ-rNL+u0MA)GaV{ujsBv(IJD&hl&r4LgT6Z*QbzmMwt@4=!vB-oF_m0!rR$s{ zD{QPBxh5;Uo_;*jqPm(wJAM=WpLC$u3#GJ)j`!{5*Y+fUivf5;jtGt6+YOp1&)4}f zs`DsMJi*63p?>}u<4FzBTXT#&seIG;brei1Sc=k&z3HCDmcZ&W$xleRYtOca*6ABvL}r?zpQXWeBrYA!{0C~~(G{oB z6VIJbK$e8cH&V-WDm{U$%C$7Y9OGJEpH7Wykho0Lp~ZLgX9J|T6nu6%lqy!2Qks1n zSCsx4+T^~&glqKn#DoOwRx7NN{iuFeA$ML&pR#xp9nmb>i4LTR!Josid2i8<+Tdpw z?ROsjF1`&Z*|>HyXmfi?>_uz$)7n|UJ_ji;&68g#U;B_1%a@OOveLp?jXuMoCu5~5 za6JggG(NKDP=6G3`Kn+`?4`9t3)#Er8o1M*XJKUaQ@ExdX>UNYYrWn?ezYUsAm^^q zv|r{aCj-DhD=?7*9y*BabVZ1#Fs`G-ocsz)InrhZm+m55$H$neyU=56$g54tfgJV& z)2`PyYRY?Dw79j6wKO3Qo0>wzr=HUfSx)L2XE*H%2|F~|Iw8Ea@}iTv||i8y77Jz zPdqPK9gvr=dx)I-P%3JIfsJsy>+T1KvMo<}s>i>zc!r1N=YVd)l>lC<2_ z`gcQ%qv+r5z;*{Udj^NQly;zj#)ErT)GF~8;4If;JAT%TF{l^7^JebKllBzve}f|T zLFr`lK|5&Zd1#rypr>}YLo+jYFB&v=A$Z;$GDgu@12iSD(&Z^}A2l0$>Nz~N6ga>& zc1E1q%~R{O_I$wtJ8R}Fky39ztN|2Tiq#lQdC5E*4kTKGbRbxTcJh?km*_7x1rrmH 
zeBX$C#5r-E-?bqq|PDC1F(8Uvka-&<0Qm*6G?glYJz0rH~ z@a!LZ^551(D?7Vgr-TSLeiGx0lP)bjuP)BK_G>?JR%kV`XaFZD;pG>;k zp;l}5V}EV{+V401a{--wk!k3>tjN=jpaV_iIJ&EK^pA`~76W$Y93JMX2qh;|#$_~? zyLn0@0WH9r>#IF|LT=K6*hk9AlQNW0gs+i<#j+II!`BvOgnzHHa-D^L`BA$St*f%% zW8AzLPjkZYO2MDVO)`+#XS2r?^|fxGJ)YEIG|1-2=e_WHmQYU~U@ZVn5`*kTYh<1# zpR3cZ!}BR=#xT~>i2p>WaGW|lPeng&AsX{M{|VGA?xTGW#)EbqQhgTL%WO!stxAUz zGHAV)Qhe_TIM?H=hBh%p9PrrJb;mk=l zQo7zKnN-eP`mZPPGS3`;ZiplrnIi)@-E;38UmZ$lO{%g;QC~3E6OL{OJg3m>EzlPQ zNnxL>E9E<$n47%LKNmqV8&UTTYBu`gDc))iq8`nD>>UIlBH8BWS6#`*Bj` z!teeGy4hnY2(0_T`ca@WD!gkDw}PE?aI%g%Q;_6Z+()z3JMpMJ-y|2@Y2Vuu6+F>4 zd*BJrRa36*J3Po&{w^EdINMpAlJ5xg)1UAg@j{zM`LXB+XxX%D@OidT1i?nq9K=^I z2v;b1dayCRMkI6wNhhDLT8EZ1U-)l1#Mvpt5>mg9aXc^8NxSOIG-hm##Y-sI_XD&3X zx}q*KLj~co%xJjmlwXHd+otKjQiS_nP_#6(KM4HUDaDu@t}u?#=Wti>EGY2^r4^*E zcBEfO`a*nHlE-sR;;^bCxvD{qroes_SlxTw7OdP!Ev^~=h@MIgtYI2yGGQ|k!K426 zI$CPKt|r($Pf0~-w>r~N9zB3$_^T-WoBqg#-uiB!i-SIoLY)QF z>-^oHcw|b)Wvo*c{DcSLMmG&SEdTe% z5zNS*aGMoE1neh>c(sQnn8>cMNZI?k9(Nu_AT z!~X+w8uv+j?F%~QuD7_1I{Z(gmg{zK57!gHad*my@!p+4$#_#@LcrD7`q4 zyI0|U%9#b!jggn5(B>{QeIhkB4shHF_vWA#4Utw)*iHv0HP9h*k(PIniBe!Pd&ceg zlm++WC_}5d6z+AttvyyWjxsFY9;c&a1<_bHfQ#|8>_IHE>yFz)A=hqQw|~v3V=QUo zDJ2z6ur_e~EYiQklT7e@CR*xhC&#N?vE@k09@07@_Rn`Q&-E;OK$8q=QofykN}CSB z6b@dPtDBag4P)Sb13JJRQLf{5c5g78;a=j$$nzQ{mPI>UKX>Fq>MTcIqqKR_WDWWQ z#!OO!IOpr=wxg(3&@RdR&VUj|OjfczQ)vaYI*(Ea{i2sS7dVT;lS|?JRrIGmgx|)2 z!K1X?_2zjfu{>9EpuCYvTTsVX(q90oOu&$a|8+FruO4aD3?=Bz%|sRwu;}r~Wip!e zE$Ur}?3QJ0=w_%t3V!Yl-*&{#8rOF`FwWrWMsPG0{JTEx7O=Gn+~Sio)c)nL?EaGMUA-I zgI{}bkD&WXkz!u3Wsb7AkJ%9|W!UlPJXoazl;e0?94*inISjOM^nhvt`EOw4Ni>%` zJX~E`gO;a*(F@S;L$Lb*c+U)Fqt%5!Qld81{R#v5Ur36F&_~tL3U|}yRO(Z64hz4P z!fzoc=?Nu&1qeojXWGm-+7{{PNY`Hfje1wT;OknnphDmj=kgraREHg;HO?PZ4|nFW zwT|)S4i?lWF}?{r?}}eiAKt774o8s2zQ|s9#Lc|r(b*z%_=$&_jzpwfyHDkbYZw!+-4_BUjs)9Nx*kib9wj#b z#^G?6md2QD$>iGsRcZ@y_{!4<_op*ADKnJMTm^^m{x<)Gr+-IY2P|( zoCjQv)7;SL#h~xCZ~fqz!sLDu+!aPUtfP%LGlFjKu0N191S^e@XJ<>aGxmoH0?W*R 
z-!!yfPq?=mr8I(r%aOwM6Q1g6&rGN?1sj*h^(sogf%|TO0tW(2xzX9pN!b&M<_Z*V zKwVp*oc>l8u%0XM>2qLV6P#%jp4Y(c2F7u|=~RskBc$J4Z*^dQA+s$ z=;%=Byu0KeXmt6=T}yNBVV!Dy`wKvo0^xDV-7OYejV72xQcU_pD<9Vx2C z^8;Wz8>6}I1vw&RabnBP@Q0 zKD!JjmgBl5HMOTVqIMaGCTa%;osBw3%CsO;&UI^R9dAtn0}ny_v(!E#z+z0sWwi7% zC0&Jk>_ND@@bgA5V9{g18GVZJEct)#eCCN!+Mrl?8rds-cHhZS$>8lq@VO!6%S21# z;67(~&W0yOO^NzYHGpL>yp>EFQc0hMajHUS0DYr^p)~m}Dpwxb#dW7JjyVu445G~E z;MBpi_fvSWCaqi!Zl(pxlR(NCdg~PJb1Y;CrItd^xNafpPkTaYG8j8g-Pysv<9~8S zBG7nVZ5;5phQgEQ^fT=5WF^%Mw1RzzgVgMJTz)t)f-xEmynY@1N8?%l`|0=#S(e&m^ATL#v%b=*aU)aC8jFe*!vr z*H7ZL|H!IUr#<3e40%$yPT{Q<=^*WPmdiCNeduLHV|scr*YiiVGDe;gqBbQ=6r z!rC@KW4;DYiV?>!^{cGekE^Eq_awtl(MFBIvUvCW2IsZDCv5`l5(|CNWv!8=o|Mvq z_GG8@vb1Re--oeRm1%Q1%CgtGTwCOsqYG$X9qj!9 zN~=fy7-egP#v_3V+<#Ac=UxgR`OcQ~gu}B_yR%%;42W|!&Zp(2w!)N|i_w^J_?LBn z&PZKKQGyFTQ(%$;kQ+?7!nyuF3KECL@qN$a)b>O1GC+Tt(Un;Og8-_Q^GqPfUBnX=qj zQ-Qu>0{U|G^ZuW^yO24rWF#I zfKIyutuvOKiRkngP}HI%V-frXjknN-D?E$QT6r;^R!xCB6VMA^VQ#inadH13fEYbD*b->nCTKz2E z!63L-`aB1w4*{!Id_GXEM^3s?c5kTvD0KI!b54`7Y|3N?IR-(u3DDQElnqd42rb)= zjXy)Jc>{N-Bi&uTi4@Mpb)ucac;mX^|DWA^n# zdzQj_D{0b4+ZKZ(Q>f)BC{P8C$U!>ST>OVOdh+_`i@0_T<7LXqL>+Pb)`lOFpjK|A z%dsuTaHmm14zM;Bncu*Z8qn<1pxu8A_or!NbtHcjG;c!Q7vKW*{3GPLz`xh_r#$7v zoiay*mT*o;|5&KKvgeSAZ?q#Mdg}9^py*&y*q_-0PeyO-tt4{o8&^D33;*s%%Aox( zf-n0t_DtRd57j7lNJxaFF8=jKA3lMoad~XHW9m!0bSjXo)MlH2TVJt-Esa* z>oEz~S3xVa)K%#0y3^07S)68pvNib^X7{8xdKU}7spt;%XanTTxLU5sFbY|lS>k_u0fxw4TB>248{uRq$~l8s7eod&sn7jw3o^n8-JkuVZp?e7cd| z6~FnAOLtRmfpXdkeXQQVb%m6U=Gj~FOv@6qss*xn0J$)Vhb!9qqv0HBGmU*0XS`S7 zEk-LAvXcIFS~G#C6GBPa={exdd7Nl`Ovq3+{YpO?75y+B&C=EHX6@z)Z7X9KJ zr_{IKnvDK12H7-Pqo?mmh{fRL5z-q+$}>s(P|~+p?Iuv{GBWEr_5VlPn}=Cdor(HY zMHLhvSPX&z8V@BxK@f~Vgot{I!5|J4k#w-*P%1_=sEBbOYTQ+dB2MKz5j<2-G1n(4 z$2fIz;Hp8B7&OKPqe-cyq0?y7HjT#V{@(Xnbtp{o+}~gK+0Qw9@3q(Pt#5tP`qp#` zkp2uA@+|!L9(DOW^4hgd`$B(j(L(m>$AeWz7nQ8%!~bt0QOjw8(ddsQDFtw>z0l7rFIjt$JA&3^z# zHUr(sG`5>bRU4(i?fa7zpIkG9dbQ%2MxX?OKkPtC*dK<{B~IJ^OqSGGhU8 zJYQEinEXOtp;a7zZXs+OKc!|3Km8}=)U^zHF^A*>`PV4dxdB;(x-KtCAmhlt*42@R 
zmE0HebJsL&⋙>$>cnM^eJC=ZP)v6`HmTDeap+kgU0k;odivrz8WdKnpzq!4-PWo z)k72Z)(z7_+$;QO3xGSL7CG{r4(5tbfVQ@fZ6bUVKCLQRp_1@VI%`sr!OFlcEtT^9 z`~M1ZH8i#C5K`=c=E)P{+=O0GmGEUyw|l8%#8X07z|G3E5BT@}cciF{w^V|7FXid{ zFF9R#Si!&Zv!_J~u0%POkdOH#Ys^z|^}IC3dj_S|Q_C+(VCO*?|81Z-8p=E$ihc=u zr3hDF1ias+6niF0-a^DQb{U~*ti<{{>uD`9(&+kVo?7`0fYO~4?Hr#;g!~v8`4F~- z*6|J0_8=(E{fj*j(eZrcq@xE~bq~OcX)SeHf#llUM!vX5ky2OMcg3B48D*yZz*HP=(fhGD=r zgRia}`d(*SxUbDEk>U#C*CSz86Q|Tp_5{SYUMDqs23(I*qrqtOnP5eD#ezGDHGl(G z0#}B7ydAzMM40EVyhk2sR%RYKrp>;CGR_VA$o$8EixJ!}KoVR6^vh|PcaVIAz_3!p z`8H*QPlb+qqH$}(L*huC-hfvsXp6rR&+{-&2;G+fxhEGZC5O|7TI2JP zx6ZNCZ#p}C_ImL~!)ZN`HP@4qCY5Ns{SbOj^awsQ1dy5rZdlRar>zehWwVh~dsoO3-2O#xU}BKe}@W zokQ!Xf3o0F(`z;UoHi(<-(u^x^VA)k=lSh^*Uw+%uFTBH+qKN^f|s75w@&1n`{Z89 zS67?pq3n*2O24gTIYZ!_&YXm`Kk>S*{4USh(p@^q@#&-~)2eQ5O(C!y@8o|bP#q5x zMM`@K9&p#~0?=z~b%*-3R+NBG0mW&2F{RhQolmaCR6nP0%4*YUvFQOQNnz)@tGDvB zgXy)Q^6}(c31^H6i~AI80D?i-1&`6scUJ|u@em}bp3LNC-dYd# z^u~d&G~=cfO)1qj^IMJt_Qb9?_@>9cHx{xzBrUsJ;p;rSl7~WHAg@W(vW*_Ls}f!% zXJ;4bi*Yx$B5*!N89ixheVA&GeaXK*o)bK^*nBgA%NnOu3d-}Vpgva}OhdD4dzOCA zKyv)tQN|OH*9BVnChFXuGER=Thtp1$Gzbaak360Es-^776iU}W(nj}SJvI?v8|Nq> zdJ1}VKa69Eb81*)^GRo~Lu@otLNz>c4>i>a_G~;`%@Zl~7|1t{5jmFJ!CM2*jxBVj z1ly~ORE~iRKvv}VeiN_^hvuuWa+D~ZS)C`IE26|nEw!p6FHf+lrOY%(aBupJK0gv3 z2}haQ`0h*_*A&~^tOftt^GET#0*q9Xk9g1$J(!RT+SFdPjD_%(5`HxKJxkr2Xzh#n z>N=U@!RficcO7zR5HQz~)3=ezKc&R}#JDVE=;f4I4|VKEOG}$;pxthi;M^?tJ6wf* zy)Si|LtT%eY-y{8w-#d6Bhiv4^aN-%PsnT_c?~P`Xeg#Pt>qf4akTyzG`lz*)X5{x z5ZcIF3v|C{)TAdVb3FMggCfqNd}Wt?`Yhy01$c2^1^LnQV!s@8B)21iTL z){FUm61m$R-$viNcfNB4cYv*SG@E72=eIkSL&m-9>Jxa?y6Ow`YG}Xrr4^0S2U;jn zT8iHhcSrrSy``H*{_VkPZ5oH2BGBm0556Q~cv`a3%6Tzu5kr6a-};wRzDZJ+VKq47 z-GuDkpFHMM&jR^Y0$o}oFeGxA9r^1UNw6)Zj>EvQvpa`|beNw&fT5~WD%TV0NUJ9q?~CMiO~bWdN4fk!Sfpwd>pBDfx?>s2NB7B56Ah$o zX43k#ylF!!y^^h%pehpMNH|%qd=~mVh5!A(x=pO9NpGcG=dg%R*MquG;U>Q1g2EjO z-pUuX*d9RS-sJ;;_Eg^6qBq?KEN$V-RAhu+8&}$Sy0O-r_S_8OA4sZA)Z`v8GaSl4 zm|pfbuvGL=KZrEl7kpgE^D@5Zd6`J)nb3F(GOpxZc|I&x>-!ScnSAguwD%73Fh^~Z 
zC$UshCBbBV6Uob2VoF=r?TFbayI2>>^9oXrga&)U33%XW#p%@KR_MuDAvc$1P3L{# zE?nyu$0tj1>VdR8`smL1Ekysr*&8|Vp=R3(hSjWUz|}ygMJhzeMs{vb$|-rfr7+)< zY?dC!Bip6pB>StuuV>%6U&wTF{w!d(H=tKS4}{jMBjEC_xwr<+arhDF`aPh=WqfG{ zV*9Ee@jR1Qmq7`~VA(fJP#r;pExuaLIhts?3=u6*>C2SJ|&(d|mtUZ9qe0zeL zyup1EJorp!ep*O_;JRGUMK976qGbKT=KJ7Y_4yt5nJuWcN0oA^14Nr`3}ZB5roD;FPtznjrw z##u$Uo?3gXgSghyN=wl9Zv*{EXy{hTQ1&Qc9f{OiJdj?l`x$Bzt9K?4zmfEx5Rx!R zTcXiZI@8uu(sX{xYJ2rnq#3|c zn^a)!3g9cX-A3|Gz1pOidon-Jry6pfOn#o+p;xCLwRP>4bi0r?mCoytxX-~2Z-WKB z5FgPVdcH@Zaf@*JNVJ(VsP#AKb?c2Jb;SEQ@|cIr8cj_e!7|xMy{nJa}}vLZ{~d)HB}QhUwb+AtOmo!fRPXR zRsi}AQh8|yN=bX+yXXTKZ9qD!&qms(Cvo&+JOMR1Ht6|t zrLw+`=j}b-^yU_z2g9qtwVs7i9AWt(l+{RB1?_PNe$UtN5{^Uzy5iK4v(KPo{kvc1 zc_#U|BDDtmj3yVobHhWj$~PyX5$$m+Ee8NiD#g3f(Q!Sw?s4Aj=Szz@?n)Q@RNl9^qw@A~A)N3NlAJasPVlvT=cdur2x_$6|k3oWY&&Li|UF~dbnr|~T3 ziRMUCx|)ff=%^HOB^Hh}z6nWJ4$q$s?p)0y%<44hQIE1vr?De<_fM(yXXZ%lEPuUl zN77z;YNms$t4Lp@cd19!IZLKE0Q~CXoJy)zWWLl~ptK`+PP4+5Wv4>1`l40Tw8yXYx(B)Uq^%w!hhDUozK3gwU!WXU0pG#*D(K9ar!CQ1rCO)EdMQUK zj&zL;2vf~$sT}Xg3X==Zq?R)YDOuxcu+q<6F5Q>K5%8@cU7X{!4=Lt=RpsWLwBHNV z)}1#8paGtx*R!4)EF^Z4XnKL2@lhni)e1#&Z|2|iI8T%N)$qds=z%((-NErx==n^Z zpCkUO)J;Ezed-^<O*&ean;zjiB(Mro+I=q z_PkcO>z$?}7b@|3R>wwZ7kfaJv&k)Al__+#4n=^SIE5lN*i2l2EC<4(HfkjuFDGwY6 zQ&x0c_5%trj;=4@^=R+Z8&4L=;K0LT>XLe!KZHVNfCr_i++a@b&~D#XjLG3gATbXC z+s=Pek67az_+7)Bb+ETp21Vq!%LQd{it%e?^osp;%7$YE*61K}LV&&$=N zm9&^Nnds6`?Z1BD@h9l493xehBqwbHD>*Q~6<)A!plovdK`*g$cO2t87M{6^a$ODT z+<2{|Htd7>P{T!}tD@eOq;*BZn$CRFSiR$1H`1z&AzL#r^Al*~ElOHI-mj9kv$5`m z?lNlUnikvUi$Ck!OiN{f`&373p1d93hhhyACrLNiI%-QpwHxhK72?glmAkWy<=!26 zX-ih~@2Y>tc(wmLIY?R64~>2?GI25O*b1+;fUP!Qb-%W`=#^<$BwrqWxt^ANusGaU5 zjw3`l+FY-oJ@YRCLk+P{C1ys=djg$h6~Imh?~|y-0n~RI;Xj019no4(+WU|>4?yp= z)JJJ#Z`C!JwvH0{=RoF+)&;>GA0u6RfZLbI@fxJQqYui5ZKNxNjhuS%8zVeHtTf+7 zjz8Ze>{(!Y6pgfh;AwbBX81% zb?-R!gY#!50*`0tik%|=i-N|D?^wT6qO)VDBeD(1-A)K*mfn`slZlY99D_7O0U8~zljI}^-(4V>#|+=mqO{i8hEUOn%;c1KlQiC0byuSd%1hu3TE8EUR<8%FyViRG-CCy<#PJR7?$ 
zB#Sfr#=!6W&?6Hl+x3jj)}29~?x^OTe$}LV7Yf;itg1xgxccP=>amg>|AX9K1K(x9 zmGN)ur`eh_z>>NlN2-I65bm|*_jI*!KLh-g`gW#vKjPB&_IOGbN8@JDLQ>>Na!ji;L5?re`o$=kiEUIJ&8*qXPXVeCn+L8nVWhaxNF&sOqi=64Ug ztE~DiTv$oF9Elc@TGCYo|LOI1XO)()xJzHU?n(!-nR+_;UG9dIu$JOGb3GS%59UAV zA-~vW@}IMi+9O;E=<2OJVUzgM7JZ%?C|YT)R*~c6zsn7;Rg`~AV4`q0r(XDs&Ow~jKMFFBZhsUnSXS^qqx*gsA=?F*Go=DBC&Fe7{+pEFJ{ z6$$USn)7%ZFRXzJ+GwlwXsc60PWSGV%2LiIxKnRej<3!G}KWWR?S1m z;8}#aQF99sslFGSV4q|m zmZ-C!-r@OeWPp2(xtc{OnA`p;3Z<(T3=Wmvm;*d|iJ368|Y< z(`D(tC5fAC|FoKCETQ&Nj5i>-UtFta+6c{|qw-MU?Ub*FbzIcjd~(EJh}W?|cpqBs zby{g}pt2{UKh{;C=aaJ@nIbj0H0+jxNOe|dBCX}0qqX$QD(`E+rdD-L^kmw>&a>E+ zd1Q&2Wf8G^!@-Wzz5=%QA;n+FcX_nVkHNmX!OTE&d7i`_le2=1kNL#hKt0Oni|bwM7h_Ph~~?{mI)qrSf)_0OpF zQY6)@NXjbokl5G$UIyICDM!8!46D%=8_rMZ@3XD4m^fRAncn?Z{z<3(JHr-seM6)7 z3EOK3+@dY!Sb9Gw-&3`JLcARH?j5O}OPTWYyl?jz^3C}V$__7WRD0i*KygQeR8uQu zve)L&HPvX0L7p8gf?^BYV>FJCA`U?#y z#Z5i_F59-iw>0;5SL;Tc%mYGoZ&!Zyu66)(fxgG{@brGP{$0o`XQ~*hhNmya*S4~M z?j`jmDBMHqjNre$pv58_;@p$&;N@pfu%&FI*6x}s1Zv(|%2Yqq(%RC3zuHdSljF-Q zetJihVR>+MF3+C_-k--ZT}GQY=DVE|3dDJVw=;uN^#*3ViBn~Td-%>ovTH}AGNnHE zHFPz=Tzq-%LNf?&*G6LhD>x{?+phPR#pQYCxzJy(?J?IfTGM|y%6{-@AR9}YPJ%u^ zjrr|IgVBAF`e#C4o<4989PR#xlgURvel6v>PO$am6moiFz#%MN`uN3VvQDkNJ3ciN@-+D`0sH5nZPtd=7#oysf8r?G{omjX zrT@&(U+&fHIP74seSMT&0j}+vSxOJ!afZNha8rTz?vMy|J>rwtu{p|n3mZOz<{c4e zr008Na8d^tcQ5W@$iKmq?0kY;^vs0aJcon(R#G~bSLzu79-WEg+yl>BDWj#F%eRHl zCS>OIK)4lo=g9Rp@ISZ~`-7NoGK%(IhD=uKej_wyHL|lGZQe`{ljxbeM~hU@N?LW> zXv6cs=R&Zb_EMXLG#-b{nM8{?N6!^;8<4@STU?JM98Eg+j`$w3Z4C38uA`I}q8-Q6 z*0&M268(8Tx~Bz-%JAllf*PKseuqV!{{>0foAuZipgpG{1+ED!rdIs46QklfBZbn_ zU57Uz)stOTvN%$^`=7?qyWV{gqu!-uq%;!!pDBYAZCgvPv zOK`?d`tA;uX)Dp?xx|~{M@;YzFzW#YfqFRYqX&UV&6Q<-T8g9$@zWq_zi93%o}mmB%nk6s1` z*>bj3+RI(b)sgBVJeq9M34C2bK68-k?hpD2sobscP-uEH>9stNy=e5bceOJForC@u z|7w;lP4o!wN^7as`)KS9_f*h-=Q`Gml(%7*j^b@3IX_0tZikm#6E=|0YF0%4fSUXf zoE(kZ7)Z$t)XkMv2|t#ot(ol96Jy;%MrqRD!oZ%jO+6~*owFDY32wNL(9#MTy^OBo z&VjFsxi1edo@=aUj(eyQUAL%wT`@p5#l~PMdZW2(9fqKO&?^H=RL?-NBdpDX}Th@ 
zhVt)d&j(=I9)$h)8Tb|~^EH0bYVtY$hM1$Mk?(56(wZaRzl!X1pQlR*?TFUz16~Ue zx67)LN9KZKOEhO^OS?yoE$XOX8>#K>*$31{n~u&=9t?&q-O(-ahtOn^LM~63O#aqM zym@JR|C#dheDBTO)m`r5pT5ecNvgOPwqcgv%vBNP^##5sk+9C_UJ;|6@6*!CRQu!h zIv3Nz^NHaJ(BeY>yZQW?R@=y3|H^m5XYNdcF`anW9gfIRwsr5zuWKc1f~!CB_gPZr z`BKgAM%qrQ&wxI~XyLBJDf_~hn=2Z%S=>Xujr3W>uoPiiNM3qZe;RyZ*iu?iTFHa? z=1$mi5nIZ3Y~EPuQV!+gImzWT&U7$k)R6N$B)|eRi>DB!-l=_+O;AEgV}6c-mvXaZ z^889ZFWW8;?_UL-E1@gU>!(Adj*lJzhHGf`Qcfu^eV)@gYnWC~>4{Erg`BgQJgCZ( zUs0;Ih9kYsw11p-Yb96deIMxEy{6oY!TA~g3iR#?-PqZVjv#M>Bb@W;9;L#s=R+G> zi$qQT^^kkJ)8cK=$?Md45|Cd@dDl|TD#~*&>(6+5fcPt+?ggZ9WxS*0hr^rCkki+A z9!&ZpNt*$Qb2_y~EHRCj3<*gtr8k440yd>r6PU>~Eb`fd!8SQ2(+soE|eVm;19l7ps0Oj0Eto~TptH`Sly!i^{Y^Arc0f@Tc ze^^P5`(l~zNh;fT6)6wqp64x{+ETI(lpadxy}-gy+98V+&N$5oJ1k%~k8~-IRtg?VL#A*`W)^=i)raP(k?yP4^uO=#Is||&I}Us=B|eIzjt^so6&+=@*- zmY8Fp+Kluk^HvSCn@FLBpQBFhlD&bJ%_0x&23JZbUqVuc}^0#7wzk52J5M}($rJM-p9^u1=gF9 zs9T^CrRpE~ej2>yiX!*O$cG#g7y8v(sfqGTe04`sHNpk;z&eez9{`>5ZWa7i4qVS; zUHuA~*NsxI1`c;Ibf^4(Chs@k=<~_%dT4hw){V2R2LsVwjFWdG{Lk1+&g^T(7Q39g zBM0}w#6f?`5nhRTLyOU8 z=QY$tNE3P{qYdm&$mP_iMZ3E8U&!fj`WWtvb3Qzp2iNWy@*FKUowr3mp@df3 zIt%G7XxZ`npTRNblhX)d*gC%i@9z*lAAQ?;a5OeJ$km-%Ha*DaQ}m6q=lhc8DQbK? 
zc8KfthS2h!4CKsJJ+|(DFO`*rmf8fQTL|w1<$J2Y5W=1D?Oc$V#F`wiEdl-_rR)fq zToJPV{IF!V!L2!Jb|LLDi?+x?J3T1lGt$}@Q@1)V_C0bKOWH5b??=43mxsPMHHc$CyG7ZlHT-v#x+6V{!SpXFv4gjEep-lw!)h5AVYYmp zFv}|0y7mGVBF(lyU#^=-=NXAf_3aA&$~=B@_G;z#1R9nE5%|)_Y(0NG{i6Z=p`}z|nHF z+=H}W9sJ_#A$^J+`1l=%KWS$!$>Xj;BjIR`04FP$jZfhjS+?;Gd*v)xN#J z>^b49SMLc4G8>Ja#%Izf%sf)q^Jpg2nQ)H%Yq9D1oQ+-R{#^EZl&J23v<3;_yugg_ zp1*xRZ#NK{*7QFHc3cHK9M8r`zBq1}u#-HgWyqx4I%S?Uv0iGWBdD``KvjZm=e(Uk z{yzX8&%u+8@X_y(#4VA*q5lOe^6esU{X5e$Wo-kXe36SQhV`U z3*X-hq;rsw7l%FUNQ;*FHt0~>qY_H%pcbCPn505AIBlfQwHP=nBGevI9y+{=7$;IU zm0z>wNSvbXoL>xJRq#-5q!NL>5)d3Cs0rQN4D1-a^3)*t^s1_Y`O-rl9m-umtmQ{ zOUvnTJ(CdWB>4xbp?JNV&h!@sZLMGP@9FD_wlCz(5hQz`?S$ocvKG$QYyhry(z=hb z{fqQ-oY+~w&CxQ}=0Cx){_5qVEVbvy$iE%%z+uqmiR5alY4^x~X*B%Kz^pTGon@~j zQ;t4Yr#==^+gh(A{zASdD@y3|;AT%^4xmI^vUIuMu8?*F zqa2F1Z3fVG?grUN%^iPQ16>S)b~9?NPWT}7UXB{OM$a9uT*sgBEf4I@tIhK)G$&Dp z7Ut``wGcl+qt3}knc_38gG*o1zjQVLyhwK$yml+Nt|v|60`-QMx&NrMq^qM{a=_Xa z7}kfU?P@!TKe4Cp%=6vWkXC<(edMlEm0zVdpGz3EZ*I8u&sZqSy`A@>=Q@F~JIV7X z!lmVzVa=!mjXjH>HQP)+?p5O0S4}`5Rji0O`t6Ks%e&v6@>n}lY3ck#zjmiZTuJLL zM{2k{Vd+ZNva(9qp?ApN5z$YZ1{68I%%?AF3Qw4tP0nW`o7QmmEUSm1@uNvIFg&8J zYHuY~M#ySL8E=9EYl1Upb++0}YOe42yX1Eqc9Qvf-A7I*VUNDW*T;B20*iGEb#Wis z#{&B|BB%S(np$ty5%0I?W%s_Ai)`N={&Tm}x!^FZyVHJoD|r1ua(NfK%~7=8q&k;) zE%?>`Na!5ANlnyO>qzPsk_PBfnc0h<&^dNlXK96j-|$Od!uS`0^zr`I%MG?!VBuAyMww=6f=dhfhFfV)rCGNL@rXFGZCin z)5|s!%Gg2OwJ7Dajr^a16q-nD75LqYJkJwXOTyLdd(u*e1Nj`v7mBoE!@aZQ$4#9r zTS0AK2n=MMymvP3tWDcUEw2n3Y9NIp5_8Gx>$IAu@~I)zs~Oz57OKj4Th32Dxs$K% zty%z*8BqUk3BMf7Jwy0o)WP1Wb3a|%Leu)` z--QnUuaLJ8TRUyUO-jO*s-zokM*Y>LS_D5LamJVsSqJjO!A>wD}4(S z37-X58rO3Hl}2^E?b^DPP`p0x*~GWUm842qyI?sR`N?fwNv8QeG~SiH%GW$zq=(3P zEv+$++8hle*TK)HhsEFw_ukO#YN%CRb2XCcukeZc*v-PbrbQ{FOZeH3bdUWy%ErIw39F}A6u3(}m*9Eu6!mXI%N_K+T7X)6 zwu+o{#BqP~a`G+k=9u}cP7GT9!Tf58m%N~^Tyu>}Ep2%ZwDCP?=wrN=j@e&IE=9^Y z2N|(17%fNYY(;ZE3zeM$?v)xhQ`7lKXd#>cj&sPGo1piN!16S;yftX*R^(cmX)E29 zllo?GR)mZClTLVxad+mwp4T_QmKuILcInMQ%lZqAY24M|W-BrBaHKO%&mh&8fW|#N zJ^+3txE9kSwkuBcgE!2K;H;8&jEKoBPYig 
zifH~8YUEi>hlYLN?3vH8yB)7QlUi&;e-|mWA0;#*tMxNDm%W~_4EjF-sC$#gkHOm2 z$cHu5<8xwoevcZ^eOtEiCZ9XY+TDM*LR&@hb+4pLz-^Awo{BN9v%$$K_T+3NuH1Gy zo(%C)pvH@$XXvRtX>>%0+UQ~Dh;ahcbYplPKBldkXc?tm#|8^`&eMPPb9df)4r!!{2Z@HE`b$hDJ0<332P&NH$<E~{a@RTM1n);` zIl2NGDvi<$u^88`*FM^lFAZ1-1==s!M~mqP6v3%lG4&PgmnkV%pb3*lLcglxJUMbW zmsnEk2WXLvK&A%P-#HBoscjDoS`eep&@OpW^rf$vr$#xhH2cz14&S4;)}a{xb|hD@J&`RZ2pyD5-D+39{t)A<)u z)r9HK_SzW#O7Zlr9_@ygrB}pCVWt0yow3vjUdl;r(B^<@2)f>#R-Gj^iT0^PPHc*w zcAopV=-rxwM$bdj57UmejS%~v<_ReiQ{HOf9mKi`o>Phr2v}XI-Lx}?n(RZ$%mRYb zJKI_PpKMxbF7-go)1HO<8s>ry21B(!BBd}myRn*<)9%glt`|THK-gNT!*<}lfs(cK z*1|U@fbSaG`y^6#gO(hj{RVk^M%EzWT?xmy_n-awj5IC$v?KI3Y8&dUDguXkq+4g- z%GIgD=-vT8(?=SL{yiMx!3{4$_d-a9(CfjA$NzjD^mYcXb@MUJ%Jn{1xaNU~q0za!(WJd$eM)vzr> zl4sQzd7w?P-M2nbw%XiY{!rRt5P0xBC8b^JXF9w6pUBNQXXVIZz0i$d`dGAGu``a= zuae3cLM4BdwC5T$N9mL?+N!J$8kHRfSqRDE| z8_CMmrn6>OM|;cyCeMy~7c7p2Z?2~#ckK2&^bJ7dF5%5QyXV3+lx@Fv0Xc|;DPUGB zw-@g@H1>#P+-Z~CHs?qf3)+)$k!S>zEegHwnGy#r6XjHYh&g?8{g+! zx?CTbp7afgIp3t?)LNx@C28OFK9knDfmkz$J1*p-xXzJ>IM8|@6>}AH)X&)%&ipu# zdW|D@?G)#D$g7S5<+yv<78%ev8tS_qKG)z5jsEx;>hdshs0J;xirn_5eYB|ba(m6{ z%qP#YoUUAz#!{V993gX`9(y-w?-acxc|zq*`8}hBtdX zvK=g~#!|V1n!nuHA5rEWPm2`DeJT05H>}vu>fS)QAwalClr)L{We$yQA1EI+Sp`Sf z55Fla=-#yM`w{LO%4E-(dq#{&gv=r@*PuK0brbDa2d@?aB1aQEza?SEKG*={PIx++9gPkSA$dMfs@n=Eam!hAZaA-T7F&p zYg)&A-2Kb-WbUz}#7u1|Zbp)`Gg#zlap!neHMtZh%@$NPmBFEob!buKc~0N$DeGDI z0e#mMK-@~W_W10`VRhK<&9wjNV5lObRPvMeM_alo!;>S_EwgwkiyCgA21zpY5SHr+xw|h?&Sx)mpK?JQ_;nnd_dYby3oNB|x2{}t zozp$YX?L+#w>J`3e)LpIM|JHty4$)tjC(TYTwrv6K6CC#ODLVSGlWAsHTBTX=V>D; z>$qQy{1KjsLC<~ZPdIydIU%0St3P}j|0Ai*0!o}>l0uJTblX`jrpK`?WSnO@Dz>{$+?PV|Rzo=(VpmkI#%_vQ+X3N=oDv++g zvzKSo6)D?id%)h^5n*;beIE`x9*7Mc2agJoeZYG|OHBk1o+p|#lP$0x`8gl*B)HWy zUe5e#g!g|9FZsSN64%wb{iBVZ=gX6Xt|g@>g_Tkri)?XxFSV+2yc`QZ`3Ri@UB%@zmSY6M`#dL(9&^we1gq?yrNcCy<{rltwW(AP;n&DAYt= zp8%yN4SMdUqjb{3Q-rLA=5x{Z_V_#>QVaM*EV=H?#u18g><&5DqoVDv2N z)SH%+stUyYSDruTepDyc_47Mkl>^GRD(o4j@Bdy1jgSsJgfd^CCrwQRAS zG@AD%P@vMUhFaUNtE5#OE!|43wQ0sesY(ZTPE0H4a(rwwEz 
zIY+8gpRT*!!fx`JqYw0X67yo+`S1oa6)W&vpHmd6u%ITYGZ`I(p&W8rE-AtM0iMqyrebAJ= zrcC}mHP%Y8SG=5eMpc{{lIuTu*|@|yr1`{C|Ri7tLFuDnmzOFaPLau ztO<*y5(xiHyXEmcXsfuZQG%Cn&0BG|BB*o!Y0!86(bT54;#r!y^j@iv^Fd6ce= zJD&_Of9;}L@Lk})@P)4_&EJM8{gi+D!YjgZNOY4YOpf?xu#o2|^=hp1egR_^9H-~W zuY(7w~oNuB26OLu}#ki+Ix^Pi_q4iw1Sd- z6?mTv1#7Q8&2tSlKr^)Dsp=;~RY#I<7BD)~R?T6VqoWRvz{+2GLGs*_-#dBgzs-Bn zBAdaD{q5_BH-nKO@nRmPbbZDe@U^JZ+SR-6IiRK;%Kr}VDgVs@M@?Ygws4;e_8CK? zt7BXT`~fjqi8q0ke~#MDCDu&ZXffs9PD>^2EloPV;|S`U`Vq6BF!`fDt-FD^?P2db zb96PhxB&}oGz-iSk&LC}lZSq0hvo2O$bD;ChGx_H z6pt0~-!}<)33;RTP=4x#$U~#fWbGgIPN|e&pMy7T$rPeDRDA5b0|$laF)i>V>>`S^Z#Vmx={;054n1~|xg zw-4{C_-{dBIqGD6u0VE)?>tf~%|*~l;MmqPq?`j)+Yh!)99{Ort+&w0`-7pX*e_Li zz8|!FIr!GUdju(3h~IKr6g0o=ru8BNA#~IQG)h_*1MPe0{c_?FH*?={ofojV*2~y)obUwC6{5-J;T2( z*0A%7mY*Zz@`39T^E^%Fm*~khlgB+b#I{%gwXVZ5bCh&B@1{%kN9tR<3%YG3*SU&Z zkG?xGWg$mOeeXlS<0xoX8rE9Qz(v}fOU)~!S(IxpBFAsntD3j!Xj|u!C#Z0VBJ z*4FZivQtV_qACwvHLwUNcPa3zEA=@Jh?65cgYUm-xqFZSe+8V>fK>Z4cr<721?5VfT;7Mz z&P8&lfjv8`Ko8Wt4@S{q4`DlKX*nV=5nd>8H{Z!njgT*b-}BURJ>|QS&^g9~W8}nn znf44G;JdQa@e9jrqvp~<52S=Em-Ij%1s(sKFyZ#nYx{8UsMvLkTIsC*R6^8~Zbne3 z{*}JGDK+eaI7eeV0+C*+v(&!A9CEu+@X z!RQ^ePjV#LQfsM4Be}H@#~I4ffnKbyqA%3|o~9J{vQnfToZ9J7~B^K?A1AL_&M!nE7$VvDe&g{IsJQ0;8$D!UaXqrg-*IEm69Z1 zNnVx0{{F7rl_UK>x}>irB)XoPJ>u67jb09J^0bQmeLb0aZoT)QKBZh-Wvl(}+K-`u zIs0Ww-dZr6DZsQed+gUS=E7~0l-p(PMw+Ntik!D*)Lkl zIUbIbo150BEvd#B%YAuB4tMb?r{}2#F-}Q_3b`{BKkW?li#pd{TaM>TiLVTu&s}-x z4As=X=?Ppypci2_QtsQ(b1r=M?qcO``NGf)?c9pK({4TlYz_j7JoWWN(i4DYB)>z0 zN=Fjn46_sA2Tv84N@)||r!&~;3b1YF_k#BsH<9u4h*Xj5coa<$-w#dt9(Fehy@x1oz zyu1JQ8N@4~_q86Kchd(6bqJC)A93CUjw2}XO!BO!#;#brfK+{uCr^-bJ0Wcma~7?v z)prvb`%IaTENy)VdMuq=prthcn>TUib;M|(#%d>9dpBtMI$CZPZT&sIsY_C? 
zcpdPp<=Jy;^?jPRb10>Obh$^_NS>dLFO&J-)cHlZpJZn~LMnmoE%I=0nk-;)N82`F zN_J~?lq#>LQk<`I9B`)8#?q&_mQXpgKrD9_aAsghuItZHuVs2`E7Ro#t_>;h26``gbsZ@?AIi0dHj-9ojP%Zm)ArpQHl~_*8!d4eZR@%oYpf?j z%vVDr$>-Y?Qs5!*?v8AEVl)uLwc*a@ThOVqJ&m>uyqd#5zNBqSl-s4;`SD!A7jZp%7xV2U)l;!_8)gm*4wdn%8TN*mIvk>79QU-@6do3ZopEyb{h+YKtd zCB7NN#hZUKIN|ImVL@PISc0{Kcqq@9anZh&FdF{*}m( zaY**=ypM;=?5!RSZ@h@?Tn{cE4)54DVtRhld{R3LsSi2WA4$9+ui95jtMbmmnp=dA z@}ZEfBf(fcq|hA75PKV;!fjB^*YQqX$ouDv6}?6MdLkKHk%#J5<);?Cb4~990>@RJ zrY?`-i8=~tDMsZO$8wcsVyJ*rP&&KQz9Vt_5zDVR{xh(63vy~KacA>(H#IhmJvzs? z2hpomhn!0+Dg89?pk2Bqa4N0#2Cf$Jy#?%;vNv*f194qDl-eiH{V?k4{s0G1w;~vA zpy%~E;cD4ojO1JwmZ0%3I-(mI{UmVZ?ltz$ZYQ*FwD)6B&^Rcol~LJ88Bsq23UfY} za@n+UZOO~6o#KuGu1nlZZdb$}368*R0UPSlT5yr{fOef4_EP@MC1FPFtOX0=RIT8- zZQ5e?-Q^GaLrPM=HU~YGfl>Rd?u*b!_}^ZxzN-gj&uul;2HHyMXy+$y52SCI<{(vJ zxd_D-V9Pzm?0I+PrVMiqk)EKjXur!S&H3MX;wTfuYLc7U2&qNahCOK>+N?^CblF#K zgaV7CdmFqjhK2_NXOZvPg7%-ZfG^;`h5PR0sJ9@4Htb83aX*QFuQadAbx_*E?VX`X z9&hBWA3m+DGqf_EYzI@Ot2%I6OISwT6ZMw9$>}~P$`$w3w3ar5@yn>a_I-)wwZbfK z0}wtGc**mwlv5g2Ky6Z=R}uQlh*jWgvZBOL^4p{sNlK;XMywl0X9m*(1t{?+$g2n7 zt*&_=%C`n=rHnkLLu<*qWj)PDZ%y)$r2PKTzgFukVtMI{Go3w1`@HVtv5?Tl_;w6+ zUk+_J>hT@$ZGW^aq`-VUX6|NpB^;DNam&EL!Nj%at#5h;I_u= z2=;?^!7_T;Me=nwi#*}hss_oZ&o6njl&r$ohZ4U}tP#}LdGqH$lUXPIxtGhAp{W7r zZs#<*I>))!Q^;pAx^M(?^fa{V3aHuj>X#$2mmsO#MP&fBy9v#8I=$)}puSPGS`J(n z$R)>bBIU@Vj&)YjitoZ(XEO`*jgb9%9h5q1<8t)!2|P_lVr1y)CxOm6>CRfILSN;< zs#@SUQs`B1RB0@56u^=rl}F)oszSoLK4=iwd=>t6)>D!1@9@P0x4G!` z{y?oiAkBYl3)$$-gs$-y%dS><2&~9YwbY_lz!RJ15st)Z$FD)EUkojb#;duV8W~?N))rb;|GKk}&W>8?F;70W`NYxBP#{JIHR=~t zetm~h)hNlg?$}(Om|pIF+5lAJ!DcZy+*U71!N;R7?Ay(ReIBKGYN-7x?SbS0G?xPJ zrAyi6&)Luy(Q4={%F{mggv2JW&<9+n(WpGB9>$Jt=U%d2ljknwmhH^7EG*{~`uG3J zEoY~Gaj&DUcyeiy>Pjzn>rk3igXeD3axUn~fXCW7$E_5<8Gdzq zXe0cw4qdnrd^j#&j2Ou0<7}C`>(ss%hHb~?mT$56}v?ZWypoVo00agfWR3@MZ(=1 zSlOXpOG~I0xsU@A&%;PMdIj}$y`$^UtB_jy7ECt<`|L%Yc7QK;3Yvyxcq8_iy_sit z{}kzzg_mqFWgpF`o_?h~xjsqWt^pqpW(G6!V+1sHB(?Q?P}c*FpqAHw?MkH5V|+Od 
z8Cwp&y7GSqEwqxpUJhIh#7_3KfO83VXY1{FgV*u(Tx7}`YBC22^AIw78>P)5g*5R9 zxJ%x=_sQ=IX{0#v<#Wh-^^bkrM=vg*wYjBBB*Z|yBjn;C*Pr6+L2nDL+^FsKHW?-2SU`evGeY(ksa!=`SDe#rSu{l=k># zgr*)|itWE6o?UN3pa$|PM+#;97j}7W=Gn6behJL!lw8|HIkFdCF3a6DFLiwy2QI;5 z>;hWVm0rh2T;VnnR4g~{nun5Q*Z04Fmi`PCbMm%1=6O~^{iEKX;a+w8T;Wsgp3aqVPWASno6{MgS`U|-TP1XtlQsIdlxTbOFR?s)&Qe>vdxKngwS1$ zJ4ibYFU-@V@GJnmBmYF5+bLrb8haYFG%{M%{^krql^c2b2JPhB0hiVj)(zY|O6{A` zh57>Ak#|XOqn=7&aoIzcU+B(cE_sgkeiPpv9|)hdp0{(e*x64*P_UOBQOrZ z+Q`$M?iJv930joe>SHL$(@mX?{uOkW_F{4oue}cU?UxE!zS;3EiI(@_ac632;3W>*|mTqaE#|JpoLPzkV04c!Qeu03(kg zL4HUNFNdT|yRcSk)?3UGZpZM4Q2SmBKGECRg|5r$=FMc)F(2LX-Kn1B}~8c_z3_3(pgHM^R20o{4tqv7FW_ zV>I(zQO-5Dw^_}zI#%EvTUq?BWqEccsV z7l6H)0cXCG5*i3M7w4`j^SaU+o+xA2FrV|_NGzGQEb`N%rAN%Wx2j!mjelvOd%LsGr=3?U1fxWdmQ`!O} zqMfx@AMXrzhJcph3!SjJn%Gr3r}O<2VA>w^(hfEp54JyIdx*Qm(6~=4_~rPu@=Mp> z`wxu&)XwiImfku?9be{OuS2QjD|pu5Y^-`}c?0sqS%THTconpKIkIPJe656oPohp| zg1N&8KN1RG#jZQQhGW~oizi@=0_wJiT?Uurc&cV+wqGGFdSa`*jhvNR528QwI`77` zpMMjzbFYjkl%pehuF`mZ)YQ zM6Jb<)apJ9(w8S#O67}?l4I#f_D0qX1S)qWd7n`AWPe)qF1Xp41)v)XM%+biGdZ^t z_8QV%4$4svcYdfsw!KR1JL$U?$)gX}yRsuAw);1~OfF?yCsM2X(Ko%o-gm>w-3tF) zhLzcl_1Y61w+g*pj-GJ8wr)Y8?r(epIZX`P#`EMhQRn-}u}Hp-Pw2N0Dp&4mL(3;+ z)Wbcko55r`&^(GPX&_Wfz`cbI#J}8yl_<5up(W{)Y!y#_(r0H5UP`}RFK1M%`O>-u<)>@r zQVdt+_+$^;yR)C1nRm;CRe`ru#uw9=vuBJL>P}GdcA+!hYPi1?TTL*l z^G!*UeAu^vbyMiu-9l-Wq%XXbDoHmzxoU1Ld$~dVELWv9|D`kZQW_yC?b2n4Bgd|= zPg2bhd!?g3ggxPy3wcw|`0whEA~BO)C@rc*y2f%+{$2k`$JmD2RN9T+txcXwPmxqm zM|l6}quc722OT>HR110)R+?6^_0fJ=4AtKRr@Kes$pKx9roqXcKWxMPnt((CB4gF+A6^U|78 z_Tw zWJTzL-N?I=)9W7R?#QomuG}NyGt#?@^ozu62d353e=}BHH)Pcquu_Y3&XFRmjGT|e znGrs;R(w-`p&aMic3}4@k6y;sd55~!06`APa1#(-h5Q(fPOztb7V_y9O7S$3Q-d#W zLpDE5|I{5vCsS8@rNha8PUIjxbwdLEmj17Mk6l15HzV(w>A!jseLa{t87Q2U^1rCL zBUJ7!;_k=q(GPzF3u`E4W#n=PIhfm4+R{GZF~E5PHP>r45>Ll@NMP4@3-{Hu;D1o} zdUVA{z%&9G@h)Fn3voHQd3ty~kjf$M^;`=Ul?3} z*}!1$$kjImU>+6y-K3`rQF7V6KMxo#|++ zCkZ=*vXp~TojdH615F2ZqoWY_2L~B<5^rtX+xdC9=gPM_pRKP_+ZI);S5s0Wt+4>A 
z)m~HDItyR>Lz|%im{&s=`M^hkknixtRgrlxzLl?e(z&`R+r?MDbpIXk|sw)WX{U%$}RIUcgMf&kPxg&QwVfL_=5jq+Q$n&mj z-wITYF#QfYr4IW^AM!|G_iPvYG)hDJQMRU9(6gkcLjzBM2jNtPR`cZ}o({)$D)M$e zZM7JPHe>0jYpcOa89%k}c&J2qr1xqZIB|Z+CD7Y!+9wCa9u1^^_pB(_O&D`4xm<)U zPa3fR{L-J4I-As$jFG4nwnw^I;HM|Le=W3;N8`>Wo->U+@5R#;s(|Wyw6z%ifZCh_ zkK6bCgnADKz6Xib2JDWMdcMpl#C!@~-$p)z!I`@u9Sql;hFvxgI1(Pkxilvgi}#nH z^DW$^lzL#bgNaw5{A)H`Hx{YU4+*&}(a(US*`}W z6-Ro`A`7+_n$gi`BHw0PtEL^$~59hCl@jc+#3_R~ZFP|XEKEYyph4x(r55Ggb zKBWBrpsn;&z6r;;(&`t${1#y|xU`;KtLzM_^WcH!fJ{z42P&)~Pr1B%r=IwI#H@tZ z<0yslMX}sq?+`_P>f{){S=gE%?wD zs6_|Pp?-&wV)LoTpEInEhfVz2$)E<3JPaTXX~njFNvM>}!B>Fx8yGeKJVNQ?26=7aj)jEdV=^&;}TG zE^jA-_|qWk(@;7`QK6+&a{+{Z90Y$FO<01&ZFU{d*b`Ll38@d^n!tBmRMWWszrX$u z>}Qvzn(K9#r3kVe=2Kww?h@Kdf#Ct;4|SIBa8Bvk71l34r}9g6wf3owLiNR%r8nD8m!m`2^_2A* z@pdD&brpZs#;GVeFcgZV!E(2{Fb|z`y7%slw^FWOyt@hfmBC^;+NjVOQwNo5aRc|# zB%3rTv3lXrG*iZ58aUjtF;}Kj1-rwhKDRi8=Rc^f1#y9i5A7OK4s1Oyyb%fX@ zSqnymXmu#YJ>ZloTH;dFixXTH^JQ|Vkcm8LiX6zj4+c_0e9?KA>f?5{U!nXbpfL}q zJPY{OA&9JpM+85|cPZftu(>y>j{!Fx7E}%dCj-yhgdBt7Zv-C>88^dAb4a^^6cZ7^ z(}7$^@Xe%o5)8OG+ya!q_2BM)D6oQDH4ml$?<+Ah^)-aw;k3*!%q1sib>I+j=+nbL zPJWx#`>Mp&{=?y(Pa|&~ofiUMH(-}W zEME~^OP(c2MxX>#1FM6YdUSS^4EYkjJU

    6k8se(fRgQO4DfZyT;-VX)O&M!=z-d zMB^U#tjU(TtjmE`Dz@9|Q-SBT+}mk!yC%nm@uAV=Av8je4x`Z(WV*k-r0DN=h9-=b z$}(RkuHHwz?f9n7xVRQF`AF;+`M0Bx@*NqW={fx`kJixO%n_%R7MKuDo7T=0<_syvy?Qv*~qA8e$_Pp{Bg zEf}Rb^dCnHeV5z7B;mw73M{&3AZj==ExLTDBP6Di3&i^nuz5dFoeLZuGIus1 z-NWqu07-EM(lN(mg&f~L$B2_VP3QS73{;VMH0%>)R4_)^x6i=5iSKZu_y4Ts=Ogj=V=wrc+pT z-(AJq$2|3iPFG-H%B^cjr<1yZr&Y8~1_qUbS7He)=SjDM(_3tthxpb6F7}|5HcC7N zx^?j6cpNi%bh4ek6-a20q_L}>2D6k0Zf{vf2`*~-bLdMCcXzU34gZVz*%ecpW!!7H zFQ?ARQ{_nN_S?=S$>X!v(w(&I#Mu(P`dr`n%M)uN(9I9cY%L0rGRa0erRtgF;!DqR zim-esxJdVnE@p>%(|sdPsS_^*hFe;iHk3He@N`Y#F1K|_)EcBkOVoNK>E=R#@~U0t zq^;x`Ii>}iDe?2X?K%);EGL(UH;0|`{AQ4PW(=`)5N@}k0PVEW79)@j!@&Jra6t}? zHgH+GOC5HLR{DT;v9|Jwn?&CQHQx`#Y(Nu#8hNO--LgY|a;l!1@0a{Pjh#7(*0+^( zB&d0Bf+pH2V*@zVm1FIv^6$buC&2pTpr&~^siYTaJfQ3qkYs@c%sGN@;G4V;VQv7{>Dhgv^b0GJQ1=<|8NH(j{c2XXRG1=CaN( zTgb0(bKk+;f&azSw1qsKyfu|x%m%n@EVS$*5SMVOGgnb7j~BMLBz4T=e-GNNGSX`W ze+3*%t2M;PQAdv#>r3gpp}`} zUXv*Q70P>-7JLWj9>BhKLh%}CYZg*S8|QNaI$nDGAY|H)u_7HhRmMFD z#2I8CK-vv_xtnqZLM1Q4TOMb(0w_7kzlI@&BX8~8vTjjjvv!f zZg1VZQ${&~+CEZ_7&^4&eK$WE2o|K_Jwik1aI2$rl{^;*^iqpU!lZ>PAa@dX6YyUQ zZX86+@l7gl`^!Xk#%=&FSy0RrxKItK_Dxo{_1p_AjYit<)tUbCsQY44Dn(MB4wZj` z*2!q|0&rPQDofd7nICk9q$L*Wglb@49-&Wzms&7TM~JeyX=kqLF6)^#S~W*wsO#j6 z^lZI+9!lxPDAm(vuO;#Cba!J-AhkK>BGmk{cv3gDc0Q+c{_83=W`Uo!yt(DT=1=)o zn>6$9WIOv5Nv~x*tKSNdwmYp=hCG`ZSV*_SR)TD-A6M#J7-Mk;$#(vq^Y zg0`##*R_0eqot)_B~SS4#MG@igXi68GdCpB<=%k{t_2v~eCG=69$RxJajxaN-h{`g zi4!YrizbGZ){=*Itv!!o+|#h?Ma1tOtu~C7u{R+N%XjO*i9K!giU*Jlp?&h?c?pAR z&2X!h*9YY3lEH5Q#ks)U6Dja5=&J=Sx(KOR6VpX%Bd^iO#s;MCV)B;aD$v9CQV%B< zEkRnZCuBX)ISjr7|HBo?*T0azM**(m|1-YnFF6I9a66QFCO*Jsd`#~HP~33x zP@cN=sZ==_$tsOG9rPyhn~D9~4s;HhUymy~E&Xk$R!;tz&bP}+?NTw05;((J;2 zL(3NvB7d)-#YZ6NwdZbuZf*(eFF}6Rzz0|HHnelfQ#oz6k~G7h+J(UL2v5U!pM`es z0n9%PS}O9T1G^^A0-bJ9iFj~Zi_eJZGS&(t@C>-kC6&J;?hB;f3wRy~i0gr>iW;X4 zs)qvKU8Hqzy@R-iP~Q&HY9UGCZdG|8;kN8Ns3)UV%I+J`22O``5x%(8uE~Ri$vme^ zducZ?vVpdB*t`$aT@GHge|v}4E@|mf3fu9E6ZuMCN_TS~$!`kWynB@Emm`r@!k|y2 
zf3&IXZ2!jz7!!e8xh?!3M*E(KJ?HY~23kqGt`9V!WuQz~x@_mZ1m05TxRJ1aUNyr6 z^x5<9p;nf3qvvQf*e&p;Rr4k8(%n~(X|E8^CBqpZd-4<7L%6r|JAkJd==utzWfq=1 z)0M-&dqW!&!F&<hbVC;EdGEZ$ThBi8&MP=@Abdj3>BkF$>(!PPP5royl zOH!em5p@HrgMnrx`47XIdK@_J0=~n@KY2?h(~oW>?;P>YjY&R<6Dz>{F!XdC<*cJm zu^!ypw{SZLC1@TyuLU{$I3Z6%huT0d(4J#xt7h`909v=GtmEHp7G^Otd45R96@04$ zJ9*IE?r-=voC^Pijmss zd^z;L+^MXUYm^HO@tno~e(~QU{!96}q|@H?0Az+zt`eG1Iwu>fU$mCmB}r&KDVeRvcC7h1jlc(8Nw%c=PlY+lK2TzPSfm&R?#=(lZz&g zMcOGtlxenEf9$$?o;|qy_q2P-rcP5kQ%=tZ-KFRBzc|V$m1Nk()ZBX|e9%q_LZ0jf zt#?O%q!WFl{efcxGDX_4Uy?>LM$Kf`-L6D?VtPyz>rN?- z;^m-mWkFB0*$L23J^1lWJ)q3FmT6sjrll(R&>a30Z$fF@SI}X}_8AUDZiH7x{7Pz+ z!S%y{{dor6GeYw4si|J4?Y0r?YGlY6NAyCYcL$FCNW`0=ouje3%Yddg9*9<2H)%q> z0~;gHQTYD*BHP@EU=Frv7IZb8Hr1=sfzN6ip*bkzb+n3Jx?_9o>wlo(WGguyAU*F+Nj;#|r1wTr#~sk*k<{fNavT=ko~x;CE%3QPLT_}{I@-o< zXR3K0&P0nn!)81XtR91&_>jE(em64uFUYwkiRXCoHcHt_%Q`~u(FSfcmO-H|U{b%> z_vphU1Sbd%4oY@vw*Hp=$YC{K4~3I116up5tw`oG$U#}7gqsrW>DbFAa>;}HYbozi zYAGM=M|~X`86@}z2_WfD5k$Ci*T<#Orlenr8-Wv$N0S>D? zO@rt3EH)BX3fK2k37OTOr!$G;ChYp7w7(tUvV{BF<;o$z_6n_bD{Z2m<23Tp&RmE7 zUq=bb!dc+$XyR;zkk`m~9!b7p zS(&X4N-veM8o{^s&a`a{Z?@{}kcozR*xNmfR8x6&>B^quYC5T|G5G#VK)e~~a_Mdr zi>kD5=D-=Pbzd zv`u`k5GsWzF_i6f@b5%gLs`+p>3G@}LLFtu$)(-#CCHmng<7zj%`pPV^aHt7Z4I8< z#J4Tr_)FB_RJ^hNlE+{797el-l^mwhgbHQ;uX;|a=W;_U|HOb;ZFeUbg3uL0!fPl_B-TSu#2NzRv2iz|WhcR-*Yc_A`z z3hmzwJ<|n9^fw%WoUKB4F4bKMWP4ESTfw3Iy_M*x524Tjl(B+d#m~U=O;F{ji{qzt)4V zzu{fqxfIw3IqZBgXGC;GZcYFvoyp&R{Y=7oVOKfo>@Qg-6Xs&dt4UKOhYG2?G7n0M zSN}UQIVJmPXmrD*Jf)VjG$%rofMa-?2@IC#woLMxa{%Oqq_m*H7xUxEvkQGjEr#j9(#qQhgiCdDus@kw7=2-_b?LxX zSr;+pBfT5YVYgvvi9>rN`c7A&3G`dqYt*0hI<&u>@cyJb6u#Jlkm(_Z9PJo^73~E1 z>);bxM#;DxB|B~2IS9*wo|L;)ts7Nk z%B`e7r-k;=vmAjn6#vS<7HB; z;A_e@^H~R{+M4G8^}gZ7*vQ+yU~?*Xlal;#;ECi@VEJ`|6C7DwjU<-R29o+)wC=M! 
zcSF)Cm#fr$81b|WAEPYmvJ-W$Cx>px;5Ksn9eCDq&8;gMkPhjiS&!5iPiwzGNKgEy z@>M~p=CW((0e=wfh6Mf!NpJJ3NCbNrV}ZyVcL;k`8@!ElV~}(O(3}58EXgLMdyY;$ zLi)YI-dd>iFz|9Oa>#8EzJNru1}+`@h`i=wyAJ~wPKTd}6d6U#Hstd}@?3{>d=;Dd zXIKh*0QYhz@_w-JBif?}ZSQQmm%z{-#5|RpHc($J`!=9o&)cI=V-saLI;H2)jdpqw zVoyxZz!$>Stpkpm$#E#Hqiw4m_7^gG@1*zaLkwTz!dHqT*r)Z=doFnxL61!{gj`8&Z42^W# z1?{;lz|fEm&TmPNgtHurh~qE$3b3|;H#MH}LVeSK{Iw@%`Q?$4C*S zce$(xd9)o*ZqV)5X2Mo*zXNHmO6(Q#$JCCE*5GqDBUcJ5$ISQZ zNZcm8wQjKCZycW|pY=dyPi7D}bW9~@hmGJaZamz^U5ra>dMD)3GvK~wp`NwK)qcpN zdbmm-mp}e8*Iu;WBHFtkOgY&LnQSZ0ffCxnrriPh-2~+wMmy?-91G>tVL#T8G6N}H zirf>}Yvgz#HBAliJo!z;f|j?AM4R|yImh%ZVIEg0!r3yO$s1FF#|=|r4ka4hzR~B< z)~^ShrEcp4C&*dZ^42+@^G5_}eQ3M*<^1^CN{nm|W!Cd>`&h!&htJSLj+oW+z5{90 zMOwBa1G~!A{%~uBukS)RQib}$+;0tkq|g3N+Zn3HGKHHVtVJ_j2;9yBX`(!1&7_Tz z?l!^GqeJSO)8lA5Q+XVQ)mTtUzAHVny}E#3YwIkGDag{7Lt3~PMP2Ixx;N+ts6YJ0 z%PC0Xf>uc%vhxG#!Jm0)TYeMo;FjnSYTfAzv)0{c=^3=u0ps@Oz(y+f-F zfp+cDIX3(-_uj;@W#0xKy}tU@hR_>ZgA_QQP(47q0&{=r*+hF*v6nC4SWjJM;WyrZ zKSi6v(Pgcg9$;CW{221|8f5AIVEd(@v3)6TW%T%VL!#V6%~oQwNCV1qEobMS*C=fu z5IeRhWojYW8&w84uf{%xexby&vaTT2I=<)ZR>SQbOZjC#OG_~4Mc6Y?5+t{GLhok@ z1@|5Bem6kxqj?@kKQgsqj!L`m{SHF3SNw@qmFG7&PbFqQ>VHvCr8F@aicrSur|itL z9*j*q`Maa>;CC#&yX2c!33b$|7vqTw$U_*cby{XKxu*qO$CHOWE~VZiuK6q4T7*}) zDx+V;j$252IY2A38ei=jtOp~8cy$@1OHjq5=~7N9b#Frkj{v3`XWDXZ4LX8Y`X37T zsU^NDxh>Pe|s~9($rol`BS&}UAS7g_KW8XacQvhm|w+6 z8~M(p+?mjx`SybH)O^~h>5-Ox%%?lCY+>ho42I4e%UuNgm%ua5!W>4NA}o?!!=`al zsw%i`1GD-!)dl`~_4l;aVA|7p$Ie;a5zj{_;PSUPzvXE(c{^+BVrDGghE8^2-%4oX zCE%G1ui0n+GVt2V+87dYEVyu#wJW{56_og&e0v+1wDDS?P=8WC1?;*xfNMI=B%eNT zvKyJV@;sHRGf|df8MTqd!}+7$^~3oS!2-O01^B;5n0aQbruMsI!w-D&oHUm))V!U5 z{uSsbw;;%H>0f-^gYEGk7)@`m_`NBxEq;|WNqI`+S@@A=(?2m?9lX1RtD_Rml&J8m zws$txVA9)4H$@DsV=3W^uwYEFnD;51bJ3s9&nP8->X_^~=@U|}F6T7vw}9O~uQOej zBip)RAJqsS$akedV?1X`U&PXS1}VJ}2{Rq-aV4$dhVa)CPmiH_yF~adN_X>#>0oFc z&r_+(%kc9nz`qwXbsCoG2eiLBw27E*h2bpywP=q&(sBjucNH!37GH)Vr@K?*pCdWC z1M4uPNR=|=xEr9PD+!a5EN?Eog`JU(dh9=Wt=z zXpJR=xYSPmAC1(m3+a0h)=P!hchD-zSpDpd^QrmmFFCiZfNPAe&9BF2J+wQDcUxcl 
zDOGFuWA5juq0yZeeIfa^LC=(HRSx)I-b;V^1bwRHsMmF{)&;>n*!lGE?CDBXX_Y9Ylnq7Xp5AWjA%60% z<)aV4`fQ4@(Q$Qz!ttw&<>;5tofGd$C(j~6y*9*~akYb6I7f1>3O=_lTI0=FWtrNw z@{+lxq!vb(!1{Y_p)O-?eHR*2i23EDD?W+UOxhLDfSc%O?@r>&Y|c)c=^b~5=0i}> zjF2)+$d*E^`^bL19v4L+!g73D^HuC7_EEz z+HKtJaqde#jzq0OuJ%M;PDKh$<&bZ_K)6h)Q)8EfE9>ilCt;8^xR3%fHK|hw(wk9@&@v`4DLJ)YwLbA(v94mDR(NEtq?Y9&t3#`J250#}koq`aGu*!@hKZji@Hjl5xDjIYB*xTb;qYK3Y>N zV>B@x`?uF8_njS2&WY{@ZtAdaHiDPba#EZ!#d%o!@zpUmB~)AFwvOk`z}`lxw4X+C z*Mt8C<$jaakPkgNMI6UH3Sg>6m=a`cz%?2O?g;5OAKGv%Sg(?_Q4iPrHEFs7+nIzv z0hKt9-EAOJwmDwzZx^M+{qQerAkFh=KGy}jOFU=Rr0nn~p3Vd~0XuaKe)Se=kLAh zk=2#ZOZ|ZQF(~IIdNCh`Kfu;Gko>erP6{bl;hx^rR?^%JZ%A8i%H$~VRn(~m9Onju zTHO_-+t)(&l$5(8HCPfFTFqM4);T3|=;ugr0?*1`sZEJ&ENy(tur|ssrCO#n7fbSO zGBvfo@?O{mJBCzuTL!lS-XZML?T`tsg>}@xIy-(Rwbm#>J->)!d$2GMT&p7;v+^qL z3a&XOdn_&MY&`o4jz+EJi{6$338ZHG201PWJ7)w>{rUPd5I;hRslDxQ+2h@nulnZ- zsKXKZiAXeiD*MB0S?bp7VtxdS>j~G7JC+)qLayex1^!zdQe_U3L^*Un@O7iUHxY9p zy!<-fE9jNgyz3{>U*OM0-XKSDIvIZJOU^d}qw`*zui)tIBgn&hfb6ue;rAl%L4*#8 zR_KWp@2vUVNjDH}TM%a{Flik+`ZEW*IGDE7gXIkE1xPX1IiC&1IPN!!5HVU$c{hN! 
zk9qbO!~1c$oN*hsR1lg7sGJ$wdVv*o&wUVt+>-^Ue866>?uh z_|}JGDqG0iHOUn?$$1R9@wr~rj+puZY#Vh&5qCHL&+m=g^LZgp&OUY)d;XBY zF%tb9&i8jTM!f6k?nT_pMJ(kCk9&aT3PRm@bu~SPiD2JazDU?{P*s(*_J>{|maFvI z$VCgjH(}kOqodGw1-7*IyEA+*#^!Jxh+OKH+Jni-@uIcFYQ|QW%H8qrtH@RBY9cwh z#fqL}#~FH1p40s{ObalorK42SBA2@L47oo-CvbNXU)<1cN9w4D!0mG!m(|DYdW;G@ zW~mJk|LCy758~ZDBC7E=GpxPwz@r2nfmJe+QuhSkdSlW*V*kQ?9oy@MB&vkA=>lx7 zQk)ksRQdJ*=Tx9or{u9Ip4CackfM4lTDhi0q^#^e>ik9oc#lG)MwfSE$A=p zT06g3}!<#7XT(h5%Ja!#cM za+bpzq<7Vay~35zvZqq&AE3>GaJ}t%isgv6IEPZFnb2-Z#6i@;{>Xz|bM}_wk7~f7 zq!g1I!%Oz>)LNg;4am)t$$0~wn@M2hdU(w~i8B!P0iqf(=>sTesr7`(Wkd0sD8aXf zs^xZP1L>Lg4%)`iF?|A?NXzN~AX7dl|GYb2QJ=W&?7YOk2CMIqhhFQ~v7}s;)Q@l0 zVSma^pK2W?$ju)hd*2J2qAg_j0mv^UztTOe%7I9E=XyT^MM+@`DP;q+V%@sJ9qy%2 zM^5$dh`TPBbB)w3P^!PgaCN^D+&v3Q*`&;~pI8vCjJ6IH&g^l?eQI*A%E{w_L|eK{ zZN2jtN_ftI5&~CN=e-BH`vY-jusI`9xO1G#ClBSDl3dQ#c4{DO5h3}tyysim0x9YJ zd4T>3=S@lz!j-%=*5yY@w7h$I;T(%Xzk!Lv2GDX*-0pRl?lQMSs%@?1i149qv0ITKPn*8|m~MXu*A+CSz+n z3M3a3&-K^NsOd`Wow=iDxt=^KY{Td;^2 z#!`}g4-Sx8mLl8zA#o3Umo4#4>v<;Hq6cYu;!Sbwu0210#Mu|wItOUntti*rYm4hI zszwY)Bo8DvHTFtE9^u-?T|ZiC{EVKd0H0~wl_Gq z1J8UH=<7%5u5$@j4m`uGFK2tWQ&cOkIA3)VU#1`_7J~m?(bBI{-asJA-2*yDjhuO) zKf!UK^bNR=$IHZ&8VcfEiA>cyDrYUk#?<5JTI5AY|4Gnwyyed^rh_fH z-M*$4yS+{Mc_sc7NAa7u-UB4I?jkt0GdQxB==vn*f{jNvbY=wcRNWJQiwc>glTrrE7dO`g~>&?Kc^*Ee- zR)@dc9{pTm=HY&1&X5@NNyrwYeM(A4@BHQA)zq+!oRtN$si`YZEKyB0oBaEd|0?qC z4E7fB?wH3aV7MdnhcoAAlUAB@3%*Y9i(05k{oMsbzm2fCVZSpA%|UG>^<5Y(If8es zXT1coAwj&z-OrAGHeA`C-WDXsNGwqz?Yl!pf^2-=)bbLhqj>kXEONYZXMgP6`{UcVc1V;LoykL~ zqKzVrx^bfOA?8NS-JWodc1p6BQ;$c%F^FrSfYW$)h2%HE{3386Z?#g|9Z|+^baOJuqe{Qd2xqzOhbJi}THp&w1 zv4U$)WUF&a9-s`ZL3Ct$tV+P0e%F|M6*oQ0$}L7JRP{XgQq5c|-TEDI?^i=j)@X!n3t zD%O)x-t}}G7;UCqB|i&ADW8{sZ*#al>ZITIS?rv>1NK(Fc;p@zjs`4)qLfni(2~mX zfnjaRDQ+IG+;MiB-)^Ok(v7%!DwZJSwUk`9a4M1Nvn_$s zZ&JV2P|;pk0=n-Y@I|MkJi_d zpl5gnwD%s*_IT}$4y4q1)Yy6M+LoPzet*upt$RB7Q--=(uR9Al%231pngX^~BYCrz zbvd=tQ+XZX%7N8kNh(Q|ysq=hUiZB`_W~cT?yHbX!MoO{^U@qYznJIa;i(DH>f?g8 
z9NpCJJvDlT&Ji9<$W(aY95`xWJO8GvbHsLkpmqlNfswoWinWrfvw)R~*4Pz#?gY^z zEUnw%+nir%Pq7NkK1KO`(b|nbax$s*q0UXz+*O~ulDj(~)l>gg?DiV@-2*MDudX9* ze@gb(k&koLgQ5T1-a{S7g~aHGM(q=ChIlys;wIQlpM)x(J>4(ys0h_^$=DrF-jF>Iuhy`=Cb}kr|HE=~J*baY0z&?&7BPRw48ZN{}|S zVx4c@O6d7WLFfNXB+R1%L|4NX*HZ`WfWASEEqs{(Z*_z2e@L0fp#6JL#zrKw@^E6* z#a-HJ)a(qPzY|_{%=bFva0Sj=N1YBKg*vC6zT!egp0A*+FSP6GHRt6w}5#*uoqCKZLooM(?3%sCr5%_1+!u`=is=`%ni<)LYk?|9S!M< zy}UEMh4caE+!;q8>u8}GRORTD?WHxkf^#+3Mbzr2gjaZLCHIxg@{-eH{0EJGHsAIA zR0-RWQ!1H6zpa(@zsDQCFL(97-ZXooM^lF;T1wBN8pJv3=8*H8g>-kmr>(r6H$${c z)KIR7vBztUt~zYxEP{WXrK7x5&z9cI!)OQjUP`+%_}ZDs$`*f@shsNp+>h|~N1zuf z^E?EeR#vyHgx$K`6{0l}C3kdR*u_}-@7rnABO4@m9$jn%Z< zUQok>$fA$nW_7i`sKfbw2DE2S!TD|H;H&6FpYJ=QxiPRh09h&A^N=!cP>z0$3waj` zwL&+rmaV*BU`21qL?G@0?JS_hoK@+59ojsjIJM7CMaDLfD!p_4qb0PHD%9vGDCs<4 zaDJx}zlHR_+OzHnvl6^G=Uhr_ zq7J{KCeG@#@29<~b{`AP z9+dUQOAlfbck$FC>N%gM3FOvDEj{x&C9Bqt`15Ut$V;y4@G@kcBl#kndDv$xpRI>0 zJN0o?+e1g8U;Hj)wo=g^ZkWeq)c;wuUlkm;^3}a+ZGU$?R$uCi+#6}7jkXq=6dK$5 zGOQI_+Mi1~N6WFfz3BN5BGjL?>AQCwz$t-AHAEX+V9HK3*!k{8ob<0KCb6>m8VVq{ZdB%A(wTOhYgJl_s}I$k z!0c;e>()_sahU6nvZpTf6?}h`Q@v%s+9##+ILgwOkvlB7lI-aCX5PZ&&$%`smvVjU zF4Vz!8yCSfb@0CWQYh8FRcKv1c04U5o|0>9YoW83HL*pr#X2FyE7g}@uX}{jbiw~z=lss(<1KIv)|^Q(6vj^9{%!HEw7eYkCbx^ zVGOmFHmZc{1v;NNp9h!Iq91xQa^*xgR*&Qo#`aDnkAj-L7*c3wpm5~oR3zbjw8?eA znR`oI3-w(fD*O6?hwwfX*-<1r@#&R>a+4Nc&HoT8@=G z1~~0Q=T0|{El8o`qRlE`OTWYh>;bp7-vCWJ6X-X>AbtUkUw|58XIMA_)WYG10L6;~a-500E5L>G1bUL2 zb0Hlinn$k2UkDAq%)2AC_Ad3~s9QFL1kl4aiZ(bYJk8E<*#UkkXU*w7Xkt#$m&w(2 z0n$z@t?*#X)o^yx8|1YGPIRT$8u~Hr;J8Qd>3S$^7qr40+H8N)dF=`(xZ3Jk-j$@y z+~+{Sr3P@!Tg;sq7DGy%#H9Oh=^xOedm%Q{lgJLatPgh0816fVRYlr{_loV z8w70i;P)v+fa%}cT0d+S@a@27-RN*>G z{Saq@ZMpmbpte7;BQ<)Fr}eboK2TK~xNxt{jkK_<*{1-pyGf5A&TwqQD&hXXUEADs zH+_)u73!L8x`|lhLPD1OU*geTK>3_^M$1gyV_5EO;oPIn_~V9z7HJ@-98uQ7uR#yy zuBCeaC)jA9zfg;CzsZF;ZtcD&t(4`MwX0Oy$hVF?-BjCts)VJRmSSf-q+v+)9~4I2S?#>OtocIig~Z zWhD1q1Agg28_xNTHLkZ3=Vfx&_SufpK9WDJeFn^#OK-kO1;Vl`?KYNrwUOIYEC+SL zsT@?*COj$q4|j%&X!jnXRp=0 
z2i@tq0XQ9NTmS~n;Lsy)|N3M!zi>@uBy1|V?h&-nKuDGIKCtZX8ihOIuVb!Hjgl*b z>1FQ@DYGBB`q+?sg@s)1W|5xatM)FZE`PJhbo>(TD}_IXlpmp6(Z{<#7plICpyqGEV9A9-;f8`FbNi{O!1Fe;>wMBi&yD zy)LKq^!4^bPu~iro`!25BV8-99#3y%fAG_W+^7P5m0X-F;@F+LJS!LG5YiVZe-Sdao|dYj<+UK~ zd0A%dEJq>v;};H{)Q zb*Uw-iN#p4{8SM+Xa59H^<;aCCFosZlN0zhoq4e!I z+IY6^9#VAr2(t9F-7VSi{iR@|Xpd>^i-EzFjE*>G`tq84S^1{Tg)%d@C!yXs zV4{hy?qm8Rp!DAtODWm^NA#hFeWAS}=tbqyMoRxH?n2>6r?Xd%rTw+yFX!!Q>@G*I zlm=&zgFj@m#)aQhpsf)w+% zvm?qiDC06p3 zay)CcI`qGOq}*>SYaztrNF}9ct!o*X%A-G6k%P)}05q+ZR`1>gRA)vkcYe>c(CTmf zy2*FB8?`HA&1(>{^ZmevTImk#xWj<72}zfuoc6Uh_a?IDLNIdyRD1z3l>SPX0tyso zL&37XNPo`lETJ52a7V(+qWz>D>8}>|-aK6hO(fi1;R?UnpVdlJ9yLT8X6#B(x|WbS zxJD}~m!zQ!xfY!N^-+%E+HX($Pa0l{Of#KpYi+T6fus?*ml8++qH85@Q1!;(;gcd-QU+K?@l6QDR=i~u^rum z)BPv=Lv^l&cQ)!3K{?5ZO1!LN%DExLk>@HAYVZC!cybZ3oJm;lK9aM>QHj%)jkO40 zLB83KFSyq@N6^BT1SakK*tb(A<{Y;ybL(racNKG%-AtaADU{~#y>n1`rYWIr!s^kZ zP){nYYG-;1y%@6%rB7#EOCh#^GaIIZ?_R`nZ!_^W1WdcnkhcCbVmKS<^3c%sO&<;J zb(K#AZvO!^w1Tt;0816EZBIsx*M;(2Jy}B*+z|EDm-G^ODKq`au46=wOnnRv8sYgL z(7Up3_7y5fgU;=gBTud)ydNVoGtnMOH~ag_4Y8+oaUVDP@s2!ms|xXB)mGmUek$C-RJQWZcrkXouw>{C0kS0$AroR+NQ zQbT4>+diPSx;y&isM?tJc*dF@DYQgdp=jq`D9x`R&7*vAmo9rx1JGmcy6#U(9UId= z7pfbv>sHa~`Y(27l)Z0I;JI+(L{j=I_NJh_-JyY9!PXy0;|d4&t~~_l@4sEVZ$cir z*6pkG43w4c5@$bRYDMgT<@G9UbqeJ=O4v%gD*4Dwj*a^NhCeZ0UZF0_;oy_`I)m_0 z5l8x%OKN#a?wiM5j_Ei`TTgD%pQWaB4%*C!V>^k_*%9uuvM2X>@U6=yd~3=VIrAuI zaFp-voj*CgX*t+373!##+n!cb1G>Xi1^#euRs-R}rCn=1rf@p5-+2@TPwEi8cH-lg zP*e6IY7s|0roE)(_9yk$U^*$KKkKOe5QFY8;3#Gvs9;OXeN^g8U7cyaR^rYLIF$Ti zFMr>vq!BaP64IQuzOAV@SqmlWXzl9SkEZs!b5@)+dS~R;LVjh8&$_GmjG?q0+8$G{ zP{utSYU)U()$N=N^Dm&{0xPRRSgtPXO>J5twqxm=cygwsJwIpX%>nQBa2G+@gV1ng zuH&JhCE!gvd^mXA0=J0;b?8#Ajs@E*9!5@%Zd@H#cQZC;=f2KH5?qBebH@Mo_&xz_ zPs1ji40UZGuS=0l>Op;aH-(4Sm4y{T{yla|wy$;pl%|CDFI69Kchqkh_46mFXCgzt z13lMcrE05771u|L?#px1kF=0Fb2K5T$z8c$e%HozcXM~;twpF9&)thpLu)&0U>02H zNJ$@{s8QPPKs^%ephfGt@9nUsp2Fr?1ShXTHh+i&b4KPBXbk%z>50D--dsaTOVH^0 z51eU|-hJD~m2J+!5=O^+{p&$1Wl6gyVQ=~G(}Tc=6niEdaT-s*gpvk>m1ZRPI^rq$ 
zt(S7O;F>-5Y+L(6Lt?~FJ=vMu<#PM4_JtjbPHM4)#g@GEAzg5-v}497={N zFeMc#7i?Lt>Z0sBr#!9!jea`Q_o0Iye$n%vL!F0OWQ4xVc5XGxXTxCq=N1k&qyL3h3jh4O4j z*q+*`mqcDIgnRy1hw@VIS%nm<$kBX0>u}`cR=y13^bdD;kgGan9q*;j}Gwf-5E>NnGFJ zK6ACOFqgwI*HQAR#99M(o3VGEfENE9?41C`X}dkcx0gu!t$?haHvb9l>%f|OK0OTe z>M?DEzq$a^uL*b7=3ziR4SH~Fw2>T+3f(^mo;w=I`T%P^kX{6g&cM8b7(K&!ZRM%M z51BI4_TP+D(@Q@gaOJ3=6s%uzF+8cYB$b{N`P$EJijWD!*D?@qXk9MK&VsC6D zJ;p=H*IfstY$4q)s7xGQ6n1qRVQLfe*H+eNF_v1n!YNm+9SGK{eAS*-rFL2squRQ!2XcKm^;lnyBRP)mj9uG0rK6+$#+(hF>;d?e7N_}G zKYPu>;k!NHC)#;vGwt*`bf=9wATT$HS(Y#J{yNQ zim!5v=x#`N8?`VevbYc-mbv(Pv^~CJJ#2u5DEpwMb4RH*pmBeiuE?bdkSo>2 z+Z|Xcazud?st{)|JY57dOUR`W=-q4KNI0c4e0~D5U}p4bCIjaTo}D|=18zHtrzZ&$ zrx%juYTjRf&j!;T8-PQJ>d^)aYF1Yvxj*M6l)8vCj_VKRYa_DlAXZYXqvU(wANNgD zLTH;Wp$6`|*9A_`+{Q+Z)->l)WPSG~werduHj>)CJ=^efX_L7>t+VLrf-mG2>o=2k zEq{G_wuxgh?%+L%QuRR3y^{5Q=ijA&aObFT3cnz zyPWQDB=@*us*=S&$-l~7N$yIlZ-eET)YLS#XU5Cr70J&9C;!+*j|1v~YJ~$RkZ0 zJp5t`3#g$KHXjN1kH|>r z)g8>cpuy8RcW2iv=v93I`-2r*^+aTi^ka*tyBn#!5Zd$ebZ(BlIBTcG^Zx0hw4y#8 zE(MsclGblNWmp+gI?ZQeT5dJclsyyI`s-n6iLXiv@i?4X)(CN&bTjbx1%5ruw!K`L z;{)nDTP;1H{eV9GC3{C+mgb-Fig=sLb!XbZKC-$+8gjRcIY6Vv(~Dlfne%{9n$X@C zfBJ}Pw3|?Uvu|pQozRn$1N-veX!x^2s8-7e&Vn48sAG+1bJ@aGiFX>k$-z;w z9*8QI(M?dXYfuZ)9Rk-n4yb)OfKqIox$uFb%LVDpX(fIl|G42Q4d>mubEn?d(}4d5 zWS+CvE{7YP`>UpN_e1+KtH|4Rw?a4|Jc5pZHgE``hkdT>fh?u&+HRI9$KDk^SpRFA zK8$6=eL2!h3^`!Cs$12y`KPOUc(=Cp4{W`pUt6(48+D>jqAtjuO&gSE&aSw!m*W39 z^pv`PwBeh8b4heu zfwfk`<;0UXrBDB)Vhsu~xsb1VB{Tm{X!JH9t@6Gj5@~m|w!3pV1KrW(kBI#{>Yy$5 z6nvvJTh5!*Ca ztU8yv4bPwUs(fKR?A_P~MT7}!g*UG)T$57`6^^`beHGrV-^*upoOVfm&8zhE=_7f? 
zKirh^pp226C?)4%JGeTzl^Wd4v`=!JgD$^`@3%vPmxFCrYF&m$ z!yaJ^6w(>Xd^3H+0l`~hR(s631Fp{KsJnimW@6g{L*T?5fm0q?hx0kNKFSps+Hxgj zZ|%dYf`wLa=DuX=u#U9qsnpPEr8(~|_pp|lwJN+iqsU!3wEgp~4v8YaTAFitw1aO& zri`PtABb{h^1LtDv)`|k@Bb((^ipRMay{1rxx1UD8v0h+bOjPm{F>vS;2!tZ($~^V z%x>uQf_v(zaeUF5SxDZ_?^+tG)MtQO_3(T2m0Ph7oCogwaP7$o`s1Sx_(l0${dzY4ihW+HSZ zq`P2QRk+tU_k^SCz-a{-ghdJEKNBiEj|_<=bkd|V?q<@u6`oqdhBog}oZ?)lEm1qZ zJVx_02aJoyviG`)=eMcB6Xc^@b`*aock{R%{(F$}?*MOKr-s(;9Hg)_E!^#L4pg#^ zoGu4{(#i%}d@B6=5b=6IDf=T4{X^iP#2QJ8Rg|$DTZx2+AUz&ze?F(bnRnOj1s4TbW)A$Q*K7O&7pkvIQT1IaJQEJJUegx7v$uA`bUy; zAE195X?haropB>>?7P3gJr7sUX@&Y$@bn7bnuAL0y>{5bQdY`ttp!(iDLczt%Df6f zGA~2=$M-C2H@vimht6CCU5p4mlk)YZYhTv5%FRmN%q_2KI%~f;B-%*gRf*FIh6WQZ zAGw!XxfV=4Xb6}`qdS6kIc@m_UgcS|s55;23asS6S@Z%qE@!_$+O)S+Vv2^3`>Y-KhOJJloSr zPg}QkY$kT>@u&k1A&!)GCh-;_#av6ffO8RHw*|-RA9U1PTUD#-gs`I2zSh_Ep6(E$ zBp64Ht%0&aP3B)UC*5%+^HGjS zCCekNa&jHvIX>i?ZK+ecrABBAxY2{H&lR5uU)>)|AM9fCewEhN8m$DqEG70S$Udpw z_`;wRPkouzM(-%8Af`3&&7+>)wf?&XEH7cy=6Q^i{$1}5>gnnl=REfSGj{>^wUna- z{10NBM#}T>p4=bh45GG8F&ocy_1W{cpFNp6x|(%5XO`~Hd)fls(RfnarzOr#?Np@} zcaFO~{)Bexr!wTfzGtsq36T

    KQ_F`qXC*SRKl(Wz{nUT>qf_IONfI$)!9u_O5O)Z<`D49a*EmemiuC zhf686`8c|~1AgcO4^J;$r5|S*4^i(CXz0E`GuoR3{NAaw2W>X4yD2tiII`J@9)`mi zv+8{a)~@s%IU69aFCgiCkT_41m4Y{4149GaO=0XZLb($b$T-biw3iDibwR7av8OivT@g$7VsfuPxhgI~I4#aQ3%@pKj7_!v`I;xAssB3i5uh=Hfa>moT z85&%XJ2EB%kGIL{FEHn#r|Ok#eRJMbRRrni2sHcAh4Nt29ys)*w1WG+X!$yn>=4Xc z9>g2f6n>wf-Br{oN2vkK@t1(_Ah0<|o1UkQgf^RLt33Q@NvlsV4(&#oF7w6yDjvRiCxHK4(RB9KX4s~*V>GW&`!JZBho5`CiByQ(Y6hmTOZCW=UwQJ zYeJuwz|6dId7xtjzW>Fi9kzWm*30<&o=9T?(8bZaqpu~l!m~7R*7vcB_a|umIP&m1 zba{+g%>yoY|40X5T|)h4$VwNuw*rak171d*8-L~{Q~cP!K_iq?5+j@gaQ)AAeCD-4yU((hjQbOaqs!G>+5{YgMGhX3_TnSt&CG%@RY zWiX26dvnIE4&b~9%kVa^yKDLZ>#uvD^&Pb78vJ*->zG^>Ub`YT*TE#p>8H5|j+j01 z6Rd1Gv{}h<7v0QVN54(tO0qph9Y>6KEVuF*rD zu5sdp!Ep*$KZzcB8(uxEcDaDB#~IN*P1p^LA{Zlwqs^=6H3A5xVnNM8;jQLr$m1?> zakXUJLp`vIMNS+a+rR^5XBpD(T|wUAlR!<^8WHqc2d;S1q8BpQ42m8PaCsFD9--IO zv^qGTX%ypVPkMJZ*i3^rLBCAKwdcXqTqBi{NBv21tr_LaJ?DKz-}7`WlplZM1-N5XfGM;@j%dmG^?hj<(SLT>aq4*qnzYM2J0&8icZ$FgtUgCmiR4X{;n0Oj_ zX^l?o=ldzRBC4C zcip`6s4rt&GE#j392!A2SF%w^S5)p(rpMZtX~HgpBgv_vVCIu89~D3uP3lfcxI4okrCKB!uX+4^(Pdk0(| zjwTt;zYUm|BB@*GWfmjdA~=VKQDW{KWB1>AnSR%<;42L1v6^)7zID=wHL)g&FIOcu1tCu~!cx zV+oWoJ4Y_4wGtUS%y?(c<0PImr2nq`9SQ8;AhWO0hxtOZd1YxO7p>NYe){zMe2DJ< z!T*Jz$xddc_fg9kT@F;!X(Rkzf_nNmbKZo1X9Rw$hl8h~m&^FWTu7kNZFQLkm|ZIm zFjVDZgjX!kx^kXRTRotaw$|G{rs0XJNo!BSsSQ{QV|T*5Xl9)=-+V60YV!{9DT@>j z`d_MubG?QBM&o=! 
z+-d=x-|m3swHc|zAZ(`(Zr_Hc+-SGS?H_p2Nm`3N>;8>2V;)O{<5|2Q#eY1)uit ztOQR(XdBb|E6AMx8I>1iDQm#UW6h_6H@?+7tUvi~Mgu*GM&22h7uI`k&J%r%dVoED9m4m&{jUAr$`wKv!;^P+G6s9k zTLmmZ8n}B)ZV4oMb07^LVG)cexP~+}ggVYWy;;oY3+Mdv18cMi33INSjh^nsCUr-r zj?q#(X6yA>_hs|ceIDnfM&T8p<`!&}`!MNPsdTK8bCNK!;X1ZtCeK{`dje}PHBO>| zsaOYd;P1iuCu0-$Fa~J#Uc+iu0($2+-lrV{b={vB47_GB+Rpn3bYcg1bjB|9;6EdU zI-%d5=v>bHKa9)P64s}lQRJ=y*J80(P}b}#ld+_iz}b789)R!0&m>WE7d7ezYX##Z z_0~70gz-OS6?UX_x81DCSCK|gG%lLd&1}x^HBYJ`WKEc{N6)5jH{hvF!?=FyBNW@9%}VikJ15N>Xy9b*xy0N*Xx zD!qq0k(UARIhE(`Gke1BJs|U}Y&`V$oY!#rbyx5*obL_wu7h()ESOnHym9$$#?B+O zpG^yqjGuqS2hc2_X0NXEn2SuY}XmJnn1Ve%p4X_ z*4#j?X=^@Q(GOJeiR%o@J%&`aXN2~wr1zwTje~}nP-i*Mnloq6)BhvA7(Lk)YPmPp0d8&ro0-%!x;_K>c>!4~z(`ty8L?*v zT@$#5J;QgSoQxK2k95BXXN~r60Pl|T*1IvkLB^Az+T-Z98tPfVbm+DiZVdq5foO~o zbUSJNJt#7mIpzrJx;I}J2OM|TH?*pf1z!kfa5u4a5dqe>sC1x8T54! zTk-@oPatKU!`6eVZ&5GJBxJxmmQ@0s^%Rk}n)bp5ET)W^8J`Srok~x}MR?lR4EWoy zHr_Yosq$gvZ%Whth^_6GZZd> zG@DVs9DVg{rN<$80bofls~mZobRlwiJr#iz>-n z%)H%7aZ`F|j%2&5(unao8orv>I2*YzCaE~m`x~^G4ZO~W3ow?L&1W`kybp{U8CBXq z{n0>Ww)6AQR?0V}jJZ-S@)-h+v!UooG;4bRm%Ff@*oj7}s{^&U85hw0a_HBEw)6At zAv+NCmJKq1$fS=a1)0oyC!n!{NPdy!L{`45=vKdO`=>oI^0!d7QF86_hvA*U9uLC_D`^@U>UErSk z_q1UOTBcvXn4>K4jbw%3-5|!97K6?Mkf(#-m_n%sY3U;G98dM2*W{^IxdF3@tISF~ z32lC5b7zT*XSUZv+vDa-)(BK2#L zAS2*Xv1|1x@i`ywc>4{#>I5&n4KN;v@jpfOqGFm zp2Td2E$j$=^+UZvKbwFo8VMHe6Yy&y_53b{6sPfX25CJB zH}pp?z;4U~u7Nx^@3;63MDs?`>nzIe1)|5GZGJfANq!~&D7}5lST~7!t&uHnN1ViL zUhmZv+VniSt3#s&@1aF=|C{MA60GxKXU@Sbqsn7xw=Q%rmvlYeegb`a@qRaw-kUPo zoMhy@7}OaA4(3oR1H5m5)i>~F8yq&v#Bj#s58%4DcE-YKPmnDK+vRZdRXA%tv6l4Y zsf$cnoliYupY;?>LMB?ntr$v}f$1uCpdRz29!Q&a*LutHElBVuSVwnETk_YOOpc!6 z-2u(ux_-@x^s8^xyjFTEqp=2CfwDh*SjGr(l+pAk@O6RSoq7HMf5odMIDSgKs=)RH zur328bMa;&A6Kz@-Zx`50=>hVc&6X+V<>GNsW2ma2Qc@vhNF`4%(p-y?$$V@-xF!&B z^GLw;wv1=SZMLDevGkt~4@)D>m+060+Fj9BcTf62hcx&xi2wQ{TdxMzX&^mkK&M^s zdNnq(1+esm|1tFL9T=YA{2aTi*TE>Z`)Rc{`k4WitI)I~VCAiv&I8Pst94ZK3bW3f z2Pf9T^K_^;hQE4YBUx<~#G7QOJgt_29_L3g9*6&H 
z;cXw_Yn~8Q5dfgYPGpk+|REBJ#*mp7^oL(jdl;~J8!cgjd*_de&M$yJcNCbaH-1;cpL3*B+o zGy*IdLPhh8R)8mw;If~Y)eze7za`M+1;*I9;F$vbszL=b3>Zc0tvA}m6WAy7N%lsU z`%}Iv>s0rs!wBb5l+veIjb8O89i!Dnv{Z!OA zY^$Qx^RO}9sBgYpPoNuHG8?`wM~>=YFIqvN7+_sYYu*$W3*XIsQ;c4`(|sz`o&p@+ zPOut2HKv{VX#UE;!ft}E&TOKYpLhf5aHP-`W_Z7Z^Yymy#~6!0pxak)vjIF>%oF{a zN_H{iWgj@s142(Qnaxp8&q4IPE%SnH*t?48>&M{HlQy<8Qiajc_XAT3TpP~sRL1hF zQ1~>En9ucBsF;9cWrL?VVdV7*DCK<#dd{n1L5w5+1{z()vYGcJopw_A-3KZ(f%d;6 zQ+FV2+Kv^mIk9^}&!H_*Q`j?nLR2UgV^8pi+KuD&nx>fOd)f9{43;WvGL z`dID&zMjk@%&C@$p48@je!jiPU+<;dhwM2^Hv3yaN?!){TtM|2Wp2P`eZ_y?)chV? znGP*~<%vGK{_w|h)q3vU=6MV)+!f$hlV`b*9esU=>CgKt_d(x%L7#UrBmOWbkpt&l z&{e-;7`vHsFGMaR+E`9GR~yJepPd*Mi7H*s6C-G-d?Ik(AuXu+givgZFy!{QPe}cmKB%g=nKNS~C{ge;$ey z3+%*N^s@tF!ay|V5LSK@R%Q!Q)fhQ9znBqS9npxI^w}RhEr`@EW<2;me7y;nk7e7y zjY^ajp-4)UETOT)5T>#{HMVAkWQ&Qh3xg0*Jsx|D$TF5j216s+GJ`x>L!>dGG4`FI zGD8u^_xs)d9PfL4-*>#n9RHdB-1oJd`*q#teO~8jWq5PudSa+65)^~IGdI(kz*cmJ z!d4jEfgP|iz31wGfURY=`g3P|_25?Rd`A_V~7K zEEY*POJTD-IqS}+8@53EmjJYX(RVR@Y9U!x({fL^7VG{eD?|xa2%ot2B2C7@xodox zcBxoB*DJj^)}em@2y0?*`oq(0(4#NyQdtKNvljR!x*^b6`!bzYjj)ZLrRj)vct&vq zBYj0H&lM&y!u_m6*6i)XIM=ZV<_B7a)f)uue+HgUSpRx})!#f(Ji%(68|-{0KtbQ) zTDNZxtJXSZn*%4#fTz(Bv(d4q(K&Z>gW!g5^}^|C`^k1BwOhOvNh@&}Nf;I6rAebxzw`aQtI?A+I}cfQj*$81^b*sXbCuA&_1)&U+& zfNIu|Z67=*G=ck%bL~p7_M0tbIXmB%!D2FNtNGnGFw!K{wyP2r%cpju&a7i? 
zrtoe+Vujh$(6lz6Y@jfb!WdG$HuoV%Yq)B?3eQBCB}cg%3}wuWU6Jt)AqAd$@u}4_ zs6FA9K0&>=Z=j_;*uRhg?gaI& z(rzShmu0NE=us}5T0~FJ1FvDuOFRph=id0YI@ob*tmp&n0#|0D&wn5jRoFcVzo&hq zU{9Wm_CXtS^ctOJjAvam+F0IXsBcZX%jmghMT|Sy$2DUO_cG!MB*NYDEUtHe21Xf~ z0q`s`OjHglx-t7h>Cq0D`a+B3K-VWSm-!LZw~)=V_IxXL*Pf7H$+(WXoH?xH?fM=I zR;QV_746m6_4GUi_U1=_o}HCvovo1mEfO|}eSo_u>1!O+7G{41n!S%+si~gvGos)a zHsJ&my#R-2z?pHh_6eyo^0ynVRc56tg%*cFdC#a^M!)uf!NtH*H-k#CJhM*##w;km zHn4hs0@V>(87UJ5h0NGJ8pu77r?)Se_P!aq0fa(ryk;RIa}TQ6xx_GQ2+CB?v7=D zwE#|<&#fpjl*yG}(8PL-Y*c(&Kn4BS%HtkZs)1ac3C!+s^_%|(3jYvX`H?G0>;tqV zDPT61-`#?r>a?r^UgnlED@GCYyCU5CH@3;AJZm);p!INcOPyW}3{S!pGiDv9w)en!cuAShp|mdogl$J$S-1CT~AhcP=s>4c_K~$YD2TwmRct zti{t1XwBQP65RAWRib4t_B1<@put@0i*&6-H`MX_0_~Uy4b3mr27E%^$evKj^QWx? z8&?thT<>OM%a+pn?*Qud%+wG)tq50JA<y8)Cy+asL7^$3m@i zsN1`ti*{s2_4WHp2>>&qBMwV7wOoy}~GU;6XvgF*C?} zU{)}&eqF%u6|R^A*SM~{VDEVaPinhjf602d6`3{jNN6vThcR4@-$(O|HCoF3wX8Qr z|8zt)J&it!QH%r%2anIuz{b$J4l=j`o}R-d`(PQ4x zF<`DI|4T4E4SWl^aFA$;Q7 zj)pFW_8p;*>q9rVbB29K=YXfK!{!5S2lUZUpfWI;H{ad!CaBtsKAl;aJ7YC!@g(#h z)Q^ISFQR{X`7hG)Eb_Y>37ZBSr@)~$(pr|g*6DZ;=x5WrVX#_jiO%t?GYib$q-}pF zHw8?`GUqWMTMK4ek=N#|O^<>@KJN8q6tfnDapgSiit_|g5U$Q)SE0=t%QzLW&(ARP zTda+Xpi(*3@JLpoNkL0JvD?9{1~T9~V>8ATLEByn?BGo1DGXmSkkIPPJQGW$)V>1_ zFL6b0@DeP=bms9LiV=2`nC&gDY-gNDku&osTJfM3uz?jTx;9#!F<#r}sB2JQD{`L! 
z9-q?NJgNQY`wS4yg+u184*3dS0!QP{=Ac9Kk(^@Cr3SsMpKV6G3((x^6DRnu2=G~t zpb*e@<*Iag3RzzaFE2ouYFzsUIa!CElmU--pyqsRs_#0?hn2~_c&`5itmcGD`}oR%-wL0&m;G{m+mm-vQTPY&~S= zjVOT}#_;vv)-N0xm=ZZLDch23lo6h3~M6X2eJV z{+3vZDU6>5H(G#$yDlqQcY*TT1FPb@mE*wtA=;#`%blG3d6d8AP&JFDwZ}a}Runj& z0y9rKd4l>wsCFW-QZFI}|H4*!2Ky#F?7*JQj1LW<)gHLohyUus`*X|^2^_*{#UW2T zT4%zON9L|ujox2|PgR3Gr1>(}GD8!jV>}~uWrwf=Y|MP_$rj%_S#5P7z2m{dD3{N$ z1{0CSUeG~FJpwc%q0mnDQD>Pc4JsSm+z8su2Jg3#$w6SA2A#%(=Y7DC%1-CIz($Ct zUdYF>R%XoVhFs?&Yet*w1Xpvhryzq?dr)sKK(o6U>lS)s_LgC+;;Vq|8hp|VT@49% zgVn<~L7tm!1f}*OKdws`n71e63}I*LzGECx77On48O<|2y?`twun%8>i}^gQ4gLf& z7KYXLE$?tN(z>2`*x8KYcL|P&1vkLaa{WIJZq_Et27k*gqGvk0T*DD zvHT6iYaL~j|A~sKM#~D&sXh0tMWV;8E3|ejt7~{tvn@C^}bear^m1~S4+d?l32P=`BokoSQoU-!`SnJKGqG$7x?Ln;)@7YgVM}-l)HZhZQFy_ zW61x!pp`Sd8C<_3khBPTM`I}ua?R`!W)RSFzR7&Q0nf8=+I$3$bN@HwArvWSPPtL& zX&-(YN#}{x+KiLKck5{k1rmK)EwKuEOFfm`iWZ)leE~{Ff{7lz_W~TPqiRH)v1SoK zZe4?;jA`_vr-(PASD`4glh|Y<4W*PP$_l{i8t_!yto2h8ewL#}IM0f2qIE9^5>XmR zvspnuV%$7%$c%k8(D2^;9nL2|J@2N?E%eIT5dUF}hH!j1W1L{m{57MGrrm>B#qv+V8!U9m)XMcUqEt%P|LjF8Y2ZyMBKip8h%r)MSZy`^z z@Q2P0+ar{ruMsokrFF;$_&?{0ZSjBo{BIwiQS(;Tp2%G8_^pf38eN@$1?h_3eu9q2 zaQ!Mj8;~WnGnO?uiC*rqjRbof-rUd5s5X}U5;zryimi}B{lfdPQxmZkeXzIVv42Jc zZw~gh>#z}hSS5^q8-i_^gsoY?dek4SKgX4x+%Ybv2>YGu@P8}6S24yZNcwyLp8Js;PtfIJ!z%K3HF%hJzX6mmzn14|d_wj_(iK)b zE4+pw3A=%}G3&xrY`swswOPdmGRkM*TOHaxj}*r7*K<%apwwu_&Oz5k29Q~+GzrA!@P zY!?*S60Bgoup-+3_kcM8OJ5sGJivcev(5(hDd7GIJ**qDo*6#I)>MW6G0d3_9o+|P zKtCUVXE|IoKhIwJW)tSE`yPGte5TN*Cy@6*8mxrt^P9UMJq=}9eLR``7xwKi z@@55s`AGdGXiy!96KL(5NuMb_E1u3wzQcB9_buZjB;pYA=+nJ9jc0SUH8Oe(X&6kO zI`BIUD15JHJXRJKrWUJi6kPRb)JXf!naAvsCE?3!z}GXFyC}Uk;5Ewt8zV3Eyt@N! 
z#q2*at5_e0ZuOzR)#a8ju5o$#O;>{JJh+v^)8q<%Tbt9!2A^$>KCwbn3U|(-Q#YYX z8`}H+zZ}$l8QvNBsn;_DtPTfs+=<<2#Q#dt>rmx1JUW2H`s843`oieatKgmrO#2u~ z-;vd4Uk6g@^cWN|`|lcfYE<~82~O+xXJzDF|#-*}u~fZbUWfoCvqgaMtt_YU+N1dldB zD^IBC^V-kvZ=q2b<6BqJxN2)!>I?B|9k5x04RAjg6+G?uhUYu@vkoe!Le~R8r4K?$ z@fmp$l9&S>gzLXzR!Y%Grv3|*m$ z?}$Pf%fkbC8V*-`U@31wCH=J4vpEecpJ5f-f~n`L#$yY%GP7AUw?nF>v3^P+9cy)bt8QGfu&r+CK((_h;OJ z>`y!?mxa~vRP0DJW)R)ueg~lM5@ylG8^=R+=o!7y+EY{kpotuCs7aZgGI}yp+&9lvVX14O){ou5gedjJ# z#a4`382GNyQy7(EF4y=lj7tLBry>aYeS1_!L#YtfOH9l(xcqRhD z2rzsr;KMlPs07UlBLM>$u{Go=Y46oL@q(MCfDeZcBJ(-l&;qoNI4k zU^EYf{^8H*aS3=l4_OdNtOo_m@;VtzK4u2jOXCCerbQx|&$GMl3YR_rg8EnkPs4i} z(*4&C;8zblf8Lok#^7#7T1)U>G*WM67%fW}JNZxHQc_~h;$ z`E8zeKf+!c^R@%+xx(C`Jy9?EPG#3(Cb}@5@p@uCJQJG`KsOBu?gGvykf0BMYb$gs zgzV{?sRnGvSsy*2a*}<++t_ZSb|aW&0NB69bCf4OrvOtJ@ZH7CzIpqDJNcoBmF5?7 zXABTZU3XvW!0iV(Hv>EKFM2G36Gl*)_wFM2-2$^h=yC$O*8nQ)McO^j*qjl3+hOh> zBNtE6D;}%Yn0?$8xYw9jj3RKC;ZsyWdg{G=kyiR8Gokf-^xax4zM=4>_HxEkl6qnZ zMsZy~Ll@+!Hxg_$dEc3Rf)!}So@F;b#&g90X9BiNAL~pw5z9(Z8IGINw+>QkZcg8e z&S9pd&?*Z0oMPrM#0gXKuYbJ%_ zKy-%rzCl-?L9f@++PW`g7-ItTF$&xov!2`Xc`rSXj!D>stw6OO``wooJK*rl0OlF2 z6#2o5fEh?nGLB_Rm* z%^()?I~iRyik3cS-2R;x`ForH3eeiSe)0u>{cXF@kA4k5#wJxnDnb!xp~$2Aga52V zuLsP`ZpKcXz-qK)pP)~|c1QW05lDvk=_?rxX2pwBI;_nIMFqp@#h7>)@1Jqzja z3X+)&ws*3k8)@a~-n+5nR!a~+E4x0oo?NLU3987Y6pe|$Si9Uq&a!$RL z6KQ3n+ZRAkfNvYYWpaS`Q&3q?SuJSl)9^F=F2Mh_xn3Vy>L z89YH+IFM2+L4^FDMY-!d`ldX=8j>E*rQSYo8XEh5{~i_0CLGS<&z-?<>w)wJui5bW zLu{5_80+GtAOn3_Gs~lixxt>tJhZvUO(I%sfqs_q{OlKu7$|~|Gaufw+ z%-ER@_08Z?nQsgDehs*nVq2vzW7F4=b6`Pgd*;ot=nB8nhGEtQ?N5 zosKPVcQ6HOF@gPEDXti`mCa7yYJ)l0wsPpk!`O-0Sei1}5iOkWcFkwz`3Iw%dtsk; z(BHh>?tYS(F9kXEY2@6thHnI_V^#Y? 
zkx+gSYje*8yI<))gq0zibz}$@bI?DydA>Ur2`mCcW}J%xj;YwyRp{0{5)8@=P-`v0Ww+F2f@!VU>AC~x|XpY<=LSq>&XQA`2=ldhQ9RfgY`WR_DA{c z+j*<6{f^yEMOzj^IWr(s1eO?j4`H=1&cc2E0HDtZo_o!sve>!F*Jr}%$5}P*28ID}OaD?^q`xoxa~fFh)Di z=%<1`MJ!e|hgn_+2mLoEp{DsYN-%3-{u_jRH3QD^Q0O`I<1#un3z*H+^%e4z43>+b z%2?pu8c3v>XUg!c8G2-YwBzTO4lV|w<96hzEb@}gO zDA@rxVi?J+E4`5~&zQv{5yQ}{-aw$oG7fC(2HNO;&b{za=ra-62SVfBd~$(0F__yd z#l}{gWsF;_=$#mU5R^@1l*d^wPe6kr%xWw|Kd6%qUSZHMYB1dldw)$H}=hPevFp$z$Bfz!;a zy#g$)J75;5>^_!;_AN?xG@=!l9uHzlv*C~{(Pz+6&$27IQ6_0k4ewz$nDyenLw5TZ;UN{ z16Zs=lEEmSK2Y|P2@X+EbPilzj^6J@>u&}thQ2*dl$Qj% zY{o7QED`+mlv50rBo&yZ)7}i3X5ZYySV`OugBPAfHYOwy*=hwH{{a6X?EXD@try`M z6gB$gec;dsdy%KC8SwZN`e9wqgUHSRxb`oejsC@)KO_C87@;M<`0r|rP#rmH&ghA7D3%rHUa)RK+m7JXhI`gF{F0xh zw48*s)aUUGyD;Ad^=1S;PvW%{X*2#Po6odho<_85!0voFEB!Z&x&j(onMIFYZN@V9 zc>#7v{ekFC?B@tZ?!mhJ5imCeq8qgBf>!p2%G#`-z~~345sS2K!n)+8O>;bDxr}uT zDfGmm-p%poTM>BR+eF`*n5!+9Z=P_>qP-bJUPMxtF-l=@RX#kwsUKHPT1B(~xclzs z8)#UY_2CF`-oyU+C!{QiHs8|sF=p>VyK3wvtd!7*@uT5?82B%S>Lp*T%!+Q_xpZ(E z1>E{Wzhu4=Tqy+a+&!#=^S6*XpG!PzdypGA(t z>vq86d)t?x&|zTrr1=oE;t&!O3A}C4^=IiboKc3c(l)@dwBTc2M|ZMY(bgKldK1db zs;6=QV?2TudB!9Px)>X(r>Fq>v<$eT!8#Vn{ecri-K z`1Oid$#e_wB*C|RXstQUT(L9I<0WWq5;~ECoi>tc7yV*@G7Mdb10OBGSLpPjKnjfb zHD>k(+CCXNTfuDxQkM;l+rT9yWh^$TIa+mtab9ONb@yua2jB9>F`uUc%!cDRs9&JV zT&%_CaBdHrzu?L>c5r$n^sVW6d6SQ;htXYLwN66>-j@PABjm@X znP2`a_gbPQ%NfzApeoEU9J*MIH3E6;#-|Uwo`oH`6Z!X4<|rWeJNSMNR`ngOdAiWp z+w;uuU%rR@ee(hct0No2crPQ*VT{`7>s`o#-Wh#y1&|MO*h$;}!iRg%q1zrF;k}zy zZ!!8Ez$-3hz&nOK|G<13krZ<&KLrNTL#?|XIjPOMXx-9kjI<6Mrz3}7(n8NuN#yGb z^kg17_&BYMlbRF!Z|&9s*io~*m>Vr`ARk5pdUC!7`aOysFEYn(+{+6T5&ZlW_^hJ1 z_bX$00#+}jKDn{{{DY|>&psx z!Mx{Nlg3zyJFsn2S>fGdu3<-M<;9911aAo@&|?5r&L>mPW4EE*0_;ZUjc_FM`vx>W7G^cJ`YiUS z7m{T}#8%oLru{x7_j2$oe2RID?^+0aM!0H0o&v5kxRHciap&6%`)MADIoS4Htk|8n zvI3bZ66_a0V?5snPD8>b0F#j!Yq2ve(84bnry&-_%3+?gZ-dr6j^#4?z*A-VD-NL%<5)FbaQEkPKriB)q(7Ij0Spx z5jU&4xb}q3vN-pf~8Z z5T2OdMK4bbn0ZpL2YCJreJ=;A<{4yQ06ot_%?|9Kthv#F9mbzfur+_{GmF((&E;bh 
z-FbQ$nV_d}KHN-ZMZbymCDEoPl$eUdTm$*#;HM|7t)^p4%w=@p zCm^iORV%b?hf+s?@!yP873dEkF;&2|Ffxk}G;F{Unx`MsueV#><<^Y$zsS3zP67)R-$BmoPgELVQJE7SxaBnXC+|#p=E-N4r!v#0IMc~Y5n+1QVw!T*uO8jXNMGl1AEM6J+Z&Xj37V{%0w1jbl{S;7T0^?D*vY!97vY`q~7wPG)=Q#82 zf@)8|r*`mC@1_-Ot^4~Na$=;{Eb!Kcd>*cOo~R18^AF%(jdYq}GF0h&K4X-D+LKs$ z_9I=MM=?fdH&kwhR;f*T<2NAPN1**-Y~BWTJ@PG)zgvSAmEq@G^tD2hHD`Rz@$9nx z{y0{kVa#?M{OceU#{2!qJC zp;szWssFw>dUKeT-D#s==PCM`CGINLSi9`L^BwHWD^R->(rliwR?L`z{LF!?^MNQ5 zeJTJ&harz;pxeFRsCyii{aU-<@9P!FDgyeHyCjsr2Ob_rM$h6rS)b z1Pvzh6r>EAk2RB#R)Om`^fWfN8=CLzBf0hp7_Np|rTMOJHHrSs*?+8MJTu^%`BA@# zZ+YwSlvkxUSkvH1>20k4J3L+9g;b^^EB}Tbd$2X98L=?@%@5_5@a%jTn0=oAnAPNCo)%vQ z>K#a)(a{ANUCVp`eR!HST0kkfoYAbbcn1`X;>tnhGLCCII-oZy3C_B|E5voHkZxw@ z8U>B4=`bA4v$As$q^|{BKY$l;2=Zf{>~Q4PwWcAsbOA0i$(ey}HuJRrXV06@hf7nD z3;nxe`EHbeacUF!?-OKaGj==+sC_q(15~H!w;RfS!0NRU43>gJXa2TA1{=b`sM&pW4}Jyw~~_8o@jR~f-KB1YBd6Sf}QJS^JJ;BSOlf9%hvtkg!yt%IYUEYZ$6X;PFc7^va`@M`&56*AE;(rN@ z$xw76T6ch*)d||W(|HZ4$)SbOEi<7}HltK!H*^sg&8IdKs~QzZTS0!80k4mdu&%)U zH2jU`+P%of{YbGsw}wcxaY6dA{LkkdbIj}6@MKv8A8U<1$Zy-5WxgS8^^t_`>SekQ zxeP_M>D$m>Vw{hD62BMY_w7iY7lS*+;Pz-}vK<^EXjhfLdUx#oKReMy>FfmS_JhLvl}|bdYA`2e;>WO z3mWf0a>igYzQbM{ZIO!<^r1yPaEJ%i{P3hNcpYW50m#gsP+ecUbxupO^3DsW`!+2W zfmLzVnp;q2BW?5^zQjzU1G{pFzfzH&UUxyodVy@{ zyEY?RV^A1I9l9 zOnSBEAQeAizaPWK?MM4N@bSIh!oV-`JUf;aSgy)k^-Y}-AKm#D54HDT2@}!R7T9o~ znXAIZ^IU7f-uebs_6D}DI}n=r#@(6k&j(|tjd$q;ttumL9r@p!-^;i^iJi`1@Jj&x zP2hTtYd0BXDj0VF{T*^hqDayJbdYgQ`HKzTykCz}oEgf#}nGqPSDeLq&U zo{z+M7Qvn6D)iUq`6O_g0sLFwUm99k1>BM(PfeEv*Fo|3_9 zI5Tz%pu7aF2Qq_ajCuttj;F9Zb-NbmCLwX}fzc1lYCJ(M-_1t9juqo7xYl88Po*9J zX8pj^(b^NpXB>E>p((jQB!s!ZbsEksg{y7B;}PT}2g=WehdzIYL)ABcVLW(@KvIs- z%X6+Zm}L+nM?>!z?9Ast6~`L^Jz<@g^Dy+7MZfh}60_Hvt;D(ppE0`BTmsccqD|?H zWCWjigNAV}JCLaH;N#-z^;Fv$XcwbOUPu|Q~+ zjJe?a1T*I{{(Ia_re7$h@?=`>hAQTx`ic9VjuXyAWMd|H?F7ER8Sy;xxHk)fckVQ- zqGmQ8Gjo3j<<16cm7_ibXNA-_H$BA0*mQsb{#83Nq42kGGZM-ey{^YxzkU-qvfMa(la871iz4GIk)ew0<9b2KTJTG25D#5Kx#n8{PcoWgE(PP+wlb&i7USc4 
zyDd=Z3Y04aPcms+6&`xpvm84O&ka0_9(({D*3wU!WzjwgTQQ$`)*>lB6`SoMhWkDT z9Y>DL2H27D4>Plw_4gxjW}RyUUCa~SpE0(kFWcn99Y1fqf)ez)ZdGd+F zEJi!IZccvvHMz{MOnJ^{99nCX<+t!<7juUp5#?DmRx#@}uqlCNEMa7G#a;so>$05V zijm}BanF+;3DEo++E58i^_=+Nmg~4{cI!&$q35aB!{bHZ zWj>`oa4eG+!-3DptA*^yJjpQ{_`<<@CD6BKwYHw%mq^Q3;Hk|q2a(Swp5dqx*xD>; z5(DhkTC0y8kcxHLtvyVyi|irV0!0)4GO~Kgl=3D9H-w%=T$?HFS# zBWMTDg4antHGp|2`3P;V?{g-6N4;8 zqFoo+lNE#}`Ul@-Z*v(s{|0Z(ycUT(CD5-GGPwa8em}Sw$D)_q`ZL?mVxx;%K&MGy z;S<J{=&k%Fu6W8MjXr&W zRDZGY=vFzp4qoTrWLa>rBIhRHbw#nVqE^f|4ab=2F`%r2EE=_Ng8fEcR-eL1Ln+D$*k;p*``9x zM$FZm@8)l+2K45V8^G^ba753wzOBrlMH0HQ3HiH>jPz&TDU3D==&bBshpQ#ghF-Ma zhc;NF`%&b@T|pVzR>x0z5oxwk$ot^#+ic&~N!=Py%sc?b#~GQL%jkW9*?O*Kz3E7+ zDo|DnZsrEhApU|rECMbg1_uIfIeJAQ%^QFyhE*h#H_Xa^r;wrPTruXjFyB27y)Rfj zSHSV{Kvo^9nx}LjbBE3lHiQpF8PCW#Pb3~iQ))rqY#?mP*q#iV0G39zrv&>>W7y^Z zv9S3BSs&Y83VrN=uE^O^^cagCcEA@l7drS3*WFJ#6w-6n9@wnEagEOfbV$F_CGdZk zc|FCj0!UJU$7~zM-$XIm`)H9nxW^dtICE~G^;r5JM`~W9%@y|PJs2ku39!cY3G8S| zU~*M&4#limZ1u=^#xcV~F7reIr}rA55n7hDJkwNW=7C6a9b{79litcEK(Y!=pU7Ra zvGzdD8}WNBn7FoFKwhh(l}7OU4qaQEfUd;iznagJ#bm}b%Y*rX&G+t!Gb0pF5gtwV8%tz zVj;3V25D%?-NEo`Abp}4!7LJUxzd3ZZXOb81*Ju3<6I$R~MzF+S&BXw`@Haw+^P4qtpH<~zhqXvH~jdNu&Fp4YWX8Z#Mk(;NWy*@Ud+RFjaY^ZNF^q-kAgVrnPX$!v)Fqq#UB+57pBn9s`t*>5&GyID}?;c#@no~+q3spi8Th>tX)1=7-?oSP$76Y znf13e9*UaqNKWZ}ew#50V?n+Hr#eXdM0EE_uy9>D0$kQGFOTf%8LNXPT046nYjJ&c zPS&(>1$zscc=|OTbC_r67@V_`SrUDk(;^yKy@H(IL`HlU7>&G{S91nd+d2-{>DiGP z-9Px&YZWp%5PWeGjUIJ$2TL- z1I;(kQd$(jUUUt11)G88EU?_cZ}WlQq}@Jvp9JM+0+kh;%JIEFSXN*L>%&b6=JK>u z7&1~Xc+UQWyFLq-M_W!|4U9E)r?C@HL~EeD0nM#!J&G%}k@jue*H@c}T%ABxGQs*J z)a?UBk0B*5V@1#NU5{KOV_IiQe?kzili}~fv4%*anKSj;RYy*$U^Voel!lTe z(YJen@SZ?gP9fpOGZjHF~Mj7#}GFYw9PFB(jRqAZm4%=Yb=s^~!LLtdGJ=;Uo*%>Vuy&B(Sl{^~(mm_O@5 z;0gJd{NIy?5qxU$u~ukaq_RK!9Kg5w&{W^RU0BWv^fHUrHSj!wCfo%E;c&DlRCBOQ^NxhRql1<9Gsd`|c@FWrCOytF_x)0p}xLVsTjbTfZWETJuhHAebev(IvY!` z+wJUMp8uTtN&bdL&`W51X6QfL3PT98(0~8?w;a{mG7j7GMerQo7`xLP8pdH^jPfx` zE0w#}eKA`1A+)v^*1{YhPqQ9}BC#g2uX!1|8e7!{`*;#B!+xY|E_^nlkMIAdv%8E6N;|sWV4U9Sfhu$nt 
zSH?h|xFMwFw#-M`Y|_9pG>Wc#hwHFv5JOpZw3=pWvlC?(ATWXdzAnBV7t_+G|Rmh^?_>-9~Zzm0aXYF7_N zD*|tB@r3dbxc3EGv$PmrY5uyIP~#0G@K?rDwtBEe`m|_-lkts4`OHV|+A*`yW%u*H z=N+vJ*8wby^f%-B2(FBUPVtOwWco$+aowPR^01WI61aN>4)uiQ8{n&vy>poL60-Iq zocftn>^fMOgUZNFYvcGNlLalTaA`b#Ww`VjW5htIKd^A-ep?A#%E#wW+A5^_v1(yO zjEL&OwV$yG+XGtrEL(&&Rtm0zlz6%#6^}~=;P8a?cg)-&kjo@?;h6z+J&=NQ{`2J4 zR8)tSo&D{T;N&e60;A6dY9m_ zZ+$bkI)W=(;Aalf=w3ta`6hBcYm#x3`k(g!LkjqhfC|~P)7O4CtFHS)&)S*$_bPM! z!P;UTF!5F%KEj&!4CGWF_J85nK>9bLXDH$)iu)Nzw()hIZuKp+aUp5YX(Dv1A8^{| z*Jz|!OYsfz-wsTAf^%Q?m_{0VYD8<5%jiGD(M0wwo_6ZVrz`j?aYm9x^3w&VqT#7e zCArv+F2Mc_@N`56_5wxI;F-8hU?Zx-rQP6cRaNT=4*_eRZo+}y$SzND>5V>(xEVx!`)H9n~u9cYGVpK9Ipokbel%{2mJJ(^a#vo3|yIE+mzgJkyyYoCLSbh;7jQNz&bCy}TQ+%a0S4B9mWnXJoxE(tp7N$HC`tb#i=(Sa|) zHwBB`2E90hOs$0%v*Atzbo11%=LgM&5VAwFk$2BbZUF;V3iktOeXf)tb0}y^ThNC?9ndhGK^1AFu2N?6$1V34>az)jzLIODmwERkcYFnWzZu5P2I%R4CrR=z0-_qewi?=vu{Z}hwc+d z=$w<4XRSD4#=m=6y%K@g7~knoGa32m1Esvb5{yoxH(^lJ%s$q^9e_Fih*Y^#U-b|;>z|ND_R&=y(z^`C1jysE>+Dafb=C&F0nj_ol zkmZryMo5VEbsbcx3ohBrksjP4|hFHvJ=d+k*NW2Z#})MGH-OS4ipdU!&zWcldLS^8djU$zPB^Z)hOeF z!1_8*&ic1KquCwV^91!0B>5s^g*>E_!88{A$&XaHUok$f7T8(YqzD?D26xUQn`Y0v z3~%0L9<7=FpVd&vGgDFMP)X=M4(zVI+JzOp3@zSay*2t`2t1Djo)gUIDGj}S>(Ji1 z(9S3IxzOnh{OQX7z6DOCePOQkV|Q!Jdp$6kh)nxDunel6VMWRhb*%sJBQWayJO)nJ zz&8rG%{uxS`tAuwV~NYM8`cw0l3iU2SKpF$Y`VPz%Hj9tzhZU4dLKpKF8dtv|*e4q4{|X#1cS&8~7=sk*H+u*R zGLcdD1CdWxZIQ$$u>y}k`+f8`R@eyE<;c?z#ykvOFF;3koBg1)zCGW4)`r$fd`B=$ z12^Mw^pH1)LSx~10y=C=@MJ7%XEeM7<2HeQ+ZoR}`ha5vBx+``V*QKu!_cwN`--QT zKaugS@KjWR9^-f}S%G}^29FBt`YSWWI_S0wysg388k&8?-e({E*V3m4cb?|kMLwQd z-pB|s;P0uE@r+=e26HWZf|guE>RZ9}8}zu&j3sFCHcxhX-Oj*?o>-rd$9fr0r&Exc zib(RqjIxI(x*bT%i`W1w`sI;YPf?Rl6v65dup#tK1^2FOY+o`*+V;y+=5?743I>3n5J5+){g6rjJ9|2V_zzZvjufiHk zM^aMZhP4Z-V~aiWYlh29@bWx-u|oXATp7&nz*Wxp-e>t;0!SkR*&PBbZ$KyG)I4Qd zi8(!!qyK$6+VB*Z8zVRu4O|K)m6)k8zrUgXm4LIeks?p_8rN10P83Do!@=t_d&t%K40}XVl8YW*qHJ&*6;tBbYc= zEhroTO(vrm>0sIz{w)H&?cg&ByYV_U)#{dWkRhM4jDS@G??FUzHJRPhOsf_5`6f3WIVzLv| 
zZOlF2^J?49p?fL8KGIma4E9XJz@ZqtTL6BI!O-lj#?y=hGP4?*_tyB7)6D$~dUgSc z@g2)9?B--RVI0CL~cB%+7{Z(#`;+a)~MMqq}-E3=U7)fE$bP8e$ZwSQf-!S z*WPhZ*=$hOWLn7#KF_|ze@4aK{)s}4^U?k-WMBZJ8!5K~zEwo?3ZhLfvg1tv?$S`- zw|TF_q4C)Bg3w#}&^uR!evxQWQ@-7RE}tUt^WfPh@arwC+CaWlXU0T$dKWFXGImMu z{|SzjqrGoetAM8wr{$5W(p;-WyR(d9UX-7>{wOnk1`R&r?-cmiiZQ?8>ib|>pYiJQ zZ7i6K15>k~8CzwHU1|Q#3$V=xogQP>WH1%`{J`&DPloCT{5WXe7>tc-(}!l9;ZH!U z&!s4mV(v6^=Vx)}_Bl-77Tn3Wp1>5>V|;rN=pwMYdchpujQh=y^U=uL_W@q{psC(S zE4gk*e(igW9@lAI3^-n(?Rj?61+lTqz+nQ{ded$r7S8IRo~nJH6|Xcj@6MgO`L7<3 z<>z_<=&$F@pChd3#?|@V>@pG1eJuS;VwbFHkVxA*nagZ+)@S&VPbzln5SZ)9YYXJn zkl_(P@)wr!CcVcad3lgErSk$1NR_=vKs_w>NpN_Z-g@Xt!^ukUum^L5pqYf-Y>9kY z#X5@n1%SOATwKn|W^`NyIPw5`W9w@4`hE`^_yB} zGJ-Kf^V|cMwbn*X-U3tos(OTtlZy!2gnY32bn^sbs%JCcwfXsMVI>1k5^oRutBttk z7)FDJbH`T7L`2ZmF>K(mM|@8f*H0N9Z}0KY5{FJb1s2?u@`1 zT}JD!vwr@-9&{6S!}kHRu&kAlCNq4dK5;uumE{o!u9G%L`AHQE7pD3Cr@}Mqfu6^PsM_GCS?@61kRCfXZ{Ye{vN@KT)=wjt9XQU%DC&n^!7B_ zv)FduWA1iCVkNa7 zvCrXPb06!!nXWr9?=r0LTE?+L%~~wjTzGajHd~M1e8&6^ex!q$XNe{=4~k0bPGV4&spSH&{o^ z&YHA7!H)?rj3Q?>w&gAyj+Oo34`7v;m>*avYH<5 zE@!b;o5k6RhIe9#o?!2Coo7x@+L$>_f5m;Nspyp%C2qizM7|sK9usiVD76Fd!Q63X**6PSLFj4? 
zo4M$2Rj6aUfpJ=C(6>pTA^L#BuvuB?*KXv+bK@;jAaxn96*$L$gT9zg;Or*!KigH+dVT-V#LGyskzZ!dn$1V4 z_SC-_uYD&y5$sk%ktd+6@locUE3Bam$0J-EwChHFL}%w^8&$l40}MFX9c z|90_vAN1acOr8_MHw~-l(IQ2q6^;RncYJ)}~vvs_rh ziv9+WH{_Yo>I7yayg}P2=oQB*Hw3AukG}Zi@+qJ8{NEl)nh))br?mF9Ub5png<7BV z2s1T6Vh@0!meUx$eQ>4)t&Kz20`-%ToIc=I3My3s<|feWJH~y5S-Nw#7kKty3}Zjd z?`C#e_xiQqpD`;((Y-qC6{|q8z0BizJu3qb1+PZ%EDflAOXLaBQ_w2`DXjx<_n^VE zfvz}`T!k6tvb!qA{S0W<47`)!#aZ^!VYD$nV+MB1H#TMHAt$dR6&HYEBAA7vD^>u~ zhgpsp++&-&)M`1Ytbwz^tsETc15d3cYJ`xOn5p16Hu6z8F^B(ryM7St#PhK*m}KgzK=2SK*vGaGEE( z(`e>{0X5~k`OiFg9ETPgOKA0vN^qz)Equ1{q_lC^Z!>lWu6iQS%yPy(rO~E3SKESN z1pG3J$}A^y*n1v=ALb?-7~r5!Xlh{bjNddyD;^A{KqIqoE<|ozpQC_yJ@!SawS}Yk z_}>`2#DHR}Sp|)xG=|xjq&nDdz3qFUOLbuBik->k?m4vARoD!0XWi$k+Z8%{&RNk|Dw02W7B{;9NL%>%XRk{5>Sje+XGE^@TvlJpMXM(p;j#aDP=Pm zX$C7qFQ7IL-YIC6g*HS((}p}jSoI(r`6>d>n*-xcu(Dc^6&ng;=_)}}_d?O^Ks<}| zIk1=D?q0M>>pThG8Hs)gSoFS~W`*@!?^ED>ifgVf;c&p{HFrn5>AjZ`)wH{iJL_7l zLX$S5!7H&8^#a@c5_jZHcjSK{cza$o4f@IxeQ?u(XDIg?gV#i;W%Y^o;gDLYB%1pm z7Q0>@T3b`J7&J}8mJP+mO$YPMSX5)k&VkcX=v0%v_YQE0=c()leJ>-UwfWCCJ<7#S zW=;sGFcA6;0tRCs|6=yi(0Uo8HerT%B*d7-Q;Zai1#1kgio+lG>otJsEH{>=3As(bkhB`5n0Ccbp!tI*yUG4AHM{L;y|0mp3w{w>*+BF z`spDl0)4D}s$a1%b69!M=+6^aJWl}c1A?Eh)z4x}e3!8bc=Q;hfUlAHddwGa?Ihej z%FkBx$-D)T%F;AxXM_9B<@&AdKXl%g4|a6C9;3F z7H%QF#nHY#6qyD_U7*KC#>$T#FXdhW_*FmxBN!4lw@$PC$039>1oVBrYo`6+r`{9`A?r+96WSS=L%>Q_g?TO5?wCB^+aqz z5k~6Ie&K86H?#*HMvG3Y5k;9*4Bc6onb5aYtHIyrGtUaIMyjoNwSZBr)O;MQBEiqv zNS-6hhTonM-v*t>GjAfZ`~^pEfmKoZT}KD3aIu-)%{Cx>gxP$u83?@Q=4nF9Fy!7$ z&qkalqbqCR*cossPw#_Br1fC-A_e1-j;C0^jfqK!qmuuz`cW~mOSj9)^gYA*AI}$7m<(q z!M_519s!Edv@ja-8`c&5OgmX;Uf_Ol#(E2CJppzu|UZy2-eDV-_<*Mb;>x9S)3Q@DuzqZoV`;+Q{cMpz@8iJv@J@r*Rb8 zS%AG&J|LY!OTAUb(#;MiU`v1WWj_DUcTZ}Trv3Z;e-G>AgGl};cv+RJA>WfHRzopl z@9@{$c1CR-g|~Mhna$XPSx?jW+?SzLJW%-#uQY4zB53mi^c)4W^Jq67O!LAAGsD${ zlXdnWSnzW(6Q zk1=e@EBdZuNity%!#x4b~Y?J+8;@Ttz?MVL#dv3q*zy)-#_S%pFpKmHYv$ z(goHUPwS6C4;mv8R_FMg-!tHi_To5}A`D&7GJB?^5;oa-3=0^sE&uPwKKzWt@57eq 
zk+5#`lRS5N^4=2{pL5-J{=N|~8ZUs%+Un@{Q-HYE-_wx0FC{9gF{#w0C6KEtsLD*`)~M%xD%`C;VWT}v3>Pw-jFo@y??bD6g)GOibFFWTqM*0>G* zQ3t^@6G<{>*Bo%)0zLX5-(~1K9ocA4kG=F=4y;jN-kz3L@-a$p4HzH60)+!tUsjl3 zv86+3na-%ou|4{)u5sU*Rm-8rYOd$US{P&LUM>mPxRu7}OOO~;N=>o3Xpiew=HssrUBzh)46`@Kq{u*~~espuk>4)(|>Imrc zGkp&-M*=JQO{nQ<;KNYA9`pP3y#&loL0^4#o+dqsm3Wu&Jz>=ZJi9?V-`h?H=1thY z>-4M%&Uy^ZbpJOi)kd!M1&4XGOJ;xP9?pmH{RvYP8k2`wvE-<6-m5py-j&!)%HbGkMW{ouN&1&E` zxuUNo8V+YLjyXF%g_c&NF#5xtt*6U9*)pQavB>Kjl4|uDsql3c{j7!B3eC&}f>d;L5wc^pq%8Os$;`*md27XG@_!d-)|^&9Lb-LU zNoMkhz`xCGqCsbfkMym1oS5zdR1Y>ea!Iy zE2z5Ii8dMB*T3lbE_JUxIBen0d(3EMw_mxI7uW`}R+`t!6S*Cb24iY!!}~E{wi?Ji zPp@A>8@G_(GoW4!9NPv)6M_3RY>j?^^WYcFRdWQLW5$)xcs1V^LZA7-l@rioJ>$24 zKSjaQtXJmh{g%(G%<7Y*nS*PA=^*5x8KZv1I^?t2C(vjl&rmH{UwXo$KG3lznsFLU znaEtfLW}iaXMK=W^jip2hiTIhShd5U_v}u5uv#>RGV$zne}{I}=-C}CCm}C-tlYsHFQis1 zMLwgUgEb$=AQNl3-UwcLjw_BQA}hC98DtmGegF=>O|aT>dnC{b7Fs!Lvz21z4YY0r z*H;HCXcBVdDc;@e*e1cFL}V)qDKyI99P(TU3f&9U_raxjaIVfwW)D9Vz@T4e8q%P2 z86V$`ont?2eo`=kYpeML3L{a&S+n%X8y&kV(9*k*D`SOn@u(g`rv8LO4Z+haX3e0J zx_1Ctjbm)1k~~>h7VOV6-!IH$ETeCBJmHxReA;p&+3WEs3}rq6mL1@-82TCG;xkfT zFe%J<3y{%nT-WdDlg5jT{RH=YAL2;M7~8rF$;{xn--|%n1lSJ4bMuAv4y3@mwbtOR z$_N*LaSpwWYs|o|@4(V@Wf$59$(@NdmxBwlz_=L{oQV|e;flMP zwX8O~p{gh1&12PxaSlRVb4Yn&_B7Xh2Rab=b^?dHvKUtMaZsWNvXacFCz|QI9%Gq* z3N+U@ExUlNGWzQ2j``rRlF>^6xtWt<(f%4>{wm}BLeFtfFdn*Cab^Y5*#_LEppUDt zD>IN!qf9Qb6CV`F$`B;lNT{!X?qu*>-I#ujXlX>$1@J8g2iI|>4$w@4F6I(cdfTxk zya6@Fv)&eC6i<6=wVNXaO?iU6fRuWgV>)=;!{}wXYGzt9u{WldYtFa)^(107T3IFJ zMJ$$C({gBQrcrB8yvM9Xkf;*CvVeW5`I%=jvYr(qn0&WagErQ1^c`M1WakoC>R0k? 
z!6hJBi`*T6*XtQ`9dmw6Y# zK?O79Pvm<;?8nz={%YEnfoH}|_k#B|kf(6QIm6$=Kw(V&aIE8WU@wlOE@U09$ZTDh z!BufNBVU4k8Cak*P{K@(`gZ3biS^;P8EUk8QY{QPz5uq@;pMZeG2OUgw7t0#dxMeL zFdG6<6tZE2|9U7D&TM}}P2Uwfg-)8Ur5jZJ7u;S91V$CRew#O;4BtxAXC>D*GiGDH zT|%;-M&k}JZxY`s@XYxDzsEr@{l-5+p?>gSFLSzvy@@<7XXG(p-jHX}jYvli#&cIY z4oJU&{?*}j1hAW}Kb5P%N^f8Y?O4k5UD`c@Hy<-APo&epgp)eS(Cc_QiiA_hstOwbfar;0Q*OfX*+^`D3Fb!s`54mR4dM+@STjT=Yv$*eb zfFGm0egIa_3_r&_S7@;Uix^4UwfI%qGKSecJPo}bo>qWMzOU}doO$3#JeFw>BYFKb zbk*#dE72D#mQ6#-T%!_@wa&DfgIp&d?;qfo$iPe1jpv{#Kr^vKWuT8|=7wkGhF zVGL`5%|>^NgPZlAW}`7<(Kz33=t)1uC_Rvo>GZBm>oxHC29TFyqzFbB0sIHS^|^pD zuLGanYbytRiFNR7a#=WIM4j(ptS|Ky|NDegox8XAxSC}H#XNM=dKHtnwx70J;hM1} zKO>F9*~yfGU-_BEnk!SFL3LojSnORE7_JA`bg(z8#%}cc z4EKzMR35%!N013_=2G_^?;vol0X!Let|A52-aElAKzz)?pN{7GZoCE3u#DCEJKFh9 z)QtHn;G$UvKZ3fxgYmuPQmFS45G-NVa!@K42o}Seoxr{dYHWd$p18;x$c0tFdLnQ7 zp4(tgtlRVq6xa^L%aMW~;8JejTiXW&PoZa?f7IKPh}0OJY~_t<$jLrhO=4$S1iEO? zjKGS3a-Tq(Q9u&SELL3zvSYHMhtAtZ&@KAD0J#7@J(giJw!a38TQ&TPYcHKy+(8BG8>%!4J9Lx&#_QpIrq(M{|a!F zVvo%^48^N1UHvqUt zb7d`K>PdYWoS#HCYJ>Atdfx|ro=sQ?>{tKs!5S@^kM+m6&(gH+pU*7R41f1 z-}RSy=K1!=w;Z?6n}cfp@R%2F`y3ie?VD(Pl{y%i}|Npn%->O&y zql}~O8n!VVoz%B996Rtcl5rK?S;a`#fv7y~FCp2U3%LSz z(OA(HSf^xU(~P)g5i-907pzVb`i#f2>5(;ilkWh{EGMeYQ`vdV2XzQx7gq`7EaLP!}93;bh8k6DZ6!727NF#x+0W?bm zze{l7EF&#I#zrvTBHE1%=5}8l1^etoAz-E?0zfQpYij4R^vf;ViT&&a!P)P2r z;a)H1sn0!Yug>R+Sq5|1)4I@+= zLO-)eu3`7kjXB@=e;9iY=&Y)(Q5z5hK@d(rDHa5!Nil@pghTZhnjsXWD500oK~W&; z5h(%jAWDD$2Q(oG!~;kP2qMyi0AfVCfYeZ>SwI-~dG@RS`R;$mxZ{qDB=5WT+H1|e z)}Cw5xzNX(Sgu3R-}s+su4JNdR*>_gkKy$1j^!S3agWxzzxUupCuHgeDC{nMIK9S# z&0g@fHe+k3dKg+3W3C#^_=&1x5z-Ulb9Jc z&`XEF>uyG-nI8H%hS4S)O*jno-NiMN>?HkJ-#LpPJ=F2EG-C2Kw7DG^>Qz6^m2B2) zy_kjc_H;lm^aWZkxejj(?o>}?Y@=|d;Cc8F>el0@KRi&wluUP@tte*RNjl$*GV^qv zm$CDqJ>hRz2kpTMsfR^?$~W61^zc2SyLwZH#(?($#y3CgB%BDvGMS&Ly(rJKKR~+= zd15A5^#hvE`Ss-0N%S}tyXlF@M)1}^{vQT@^WZxNUZsmyJl~Ccmr-P{!@0W z!IX{$o`gD`kl1VB?73ODfVMMmMT4uS3r~Ri-RVtVuIHcV*N#Ou%ncn6-|K?;IOeC-hZMHM^F7QnT 
zTI&|LX0~pgwGaj)y`E9i1<4)-FJhqQLU?0cu3~72K2x)i&5k?xnngZwpsvr~E2%5vFI`YzYv!6iTAe~D#X2VEuugFA!9 z3-`uyWP-63kPFjp4R-b(&|4>64tugkZSJ|(VOHqxtkU!qTtqhO@kS^GI?~H_KD`94 z=A5pl)o6Ioke;T~XAP*VR<+`9sJ7`#^t_n)(_KX41|tK_D}?1WL*NJ_8~>-rpeFZr z^W76rJzX>hpFFolkNpnTAZO|O2cU5uD-0Vq5j(JlUdutf5#W~vXFM&qEVDv>AUA(x zF7|30&xUFxTGPy`r3aX)o~7>;u5UyKEAl<03wMyp`M{uOFd8aE^P@)SpD-_>SU@FX z0zGk~CcSwE>~=J{Fuk^h-q}c<8Cn;Dndf`#kNgt;Zu}f(q-I#cdT^p2xVfA961=s- z-2mj;Q)x!>w)7Kl8EM%UiHn36pYc1LE4%qSohP--+Dqe=QWOKjb?H|hNVw$(i*vrf!)DGju)i{1=+`39Xh&b^6@v6|Up z5!CfWgLypT3AhIsJpmciJ9Gmmw1uwa^o5|dd0gKJ16{759&gT zG~io~#QcQM;0WXIWJVtVB!}RzFbgqo_8J*V|>+C7xYq{eY8P&O_ zf76}3p7dx2!zETmIeB&gS4PuzBCVFwHjULxJa!@m3V7bGwF#}=XvEb^+}HEd2kjUM zd`8)y!YUZ^uFku^r#DusS_eAhVZ?Bx(Oeqi9S31!-r?^t_@g#S@deQN7@XV$40`br zSikn=nkP(XG1kJRCy|LfT(1os|AvkmE4B;yw4!GQ_pNZU4r~S>$DT&Knz8O7LlJ2D z0eU|MpJG_kcIVx_6fi6WhZ_79n&PY#MlzPWEouCXWQMQJo2jRwmjRk-$VUw%>S@Mk z2bK2*@5?iJD_;VJ*5B>JI2*Y;65YIs{(3H3SzxFIkCVaZOXR0M7R>B`0$3mQ-}&xU zXf=~-EuiCb$V;VQ{gc9&X0eswt~u7rpzaiOJ^~H0(v7&f6L|<)*P;IiMo0y>;>-nB zcl-u#pY`8M(|0Q1^}d)>l`Pr}6=QE@S0dx(L287~svfc6 zwVfUg(^{=JuH5~!G4yRVkn!%u+tf#Ad(y)CN>;Cxs~wSL>znA49u8k3u!8AW{%o{$ z9`aX_zN3MqFHl^?jygk6feP;er8_r2Kws;FoQC3Uxo*{P=kKqvtF<3$`-KV_-ClG2QL|kagu7^!yKa+nfGB0FLHR{GVw0 z+jx9j^@L(9OYqjPlhMpbcoAHUmGrFcoj~LIDh6sF2jhon*BUP5pl2%%nRl9t5jR0k zv-S%h2OlDPzA>3M-6Zfn8sU~3TI-eZL<#+%W^_AtX*lH-{hm3{=Jw!fzOcEco;zCr zo>>K>7gtWx^K!0w{>KUa3*8PRq$OHp1@3=ycN2YVqtA7~FdV;RI8b@|Z3>!y0S$i@ zIrJ>yCOlsiZP^jT1-=e0Kck6@;O1($-k83vd1Q`n0X`3<*Yill5_(*Nw%6i&GwAMl zO~0WvGvK}zKaVorCRWn<=-r=?w=0fM`WC|&Zz!_booB7YsL#vX;PQ;C7t-wC5GC3} zkH$RhSx9YwH4i)s`MZYWm#h%9Gm5e#>y9{hY-V(R;Ew}Bqh1TpZ(hDxhcXY+8-F@lt0_>02 z#2oZCgU@eJ9FpPL#zxOBq5slcLsyv59jy#D?!Qr+p}yST z3PGuq8KfoPGHDo`+=wQx#kzQ!#z-_Z z1vp((yWeD0v32}dtzj-YxIM79ji9WjCRo*GBO3h$^RKyUnNZVO)i==XVL;+;WkY6q zW4@C3yMjJGK=XVvw|CdjW5NFnbIf)|G5aeFxz{d*)1wuU4#SK4czP@=5#vwp&|@PhfT{}NAGS1*~* zo~&V&=s{?R^Sa)>Zpig_$Z9muSaE9-KktFVP1;vtuBnZT_6aQgMR*4^j{?hmnQ@vM-RTx(8$?iKoe)CRj?F1Ha_QCO}mtTv*cLLDrvmbyJutb_%< 
z3B3AMjre+IY`WPcrWt^$N*B&ce4TjLLN;5kd{v-EF;jSjC&WW~NtP zpiX6;488pphfBUc>}RA}P=6;g`7p*m0?!t(UVe}D!DCS87IT|5M2@izx`td>h0vOR zgSexF7_Cu(Yx@0+N=-t7j76OTT^AsC%5w~M&NGdD=e!RpeugZ3O7GR+oI8m{z@{Ts z%8Z5$0p_04HHuG5ndMi~n`d^u#C%~~couEV`+gJHBf+6BGi-kD%iSlSik|Rv;CY{C z`!ZH6@Z5wNosfO=yi3Btipa7#r%wZ??;7AQngJoXbZvcw z_I(-oV=U>ntgntErSWKLPvo*UR|g?G`U9`BE*XrxbO-xJP`x*^+YeCB>X3s2j*Njn zX4lWd<`qIW7om;Q>3J7=asbISE@=;te$47-0u(nII~%I&c?tQfMsvl}ipDUWe$U>( z@guk|Wptys|Bg(kPnVDfSN@H#=+hbTHgq(yw*mK!X0x_+N#x9PJ|;o!^~`_~NUJf3 z$?*IR?TSE+Hc;BgjEl^;HKEUJTI;v9TAr)RpMi2N5E@fihP#KD549|wg}oRlaYg(a z{p<|(NuHqBbZFo?28)rC(_lLXt6;SBerWYez)>xvKBC{j?j(}uTe)XHj|MvT7CJ*G z{Y1NX!fFu4;@ty^V#rGp7@vjyU(&Wckn9I$cT++&{Qrej(&O8Q71l{)+tYhG(c^8d zE&|UaptYKU@jBl_;q}<5TJ-)I5N(Acso>b1Ctim4Rx52uPsSuXid1z59_y0+3T2Bx zCFAyuo|^#YI|9={Eb$?<;%i3H6IhWp$;_m?=rtRvi~=V!_EVrjb1=BavwM-C$Dq7E z2iM^GNE4ug5%yL7}*d+cbBIP6urS% zjx!29xyx!~Oz@-Ou*lAj?%QTTAw3)s z%s0_cKp)iO*y`EHzS#oPf$d>%GXkVAJXpdtt+*IuLOmlhotM+0#T6|1O!R0nJ@=rU z=SS$PpM%x&oRTSMj`_hocn2DS#4mw9E3uFI;nZJs>^ivIrmqRK65m00sT2?u*lI~%A0d{7fx8V6yu+@?ou{sE?@I7)F{cS}Djcc_^ zd>y1@5xu3sr87Lc4Bq&*b{h&VRDH;k)|oZNDK8LNr^MQ8uhTvndEEs~ zyTJDWQ0g!^97SiY(ZZAN`(lU9A9$Gm?$$g9?4H~-63Bmq6Ga%^bAeNU*$SYpVD`YX zLU3j^cGfDWcaXIK!5YDc&{1fbv$&b(*75Di6;Che3;qQeD;mwPZgp>PjHgWv`i$VO z8Nt2<8Q0bae$;>mje%$ctG+Q{P@C0pU+m~MR>#9=eFchn=872xR8}fNWW{~= zKF&bhrvlxnkKV4QM|az5BQyQLwgem~A85PpM%FV8gDbvoMgUcPK6#o>3$T5VXMY1O zJuB9goke>q9o~mLT>>J{J8MO2qeQnum0q-S=d3o`Zlz!A^ti7gPT#PGw|1ZT^WnT5 zjYB*2VVW^(bd(vq9Rod?1?Su)`3=s;A-h%&tq(QoGtLRds|vNdfU{Y@mAP*NW?s9RbETIAq-BAbRqAj5C1`RDZYilpccGVOW&)v}pruWzmqq$msLfuX)VS zC9(2)fYX8f60|+TZ2A!x?!k951KKP`pOoM+wBH67jF_`BQZAk?gBC;}eGl;ba_;(e zm`Km<_%(iaFL&a2t~tK61V;IVwcKm8uufE4sFxGDOh*3`;fUFVeSk0rI@SeAtPVw< zL2~OeP7CDHRoGoDS1j0@KN1DB1<{P{^kXF!cf5K5?*Q;KBluHf!x)pC=v_WWXbj|4 zp-^xB_e2IuBXee_eg_6sm@`ImeFT_)fi9HdJ=?g!BwF`Gf{oR?%_v69jR2Y_p`iI$ zb%CiYV=hCB+Crhz$c~l9qk;Y%a7zcH1_4*xNihnqI#6DP(n`ef!0X!^T0F)YWDhu3 z0vdC)OTf)gw)lLYx&!BSz)d0gJdo`@P^&I$n=4p^5{zbchBMHAP3gFc@T@+3;eWH+j*uk(&GBgx}yE) 
zOP_UbsFO^Osqo+T)o?UpGjwQ;7MBJWPtIuuCfDI*5;o=zI^G*;_yZg2IeKMT1DTtz zpVIkaB=hYdtVPkF?P&Urz%uM-MmLtltd-^H?IrZdotOpiuLqPY0Vgt$Q`bP&hA|iA zStQhm+x0-a1sk*-?(g6^_r1)mj-{_Y$cY}*Z&`2Lq?MJY#i1_W#__~&tPY;1b!+-^ zjyHPoGjLykwCzT&AA(=w!Q4onW~?>G(T98N52Dc%(a??X_8j!u3#?OstRuRi@1g=f z6Y>0*o1;%57gX0@)*Kly=lC1AZ0>VcWT+f>tcWjZqAWzoB%gfq2juG`)7-g6n?fM7fCqTD%xq6iG{^!}le|ap-nAIbv|3+Vp z^#o(-!x8*{9jdvDT_}(q&p>DlOrD7TH+sB|=H8FY*2D9X2k554uYW@=H`*NPEufSDdVy)SXd z?Nz?H%FDydUKq&qn9pD|E92#)&l&Vz2%l6VMkp3s_Y{tW0d}E?IDKwH!ch?A3d%VLH&n$R^F>~-!5?wFB|1I=a6d9WVjZV;7-`zDRcZsLnMaWCP zW;{O`K%pPdT7ez-_8|JV5cop5@5_1Gb4Fo8aTIsC z`z#|^6?0+m>+|;md#Im1$kWW=%EI7E=;{@0(o*3ml2y5ljd!YJmDTl^n_Hg-ir za$)uELUH3iTq93qp3;vq2X4N`TxH(odNkZ9!U};TXJaAz)5k<)_dL8X>*Q1Bs$zWq zm7b5&qcIu26HH;Q*RNvzqphH5jSe5C2^O zjKy{gKu=PE$vsZj=no-}W^+J z2auk}dKy)+1ZtS!l*OFyZu;-E+y}g7;@1Z&Yr~X>U!G2rjg4r5eCa<<<#}VphC%h~ zT+8HYJ$Mw#fN`WZkzRc@$;gd6miORP=xx>&hLJBuLxcmfxl2)S>LRrH0dDL?&*i)4 zLSG5=MLxQ^D}$CVLo(LTVj1=$Cg6>*w`HV%(c>Gyl)-3okln|TFnvwdN;Oa1cTOY2 z)<84s$@rdT)zw7w+uFIqq2WKca*Jp5N5uonR%}MMfP=rmwWq zuCT@+15X2~Z|W6*+6==jjPMeFD}!A%@XkXY`UesQ=@s`up*aLNml~@=d3h7L(~2Me|IpNfYoEcJCN)3 zXfW2BasfW4Ly1yI>k4SQo;KC!Z8-9F6*_-_gnUK+W?pqd21X#2Vayiwfo?wio1LOG zYy)%OI_iUo?+WhbJKl0uK|P`HZhBZ5yz#U`t9t-_Ypl&#FzSd*q|stJ(s6)s-=JL; z#_1U7t2s!W&>VeNZ$RH&z+%2o6F#byif>c*?B(RH$G~rj@LWt=)s%r6Z}HNOgqg_wkgGlMRqBcaqjZt!yB< z05^Q+TgR`x)P~=3b|g05d_mt~W-%LG2z(gEwiy?)48AR;FC)P+>2(XA)**{4psA;> zrPHDuDq$XY)dAeC<7HJ4s|W0X`e(3o30!Z( zyMpx*N}@#-fomji)n&9hXpSE0)<}cVP^0+Wh{l=Sxdz-^^_-zMQHhZ7ig6(f#8 zS1%(~m!Vr(#?K4XyP$x(p$S;IZ9pv5^snYe2G?Ujm0Y7bDxyc71Dw+7DH`1=j!t%E z&7ThCJ_Qz`vVPZOWMAtcN1J0MeXD(sk#F(-u@MPqOS`L3y(^yvp;3#uW{&rn;Qe$J zP-**qg612bt@#hdPb9V-GkPp7M?(L($m=Xda4$!mxZm5JD^Xn8$={`57sbrekavyMa4?GB zA@C~}s1u-NfA~BOo6{e?$&4jf=-XUmG7F?$BR^*4IX9?wm=_xg1^}c&mwc9(b;W$-o&%l;9Wf0uh&4I zNNb>4#b@hh>isv?Z6s@F>kM>(OHx6iB-QG4FQQS_`zX#cR-v7bwAKUi z1jcO6>`l4F-Y~lfH4mowvvD~d8|6S9XQPz*Q;sn zP#gGTrNC`G{XCH50V>a7Hp^OXq`I{jYgvw;QBcH{gEe;bdw+{0m8QLCvb*j$ioe8j 
z(A>+J$*A*?sJG#7cf1Zmkgt|lB~L)A5A1TG8PMiL<`SXjT*ff>;W>1EFmhfN3Ycp% z5UFZS|35Q2dI0u9YaOx~@CxDGv&7KM)#8%cE#1nd{ z3S*zggR7a!sXQ|S&7O`t^u;!Xp(XVh>py&I1ZHOcX7c|Mn$`elTjLFSpR4(?53WX> zB|i-~SQ4yC0{u7iHHsO@eA!6kVl5Oe0q;&ht)BEy0X$bgU!y4pq0M!J{%126&VnL) zz+9|n1O34O3$0KD{N0AU8W(s8c=fsVM^0ULT7%PUjNQ<#DiF9IZ~R^sx-^y%hp}=S zfYe!aBa>0H=xZ|)b_(o2qTNi!jDa4HLiL*TZS|^C!TRkgwyR9A3sDhxj5;~}=UbzZ zgBPG`3Rk;g0eS<2Htap5qB=D7ZLc|$e33EB@TnMF&WSnt+EOr8PYYXKTLNUF3W^Gph({@OB$gj5(8e282F;DrUFGcT~ z=cH&e%$@beyx@cIrYU1C0)qU(RlPrd#gx@$p-nEhl$#MMK#jNPD?};%Njc%b8G36>s~N%d z-+7NM&CL248j=%DiGvd9NWn~CkLOBBcw@|UE~xS>bZ0)}IsK7w!1NT)O0!4#c>BSx8Ttf*$Sl|NJp}E_Bs8na1Je3w}fXy$akb^XFJ`^j3l2 zkeBcO?CGy7X5S10%adr1v0laojb%OL34rd>zlf&K0q-}N)q21sPdBQ^TwNQ>b2+e! zUx2ZDd1o2>dn|MqvzZkVE+Z4xCaDqJGt<||mbqwoQRF@e%jOC6>sc}Q7F-LRGtQ&~ zBU|OI60&5aH#2#c0!b9l4Q1B5jD;J=D#rEwWo*fDG^Hn>GWcl$Ca#i=STh3tCX}~k zV?LzP3P#nC7BNTyCU-W^V#CZ<)^2^tm1AhUHLT7vQ)Y41b4q422lQtRKN(w;%Kakr zdXc*)nb~Fm-z3I0Q)o0Yl!Y8xvpf|F8NuQ?wX=coX=aSB%n-kGClRi#LF0c!6L0e7 z)Bp@Kur<%n(-!vq^p&IlL3!Fieb3&u(*M6`qff&X+I}?3s*Dk6+y*FUJjO%tQ2)YM?2>!do^7`VD>DRaE}-RO zS%X`T><^yMQ*(>YtKhTm&+aXYe^WeaU^8|Gl@BOg@N05bbWEj*r;;!=euDz zd^}6r(@=aB_cP(3et-dBYb0M=Xm6Cg=e@Pz-ZCutN#Dzpa)th_PQ=mZ`?CW)|{)5q00hyFHSF8oAuh&_dUx90$GW!zr ztqFFXAu$9UwLj~*oI_g7tJY2vT$IzOdsyJETX?;u1p4ZPt>zwzS3}%C$Auj zOBqd%$W~T0v!I_9LUO@_by%Bp`s_%573g&f{22(QAumUBXkqs3My$E-k7ngJ1h2Kh zyc`1_k-QOAXMSG~MchsN4BfFhXc)G}no0Uamhj)xfNLTr=GYWL3%4-d(_qsUTX_pD zQ3G7Bc;>?)u0Ji=ccD&4Mx219A4U()GE)sfI`lj508;&G=6$w-rrM^N=*T+y zT>+(Cn_0&$bYkoyKxjo%-}QQ8IrPxI2u@AuRescfI@S0t2di++c(OInXlCB>rScY$#ww(EDe_7r?8hh!TWY0PAG##cr{ zd+Y0?Q(zLoho++qTeQKm4=SRb8m(sjDvmpB2H~2TI_% z&lo$MRoV~8_Yivf5_#$jZ6ld|pQ499fO$H$*?6?)uw(mxWGXZIOeE+7AWLRmbv1t# zOW6sz8G__w0pT4qtp+$WM^g3Y6vfhg%O_8HxJt`Mpqbt%{m3)G&r=5?!AH-br}377 zvdOGa8gjKc_lE;n7I+PYT8sG6moXaZdD5&kHXG1B1?jPRT?1_V6|mlqWta};7vY)K z)cvYZ_N4if)^8uk_*aqZG4xiCXAXd$TI?xGsqBdM28ZoXHUUT{z>Sw^eH7f>`*O}T zt43>4Hqe47o|+APX4Bp(!AaP#R8~iN*o~xr4D2=n$0AxB=j|htk&E$+e#D=lfPVLD 
zNQYH}jllGcWecCOX;X>b^x^8?-owg955s3@bsOYCUspO-Vl?flf{U@+AJb=hda&Ya zLZG?3cz<<-_I$q#ZQHU+jNuu5k7v>Ue)R8qaSKK)1b(Tsb9cll?@7=t9Qk<%D)q$j zr*LHe7}Um+OE0nVl*7F|xf7|j!lGGn)xrBBG}}!tMrnH5>wR!gIg4b3U$F*OB}xE$ zwCwy}1_ctO%`{C?cSJZm}h^1 z3SC&!n&WVePez{_p*W1U(Xou7ogWL{bNDSsAAOMA$AMEgtoh<;E@|9N$Bw1(+<9!v z97geUuZmn9#rVGS>gisF75IWRffX+zXjc?@UW6oHfNB+)6<$XB!nv#e*0`Dz(9aBY zeWZo?UW<2I&s}(wr;WmE7yNGqBqh;G^BGD&u}SEP`^e5mW>7>x^L#+^I(##dpbr#} z;SJ&llr`qO8Bb(^OJ`P66X-#ot@C&Ro=oIwG*a~-(qvV%EU@+Lmo@DSWkb1sL8K8d^D{0L*z4N{?$MD>5P&x^z^5mWS8UG{tao=4H(*t4k zmR(4JQE0;$%`?AUJGVfm&9Ai8-9|AHp^X3EpE88Ft0GfPg7_I4ogBu~%%$qmKyUDSpr6~SZ^b3|w_ z*ZLoNMYG^p61MLm{B_rT8*|+rW>K?gtFxN2N@5&5Y|R=j2bfuT@-i6d|2JCSGyHC# z&qe6%L3%Ql?@-W}tM$p~)-W&}#H^JQYPox79^h)O4g{Y6a6JNuN-$n-Ag>IS7qezr z!GEjiIrpys2J=viuinY1Rsp#mDV)jkmsr;*H7)5e9^G9I^bz#+KJ#pGV9_7l5qoSN zbTmKHSYP+0U3cg@kTHx&aNaBjzY8%N#=zCmP{X;RCosRwvQFJ}yMP$t|T zfs_@emz>NS=U%A8jGl<+Bn>^cDnLExU{&jtz_&AysPa(j4sb8WnpXycrHtxX_HE(Y zGqm@F^x|OY`H8L2?gPj`U1ZMFWqyGkWqCGK>wF0m(-U`_C+2ZqA6;Fz-Zxl>stqLr zIcNhOgRrtUXlI2`Pj)K{JTt+`n)|M{^a#54bk@p3@2pwX8hd4~)Mu>yrUh%}F= zcEQ}*C(!n?^mhnJF9w~%fV~f^HXtT?Xzohd!PfT?EQ^1ePY)?J!0^0TiR5h--qKP*C5XF;RZ1 zgdO@B$r;YKg;)r^97d&uP6*C{E|`7j{l-xK6?%IOx-^B#rFrrzWPTGpRf7T*1IsrQ zj8yfC(;-%;}UmrZDBCxUp^OoK&8P^ zsP#XchLTop`>!X{4ACA?`7}Mb4?dKUTH^t`56<7?a~68^Ahfv&B}c$ZvoJl;>O6XH zTw8aZFex1ouOM8P8laShC3bTr!m)y&^`x{e;CjrClJlx=RS0)Fqod^ zo*BTe1phtP#kX547A!~VLjKRZP$mU_Sp$0^nCU5QiXK~C@Bwh&0k7=Co@UUZ6g^Lc zdk?^WSLXx3#FJOPrSGGR`5Z9R0D^XC$6Mf+4_nxPzH?(8-{Qahi2u9HHl79b9MHu> z#hi>SUmr)F{{#NH_*RPlrNA=>Gm5@YB}`A7zSTm&=@~D^Bj!h|9;L-lt}o%&{NpDB z30oL^Q)c7%WE*8-BBQlK(&LbszdV7Z_|_loSL#miljDE;F%SBGzY>ZkGz%#=y%|Xu zIrKvZUwcJS$({v_@b>t|Ozy)ISDweIu#L8qLhu zm$_dY>~>ix`1~{KBZ#kJg+1IIINU%9jrD0fj;VH z*!mhgeTdbObqmaW(w9&KicMg4NaKz^BlnSeK<)j^U0KW$ld;FKJm>zmHKihf-KyB7 zq27C3v8t1Ai+V(^(K;S0a~631&i@W@X%;;h;nbBrtnxMs{(3rZI{hAH)>#G>uA`MZ zvCt_%+W{Er07-AgS`Bw@)3Ps6)}j3%XeouQQNI!zdyZT~WN0O_QXdQZJnK47V${>R z6Yg2Ba2jKn4Z95MSs(gjA_K0nN-+8&Y<{OeTF(Nxm4%E`+s?HxIAdj?vaE&9(90ZT 
z!go?5Hd0@Bml<8ZVK(#6R_0@CULS)VF9$Z#H?X@vU}mZB%8MAi1QM_tE*yaGZz54j zLR)6}_Kf;C)?gekdgAR%V7?JfOv4Hk;`t;vUK<)u2r%~qgjk+BPp{|suLserWb3{z zrHxg|ZbA>&zGH!`7`+|^GyN&qSTp?@69PW(28$73IRt7pVg}92DA7nlH*oJk3s1jZ z!sy-MmwqW@5{(zR$+bkrwJJ{|X8ozOGCI&zrYErOWyZC-v7G-Hn`Z2*(MJuTkacNW z;t8BfFXs3dAFvuNNyCPXg<_9Ft-18%d$%#FCFt!TsN?E>JCvHvcx4$m3of}v_1x1h z>GN>FhgtM(b>gXvYmA#Y^G`5}r_hA+_ZFC#b!@EeALzU}Ue;J^&eN_NoF~nk+QswP z=tVQ=Do%;CGs@8FoOkJU6;GrAuO8P4aK>tXdPS@reK(N0n$W>Wp4-4@eB)G}H$!9* z(7cLGD-WEPpoY8KHL*7Of{hiq!}`NDcS&GuLGKO0BZJY5!!0l{>2&9)??e<`51C&uRkQdie!(SVUa7ERw( z;rJkU^)dfjLnm_{+rqzY$lXog>JD7y&_kI9jf~kii<~9XquKAH zphYx&-{DGKdT$OiThN1dm@`w+$WdrPDjf0MXBvM!k2{6oEx<7Y33APTh4*~D_C@d^X2JVW=$YDSRF3mN zO3?Fj&~X60ds^5Qc|i=kAVZSqtXBYUsfpFxP)=g~7?_ zv~hgX;Fx)g7vO-ABA(y<3A}8HMgI*-bOR$J#%nNA9WXMFw?BPbEAMeI(KmJ;PV7hC zzl0uVxYm#vB$Zig7Ie0D!6l*=);(mo9II))@c)1y`?jjk)L&-O=)D6Uy_xJSZI;)L9 zvi7n9e~Bm4p!+1|2|Y~Lq17Dl*iVbcp!>^ELOERr)>dekgftmHHh^9(V9h)e@CRUz z0CTIaGzj>xo!P29-#THHM+1xYD|Dhyd*sW#?-ul($**&YyS^8pfZR5N+#RAN=%JEV z0vbgE+dL>X4p`sER<;e^!xNCBB>Juctmon6_q5wiZ!dGjU86EQvj-?ABh81f@6HHy zptO}DF2dc%f>n1Byj|D%$%YCM{M1KRt3rQ!&_`2*5!(ZW^Jxd3s|>_F!RJ-z+#igs zC+B)=CsLG$=j$S;$>`LBXuwir!YXwa;rxHVcq4B}?n6~ZTdnr_A+Qwzu5dJeCc86N z!0S;o&uX5Y*pLO6UtqRx!`eY#pw(2(WN??m8XM0;uU~1o8wk^xJIrbr#@IcuRV%Rz zE3q=xb#o`HBQxZEV9*lE9trH@15o8rX1TBFsW&4U<#mvmvp?8<&&t)9Yb^{-G8vIYQVdq*}OM+L)#W`GYsku0CW9IzPA*{pLhr;vKV6; z_DTyr4gQRQD%bdAo|~S!Ay|a{frKq*JoA*sF&|BWgC+1fxw?!2zTUu>O8*J`Br>iZ zO#OPV@cnx<#nS_}A-Ro_g;&AQdV7`VZ9n!`napIKeH05n7c1Qx8mcLWk$=}S*Wut( zKrj-1j-|IF;JTV`uVQ6}gONEko{nQ~u4@%B?;p$#5opI`#>zrYjR>-yh*7EuP^>(( z(xYSsuRN?AXigjS@DQWif<{@`66>f$!TEfA>wtFk#)`bcw^?ZHAfOsTU-3wpe#TUfLD^MKnbm7^9w2g#Yi;!h2S?&VzZD_!&v@q^2nO;3-3 z=2gD+M<%j>&1%%v7`z3P1L(hJKyB@#zP1=}f05Bz)0=x8&SvGHQ#dm6ZXh#yX7?c3 z&(P{cuxNt?8p}v#wi;`pXTCRMn;+~meF?X@CQsdSxX2j0 zkg#ZaF?QLN+bhu4$^`>is}JXH3O&1@rZ@IeEWilHkAe=)LcWQ0r9W5lKL0bTsg%H4 z7YFJX?tcJ&R=zQ!*!Rij=w${J|oTSEb>CU{*ZOU=?@G=6uLKFnl_=IUcrBTp=Ltoc;PK( 
z^bX+S70-m$gY`SM`vAVN;`DJnz%ePH!JBX+1Fdb0O|v#(4m4vF64D+hWAKJq0Z=>h zBid>8fohDf5KM~jl=7P!>k>!1hrs;=PamT7TxPB&fzP2FbjgcO>yPY;jFtkcN#OoA z&lsgA{N~Lr0^09G{f%Vagp-1^aq@WuAM`5>z13@Yr*OTh?S9rPxy$OxpKxt_1vwb(lMi}#$ z8;kr1G|$bmW*X<^vnNvLV-5K#GLb=l3+dVGj{<24dOXOJ`UK5T_8jUMu7ti@wXz*| z{CR-;Igps#NLUlD7G-55Jb%@t)CVgE{n3-Gzxp8jG@{w~$NQkK(xN0=SJ3gL?mVvM z=l8#q4)r13rPuvXJ&ylNz`;AYf^R9%A#_EbYZ-8|7r7?BWq9Uou7`U1pMT?mmZ2;8 zf^ps1$wnh5BZ;1m_6`z$8*A2 z6tl9C-R9z&xo0k&I}7u9!X0!oCd~iRtJD=*x{fq5urh6*fGg91`+KaD{(<6HNKe2$ zgl2aLEYPpOvK&~G8L<@lyck`%z&F>k)`O|Wv;A1_eaI;0eJ#MI8Ryl7wyo*UnsL@V zaD{yaJd&81jHc<1_FB~|28wRQUiAg8ygaK{@CdY+#%R8~zrYAfz$b}0;|HMh>`ODC z%b{=Yv$`4tG_IY82UkY~^ht!vtI^Tdp@cbc)0nkfNi@a|>oM!bs_`ej@1gZ@zJ12E za8|-K(f1YfJ(lP6P=&l6TKIK9;b{>G$e5WM8SrBOSigp~eFE$@g3nfLc|&ODs@Ze9 zZ-R$aw_N>XfysLQS_!Ep)GLCNSQTM6GmlXo;b@-TkI){Qxj-*sb1uN$jZo04IaZza zl!`x?DZ4ZCwL~YZA2=MjtIfQ*iuuExEYHS{hfXJ<*DYvyir;87|1dNfLr-U+z)1A* zW8}-|iRXg%(aT7h{@v+NU=KZbzUvyKY!RB$2P`}b`6{1`xYU~-!~8QH&Uz|`?{wdD zWd$>pa%GL@mf&avomH-^?OqbxOEN}5XgZl|B^cdWNy$*(EWfPa4dgQG$1C(W582zr z|MSeN+ivQZ#K%~jQ9`qXh5)n^|T1-W;EIXbl&*JE<9!Sveq>jx#|noZ=!wKP^=kk zR{@pl72|p9(cd~S>IkL&fMd$6(fE~-p@YEmDNiL~laE11tB2kL#%AbmeJq4;wWH9r z;ru_s{mH;O0&N)zW{beVdX2u-WI~l$Jl&01(%jVrK=lVLT=^M)vw`ug?^qu`)kHQb zB3Z`1WCxO>-;rHeS+!~|ef)-u zdg9|2o>P`yg0_dacL`pW1CvR}PZZc*1?NacE(uj;)7NM?GX+{iF{5{5v1CCa)Eo<5X-to2GT{*sc!izbX(LkCO$U;N-Hju;ztR5Ppi%8haZ6i z#$b^K!?#P^b05Syd|A+C1P~gd^C;9cEA=8+EP<2XLRoEh8msY0__&H|BazDTw5*53 z{fPcpKW{yD;SHWK{-`|?c{XT!5>6YX^e#LqjwBiF*^&AGL;j9HU-h;Xfp(shoW_s2 zQZ?!AciI_u{vg+JL%HYrW+a?2+q5jBRA$uUKxO9Lb*@z9Z8HW+`~>9h0lzud zS9qpLfd3S*HNME21@8jCo)>Ep?q?KdZ0n|ef=?n9{u~9iIqmcRsQRo4D0Bs4Q*K8!wQ(Q&?bu(z}p4(>N zNq}NK!P+RX2=H6Vee3EO(H+nGWpVI409}n!%LcMa$gw+o`o%uOzL*zMmgkD_DH5&> zL1G@|zm*<6#ds3-AOVRi%oXE6odMIaljFE!gk%S#ybgUGVTAfX)Sg-G9MtpNxd*_j z75Z)V^#axzm5{i%(1^Rtzpnjz!TUY%#d%H}VT`G7YU`lS8t87-gE?6HaN0G6ZdUWL z%HA|2u@Bb01YGu=%Zw50s5at#CW33TdBQl|aB%YMl{rB315f6GgPG9#X;xV&jNFI6 zTaYkMJKqlkMqIRF%zpH63~c()zBkl2I?M%|)=> 
z1&`Z&4LWqkxWPNM#{46S4PIz$g)#UdE1van0%l6@W2xie7#6+|`T~ zeB6UB#{F`E_3i^)zXtTHf);%a=RcrjCm?g~jYSUJp{WJV32?7GxO@UVmNIu)n|>ej zrSBs~09T;rGf0ZrXzn$f!yfGmRsvJObqc*%$;WJu8&IVcqq{myg^8tTPpeo8eYZvO{(fd;rO8TujHe~`jSAo9t zr|3z^MK6DOw2Un50GxT?ncm0yz{@PGrmRVGAWhBCjfq&)JJ4tnPkYL63nanOaxgQ* z@%Mgsx*J*)r4Q@p+lMEkL}A}f)4Tb(W=0pnS~P*0W*P55s>&h7OV}qd-_Ub5O4HX+ z{#JxHC9w1NVOKsvQfhKtzf%XW$xok~fMYz8ZC%H`wEu*A{**)3{*6}Vf_hS_H_#ZH zWDVLd+Es;y`XbzeO8;bvt<8{BiwU3;E>F8}y+kSeloEM^^NHoAD-NRZk5 zZJz58eES_9dS*&dR-@&hiSdNzgMROavP*gLZ)j;FdVdUU_&cpCAz>NFjq#BU(Y)(? z+5~USYR*gFoXk+$!Iley;@X@I1Z=y+(()a6`2ke4 z)}fKsa@domTXUswFivScNnJDUqtN}jv>3`0`XBZ7-G^lR*Q{-0GH3DZDn5ncYNc2{ z@M#RC{2bzUF>H?Vd?L6q35h7mXtq?6{B6Luyun>*q)a-$RDznJUKaAr@jRoY13!8> z^^xYo%G*Nkc1|eLm><2Lj;;^be0w!2H=oTqu@Y&h|G!RK_Kv@y>)s2s^HUCJvGkY2&kM1RgZ`~Zr1cguRuqfwH7%|0^l!o$tLDFsY8mC;Ctwv zbs)N7%Qx`JdTLQk5B5L8H;kLE{+3N^0(L zor1qcMnr?DZ!_zW&_(n;0NSlaOZ9QCg>TMM#_Nrt=Vf4ehxyENCr@+TOwVw7HVaUB z84Pch^SheAp?yI!*nEHL250rTWP){RTO%sV5^=BdgvC& zx}FF#kQRWGnM~Qx$M?JTP*(4o?_&eF>zQ5y!OFf~zsw1t6UOAIC$%H|W)zLL>7-WdtTj-Kt=R{RdO;#+a|(N4j9MWi zI+;0gIht&)!y9}%PJerWbw99rI?Q1-$w(S&TMuJ?NTbafU`T^IE&1*V&PHn(VeP8s z8RW~_xowf$wcNE1jVD<9mNpG5_7O7W8@O-hW)bUq`Hoe;C#6LPlJYdrzRV~;(_%8z z=>sHs(~MXd%x5!W>%a#y9_!FUG(US-D|i-gJ#f}P+>H?;undvR7su$i3)hW{R6a71 zhKE==)x^3?;jb~rt+2M8z{ePGy>>UT8C!YIQzMMd>5oK@;i^7T&-HUmcVQ=CIjnj< znDOe<>IYV1g_x;_As^<Ffc2&4a&)%%6rfHINu7 z_X2e66VM<7E8}VO3u!%pUit@i(ddw6jNAph_A`Hug0}iL%w^w$4jDUD3ogAEJTad& z#D2af1L1D$=uy@VLy*6xkyqx7I%0cvnfc2_09Bdd%D^Xa58S%Ebae54RdvkI16Kh%Y5EVAKCEy9P6%q zjHX41f{V++S9|*y79oi5?b@i$Z=k4kIIQf^6&@uZ@1B-5ml<8E`ZhY0zC6eH z8|X6{I%^RxLM3Y$u4GLfdav<3Agg$tKof_c#S7?tF!Y~Kf9=tWW>ENJq@^wL@EY9t znEQ8ta336vW$nKxiNv>Eu z_gCyjI3Ac(WL|H4BsB0;=swsKGvqw`TuW~DeI+d4JVyR8=+g+R{5)Z1gtfX>Aql>x zH{*JFFxDgeGIX;N-vK_Gi#ZT|Ob2>b6~ln!d-`?P%v=W7J68Umh6Qa7%*I)Us<62~ z=UQeNa;Eflr_}@WdLEtm19^RlF|X62KXe#@r5cXpsfqU?S>GacCAqVRx6g<9-52z{ z1sOlbn@e?OeD6!&>d-zO2r2?=Gb~0lI?@2$E)JavBMbF`*mJS!gIz2wM*wq2|zdd_E5+t^E~;?zKiz5Aff(VdL;UjmcOR>m3&7yZNi0@G%Cu=7aygV&`VE 
zGJFNtJamk#C6 z(8hcq^E5o~r!=zS8;P~%jJ4M~wL_zxfo8`1ege-tb7d6V+6%OU`MeR^HyBRrMDxRF zF(6nkyYsY*9zrMlnVmEoT+9&e&72nl_U0ud2Qz^&>A84Ln_--_RRG?n{Xi%ZhfT|Z z!k?nSH{g+bWGfi01UPvnu^Ck(pz9An_znLv8GRlw&Y{oA^b@N1vlC6s56@qRw(;25 z)3kdF`ch&y}|pV>~#vW4u97q&QD{LiOZ8*JAkVoVE!a z70N8$Ps&1rbo5~wkXzer9#qQ)-bwUsRFPR04bZcB$VMd~uM8(*kkpf4-WZy8M@~C~ z%Q_%yOW(Nyz9nKAQi1R?eEEUVKZ9RZru1A>XV4YM+i;$7jaLsj(Fb`Qj<}m%8V-2| zqB#I>^QmO8*Qv+uJX&%DJ#jQE3B)2diLARzf@umEcuH|J+W8n%Hq*zQis4w1?^xe1 zqQ3=b%?7wtkn1&>t9)O`0)|h)*VErFqSx*djDl+JD~tx)myqSrz`X!@tAedE=0gv% z^?b6C*&9%=50d;mw%7Al60tZ>^WW(4TVUXt%?gV{dBO_fzC~348h2_-2CGtQVtL}y zW~jLqSV{t!r<`{Qr13C3wr;Se_ZbslcHUO-FM~Wr1sH6D3g5!%ThOi{x-bo#jdWg& z>>Oe(>5l$wsMn7*M=7jPGJRi!Udxf-vRtv!_^;4&37FObe>0O@@q0UXb|$*=Exo)=|8LM@AskxB9FUt;`iJn>So~*^Lu*^* z!0KA@pe{JI14lEW+B2@cztO??`kTMy`?Jt36t$&~um#*1$8$TlHWVGRYNDCSfB9fK z@ToJ@HlNhUJ%5H{gY`_Bk!_?yY5r#*^96u-2YM&X7c##*%Gh~nWe)dv=(>ZaJkdBd zX!%dIXEWFh_ScbpCyO6*6Y5O|;hAK-IWDaQ@SfJW+xFkHX`e^!^aE84732 z6g4lrAotyeYs_b(4n2F>5pw)_H;zvaqTvey%EUnxGZ(!YifI%+r7rYi*0B)AF;?hG zVW=0Xbl8DAUaJJ%{r^`?q^*ZC1H?hY#$5Nz=XOC0TZMdN-Z9(UXvL66%x`)sLw-AJ zP}=&xJ~2NTVOT79%Ki!m{Te4~5BBshlr6-Ha~bx+c!rzEuC?xVL-&`E-S>d19&O^W zJaq$I@br22?Mh%nUP9vCllL9QdVuR#6-hjMX?sR&5!Yp`g+`>FoybLr0xJC z*MOoK!OG7V`nG7knGBvz<|!qqKv4;qF$y}J9*r?u6Hw9}Z{J0%%b0=9y#yp%ppMmr z)-k8~wl|-r=V2F`;cW?n-ex{`3;H%AE|a!a2l0%6XXw?MQIo(U60D2xw?D9%uQHw| z%$D$M#}>>PR#Gb$Sh5;trPa}D_lIu)-5KnHHCXg35YbAPD@ct&&2DE+<2*C*(Sku2^-yGAMp=F0hUWT_L9H4WJt$us(=tN{`Y ztSg|WUIyRrlslu9eq^*SgV}T&@Rr1~?FEJiu)P9q6VQiINX5ZmJz!R}(ra$oZFJ1@ zSgfk#d1%H%)e0au%h;#T0^cT`r$-?FFMxd-YnWp2bsI8f4WKWfggFXxu+#c3%~{eP z?l~QvxtvAc?m!#><9DE-Suz9BL3e8Z09(&*_zt`AF}Ov;L!k(e{7nHDBhHMteV>`GKTw(r8Nv4}jMNqw zjLTrjVOZ^IFl9Z&EFBxqD_+&$+anS5CeJ3O7?w+oujjPD_pp&_A zyU~LrXdgOVaw4`Q87}>T47>p?d{aLMX9qI&c~-l(fb4d_*$O~B1uQ;ima_to@N9ts zTll#O7YAdB*99xlWyp`wpWDG{2)Lf0r%QqCc7yLnq0|U^*@7gDWaNFwpXcR`M;prm zrBSB^xz`Py7z1>U;Q25Q0n?{&;!a>k&2Kl$XbB_lgA$Lx|1-d+tRxRJYG3TIadDxDuw6hA&v>1HJP~Nm1Mdw++D%(C`HfPIKvMPsYc@Sc 
zp?%k}iq)a6akGm8E1kms+0b${kUc_6D^O^2%wsHr4s4Fx!Z<9$ zDq4I54cGJhemM9GzxBXKh(-pqodN|5!K3v^dQI^79ee2syq==fDX>jO!Wn^_5?IWA zto@wvv*E%T>`6E}_%kv(m9g4Gf!~4ObGeNlcL&glu8a9^&Gz;@ z@LYh)lY!Gc3Fp@=sOagOO?bv0wR0VTClaWtfLR}YQkkPif^{sGN8dm}F!20=$>5v^ zIqATtHR#E>C-W1&L!w7uBg}$KLO*@~bd_=|uwnW{Mg!O0;M)^W>SrU=2Y}ms<}EvCv^OG&+GEH$+>C zKcwbky^rcONisna-1-6o4Dt$JvvR@4C zS3~`!^wyakT<>CSEYIlWF(27VbDM=-pG}G zV5A0Zp|5f1VFr*~=9-zx+puV}kf`)n` z&=YF=pm$2tHt1gfina&RPrzJF-NmS82!8U1`0xd|8710} zrR2G}8LX~+%i4yO@O1N7u32N>y|c#y37Ew9b3FM8+ThN)nY`8{Hg7u| zY8Qi=N0103FWN(Qcjf!>Z8t3=;7(Pjp(Q(vu9XZp>3KQE=Iwy<#-@iukI$h)air3d z*$(l^lW&%wt?o5{#2EYOaTZiP4R@`Xs&8D6WFivxEwmjB6xLYx1&Lo6+>JxZjBwNY;kmf~;hxn3tMO|Nqj{?OfSv*$ zy+=d2mIJNOTc>yKEv^+n`@aLzqS%3V>17^m%wqQxwtW1oNBgWQXcSTdp7R~Xv&nOz z;pS=9rS%CkvnKcRAb;A;c5rVk+_z>@W7;Rd_Y=&W->~-8o6?Xm_1-i?x6I9p!om&Z z*@fs<9%OSGS4uNdW1!p2|NFqmYT>hxf*k1C3}h`gHo7<8Y5;k6;7JCy{797a$^dS? zYWC5JtBsL`9DmZyx=vn?MJb4IAPSfTDP<6)6H%DIH zVr^;FBHJ42__yF0YY1lW8-@(z1+$-neikwhzYSJ;6>~8A>0OKCSAOk46E^d70oqQa zO=a#61;&MptG_S@@|+Xh`8V{N$9siQVtOt;accoE--Eu>fhz{wE<=TEq|l1t&mnj2 z^L9hpqmk22Sf+hw!+0!(kqTO!mPqRp$i@aV$?9mHK)ivOqX`tb40l3tH)^w0c^07w z?_uTJ(vLer&-1iBYWLhvenn9{I}o6(9!ye&H1F)J0F%WC;Ujmf@;ak=5-gjCG<1y z?f|yQnt`4GxE1Mg&G#N{Li^xfK)-ZaJ%OYgfO8K+QQtFLK<$h4-I6EA@Z24^ZbWi_ zsC5S|Fz?h;eCr}Plb~ZF7GpIwuoITts`C4>kvo|`j3?ZUZ|zgoZpMvOr!?D}Ee;Q%61PNKsHS2l30bI@Dx$m7L`JID)bYV0zu75(pzl2V%MIxb~ zYfhu@Qoty5Zo?U_nQwKC_xh$lYHY!0&`tl*O?s^q%mj}18?$98Al}E5`+>g)ZJ(k4 z9kl;9^c{fRb1iioxf;t%d>c(ZNz3oRK#x%cD0&UJ%=Fm6D1%wsx)Rju_&eNbLYu9~ zxfz-*SnVbPy%}*rZdB53sNwsI`PNBT#)SQEbKsr_GEoe7wa)%)-VMT(+9S&}F_HO4+Eej#O@>{~*{ zuIyVfsb4XXP%`#L4aU|qN=8Xx)EK0pY=i7$EqlcEem>7P@Bjb4*IYC6{XWlm&bjw< zp8MSQeXOa}p84MMAdD9^XU`LpH=_%mL-TOjETSjZY({1bhJrni4(rpjK%UJ~PlUhb z&Rylx`>gPqVS}us+6dm92703xJU=&vCx;_-8=<^4H1(iN2FrTfJHe;j^l03N*+z$; zf|=g!X=lWPD`rnH^;DK=Jg>h)KUYs6dI(z2;XVFIxT9PrA-SGx^c?og9#$f6HIM{7 z+NFWC08(##yTNFQxjo)(#@JK1VudEv?E6CeW+Fb5=z~D)Z=YyZc*pQNFf@fK@p*P<@%rVpp zGY=jf3dR})%&vW{X5^VUzUlaebOdUdZC{(Q)&P(1!sDxx`4MA 
z=N9+C=b9B_AHtnQ=wWCqm*OxozTNQm<#mh`L|}LheJyxZWR~}LFb-F;z}b=?nxg- z4@NPTrw)%m6ZHKw2ZDci_AO|$4vlQYbDozumDSBMU^zoiX0;E53-0Zt@HZ7}as_+n z2^|jy(yVW5K6r)V=DrE~(UUt5iMarz57E2z?T)gR{hr?Ra_;8ohma|G86G@oHnlO& zZ(=b+H6Cujbzxq^6IuLqZ$9MPU5gx8WlHN@mtOUC{tl***x$=gyC$ote&|&KxO|1C z`4(z@+ik$sk|&M$^9=YbEZ0q_Z=|^1loC+I`T*;owR=+T@>EBzI{(1PkeA4D4cP-V1tb=kzgGV2r6qZv@&}6IwsS{g$j=JmcINYp#==M;c-e)QlB8 zae{su@XQo!OnxM^6FB-Vr4D~bPsNe3@1Rm>FMk0xxfoU^4NJi8Dl_^-ev4tp%UVeL4-7e+5g= zcW9547#&lAzgE}RlhF{H_Xh7op6;YH9|xx9^kEd|7=CBIwCX~WfJfoT&~Hdk3iKU-q(y?&lh9LY4~G9OSYKFQ z$9Z8F@V5ZkrnC-+dq%_PIS*wAegz*F!j+xiYE08>v@(iEU&(twXg0jNiOFz9KTQkk9VW**bYWnJtgekEb?T zy=@45FkWafusE|?4aADfRx?=wRLMNkjFDrZp)vH%y#08tJXmi->gxjKSm>c=Z4=OV zs*CZ|*+|iRMr@4?dp2tqr009+*BQxd%`A|?`1&8sqhHEf$^=>%vvi5KzV-B17is;O zIk5wnE&|sKY(!-~S4Rt-rOm?30kvoFTzB@XJ_W)E`h1uzv1Bhma}Gb`N_y%#>a7iCVjwes9G zYKO&{15}=}HxKH+3zf=3@4dVe$I*u|(MIrDx7PiWU-<6Yz;jp+@Lw3KEAC4zd4NLSzHkTJ$*m&^r$D`oK-@d zjj!-#&;~uo#k<9JELlf?fzHz{th{KXpmkJSon3_b>$vv< z8tKWf#gU_)ShFmo?k;@WMpU(+@3)Y+U!iDGXyN+I9o5qC_G_f@AL#upk|VT_!X?jH z_H0fe-X7T2c|kwcY^VrkW|q3|Yn8-Iq^t-w(Ouu>JmtAE#^t>Xto?!1I_i3h3L+CD z7*`K%Hq;HB+nRxm@VzGbJOW)A3*C>?$`g{!*`60zQ)lZPa9^)) zb=tc+-AM1&BDVhf-&~oBzGNYFuH4QA^5QD1PJnx5ETNfJ&Sa@TA#IB?%0wvDlNDV* zq`nP(Tt_oMhbm{0u0l;OF{4{SDL+9(*n+;M@9f%#%5;ow-6&54Pc#~ zztQ7If!0x)qOHHv&RX`L)5=WUGt6>E)Vs=fmDN_r`~L+x`VsA`K$Xw1zxv0zA)C37 zk(KagB{J6*8GI3aN}}g)>E$Uj)BBUKeD^XZRphTRgnuCyMlYo!cPII*e?WgiKAy~p zo%PHLy&Xoq8RPahpN}CK3mJ6-EhaGgy9-qSTjwstA@nGg&tjAoXrtb?P$sALlL#X1BHkj8_o0-wc_86|0rub1e{Q%kBd$_q&Ub4XBO zFv<_*^@TpeC~sh6#r`gMXdcJC@L~qoEJP;qf@^E^DJRss%{a$s(T5&up*i{aJ_Ri@ zQqt;bKO)0_(CeRA`30fwgI=PCX>dU= zQONhChpG_Jc}Anzr(tMKUMOk|&v8|H$bq!$T`ozhT;TdP+WR=t6dPc26-vH> z&gCtbb{9UAa$1Dy2~n^M@ix9B$)n(z}E;JL~R(PSeql`GG2F~TwA6*I=v`hPvS z^ByCF=r9ExzdTh(lS!O2{4mUL!smlW2Tp z$n)2R{~>RnF^TfqRf|^3Gv8-&m2ROc!7Tb_G;|)FksVXBFU25%!>cPxu zY{f@d{rzZz|BnJo9c0*baz$YI3uybI@0Z~2x6r&K^0g8uj)8-398J)v64-F_bFYBI zW=1nUQ6EJ(v&=f^aflhi*~nV4pP`j{B=uM?VCHd8F$+5E0P}{tH(7abE%vx6Pq_c- 
zyG$D}`?jmwY~u^~?04f6RJD?&6!tF`VCV(9UyVzeY1wVTHHQTlIiG z!=b3vY<@*@t#Y!PCr6<7<|?knBHw~nT1+E*1Ps@Wf8q)Po*ffZH`*3ecQ_O8e>mXt2X*7TOr#=?E1<)jl%dsMQ;j zXsO@Ac+ZvS!VPBhp-9LH#_9p&`ucnq|Bi1)tv-gtbc4S0kpi<>KY>f0P1_zxIfzdA zj^SH{=X?0va-OOm$hVcoJngSAG;0sU)|8k7AI)Uck8O6qe(2nYevK7shn89asV8%u zQA+Kw=%-j^T7AxX{dLiYj@Zp3U~7(L4Rp5{ExXZ|8CCj!S7JG@z%Spm4npxxTs2S5 z-AyCZeg&KF=~)_EsoSWl^1x#)z!=6cZ}@)b)EE6&%JuC~;0a)>N3Uwe{d^z8sHb_- zbBJ1Fub+dKw`h^d)ool^2xOO$;pV~Gpa;E}iTWONc@7ORlFp2_kNL0nL?4G4u!F!a z3U1G(S0mv(;cg~We;=(jqvRISp(p>LfRb0SV9`AF4=w&-v;|1&`%qYqhI`QufbR_; z@LhL1*i~V)!bm_fdNTrOR{+~%w3&w{7|-mv3!ag45}DnIrSTpQN_Z_8eB~2G`<{*l}=LNjyq# znQ*%-n5EHE9UxkZ{9AWV&1nkPttsc*$$Ti_YCk^`bOznH0Yue#%6RE_>GetEZ4|A| z9$iGcn^1EJ*1$6+j0yIfwNC>oUO@uiMDlks&N}2Y6Kwaxi6`MzCVVf1Z5#k~qrhP- ztvva{?0M^OCt|5@0;BN@f4~EGPTz;a(&<^K8md=Y2t4ay+qz09P}`v_8cSV5%K)W?bz72(e_aN$5GJ1lVB6TRB!c4 z54zvC2({LS>;?+h87Y$EG-X zaS(~fhPqEN{&F;+5|pt%#ofFCRbWM`fri)x zR}*u9$B5#=Na9oUFcb>jKrTiy9}GjQdUM5$gtm-owwiU9Z$b5Up>G0I%0`#|fDVn2 zbS<0v30X*h9z0J5ieSX^fvs5$bPb@hJ3Q;?A&tNK?mX*lJpG&kPER`72s8&6&C_Rm z&)G(ARD)F&bJ7zSRJ8D=zG(e?9fZp`w04V=TXAZQ2fsrJ>zc zGVm<(9$2qm!OruIbKzAn&NG>QtXKCsmQQb9eJIfyYvidNM)JSJbJkCu#M_9wu+zYx zAozK@UOPrMp6viI8Jp=Fdk>z^gZyrX|L)2p&~`lIUT55Ez>$O&ZiTnjwNQQwVl~D9 zcLKfW*Pg}u)LlS53VCpic@d5#g0+<(r=a2P0Gh|}7OR54pnD`Z;;&~$e+XLkgB$bc z+04ZwP<%6^XMu|+rWyxroq$NBQBPVI=suGtJS$sW`x0rhdT~jfb5-Un{tz}l8%<7Q zp56y7j7QbO>K;^YdO6OUni&u7QX3H-2Pbzz??hz#Lug{oUNl-`H1kzfh--l53Rm>Y zSR=ux@NvjeIG^68UvsOCfFB#kkJ@O~cMGm~a^#oLPs?X?S6lcn87dmrHkE7VfyWp? 
zeYG9H!;_8fhElx)J7DgD6}F$Hg&8G-=w~@?zM!S6RcBH2<9hH!XrA1UTrFm#oIu%RCro*oUolF{#YR$$ej)ok!pTg_kXk34^kH692>+k#;)ASna} z70?U)sO`bLIy88O_u(XD#@zJ0wDkGM!T22Q^wdwFH|M{ONR8aS1XYavSp$~W!OXo* zYlnA7o@?^_w}IsRL4VdF9S1h^8OvN?>5&H1laPA%YqRLRF&4n*yMu|D`^w+T;OG8B zD?VMoR;;H_BP=4pzApTE4Y@u7RvqB2XXs?mDw$r*Y+f0xAol=u1#H-As9Bp)mNWWJ zdU9V>&S)ds!16%-_(*gILP*P|E#( z-yds$^E!U4T4>hJWptt)+Ax8qUG+8x3%$uNq6v?1$NF#Ypnc1^5(a(lc>b*2r^lcH zKSuogm%9hJ(hl!}b(*X{St{_3=s{bEcIAK?`k!(HPZ)Xk65riNeTSFsl6Z~badCx`<2Fvcnio}S|n%9Je4x0?JI(e)>7 zJ!@zV{Yaz7(20eCFUAT|->~*C$8(;#>G>9(R-q4R9$c%1Jj~_VecasvpAYl4H318` z6p8s0P5A>Jn7A7Y&aE*8b2^@60#Y z3C~V{5E!gc_yZ&81Nxa!YSH7j(Buc6Gk^F9bUF~g_#v>E^;;EcPvK_+v+ZaoR0BB7 z-L-n&ebB{SF73X)8sj7N!+Ki9D?l3w=Su;B9Bj=qUEo`Np49&|g}+v+ZbVysJNj*Z zfMVsr@K3JzlM}kk;Ytpmi{;5soS~kz(A~F~Nj|2J+ECVNEae&NBC}~-D62NdBp4Fz02N}I1x?pso=TYCr4*7;y9}0_${!P7Io;GZCL1RtxAUj8Z zCXCrrD2@f~%{$%&HFIHa^t$RVv(Dc=w9tQ7knbU%S59PR7FWFM^Zl9{8K;-bn9Uqe z)Jlc1VDbx`yod2V?>7lY=|0up>pGsb^A+ z=FXk!db|E@rN`8(dguxV|HA_L_Nozgg`uWZ_~dXS{u`rLKe!W$$GbC@|J78v9eUP1 zhuqMu8JyQw(w^_;jch{mt@rAR_8H!zJeR^#h))MI^=@c5k-6(Vct0Mya}%i3m^U-I z|2>?li?kWxG6s%EGZQUE16qM!1y=p%ku%S@N}?se3Fez)P>ci=FanZekQby)6^tO!ri z*ZuSz50)da`kqQ)wRdBGGU?ICQD=r&Y++TT=L1%l$Fa=rtLs~Q8OmhyyBPC=t$o!F`|Db(C0EkH zyfjevLx!H``C;HQhF(@d%U_valEF&9>M1mO3@gq^deX~ch3>IX+loJ(p@<$cb0ziL zoCbO;0DO*J-Vg0|)AI+&NCKY3&_Wt zKx@5?c>yNY8Ty5JDC9AVV1#Gj%QaR9oxw>jz-_Rx`rJO~Z`C5}aU4KKj6O0m!Cjl* zz}F0J^BWF8W$XX+g(Ba;9q)fdj~9>vSI4c9P^)R^w`dP!y@1Kn8Xm!`%-Xx?Eff9!9Y~WIbqY^x0Cl0Lhk1C5Y} zf=F>&`0zJa_?A`+IXH>bC9tA94S&tZwzA0q{yxuW1DI!(9d`oVhc>1o4}bf!c3BHV z`kjreZ2@ea7LbL68o9ZR{*+KN#il^(rRa|FNi~t{vhdHfe|u)UTg={lXt9=F^*{}x zhe$qUfmKnc(-BzL26W5>8&`~aRk|Zv+qm;A_ZvZ5-(DKhZVaO&GluyQq1@gLP`NUr zczbS>W(}+c&CVNR*kE<|=ms z`>}lcl{M)^=CUI0^rt5ZZ1e1I_K(P z1^DS(c^Fx12)qTD2|ewj4tg+y_a|46Ro3IlJyM)xax4#}~aK-%&pFRV%Qxj;E1jeStrTr*^?TuHiFk*BT99MRV+Y_tIbco(A2S+j z!MAMic@nB=FPibW8c!KVZ{MEBX>^|TM0P@PZTOqe#EP3%z6j<2=?^e+(`<4hhs?LI 
zl7RES9`U2VVnlXhc-{}JUO}siFz!wCryy1^0&aUgjdAy$v1-)}-*1aS7tfaIj89%VxrBMm(xRl8)bo@lWXs)r+UzHOWLsHe$2>R#AcvzCXl=0Cv~LP$PJ6j+wIKSu|Q5 z&&;vnu3q-vfz;EgJSVpd+%js>w@fnxmE=CG_zE*ZTc}qVdS?Kae)THQtYm=2I_%_3 zo;3ob8GP})m`AWcA7cei22wd0J5hliU+0^(DCR-C@0crIgA3Pyq6_PROek#~uta)L zc8y`mMgQhkR06Y5t@w#(SSKJ^2A28{^m={C6V(|r5v|pe(FmL9Deno`omb!e_&L*CejF5}mbVl=+20`7`{><0w<7fBj@ZLI*A$xGKno``pM}xKX|S5b ze`82J$8s)DCeV5~v~^$9Y)0SM>(Eaal06(c?!clQqGsPX;=Y0=*8j2BKp+(Q5}199887*rE#}LFy{|u*ORomhITi=N^Ix1Iep~A zYQKQ5&3%SR^lk054Crwm+%`k&1aeRi{u-fT)ePr`CV_W1TI2~+=6Y7gFVPTQPKB$Bn73a8noOXd%=a3=K9q5* zpuMY*<1~8moLKi>^*1)d-p24oeh+ZhgLc-IUK((C9#}R2?#58$ATX)78NlrtdI^{x zgDVr!+rd!27dqjI(4N=Z2;DzMf0My_D7?x*?*?*b0Mv-b5^9~zFwY51!{K5ZU~>FK zV4V#&j7Xi&8o4vLCBoY>VDSo6h~fT5)}wzTt5$BaVqyfCMbN4&80%>iFY7s0f$qK0 zOKV=}ZMUMM)%o0A_!}8h7nj3P&zrDPT`jENe)v+Gcg+`|lX369GtER|S3}F9jP^4Y z-+CXOY96u|jhTfvqNQd__J+&GBCba(jPk67OzLM@48JyE`3JzEGt6$8*xQNRiv+5^ zXo7iwlNrf%Z67#jM#Fi&dlu4Epz$0|D?W{&|L#0l4r#G6rCA>}p+rKk;^~dX4uV@r z@S!*Sux7$=IBx}(e5^mLoRR_;YJ!)$^i!D8N7290y6UTSKebdJLxnh~IRwqNwvX@j zMva;QeGXk*iA`}8mlGT(gO}$9`7YKK*?tx-x-a4G$9AM>1rU$mz120H`I+6p%)Ddg z_Hw|R3Z#vBq8XGKL0`?dlSJ=l>0eDL2F#~`+PX^1kd>Cmz$|!Ei@s+8dt+Mv1^2C~ z`Zg=7wX7LK?JooCyJ(Ji0TqJzX&d;ALzl-*%vJ4?QELrnLeFW);9j8X#HZ2d z+k=c$lo+#0SkTANk0M}{3?y^;`v<(u4L)1=G!v9$kcr%!Of9@BzSum{_DvxD>oJ!Q4GpjMa9}}H5nlmp1#8UyMvZ$qGOfi*K;1MdUzH( zvw|oY49Qxw;RUwQ2}KNu%a0Z_tLn39X=XKHXcZv+lnG_x3SJwc-qrVUZsT* ze1G#~L+Eb3(-}N@0t;<)-+!Tr;~Zno$_4@vBqFG zJbR=M+Po4>^{E**{|2-QM~3bV{CBJ1(Ut)3`ivp%m8%BeofkNh>HA@LQwCa_2X;5s zVLH@b39YWel`zJ66t9>kVTCa#C4t{`pwq+M1gJ;zX6bpWKD(cBbI{i=@Yusl--nU) z=31Ta0Hf+T4uc|c{{djr$F>3L>Sc`O%71~%IwaO+^)!~Zc`gs%N-%0}a41J_#tps> zEbcrSCXlGoxo=836W+f|wo@*GQhyHUbA|B?d zEkZf&HIa!HwEB>H*1)trp%H~rp*}R*h5YQ|+XjC1VCMPvu6_99*)X=f^ZibK_@luU zC06{4^Q3Lx`}g}XsC0}`rvzgfsrztnM=yV1=xE&?0hGiG329>MZ8XRXwkw}jnTX6yf%i?aZY%!6ID8mwn*wng_^(|_r} zcD{f_jl`n5yK)ddEkT>i8uLw}DI=W2R&B-px{p|cx8cQs-0uU5hK%(9JhY0HaqhL0Ec5swAiIHe8Uv)piVg;j=FI^O_0vcE438O<&m>bH_hh8JAH;v&v+9(&#xv&E9Rp_NBPhOm7yxR1sce6Lz 
zm<{BOpqs1CdEjX#b|*fYS+^1zTB-LbdN=ZFH*gqhv>7{W^{Xl1ZjOQP@u`eyR=m3d z+ZiFWa;XPwV~{}Kn5`x{7v6+JJ-zSKz`~O-kD!GU=;LQ-I*(uLR;EJjZ0>DBQrdv~ zb?jXSuDH@^i7e_*=mQP2(7!C;am-{S(GxBUgYPxI4@XMPx10vG2f#ny%FUzJvu%vY zMJ(}j1ja^msnd^P4{m}*d1(DB zIJd=;&qOjjQF{dQ;C%XBk9^6$N5Eu9zzuVnOCxW-UC%?)u0R9N^~^%&=kj+ly^f@h z=lSWuY^bJq#*6WpCy{FFjSfX}7K5GH$H~CC71-+{PdjPrTlp7^8j9PBfttRv*lRp= zh!56%?E;Nx3>=1?pMI<(oikKFBF7bW>Vq zfM*1&ojOpm5O_X=WorthBl)J!W-_gfxYgF0fvNYe6*3ly&1wTy&oEMB#%T)8wN?GG z6E~4L>(car+U}5MGS*foBLDlLUp1JY^fD#@Q(vB6$Bd*jeuvd*j)Zik?|s-GS7}Py zHSigRu14c)oW(OMz}wl_@eVSED`8KWID(`XMhiW$c{F`pqz5xex4|jTfoK8@k6^K) z=*_BpR#GgCJU)Y_*9m(08f$X|{up6o%u#b}uB(X_;I$PFS=ZCE;Q!`XcPNaXFptO+ zMb1N&*2|3+C229vJvc2FR}`nMKA4fl|4K{2DrJ@M%u`rZNmtZn8l z$~d5iM(#X|-1kaPXmLkqJ9Eq_+MDlct-@_c#wbQ_0WQVi>pJdg_pDQp8+tTG^Q=1h z3!JJ-j|HKs^$y*KzXasZvj#M$zb^3Hh7Z>Furi|&S7woRq`kh8*=SE0z6;9+-gAyK z6NRej8vkb12dk4!gnr|>`x#?eA*vIARSnGB22wi(x_`@aW+Avg zVFsI#xOXG9?i>AuOz(j^ZQ#>d`dNw2-b8vj(C#=-#KU3Z>|Of}0q*6zZ@voktO@im zkhOq*k3)HDg`7if-4VzDa^s?`5ZsRU$_qTJC&GBuD#)02cN_GM=d*g>7D@AT&hzko z3hicLxefwRZ|3pMK(v&xY5|*Z``Prj0^86H9Ew4&B4BT&u$o{!g*Tsx(5E%B>nV(t znISzBR4Wt%Z{I_o_0XrF$>!Nt0)|FlEw!_-OtDZlB6vqP0&*WxYW_tge6*6kdt65T zT}DUGp<8AUXTymZM#SjJ$kLf9{7`9r!$>Jx|}Y zp_XrLo?LVvdKL#fJ3F9* z@%YA4ZGzGv|9b+^7%62vqM`hM82*?Iy&8UPL$-~EHq*zvgPFj53C%l;thdIN*9|oB zWvqbE?uY+LSh(EC>lu2px?4KjDHB*!y;C2f-NT{CL12*X@6d-?9nFxQOnOa0YVefIfj1_m+{Cl`` z4!x^`%(j9PX6wwwhM9FYn?606>Ic0gu+J$%gQZ?Y#TChu_pLdZc*NfH# zFj~ay$jL}oHKf|ey0Oe*Gng@q#w<*3cM@A@a1 zb;Wl;ZuP*MP}22fOXRaK-zW2QVPv2R&s>5c39f@eFC$oj}i8tpT{$ZC9>;c+@tEs0iG8KD+y zpcKBPGlrgV*QGPTz|)+nyGtJ;&sN(_07kP&_M<;9^ZYp^*0YhLppB;j z4ut2+$&|P<=;Lp2y%v)40dRkgX1Q8^gHdb4!OxJB-q=8EwI@TvchQXwNZ3wh z#<7eNfh@g(M83$g#lUMl&)tKq`4TOgfY;y);7i93Vb!vi(3B!bYbe&LBD!B1h_7Q0 zlCkfHf#^ee%0XMtwfz_G=-Jb?D+MY0|M5X%z(VB%n(PMsS`2xKC zkSC1F%g?&JKkKFXV4olAhaov}Q1~#sy9ku;;Rz{@CcOu|rCIrwf}%^f5{7R^Sc%qS zg#W%Im`Fcb-_qh z_p{1h0oFM}uE*+Le&^CxX((Y0K=Cn-elVX}@f#gbQ!m*ue)WLWMB4Nd`{{1l=po$; zh2H_n&Y(BG*leOS? 
zG5UB5{KDw(6?ku~qY0!8}e7%K@6JKk|ZsFQDd@^U+D`Mk+X(m^s1&1k&oynWPe&!S-bS4I~h1T#JJ|Co|8LCF#nC&}{ncxt8 z>$&xeOyeE?;%X11xENHm#+dh9kJsjY5>k4Qmb0L7e=u(i7c!YydjOFubk7X_8>+s> zb8%qpX_7gBwKcO$Cn(+wXgBeb2_4-9cn-N*21U)Q^c=g{v^dE#8PMP$qZqSfo}u|9 zS?Ev#_RDy<9LyR2(CZ*9ONC&yy8tL2p||sBp)o&0(bPIT11`p6j1$a^$X|O`3Ro&-<=Q>8WeMjgx3|h zTgY`qC@_epeS6x^^IPHZ8g%F!lrTEes+2}NtY>XvE~?R(r=Uh0mgWj8jiJGe{wI8! z4aKu~q9VVpw2Tx;1M7~^BpTc0ighW^oq&f{?9$s}&axSIo|{t{xO|^8%FYu~-77J! z%=%Z^jBOm*CV2KX*KQ-{Pa;>&RDF@Lo#?QhvE#f?_D6EMV1ulKq^Ig6x@m067{>C1 z-54au_k}iaxjl2~eZV&!ZWrSEFz(L5<}QN2=dcG>maB;Tl?S43yc_LeY%Qd#e7#lb z>CbR^4V0(}t&DV5s}2HdA^_ zp-y2qw1{=)5hN~_@kavZM4*$t#)SK3WYx|ZK<+8i2f<8x|1>xr1CA(W;BaUkssmxH zwx5>s%u~?CNQ9ldzed9=tI{}!$nOY#K7-2nm=UyeYx(tr(5JC<&7pxl#-7;5<7nX^ z#vBAyLnq*U!n^Y?0k3X@+g181!<;{YCxqoZK3%UfW8@hhlLI`_mEZE* ziJ{GJw3RPMfW8u3wI0|GDBd5=_$K@u^57ZUp8|JPATXczBd}}3=nJ`019>b4w0XHR z0B#jQE7bNcf!VBTPjRRY#F z3tXQKl%7p-I^e(Fu5&xEx!g7_V*a;l|%I6x-*Gyu9ZH6J{O_FJy`3y|N1w+=E?p@ z-AcwzL<_YuM*DZiqG?%;s2T+Yy1*IzkdfHgg3#t0Ab%AoiUOaf54WcGqgW!V-Zx@R z=b0xzVr5e4VHR@sU@*s)<$7&;DvBL)z4AA*QkS>c{Xn!H>OaOzWt4D1?7h2+<$%sJ zOrF6~c0=N~FiJJ3VU+B%K-Pwt=sjliC%EEzzBU??z$a(DaOhy3znM1ekptI>?u^s} zn|HXgiSddeom$srv|b2@zoq|UP&5UZaeU{qr_eVg+9=aBIGh4M#v#Lc2d$DCiV1rQ z$*4}Na3B~61}XGwru#S`u_DZH_;nR6vQC#7`1*Xb`R3Bj;>MdN^0e8y4-JgLV3;ml&?+1CeQw!#DtujzuN9s=U&6A4Lp~@NXwieW3WLnAj z6TR*PRrL?IF|YlzMo6LgvlU z9*RY6jqV!%c9k~PJiG^f<-~^0K~g5btz}48<6x%Lf24m)U!oo%_Yh-QLEH;``i5fZ z=R-J_6W-njKdjSdjp(=d_BBwwgVx*?jI#r%zom}>VC`;b2G)KGkW|DLJ_9^w=`{!1 z@-%H?_&)}1@e;mOBPD<5rY@Xb8w(B9o4=Dj*x4G;W>R$3~>nDJcG+i8aI z73`h+0ao056e%gmDoel7Ids)(lIC)s=Z;yq?)AEdodazi3XDI|?@f9!hR`hEob1diwgI|F3{uTO?y3 z5Hy4G&YV|hs~>G5l(OZ$Q1dbLA&JB3 ze><|R59=bn&$;xyl)lWmH_}BpXo!T2fQsdzNkil~mbQ8^i_z{ype&D!hCF0?U+d$+ zv&y4!kZt%YWg7%Dw9)A=z={4y#6s?t1JBC*e-zE1ij52hOXV#Lu32Hxc-X1XDlhh? 
z7J8^}@wtFEd9YXd4foKy(TnxDo{1btJ?WryJO@8UAtP~IF9y6#Sm(Kim>1~H1~su1msJgmc~V~iL&!#M;+6V`oyt9pgtOfYyKmt9*$cm7sgQtGtnwjbg`Sw3QUQ>@kl^^ZB6yNTpT>^h&gO(M8 zzYDqM9etOtqa*flr?-m#ep=ErCo{Tn&2jwLK9s>8>c@!M(DUl_J4omX_~5xGW)^lv z?uxUzHIn2W^zTXRP70iP5?fwra+@$T_nG@amD=M(-QIzXwm6bLDR72(0}R@NX>| zr2k|dJy~_w6S$7>)Hv|t8b$nU#ytt&a4b%nD!-nClBPFK8w7JHclwYPrYV z3^`LS3POoUtV%kT`V&?-nZR+9_63oV_mIfvz}y|&_RMw(*x7hyxSh<9p1@#!hf%Lt zJaGk{JkI^!!Qeaa9|$~?fzFk(r`WV*lzwo^bE?mP#r-_1mqZ`4QKH86S=HU_0IRJG z1%^hfe(e7f)+x&MF#7q3HJ<*_!_Yo|(5ef1KZ#lDHrRH>hI{I;{)gc_VU;#H(uJqe zpvqbxFz3A z>D|RuBjF;TXfyb46+QQXjivHjolQWlN6Z2{2|51O0>_Dev%guiu>0&7$sVvNf4=?=%!!HQ-Ca9-ugO>E00a1Mt8R&VQu{PX~( zby&rdz~G57N8o!a_@+la2{~;GjOU<&Z}8tT_IGgFm7Mh<7Q?sCY1aY#jSX7DZ*|6h zAXqz>21|W0+tI1}k&sj*r951l4b&YNGY;DM#%@k$HY0f=#$aSX&tE}SO!_db|7YxM z33}weM;&Y+c})RY9a59z}=|VNocxWj@O`DBJJGy zh{Ue802-}gP3Z4Q$DS{_6?m zIBdq(+&hF+l?d?AUaW@l`JtQpp6b9DXfDkr@h0*t92g7br@;9JtiHp6YBBRcCQu|{ z!P0@lXw$lk@G>h0PcrPqcq^cfSqZ(6k3E4UQlF2|mvYz)&ijVYEzt89kd=x&8IJ60 zr=$4n&Q>(-b~CabdQTGg2s*mwW!%)w^$bmY|cmE}-18j3ds!*lSxI^QFaw4a&L&Eef1 z*vJDsaS3j==ZbZtx6|@|td4Kf##k3eURpzcYeE@sx)*yH4K7jq9H9rZIjyf|=9~57 z>OvLc+Iq4s)3-E=t8amCPF8K<@b(lmiQxZocw7yf^{AcXzcs+M>|Zl)pXJkVXmuOd zWgp zMXz2bbYoetQo4(NGFeOe-kQx*IkBQ&g4;dFyj3O5-do1ELyR7c22@5$wQ`f-rIA_V zgO!~*Gd+RzE8stY%a6#=7G(cz@G%b5lb5#x^E9BT9dKv? zqq~p&J$M_NIR(CoVIO$47p+(eFZD$^lYar$iO^#?{ZC>|JOyi|jvs+)XZU{+4wq() zSA=$+DB{lH@_?^KUuR)`&7um`4_pN#o>tO;wR}eKmOnAjDy#Zg(Ms($Gou&yu0s3H zU^U!Hb!F>WQcod=gR!t{xawZmB=Cx-wLYv&WW$}H&!MsJMS32yfG?94dwHq_5|9Q( zeQPLA|3BPSY5wlY}J_)ajH@!EsKcK9?J=v{}s ziX^+jPr@?jvpr5b_imnsZl3ux33#@1@pr${+D8m4e0WHaAiJidSSzU zrk_Dv@!j3i6dLktE@vq3)l)-$L^HT$G z*oijQ3fk!T$PIL!c^r#|JqPXb1LbzEl;a&F7qI99D$CykK=TIwThiZ4P%S4~^%r)? 
z>W!u0-k)5Xfu!jN(oa+yynX~Hqx{1D{V56etp#XJ#yrTz!(4xwe&djuuYqzM^wX0w zfv57q*^5YG7&z-=Im{|32eR%d%|H2c^aZ&$kMwb)EZ{Y3UnKZ(<$H@AZUVpSAvAZY1S1xY--J7>oEP?Y?2uPXmhW zfyc(&SsPf{$PerxXb)h=#sdFPpb5i@_GI664|=l!{EVKnPUF4Ig>s@GvXdLg{tHk4 zL^g~({Dz+OLrsT+v1mblWIhymcpI%3|Nr^XV`l`SoX7=5C(u5YK7Qu?Q(7xIW?1`# z&>rT$v4lc#n%`vfAqRTe1Pi((xEd8;CnlaCT$}&-{~fa)pYO!r-4lHFyCbMKey$&U z5|%>1)G+vO&UG}fgtCau!EY0^y&K7krIqKJC>cI)^rBCNqB27jADt=GL>Ohz7n7g0)9|G=Bl|H(2skQ!@zJLHqLr? zMylyOtiexLpmoPh@5&8i_IWVs3Kv}O7Y=5UD8{qq{x|g07g#glTRm)LF)WJbrB;G- z?ka}^cQd4?9#^c)W;}cfc4`wE9f3?$V$70IuL!hp7tWoz6zsJt#cvqTNHeRC-3$O!&PFZMcf&;3B1%AHQ!@jTcRxIYSNSW{pa7}nv= zQSi4iL8#J4td~?gQchOm4({wlKTdA6x!5>MoQ&WC|(46kp&d4q(i=jG3eyy ze6tq0S=fc(x#wq32CJ>Wc?e9jT(zLv&H!fpU~`$ZDsi_wP!)ly$J0p@6yN;oL2aeJ}^9132rgO5#_$9ET=vgEM>Ol^3@Am%?>p3sw|RO25lV295W887=dn%%n)SN9<9e#1u2kzdTko; z&d?PqWx&OI_&k)=TOn}M&-onkVYRQT%(Z&{T7ZK&jKs$wt7-pV zU=gisIf{1>-=VMJ#d`>R&0W-={yROD1l!-h+&3g^j;1ohmIcmm?o7oNrGoiHxX=k* zSweq_z;7(cDD*HKP3R6}rJ#&a4=b@dM$Mc+e)M7ZcG(bktrzR;V}_>ZI#?ae_sPxl zz7qL&CaVXZtS@x{x_gR@=PztT654}zG{0RK*_>S~Rb2%4tyqp1=>IZU^ny-9kqiB^ zErWNZQ^-S0o;FG-l=r1BcV*NIQ0i51bzKpUg&e{Nem0lO8uuYfP&v?aql62B{SH9Q-QTn#@xxqT*l^YmulE6?L4xPsMg&%9|A zWH@~q`J!f6EmBXd@jrFY`3vB=7M@-MvMosPG4M~s4yDn?v)DHvIc~L#1J(%aWvlgBQcGgCF0BiFlV_BhVD0DA~Or$}xzv1~f=H9(X+BhWOSE$tw zj?_V`YC%KuK|8=*-_hs7w|D4w3%C!&dSuaKe(0v8ETf-tz?Kzg`XQ*?nf|__Z55#G z3e}uj+&A2d&94rO8O-(%1L;r5eJZ`UE_{K%X~9|~jJKK6&|*4&Js;Z2f1U_x4y5Nz z{)}819c8py475q7Z>tO#^-~TTet^D}um)HM z(2w(u(Q2W5lpS36#GaCjW7OKu=yGppP@7(g(sCc~N5(ZzfX<#YWS&_}U@_uZHyICT zTw*3rn=^V7J{Lj8dNZT{%I8e*_k5bxaL(E^tH3B0P8&V>6nL6{UKH%>(fUV5F(=I| zcr&WwuuZ*?h;UjYfuZ|Z)*CX)OJBoCptu{U8^ia1&>?-R8+bG{|L1{n0hl*IUqhZzPgM93oos<@ zESc7Hw|`%?_Otm3#AI+^D=5Q!Pg z)r-j05@2;jY@X(BD5dAl8jwcEC1HnWv2NG{j?%9gn5)Yl15HCla}C-a3aOjTf%Zc* z<74o#KA-f9fu>WD|36zuJE6Q##Pp0goE= zXgzv&I~ua88vu3OCrAk7q!B!<5samckA$1qJo6Zk9Y?2DVe{4?5r?rvL&4Kob2dF> zBHitSb&OFOHG#zw(^m4|N>DSQ;sV+_V+}=*E;4i1WmcZf3dQ%Mm%Qrnx`e8zzFzaqUhpgR&#eWQHgfpT( zkmK|uJ!0`ol{{t1@ 
zK#!$>^z+2sn^;++2WQcfULn1I?V;XLMl;f>JT$orn8xBk>%*1G!QaqXy%p)d0DX8) zbS(Fhc#|?CM4wL{^0?dg50K@T;dm$6{x2kXo8gy2~C=bPTA;M#<5?#;$BlbXYOn7`Grl5c@q zeLm-)g&r#Xd8Ob@1ME+Ip3mUULav8FdHL;+BkSo5eUrle77zOInmjJaDm|99s-CV5 zLCesYBu1hYhaU?Wc^=$1W48p9z7_Q8vu1ePOCM-cmd^u`r4ndcDXzVajVTb^%@ed5 zip1uIKRy?FN~rRHacQ*m2l^@>w6;G##X_lADCT#c`JZn&;b|;COSoNorqXyb^4ry2#ycxKMXsxf#=tpBl`hZVw(u1|Q z+A?ETqK7Auq;ueVotfv};BBTlJzJUAx;kZ8fusdKAAKR_1v$5T!fba7nw$c*zgV@o zrx}jaZO4{=gVtqXKdsJ|gub<>XR|KcTbm3;ld;~epx2_WA^Zg*Be$CkW>*9eb6z8)HNE$(t)3h{;s3Ts7;pjQ&HJaXkfadyWK0z+54rq0$@ytgtJU5%?jEk^} z+0%?`{Wmk%>M_C;sPqXC4uZl*p!cgheLr*0QYaq>C#-;2fmzve3?5+iv^JZ&b5?{% zhI5|Htv2kVZAql(GIxi;;nUD!3FD4pJ}6E*Bi_fLd0iN19RGi3jNRauioA6Mi%^up zI%Fl9>kS#V5R@CiI?DJj<3z%tcu#ndf~~0(@G#_`F*7Bep9Mg;ld%gUJ8RM6&5Yif zry>~X6V?(988Z~s@2a;Wt&?eW2nbLZjfc{pDUsq(Dqc1Xkm^Tau$}=HvuN#Zcu%Z%BdkiXMhs{Y3c%fe^5-9URy(Fj-kBqdyvQ%Tdp5VWWwxi*-vZIe-JXSyp`V_SI3jNs$ z9%etsL-laz8;9(B`mgq$ISx62Mu8XOD1qFqXjU&)0I)s`gr(ua6nIn` zO^ra`@-f>iz_ztT9y9MM z6+!D9tR#$No)qw3D;#=1E)3MJE{u;HhBV~`QcwM@&F3_zF&!G5;*&Yn#gO{WEA5z_Y>-+iC6&TDP1MKE$Sy&k}dUq?fXwD^c`i^0}+_d3W%Ss-*j%9AVf z3Csc`&qaykxkRwI8eENpY6Izebs%r0|K)J5D|fx1>MN`Yim`e< z#i+*d52eRsp0-+84(K=!=qd-082{yb>DijQ;HOmSN89S?R|#xJZ@AbDdb|2EUv4MV z%Vy5$hRs?B4(7=f2M(ig=Q8(2^5gqvBIE1;{xfJq4BtIL##(nv>C-w2#<5z7U=uWQ zXD1S>*MUA}2pGpU2cBPqW1Z;PO0Vvi8Rz0T6|H&7c$%G9LL;F(iA^hLlzMUQTnoHF zmEg)&=r9bND?-;AjN;5^<&qOXZ1#fZqI3qIN@&|4)_p6Xk=cfIpui#E+>90GPmCSK<9{g@IDJ@>`&SC91rV$_^$jHIyVm;vD(C6NO3w^SpXbXe3`XLJOykK{hf!-&7ssfWZu6i5)gU{mbs|(v zETfk|+LWhTe6~-0KwpBBxx#+VOW&^pTLu($C21CUQ|RJaUy0E0=Kl^b7y%qcax?^{y8^#HjB}$9sDP)JQ>dz(+A-~fBNf(T)hd@m4M%Bl4cY4 zq6g>icz9G7>Lq~JacG_fJo}*3be>#>mOFcVkDggi(6b55D=&z=mPI;;!RdkgR)jYr z;N?V~i9<$qaZO*=Xhybns$MIrF#LoDhpHqL00QF~jnhjAbHyHUBY;Xk6%|+6x1eSL*{*Tsh@)ErBeWD+jM=@Xb0xB^wd!q&X`VOq! 
zKe4XUFs|w4|C&#pQ)z^~ZwuyeMKN=K2L@K3NDkWhZaIm5f1FzMhWO z7r$!>dbSM11K~6Mm6<9TcyG!^!5NV8O-dGd#Qx@8rKRS>%+|y9= zE+ATqL_ZFM=DgPD{|o3LJzQud*Vp171Hd?htq}krrop zW2p!&E5ffI(2G!*F)l@Q8{{efd6lBO&{Z zI*^6F>#6gT9=vzpxEyJQ#Ck%sc?)}?+}ptU9MsGWzP<&!?{ySAvK^apAFz7DsIeYx zv961;G@im~t@wUWqXc%;Xk0DR+d$#7gP@cdj@Cxi&bFkj=iWxb@Ar`y` z3VZh&8u|cM$H=(eJeeJ=W+oxWOOg9g$a#67`-r}4@wYwW4+Ex`>GcY8>SefS1c~q4 zeYv{~`Mkk9hyIg8^l*!r*u2D(=vNmosKV+k9eX%5Sj)Cx6=EHuTWIX_%+=QIs04Qp zFuLzoW+Pl?ZDZY|EGXjcW)1Wqp8M|V>*)#SQ(@-9ByXG?LKWqw( zqQJuwg|o2h5pdCUwVqXdX!iogZfMy8KjbW^doNIZ1@LRFZ zEQ=>{SmDHoEYC??2-oIg%RG0~xIXv4tjjrx*5$c=jK1Q4&(j{afVZB2PEchXwqX+A zizD?f)B9~?dNfj>&S#@Z^c%F~|7o5cir=6Xa1VhC(a@&`{L@outcul)o~NZ}j>LfT zV5Hz8_YVcLuDx~@dw$HtdEI$7qvd#p|=c;Y|V ztOX^PArmF&VKO)rV@Cgg)#Ok#VFEDd&2$5L7J zDc~0e*Uqud9u5`DKw0-#9g)@(Ji87IN5DzXsfo~|J~EO71qRUe3FzcLa5yzq0JpoV z9PA7(@~H=M(hAu=i>z&+Mm?n7+qT;nn=y=jGe!)AU!~A8ds&?vp?z(TS6K0TTHOTQ z;o58^G@T1o^(+|zUWe(U7x))M1L)0?$hTF{(Y3N%oExb50a9)@u>3sLTdopN42PzV zGrCtXUiHE>r{8f}*V^oD@H|LGU&d!8X3Q!`!g1;vkFE=!yt(4Z|2A4NKj2T)szI5^ z=rfK}MEc)rYxYz}2(22}NZ80Lrg{5}th|Fe6@|P9Hd; zSIl+r8w}6$(sN(d*fXiO7wK9@o913W2gE&~t#^tZL>kh;$GDs|kQj5?wSilgm;+|Q z`RAdN`!F-icn&`jcnZXbJ9v+TAv zQ~oSH?Sbpw=1XU+c$-PxseHPx)t07LPCKaD09vK8u1*Ep6SU{(6l*QE?m7p6*&u$a zqjS!H*^{*G&f0v#Qnv_vyj4GxH_~z;IG7u|6KzfbhlW7>B$(N9X1>>8 zw`0DIDe%x6z$uLD1Za}Z+NTHoWiXCrK(AFu=>fjkUUkNjk+d2?vAb!*eR*AY_AxCa zf|ofGJv~VSyBUKgndl6En zMMJNNKjC0jq^Biy%`dh9NjZu%dVfu;LOMV3a7^#=NKRSpQf98GO}|DW$^}k(%6-bn zY|W=vdE1k=wbB=04Z9JkJr6}@B7svFwIiXkes4xG*K4H~6lu$+^VHMcJrJ%BfriOw zMekVdqrUmG^cg+L7&7AId9e8$SbtzW6j?1KQCr_6EfDYXzXsaE8_u5D#UVGz%s;O) zB0Y80+HxG&GSEs!h||uhXGu?>IEQXCC)PN6Y{jhZy=Fb&mh()@t2Y|NSMQK$YF=lI zt$|iOp`=<%YyD>6GkUG-D({As0HOlGUx{aCU+TsCvPhZPH`LA-7^CC(rswBsM$jH) z={iqcHFggp^(br8FT%)~#2noVI(T<4jsAMVpFhyF`rmuTGX(9n9|%3mlD~SFZQ{QX zC%i5H64cWklMg&2`#_`PnRC-y(!WCOz0fud*wc}-89?znJUCAa+3C|b!1~f?4Niua zuQQq!GZJ$_)9#es4?b#;j)7a=f7*_${RRyh(}EF5cYu{v>$*_)S8%z)cW>sdhaze4 zM~_tPVIz^gx9QoaTIQoDSs+@z|Ngcho_Ji?@;J<=r-ccV& 
zeWQs9*)(u;#i4!C=M{kBC!Usqy5CXj6!@%T=X43$O=706f&?1(r7*pIN54Jb#|0=K ziQTXgoLW&f8@09bcs8PqQj6|xsFO^2J#NnYxX&Af?)(y%&=o>_rtps9Nv1Lk*H44S?xQTCyz4|6byn!3 zeaZ+`^Wk(3z8i7uR(LWev{C~2wNiNB@)Dnfvpld(f?|=#R%V~wf;LqXzK#~w1I;*G zSCOy!^c6$v=IQhf=}A^@m-way%2ti6whO4;nOy?w-C&%EY>q?U{()tsF*s_YZRGaj};kMZcCGNJ7bNOLS@zem%U zIm0}`op>^x@>-iBoZAAzggLD$7 zU(w);m??eYPoBUvZ$+E;L#ULHvw=R-Sh;6r1(?W_SN<-YNGVGgOU}Q=`4xw3KxiJ^ zSo(>0l4&Q79;#@dP8g@e~mo-C$t|MM$;nnPaI=24LOYA*PVv91d5>1 z%E5c*g3FXO2cYK#ld%t)X>vMr_P#|PG^?lCo!NQ1o9Ye}y@6^sy0s+SHv`ddq)U$@ zPgZuIFN|o?nzmh8t94yhx1{IZXm3|``O)1?_^ii>*(r^TF@pKky| z&O5U*JPCy(XG7j_`jVEsr=!)y3_>--Y|se}5)C)>OM3#CeqgTD5@$@Qy0jBTd3^`^ zLs@;&YNPSTf`itDZAkNCIP)QN@box?mOM-L#+O#Kd0<@`$`=Ix0{nGlr+<;LATBd| zT!Wr}F)J1dJB2~ic#WA^KREX-o}Igf*S|f1IYen$k9;fu@AABH$I_kB-jF$i#!I7} z2Jm$o8q4V9M%S1EEmPo@x4^VYW&oEbr?bM$W=yns$k+wy#`AeOrAN@uL1^fm=9=L3 zJx}#LO91l&v~Kp~%{=pz)0>{=9juDg^)S$TdYneVIIJsTHdaUNXa^2^%uOTixhPN3uXEe zpFG_+L-2jnxd&XmyZZ{5c>7`}ZJuR?b`5G>Lw?;&rvs68MDIGgYBh@YM0mM?mXE^G z!}K?b`nj<>T;u=2unYeSTJ-#T$0sirK|g)!+tNw_Mq(=8jIA~pE|-PoJE2Ji_2o`w zWX{`&zo9RkWn4?V$ZR@;|1CnB#TdWC;qVTA-4nlxJbVgG^%fikEwy5=1D*r4Olt5m0?9M(;Rd*7q$F+EyIA!( zme)b)cF6Y`C^D44-r~}CMtgo{sG0#}r&!zS7ci7|_1ZbXyN2jPZzk0Ua$c2I)&bRN zwCKxlSr4Y~;MRCJ9r1ifrpJfqd3)%!D|i@V&bweOz$$@Rpck;8Wqr9vku*}9hsD8FY*6(dV7~P+CW2B^Gc$t%xknz3rRe{6Q%1K&{X5U zmb;R){2^GI6Gfl3BGg^T3}YOZ{ph3+T{BX}APWJ$Ki4v5--y=}kOr1vgm+w1h!N_@SDK(hiWab5-5S*s3X+U@$smTpyx1hQa*<0~k%YZQz%7UA| zw?ESMe&o(QoBpklIC$Pn^4!I}@j0+Mi$vAnU2Z6i?+Y`!R_9wt#Ld7>6^oN(MQ)kPk%;fWhAfyedt{g zv4s8y$tnR3UxyjNU0il(qerl@+suk>E{Wlch&fQ|Irw4L@HF}>L#xI~@XT`+REiGT zW(Mt02-dBHSNIq6+ai? 
ze+|y7JI#5L%DDC=!&?X^c!?Vl3(F^*D0&Iyc|Q$dVzI3Jp3A7cwaIFZsdU~11LKesGJ*8 zkmSSAE1oq=FL-jE66QU63rU#`2j_#$YN((!ts`)2+13_c5j@QK<18M_yG8J441bL_ zloaIkYev-@luiJ1^MeclS22H<9vq!z=ua*9BCQQV$2wZnp9g4hE#s^|*zRGLY!3%3 z^WFQ*Mw++<{mmM-h4#9m6AM#g9+;c;sSo8>F#=jbF~>p^I8vH+v@*8^?gVh^PPt-G zJ3BoVL$8>nqB9cyJ+Sp;WaSF9D#e(YObKUqb1h7uXQPs6hw=u>-K?y&gsA}r)7v)s zH#VpiurGk`YaltssMTs!4?f3(e|NA`-nxK~u_?^C@)i(i$FIw0S6Hdw?7Gmi)_GVJ z9BaFgw#xykPgomoqm(vO?~Qqjb7+7_uT1kCdV~G|W6Jzm*LkmRjCxnky07W|7~J;G z;t62t9`u&0T;swR7uCqgwZXX_l=y(Y3;(^w9*X35qqW*#RFjdA6mYyrd)l0xfxAHE zb7)~b$*<7s1h}-6U0VkDbYZ4=l{ziy(U`=ySU;>sx37avQ)#g%G#N@Mb6FRn=hMu1 z?t6@m^BXd<6v;NuX$qe{LH@TweNR3;eSZh(Fh6hxpQodvyvwVN(3oUKGxG+!e(@=k zI0{Ec(RO=Az-&G>hU)qXs4+dqc0J@tm-lN22U*(=r8`2wLp=F|mdhgVt(hrmuwHT; z=%JOH8NVm8kpd)jS@Hch%!6hhE6uaT=*dFNy~(sY4v5Y||NGHL6QPHe%)zvNn`dQc zuO9f;0*@6y^dBgBfj7#Z9IgYV#$ve$4~jxNZKEF`aS8nDwK$Ts@)BreGy!i7e~0|E zV)p$IxjPT$6`PH{3EIr8)TCj&hl3H6L?daU{pPZ@=>QRqACz zzO@k5hyUNBwZ7n!wj;gVj$wnU28_?pLnJ$AULeZ^jF)NQDxCb5lIci;xd$>KhsNKs z^)1iOVn+X-nr+}*QMmC6yPBtg+uX7B(50E-&)v}R2JjVRp7@9#b9+^%NBwsu1E<*y z?&4cVN_f*g3u}XjKV1y4O{G=?o}Q$ABgM@}mzWzh8}N;x1@9^92j)*Rq(f^9O7>*^|iarN9&#fx)?rp;feR|<^)cB1mqp)D=*a28|*DO z6SB$cX57eqQ8t9W-Nps!EQ9vtxg|eodrIu4Q+h`gvKk= zE?6;0Q(nf&!@M<*X+iMN6YK$2pT{WIkI#?tYiiB`a|aJd#-y96Y4(B2i1!OWb790cPzaNAW#G^4&! 
z(Dwz*iDW!f@x<#I7_LNH8*ykmbCL0J%!;H1 zZWH}?VHWAhTvL|ch{uCDb&Q!%6j^Hq_U7Y`VVuNM*XUjs(KF=&o!#|L8|l#;&U>N6ZdM5o0&fLM`9%M7*W7XZcei^BnXbkf(8zRynQw2QE6gz9x$zJ1 z^g;B+kI0>|^j086J;1^ofJO#u0_3i7%7>cWz}no8bAYiA()KDXu7{4spqh=$xbDmk z?*E2fbtpN3x9frYHvE_Y-44S2h44V{f&;YiE%G;sPY+S20sZQ+_AUFxMaaV@=;2M5 z@nHWGW%Z7|fJ}^}jYH7HynItw?Hz)$Em<8mLZZzmU6(S&Sku_XGUzmb_WnfH^nn}( z?&dBqQlK&5;(@v*tDn!I?c2!F4m4dBxT)v+b|_T}n9M|Ih9~bE=GSv?VdZ2mC)90PS>x|~i;I9XfcY_N+&`5bT6InsHR*-Uq zp`~%Ojr?NVFC%*Qr>dPwGXcu%1DnaKtI;a-pq?OIf%QDHP#v9jlsaj7q)`>)%=0sliClH$!{s03<=G9|hx5Kwp_Yn*(7E zuv2!ukGm8~#L!w2^fRx+2Ka6K?F(@8HWK3Q#5{KH)235@9WZuA&KojE>;cZ9K-7i# zK_5P??LFX8W9Vy^Z1;6d=}|uK9?VkG|k2ViCtMf10g;q&iE!+zlM%%?jP zG$X3;8!1aKn@8xk5^Fkd5B-dUrtoGV@XSL0d77n{fp;W(LDw2!ti8s33R#23J&hhT zZqPn7x)xORgQtRf7`^+Tkyby~3-R#Jxh@sG(HIKXr!8|9ETPnFxULtUx0)KGLA98h zX?-N{=&|Naw|m)P={edBE%gH5>%l8;IC&aX82oqBiYGLgp+;+D@K(?~@558=H+tph z`>P*VJfG@7@fGkzt62skygyLt31emz&+rZ-O&h>#HLz3zFJt>GnQ6cIir=o3_I8oECG_V}$NT|x7gFK~ zCCs9%wJe=>jA3O&18s|Kfln=802wzM@OVbR7HI0->bIap7obT&=5oOWBT7974eP-# zGjc>S-Fk|$jh#>>N_YcH`@>%F833JEf}JCG4)mFchU`RpW-cwk7#s_27r`y{_(G(_ z8@)+z%M**vv{oE^Gt+V-c7#XJeM0FuKru$O9G(q@oKf{KGP_0!xbz2`y|mexUi9D1 z#M|Cb%k$O}K#{>{{uwPhoB3%6E8(3`&|8A~$-M{mUowhw(e^!1YXBV5D&&pHf5GFL zK+~65E*@&?(N&9>P%6I|E!wg@=)!J9Os%MA8A*p28%Rhlvp8=yBM2Qu55E$Sk@eL{U# zVz-eN$C44b#)5wzKKDU#^>*q3{0oBiyTT~h7qt5kBx@?NpB;?PQ$kOy*O7C-^`g!n ztSHt&Z*M88i+TXXKzQ^3*qp=CxB)3jLuN~|R&i~67~K0HQ^t60&HVWktyM*0hO^%4 z4|iNsu1D|a-{~EK&dhBo)bRYx)ypsRtlxNv=PNLyYi0G^@fJPRW^8rki5@F+k)Z=f z?{voQTkM#|pr=nk*=+E=2lG>B;CO}SuRz_hNI*WMun4312xX!e4WprlcKJ)-9mDs6 ze14Djo--?rW%=yQIemXVLW4ifkGseY(CRMUcR}laOs@s0Ym`FuWo~|QB2Pw**Dhcr zFnwwNqE-EXv{Gohh*(43gj+)5{`*PzQk?PlHRHD;67~@@q>+ds`@bmQcgN@LtNHx8 zk4+4p%_XQmQpD%Tn?_4`u78vFyD}rOiL~l7@2VZ-lO9iY)GrFP^olY5 z*a#?URI&$oYQ*r1wA6&v-bdhVZiP&=5DUeALVvVp4BiXo6_9k-$t#1^#;95P<19t? 
z^jUh1F<%ghn2Y5)Jd9-yGOvMW!u4nQkToB6AEn*Hg&Y!8K#`j$+N)iakymy6hJ)tOVZr7#NMu)0BIeHKO^^(?hHH_&}$F%wnH2 zVrH|>p2fPqEOfffGczfgyJtVE3~emS7&~=Yp%{zdJma_oI`j^!jhSfS9>~V?$hgtG zk1=1=2J-D_!LsPQbz$`F1@3Lg+&QT3eDDL}q1G9tyjtomT}u>qZ8IpdlYk~5hI?4%I?68>tmFc2jHY@pJ=4C z9DT(Du{+00lruWir%*m}lHMQajVfjI7X5x2KoM8!xdOgM(9T9n-d9)$-Uq?%4mFLt zl>`L6Xxk_$#@J~Jm$g#3`kujxWg{5W2et|Pnlo%2GPDNjcIAI9Rv|-p_6Kw}*IP1F zFv55}_;!}{`nl&fGNTWmm)GG-OR(L?*xLt`4f)kms72tEr^;WUr>oJbkvcr-^?qgo zbK_Rvb|vyPlJz(;qgS%D`I6edFq@ju&Q*wQEJ4PNDPNM2a0Ci{9#*Sgq2puVOHqL<_W;=nU5nL>YwCP#m&7xvpkOP@r z5A50<+tRyx=Fz-2=3EKhpW*EWXt)ykc`xEM-&7TSPyZPqjI(A!SN!#rwziG)SgsrnDOvi3g7S>)2) z=m7r4hZVK=-u14m481B-Vjm+(--=7X=BcaqbK`*NC_FUsxH8g;m7h@}ymRvd*v&)o z+>5m1?{AcRjIxJmqZEJ7Fz(`*H?>sEq>nW~;=RFlp_g|i>hWX5`WaQu*u(<$7 z$Me?vDDIOISQ|&;p7w$IRlqEbQrejGXxFZ|hZeu0-5bzKP32DgSv1~SD7zr6(A`xZ zhBH^`=>c%8O55&g^~sPf4bfQL&;{#(r9Hem0EWvMv))ZG{_q3n*yUiW_k{PUjXbiO z_ug!K5G>8yQ3m|=XERER*7kY8sQ1qe%9$(6C@$J+*TVfcW?H=)Hp00SO3kBxeNcx0 zOJ8JaB{W@tgdTu*-fGqx)?I{NQsyr!iQc&mHQY6xMstk;(#p`~e#XryD5_o3JK>Fy zPVa@T0%qfOe*hF_i#rM&{lTOdt#1Q1Em~LM`3>+;hkXGo#z@$~xM>IloE7>4`8Z_o zOQiErB>W*H&dgU`(B-cO`@wp~XJ2rfO>bsxd>R~#No59ypywgv}=t3)@=^pT2 zOmgxoGw17)NN7pkc*8o9SFQo$W-D!Y&i^s=jRVKBKsgRfG697(|4k420BG4N z4DOyM#v{Q-Kc9&XFh)u?e)l24M*82(8_zyAFw?ID>UZI&)^zjdMIs3=rnJ^Q*FSQu zKX^BQ<1bOtvu^J#ya1Qt`H2RG2EgEs=OD0qGov6pR^xO;CX`(BRjh`7!&wqE@;cPgXRad>*(y-4D>H?=E2AnGK&td} zNTYrvZfQFGmF2rxGHyX#{XM!OMHP{}1n@MzTQ4YNBu^tnn_vAio_habA9X&4D*K@P zzyM)A>L;M7-=xnbK%5^~Q|Q|{()-bAj4@BHUSvEJ0>0HyDG9t6!m0jHK9Lf7B^h_= zBCy7TQ5t%Ae~_iVl=n1b7UR>ns@@x`L?6c6uEE;hSw0?!PKL2~gnstJuR&1qY4pt{ zq(U2RCEC8pxL(AYMM0K&0{@?^XWv9C?*m7(jIM!G+BiSuzgEp{@Sz%SUIAWjmaavT zHn9@;0zNF^TO^*{YGh_Nn!yirYZb0(%%`h zw>NEnhZ+~@-P|DOpbt#zY$zZhni17mj~g;0pO2A0hC09?2c zcsMi6)bkki+t5Tt2zm!-%QD)$@zk6V=IAsI^;>A9F=(P3K=C~(F&(@)FjGSE+s>IORzbN}6l+DAuWz=eYFG?T@li^Zv zT6E_-0-UD9*R_21-i$k9y&4=Hd7;W^q+AQqS}^qd$(tlbAzTeKIe=*vJ$Nt4tZ$j% zqKrd8J#^3qbm|ADl>3P)kl!0UO=FIAV1W|!6-6v(KIMFogZ&Douua; 
z{C}AL^6v^-(fdl7(Qn=$I!S$fG@8M=p_I_`Mk|T9y3dc|CleU>U(12_ZRgSUW_S>J zW@K*tIwF1)IpOWe@J4%w8T*E@&z=uW^+S-#TNn|2;kA@48cOO3RmfCNiVWX%U3-nWIQ$7?$viuaWT|B@iEj!I%AC*wTK$03y~n6CpU*9{&k)8$4|*9yDPkI4&U-YywT>C>`t^WFR7_mky z^H3v>@!)NXMu7tr!DtpO_NQzUK968V*TbR^IHxl<^>NTw-aY;(c%B`~PiHh}NA_M( z=|G`}pr)Cr-{*<91B_5>go`tjn+aqifVTnCk`8ue`t{^1ID`R%n{INFZB+><$gT9MQ=OdM-4Q-c_W7* zbc`OZn}B{U*!=>&`;i)>S2qqEGz*%!;+N2$cl_K* zZ-nYiSmk|yBoCpNpIOxoVy5{JJ#dy1_B0LoGG=fiIN@pTS4f>%TYsk&bM~6ya4K)B zuv0VplJ`Qkq0R1wGR=YU0%cB7uM%)Hhb9Z5`X=@%M)tUuy15yv`jqd1>f0H$T8PYd z`7D~aH}L6s;0^ckU~h!5%0O8UzPCc2^e1|sHeBg=mt-Fp72xf4)=hb7VHqnxtwYAk zH7nP2+FTZ>k(aUQTB|XXO9AplP;{{9 zaL@e7F97#!D7G?;+>hbOW~jWFryHU2IOIaBWpgMq37WP7ik7sd5467Bw^LqHl1rGC6<>7xk^6?_&UHu!$q<4^oqtr4Z_jT4@`ob*(S}nxafP50N zZY=h#w0;S?Yg4L>+;yfc_XtB6MH`^5IemR{jJREekoo zcn{o<@XY*(#z{YlROvBv8%$TjmlCwIls1gC?u?;Lr4TagEflS#=y%0(76Mjzk;gjKrL6N z8MpVM8H^c}o7GZj)_mQ;VFx_QN?GH397A_`qj3RmW`V&f=-r zO`ntCaXKZmF6b-tJ$#A|GC7%f+T1>q;7v6sa|ap@V!WLWquTp?o=zu2{qI@*4}seG z`Ti)U0)PmiORmzF^*^J^VP-y{INwj^x=BdDdl;~bRl102TQ*vsfN znp+3j9?Dzun|V*}5%}s%sAb_a?U?`2sNk!>*z*eS=PaX-Nz`-48b#fBN_gwhoN*g? 
zo5Y)njB#_SNr~R{?S9M*QTpEZW<+~Gp$~JX8B4v3;Hj>%GMK+coYK4SCnS9)QrtN7 z*Z>%sK(FD9Og%YIF)~iWiSCU1RluOH(4VZ_2J_>PMp8Xj9G$-sZF@%&fc#mBI3syuH-)jHpMkf#1~ax=^F+T&vt#^%{Iq9m z>+6<+dNC#*=IKDNke{B~=zHZk#8f1%BedSc=+p!0Pb9|u&qF}tUCDh= z{TQ4c4uoUTt;f-#gOP0cTL~({uh9W-Z_0j&PBo+NH6+%&2BX-$n4PmX+!;%M#%XQ{WZHRNguY4eMn4)YLf+}R z!TXPSI}*9s&6{Mfz6S5N1CdcTi!fIh$H4nn#gNnG(5WoktO5Ou`|gH^kx^BPx&I0eS>~-@~%I6r#-q{?-2J{`RL(&;C1&tfuF44 zvNr4$cJWu6!b-mDE3yF&3(-WLx!-hWHxkJx1>KiWYd;+FG~3;pb_#b{Wmpy2&f8#L z0*NrXl#zGdW^|8+ioGa1jaGJoi#KqLcD96e-k^PVd8a}@wp558d-bkI8!#+|oR^`uSx8PT{)_7a0gFUNOAWr| z;Qbu*Q4t`z4qm0OrFfgoxi1g!)<*jnqgNl6OmHqcay%AHyxma%elW z-H>t5{mbwzGm@fhwH)tqLjUTt>RJBX{ME`aF2Ex-C-9^L`Z|htw~#HhVJ_scJS}Bm zOxFh@Z9C6^b7U887N=<7@%~#ubnG!o8VOrlz-jR50Q6IMqh~-O800}>x*;2vp~Az# zYLFM4?uJXo6*huEW$J5DmjxD^&#F&m^Ria;q_nP`MW{X0< zQk2oEk@My(+3>B;hkrs-_w6>##l2(n^vs~!Rn=k zof%qk)x8eOt&wkUflX6pnV;d!TG}kf6Ym{nM}Kc+9chH>gN&n1)M>*=I0sG5jxE-~S0x=zd=-%co&3 zG{XBDAaabPg*Emtz8jAx9nJeK67Ucd_eQ~T#@bj~S_RDJ`|>`&XBp#IXV+&9;|Y8f zDC@d?7yb93WHfm1L*lZr26zX~cz!V$=o_#*dY{?QJyU0Va8g(^8ke;T-#5^DV{o4W zUFt9zuA$r820AvT1tY>9LIRA|`!KkarM1P(qCe4QCED2zgxc%17`#XC#-;v}@npO& zZ&}=?Ei)6ZqYph_W;24<0lVjI3(z%2+3>{kRVZHw3E2eCBUwq~uq$cBx<=0_P{#2* z6YRIqeru#l&&e3HUQgPoi>~>O`Rho~Bkr$e1JxY*G*X|Qb~%uZreLCn_#{4+1L9lY z>+LH2Zcl^blfXI%+8S9=-=QDi#Spms6;D#>Uu(ge%mJSI)~9z*p%(JL4VXF#+)0e2 z5BDKv5BD}meOBMSnblqa%QCcchPnr+pMSuI3^a}sb*M2G&gfm^$+MY2(&6zqpfQ*5Eu=S+ zIl_pt?k=aopGnjKL-oCQzyNFRd!@x<{Dq--~| zp9yr{$1r-!ETDfCS~Nn-coS zeR~&fJmabhv}pv4dNogF3@e%G{QZq~J^wr()_5b}h&K|3Fe*ww{imRYUf4!f`vnSl zpC*-W5wC>(yzL8}_rrTvu!X?edu-mk_zDa+G4pwwq%{yF(^^4RCWoQPSI|o>H;R#? 
zCb>@UMy+s<_ZZ{6DfQRGJNHK0p!M@`>jtv#sJH{&FVO17pr70gsi$5B!~96i3|gxL zgrz8Z8qMgAI~jR82IbsC>t#|CTBsLariX8#j^~Jzfk?YU4|>+ur7?236FqqYPRypa zbv%nh2d@adnQu)`w_kW}9M4)o(hs6P^;3(31IFKqXD)aRnbJlnjC$wfLe84Qukx($ z%r_GUO^h#L9*k%CIY^HQ(5DPi{VMX@28!xmkO~aulv_q=vj-o7E<|Iw@|euBrJqm5?K+Em8&_t1Y5tA=EYs|RT zT4odr_f@r!W<99d@Z8hqvuIs8;_T}>ei`K&A&bZ9aW&8D!>LtBNg;S`wB~1lv<8&$ z4Es3vFNa>ggr3aOUJ4qUapDFVaS=2q8DKSA**ah{N1GlyH_~)cm6p5Q&iRV@S9^sL@Y{m@t*5qL*JQB`(r_G2bD_PBS=V>%6wO5hE zuIPzW`X~!$mcfO-@I=2Q{hT&2&R<84j0&*>eE&;3F~GT)HMyP?o&jBeI-U>cMYj`q z(z^aTW9oY-e+TT{_qPJNvXp(8IwdKmCD@2#Ehs+$nbMM`9-YmK;%&a&4_3pF0%!f{ za8w&ibIPtm&RRhGcj1Y?Qu;Vt;+?)tSF!Y%v$i^I=SNmHph>m<`o?%z6M)(iGwmvR z03Jf7ouiHDrw@d8UH@do6Bz-WX`>nv=xrmt%KikyB=qVv`0we(M)Z%`VmDlD4osJM ze+q7SqpdY1v<5B#ALBpoM8ZAa8G$ZX2>rCb&w&Ttoii%#NNAy-#!+Y!1HYr0#UEpI zYJW3QnY;LypnbfpV-&?s^fQ!M#JDEPZ=;j0yjQ`z7zSp zjM0^ZH0y8tAmiPf43ojZEO!TKtsc_fCd`|&k=kWQZ31J>XQ}86@9bUWiBY>hgfkDJ zVJCr?IlMiaTnx?r`1{=PD74bHKNAU0fU6%ty^k0rTH;;d?1ZwO8JrKNe*b}jZVys!J*}^SQ;{g`t)S^`TJV148sxwjw~ojB>@wE?TRhww!;?-( zQ%=TXBgT)GknFUQ#?EIgIH}*p@V^}E+(vxvg8sAW1IGz`#MDAe!aII9B(KZ=; zEnOd8dD~!n%7|I0>~B2vYcnzrA<0{qw_+(jpP$IC?D6nli^?O=?;|*t zi+%?{)%D2v6G&VvJ$TC?J1s}-9FdxlzuBN(JuqwbcYE%6^*M|M#zda#^|1wrJvY}o zK#!Cj$kj>a$&pZCA2U~dw5as)j9Uw&xu9Iz8TGsbl5VcMynK3tr~QE3U9j2E-{g5V zMudCL9LTFa1Cew1Y-q%6P^2yNjY9huq;x^P-+@BfIQ|z}@4sXU{Ps3l1?ElTNX&tQ z=irLe(N^N#T)&%l*rn=IFbzpGbJ7C1X}+aw@Kg(lH)QmxTS}`Nz}09i)A-E=HNEZq zJDjOVzicVs1@NRc=r#w=`%`CgWuW- z7SW%xYxPjt+~bq@l${nABk%TOcBD6;=4522Gwo`{$ibK{2-W*REpK%^f~ydql%`%uV5q|Xs$j5{(bkXB-U|X>X7pNo>kwM7m3zRuAHRv=NpnMoj*C);R&GYwYilud*M}D+iXl3%-uc5u-eEKMS&x}MYhno$Noczp^_t9=P zw7fBBj6K{GyiE!~QAke;ZFExu(M(Op}zKj941RScN*Sz&oiPid9 z<_PnnC$U0p&6>fStjie{#=m%!IcWephOk-=Y<>?@n<1gxO#%^J!N;Z$QtR z+dZD|=Gv+VME5gpormIq>s9a?$1Li(L0ihXj%&nd9K`=r@Xn2nY=cC52JXJKF3-(H zW$efc^b@&xp=V$pS~iRISIkaFnE&PlTsuRjtF-O@!Uz|`SWmn}?WL4SV}|-2>RqKb zSN!q(c;88Oe1b5q)_`e(~xeDw}%iagB zgZ6a2|5AYDWw5>tetNIZ1CsghSz=GZp%zh4xp1MOC=y&C*J6C@Qq{JYo 
zszoA!R+{sEFtB&!=~OBBtgrIr%(Yk&+o+o?X;4+_#0qKcAf1tVm4DQDqsqgcx4?U}y z&242aCG%K}LjoRQZdwBEzNGYdc-{aC>Vx(wwagGTImp;H@YlYP3>@wu8`0iY-WRUq639o8l=^zsG6cXd^Om6{g zmixi%H6VP5QDtT|Z+KM&`(E_x4NY&VJq}zgc>V}2^q~HSK%|X14!v|7z4;puS706R zJhXlaE|&(vR9f-Y*l}p!-pojj`Dyn#-WxB=EK)*XMjEq!pJUCj9Qc0+PDgknMw&hj zo;B;`ufNwGWTgi9dQUAG3V5b)iV=~)_x(u69Q0v5dNhlNnTw3dHHe?pl=0qSY1+Fx z$kHKNGM?)S=z0(OW)(0lVBON0UK>Hnaq#vE<5&y4aUvaqPom9Ip~NzN+Cv4`>FzQ+ zu^Re?KAn+->RQmWuhP5clhG#QM=IwY6jyN>pA9( zP@1u`3=ZD_OC_KnU#x9b=r|P5o-%bT1gH6K;V~ zReGoa>=VF99~1Lg8EIobvz8f|jL`cB8q#=D=7$^zX8piIU$0A0r6n}grn`dv9^&UZ z+UX!8VqxIXdFVR_xz*O;NyZM!4g#wOp!Qa{W=?T^sSYA{>U*O^bfL9n$cHBzj@de_ z`)V-0QbLQ}kd!Mt)rajG6tBXp){Ysk3L`oRt)IjQ^rWD`|47?%M!Hd#I??YyWaATB zaQ&`Dz~~bV;hFK++Czz4tO&f>)FbpYfwH&2-P4-Lxqpu^x(_nL9H+)2VCsQP=wVO` z{;mL)6X5bQ6#tX{N-;{VVTb5TD=p#Q!_ch~bp9IrCNVlYG5YF3SFM-ZgI>tbEU=bw zScW=JqJi_F>9;ceH`A8B4m*&xN$5%MhD4s;$9g|EZ;ZKKoU*5=(GYz!nD(@f>BXV_ z;T2$WpPHLJfmy&hL)H6%JQq7KEfZOh#2!H7d4pMbDnn7@e0rAOlR2y~5>bj4hM_?^ zAf@q?@m@j&xD~_S%y1-;n)4YikMr&%^fb=EJzzeBx9(!x>v(e5oY|-a@4x4*C-P&$ zzQu@+KhW}I@QCb@^ta#5j^{yeH}>LidiW|p6$6$9(X)HOqzb(+3Hzf+rW7p*zXIhh zo_bSdLD-cghQ7T+5Q(Z0IgOU9!W7HX{AirJfL$NV(bW7E8XaK$P=QhDiBCRuhVJ6> zut$hOG9QN?#?J88#1Y1@5mCM6H3FLFgeKlq(S{%{`JqK@!2cgOw7iIMJKoh}oW>%t z?#{g9kd;v}9g0_in%uJyZ)d6tuM#sm%x)vIh=WmIiEvGZqB3=ZsNb7vY-2(Kw5q?t9 zE(@Sn2m01$eG8HwiEClp+MTpegb`&nBzN5lfY(T^zVjZ*7C2QIIJ6{~o97!KzXD$B zl1w}^s^>R6k7WcWB9(Q)y8{@PMh1*+cMLgyfw!(AeuM56k>YRYVJ(;qM}}`h|EC!R zS=jLoh0@P3qmKvD3cxXvQSk-+J%LEqwV zT06}oAS?lW9zkQ5U@Uu^eEP3b@_P z*oa~zf6U*afmTNJ%z~DWf^W8`2gPK1@soBF`MQ}m|Au;cfMft&5&DkAIo%R$DfS!r zc^o|d`n&&so+8|m64!WZ1d<}~T0QjuSZL?)PKve+Z=^)p^KX@~I*+D|UKDyFn=4eS zyX%Tda4nS;RC(xg5ib85`5lSGZeS(i4HffHoPgVn;O|~~E{}w^rj0S^?WW8J-U|LV zn7s(ht`6t1M(`B)Ai8l8oXm$#_7tTRPrc>(JW}nReFXG-G)S6W(Vhsq-!Ws(*YvZ2 z(PMH@9T0KXbW{=#9o{8jc zn-ySx7oD{Yh+S1>(5vf~Gog%k$X759YC`q7^tORD_;%zznh~R?dkPSGZ^7I2GnfxO z8IBLDlpaX(Ui7)E%wd!|0;JxT&_`@AFrP>3Rs$Pz%{bfE4sZ3Vh@-D&tN`@3FkbZt za2miGaR_UJ4EB2!7++={TnwJiqkZ+nxk?*GJLt^HKNGU_I;}0Dg--0s&I7gUs6~`W 
zpapM&lw!WQ${yz(Ao~b@AEwvFK;@mJL99y67&;Of7;knXvSHk#A#nd7bn^bPcQuXe zvY+3!L4!?zXVRxTGm|IKwUJk2ZF$OFn`aNgGdWbBI$MF?%z(SlS#jVV@kNcNeSO=_ z3wk%d`k3o0un;aL@J^5RGqmMt(E^|}kBsXhEh}$?)mamITLAAKfoB_-ql|j=6{E$t zwT+?WP$)D29IgY`Wj=eVKac&uo6IiBtRkjR)3bha1pfsEdQ;Ecg!Zq2)F?|EQ~2o$ zXZ5q5&dApWe-mC5WF4ZEl!MCKq3BKO=LITff~7!{A39Y-#tt)%zT>H$p?Y~lv&-s0 zJB8rOC7!PaXCvbepuDyb&w5j-_e~gYFEhS|!1ok5Rgd}VSH|TE^rQPovpyR)(A!S6 zsIvn2+fniqG|-OaS%GIT=IcC$mg~s~YRCwzNPk7bI4H|o&s{r1)m=!Kd$87U$h-Pu zpu5>qH-P;o$e=cZ>Wt1c)M*JNqS0ZGL6s)#d1iz039v2Cx<%PKLwl2H%@ds>;=5&ML< zN{o3I4J{kN!|HJCAbgKT`nAXA=bdqV^xHKqWCj+W!h z{&N`FNoXWjah~}N=G$Y;bo!`dF!uT&T{FN)uZV$2?QP&X!N|Wqz|av6>%BIR_IA^k z@stO{{YRleABd+!HR^p0ukT|aEW3-Ro=i!qHx?^(B zbPKuAinI?bmq01+D^x;KQh2rouD7Ayc_if=7-)BW0_e+DPLrwWoWkG9q7 zkz462fFKDxjZgY4{nvs|OM;AEMN8^U@jZQ=bA5ftNRENJ=CE^5c#+ZiGVdnP z(n4@mn_Odka~at6dMdy?dk#KN1qUOcD6ivz&&b{p55RRuMGxw9q~FT)*q6GgP|GN* z>ezw6_a!520_Bt7wo#D1L3tmw7Sqlg<}l-DC5Lf78Le{!YEPiG21rFiTJDV`KgHX0 zN*k-PHs8Ee=d520O0Nd1x{QxWa5b4ey>DQ?RBbfxKu_ma?-Mly8og+|36M-b?HF%r zy*T90$gX;I#vvc(n@)jR%Jh#wnE{N?(x>xYLE6+(xgTm*W`4+E$9o=(U1@#--oxm% zHNClWQ%|3UIu``;owZ<3fB)3mu`&Q|Pw+@U1TGo~GT4 zz>uGxO7uMmeP&*@Pk~20q77>(ygvZdO3_0@__rR|jow)TyvhQRo+L5w&m74{W*7pN z?%~f+R=d^)+OLFOp9>Z}d7sJ?cQ=(8AKG%Al}93l2|&1!+RNdvTHX6*Ptd{vO30ZQ zTKxlTmIH^kp|%5w(!3^&#58RYAi?k&A?`yvPSUF`^*#JY-QGi+ZnZbVx55_O_(2Zgtc0Ir0D|MMK3BN z3^xjOylEV{CDE9=T0+gSuDw$KjJ;r>O??_twFk=QLu%#!T;7&~dT*fn+;w}ppB4CK zBHf<&uSDN?dtx)(?8pB*^p=Gg(|Zt}IowD4ouGXM+BApAPHH4l<}SE+7i;T^>|_#= zi^|~80uI#&hD>2J+=PmmfYA6S+5@~1xr5(zw6zv4>3!aSnYaXR%?;}fuZrj;BVw4z z;sJP_o3{UnxZtjRGw;0Dafo;BFWg&~MjyE1FUTyO56)bn#4gJ90MEj3VJ^K!Au*$% zU&FBbsl~qfZE8mL5uQtL=CfyTIoSc|`7oFC^mJ z#=TM$rF`20&PMXjx);s|zJ>W-kQTZ?Q&$j;fNo}JFEgXdh#KC8E=W7aX)iB$n{hV} z)X9#XjiQX_%;gx{1>n-7wBYWk8Bp4nyan;F_B zE`@!O`EwI#I|}-Aq~99Qt^^bt1}BXo;i+&-dVLbQ8tv0_Y4e!oL$+cWRlR`bVOE2q zfomC~!ZY5FXhV7~VfAOkj!Z~kWw7%;LQdXji)qeCpT;LGHs%2FM1BkXcA(#e%$ZNZ z6K{fVMGt$o&b_|-V0Vx1Ynwrn(bOG>tQh%m0B!xiXr0ao)1qM90`tX|1P0@()}fr| 
z>haL)9e&?sp8t$~-=l|LnbF+?|HRA|!@E>Izs(c(OlP6r6)^DLj^hS#3<+eOFNMIbjZ{wR$BNCz8t9;g?kjU>g{&wTLZ`acH zH0pOoDx!k+-^D!bJLSZTBl6G450QMjev0L@_-Z4HVpLhe$d>N5J#jCBzH#?{kkR~K zdME-Xv;}BEQfjh8Pm=lIxL~I*9o6~3Qr31ITosH4$mxM zS))fM27X6@%^N``d{Z18X8_Aj@J)$$hEH{nP@mYcW5^x4auUld^%=Z2tDLb{njpvS z{fz|lStysCTBE>fHT>;|4l)1G0zM0IasK8E<9vSjtj!`qo0W{wtvq`SX}J$R8@K)g z^!-6<&4d2K_}&*zpMX#1$XSd$-DZAwF8-P^QX6`;rF1c*^)-0m%Gqef?r^=g@Em%s zJNtzvkfRuQW(+Jd4Gclwo4Mf@Ewp7mYQXr=o;{Fpe+xY7Guyt*r{RnkAA`|{ z^wEu(OK-}>%&+&;V+~3!0xr){hS0`-aH`m{1RA{@H7EVKT@2ijE zo97coKl%%LU10t=i|jX{hWRjhGLEbBS(}L70S`b4ch+C>t9N0)um+FhCv?}m5Q@y8 zWG5uY^N#Aw(?!wn`ccFnqn;@l%Po6Y)i&qZd2ltBz4?WY8p*3a?0)gd=>P$g5J*oRbPSs^We=d^jwKr8F1w$&%Q(Meg?<; z;93Koc`~;F2;E!uU{skcE)G1+zub=5bPxLY4mdRezAyRN0LHC(I~xsBoxX;HZ47V? zWxc20%THjKO4&<5HU;VWj6A8jm$n%W%ngc}Io+aU`> zSOe(eQweQ<7j3!LkLRZ*6!T8zr}Vgx7FHu`-}2j&K1!jT92Lu;{&Hwh7O0bfE{@jZ z$f#hYy8y2}{n2C2OnL4;jdF4~+%_BN9BAh)0N2uUk+rS#`6F$6x-=CEnRPoI+NL3O zpCNBXi_sP~pFUqi-o`VNUxsg0r=A@erRoRXeZm;1A4Z$LzFO>dpuMId*E8V$ zK1%CduTQV1p85vWV4Q9OZZmk#hGOHPyZ%T^=uwY>NHzxjI-1a{Cu7Eq{{-55V!4fW z8>1Dx+4L9KY5%NF3)0y$yEL#|Lwl}e4)P=iys1S0N0G9cv@;XxHKhDu)(b;{-CRc_ zv4ZxbbZ$oEc}nX&V>V=OHT+D?QOHsiK6OH$rz4T6K=C8A8V{a>q1KiOOZiWvn_>*pNsd<#kWn3;1gGq$@e_f>jX z8QaMG2il{|AL=a+Z_w+J?YW?ta2`gUv?Lp`SAUFs$bs>f+rb-iP-xX~cbP(8=35`k z8&_jZkTqkb>Uli`&hCR!dO$4!>leXCAJ%z{OksH$S*gdYZuGj9^ko){Uew88?q~q@ zSJCqW(Ekvn{tJ(chWG^Y^8lc~3SCnIy`rgMOr7@ho5DG;_9bnfW;oU+xd|N9sQrPi z4`mjE)(p@KY>PO&}apWEqJ3xTNBnRZ_(;1dguu*X6QHv?G{1HUntRnQLu>lDIGeM zV+5Gb;CU#amB6v}JAD|la}#r~W5?`Ui}-$-Pq~r3Lcm*_eix$g8^RYeh&!*ugLyhK zpjX=@zUy1!?XNe{Sl9SwJc5FhP^&sGa7=Z7HNy#;bPkyav>Y9kLjCU--RkHa|Z z4D@FIHS^O&`cZE5SRctq@P43o;(9={mGq(Y(bx@Lfz$QWION4FQ$46-#MLL@=s4(| z9_CUtR>2?xr_qPz!%_O&k84X_&MLYG{UuYw=)xo6RazJ&^}xFaSlxyf&h*!yoc4YZZQd~gqP=$ZQhC>;;9`c9Z_@V*I?$difeKj)zv;XyHld z*O1b4c(a=pF2S3F( z{i3w>v%zjCaG z^j&b>u1{1ND}HwYnHcGMI@Jqx3IV}<^v6d?ySoGT%SPAF52p*UQ@9CMU(&Pt0CPEh zL|Jzb5npD{SO3K>WMAmx5Vedu{uA&$2>&Y5Q$r|Jg(sPipG4ldpYz8v$}gK&JS$_e7H#F`=>T-%XdsVcmwN#^eZ)H91*GaK 
zdxb0T*H&z~JZ*FY>f*GJ1&PiJXO97GEbk&eV`$g&{J8&l;x4BjqrnIfW~i|5d~hN* z?8pXCwix_8!|alXq-n=U1mddvH7Y?A(&BB+A(U^AW_TRAdjnY;0QXJ>J-UF;vzaT8 zv)9jw^j`(Kbl&DgqMzfNczP#q7c}XE?DVCL@<^chNc0QWD(N~*`0GRC8SpDJ{f}mx zZKg;44LsS_@*r=MXwQ9MW4Ll3{e;tVW^^M>W~ID4KXZz7<9iYKW#kTTe)Qv817Ogu zV+NjcJkO5Y#nRU_W;Xpqj2r0=)}7`bJc;E~Evy0c;P7iaD^B~xX*(aEJEGABvl2)I zo~MztXMoozn3vFv^MOPQirH_CJ5VY>>%Q5&d2zJZ7eF={%4I?;3X>L&LHspSx*jJ3 z7~MIbbQVU;T3UJH?@~EvcK~pi*Rumuh@8LcmtdY-ZD786CA3%-c{O&XJ_4TqM>1qo zVce>9{@F)oLgti4eJ<|hSBlxo!@&74I4*@cd3YwK5gW@tw-TBG-6?8h<9#Cknj%tO&xD8-Kud_(1%|vxU*z_ap!7;K~`shPTN)QT_re zRYlguHPJh+6CF%}IZuK|k@5+e7i;~4ek%4mb; z`wi)z$^3JXaWx$+s8x9p+R-!E5v*n31q$yRx2K(1=&j*M=Sw_0$L}S|bVbhpgUq$1 zhsX_vw}bYKL;Jg~a$R^g_`BZqtgR1zeqRU#Fcx%p)1VoeaeP1>V=8?z7Zx1vF1F=SnlP;iZ7*LZnRZN&Q1T z`7F%)hnRPUGuwKqV5WGz)6Dd4O#GL?b|7$?qiQ;I$%78N1N>(09YfD))JqK9tqtAd zp`q~r%{=bj-i&bGSS$nue*>#`X>kgCIl!1-!>8jwWb}lIaQQ6tyeVn~>1gW4@wWwV zt)YHhsBZ3vCOq>r_HD`y=J^t44!Lf`r`zzf3z#mYUC%O%F20{P#@{VOYqc5U$0@%Z zc`#RJIo3{fSf!PMw%&(#XY1LkSw!nm%JYa+WWFY=pxV$?i8ZF>LTFl-_RXEBSFc!l z8_`=9!hM~m4?+bql@6hlF|iiVm-@h5yYC?H6Y0_14|l*Tfw^HR(zO8ExLft^$O&5O z4R;=;jTz`2cSoL@y5jnanJXC#PtbBHKJ`R)yuo3VG4Ch13m8GGNvs&!&|WJvn^^@; zLA4X~F^AdV7bJEe_~nO=*P&nm`aFp&cpiEISvC{kdFI<2Xy8M%nn2IXD61F6VDL|e zf~}zYZB{x{!E!w9x<6I-&jYr0^sZlrXY<=Bso(00wC8TgyCtvE-b}`m`NcHb|muva1LPp zG$#8jG}}2KPDMX7X7;E9mAsFkAGN1`snj<2V>ZTg6KLoy+iz*>5-o3HESir*FOXw_ zf7&ZDScjHFk|t1pJ3VQyuL87%*Z~ye_euKk?u++cjBl!iO`D;48AgFmb;d#hdNBiv ze$L2R&YYWo4rmLVO2E;Y&|JGjAK-Kc+=Vxe3Ul?i9@TgFGdMN`x~)JH%w%n|8VEfh z{}P(EV!X#dkp^g^2cWR)vn4>8!JCN9D;cRcMOzun=z6w#A6|=6Jf+=X422r60%b>f z^~T9Bte&++UFBUfq)E?uv)l}Y((f@^moqZQ^F)3b9d-%dS21c5;J_Mcx2Km*(5x-8 z_bj8(JN-#~+YN+EXn8Dj{|Y%D8syN7jE_*$b6l+`-hP=u=`M`3?qJrL+V3*o-i3~K zzwS*d&t7MtjUu;m^oW=ZPj^C%nn36c;%$sI@5Xz3T_5vRP$eGe)kczy{f9osouOk1 zM%rY!ln1(*&CK|w+9#sn+gWHji*~KA*YQGRu_HQG+pm`NU3?k|PVP$ep*jN(y)Sl? 
zxyb08-av3(Yl99LK+ARM%V=_|;lLUo&?o3Nzmt&3L`IVuV=ATe>aj*L^*cfxv$q@h z@$=y!`ZMn)(TMu6o#jc&sG`QYh( zU;SuwnU~SL-r4?+c0B9VYpMpaaut3UhcgaJc-L|lcpBsGdmx|A$~hiSpk&+jwC)W0SluIB?EC8;QNsoZ3ev1)+yJF0sILP@NpOox>Ln7j>8y`lI6y0jc-hw?XYHV1E<{${<4}kvwgdL-}unALHAY3#uodJ0X2N z!E7ZsPXvd~Kz}BT=G#cXK`^MqTElx&y@6B9kEafLU>v1}_Z%aZ3MqCBS})_x9r`{L zsAqhfWFYg7&kd-1G)UXSVAGu)^icYr3v@$~#Kx4j4Q~}52PdU+8>_z(Jd-|W;ngi_ zNh$3v-m$m^ECc!0o|;n2tjS+PqY2c~!@U}kGlo|5Y}9{S4WwUN8FWD^eE1e#wnR@9 z2CJ<|zd;#}A#3y`pe z#j+L;NZIsLK-LO)!lKY32^XyPyI7-8MXDIVg-v@12#BHq)S?6l)XP?{ind=-sV3;f zKowE3VAXo4aVa7fQIRTozvr1rwD)&@=bZmC%RBG9+r0D6%w!;b5&m&+Q%{e4o9}zb z>S18fb-`!S>#p2yN&SxI`vFsNfFq9W9hsI=mT}u7klxz=?qRo&I-OhG37uupr#DlM z<4bovJV^U44N&d@Ya_tg*_2;QJ;wLlPPx^TZ+llozrIF2y|^v`M)OwjYvj43F;}u1as4dm7BK>?q0CKiL1#+4 z6G_n?8X64-T0)^?$v+c%Ofa@_&$}kT)rzvWQRZbp(iI8pNd#5E7I(sPH12pS&QV*d zP!xlca^NlqbHmbOGQ8Ohjy)sOS>v|gumh>G;kdRy;mGQ3zD-Htx|a#G(V748yuFK* z4`E{%>AZ$#g|zK8q+K3ztQN{pXFBE_0e@72CdWw7suvC1UN=^zI-6k+j#Y6$nx3z$1$z*^tDi>r+pR!Pnzoi zq)GFnhML@4bTi*-w4`I>HGJH^z}*fUOUIQP?zkZ4t9arH1Z&N}N$bI0O=v@XoPI96 zX*|b*kjmM5qg;#yh-Y02tKOCbskVol>*4k`!5>fyCfu(=&zR?Y*3u>;St{WRWmt?O zPy$2>bHu&_JSBPW3W?}(NQHl)YFr1IHJUcYvv{2ytcKQTk0cuI&&aR;hCcfvH;jhd zfIhqeDd!FW&e6nu5B~^eW(6JH8M*T%oOwE&_6A&X7usP^!8sC}0C&>Swe1Awu zML_;Ct=vGm$H@N&WTHFATH4LD$#KvDa(x3IoWer#WGVM!e2Z4>B5h0T-OIrAwP+E~ z{d1MjKe!hcuhVBYlHZ7mGr+LtPo51HdLtFrA=NyA_y_cqyHb4tX0~FL{uWB`6g|(7 zb9C;$5}p@PgvC1rXq>;e4XSw@Of04?o^@(e^eOsVxW{2}OeUvu{;yH{4Yc`5C?@(+ zt^yaUY5DzN)p?2w$nPFcZ-I+8lz%O?KZ7*t7Lx7X>0#$k|AW>T zMZTQ3<-p_a-dm~9briE`?^pEj2ch5E(0^J?Tj)v8o^bB|2wKNTl3A2<6Li}VzWG0t z>gr~rs2_s&e*hHAu#Fz1*F1T13~?y^X}9Zx-v)D@8a9!#TJ!Wno_JF2i?rUkQFqDu zGcA1;ZaPY;j!?GyKXj+|Ho&!xmdpWX|4p5f!TApQ;Z|gFVQ6y;+S8v@?pg2(rK5}A^YkLL<0v58isn2{U7yf*MtQj6whtJ+1q^%!1fyt$tK!`y^7r)0{{uhk zsL7tXhFo{RT}DP2@A-ewPZtCGe}mC$16{is;07Re2W?M9b3XG^DA@BFJ=d%)T<}|J z(R*_?Fu5k7J92v(8oh$PoCF+y1y6sYuP*^N&PR*Sq1c#*15G?d-CKe9ePA*2`bs3l zZ?TtL4=mShAh)N)JI6j6-Y^>Dd7w0^!}w+A#2;-?&pdnPGw}HmsdiHD2A)qKuj^380>$gJ`eJD76n*K*Gozrd 
z|D@OM=l4pm=BUu!U(2A*Cn>`|(f3h=Bycb5i-5~r_+JG}7XzCoB78|sS3OM!5AHzb zj~&I3rX)A17gD$w!{q|Z-~3Kx)9 zyuJc0Zbs{P)~#OP_ep((^4$ku1AVjyE5W($IpFh;aQr~(xg^lZ5#C>oUb%wb{{*`q z1K}M&>q>*`No{Pxl|hC-PwDp~*>^#8Hv!vw$e}aPq;FFHBhb)OaL{4iT|_Tx*X-e~ zrxj}Nyh`sFPwoz+Ux256;J+88>Mvb}dpzn|Eu#z&B8FBk-dS z?i*^p66$b$$tTc_ek1o8bu8$-{jIdz`EH{!rh}0e=tUz;^pkuG{B0=ZHm>Gl(+mY0 zYoOSF2fxnE&I4~pv6uG4zi**U^~)IB^C{ZG8FA;xTT-j@=eEyr$Xxo;y*gb*n*-0e zPqDE@9e~Pp;qE&81bN&`<{eVG(#(C`UMJlz;6cxCngJwxN%JX~YXfW#!VxWa<2p8@ zLgHzMKLNMy{n`&~==F2$;FDl=8|7XOeEaClFQH!J96ash*I-$%w-uBFXmCf+uNV zYpkn2K;&j&>ar{~Nj91eigu`o&L z&;{=E{}H*Y^=_WVDE1q`=r(xDa|uUNlB06hs+WMZc1Y&>DEJ9H?F~GOc;5-xBeW^f zT7=wyk8RPZp&{Q(maAq+7)|u2UO%J_DovRlHATD z7V%^v9J&#!d_3hkoBlNKo&7SZx)!?I1lKJgm-F>aDa+Y&-!CJ7Dd~l8H1In{sKi!r zW?JjJ9g^F<;M|kUIj>4e$%4{*B1Ni@CuNLQThjLzQ9@hBI!)k*e7Ngzu;X0&Ta@Ja zbIv86Mqjn1X07(tj0zm#JHsA#f;X>w*EIsJhk4@;{0aEHI^f2cQRn5GQj)XJ`VS{U z#YP@Glj#m&d0i@BUuedo6F=>#v6PR)jV?``ig@Bko?ej(Rpx_g-=~K|j04 zUkznc0?qZ%QZ?yXBeyEay#hEsz)t7|&Gm=ttiv94HuZQInLB5l37F1i>}5p3XHe;1 zC~s0oQvsgcZO`b3xA^T%dt9UCT(hOLpzjjE(v38FL*^mN(m=ZbS(HYjkA{CUe2395 z86=Z&;B)CAM`vkd=pufnQ=04Vojr9oI-}QJ4L%-xmxl7)4R!^!cIH`(sjq<_AExDN z$>XjFRs82-mE@sojQ{_F-gFgT8P6SkPUVTz5NECXa9_*&7sEKzypHqzHU7Sg9yrPM zmwcNcQTFri&Ui*?INr^GrRL=J9D!{97l7Rn*a(i&yO5?6(!{a*DX7u&2IqxVj^Wx* zN76see<$EIj-Y`5H2su;hq91X2}ViomDm9~*~gu6^{x-FB{M1cDkys_^|zrfdjw26 zD;?*llOe5qY@UYDVg^XOcI2wRz$-`e>2T%K*_7sP6vn)}@+1RSDU%n#wTDSH6BrA@ zh&dhUpAE$2r1WIZp`=_zI(gAnSVt*xr}De<%@y-zL&Zvr&A^p|H1Pb<_np8!kEaZ-DPon3yP{T5~ z-jV&gX!~2Bx_=-KJ_au@gQ;=Y%+JH;LUS_wzK+D(i>BF#*8K|m<_c2Z4<@g}5_PXG z$K1x)PegCN#Py5dzYUbGzra%bHbvgt4^$_yh3*JXJ^`|)peb8nijKj)=?m|CPTq&X z+MU!h5Sslf@LtJx3$$_s&D|PWIWy#$fn9I|HBNz>K7g0*rA;}MMlJb$>l~JG%zHc^+6Itj&>kFTv5v$oVVi{S2^sD>7{YP<1AU(Rl9T z?`)h<@1t+lk=LCSUD-B?-&c`OtI2;2l(3jKdOCsq`6;&DIP$JWB0fZKjspgxQ9Fa( zHPm7huySe&eWTs-CV5;JaX$U&X_cNh;L3r8(Ag|nWjy3dr27^qejlX43jwAtY1dLD zpX<3@d3yvbT}>Nrg*FDz4;#RRyX6fhO%o*SW}aB?tvs0y^iPw21Ms_AU@toT7M`zx zR<{A~je$NKec5_f5&V_*oPs*9!6yG9Y0svdr?4?!fIFmBS0=|beXm0S&KDe}{BLOa 
zIQsW1QePNw{_mukN89qjOb6)cCf?7ZM}HCe%GDW;{YGNreL)FFY1N}}Lw_*v6N!)4T z$K)PL*^fd&MNsG6v|=viN(nWir^=4<`(#jeCG#I`8fYm>!D_HHbRrkkdUV z9%K~n&QiOlsye^52^{rh^z1$xpF!)c#_fuHeFxnB z8JuqiJ5%5kXE&XLeh%6(7IQVYyBPYr8fowcF#annhk@|THlDr>AB^MkA!Q`#|1IQl z75!)+c@cix3;tYT>&&=z>&L+G5Bjt(m@~3r0+{^=IgFQi7k+q}db=X+-sRmpynC4* zIh%6-P79AvUtc7VC)v+HqdU^Q5ll(ZGtpVcNwJB3ok#nOl>RSZzZWWfl$!5>E8hW& zro98a8B42Y>rb@j)$sIV=;Rk@<6D9CW%}LIG#(8UV4nX=AOD8?n|SY;C!c~_NA7Fk zsNc{dp98T`53ZMSPWxs0vxpMiZ{zRO+>$nksrl%f=jao$;BJxsm)2ZGnkNI_86$E& zc)K1tYD??q(pT=VdOp2211cW|%}hf|*Fcf(;rS6IpTqql;8E`QnBF`ZsOT=}`j_0f z>sUWN)m%G@*hB9f4x@)Qq*z4XzE673YjBKm4gFO^%GL0KF&Ns2=ITywzKr~JjIs!c zG!%?H&)fdsyneRNvi?lZxI*uFWTP^4H>n*b#qrO0c%&WL%>0Fv^}lBb_woEJuxD&< zjPo_t_VJK+7Uk)6i{qMY^z#+q(Fn=Ef)JV z@R|U_&TNi?vg*fB)mRx5dD@nmJe|xvAf2yt?=Qz#uJ={4Y@t=N11?)(VL6I(j8z;U zF2*C`E?|#Rs&kR0^jRJ~ehy^}qs4vDX=jttXyfVhxe>s^QU*Ty!TI*q4banLXi2f? zjG{YV_uW>Mx%l(P^_y32Pf-u#7c8tQTW<84Yi z&O5EcQm(V;)$wrM9NJ;|yWprwayrvk8J;-XSVm6YI?L0L{~St4(5LV6+X$|h2i9Gi zo1mw6193H2>Iv`I=27(9Y&6?3^y>)9+5!jfL*k|Bk1h1Gv8>Lu&7cPtzz6OGx0O3b zzS`%GAlxbZYD#RvcL%M{2If7$oP~utF7%rx+BG7zD_9qTFUKnh^gu3ce;Hi10fG}y z_2rbHt=&GnXE!ZcI)-wb53)_}N>vI*bC3nv(e7|oOnFVI(UrLAI^4{n6i2N+$Y)KT z0Bv)mTRGO-!BEbv&_LYn`xH{fv7x(?YdJe#>po$x(DFveF24`)W;ZoBb~3JB8@v(N za8Gm4DtK(ANBhwc$(-OiKkg7&48LV70;zPpIh)3jnS zB|5{nfah@pq2|b@MECNFShI{b$!WU=gx$0_wV;^_ew8~2ewK1<;VdTJr1I!9@XJeA&0GW;4{@71sC z?&CPCRd^RyEuSRUO_ZJHTh8;rl$4@fu@vKuZ{&Ja=w)YZZG$b9z7q7FedKyy$70=~ zGIx+qL;3!04z*6>T>@(91a(%>H%Zbjf$!aY-#2+sm7L~vK6G+gxbn{0>Z1+3(_eE4 z*e)gK5ppzvLLD)k3w1ieU&(J*Y%13%I<6b};~G8fo-4G8kqRIE>2?0eTe=$N4$xmWQm$vrjsosA^pd9$eL@Q# z1rqVnlNQ_q4a@}}cLUcaKsF9s{gNI&gmqge?eV$@ca}w zd=UJMhJNSKkB+g6z|hO2>_C5Pgc~N*S!`#JdLr#|kEUB_g|YLWA<>MLd>_ki4gK&D zqnl>jzlH`Y0_N+&i1uZ0Z%pk*+Sm~bIScBDmSj7|IKQDyxyT34a6SvIU|e%6+H*g@ z`e5G!>-w1<3GWKQw5uww0OD1&(D{e^p&I?LT1!Ltt+#?M1D(#u}E0xDW*SJ1k83uiU3 zb%raWH~P|GZMgS|mdzExcDew)^ddYO?X^pTmbV6NC9RpJys>5a)`Z+0R5J9c@M<%< z{%RK*KbPOXkxH9pFO(ZCp59P^WsIwIt&y9?;N_!B*CV;h=<_|)>j`k~T&3?$ofF%nju~C|Bt=i^ 
zY1fykht#;T6fl(rtE(w3semuPqV5Dz{0aKv7$uHHAFhIKnj%F#wR8hK{1kY0pW>Ft z+FIcF2Dm*bI>jd&7|ug>t^j_0?e|fmJAGVBsrvnoQNCI)dLmy#6BYnXHu)={3;p$u zWV8*oQO|L(={~nc4*dWcNs#Jn=cR#`wGYalCQaFpZJ3yl6 zq!x>$Gt#3KG}SQlpH|ueAnC~yZJW8!i>-DClD*^~K#Q8wO9}d-3Yjn#9J)_$DP_3# zYARTi4MN)!;GE*rJyNi|wURa}Wvsi!}ERK?p`G`im8 znbh#tkXo6Ti!AR?Yt9ROJ_Cp!r;n%byc^W{37lk!^`AVhjDd?bss7yfreWw~_w&i3 ztQ|a!cHJD{DCb#MIHf$KZ5rAgbBTMcHRn7kd0tRgnzWvPUPmIqOqmPRj?HF*i&msu zMn3mnajmBIvD~XKza8I(v}PZ=Jk4`|MME}(v6KAKWadw9qwVs~*H zPnt{luZN&9_J?0+TjDe4G1VrXIQ+erpaFHMAJmmbE2>XoG_f

    z_2xn=Q5!CRvLiu2as$SJy1`bZ2}KUm>espsnTj`eMASk-8_jcI26buF!{KO!-puWh?L!XQVrV-`UiV z;=eJHssd;`(o^cI|G=hoC-P&AI(G4X2R%C!i^F{)JgX~O0zafjt_N1fiV3dP1GSON z8E8mZ+zc$#&(t-e$8(^I63Uzd-W*+z!rIG%=l5})1QQ?fdpc!&Lf!f*rqHH=lx9g8 zT2KNm7cxdGLz?vidudvkOC7$!`vIrTKSGw48dRGyPKbk~%|AfV=#;dcI+J;xl!}6zFa! zvhG@1x1DiSihNb{>;pj)UWl}tLn`CJ^%m>`i>@b718P-_7$l2R@A(w^MA0BY+`51dcC+_6p1%?UKR zev-FID{ttzs3nc7XvA6>?X%`(bv*2~nylVKB8QLR&OL0`(3_o*0LP(KVbzy1m^ur2 ztLLE`t#o$*_3|kGCvoK`?ue$X)|b+j@FZ%`dbyDV3Vk9O+Bt;O7f|W|N@~ZG=dmRc zj5Hn3c&frOO4vnPjg`sa(-iIa9$H2jVGOOmXJVH&MWbwm+P&XRiy9y~wt>S~rg~&T z7>~S3$>X?F!?=HV-#Yy_7aUsaOHi5PKeew>7>-1G*46NMkl)ov26fmRc*K#}IMT;4 zPRt`!E=SLMl7!JHBgmy^+chwbXVfVf{)M0on0L2^s3WvzTo>Az7RMHBp`=1mUJC!@ zQj6zLs6!?LTY{20A(<Su#kV$RfhIXO$D*EFJsB9z> z#=QV0;P>gl+k9yHGGJ>%yY4|d_aWD{!G|Tytmd8b>Bb1TMrH-qIi!+<1_M!dayou$ zLfz`hQgUkRwqw*X4Cves(y0F^eWe>Ieq6}oUWjTKX*JT2{xs>*y*wK7EbcL&CsogQ zOjG}>@4WuH0W~`YJ&pDn1tab4g`)RU(jZ`P1lSg-k-fbYVkAY7XBP@a%ljT>``%gS|Ty$ktNRG3a(4Dff|kPM{hwFc>&WsVzlLCA<4X`~vE- ztzCGkPEyvmd*dKdObxAd+_ewQmI_)epHh@v>KpCstiTO=3&qrVLDO}ihxP!0V`L?U z{@;~&i#`hDH2Y*DCFB7?w=h;QuGG;*fAD4G%a`QngEd^vlOcTdSM&l~qscXrPZpSM z3_W`y>XFb6vEeGC<q%0uyQ|S6}*ky4v(~lhOfv1?B&8zbg^um_fN;#)Uy%4_jELv$lp6lUQqnddn zvHKaN9i^r2d@`Ev=RDm5%{&2j%RfNl@N`g!L}(qQneMVH~ zg=?*iDU|N>{6K~3&g;4*!yct5eI8Jx0YWfV>3B=So8Li=}GC+1pV+x%-Pc%b3B1 zr0+}5^&nSYFtw7r?a(wEX@R2)C3{z}<+y7Rkh_0%9F5ic!xlk@XYlEO-Q%jS?>&&} zvR>5S{9#_*Ea+0CLIEX5jiCl`bz>RzMc-w=0DG)MEwqHUhtTNmNca(@tcI?ChQ552 z5qZ6rFzTu}PH}}{4G_hVp0TWRNOy#1Rd_-jwM?e1Zjar*8^&GOj;VC z$PS*;vM}1o4|LNESycn<4T2;bj)b|K_O&Os=lYhBYjU7)$DW@+!S5lLjXrg^eQEiUKWbVb2V;6_ekd zjaH&7;^L^sF^ThzaV+l~NIG15K21#jzki*ZTud#ITJ$tXm0Cpdncq0aqg@qG5NZgI zJBBiCOK{u+$#k5QA3$TC!?2P*`k1#5&}zB!UFhdzu*mv?RdLuhSbo{q5NZ_HHS|Fa zEdZZ#*4NbStYR&6tfi>78UXKWSFfXlS+vOL;0oR~Buz2=rQ{eLY_${;(>-ZDIcg`8 z*qF>rpt3CR-iC6V9a9UXs38w)vH|b47cb&_IaKS4?`+ys3r>v6>_%F(fqOE^g?;F! 
zhLk8?uSHHcdeyt+NXA{SYsqc&tScC!W%*miRT*06X*k;Bd6cT{vz#j;MeKSaaI5 zMuLu|R&|UXPS;yzQ}#i)Pgo9;b|hGx$UDcxdifU8i)ls_&N{dnaVqI@D0?@pP!}tO z)8YD#pf{>`k|4DfZ+5oYq89NI*6^_^k?Ycu;g7ogUokQQ4-j7)A^xd7SI4glZ zk5);K`)G%_@S8?%I%3FTd~3Y6vz8-iNekeah{Tu_YEhHd2F(z)MOS)44|iNaIud9T zjHKF9R!*>Uq|!7c<$4=#BxzerA*S5}Y6(xu!08k%4tyMqj0ZkP8FR5Es`xL3 zSJQChtw0jv|9=J@d8K|jBlyq0q^$1ru4jdNQfvmQDy-8Bd2moR?K6rdjzjWk|91Mq zv9hPRo(M8NkG!{2w`a+1hhE%KBpc1({Mm9KUyP)$LT@^+;W@1Lz>(vCTWl|6F}#f70Q-BL>f}SIdlsqROyHwLnhtTgAIY zq>iVM%OlF!Scg6BwX3G}4)|$=jMz?|yYPp&Z(bF@KY`;n)2>qPkNq?cjjk{2AU!h# zd+AH4!(9qIC+7xa`f4~xkBXl7w(#|P$kNtO?J3@MKm%C5(F*&4c|WD>L0=eM^BEeo zj5JH|4{avhT1x#2{N6_H%b=}f(8LAkiWJt-2--IRoA7hs%ENBcgY*tK(<5fYvGOiW zPc^44##+0YsvGc>LRZcQDV>@@d;8%RcW%(9yPckUDDboqU18@Qmf-(=;H!-LoBG9UP2aL&sA?qR2tb1=g*u& z=}L=@>96EjdvNdj_(=oD+u`~FYVnyfhRy}$1q^%}=84=6896T(SmgQy@9!d4f_rCkl)KV_mf!bzgjv0%OzO(J z@847)iAum<7yKsrZVQp2MlRY;a@+d{(I+x{93;w5j_%d9KOoN;iy;{jl77G!=lklalPj(se}nE+?W^=OyV@j9knO=)JGf1|-Ps%^d>*tvn;gaTjq+MtM-3qkhJra$=t)xICdel>_*A{> ze>_>$*8l5Qi@q_k(v@Mu$={WBJL*(6e;KeH-n?I?RPQ7|bqt@0)A!eOLT zT*2eG-jPCz_Bh&4(VvI7nuJt5PU~I4H5GcCkMybxHq))7@hsP!VEPXDbt+Q2CD?YY z?i6UVHIXrcNHL6FyaXqBARQO)#E(dp6@I^zU zeialU7L85I4pbbi4R;CA)^f)aXE7Eti{6zK4d`R{5WEr2D5TwEz|XnVpvUlDzK_$3 zjliO_MQ!QTwY122b0hzI(#AV@KAhZ_kwS0X4RB^UwEafzwTZ-PGw^1VXcaA#cjO3V zLsxW_+@2aJCGlP^G zPAUDM-ZVK5^Hg8ZD*8(8+6)V&75(epgG%7{;D5Dg5p*$*dPktwim1nVEO&)%L%C~^ z49`%CGbkHraVZczg$<`RUW7*fglE}k4x=zS1qdAU+dCE(7 z=255rf_ryZP64^POKX1^vSTXGYsrx&&j@6M>%xXW*-OE)V`0bP%JNR=mt3fIN+{72 z^$JOMDY^4$y}q#|P&Yz)#Pe2jxOWHHg|tdIghz;0gr4-OoboOPns(4x8V#x+&?tte zNi3_JJ4e>Vp=I~dKd$YQj|T_sBi>S^cg}w)W%TA=3t}r}ts`X$7}TZ5k;N~D(p-0# zqII4GokiqE3ON1=ziYQI0!PL4@(+km(-*a$XL{)zr)kgC1Z%$+|J{(VDe9B2;|!Jj zpuX~%n!XEfyt1Y6DhPB*b;n(6;h0(|KSlZN;EMf7 z!Ud$3Dx`@V>WDGeOS!uMNOn@9D}Ba*rKgc~=LJlz2hXdp%(eGdhZW}gpt6S0?epZj z4&J$rUR+1-v_vKvf!7lFvnlW`0SCsH716c=FjhhO08&=lXW}TzwYdrK zv>eXU%b=z0)o#j5P@X;WEM+PElziiZul@ph$QcbI=0{PtDc^x+oFy*~sdH)RI-a*; 
zT$ACyJ$>~i?;7xLWcDN2MDngJp9$nO$gy&0!Zy$1O4-|lXU5%q0QFRcwBzyDxgw<# za?8DaYZ$XtBFE(7emtEXQq2K_`cu{KDInAOpNckV0R>gjo@!zk9EUjmo&vv>hjzb2 z`n~i*S2U$|@+vf(p0Vq|MGn5fTuPq_gzBwpC}k8<`30nsr@^JcbOH2T!yBy&?KZJ> z0aSAw+Pe_C)W=c*9{O^n{MSF82GU_ri5|T7p`cF4$rP>D{wbrkJxw72{7Q`jlaAS$k*~~O|@L#MSnP&=n|gI3L|H+@;Ujx4)YxO zz_^UQHg;ePzc1F+whW6$pUIB{{uAKHm^kMurIr5%LcM>&m!`aab^WFFQUz3%;l2@e z-%PM$yo)U;=4}>Gl|to4v+46Ua=!ss8O&z_lEEicq&2!t87wtD1BS%ILqBbUMt5a| za?hAevFr?ICDJhy>QL@)p$|Gi4^6OBq%S46qrhR|*Exop=)V-TsT;=!2&8;9uXB-c z%2fXS4ThMLR3jq*Isu_%d?V6Q7VZvy|9Vri*=Jge4ttmD9}H>f4!q)U;6u4r`M zh$rchjpUWL7lPTUP>Llb>GSBD>jj060um*vdf_ba@K0ojy{!i&5j2SF-YoPnd`Jh)X~^cr|_ zD=qFE(kSiT4cIg?+#M&h#`SbN|EMq5mF-%x!?EH=QBMIJ@g!b8=MbffTqH{lrL`qz z4=_3eNaoT*J-~x=>J!0*-qcDsPfN`>UuDp%@PIqHr%CT8#E9D1iInrJ_mdVn|4FCm@%6X*({p$za!lX=u*kMyIZH9)5X zr~>Xhu-}K$&f%Sr7l~kTjR{g>bI4i1uN-|k6tR<0K{me~>AfcKlyfP0=!QyuH}gA& zv_@wQ=6@l5Xzy!x%?u^!<91FW2}Kq1d?32|2vELuls|C?t*LSA$M97WYH=$5VM-_qB-;O>*I>Y0#m&LweSohO%z&$yZ&AT=gHkD6=E76wUC2850) z#?bm)TACuQt79GmmwM6VmN*BlcJgi*oSGufP$;H?d&i91gHJO9=4Mh-S9_XmB^_?u6l@GLSjLZM>B+3~hoWy8`H^(6h>S z&-HbTG=Q{9iczE=0CmVw)lh>YwQW$caR}-*=i|E};XDJ#ITri8lvYhef_Dh*TS=YM zkk775DW=9RX_=9$(zg~{JTKMVb>uH2KC9~5-W?9o@~8#=w}DNMO>DXTuKLex^0>~n zr0z))+z;V?1>Zx!lj8YkzUo-lJ){|zm2#cttHdowYPqkF<56d#4+Gm_Any);7{@-5 z6a&yYKcZz5d9G|;Nvl7kPxJVH8!T-=!j|x+5U#4GSCpp()U4&?F2V`&j_21A`nO+i2Fc^XCg<&Ab)Zhz23yzVbFPlx>o2pm`Yu@@YUb1cG?Rx z?&kLhHOCpi&g61$9%m74xwgql-rXKXAjamI_g3y5&mE?WZKU!ef5{EWFc!K7nd><& zw*rfCjKZ3tv2NB@ITTjV}#d3H7RKS3@o2J z;NLWOUJnKCg)XO1wzSnajAT5a+1R`3K)e80i)fqsZ&`kS>XB0q!WWBp7Uw(~1&_c^ zsH_I*JCT_;<-B@(39?Jyg=@WxkZwc{W2T(PYyyvX0>*Uo-f(iXf|g1HrYd=VloFJ7 zapbLk$g{v*!L^%T*J5>|ygXX#+IUCer}Ir9YdZ0ECasq9_tIu1n$d(&LMUBc0&=DG z48FENexFW{XW)px(~s)aN$5sTma><+QJZDQkFh5YhFtDI;rPJkMt3=Os81C~7>?ri z0fSG)TMpc*+$jRH%8ssFAEV|q=&*00#y3gVmL4jn?o{CEAAMJ9iYuj+Qcqjd5s)6v zSh^VZqrN*Wz#;uq1&N+Ti`r0&d}Ul!CH>Hw8ZH2a`zUJ)sniBZXug2F%1dX6?ZFc6 zo$;AK3R@8UUR9Kl zsKj(;w34e_xT7mASXS4P*dBchRpCiG{AQzd;>=TgU;k@-ol;FMQyV(+F%tJVsH91t 
z%T(Yf;{sFk{?BNOnxYX>VPS8D<91PivX z1CY8mh<;T41@e3)+>%6MeuNxzj@SI7kO8h&8V_aG0Q&}dWhuYodHW3c;^@RRNS+8c zjyFn*h1|VBFH7ff2284N$`jXfG~k_kU&mR^`{12hfI9^S?*Nzo00yz}3oK^6J?GKp zrbx+I$c{=Z(JFeXJNM2BEDkGponO^*JxZ?}g|C_+g@^L?T2gMOCp|H*hPGCasy($g zM?QDsX>Q=a=1{0oJBz2%>m^w4)ySb{tZEp|2r5Mz_1up|2J}NC7ze9eK9b)aq|N2s zCE(wc8||p24R5qwv==<#)Dg-cN)r!`RJ55#0N+ZiSVsoNJ38u!Yft*q7G?HFK%WR{ z-^H3A!?iO~OUQ5iTBNo3*eZiQa~J>5Lkd$3K%zP(*F&XOv5Fqmlj?kw`k)H9q>fj3 zYy3?OtuV^9hSqPvx_W??829i3EfbO}=>2Dab8N_eA=1f6r80V3Zk8sEIe37whR~J- zIM8NQl5WAn=3Ly3++|4TdYqF`P9fJhNUsBMit<$Lpk1AYXSL>6(2J$C?Lx3>ytgw@ zIY{FPL3T$wtre}jou}ncMNjf1Y1I-+a3=XQdM6968jFv{(XhHzUqKq2<-ia6N#CG8 zJteM|l@f(st*O58^uQTlO?yWRRIieIymX=DEJ}2Bhx3J=OO^#pBWahCIK{8{FjB)j z={mVQ5(%h`S2ra2ji2U0P8A1;wT8^sEu^YXATxbpcuL|yK@skJ8N)TBlZ zYK_|9#m??Rv?b6-v`D=DePFDPS?`*psK#G0K#sVbxE!K zU%Pg4T>tV5aukxf8myXM4He%^;;Q~@?c$;SlcZ<7UPf8^N#f|skxJAD&SRu`YOGLC z>d!^KIFqB@skcQqW0Z=eObb-(ew3p_Zz?T~ZghWw`{)yGKQUpXwxbs3fYWe*s~mQM zUp;BFkT{*7+Qaat>(q>4aX-^ua9#nnq45Nc_1s_PH}q={Bvl@gMOd0(M~TzzaN}4! 
z3qJ=JeJCRz2)2@PJN@)E+V=xw^}*0D?zNO5=eS5-U@y#(h7H zHP&xZf~?X0o{!u*4Z2=V>J$)6LVkI+MFEmw5-Ey+KtJ08>dpsa8@SGaE7X|-;i>i1 z>t9J&{ zBzfaW%vsnHdQI%L*1)-u+8h_@8#2zjhB4>9pXQ*^leDaa66{$$|Kd?Q)p^*?p+vnt zdQ?YJzoR(!ky0w8(KSj%*TNaUbUgI^)yN;C*_2MsN4sLr((WbqIbp2%Wf-{{gVY_$ znay+kk;VtRihnEAl@AY{ON*Zk?>azz*4rOG($AWN-tFPnppAu4ajRe%8G)@$Q$by; zfX4l1jo)+^hyrl5AFQfts=;G7IH4_eZ#Qzg+rm`R4Gq4da&(X*7U%wId8(f_8!Fbe zQ#Pywzf0jSBR?MI%?4nT>z<%zT(Mvrp}v0U=d57+&ZduEg1X#WN!q&?P0@y0-Amay z_RG;(^aQo_p$4T*`%srrN5;5$w(A%C>oHJDd(!H?;aV+ix$EJCD*jWv(Fbcf^|m|S zh^+#y<)miNXY=qj%2?0xRNmIlB%81PQyi|G8C1(VW=xZ=4Q09_as|;p1+;n)HBO*C zlW5D$w87b&ts&=9@KIluBLKBWTqoea+C(eU|MYk1)CgMlHQ|Y8DrpDzB&CwY9^V2# zsvD+(_d7_pg&bwj(NZh{IrIKE}jUnSW3eejR==ZSH=^RUorY40te50uKT zbvzq-al|Q(l0lX(gYx2Rk$ca2GN0|AMyeg8=}Nn+z)2bn@ElZk7UddO{Ty$!SGA&y zT{7iEl$}LdX}18pv_~)HATOLlG~#|AEwxozkj<%mApDR|9`A~K2cq0=8ZhcsG%FJi|ZbHgmJ-0B(n4Og;*wOXmlqU_Z+aHrgO~DFw|-k zN)jwf!KZQM9*^$*>W*_IR5TU|2D?PQFgu<)|o8D%1VLz z{YZ2)FA2}0K9#qWD4rCY=3fu4er(4gVO)Sl7Y=2%8pBg46ZmZZRi>HF zoPJEBBr|^;Pu#tG} zKicUURy_*sc;?tl$i9YRJni0fxi?VPO?+pD(L#0@Dd>OF6POGoTIxLfRq8!GszyQ_ z<5@l&lA*7)_YyoC8u&*lPt!x?Jk#PV;9nYcUQEh#XA!kkE|Q`(@7vPmr^9>KEjxCZ zPl<8=BfamZpqVFWqa#aaDm#&)7xu<*@cJv>RRu3W4_fN@yfygg5yr&s!+w6C@^Uml zoMCi@iu<|ht!xY5w?R4_2o%wU{4IfhJ^qgqoP$ zMcdYqW-M}lArLzMon$UxH}AzqO@QokaxbGr_PWufM%FBW1B^1d8k|-D_XEh+eCkw! zPb8<_UG3!zsh3l}IyJ6(>%p}=q2>zGG@)me-SJ#WPo+wb`y?e7(Nn#_ z!XoONitKUJpx$Z6KEVbfmsyo*-vRe_RMQCAfhZH%-~x7u?z&_9E@G9Drq z$g<%xBe#`^9fDogn|_?h+brlX4ZdQTmR?t(4y}TQmTmaqZ#nR(>8NGP~F6oTLu`x*7PC$H= zloy0{D51_FZIbWzpMM>@xC4|m`ZkU>Y=0$pmC%v=C^tR`03s-kKPcPkS-iSdZv*y*%IlWLSK|(m+QYBO)2iF`gi2o8MI1zSW8{T zr8FjGcQF2=@A@d>zXf*cQd(XD#+@~40dEhWwOW6UtY<u75#q;9~!F@yH;=m2+=61{1m_CTB! 
znaP!_8?FO-*WcX{@;bK7M!Q(6AL+wUYCP37`T)M7+| zobFKMb=1%cjOlATo%Rg}&dtER9#|JpZxQ3WQS^!uS&MoCH8{_b09&4?pg%s^$U~48 zTJ4?b$2GinPec7eZ*cuK-yxK?fqroADM4LTU_24Dtn^hyt=gVD;Z$we2a#ykf;FGG zgN=I+Edkc{$k#2TI0vYeca2D)J>H$)9O@LmzY2I#M>$?_uWdD_ab2cSwu-}SuAPof)xrPP2~yo8(rGt)p7bi9u?1SIo#86ur+)xN z${$U6)z`@c`W_G~1GR&ubH;3{H_?=14+reuJdeVv*7Nm>k*J|0V0{%S4 z)?KJa8?HCM?v8dFGZasgei3+7(rO!(hrSvO)%T@O<6KS}9i*Ha9Pnuy{Sj+6Fl8JCCF)?+uTctJCgFl(@T?v~;r%?MjhJ)Yp1jop%iUhIl$K#Vuxsdy z?8v~ccR;1Z@c+ZK{CJpKU5!l9%HJ8<7O|yg#r%%!jHOT#i8c4wapvt1eLs%-GFs@o z-D28N8L+MG*9eL>x_LYGJL-tD3c~l2W#sJP19g=bj0>5N8;)=PV?0HH1YWT=d^&?V%jBkjWAFjNUKHQB1PHyO$SlF zz8`&qw)i|?cC}v#_r_q%=YKZ+>K?7~rgphh=YA@#GUyOm<~a%rkWtIS+XQ7F1k=mW zTJCkR4_bG=empv*>Zc3P=+e>*xOpk}bFqny({7AxuAyg|;-^xpUW?WF>(JZnpzLx| zYa5nP-xXj;EX}MNpSfd`9;)ViT9ek8*v|BDTiQI4R7%>x^!FctDhIkY8sC+MOXz)9 zj*SR%=xtz`2@X7!pd*mG_wHKw)IEj^C@%#s8bkUpsmB0moIz`dP2dP3jvp4#->wz- z5ULcT_FR%DX*7zfxbZ_n*_MnfB1N~O)2q`y1 z>P_I_i6H%rYk!~KEyhEp#V|5Ztb0Ui_l&3X7SuB)`1$qer-8jw=%3C|W=C4RJv{Nb z`z3Y;i|r}tf4JAW6VFOAd%=Akmr=q7@X&$wIG_9gkPWYsKW*VL#{=D|+qK8$>H(Z- zawUO!HPpHf&QsqSHL9gB0$!71^nVyvat`^8WzHj?a>scP#x{Mp5%8= zl>XWcGk7zPuj?mz^Q0PRT}!RUHopb0*WkMOUL=A0>Sswq5 z&^OT%PK5Sv3VG~bHS@sG8`>uBM^ntlu~{n6!$ES_e`be17ef{J^EZdD>piPTF*DGa z`o@uG$GY;BJ5rXMBs7)K#!lY3>zJkbz7k1&EZmu99;q5($)@;q?BF^Qqw(_LQ}@!` z540KH>LqW33_S$232fdil(Ct9TR`cqU|$BFh1B>3dBt|BDc=j}U0d})s8j63PkoG* zcX~|+@UQ>H2*dxta^c8&mTRDEFV-8>e>{^L%>qq>WF2 z*u8SJl(h&dp?=4YN5Px>p1Sr~%l!$g;)d9N+vqVAcMvS%NiFE9KPI@WS!C%pr$}V>qTf$YVKxK~k zErRFS7%7wDc&*RTRf8@ZlV<#-Z&TX zCH(R^PaXXZf_mnVN6)BpP454n#XD&!5C82e^wTbSx-wwH*cwm&Ou01EwpIS=mP_c?vE>PJwvY@C8z7c8$gL#gN}HO zZ_lm6s~%@hDbn9$t&XD>f@!f(f(1Vd>_s`HkA628ZwQ2*hvM$K?x$Nrt|S!Zi2Enh z=Q_Xf^o@Ic^g*WHKu zGNjkjxgMB2zvv)P427?pJGvcie2S=;43hT{@7*D?Pmto<`Rzo`u4vo4Ly7vOwRYSm zPL5KuTGQ)1>q0KMwIbw6AZvSa_pcwvDav2thP0=Yj7tLyZ9=+{U~+VwJsk5D1{@Uw zac^piz9)D6SFfu}I|11S%2)^B4GovX@|9SyA(xlcJcZ_#tz>t2pNj$BEL(BR9o?`U$rAQsy zpxrO`RFYe-nQ^*~$zHDbwX6`_Az z1Lo-()uhXS0e!`}Na&8F%_n6`Waq_bhJOQFuL55$o@{}~J*niPz~x5tt^%uXg9XP6 
z##0yxR|cjEuyNNT@w4&1xDMz59Df6Ewaf1UR{bq%?`J9BG3gv|?;fX>^zkv|> zKK)ACp)6PG90co1w3FPs14$Y#bF44~&yk)=*S9PJE47q!52cA8XS|j9Q|QHVq>GUd z#+gaO&ND6N--xJ4&-K5KB~m<7vWpSdMce|e-N$xC-I{BCWr-j$Jon*QJn}W%%lrG_ z;6%`-o}^b_rthlg{bYC&qeIq^YbW?H%^1FV5lp2HlwZ0+>)Jgjaw#QaS=qcX&-Z1U zHhN0Lt5W9u$DM`FMItzO9UyCBHI`i870hKgK zey7s5e`9QIT&F&lj{NF(G=<-W;a+`atcra~EU7Zj|6TI0ckBMJNMd;GX!FO zBUVzxvAe%Ho>Di;Q)%eIy*)g0ts2;j711`W0!mkoo&lz%I(;wl?homGXInP|tMRQF z+9ZrOLZ7CRM{k4Agw!+H%fVR*7%c{#b+mIYDWnml)KPloEAH1}6B!YE1+DlibXkMk ztR&4j(6sikcGO8)0id-O9habTov5%aND&23c=pY8(2k1zKSk{V|z(^bju~ zm3tbUMf#@?@ zzf0Jq)-wOBU(-RlN@3pjE%X>I?o_O8>T$Yl$ zj8-_)I-Ys+k4gP?uz%KsmT7||LTrta^m4dh23YEdojVbTN`mDml{6KT`g zFn5DhYS&VpI{}t(C&zXR{|RbNa;1;P=ed*>rI`{_u4+k+sB5~>`|dNSPDzEg(d%zI zKYF}f>*uv2&tfEnu^@%KsSQu-Q$}B=mTwKTmk`7T;eae2ki+c~Y zC%vPh!KCmU7OlsoaPSyPan^r)z<@sW%XmJQlpUbhr@@`^huR!k_HFp6@APozVx8O< zc>aCr6Bj9dW1Ouu)q7X`7eeXOPaayo28wVDQ&xw`c6D#d&{yh)WL;X*E6Jm7Qoj4H z6BsgH<{)p3SL_?KoTGE2ZQQs16wl==y-n^}I|k}%4fR^CrezCi&;~vTC)CeSJHixhZ31!Sb^N>s zt(haf*Q!YKrfHo{s!!#=-kJJT%Hb;Rb z`@#-l*QG-GtUWTJAu`&r(-G>vI<&&r?QG!L$Mac8^|?qd?c)?RZb0U`>ZcnTT^aiT zslGw37)f&iI{t{SvwmlSy^Y{uB>mSJ3iy_JbZPx*I5(fVw(x#4p2w%K5j>;F{TqyP zcLu^)0g%!bL?g-`z zv>|8+HOA;b4W0}5G#EC{^aQ17Nf?XM8n3&NImQ9EqQ+6Q_ix<0gV#N@aUC#dO&f{h z>_RR&O~3Rs>Uf<0`REI!vhnQwX@xD46Ws4q%cU6*`NWSq1t^!R$t!KSC-Nfx=g~r~ zJ$HPp;@((#d0=gjn95^$-?6!GrL)caR?}`exyo!}~ZsJVlQfZBWV+EeKZ) zvt;uN)pZ{k~})CNlO9AhJQ^g9%jeiGmKQI^(Ut7qbyZh;n}?Clx_H#=62AkcIQ#@0XG$8Nz zy7#drX-LiXe&`P+o&E|-(1&Y$c$Am1OrHva(dB;i_QW?MsrTQwch17IDzm^~7Pxj* zi`GppQlJ4CoCqD4b5#V+`T+T2C}|q0o`nvKZ(PfGPS24$z`9q0(PsKAU5l!O*W+p= zf@i7zihgrd+h*SCN$W;Q=g?nCq)R2wq+s!v=HpDD^8)|+ar}7>9>_Fu!@jnh9pr8Z z$GH!Fg4SesH!1W@hF&=jY4<2kqBd~-hvj8Kb$VDFk;-kZ8BliYfImDd=U?kg;G-!Z zWfHiS^4&`=NMMaQoD zk=ZHIl!T|oau4UqlN*=?g8YKAe$%a3{M?w_Z~Z~UPgd_myzVffqE!-K}( zZN#szlRoVV?njWr^>Cir)eK$K48M`HuCu`2SbE(vaf}rxqqkkdatZi8!h2``YmiRP zIj*G7{!Xb6kXlQ268Nh?#`mCoYoNgc)X@WY_aLQq(eCx+H2%hZ^%S&?T&)js$eme} zVB9lzj8%*~a(XK4Vc^XK+z+Re-;;kCl3oeai(YZ9hx2{zzOayVP2t8V;88npEqID_ 
zFc+-I=^bguc5va!y7vR^D_y2T@9y1oHjsZyi(GXk$F&ANXZ?m{J50pj7{VgqTiXyGD8Wf{uZ6g=mxu^tXnYbP+TEwvf$asDHl7P-^kIDT^}y9+dt z%XbI8+=Z5=fnLd^g)o$owNs{Yr~L0n8??JzVSS9Nsnl8t6wVLY=U(-u4bnwC!(kTM z>`_uSL!&nX;xD<*)Rphf@Yd6svaaV}Kd9@qT@CH1^8W?Oegz&gGFBX_PmORcLZ7wa zN*IiWJ*!YF-%KvG@W$L z^L2$PzqjgQ4x|2KPRICT_)4$Fq4uODNv`TYJ-@rM-rjh&l)Cc!*kFgr-EmLOG?4xX zxmrM58=w=M$u^Gg5PfUjVt%Ea`k7>FDCKMHohzNC@DyJ^`ZAO`&w_DRdFzq;F|gIY zeVDgux=KEaL$42|J-z5hwYyXuH9@2Yy({i08}0a5hQE#mEWrpcHICM4hJPb|jRIJO zKi;)ghtM3(^%lUvhv-A+MXHc8#>s4lj%V_p&)p#A8QfQ*6_Q$Cilr#g((uARNSWe| z_EtOY=kfjtS`>A2FR)TbZwo=aPCG>U(b#y4$shMv%pFB~H|Dzp+$6x* zs9-nL)U{K3pHJT2{Oe_EgnaT^joFxT+(|(AgvdF{F?Ii?;MzDGEvi>2%QNbYk9GC= z8$6X$&EaWgk1_-0DhylN815-0eJ&F6B(OUpbdu-3iywKwy?a;k-;S>*PwprGErC0# z0>zsndWX&9jwX(}N~zQFqCB%ANW=5MRuTN{O7vo+Q1m@_=Y9sM)Zto(o~U>`t#|^= zIG!-lxiOO3(ypf^4}m*%=tSD;$WWX3tI&2MZQ@Ah0p`LsGA27p&Y94Fkqd30WJd#1 zt}Ejl6`V&eZ3^<<*)`7yx|<$x=Y*g0)oa%Rx$4=J6M?Y-cJv$A411CNqZrZl1)mMT z`*{2co+n%gw!gv0Hj(@8w0t}5Z9z-V2f|_avh>Vc0)`JFgWNIveP#>wE7k&q7XBXE z`qxGLUrYPf@H8I?&&JkYNRKP;`tam&s4fN0cY}XV8OtH3JB~_6_DvNyKNd97 zubBrwpD|gA*^7b5NB8)cMXUb-PK}HGfF4kzHv_*1;fbMWj$?59a?;%k} z!y#&RjO)ocZAqgg?_O})97(jc5vSQpJeEbmXz`YIrM96HF5*Etz=hI`dGZ&ZRL z&<^$dy@BBOJ!;dsQr^u23lGslj(?s2qv}`Jb~`RO4yKJB)vlR5E1ucvDm?QS1TJx>o?39ApEpIV z`*`C%6xt}xxeK{v=xy>>JF`4o>C-MGSA@?#@V(<5_iR0mMt7$NIbO`iSxljmU#9Z4 zWsYyvbkWykJ6)BiW+~v>^3;U>>dDxT#v27prq(kvfnUeCgW>XF;5^P-x+Ca1_$0&q zH}Kvqv`&6n#nUPDeG*FB4M%UlpQ<(Q`Kcq|80{A=VKt*Qs^itFS~_|)TvHo+Qf?4- zqkK%gA^i7*QqxGCBzMn1jZq(Tgw`^^*n+3AP0NrBLK^3B+_})5c3uA%^`Bl_=h4K* z5Tr>jD9^D-CHh4hVh_KYkbY_Y?WfhDbZ1PZcjH`pkVkE`1M94cyv|&7rY9;%vz@C4 z=)wL#t8c$#O@ylwWBfGzwsyIQy4S+;XJLO61RBC_yiST3|5}w;W!)9lXC6T2@qXoDxa?MCzMrcHYJJ!MmBjWepo z<0X*#?r8J@SNrHKJ?zHjI;-U_Z~vq}jh`F}UW^m$Nq;p77WhB7o{wgBccf$JJnQz9 zBj;OkDM`!A83H|VyJ%PRqpYQ^^2WRL{3Vob)U9(%!mS+E_HehRU9@yj$m3q(MrS4I zS)(DS2+0Q{YDiZBr;*KPQ?lF&>llrkQP$@Ndt5notgak)$?F-Une$}t)1o*6*csmUq`V|w;qL%f zxC4D9_wlK4^~8d!3Ta#$R*5}gbg@t<&r+e@0oXt8QEa~Pq>1)`cY62J;rVFNMo&_k 
zaPQj~_fy4{cC}C9N1RqdYrT1H&p9r6ffP%@-R)fWV@^U{@9tGY0|oS=ma$kV?g;P^ zDO*8RmY~&U%O0T*wYQ5Xs|)-%g%-`nhCMUfyE;~zwla*&rCoiF>xc$Ulg={=H}gNe z?s;cwa<4{D51GbQKfcdmbG?8KF(Xh(6;HZC@k&}LMlbbj-W(@qhX0+&!a{QY1?aQG zj7My@#hqk|fe18l)^+!Ssmou_St9eefHUBAM%|8yibBt zX>%nxa+uIzV8413grMznxf0t6mc=pUrp@i#c z=K`p&Go#;F8cMWp1I>HX^&Yuzq(_dxfe(@H1D?6cqW^`bk{sdhufg*Udajg_y&J5% z7eUXNmB8R$DXp-3kbGOfz|Vp%V?RAWNm9s3^r_E)+VzW{bCoYP1iUz|jEtn7(3QHJ z`*`n~hSoe+ji&?g1?1dL&ujyR(e#}=&&ScW?;!8>h;zE6+T=wg`%h ztL3g)EDX=Jt`?BS_4JA`Hq~z&MG($w`BV<49?@V%ar3b?5vWVOG*9@E5J1p8T_pcT%!(qSf)cfw3H5e<(cn zoio5Bbmk6qJv`-5(uJ07_uJOP`CjJp48J#0f>CZ9??$NhTyA5epHDrTY2O^$JAwNu zc!YbezEAoB^6KrNy=4g)=)qefp6VrZ9ka^)V8WARKc%l5p@gxdZls2AFTD&^rPGH&0oiV)*vHI~eymA|D?+wlLrj#Pyzt8id zK;wGyMq2ebGEx0TeJf3lsZdcJ6sw2oRCEgGHcE4M1O6B~cme$FgzJ3-hLhB(EP0H- zo~YUb-Q*uhv4mc{3W=^~uQJ8mW4n<`JB9Bz>Ho=S`dt{ISOji-V!YvhZ!fH zqdJ%fiJj>=PgdFk$G8`LeKEaoCbfDVx6)!8DUJinxxhS&Ru<4Usn*jRxL`6Xc&X<} z$G`*I(K|O#)-6ESm0Gv+=5E^5DR4yb=B^j@0q53n|L+c{JVV*8G(QlyhI4A^HAxy} zu&Xu7wY#}L5}vsx-JJB+?E0>|5!5jpp({g*mc*!1myAzxDOb&55Rk}8Ex89@Kkk!| zAKNNV)u?JJS8I1ys98-m2RbXF#x!?_fUPq0Sn%8J;^kaz-_jUwuLz%gB}(t&v95Ri?=2h0ge9&I|M#TElN=Aj$@zu&ffw&XzTc zw`p1vb?@v``L=vW9I8+U^wk zK;Jn16EB3$&Vxsk0?|&!-;cphnsmmUUX3g=Zmk;5TE^1|XHlCv zw7n=T>DfGo{`(rP+&##Z56E{XBP~r0OQ31bG<_E8UjVF|p|1`=dy>{zu6t2E!9dvC z0=MI<~M7YpT;pm5e^(@{GqK@LA zz3RL8H21qG`3|sdJO?e^Y0~NmlZ3up717t_Uf$O;-ZPBVf1^L$7o>h}be}Y||8Ml> ze!Ng^-MetB&6K)lRY}BY|F=ldYl^U(myYkxD;MY!R^ndp`9Rv4{tpu=5XEyUl z7++dZR>9dn1ga!$mB&gY_&*__{JTToPzFKcXG&v;tK+>jpX57{sN-)hxo@kMq0xmNA zo=J}jQ7QMykiu(oTC802yaplP0Tmi4<_K-7hl99F%a~ampvt(zIUgFcuI5d1M(64B zr0vYj>ch1;l&M!ln!IW-+3yVC7L!7kl}vFCAg7%`|3&@%)JjH7>EA!JXZfTtlwxa? 
zKepRXeb^(Slq^O6buMK}{RLu7rkE=Gw-gK}Lq28W4(>gbs1h6~rMffTpW7WHqI&ZLB5f)qCALFNsIm_o|N-^OH+?3nWQjh2Dd?j z>Ve`do`NQ}tw-4s_X4iSVe4yBvRGvJkOhk-?pb5|!C z!A(E3!fn_`;tb~yydY`_Es-xdSZ0o+GsHQC-Y_3i`}`n3&fr%^M&`aSu5IJJ=$!+B zs7HXgg1elM*Hh)(lcS_@HsIdI8mP)s#N>Vz(7JQD{GdU)LREo0&^z%adN9eIW5qVi zg7?@v`ecfWt-SHf=~3{!yLhA3$DN}i7$cDf>e-Kdgcs_0y@hp^1Z{c(sGbM@p3DO6U%AxvL0hY& z{5&i$#x(vr>ilKUR9vGS#)$9?DA)Rn0;PHmgqABkeD**ep187_^k37Or|9Ec>S+Li zgK+O|aFdyEr3{+wNZ>GzZa1*!KCTtepFYT1a=Kw9+rTep>}`<@o-L}*sw7d?U5`z5 zG`M$`+!MLwxtZ@lFU}Ox&`JwtU9QDchq+-7N{k7D+8T2@J%P*u%Em&7V#imZy8j_Y3sVz%P9^9c?jkQJVYs$){(1qlW38dQagwLaH&e^Imd0kE#cA6+zE1 zoqSMe(&1{!TA;d4DvE@hYyn<^sg;zQ6=}#=x5$iiS zfo})U6T-KW(v+e~7ti}uBlP`^lp@!Twgd4ghcR#bXswI9vGJQ{uG-4YjNEQbNq@RnpbFpH zm)d4x``u6SBB{#wYC~AgryM%iMXy~zUguA)FzfTC-edgch0vsa?VjzTjNcrns+4s8 z>J4Z3IVr8x`9Tssxd1)abvn;bmXj921=sPebFft8GD=6l$>)$`RM0P4l3psXe{Z6M zp;(F^A-!uK&jCSiK5^BvFL^dYweP~6+65lA8^~R=NQ9Pd3xE4iV}|*3IUGmnq7RT7 z%T45N!Kk~D+KdF|?7f1V(*9O7XjcWh0MkKQE1vaFTE}=$3o*ie5?S%j{1v1BPEDRq zrhU>`;%l@-u8<6MPi2(;22L;`JnKX+egPbF!>ULrx#X-;i?k{AJ;Zq1L#cQ3OsjDL zqjVk^=|$~&agGGa^O$i&I9+qn1H}_LH&Svv(C-fOl^)2s=fW6sPoO6~^bEZ7T}pa3 z*uZ}pW*&|Rp=}_wHktn5-e@~NhWE~cPw5~35Kwmp)_&AhLHP^#Q6aFQD^MpgA8nM$;c1fZ$&l9WRpdE9m)iM&waicn}O!(>odDisML2 zNk^d9W6{$zz61Vy@I-t63f_MOeS0#xD?>`n>$w^YI(j($61nVtd1HC+4cIMUUbqC^ zp`Lo5VP53S!F@2Jd8_W928IN=KBIMq(7v>PyhPnn?KAX9F4xZV&`KbX2Heb2}-mI?zhwhbf}QItTN73}tZwT*#@C{57_VvzYFo zRArwtGoe)rc6HZQoJH+9d=lYrlzaa66Fqn3sNSn>t9AP|;)QY}rt|Ng&L7(FEJ_OB zXEpim^srI&v|K(&{}n(Jec)PJIZA<|2I$gF5L-+s!roJe73 zb5-g@Z`xe+*e$f!mFM-8L-H32-})z65^sGGlfY&}GQwEW^3K zkx7pPozaF3*9%GU5YSGA zKe6ic4DBe9*pJR zA`gBA4C~P()`zG2=*!)-suVn`Rd<7Wi~#Q3xRRFjqqMom;rA)6hIUl&TTf^G9?qkL zDxSMeQv(fM2#;`0s0HnEokFkJw)Dnj)O8qa4Wl1C6UPX!bHVQlX577?iCi#rEu+C1 zY>E-5_02Y^Kj@`<893C7-Zg0b^p5eo7Fs;cEJ#`WB{_#s*X^{xRdTJfU66pDN#&V} z>J>}4R&zIy_i0Mi{^6NBMyhZHz_F>#)-}tAfwr3R3d0;lJCb{iz96S7@%#B?&$V8hN zw%~Fb*Qm9srK&$3 zo-axIRvuQ(uLIyg?I+F|EJNw&E_b7sE#!Cn#1fQO)?C4Lc;I(AaDanN&|FMk$~TSO 
znQqFLY`SL4nTNc!Jq^^8;a!v++KT*~l5JX<5`5(I@tsyrrJJ5q>KIYFC}(n*ODHE| zx@xY@qTDr>%_+=DzUlzV>-K^6#q>^~a3n6I#ATsebx$J@R)ySMfN2?Z$~U?&Znu!K zjJNKhmR6jpOJ~kVoGWKYukCIlyv1`4HiI3{Mss%Onw%an<(?I=+XU?okO zeu!2=S1+_OWu>X-ml>U=JG|1}PVP`k2MTw8S8S#2@6GpnKz13H>YZS43F$q3_+{XV z(TCD-0C&l*hYJs(mw!eZ9s`>7z_|%-&;|-$7bsp!lk2`(t{Z6ILUcWQUHd^2?$H(= zRm&J%3$M^JRS3p=@p1KNGo188%IrYNu8Q6Zmr(=POY$c2YpdReMDZl6gHWiunSMi# zZuG@iO3)9hfl+ul@lg4kts4oQIG6dFvI`kqo{=*hY!q>~3l6lJzwSbK5Bq?T@7#H=SB&f))@&I|{E(>=6w z8yHzjUD~wX0)x(>T;ub+gi&DqMOs?~-g|=0Bv5O;)W+cov^kbhhOwgd(I=jOp|MxT6^QcYiYkAk!RR+98l-w9b zNc48Hd{@flPI1qjXSo;^(YRR73)DQv(Pxpz9;eg~d9MV^ermWA_T^mFqvdU$fuK(8 z(=@rWX-{3V+%ipx>2KawQ>PqO$)OKk4&|;e zPwlbClKge1URs;abMmjOP158N;`XWKGgFP{8>03OhCoH$|EhV-4(3VrT@AVCK zhm1QO)YjgD<8_34wuf&-+_mD`j8;|hPVL0KbSt^jwo%BqIUA~0t|=?Z!Ov4%w?d5t zj6`=hYi;@-P|7a~Np+l5YZ*J|1zU!rdKu7MMLs>uoMjn(brbyFlQ!jWZQ#uM^tg-K zxxq>5+fAw;Qv0gl3)=~(JkjA5Mu^@%6Tz=(9}Lo2|E-sSN`Fda&NIO90$P1R7?Jww zjYIZS!gs!cOSq=2e(8>}B=`Nmh;t2hhacisb2z$E&~<{3;Nx#2^J?G}#=z5KZ6erl zq`v_iuAI6aREONtYN#BVO<8(;&Vk2icXyun2ohr|kj4EudJC0N!!MwT??QF%v)PT- zznD2wBdt1@Huhj8ArEfQ2K>7|Z}iv&j6qKpxresi3B13irLH>YhqIJg*TS2`f!e{# zVa}AMr8fZU8X)y7o<`vHv}q&co&#(~G1Kcpsi!a5Jz#AI(2k>qk7>~XXk#F-eg{Zg z4cZrEO4~p=D|ln{q&L9iS(KoMQXy|Hrj?7pc4u0mb*vjS>xo2a<;O|!7E;)Ks@uVt zzC+py-Mf-N6WL7OxSFLMX)C@Jy#v)6FK7_$TS`fuVkE~?H?g;WO70zunV$UCQ$!B0 zUbT+Do}KF2{0YWs4e&X8)+(T9=5aV}n)=kdq;E${cSdqsFd?m$Q;z4`rlG=8uJS!C zxrMYvO{<qXoSP3`YBT zAKuGhyv9$4-)hU*->0r}U{iZaaUWVO_uO(?y6BywXJ#duPzAL|Pi|-KnSk9`f^=IO z^37xnMF_>e5m>T(zJa=oxtIad2SZQ%Ht?Rvx9wkhU(S}*bfeZAq0MUe`bPLf4j$k9 z`IgzQwX`a>P6?X^1Lm2PrXg{RG@f37G()c+xt$8QxHcHq}T! z?u2u%q&*91TM=(^-tZFAp*!h2L*1V0m`C0B@!T^8({OX+jTSf%CS7Z*PDHCHU%TQ-`j46MRd_{tn#S3y&OzF24~R@1gAVaPwz?+BnDi!-{z` zAkg+S2VU3!rV6NSacGD1qz$1vSSf^e%%RNH7$+zKyqhsR9pFdOLGnczOb zS6!+A4st!xt`SM64cXmr_d$Ew-1U_jL@SR3Zgv6=>}=sYaJC(Ka#oWAXB&b9_&(I) zTE*Lx?K*lpIJ@x@U8%VVT(t1jWWz;9HZY3Mxzn^f0xqxo|!t5zH#Tv2HI^b z6|Iv03bu^k?an+;y3)4MjH1! 
zJ9lvop>5EavGQka7+v3{Ua8+vXH=UB;8E?~9c-(S|24GKapkNyO+MqcJAY8VXb=7| z^-UwcBhOvwrd1PjMa_AL)Vq=A@(}w`&f~nvk9;@>oYv7Vd&3iT^439mI(PEZ0}Ury zXA0mbMv{v@w>uao=GLAZ)4IA7^TexvU1MnpRX5{1j=x%ZrQo6GU0bhmO%fRis7G2OtA=g;*))3rB-e^Ztvi&s}rEm<3Bj9)8< zD~FP@Jp5F>TF>S1>5!u>b+{KW$`Ad=)1^rH^9Hovk9raW#Wxe3k zk3;`z)8i>Cm$A8x``zGp2BYR&N=fo-K6=e#SP?v9@G4-^f5o-Xk!Zt9DZd^3ZU?lk zpHvdrkk6QS51!h{)t!=M)a)v?5jKAU2Um|*YaK!**1%!bSDB z@VpwWL;9S#+wn%Uml81jGg5ormAqm<>36~RJhN5H#sPTIC(xTG{khXFpK?D3w)?4d zGF<6(An*j`p0w#H%Ki*kTqkkGRjJ#FarQf?Zw+JMV=%pzo|**~TuqW=-a*PT>@P-$ ze~8@Qq95I#=Wc}i>CulU?^0k|L4OaXZPUrA)?LZ-jik5*K79^#7#n3cH5HS_lf`~Z zYdp*FEv!49m~kO;<_vmDyHr>BkdY#WfR*bhxstx#ML%uiZ!z^41zkw(2W-zLi+M0ggUHBY>e|i$$7f{J&YL{#4Bj?Jr9!1KJd%&=} zc`d&!ZTb;>r8j?%Qc^GQQbsSteRebGbyFGlWFD^*4AC=TjkS|YS0I{KxqB)HH|se-oa z1ED3?{jzdCz1`gVpy#*gtH5gm*fcJaUT*uL@8bdco9XjBC{b&;5%1pNy;3&%&3(eJ z`=JI$xNDe&;H;c{mMNdC;yRyEn-fO3@oQHCy}Zxeu4!QLbU9B-zb1T?APcEyHCMF@ z<9k?^pZmeiA+A|IpA%Y=Ls{iKO#nqAq%&>gz%gI;Z|0k~@ktqd?7pkrz_BTmpv@)C zU3Mnqd6~6*^($RSi*xvAEI5rZMk2jlNV@*eq`RdPwDSN|tS!@Z&3hPMkMP`i-g2mC z0^@cF{MR{++Vk(hqj5j=HPh?Ns0k0jo35p_x8cnLk?1QK-=8oN_4e(F>@srI^T4fM zX>^yi@Z~wA`HZs0(q{Fzhv8*y;A+~i*C8>qi@RKzsI=DDk`zMcdk_qNp{f3(WY zqfhlOcpd2N-C5Ag1Hip6@GH-*>xurWUw)*5&PXKvoQKjXXP&O8KS`@6(FV`$UP1}( z>G72McJR~3!1!jQPrr~VfjmRFrh(uH^*j!}Xno%R zKG#tHP2^jQM0S?=IdnFa);&+kV%q)__?9wm9WcHW#)l^m{Tiq|Wqtt9H^AxS)o(FY zkHcf@Df?{ZJ6}*=DI8s|j;`d z@XPb*jE#3LH2frZOYr7r{B@MbQ%>;L5qbf7fs}It_36v}AlEVE8$`M8e{@yfesd4k zHd3`ci8`XP#<5 z(fSkf8UwkQ`#9ey;!c}EL6A?{J(5l7Os)T~+(?L%r1{f%h4GoHpfB}bWvS=xcy^?+ zq?WG{8WW_L4;)?LL(WL^pz(H$oiz8K(t15!)jV8<@6Vk(#eCvtIOTeJrB=}xx3fKW zr~7Z^(;w0E!1R_GOHjf@ojQkW^!;-E-1n}n3vawX{jc?oX?mUuJht2!Li))e`?>k9Ervv?(|soiNk4tjJw-o7`9ns>oqAsA1aa z;Q)FfdVK1a{24s47Rjc+!o^_eHcESi*3JXcm(kqq$vr@vp}&pgp>^OfT2lfhJ%_^c zqV#E=K)YSxFp|o5`Rau_nBI}5l2Bg~-28z1{-M8)lERqO&Yy;Wze!-KjJ8%lVQR~1 z%Dja(F$+MV>+9&wkmbPHNXik^^F!#Sh*l1#+#R&Z`i%|ke6j0$Hb-}4Ssip` zD_Vsz90%&anWkT+lBYXoQ>LE{o-**j1Y_oKNOLRgcjuTX?fWEsTuRU8z^PZTO8f&T 
z^i8O-9!h?O(jVe?lr$fa#yy*AJD!_22HqFXR=1?Tx%$5bp4W?h(WBc_(VTx7lTV59 zI@E2%m41w-JT&EM(tOBR&85Wu4yC)B)jIX8-%L8K41MS)R|Gpj!ItSBrb6n}TI&Ae zH1{`>>H_YrK!5%|u)1z+q^Jx%VjR&Cz?+2Ddm?i_hld>F>F?o{JE>F8rp3V)?FpNX z|EaY31TsV~z^{2X4lDUA<`G&8T(e&ja_s_A=aS9nJEIRCr&SwPe)oOXb%^Y?kUQ@5e<4YkhFvw9)lkC)2cJjtex%b zp~bW4sl32#jY&8aPGhvo*2ti1f&6X$x;mmIz-W7Ny8`g1edYvh?FHSrpcX0uXW>F3}rd**4kt)`Hp_XQPaGJzApgl)r@OncjWL@e|BUI41bjl z>P1E|@M#^TYk78j7X!C^XFK$e2;)Ec|JI)-1)?6W#yKD8vwY8zmh(;WEu&>SX;Y`B{a>E=xV`o)~`a22)8pmbDjRNO~Rp8v!&@xdEhRmQYD+7pL*h* zJWE_GBX_1Lg;K{|W6mDjHTGa=h4#24&lBVsLtlIv`b~W#1Kz~lW5DfQ3HI@Mv^Q$o zIrMXy@t|zebJWuWb;Xv)gT>Z^N|7aNqwuHqJ*u~ag_OxXIKqle7y)4znT7c56JaxdJagx zM-LW~&Q-W~;03dJqd)9m=%58PkEQ+;QbDVA6{BCT(H>x4`^1b8ZQKae`o|XsDLa7{ z8!c8p$QmeD=~_r>{{r9Bm(p`@YT)9X`Be?9p8?OC!0t@1A8#tC({mzwpt-fC)E}UG zMoIe^P<{voJ5useusWOG&^t^|B3DiJ(Bebj#2HdAX8U8Q{eG~n9m+^p`;kJfjvArZ zx!@si>1jP9kms&N8>3dcMz^po@@$wbln0K}4?VXUm}gVEXCUi?tyN|+CAh=TT8}d7 z&H(%W1mvE(*FYbfB=w`fV%)r)^o_HalR$D1$#!OF@j}`-k=#EejXRFN4E8p4yGr=F zR5gH>>|k}nXa|qLUz1?Dl3x>mBHEnG=r!kMD=D)B5UrvuuBv{-pUuB=BlKNDYW9mJO5@*yFc|bd*>KQFqWKAxN0beTY^p@4>ye~mL2H*IpLR{ z*J)@^&eT4%uYgh57}6%-C$0cQFU+!#-hJ|!aF?ymt1|rb-_cH9PLJ#c^R`HCr*z7G zW_@#)eUiQY;TvnO1rzqA7_n`xY`8|D{8O@Kn6<7$&d=kmbRoxg|3$QkxT2}n5G6($ z%%ph}Wnqj9Wgqrr{2FyL0lZ?YmJ*GG9HG*?BH~>xZ4JsTS7zh;_$T{_PblKHe~FZ|>HJhK&TcBMITA!(&E zpK0`xkqtZzAfD5+gExhY#htWU>#6VFW;~vto@eOe@$`kKFc?>_4!xuZ&CPROA7ysq z{>-sJ+j#X{B)Y4iU-NzycUPdBzE4@>8C#yPt)HrK_4V6+pZ9O@b?=%sKjTtWkmF0n z|0i%9bzWoa8#&5e@&vx_cxRLYn=k7puMrah{0pxT2b0dHV zO(d!DVcy(FYu#h0XU(smi9x(EBB3_xeq8^F77Rjvc|Kt6O42K{+_!TX{jFA(=DwbC zm(%h@;5Y85P%3OiiaK)iT~+gVFJREs$7<;4Dbl#6GaGu4o3%kw%3YmdbzqcKf_3#Z zdBj=B4Cg$9=%W~6trj@S=-1KI@19I!ipjUt=v&g-1bk#RGZwWU*ZtziS6Vzms*AWf zi|UIWB0sMW9JDQzmmsw|NCs+K$aC`*@=Ja!M^^t{4%O+|KZbrB$85Ym60r{PdLe+YoS? 
zeWo6t`R126Ge@-Zm+@;Uzl`++g} zHoUe$57SncOS|0P&_cII7f2SEb-3chPFX@}ZU#!u%g1va#9Ngyu zi@4Petz;d@-$&rej*1P`*^V;O@IbvRA18;={$8O)gLu0=)MGrgr9tPOL@QF1<<6`} zz(gx@Uk79-8C4H(e=j_%IUI5Y6fO7e$gd8d zqsMuAMm=ym%qYGfl=FSKqx**^f_2Z_(WYHQDU)dTL*Qlw*jxd9zD8-<==4Kr4VHcb zmpQ@jbwKCZ!2rg~I(YnAu;{+s;gnNL9;4~J#BAkReoY06%YpS>a9;-xc?oV*4qi3_ zgAs$zK@XMl7@>A1aCQXt53xLX%CqaH+j--f=0rwE70?dj*?PvjF^gQ|se-zd*q)rR zjZyD?gq)B-g8Lx zBV~THfSJc@KzW8*op?i+RA~c#5dGuv?5qOPp`Y}%vz`295 zWM3#zj0N`zw({lhZEYF9;B6V@m%u05qscio7E|)QHRB+uvI)g2$L;0#6&6$^SbI`cOc=wT=|O;3%Wrx%km#$fLm(z!#f3M`y} zPl_G=Me~^hdA}D(+`l}HzH|@6`*5K;R#WuYY)N@{(mrE&4uFr;qbUqQPPohX9&AcW z7}MvFdLgj2rZsD!G|x_04gOpQc@6lAnU$S{^OcfoGPb{ufaDw?7)$RB;BGvciEYsr zrkwGzi!!wXDj$X*uhYC&A1~sWYh0svKY)^30)wYI?WYcXSkmA@?@;%s=(+I@a>>&k zZeji~oHmVw5^uwrr(Mv!e-Cr@%#bg_bA2DLguma#7+DBDdN9kmBjCkx)0LdgT;!UK z!0gEp(%@nwqUFs63Qvb@My|ogj$5EoEpv@fiaSzzklWqqZ-b}4w9;Jz#YprVMv`lB zuh3$(v0C(x?X<8FdNCTL+D4MUwbY^>b0!!pWBkZ<)%3F;u^f4UTA^{(l3eA{T85%_ z+raZQ_iA6S@bxsw+nRV;Cm?i9MxS5nGo3r-B2Q35bv*iU$GO&Pea?0UzTwlzL);g& z6|5|!Rr=7l#_P^ztyOwZjX|O-Pn>bYI1{Decap!^kD7V~Uv1TTYHIb>FUZ~5?lDp? z&8E#>wS_x7$ZD9`=YM*2WV;(D8OrDnKHV#o_@+dCSj^87);%_Qg{Q2TD9BwF2U1msz1JpwbP%q zX!@0lPLgiAcTa=1AgxE@dNJ2cf!?-4rSg7fUeloPGM=YN7i~(n@w^k?Kg3`o(8M}j z8Pozg4((RY-ufWjT~D&V&g8k8OdICuQe~27W8p%*`Kmi@C4HPFG&D(pbSPKf&opV> z0i%sf%H0ePd^u<)wzrtvrBK2zdFR{hj5P1Y2K*Yk)A?A$Vlk-?uy%YU;gX(-6Ec~@Gdp9AzvL(bRf50@>==dVuVUbo}Ks!TI+P~U#3L;Nk>632Po6sFG*6W znae!}lHc>^JbOSdpbgMqiZ@1?_PjB9_WjiP4&!_Wyxa_i9TC&LxTz!B3xB(->cv|d>5{WqR$l#&bhEnje7UM02b zRa*yC>gLWiJe$E&u{#67n!w5TlC}oeKcOA<=)>+?G$MjtF`m$KEXby%z~L?6N(m%BYB z-awrX0cj<@RfODkce{R|XEPh!08ZZkFG=cmFWvp5tD&zv+tYL9^3h4lDAk#S^X*;a zu4Z()JMB&0dot+V+&=&u;zX;bW2Yzie+z_T$s-3ZV&?HA&=^T<0q+Z#DP?Hyok6Qq zcId-895{@4>|gcpsP{^@(L#M5_CD#oB_*3f4y07{-uUUpnNVu11CH(7SyTKN^P+ip zroW?SkY}wpaShM8LX3i! 
z<*mW6ciOAYq|NCdO`JuTN*jrv1mCDn`pemXe72faN1sTkWf=F_&!6Ljkw3XR(^M^) zeMfobuOy{M-yr$6I-&R)3^Ym{dA_qTJxJWkX%6|b(nb$HHE#8K?f+`$y}^c3#hHS6 zjcH;O05u}_&!{h3Mx3#DSH;(UTOOY4-D~g2_k0`e@$t^TT}XNA*MD?3FQn08sAcb5 z#z=ykt})2TXTaGbwAOSRHRaL1X#26|r6IRi5ZaYIiL%$zgKDtb8*$T|<&594P--UV ztj$PgPwQVanOb-9yqJ`pgyTw$I*M9}QGS{s(F?$edz8h}HKFH^lVd3mIuqzc&91N1 zz$fZ?9%1Rnbt&U>ELU4%e|74rT zS{wwO8x^xAjP1AJf=97gJVU$l8TUf&Ecm|A+ezT+1=J5SioT>pQ+en9edAv*K?3PP zrY>iMpV#@DKw3@(9<9~S(&wHmokHW&)52YF`cW`qf|N_(NPFQW(%|QTx?DxBr1TWJc#LQ^AL!&kXVa43k#9Zao&%KblyMK$3BJo2 z1)i#)9m#kWhiUyn%4o@KuO(2If+0`#^BgH7J@kUxYK1bsMLy|Q0Qq3*+aGlL-yz-7 zJiiRMMuXoSd>#dYG`-Y@yIj879*qmvhE&sN#pS%c2R?a%QF<$V+`wN?Og}`~_wn?0 z-d_sF?gS_67z6tExK~Wg^>EOd_k$xNjd*61d&Y&Jk-ryH#+zXK2XGqC@OHMX55Q>X zrXwYsL+&JUpa>{kzjh4iCzQdabddS4URs`a@Qc9t=h1uLqXtifcnl2cA8;db!~F%0 zsVYiSOV;mHZP4i2`QYWgU?Xq^$4Gd_cZn8?USTB2uRW#HaiTt!p#0xcc9LI3)Z7s$ zwJb)SZW?_@%R}lM>e>vnail0$j3s7@GFnj2)p1ut4KIKX^ZG8A(Y+F^_-j9_{K;@9 zkCki6r-$=IJ*$+oTE%9P!@UBYT%zth5t%xN-=3_b9O}liO6r!gpZ;IhE_rNr6*uProZ5pB`(@@*k%Jy>*OPAn!|r616{!pqCOrs3)u= zUq4v)(x@*RORS71C43_ev%96_^~TQ1rH*mrTtGQHprBglQl2c=RpR*32C;~@V*YfS zdbIY)?fh=xD`!k|{o_yaPoA7g5%u+KY%8T|ITW{+av}{RXo(t@W6Jroww9hj*DocF zG9dvS&4f;))yI;{f<64Q0E@9c3P{t1(RC&i(4YM6(V|+R|1Bp)NBT$V)V?owzk}Wz z3C7(IWQocL@1kd|QqU+#JNf$}u*^am)W>o+8u&Flt3blID^ITp`@IzwBrOF-h1B{! 
z3|tyd>j7G%SBZNRE1^*-^snVnoKr5l z8a{Cg{n(eMu4Wtqu6t;+=d0}pTiRi^GDeK4aRA+M3-oTh*>0r&g#52j_h9g1{FeS< zd}v*23-9V1VrR;qT)};Zbn4|vM&)Zrsu#esF?;k!zLYWJ3Pe2+xLUU%%nx!ILDHK0 zl4{^CFVJ6lG*i?Z#obckYf{^ zD?xb=!%63o)*bxrvH37?Qde6JhMCcW!PfsZv*>9^$S0`Rxq)XjET9#}mUi}{#x)6O zXS3pw;OTAj<%MV^+F;#j@)1;V34NigHy%**M3{tZ-U~HXQP%?K_2<<81<Shog>D*YbNfIstH6Of(K_+&7%4pK=zmbQ z-Z`VdZ5Z-jd0Zj1uBAd7ju%$N(1|J4j?>}k{_xkGJ=JI) zYMEJ!LJP2Eni%U?4c4^@znhx!=!s;>ej+^C(y~vzruk;e^tYI^CCU%26uIyWXEC^xO4zQ5>*U{~UL9z0BJ%UXcPZ{z~{jG|A^b8n1bEB_*5pO!u*V!3|zKy2N z?rGNM62JPJ+QRr&T_AhsHTFlx)1HaN_|e9fy-(7XQIwb)+SeW`sDKYSKWocZ`5Sv= zB58~VC8g<~k%Utx0$!%kCS{GB%~Ito`a-JZ#rc?PLrO2@kvsI%O1ko70d>r!u0f0% z<7ajWeY2dJijk2&rX)QhZzZLx%dT{|LQ_o%#wM~i4navBNZ$uM9OK)SQdR|NtQF^b z)Yu2w7Qga<95|WLaD`7_8*Tsg>Ok-l#Yx ziHs?Qr>m{pO3RH5aVc-qwfljeT|hH{a!&%^3&3aZ45KDv@lGL?yEpC%a_b0bj34Z| z0ItiOpoixpXR7F>ZSeQIk-Q&J%Jk4{TLYJTIi!-uNDV#Ub1xzFjJmup)Hy2f*iqCp zly4h;^+#{@1Pvn-HV4`=zIv=SAdQXcF@}3*m&R4~l$s)JTSdIn%DDrsbp%)w;7woK zLMXJFURqCIjD(tg3{P3X=;=VOA%V~~Y;P_4*($zT`be_W~92$Y^o;2A?jXfl(j}~CClCQ zkvc|CZ3nVOAnQPRU(-LAQd2H4-3DE`AL9h@9-;2GSC2xXHv*BTzurrI?k9VNw3mb3 zE5KTx&@1&ov7P>M2Z%P@V&1$IES$!})vMOnGV6sUFr{up~eKc+|_(JBS4MkN=kc;^c%=GlzyqCG;4C!lHqTH zc4c@{L*HvBHiqIdu=q56)re-d8@*B)*_<(@bzu`VNh#~Vsb?RngO)?7?h+o(D3Ut$ z!>K^$Y!Uj^z8w~L# zt;k^8si&N1dFx93(|~g&u&Av#M_Uwny_%;Dj1zHcolzG%$}hbb)yAG-TqU_?XiFK= zPCMA+ymvq6Y*KjAjPl+YsI55)d}=%Hzch-k@flq?Y@jXfA$lB|svwn8SnrYR!IQeW zF?V_ensPj8Q&j`{h-b=geO8=ZiamS%FyC2>2G6q^3IwZ=G5QT&LOmOiVnxuNR@+i) zpFo;?p4a|7&5l7Sc+U`6g(Jeri&zQRp9?}VDyF-a_ zkCk)rVWEfKoILsZ;+&kEsWWQsm@{GOq}en7X8g=<<>P0bT@L4c9R4}w>M5zhlGLkt zsr+WC@^)8GN`0NnpECfV^RI}FT z98*ZyDnGUS9Lvn+I@vZ=k)L|MMQUe(Nm9+)rwZDou0K2VLi^OLW~rK1sfN7N#Vt}V zwn|;mCe^Ye^;({V@cjC-N&a?oYxtMq)ZUWR-a;S8w2Mnpc_kL}Kz?d7uB-74*P+c) zTk=vb<)y0HrnpT!Aoc5xsowddEeZ8gEdL)bP4zEI)#s&N zDN2pA=IhQ*wJ%8B-^Q|*wn{zLIaOW)h`a$2|1&RjOS4qEHHGt_;4Gi@XwL2NW~q11 z0&}U`&${}%lgH0Yh~_Dvc>#tqh1Gw$4@l?^5>kUZrtWW*T3V2rP;9?(Kc+=$XfZhB z_GxeFl+@wQsp&2j z5>cIMCVft7#-BqiQXk~g>3kw@2}t{0-BQpQC)Bv;MvDl{eOQ0;C)X6 
z{QnH~|1Vkzah6bTlp|8p%nQfQ><0l|RF3HSPyL}!FaAR${HJ5`MB*Gjv;Q9=;V(@S ze+5WoketCFnLT;RbeMBcw=Nk!^HN61fbu5E|Ch$fUrl)s$iP2D!e2p${-xD5kZ@TR z34fX9|8f8SWlH_OTFT_fljqF5{Fd{v?dP@6SD*1;_nc%7kMdrsGU_b!!J zsb9A3-=1X;eR%&%eWp|^%TLX+`ekPfAKt(9{zHe2u2i-|@h}DRVg60)c|wog|52%I zRN2@TgU%W;aCnPDo3^;LSBw3cwmAF3;TH`*V`%>ihYva{kB>Uz{1In)KH}UnhMnbo z%l7*rFBp0LMT7Bs#^_r8ho3d@!l5HB z8a{I1MYSr*ww0D=L-BuBxcqwbGdt<=0eH=~Gd)WvNoBW~uVk71jQ$=%9;6 z4?C+?MfH;^6SG&liW;S|Ql)DuYWAt9)v{E6QYB9>t=O(-&z{}?)4$?Z>F|pxYM*)S zX%*YIJZXEMUvS2V^UfO7|NIO34<0n4qRwd*bz7cRu|vxqt16cU^=MP4G{~RdEA`me zsK6>hxGzxkvBXDR6D$9^%1KIV5O zdZ|jOPHERt!_pq5W~Jt(y-I&9bu4u%l}bHJrx#Inip#+loN>-s6%9_SXo#;yEldAdde_fd^>!$AFCA4n#t)4Z|I11P zyv|RVGVlA{)lYe;vj6}6yWFeDw(WP-QjJno|Eu_4wN$HA-T!Ky)%Lo!|J(Vyo&Vdz zVn;3JZ#_SGwnJ$bKlMsGmFkyvF87>$sluj-UG9GNr6)^&FFjv+0m{ot%S$UtFP2^^ zyHE@_($>-s zr5{T_m3}V$@?U800n6C>e{1T0{0-Ls{r~@2U-guWzM`vWs|pQl^Nm|1(^w%%kVyU<-*GX8-~@tJ7tf9UN0 zOO0Le|D(?SXN~>e>FYf>)-H6l&GUk1S?O>;KR~D3&w+fd1d%1Fy5> z4W%Zfagd%S5>J78+tNp@yfZ6$t~AHrp6Z+m>w!LdkK_|dU$N5DOMfdJONS@1o!9B+ zN~ka68(Y|FJ8?a6xVBbid$K^C?rgmiyS@k~ThXoN_a;1CS9+SI{wQ(`R%Q|}X{+W8 zZOvix>y@uxI-v9yeltedfAOa>{#J#Ty(IQD;RSD#W_KlO$s+&cBjx<&M&7bkM%kWx zy?j2e^lQ;hC6=`j_Z4_ts_ZU!=ta_@FsY=?`t0WoR@oLJTfLvJTpcA>Yqf(;u_km z#a~vCpbqKFJZ-=)R+TQm=MQSUU&zpo|J?7>z5Q&!*(7;pNTKOf-pwxECcbp$WjBa# z$B8T>OJA^#Wy-gvhu%E5HffTjR)|7}L1TXDeA;M*lYfap6L8j)MU?2L5-s2A$z&Lp z;eBQy>0fBOJ*khNvkqjfR2s|1wxIDA&W~n6Jy2_`wN*ZA!}4yXrQ^k_Zg8#dvrn|O z6a6$O{U$1Yh29HjA9_|xlvbqrzX8k2Zbp@Z-K3zNfmLpN4A zo}}-H#+zvKMBHp)i*0GKtDjigA>z?=(!Zg#awWsVB$ywI_C#{5Vtf7AN(*$$SkM$U zvViP8Nco+7^Ciw3$YTfMV+T?D3D1LDEwODSj}2!RvzNvs?W~;-cx{ye&oMmo706F1 zT`$uAf`c|P*n>3GN4>Tv4@0M}mYb3{UNo^lva!z}CRcY+aWV<6DeSs33m5>UFKD$T z>$s~lOM5NgIj_)ZQ+4kuY~d|*JL7o{EaJBdS>w@4E~1-0_}>J#F8JxI#jm`s>9dJ= zdq?fb@ag0Ea^BF5r#;FuK0%`u+B3A!l#Y&JZ#A^J4wb{?z_~@d3ja%S(^h2J5r%8U zpZM`|@>UecyiZGsP<2cFWxMIDA4Hb zX{mI4Z#Dj^vbdel`4%1@;Q`> zQq`6(C+Wc)>G|IaTe(FH4B~ZU)$XPHP+5;k?0g|@?hL1&XnlqMN0eR_HQy)q=P-TM 
zldDL+r}ht_g-QA*Z+p^!^{(XEyTku-{yHBPFVb-3(lg#ag4(yD(Zw>&sral%L;s}N ze<)QhYAh1Tzh&jI*8jxSF7UdB-cHb;dI;q|v!rG?zn2AnQN-O@Xx~B3t8w&T(IaX{ z_Sy6`nG~Cq--P4$VX!SrbS#~R*DY%7!BY1q>?gXrTOKJFTfY*8#-SX|)`d(ZrTc38 z_98-G$;QTumS59Oa$#2}e9WF!;CTpo6VZrPCwNr`(Z|#tfzz&@Px7p~zlkZmc~(n$ zKLXub*~xsLOb`dU`8x*wGsKuRMa(~hHB`{dF?2nHhs~nt6KJnR5#g>7UD}~lwXm^d z(i)=rBz4-ctVVcfC(mpULFcfzuUJ*GY8gE}#U^Ta(i^R_S$v<;zT#JEZHJ#SwdS(B z%XrJd+G@)qhSTN-*z64Fm-)&VwFZ!48Lucuc?_#MmB*e7vtDXELC#qHdKUZ$Z7fl? zk~Z()>B~fe5m3I#lO;4X4x(@2yH@GnbkRn<>s9D1`KAu7j3w=CJ>vQJISR*j;cqRT zE3x#6WEc#|0XSH#SJGP4oao*DYPRAly*xjj4*#X4c3S8Rt5flFF0Ex;+De@b5a|G$ zyR=su4=rJ^#H$`8OD4IJtd(%E82-0=y_yxgPFM3$m;~p6evXDpg&Gaf?TMomT6+=2 ztMU6ByNM5WWAD*bZ@O>g^XB5w9eVK{ipW+&%%A16qmU$OBU$Tg zagKap>Wj6MRQ%?oRv!7``N z=0Y~Q5g)DbvxNqpqoWRmlx6tqNUtyX{87BESNk2d)KCPQS=ed^T(zQw*L}W+Sa*YZ z=i+Z1T91=*nI7kEo-M@d9_%1Jo;K*U*Jc&5tr`yMdEb`aDrvhWM347sl6uMdzbN0H zH1o9Cw?L&2OE?Nw?fKIja>ZimXtM#mSE${cO;z)5CBE9TlJpTDhT&AmzmCKHr2h@? zOGLf5;CeqxKiKPY(d$>bP`REgp?~QVHvJbUR%k;61X%05q@naRu zM*kEMZxV_RL8U34_7tziYP%m4CKs5m(Q;jO+QGAn#g)s%^4&<(Uc_leqPln(R(N&`o#(?z^HhgAKP-9Y;niIQLO_C46hkMvs`Pwm8!J7mjmmKe2_QZZ%x=F{@z+dlLe4%q1GH0xd}gg z_}GM71q4^E_pbq~)j8 zT_CdzhC>^^_&9WDkfK!)NB4nfZEf^{(K64HQ!3Hc5}xr33>SD`K^ygPm$AuA?-L)k zjSpWi57CzYrnU6h5|jFhY@dh;v-wIZ@+Mxqq4o`QaUj{>GLw;>)t^{!Up!_mp%ISG zfYsU1JX`5=J)c5f4d8S$OB+Ix26!2XkEL2qzjPJOFGhD@5!biZ%1AN#M*4mT=c|kU z+Y>$?2lq0%YQWp;v&Qy3X(K*X>4!~#cV|)geOj9!i!C&|UdS7U()ba4rY5VOsD;{S zM*|yC_*%)`N!6~f%rZ3_i_xvvO{9JRTF3IY$F$p6j64@=@6l8plzP%jMm+V*5v5<+ z8*=T%$8J7Jo+&R~3Ym#G+EA*GK7T7^*wi>+1{i+9Dd!QRF6K=|LElJQu1WJX%7Eb zq{cyF>4VyOKuhV#)PUM7?Hr_c@FBnN!8RtL@BvyAhYR+^wE;`qqP;{`1ML(jlcSyej>h| zFM_9M)CQ7=YP|N)-_j=O3LJ$997xQWQQc@1r)5tv3723$CyA}Q2OG_Q~C$9D6M-g@h z)Mt_>-hNw=!*1pSiOz@P`%F?K107EK#&q8jKI7%SNoXf}XSHG`i+kTsQ_=lP&nrZj z>S|miW7H|4!&Nx{NVMGzCp~?BH2KmC+@350_-Gj&9?G`ovVm6g)(VAY+PQf`)K9N+JghMt$>=LB}~I?GAFF{6+Dp`Bj;ez@(%4ZE_wH)i+;H4!^+mu<#@W7#(NryTj`IMlVmOFhC{yF1o| zZ?fQC?O>BRnK$wF23~&0 
z_iA=EUL;u!fgiQ~C1fU(a4D-;I0q?4?9(<=h#{wF=QJ1KaDJzXD(w?t$mVSOGCd~!oQ@YVd|aW(@#a3BVm+LVh7r7 zPv&s;60|ZtIobQ>K3&3_Ptb2T)4L(;`A)Su=|A*AVHTCX*VOE>iRwhsMneAM4u^fFMTa0Qw1JJk-QsO?v`zy)J7LP{#9gMj^-+L z5-mU1&ULhKA`0`x=i#1jkZZs8dq)wvHXUT-{sR;aCQtIkMp5y4I@<{S6LEhA59mqD zZ?gT@;B_@w*FfPWuLi-ZoV+9TQBI_>YkBa`EN%kMCeZlG!ahdHI6Jec4~necLG6r~ z>af?Y5PShz&*JPB=uFTLxsaxxqT?rI^j`2{w7wRIt{I2G+zF!Aew&8k6OG5vVX(Zmomts zoiF9|4Y2<}KVpn|sm4ZAZRk8R zmDj5Ef>_uKudlMSpVa;sH^1sR?7=fn)B|~t#r#Qo2g_miiqb=6-iA=>;$2oG7UAw0 zTDugF&(c>XSl5Sd;%X!LZkCKxiDkY=zJ7AtzBuYw^r%+pRxb1V*UN(k@WIRj9YxwLg29lZa z9a==c1>~QnjYQqAVbTn;law1&CN2jjF2PstiqnX+w-h!SnDR3`KHr}EiRkZg$e z#NG?!;*1^shR$sDyNZ16Nb;4>??Nek`x9_;JsGR8&CGKp`?bZ@gQ!j_Mne~g#(lJ~ zk!;gZ%$(ew@S9A7S&pLk$gj{%zC2gm_~H3by$IFpK!`>%UaZFE8i~^{vDcpB?e`E^kApHL_7+?AX2UO# zXf!_F6ooT}FXKVwMI@V}om%Q$qttZPwX}%C=dhFw{P9HyyvJWw@!2)qGNk>r+bwZ+?gWqJbIl;nn^{3 zZb8nAJS#W0ENOKzJXg`q4odZ9;m@hJ&eO%>X4W*b!n$3N(OU7O8{yWju+L=5&7x4( zV&uD+*JaJ_FRXJu9ko~L->haFOX|!Iw(yXwj{RJ;_zdbzAYPeO-67(nkI_!MS(%K~ zL2t0~>qtMJ4&T%-$h_TQB*@D4acH&?llP{>>O~x#Osgd^p#@x;8qa)&`s+UFDIUc4 z78g-wv55bw>{YJbD)FZ}36hKU!dZ3^4MF!%IBi>HyqGN5`}>Cay0(_LWSgpP-a(U*{Uw!eLO!ZWPsYcqrN_wp`^->3W-a@Q+)c?QZ{v>f}qdb*Wu9ZCR05-A7 zv(ItSR_z<;cA-xa-^c3heUJC~?5mIe(dy4W&1}-nV&!5j{ZTua3piMr^9l`5Bg;eV zcsT8EVN<)|VHjBt;-T~5cq(h$=;>~J?y+JY!=tdS#0Jixwd^_=Na7Qr`Z>SfK+aBL z*HbWgh$XaCC$n>p;WbhIGL(OX?j^MKI%Ka1lHN!*0i zFXxS)DN&OJmWu3MLq^{y<}OD2Lp_lr#escMUI^7SG(8Z)UyyXP{!!+nlZo#|`A@X@ zNAI@aay{Ogkgz^1GS@!?nl;(%F`j%1`9bpfzo7i3XV3aOggl!_Gns5ZvE|oTOHi>2m1ZG{=)&{<7#CW>-$dER>qUb5-nuT zC)L@VEq%mFGmrMOK3lIse_0Vff}P%jm+V){UX`rZWk&1}7SIR&O+=PHaGgq{+p(uU zBHp&um1eB9rB;qnGi%1lGKm;>^WIrJCGp}|cGFNl;4XYLl-O)?z??A6Fiw3vjv4@_)bo~9q*V^)qtv+3aLRR+& zDBls4j2*Aj^UR#}6>41}0%s3EIj+(pD3ePQ)ys>Bd6@UvN%1U{gH>mF?=4!W#2PY! 
z9EAU-^l+iLld(hA6x-1DF%X~5GJC4q4ZoS;uE8UQdOriFnOpd#$OY}x&FG;iOtMCl z_5Y)Bm*~0_y>TeM#argXCv(pWdFvFhGH8`SA*280P>%eC@zv|2m4$#l-Mgx;3;kr*tBiD zzeqIton2kU*A7N6YwAal@;=B-5KH%h(-_E&7YW)c_lCOJf0-Gf+B_~}q}^F+6%l@# z_%@!t$3o+A5={534ldWpNc(y6PulvB3@7XFybIZG;?HH`RAx;xr?VclDL!qF)1~y$ zOziIAe>K*Ty=eLTT(Ukzx1;F)98}*#{XKeol5O;7fh{0&k7)T2tS9J){Ylv~{eO}U zCezp&)DP7+{f(V`2)jSheg|z&_3l76Sx0O8dS8uJ8_1jcvbz0onx4VQu+5&rl@Qq? zF26@aJta{{k5=F-ndeaU<Y{yopdyczqV($hp~mmFxaU5^Jw8~`0qjbm*I0JG-+T-)<1xQ~Mp+`=ii0n!I0% zOMhf3pWt&epTC}*hvMKLT7OFHeS>A5O`=oPovF=GJ{Uh2@!9Mp=m+8NaQJWZGJ1Z8 zJ}yI}8tNy@PmK$CJ}3RBc)yi5XaCJ_K79v=KGnQEWle+8cCdN#+I{kc|V@6 zk1t=aX7W9DOC{|aE-m`Ng8SevjF_VY4rk(0&CHk)`V&qi) zu&h+%*;M5+j_yp?WjrX+<7D#grNqTzM8+XQ@s?Hmh9cu^@0*Eo2dPob(-o}tC|n-P zXY2FBj6PpNW0`VQSx0u*WH;VkY%%)=GZU1x@}{hKH|4&ft%mGnnP_kdf6a_wW)_yC znlaj9zPn1@*?e~(X&T`zmfusKrwe3Pv5CXkP*<9mAtGfD=8Hbho{!DYcwEb8;wo5= z$4ND1nrSDaysUffA#yfG^+|PD*G0b1-h}n%&s+1V%u!$KWQR79M2ZM5OK1k&uPJC|1u$cR(^35^U&>f)D2lPv z@cTSV><_EN>+B}Zcq_B^Ls;X7IR1y2)J^1k4Nt9Py7gwPvUmM!`ubkB%npvpvf4b_ z-UFXUYGDd}?I7x9b~jv}A-cEJ${2mT^mMXPn_bA!!eywAVsn`t7*@1(guYU8O7jBM zPFgvW1lfPHm}HmmuH>kbin05hY@o47)e)zM7Iu-Hz2D()BR!`#QASq{Ntdk9kkx#s zy*cWxEk4b@(A`9~W=dvH`dY0ePiFq(4mznNQBJt;Rwg-A8e76;!zJ|se@!|no9M67s@%cy*^-g`WSJ~k-sJsR3^ltwS&DZeV z+-%(%XkN*79wEv1VsHgL#&1_^?=!jP6+ZbPKOOG%4|J5-xntGJ9-50u(*Y-M@a&E3 zp^|6W?Y17*-+T8Sj%JdwebL+ciRRPW9gE+2VakQ{os=GOs)8p>2iCI=*s=Qe)`uEz*37TK3Dn!vFS$;Nft;9tXeq zd%xQAw6lx4S${f)_l)H2*-5tp{r1P#PkiJbF!~)X-=a1f3PaKUk2D)CGewob^mYT=_y_HN%d%@K z@wqIW9T}PZd>VyI(V2nEA4v7Mx*sYxoke|)v*+dTReEE$!>bBXmPO!Tljm!h~ zrmGIc$SQfe6)l`jwu}>6^NbeCT+RZs-cnmepH^hB-fZ<>qB^v)a{@#8Qr4*|P|D0+_Qq{T<{R-^&~xttky2TV;_C za~jMDe~EZ=EUq8LL&jqDJ)4Y1Ml=V)qgs)rt`OH2i0SWZWw4l*ott~2ozd$xTFZ`x zjJT`v$^%(T_P~BXhKX8!3djA$lG*-7r%Q_*P$q*+g5N5oD)>+L0-3k)6NIy}m7Pr+ zMB>>b&Hk01>?`}UvKun;WmPZx=zA8~?`lu7S7;}6YKuP`wbhc|-{u_)#N_AjG!wUb zipkgMZM22txvcQnLNm#tlgLzE8-vkXkEiSs+nujxH%idRs3$W49SW;`n^hgoQ>Nme z2L9Xn{Ei}O4Cnp3sAb|>W=LUu&gf?Ik+-AT2Y zKiYUgw%vj;V67#Ss9gr~n7gD%JKTqs_Qi|b{h 
ztz(U@07Vl-kL0kSS#DLDepTtcpm~{iF_G4mKs`HFR>`G};J6u` z>E}`#j`)0ROGP7Q% zcwe0~S7@UT%sa4`Gx$Lh6za+WKkIvZfZ{GRdjWa3=UeZgwMt&ti*9NtcM_?Zz~NJ^ zJb|Zs`OF4%UqEY-47OahAL4yvYKiZ2A(8z$$B86&@%luk1yJwj*|(^76dk@PV)bMi zI|!d+aJaPSc~7Ua>~Z)8kE=wc!K9zYs>iE)I%NK>g$C?1v$9vIQ_ZK7SWHivOoS{e zevcD_R?<&TGNwP(SvwhjW~bsOV#R1YXV>-KYQ6`}?AE(anc65`#6I^TMWSL`)^H|% z6aBm3sZ^A%Ttw5%(5E+EU)C7GCT|mgvaVi7dzrIM#4YEGIpdXGcr)4K+5E2;jt299 zaRoAcO64;8c-p)~{K!eZoT3^>>g~zdQH;u5z)?`YjRY-7l$o9Ba$NR}W_~p>?b~90 zZXl_ulD`s6vXkjhf8PneAS z_VzA%7H#0YRIQ#O z=o)!=0*_dS?op^M6P-)w{UE|DRO4#xM!Ej)v1B5$g(3b1a-6U?azg@O$8; zorv=p-d-hBW>d1(*+9*+X<^&wHLKE>Y4H>swGmf$)z-r(oF+rvfzzXuY!A;T#n_+l za08o|>Fp*u6&8-I$DAG4X;#Ke(4X(als zh12ag>yPWd!ss5otg}7&E5x>gL`JmhJ$p)=I6#y*$&<+Od!g-*$#peNz93@!0>{6z zqibkjxWCz%`i0*8MJV^dS0@N;$HoW2;}w!*=T2YtQKO*ru2v?qrwQ7-sqo+Qqju#X zXQ}fMKmM|Kx;M=KisxjM%+=3;%?L=g)ZzuCE6E<~>1`uggZOE7u4XRx-(pxJ-dY1k z{mJ(ljb_*WS$IBN-n&wC`h=!;M{!9JrE;#if&|^!!E>;kNzX?WwskO#&B1?GFTaI$ z#$nkQv=%`KFCyHp5v#9AM-mMRk-urX3mCrM}nkWutwynFET$ct{(+S}d5`8{8C6&FdG@Q+!^dEjxI@81b`E9qk`db| zHNIw#nS*?mogGU{--sBw2_fT<7w|csw``j)d(*3|+0KSp&MlV7=$V12Q%Id&UuJcW zMx{3tvcop>wbjwPs^B(jgfHQ^FFxLSJ)mw?h4Q*kS7elf6ms13hJ)V~Q9vPlVoveVH-dQf*{RoKru+ zcqMeR#joOg>5R-=W_3KHxJf8gk)6ikJUeT$qpPg=TP8y;fI-IObJWgR znp6=SjKpl$i~~#SjrApCw4NBL|K726>?d5PG2yZt<55KBYpO3XUAf8qg_hM?3h>v zjXPM)#V9ort23KXS7z#^ts9`XlJ;*EpVHIsO5SN+Ux%+5qSN+jpAO?eqI2%l$vKm5 zTH9SD?5J0A11=Bd(I3eXi%Gqo`1h=~#wzh0Z66`FtwH@K@~wvH0r-DSn?1~ZzK+kg zarUCV=R|mJMfqpt+T&=9_>f)2KZ|Vl`t)tNZh0{_=*Mod8+!CEehzvO$m-7U*C3DDTL(pLud5+pl9T`vp3>1V;Dpz(etKFn@f%$nMX{%ez>aox%f#$bF}g zxUW3)1wR-nE}f6cUuh@(ivHSuiA84SdKa1-M(f!_e0RayK78wUwLjP68d>nLzigL% z2)Q-mClXykp0m8`4yn12epambvgnJPAYSC8>=-q&7I(PO^7Z~dOAlG?Zi}B=#Or_3 z?8p3iApO1s(H7!GKjkN}&73;VuFOq%xF0T=bvnwk-EqGIjjk1K`}_P-G?0s%So6l}Xv$LD=I|pZ9!u2FQ-kJPnn%~*~wwOIPR(gzxl3b7(oxR_OBHuFKgwVn;#4~0rT zyIVhDP%*#zJ>RQ9rCij=-sRjgF-uIS>D?TjnOs*@OS%2#DTrnKlU-d$&`R>*X=-Lw zsVhXb$9p&4mOkrwe&_S-*Ke*?=8v;$rBCs62@jm*3%UPR@Dsq3u&EivL 
z7RTTuqu$J*-+<4It}{;V1(R`P-rckA`p?Z#%SbJGKcD3;wX9=jCtrK^nZ2Nyt=xpW z%oY!&r*m0c;z|RkWq)n_83z8EIw?DEaSJKKC6B^xef3=3Z4Cn z9_S7v%^scHEi{xiC*k7~?G8gTJ&%FxDd&Y&(eUMJFA*yi!X_s+Gyf6E&Zeu(BnQ!d zDxJCU+zithqASRG7f;H`;H+{k#oJUKkU5cZ8R>DQ@1eW)g_Yk=GqH@lv^F1w*uuG@ zV%ERn1gtohG?7*B+=}uMJId*(+>)>s zHyLkLCr|X2v(&kJA~(}y_4!OFjOBeR(C)2gmmN~Q^j(*cV^8(2=6|*E`gd{v3z(18 zYJH!LLqGkb7om{7Bfk~(cf(0`qgP?eFW}==I9|lU7qXq~rCJNcKKMxgBYxk*)9hfJ zU1WvF_Dx24ZB5nd_wd(6C&m0sSq`8B;Bo0&dHt%e0|Rrw`JPHlzl)veFPSbMSk$V!8C%+OE9^s=eiJl`_P6aQzl0D?+ zulr<*>)7xqtl~lXeVFb1=xHmmzQZmS(c>&UWGBWkp1mjE?5PiOpH{M)ZXLT>E2rf& zLLC;By|<%086*-XJ0DHDlS#9T9X+9^^(_eodzX7O4i@QiyTMrX;{)~mxW_hd{OmJNyIezVz&TGJY+aU$3nmc@4X!5RC-|LDr=8fyNEW*@elIbmuFlGSW2Kji z9BZNWCQav-i{z|mSa8O=ksi|ydHXQ4SL-mWEN_Kdjs@9%z z^S$JoN3!Jc&!C)}Y~I8F<#5ZLGQoBae8l2+Wz9WN?#qVauh+wOZZW@A+oy-BKOKea zO5VUubDQEUZGQ&W?($k+_Bb8?1EG5(Ni$0p@6EjkiH_MDaw5B%pp~Jtoz=$d6Pm$i z=ff+0G8r0)T*u)fd#cl0p2<>kMtUHhPtW=|UYPUjnOhy9<=h+4o|SB`c4oe|ibWF( zJO9R~Ps8OB&vKgeUL0ITQ|r;12%&n~*d$ubX5pF9ZR7t+mN#6xKYEq(?70JCy1e&; zSXzniJjYXVPfON#hp~;^VKt?QrQ_tPb?>6({tLag|;|(PFCH7huoBs+|-bj3>M|9z$20VHkNl6X_Be4hdH-g zC4y(1djWKk6LO2O_Hz8xV%yh3_D@>N^P5<~bl7D?l6#b@kz}s+6UF~X zmNW6#oULS^=Lp)IiRNIfC$pBZgL*WReWkgpYzR%nuNxOVuyG>H090~1$y~UM{w^$Y zs=N(7^rib3QOPOBBU!{zg%zY`X76`5E7CwljJKSFVrFS}qo=Gv?TMTE_#ced3VFV} zGMz>0b72~dW{=%BWUY_4*|eUugp4UOV-Txkxa&uLRuKS>mQ(N8jA6Ly?)E}5^XEE?p-y9&|q1JNV5J-wht1)g%t(iEI!FIG<6 z^rqA8`TOy3Y)VIy(YW2~@hm%gdf&tEFsS7wk2$Pwyb=eJs|72pFPdk*ET@crEUfrg z@?@m?i?+JsC74#yc02Zx@%g=|4W`Qt_s!&`u39fE+8nHnSo%RMGrzM7ET?cY*Swr<^yA~*MTN{t9YL1XVn=3; zCgN$bXqO!@r+TsvJI!rmgXNLzh0G3ugJ80^{PQU9>7ehm2_BEZIdbMsgzPBDDWNg^ z;1U|1BoAaCTE-nS*M%f|v;eJt!dsotI6Y={J^gduG$D%)gmhYwQ z%)Z`Z#PzdRiO%nm{B-=bqO}u9c$&QRtx@84erIR@n?=l+Pg_e_X>L!jZtJ5K{R$Qz?hAY&pgTHt9?UkNfD=uY!Ze}*K)95#!eFN+4bDW^eDjXcZQ)jWF zUCFVZvDQcU%g*5KXyFAmzmF$x;_3i6{K}3pj{J&sjTKk6ifHTMdb!wjItzWWh*Q}^ zlRKpDVHvYn+-oH7M*gSx?rbtFMlDe?tMSbtKBdSt=}%uyzMVzs>|*GP`>fqng{_q!AWW_7@ zA7uQRF+$Esj384lnok~R4#(VC*WUY_5vhdRS-dCt`Ewj}P$PYe?6^wp#OHZ5Rab<& 
z8U5sj-06_Bf8U60FW@0P=zgftddGK4{ka5I;dQdYp=}+wC5p^4oHlv_fXm8|6ndRGd5+gU#e8x+zz|Xbx6d5{+ zoLPa({B!yZ@2Oo!t*r8PgHLmk=X7rFG`$2bXOJedtI7A-UprYG9PHEdXe*Ir+d0$W zY<#wsa--%L*nSI-pK-DS`_5=^i_Dif%#J&d6-5AEEzlk+|4YvumUcw2VKwh$q+ ze<~RwxoisBHR=5$vb13qwMYZozJe8i~9I{FpV|hPYr5TprvgQfKb%&$NG+HX7qN`~9!=bfMNau!95mXwKyy z>iJ5zR>#5HqVzKD^_6#XPW39M^BrCa1Mm*+u*}pJr^|gXW$?D}B-I!TKj|dqAz0wsHdMA)Y&i<)&YGBU-uR zsFyOQdG!{JiqOw6~V`!^3cPxyJhoY3UiCAFAZf zP+zMD^jFV2;Hr^$^Cj%xqt~;%F1{D2PjQllPhK9;lgr{xC9 z)Dfxwj`tt5)}IA5Cf6s>_#AhaiMhE$=MVfT_tEAyj$SayoiA6h!i*bpXMX13I+CZA zxYh{1$q!lUDucllUfs$6v)Y>T{S)ZvT~eOSDn4TOpR$bXDV)b!epLSg^|H1%Lg^#$ zl)VUjaJ81rWHoat8^|fJj(FD>JuCX?-{hSIY4FqLiJvS(m$>CKA&s!}NZ7AMS5< zqm5_lH;6T7JQJKGp2EB_14%q6C8b0AO1eC<^9Dznrb=`(1~;Sdb%@u?Nl{A+ z$BAIsd7TsYJ(Rt{@1~-}8eF9AU|Bk+K6AU(Fr_ZX?Q#fZ9&!$Ze^ss?n!}-2#@5nL zEBF3zvAH|EKazK{H{=QU^+vfr3R(Zno~~`9tlShh6E2O_9U~iNuWb#{en;{0Au=~7 z|4?|W^hx%aACJneB5ErrX0OoYq)Q9qao3i;mqYwLPaiF!XnID=dEy4%{wxZaZO-}q zk+?5z?vl}0@R&Q{mYd7oP(OF?XRMpG>)azfPp#Y)lz!SB@P3h|?)LK>Eo|`VP*xi+ zA4s3wSp5`rvyV9QmIuIdiRhDz5fA*cdf6wH8#T+dd#+w(HLv$&ft%HzrT$g)alMus z>C?=YFDA3I`$VrkEaz1?W{3RFO59G0FW6QU@>iy_#F%ZXm)U)idCb$$%qqgeY~l?3 zc7WKeV#+AI%x24{kZA^uj;5zO`1(VlZRYJp(8QIvf7st-vJ2SIDP+p5;~KFwyVi5B z?*zKZ-jl3LHdQO*!@m`=zL6+AR%HAu&&WQ_WvunDY$XXMzr*M6mgllII0Eh3aIM1g ze&TIqc%H9Zm%=Mfq|p~}H%rg#bA7lx3lqKOI=-X!%$ zM*F?R&&DKwTaSFUj5JL|>xch_{5Q9_j3v_weD-2(TiMa0%IzZ3{mv$z#PPMfy}3{B z$6xwaxuI}Z)*JoiF2dI#@eHcD4`d!a{hKYFgRkC2{u+~c)(JWrI#?B;!9^*#8W%==c;Va}jW;YnH7sm@E{ zp_#2rZzm@}X3@cHdA7Gt(^GyDMg!E&>d!tn9gV}}px8%W92N{ztqumeK@*(>0=`9QwKXOaNKHBKa-lwvf z+%C6>WZBUf{rpJMtV!lhjamGwQNcxRl6`@L?5o)bqk29cB0Al`2C~C8-!zdiO*Ord zeMs6GCs_p^qsASgY*m&%LH+Y-En|jLeX>gK?@2ei(^Ny4uEO!Uf^N_V<{5ib6-OVU zvAtoDlS%LUWKJPX_POTlX-#!AzdMYtWQ@HDT66r*dHCe}K4i@*Xy)M(Z4P2>$=|ut za3ZUmf^JS(J|nj5qHgv_wP7(iGclA84CZw?Q}TDE|ID6e>ubzo3GuoT`#A?k>E{HU z^xZ1+_h*Y9Q#p^%F4f%kmU98w51!j=d2&@>67f0yBn>njpiQw%haF4&vG~Q zg>;=hNcLrAm2BriTbZ%gUrafZbylX$%IM|x@?J_EgV)X|{R*GTvSId{CyT7sPS#1Q 
zk*gaST6y}g-<9!|8$rg1tI4p{Ayk_lw};LS=p?T$<$bxIDqN+fnUg{nXeYDG--%0^ zu{l+}4tfr`E51EkvI?79IC_Y2Pm(S3Jg<>F-kv_`96EYKR(u3M`KFfS$M5x`cgOR| zxaeBgbapG}tlumaIZNxAKWf0oZp1@%R43xBy;l?I`=%tdPrd9w@Y76`l~ zHl0Gs!#tS^$(*UIURc-la5!D`yITwEi_b1MYCKOXS-)Ei>+H&}5HlY1EVCoIXW>k8 z_7cq=V0FJkBqQ>NaJd_Q`WaU_G5Q%qZp%GFWks*6A4zgbc{HD0u4e8xcuTB*M_lTzwUyd>r0C6m zN{i=dVVa1sirq~kf5xEq^Ur+CS#C9(gY&H{YrfiLxj-f^FU9 z*^%O1_63d-uNT7q5E1bdcpLz|9a-j3R+aBAdYMlx(RNPTX0^E*yIw@<=~_XzbFH~v&t?Lq83RHm_)uX*(%z3hSHJsYKw(8~9b9L(phC)G+)Tn5{VWc#c-|J&2! zAh}*U!`15|g6I1!a%!i)xV{O+^ecMf{~|nZ=3#%u?asIugi=Q(6P1q9~A&S&F;qRD!NtK^YC;V{;G*nt$cF7pWJJkIf8yZ>kY*j+Ruv9798a++?U{xy`h0TzY}_C-AqoXlE>WW{Oj7w31PJ2hTDCR|$uYsChGe?ytpGEHS%udXoK4d9F;2 zZ}{8iKFN2aRM+aMtSK{Rxj`bm^Q>-8^K7XImiyQi_cS zP8OBt!?lbbWqs^F*lq5iNc_%To$T*A8bVX?SMFI2nWGY|e#F9aw(4fJTH$Rc^|Pxe zv-bJkn|0clr_OkNfy^2%WqWOLlN)Edd!2n9xzG1jE$6<2yZzl)49hn%Z9B>NM|#f+ z?^F_HXVvpC%iUyIZ9c!?>~F9dM~B&Myq>4EWf!?WGBX`tlOp?OYr`e;C#^{I5{`EF zZWFy;2ai3ARgh`q$SnA+c&o)1*0ZVf?t0VEbR(TQ_;2ig&XZkb?7C3-Zu)ksd2ddk zk zI}UShH!GxB1A14a$_-!pc=rxl&7Cnhv$hYfKS&E>`E^z_b2mquf{UCJe-EcQpPYM) zGq%Y$3w0J5E-#SUD7KAbA=&$!PX_p;v0AyO?4O0TRfhB&tv#*84y<9k+ULVPr;FCp zRibmwMeIc8+&^*$UAJdfnLV3@N^Y6V-HczcgzTzf)iQc|_uGlSgG8KU>Z}2sj;Huc zb9y{P-P|XWed!x{$4t*gh`{sddmcF!lW-#b(lbg<8c_JnI*4bs=4hETGk)dty-Ca6 z#KFvWtkqAg&mwZ3?mF4O95&hix4kxNYq=F&jb$hA<38WrQV;E%jeeRP-bB8)*+hEv zS>t+MKm8dIxg{#icwAlF)`RxbV&pQ}{y1LLkSF&i*W0vl5Kox`k1b;4N_~zoEa4WY zWp;d~awC|JNp>Mi#MN&$a`t)cNm_A>xC@1CNwttY$cAe?>RRYd||Y|Z3dtZPj9W> zHDr6R$O4a`@QzrYyMi0A)Aj0PkHcU1Y%P3!LkGDztDAW6PB9l>i@iPuwVP4ipB-!z zLz??!0J*;ugL6~P$E-f1?mPKlcDOwWyDvrTK6qKEMx&zbYjD1TtzQ81cI>_@4zgpb z3D3KVu4~J}e}d>5Xk{mE7cCwmp6u+&cCk3RKNK(L!D#^w#>$&7>1&@)pHKMt40=a; zwoI*tcsLoS`@!M_u|21J{sGftWUfm+IgXYuW}lIE6)lW}$JwkRJFqj0@P+z6(R$7X zErZSu^f-`J=DXQm#l;xyuk?H*|NEO#hb#3H&wZKQoeI-|TKtucv(GGh)9-};Vkmv2 z+CrS4tUHy#4F}!9WDSssG7kKKaAJM7E#chp2 z8pFSn8p+cO`O|vT8?xj(`RTXnUoX<_OPAZef1xhv^R2`kmCN_fWesV7$Z{#&KV4`k zGhw;oU?dG>cj*FF7^~V<3$Ky<3AMAg>Q#1imT2}9&Oa{tfjOr;hb}KvdJ`o3$+7vK 
zzP*&Z4A=So-Bs*%CB6>OFIdDr-eJK#w3l<6Gg-r#{(VxVszQ-XuP_(-WDdIqdm81FBgEepaD9U}Bq}v1 zGQq)kiN|EUy%G+y?O0^&H;rT-v;qQ8@$4?5VD2`_DeSEIW=uW|FQK2C6mph5^B}o@ z}Jd4y)XVKlA>;&eyaZ%B7(zZ%VCtu^bpne)jS(|7zQ_fnsOo6PlZM~;m1(&N3N z$cL5brnjHJdEbks8j^W;NacpKxhy>2aB~DJSxuICWX-p8rjPZ3T9rhIheW9Cr^=au zvO>c8p04yd-vXTPjmb&1;r<^@+8#W59c^bep>b)mjNU_Axf?n&Fgtp_Z7<&LIQo?y zFVoWTT24PP`(<+WE$k5o6d~;xYGc!hA@?|9{_n&+x@}@tRdBvV|w{3s*Mt0Mb6?~0$zHi_|7-UCAdJ+e+ni_05_oQ_7 zJg3)R{4bf)zdDj{9OCCjKV!8vgI=n!#q86qromjk{_64DDxyurj-P zQ0DAHQzwgKISuq}5i>KZzg}c0BTG-XpU9S5;3M-~)ltaZbzPu$1XSnZA-6Z=o5}7m zy2+emJu#}BUP`pFi%`deWZvv9{a9>z46+-1K3v?on}t+BD)+bN=8f~v z&uNCuxb8{1_xaq9WXg`sMJzCD1KAx<(`O((YResK8Ky3z15qzJO(Q=#Roqi_@^&&_1RV>C~ zWhf0L;Z!psi+RVx^xw|2V!(OiUyb5Vq{x2KN45B@R%-kFX{i30tS{mAOIDw6f;dR6^xm(bTulGM^m8=J&paHp2pJe*>tmjU;PC-WlDZUPhHr+REDtT>(wHsj6OsZ$Qg!JTJNRw8DiW4K3n4T zrKDd=nz_a!6>M}RIj+%aX3ugqKl=%GWJPzg=O zZT6y!!Fh6aS&=jI{pz{1Xo8lK8xBLg7k_Dr`})evBYW1=^PMW0nVO0A2%Kc~>qrRy zqSsNG1>{`x-?Y%t>p841-!PGzHFMrAC&%mHrG8=4ZSdKZgm-wC?`gP)KHr5vLm1>f z(thZ-DtK^{sU-6)xi@RuedPD!u@k9s zDk=Lg1}L9hXMK6d^YbM>cE5Mq%he7huKBT&|agMU-uiP$6Lbr`h$9U6#4yrU^UAOjm9L@2ghMay(P$FyEEg zibnID#pUAFrMzple&a0Vmy<8&KwpN*Cb92g(&toAz9X}lpPzktGt@VFn%T#usODs3 zZeGl;+SV*^7Uc3>0r{q^$Keod%_C**MQn-Fm(_@lD)E$0ppf~k)vV|PXl2yUSIo>V z)U1@{J4}S!REB zu(<%bIR}+JzU45^e&Ediv=u?7(?HHYW*qw|I%~<=Kr~6Oz8(}akDYsUGddUrmw9;p z6U@G6A-UIY9$UMSO+3y5&W2I^tu;Ar!`ng;=p6PHqz}elPRhnI4#shZ!lNoF*A^#3 z>GM2&zTED!i=Nai{53bSJOJH%lgi6H_A7bv4jP#$Gj`&uoBf_fuJdqPiOyP(X!5!2B>fAL>Br=R$7KB61m#!g@lI`YgIi7$ed@C& z>|ry6O04F6`o9&Qm6gotq8+p`Of*OECqsWc{-o{i=^=fK!L)IRSAQY-oAScPxZI0g zZ^r#4YX6GHb7XfF*=qJFxIw^zi>A1*^=b7&wtPlPW8*L|39s{+S=-zl@-h)&C}Y?cNsl_ z|K2n*n(n_M%|-|vgW4W!V+eiDr;7_v&K}r_qDXEw`$(G)Lt+^Ghb!MfyR}ICfTt@+ z@v40A7|Tq?*ow|(d@lBUdr{>Q<@(Wk&ez<7@&RgW=55d5=Lw$FoG#vD&l_3bzSnDYeS3>I*d1i~Y?jYr_sIJ3X z3l>yXM7AD!>e=+T~3_u}Jg zsO_M9dga+aafhDbYBl+cith>$3xlm<_O$jVAe zS)rkltUOjlGNLjXDndq@il}VL@qfL)_wRq)$9>=5agB4l&-1!I*ErAGbWscqBMbA@ 
z?PPqY@^3)J)96feg#LWXZ`#j%?2tmAxp9%02MuYZwol&iZ6fsc&}w&nGP`HRe3rTT zN93}G)^ehDFnSa3;!yg^*q*%NxvezX%p)0ej-{jQh(E{TU)SEAN;cHeAM`U=xp049 zVODq+$z^VqxLny;`+}$X9yeF(dl^k-jxx9~ziVqQkgP$;s`8Ec0}FG=%q1(}t(jJ9 zks9+nRg-iqcs{m_cp_p(K0O&)_B}Wpb8yjkY@C zC+mkfnOc+Fm!thC<3gx>n8x#L(oN5aznC^gv9oCf$z=aM{&utW5&`ZG_K`jIyOhpp z??*}PNYdE{R^CQyqA(`D-l5t^{`LX%J_DCK(3+aL@3s$K4rS%JX?hc`#w*#==ohW$ zuF_N-B>!qw+Y@{LPMn>hOwdtR&x;}SaeZd~mH0G~L_5-~$(Dkp>`KMEKSh_xY1)>Q zR(gtF#w#a3!#Z+IMwZM#CiysO#XQmD0+ez4AVIe zJJ`4Nefv3Xs-b=csZ?iG%h7W$9K7kXMn1{i=Ql zTOF-T8+Cplg<@tZr{J=$(qFRHV{mf_8d|f}0W|zH-s+R;udp}FeDh@VBu-wQZwIS8 zpX8D)tN|S@V~aV#n)C82P;j|=OVqxNEdQm^5o~6i5j}V1`tcSGd7R|(%1!H>ElA|} zjmDQm8+(SITgW%n^|z(AE+NI4KFvCP&VnZXY)x~D*U)f*(d#W`hCuF{Jau;zoN06{ z$`)RMXMIJjnyAjKBDo5(!aY&nIg_4ftXD%u4?NsYQdv3mS|MDnVfv__2?&OiB|IVIm0$koBQJDZdyzh*lT^emG4Pb^K;1M73GtI zVJ^AkNg}zSvevm4=5x>b67*ilE~=B?I^_@2=a=+!3vFHMbs4hAotT_y&q<>9%}Gka z&)Gg3r$!YcLwUaAAM`YbnTu$3C!Ra{f0Pk8YbX0?X*r}OQ$iCGo3HNP#{pESCT@8|-3>qzhl?bkzhf09YYg%^}aZr9do=FVNR=5{s{=ty7RLi?|1 zODu?&NcS){vH}fl@pE`V)-BLA12+w!_OOD7%f0TbSmX|MqD^e}8d`|jj@*)jLwiT{MprX7H^=wxCD$g+rChO>XBgwT_KXK?bl5bOZ z+t=JTxv|Rn{|VF{MpqSZ_grDtKS%wQ`ufl)T^mm0A?6u#&Vruf(ETMHG$i>w^_!f= z+felvnZBU>RCULp`4s)U5226Y<7-bR$uv>Z-_=U>)k;Tl${2gEF)n$H>ymnQJ_qP0 zcZMS6Yt_AxwrW7n^=2ABLu^jreMf#7Me4AsAK7j{rE?2x2x;7l@4s;wOz(}38GZ+& zI4hJ{wNK3J+}O;Sk*oRQu}Tg1Oz}D1yX)a%mOY!~GZMEW(Tr2M<^CS29ge2E_5M5` zb1~#z5^>_^`Oy;*OK7db_b z??LM|r1d(>$-YGK0$PTX&1W<`voOy|JnstlT&ab5@HY)TYtV6lR}Z81MgHLd_V=;z ztpPk0M@RCWj4-Cn!s8L-_$NxHp!9g!yA!59B-dp|t3iIAs2ySzG=L>9NLEH;Y?+cvobpixl^B9!!=0%6B1|v!P$As z>S~@DR^sOZ?X)6^@~k7Xk>O;qH#tw`8*_H2f$!(*JuzYTrN1G*IUQ!_(|6X9A7<;b z^naN;713YZtC7%sC;D@%J-5%Vq@~AD_A9xJA-A#U&T8Bq#^+=`Ttz;)^AtH;24lG) zH4f!1c=5~3fwFprjcTl$(5+Y3x~$ zP;NWar<-XoyMwnF?^R~hIny-IEs>?dNr+0CEw%#Pn_nP*;zSW_Z>*eji;Ph z_?9hKgNNk%%E_oiUY%PQvom*!#*fD3#pIY>&W>#IO>HOJ;~ywW%o?T_vyesZ=C;B4N$eq5#{u;+qWWG>FOK+k+ zr$e%W{0&cmddX+o5EA#~my&lov+%j_(@EWRG+a_o zKd{FU{uiydC$dE_nbpT+k;weB=q6<1eYK|9ASlt_lP5b7Bf7v=&d8P_&zxvk$lfxy 
zO}x<+B$GXZoG_VN&}VD4GAC9??*y)QIJTcC(&YZ1LtHy zIoe!Lt|M@q_+3ws$)QHmilm!dfjLK+9rnZk&)Lk&jUI_ao9K>jYI~sXGyCtZ-dGq& zobt{3Imh>p<2h>g^9F;{H z3RTn8?V#<%U2P2MtytA0_MNBZtW4zucSnEMlG48$DQD^2dmb7v}Hcm71XbVx^a$sE60@;PDm``$^mP8u{DO z#vc%pwS`M?JPIEtqHP?$&m_|Wc!axAot^e%Je;ju&eC1OdJZOoWN|9jg{v9 z!))JgV+)(fcPP}qhQ6sjKL|Dd(t9X68Ul*+xPX3-C9`Dlngl&9$)p+?E#OBk*ZMVZ zu|yj+_?=PsTuZL^LGv!->6d0;H`Df?biF{|7qk9uN+fpBZ>Sy#dCdz5c>vu*VC_ko zPZYmxp_c^{!~l-^f_8jH$ZC6KwV0!f1>vs z`gu=(Iqy0d&vT8*vw5%U(Y{>m#q98FI*JFG#xgUWt|ragUpWY_+VKR5THlw|-Ka*+ z;a;WX%rSO3*)mSPbAv$E$wbX}#!=x;V3FqRit!+MJQ ze4Tm2E>cQdjouJA5gh}y+gQC5AgMh)^@fFxba8?i;QLxgWVoGPXOFV9(us3&02JiL z(s#7GAI&B=cyqMeg5N{}x&Q}7d3~~qy^^lZF=|)B@1{10u+EX;@3)6dc7FY9UkWm3!yncw^@CH)={*|%U-U2wUObq?T}awF&} zeHEkOQTW*(tuxiPcWOi*0%a@tkehs-^Ag#C_yzJ`(Qh}h`OO#-d+J0dc~Z^|fT`s5 zD|sG>vP3-@%s<^|6nPoVeeu&&TgT!!xda+RM=drq6V4w+PhY&$AjKYJaixAnDp7@R zsfyAYNZ~n}y&7MC($p2S5^H`;nY;9KA4+%A^B?rNFZn0(O`_>%jy04dA0@w6`PM@V zt@P$ihLF?(emD0~-_Tc{I>zd&GCO}8jVD4|EM+N_CK}0WW?(z;oqP&!kong%pO^!i zjCFtUOT$RHB&j4P_gLHJGs$iZT=(^D_Wd4!n^#cT0+){zY@?c4 zOB)#Kz_wmtlZ|NPQg)u3{cYL7HSnGj^vUTy2pV(4FxXA(sS*WSxF15UW?hN3Fk6pL zqw-kVYXmWQvds;Rw^>+muaZ$EtD@7`KqBg2?Ny>Tzoot8bSTPFUJCLX(pT<>cEQh# z!l+i8g`NW0nGYp{OEVVuv{u^~+p_MQ)&^=fcZVv{>kNNGVa`Czpp}>5d_262QX#Q*p9 z{FHc6S<5`N;MFQZ(OB(oDA?!GW&wZF%I!+$4)(tEa~2BQ;dvq(OO%jP;Is|dWZqUD zuf<4jFz=Lz9=Q`$$G3^4QXdytm6=TX!`Wb28h#$4icWtFM?-NQdp^9zJ7l#kT26$F z5oqgO&_dRb%i<^QzVr(6HOk;=o|1=p*XQhYm zNjdSGr>J`%Bx`k<7v_}C8e>^b95>YFLcE-d{@b*hn^;+sKa8x$(@SnBgAi;?S8{5RM8bYuTO^3OU`;*um^ zQb}H|EStMcPeE2Zc|S9enZCWM;N{BmTgd{OwU<)7Y;|?l8z(2wWlo)Chu}4(Yp{a0 z`p&aXc6i3{t9kOC&niCC?tD7SzGAYmJdT4S@tr8gCF!vl*&mMEO$BNEZ0?f0z(ai# z9cRvS9sK1uyAlxL6J zDBOkT8FYTKPyc@ef>+q!p-?-3gtJ3Xi(KpI{jVhWGapt}n@f$1asOK1UxM3~zR8)7vv{y#M%^>?R|4G&*i#q0oXP^StDIcC z+23!fWKp)jbG6V~o!s@y>HX$j-%66XE1y&K^VC^KGq=F{I%v*5{KNFwLXXvG?iXm! z)6O60%;}886*~)sYsvLlBYx5Tcw#So%QFr!dpn1(n5*?FC|?OR_cmCnKAQP6lTIh+Ye7wM&k?^~m! 
zfnMVmGqU7f*!%kV7dqPd`~rsu5oJncPn@D%O6z~X+#SI&a2LVb48f8Z^e zYX5mOWq$pg-mb*mUL-cri1!kyb@BI-0=7OyHQ?+S9lHg zJ++bSSPQjxJqt;OlLI{o)S|1ZEPWKqD}(F93ifr4J}R@UyZQPm`WnuPPk`pEx}1lL z+#ak(Q#p@Xl-(<*EnZ?d*{_a$j!~kd*Q;Q=JoMz2 zn0%lxvb{ori}}aI87V;~`L-cRX3rw0xf9L$>%vTF2Yz#Ns+iVB=^=Z!|DyX}6r2oM zIn!6!&l*ymqqnDECNXLD)cQ<)kHlMZ_E66ZGWWbj=qLNynWI+lDltNHdMz>YaP z=Xvz!96-)-@2Td45OS{GDndpTc-cV*Z<(oNy(@W#Ugmj6lIq{IajsFJCX33m@HDvU zkD8oHPhRq@^kvq)f&{nYwm6Aqw9XCe-yv)Z%-sqr$@-l5W+$_^TKd?a?dp2S`gHDy zjxnAmR^y(K_Nvzx!`O#fzKkDg2}6x>x?B&v)L%>onW5}OU+=1U9Lq`!nWES;$xV_O z$kO?!`cTCmP{LdP*Flrl{bfIWSV5+vq4-w4 zKE;P7k6Rn1`k*#9k8=ZSBR-p`cMv;T#xj$CT`2BW9K#SB!x!iHA( z=HbF9o87;hUdc_QJ5ir=1)W&KGTe65N1pQ%O)YumCKvo-Gkgz%=fwC47nv6((({S% zkSw2HL&D&K&F9v8;>azh)jHbBYGi9X6vYv|Psy2j??*Z-;V-e(N}?lK(X+Gu8m_Xw z+nB#d#P6KI{+OmRquOE&nvcF@vU`NAW5=~{l?Zz6@Lw82j`DYolJ~&DjcC7$jQ`@H zbK+?-&lwz-;1_b%`y{Qdgo8ZKJ>av%f-Fw2d5XB)|Go8>NH>W|btWV{%-VCP%UBxxTM&>>UClQ(->)wHF&%lDRW02RS)jmK{{X{m1lOhh(a$lXyW7 z(oAAkzoNg9_{bfUswhY#it|uc9xYj08mzsMs7h9%C8U}h;U(33ogVK(byHIPPKi84 z=9c8wJmX?m%>K%6beD+ZS*M%l+iK)69bI>!=Sbt-26dXE;v_P!&H`UI%CDxGL=Q;Z z=$~<(9QQSi8r8MY8v1vmVv=@O>8q#jXY-G%jrO}>?P=}jZqO&Rx0s9)r}SEqKaKa9 zga2f=x=qO+e7jb=*Rqv~JZTjkX9LM5>-YQQ^$(Q%7mX8f(E)$C*LyO2R@c%aW}b($ zk#0QZ8-;QHTYMc<@Dyjm);_5DkTrcz5^vGqX0w>+s{;=@)vWy->pZVO=@#DbYx9~v z_|Q*jBX@5;HNpo=$+KGl@*aSgoXqKIWPFs(T!MlR@V5kQ)$rDqho8sGyrthJ^nWx< zO@`WxsjoufE?C))g3RHLXDOS_g}x-Ctn!RA&bFcT8=d(&)f;vX}UdG7qrH zYqb0d%$0-n(&Y9K8V0MItTzql=ui^A9!Ce0_9A-7JpKaOK2C|E_0aQC{x!s|KwtJz zpCW}d^t%E|nnPiE98A^P3Fv!~o{sWAdliZIkQU0R@jCx;vsz8aY`an7QWEIq{{Y(F z0jnpWY7=@g3T^g!r=LU*Yr?*tCzInL@l>+RD&VIil87098oR!u!;I5+;`&uGNfe}~ zXmtS__!Bqh)7%NTeFK&c@JZI{e^llYt(-&Gi8h%z)M%ssPLepAUr8?E)-bb?S2;}W z4KUP?O}?$vE%1Gv65Y&rx3l#3(Kov=&ThfWR{DMxJyYQLK+^h73(1d?le@K8<|eJ3 zO#2JSs|RY%)O$Y~9ZuGX@Z5(DCtgAF;2uva-?Nl?@YH}FPcU)}#m#sYkd@U6T37|| z<$0}~l;4L>?uxd9A>t7nWUpwZPhZ#bK|I>N@V>*0t`ggQ15Z8l(~&mrRAz~~FZ=Wd zo-HReeuRhY%_JLE@&e}mP@ctiL1NbHawoS3U-1H&tcA6#4);OHBYJ$~?EQ@B50FM; 
zZdc+tlF#u57*A}b0q}Dr$=sy=MEICU7au~=Vp@C2STINHSvAc)r#1Y1d;ThOm+9Kb z>8zFzzMfq)(_3QtR5tfn!Mm2Fv1(qgw2s-GY}(LjX85HGvy_8KZ5bcfhOhgD)RGf5 zGqT+8&CK?3bmRm??xjCOR+*cQ)YFkXX71}}R}2~Hp5X>J-77wOR`!-wpm|nRIr+? zsprh(27Q!*#@s+_q}|NS2IwRA09TTGJO7u{Ln&6V1v1X^$&dILum9xg$^GAC3)~Gm ziB((|mZ#BUPP491H>V)WL3UzCJf}`ps2lsHUqK3AlScABEP&jcN3DaxqDYRH()4z; zWImBo1zB%Qex5mWxx&0+GHE9+M%KrVW*b>)tw?4K*7?ixKRI;<=SvIGfa#QVlWsBleW~M(^&o%X#+uMoJnmOrI?c_P) zXp&9z{p3ems_*{BmCUQxva!GQoP2eOZ+ST>m0<^`Lf1O;fSj;D3gYUZb&Wo*<^dWN z{7TMn)Fh$Y4*ZpO&3Ugx&OQ)I61DYISWQIu$vEht{bK4xM%|Ujx#RlU56Wku>}u_N zSUTbS%g3r5=YCO(!5d0NsCbv@V_s(V|H{mH~W1_`2zq?5#BVEbTvvu6bz9J>IRT_=I&dMt>)LjiRgB%4b#a8||J* zlXKwlEHkQEywohn*$n=c&`2-Nn~!Q~G37=k{YIujSjJf zM&{H{@XXl*NWO`q$$uyLCvN2ptmtI+U6QswQ1V>u4JXm;JAPTNXKyj0Z$jCpWc3QkuEg6vZG@ov~t2>HOg{;D&Y+bFa>Znn-!_JXqa3wD}sD?lODKt;pL@GaAP7)KvkV_NI;fy!snuwcs&lyV|qt7jT() z8#lt_RrH;c$eGkJzW$aotLGEU#Gd}8Bn_ibpo8}^bN>pgwu zJWN*fnxUztTBpFweROcGa*5#)>${XhPcgIjiu7{MW-8ftGC!ILM=kaLGYM?PO(K^i z+RHqMjD~V2u(G>ARfVc$T-y&A*Y_yu#ng^p)q7Z?(`;>x-fH7}(lL1|?`AaZ6g#+CW%JoY^J3 z<8wSz?j}BHTu%hDCcJwc*v}g0PdG2le`m&*JdMxrf*a_ex4)mDC?^^ZLq&0Qt3&hz zC^(F!9@Ix=Ei7agx#x0^{<`Do8zaVSwGvz68B#ujZ}^AgyQ^KBgj@SRg4Mpl^9>@E z>=Ab-k{D06TTx8-PW)L$-pm8b@mxPa&r(<(TxcUJ*2#)A zfPCL%8*52>5(<+YBQb0K)x$6pEo5)I*xBCnwvx0HW#SHe<{nQErMJ?4;x+t*+T5jI zfx0QMb}wE z;*ed5l24S#UPVr%e}RjfjZMt57a^*ine^LQ39s4R?19shaCD@8E7D;Xo+dNkeaX3) zT3JtQ#7>97{)6h3#K{>jw;4`yf;g*T6>*;D>-zZ1li}60l6hAfz9sQ(I;x*@Q8f## zC2#g^T1kAfthQ$!u&+@nr?sb(Z2JNdvfdts3bdjanYX+7prRYyBpIT}j;Kxi|Yv$?EeL%Cput1;1Hy zNdDQq^wh3^&OMF0vy{y9Ql5ko+bmC{SE%uuS96pP8ZJXg1-Ndl&HMPv#rP?u_sox$ z`M(>5x$EB&wK?m)0eXXu%=7LlP?EFsiNW$UIR-y@0#5$)tW^)A+sNZ5x?2ThjUl!^ zdrfS-JmogyQ7g0J_3(KP^hY}b(2)JS=J2tWttEy{Cw5qq#v-c^XsL~{sF*hLl=YrA z>hn||!a;6DAHgyXQZljg^Zq;R(`KMJw~w%!*o~ zVka%_&!hgOtwb2E>KSkr#H~VA;t#}Y3^lsv6x|Xw`w%a;3SS%O>^`%L!Tjq5>h{M$ zq9kV}>qXkldP5)cnK$9MGDH;Rb3X#D%hXPGsn?*VFNr;9ls*LYbK&$IR^G)NrB(rn z@8d4BlI?U6gxAC6qiFd=t(3Y}t5b|@yLr-uW_~rvuP;uMdt@gX_Cx)isF?@n!)b9j zT1NVxz2>kk 
z*OJRM#*MOt8B}K6`$9q`yq^h)ljwIBtUgQX$B57(-LfvEZCwZ>W|1@c*UKx02qOdRc;t zL@fBxjPDz?+=-U^^pJI$#I?N)j=!Oo=OFkj$lYli%z9ZJ^|zxd=SmjA`3*jqOZ)%u zc!OYif?4X*dQYs@(I}m*=ZpA`A94S^R_f4tbMvi?&)dvx-y)f;=-%$#dNMd4*MA!) zx@hqfNbC!*S;cEgt4qnDBH0f1X=08og7TY;08P;N6S_*H>?2ay&RdLE=R&@!GG0D` zpmWW=2b0pH>^(cPogqKZ@6}F9n4-HnE^Xc$Wl|MOH(1$7TWMJJk{iS{l~`4y`= zg_Ye+m;b2wh?)9SV^?HyEaV+ass}^fBG}*CXx6A;SwVKPXnn1v)9C#k@|{j1&!Ztx z^hLcsaB&R$kBrr%9wmzZBXug$CRnMo&V31!I|{k8cJU!$K23<11gu{aug1-rgRCUWj!M&Qx0V_6JVn` zfBl3~T}gAN+KB{OjwW(1t|}_uhmF>}z*IWC#NWY?F%3;$`hFq1NuKwytfVx2CBpb4 zUL{t<<@)^w4NXbDy&mh}zPEab0k_!S?&R5lciO1$VaohM;uW;`xbG5eEAjjfz}Eo2 zvN^k6qpwOhPPV}vKFb;2hDP+nc-qVB5B<%vTV1kR=kJP$ugAj)M&sntYX;+A;Q44Jvl^Rw3dx3A(f@X^ox6!&7%4Zim&6U|P>@AV z0CYxeViM()?}_l2^DkMa{Q~xqxhXTdJVRvtzol1+{9lZfCg)j2BgGmst3)&!kMe)v z?=bS%PJR`&fm7{m=8=-4cZk}{X}X!Va@sT*``*;%&2(6dzxi1k!?cw(=Gx|W$wSkM zb-bWvMRFcZ>RHcE=IP9kl8-jI8xwbFhgylFwpzQ1f|j}I4SLHdp2SeA?|(|?jNxe3 zxJa$cn)lGxS$^8VN@6*jT3BgL_M2PjKKUa16)5?O#7fY_bQnvlklflVrOk?vl?;5V zlsz2|9xM$(yytC_yLk^k$&qWbtv zq~<;KP?V!!Z(i?4-#x6&IeJK}j4^cGk2O@H;bQ1WE`;N?`X&oH3Qk5BG*w=`;UwGK zH{X+J@|fPj>o}K_oX4U*$tC zKwVC_MDuTJF)?(jE8iFIQ&5=rmMeLQyYQEYgoDt&UGMGDkccm({meEi$O*`^$gZp2 z7vU!vK(-mlcF=RAUV`r!i-tTIv@(N8WajL4EhqQHJw6I{PBB*;L7SyW^l4r>v&8HM zt)nTFtrsWh}U)_sLqC$b?ntHF2hw7P!3_Z$ojL z)5vAfP?nyu2aspC?A6>#8b^~)NxV+*B$%Dx#MkJJf}+z^|H8{e@~_P^mqhzQEnE$6 zZ_``uFHchA2;=_DMx5BJ}Y@NN7=6Ld8rjbUW>3VyOzwux<2(px?Jc4c>)^_J)y zHS{x7d$+N~miT-~iJV-2nlT&hKCC^27ZFbgAVxA@!TnRN>@~ng5D=R5G zplc!t#&732VGrJQe{xBLuIEUjlQE;UF@1l0Ci7%sE+^ONaB|p0-`~UH3r61#MucQ7 z{N2wi(n@Zq@sOS~9b55yE5zIh7eiP^SN&~dFNdLc82o+)C5zeOU?WQ6zQ3%Wg)nm= z85~MeSNcBZZIf>|{U&SOciO1pXBQ9Lho`%djQX1m->09YW~aH&ksH;?*^qPNL+Pdr zJDWsebOCme4!G9Sm^YN-XUly$6Dbn!9zI`C|} zdH&q}$?jKXKM%mlVA4C5gg$}&(ll{{cGvKq_mj!hs5pQvUC1h`_Cl>V&W7_xwR;82+ywWH zP*ekzIj>R=&)cBkeIt5N-uo{gyc6jk1{;YN@E{-hH=nt~>)xozE=&(T;da0G!SNpG zo#L}6Xy{ixf3LTkH_!R_tYdE>i2)>b8a}Tjfrsg%5)0Y^EBC`)BJ5;r%zQm3%l^RI zT`VHj@iO`c`MwWrp5=Vl>Y|MXg?Znv=A3`4dlhM|GtVfYY;}m*spXlhASZ;| 
zpzLlk%f4Ya8SK;P`ah`P5t=~mGxWNg7FKzt%`QZCB~IYM`_uE0s67>TEzxz6SLM;Q z(Ep=hY?gA3q5l{4|DoAM{$Ajdb77+dJT@fj?@4HYzRpKePQ2Bj>&)vCS7;S}x58ZZ zP5bHb0cgqnijpfx=3RJ5 z27==u_R)ftmXlHT%aY;l(t=lbONqpuZp&^r(da?YHl5`qAJSr4pTgpUxVKo+$7+2* z0_U^MH`#D|7};qQJ%iua3ymYmb&cPx@Vt}N(`6s*<(*Fs-*o=R$JJ8OE%iBs*GGSbIZ91Ju^ zZKteaE>J#abf&YDdwieW%ENfmQ_aW|>2@|9CC*BBh{|m8N3U|4_K3oGxj(&si-uoW z*cT*lm6~UGSIWpx8}(HnvKB6DqP2lqZ)xoiQn;4HPlc$Qu}JLHM|hFCT5Jx1MJF=* zd!Mml1l^ZLeI>0XTi3PFy2FT;C(q0c55-~T$fHn_{jB~Z-b~4?X(pD_tLlEu|7}+9 zRXUl(I&)q%5!G_Ta-84f@48(5%n>`Wtc)@X%@8+`YSxu=hCKV*kiHo1v2!I+V!(qeFzTujAi;cguCqn$*wXrcD`G_r`tONQ0;XlzQt#mOqO z%s25;7Pc<+D%nR8!yt3hv-EixJ~9U@&$(rMLy3>;O~9=3rW1g zyUa$(79Kyj9jf^}=RL9x`5QUiYE5Y|UzRoYJRM~fFZaU}=_RWtCFp6spFHJ78i~+Q ziv11ajo;>7l076j9nOc(JMfcSu34MS4f15HOFX-r!EHbz?bIoXCjA(1o`?=Pch*T; zo!D@^Z!)u$Da>@H(d<8T`~tq8B7>Y_-#`+d6lC6&wH%{`#H33Gl;jo2oOLal5?wU6 z26LY18hU?9uXX+G%Wvc!V{WSEY)s~0iCeqScMHg3GA?qmIOkb1^KD8(S%ceR?96I@ zR+)~1!+(9Zku;tl*TkcU73E&+Ts=O8#%t++I=R(C|5Pa2?7OSUbugJU@h)?)sWdTw z=6h)`QA|tYW4yK!gJgpLzi8oDqi|w^Wuh6W~i)3x3Q=WTDbwrI@487_PxQk1-C!>&HcbO@R4|f_0($S zbw6As&T#R!&EYJU>VC8Oy^dI&NHc%QwG($JGlrJ{5QdqzF3}$-iVQ&=W7WQ<1;~S&zT|RqAjrWa2{YkJE z3(1YL&UBkxO^N-~Q$Kr?j8biJyI$K_!*~@o zbC+r$ns1=J<#?$=g1?aWr^+-pGsvmzpas!EfD7OMS`pM%EMQpU%^lq~Cr8%WkMv)`pJMUox@&&epzQ zC+BJ9dWcSpvK9Q~8AjK(tZj(-P2#F9<5ByYuQp*Zx%)bf_Oc%F6lr8XHW{iDE2=YW zPgMFl9B18ZHZ<)`6ZQ0QHLj|Y{mn4h5eKP@msJN+aJ^lrZ|=q0BYUPtk%keEEv|3&#_ zzP%aGzv^c{a=4Ti&3^q!u$&0#i3YSypSiD|IH5iD(TWT^qwyk^Hxz{v_^nEK%$eVo z{N!Iy))ob;_@%_WxLK=*K}X`4^f6vkA*I|V4wApr=TMe%0~%LA!7es-GyG)CsDj2N zWcH3SC!^&WJ&fe_9%Xl9)vn1$%pjE&be{-B=fPpJrrwJF6Hsuxv2O+)e`Kat17e>y zAK4!&*8A>LvYn!Q&g0!ev)5`f*?@Du_+cLBT{OO0u;tJ6uM^W|Jja<`oXhn1C`WY&;O6SYWgCmbXv(k3(7L>k^-yNT@9AMXBR zjOt9z^U(4%G*+SKk@`M@PoBYh{-m`$%{`9hqUhmAn#WCm(>gTurT;(Bd(~7z}3znaK>L%R#u|;qVe}&ud}in_5FnL@CYbN z-lp#%V?GPoK~JZsafwg<_OwvHV6nf`(Kblf4j-qQ3(iDwW70`v_(V|}=G&(rCf5Bl z`6sqb8D*wI|5NH*1f!MA_U?uK>=bT;fM?P019_E&yrmG8b9!BPn5@?wgwp3x{S&)s 
zLn^sh-%$NG^m;Rlyao@ClWy**&cxxb(BBzaX46Raxhvpfb7Ay+NT1Wyxyt8%&}BO) zn1HIQY3WHmCE0Ob)awu$n^-_xTU0EBh`kFwC%I;_7SISjZzrkjAw1`|4-2lLm3j34 zAiDnIS?}W0-t+fsnoUmM8n~_lpEto@;;*;&>`_bW2cReCf0{x1*?PT$|I3=LM^7I*9?f3PQD-VkN+zzEG}Dnz4u<=sN_2#V z-Mm_2A|FnhljtTfTXTo!S)P9lE-yq?ZedL)_p@pHM7)22#;kyxPrli2JQp&`(*6W8 z_ya}N{7>}W{f!F^A$GG~e&si&vzafAA%7Ur2GT%Ddj81gTW~O+WXh1cT7n^URwazHnLPFVDzn(`(e1$~o&CIG6Y2^fLPZSOHY0kN(w`4=fiTgv?-Flp6 zrZx$sL$#Gt#Jl*p+*T?&d44$Ra|11uB|}6hJpW8CIccBxvPBu`Z}4e$97>VLk;c)S z#Xb`a{qdWK3=#Y6oac(H2W03-iGn4nUq+9e@Suc4yt&Z=UX~_xy({XbY1iq=?+P>QWx2@=) z97=ka!94{Br}B|Gl~db%Co55-jaw&at)|b~kjh>pd@!B{qyKM|Pa}aww3;WTY2=r) z8a-*Rw9znoHnse0RzLZl|I%7>{Qty$>KkV=|H^vKWL*8rZ}f)dWK(M8{W+xBkl#oo z#9j2BCyB%rTn0%w<(7;qsh?GmlOQ**f6#Yn-XSNjNARE7L)()z%%GPQr1}>>`ZJF- z#;hYZcuVkHRas7Mog|~tLY_P~+CL?u9s114m%1dFI7xYGNgjjvsIq$5hpg-Ar3@)# zkE|1~_c9HQXIZbn&pf;igO5DrWi5Q)!c1|9nbc|O-$xFKot@l^;UxD5#%iS${ABH5 zG5M^;`yqwia~60+VGPP#{7_Wpi880WX3$;EWM{8wJnO7fD4%G*d9qFn-2t?|RzEit zBzZY>B!6vodUC$_Co;}0^A*0W0}a=)wTh6FeeIk{Ouoz4(cD9SgJ>W*mJ<_n5GmzJ zc$WXejV!q(dx)Qm!iQ)hyRj8enaD=DF~7k)qa7R;MJ!5GizngcXmlUK_b*X;IW(7t zpUL!8)i|0p#4Se0wMN;XKeuy6!$@i65+U(L6z62!IPX^~(S_Y4md^rc%dMI0>y#nq zMNl|JzaOxdrCJ}vr{2S!JL9b-n(9Jbq;VMEl+)zX$+4#yT626SitpcA-b4S1I{Yzi z?&cFR`gGIt(QGE??Q#qBW9{Awox@PJ&^ViYvP1cT_s#A4`eYNzYvSyD<8^X8mnxum zHSX3KiEAqJUoBj!)@rQ{H@i!Gn&(-`F*LP_)O0+YN`mb6qc(y&Yp#McM(@jr# zHf+HQ<#x&8$|bKw6TLhK`#Fy`5?(jKNJAF;BT7D}pR6kH>6=8{`wuQ=npbTjl?moq zS*PlV$NQn74qRbjp}XAG`diE5sgk4g4Z@BSdK-)J}|S1O{vt=|@EC*t>eC`b(Q#2TEA=frI9>L;@6 z#3vr8r_=d>oRrKSM0SO9e}5ypo5sW2NAR0x>81F-4A)sn$zA*MO5UvOC;Hr5nd?#T z4Owj@y+q%8(Yn}3-#Z!>2+ob;bVM{_i-r}NchkStM&8or&> zM(d>_v?kJTEtU{}Q4UWzf191%IwW=*d+SCoUCc->@;c9z@zDLX(VKKO;CZ|;;dSG{ zC3wB6(CZgyzm7x_r!UcGN1Juk)WS^dEkl1~y9kzt@{z~WTjJ1tL`#F|H2V zj9`}5KcK-0?0<;fvJ;roUtQ6(TWd>r_&Ovy)K9XFccZJW`a8mB!}0PViD%WRGrc{B z>O;tM3y$7JXAS+|2%p>a^;p4!4a8;Uzb|=r2%7UmKaWj?^4&C+cmT)pB8i@UkeS!J zG?i!5S-3dKXWy!MGTsvLDQEObqh=WXHj?dddV0`kw^*rTa8iwB=WhKUD42zdoW+}` 
zwPI$N6=^J^_aeWSu+_g&*b(YC=<_zz`~aIfw3+9~tYqJ!-<(a)dhtV0w~;j4kzrLb z8%W2QU9P6vZKz7jpZ9t0>_4rhLDF4DpU>GtW+x}I|ND7}tbAq`k#+E#aP35=C$Rhz^x6dFiPQd@SxhJN z_f;Zip|UTNeVck}Cx*_MXxyyi3>H-z-J5XV169SmUkIrq*j)n_lpJd<$S-l%PNUg8 z)g_|f@A{p9s+^4;%J$yi&8O1pAn42-J-J#clU;6}zN<`WzgdUQ&Bq&1KAEJlf0MnH zGWy7#!A@AnGv8&jb|=|oFQpM2XP(#y4~b6k9xcD^Cnq^d`o0>BByW0IHl3&&x0!t; zpU}11IfcCU_L}<2?oA7t`Od3~KD~uZj#WFej9VZ+ zPmgoF&zX~C3cHldb8_QyJtkLDZaplA>TcS)m_;VbcJ4^`D*NAqq-O9U zm3^AsphV!?Y3xqs@!qJ~gxml6<^h=bgUk-)r4oxG_nUJYCC_1b9!S*g>x^?p6v|D; zM|1xB7ro@1ebz01M)O%HUq%Awsg?77+51cW)N*|33U-rw7OQZ*#sU@g9;xIya;5Kt)5SO`M`c1@Fs?98E7b84YrCxw~1) zUZhZTkLP>(XsX4meKh6s@?`TH4VOo6X&Nqyy50;O@hD%Bbnc|&DJI%{9!D1|`>Q!c zcCzlq=>#n#o?>!k{Z5v{Sjs8vGy5WsqO`70{!90{w^+f5lAX|fXud6dp1>>YHfm(8 zI>^dXP!PP{|6BBTVnG%MqO38@oyf~vt<7ZG$XTt7zh~mTyWVp5z8pzr4LG?%KjG)H zZZMVft=6cyhjz2dc|A{;XUshRY=-C(TCP+;ec}#u<9)8iUqduR*LR}2J+u#?(Nb!L zf@yShnKpO9(&rGLQ~g8D(_W^>?qs!1*6C;mUc)TTqo-NB0}y2a;wY+0WK< z&S*ZM=W+1#9r{m!xjx3u&R$nUS=ZVI-#5WU3a4Q?g4o?$0D(U|`7B~Z1 zau=g$y*)A9Mw4w;D7u*yrE8VahyZglR2u2aY*PY}61Pz}ab7ZCJ{o{mC! 
zq9ZM!k?gW22KZBYzFyr$+W4$sXNc}8PKBucMH6|X$ zUe@p)Ia{50LYw)Bne=uqkNpDv_E9hSQU+E`j2LDIyPr|}hxK2Fv zFVr8Z-_lyIKnKU_tqNQ%()x{B%M;6KFkKf%)zx^J6yBiM#K~@=tvoNB=Iw-rp*cCNbDQBkQhpiU4`D1B#THaDy5 znTeLif6i)8AjdD*d{&(QC9U|b?D{{5{|ivigasvH&kgi&ffCR9{8XdgLHOC^w-Sk* z%qtzE^}qGDoCPI^(37}6zkrW_aM+!OM&h6@tGn13F%|y4LPs8I-g*aFpHE*sjl0)Fdh&8)1+G0k%_5_h z)O}Xn0qpowbX-ij+he-ILsc+vx39{^~@y&fTj)EG_YtYHBlaWU8G#!iZjq#6QOINMlh){4PS>SLE1; z7yN;?i)$&7{6^DOqMv2AU`WBYWR>Gvmi0eY+DrLcSkG$O%-T=Zdp|&HZoXz!w>>-f z+2>gk%US!Zd1h5FXBv`0Iayd|_?r{k`T17OtYtQ4-A$n{Yf{gt`D^FoPMdJ`1oHto$Y*bH|1@f+Du8-d4CdOwIJ zW@vkVqfpKiyrIovkdO%di9vLl)^^ZbZs}#t)eYC#2UuzZ&E4|4_)4^o%$hEOhOh9G z_^yejops)ttndkcyOT*x{7)d4#q4niOCLejJC#i|vl)8KTxkIWE``i`=*VrzoISdd zChs|4C^Px#MDaZ!xJ)4f`# z@2p}TV{VrF$sI`lG&S}lwM1rE>D?W)mbhnGD@Z29Cm`_vZDq~8f%fOX`Gdxy{z`n$ zA7x!Bx#%({oJBHUlSQHcwt$2BXkBEC-h=cy(^yv5l8a|DsSUwp?k9ZBGu6^po;b2< zbvc>5N)C@{B{v>&(>wPuE7H+i+A7-7NF=^uTDeVcn{bugkDOCaESIHd%Xx%0(3DK_ zIVJy+aUwB<#zSu63MQ`2U3}&!mfM|#TPPXpXictl$-Fko6I(R1%iIXaZT4nr{%gLP zvm&{-{D^m*@iLPA-9uYh&&rDE{iK(fU$P*@%IX(pom0s$*;t?OlNo(EXl}?h@?@8% zkw&z&-2cRz$x})bpXLs4a%<)eY)%hVr>{r#(2*u{ceo9iv?%DY9*S~)I1+zGD>+Y= z(`J=X^aRcu(rTWD&Lh8%$)y3xb0#nNy9`zn@un#>)lq*5e!kUGG2ADY&u?U%^Z6%| z(M=?s)!IZ#&Hd#vMwge#@?7+E3vaZgo=ssiEJgvV4?D-`VK{7e#6mD|WKL9Niwfijr$X)Lz3bW6K&{7r`7qFdV zYxszT#7qpKC9cgD}5zI z{!c5_^_yszIXjU%naP}-xyW%!bcd+i=udX~ry=P*zUU}VWc@wqB!0@tuvrpnZldp} zNFzDXx5DXi+Pu*38kqcw?w62#&PHDZ`y=sN20Csq?#IWD#qDqC?8%DuHljSF^d+?T z8WgoG;J-F&OIFlX=>34Lk4F7wtvt;pbBnbeE#%(RNOt!b9pvPDBBT!YWVD7|j#B#{ zPi?Jvft*!JEP&&*em&HkX&!Wmwz`qYvH}kuLit`a{yUAyG0e zF2#RarEAf~0#yDDFKcmISBqU)Wp=;rCXKE1bA-~P;4s;M?>0xs4ZMnM@({eY<5i!A z;Hu=Y2yJf`W=E$%!zt|TK9trYg%`AUBI|liiNjEEXhE8P@;teXlvT%;T6vxZ%aBrK z2q>%c5}aJh6D1$&f1t3XInEefF?Skj(%a$e^Dq78q{T(>^B+_kMd}^Y+(hQL@TQ-$ zx|~d|MGviTSix+s4qCdC?_%{&h47q~%e|xxN-i?DsP=`be?eqTd?VRn~{5dGEq8S-bF^=AH&`YIQfxWGV+$@DF)-Skuqhy`cU81=(QmW zJBD@Lru|lQS6k1S<8(xIGSugU(cff}n{rugE~kfyq&HdFvONAo`WRcV!_q8cI{n{9 zdOhI$3ljYY&ADNg6FL)FQDQmd%=AF!T 
zenP_-{xi2D&LQd3(DFGnl+f3gw3FyN?MQDw{Pjv1?ZS6%0J@A{G zgJ*hu7@twh`$V0{y`SUhr2?eZ=SPy)I60Z-;wW7IN83%|@f(^hNs^28lH8`r&{&>r zjpa|0@n|_Mlt5W&vVB>b!F)CT;1xaH!nbFocoa)7$_AQemxo9{r|z;!_8+{|g^$ma zy$d>$wW7J-4ksTR&$ZQ6IJxL%>9@&|=iq3N9xC0+^6}`Nt zkHoe78NCmptttzdM&G$X)0MwI0X3`WekF~aPUE>xb(s1^&!$fqr^;)ks#0y(OKvC^ z!`)kSAGB6hvaC-sisf!_Ryh;(wJ4rqU#;ZkL@}K1rSCSz`Q(S04^eOXJZIc_C#H^C=sd5rBep8FSx@3u{w zjmU02$!0E^j0tO5+FF!$*5f|Lw+VRd0-1Nio)Ue!y&lTI=l=(u7$QIv7QP>`Rrq~^w~Bv z{Er9vL*GRwk!ESNG1^+7_V9w#d*b3%=$fr&BG1ksiS;~f&c>}#FQ{ zOL9X!(LL+wHM?LrWBxbPEH@T)(bpjF<6j^0dQZqGI+2s?d4tF@=Qr*kyGkgZ#B)A| zpZ(ELguwmyqR+@KcL?T?VxELw^gr2Wlf(2*{3Q<51p3>h?Mekp=~R%)(d^(iIJufH zsP6TH=s1fMW*SFtX9=ZfaEFoiT-MMN2jBDEd6I1A)t;}K3>g_|4?b*_EkXZ@u zIn{p~+UlTczTcu#XPtPaci8s{B$4&%?y!>8px*jh4vWbQootVX7|#~?|A01gQapDL z2jOrFZS4aI&+)lWD%l=)eT+?ap{YA6*3s1~IQoDjraEOg(j!&)BIQi^6$qglV|)=5`P($)eCdO`DBq9 z_5zY0%StnMn!sku@TTvh>~!<8#E{6{fb8COWHpJEw;P2&s(Clsr>AD-iF zqv3gc&zIzRBE1gcr)z0DGxXf?{G6}90Cj(&ayv~-rrR3q;WaY&gq;q+&1ic5nY z+Y&`BwRk^SC1SujttOLTVq!1SLT*szbknuuF^)tM+hZy#dD17xu)O4%f6m-&8-Md( z^n8QISIBHENzM1}Ejs?2me--<77}@azsyaX*VEua)^IY3xA#8#If(-_k{}oAO3d#HcnbF@(WOblctMHv=$#y3c z-l>i3M<)JPH5@jAr8DR(=Nsz4MFq3LJi}f|n(OtyQrVYqUy*K$vB;b?I858?;kiGq zPuD_jKcB9APM};sW0Q^OwcszOCH7|XwTwms$mna{CAXkT(rKdajYD^$d_I-=`9bo9jc0NS6Y)_6P~1vAOvc?}sR zvQt^qJODkV^_KZY9~@@3lGT{l>QXgw6J#1(HWK8^9J4CNQya8@Gfi}LbikG zsyaJ3n1u5Tn47vyjh?yXRgIL}qoD^|dPM!xe3})-tjW#S=6J|o4MF3eD(l3D<1PEM zIe9#TU46o4er5f=;4U%r9*6v{`pNCmYHTTI(97~E{YZ2iZuWuW4}BKA=ZQ7>hjO~{ zC{pdOTp~*)%RvcROzes4(0?V$M$^Eaa8!dXvP*iJwi1Etzok0iSiTQ*Q3!harHz2o8t=hwm01U2XdOxRWogF$5D15w$SYgwC8T#{iN^+ z`6QBfq<20lit!fr(0by$EiTNo6Imqr#&S<1w}q$CYa$3G=1ZO^s*}Md{9#U$KY_|b zJ$wqKqu_KcoyIRr$5%4G#}i~lFV9^$RdX5dmuOChklX8Oo~!;sHZhDxJx~AN^A;_5NPx{A7K;JvVuOWBJj1a>M4#Q*Ot;sMp*R zITBuTyE4zW@g$4bT`ObTd>me?rxjYt37u+c3?T8A>g=IJ(O8aTN@zsWb2U5=a# zyF`0`YdxH;BeC*m9!wUA4Z21T!;BDZdErT9ksH*vlI2Z2;53rW-H3i{sT}=|Kv^?( zvlcBCa8p8C3((Ne=rWss%xd)4XkJBjjactb_)dn-Nh6n*DL 
za-zc4D$Gb@AI;$RR{dVD-|^;IXW=gKs9KP1Ic2wcAOF$?!txZh3V!P8p)L-xMv%K3 zIi2>pI%RmqtNpVRkiQ1BfuHdm8F z0mY-W(uDLUn8DvYSCz0`K@R8@SSGABBUGrE#PV!bz^Lk!0(IP&E?f!b}WsZ3WEhMMpQ9N*SoX&!} zxoX{yuH2Iv#q%AbmqZos50_=VKN6jnpmL}&H+Rn`n0@4V`$~Vu({A?5a=QLZdKt)% zoC95nHb0bAW@Yw#ixu4sn$33;?KkL#;`a>QQ) z*E8v9qJF=F-qk$hhvujoS$lTdzafico!W@9v-LHNm$}|kL!ND~h1W!QnWEl3dhbeq zUupYjc$}>7jZl?6!&NlY#rHRRBDxV0254m~*}USpr6xN%($h&!(fojh{Yf|{sB#vh zo|4yicDV*NFQSbObXAEah8P3$%rS{BJEO1(n%5R=Epe%j^8J-ilNbWx4I2mFB*mOD zSjmZc|A8?6Fe-jGUcN-OGnAag z!eVbr(3%)U$3t`CG9+^CN;umG)_0>lW7c`f7scM(j-n1Eb`;zuHtQU{<(W6x=#!!O ze&v4k&E;kzTVbdHOJB=pCcf(=*qx5qq(g&(%zBoa6M=n{*$>>nA?B3Ep?{ z&3|gW2SnrscLyap7`^tR zu?zq5CtBC>lEWdcDmnG?$!PPM)%0*Sv^`yDt(R77kiam%d!p$iHInT)CtP1=4Y#8x zQDL8k*;eFVO>Z?w?=_$OLhh^KWC$c>)PbbYKP_r$A-y+4S{LyZv0JDlf_L}#o>v$@gHgBQ&y?y;=ua%FGw+ldY@ zMd9gcH`iA(YbRn#aw46D(wtF92F%~I^dQ2maO8++{G z`-y%#pm-w=@|1Cox+kDGGvmawFK@hGto@(ZZFc<|l4)78r~!LL+2=R1)|-@T=DSi5 zeX+8+S2l``Oo5cUpk^#2HG%g;#6H@0B}w@ho_Cx!a+3W~I2Z#fPt)@_R(6cq2l2A4 z>9MMDIH$B)86gJZ=~i}73E!Q_>O&H1$8Iu`+}LQ7xBE^(K|=!Dq6@c zM{d3*drq*B6TKVxt<(9jE+lXy&P(etc@4AjeI#p1|HB~hJM*Ri`b-SboDsf?wvH#I zDKt_8)j93m3`GseaGU4Mb_I+kgM4DZUWA_y{N#*zGR{n7vlB^iZz#-)=wB@HFW8X5 z+KB#Ncpl0}X5aq-lI=-0A41r_WV#T|#R`_bFBvz0zfs1ZT`<*#4}S<>4`};bC`)Yr z;#zIb)-x}8iT6#0nRm(j5A@`j|3IJj;Vu3jO?Lu@WAVKY+!iHKNR&`~WoaW7WvQf4 zS=tv`&_)z#lccoTN!p}jSEv+;R;Ji5zyIaJ`@YXSGw0mrK4<26 zX6DQ^GM}xLoVy*r&aRSMwfH-kGE1}jS-+oO@_hIZ&dr8T-prRJ=Sl3Eox4T!KNr$B zPXqgjK3$D%vKxJqvZK|JU9LHUW274VM8YNFu z&bi1bXsh6tY{-A%$WvrJk2INO%ucDSGH;%`6`|<}2sZFY0(9HNVZ$+Ake?{n( zS*!10Q4mqwsJb(?pU-<>?on0tXSKf4Ys1_RYnVcso^s0sL3NY!zBCu zsvDnXr_P~xy_6i8*Di`9S$ml`E`wRAKOB1c`~p2E(O{sG$>rKm40zJ#%-fzq@9&L2 zvV!LZzl%zhW8d-m)9t(U#S9TT+r5WZy{ zU>kOwp^bx~6)YO6c%{@ujSn^SQrK_mf!gveL;+pZ!`{GnV&# zW$?MJUzUqB$tTte7qW8YZc(#wA+mnWT3N4>oNIldk~N3l`L{FgKUnWE8tR!Bt%hG8 zks>DyoJNwo*({G4evxG76=dmyzvsg%v(eMo@>e#$Mjh!r zCyJfVV8C*tith^jUvdP_R7)+E&z_9L$Z5Rk6e#o+=SHgeQGDFN&wnIk)^()yA7Ic~ zi)(ntnXpT~-jZs+70eSe`eI5-s4*By{EUE 
zAj-66$qA6ix~f0ncbX`f6Z!TOi?TLnHH&^m#`4(pfHu2{LT8X*y_U)udG`~YI+Nx` zZ1|pZ!$h-5N*t>0+Azwg+Yk7C0NcMwmS<>~T`}2zexTa2zdKK}wdgj*c=;o;UBS|s zcghTUJt!uh@xki+4YE(L%QJkX6A7;IeG29dz?tHDlgfqBOEUEwK%OebDbH#pJe5i zN|xE|oqTzJ!*Ck=tmLByutZk==DhcZp}zwwPsgY^z9;v{I&q>YoMu98jS|BlS&VFz zw7H)DH6%k;D(pU`_a@BR2iMLN_pTymWm+cl@;pyChx6l0ShKm9^Qr$|!kVSfT8Pd2 zv3)7y-JGCQPHaxTp|v#sga*g**PJH%5X7^FFDK-kNZafsSb&jdyE-1<=J>6NUi(rW zQCs~lX!Bw=co#&q6eII&avekB&Cq;|oClEN7*-s^PFsA-e(j9QTkzNy$-jt}kCEX&Y#3hP zZ}u~1SNx06T&j&fwUu2J3$>Em1}z}*qiFUo%`*$1T~1BhU%}t^NSGBlnO`N0)%sv+%h<6dr)_Pq9Tgv8+92-HE|@ z{@tP1>B~Z&^5iezoU>|Lk@_oGm4b9uS#GEMKwg_w_+?4{28ojeXfsK(udKZq2EgVN zOiT`g^YnjnM1iHE@4Y-^7aSAuvu7|n$E%TWx|m;!Z!a*iI-exzEf(_9ywA;g_zUzT zt0B4p*T2Dmtn@h36G9Os(_a?j4_UW$IEi3u$LHHH zs+f5B6E0Szdl4l&(=w|~vnzKjFFqb0%K3CNy^5;!cx}~zbX#$&FRPf7cE=U$l(E!F${nHI3Z(2j#StersffYG3I&W!{JvE$pg@wN)U!I}!l-PmpOX>0fnQFsgv3e`fBr7id z#pbZ=g+h!--rXNrsf4~Fy=T@hEz@qi@j>k*+d}r#p32sFbN-=NF^Oi$?OKssgJ3$C zr5_|s2@?FsBc}0|GB}z2i&=?sAGz-5l}**Xn(gm}{}2)^R(n|fW?_t#_2rr4%8Y*r z8a#(h{rOBSynU30SCQyAHGQf5VyS8D zl=GaKwP^Q1_J8c2EN|_gntjpPM|+>YIS=VtHS9^h!KBPunat)_>7NuvKEz0+uSJj6~v=&>@YzXx79d##v=mvwCM$?V~IN-TTK=MkRva>iXHmdMGC zc~5^dIg;5wUYylQm++X3Iezimk7^i?r5!|-tPQFmI+lV`N=_v2!%DO!aZU;Sz_0C; zyoDxNKb1L%?7Pa$Y-UmR&?A&pb0f^#hb2nkK;~Q?_ivsmn(~;tv8*WnSODGM+4%&K zc@$3W#Qo2qnzeU3#Qp3>$Z5g5T*;GJ@=1;J%h#gXxmZ6?$vmqj^KQIsI}T+f*fcS9 zxTw(<)8EI3tYm+j#yQI^yA%^0>eD!uI1tKpmA}i#DW|*T?7t3V-mccHbm)Qw*}0Lk z1ojg3kEi8SJ$!ZvR$;@up*qWNqxEVlA@dSW=M<7(SnP2v{*PS6vA!nxGq3Wd$e$e$ z(dz_qjlj0MpfEuM`k6MrlO}TmYm{B3M=uJ8Nzix;@^{jx2mej3MTzlr&+gx;TB<1OW*+ZYEYC{FsbCx(z)M)Zo5*876p635`$v+%h|Ap}#g~*fh%Cg3DkUx*><7=>eavwEQGV2<9 z8Qmq%KyTmn;Qhxaw@XZTOT-$+rqhi9)*7!q%JZ@o?|7qurabNpQoaelit0-Sr`~*^ z0Twh-M{m8{T(YF?7Bt9g`wK>}?<=`h%}vOh=hhGLEb%Glre|eJ)~kM_R0k|7!%CMZ z)moeP;n+63I;t?PK8>xDQ6S!UH4oTkB$9U`kNKW;5qZ?Y`V?W$-c=k{JsXg>x(%* z!|)9E9YwOde;)^xJ=s3HEPhjGdYFIUyI$%U+J74fvifb3(Q7_pxkxhqjLLYqB!7q-$o+&Zo-*>dASx+eNA!e7FmyWWIT+nliIISev`V{fAx4 z{;JPqa5G&Ou+@25zg8Qg*yB~G 
zypCJ@@th6nDF?Z4j4+QCPu4;AcKX#L)77+opZ?h|zurALHG2g~x54H<$Ytkqp6p)J z{!}*nhbC>5&J5HCd}b6J@~oZp5Ph{XRq5GmumT>*l-fuQui)uDbjk@QIq%{Etq;bW zIqJF{d)~pLd&IL*nDvjka~ATQq|7LA3mmf}_8Xs$gH>{M=85G((SI#eUSqN3^u1pD zx2mx{T(bB0I;cPCj+T0a$?VyMoF&wrbJsp~SF-UR%aYd{30$k*3D`0l?@JhSZD8HZ zH)KbDawOzzqFa5w)UTDunfIP;)l!R=bJ!<2+%gOJhcZ)LFURvQBjuC$&>1=d$XB1V zBa|G$!*=r8PFh_qa{Z^Z-&sGI!dt*B?+%aiZ`Spm1%+3kHJ`Lu-<7r6>+m8`A-VV` zleJ1ACS^TvW(G17bqyx>rQ7`^Tk5`Eq-;aNt5`a#mUH&TFM6%5eyfH($zDGdUUl#& zCq%!>TegUBkJICDWhZ0ng*dX5kNm}lTKjgGIuD`S2c$0Qp2J1ooM2Q&)H=~GpVQ)L zJiSp%-FV0j7G1@M7t^FQgeGD{`jG|XKHptetM5I0-9(P^?t0u^d8WCX_IuFeeLYyR zA}(-O@{!k6rm7KbqH#a{=l(J=s*>X_l75WOnMcoQbIU~aqqW`_*P8H-31ZJ| z41AP*n-ty_C4=G7>Z`|U50bqU?(fekok-n>ZG(0qMI~GIg&4r z6giKC`Ut&fdlKzWiz4L68u(4@Sx4P%e0vMm`l~HD9h-<*U0~Q$%S-X>7cFPy!QGI| zc`bRC%`EXO9N%9JvuM8oOWQ)NvNk92oO881j-89M;=Ll>yGm~GYci(}FN{z3_v=tL zNgko@aL<{f^Tf-d1y4VRMZ2(DvW6vJYTi@N6$f%^?G8vi%?ioYTEs7_dC(m=oe|<% z7SH?d;q1G{cq|!0GE2WNo-c;=7F^Ek-wiBMQom4>rzSUKb`2y`UB-jk*k!Z0nGEI) zaPt&af1NaKX>cWUlha}kWgo?!Ieu;DuJ!agjAd`fr=WC{D8GAjaEdnGquVcP8Ngb} z(%F`EuF)sI&!<-U`*&-o)%-KK&B7NY{2Hp;>;s3oeb5!Y*yK) zv)Q#23ug`DNY{q?Q$~cDD1PLWavr+I2KA8F9)exNM z&b;@}Y(+ydWUo3m$wB z-|wkx=>qe^)}NJK$Ns$`k=>ysTx+70WI681CYQi9>xOEpD}H{1KBESCCn}M3Zy83xR%cEg%U+)B z;?CZQres>-`V72?t*?W~ESTivhCzPG%y&6{lex;Sv^#}9!6V$5PwJas^^#t%q+hqg zZ#CVrn>-m>C&0BR_O~-?$oqt37|Xkjr=fK)ENAkj@%VBx*eQhSAVsp}EU0Bw4oXDq(io=hxMbgp6blWldG(F!2 z<#+10nizXrPNL*M?dyAI63t}y_2R;Rl@^{u*ZvqZE9PY^| z@Q(V%>Q_G?@9~&bQtc1WICE*)oA{0>o^$VO^S~DTV>0i`31-Q;cRLi?YX5QfWPeh2 zUgU{RVpnoy20!a}GK+W_4SMp5Rxr&DijwMmPRweE ztCeZhoUA{RVkLaa@rSQq+8^dQ-KrlcE@I27eqGHjhZk01y+p>h$yASS|G83^_as}# zTppb)v}eJ80GVzV6|x^>KUS>4FS5I20zLMD{~zqV7sju1JpQ{^nENjMI8SYb#kYH}(r0jw8(_*mW=FbtUsh=A{lP^d{5A z#^h7YS@b!ND64aJh-2wRmhzfQwU#q9j)lhxSiFR-O|_dIb1pqD(DEUWNM_KhMVIfy zu}Lgl1w!ffx#Z7WH~4lhsO@i#Qn%7AcqgoZVr+pIX+4 zk$Yib74;-Pcy+$HNGn+bnP=8wIDR{ZwuR^ckeo}_DzH05TWty$74_*~vF$SQOknNo zmdRO|6O=0>Vzm^}l4My-!=I3H4IDo z(;90(7JVPa_;b{hJSWNhlfHHiJXa}Oh5fQ;p$Tnf(`P)7%_``9{Za+m-O2g_^fQ)z 
z+r4?WoYgoJed=m#eK9l-bJub*UFWy5BE%}3Yt6sgLFOtr-Gyt(m68((Uc{vJ@ZABS zku0C{6AmRoW}%0=XSf=^q3;>^U&FU6w7mgpgZOdYvGxmNXyt)9Yu&Zl3!e z_HQ|nG$(MMjnT_pX_db zh)sE$bs@X`sFw6mWkt%YziSA+dHgyVGoB$qXKm&r*r!O>3v)9X_)LAr^Th1cZ4Zx} z3tJa%!}Lh$$8Xl|(~xcifs9!D;M{o8KN&=Kk@Nw0HpG;&{>{#hVsyM28&8GlMX-2; zjmo%o5*b#&prwepKPH_(>r2%Z{W3%UC1n01X-OXAQp z9(f{Z&mn(BxP1wU?1j%c9-Tyj2|QzU!Pg%U8?&D~>!1$C%*=K4$NztDcc*%~D)$WZ z&!fjBte^7@&Jhhy<1N{9*^P!>MX{Xlb1n3eL;N12;3EFdykXv&Kj~Xe_o;yK#qg>Z zyY^MXcGrKx^qjPm=ZX%{=zYdYJ$lxwb;p(L_&Ex0S?l!<%@Pft_xB&Muex6bvD2S4 z+Yho?4^u<$Sc}~c7O5NKZ_aVe%vLjuXj)*_74+>S1}1OX-q6mT;H=>oN!uGRb)6PF zz+$7Q|2{hp#q(Qv_>bCtP&~*pYvximYb*O>@=R48$CEi?BE0gJC%aU#j_O)H^&29> zjU+8juF|ZO5olK1E!TS1-6Z?fp?>>^{gQ(;YqoMS-hQl>eGt9@@GUzHq}5yQ&IyA> z#g{#O&icf2-IYDu*N`Wp|D`0Ds&DR(&)H#@w+?sUNOB^Cw@o3`K`r0Xbsx4(*4ADA z{|5q@Ie1w;Ip^mbWg7G2jFF3IVsj;vFR|D%RF2+*klD< zcBz-Zruaza*qf+*HvA@&DbN4wTw4Ll?2L)m*L0$345u(^xQd8*1YTSt6tDh}rS`14rzUbe}sT^$~rQ(3kse>V?W zU3i*4U+G`jA?HNq)cKr3dOF)yg~=*)KIlp^U}wIrHN>+jU>JmYsPAmv5Qaa*)>8}p zMXf?idls4{#nhZLp7ZXSLnb@Q9*1F3wVwjfv)Hw_E6HRw8E^hox(QFHsm9Vg=|ncj zY~QWMn3J@Z_do;0rkp=}Ds+bWJKV21%XVL!pCx`5fq&MUW>;o*xaZj^Sxt_^ie9WV zi|jYk>qvcOUzW)pj|X6#UiTByUjw3BhIl?V>Me$f|kkwvwdDFwq>_h8E(`8SH)#PGwinAMErckA09Q zzN^d@zh;%{*SOk{Wqwp|`p@ya#8tYB> zJv$vnV8*ZRJ6k=;2Xr%!YQ|!3k>(zyH^I6&wlrXyjnLn{erS#wj^iWQ!4eNy&+{t7 zyN5pPt-|Q}1lRMXy{pmhi)u|?ik!7KM7?K=xF;J`Uo^6&3AQo#)=($ zYK!g$tP*^Q(iT!-~r$8*i$`wbkX`!*!NRr5ft*4A@iZf zVPy6^e}f-Svubj&Z5M%VBkN3_oxQCmz_pqvksOJeVOL8_BVhlG-tIM}lO5zMza~fV z{ZJ?;!hKGTTU~1j(-LrckL-hKJYK2Y{BNtZmvQWR+Ks3EY~P+?-JB5k2W$P#=2=sj zm1D(&BP0XP$cBWJK^fq#Nydy7wb%-} z4cNLq_IxZpzk#KhP5XqbhpMZSayd~bGdNB0=}eZJuBBw+%M58|2OIFV4s_VaHl^{h zsq4L1em(?hkvVyRGp;xp1FO-fl|Cr%#$VufZ6J}i1jCG*`#}3KlIE>c<*jP1ua)jtQ~2(piz2-1`&BR-1Q}GJ0wthPB7h zjF)Tb@ArUXPKPT+#;lXPOMU%mHjTYcCs}ero#gIglwU*g$!Ul=2kId9d{jG|NOUDl=4xdLzU&R5PyCr_ymFIP_cgjp zF1o|DUP^SC498Onz3bZ~KTX71N{+SaDvQ}K`K1^n?%>}!vn6}T*Nc_eb9N;Ou7g!p zC}l*s0-oO(k)6mcw}^PjteSOoJ+<+qXgQocPlerBcPF28R@UUiw2!pAla}>;|GU7H 
zj@bV?Z@x%G8wG=#X_a~QN_3o|Twk@9@jV&&cGA2ijq12Nc?hRqZy#JALhmJh$$s}_ znph_izJ!12nXaJCwRCx#y>c??WR@F(lUarLoBOW8j=Xg!qQnl;ehZ-+_+55bW`%5K zF~)1Vo)|jHxao6V)QDX(m-dhU>lDykhf~?5SA`b)u)wb@lif@meg2zP8TI9ax;)z? zC&Rw*DFWT$zSR)3zwz7pGgj--pMh^BpEu(3{p7z=ODlO~5n43i4gcv~rYey&m3aqJ zQG}>M%C7XgRxMeVSCfRv!By9twV{~|u9xuRjQqN~dN3IqtFa<)$R58M{@lbzFD~4( zob*#*zM00aYB~E{b{IkZiXD}CRa%(gzG7s|YVJ~goh%0RrBypJe920gO`AcQFSV9T z%^9PgPP1fJOYY|-kj=B}IyI-CJRNtErC~JYEz@%LbM_a_zb^1OSx$PB=23O+J}0HK zw%d!6b0D%LIH)u;M5&+4mSTb0dM!X}x;`?x!6LUym!Y^eT~ z?3J|;$zFXv{g%SvRKIUje{nwdB|oak1DE4kX2q^zwQAbW8A<)=@wC2W19U3#vCMm= z*V>x}GhdqL(3eHft|WWaw_=cJE*fU{PP{&A7PDWW9t-9T?s^u^yS0%bWkxVhv(|GE zs6nbrMXt%BNHQviCy8Dy3o)@N`KN2Cx_h(Mq6}O!Upk-6d5e-8FU)x(^e^^zSsBek+z*%8`GR*tIp@H&fLtiG(t-pDIsnZ4BBo<&a9i}z4IyQ6-k zd&ZsZXqtKYtVTM^;(}2 zX|o$MthyyTJE1 zSNlUaSr?uo_Y}zAC{i42WPYX|{#Y7j&D>^J>gjFI(38#)*A~L&1gKx6A9|&5PqOTH z!h+-geO29g6L}2wE^vKs^>@=kPUjq~&78rNw-w2j@V550_}dr*&s6{4%3iO}4`bTW zXHS-G0-v$!PZqfBMas2gB0Y=U*5k~duAC)?9>Ruiu|x%oxf9n{LF8-ofSN3KW)FxT6^Pk2UHhRg&j1e}n=n9Aoq5XZn)#j6LvBz{ZwNr0t{$3v+ z!?kAaK2w>AY?m4H&waXI{a-?+AC3%n*EO*3Zq!tgTvHTC9KCU6(;_4fg-%_nhZB!F}H;xrHaRb0}4^AQpq3(ZIPN0x1p~MaTY{k;5#;-X^CTEK`Q}w=i`_XC) zzP*eU*SKR}e@F7QIt2vY)^ zLdp)IUF>=_kGW3N%Q=%5@v`c8)rF2(-8N0RmLkvvYFp*s%w!%)pUP0r8?J_a8BX`) zFsN$GnT$lEdCNE~z5wb)^;}o$b9<6_y8j!n)Vt!zE!vn=kZUvC*F%0Z?atH6-mKb( zj5W#lgs5{E3D@(mA9&%Ju1w~0R}}i3()7$4zl)T5SX=SeYv`T%zp-L#ZL(P3=KnwM2|RKljiy236?`cLi>cWC8i_7sk(e zvJKQXW_SQDt9jB%t|tG*OlUmg?{VVX7c97?Fv6IG?Sr(pp0)B!*~T4NTb@fztktn4M?6fZh4AsCFbtpflpw2J4j4{ zZnB6ytnAb9e98DGbAQ>pc|NbNEr!={-(Bo>Jvs7Zo4p3v7n##F^X4{hf_`Pe>&RH1 z9hYO~S~VWTb{~_!8cl1meooA+tIQF8IY1mgt-#x?_RG%4Ei}KDujE{=tW+LUuy;<} zoq$O-{LNa3`?dKLFG=3wxuWoXtX{=^AFFStvEeH$nY?-RJUy4g!*v+HQ+r2~wiC&d zjj|DG9#CUute3&Gp5E|8cf~``;#);oqbrWLP^OG;nc0t@_9pvybx-wso{aV}z6o#o zh;i8=@;~0R0}=yCmA$RW8=K6PMPOS5pK8MFBv|Ck$?sV#r!rQ=krFg`%eVBgjr5S; ziUrf5pB-z-@|T@~H(+GW9LnzBwW3>V=w#1ZQFo7H>zro3mF!ttx{5w|i}oK`a$ZRZ zSbVLfJRK)j=5S@tHj2&D!~Iw_i5A7xoM(v9g^~A?!t-5nqFk=!6ARvSBYE=Nm*>jI 
zNdLM|U(;hG<_%QyLfGX5gv&*U?GQ^o++$qJ9^uSr<@}veusc|(r~Tar`rFBp9fBq4 zpUf1K`Ntano}iCSZuykU$%Vh@iGISJl|J>v_`m5`mu8tIZtMQY_!|p;sLjfx?I7Zw zk1y@Wb&eiAV~RD#rr8ahb*J%}3T$`~zfR`;F0>demN!t#R7~EREt1dTX#BsBUmsSu zBkL-&!uWE``O=-^)c!8}d`tQ%S$j$H|H} zg8$`hWby)^;cxaAXYb$!bydWOWM7)Z2a@?FOll|EW#%V2yYr^2w8(J)Ju`YZTf9vM z@TORErjj|4XdM~qD>;}xMX^2SCv4Hu0y6(j`{cM>B$h66XLjb!gj7xi$^7tV{@@=9;W46d{mPF~MFuskX#q|YQ9Xm**9wi=TeR3_)XbLHlAN2?K z-pnFh*#1|x@4((KioTuco3-INA!!otP9E-W)PEl9REB+?2cH$4$|~0n$4+uBr+Oxr zZ@g+F+vf@GQUBhD;m^=7^VG?wlJjYjU-%6z_rdP({jw6j^VTQpGP5i54MbNJh{ zYMZRRS@?Vr=DvqtH{$Q{N+%~pD;z!9z5R?DGiToe{=L*v3qM}wb$Kdy0{U|xdvZEw}!r{13)0LOC^qqG1(`vVZ_jeK%lIOQ9i@h%PJ_ExcdfjBiznIPsEBA!{ zXB;LB!Sron*TF0{AG+lsu!!X2VbPuzc{aL~q>b@s6D|IQ#zsuKTboOGLsw;c>I?HG zEoU<2y;L%*@2A9Gc$9H(PRnZJ({1W+tMx7HI*%=~HsO8v4Rv>NXg>zQjkI0pS{Kr+ zp?%(>WsQ5XVZQ9XjpTh3Mpuz^mjA!UhpgJnzJ-_7{R6gShs$l)&{BzhB)ORXWiP|` z1$is$aToB6WEbg9y5z*$&cm~M>QcBR?^@228RPffcryrZR`Iz%)YOJVv-#X)bsVAA zUB*f;;8*4uJF;)G$bCxE#^hbf4`+~K53ynnoGKbeX01ccP0uN;$t83xkDE%*A24aV z9$+b}O<E^Bm$Zao&*Zj|%@9^K=6 zP6w#S=3l$|vfk`(+C9Zab7-D^I`7(pO?iHPK3~YbuHEY%au)EXTFU7_E8&{+1E=w* z4NphJ=)GY<79$a$%B7U%R--yXSZ1XX2@n{=`E%Ar^OIBHN(1` zDDyxeHtfZY$!C8p7Urzr%!zi>8~=3m4JzAlI(IxzE&- zd_NyT@~c9Reja8d=8m9KQPJcKab|AWUyifZc_JBx_#LnJw4j}iGNv)jF{tfBWqWXTiYdm_lEqTEaR zr03YSh}v>~d`{}E$N#U#inZjrhmR~J!BltMVbry+k~@?;1=E|e*ehbzNBCA=Jvq6v z0qbP;@I<{rOFB*=Yi7^d;7D24sfTr!iC{T5FDqU@G`7gu0;h`#!$>+xWXOKEAv`&2 zD7HYNc|rTV=#|yQyWccF#;U9N)<2@uPN=qGxs!}Ks$hCD61*T%4u@k-yZTm)dk(*{ zkLNjv{*OJA`RN82cE*CINLk0|v<`{4lOnm|f@rc$RKn$*@Lz1ia|0Xhjcqwu<5@M# zr^Qh?UzdL*8(lKEtl+tk;t4Ti8LTpvp2_E0vTYA?WtUD9Jj-go>?Pf8*Gj(XzTSaca#PhhF|Omc*0 zw&WJE{s$VgCfVoOxR&&_$(h-r5hC^7?o8|~jZv5Re7=a5EX{cW=ueBTFl;Wyz3+3e^Z&9>$8AOK7sRE{x}gXd26u+2FZffodnr; zeu(~{E0hM)dr#kUddf_7)^p_|k)SkL=CW9xp?^>=D-ARIxlx4Mjk&_$hw1OISKj$v8KLXk`wMRtUelYAFE?;cTa=V+2Y1^H2I5EleF1}Rde3WPUVwH zK6xYt=%sSna4T{xchB|Ozky`G6f8Xz?@F@JW8&QqHu-~pE{6IL`j6&Wqo8{Tjr*~D zc3yu7`EGPC>Hf@{<~{IU;@ePh?i2obIPW-(9qM7howPqvyJwIj?^j-6?PZ0hy7@fr 
zC^BTsoNNvg_-r*2C(pzPj6Tu5_1u+HuHJ`mV=P+${jA$fG)}&liDFTE*MHDz=Ix6> zF}avB`__&e?ZvQ(tk;GV9o>1H_A;xNIlG)+Is~I0SJxk+(0VN-Uq*X8Ox})Go-P}B z?n-uzBBJ4ublV1x_G)OS?3G?eT$GetyIEli=;k{mOkT)Susr?nvKTpQ5 z@s}fw_I97yF<;!x`D$5DQw^JX>TBE5J7@GfZ*2DuG>d9GyRU!5j%2HUQJal%B4=vn zozeG&ICi#Lei0iEW|yMw|4~ns9k4kqEGL~!H}1*m|Eo!GFqY>%a_at<{CNt=p3idP zLe9y|^Vm|>xfMGzFL4X$H<2`FDDyMk zXzfa}QCFi&7rgva{HQ?EYAp99Br_k8lgpB!Z9+@IyAdX4ZTmRfYpssta$CZ(M~WyzmEWDa^(zvN zQ|qlHnJM-^q`jP(klmfh#nigcQr21DPqO6oKLD~vitb;~DKkoqA(bcc=gFC0oNvJ>jz%9!+jOCGLnVb(!Q!VSYIUiLr?KIW@dIiIT0_0(MUCBxAs-o z{~aGFMXKFzr7l;;?lH|EnB}ch)^+aAp>vb|Gy7n(HZ3dKI+7-NAy+`Esj`_bO4i4& z{>`d|oCJ_OsW~TjGUkjVQ*x1K7P&s=9_p8q)s;QGuh60&ncl_cVo=PJbo$!N>NeK{ zoj}7Ddc4(a_8c^mclBbeeJZE6+gPJA$p*vlW!|%sq}yrQxq#RZc+Z8*g}ks2 zn!x7O8U#dt@o>W^vfuvzjj{)P#gwV#!yf2uqAuIj7naKC0> zQL-W?BSiAj?(%!iHti^;%;1kVuthyKUPSJzc-1x@)yMB8$lcamztR6*IL_g%$->it zM{R@31l&4}^%|>XDGz8QvONHScc9(EFV&%$ljY74nP$>8GxHmul)X=3NoTG7Nt>)v zI$ay%)VqW|SCMZZMjxs4Joi+BXr9kzkfN0_$Q1p}^*lcL2hYQkM_kG2epl&RR*`lT zo0P`<7XELh{_o<7R?NI^FVUth8@)r9 ztf-v{#eMV{-}%#kg_GgrW`C<1->gtuJAQl?pKC2HCgvXxqYX4T5U*D9mX+FHA(D?{ zm#kcyMeFmlU6)ML>HjW`9>>i$MAa=&K1<{nrQY7OKAsgix$i5nqZtNd<>ErMd`qHY z%2!hV8-d@iREC0pZ-et8{(KV;kYj8`%WX~qi4wR9H!awgL0ESXVFN66&; z<+s>+K3QtA_kkEOpEW*$M^>qA6-idGP)^EzijA^+Xe*RIqT{7Lk0(#EgO}Dq&K3Dw z579^IDwr^aweBV3Us~QSVvNwTqXIDZ_tvgZ- zjz8R^FATRH!=(>NmfZ>4$lZ&lWUo#uzu$o&{}YkkCBtm3ek6w6PoI`7(1w5S2c0%# zI~@mqf#p_ynO~CG=D`BT=J}jc;##tDcW8V?-yUqgR=fTAarPr6%U$-{{sxT)*g0on zB!BcJH0;YC_w|29NW1tzX5c!q^UumAJ5}~6ohnPlim?oeivU(re{vatHj&7^7O2K$@wB1`0WYs zUw~~FDOC)z$BLld;_1;llQ=7Y1{M0X&HAFe8Tdt<$aC<|5Wbbad@D|5b#?fYbKD-r zyn$L-?(Utg%u`}4hW-hM*?NEr)i|6Ta{_oVUbdxR$t9%B&evP{Ks$`e8OL{E@L#ma zDb^!>?<}rm)!KCs)( z&EB({NuSdWk}d6VpGIThzeQ#-o+!aA9czsvQkow|?} zGv=9QoKc2^*(-ZJ-(Cup!)cbYo63{nCAwC2|6+Aa*qSRUu<3>={b;*Ze-#_pE!`598m(f;@GNVB14odrPT% zjOD7Rvk3XJWAQQk%@g>C@LdP5rsT*eR6k$gHI&z{p$;F-7EPr25eHedR)T@QC6jkBsgdCM<@VKVET1@Fx2J*?Ht zygcr%_i^e_Wp?L&_!(D|gR2CaCpTI4vt-q6PMp04qSr!k4H;G{lhZu1_wim{S&lu& 
zivV>=mc5~q;CC3F-tAA;PoBgZGjBDHJh$`1yyt0%g~@-^i*$oU#{=gnF8ky6IZjEdLSu}@$Wuj+q;mRrf+Sf?YxEmm`AjP{WuoQsco+q zYqf{bx$JT?Uws*augBF#+VCSYOv~H$x=rx^AM|8@%fR2v(-uRO6~r*qfQVE`__^;3bvCi&gA& zGY$S>)z+kI;!|CyKM(7SM~-n%b{`+dW^ZEqHIQ$}LhWGLfp*zb-;ZW@E3*!k+5dQ) zcJp@S5i%adD_4?f6^*kOExW~I#jFY{qJ{}t+KVk3i8p_X9@%f%O4(gLZDyadl+0Ok zIqf26>?LPd$-*0gT~O|1oVu77C;xVLENrK5&6zLz(z80dMV_pQEyX*UxwAFf-r`SA z?-*_jk-f|xU{*`zCh0>SQeqERM;B^H=A5%dmM*NjnNQ}#znK_&x7w5UCnq_t!i~(u z4s-uH5*Fue*(1?F>Bf5WN#cA~p=GWz^NY#Re<7Y8rL|{fQRiV;LD|ZN(&5U9WeW1eHsS2+!5tK&+i0B3!BU zKWQfL6UozIrPJ=7-KeK(#jDF6rtBS!ncjHbzmURf3e;=z)Xvw1Q z`g|^#ZjPRGc$l5C|0n%*MG?G~nm@*-tgA|PffxOKPwAYE-I0~Dc4>}s?cACD(uq@f zlbrYB_u|KLzvaoc2rkwUSB_G$0n{%NE${K~gLuZ`I|yezcok#DWth5&h8ZzsFVb!F zt*+kxd>%}*to%OEl_grPBr5)e$F<4vy2?PfL}sxEjCA{@4Q{Gy}eO@u;uik-Q_x?i)-N;hZYTlq*WGUQD~G1kvGGpi(Xe)B1`TCMlbiK-LDfMSp+0fk$jdM6y8 zg4QnnlsURNd69g*{dBVKW;{5IG~$6aVMYpNbjTMcR**e&$On3)-b0?kplaN3PT0 zR0pn)LU5D4`2(>yE2@4aS$3(^$Ii^&w$?x2t7P8bWe;TLfyc6IPSLxathcjv zP59PRZn)KE*+rF=dDr7z-kVGi4ep@VP^@i1uGU%@L9R>r;4FTY%xKrpC?lIk-BArb zIoaY@zr;SX3)r5`w#ne0l@%Aka1GQ(sI96u_Jfi!oT8Tr}4hx%Q#yayC1P z?&a{P5@eG}_cLf@zi^(wAI0%D{J${{c2v^~>^Ojha^6`+(0}o{*TnK4#iLemEJvS= zC2DH@AAR-be60m*zND_~&R&Vv9oX##{G6q4jE^oQS7KpR@|VV_ydAoOS9MqVI`JXW z)`o5^j5wGdR~HorK>0U38NjlS&?w$L6?dQFJHuIOD<`MV#BXPwF!dXQt82IzkoOUHk#G>to5b>E z{G0U?Ios|7ynI-h^ZcGu>OaTTFQIuApUFCoW7Jv?Vh4&Dry3*XwDOHOyb_;gK{ zNCv+PMYWvFFp}&ml{!MpHT?-YK84i&d@1{=Uc|oPg=f%N5c?4mQlc{sXAi?9t!M92 zS#`ff#@Bi*tJ^<3?q$TxRstt@1+JAD|=T?yN)=g-d7oalU! 
zzkicEySTG+IT-;e(_$nY`-?7vjCc2e^uDA^7SQZ&yA_jGYpn^M4yEG)*xkS)nQKW# z`UTobj^mBI{%V%U7;_}_mSDmQSbG%jdxbylR8!83EQte6d3PT@SS2m}Br0YWsUrQx z8rKaa#eDIlhPqFNN-MbK`T7@@$@5b(YM(k%7s-BU;eOl|l8aZEP4$VGS_gMau{8u~i zY=a2X8z<)%G&vQIl5Ma7dHVQ!AG~(LV510-3>!lsa|Z3Qmf=k`Ww+kP>TAP#W5|3o zz1xv3XKz$d^Wzxt4y&(*$`aA#V6xnfrBA#6Q+8PfvG`kN{eL3e^StdJBcx;&7|+vZ z{U5(`l0x1CWXJ4Oezl9e9^$h<;{1Mms5sBM0Mbw3&e^niMKtQf*OI|%mNt{=JDGjX z5zQ;nXr}hxW%cpy+S~W!b!D=)i@hd_nAbwIs_WTR{~kMKCDlOI$mtTv zSTzYQnNPe^jh$FFxsQq{nP>fu?D0Cep2qP$bj{P`bah_lzU;OBndHUEG)CD8IGE?a zKOwmR=X<%f44cQ%CMS|Mfn`=BWNmt~VwKU}A2e*ua-DqZkCj_Qk?isAj(07fE58b) za*A$N$DM>XSre4EjiuG}6ZSl*{t9e$KMj(5>rOSi?YF0CdJ3$P5$ke)GWT8#Yjft# zD7G4-#hjN{%IB=J$tsq~EL9q>^DZ?fiC#zR4fvgzkrK-1B;lOLl(!wz_|*G!sfa_T z!zu3=l216N;Dp0DU*|&4mCJcYR%-u^!FQ?cY;7KdfjI#!`v{7Ydb3_M>s4|l!Xn;~ zTy1$1wkI}xe8ww!^yI>w%rBGe`vvw2u6N<~HoD|3MNZ#4jW>TM8b;=R1+8X^mmOh| zvoW*RK05sBYIZGWE-G(s4uooQk!L4O-agFM12$2%qL!1P>kYQJT)fFirrEiYHL!=O zy&UxK^6QOCy`psXL|?A<%={-Nd^VhD$QdC40uQg7kTFlDB&0G2jHYY^hYv_v}luWJVrEg9WY+Qg_~O zTq`OM@mp2_jUmhLmB^JRChp!t=oH4c&`*!Wki zwooc>c1rr*LEVXR0zexH90=L>&f#Vq!_9|D=bJlnl_uD(gk&eP7po+y$> zHzznR#ivj4tBNa)_3p1Km6?Il*eQAT=4mOvO%$M`_ zf2SJr?qV3s4%Oy|nDo7J_5C_e$yOvDE}|qaL0k6Cxa=<&UdqQ$5mPg^$osh^B19+t zkj(PyNH>gavj*k}*nEmR**BIwdx@xh)!#~ec>-L>gXZE`u+6%{!!bKMH}ecziiiH_ z`W-CNMftzQ+#C7d^CH_gkt(OC=UrEQUi++ne`mXPthx)ombmX!3@$Im_9VeTH6)&A zKixYJtH?ehu<CEuv3G-E+l}yJu zv+@phIGY3=)&8PZYT(#s5Z%Z&7m6nX{LO0VPR3CgF`en#PjERG<}2vf(*46|Toe=Y zRPZ@0R`S_AGdDFRx=;<-osxYVEsV1UK)RIZl_%i5tA9wFkFd;h+C4x$Im7cRR=ba+ zJ0Vm;B>NgJS+g-6Dy{hH4Eo;9@{8H9Dl28*@~yCZO#3}C@FVz^!O(s1D!t8fY?ZaZ zS@l?z76w!r!d5`;(P(u4)hV{1)0d zQE@Q_XO!EBw$GFN8_%=N$k<}6o0af4CoxsRp8;gNLA0o$jm?mF-|x3;=?_xojJc2L_o3J|uFwap z6nEcbi<|(xl1$Cm;KxGGcoPkNH5wSIXPRf+m3<}uU%SuUb)Z^u_Cv?Q=)NHxuMi=- zXtkBQk_T`Y90n;fMBP2LID~b-)>2Wn`ojO4#G|~^PZq(_tbPKj*+TWNHGjkCyCjJ5NAY7v>9 zQ)f5PInQ`+)1Vvta;9zdLcC}}-%By^X~_J@FS6?45Ism!{+)ep7enAqb{ebpFV!+f zM7)f}G8;8k?A`8L3)0Vq?qHtzl<%kT;Xzv4L8j~pIaG86(sD5 zL&-Aw5I!yC(mpx 
z{K%gcKye;+)WO@#*Cn5OFLEba`XD@=VPx_=-G{MsF@09{Jmu`VJa@iGtGx^3&^%ME z~#nhFZd^4ewjQv>=H(QiE&hMRE-HIt$%~+DB)DboRrtv7>I>P1_ ze7zhx18}%zA!46I>g-l{2_N4Qu@)6ZIpav46>NVN##-4EpKO{>!D~C5NBBNaJ12=z z$y%CsQ+3qd)5y84dUH+W5S{&%^I`D&`suuL3}9h zwV%P!w%D+Qe-4Dh!93_mk@HG@^adq!mQry4ADpJrH94l|D!T~sn^+?!TyMwJuXu9o zl$Fmpr~5d+?Pq+|2};L_1IhN6^RP0m$;$Vo;>ufve*RtKnUQ>@h*8a7JY$Btf5x8d zlw1VsmT>w()XWKvqabybcv8gO6-Cx}y>S`quEECVRpC1ynpvChEM`{4v4wQVi4XPh zbUE7`4!LIv)|}_Q)i>GOLC@sz>&y&7w zz=q$%qo*O1^E|)OVpf`G4|UgCPG-qU{F$yM`i?Bvu?rtK2^PPx zQ#A-txVm@&X?3!0D za#CmYb~AR(ty8ebHNms8w0pb+wqe@~rOP-Kx6ki>rj%fwU6{lHNg0BFZ!tTS}M5X`g4kTi|jQc^+Zqk*xR*wjENiayf|HXXJ9U{_b7) zmiFmd+`htHIVWiXpL|G(!?l-)S&kfY;J&_aUtRc?W!sxsD(65K)7mHMu7U-bYj2|$ z&%TjK&^ec+$>)_lBSp#Zf?D(T;u-z=4I*rEzh(X+J>n*|s!o$|{}!y@QNZy`_dOx9 zJg;2x&{u(PR=eax$qHfiIaeMp+OT8p7~I1LkVa;kDe zIv>Ouy|kL~L1sd-w)a_brB_Zig4q5u@(shvM`*Gayx)e!7kY$v&kgYTjZKe->~tD0 zq}zoe!$>;(4vlMBCHtE)Vm|{awOpw{>WoUVf3K95OYwkY5?^QJcY?dFSK?au-dgbd zWIM{~RLOaC0gLv4!DF<(kN4&Dq)Isd5vy($gYq;R%a!qY5B5EXEXi}wkL9lyn=&W- z0d_o0f^Ycua&lx}K;~9z)2Em6*%`AQ3Oyl}6ZtmMsuK;e1GY1cXP&dBXtF zNvNQoTI%;d>3BXqHR7$sA-v4>A6Q~C3H}GAcgXuJt7N_L2EJK?+%2I0AI+bn@9DI< ziFJz@aSw-fH5~W?PA7U&JeC#qgHb719}W3DZT_Ri^k7%Hryo4Zx$jr4{>F!DleaG| z58^+k(;(0G>$KFF3>$fNc0Sz&<-JJw1zrr$W38ay8%6@9#hLZkuu$nsluHhnCfb-~ zY&uLVNpG55G|8&opEv!augO~G9xymn)V+?a7Vv=LelN;H{vko~S$(d>$Muc}tF5ZG zva_bD_I9rlTn)vSjLb5Vw+AaEyUYlYV+EES=&t1QIu=(p(Y%2eGna;WyEIjA-9mKw zjYao07U+v1=@mEPR3EiG%nN$(zoyD}Vb}CBISZ~gS$^do^XYiH_AcWQ^YzK+@!J#h z#4l-Yy7A}ru>2p(aEf?@>R8$(ZX}QTCP)=$$5}q#$eI`UUJ_Ebi(;|qLJ}>+qqnqh1+=nb zFz;V(Q2r@Q>r9UweDxBzOvCM(BK*-1&fAav5X$V+N3P}s!0G-T&j*HU@fK1&#Tvs% zmG>?4u%|O^j$zY{{w{&)!93u0ZT+s)Q#`E^%`buBEo`|=93NbW5U;W0fl$nv(!4G2 zLgxkIc0Y2=CeNR0&GYY(JSThIckd8>pJnSBb$vy%+T_mOuj06p-CX-a=Q7c7f;jvK z9rFB_HT?TQ=5%%)MDPBP%X-)sF=rSFn?W{bFwfxksdWbyRl~@umF&V-|MlB9T093{ z$NIH1uD8J3JK;0Sua}ES4cKfK9@lht-ZWNm|1CyctI7R5AGnIwek30JP3G)Bnupn= zVY0^GWUXn5S6`}coE|+nXR`u*BssI&X#`fZ6m^cl{i{g*g#Y^$-g7kI*Mr5g3$W*B 
zcO|DsKhfiDzV!&n2EqJEpWY|Qdl1i=_E+HNTs~0^y4el$l>hJ7qg2G<|M9GgaO*D; zUc;6{S+EoR{=?*qzrQT_!#n)sV^*F+@^jR>K~xwE%hjwfUaal_zeL6O&Rs0jLQKkx z@_f8phAXRB;uXBfdBTaWSBgS&>A%?bx3n71{15KqmCWwKXIb_h+QYzVBhk zm14jR#*kCl^#^*q%h&fO<2}k{4_S7eCm&2sOvwo+-B_tk&`;*!m57Xh~%bttO z*L^^n<(j{KM+(I$n)+a3|-fJ%k5i;hr^wUn^a)5I5_p z?P%>S5lt_5Z&o&54$bzYoT$yCdDSWYb|QQBR4(ITSE%VF{(cNw*P}z8mg>>{E4I4Z zT^X}nPOmoXwgxw+;%sK8$}9OFdu4aXViwNmAbD|45gQLBN2!872DbzXX>3Pqcl(-7|6P0BzsPnsec_7IvjE z?06o3d118EjD;%W?p^HuD1R=`dv;<~O~~})1<5@0IPXkv^CufM^66W;e?Z&YwfZgn z@72fLqt{Gv(BIwEys|2k^U@CD0zo>GQ1AZ(qp35SR5YbdIM-??biMx zTIO1t`fa%wx>JkGczZ2!Wv=;0?A*e7HMIF4eZC;cc9>VjvYgzU^MG=W$ypG7TyNUY z_nxpl2*P7=A~}`Mg5kb!K9$Xm!jKwR)rdtG=sm`eXPM7~`AhK69=#G6b*EAlaD1S9 z<|=s`9P>1p3??%$J0rL~S>#iC-vRASBG2;}+TZWzV|**vWsk`{P`<*aQ{C4~*<^FN zgAK;XHMLvXm5_PdfiJ9&Q~Q)#iLJ|rs_`pK+ItxD>e zPpAy=W!8}Ht^8b`lM^?- zAS?Xb^Ihubjq16KwsyUFzhupTA4=L!2Xo^Xg3wvuKr zdG9d}S%Wi2x_>i2JrWB0=(SH_@fkGj3x~`o-b%ylu3hO<6%0sL%47z5lYRGq@f*Bp zhMM!NK8vhbIh*%t$-$eGt+R44S$H33;oHRKjp9L`a$;-Y5(HullZ}Hv`sddBFZIieD+mjw@_uD zTJVuZZ1Vyn&tlDPJn%9M$X?6R{$GsQIcfNQH7|olBh1T5PgPi+_lF-T; z`S(T2<(N~K^rc1fWPhB38DV5U`elaoC>XrqPh0ooO;y&NCFey}gk-;Uc6NNiTYuJn zBtzV3xwjzizO|%+lekQ5{~{V^E(?TQKJYHBDsgyk&d|vpTTD zZLD4vX34&jxy~m@meUqLX1UBMRiN|7>fMM>TMBaJjrcTLbYbU4!=~8KTQzN z9)tRsrckOy13$X#6yZi_mXBT4YVxiKN`e_n~w=2P+?hM>q21%hk5J!gB}O7b^-KS%VvTl~z3B`1I^Qe(~s>H@n5;8c^YdCRcT zx9q0P+20-L*I4}Of=3nTy;F_9VMsAiFK4=(PWFLt9RP`MeJhTqyR#nTY|lJxKA?^P zIN1sISzXi|Hd#sE8~Q)-wVblvjLjw~mov1dtFZ;aKbo_@uj3-gK zLS(oPnyvX>e-h+*KKb@Dvz=Jd0Kclx@F_KC-+#vMo9W(9>`G+o>z?DGmE4~>X>L76 z{H2HZMT_Z$a>m6_o>Lqed1sRy9TmwlNiBJ!mS^nulnjz1**tmOa*lIOPySV(oQwwz zG35!E=G=ydwYvKq(s?+XsI~!ykMXBk{?EC<**7u|laiaD4C(XM_$vS3%(oV@R7Mjy z`!#tn2VqZ6?faTfzed&?ytO){?$Ilc!=YB{ne+c>x)ZRSs_%W^rVU+*wG5 zC1eH7+NQL{E^uEMh%px&q_cOqmG`fvV@~rKA=l2(>e-a1tPAfGc z_SJ57R!!4hb~WrlzGL}ZA1r!C3>nLMzq4%CaPA3*D_LNm^YeV`J5lvv(Pk_^$o`S6 zLQgKn^?oK_!T#hwll(uj{Vs7~wR5v~@ME#!PkMJ1{nwBp&+MLcPAXeABgqgw%*6PQ 
zwY!nGq=xAsuK1WgjKta5EOw?=Z^VSrT5K%}9?0HhV3Q1I59)av8*ZZC?(E>nZJxZG z%Z)GtAX19etBJ)O^t1>v$@#F})o(lJWqoAd-<8JCzr@C5=s3^pW+N~9NxP}YHj1}o zeOA`rWj}5+zEzv;KE{S^T3Lc2JIRxG8p~Yyubv*}5f|alG2&R(b-k;v^WeWp8+*g% zdG|)oX*bW6-WDrTt9YB14`lT`m4ATMmqMd7D|Ls+cAlH(kzebfAzyAy{1 z{Jwviwbl`fyU=4Zn`HH4R~F4|@Nm){kEMOZx}NxX3|^-q?Y^Q!PuC~&>S~Paj=hVq z>Pyn6f>!9Zn`=cpcZ|UUe$2Zw&%2;$Fsg*Zm%!&O9``-z#*;Rg634?fyGpa_W|Vk12BKAXK?#jc71i$L*|h;`I%KLr|{tKwbb5If>|v4itGP{cxzYR=Zbm0-6dA#?)_tyJ&DG7 z1Ae0`>lvk9#hCB#`D;hs6GvYoWhxApf`8taq-M$jsMMq3f$Wg|(v|g{9EBCM-731o=e#^^@biZH=gIJy81d28_s}PXWZF`=Po4Q*W{n6{kiO)3^8fz9n3jdyqd>` z<$3NNBK-v3ah6duZ!h|a^Y!$*kM_EWbhYrH8tIDaa|+g^hFU!>9|?mV?72|uduq2H z*}Kv<`d{dq+GJWK`aFz{s~qi*VJk?V+L6Cuc;4n5#-dr#T&i%@Way;E=#N;HY}!v5 z`~P9}L*bvN0e6$+sseK#!;`#~tBnKg#L}9g{y!q#&mze&JUe^I+d?Bx8n);mIhu}O zpX4+6AA4tacr>|C>-g75{+#>k)ik@GjgE2evW3l(SGg-pp1}69j*rsTMb3I# zyREdD`_!W3>dH^A=Dl@9u@)FR7Vln!PVz|1c1>zocOvUDS9bU9Omw(rO_bS zG%le19=vX$I5$(P&#+nE#4drwQ!raf?@vXL!8lliZmIKBgn4W#Z5rOm9CeNR7W|6$`+{vCE^O^b=IeRS zF=TqyvE+Mt)reWoIFYqYO=x=9rRK5ZON zrNC`$Iox-3omb5@cj46KV)WFT%PA=EqdJtt>c`xny;*6r4!lc z6LJmG}rAUp2H+$}7#K~Lduf*N#9zO+tFY^TIHe7ho$nc$Z5ekXI#H)I@1s?09F)cP}Un2ybVv3jbVmWD$1H)j2Q z3;Z~njBN^@7&NjsW{9Zt2$t0(`;)Xx2K`F3PX5FfX}tgcjU@k&InRdfXPwmO>d!yM z^XEsQ@EYB>YJCLc2k2{0v7@xs>N}?&y>e%9GK=@-4GrO!l_7bP8cU8NLG~mhlfxz> zdRkc~8f2F88$@ysbEzoSiI)tc{h_4aqV=pj|43xYj;c4rk68sNlG`$?@>Xf{Xg2tf zjG6iNb>&bsi1^hjQ)WDmFlQdx7G z)qxikMzM;JZj003%9qw^*rM!y4BQ5_A>q}!sMw-_6L3pm6zb&!kHuJ+_AvtIU?5z_Q`XmBEEm0 z{K<7bmq#Tla^?)l%XBE+QePn}@^9tq7xT!hs7U6+JTZC4NYjxOR*QL^Aikr(jaIxl z)z~U&uaTZx@#dePmL~)yTsO|xc@|CX^=S>ey+MlEB4pO+CFkCJ7V8M{TH;9Fr*(ww z5c1`D#4{}WmsbBG@k+d$D{7bKbsI^X8V2XUx}E;p^1jC%S)%7Nc~A+x)Mn?mp`MYe zxLMR`ES+^ruft?dXJ*CNDsl|L-8<=&JYB!>hlRe&GnOxKH?=+!K~}T&Hu2^Ru`;VI zlAY{Q{Wpel-Vb)s(;5+N1`AZyUl*;6Gdg4s#}9ti#I)NWI8QI-Sub&OivBYeG{K)c zXrBA^tWO_@i!1s4>#$7rh`fR7O`bfVO1|4~eLKG}ADJ&cR4sU3cjxb3xjzesk{vj2 z+^!~DKNxl;Q3>&Rh~L%eTGG+ev_2n_rC2?6n{wZNIIT9&vx6%$Y8?p4o7rsyHlIPi 
z8rb+aT;IdU*{&H$y1W&?n;aKvIeE47gmM>&AE9?+KNrzE*_vuOcNje?V)&0@$glXa zL;t_C)PB$EBs7RD2)5)X}|IYrX zWFE--(7Y$Ri0#gWL^mA&R*#KYCTk?-(%=JrvmF|Z=y!;-Mj4a7AX_rJKgD+cVplI? z<|I;0W9?dcpF)<4aP3&ONv5pK+xuzrAL#XQ#si|rrvc_OqgEFL8AnN0;FA$+SMBFzbAqblvGuk~a_2_RoCrciJtZ*#Nrd-RQIA zn}yf!!J!pT?Tm+cnmDp>AD+AOUFKsS(LR|$-p7!awE4OSR}})8%cdsyb-1}4dU@|W z$+*6Z$DGe|7Leo*EpGMcH+n1=$+C_)HM`%S`R{c3R+JbnF1Ipk$S%#~$M^<%*)gAe z-^n$WiW12TyNACss~zB+m(AdwWuK#1tCO>`S7n2K+u%krY~7D9*+rDhtp~v;nedZe zrm+rJo@?jU`(c+oogkF?ja~RwegXvOt~8c3JJ7eIf7CCF`v| zqt~J2%)8`%{PHa`zzTk+g65^9yh8+P551+3+E|#U-G%*kYwrZnD0c(Z#PEq?;zw|; zjF%6vN!ECm!;U|k`2%(&ulhOK%Z{YiX%MgYj;D2oT-Imb2c5QJMb5A<5nE=NcTFXBF%I1xE>-UGqoGhffu=-{r zdj3^&{%6iv?A&RP%36rmp;@FbzZk}H53p{YlYLC?ePEaSgkNY>2M3#w=wnD+Y9z?M zk_Sbo8L&ug`|SJt$Mtv6_ynw6AttwHp#zL}*?n4yyhjwQQWgu&r2A>wy+BO)gkHah zO4pG)wQREn{XKD}1VkSo!v%VN0e5D?`3LNs%tG1wbUkj)!l&^tN%hxcdp@79o=L{k zv%ZB^6=9uP-P2ib5`>Orql5TT@;Ky4emPfvz+18_@d@nu&^bLJSK0NIpp#l~xAMXA zG`@^=&k|*i#m@G=YfZa5eOC+r^5!P%i7(gc3cb(LW8QL1*3z@u$-3Y?E10K+vLeY; zWA-o+D>;H%v(FWLKhL%XYVQ&lX7AgP&bra;U^O&mIOl3?``!83Z9PMe#WATiMi25` z8~%}XUwI0j6;@f(@*liD$LEcXRN-lv*=&YIcd{G~m6tH84)0vUbF&-%U)r?LW~$td z6lp85%XvQ6F&^YOM%r!-rwydO-;uZcZ%v<6Za&_%d0&#Z6%ROP6?rCO%0&H~!_!`K zWSuLnXUWyPygLr%soo1@&e--0HqA8J_QHgIw7HP?tS0XQe{W!)|HzmuL~rrvjCO5k z-cN)Y1-Ddm&C`kB;9Z^ddWb@)+jctkuGaf`^hmC}>&2$lK6f)}J?}_!d_4eLvO}Vr zSol5OAHuSISS(pau5j(Qtd&euwc+!EG<{1SBPw@9Z$mH$8DCni$?%nXp zbJ!|uUIsR)1Cxx6FY>soR-CS%&UiDzH_3RhfoyX{mYaD0WpJ;7^K;0SHOg;c#OXB8 zbKg8!dXK-hfL(IBW<^AHN@ss#E%+s$%1-(vTgYhV?^8Hq_mi9CP3@zF>qX`veBfZM zRK@-Q&deEq^SjjU?ZxBsUSSh-XYtAt`S(tmKk1wb{=N^h9wJ90cFrtv4K3bfo6$V< z2lzfimg=HcD=1`kcQieYz}4*1uPiPdte>nzcwXOIe0r1C{c!9ReI!FvY6iZj|E2sn z@5lEgVOBa^fYrx4;~IR)`^1rE3CF?o1AeXN?OZ=WIG8v+(_{JwYtUE{J}7F;9ke8OIjUb~8rb z4YO_}+SBa&d=aEL7Nts7?&Py?`ULHLD8?MY4woCV=kSQ+vC1mirfii;$DfN?Kl-$L zoqVsTy%2F4gA&FY0qF-5w@o5z^+F zN>&eEA~LVwO&MjAu^<^h6D^XJFSP=bWA9yDY0Sol>g_q!JRCo|i7r1GV>|eLiet%n znb}G1XQ%Sz>>5iI?K^Qk`NI!$buu30&U^^l9`1MY4pqU6GQRymJIkCmz_(?f^FFOU 
zVU6rUAFlPRmfGofPu^D*Q^sOqd3;S3>czNrC$3D@>M)vQXIFMdXH7~)xFpxZJ;vg4 zyuU6RWc@+$g5M1zam&K!w%+$&>nYXA%90`XST!*1QfQvUDi5+`bI64utE(6`li%M3lF20~U$WIe;~J>#?4Wh^EnTmMQi=XBaNAk$%b{=N`XPxIFv zaNom7wm+Lr!Ic*szf-F-S*Wl0wkM7Ayg5-X?=-r(`ann8!m~b(yo@&=J9{<+Z^OPY z@CGsJarh(y<2z(-EfQtzXF2Ci$Lm=lU-GDJ7aiKc<4AI(&UQQ``!w?0D{D_uajyv5 z|AozoYsnGXft8alFLm5@KU+&Z&0(0?%UJq}ai+NQvah5#t+PKc^-8D1c09>X;Op5R znw?z(3w}|>x!KWiB|W=|ll#IZo>Gi%zqx*e`kMxUAtKk(bU-M*4m zd+PBuTKtb4R%>ZExgO)^*>5la62JN8a+ zF3-f5@z-16w+w=b_o*R}d*q{iGsWzpq)5FSAD+bYdwF4bIJ{n%<$dDlwXUpCh!y+b z3cZpkJ^7E?Xkk2y?ZK1t%**oy1)|0y< zd&I~2c`x1{<(uBDynr+}^S4#l@iy%1(KS!lx;fHOM3|zrwk+R?{q8ZwXFX@KH%Gdv zqTz5-)p7OH821sD9RT&3&glW0qcF6O(e+sE^&-=wdhF-eDfqiWpI!MsFSvXR>id-S9APf)_gXF|57-Ke7U|Eej?G)<)cZlHQ}(xB)F6 z(nG2fOcht&f%V_w-dY}ewKhulHt$BWs(Uf(jnUUr{>q!5hWbnu$No_Nf?qzs{~Exn zG4H(t-l-q51VX7>y~L5N5G?1qx#GnFcFJ4%UgSNCT=(&gwZ?(I*pq5G89!2a{8#4= zAw>oH-oyTPv2h$;3TgU#Vi8`D&`ren_BCdG znbnr{c_iP@YU8yq&KmS9M1%u)z$0{-M5^443BYw?A7;*j_V6-PNYo6pGTo}p;^;lzTE}Sj^zzAj27oPD_K1z z@ZkqxF^K-{;GX<4$%~v-z?PWbW-Z+>5oxAB-O_1Br+*-ci$ z@B0h!C)u)#uw1I5HgaSt98ad>bRJbzq{-f@17Y(H$vX4J3L^TRMz5WipA`_$t!H-wio^!Yv!Jhjqx@a7X7S?b((@#Y9V zcO`94&`K)K?d!X>Bs!lJlC8WezpGB}%k?yv_UmYWyl>|go~30U#NIe`8Ee#~?In=Q z3i+%vPkoK7oTy!3_Zj4f@BN^c14NBf^v&Cpm0AzeUlq%jncZ~2iv8Gkh}d8P-EstT7yz^cKp*(jP2IJDseGTq2NK~IzX?rkRgx{BID|rr&rt!f> zusieuyuhaFE{dK`9^^3DVp*#ssV2y>CcLEE1 z0^?nDxC$$}k#0XDXYRW{qsNMZKP=>d--$rSh<&fKd0jSs4dSWtlzq@E%@FI6XgiCv z_OFZHuV?qXJ2G8&>b@1B3W)SI)g+XeeT&Tsr;PM*` z?%@&Rd3Phn&n4}LSi4h%dIuBAkurNGI>6(8dj5(TlW0)hce$U++W!Ycn5^RdjlIVf zFmCJbZyZlGj(5!nl6!J9k4x6Hh9XY(b|(jP5h(tomzDnKUG4F>ae@Bw9N=M}lea86 z!=BTBYK=d|E3bi3*8C(}_B!@oP3y*dCF@dCdp(E{%>KCXGM#=CrLHU8lrL5TMWXvRB0%twPYYm-m)^JnTat6V@cMbyhg%3 z;hd~(SSQ5Cu`;>Kq@N){`4uCqO%^mm`_7tbE)4G zVep*RvwNbio=3p12#i{b=5@u3WNR;pPhoxDpY6#~ll6A3NRd_9*TJl)pRb5d{phoM zcgT3{E~nXO())K2`qwFFm!yodTBPSp!uGyKB*>u7+s6 zPITEO67)AGNo%8FR@?WjwSJc^miAGxj`Hn%o5cLBk^iiL* zBDoejHy)9E-cxv1ss^WGaNgL@bNxzO>4qD--%P2*338S;1*+oS7n+(6>+~e5) 
zI}a&gzLQGqSykExt{Wh}Rx4X*mAY!1^qKsCnR#x;ghr&=nIQ z7&Z|fAJuC&Ef&G4tZ3X_&0{?MuYt{CG9Smzjd)Nh%ynnOO>Fcu)@Dy}GUYACf_YlI z8Pobfu^CQmGd{EziL!TO1kZYsgj;tP8DoQNZYjQ(nZB$8|-d7uq^iYXs z^)}*`gI{)NPSNIk*T$cR`g}c^=Cf|z>nE}%zu8*;*nrKF-6`uVsv1X*WVKr4ZtrI* zRb?$&2eIX7NSx?js?TL-M0UNLXPo$xo!c2D&Y@A(hE~>Q-ZR!>gY}r3_oh`@>NOhW z33Oi(XeQRwG#A-jqbW7$Ue{9g8}}gZckI>x!?X4^`{=V)stxO9edJcQSi<+SvN-dy zq4Y||vK@M@Xr7Su-`De}^2WG}efu*8c7fP)=KM#9O-;0rEU&3HxlW(Si=SETwY+_x zp8JvEA^z|?S-a|Gio4(!@O2dIo+iOq-27WK&NzCGacgJ+$vuoDAK+|9bB}uBQgvLo z0^{cBH!DH5(*6?C&Vg3m!QZUqGWe8=w?kl;?7&$?I^Orm51J|}`#CES5ON1X=P_-cr>z#e_%zs65o@j#$!n2) z2hZ6(!{||nNT-l-x~#imRfyRyV+xxHyf##w-%b&Ewsz| zD_p+`yYigiUK$Lb-8DE>T3?63JnLNlZVzONpPZs%){k>djxR5KG8;b(bV ztxW$(uuMP6DpUz}&9wc7v+Mh=1`FM-m5n}U{c~lA-a+FF#fa>(S?vEzl4T#|1KJq~ z%^jFW(b+r?rYqtMdJlV^i)O2zR1BO?@Z%bShR@- z3xlod4xTD^(t2Iw0C-BYX`d`dDmboHX29~?_aI$2TRw^kb3)0nMO*`#p z#bKgq6%wTNFL=+hqI_y%cfq?aNcjpWQU(1M96i-}t4O+7oW6~fQ|0~pSeEoLN_TyhI0&(JY&KN9|VM*k$#FZ9>v zw0MQ>H#)l%o$3~R^KaKEao* zG`>(Q`kjWyJNIta|7Ii{r;i0>yNL}x#++FqNC_I`j^d|6Kf}nMRjkEbeE=)nieZB> z<5ZTtrf|kQ$CAr(C|*5>?=8i}E%3`)*#luy8XGt3KX2>?JF-NN$?^68rX?rt_xvff zYz`HbvS#vjSN=fLC&@UTtf_@I8~@N$8R7PX&kQkUcSg&Dw34+% z$Fly5bpMARv~$KLw!RWqrr^;5IGOdOb7^)RyC?3ihsy+9_zM~rJ9`!Tw4r6H$=AZl zQFO`b*!SU)x0LnSWgo4LX7|q^oD7*O@!}d5I8?8_#gV~mekm=^7i;R0B5UGa!kFZa zs6+pjWch~tU%QkdZb`d2{)exh^8VsHf-9Do0BgVIGaCw1-V`!6g zHh<}R2qwLt-_;_-ae6B5+&);HJN~8q-@tx-`F?-)OeLiz1*;|NLQURv7rs4=mBZL( znXA9l${FPT$ULqF-Wu1F85d7oc?Uq`;A%1 za6LVNA-$Z}oJ=qI`!1Y_ch~1>C0wzSglCdEPldATf03(F*Y7UzCAHwMBKHn4JWuON z@UX0&Sz|QH+qRv2af^1!IR1->lFHVtc~RE%BOQ7QIBu6(Q6BH+!efGuL7!BpOTuGsj5=dcN_32*}9XZAsNJz z&Ay={z4SS>;A6>Xo@%t&?R7GQ+i77RN3#y(UVUATLCNHJnQ^KU>GSNNlXK4|ZPpM@ zgi|;EQk*Qy*t5Nul3WpAz&+I-5?vqQiO2A-IeypHa_*C}Qn;w`I{Vu5G&3vLeiQ$f zh$6Ye+>>px(>_ldO3>speOIJ+-g36U;pK&>Tweq%!8%_+X*QHjq-%CWUQjNnKqkiaVx36#F8c?$=lx!?0y?9%bHV7q2(TM9p<{M zIi4Ug3@+Frx%zLz)8uYWo|s;YPM!aAPY?OT%yf!mNEie)Aj)Uug7P1piYos+72W8SW(y;J?O? 
z548IkAIa*%nV6hAi8sxP_jXQQe`jTUc8S-;lV-llll&T6$*J)eDZ4#*xmBJRA!~5u@2c+I3}s> zAp0h4IocIZyP_+8yrYHGtxDa^yty6(iw3^!s@-}d`~X`H(*BopoTi=Z(^w+L?=5Z* zHIg@X-nU}nAly#g>T6i=Y`uSqXUo{7oij>mAsJ2%f!}ZVojlH;!C{l{KQ_NjJ*{K; z-4D)e%Og`sY5<<)SxRPBE!p8?k>F1<4{Ui#}PoSsyZ8VUhjXt#Nn^zZqwA+tV47=sAp?4#Ml3@$w&MuNRvi z(eKsRnz12SfzKCne$&g17_iLP*|lHbdLPld4nJ*2;@my2@cTPltwhHQdFyCe z41`uSOuyN;i(vZ?3(O|%g;?+!srvBGp&0Zdl=?ff4`$Y7{htcsadsUI#q#U~7^<)P z$Q-T8X|v$M53zL#-EX(*+3)5Y4Iz#kqXWiL`pq1h+2 zd>Mv+V#U|mNtMx?F?tXQFTvpt;qoj^4;C@gZ#P_Mgb{W4eBSdt=86yXlIOt7NR`UI z$LV9D=rB{SBMaW!n8mM!UREsL0);cR^R;OC2yNE+s}3x(Gqw}?&M)+n_0bbC`z!Ob zQ6$=(O?{BKUY(~WgUNQWt+I%koY0TZ<2;goDH`se+3&Qz3vzjnUKb|guqJytZ}x3* zSbYoSta2$%$7t7-M*BkaWej=|^UuM`+$SV6Lh36FB-NGtFFEV)Wv~03RSjZQS-hoa zQOa2_>La@Z?qsKz{k=&XPQLh`#KX5>l?=d>MabUd&78D7d>&`L`B*;<|L)cH{`8$j zzwrf2zK5H!@zXfi7H-M>8#X-S?2q+1ga;n$-xa)Q99v~a@KAWoAj{p(oMZe*F2Q!L z&HAWRQf$u;_k`Iv3_DCr8IF&i(0Z5af7WgTz5n9qsd{^n)w92w)1 zN7B6mhF8Mcm+-iv&yBTF4=!J@Xm3co$4WK*e;E`1IO8)T`j;$~d@Q>w%C^R_)qJ{% zqu=xUm28kV=UL&Jr!(y!^9J0vh@)roisP~5ME;fB_UE&79dfku|9-Om4`REY)29k{ z9g%t(`@9X$@%Z*Ph9(#Ha-WhJ>o2Umg1;nx%vl(cnN~^h@M?bEmd%n$ZJ3rT>o*l` z%HiGlFj>H}t|3DzcXuW2KkzF{*C8<5h8K4^vz|{AeS3r$(1SPB6mvgihXvy4Wcn?K zd}^AMHP_FZ{Acw2m>5-$q;v@sjs&1MI~ZnGAWP4qb!Y&E)FX0DUW`8SXv z_pMofmz7jiA(q`3C3)8_zMZ@6VHh)oM?Zq2ZE4Y+)t0(`FeH!R^M@K`K4gQFMds{t z9l>%{#NLZskvp!`2~Q=JeQ7z@_gOX4U93$0n@apAdAIlXck(Z$%4|!m9KoCF;_CxE z{|kRlCE<86B=6Xg$2{4=lG$TB39FEFqF9@8a&O$;m-ip3hkNKS1*TbVmuLCO2-e1R zN07g@s}>s}FA!(0VS%gw%$v)b@VY2#9zy5T+ICdbwe|A8dDVE3JIT~g{8KDSo$+?AO>Ka~Tr*J%bNrQ>aM`1ry%23g@p?vrtQGkN z59^UAv-7-Ntbu*0_xG(MlW2aLQ9FFOom4fjF>9bElHo(qut4@dJr_O6U(xP_6)48M*i$uznqk*Vg9PW z52w)?e)cEVXIK<}>W`IqC;6l}R>r^NT*#V@>?bY`#k>_xUASj_K8}RBFBl+x8!=~MU=T(KhRvDh;RRZ((5$OnuYtFm;EA}u{V)>AKsFEC3%M*UT5!i zvSnY*>aFRtlMKb7UX&#>LoLQG;cWIlX7%jVd?xvsM(TS5=~JV$9C=dxBy;UAVfrf8 zuGe06C``qP$6h5IapG;l&)DSD>%ty)B6brJ>CH8B}^5@a_8|>;OlAOg3zc{)K zXGcMFcV)&Btk4`c%0c#ES}&&OO|Wi&iQ7q(UB=A|^2Khy(ee&_$jX6NMV6ZE`KLL4 
za$Kb<#<3#P(c)xk_cjyhu6N!Dy-qb^_M$3g{gWNLfy}wLyGCoJapX?A zEF%9u`d>k^yg}V5CZ;Y>Uok1q($9uXYC3k*-hEh^$n!O;9>j+AFg#Bts)|LBbmPu5jv7C zd4!T}<9;4fg5;xVoT~5R*&=%iHu_l=V|tM7D>mN?KaL^6&#ZTosId*p$KYJ@yeEHO zo@OULd``07dcISX%w5#6*t`m-^Nggb-u5JQa$z+xgS!IOc`jFpTr+$(OYh0}(2|Z* z>Dv&iDw3fU9mBTUSa}A&ypcy|C-q|_y3z4Gh0L1yJdt`Hi`S6xH7!hc_HX_w?%cyz zf0foM7(<$3#M9bL#5t7wot#<3Z09}Kr6S{>usD+@S%=cZ)p-V0g%w{l?j?gkS#q`J z7s+zc%c!3E@6({LTpZ6X{>pgT6JASbb~j9>VO=sVCnrRC$mJREDf)Sn58fl<3@+%_ z&36a8`h3WA<_|~sd!SyjXXkP9B~R`vuo=fc>to&{G`bpB`WB*JXBw~6PwMJ@?D#AE zDitxWf=eg%N!FqB=w25uKcT_%Vs)OMb}-MFPsVoINmbG86fK3Fx4_^_QSmbK^&82! zH(&Y1mAQ}4%Y*^CC7&ovo()YAfCzj2l^PRf|N=k!>C{ zFJ`SJd?vZllItwB-8R9$4XsRaZQ|`YzrmmlZ^DKZ<<~E*=_KWxDh{} zOy*9cQ{jnDSxq+qu37z)_n=eYFb7r#yHno-+8-Mm@=p6ezLvbrJNQbTWu<~;A6Lz% z^AxPf^Pur0$ZVw%UL{iHU2$rl&vyJ0ct6a)vdd|hHuv@W5D`2p^bTRuy;)|bn7qX2 z7QF8gSM{Q8FYV@y@?ZRCma}hyOj#|pWuyMuzmohX@UGM~%lhgkaJ8BEb1eimLOGe8 zW|QXjE?Zk-=vR4aBC?7r%HF8$1E@k zoUGkdqTF`TC6$iequbFC9cWIFd&guE?~SWZvSm+Fe9YS0wN%^Z>f{^97k-CDRypQP zePzE7~ z?`U*7n>+(a-I%@;AeeQUSLoqBtR4!Z%Se$uaOH;inP6p54OUI|G9c48PZ4IdNhUk z2N;^_AgLvA4SBNLDN*eujBMifWb>lI{OwEnH!$D3f?eA|{YthjE*kBRCz-Pxtd%X0 zt4fo$H13AU%l$io^$ylXAHVCkIvK||Y2$h|NOoYuTt&uC8)et(CAAoe$AR${Z|fh$xzUSWon8{$xy!(zwquG;Ke#4PYH zI-f=Eli~WT9?N;!a0Ojki&zIk;sg?Rr{8EkkUb8khfv6pO^TV zXSvmR?T!L>CyHjJSTpbDr;)20zFgpm@6|BO`@OmT-)me*J(9VQoWMdCV8dmQ>Y}F= zWGN538w&n+A&EYv)5q+#HwKlUO<&wyN4jLgJRRD(qx;&JR2|n-zo`XYFLBO5bIzw7 zeaSi58K3NI=V@=PmhQlc)RBD-rjM|~(QKA%tT!3qGtxf|qtumq*|k+%Q=Z?H#`sfN zGgXX}zxD6JnI~yCm8Flxm#pQD4_sh$*c%#&m|69`*zZqpH2FDGwJiA&j@D9Yyx)UI zd6x7GYX;ZDNuSCCsrcBDuV)Qtqe3)$isds0ABI=CD{fw}*O>+ME^%HH$HMJDU3IQw zU-FBk+P{zI!I&wL-+vV)XMW57PTC45j zacP~ilZEJdXAEb@youfeLRGYVfb;XF?-qJIM*rlS$X@uYPtA&t6QB@&-43b4cy9JP zc81u2{5rY5lA&Q5_GLftO<3QWhI`@BFtX1EI{(@iLw-zH;_Bs^Bf}4#L zH;CYWz-~H}vS%RG`0`di6~2m#FOAtIZ`!i+0`|vJOC!6Ru zuAPLppX*^4>7T@zQBa(Sgt>gze?_`-TW0=xs(4{ ze9DT$*?v#MxklQ})2n34%h=aQEPmfOR?^j}5}ORnnJM0D?8)=@U!8pwO%HW6`!_3? 
zhu@3yO;{r9%xlv1W;%6tW-+t&WD;xyt=FKPD)d=Ba}*n`flNspX$OUCS?3G-q~3Wl z!e*6Q*MeorJJL*>tHidVP|cg&z2PyE#@FjRclNml&7FP){H-i9rkdEtyd;?n`_gi{ zZ(6W!R)<}Ru`RR{WZvPUS?97ui^+RX%oqbfT3bCjzjmwH4*?m?9 zbK9D2zKmaUp>ULA(QO@kvu3TQzR%<5yI|GI6}j{73ghgxNp*ui%skJ7_)2Wb?t|>< z=}!NVP^+TvPw^;EfM$rqd9wT_E<8foAD~v8w>M|eramv#d!BnOBJnBYZ7oX9_cJ?s zo_GBL{J05>vUhTO!Q)eptfD@PIsbWWT%hZSK`s^7QUPfP z3+E~H9pXt=OHI{EDgu0Lc2ST2B%XxZtspqh-C%bN8LPD#d^t5O2N?OX+T#g$zK$Ia z!Qp#aH!;4aro}zlOOB(giX zH=C{TSJu|f(`TxHB-i&A`cDyW#=^D%e_AO9HQ>X^V6_N82We$GE@WM0PjVc|F3GZ= zJNORp|BtON7S~q!G!lNj;P*W{&d%TpnV-Y4p1UYfq%>ui{iMEzI`+A(pun)@#M-Nygl#^*@mB zFM)FEHe_eVMwVVcyS#(R?$^EaeUKhHK{lCQgZSTM$b9K`?Prz8FtIw%<;NGUKgL;a zX@8p8=xUn&Y$p6Il$x4l(7ac9?dCboNb;VS(*h{GYMmOWaQ0#``hqGb2M8+8%=yhvC4Dc(MqO zF4FoKD87z;TSd{%^vGy@BEFu?#~!9hGG?6QE-rP>%dqMaN8Zu?W}k<^{5S}%fxlZAG&2jQ6F|w-p!SDF?sTNkl{B+hjlSMwq!rTSag>w`Bmio6X z+2$!$PqnW+FWZlQ&(?lJcTZ)tB>R>TJvsN*;Ln>p`V9EpY4m%TADjb?JTolM+Ba*n zvpBSXWV?9!Hr`PLYFCpr8GJg5L|?Po=gxbag?sD2E8BbowP(y^ia7p0Hl6A3vChi< z-PgwB>}WVoJpK^Io8h^@?}egroat4ah*?FL8AWnguhIXNyr!F(MXIo+VsqY7J?m!=JUCyp z&DzDQe3J_H{UI=keQIM=1=4llPrV>J*?3YHL$=^o_CgOJZ*Nz1B+2XW*-E~$?0ZYW z>+)poH@08M|4!f&ce7gerfo zW%ca@?d^osLB4&$*~yz&UG#X0-A3SHIh<=Ex@=~pvm8&ZjuSC(q>*$#tsTL;pA}>0 z8Qc4cEB`{TK2F@@$X8m+KEdp-dMgvvwgeG__Q7`^L%}X7Jk&nZ*Vw>2IEA^ zH=UCiUTT4Vj!iwFm^YR`n^QbNf~wfr$>%ZH@wzkGU|)OxHj1`+|1+G%-}9;D>1@D? 
zW1as8JMBrTK`bzkZsk~@wr|Rt?R5Pv&&f^jiZlMA?K%=?EnGdaEX2tcI5kC7J4KwxUeQ#U{!VP2 zElTub*9mNY8mo`RsBU7zCt~bs*fr8#c0eTePa8gUvTw)f{cKpL=1Db2lOZMd*U#x= zG+WfbxpN^ou`sXO#F81cDrkGT^Rj0zYXJM}?Rss#h=11^b6eqeGWB(#*MnG+>>>*b zNM_$acIYOb;|{)_yXu{AyOWpR;d6Fm-$0J+InKMk%nxqFw$xv|pZCmjbzOa=qU}J~ z?#rITF@7U7ZpF*%*s`j&y5d3Jx93gb2qW%&Y%l;jTlja2&p(Tut00{$W7(&ix;E>b zyIkB@ug$cceLRE7aV;*LpzYM3E$7O-&08zRRn>d)(Py{rCPyyCnw2ckTCBUWFo*tG zOOJ{S7h~qT+WyrHy^ZLd{mwt}lD@vlPL|mioE7b7@Ta3iwpB*XBCaT+r99zDWwciM z4106G`#y^ur~gL$IB$%yXW>Q~r^@{%nkAF(5B{CWdspjc1uIR0`f*z72dmV8nu#qX z*l`+(i-_sD-&o>**t(4bc{@23f2JCX>tIpVMNiOPE81=$^^tmd8+!N9tqi`EDWIM` zX~{K~JDZEib|$93ifO0&K6ma})xMRtHGz0`3;ar}b7_|~Q>kRQL@P_77hMKAV?P{E zMfTdX%pFX9I47U}O?+@R_9p*F*2r9|?c`fa6{2TJ(ANLTaH?pQH%@DB<4E4-3Tgu*p`K8ob$Woci%sV9jgznCyFVjd>%nJ$qhvVc~WxJiLH+QD^5_ z>G95e)%DH&KHKP6+;=tg-;SoqPcjF0e&l;^z;Ko`4yDs?TJP2Rj<+8R5A{sNkujv?wUo;-WOhZ z8(EjvPbW)F5-q^^(KO0h?Q(E9mpAu?&$+Ju8-s_7}dg;dtvkE))6jJx(9UlC70jJTv z&C}LA^UlhYKMS_(f!}%B^$nSJ=VX1J$7X+LW-UcsIZ!Y6I+`_l*?+L05ZON>cj|mk zgHbPY>O6bstjFmhZyDE&Wcw{%OaE zv~B0#4hUqmQdKjcWY>Mv{A!l-2HN`DD$rp0%ufjc7U#uT4Vt^BUOc#wna#)i;2Xa?xhC&a z`nu*$xR&RcnQiSs?)g|aNIY-OrnN^N?sW znEW$oca@e-(P}Tf*QUqi*nXCnR*~#qiq3t+`KhG(LySuvjaTudJDVr_a98JMUH>pV zxSW+w(fZ?sh*X-+o7v|Pe7H}m9pTo(kqcO-93MH#zgzt74410#t0AHffWbZPkvp(` zD>JYAczjo!yG2_g;Qu}O^Ne*raWT(ElXayjduKhuSLS>h;k6Ar@6+DTWFD#Sy-2W? 
z4!Iu;!k3G{w-kJ&IQ+Kh?IZm?q{ZxAJe-fr!sxB=`y0!WiT@vH&!gePqE=QNG=p3j z%t-cu-}G}Mf3NKK{k}het+&vz1y9Sag^GA{58d;g<4yf$kH{8ttT#l=c7E0t8~egD zPvJVSS~ouQHXfd+?PIkw(a4*1W+O?OCyM_RX7GdjZc$+4R674h^V|5?DE;4rC+$gd zB-#JP-v=;uUoG81${YAiQ}QI=NV3h(#QVGj&q~8nTy-vQ$qwH;UGtzm-_vqNs;}|A zBYwsMSCMrY#$|2i_mFsiA1CAc6;SNS1NX(}z9cyek|Ugzx6R2xzEkTr@s$ynnEd;f z^U7lMf7Y0EUtu1Wx|KUfa6S!A<)w2V_#?)q4#NPwXGLo6*v=yTXJXHbm{6W4Bs0kN zLgdM_rQC7N;^|po)VZMDK%Y`=H7ou0AZ>QHr+�I`$E*F5!dC#H4#%yV|IHsyLdu zP^mSLRWy03^BAn_vs&uBXVgt4kL(N1o4viXaIa4d*e{t07aGyKIF=aKMo&-CaEg9< zLiH%RRurjA!>kQ?ZV_GH_c@hiI{5SuoRa(GOFohN(X(*)N4)8Y7kjbG7BW5$mtm|` z!`c1#-~%Gcoiu+FI}XxUMc?(u!{jdb!TC3fE#G6nKCH5Z_GO?}g!iY`R#wrbZp@YArOj=RMQ)VoKf?lrPL8-odb2_1c4s zH8AP~?M%>e#;)dMJsaox@V?q)si%!Q^)QNeR>hKjG|aAuyj5$@OG=TshcPI-O&(#F zueG)bv+|65_nphNj`bwd7@qWopQ$#Ke746s;~hF=7iemUW;a7)EqC$xa(24d+~-W! zG==>ne`TEbhUXkbj>{oG!HE2+wmzguQRrml?hyRmi#Az(+t|OXth&^=o##^zkv>)M zk}Ir+@h9)Kd+RM#LI&}{URYgMjC_gR_NB*RBpR)a{YiJJZ&&ctWKDPw1}}-sPh(39 z4E~5bC($aaLvzphzIZZD3zPNtFDoZg(L$PB!%ve9q8A<vbDc@_sONDPF^r zQe@rex;#g&?V7WBOn1NQ>OU6Ro#SdDe!Pw?S3~R`Rv!ZA{$xph@T>&L9-q6heHr=Z z&@MSb&W389f?dl$k}c|KxQ~PBMAv5p)P)dCj=${LUCDM?FIiVi$?m^eg}9MbK$Tgp zCFb4_*;eFPA>RJSOH=nHxyJL6blQS?%9}6{@goJF#Rhk`xuM z>ay!n(tO3bC0Hc;KI&@qYnWW-j4>i<#*RP9`m;E6Jpbvc-MoLu>e(v_bE?b@M~QY5 z=+si2YzvDGe%InNC9wA#n$^=%Tfg6cb*lR%kNY(|^%j3+epHT6wZP)$+Bt@WKPUAJ zT4p!Y_1JzNj*aC7AHg&GjGl*Z5j~7BmbW(oUr4{y%guZI_j%$w`pLa-^5^{|x)$^K zDe+=WLE<9Nyci=o>8rhHn0vqz;rBdizT&UU4pUq3HF9RJdtaQ*F4gvs%UiCkg*jgK z;+E3xtK@2iX)odH9wK8Z{7j--KekFfy!QA}-I3gv><#OZ`1~%r{sHa0cTT1H@3fi< zy2oMd|F349apFX0C7*OV`c1@uaPQ_SOc}pKz)3pR2l1Z;4 zzt|UwA7M@Mrd*_jtdm?PB6fy;vRIu@yFbWtH3_mOCTkM*kL5QV zA$2b6l;+7nCr>!;73IfjGb_7i@QC96Eh66=z8Ox^*m?JIpiZo}^{H4P4z*8JwT8Q^j?HsTCFu6x zlOv4}nRQR6ZR$YnL$;IQ*OMPS$Wu1MJXtP!L1~0Iypit7ZJxaS590S~T2#UG`A}*J z$8*hyit(F6;TIMqUu0JIW>jBbo|N(8GX3v^F|QfXgXB^XDeK3I>FpDeEEQvKC+}~C z@pGA&ycDZ`C1El?tk+9&)l4U25%&B^bbOcfx{Fh(tJ@n^sR!|OLCPx}sptF`;Mv+$ zsd3g1Z#MI`t02@Fj;Yy@UDK&S&<=i$F)_J`*NV8QBcHeSxl74z^}bqeDH7!QSyswC 
zsJ-mEh&0V$_ms~+L-0Ge6laO#Jv@(odHS-#@w^4ho7&;Haf_IdXB8uORMslA5~bQf z_yt--=C)8DNAK@RQj#ng5zC2)t9V3qQErFRaExg0NNPUit^FV5{YS)ELdT32iN_y6 zz8ReMbKYe3->Usb&AKn+182~*A$w#Wep}Hq`L2qRIs34ylkhdTeZ~^YFns|uHkhaU z2!(Rk_b;5j!0wEIse%0$vt8WZGg$C zK{DSJ6=|1s1K+Uc8LU;!l~0LB>ztM6(+5HAb(Tvm@l|9R>+co%H~}(e;Y?pXw}o{# zuxGNM3@>P?!{Gy@=3;c1^VSnyY%UU>z~;3G$aB$rjd6 z&rR{<379OVM+GvkWyd4*TM>JceY*?ISHrLtI~;;;m~G1pn7=W|&kJM&Ycvlv|OFy?>a{CDuZr8bfed=r`eBUMRPH84+q z1rq=Gd%dgY7#EW}?`j9+c zjXm>4pWnrWj5gouuRU*AOTy%K9cwO_I)TlH7JU9o$dwc`bLW4eGs^lGUwDW$Z(_gUg{YPK z2DgjELG=Wv-a>|f>@*+d$*eq?Hq)^+Yq^v8X90Gs(OY&p=YD>dXgHpp%jljFZZ8tP zguneU{(h35fZL;dUlp(4FT|JBUadyYxfoqrOw8J(JRN8Qg$>5CWXO9{yeTg7J;@4L zFWyRg*jpR1a-Lr;Ax||k$@&m&!REaRIOeU+_u@p>{;V*%ZZeDeioUB07(PORti!(w z_aArGXs9;TR-UvxOtY~xOg_pZ`E@Z7;79V+guv_CndGX!UHh&ck92-F*6rfTRH6Nl zjf!cZe_>P_g=vq{xuJe5Idd&*Jc}ho#hd?(=r_1FSuSqGGKLX$8c&tAWQe4Zv}#bbAI<{aL#7$W=o^o4Uq!|rt>VKMB^ z{`Xt3K9$Cf(MwxQddf)p7CWVCNoHf4^tr(Q0q)gO>unIrjd3hBGgFTu)hJWVcDokN zfOqaXs_HFulrCVkRnExhloe(FV16-7$eQ^@^hlP4w^=b)9q*cCMz{$VPiC$6aBm~j zFEDzJqWzVk*#)HdLpxJ>&dvU3CD7e;9L!#+^;sPHzv%ND_W6kqXBX(X?mgc2sjg#f z_(pf^&m3gBQDPkA-V{H^=-~})xs#_H$0Ksj-pI8d;7xmeQ%Z~fLS+GoZ`1axxVgKI z(wV$BJJx>|=}%>!&W}kedX{S7=&|^^SC;7^(gsM+far7Evl1 zy^ClsQRj44@4!wAwECF%l((q=!<>7>^&2qjZdZKa+}1E%h(+_*?m}%GgZ1~pcPE7B ziaiIDbP5UngKBZD|D?b2T6+~@iyRr`SWlLnfPJZDHIffk#Qo$6`2bf2KxDc0?&ZVB z^2WWym+XstSUah5k~Qk7U6LbtqQ zJ(#51;E>%j55VF?k!l`hPUb70v3n0zYQ%~aMW(ER&d!!eknM^$srI!Jzu$Fq3k)jJ zWPV|c%wFTX54@Bl`?K19+Pz$xyE7Gc5^>(v>QD4+?%u1p>o0XhDzNAI?Ynq0pC50g zbuS~}H+0CSrF1(Bej~-hgV}PEKAS@|&o@qlRs{%U-#}3jxgi7}BTFY3b)sMPyiccl zGE8M}S#ej)AzdoZWkpyuBg+GrG5}VeV_WLb_TZ0sqx!Vt*;%?=Tu+_eypzqF>%1i? 
zQ;7FH*(|%sx8YW5TAfYy!`OEggp);iivFA8PbEG!7vB%X{2%Zw_RMa9n&QeJUYq&{ zy_`FaCVOB_?kjVDSp?%UC$E5UA3$Iwd$lE9GWncLqSjhZ9{TqBpNgd!=RfsV?piY+ zEJ?O?+Wi#EZ_s1vKjc|Zp6NH&LMn~7hG$PQc7S*Cf)!`q*=A06v-v+TeiEX&2h0kd zJW*(l+wc3A_q|G5nP*4E{2dQ_ojhZq)Yml| z;4v0X-|@DruXxRQ$y%D4VMoK|IFVu|h770M#jf6f2R~~2Kv-sV*o`=qD)FPVJ{S@s zNm5+9@%j?_uIc+*3lS~rzfaLh-Z^Ha;bfNoitY36wLawEX8WnM+(Cll^xoEW1I4tl z+IUPqGly!`W#N0 z39R#vK02}CShJBA^tOpCZQ(iwvX|;B`DR*>HR}`qWUb8Ir;~1X2K0_t+qy8P%c`%e z4LDT`1GU}?NAo`a9J&7f_Ii@6!Vt<+kjfwoHzE9cFzt-{V9h<~| z=8*LlpR*1v@0PB?uIn)9W_WZU!589RA5rpGIA_n^tHy>EK2If6YG*y>_vu=GmqoYY zSsjvf_sx0wt;HM0vU%Rby~B6U6)j(6feAD(#mYnRFqvLna;GqphRME^r}LR}eZ(6} z)AM52{!{YhbW9i~NJHRr$yW`t{T2#bzy8w=o2Q z{R<;cKi=>x+h-q2Dh4iK!;9HA`$#fJJ=2krq)H~YtTA2;|Ej!wp?mGp{ARF-Qbn5& z`~9Tr&Z74jezq}YUeC8qFv@>~%_H^vlB=7Nawx6pXt4%7&!x>fG<^dfFEaB@PVrh= zINjO1%thW7Gp1w4*Yx}d12)m9KW{qSd2_W{i^N%ja|8*dnJZ_F{VuV$4?Db~-R%1L zlw9XKZwFpKhzWyTbuQ0)*11*KJ8vAb0;rmw8{l38&p*}6D>NDk>A!L24-)2C(c#cZ zmAldGceNOv>~O!czu0Eq zCh1aVZ_r9*Qk?5^4cK&mQ}VogMyI@Y_<&w{=8=_&$7rVk4qrg`ue5VL>s;sCbNqgv zY_ER0J4htt6X=>YNrP4FJwmRECN9P4_UL{r}e}7{>c1$7S z{edT`L~=wM01yi}=b>l}7adI`C2H{Dn6x4>r``XyWTKo94Ol-)^uY>-RDepg5qz+m#qaRG- z5j4Ai*Syy`83ga ziZfSY${=m`r&m34JpsY{FlLPC@T>1qCHq}tV`kFH3N;(kpT&ZyY_g6vm02z8kG`Ns zEzxYAD8A08hHRCm;WxYgN$$)u#nKN+SBKvmiVw-JvAf>Nd)ivysX^Vm1^PwvWaXi^B)t&@aEz~@+xOF_=C=W6AtwpscB3c?7RbbepU_tOs-Dm zG?j4aT&=zem8UTI2-fVuj;Z*Ze5Sv!eoL%cPqKJ>@(EtXA0FZn16Zps4Nru~bZ3vj zoH=yvtJir2ew-=}{Kd@_paLDePBXP7jl!};LX4S_?K6)CwF2TB+^pSleU7c6i|EzkuzurRGC z$$UHy&mOj&W+qGZocJ`reCtqGmoNu^$j?J~(i-+oroH6dxRFk!SmbCuUcm=`c17}l zWozlQHBOg09eC1*6S;Tp7>KdG?aL zW<&A$S@9}aEi1A_BbFL~#e2Ih6%~_NunG+FEbw|5Cib1}=Qv26>GvA^=|_fYTFS27 zqHJ~zJZG`fbh#DtsL0x># z`jf=vV?_XkrdKkTyR!5L@9ZR1>$KHC$UH7?k%uehn*p*e(*Rb5{j%G#ZCt^VA zXT~#bWZT|2xqIh$YE~4dZ+6_Iiu`x1bBgGa_rICJ)FV;ep68uY5!^VG&yIF=a^{aT z7QE)We_{Is+FXW7S#d#f zEZfFdk-g|2K{dNhI zp6=}MxXz5WuO3oe?mDAF-UCnYO+{Wm47v;@idz>l@uX}L24@-A6qCd@NI^ga!*PR8!)B~PM_hg7$OP4`d 
zc^KUDUUw7w9IEBaW&h2el5?n+h+mP!4aK&%9PPoPMX(~C+|YMVVA6atWGB>z+N#aA z$$i(5k1fWf%%xMGyA3HHVWaGa&Qprhobe2=P9=s7{4%-w&mc|k*{^`~I%j6bcU$rB zPNUBwESbH_Uzy`v0f9&PPWG+knbA>tKT^M^i-JG-$@;(gEb*Zpl3DR8n9g@}I6IGq z=$>N83m8#>r;MRlOLK!Nv>ZU!eQCHqgxY93+0L^cJ@unJW2OFJ$waP2hpOa zF{TV3%07$Kkoi~-=VDboJ=P;@o_LHfPjBb%&)9G#`98s?1!BQ;9=@J-FB>Bt_3v$3 znr3`CN}tV*?q{-UDloUFW3n_?7MXTBvYNI*Z!Y;iHBx1#VRqJKpUy$X)e$89RWv^d z&WU-w>GTaROm(pjeKP@auVLdsWGl~0YSLo`8B;H#hi`_7YfWi(5foCVC;8x7vqu%$ zuf*P?SodgXZ(#B6uDpvjM~MpKe7=%}nz<&sbq?qC@A>Jkk3syc8Wb}YjKkzFA(Fh` zskc+rHJkZvcjLml>~pjE;H6ky0Uz?zC^0s`e3cAO$*ZJaFcCn|nVJeaL!=sgs?<~yz*BS4o`K=@BlrT=rq<30hEK004K4-1o zX%Ov5|Kty^PWJ6&e~vx(_3tO7S^&j=k@yw-Y=^H|;hcTUsc5*u*;%PNi+yj>#`C*rc|3`cJEeS18b zBN{y8H~D7Y6E#x1?Iv+*3j}ts^C0Iv%-<_R>^2r{k1M}ur4L>WVDrx*l+2!eVEDH& zB73^_gFth(dxnIm8J0KkCGqE^!q{8^1NXv_JclSw&l_0#Tb8=XeB)}@Ph;~A_?vP3 zPHn8msxewQxDYFAVqfw$<|*PZ7@lav$*PL1yX@uaYM8LmsBt&1-P3u=N>-E96S2O$ zYkEQ@`$Q5suQx7qrRVo_`4`)KjP(cUdA6V0tX)^zx4E(liE@WOov!Ebq>GI{jaY1< z)~{u)d3sF#o&jP(YUg!%&Z&;9SA^X2+qr}uANJyX;y z;mp1LZYVIKD;<+@a1PX;hkt6*{eoXVV`J7%)_`Z$pQaA{>3EpkRkw-ypF`&(=&plG zvVH!uJ*>B?3LR^uhTFj@8Ub+`T?|0R>I^*$=k9MS?^w4ebqJ5Y8a*$ z)zY>pQWrk@zcO0+6uCWm+&y7<(E&J`4JHM28S&Ot^iD!*@Zq`f{A!TKVe+r2? 
zxYyYjncP`f<35e-+i7|feCj}DiQice`V#AB`t%L?ZuEU>C}p=t#)Z_a?TOV_l5GKf zvWlxYj4IJL@B4d;EuUfBM!dWfQ}-uXG77xsnw4a33CZ<(%zn#b%*k%i{nOT|m2CrJc6O$J$ zwL_En=5-Q(2G1R2s>ENi1MoLql_wEDLUAEBju&TYIZ#I@{d*^Gv3tOfJ)K?gGr*poc!Hhz@`Ii1$MZE)f zV)8_;h4^yOXa(7eVsr9geX5=M?3s*Rl~|!IPaYzIl;9iRICruhvqLdEi|1p{AI>k2 zgO}-RA_UiKV;sw^(aNXd>9KkmPv>VvvZ-`^*tvNJl{$W@P&Jf~F4Na6+8zz7j!?Uv z=O^1k16r0auUtT$qdJP6;`>0<|eX3y?Qt$h!d>@0YSzmM}dcU`6V+QU5i z6mmVGhX&B>BsS-MD)|i8u+urv*qy&}JJycE+dNar%qdU0^A_VE<5l+aE;e#@(05aa zwZPUQ&I>Ao+37SqZK$W3j!(m_PR7uSjqT<9_P~f@uDgTRpNauR;gYpVU+5{jgKwl` zQ=cDZ|IEp664irs_UHGYWjIk92%5H>3JF?G%P2Qj9{w-Ctk{P=X4;&4p zbG6)-{g?BXGw3{_z=*|KP2Pf1G)(oaHV{nyuvcmSsuAQWJ*1Z2L_DlQ-;;6VX*x#Z zp(4X?-&!8G~Dh6jzIZ!?iyO0@v%kiI`apOHvnjv`G07TRlSJwc@~$ z`dGn=4cKKAzE{x3C>BiR!F9CR#8c})DR;_Kjp##JF!x$_vP4#6B>$f2{BVeo@MjE5UApWx_?Pqdhw?|PvjnmS8Q-GIcxPpu$R#vS zPOauND#4bSe|>2D9VQMnWW&6*KGyZAtMR>~$p!HxuSt#Gj@Z zv*k1+Tm|3XjwM;=Fab`TNWBt*neV)aW9|6jSk@`ShR2a18ALZ}sTJ@3x3Q&;C{zK9 z&WGQ@`1cgwy4-j(Rt)}#9UGAMG#Z>OZZ9R}TF9hY;m=Ubee`36c|l@FRyCc54aqum zigw=Adu?2~gFeY9S(zP^F*!LNCek_aCwT&*K!O0 zC*#*U@a;wNVmyehjE^>nvs2rmhU3V z?)-W)wU##!8EH~=t+VsbH#_JBgZp{n28i4ZtL#|XEMC4#<8q{`iT{=OTq=DW1l8Ob z)yM2<*j|IAhl+%Gb9^x$$y1l_S+Kf@I-4C1=iBS_bE;Oabw%DKX60!|bD9NwFwb4~ z)53!|Gn!Zbz!q=f#ld=ilWhk>sW?1-gMC*mO!xUcoX@(0(m41!HqF9?^<@0fwRvv4 zgw=;S))8uZ<6l-gR)b;Q2F~#LXI5y>#>rW>`)&E*Bxx#It!C}(NOJ^Cw_rl%&I8Cl zSie=ZHeDZGAyySK{Y00e`DbQ>C&BJ`eGR~Z?euzAoJpO0qpwyXjBk~AlgDK)XC7w+79!SHZ0=lM)Wtm~uSJ!zh&`|tD2 zS~R(YuG!(ah!6gVds!Edy!6TFI!pApO8?jUCRGRCbM!dhKPcXB)b}ykzK=!E!>GJF zORa#cw4Gy4pPX`ap^-ed-}^o_D^KUi@AC7KB0^Cdj8D(ti?{nLImd5?{}(v0i}q`9 zqb8hRDvS`Rf^iuu><8;x&1kdVvl3hF!wYKZwJVvo@SWA#9_+hAa4t2@FDlGOda&-1 z`fg9MgGI>O^;BIuhw{}6%mQ2U7^oDm^R*6 zaTp#n6{GJZ({aw-#-FO-$mOEKc=9KY!cFj-&nvEjRkHJDKAcPyE9o;+PY<%`M`Hc_ zf(33QL9)B`TX!{4b9)BYgJa-sEvEr^gKvq z_jYfiTSnKTNL152{ER{y$^G^N?X&xBDm(50r)#s4vpjjl3W^f3hOuavF3Y_4$x$pw%~B^{&G4NWHsnC!0!+IrcUJ;%x{HFfBLm}uhgUIA*`8aRZaBpxUmvh9qa{eNT}zGt;Nxlby>eNOzt%x>(y6V$d%} 
zoMgk929JR()0Ach^6QCON(RhEEV$0UC5>p~w9%KJ);3-Z)OU7km2uv}!YD8TX4%28 zKy-KkJGx@wYnb^bKdiu0f5VnOW~6^=XC1aALww#?zi$-0mX@ia{4LJ)XYohq_`UYZ z@rDC1=5(zlGujmx^(BAJ+Ruq>mD+Tf;T{OF|Xk6rvQ z9`TYWcoxYUkuq7ZKZac$(W8vnd@3hiuBB6)_mB~=Ux8oOk}#Q+2Dzp;ZSqX4>XMB>9~)rcOceQ<)$bx1mxEgppJNxVGohaQFK^>92GH`e1z-c#gGuEZ6W7iY)nLJ~;FTDz${h>6Pwja?v`O;2>MqR#_UE-(n*HW%;#a3Ndev8Oe5{vE< zy56W+|>Q<Ngr)YGY=eH#ag` zbmpBk{L~kXJFrm=V@`4k+^L=1)uzIFYH=o0!N+vW+Vx~}OkVq*K4(WrHEkv*S5r1? z!6FN^ot+hj6|6p(Rr>O|bJ*)neVl?b#az{a7bX{7aw@ziLKW9@*3Wchv5c-w^^%nv zO)w)>*=LA6uVCYO&Pm?M`aH5aHXbR0B#XeW@{Y&BUkMAy-S*tj3rCwNSu^34C%}~ok#iWYoI>i_wNhj*i6JDgPyZn+;k z$noTfon&_1Mf-o!?eRiHs}HjhICee>-!!usPNQ|M%*wF6AU2bK45MN0Lb9&rAkrrn z$t&2@-l(7aRSnp!80*%@*kne@&wi|u{jqtImR#N0zn{!JV?^@Ihc7p}{>zcIVs9mX zU8JogWUi&h9nKnHM({HI7r5>v_P9bsPeqk$vF8jP-HEpkgU3F`iWi+#n&ew)wig{z ztMe2-ybpYb!RBUj_-7sOsfUkPDyy%PpZvN4?}k8VPg10M`RiCYRR0~|nm72_lUv1{ zsU>X2lCrX~;3~RzBExx5ei%dZ=DG%{la2f^l2n4mnWRn?hi}Mnhq1ecK2p_dAL!TO zv9)mFAQs5Zz|O3jccqW8`R*)#9Z7SdtL_y^FT>DBwA+sLUV&WR({?4*?|69|i^p@a zk3YMO&Vkg4EZhQ;$H&wnon-W!shvC7?qz(+bBgM;Dua(Dc|e}mWwp`= zKHcZr?0WeZFFB9@?FXOAVp==Wz2)#a@@fC1riB_7yq7>}M!zWqHZ*=}2MxxhA z_5(Dts{9(BnjP%PX0Vt~EfWD|(l)EYE8zU~bnZmA%Ub65hn5uV>o_8Qnf2u{~w$4Dr9W zv$dBR7*~p-CCw&>LptwymWeI1pf-gp2lMacY&E(Ncm5z*Mx29bl~w%p3p2}C^q+h1 z3$*hPTMg98m)LqL?^ua>sr0;pKBALJeH!#Ej$XR>wPxo*I<#*oQ+_r0KblVhpOo_FbGXmdR+Pvv!edC+5Q z*2-VmlYg5}eYLsJcm1?_VZnMQVq9tgeD1HiXflP~*Fx|~pHop_4PNhE7*&SrD^-ua z)?QgrCC{~olQwlh@jCMJrB>eLh=3^M0 zj33QJ}>%ngN?sK+aR3SG4^wEyiPG?yaip@pn3={@x0oZ{QtSdzfk; zE1h{DYfNX$z8I4Hyj$qJNsqH3np~`>if-y5ku$lSHU9t zdUmgYzKJB+NAexsPowSWqT`Ds>*mUHa3R@#Bh^aMCo9sU;%9RHJx9jZ_-IoeGYisr zR$hZY{lR~4#K#x$m9L&ow`6p_ z2yVF-EzOHo;NAyfT=I)`gRzx`M# zd-ckSBZKLa)fCUrE^BMnI5)KwufyIWott{d2eDMDve#hAMcNrhx*}RBD@vvAd}nc_ z5f6NfK8@+z8M3M8{*pQ65ZpSR94o}DQ&}^+TC<|~L)Y~;s-F&%>>O&3Aw@)^W9fVk zd)CH-<6Zj-R%iBG9y&|J->eYM+Nr$B&o{YS*u8?|agt{h`Eb`J1ASKfj?=?g!gajvEL=;j)pxXeGG-itf06MWv7v{nGTTnAyqb{6*`@f!B#~#5 zXq5dlweTSus#eXSDQ~IQ~Fcbvya}( 
zIx^fh)r_Bc*VCCisb!W*U)fQyJ8RjVM!+M*%_ndnm^Lwc+*|z4JO7q2{e_?W>iCh) z=tlMu#+Fo=nL^$x$@?sz&xqCYedRcGuho8m1eF|m=ldgjCyPqRj_nC}q zvtgAS`I&pf7Eg)&opAj^zFm>8{DU1CZHoSPFXapN?ZZoZ(r=}n_wZdZHKwL*FfEF) z`-#^N;ObFYOO=4UbIiE5P#lSlCG=RD-}Pq2k$CYSj8@Tmtg&Sv%qxGK8h@RfxcjRcg*Mffu#|N=%Ro6U^aaH)wYIuB5 zX#Y@uZ@{Kk;aZKPVaWCTy)Kl?i!mc`CE5M2Cev<&Z^M@R3)kjJU2D<~byXX(EW)aHS$i9frFf;X<=(`|fT zl^yGlzbjUS@q@&qikjP^6m?Mb%zM*OmDax^;)6BY8L zFwrjOe+=8K34Wbyx&NN*tkfR5k?fZ`GZ_W1!kyXXw+q-Q)dI7!JMk$QfxFQ6VSlHF z`SE5lBWcos^jmmxYO!{9*8XgDzlhad%P(T+nYdY!)YprA@xyk|O-fOBkNoFa-}P`^L1FB9K17lS{t6+8n@5k zZ3~TY@r1kC?Y~-medQ&+4nER{Ebm@TfPIDJ&@2gVixz&00B84a5$CM*If-40alSqN z*S|?PT8v?K|GOt>_ViQS&z`z3=_diiD);jLUViolWgZjc|MGrwY+u0d>#y9- zR4V+Gj+4(tM@qbBDw9s$*$!}@D`M07r(|*PO13&-8m5PwK6g=km&Ewh> zzbWE#d7p|E{{1h3JC9I{_{j#`-PryQ!a4fCe=^(z&7`s|sol~Aymq&c$GT}b?ZyVd z{C}|C-S!>6%k@QkSKfE~q3ChwQ39sz4TvSl)(bAjBlH+VFNFvx=r|9q9R$`agge2M zDCJDk^Zo_&59EG_bLm=Ks5WxeHyKO$*#=zk6pT0%DYM0(1@5#IG&*t1Y)rpUut>ql z46;|D>NN(S9w>Yh=j+S8`wL>X3MqFR6o+#3d$`eXbRR2djyHrngxZgw^HV16lT8Vp zF&sQA;6HC*d9^_AY=)o3)!z^_-YZOB=W~h>J;~RXa`i6--8|k&+2ijG6KlEIdM^1R z7um=iwhFM@INpB*uRp!S*kKa=KlVzwq1f~P{`0@B6?^`F*{l42+bgz;wTiRAZr|8_ zWk28VZlS#I_AFSdL?H~ecjmv?Dw~92tvud6qhPaGt2Dcvz+zk2E!O(HV5^!e^F2## z{a;r5-!}XI(_(l1AGF#3(_;T`_F4hQvISf1_PqFA5fiK;Y?{&%6NJIX{hp3BlUclh ze>1(>$l}X!I$QOt;m>D|W@=Rw)2iS|5=Y#R_t~odg&}OMP@R{zMWCN@R9SAgCrjmX zCIAnaid4m>CtRIHtyy-lujxh)9+Jhghho_?hPxl#VAe3XKhjWlwoqP*7rpF^l-sL~ z$s_S_vY4EgjTQBGht|&JN5h@h&v)JV+G+lM0b4s3CZ0dhW&~-+)9Dj~QfUKrlJo3d zO1f2?s_0lIY1xEQk?(gg^rukYI=(rQAAin|n$RT+?HZX9?bPxDnEvHwscY9^UlU&X ztO-$;_f*zT%FSovTnBys29p-pI0_m`N}ZGyNsN`nF-f6Yj($mEm8H$IXkQ%KC7@SV ze?>{Z&|i;{ql1W1O&m$tr%b(ym_lUPbsos1g|ZN?E#4F*Wp0hK$ZNRC%8l%uGE+^V zxRwlkNqe9xFVn~6?3$EUS?;|6hE24T1!GIasL`;U=$u1&%?mKj&0R?mA~A6heq>?L zJ|fijrWYwC(1S%+6fE;69ZQOZb@{_ugKRAfdf7QE4AxI-agFGfVp2WCh#%N}dSTGM zhGZS!zKTU(=F@|DY?hBy62rQSJw-6AocKFklq@GU*M-A1;#~ziC~Nw&&?F;|#!shz zwj2DZp95%f2wA>w^{rclad4}|LQlZ?O&m*sm%IjStj8?vN;0VF5L-xweX;a+mhR4q 
z$C$eI7GLjnT{986B;Qy@=1hDRLGW>Bo@$D5zgEhNPFu9}o_;%u^RGj(zH3f{XG+;6 z-Qv3@EC;gmHoZ;M`_*o22eD(8`~1sqbj`M-S7EkiBnmj z5mfuJ!W{O!Loc~@2z0-4W*+9<%g*oM;&0mj4xdxB@l7bTG-)m3`d(1J8)n7v{2-ct z=BjM6{hI7qR8n4_OP$w_55H|RIE9qmpz|))AL`ua*>DJ5hiLI>EhY`mGJL7x+v4P( z0H>9#dI!s&Y=moIe7P1=uW%ck0s$Agwl;QV!EQgStbjXtwJ^dNDVcbl*uRf!vblb$ zVmW!Th<$C|2TgS(Dt|ZQ=gt&v9CD&f5G28U>eEJdB`#Xt8;TX z+_y)tL2Fj~#Pt93`59Y(MUJFU zc?Ro$qh~f%4c23Ey^s2D8+0Md{aAV#oiB9$YxGL$j65L7 zOWk(#&(i41?9ve0wav&X@WO-ia4=p^A!$ix&tZkUFdxAeQvTK%rFX7wiWvE_;=pnc3DI<~5 zAVuJ^o%{+%hKqq!U~{#oazDwcvho*D&%%hdY*K_&83X%~;Vqig_Sa*4r!y&}0T8RQ zMJfq7;xQ>!-Phm4v8}ggUEh^S+0ot6q(A!HHC5=87(bRK#aKM$x5heBhL=^Ndlp+4 z5jT`U&8imM=r-lTDZO{IcNKSjCRWMoQ&`GJ!sw!|$(UQt{H3#N zS7BOWVV3NzA!#4%T_dvPRx~61YJAG(os91(OS;Rf<8bXBCJs;3b{X6_2ilj@@gk^X zA??S9UA~)*>?L^KujGH4)X$5eZ}ZkZM(?v7*(D-uVae@zDW5pcbty=alGDTJe6cvShKxh$^*9#If%hC3zGIX= z&%c{7a2Sux7~0vr)2HNXCfe^TXmdZVCJOe4W)(g5BgZFjS*6G6?!3C-(EZL&LDNC} zZV7&TAtLUF?Z@NhcI{>P@+jKw5`%YQ!Z)mTtMe|$nvU)rTZ=YL^jaBGW#~N*4q2T1 z3*3*z>Q+$8OYxSZOIhzM54}$dNoSWbS4ZIET9ULP-BK~@1n6cF)&peBf|!&PzEn@Q zi#A&^{&8o&MVBRJBr8PuMeM$ZIZ9sMCo#-LnD88KKc>YL^SsU(pE`3O*>}%0#*$$M zb`HnMPe^`*Se+spN$59)mRDoYTz%y=>>hgi2ZpWvwVu~pBT6su`Bm-r^yw^ixmDYL z8FMy5^Jx6K#a-@2P|HS{jH9=+=1w~F=jmBoxd4NDK;ukyNYT)2?f=a;UmKF$`Z{Bd-V3Pj~ zy$P^L3Dqn$9_`v6*|H~=FLBlf;z1q`ufwTxMTkXO`VxQpz%Zqt7Sbw-rUtTPHgNYX zV04X7Nl1FLXq&r`EUi2i13%+YgSC@aHF-gt0t<6Rt0W!B%ZBD+PR6v_?3B$BO<`Hl zZ16@gBrjfqR5lx3%8T>*_ftocR(Ux6leRaB#Dv1tPi~=&hLS* zBWRZ=KucWx6={y==UKkk0Lw>uMW*(qC}ENor*zIaH%bPvhX6ykGf-1J-ugq%QD0Cn}WUFAwEg#$76c7 zxnyDHX5-*w8ZBnUR(c!(wUn`{uI-e!Il16j&y%4D{g$xa2~fD)Ihny_A#igtEWxi2 z^w`W5NmY}g%2{kQ4Kq$Hpx%-Ni;0ahNLHB~(KHFeO0s&Ai+!l|u%L$M6{}P?hG*;G zYdAaA_bG2sf}QiiGRvd~`5DC@-qON!mMcoXESfBeGr331bAh*bRFVu&(P|a#XDm&j zliYD1ggqJ4?;*_(+I&F!Ss3|@cD^mNaU7hoL~<&hdRZ&yx_U?<)@PgQ`EV|)?-BG! 
zS&AlP-YO1dmX_kRtF#^@4j@fY{TBzAt-hzXgQ1^0_P)-XjZ=pjCk`d`MAs~2g&XwH*oeNrmP?B;S@gJ8 zRIKE?QLOtRJ(g*_$PjoUytYVWCH~5(pH~aQl2yVcLBScpSbD8<+rsjnf>-Phlk4=cAMa}msSDZTE+bFUMh?KT>F|8py!06|tPyLA(>v*E z9yIg#o7`pfI-35~*s?0=v$$mUjQmocclrGSpSMF}E8Bdfhb%I(Cz*2el1FgS^WKEQ*NW=U#?gIVAOQKOo(Qm`)1o~pun1gTQo<`@h~ zBAb+jen+b}@wCCd`5rSb)zXd7N*U}rdP>QJQn>O7xxS)%u)hsrZ(#q0;?#U{Wo|V@ z#CZsM!&oh^t-gTF4YW9$R%_^(Ehg`h_6{-QERp0IHmi>Bd9v6WgEE4@t%Zm9(rm3A z4!6ppLrHen0@cm9^*!#tjZ?c=e-^7`cGFmVTaO2a^R7`m`3cc5Y521JaXL1f?(EzA zTgQ3Fu+sjWUxk7Bq{mopq8pxZ<>w8lHp(D(g&lUX^;=%&RT)^UyyEZJ}bL2|vw zwu+8(MVCuhI$Qj9@VxVk_~&6uHW?0aMi$InP1iM;yv>+3Q+6MwH^wowG-#T&;tA2!kS)$el_HD^Hpx{*> z^0XVs{ifD?ixUTGX(UvWiY#f zQlH0LJMiKpGkZ$(&*(YMr>9xDDa*~_5l^v4k|NbG#>~`5Yx?g;@(HwAU>=@LP?wQE znp_35OUPVP45)_dbDTR_jOxdykAU1ia6ONgtS9k4Jn-_u`JZd^5;Dxzeilz;(Ra$4 zwZXqnXtqC|Bo)`MY}1Oxlc;t+PdG=r*VFJre{E&+6ZHC)cy~1fGecMctFf@Ir=3{v zDG?||M>dM39gI(#+2;#d--7k0_`irF6JVDy=_Vt~&Cbn}!_3w51girbvam8`GY_Tn zAjmB!j0kmbV>L-TIaZGLnWJ5b8=1pAE5=meF(2vuI5H)TL<$SEFt#O4T5Sk+fcs~z zoC%qv7fH(Zr6k`yyUCKpq?p)=!{!L-$r+Mce z{Y?=6Mzhx&u04rvdysJtjBj6%B2N)B*Xsz`LohJ!15&ahPl&!`lR4V^%Q<(rax%1c z@wsAwmkGsLGPgPKT^cG^0E>`7h{Ym9T=e_(trZXT?K+`lG` z>7T4KmF4P_;0f}UhI{S{^87yAHoNnl+&S*lLkbfW)8aXpk>&d(TTL;dH^Ru%pqu-L zxApUmt5b|;0*2&XVTx<=oI8sgk_x3lLE=@;%y#(?$Z`c~hGW*f+A0IVGJH9U0;b^W z4jN{rp3U_sh_M!`xfAH-+-}&E)FEfX={Z(7kQ~EUXB=w>%c%wIyBkIJVe<}Jtc>3` zX<_&B_TnOY(mUmu+9kMA)s=U%dp57t<&RnPmMy)-AUQ!VZ?RQUEClV@#*9I@-;oB@ zA>5wF{6nH7m$(b-OYpE1v&ft zD8C*j@_d3jd$V;mRc7l}J-$5>%j2neXY~YapCMT{qfwU1E$7kmNRh<;DFbjkKh0LJ z{$lO!wj)fxg%e48! 
zsJD&ZQc-jwj9QS)Bo_jdgI z9uiC7y4EQ76P;_aMoMTT6;s{|mNSN*iue7^h(8pM7Ww@YS7zw#Ad>vy?6>rpXQbD< z_7^sIi{8~lf!1PLLzc+AGn>4#IKLtI)PsB_Lo%VFka_`USm`$Qdj5;2-Qq=xRw%9Jt zt%3F=e)bcoo6>!(Z%@(dc`Unw)+t?(?cw8{(cIbR6h0>j;+6V5K&yE}dViKkVc~46$QImhdDTW*e(9S$QJO%` zmH4m>_r4a}|AbjHcB*9bO9|`Dc9Pt755D#z1ipa(C$PK|nk`ygZ$!^C%@YezZU}Zf z%KAfHTMR}oh&$InB}*Rj%&rXmQz~#3I~{>9_lkVYoLhq=%Z;7eMg29bF^okkIP+T4 zwuI5mdN>Y#dm42|!6{`r*5KF*e$;_Pzc_NN);F?BN>`u7N=fIj)9*mu@w}M(6g~ce zSUZ~bnweQ8N*F<{oC2aNZeST z<3+t}QB4$_Ba+^z#YQxGfo_=(jfL7th%iW`Wp|(t0TlcK|!SrnQsllr0fs$ui1$)iH9D*8e7BXLw|}^5Kp& z!>}6oQin&E5;N-I46E~(rA~nvQ7OI@n$bvIT4y!?p}{n zH*51+oXb|VOW8Vi9ewm!gAbL4US(I*H(vaRi%&x)#c;0V7b~#25$>+x)1R{CPg=`H znTC#K&Eq3*TrL(q56g@2wsRp~w#J>j)%uvutBsXeQIixHSpyQ}%ZVV7HofpX==!XLN>G?l0cZXDPiN>aSH=>4HUn<9Hcok7c9gbXe=UJYCLn z=FF%z8Iv>j&ztqqbRWV)R{1`2mp$m3C+f9Hp2BZOINpn9<1o9d-^+R2-u`X+-=p)X z&Yyv++0>P?D0xEturoe|&rQ5!s{p626H!RbZSs^K^`xM5b((PI81L&?_b$FBl$8oek2ncBNtTe9l(&_4 zBea)i&qLT` zx)HdutFvWiQo;UB_0^a+7J=5qe4##HDnh;~&dv-yg$^X3V=Gq}NxRY|@63~6C>#9ufZHhFRuMr`dS(>)2h%ifHJgYn`{=zd zCe*`#`Fy`7j4MO6zV@@+eFg~!!TdMlY0CmCKfw2CHqAPTwa{B&Ogb0V?Vy(X$L8jK zXX8URJw8d#tP4rf@I71~c2s~?mSYzw_(fl+WJOsgIy7*kJl{KBWXn_J+@GB0+{c|; z#*x3BdmHOzD|FT-EX9brc>Wr#Qik~~{-5U*BXKP6z_MC2_bTDVL_QK^@?>l;vFmqS zxIr6v0@8?t*YUjy;^~2KIoBwCtUIf>`PenCERB&BV6)sjZ6(V-t;O71CS77ycBQme zQrusFF=rT0U&4;V$zBEGhia=C3!UoQkMXe{`IDGv5}B5}GFuT+l%l>!`vr?7ac*(g z)gn_y;t$w3rQOf>eKYb`^K-lVk;L{HbY9Nt4`E?%(w?Kgh2+Zn+*>huy*PBKzmLc5 z+erO0sVlf+&2>ZAC511t9U$|KJoz4_rw+bvOg>L|jp+9qzb?YTI!3++aC4mT=>@(# zSU)Krey7<@E79s6ao`_uFr&q_j*QmBty=Ac|NZc5kn`T*Q8)0eZX!8R&yyRDBG-Qe7ioyS1sAM>cEo%@uik=c70y4;0brPyE^o_C}D zqsG7#yjx4EB=*Ss`~sRKG0W+Nk@qkBDJM1#fb~Tzmraws`BqZqhC`W^XRXU37$v32 zBt2$M-nDS&o%_T~c+F6D9ZlkjT2Hy5Tl3#hB;%Q*_5x&!JNx@$e(;7Ym}1CY^C$w z;kS8S+=W+uh%-HD{gYOo!;>BG8sIl;axW<8yx18BVazzvC1t|?aP0?)q}cwDCP~0h z8&`782x#06&jZ;dB?NQl+Q5ifjRo3a)jc>-o>qAhUq-v9>320I<}Jl)Qs2WyTVeDw zJqN(?M4Y_>Qu9cApV{cMM#2BkJmp0y(d!u3Od!u%OwY!gVtPuF`ZGl0_p!SjKds}8 zq}|%v&w3F%+pDt% 
z>cUD%3H6cqwU5@bda`R_#5tC{@6l{24KKp}zItnImhe0I@1uKJ66Q@v5(Tbx?hgef zkAl-fqDpI?7EDj#eIpAq+5<6d6hwDuG4J}4pm8THa<90yFv2DY$=h%^o_=|nl-YHE ztk{>7yI853V|l)l^fQ?$G!p-}ivB5HdVsbLfKW=>ZiT@|{+=WnzKHV`@c33bKA_!F zuHWeDxfogtYhst9v~V6)WmcMPbgf{Ww>xj~_6xL`HB=S(%E6Ab5o22G<9l)MYVq=3 zSKREHydA#?wl#R~;ka9b$3CIG@ubRj(PGZYo0~?is)SWp^VY=-_B<`8Y-Uo8k1x!i zGCxeJzeM7iv9!ort`24e*U>T09+T{4KQa#&oj!(QW(*7IHC4}7!709f0$c~f zZZ&kT#Oz+Ul=Q|&(=X3tFU9SwOs|g%U0rbrUDuHJHJV*1HlIhYY}idyZGt12b-keP z)9HCT2?j!_hiKZ3UDvVsSP|(H|NcYzBoV9Q>LFUreBl^c^$-jG!nloaJcG@uXzOs1 z^SEp}~vzTcbev&Og-yG`YtNkWqZ-dWvosj~)=s0B-m zW|PMqo55$Fh2U2OiEgA-MSOeTk;jaBAJF|}Ua*0U-+^aK66JY7QJQ4r`3y6z^SLAD zrbzn)=jQG08O|t1r_9TS^5g?uo8kkBP?xY!FRf<{(={xbL=@R>Tb!4CP3Dxg_*>s~ zwUZKbvF7c1x{((jY0flR&)xV~c1$hs?LZo}#>JyuKMt=?V4n%3&0W$O{}yG9eX#IH ze98Ki%k%HWr(9GnzYpUna#9@N_`TZUu1bRgp7| zv-Np)4ee)5{KFz#(pzT_Qc?U}Zv5XRZftkv&AjF>ets%j?I8l(X3S{Gf}famd?%(2 zaK;*OWG6{(*UNU;Ocy_oWuf}|$!hMTqIry6Zo%H3B+NZtN>Po4W-r)Ih1o)qog;>I z7kAEq$_02?4HGYg)pRXgA3XP>%`$kc zqTL<*cQgBC&Uly>I+5Uf$L=A;-sGz0+r4P|jQ)mW%1qZxVbwR`nzbblldn3Cr4&ID z_MB{{F~IrXihZv+-UT89S$}}m^VD+!IjWE*^TS!ZD(T$MGe`Xscl#Qn%7_ycSoKpQ z#@mJ1Fq{U*^7(-zsU+&f=eOu}Hg-KIhNir7%Cpzk#tpa;49EHHNT;^+JXAYL;FR*x zS3+=^5u_@7uN3pwl4A@Mhm!OHV^dO;-cH{L{EsF37_@Os>W9~n7=K7 zWTNaDEczZF9fUpaL2nua7Q14TfA3+Nlf;VcuAk3(#Yr?kue;bbcdxJU)Ia$BMI?X3 z46K^*wzLRP!!>`h#eXn)h%1g^%lv%iK~KM3cx(_yv# z6YHwNXAX{a^C@>2uV7x@zNa`+4^sSQr1=0Q3rOBtKV94p%{E8bN$P=YToy(r;mn;{ z?E}R!+F9n`omlyu?~)wl863IJcgyIMdyRTVsVY#OL%u%>Gqoh9%v-IUX6{LL^(t=V zNlf;#Eh52N?E5$C<=sQx>d)1~Q_g(>Qc0=WfsZDQW$s3v({DWpEMtN0B)thUYSTaK zIWBXo6drv|mv68>s3BCBRfEQhv4o|Sbe~9b09Vf z23;Iogi&vcSgo)ltDQ#qZVA5{i79=dSsbb>>B{PJK7_X6=iMxMyZ&;=G}KvFktIdNrsMKyM!CGT?Jt7Hb{E6uE_h^i z)}JPS!0|lCd*W=~w!8`P?99u3<5FCEn@lf@WQY3pSbmUIJQG=@3Jp>Q^&34s&X>Qy zk(&JVbDS?>RD2vhNwD7vdnbyGRq!;=G*h6oC@E6N`va^mhgVruvy(+0q}@LvLlb&r zpG}H3<;i#=!gXTQ-n?!pwssc<<8c>@76Vv4$+4ftvo?N@_HTDS^fb)!wD3LGt;CY` zSdpZG*R#NbMxQy_G{h;0|K@Fo6T$p6Opx1?y0M60)BPSOvqghyRC%_e7(C}l0_ 
z653|fRmP^y#>x21mO_kbtJOWBa}NtAp?TKS=LyioB>L6$Gk8{ZtbMGn(a=ayzA@z7 zjv;6IZn^7vVoY;d?ZE03l-fk1B<|QIHe{sSi_W*W@)Qy3WLWGfjEz}On73YI{no*R z&*6A2oU^`f2sS+=R-NOjq|x4>|Hs(-Ytp90!Fqqs9N`7_`x<(8xV{gqcBg}Vkae=- zVv4^HhtRPk9!|>BG5j|3kKIo-@~&}=2;5Xm&1!_7aDS;d(H`?t0P+LzY!z(s3_g2a z#<6N$T>hJtzr*Ot1xZp^?I%}%hMzr*%Sql7A0Gslx#V0zsz0@|KhDjBz&QB5NY~Hw znYmUq7__DtlSsFSO!`aW+J+LIy)U4L`MeSBlCQMMnsk{ouSvxf1MPmGHXLwyy` zZ_LAkPCwVCKx7jPzDs{$UiK#B8C;&1Y{i~)p>qgNXwQRlchZpmJp-YVcyyuYQo%XT z=>K^(t;74qvCHGQ^@vt>nLpf0(xYJU2w$0@g`QeUa;MpRZwYNab@W0AXD`K3e5)DG z&gY4x$(px!;ZjP(uj5}!vE?o{FGBz9UHl9ZRr&JoG;bmjWX}JM9_o`IcRYFHwF;_J zd~QgK-FKOr@vlAHvzxUYY!88JW4Il{1G4@q{7>5N6O5f_h@}(!PK0j?{bYxC_TAUh z#}YbZwfZ$8O^QnV!*ba}u$b@ka8@Habr3(^fkSs!^lJ6D5xu)}x?y})z-7E}D`;PvfOY8dZ$lIz@Sg9^Q%3kn}Sha=+^umvp1!=R+|56+u z!zZfiV+#wP!~2phFi()z!E+ydEfjI8?V1Fo@Jq4lzp@JI`4!!Xf1C;^LDQq&&-{A3B8YGiLxa90z>DrcPFT%6!m9xx&ZdG zVV&Kfc>|dQ%TplMf}LK`OEVGlZnCst#i`EU0gWUky39zQ_ns*t{0u#kL@;YxbB~*{ z!~e4iFeR#v(`r&qPbF1T-)CKL_OcE#lAnls?cvgz_IXa&Qd?{N-bTvI6MNF;8F3-W z6ElMvL(ks$kdm=oX_T2?C0BmVGH=8327P7SOu6AF`1tc|F-i~pN%NNW zAAoIUN1a))l(@8*b)IL_>_fUq1nt3(T8VFYp0N+A}W&!t^i8ms}WQb&k%1Nx7{q)(XJQMy&yj_P(@%w9ykF%UNM6VZ!IP2NyY~HsY z7R)ubC`tb(o&9guZ^n^oSeomu6Y0aQ?c}V_PrLc{6juJN;5!#PGSwJ)F;oWQ#!hYB z57RdsKMM+F=v9`SM~jh9@{2DZQIDSUM3j+6&SS~*HVL+4TJCqwW%V!E;sY`JIsLwk z?d_r4m`3ycj_~PTmPvMlmOLsFey!K64_amfEsq_wF?NzR7mET1VZ%V4vx3yWiOg4t zm|2&3E7m0e+_xmn%9iYVOFfkK2AVJEdogs|VM6vJoW}2$v)!IBztt#qmO1Via^24-ZlYIj z?I+!3C5*Y)Raq<5pHClX?sbuQz_mu!>-6ygnKP?jex3yrdFCFv-S zz|Q+zU7AHR^8Y1%9s;qPe=J7SCv}Pxr!2w}`lo1P3cJVXU2i`;n&n>P;AFmeqm{S4i? 
zdbwH5xLFMQjs|au30Zk`I=y$`>03N$4n1yjU3T&=rgQd)EWw5+w0?|F)9CjgOH5|T zv-DiT6^lg8$<7_+?`@>IgpxQnHYVyzp@8=j1i#;TR#ZD;iBOEJSWc|PQctsuK!Tv$nNcKG+)W0KWlHh zn3c4(%|(hcaXDqAtLra2!fz)>3MuUcp+|8zsqflo?=~1##^P&PHoVS0&NImW288;X zft2vvF8OG_VV5SN)?1E`=k@*hROS>1koYCM$~{~=BYN(0I%zYdIlp1a!P=Zof~{KL z42dIHHTNZ15AY0&_a^lUmOO(0_@_A_(T!6f|)V&TzSBGiGT%01%}l3d{0mBxl% zSaTYk+T+ZR5N^(ks<`SV8fLH3JgD?7#N5yQ{UxpsEcCKoKh0d72y-~EzeVeB(Dy!L z&_J3WAR>GN#nVYQ)wte=d>@FqnO7vE#R`bL$KH9{pG27#XuTr|4rliiJnqb&Izgv0 zd^*y8HPo)v&w1MW+>GlQOdSo=muQpB3l(|oAvjbKuReA4jlAi4T4&eHd>ZB*Vs;D% zvAh$j$llq3&`IR^hE>b*kGzW*WG-_KZ&^==0T^=ynU_N6b+Y`e=Of9Nx%5k7c1G)= z_>pz@d7G7WqpfjrxxP1IXZ96c>5R;O&xT5pGJguoIs7EUnkqc71ZDFlNTy zJxM(eU;lt<5*shlZdd2O;AlTG%z)oa(SI7r@A7R+fBi1L=6zZ@?A}C=O?t~dugo-V zHNrp1AJ_VvRmESsqC9_^!~Z@vcYH-JcWdtl*i^!-4@kFvA%-+0{fUld9nFvM%WAix z5N@pPynnoajEzZ|5=+PMy7?HCHKOf#z`5lsvEm9c=6-Ch_OqYtBrOcl zYW7Q{Oy-{ju9i3EcGp5JJpGW)lg+??hFqTKz73tu_ZPn%$mb(|OyNtch zk#`lHU-NxYsGmWLJP%&Z9$k6SBzF9r*Zo_RO$qoEnVHL?!)Z5{_IX~Jf&gE$$N=Nr zQgP)=v2lqQ^c!4Hr{ykQxFs2bauY^NG-aiK;vJR;ugc`ErE&9!x z!B+mg1(UODFk@cg$Nr8cRenb5?$CLY#7V}QQpVYPnhaXm-5+MZ#>TIRZD-=}5N&kV z!$vqhE)s0yNpJY>FrI&**qytyE!s)RyNOyX#S8lIirkat$x!mt^<~k|9D9#{^w8Gl z5II3x8}u?>Z?B6lnKR5LbuDPW1dpTWP})qYlzt`{;ZDYe16cI-!k9G6S^sFgD{o8D zjWh7&H+Ef2gQRTzLA=S{pAxKio+!CMA9Y-ROCgff*Y*glKCAcF;aS0zc{;KWoOY40 zlA{lbXQN1eI@x{_9a27TJ_)}qc;qSkEh}YO!|xa^wRP?h_;eX7UC8$5i&WX++=wPG zXz?lh+=)MV9`hqk55%9l&HSJk3DZ-tv?mtK)Nk@>JvD$ zcV0_=G?bPpwvaWGS@nCR-tK~1o_dVNjxPF3Le(-DP?O}v==HcG1GUwk46B{99Y0#J z%$HiJ!Vlij>%ExpBAGkUXO%IiCk;O}a-Qz`T{tllQu86VO@!H>)%W+T^>gh`*G>|( z=b7PQq|K2c}Sdrvlp`CmF)j2udmLE&$4xr#ctByDZDs$PboQ) zsP`-E^JeBHEZ@eCr?K<-{(6?J2V>XXxbv4Ydtg|(R-KLLLm)7g)Gf7hE3mO zq2;bp@&b01FkwE-vyV4#%D*G`K4g8uS=p!f5%2DfJ5|V$_rxE^UPk?1!p z?}NW*JF5?^ZiCemj%6oiGDVFj#IK;c=xdy-crxk-|`zXd#|>N6{g_vIrGLh%E~ z-@}vc{IxGxZY9?Yzas< z*ZLIFCTq%7eCHH1lk3FuJDi!Vu{vI!tK1D4W}}CZ^NbS@P5jO zk^BP>(P=3ZQowdJ{#P%=<)3h3r{4wQOIB1r!bb+^buJy&k!@d=sQ|65G`-K?m3ZSc z+JB{wnQU}359mv)_9D%D{z{IU`(R#+E^j#_C5i6U>-}(TDw1Wr<_YXph20v#c@1or 
zW7Y3sYl;as!kJU_xSSO}5cyMZv@u<)vBDLkmO^OXqWVaf)nyX|&1Ck0n}4CYh{}Zd>5q_u~I5wpt6z z7qGCZqt_U1+QM^xV?|05%z{8N6~Qs4wB6=L7%c>P2L_PzuqRFHsDqASXXloGClGJsM`6M(u3UvjW9j~d^Je(GfDP|(=6F`V1xAPSw3J*ej=3+8Ax}o1GNK-6Y&w=* zd+?eY=-GvR^OiCB`j%lqvRY4b>~5``S9s=?a>pO}lOq&GSDWHOw^`Y9Z9 zl6lNieB??RT`tyK1j%Cflo@ReI1Yh+vXxxzuHhfv{ERtJ4O%VNS1*j|g2xN-=P@4M zl*eXOb?&ow8835n-t=7s`SrZAs`IjLd=$I%V)@<0^me%7SJx&JcxRs3!icffPjdUr zA%6u{OOe)wEMC&L)s0b;aHA5*|ABw@0VW5{T1YBMjqjg8c_~@TvGp4E+RJx4{I!f; zoALE~9z28vj%KeVJmqh4ckrnStp0}maxCfz=Sre)3Q?|wTob%cN%4!s|2*4YA=W;r zt+#o4dFO5+OZG^-0k5l#70HHiGw$WR{{KbOoq%mMb^im8q9PKal7whNrZi9y8Y!Ak zAtEXw6qP6$dV7-!p;D13REEYkk+Cuqjb^Dxsc0|{*Z=dq`~9!3r{{j|bI#steb;xb zz0Wy&uT>sGMIljJ97ra$6QLMS$}^lq_alvoHE2^AZbfnZH{Osn1P5T-U~zp745r~| zFBZ%F)=R!CMy|>{;0j~#ChX1H{JY5WJMVcKf+?tWY~h|DcYmjd5ld+q{j&=&*?;o1 z^9u0iV=-?QOYVOH}Xq7 zdK)BeW7qRpeP336%FiW`IhG7BYyVi-f5tD4!GY|I{?wV3MBQXFjpt3p>o9&C@2Kba z4AC=BqgO-xVbLJFF8k>1e}%K>;Qn<+h0f0Tn`N^9wjoWjQmU9S^Hcx+hWi!R^)2c5 zta?t~thvVjtl3^xkmP$Fw#m1xNwA+$V!dmJh)%UIr=vdpr_Yr*_%9n(a>cj&9k_c1;QbKO@52Lg(!K z{(}wHYiBUl{VYz*H+#tw(9Q6ES^p_-`WE|V{<@w8uGHq?dRQ*<-sYUHdi=~xU=F?N z=;dEmWmk4qFrV-GZQ}esu)LcdO(B-Keb%XDJk2q zERel5uWEUWHjl&2MDw~Zz8N-o(=);OSMac`@Ed}K4e56av_90vPUqHxP%T_cj=W?& zxEzZ|neU}AaeLn12g;*Ze;%*=l6U?CrSguhBK@h5uIIYE@oPnfH(0NX&w2B9ARAAG zNp0U}edln@-w2%)EI9?Q4lcy*Hd^R|ou6Y{E4<0N$%*W>T5ri2a}upTW$gvzOLR>} zyAxPq5bulMBwO23u_w>VKlS$*pT5_7GK|g9Yb_ke`{wM->VSp+GZtl6V0OJ-s?S(& z2d^9`dQ`!plrNen_S_2Vo%(*vn3Bw}GhlPEQQ~{;w#BBo_%#?0-(#JJ9IeOGlliDD z|5#!6S6+|Fu$po{2Njr*b=vRnw5%b#iJ#Ag!7x4K*+4gE)PVf&uKvw`)SASrjTYHxoZ$hT@~`FWo*OPfTJyD?^_Ywl!?_IOv)-zkCjG8R?Fmots2 z*J9Z3@cDyBWiR(K|L8w^=OW)vQ199PVI?i)!d*O_eux}#{E#{duv^G+tc!qC= zVVkk{MRDyVHaQ4F$(oex$bXA9m$6iP@%>Wsq^#i0*!?>@rnJxc!r0UTHrc(_l-}ov zjjc&?1{OU^&mTpkdyV}s=w7~0j{u=Jnb98H}g;ngZ7fmbb@d#Fa5C%8; zv{m$NVt##`-*dE+)qTkaTgG)S;7euJE#aFysk)Ho*Ji<#B5S1W%-A-u_yzD90i9IJ*kwWj}Fm5;dS`S9$ zw%>t8Tf~?0TFTyo?5)nKgMRuhh9Rem-j%h{P7CMwt|Ii4F>9uHT9O{gSf7loDNTD3 z6wAB%Au&2Txr^&F8LRdcEn90l5}pFlWb(~wmaG$5j)}=AnKu!)`czdb*|B{oJeRWn 
z-UXKA&Db;KY=~7)&?`muUL{d7V=d+hce7zPZPur6*0^NfMY4oWf?j{;{VM8YO?D#V z`y?$%vauvMR(#EBk!JXl75>>xFjK70TmS25y^E}QbGwgzj`vg1b;+N3jU&mEnK{?R z^e9E{;n2tvx#XwaPJ)!7&sx^(_a36}M1jZfD|@Q$g+R)9zASpSDMb9a{*P9fpTEjZ zt;m)vw4eKw8tyolaUVN5C!GMK(PjvkSjfX8~V>EmmS7q%r08| zBA(^R$h#1%&j)TXPCX#bbSKGV*d^XK!mwmn{nn^-IX=GSC%LDq(LHZ4Cu?_$Ggm?5 zMN$uh)^Q|VA$E=;WuzHJ!tBP#p7ap~YwUv?pShpE5o_C$ZZfRaia)<;@d8@ZCG$5d z^sghwYHK03e#_qPu*5(}zGLLcnzS)^e3cmT4_#)l{83`v8~*Cbud*kvA4Jc`(mPo; z(co+2*8}wZpT3qE(I@ie#svnwLFcZn?Ml}Dq4hN+v+DIY_?9=O4W_{gy?p@NQ_1wO z@gO-#PZf7L?;e?cO7B0At_Rs1TAoeyJf=3nDTvK^CN zWXZ8)zLESFU{mf9AER6HeqYQk9a+9Md?$z;`@tc*^gh>ro_7b;#dMsgm!<_i9fCuR zarG?Y!g;X2-PwPW3%$JJ>! zaI$V}qP}j$g|=+=fh#8B$qh!DwdCkdx~<0bP5z&%)wWt42aTJ^bepT2y7mO0+WVLH znIkZ1xOP+Yw?0d!=1lg!<+*89hh&9LvbFQ*ebcnQtxW0zxwl=%%cv# zg&D5;3mb=PJ!?e5a&xS(s%XMOncZ_JM zU9gaaT9S55;r=-J07_{i^PC?=vv+x9e@B0T*~Lb-tllrr1}BOR;d?4Xq}b>g+WVV_ z|3*0pAX#!Cw}4+VA-qDiLyg%LSv33Y za)*(%1AQRzH|Fjj?@;KEhDB<^oFP7@!2O3%jxW{}M{?DCHvbmydx)@q8sq1f^`xfD zGmbVjw)TYZHQMYeru5}!%Q1V3XptQkJNz|_q-W_XMYvzb>3hVLdj4O}qgS!e+m2Lb z|I`)eScna!$(!7WQ*kxfYhNT&?)1ltU1P|ZETKEePzg)+?2ftHXr1*XeOM{k>$3mt z1U@>zb+^GL*-lS&MRJp7^W5{+9G>1Cc#_wNYH5c2j*J5ctyGxAzibu87M<4S3kKdjL^M0b(1kz3**+P=H z7W+>i?^KwMbM_sMt>u9)`zF~p+vCk*QS4!ooXVofr9KWBz6$S39l5-z8OByiS*AT7 ze%{gSw;yk`%KUCK?*45=f5COXv3}m^bP&l-p#AAa$1?QVCPHK`{i-Wd!}4J48>O8O z*dTB0lQG~~608tqwu)g}v@nmyd}TaNu7bPyQ`Q4if#&o6m0`oCNFbWX`-R8PWPcCA)!-7Qa)yr=-4rA$hK@57Vv1Y;YLct&0+!9Ly|$2jn#NfV-52KvY5#Usz0;l7 zDq0MH-D9-+lXM4>`ALzeHTnO;o=u#e=YW5U8q7F}!;U0-o^DT? 
zYn6n+3mA6`ON?Rl*XaMS-&LSelip{s`_FJr9e|UJh~4+ zD%C;(`@1hEoMjJXIQklz=P%NKZg&DBi~(G&sxh5FtD$0 zj)U}2Oj*x*sRmHRNVP=#eos_61UFJ^asg>;7i3KV_9Iv&_x|g};X8f5UO)3;&=m{U zyCTv1Up@Vx-Aynl3)%9p&0XNW^go|PuZLmk?GJ-zEN(A3doHdg^Ty+J zO!n*n`2Q~dAHz1uVp|pxWmtP8Y?C7*by<4Ty%&}zlVMRYw}O%DRGQ?uPdP^?@W98} z?^g(1;H*3`xvkK9cOyquBqVR$!&+*}=MJPxC0CTyRz*E!E%XGK=Xui2&Pa8WefdG& z|9?)i(OOOog&z7iNc5Wlfiw7L-X`QN@hMtr#^-vl@}>eClF1<%96y3->X#KEWieK6 zByt@}wqfKy6Oxa>C#!M}hQKmCr2bqP3_hLosc`cRbW*o4*-)}xGb_=*F3jU9LMPd( zQ+)jyEln`Cr_N?S(J;9Zzaw1>SnSe!-f_lS)3K)g#o`RxX5O=H>sv(o`7C zTFHG#dI+Y~6XhO(;98MA`?WT*-355?wfTP~`llAd;aWI@b?#(|)OuShwj`^>1J0T4 z*sUyj4~;*@jkkE}P2y;)mc5PWSz*vvTU-2F%SI!_h*El(2#FhT>ueFO7W@Z`hnGUQ z7ag-_b23j${_yws^u=Cesw&1QuKL5%f-#UNj8AT zpi{{i_n2uN4zJ%Zvb=LLp46qmh0fh70-nb*ukhX{=unQAZD8f>NS#T;e~o&n?$V#k zSMli&jdjBu{V$GO#|{}yw&LY@y&XpOn;l=T^{VWZH!B~K?P6$7q-ACt$tje%Ras~} z$j7q|KHNW?6iwm$2RY6b+3pgPQ|V@?h%gKjTC+lGR~@I_*Y)~`Z<4?7A1(g^x$@3m zizS;$@FJ_UbynVfJuNn5w*P`_i^C=R+82>DZ%bcf|Np`w`)kKI+6>>npyM`3oucoo z`mTn-smR!r6?gl2mCdpuEOoPn&@xpJF4AwZGBqJ#FAP}4E)%rB(UGi7TuQzcd?*!! 
zvxehE@ilw6|BKmYk~MYR=Cjv$vE(vLd(4sCQ|=FmUD)^-xj%E=Ozi9~N~F5T5c>QG zfqCqeogR6n_!jn@3d^m%dAZ~#t3m4 zJom=(hqcv+w;Ya}uaUVDNy=kw-goaSmUMty-uO+x{9W3gUKmUN6%8)H?lrE=+szkA zJjIxJFhsIXaV_npL%MWfY*>MRC0zfGwtKTtF+IHofp|^}Bg@Tj*-nf3+L+_4{_K`a z2dTSqh_R@;o^tNNW?{8`zljcSi+-*_<7vIYJ z*2S*dNxJMw+amHNPg8rAOO1zB&KUx;tHq@M^3>GXEx|UE#N1!BxQn&+b$zl&9Y(g~ zS*lF$lc4avxNrtboTbOhA^Z?dB_G(SbnOI_&$OCn&B^wd9oNS~ssZcGW{K=LUXI(@ z<9dtFn_V-@NS4e3&%ommGlbQm$xhb|*27JHXVrNpS0(G;2mb!!|K@LlM4Lb9nS1=$ zGc__hHI%e8qceKu|^GsY#SY}TJNBJJnwKa)r0iO4wY{oIv%R`pF6&0o_0 zHToEiw=;O;0?baO$YefQE=DDnM|SH^)bczYvW&&=B-!OK{GDwdc6KtebP|2fADW=|Cqk=Tx$#_d{2kd*!y=@_?b`s4gDMOD!UlJWVM&H*AU-d zfO4Kd^@Ld7-`A(zW@sdvce0*VfzAwPO@LqWo$kl>C;9giyt6;IBPp8lp_c4@x4B53 zMRhjX&lmGvrPm{PJc>03u*Qk(cbn@{q3~AUAArB9msl3x?uFdpq|BB(a9;=d<>{w`jOM~nfoUFAm!jFmYT1>Ogpg5Cm^I=trAHB*N zi}9_yejf3Cp16FY{~w)OmW{747X5>aFz7ZkYeSC$gG|@VTFwu(-n&?V(c4+1XEBmOc7nVhx^gC>|y2#1s-G!)#l&`9GDHe4t6oNgRRE|}QrN|=A*oA2QC8k~|@rX8kUqLu6eNWR^?b>EF~^Nmie z@$Xu4zUzt)+3;H5rsmjC3^`9sSpd_n7?fuO$!(aKeSgxnCG+oOn@Yc_M!ZT8xy$$G z`TJm=nlbr)@pC>d=ABG(z9gf=JP|nW&(~s1cZhv}yQRpJ=Yyj}rK6x+ik5jco;rnd zv@{b|2eRth{4CiVYeVQJ|2HS|t9m$Co0IuSGBUqU+i%I%SYKI1eIu<(I5(MhC$N3? 
z29Cq~d+3w*uqTo8AyS--RlzU!OLJUNj*MN5CVgFhE1O-U)$CumSbToizpIQ>RbZXG zt0&?^s&%!3z{Ofyh8JVlB$>fSk>M^(-68_L#nwe>a|#qTL$D)l7O~dR{4qKDcltkf zmANBH?u$pUI!|bRrCV0GWruY;$fdSpFJpP0H%}l#GcCVGkFHQWo%EI9zl{&PrLV`$ zk?P^go3yEccb{r$DlR`D&h!?eZ_wJ&d^7h{dGB2e!#ikW1aynCX7=`GSMi%-L~}ZK zBGr7O&b^SmmKJk;U&D<0Ge{-R>~WC(*_qSK>>gy5%P{Xlv9LJVI%33TEPj;D*JAXZ z9PzgsJClthJ3CXMH@ov^`bllSp*-YL((L5Hl|}JM1@`qt!yNF4>-T1pNsfaZQmuMM|RuiZepytUPmZQ@?AqRCj;57 zg}K)@`esjmO@CeJy4xU`3Rg`FT1~{%tJrQWUL+U)pn^{%+g|EVRHwm(V%5K7O-|<( z?!=qYYtO!>o3;C+K9lY93M_fV*mEDLukb0Av>ql^8#Dfjdd^e!YHmn7)WEtG1ucP&t+%khi&W>70rLmhu z#S*NMRn*B0F<5Vnc+2TNXYcs+B&g)*W_J9*(RU%5+&XzKGSmNW^P1$BSxBxQaq1p2 z9m>kt+17%UexPX!@)w6+3+%5%;s$zdL((ifnc5htB zT9kX;3#2}6)~6)T*khvhY2-f#>rZc4#EAaf{~=qJPOV{IUHh3~chO@qpT1+nn5O4x zERi>bRUw+4g;kB``Jv_d>nxsr2zfVT5@N_HUBvP$TI@I8&W1OTf0Zqcg>h79{I{H6P=^HO{@61uu5y zAdxyV*Rt%9?7?S>lI7TM18pz%{}+%sK@5(s^`+}YxRWQbw-)TXl8xh$85{G)XsX#t zG3+l2m%PJBmGb|vYu-v`RcZsREW`Y%us(@}9u;}cEOo9G_~WyFx4bx?+Da z6c<6an+^Wvygpibjy|_L^B5jF7~-j4Glb?dcvTa>C(!e0pI*?zjUw?&5I75SUVv3R zD!VhY=chO?$;#1Wty%-+;re*6FfL`qQa^fTEm^V~mob)YrDr4j*%!n9a{dVr%R8<1 z{*ULT+U*8BdelffLWIt2@koqMPS&yP_@Y>OGT+Kug{j*AlMWxTQyFY4O_w}#EK=Y| zL!T4TrsGU`(IoldXFYTYoxKgq~i~1=LOcArmfV3UJBm@@IK0D@(s++ zbGJQEe+`|V+#e0}@*Qpeg#V%Z;xbHK1(R8%nF0SihrSm}F2%E~S??w;Bwl>NVtFgk zw{Z2p=1S9z>zhgS8J#QQa&~aQ?pB&@Jv>k(wBh^&Cd>LtS7xI+Y@HIdDl%E`d%O8`xJwM7D z+*io@6q$Ot_F$T2z0DuII+dToUmSv~BL!2sRrt<8$ zu>P9 zvAnrhH&ooNjXNzxibgD*Y+G;pX+^4JLFrAFu~>5^j#R*kym5bDq@PrH@>Pv2^XQc7 zww?J{>4M}_Sv@;XABJ4^dgR?v?nH<2mv;O)IayNWst9{@WzFPGNv-EK&Z~`k**|dw zOy*(1*Cfk|fp10hd9*)-RLLclDqvsIqzEL2`@Ij@vs0&XLEEO7MAJKQ03Ie;}&wLNe1wmE+p zWH04ARrpHof0Gq_BYDsF&4pyoirB>E@_2KS&&l2No!_bWcd&@Njs92Rz%BkYDg5-q zfMH@;c0^{^MzZwPG{#zANvm%0pH`vPq=QbUQz=hyAY4%HQ6Ee zsQ9Gl=*@@6#e?Gn2eYB{T$8}rKUB*?zN?4?N#!?Gkz_Le7DIkmv zHcuvtGNd^jUU}-CJeF7B;Mw#jT8L}!k!mEpv*JArzYs@K0V0);&w+Ht;r)znL-dg< zY8AD1j)>Gyi?guv0^g)2SsQU;DM_x)wIP&GWtzgC61=S^ZZ>EVzrz??bH; z|KC7|5$sWkq({0wYpwDmZLag{`ajhH`sypYKXYF-&N!Ey&U3~5+*h4Q!ZUfopW;=R 
zp67s*{r`}7wLnj)MDv3__7^k5zXk3BpM!g27SD6@le9A&|CSqz7Z?|h(CZ0eXc>5B z=g<(nJ}=tTbmkqt`_}biY1e|ziEk3bB3? zTMlBCV)&Yhd1vsvMS8se^ZtNeX50HYw{Ai1dYIh{qUD^mRWz?i_sBVcJbBumeG{p~ zeKw{fXUWUj+Q2$58u|Aob!yR^WJJ%?yUye|)bSGHTnj!kgKq2aF*VL7>GdK^h=-&C z)n;E`+|Jd*-X>()S~07vV`=*>aWmSrVGx(^( zyni!Hlgix9@Q}HJ@r|GjA^!V#-x`HykQi?Y|eEEQ4pFOalzd z%A;p_`2p;je3~cY#ulD3(P)vHeyQ!A)ho$ed^f*%k*{U_>KcC5QZJbc{>bmXh0t)U z%3h%x@Hn+9`$6kyBWdPeqYG@>Q&lRJiN_YawFqpo8h+0@&|3Qc+NgU4nPzJ7A?^Mv zuH?;Cs&v21lBt|~ARoy3#gp{>7%n}fpX;!uEKkX^M~H0MbdL-BB2n;{x}qNi^DZDIAU_@Af$>$H_UX=l?anVs+U{b_952*Szg`j2)W zf@?!LP=)18MU-d5+h|UzmCZ4z(k9`p)XbPsn`&zMep~ zdjwAKpowwqHXkW#Vz5SKFP1*VUk!$C(({rBryoHT-&?q@@u4K14WE=qX ztlVrQ&gPxy_xQ4b57ea59FgL6?N@YsZ{O{w$F{T@g=_zieu3YKqsbTaBF1LtV`}{- zD?%#8RA=RjaU-=pUd61Jvh;W8BI+-sw zX2H*#ku`*Gvtm~1Jc|37-DH<1Ip<@{ECCqYfcRx>fly%O6elylCPlofIRR==Z!SD)squDzA zpU={Fk?j=xspj|;v+ZQ?n;{Ml_uX@>`kkx3rd12QR3-li?d=Es^`c2Hy^VK%Bi_}F zM$Mo3EgK>h@+dKa(^SoP8$U-^Za6V%tyt-mU))B4SqHWf#pk;^!dR|KZ3a zXFW@bC9GSEU$&=bKkQ5;P|uAEPu(NS@3@`k@L3OX8SWj6`&lpB1;6%o=0ve@30)55 zGkK#i3_hP}a~S^YgnT~{V4lCLVtGR-UIfR?6_VXzEPIsq_iK=uz^8lp|4}okm$mp9 zIsfD56(}WR@=Ou$C8P4&Fqnq1eb}Hj=Kmm$CIiQjM*r+X-+k3kWBb<_ zb&L_}c?j*xE>A;a0^}-?uZ4&eD`wSLq)YCb*7&nc%sN_osa`hLdC4j_#dX7RB6o&a zK_MMzx_%Bdok>P6YZbv#RDwd$AuH!DiL?J^kLTw51vV=;#82bqfW$Q#cyFuNPO zrCNG%NUo#FPyTADx1z4tU#qEKor-dqeJsYRS#0_y_UG?<=An7l{ufm8MkRODEoonY zKW_1TcP(bkWcE^T@z;m+Nj23X+DRqugYY5w=W?g95st6wAz8vIYk3I?hT%t^Ej|H_ zAGJJ^ZNH}bCGg2Rr!shYginvsDbK1>-RKVO=bdIP(hMX|-q~eWKx)4&(n>RtG*y^O zvHd3h7_4GUPhkek2qZ9OV0e(kbeOZMO^bG)L+2kLm;#W_BA2@ zFK^H4u1eW9uYXZNyMH>}K@r!<@z@Zqdvg*7qc708QOX!sfksbN*B1clqC^^Hf z_I)WlNM+Tv?7xbBLknYD4>OGtw9fjktN{DT*;!SccNSS4J;#@xs7Clp2 z9bjMF@fBK|>t`yNlIJ7u7Z$i?16!@2>o9(_O0VOc(}vARcC%L2@br1w9|oV2dP_!=tZLm%o4kK2>R;9;Wi@hr z<6hQD-c05`n6Zw`y?McW<5*VRUWL1B*)GqE7xR)D7`)rLmx=GG3-cWN-70GRfT8!0 zA-Sl_>-P#=PwlvS;h*Z)@qwuYZ7Ps*rJm+#eJ!6m9b0zr*`HyMj5fcMe;DrGM7ypS z(8i458uOn!cxp|pSL9z6>Gd-It>#nKv`o|c#5%C;a&i zCY_-64c0bd|I3Yr53tLA1z&s5x5v})CTx706?$teyKZuaocyFs^fCk=@_g%6SJYzH 
z>v+Q7bUVubE#RD**7I2Gdl)?C`X?}T0j=(4o5A|duJTkR%$@932&D#IL%8HU%6iNm z?3>QMZ>_an^ev;EEhOEo-J5uP)>wCg`b04`bqupY@^M;S3-Rk{l1h9tX}cUpPSfuu zT5Q(()ucF1PnY3tIju}Hrq0t|s;0k*UGw>B)|y`{#ysiVF|7PE+4_o?P1rYCcNXgL zCf~foQ*y7AeQ^!2Z=P%SWCwW?BFUq2GM@I)^Y`r74^#hi&5eAa0T1lUXAZ>Q9rVoj zn(7&|-BsQ~&s5w!%!s}dO0&eHokrE6w99yZ4k>#=_GCTW@9freKNS;>X64N5vlD(f zB>y%yoQ1T=ldeL?P9Eys`%h_){+dWK+o}%IXn0|tG&(eMwoI6vGsXCDr4w2mm(x=k#F+Md??5aza>>AFS{k%bH5BXh=^uX-_FwP3M;Nvro4W;>%Gu#skC`Efk;4A*xe&wTzf znkHEdKb3{s=<`}p<2KeSMT$Biz;D=Ap)jj@iq+nA^bxImK%&&*>R7l3y@3Dbt~Z%x zvM+33$bF2jL-@xLdcI6ty2SCW;%4@iY{2qli@ucayoNb>&pVA~W9jyX8U2sWO0~s# z_`e2b|1sx%hTS`n=OMB96}qRg+A3O?;Gd-&|J#*$zMavjtx+Xe9)2;-l;+QoWCi|j z;e-F>3)xAw5pT}c>rMJhEuYdboT;@3G3PkQ?Sy7y8eI#OI=o~**C$`@?~uAp{HlV@ zzwxF|oHdNU*45u^@iR{^KiA?Y=+4vXr{Zl7a&*My@tC}mKTPF8UpoE^DfVNdZgBmO zMruD2}e@-+`ewO6CT0-83*x(SopU-Pk(dbh=3*x`w*(QD6$&RZ? z@DNMq&SH|kTG1g@Wou~nV2G8%fj7mFJXLPXI-6LuJN-WwowLv9Ha@dS^d9DD_8ezs zb&YTS#;@cssht7!iw_0Rn477FN9x_>iPcn0L!7QnY^6ydCf~u{NC}4alIe*d<^fa`0yNz zn?}p6G#)|6{{J@%NyW;%<2uLPR_dob?o%I3p5wgaaIVfr^Q8AksCOV?WgMGIt~-oZ zeSKdYZr?jHKm=^THmM%*1?hLuxI0hDE|Sf3s>y>ca_*j0d3kPmEFN6QCgoW*cS-BS zr!&c$+5>q{*aC{zvPAaA^`%ADaus2bU*R)MPruS8dzdmK?hLJ;cuQ8d%_v;mOdPC= zy$vAJ1WU3GI?rhb@%&EC-Ta?yntA4OwBCyqX4TncxXEahS|VvT_fA8gmpSTv#-h}D`dWKUv11Dl$iB7g zpO_>TG&HO3FHRgr)ygL%N;Q@|lgXOhk6d|}{!$_JG*XBhfu?P){bo?QA@o_NhTS4i8rwX;PUqIm~8@_!A0D zeo}k?fnauC3@pTyf1o;=j$imIvxw}H&T5pQ`uH7VKZJGa2R%xfC2(qh&8xNFh8Jw5 z-+6rW7V+kN(Ju9tQ&0N?vQDS%STb!9)l1N6CH`gAXsS3)apqv0O`Ys3A)h(o_ht~K zpqKXvzeDQ;G9_2%zwD3|@yWS8)_BsI7v#x9GPUf_gR&+ixucGCMxJp$%R&oSHMO>L z&r)7*shU=oFI<8{$ySy;zGYzitro9`WVCMxrR4IyxNxVLl`khcH`Ob%DrY}P%;R&f zh&`?NYHjiKC$^jGs*6~0BuyKTvcEX=7GHS)U-P{2CmK~DY2J(MnWf!FhuQe_t$Ep* zunlM*T|D#-iU$GrQa|!se*fr z-(6{$Y#WPc_K?3)$KeXN%!lO!(xzI+YVx)ml~K$G7K@h$l&E@g|u=#r=U+py{bV|<=ghM_<3u!Yb~1?o5Sn%(?W z$u^OFd*E7selQUq|I$Lnuw;whpAR>7)kLxSXYFOJVcsJI{jJ8$S8%YuzpFt%nSE!` z`duSvM`tD<=F85{3^6+o`(VpKq)OF|D@V`U14xv0yLI8&l=uFL!@09d_SZ#P zIvksl`E$A&8Q0RbL$trv# 
zrd1~8?O6OSR6g|0U5Z(({S}>^M!dBlG$`mXN}{h&(SzLK%T^!Ab!x;`0$ukcrPlC;F3Uub*)OuxjW>k9V}zq0Fm(&Q=iXxJntcO|`iL8_kE zkv$UoyEd6i?t#oOIJ}3^dAFV_52?g+3tv4;4>R$9hWmsAM7N?^Ya%*K)BkE`^m9D< zULP@w%`TLVt;<>-V?l_=hv|_`DNFO@LSn8S?n{%%U+`yINbyHvr7M3 z+-)d=)N_2Lt0%K&vQOt}Z&tQ{U_L!ww91p$WZ_IT^mb6VQd=+3GW$_hu+%#uKrKiBKDQ2F>ok*&@m3vwYXiTF{?0FVVr|{O~s#@UeDAazv9lq0ve96RQD~~`KI0KRus7Lhv!W-J)>1ubnk(NW z^CohAXLQJ}jKv~Y&e+bn<4HPQ)cery)O4AKi>Z8kov3^@R-9vgmo;DqIhraWLm__{ zZk-0buCN>e!OpI@2$zdvRyXID#^BVPJ`dY7Gwg^Rg9@IQeYkU3tR26|n4hP5#H^{jMjKgml=T{oaX5Rnk|iV~Ugq`tm}eb} zZOIq0kvAuQH-2zQqr^Q3f~c}n*4jwNZX zc!FQci(p=Z5zzadel|)?T2&EveR;S1MA_Wevz!+s87CUc;(u{ zJxM=$9tEAOjlGl=Mz}WX1iHW^v45}@^2Dq3|J{=f*2`V`%}UKwnR`V1x*34Kx|Ji1XkJXLKfqO0aHl&aeFWX?PCOgNRbY24%zL;h zdtH(XBP-U|kgT3Q?}TJB-?!3l-hSlSTzPy-7K!GplU%Sx^qy6qMSS-eM4oZRS9CpH zlq~P>)Op(gm5YoF86D09^L&w1Aa>o~HH;hTujREFhl}P?9 zO-|$u%SDWu&Pn$Eli2T3ewx)$WeU7L!!^kza5k&vxnmj8u(SS>&*D&hKLex0o*u4w zk_Gn{u~X?OmC>?NFz;})^Cs`xllQlWGp6fde}Csq*LXUYbzSNzPr<*Nu;M7lzQhmy zHQUNslox52>I>bWRn6R`H8~giojdqdzE391{}}^P4d+@>>tk5Ikj`fuyH&& zJ|Jf@WISv>k}3@+i%ePBpA5*Es~?0RXOQ+k@P3cvduicb{?!!Qo)_K5!sc~MN%p&B zZNCo!eMFsiS^8r>`8O->aMfFd`A&0JY<2caq<_@Uq1q~K#;{%AsiXQFmZy$=Q__Bi ztCQ*R98P{u!Vj^u37+)AtD>4{(E+k=ILFYfSv;V<6r~f>&KG^4j zMS~Ocn(?oaUOKdV--w=(^Lq$vgnU*|WUR_Q>uu&|%|zR?v~;%~Qp2JlqltNBA#v^UUds@ZOZ!qb>PnC%Y}9WF8sKcv^P;&E$pjsB!;@4DIAxv*gI z6TcQw-V{MPIgrm&Ah8l7K4nbPm}o*IycsS zMGQKgMtg~xBeDN&{(mwSE_O{DSS17MT=xhcu+~Fta|EU}EsT!OYx60b%PQ?lNR)RE zc@r_2@2tUuC;9h2bnU72tkzh?nzM{ND|zJqFsr;)t3opwT&CbkQL#OHTE{@*7N`_~ zR<6JtJ-^{S?8CBg{ma;aX=gVyA18i|2F*c8B2k zto1Gl8k2MdPSq4kE0Z(tREyHIrE7k{iR0j#J)KYSinsLezIN(h!a;s_5ij!e?JH5Wbfe`8XZ*;%+7jYd)_N_Ak!x# zNCw$9;$hy$uVmG%^1cC|=CkAuR_g+RtDSo;tG~_$J$%`W-HUNwl6)Ne55?T7g-Es-vvPl4AM;0xRW>1(m-g+g3S^vgZS!-cr8 zSXAs#z-tvGf5C;FGxzKD8=d1269D3s3p9yC6q_9Q8lf!V%;e;xtjG;t@KTO zw)5#teYM4z(QGmex7HP`nd}*rdBn5WvX_?Lhsnj_-5_V5<@XulWf!C5mGqh6zM%w+ z+w#4~d1W&4Oc!C_HzGyHts>7VV_BY*TuRGS46hE=p+>Z!fp z{R<7ox@L)U>bBfwM9*6I5wwp#eh9~mx?ey%Jl(S+cM7fyaz^etmeDvnPItp7o}M>K 
zw`nQsnaUV-4rKde7RWn?QDSV?a#X<8I)yoJo}k}@&C~csD&l;BhX;u+^=O@Yg~4K5 zDt`aP-VbBJVmM}JPDal!9P6#M?5WK&<;+{lK_Y9!%D|#I_N1zJTBNc{ zs(qwVd0+OOfIZ2XP|un5>5}KG&74t&yjdHRn#WliU0sWB;`3&$@a+B}5j%C##{7qde8aU!eat zg_-MH?gHkK<7%=l)l+XVVmHrCCe#6>N&cMoFk%-ww<1G!n;s$dUTJPT!L0Z`=U+wJ z8^oT`T6~N~2RQc|(hcH$XX&c~sW)Q6&oKX;m70qOjiA@vczu_nD|mQKcnyNqlSZ_y zVo`RgF86(MdavOrM>+E&jBDkLta5$GIjPxvkn@XZy$U@$k#ryw-q6<{V!-R#eAc(; zz@efzl^tA7=<+GF_JT#SPi)~!$qavmGf!fpZM<#0eoGn=Q&;qIw!EHBrLnv zs@eWSlf1*a8&^^zdnR-T;!tsVrC!O{73)GuuUGLkMA%p_aV<3t+MO72u<>QzZC6n_B*yYfOS*Xs4m`>#KyLKDD#k?`EZ`u zf8=P^kSC*0OWxT{Je`MkMd^7pe@JGVhFDZoR88LX{rPa-Zq>&4PR8oSuxm}5&GgPQ zgdWg+h~GSoM|>Fmp7Hl;C~Ix0_tO_w(;!<_{a3nYZ4x`R+#Xe1EpjZ223~FC@!W-jS>WS7Ye= zv@B^{OeWaA*mkp{sc1B^WgjE@Z+MuhD3nrF*3#QbtlTC0txc)?-f|78%(D&9O>>OqoQjIT?`ootBh%&+bz z#bn4A!J1E5>&2XL za4td924pJZx)=CMXENO2$SY>u54dK4f9+sZ%XhP(k^CKp^1`NcJ(_nkgjpR&*W%i2 zjBM_Bb=<0^oqyroitKe*xO*OgH)XARRJnL@b>eNBX+lAc2H*!3C#dq_TjEf~O z=Mi>Jmasm~OlFAW^;rSG!TdipllIcbANt5Xy<~&lA7j?y&X2Us+S0AAY!2lSbX?9$ zlR(n;8D(|(=qic3X<(|2|cJ_x)cIf9V&>xt0B+Q%O@7~&}3)ifA3tF#>NhNt)7no*W zYx4AbL9+d6(1m>W8}XClAURDl2HZ=R2JBG-N0Vo~i^#HptuRyyGpx4`Vw?O?dv!8)@ceLJHVbuM4 zoncgL#+7$fd&Y=5?xaS{tJgnTmRX0P@X`sR(-1*9uN-`sJPf!#G) zE~}q){w32w-m>j*c4jiIcw#c6-O78O!;;+HC$41g|C=m%DsQU6cWa6Yd(k!T6c2%1 z#>r$~Xb#C_D{rdD-;DCvZ`I$px(}@1G7eNX_GE44P8$FJH$9c~If5lK@3N%Oe=z%*r^Zeg%hlJm+Q-WW8{^ohOaw^1>ta@`?FQ@?x#UrQ}@C z%CF;TK8facU}yGPw_~dh#m(isZZt+rqI+iLuNtr088=76`aZhPqkR{VMfZ`s zBsFWt(((b3V9|}POLbT%_hJ$tE{dS-BPDMcNH(<`!k|K?g3LR zsh@W8b|QIZUU!esUH@4{l^P?BY4adI`q96Oj9}G_bY=9Mm@~W(6V_@myYlih{~Y7a z?_{mv`hlLJ93Z-NVC}4iD2t~z@}c9fb{Z`8`@8JZ;_V%w+bi42{lyw$pzKOQ(hF zS>h3#&0MVtgfAd@GZO8>&tbG_=I@QzzLQ1gvwK#vJ%yd!aJLFf$3s7BI1a~!t~7rH z##tvm#&>VPFK_QkvE|8ZRFtf(=vdPjaU{H}`ad(`%tx}`A^Gwb!tz|-RnSv1wCvPI zQ#NiWGGC4>qewNE$7GFGN7^ih<6`#yuU5*66_vI6G2KRJV}YNng-pEu1v8pqY%>4u zZB+l%u~Imd)eA?kbh76h$rhimz&P05q~C$YmLB>ZEbRczmh?maN~O7_cc zf%Xu|zQo)`kHm^(oXTt|yEqTWrPK^~UXM#i*oEIF^X@gYy4^^2abX_$p_uYKE$*N} ztAcbD=vxBj3;EMh>`0}RRG~{H^-cPi0;S{6A) 
zf3DA}J|*A7LjIp8waxUml6)n2!(TizbNZL{ls%8xec3`3{hr;Hk}4~t8i+dQvrnGh z{sX7H>CP^jR7k5R`Y!hW3jBCjt9j#b4)o54X#PJ_v}wSjs$oos0t;wUV66S>INJ25VSnH0JciuR1h)8%F*?od)gumg!+EXJHhAdXUxvQU!32Zmb8nDLXBv(Kea& zTR{9&J&kfK`yWo=Ef*@*)*wm^bXyPqS+I+i=1WM=H+^JJJOr^O0$te*<=_tSV*cF7lkcJ{JlwNBnr zYqK%pQ-GOBfy6ytDzX*%70%VOydArYd zLvE2Xj>q-6#?a9sNO{toTtGa#FLJL}R9|`D(@0-unfcv_VV@S}4qs#FCQ=OdZ7r>p zz_}*Q%DRn1j8G3Y(z&UI{UgL)=11N1utDp|)tr&F z8AfH*_@zeI@0|Uc7V=K@xI!d)NLy2Rz&Q3hn7wzqzdMGHJqWRTA@B!@e!#Gd|9PUe z5jTE=ersnv2jTUF{<9{of=^XRzfHWI3!64PrUw7Kl^@R%8@{5?6Xd=Wc76F*U9lnK z>o(sHg87Fenv69Y`9t1V$8NcU`_h?9N%bqgOU36ONLGs_j)ux4_M9l<9!}E|9|N^+Q>PaO~0f^a;GPoS!N;k6!=zI56Rrb20$(B2mjTn2SIA4+MZ#ye{r0*r; zLXmKdZ;yi0YUo@|`x4G*==gGRC=vY!oF7M$D)i4&l3Chyth312Go975 zJ7FG99nV^e{hiT#0_NRI=GMlXJm2a~pXYhUbh_nf)(x(Ii09Xa`8r%U9G4dO=4SG| zfYG;#H63V_Ris&eUR&S&dCj@(yANFJuu6Fx$tuCDKkZ|dGK$v8T@-&zW#^mekrm9z zS)9GcOIYT79DI^weOci*J=Gy+33^s$g(BqJP0C5Uy)vYG=yPu@N*1roGm`f)JBd3O zfB)dcW#M>|KKemF`9Jd}BDpB0;b~P&%Kp82Sa6Gom?~0v#`g^F)MKN&wOAb{$Mf#( zEK!O5xADg8Hcr)mWYu`i@8|TFd-)c6%c{*}3(szYtfl4{Tei~UVWappIzT2M4bIZ`1^U=`Tmt;k6FgKdCvO? 
z%S+O_s7QFAW7(CT)r>#!y$&S(1>3*%sV&)Rnct-9_-Q=vMLoY^1g|Y>y~YFjV(=XW zzVC~zUuh>#oU<-zjk5=lJF~~jpq+if>v+z^yy3q_yuAIYfjcXq(H>6UYw>-5XYKKW zB+pp$3HwjRp^OFli%_W>+MM;y)5@2;Zi={2md{Uj-Zs75&i2Vud?TbEW22k(o>fmT zLonHShhW*e`n!yU--GMVdaTa|>!EP2R`YhNet~Ow=A8NqACqcd*d}8{c9^H?c3F0g zzonk=3pjC#e)7gFcWp_5FVR}E0 zt%zg&`D8QLH6!bRJZv#5wCBrydXd z+oziPXvZ?+Xkct;&+{`XxAi-BXvsp9`T^Nbbv#V-wj#6pyP)?oIkWC4ZzHdyb*h79 z&e)ayA42hTY-vlfyb&v=y}Skb0n@hY@mFj*23xo2`y$bOmN6*v#%D-=m;S2jYd5K$ zWrvI>*{yt%<2L~ZM`}A2z^k)NvV~_yR#6ct(f4#RXDvi>4QB0BGUN6aDaVkb z4E^$?IlG`H6d3yz9HwhG&+F!4S1%me#SVXw=W)H&W2fYe&r^c*wTOl{`TPS`mlSKC z=i$}$lN_2Kk$BGvgiXHb>73La-^_#i@#kZVod?o7)g0Rze_vvkwZ0i%@T!uK$_ju6 zSaF`XGqo_bWcPhf?LGy8W{!^1cRf*L9qA{~BWrhhu*hFLy)NtjYaG9ihb3#`R}h#b z=A<^}nJrHi0ywjiF!m0{tvP z{HVWLu+B{PBr~#%b31(MLGJVEmz`uYFr+pNE;Vj8r&Bvp&nN$Zq?{ru9;WBi$+(2w z8<4Pvqr?3>%I81%@IO$_4xLoQ7znYy=sX(>^8BwY8P23n-ZnQUV^eI)4(y6Nt1kUo zyIaXx=An>%P2?B|+vG$p?eAqg@e)%0z@Pf!V=~qb<14?pL#V-m>qVVkA^0V!2IAHh zY-uN!y;8UrxP*P{(RzQsZ-?y`e{I)pe^_6O;mu+8J_M4>@MG+|n!jY9RVsAt#O3C& zOIE+GM$4(>9}myGUE9gGbN8FvU?a4aC$dMedI#29PPYMOMRQ?xzt~;JHI;mttiRX9 zn4Kc^X12_mh~zx@iWTazUU}C)%QnxO&D@9`w`0`nBJ0vRBFS#hVp$_>^43&<*2(Z$DvFP1>x=>!L;oL1_W`!$*v0|8K^Y03P_!td@M#z= z!Y2(%rJ_P9BM}W!l!&4)X(}P3(okkY_G;KPsZd&?LBwYs-|zo)yvO^#&vW0`eO>4H zpXYVm*Er8-)o={|m>H(=rU$sUq9%zCtP4K0=D zi)WG2aastz2H+?X9jxx7*hF1*JWkd}Yw-mXC6aqn@)@O{9zgq-dG>9#oVXKNo9qcY z>C18kF+0y+Wye>M@0*3*WQ5U8Vxgp8ZORJz@|8K0J)QU5fW9>>ps_1gveqBrq$Y`! 
zf}W>f?JAi37!oqup4pTZBr<}&Z}jYJt>N z?Kb0AR?=1c$_!NWrvJ{gv;%)Pkj%k&_zykyM^7WRbQ$a3AAQM;n@pJ%=zEc88yRsf zg~gx1Wst;0oa$jLs;7is8C8u?MHCDlBO ztQISG56dfsv)%OlnNNq}W2h1}e7gz)C-VbY={*m^OZXOwr)gspKUD?h*YfSFpgA_# z620-|t<{s*f{8Gl@#2}Jz66po0?bNK74-}v?+N(X2E~be?B_TqVbATb?C**|U<99cmlhEvbg5`Q($evyMa$rn_xy zX#zXCk?%-6mBic5+)OemXD&4F^5?o9OUpdbEXb^imz?%&s%IRd-dmMSq`u4{((c?({0PW{= z_AT@@7diX*YQ-2J$D#Ofg#<*=IS3-V)U& z?_cwc^m8_2IF4@BFZ`$AE1rYM<9WRJv&5{;>UFom7_M~lEqe5CXtF2%5{0ZOTZ`AP z3+Ekh*9o6b(|vX?Cw_bKt|uB<&Uvk-ox|C9cD+5rUoYoPI`}Q2{od%FiQep2yd2-t z$@?-go5_oJUY_d}uH?1!tXzN~9~rEhr5w=Vci9H7#C=PRUm3h%H}nK69d zI_SRzqQ@4zU(lCv;`3^2%}3wED!wj^wC?eA_Cp-PHxwb2CunAw8ZIE?oO|xc7aR|F zMIkNwG@8(FvQgxuOycTfRF<9mPmoU{zjgGRD63D>bfRN)^0t}?OW&X=IpyD>zk^t7 zVv%L-GUKEZaa0>;+3EAKw$6vSGtizBEXQb}2$_vTeRk+%2VC};ErP4$0~(=xVxKN{ z_ij=dNr&qTR$82I3kH)TuNvHc%pWD=PcL5XZoDLyUp466yE|eJJPxM)M%v3NPzzdL z#>YHCCo`ex1HNJe9ugVs1ZZfD;=A~vWaCVn=Hx>^3hsuY?k%?P7SHxHUh8RZtWuBp zK9Efp!^374@`;+C_cv6(#xj=BVEW}`R+tGpiQ)1&{>MSavt*kc_Z>)a4XI7#IlsVl z@~I5w^VX`d1r22f$saU18yC-#d){(;_?!8|d$+ZAZ9 z0cHEsXdC65(aut`&kEi-{!XOd3nBF-emT42I%wx+JeMb*?9v(F`G?i;yieJAF&@`P zko1Q%pYu=IKX)uCH)6M0&;5?nYT~b~&$IA5P1{|RF6G)b$g56%Cu{RmTF*(klqx}H zuaQws4o>5ZDw53*rGtUwg-q7I*517m<+}*G&H0$Tp(pCw(MntZ5x248#JTLR&g3Rr zQ|Q4?fSytGm${Sl+i&n0&1iT8Z&(Ft(~l%m&2?mwc)~foQO5m43tp&+zZ+bs3HOKa zec378hSZWd=TIo>4h`A)S6+KFSb0vXcPE{z^@`nSu@6}%{!~jc%-)xBYW~d=pW}I= zS{^Kn92danHZsquP+9A}!Ii_cbO$aDfvR;{NOaLU zB$X`3HCbFs|FZ{eHaZf|^L~CY0bNY7FQI;z6w0yen^MX!>2_T*+Jv=vDQ zUq^ekuPd`~^eml@Cf&r+SpI%ba_% zgmom7;wT;jN7eYC>?^q&HF>AZx?G~T9mcME)~JB4Q_;AF9dDq= zGstrtS>?@mK>-g}u!ds1=aW23D}K6?8WS&~3~BC1*KPTbA$oul>Emjam^G2$Xf^Fk zAo*l1@9gQf3**mgw7nm?FGkyF_Y-q5F&)S9_7gpkRl?bTf};ldrg? 
z{vor1$M6ajaIq8yL;ahcYiXQ+t{Sc+t(-5uwlMA~LQ3!H5t4Z&D@lnKlQ+a;$fgfn zSI0+WnUext)f(&k3P+#QX*C`?ad&be_4MOr?Hi+$Y!n~(M=LD@lrk}Czh(Ym$P7(cz!1S-$s9;cwfsZ zzGt&jv^NMDGG&J$*x_apsAb~A2|*&0 zRqN^TQy!w7-tsx#H$0yL={a|M3CpW#yi@|0#}v4|LSGTD(u8~-h3w~W*b7(V^#K`6 zZ^ctv*XOIT49htI{Udp`Pgu$MYN(5g##(rpwS9%tc`Ph>FmhI7ggWB;+TdmbY>tGg zPf%4Re#o%H~vOHelNO{VYa5{CX;C1O&5}2UG%(9v%`!mXYeRJ zX?BCVSCev9;wtcyPpRvCs9r##-|?7tp?`(iv$u7fx>}LVbbk+nwB)lr*Hejgwp7TUHwf9ElFt{PFmpc zLfXuJgUs2SK~l-+ko~BMp4J*>GXs!*Z6q!d4fJqy7FA~N*^T+0N~XV@jo#zANAv`x z=_ipq63?cn`q#itXpOZ9m*@sGRZJkQqMAnUW@UcN3hex&pDVZ`rznVb>*!)CpkW3sjXpro6^}~xRtIS zr^V~FyPxN8({@Fpg;BWA{J_urRz(u&s{XT-zW}$-(&8qcyQ1NG-e?OEK~qU+NhFc6yx%So$!?HM=qgF8^$W5+mb4Q) z`EDh$>#IB8omfj-S=2PVU&Pw;E_#U`y#|SIB+>55cBPkO{CI^tEpZ25gcSz_jJmj4wk*^cw;eMZg!bjGG)}px)%>4>ao&7&n$r&uGDGlWG-6+`1 z%2h)YTtzpxpnVg4ex=L|d}W?xtm_^0sX5m-pZ&DQMJ3YMuG9l0KZ0+0o*kSI*@DY-c^n2Sy-9znHlKyuAJEzcIyxz{ z+uaf5^h{yIn>W`9x$2sv*-t?hldM4d; z)7Fn@D@!Vu7-7~??rpWLBi+gDZEt+KV%}Q1@FAa(=2}*h8M-f^Fz51G7=GSj!wWKxzq-Y#J2Cv7ZKzD7ZwZ$sI7^fYC8$q%$sTj#Tx*6i^-^w;$K+3uaf zvnP+sRD7+2n8i3t{D|zfpUyXC&-oO3Yw!7tz#fBy?2t>O=X+Sx4AzyLyNNEDD9jlX zCcbUXAeV6OU*w!~BxmXW9)`r~XsVB%#Kh_0PGt5vpYnm$$B@Bp7M^`rce2iD-c=@| zX+FJ8$K`t7dsM*_pN6KMWSJZoK<+ z9<@<*7^Kgq=bT~7ez!hk{~Oy$1c%Hl=dJrN{_zp|7_Wve`PY`7`;dHRLG>6dJOBfk z_xpe~X7v21HtKl4>qDX$uXWJpmG;bk_@Rk>!2*_@$Zo4h=R}mW`XQ1J+YrU5@V=~mP@d;ztNbyC8b$IcEYWorB0A{170h!yuGmn z{tJhv!uLoNCZ^Una(x0#Pm)(T*Xpvo#GQPaR1$aQD}6^R-sEFi*}JPFD;;OLI+CAw z*Yn>ixeQ*OWS4^=q8!>bY5hXJ#ESotH7ANiau+^BJ3F8x^CQ_G)1I6g(oW`>v+|L+GxJF#k)<7Rmxe`~uATYJlulTb96#s+FJ>;6yE!qw_2 z!%Myk=})rN=;aBXC-yc~*|N~Tp07-FoSo?T19p<}K5MJb`P71rvpR8?d4)%`QMTZt zE09Z8X*xsYJ9N=N4}U7pRgb;2C#zHFW*D3l<>|_?yW}uA4E@KVYa*#tgX+Y_PiCa- zu*)jy%RXn0?MUiTTP8fW*Wo zeFK%3;dO}n-AM6qZ*Om?X@5xhltfN-|0~GaNp4%Tm6#WKL;Dw>@hy}jR#FXKF1cwI z^0f6xp|Wznk?^_XutFd4Fm$hkzJF--Hacme#nY9^-klEQI0c$c#pA?+bY4c~@24u7^uN7$_j3y+a$3A5-`#*m zI||R^NalHRo(y|GE4_j~E0W*sD9I|o33xo1U6i4T|Bygpb6x>kIVG5#Dmi0yb)j!; 
z&%#f^e??bL<42!?kTUG>NgQSD-IspyK2%=qnGYREEpd>0ISLq2V7$KX3op{hpI8&-m1V6p}eA`l!f4N|HxwD9QPrpV|4<{AV9} sx zc%LUp_Ioz6zA#?R{OC3mPawx2XRx1Nwbg@mK0r}Ts82?fYtfcH%{k$lEVhZtG>GM# zu8mXt&p1%jD%8F1-fq9|LVw2O%W3RtI=KgJ3vkp2_S>O2yPjszTK0(N4BsU%Q4GGP z&|XzsJOOh#W!M;!8hXAh|NEAwrmJ@_jn%+UZ>?0u?EqfkI5HSZLu1IJBI&P#s&1al ze)42Aoq&QwHy#O_7b#nZRVPbP@;NtwmSkm^40AcRJ_WzoYx64aRRT{RDAiQ0<7sXc z#8%RBH-2+Ezi~K!bq+7{E)I&5Waj+GvFdxtH6zW81@58i4P@7lRb^(j9uH85R9{3< zGnyy`smUGtv$CUTt|1D``X1{Ug|;*G55+y7yqedjX)=4LNM5HwWlvA-M|*d}V@Y1- zPm*e?)tMyHpRTUwH=gF_w~@jzJkESF%|7tVxZLN{XdW**_(_S$I6Rb;-zM{tsGm-n z$J0h)a(1JS@jm6ff3W8spu3H}J)oY<;`Bs8Q$FH!GODSDVBsMxmRDk~`ntPTgsw`F zNl90e17`~vB-_DibiL&6neNtBOQQQ+Y0S72eg;BiOA;wU>d8oWsh;>~Pj+I($@P~V z)YWPC1QN=+YBGsG0JTRd-IFYjCxh>mOm3v)(QeA_o2zGE`u~8;DinPE-=vmzp9WBU z8N?*oPxi9j>u-DIda<;t3jJ;e?H1z~(tF-&w3V1E*~gZAF+b4%bYAarI=UK#o76ss z#OLrd9r2n|S$Vh4eA{1zcB`|S`L5kfhwFHZ{m3A(s^_UW(f#+=Mr-J+uly7>Bm(f4 z+U#8L-LdU4Xl>@s>&-Xn(MPd|i+%4^c(+?l_ivEt6R1s0jN~(%t`DyX|4;hAj#V$f z^+Gm3L#>G#ko_P};PxC8A4Q)h(sOO}l~q^#;>)PKfu$vbICR!Nt)~ zlSUuJV{6E}g1rr)uk5kvM9PC;F;RTKhs-AY-d%8&2sI<{_Jfj%CjD6f-N(|^$bzoR zslT=7=8(z3N*1T*Hf*pxOuVa1d{`sWst6}JLzV2gZ=(HiBcYer>%WxvM9IwfCMr-e zSzJb{-{3N*MLxvw-E=jboX&yfsc5f*+gi9D%nEb%JzjORw$7%@73}v2`fr8u_MXWM z;X-9*(b87!59CXVxjPiidsiKZSrun-3YOX&@_`D{I@oD7eYZ}pFcAt9 z^Ydd8xKYdNVCqM9^Ecflqs%7|vl)FCDD$G{2e|Sw4R43^q7Zo!xr}nPo8RFmoQl3H zc>QDOrYW4~P5g0~8$!E@!hbY-%)Xecm(B2Y_X<5{_P3JjOZb_LcaKz0`rJfL-HO|T za8Mh{KT*Tmu(?LtN08`LvTlXTlRQ`7n5Q1^GoL^0q|Py<_bLQ8P&P?vhz9YI#npT--aDf94LE6E@WWrDc^3Wk^L*YUvU7hA@A@-}?^Vx*Q2reR)bqKy ze*XoY?K||fBaP0mvl(^0^f6y5-xxt7zIsIZzVr=k4avi=%=6~Tad#P6Og{NpWr-RPITQ%IjO3T-h!&a7%)xCSv zemQJkXZ$h}+LJ*!5uQ7!{Yd&542SLbi;QXSWtRh#d5I*xq@&Nsu`xPZx|$k(L)Q@2 zpH-O_xKFOO3rN15_J{KfiOKRh8LmZh1-h9JcMmJmgAAJRvR7)gKTEk3b$JIpR-1{7 zw1hPdhKS5tC!*^uT2HpO*jl_u_F3!$ms|B;Z}8xwU^%NiTineq?PK`GowPYleNQOA zi+oR{iR461R>JA9HHu_LvXs2fB-UJbYQ$c1LT`lY`;uGs()(=tpBz=71 z;|}A&v-(sHQnQa{xpo^CdX~y$l9~9-lr1)HDW=ZM!6x2hA~VhKRMuBl!`tJgm|1s51aaVq46Nyqd7>AeeYuWvm6BM73Xaje;qVp8b 
zzK{3$dZE&4ozAz`M`PCEbDnG{q-1|c1-(`D@G1#(qM_lib0-`P#!Vt{CFW;Ns6VKV zMq2yC>_FxLf7SnH^M=((mbq= z7U+Ejm($4lCUqD0Qy#U+$1oUDEAcyt0nkK^iB>gFomc8Hlc6=|VFp59qMu!<{^)j! zZmugHGxcC`Lq-l~lLq7wgq2n1Ep=Vb0JzVa>q6Mbz5f7u## z(n}V{Z%*nZhVuKKT*=O|R{IzW>t_s~Sf<%URR*dr@OPn>?tqI{=-nTp58`igQgJRR z52AtWR_#jK%jmy)^QLpA|GW*gZn_LHka=p=b*M!@t0HRp89CH`(?`KO>adCBtjmmQ(W z0655K=t?P0L+*jQaGui-qK@ z^2u~s%KaV%j&^vy17vv&n!uR;fj)0h#}#bj|X`8^Glr9FAA_7WrfH*d0o zm4BCZv-5Ty9X>-sd9S#M|0%DX>Uh{;ES9;uRuFY3pZ*K$nN9!A@t!gL-u~}uBb-jO znrIep(fu6Q%i8h|sN4(OQjGaDLDm&KS(B3GBPmcOuVDEMkIbAK~NHHg`?)3S4oMzTByU%OsGxx4W z*G9qDY&Dr{mhb`H)Y=aI#zWr&IH;+I$jPgQFf&hYy$|p50*p6=iw^8zoi^IzaIot+ zYmgm7$+L7BN%z3_dj27?8lP0hG&nyD z5oAsRB*tGNpN~@iZ(4|LC3@Hi>dHQ?jGdaoe>wi32x;Dm;_Pn834@mEx`rLhrK3dK zd)B?~dY9YPmU!{mO|)368zC<;JC?lG<2$_O%v*D1Mzfj$bo#8%-@yCDe9W(?TSv!< z-;*rliF|(#JiUR!UhLsOxO-Qb7Ucd|L~l5@XTafjOWCMxdMSw)@aYvr{ru~qCB@F#kmFFR6{@WHy=@&gkMu^@pY)ojXNfo73HB59BxlPr_mdb4 z*_*f)Pn+oZE^@kqpUsK4JJp(al(k7b8G3rt-E%0|?dR>6HnZ8^KsiQLoAXU^g(sLy$Toe;4U;Rc^Ds!=q_`zz0p{m6Y~YXa zuB%)d>bX^Pwu4rGB>Rczd7E7JA(t_Fj&@L=OfOaW$=Cd>;=?AWr#o9coPwMR;&cr>N#`>#k zJ!`yum0ScNeU+@r?uL>5H@K@#>pfl1tVaz!K~wgU6P}4_l=uD%_{p3$T+63dL2+_2 zEJy7azo)XckD%%cS8gS_?4OtkweJ~wB^F5XWRCaTqtKfe5v@u3NLDaiy#wj12&;cw zKl8AsN~x9^q0e-rwG~YB%8j4u|`8R%wm)Izn=6S;CC|P{xXSWm%*(*CyxE2 z1yA-QE^%VSYs!wqa;&HVt8MA&?0DbL-JGGl7=@Mi!0dU?>{5{XEQ`OG z4@=J5e2Z7gzMWotT1FR%dwdN4}4%`O?Qx(1qM@gYQuAA&-6m4Yq)V8Dw+~%&q5N zGtV%Vw5O7H#$+>~sG|0A-uO#W{LtO3?rmn**@>3E{8cpnhR)0z?kv!`4wXZ-keQS0 zH=K-D`D9cm-MSkycBSYM*vpZLpOq_YUu&yn>(Y~&>U%%wbI=8f~_oV_Op;b$A2 z=Ztk*^_OBf*dz;_Y+C`*!0G%fLX$_t=F<9*s2^(I-+ z*O2Q|HX++4p1+BFb3!jC z`sa}Gd!&^JK1HCTA#LaUXXZ-V>pz%bOGb*x|G+AX! z@s8u@^cMX~=*+Fj(ihm^kpo}bjmmq1|+wN}uJBnMh>e? 
z^sJd{%$&n&eed=9!eq@%l)3sOv>VS^$8D_s%zr0laDTG;58mHa%Pp?m%#zMvKRM}p zFKWw^$?Y`pw;sK^x{{H6Ixm$IIM?cL9--fcG%^sj51^g0Jj;=K*K(-cMsJC+kw`5c zqA6#MlNBm?{|1m$_FX1gZr%kmL-Yn(`tTMx-_lykcS6|ST!`7_n0R7S;OR-VW+zZB zQrN_5J~M_$w)mcq@R(L_!9mX3{{p#V$zd|tkHT*yD7{&k^yi5}Ssurqp!;@oCwfRC z7$pi#V|IT_VQgL;_^~Ru&&c!{E!|7H@p_l=T^TPnAc=a~ z>+YvLS{v$(cG3GJvf4Z9I6}$9>mE-crQsu4F{_jJaFX~!t0%eE4ega+X%U~c5LNT| zuu0II_xeQCo6cKYUhwvbr~RI56C&-PA2d3;CC9B0nxGRQ3pg@gHwN-QC<-EK#H<~#O7 z#Rcpyt9yz0dzuyxCXu&D{tABXBo@_0nVR&JQS!(7(=SN>Y7+jF)CZDRPqKcT_PgM1 zgr5Cac({jFo@KEYqj)QwU#;$pYm+Owjh`0k%G~dbH1w%DPt#6zM}0?EmE1{w-^QNE z?%h?cBunZv?PgznU8BJrXgg4u)i_N4`a5ZLAAgHM?Lgy==b@!L9IsJhJ2WI)WOg2P zG;(_f$NwUYBOoR_TrO1hFju##^#yXsJN{eVwvs<3^YzVKAByXYRF->wH%pnU#`{Sp z+2)c5;{p8NR~TJ9?0!>}o~q?{m8(GyU(vz+eDKlwh|Jc`VXftG_mpwLFs<)j=x4T* z)0G8_yO=M@ntRS%EOLJhnv&;x8eZE#UP*{*sw82pezeZIbcpNjCI3^vw1*(Uh{AEho4}lF&o$y$2n&SXDE6 zN)(tn%I=23H}HNw6xCr{&p_=9ko>87Gv;^|!VW}BqG~r)W~_Ez^V^vG5`#54*;i|8 z8XD)Zq4Twr`PICWXZ~*!8Q)GTiMq3cq^jXG>w8&A&bxh1y|*ICSu}b%9ezOb)6tOG z?Cd%%iN_E4y`0}XLhCEZXAGIVPs=~yrIb6_Y5ynw=I~WN(#L*0Qr^l(K*d^g-J(QR z-?y^m?78U5C*GjkQrNDd-0iO9{PTWvQ@vm*!CVm@W0$|Zjj6N7c@q?rQAcs(pgwFb zaoO&~QO_%u05@-{$%z`f%3$Fc-{T<&NrgyMP6aN zRx7jh3iuDI{)X{H2)r5pleC!m)MK=@4nm*eCkLWon3i%@Z2%q6}4V>oNfidIdne5PDhs(yv!tePb@(*97B-HR>Ml=;P++8=Gi z6hE^Iby?$RuP=CnY`d|g<9X|hBp1_Ud04%Sl;fA`!22dTN>=yG7yXUyv%MK?Aom|g z zlW{pxUXGHy!%b%AImukv|6|y|gYNCt>TZ_OfyR^D@n?Kp#-p64t=MvA5c5WtvmJH# zg=4h7gdQ3ds`FVUt1C%7v&kNAqrZ6^5KB}DEN#>Fv zC`&H*Y9#y|2_=qEMZN0`Pd^I3Idilx?(4I}E##b)vgds2PHQjm8)cw+m^LOEd)*BK z!)ZE^m6M%?kA&iORCBKZwe?5LMN9pjiRQ74X)|w~8=%ENMNgtwYf@oUc9u zuFr8d{qS0}z30hft=kT@*K21wT_5U++Uj1%6J?EUI*ev6U>Uq7cSZ~LSdY$f`soF_ zzlhY2hnCT(imk2ie4>XYCgJZ&B%W~M9nK?zkMxk4!`TE^$!fk6-(C5Z6KUivQd&-* zd4F!LWKJ$6GHdpXl;<^uljh5)$r@Pto%5i&KWiMNmc^u(QBen+t@1ya4wmp&x06qL zq#;n8-I$3_@-3@aL9^fBrc-l>c>LY zvVW|e+AhLNZL+@?LU(E@@0tH2-tVC-~j6kl@+iyo(vY$MOx{R{-gX_!! 
z%=hh3US>WkEvj@WK42lM&OCe1LTky`n{(A8`TgEBI?46ABv{^+*LcM#p6ku7H$m%z ztp6W86j81-d1Y2~5IK&7h0<{@%V?xi32lgzM;qNN!yJ`o0r zd*V<1f8JxhP=2Ae-u=)y0$xt=`w2QO!_z6GbsZ{)u)`(nWSQT~=wce=9zcH?@9f8S zK1FZmsN+?rUQe#WJllq5Gm`iV?H9q>2khxOydK8CpT=X?akaSK;|3bo>3au#%wT=c zM7Y|Y?06)&H)*yan>k1sr)wADA@*luT z&Lbvf|13{+LtFMa?n6W6UCRtwM{RthbUP!#m9AG%{~Tzp>q=AfXYF}9-#d^rhQZH6 z5Im6uxApvK_&snU@)h^il5$TeP_{Ss@2zlhw>BOk=l9T)%m6D; zP!pdk*<>4aq<2o(ZEl7#g!0rKiWsC@naZR1qItlK6{>WMk%ojm+?kN;qH92<&@2Z zTPN@ylU(hp zwiYN{XdH4X*;OfwfLFWL22bN?p#+&^7wjr>z8!+zQA7IOWbG)6hwPWxkK}Svc?H@N z88a&elj-C!I=cmGn=9j?8Q~(b5Gp(F~?bqGlo_Wv5%> zdXJ)s`E05@G~C9%7HGMQ8ZOX!7ypaUNq@CG$V*>@=11u8LV9lCxhu$Y1idB3NKQCi z4~v<@&Y7YhZf`99-*J;O>WNuc7gh7r_%$5fKvK#7^^1|?p{PrwjCiYVI9mwktNl;@ ztNG|(jGjAr^5m4S3)?09Jp*l*&|*gDzo9RF>0WklK5Tx6jz_ih9<(M()Cn-!)%^#d z`wE&!e{c{A=iqE8YO0{C5;T_Lg=;|HJf0~w{xDAW#?m|+va^PCf$#Om>J*xO1zpon zIgwvYHk;ybItRt4!{a#ISE9>VG;^v}vL;s?mwEfoe%xyyW-&UmzxCR}7^WG2{SaAH zLwR-*?Sa{qu3X3GF5n67C$TrZT_i3@&Wv2o{;R6FJx(6?#B?^1++0gp?s!t!jjp@( zjmac(f%-DHw+uEL!9ich$$M$u&bGqeD4NI~mBeGJL9;n?Q-tT*Mt6H74q>UCmta?!k1oSw?9JUUf&WjOz)yLRfArJE-D!xknJl0dUsf9Z z?OABXdP8Y`AugWN?mv+81KnO@YQGmJIZoO;VREmFd|B}l=-Y}t>E!Wd;YUXq6_KC>;} zE7D%(Xtu*vV|XvgPA@0>;y9=QYirPzRrH*;O4h!!NOTpncZK|nruI`?RwT;MOA)e< zSNydg)kHk4=&3}jYe?rUQTQ}G-i-R3!N|LL1%32L?VVCUVNMYw)5lQvjz{l-Y%#l_ zX7iSbW!(lpiR+Sh7iW>tp{OlKM)S0vXgD`(dppa??)v@Il+!!KX*28brCj?7MSb8t z8H(S5;&$Y6I+|l$uaW28vllx(F;smO@sqLJ6Y98COF8qOx!=qS{DP19{+FixR?A^Tzjo;Au zNw?&SV4W(D@6AK0*FRlFu|$OyqAS6u7U1@_7Ca zX}L$i8zzg%{-}5dt}iM`zc!tHOlm!~U4#^JCb_jy%IW%_}Ma)Nj&6_yy;A6twVEz*mzc$5|v{+drnNa67_s)$?`ivXVzV(lhIQo*oa1!E4K}8oC}PtGf4Znm!kjE0BFN@I6$@X$&bsp27klO(_B4|o_wWhb zQQKU<@iVW#Tuqa3e5kg|vAgmGFOkd-9ke$K`mToVjh>ysYbVP_RvjzAO*1VfD_cpn zc_=jJ(6dpyIsbYCyDMTuk)HKr`s=6O z$Mtv{=xY`v-GQgr$|-!_@#tHI!Q2+UcQH`II|`y0c0 zU-NsP{&WM*a(3zolKzanWFJ+ccl6fU8EPt`oon2^gT%5^;9%beqkp!$*)^IxQ7zF~ zl7xRn&vKte!s!FNR2Z7t`AN@Oz~;Z^Qq4xZR-CAtm#QJFQ17tfx` zA6Y}L;!S1u*;zEU(1YfD?OP-i+5Crmi_zB?&{d68MzHlp 
z-g_76naUSvPUPw>$}H6KlWG}7vdOlU2q#DK8%^Lm5qNV}BoRMy*79Kz$?1uw{q)mP za#3a$=x<(PhTcE;%1O_)ejiZt@BHZZYI_jPm3hz$arY_IPr-jq$Y<|**5(rR;6Lu( zfvaYoOeUcPC`p8X$LMtle6=a~iZT33C4Kp~aNLVT7m)0WJaX2_|3gNNc(&}QOctK? zkasICZi1Wcq@LZfhmu_)g(q53qI>^~WIra!c=eoO=mSHaL);yF%WM=cKx1;#&O*lk z2%78eH~N;?eF-=m;3s42tS^5{uJxV&c}@knxX$9R{s}^?8zG@He*S=N1_Yp?ju?_i#6p`Z9{i*j$yREDOq^_ zh2MCz^2V>pvQ!Obj^WeRvz)x|-lgtgWd0^v`|@nrn|Y4sU*Xk{_FgsFv#;WIhieN- zG+Cw}_1rpka6S2r_EZ;l4ke4)=*sxG8445s@O=D6k2UGwQgv*Gyt2G%BCI5q@lr^+ znMWPT4`d!Ev6H@sk5F>B>zSuKN1f@BKA`ho@RPGUuR_{ue0J6mtLo8O8wC! zJWirM&wzm!p)mU?V<(p?GZ9j=*7ya<4ANt4HF}&2Nju?tC+^=u^KZ~_oTqb2^<;M^ zkbchVB+kzWeM=D@VJ74@M@u!HU=L(Bz){{Va&A6n)bD_cDb1hIqgO^jPRZRx+F3!* z`(RD>Ka}oXCCiy={gqXnMk<+4PGrOEg}A~q{YZBu#J))*i3HLPO0w(yZf(^#A zlg(~5-VGn?@H!Vqcj5a8IBo5@XHilE-KW!7-sCE%c?s+Y(f2sQQTYChpB-t@dRfbI2tAJ~JX~;iR=XGShikK}t8G;w93}O6nr5B%kwCKKoC7 zO;b-l%Ts-z?M+(!nf2D^r~AOh0yg(=meii*XIF6cz-M3L!$vhXv6`I6@9Eio(7z0} zE=S+_wAPXRU5v7FWb`5$HmfZW) zS5Ex2D)i%R;p1~?dzC~oADWTGcUt=|l$6yg)y3Oge8+ID@2kxYBwEswH=yr8I7n`^ z%yZ7+XI>+jXCU|;vh0eY%o~)V!6uMX9A3UBvz&FC%$~ng@(9$&60W0_UFy5p-8Q5& zOI!cY@@K5EoBKDz!T=gwuAUMoyBKYcYx`x~WvIDcL1!|DHRH7qhVI8BdVi?pV) z)9g(e;%6;6v~_O}ly=tU2d?M+cPQ!1<5k~6`6iM}w%dj*d2eWH=ut%UXnd~|% zk=PyGaM#gDq=AuZ{eqNl!TCy9*?@!d(9(rA`_pSbZ^Hw4gzPgvk{qtl?l#=!1b9i* zzDon!dD6>We*i`0NpGuKCMp$6|Jjvc>K=x>_B6GL77`OTaj90a&7yv*YAGYH#{X6lFiJ3J3ug8;4b6$QBJ|EQbL~@#;-HUjr2VBdof)$C)H)0LKfzN4^i6>M&q?M}r9Y(q|Dx&_`27n4 zvuEWe?agDY!P36CnW?P{aPdD{SdWTtU@zwHhUt;zgj5*fmRlbbgukP@?E zA$oi3{c|q=AKZ5-^R2J@TUxrD#iyJB>W z5^qAv*K~hkp_hF^Ej#ohP4o_j(?nVH4pn{z-;sFG8KcaGuA@ow5aW!j^>#($GCJI! 
zCvL~Wm+*x-$+8FPFM;nvA$>f$Pp8KXYM8)s6JI0RnMu|)$*v}@OG7~)UT-)3R&;L) zjOS{03N&GhV{wu_PkGaskBVl{KN_D^$s=bVvh%AlS)EFL&Dhr{dRYK9cPjsY+KL$4 zY@o-<>Yk|FX=txiD4TQ3-zis*^pcaegc5m28BG#HweTvt$=J6wc_fcy@?vbz-jVu- z?N1^h`XiRGQWQ%fLmF?3{Pb{?&^WK#$8r)5d@C#X$?>qIj+hMuy2 zFgdW^RoClq`3{@Px_3?OWcCyi(dN>M3uV8Pnu+=Di z(Nl?>zDU_*)jXG`zT#s?l5^H!ex}zwWKdVFi`CPE&s%|-x-|4-fzqt29mYRo!%W9G3p^D@~T^ebDt7d=(TcP+eU&LR1v7t!khsUj7{D?aL=&G8_rnRE z$eU>10~V7}V!wAJxs%!8b@Y<>Xt(38FPal~?<$<1s>ZzC_fql|?N?Gh83+E-_sph; z%$k)b*lkW8?t+xBp|CB4wealGWSklH`^oQHn8;bF8K}$&>$;v##LMJ_N@m*wak-Tw zUV_mNAiE^&%!0_`{(o#VvJPIJVAU_ ziQoJMd}Sp)`A!nwpeUSfR;CSWcme`4>dGwFAhnn9IqRQ^`Ic;hyCFQMoZiw>Px>xO zU&C>fy^HhU|0Le}Ba~hY5&hk-rW|ho|*2N`fTf-$Jed6}XxaVYIC=l@HfyRI5nDAC8yU4AB`=@QnnOr7%z z7V#Et8^Gq?HR!otD^Q6 z@HUsg&j?R!QR~(Cxs1-wP;VzPna%$HR{A@2<{bR>czFjR{>x9*-Gxrv*>zsJ$4jHY~THfE| z*-Ns5Q^=XdN4zMjF51s4FZsCbMLOJ3)_rHxD!l9(&T{y{u*fKA*?U z+2lNc9M@?rdEpZAGH(+(8+t8_ZY1ee^itN(686>_U&rAu85WLlH#;^)kXtgR4QBc2 z$>R+w(%UeYd90wz%UDDwUMBl45?ke#f(|CA?Fwk9rL8Sm$t>DRSNaXe2r7`r%+H&Jss!w4 zeTAmm@s^Vr6Up!k*gMbjf00@C+yy63x^fX*Zg%$mm#2%V3UN~n&{cmqAx(OMalS3%7?sQJND9rP7>7di*8U+H1GKz+uIb5J-;ne39z z%3TBaDvHPKVXWy}8?wyaq6Togg7&}lbRxdzBvv`2x^a5K;@TNT{wKP=9%imbO(h)t zOa^68os+@CmC6q0a@xC_eCnz1V!HgwoyN4B_*dKDFj>o!5u_Tud<}y`)f$?9!uR{= zO=N^`wOL8oU)1;(k5tFVp@q-el`Tu6B|Ke(9R<5T@fMf(d65Ut-qox%V+RA+1v$UKy$-21x%e}{ZA3~!U6R(5NncDfB7y1VE4M^rRHJreUXRqGb zyz@LfUWE3sq!^2ikNFUSzhfhteS4mVXo{vQaeEv3I-&Dj8e55@?fl$hw4Ir>yfr4u zU9$F7LUVuZd_<>tNBa}z$Fth|Joz#TmOx+wlz-QJl^(q`>K3c3ni8$pY~CERGdd@S z`uW_JtZpHto6+&Ur%z%@Wwr3Larb!zo4SCulF=zS6E~r0xsprV8{=wQKK^Ofx3J?9 zo5jSEC;RA-3dRzbJkPu=k?m7JS*Fa3pM6`YvZ6|deOrm zD<>nGk!s>VCfD^e)}Fl{TOsW($hZTVmqX=={MYZiOjY+2UEqCmjN%^>tvDxovdb`I ztK)h5kvOee7zyTlSQ{-h^-fltzO#$8m@EHczgdynheahG_&Y2+TCPR0lKnt=V$)2CQW6%vbU>Q4@fqgmgveJ z;jBAnhw9fn>oI)5V%+>jUKjD>#R?ePN{-XnXgPRk1)*DUmCW;R!O7{YdlCtbcQ1QC zviG4$q1{F7GJAWLv8I!0YXps4#ygFt>kV2@B+|@hAJ3ZlE3w-+@EBA)Nadh zr424JidG#@=ogqh z0L2%O#GmZ>H!UtQ3ZGi=;K#G4f3bE>qJiP?H`UX%c-h22$(Zp)bst5`$@{g#v)`d_ 
z5?)qA+Ikk*39aYD_yy2-25+_=Kh??RHVA7=E6D`32X)CV|6lbdb45oU=3$!NRM5zC zWIKQkqn!rEHXF$?`xe^stliJhS!rU8dqz@T2(!wy5zlw^SCI5z1{ZQ!b zRvV+8hNe9@olOHXwfwd3+5P`Ky|hyPc$Srw{r72dse9RdavsUnf%v67QoR2-pMF<= z&faE^?V~td#|rA=v=mET5lkkyjp8EK)t8lIoZes@8}CbVsW?0w0C#%)Q#YiOlC41JonQ0W_QS?Y6ABD9K+%_ALrNajo7g5knHQE~k&j zYvod!PsHY&pMS=4ouQ@?t4vIiaazh7X?C$DOXEk-ksTTn3SRL^_P%)AHuhERBjop?kf2C zM5E45`s@m8s=Y)bY(y3p!O9}KFU1$WN~$@T*c5*?=x-pJ|8)0DpU+`$lc6oq&)Y$5 zaIYAaV)ojHMaFg#`#dQH-LNX@U%z-Dh%%UDO~NgjX(EHnE} z@9{mO^+XJamu{hsA^559=Y>LloAvHTe42vh$^|WEHRyHyes3kSLNZ&u-;iQwo^Z6< zR?dorkHZ;Vy?NCeN z72gEce|TbqzYoCe8vK@o?61jk7wKPyyH0wH-l)lbv*q+3gmp$wVv!$4njiT+ls(?) zXC8`6@H;v2I}9o&>%(h7`s=W@!}rECm`qJu3Ub(vruI;qIr7Z3e1^mI>dh+YVw%r> z>JPMeCH*yo<~8Vifz7AKI1oMAb{;Y_8<}n3$rtcL4ai~xemW}M z!|!f#TkB^(?Jj|jzC7T|+C7}Ey3oi*-mAE8Jqj}uiNKW1j^ChY5R3bfR2QHx^W8a( z{|&pl+|M~Y;HPLxEX_aJ8ne@yOqNLp@vq zHqiSNh}`D?76?0)Bzt*N%l^qnQPYeaW{jJY)yr9FMsHL6U&R~j@l+FiLs`gO1T~N9 zyGr`I%(Gvj>wLblH}tkf-(vVaQOzTaopPG2IP7O8ZzFq7PNz*uJztPa_T=uvQcAPI zZSI{3n;(*47rp8e&x>|^(CKw0w17KN=1wDw<5hw!;Ar{H8B-OeS`EBK3F+4ny1yp(qLA-@Z7^gARx z3WpoXvJIXVqU>-8TBO_(cuVfU@^CR5((hI>R<#<<)AS>U($wkXo>{D%uX~!^SLQP_ zFFZhdBh{I6EOlL9%r|6zX?rrM&DN9aCp%}F!uLPgxAR<&em2RBhP<58%^0^n$=-#= z%zO+d`5Tn4z(ShCR3mqm;_^k_FCOy=EVKuP@FSWP1%29rI*BSVl}Sh-L4{yM9571 zp+t?&+|ikITgC|UOSSFKY95E)aFjBTKemRKL_l3Z_w3{7Q%}Jy-Kl(jdPoSs7 zz$s;%_%h@*@hyAETGP)0RQFa#H!?}4&rzf|36&F2d11j1eg?C33cl!Lo-FZ6vkKS` z-V%rL7c>=9{|J)ng@&Cx-(p@bS=pv&Dd+XBB-^t**^q{>NBJ8CiDqT;Fgp2}9d#hz z&)mx#YYmbZ50`_`bs~-K@7`t>njP55B{T;jx@qACa!RI@oa9KHz@qrS7hk`Uz&hCY zn+$SRGnP0F@5$I#%7`hEr#I5YBT$nFnaKv9RqFJXt5JOe`^k)EqJt&=(*bn;gc2L! 
zunw7T#Zi4UWhHMK9A(eJVE=DpojJ#yT);VRTOB%kz;J!oYRKR1jc|Sz3(5K8?B)F$ zzlmmd08M>L({o*&skLMhEK0Ij-A?q*M9et>#!i8NXJ8{SfgaTUr9LOt&!ha$SlUXy zww&2aM6SctT}J7iT7D3vt!U%~ay^zN+LPx-UN@dKyIe=%=pk+0s^**{ZA=r1sdkW> zO5piMKA<#ty~+NP7i_lAvE$xaJxKkFaoMGSx>h{e@9LlI$?OrUil3{sb0JOC#n}bW zTwgDCB@ccHUo%)e*|Xh_HyQ^~o8h64(ql+z3IzP2?c{Q)$*X33QC0aSe9$_Qs;Ztp zaX1F$TTq@|Ia%>-UKo2G>C+kLtx8JU)fulEO&pJt(&)&Jj&dZ^64Ddh?}oy=M_JaF zTtoBOY-hI@#3uNaP2)rlb!uN z)|kE`gf^nDZ|LS9UMe$&nfpurnatT|e^B!LX7>CbcKakm^h4R5cy9tB57A4;AUAsl zeT*c}Lg{GFb%C|RS=2owa~EDlq5NZI=EHB|#CAdbQ)timfkTYdKEQh~+^mA#_`Y^1 z+DE-KC#X$O;=BWv;jUc1s(&)+#lA9tCptE>L6{}Vs0hX1DAk|*!`VGJtI3KJlm07pJgx2Q^WqqbGt4==F#EjXaj>Jek+B@+Gwv=o) z-SJw){p3jBMWe}Ud8B&&&GR(UMgus{dg~0o*TPNqjb;Aeeq5i*`y@8-7r1$y9bW0?$lS z*9si2*4kSnaF&`6;vI+UHSSO%5v0!LWg0o-5f{K=u6spir-CM=5rw6F4t?4|4|S! zf?hHXKLEPMvYk(~`nl2r;p%I(->NUWqhNn8qP-P8_tT?yFh1{2p2-cpM7w|JzwTFl z3shz&?lY|XQC!T1p;sZlq8bk`)bfxvb0#_7?oFEhA3Wumk4WfHee^25%G z#&y5B-V%Zav-hI#cA7EsKT!B3{NH8lm3=m|NOW&RoDr;HINMK-$n5p_5v85d)x&cO}2m^Ug+TPhMyMYO)7q13aEUQY~1>ACx${Xu%W$p|8+rc1*{JFO*(^KYJdhD@(e zV`Z)U1P6DL()Z9*%(dBcQ3ZlNM|1jyntb^Z-YVmuL^EB=H}!F)97;y=gcX(hm4)=i z#S*yxi^N`|yS&q$f~M{y{~CQ~hxGn@Nls*S$IJhqyR`dvk$BG2bVAv78cGbE_Qt7a z>%W$JZY3*xf_M9x4z7c?GyVM*O^ICi1%Gh@3ih~{^K);rx|`^44Rn5`#WUFL4`ek0 z&W|ATXHlIj>RsGV9EBFR+=b$2w9=E8Jr}}HB8}|c%o()vl>QQ4Cy-tb7La|jb+vXl zTpo+hD zOjgNE8X{RKZHYuGh58v~WE2^NsH7+ok@fn&Ki~eZ=k+|#cii`No%1=LbFS;Y@9R3J zolma9lD=k+U*P1O^vd(xxAnO}Tl?^uFKPD>>Bl-Js|j=cQ6AP;!s#4LA0m=xC0H`? 
zx201P5ooE{oQxXD(w^E4L83bb@8lP$=sku7XXq!0eov!U{SL;hWN_VqH=`Vt-DkP~ zw>Pbm>Fgh4&~JJ#0ma6otR}*YCu!~iKgdQW`z(9mesolx7iWcZ&%#wgC7%z}_9R|j zhOAS0a~ZlPNB;uct%0qZNVN-h8fbMa_B{oOXs|njcyo5GtJRXeiS$21C9ANLGy4;k z`ixZHIX-y<>+q?vev;=Z&%3inwmt+B9j6qqxUpcri3Q*MP;@Sbcim~y4DWNTSX?Xh z#G;ifauO_0!MC7W+2@xSX>z4M%O@w2sT}Pe5g+p;HhKGUU+Gl7Rg1>Oai@WAD)NmJ zNtE@T-C1)BZ%wY+&5l2rp7XTdP`v7oE62ig2u{=?%V%1=op+ApGr8KmncX`1S|tc7{7yb#9C@L@Gc zMmaWDqZy@NCi$mE^n3ODp-)psuOh!mMd7UZX-MO&ChkJ=@fckj-kD)#f5~Mu9b^1` z+z7J2z83SBn=mB%(SPL)xkH=XH(k71tk2ADvlF;9X^-WhH_|&!Epjr|pziabL40vA@ zp5iwvs$}(zuR2uJZK$E?L{Rzx3iJN2iU`_G_>+I*x+R5CF?`VXY#y{dC zKST5Yo_HubWfxU%{##arsjP)|@Hzok8p7Zca^#L~o`9y>dmU}|W%X@*Avu9^H@X=A zn@Yo#boh!)iJnWK^fv#?lc!zcLe{y=!r8_&-JRQKfSyiasnmZsf^C-SC3CHIeE%#n zy`H4lM_;wHdLA!&fR@vpn>`j4#i!&lPv)$iFxg+f_hR8N%*Z^cgKxiJuZ<#5R!Y}n z^>P?of|l)Q{xN;`@m;cf-Km8|G{~5=fDCg0Z4YamPJIdTiW z`>yf1pGdVkr~Lky-GSX#&@ihaZbBukZ)WXG82-Bhj5clNKa?Wy$_@L@Z>x)tJH zE#EdG|FvZKA4_e5U2{?vCDlwUzX2!XjT=}Xd-o>7r5DTY#WxS*m5<|Z2~vNF(@pT9 zsdu?%y&b1-<>ko)Fxfc|d3~;ais9=(?HP}n6u)G$vl&t1<&H{(Z-Z- zvFv40vn_Nw;_Fg&IL`lX;FvmYe_`lKY<>VHS2?dbze!H#hIFXHTFrd+8Lo{c&s8E! zRw}IUllsFq(CGrlU*Vfl<^uo2zxyG3qA@3Pz`87Z8=T6MttK7Tz-SQ7cWM7?J~3CD zl^ioyKdGZpR$m<;kzDjEVDLAosyKfg%VfXRSjZkk>*9PY&#@n+QA6=JIU(1OuoI6= zJ>_cR%R;`?jb@q27AIYu0%NkuZ-E{wV@UG0kE2J!LL95%fAWcC-G9dQ!}OH=M~SFe z37XX)sdh8P|3gXJO@Fy+Kg_(eHw6B}qE$TbbnKc7-DD%bQVg6+=iH}!(3tWQ?1$;| zEJ)qS*Q#MoZ{O`;t%WR;syf*hm343np?$3;+)qrQ6>5SCu83y~-#_hClr|~Q?>o79p>Z7&(d$IasB-_9?sc|up z_OqNfi*H@b8{c+ZGJmZi$D_wGpQ2}0kXqUXFsULbXw6aPn+3B*o`$LTX5z9(p_)4vJhgQ~yCBsQE zcHPc@&! 
zpt*!}sT+R~k9bnQbzITCNb}@ZXb+nbTFDd1?3)darAc`fS^nYCvBN*G$ex9c$A z&)M1@OOstVJW5>X1(luJO(wc4AU++wpYw~e^zstqO8ewbw(G;5OT*)Gp=v)*SR_IE7A!ZCEJ z2)*P*&0X^?bgIo0I^x{-dRgYP+{e3*?H}T)S^1beU^f)B94GRw(qAbyDMQOc_1j)+ ztG!Bv*V#tni`Z%%F7LvdOZ5D#Sag}zJJMyCYo6@xTIU-gZR@EZy$qaQy4U%*F>KV;z7w;HivBY>AMC=D>!$S?{6bl*6O83VKuJ?(*17C zNv*p7v0ZYJM9K$heznZ-|<;(1nz48^udB&|iZ)p)kZ&r$3%SBqIc(#LUc zz^Nm=N7Da#+MJ9Fle9Qm&v`o4n~aCi?spPqooO+sRTedWA?=ZLNR+IOy?5a5K3X}K zy^eyz2gaB~{jP>tQUBMl^fXq;UgrUYxRUX*1ijk(_Fj=PS$pc?;b4di(@#_Wl`Q1t zMX>L2tu$WL<*{RFF%$~PfAAwusws|S*U4<{G}POE`umQgd3v2%*2z58PHW3(S53st zD)wQl_8<#QgYr}S?N!Haq}kzYp4o0)R?Jhy`Pxi1g{+9(1kt}}TUHOLHd{vjG^^Ba-@FRNbO`7T%I*&^+-|U>ToTc zBoFY5fY(sgbxdT@*fn*Brp+H z0j!oQ^)v9jo=@hAZ|&%kjJB!1l9kurX=e*xyNev*P@b?J!=`hM=ocD|yRcH$BsO9F zJTqQMmfQhtDK=+C=B1AP7b=g@rlht~&8CHRF2Ipd@J)8`ZFrHov@`f_Rzq)s=xx5a z)z4I$vu+?Y4eI!X8AtjU3T6j zTT0j$W>#gB>=nL3FAd0>^&{CI)Do9^8-reeQ1S@(k-#@B8Fe-ZjQ}c$p}E1_a)q&rGcsfpQgIn!MjX;^8YWD@Mxwoi!WU9Y}XS zK96+l6J}x`8%ak%pqI0^=rg-7E@8#1_|45wOPsC+k<=&Xgj=bd*H#NJie||ulyxr2 zm{yqvsXjQ)IFr$L1rPj&WV^^*(rhXj8!PbSR1i!xv>jTj;wp7C`!peMB}b(e%;)@} zJC=Qo1LF&Rm2BK=#fvBfO7X?yw*PU`6lw+!s{RODw)I2gUY}9>%ppD z@Q&m)nn(6`>C&0>XJXYCTAv^aJ%VF9EWZhiW%@2c9#2++i&2OTX&$zKxfB#xk$6(~V{W8(DUu@p~f-?_k%j z$uV2}IfnL2w3E?jn-*WCXLb;*<15LIkm?0*h($lJU<2P(z_#i5k}KToQ#=+P^I)DQ ztL0fK86%TBI~9DAcjhRv{-V8NnAgA&3t1`87;1U<9*i=!Zq(z;F!&et+r*v~qQ!Ef z$PtcShr#tkqb?%XdcD8r%EFNl20Hu63$ zV!Z0_1+f2#jb_m9JfmcG8{Y!waacN%KYq?`nS-o_%&kx#PWnmgdIm4e^Tyw?*VUGB&J?T z&x}rAl06xF$FW#-wwogMW}nP-ajqllRmOuM@W?%kPkhouOk51@Ti~+}V|x1J5O`e1 zc8kdKfLDj}k35_C*IZ|mUSDRHT&1Qm^25%{6TIB9x&aT4V#Bk@{tGl;r|)f~=trk- zA%DM~8kj}*<~#Grll{n9b$^Xq8_|1{<|4k>6!$h8 ztqwGoS_JXz0qxJTYii?WcKegW*+rYxxv7frEj}KM(XaSqJ}f%>yA!6v;IPzjC0H)m zLT2-}h7dfDuE{|%n3gZOw~>gHJU@@&^!57NA4B?^VVzCqUmWuk&OZr@14XA9w7rsz zv(MxZZ6v?*F;LGP&o%6mxl3>FN@+d!w`%hL!$^`C`LMato^a|#pATvEfitecj90NT z6^+lNU9!P#5bGMUMqllxYDhI2v}4VznH|iNwy<##XW!$Pr+8UAHh9*%%)(POG8LVs zd)HHc$t!syj-@7gR(WJi)n|pfWVv%*#k}BYI$9c*Y#? 
z2e@Y6%vJolCA;nr(H`?^2xey%nY=BJv%nzOe9X@>>&hCAY2>d%m)zf(iInF zx;hm0GxYfdjtnQ|Ue4N!K3D2@Ieuj3eiAM6RN)0uCP&f*^v*hpU&(wBeX}EYC41f_ z_9fHJet3K;iBEQJs@mT`rfKG-S>JM~xLcNIyzVzo5VPWC0Q5g3={a8Q2d%$u#t0sa^p+~Y4-z9GUsJ~>#tIX=P=z1dV9gEw0vchzBe1=@6ptRKzo1={HIvt2 zA*6Oc2W`h@yF}oBe6v5@vq~hZsIxNZB9b*`;au@#-(KeDJA6BwZ!aM6Lu49)$sdt? zfc8#dpDK{eJ^EUFb`bo=lVKSo`_Q8nUd_Y*28Eg9gn}jmv2~a}Q}d{nBeOH_Mhsd) z`#tb8d#W3fdzZfFl4+AznaWb#3U*38zSMVo04BAZcR3Fk&IZYkx|kgQ>G2r|jDTb+ zOkVGA?om|IQdgg51YHlaTDbQ>LA%j3%bo6FY+1(Vud{LXXJ1RQIYyGyuFYPTPS|)W zyOiKBy?IqC1H6VOksulU^JKLoYyIN)Eq3_=58e7Zj*k>me#@6}f1V`xSLS9!J4o_%(8|!Qqvx~xGjJThb7;9MLZl9%w$A9D; zBI>NCL*|j&v3xLp_?xFJAnQGJN*15eJiiA2JqT-4Q!J}6vW_V^w;Mor8}9Vu!R2{m zW|*mo*_XYu!leVdEWwYgEQya0K$gw+Hb8DX`vv*c5ZpOkFjmE9na|-L+Yd-U?b5mtu zF~l#$lhzo1ENMO<`Cur#>HUFZPWI6w@NBH_@USrrW#{8gN0xw3@@!SXwdd*lvL44k zH<=M9vT#%L!hcBpES^thv8+121pWg=gZtsom{c2SSd@f`^yfO`XWEw%wNDg_QdKf* z-8(q?Z@d}l=uheNrI_>{YaYu+SCvD>E5Ygx{ zF}J7hCc!j&V3M~YS?P;mQri00&rGasiwWmq+HvrHgLT$8|8C6OODn0nyTI$@4ypmY zfAH#Pm@cN@nZ~2hdVL>iZFp67(r52ha^)n0;v9YMAkRYWt)@*C2(6%X5hG02Eqx`9 z?ZqcjiRu=pmC^PeY?sj{dnYf#+!fwcBUcx^THu(>2Gjcp)@)oDC3a|WXJItX3_5G1 z21EWv5jyo}Q{y`OT+0|SlVf5+VWipW>ObRmDoiHRMpj_#z_*WZr3+jA<9|k`K^XTb zT#hTqeuehm!Kf!`*On!=_}#{WN73LM5$}68ya&gg7ppF1~(SU6gJD< zn;qKh?)ZCo%`O&79_Al-_-RJdc=iN3W)!9?$jpaRuEU!_lUgFjZW1rA#Z)aw@ZLlUgmQstWfq0TVbQ|>bE1o??`#TD@J{tq}WwWA=YD2!aaj#op z_LeAsoCA2Oa)B+VVzdK-?V3f3RIC3*i= z!FwKNrPgN|vZcyFa&n#mxvUDGsl6?Hc@OP8?6)F3Dq&t*vyIHqkA?Uwe=A_%NzT0r zs;QEk>x75>tas#FwEL@|VGjsx!~e`Br;#Ho8e8LAXOaw|S?&rw3zs}aXie@)j$O~& zcJYwxQ-23DlBq2B$5UZ5`>I~%L4T6#UO27isj0i0in1$7c??Vsfki`c{3{aY3D!9* zkrnAX#i#?tt2gy`GMt*_ShcX5TsPPP}=_PkHhutH>z+@DaT7lq+?Y zl9wabAj$pPSUZ=Q_oPC5V<;t0$XA8LdS&)5&6y$|!a5rmQoE z(Jk4Ts)$NY`ArVsWI;_PgJ~pA?9O#uNfKmd)tBPg2wsq!zI9pZaF*G|uZNl`r;6Jc zdJH93M%lwf=;YJ(Wm9*R}8TN6{>)c+mO&0I5guxFBJlK6QICjRC8lk`y6 zw|T-b7T-2F^F-};6@_yp(?*Yd#K?olmmEdEi_IHgmU`U%`1ODOmLqo+2v%d+eYLrY zx6GwePbgd~F8$)H?5KNH#Ce-`N8`rxqHFFWNAo56x`-b?hSNvEB@rq6cXMB1mj3=E 
z|980eCk_5#lW}@j=!_QRI?r71C$0SAcN5F6r*qarq`pShEGF+)_P>wTTAm$de0&D- z@9H}{C$d+srf=5qlUZz*m5SMibsJCk(7R!I16H`5z7b@+xEVYK*-_58I5JpU`;=pLovPVHs8>Gf!u+ zY=j8&wYk81thhI(r*ifYFgTKxv$nMlY3e$Etrk*EIrEt@+P;CcALgssb>CYIxLLm? zu&h3L+Bt3^?&kXB2`z5M*O@qd8ScF7{r--;gEd}(UGh4vCUHmdwB`MCX!sOF9->1t zExdqhPnZj}qT6=IHsZzQu%!=av(7UaJr2@xvN^n07>^gwCYeSaa7B}>FoRfRgnsv= zN2>aia`xFG_&dCK8h@&5G)%p)R1*3D+YWY4R=!^2j5ElRyxQMjVs|Jf&)u@Zm1?e! zH|S?QoMN9=Jgu7$P=8iQw%7I$J^6G901jk{^Mbv=&6IBurA|AKapiP1hZQ`b+AUV{X_il zMV2UrlUe&%R?K*S)xx85dHBBUp1TEo$=e!3_VcQORV^2XL zRdQ-OyDpnmgZZ4o^;kvUUIDKq;%O1~EGwo|fOl0r-GkG)o^RxgI+&F`aEadMkUlwV z)@eVhe%}}4?$`S5{P_=fHgo=L(Kt^go`lY97P*v7-^PeK&bt);C$h%|oX9=Z%^125 zD{O;Yeg5#K*t@sSa~Efa*s>Q^WbOIkzVAhi$Fa4ErX6h7WxA7$3y>9+f2lKLaX(erQH+Cd8E%rLk zCadVFS#}3xm{bc!XXwQZIQ@EZQOy%HI zibiMP(qz`G28TzqGYe9wA2a|rdy1*Oobjsbq@7UBbH_ovX*k@o#`1JOn^`5f|MtX$ zL!fjKKRlm=$)lJtY(M(mOWwC}=5=2Ch0%F|gW6X1r|R73T3c8d615v+gl{~Nk4f4TD&k*Nri~mbp!eLz`U!C6c=FJzs|Ug)LF?_ zjRwO>l)DrUdf&M)E}u`Xeou3u~<7z*v&8HyJU!GUYpF7Z7((`&KPhpf;_^r_+1^L*+kI;D1X z7hEbqpNb+~qVXJfi&#_i(J^4S{h+07=wKZ~$dKjj?1%=D~4tHEpv0?!k znv*a!!?Tkl*M-a2tFefhX9}}@Qo}ddS(+=5^R#t#!D}mv1-oefqT}Zkc%BR@wT!KK zM)MH?1R~TJyxvuX)31Yu3kOwvga(-z!%bC51*a|li^}jW+W@1e~_LYEnEq{ zO5X|k&g$?V$bE|A8p9{6O_GbNjx*}`?kYZ1hP4Oc+e~)) zhhFvg_c|DyZ**TN$~7z8ze+97Eyj&o=v7;!{T$n>v*cg=KhLU%i$M4KwhkoTW%Wf` zdy_|w<%=h3Wj?u+1GW_3&XbXWc()(V$sN=O;XH|wPr6EsFQwOvOo+XFj z$J!p|tR6i0A7|d6mFy*6rtKZDDsJ4{w?!`_`Zn{3Jkc#d!_Bz96g#)VxVAC437i%< zezqulFX?iJq$C_V8lzvp_3qlMf+H1aFrCM}EQWO>a~Z9?Wpo=y-_?Z~aB2p9>C7oG z%o^JgM#DjRP7R!|M6=X*{N9Ll0K~g%dp`cAvTkzx^wwt;{+V^psiOA)$zKxrA7shw z-boevtWp}l8vSTG2h(mQ(;Y1Qlb%LmUZZ43j-pSlKLJ!Hp@i;tZij9rMldKN^ ziA9SN?o1FC=i*<)}FZO;9e!HK1?MLhP;I>xJJw&O4A=rSF zbsXJ_2L16OIhNm~WgEQAGyWs}PI6>co~CZ`a`AT>k1O*3(f20!UdY(4UguzJi zCQtC4n7s}@Jy}0{eCn~wAbj{y>-}MrYI$wNl2|X#G_rSekakn=YLMtWnU`cX+lyY` z^PoyRs~_LX{(w9=O{LKj$Tv#wS>2asIAaUm`eWg5c6Uw3f#VAjv}5m7_;?H^CyRko zNZW{B$;!IKyK21W6}UF?lL}jTF7zXxJI8rF@gZxKi;!uoINFYuskt_QJ=#EX19oi| z&-$}o^1Ho6=GW+uYm17`{1K*;$$F3Pn-xaP`q+I 
zwNS3NJB#A$$o!5O$D2H-8>F(fw$6-V7cI{7JKtx?b^J0`-9)QA8UK>r z8?|>0IhTm7Gud(@&DOEW$#goO#Mv=8htjNRJaMT}p}t;QL8!ZTZ)iL9 zNBTlC(K%20U#I;Ud}vW&R`sMg!W%}Gtla3y-re}k6tOM-lp5(z;rU~%aGLj7_k9w6 zJm{RP0v{#@C#nv{_*mpEsBILzGapVyij147MbL&UZM2!Xl9>n9rd#GC8H0wCEcby& zz^4YLRV7EBmE{U@9ghD%_IJtp2n+t>?7hW`YS^58n`ddMhyTN1bTQrLkiItQlV@X^ zSXZ~;Gaba9)I`tv`$T}Ohd7%2qiJ!3WAB96Ul{nEGn?_BfssZiSh%*}DsEFK~A1VScIq0pz%Y9k=oNV?^lOXG|`yk9|{;MY1}6koG>p z@>KLbhaPpr+tiSo0^zKR=;GV|ktNqO%XwaX`ltGGs+D}l<1P}Fd*f?I5{_Zp&U}4~ zapq#S_=a?yd38}3FJ-mOqQpwQyra)Qv@iirlF6qgnID4pDE8Qc|E5CJYV-f(wr_v~ zcWOQL!-hcXXk7VDBq&d#b9}NtA4&CyEv!AB9)IBYU5?6?T~?Q!1?Q}cJXv4OX|kWb zMv^L-aPz$6h=M;RBYHJwWk3q)b-t)R9YF z*Kds!@4)Sbg0))l?w;8BjrKa~VGou&vcV;_RS(gckLy!Q1%dCkAo-D zvacAD?7CUM`kGg{7g2%c`_Qhvzu!aVINo@mb2b_u`(gEA&fUO=y25e}uFfFMkMRAI zSG9-hbvW`7JP(D}?T}BL&$VvW?N%Y_(KxaST6-79$JVTRD9i>rvmYiD$C!$ua@O4^ zH&n6&jnGc6NZ(`Qjda+AIa!mFdzlq!dA*jCdp=oFZ(`frr>w!_&vo3&3@mCF*z`8u6otkLo{{}*$q&(!Y@5ZuVD_1Fvu-ojlKb+sgUD4E zA2Kc_o@T{oGUMf`Nh^B(rr(43VPzga6I0&6r({{XM(91ofC=+u6RY_;)-`j`liNl*!$k znqhUwI!>Ib1n){l^D^FlZ;YC4)IJU7gGK8H;BbSnU@?r!k^Whqe&=1TA=>C`w@mGM z{G*t0F;oxH?gnund1AjU957oX3X)l~1w-qVc#-_zhwep;P; z_p<(a?{08qKhF_Uj9?dGUsic;)z+W->IJb>ZJy3TpYWsPo5*g6hDO=zVV3n^`}p(+ zc4^7ud*~s#54ONDv&~o z|8c#44z)|^bSt)A;=W1Z<;yIWJkMiXNetk*J4t&uZe*2AYC2Y9zn`$9gxFSAe9ZID zYOLH4U-ohIO$FZ1gx~@2%@x)`d@OfvCpkN-DWCT<70!u7Ga%3$Hjf)UlJjvId5UPE z3eQLtxs%A&4?mJg{W0=ab5;e?=HB*!>|GNw^&oR5Zk}qSKg5|A(&a6Fw1)Iq`953R zDuX{+L;Hb#4`qorS?+H~e5$3k5N*xV28t8kn>7t(vnRE%Cuz&D@{6qXve;4yBW@CX zU!&{3@Z4Z54VpjD=UaVU0O9OLI2Z%6r?Uj^<=Q$3cA#0-73WDqYTb;*pHANXEQYCr5IP?(MtwTFhGBRO(IE%Hgy*4^Df#9?iAX0R;(OBz;dZt%Xf1tj~Ib z?$8>qmE5skWGsG-mk-^>GSun_Z&wgi}D`E#H^Pej|qp9?JQDG5Tml+Xd>T9ev|A-c^F1v z*?GA9609$T{t`bwVMZS;*-NiO@$+1~t;;XAK|I$oZOMO>_gP=s-Z6PDHjWooC&#=dY>o)_j3LwsAf(13wk*RhpK2b zb!wjEk*Ugi7O8J2M6Yj+1=~dPnV8#Hui1_GC3)||fe+|3h`+7Y%5z@t1%=|`MO`R8 z$$OJs_oTuol<~0~i)c;5wI3LSL^=@ZKame z;(|X-fkb0R^%1GQp+PFGXPtWX++_Zj%wsk5`WL&6#-z`AzSv{1x|5~}$zQ~rakRbCd@kcyC-08ti&a_SJ4aN-f@87sVEra4X2nZtew4-0 
zppMST7TN0Z?(drFhN;8GDM)mt+!KZh)-^LPsl zlRG6(AeuX}19pC7WXyAhc8(a$l8HM-3V3!lz9x5QZRbsP^jFxpmd$^~udJ@xApTsb zoz^07M!i(q%`=YWtkRw|mx@NYQ#;(d5n4`$hw|_l>$p_r&S=$@)spY!WDz;MOK$7f zup<_ihDP$|^vIf&Uto-uOdV8sLt}~2$CJZR_D@n4lnC! z9+GEb$FgxPx>ps|4ubks(9J#S0}3(tMChmPWU`o4hxbqXCHV~BHKII%Q{VC3tV>&n zRZX4OQ$KS>-#l;4+OP+;pP6}9&Sj_i5EAde)N**z171JV`8}WiOWNu*dfO2dvE*-K z@7**y(yS|2v)_^O7_FsdXR=2uCsla0Lx1h?w6`d|h_|+7sXMh&mIPTneymsTo8hEJ zYw}X$dC1rJTd^>gUB@n&d;Ew8Sxu1L3&nX)W*bN2%wc>Z70cU`Z~-6u(Ytk`Z89Sj z@mVb+^HG?1EaXz-Y4^_RX3oByjW#>}F?gpAUkl%+@@IC`+=g#$AU>UahI{oMS>K{# zOJ3Ad4-M$hkG@^>Se`^j<5Ft7{zcc!J+emg0B7XhKvvfc6Jv)uJ9}_5p6^fpcYOPs zV@?p)-sYJzjYEfvx_wEWY?*zuoEb=8nEcM`QX3{&-LmfVFR?LCGEdcNo~RGP=j@nB z?yKAV4u`{3gr~qjzib(K@1cLu}mBIp@;jPTsj(Kbe7l zWk%mZkJB+|mAIX!0ljcN-m#ee_4K&=iPzzD*vdaY#ko(}?_hFg{rp2b>`c=3z~2+K zGKA;%fbYL--hd=C*gfl3lLM7Wq|cg$tb|&MNi(o-ppoz7 zg8z@u;v$hHIao(y$5fwY2W_ql&LK_KHJmRRox(QreRhi1$3U_+iyp7lPB?g}Bf9yv zI7{c=(9NR4OkQ-i*jt-FXGLc#7VQFwNzU3={F$r&=3+~Bt(7#gd{T&Im-Fny^naK$dn>8UD@%t*DrGm+SIPs+8uEd-dSh5eMWH&(WO(m20LK;8m z_+&TEZlL6k9EnRqcw(LsKZ8-JXPE1i|KL+gJnF(qxsS7i*QCmBb^|=XyY~0p-=fJ( z{LdZT!`MEx6Yr*N6)|jtcimX*BO^f8#IM7;>_bU@sb}@n0#lc2VKNy?(LXb~!D$qIF(IJ}%@x6yYRnDtY4`_t`ftv`cPxlX>EjMI%(tyr-t)W-YdF@4@|?43cARNF|s^{%X#yO!B|{fXAQ zV1BCo<@$V`?<%>j%BPBl#4ueD{8Wmj6GadKagLIDeBK zrn7H7oIaoBcKBOLzr}b?5!l^E{#1C*TEK6NE(37(J3gNs2zRjZUOe_Nc)Uv6GkDf; z_)g(TCusL{5|43AW-yazdjU3G2dAof%y^v2n0fNPw{fx)gs#?e?ytS)yX<()v!U5A z?T=4wwAhlJQyabt?>*Be)%|U*pVUpf*f>3(m0D@13#^ZU=i81MhC4sOCC{L;w%{D+ z+|8FSqr=-fwV5(! 
z%-Zj(vAw)=hG9?k+wIF6KfaLLn>Riw!M*pk?k3eDL;R1SYL!`z3C2jE7Y=4w6- z7O!Ybmic@xm0t3MZ5*#YM5_b%#tN1`(RoK14;n%JS~B&7S3g|lP#pQ z8Ns9eE{0^rr0gZkUBX}IzYmjc5l&}4!L`P#jM)wK*9R+_@Tk-2{TGWQ1M)mG zrtEJ|{j^8Ou#Amw=OZmyzX>0>3Xa$K>@EyS{*C9MpBdb6d|LyCH{qx56sH_NTuMjauCVv z7x>T#zRx&(18Y{oxq4Vo$!z&_F*_N|(pw@*d%QTwnEitYwhcPhY3VXH`-V+3y8g}s z4)yKb;^9_kX6JEsvMqq`k9x~atUJV&(HL-+77oY2WEvmDqFwlD>Mw8Qpa1anaWI?f zlNYr)fTv8@p z3y{zC!!7hb*1N@+mo?8bq1cyPqi9vXa5eU!?|0DiL-OUm)o7eg-(|EuS6t|iS;=L) zg6`Q@`9@(z6hCRj>ju$hDs8jwD%k@!@reDjcoK^yzk2dMyo)_M>5}`9%`su5_Oep) zbNs)LwJK*?$6-#A$rPM?}4yP-K2+Um_2x9$XN-~vrj#1 z-X76w_BT%85xJtRrO(_|NS5l*wlZGOH&0XUW2-k;+qjNs{%hU+epP{&qG@yRmR`geN0^ zR!@GR)orvGL9gj-y@oe^O3vM@ck4l>uJ%$F^kJ553B%OXI?TJKelBINp_rDt;~A5R z@q!~+uRe~Xe)x-`R(2>3bY`wE9w+}Xe%h1xVrOO^P#rsm!+kGk{Yskbkf_D07QuQN z6f0v=KkT`m*2n38g>mL+ua~g@JuIJ@eRh6iRl|we+)R!dcsr9$?O5$DU;@mKOBp1_WCAO|%p`KWg822TQ$yI)Kbw8-J_q6$-)))J1KR&X~7_f_Oxhh#iyAotO z9^VpO59B$Q;Mr$1e~BlQ5N$_0^M=Aqvu#1vXX$h?+hlcQGA-uG{K?Lk;n?fRla(4r z;B)RGkLS5d3dkleMb=L?hE#tzPB+s!UmM?wvU$>SfOwH7fW7cxCidOKUxvSO2>!IcgvqkhrMA`>MjWRr7gtzZ1c=gbF0_=PlU3DE=j z@ms8Nt?%;0D7nnOgyW;QbeuD@?rI_nPs6-du(bl~+yapz;_3*;U1z@cB*yGw$(tN~ z0!yvO-|f7<6^uV-yPohp)C}loh;+lgtS#8CuUj!TwUBOP_3fldO`1K(mia9=t%m{CD%OdHD#HjAn}PvnH`&{{!m$b?SoAh^4k}Tjz#%WR#j&; zWp=QpwoIOjo=JnSHM`R%>a7=4Zt&aJSbZSOH|VtjZB~=AEPQ|E`8&kgdf1ZH+h@S< z5~EtK=##~8IzP$N((EP3O8CwCxPvdtb0?b=X}cN|ubV2*Q- zD@2)b?9fP$xkKE+_l> zW@Fju@(?LgJ>fF`o=BPNyw~wG`>|6YXBurfL*o=)np%Kwi(Rin@;jVsinC+sxkrI}sq)hd1M`e}nO1UlYdksf%&aIM%Jawi zB0w`SYO!y#G9%Z9Z_z)iy{|MnWjD`Uy*{ti-uPFW<#Km^A+{dIHV=uao%B1E{*|yI zchcWC#@s-{t$1IDKGoQ9jBio_cLbz5VZd!Hll3{d`*aD0l&5p{+hi}n&9Iw|(RCp6 zg0qew)rrna-o2CcomDcA!6Yja%Ft*BBv;aVDVbiS)i!PCS=3ZLPsG>k4LQR16Y!)9 z`G3WU$|6WAn#tj2MBgPI*YSOQ?M*dG9mK=x!+bMOIhyR*Q=TmF-;nkP9+E5%cakFe zYO}U#U&pL8YUHXrYekavYow!R^T3`ge!3{XH(k3Ft{zHab}L7uhEhhJrH+hWmuJ^b z{!S*zwFPaGA-|Ot_hW}-%EdCC16>!C2nqBH9>WQpTZ-@y*4n+IX-Ar`i|B<~@19DBo1%3l}+Wl@UMr{k!t!qFx;$ zw){)~?B~lK$Ja11jL0sOeZ-t@j!N9fe(=8wGrZJ~O=Xp2vdlB_4v=c>%>E+T@gmE! 
zBKrd8B|~sBJ|y#GXUL{fz-e^&qcE5G11>Xsma%;;znX-px6opsBl7HSe;R(oD*Kad zJYOD2_xl{3E0-%_P?VjYV7VrR+2Lk=cP8^a@V}Z23mkc}_mdoxOyMU(cqHv}-#$Ch z%Hha|WNV2d-LYvD>n?Wu2iSj({_<=!QSD0%@9$mmA19w%c72=*!CvIbJ;6iqq!}h; zMNo41wGm~KEA%mV%-7DdSaA(s*;|ic?-7o?3tx{Gt5VajFYf1g^c4EMgo)Y9l`8fN z#f9#o*8gDhu>L;B;bidbV8kxv+xD|8u}*4*wJ zeI~O)>K9bOxcALP8{pG|0xw$@W(NO|HG5CqW`Vg{DO#}6VVKa6tl9ftgm+}+eJYQ3 zg+#IxUng!KM4wb2$7i z3Ybp7poeH&OcZ#*Hy`0gUD~bHcdqo4fhu==%HdpnEQ!P;^pLd_yS#hEamVxZC!lgQ zU+h8O-$>R$bV)o(7J~!$-y4MyC-!a75 z+TK_E-s0Pu^m&F)UL$JFpkpk(r*Z#AM_pgI26~p<_q$v0Cr13l=8v&#Nm8t2wP13a z*7v4;sz^`Zud`U8G90qn;R?t939offepQrdCeCcfl6&Cw4GDf=@9YD5*LTTFG0OPc z*#Fj!Uqy@Ze%5NGJtTYciN@sW#P1&G&#%#N6h0*T&rq1(4wqACmKrV#aOy((oxysk zFV)xQ$pD=yhZD$lHD*=!T?NOyD~?RU{p`^`&fj`Qv6Za53VxYW4E0G1M=pf*5$u&5 z%JX44(mb^lHh)3JtXO@M2knEKxAXJY$g0`1;K zw1ZW5@s?V=<~KH<0E?24T1MKwtl5FCS#f){F?>JmW>5YD`0xO^4rY}Zq%7^M#cX_y z*mx;>)L@b6{;ng-o%osxIIoK*&CLXU zTY2xdiOWXxJc}F7hI8q1DOqbcz6VT7Y42qHAB#EL#L@%dxJJa^O6xQ5tv!~Ep?4>| zx)(5uKf^znLWYqs)%da#Y$_&|c&p*u|O}QQ|_U6rA-gsl006s^_)H3w?JjEcs|?M zSD7YR^Rj>(L&$KLzol6HL(*3fqqD*=E7YKCTPA-9G?)6-Rw>?xdph|D7~=y{UZvshcH6|#g^9fJXnlDZc^%Ltp=+^PTlKKmw5)KI9Ty4q0ZWcAnW z7(LX{xf`3Ti_c?g>RGJh+nIr94RU#!juroZXP0hJ{uAPD@aPDAPZF6@#pP@$Jw}F& zwA-KWf2jY)KHm(jPxSN_>2Gyzd!Mh>%Ru(O6UNC;S=F3mKipUgv+=xg3ZMNK3y;8v zRz67{%zGT4ofI8t*9a#wb9)B|_w#)fOS;*LIDtM45wJ%(;sD>xX2zp>y1Rw(b9Z3nw2r*_tu zXD7!Xn)ZTKR(Grr7x(f%d$(Q_5wgmwCnS$4*tv)HuQwO%gI(FZl69#ElIlW?JBf#G z@HcrxlGFNpDEB5ostKHi)04H7>)k}aDwt9QYvww>p`*5tA0fTB=Kq;*QVk_i!9IUF%mvH}@BY<5vw>ly*e+2h@c3X>5HNdo*&? zaqN(1&%1Ygr8eZQC{rBAb9cD9-?O!! 
z$W;q>hWT5E4<&y>026hxf4>HSAWQd_UHZe zjZ-h-_ZmMF#El{P&F-wna5qtS8$7bUFgD#Tu1?ic_T=R1E<0XMAxC}3rsjJU7!HMD zuA1+~udJZX4)d)(+v=>WXV?#K@}%T$cKr$imTO@$URU998N(lhSynK1Eifr7@EYLX z(a`!pFHd0hDt&w{uKdEUljktkmPL#rxu=meJ-N2n8>3RaV5wtjVL{eRo&>Ah9h@vm z9wj~m>m|G|Pp+o%x-rhn9fi`o=uR4@kE`gP9D)P!e}KA*;83J*_tZRAgeGN(Yz14q=H?lrdQ@ogGiBQm&w6iPfK6WF`l1w z8bu0`xE2fKj$jeUtYWv+BY8w0XKDLX7R(5CEsuIjyh)vkXUrZ4c%N!*Jy~P-E~VU| z&RihlZxbG!ylXFuhqaor(Vye548OyNH6xrk?A?`^E8821^a|Pe`+pe3f{_9}qhL9|!Y-HGFY1 z^xpDrhv;x0ys~;Z`>tLg;{d!^#d6o_cLW~2O{0ggWjIM1i7g30yPFpE*{(5<>_o0d zv8#gqQ*$yoL0S}ISat?}3fVK9xd}JcLn*syQWNq!*witGKOt`aj2R^{AQfI-#=few z+RnDq@obyl?AM65eL?CGEM1J9a@VJZczuoeMJY$O=WFMQ1`CYDHAJp>RIVeAch-EG z6vef3=~CNo2R-HN)CEm$@jN-)SFgpf>Ut5h11=7OYJ20;UYL@YnR&@tetsl9n$dbL zL~|WCP8(B1q^xXz2hO?9>4e9r!Emp4hm&n8R8p6^B{uAf?c=m{AWy!DE@eci+xt@~2i zoIvg(T6~qI#!9zA zD&;XNS(fK%DfyK4foWF%rIO}F+Im&nm0(?7|H*bdkL|iU{y`pFf}VL+cB5}=lI{(b zSgHLySFTUeQ9P#kRxR%yC zD?91Bk}`LK9?|XsR&P(AtP9N^rkea_4m9T*C$j>2EPc;slk8bpXl~I0(~_5`I9}b% zzAg1pmzB?hKp%SUz6#8m!UKIamrjdl^DVUYbB$L#jn?)>2P)ctf0C1NGF<{0EO)W5JyTY}WC)i(kCk-rkcTh4Ugo`Zis)i^s-euPw<0} zSf?228+v~qpB>LLbEREGOsvALUCD5^=+@EsA3Ea)NS!RQ#pGmx$lJ-*v z+oD>^-pA_;aiyhR(_T9^+2WYYl?Tvn5pHE4Vlfi0!kDZUO-9PTklY}O=GyXXyc*(f z_D>wk+WTNbYtn8Z=_Te~H61yV26-BIHruBTN+Vp_VoE}kj$*OF81%<<(|=NDa)T(uzYj6=P6j$<|h;3 z6A+kCutjzVzeb1P{ub=27zy)4GpmSSqvfG+Pu`%uyfhg(&&G!adP?1bY5IJo5V4X) zBzqv%{IEGf=?SCH#!KCqEJ7ds-?K9$Mck}tI2_dT4u)UlIz!0giLu0Iiy)V$oy%EfHRLup=R=ZTY(#(9$eLUYUy)ch&e%ATz=yxCfKTM9T zu=thc2l##-^at?EaYov#&%PFC+lUX1@p>daR1hO_HzNBqZxZK|tD-zvN|7_y$I0h@ zp}4XYhRN7C#ZPIiCmx^bXEJHtCuLT23{E@$VA7{7`z`&pJLW6zvbP~wSG#NJGLo;rrVk-E zOh27qJc=yU$P|C+$wKe2)C$b|LVs7&XcS(YOpC+llANwt%keFS-Yh<)g5gDek^yoF zmbK$g*++1pqmsKWcZ*-gom(7nBYAFi=D+aXqVK((eJVe1OPY%C-ov?Bms*lHUEqus zxZhbDJu&(hJe=*^{&Z~u#gUL{Wz_h(uH-|cZzr=; z@?9reK_3V{&x-%?pFizH3SHHU=N0wX2MD8HKi!eU@*xh))%KcCbED9jF^AGFE4YTXJYlB*SloxSwnD zGIali9iD{uI6YJ%U9yf2WPx`eKHg`U6Fy-S{jCslk~Og_t0bq-(~!Ixe=hRbT=*s5 
zUus#_b#%tiHq9GgS3eIR&(XxUU-pX+ZvbaKZnna8KK4+&h{ck9qOu>is1!O+eN;N!6MYg-KKXt?MJpENiW*7NL zdfj1s*~wO^=)TCeuVeKyG|BVIO050`9^K&kWPv${pQWPvlCmAU_D*nxli z%Z6*s6DoO~tn8&RuPUT&Az!k`4P~>6d~GlOnfpVjl(dZ-pi&+kmzry;CLOfkSie$5? zYL-)i1k=n4^E|AH7@T_HZ}QMw``kyCbDc4VXFW&zzs1_=WID|;%lP3T^ohr8(c8UR zy4%?Dh`#HJM4!X&MfiSd?8(u$!T4bOtqGaRg_*_v&S-AToiA2(Ai*Tqrs_;)r<-_x zYPhFr@fF%00PQV>^{irSX9m)r-(@{{GNp8L{vZ5psMgQH zwal<`6#kt*kn108K-VkerH%(!`7eC7!xSO7987rQKMKY}ohsYFOI?#8Q z;&h_mRiv$LHo8jRsnC>X3025c1hbzqH`oE+s?Pm^hTpR9q4fTWe_aW|!7O(K?|p+- zLvSZ^mE!DG*A><*-1^zKN9ds|ixk&lR=iJwWcD9a;v=tk^&tzdF#gV@!wj}aR*vd? zYP0{zQdxslA7Q=J9=Qj8*^fO&+Ydsn4?K?GjTgbMj<$cNOBnj4dC+;r_a^YY2oKkY zA*mXhjG})+p&#p>NT*aEUWzjlwVLN6xdIvjg&kOPKg-oL8kc3!>^8d%d(Puu<9P5Z z;?yebjo0daUZ*D0E9`W%7M?V(O?JIpMNEKnTi718`z z<`?Gw4|)H-SQ}Qa!;p9U9SpNv<#lvi?r7I#kNs${4nvM3+3QeiCH}UD#?xY9W9(c@ zqdUmHqcEPnh8@eDo&BkQJ9a|B&P6b!y8%P>*Y>tt_Ygb>l#Po4%|I@+nonC_o%Wv%tHJ;{=FbSYVma?TQ;<%vjNy8j<pQ_~&WIb8=HuP4_xa4J0TwB~9t=0+J%W0YIySCT`}7s-QQsBZ;Mv$Yv|llz5B1^~L)^R%;xN*ILd8uZNXyS!>p^ll$Wz z{^5SlPxAf{Z}@9L(QYigJWFjvtC!PKWn9aZMiQNHlr_ezr*~jkpM#^s=>NHpY*RAG3UczKCR$T-T7I8o-of=CQcYZw)qWf675eZH z{m~ms$!@V(MkzZ%Wa3|~WlPyfl-)7?c#cEeDMMp@)b#=k9RUY_z`!guO+elDeBs%F zH;jJ!!{G|j&8dRLyyhYl-$QzdFZ(;G+++lMK;bP$so`bX&3^1;lZ*$siVqsbuD9p> z+N$NJ3i;*S%w>?+oG#a)v zPVPVBW*uB^AC+!U`>AAoJlS0h>3=BKlQiC=!II>=98Z>0Nc1GW zHPQAz!P96qwi-qM1<|*8?kCcEsThgA!z=A%#B>C0jNrLGR#Q_WrW$(go_eDbQS~nw zM0OL5EAC-$f1oqCEKBdN@W$7$ki`&O(e+`jv>}VHdCg>(xs3a?_R@m=HiyV~wIDSDoqnM~}LLyI2$MAF`< zhUrj}>xrb6^T^3p(VXP7M>UZ&&LY8P+FAk6d!l^{;1LsymRy4y@)GROZa(g`|>mP#@}H&rtWJzR!Wks|pYI6nnkPsAOMt#fsJx z-ukomAMd;`-6#6b!^Ie~1?}HJf_+IYr%m^A zJ*!|@=U=O~>V;?TKx_ND|GYYH@hQH&G{U}}3GoEU7Q*uag( z78k+Sd=@$bePj5vT}e1AJm0x80ItvS=_Q_G4ZKdJv3uBLcDq$@C;jJG942yHHQZ(1 z_&}VWj=sFvmE%+YgO5HmAM1-1*M!UwD9g&oEWdMUp*Gw<4Fyf~$~o_{(s<=usL$Ts zM3^f{TYvEz50XdDTtvBB=7)QxYlU&H_VCGS+j)sIPP+uRuYwOW}A@9?b>d7Zv4$*m+J)R|Ppx@JA zE32Ao^ccsmk0nZHoPRINUgXXPXiEm#NbLqaaZdSk@?<-AbH?Z=PxK_~>`Ho4so4;a zOhk#Xeyt}~B%0PAQ- 
zh7(b}O$|AheFN!a9_ovNw&0~T?!I(Au_n9X^Cw(p6*WC+C$*L$wPn0c;?XAG`v!bX zN9WZrvVa!%a&Iopo{!2E_|56Bt7&2suauk&-D$NF8GcKb88>B3s|{)nA*;l(>P%Km z)VF>8Ds`@5J@3JPV{JXh_K#97u_|7{citS&^7{$D-opJh=zK;GzDyn2(@~ENKhA3Q z@te3miQ}9z&>c{lJof`UQH!Q`KygP@RQ2?9&t{iE;@}NMVLcXqm2#W3QccK%b zeDHbr+{;$>^Z8=^RiZ)W9O1TNBz>qpBbNLSFEx%Ax`JdA733|l zcuU>${J)L%hPZ!^-g^fnN8)4~kI;cnOiZ~`==K0u%t`y~Aj#SCmtnC7`m?7jd8gOWnv)Vaf8U0zwy61dsLy%ghG;*4-i|j?*@Jc;@~s1IvqN+rW$wXccA)PF z;peM!f<9t1o&L;9YqF$kAo*=nyw2y}iN?>-Q-T(<;!uefNydVl0jTe}4|t@Up}d{- z917#-`=7Vw%rv*>&yGexdnNOZe7AW7H5A8&&GkLdW z(3Qan9=DKG-bNFNV+-Fi z+uiC$vWW^@9@>{{w>0_QpifF<*a7UKk-p&+_i~nfAi2%=Iq&MHKxBC~J4A{5N%b?b z%G=>!Tx=_B?MyW^)yDlwB%5Z|v%mCo&K0#JiNi=L+2Pjm^;eV2ICb`S%|`8lFlYm{>9gtMkZWW+VF$v)8W%o!4S5nNM5f z>14IeJXhYhv%M|0q#78C@U3PJnWD}b_n^-Y<1D*#N*@L`_AO2kJ&3Vns+0>s} z_``UjyVib!(``oW8+gr;@V2Y={w1gB?sp=epHSLT8#&Kh0sTjiO>$Dtr?qR9Jyy?^ z(NSp<&FRE(`sb(dm-X!KI6O`H#C%-^ze8zeHpFF@-2?EF_;J6Ga%-M#rGB^~P99Pw za~!ABSkCW1$DTLwvax~JN$EE@S#A`zK>N??f3rGJp0{~GD{V-8KRzsL6WP;}m7Zac zFaxDaynUyq-<91~;rU*Mr=`4H`lPM;!Q_Z-K!2CBnVm^IG0U!Dxh+WGO#i<_L08nD zkFI2u+Zkex#Z@=Foe4P;`GigM-dvfktfCD54`M$*LeK=icj+@f#N|cW$*HK>=-;5+ zP-s7cmYR~oT2Fn9r+Rd`31{z+QFg%Z;7WUb=K^Dt#E80_w=0W^tLf+xbS2Z`19X|a zGKn0%jh8B+^_k@H5g8`K<{ahrW=H+h*pz2(>+WaleE>9cL}%jvzR32n@8b^G{F%l5-@lP?Io9<%tr1d8^&W~(@m+C zYR;MDy!U5k(M#H%0nOu~Fe?W;lE}MSP6nBk<$r3ub_90392TggKn5g_B zb)ExvV|l(g`j*6R`NR0?eO@c`n>ATVPIi~@H|x_iwU>RmANza<^{Ps!Yk+DEqw3Mgck`S1YN6z?1 zhv(`Kg5KI{eP2t-6Y@SEH_UVU796I%RV+CxwaG)W2kiHx%ft~Fr_HRgor3S|nB5bV zl~MSp(MuT=X6?2ln@uLfol(7G(dN~~-+Ne3R{EaDZDsOGJd=CKDzUXrLfzp|mE3i; zX=No%BokCVKg&ywReRQfYM^6(-|k}dIhA`8sqII$={cvegn#uFW4xQxf$xa!ae(*|kTx%3>5@*g&K366Ufls$y9ydkVY|L004nn_)?<_y&juyZncTd<5WBykSi z)raa1WV412=jtT}sVA-9pw}tMR?7Ll6h{ZsUe36jOUGNtsERtfdU}lB;R8szUccK; z{ZmlZK+Ril*g)MmiT^%dl<0aJ_^yv}lk=K;c?)U^0f}AkFnzS6seKDO$g0;uHk)%) zv(Pn%%vzKG1a+UO)!v@|ftGvn0Ph%!>`05fAo)|?c?0h^6IIElF`6gnLWVs^?^bj? zZ+uWf4S9c`Oom6Q=V$lc*HR}sYDFv0Kt*Z3Fn`uRWjjmvU%2MorHtxe5B8Qa%}EhQmQ`SZYfz-+6L1i!Ti! 
ze<(9i4fC{eHb0bjQF#YHK*^?RKOHw0!$ODl?eyqlXyp~H_H#Y+2OH?Jh8mNdD5o`& z*D#rBGrN=+Yl&!ZE)RJPUJ`M$5s!ZkNxXuC?4iy|R(jAw>F+`aO!UQj>FPGM-@*^= zp`}l7)eFtZXtxzcX0x^5SmX{gRY%W}XhkPzZy%^`^huMxOQN!mENzgp}M3}i-zXVNiQ5F=i5%Gegc&jp)T`F zIWbuqat`JVxsaA-@$HoCQ^h z7?kK)8%X&{fk#`BLAHc>B*M6 z-S1CB;q7I;>vbs49?!puQd{}L9qGIl^qotd?Tn9pH$Jsv3tS~`dUYs#Q>zt8copd-f>R?h%scF_`mRJk&Ku=y7M2L$ zGu3uDAKsf67{khMG9GEdTK++QO`dPJV$Adko6A~ovI;!KLUzJi^7B5Ai)v`g>Gk=Z zn~TCr;cYXCWM5R)zOyoy+4tmSf0s_Oi+;5K?Vzd#i%WlhJk3qw*E;w=j1A8wt5#5R zGw!kiSy>I6aD6aL~3Z2Kf2plYn$mh9^znEE5X8g`kRcR^-%Q(J~LXmf&EW# z?HJn4+1@7j%Q@-CdFoSnvRKXj{8F+=wIJ2ZGMCW@Bue`~By*gassd$6J3pkgWGxw{)N)v_4Ilr-5^K@*Ut}?gG!r*-52$*cr1ylm zuh4iMinl3oCfjYp-ZSU)mNLH>af~r0{gN!NC4q~0klnQKijhZlizokg@_m*v25n>X z@v8E9mw6wjSEKVxa{3X+C$r>kD9svXviTeXx$ly}vuf)HTgO0aExbGdk6%L0o9y@) z7_5eaNFw`NM$=n&_YY9dy|g$GCyA3dSbNLpa2egchvq|E{ROHgvhl=<+<~vEr2gTB zkK3DPYfZ*OX*;n9`>Ow9TvwpUoS#n|jYNO9FISJAJ@A=T>dI@pjM|)tZ9?xA`0d~| zGyE<6w)L%}vOA&sB=sb#N#;#@pu7PJ8nWA!EGd~WP9WVabT^DWd`?Og(6W_Xo~mSh zo}&VO=c}V5ZZrOWm6g>nPEP-M1=~5Mn46tW=6UzYKA!e^sOoA;ev2K*;ZU;KpoXn% zt{l3nprf6;Px+K{HNAHKfrfA6O(hcNRovvqTJpDTCu0)xV1#xnsHYTKhO>eX3yLZhW2MY1e84m8i_87sd!)8*!R5ZR zaXQ~|G%O|CUS`Wzu+Giuna&Cyg`jet4(egOYJqrnA$t)SwGf$q8<;i&blZWfg zXHQqdm+UcfjEQd>`f`5oLE3Fgds)rRSz`uGtrP=@as2RGS0m_9TS=#S?I z+M^_I&Y$5kvz6Jy@^A5NclbYo&F4H}GE9|L?|Pj7si)|u)SkX)Khja;oc!I1_VYD= z^eAn9O5Vxa)En}~D0MBlbn(^(i?Pw{Pds6+fvRLbdYwk8n!cE-`Bmw$Ioe{ z(zuySx@VL0AvBzm7%kOVk|w?*jUC9V1j)^#uk864MMlY6bqfjJst-wQ@$417md`lD z&qQ2J(PBqd)tg_;+{GmF-kaVMx2Pwdo``wX@O0#V`?*1=%GsBWkg$-%-ok5Hwf9kT zZPI^;6h=T#cc@s7x}DK{8#{anAJvS$RHmHFlu+k3pK_kN0ZtP8@EaU9q4@*h<|WjZC8O8aezH?+qJv~)%IS&i zF*_!pI4e%oVc=-JUQTu;zSwvEH$is=Pi4e$BOQH+@aX#Lf6-7fjNGAgcP%FC#|AP<2KSoi7_7t%Fq0icIR$$+u6ILA3EsL43|4}S z?E9(>X=BuXKXiOv*j{s5yclg46r+Rm1=%fll>TrH>-b6Oek7MOF&Ak)`R-rVTkS?# zt4J?dRWi?+XjM58od}E1)95_<>F;|YMrEv!cgO6FNK}?dbXJ~^%1Z6SYEOQRJ@s+f z#d(&xs~ZGgN73s zYNcK=IUIVT_bD>%?k(U>J}V~^Gb8j2BsEo6X4kIbBcAruOU5{#xU&p@TX3IUyNP7c 
zLtmdvsnzK$JIN0umlACLA%8dHDeoyI@tobXpYXfcCwmR9E{4!V`$>+08v?;I$+N&WliS+3_eG?iOp;aL@pjhs6nO<3iNo2YkWo3@ z=3M9_Y8nbH3*qBIa_>b3$zk#@pM8gV|3~`|FFNi)`&+5FUitr^BqOaeszU znWM{eeUcL#V2JW4z*Pa*wH#QI5%mD45k<>;7SgZAg&fHE5|FPhlWe4;ItpNu=KdIn zIGHiD;)vV8PNo<#yv&g7EZt={CfADtx>!edDfiTesSaXdgAHS@2GC&~!YC#%2HbO$ zVjQEH%GpfU`P>eucXF@yk?Z}O-~*h~gA69As~$JxNC=iB@>#}QUt&D3aQG|0JZWtb z=IkBD{Vr2@AHqNM^71)jN+|p<7-hC!e_goApSk1&4Ec@Y{)36;@b`9oB`iwx`G5ZT z-`a{k|DW`=`+w^zx{9`Tq=D_WvHi&*?r*PQclSyc+S-ZMPN1za^mQe@B@yq9G*y|V zqO)kL9KB_s;zxRmw$?()o^;m;k2$azn~B!`x6c0G)YypsA9ePB)!6@?zCI$ygkp=v zVtKK<5<1QtX-eR(@&--A;dM3#nu&oNvdxlUUIsHo(i~1Zx|1;ZnU+Y%R|Bv5{~Usr{t(`YQOI6a~60@Rd;@5&H4Px}dx)`FKiK|MMA z|CcMLvXq1z&6LFz>>>w>YQyI`6eKOe9EkW0QfF!}8}aw`e2#=(Wg!2ARwlBi^U>ad z*2lAw5-{CJm+`Om6K17L(QXc!41kq*_NF@0x{#CugJsC*MySu0i);Z-*n&(qCyhmS zHkg!tS^CRXn|*Y!?R@?fcS&s1nzR}gl52tUMrf(b51z=H6Ra**j#SfV!<({NK z%KX5GKD|JC_qvuO^4ArTNdl=HD9pjdY`(a|Gu4#20XIopn4lipCl8KcXBj?D@Hf+i z;r%*K%%lAoT0M@xS>^uQZ2lSQ+yO9P~31blIt|G65 zY^|%7-ssDV?B0HNbtQ>djwYYF@D~1)yzD78x6uA>Y&#re%kdJ!({P^c;OWpK>ump; zMrT)g_8sUep_LuUdn8K#EhahkKw(~54)XU#xXIy-1m@Zoa(*SvU2%UYJ?)M1Og-+X zy-eeGq?g;22=Wpvb*g)ZL-B079ZByA3pIn5=NeRI@g@@_Gf5`FLz8YjLHg_K1)92- zm)Z^m1vw5~mvv+ccXm;4j-QrtHQ|)!Xypy94CQ?vP+K<9?L|6iy^$vpB4LVanRpGl zHhJz>`kF#6Cn}$%*}NEjO$yb?Bs$-|8TmHeJxg{w;WfeIGx<7$z7z6hCsfAPgZ$+* z@jy{BA+x*lr`cvQ&l8zq3|6xtxh0w=tMPh0UIvWWI-L;l^_9zZ|NBvKkGprPbt2?s z>h1>D!dqQjK0=qrp!Bu(~F>Ke#c<>!r}C&*!=Y#ePyKSS~QBmV1ZH}bdu zeOa35rkwjnJDkt+k9QB9j5Ah!^gO-q=z`Gc$0VL|r9#-E6H%@XP^)mn^Tn1YY{o zvlYqqQlq=1PngKo?=_Bin~ulhsjkmGc<3DSIF&v#S+*n1H&<@9nvzx_sOpch-lSDZ z3rU`Ox~ETvgK65C%X3vWobLlI^PppvamC$g3a!~7RsxMJNi3Udvdv;Hgik84m4)iB z&~OwPrmpgOpQO>=(F;*q^ybLgT=Wek!8-mX^lvt#Cxk{F9MtgCM0Fpew%|GZ1pSqj zoQ0kQKFVwEC_P#y`uqYTOCB z+2&mx7FwvO44b(^E0w*hJ%PKo3!Cr6+caZk_t0Uyc($QVR{n4OTDA{_t`Y^(C%TuQ z;HT2k6iphUm+pP!e9X;eQsez-#Drv{CkK|1Wj7k;#&t+6e-mF-o5c`(+Bx zd5}7X>0h!XIR{so7u2`#WCa|KCz%`r+*$t`kJ=qhvk12iFWr?Gq}+};o2boqP?JTp z1g5*EK-)J;Wm2sPO^ieDF`mq#$PTcuM!O~8_)XgQgGMW(w2b$G9OK;;%^A5TP*i2e 
z+#5~Vh}Is5UEnXH`zw^(Kx&I1;uyLass1d`k0-kXMVVcUz>>N>LBX;Zm94|Wv_C|t zq3*v$k}dd}1ni$czBd|qA4G?5kko(3;SlnB$NR@E`qpKhpY5$_jS<`5IDbrUzY2AC z7)P7|?~AlL3tiuu04Q%ve+Yc+?y0X>&P^sct}jL>r+JxAV(>F*;2R!2;X^b5Z_^VY(O^`u=pgA z`VqJP;StBVpT&wFX)jy3{zbtfeA6j7tWI_d_~4`?YK`CL;I$v=WUI;Rh5w$R*FVg2 z)1Wlrt80?`K-_2ZJ>9Ff8aAA>>fe;Sal}s&SS>1zw$rb+?_@;H zp1cBLP9o=|gK9%p(ZcoQH?=_j9{g7h13!hzP5A6b<~`M#(6#H8Jz0An!Py}Hb0BFU z$(*g78`PUI!qcd0%-ak`K_#fK4tGaG$bm5UHf;P2BhlVDwD_==hO)4|VJ8bWjcNE= zwr~s$ZN+O2*-qnu6V#_K&s&?1$gzwIXmcrqe?$L!squ=vve?qt11+j0E>p*C{7f%T}m@qR>vciGr+JeUfRlX12nrboIk()u+NPSgZf9Lw! zh?EjE;y7AO@0cK9Id;6O@k$e)@8=mysd)#w+2m6rZwLt)z0G(#>2POzCT~dTITBR2 zs?R6k;U|baQ*H0iT9Tz!^gTYgomwmNi}AyiaKD8m#&@hNMgtkC?~l)W==po4;-zY6 zy#-u!#91ewla#s^IjqBD4)!Fxc_%te@~ZO6yaO-uNb?}F@1*XMN+v*JFMm%`x{o?{ zR$FgeY@p+erz`TnyXvbCQ6_VH*$$E|gWa@N&y%~-+v~-=zT2Q+D_P{Nge>JkK`i~GmZ{`NQdyh$c3 z&0^?GV9e9hzd$Yfx!(wK@;x&vc~?&8o)-VzSIwry)3h-J3bTFj1-6lQpCq-as;2Ky zmGNS>o-QTxjBnDbWR#mQ|M#Oim`CLUf z6K?#yV!FOHTb`@_KlzslB#=q;26Vc8Z2mq8Ch1;Zb~GB2&tY|wd^@x7*)5gr1_fuJ zXg>5UQ0E^w8|eQoFmk15bHwf+8tDaVwUmDp73ZV*B9clkk?pNXR+40ynY7;@R)6LT ze)sGibeDeRACk^iz-16|4T(Ntjvz-6+iCX%{^WO<>P1_Zc#Fz%%^bezEjYZ52Pn%& zB<0D=_#UZjp8g3 zbylJm4DP8nI1pw|H41B|H~tpd6O5rd|2suJZ?maMr1u`YyvquzlHOd;HuuJoH>D(B z>kF?NaF&hFNi*~rU%P$t{(Rm(_}{EYe+!0g<0BGm{a{j?iiW#9)wpPDU*EHtW(cWV z#!|W~n^ebDc<44Lo5se@BgJocgO6CweDo#MSbF1rO2kr*_N@!gak}4M^!VdZv5?)a zA-kQlnj{9hkWMKr9L`4^>`f)1h7*MNVq>@a$@NjV-$mVz@gg1k7IzEuV5MDo&??oAA?58`t6YTzy(kz32x#%k94R$6>+^CA8x{$|Z1e z7P9N`V~>Ttb9v+8G1ib! 
z=}(|7+cFOEOpZ9$qnT_q*g)C|mU#vq*TdFWs2Hr&Lh`yp3t#XRCG@8Wd|F9cqj{&_ zA+HLXZ>g?JP<#LzT#KGP@x4XKq)thh>S(=A|2@Q~o0a;8ELPHRHd|$R`T-QTPd8(aB?l(S_pXxJ#VVp4sB4>H_ zSo&*?uZ?=(Ym~^C;1XK6fj3By!RciFC+mHMB-875BCjnld>+a+;P*OpJVsal8eh)F z@x8SAtMB!p_(~efi0fx%e?;Zt!jsjYhsny!#ZP6j$u{&d^zj~UcS7wI(2zIClZ?tT z3dp1 z<2$4=NB>Y>Ynj#SOr96R!CojxaNRYuSq?1;aGII5%#6jSCY|7e{vL_K-uN4*^dK5| z05@4AFGcFx-zPVqCbOSqNU=9xk%Z(Is<{v89jS(zEN2?e&=ZmmRRo~HB4VisA&);mSs}2bnr>s}wqk4ejJpCp;=Ga^v8u^4>{NU+2WOF6D zlCJMc(l}a6Ik2Di`!7i2R3ncZusfMtbC^G=NE-Y6H}3PUokO-q(@g@@u~R*o>~SW$FTaB zNICD+x4QRlF;4x7SI=8sMx(tT`~_{+CymSjbzzk+kZ(79uF+m)7L#Pw(^yxw?SBJ# zRq^`*Z&R9A_?so?n0O9-+~{t1Zxpw%tlI20YjR2avyk*=qcK|{|3LRfEkCW#ZNm~z z(n2;!mx0TT{KR;OIRQqt(q;uTmgigcGvbcFxt`pP*VdtAkuWf|wYZtA6O8yW)Gono zX5WY4V+7oOO)42R{tO9cvfT+Z`MRGR`}{9lJxZGE>2U&_eW2|-{N9PD2iaRTh7X5@ zy_B5`A?Km#Tl{S;-q1eP;`bzTE6Rtv+e^z2x?fH2cRqhzJf z`y8D8Rf${Ba2Nfb?dqQTrS>c(Z}cPB%J^p+W#7SQ;qXU6-!n;L)8T1EHo zBjXp{&$~bynomN*c)+1(xV>m`oK_#j;j5^=h^#&#i{ALmdr(sDh0}&S_Dm?dgS}@n zSoRd$Z>*4QkJ%iV?MMlLF@>e(fW`-U#9rijH<>38^?Y=9Q8Gt!GV^&D9&+G%CjMuj zsukIur>%MxyMG7Ho+rJ` z3RTvNF6RxdP;W*wGd)>WD^s;lhizZP+b`2ng8FyHw`v#kHiOSr11^v-1#)RpIac zK-GV66h5CJ(M`C%v*_Vwpz>_3w1vo9)V&RNt?4&MjFXnUwHBTyt=C9BTQ_qUZ#>%X zLDgFNo&~`lsrylUB#2pkpEDnk0N&O8Bn;{!>@&yFHoDsc)gKs_{h-XTZ0HkQW;HCm zV$$D6j}P-{6hubH^*sQ)Sp7zsbDkKS)!$zAUZ*YI`LhdA+;J*>~S>sIFo1m(rT9W1`N&NEuoq2|u zaF>usGn8D4x6Iam#>Z|?_LigI^Pu)%x~)Ns2Xf% z_3k>nWX*O7WR)c494*i4bM`XiEik(wGW)dss8x17WOd>rmgD?MK>g(&BlrxUm@1-=Y_vtY`!|1J(T&v^N-Woty+1NzE=Br3m@k}Z#MNU*V0^my9rKbD)A*rJ*Vv) zD>{;;wdbvKB>8I^Jym^cJ@YY5W}DwxtTu1Rc>~z~-du$&`#{FKtgkuhf5v@u(GIS@ zQeQpvEO2)K1azXQk7zH)L(BP1vb}9EHecP#)R(vX{A8y|7oI$u_LID*Ga7zGT|#>{ zh3=&g+Fq-90i#%>fSu?&orP#`cr=DD)?K#de36p zzth|Z_4Vf8`k{CSvp?Aqe>(K!AVAuFOzVfJKcmj(Xnl(A&L-8SJk+J$tY3t~GW75T z-F<q3EJBf-`A4m z44PU%9!K&a86%xcS{V^#4e?zXO7e*jJY#~0SE98f8=FH{i~XHrY%qZrJId27P@L46 z;~^_4^tSj-%HMwMC~twoN#k6lGmF_Bifi#-Bk3pMVHWTco#=KO&Bb4og|WORk3@fh ziH(PV3Fyw+T@LWqga3!&=Q`f{0rH5iPI9-Ti%hzNg>>G<6E_=kJ&nq?q*a19%b~99 
zM!b{E6Pi6KJAYH}Wm->~&3nmjXZ?FO{Yqs$RSxoe!^?aQ+ttt=ZO?+QBz{hil&AF> z4ag>|!Vj?7s&F`tJ?@MCql-Dv9>(VJNL6U-NV09kHU=nPN3VGfG!5l{y3u%2&!yH~ z`1l2UOeOX^9o?1nsu#jI^MSgvadXqLK$$m~}7wd3Unl`4xSr29U!oqZY zbrOqbE+;{4+miVl_^%IL+1@@73WD9+(a?rXpHB|8;BzMK|0>3ockm1!u--1Tk#KhB zsyD%xHhAJzS8CyUp4t*_b{c7fj|FtG13gWqy*6~1Iirh}8b==Av9GE$wWjc|OSD%7 zg%?BjYWxn;$^=hUEZSTLahctl#jYNr>qb04S;*{*hj@*LVd7V{Wlc8g6VrK@1ND}@ z=y*q597!%K-D&K}Gf5zU$*)GiI5aOtN5(*<)Vm+3a-Lk<^pdc>3OVYmV0k_ z_8M<632HitEHi8MG~B#L^EsltOlwJw5O;Bh^3M%MA8M=sB6yRx8E_c`HnqySeOTENkop zp*PX@xp)|(AJ`WqJ=9;>{j2$cU!@Hp;dobRI z&xAgD0)RvU-*@I-Br+c;P1w#z}&NW%QKYKshp( zggwK_Z$Ajg2&4llXS0YL&E1`6OJJeQDX(WqS^3$XhCVxrYPtG^QQXCPf$V~-s!vEj z_{>9R*ZGh9ZD`oOLN$tvX5e%Z%$>sDeg^2hlHxwQYZ~z;-{9gM z2-{LfEBkTZhO%`qzc2Z$^K^DMCY4*B|6HA0eVT*r9&p_WGEUJujV6`%Xy;c{cOt!b z|Geiv9Kswm31Y7!gRK7ZT@vM0mAtz^{{)}zaFw36^I z&Ga*sSXy>FWaORwcI$ATqfMnZw|4(L zp5icLo}OAi1UeItBdd7h)b~D(d`lk<(bGq}OQ1S$Ku_WNIJUTj_vyqw-t#vhw!S2b z9OB5T_@ywMFvk61`fzl$;q@1jgSp(lIP_v#! zhVrBTz}BZE*BDAO6E{Pdyt_P&mu_f2$L|vI-jl!h3m)cb{UMr7Vz(t|dg3)R zo@+pQui~K`1XfczvwRQ2=9T{DEW-deyoxPmzt+jTT@FxvUigIs^Rj z(B!r5m!`uP*z-rEn~+SeYJWZdRV0(+I9hcj@|9SskY*s17$73cj(IO)@#Act>od^x>WgW>&QbsHU4 zCh3|+QTymS<2MhW|27aex)}K;Bv2PVXDvUIIhudfbt;SgkS?=}B0;ZX3F*%V_-)UN zzfjoJ-Z+`UDsr^_2$~v3S8wsN``~^zJxdopu082Kq+HT0XFpPAr!UdkH4wa%#pXa{ zW9=p3Rc+U*7V;~tmNDc%v=~WLCy!Iu$Y9TZz$*+V;nA)f%4^<)pUY5H&+k$Qok7yS z__lBRrh4>*JngE)?flmo<;rQ}MRxEgIX1)7DMo0GaoUPTvg^JNDV#z|&D5}w>~iQl z`xArT>3FL`zE4BNSd!feXE&;+B%eG^*>$v)+0Mz@zlx>@;rMtZ=dzN_u2tn@ljJaA z1)oGs(nMq@au>GPTJQQLKiv!kPtkH8wH;T?K+K1@-*||meH-KcxwN|wy$|ATPge(_ zKPw&yBrr(L%a!lsT2B-twfc2r*oi*AD*EO=EbTlRneXZkMW2%$`_18fKD{h2Mqn9X z4>M}Yis2-5W&Z3$cAEFSCvcK|_O+Cq50BAgSyUxlab2if>URt|Rw!tX?@b`F?)vcO z;V*l%5<(>PcVcPZ;G-Pb{^Wo5;x8|_N>aqk*PN(N$w+c1);otDvm-F$#7#U>_K@#N zUJ32EPU$r?+yo{|^CfvJtP4q*ftpRSb&A@C(fK|!e>%Fi`OS{_B+AN~XO2YX7;VK{Sc-@{M#>Lw)RVB9~8?&Dm~?wR)59KcIu1t%xV#_^cX z@dmd+S9XNVAnAlm8sh3%>Px`?_xaNvO84g(9%2j4>9jOknn3PZD?Oa#`{?;ALCTe+ 
zmf)|ykw#l}%_pJNbaFkEBn(J`i(CjLkC55rQ2RA~w$OU^cD67EeH>+H@I@m$mtX}+ z3BMlw`{K9+n)ZR88_4{8SNv@JNU^0p8d#U3|-r#c7CJEI9{n|`) zCXL!xxaf!9lgK84dUw!D67)R77iINgBF|dQ)jE9YL3~;l{=rF&cI^x@KU{lR3(5JI1Vi6nAD&(2Q`EPfq;96aq=Jk@U*%bE zg`JaV=UuYtM*DYoVi*1>DTdnmlvRWBg)i(+)-UtmtF_)AM%(jWi)s6Ay<8L6NWz?s zyy&Ckb-9+7u&C$AGeLp#_V*gPcW^%`FXyqI#zqaBJ^dVABoW*&pUWwKBHZ^Qn=?>9 zh+mq9_V3XB7_02V{hD+CreP26Us>obPTWkC|Os6$nvz;(7mL0OiOiPJoCFbUAP&}ZZBp_ z-X)caX#ZOu`Zql!)!NnC%6^}Oiq9zbF5J9Z^k)Z=&LmvihyNv>Y_HFIorn6BPyLAo z%F*=Lg0lTxO)Bj^q_9)re;V+$@uknf_sKZRx@b*3&MEA@4xTH~eK)P_0c-d3TQymI zJlZ-OC6vWcsNRX>7x^@V6p}3IMRMC$3kkn+8+x;Pnq3Fq8#nYd;u=6ppQ`;5PbOUX z=R8YWczy_eGnU;Sl9E(!mR@oNYrjsLn_zM>o|CFK$#T!q&P0033|mf>1%c_e2DiUP zkA55dUBKUG7j|}Sm7$YE=)N6!W@Vu(JE{e-KjHd8+CPrgGGji8PpXFYFUk8=5*QAd zn^@o~8q4mE1i8t)=tca_zHI*w{A4z;lV`_6>ku_xpv~Axb@G}3BW3Zv6-I{9@?~f| zi@om;EeZ5{veHRobDE#w-U=4tFacNdHuMgT{{!iHyNJv>;O7-GdtZC|E58wTbM9?F z-r`o&e?|sLA^12hcI9mz)8Y*D+@qDu$(_%_>ihn>`-8M|5o{(UVRjHb;d6i9aH?|E z_1+D6_5tMmJ9-lc`6*HgvIp_&gVZ~gWX~%`@;UqSR-uXP9_Xp$P}g_kQ*MOZuOaq1 za%@SS*`uBJ*e}$xx4YkwWl#F3O0KU$SEMoCXyrvH?gkHME8Ew-r(vxF?LMyFTOel} zO;lkw+1a_x)e-D#q&}oADI^?ak~x-#$KQ+{lE^oSc}^gm>@pdQl8H6S!^Vd-_hUSLCV#X0kuUi{3Z=>_I zF!L-fn<%%4?vfVaCeqFLGCRJK*5XB)-W`7W@FgFi{xgL!275@hv#n%lU3RB_K-Snu;2+~@Ct6S%Q%6;Gn;jV+Fw#{dbx}i-h`2y zLr#LP1Soos?kB?RC&j2T=@%2Y?^t?Vgq}U{l2bp~ou0Ezo#`SoW4-)j2mg(zJQZD6 z!CQ@DY*t^n?4DXl#_d@2ALO0c#nVWsB+XsGiq@j6EXgGFUjoPHBv@Vckg%xZjMctV zXLecTMD(S~jU)H$m_Lb(57yG(`pe4Xo3nx~p(>%ASFo6O@Sk&7ufWnJ^p{lcnG^hOwD+U>7ZmQ~UP2Q!EK2;&mUD9MQ{_Hmf3g0OY^wp>d`+8k^!IOgbK1d3^=W!( z&r5s=cYSepEDdK|`j@iZX?LOalP;i?mM%fhMOu0gjvgnGLG;#|E*dJ^8s9IH*JIF; zR1u|UIjf}WU@<{g&g4KEYEBo#Pqrn6ZoVho_`CT2 zh1KQCHawk2j&JcJ!^tpSe{XbTh5vSxUk&5!T}epb;VffPL2J^fJO>L?l-vNhRcI|C zc79iTdH7hb?RI$il^s`yqut4C42dn$`%NURq!8^wQZ4z|s-*NDc_#qzYAqyu*O}y! 
zQ)@L?_C}OkKsLRM@Vc@3MSOQjQpjldRa};!sXk{c#<(xAn+Bwv z*^L8X??yfPRPAThq$3LB2OspkgYu_B)j6oYSS_!^?nE{Ai=TSf4l@=JYWgPP%y=4S& zoBOk2ba=(VHPI{IHSnz5nKTR((3TifzUwN(WMxH}ae}Q{B z@i(59U+^ud`yQm51L5&DxSLhfvxFAUSO3j=(j!>+PB3;EtC~S3{j^;lPXl;@vw6^K zaWbEtI?!|0s}s;QtHe9&bIQ@o7SANaXhPMUh06`law!>9X6?JfbH?b?Av>e|^<*&? zGP0LuYazeeY32hOzYKbE3gk(4ka!Z`)9z%pmDS!zdWD*jIB+O= zP_xi}ijuYIrVnj)A(^jvqk)AMUiJNU*1t@N?_nmVT9VK&kwDU$yu@bqhqauxX-r{wBw*VrbyS7I#60Lu4vm$#g&q=ivjGp1*|>xOOlrDKr13QTOoGOX zi_za}u#*71x9Npfz~W%kB$429@++(LL_NqH?QHV+96svd>LqB~Mw=Hw$sX{MJ=fV? z@r~NE)9O-u#qwIYKEn4}5H?J!+4+(4>Z?ez5{xxhLjnaq2}OsZ;tKkBv#|5ONc}w4 zu?H&l#>o&!$*#H6$*&uFX_LoH;1HInMl5$;_qEt+=;^}>d8sX zgz8S;e>Ifur=B73cnHb%AXzMw(ZBMG5 zFsv6qN;@@N&4VTI*i864o$tPa2S@^nPOeU8snfKaQEb+gZZdAI%SY`_r=7^>3AKIz z2?wJkXEb|4Qsz=evb-65U=to>BNS!Vb zQ8LGNtmn>C?`QB^iw~)bhL`DQ5pR~XmiLli1Fg4m?KC#B#hvT4{J+N2rO79j`nS7D z19CS0PlfHB$f^_+AA{e-_4$-;viI#}x=13YGn%B5em>zX^Imo$gwKVL zM%q4DkDhao83AP{&TLkCDG!rynpKR2DnsHmkWkmttF-Ym3;fK=WFPHMMNwv46P>IL z|9B%lGZyRvG0XL(13h1guD&C&td(aUXCjKc&EI8RcOCzhQ9>1EzenE=FqlAm&p=2^ zvdddUZ}dD2eTfoqpL!DvKlZeMd`GZ@V~V-pNqPV8x9at^zn#%g}%RUX0B+iJZKrMvSA*%>(jW!cyA z2EVb%Pm;pD2G4^F-*PN1?7|9jg8ymSzYGeF@-vDYU*T(NKttB5U*prx)#rc5>u0xA zKbXszgdSSW$^U)uJOV-v_C4YH`|u0x)NlmNQJvFuTZe-Twj>5A!+Ra4`E_MPFy>4QhKL`*}X6;V0>IAv;(L z$4R!ANJjA$iP^Lb{n=45S=~wGk;q858S}69`5MpWjNh@a_?24IOC*fvJQ8gR1tn0} zhpb<~$H98IWw?*0{27ml%<>Q`Sy6EQTrr+FiR9ATC$&;NwwDCDAM+^hdafBBHv9CR z-tkvjIfMnSfQF-#%ubfaN$C)FogH+wAo3EFUP-=Zp}!?5=Uh-*dfQXkF@?X&D$Z3X z+kqEbitB#H{*BWQx9FZ+c)YTBD2?lk>mG%y+Nk}3+&8n_iM8hPg&7O~||^&JKsEjdb`N zO>J^FC-JiTGqyuE^w)}C9q0L5)sQ5j8I2vr2Rshp>(S9&JuP4~WBw_$)E0WOKlN9Z zvb(_$Hg#S1+9tvBn=MQvnx*qkp6)c~kXFC|e zXX5T|ZBlSg)#g>&$_bOC!01M9Z)j-)db{K03kcc3>t(epZz22Wg$BXa%XD;z zXE(!V_IO-NB9HP<-AL=8ZTOc` zFp-$0rC4EaNWFzDuE60BYFGmyZE12Ry>wIDo~}erV_w9|`~>&ezuwjTR_>qS+6SyG zVH+DIU6=X-*4`@iruurxAU*uJf-ZLM49f@R5z-6SD_?=(E z>sNT+jVCL|Q*L8{<=x3%h|=2mtnhtX(EX7o>!5gmt7RZRrxfe6k(>{IfQ&|w${bqR zoeXlad4T8ZlE{rLGpkXF7kV}SSsFffQdjn>+zOS65|=sZM1<>P1fP-mMp*64uVoi^ 
zX%aXNjfrTPGx3?nOr*I1g?Bxb^$t^8qJ^zsJ8f{1NS|r%1X4KNz3fK#k2=zCCxzcP zdHt(!k<(1i`5SL}m)3uUiF%}Z9-Q4oE5GS8j#bYI zY-ou7bs6;iN4pQ8?pS4#jHN9}JjXBpr0&cCjy22m2^l>FL8A&U`~zEim$rY#SCWMu z3D3vzm=CMJ9zC6>mCHTxF?60r-=D$fLcDKeAD#J^n_Qi&-t5u1u8`gaXgG>J?XF~^ zPGmOrJ#P>D(R%|J`c|m~=+9_>f3}e%C)vC84$7Y+i%zJ$M2)pja52qnBewx4NqX{` z>Ux80l2~PsnzDxZJx$($@5S_<{oX-)J=ER<(M|Y*Bt5JS^{bS7Sg-jVUzgax<9L`P z@=TJD8Dw~)+OG9f_G@;hw@ctL@4in!NKXCHyb1ExLq^^LZ$MFftzE3O zig1;Av&Q&7oc5E{_e?1Hnk}A5UrYJICFGiO&D&UVC(rysTiKfvZ}SvA_os*KYEAU8 zcRjlSZ4Hb5>wB2(1{vSe@m;uBfS-h>??^_g@mxa7Gx^O!$#bgbUMZ;PU{r8Ad+)%n zHdAv-&G$3P)B9^IR?)LfqKBKfXz{(sEYgX%BveKw6OA@7B#-I>JqK*?OvIt2ofh&0?kfu{RWwLLAw6juhZuoYU) ziNdTlbf<$iwDk|VH|fz6ojW@p8bd}Bk#y7#WY_lsw$~6h?OgfO{ha!F?!UF%i&ZC1 zTITbUZ0;N0@=55u(Qo!x{)hdYNJDGbe`0U9VS@|K1Fcu6H?t7C(|1y}Clz+~ z6Cm|VPvsq~9ZBs;x-}thDt=CeiA0;vS)~Vf$(-p*f&%~!pg^i4bGy1CohdCv+H(j)a&;QVM6TIg<;yFBQSJKOR==E&(Vf}7J zJUyn?f#^xp?)&I&l(w=?wF~);rjI9KJgG*LFsz zwOD=Ed2*Y&htpvt7Tii()o@Wp&9~rhz9-Hk-Ac+$f$^j`%E{w7MnTQ=JJZ-wPS+&e z#{P7j7({!~>zlNFkn35IPg2apiJPqD%w_eW?`3pzAFaMmze#H|Q`_;TGht*RspbuE zci)oKv=J@b&yQ}#Y4(bDLc#ucJQV-gRh`pwThx)r2h)5%xfq!S{|!BR9qlD0$KEV< zz2~ylR0i)0NH=E*cO&yegvh98A}M}Gx=AXy7-#$9Bcq?o+0RB4CI!r{{Cjo<+{nt} z56?sCk_BCe(criaX05yBV!Am?;_K*%ndETLt5IHHfN8=->+7{sASxBD;S$SV*?CM3z z{HD&V^sLa<-|l5}@i+?Zz*$Y0&B@T0@Ov?C`tfIp3;He2KhpN$elkmQl2&(uu2KAY z#`eqD%{0DZK92q-`TB6#QyX)jEvtNGP!&J&1Wz-Aj*~_s@w1cS?Ke8<>w8X`UWBiX zZZF96M9h4acGwn$667*KWMbbBB&-4*$_>dg_MRx--Z(3;bIvjrk zX`S(xnBS|kb}R2T7KamIX$opiq_eRQ)qyS__CNDFquA~w@8yZ|nH^7ySV1f%qnykt zjAThQ$@dz4*5jyq3YIcc`3hf^_5bhatt@PnW4k%&nfI$>>8`%=llZ^v9}H*9wcLj- ztRjJ3Nb6Vhci~x2MCVfS+7}uoYAb6SKj}%OKkk_H;Sq$OLm9A&R_BZ?8sO7D=xQIudS@4*B&v!w1qSGBfuajBf z0krroy!}Bp=b-CH6edYZeRn1)oiju46eEfDq<$=q_dTg(_U2IDDp5)L(a%s?I!*i8 z&+!6_&K%7VXl~3)Eg|9K(0c~%Hfb{}$`$bR6|GM}S)!cg4B8sfO^VrDahqhF<4Gbh zmuGAL2H)>wE&W)3B9m@Zcj5)yWE9#(yX#0Kk@*vUurqXC&+Fx+Sz;n(Xpmaa-SOVn@luNY5 ztmf`Z-jm6#51LNL*A%@#&Ss8g)k)IzI61zK!uB*=Nh=?a;=^kG5EW_lF`E2=EhR4P 
zn|%5ubdtDc^W6^`bDky<(tbnn_P9~WHSh@eyoc60a25abuU7l$qsA(K9Q5a$R`!?F zf!&+QJ+XS*lJrpYCpufAb0qz4OJ!$5^mNbV?C&Z%%--1bxcMBH+38k=B&MUkF9~Jt z{tDq-MiA>sDkISwaGd^pCzQ6<)3qte--6bhnC(xeYqgp8(1`_t zva*wS42k1491d3CXQ@6o8qOR-#VOfVrzM3O=p(Swe5&pBUyoS5)>y5M0 zIahr-vvfObT!gxgZ1YR?#Y<(s_HYt9kwyHiuJXzpq4m8<{B%?`Bi+%i_F;8fXm>T- zp6qiO<)`50Gd)eBrk;t9)71L{WG`^1Ei37ymz$(yqTKIn+&u|Ji^ymVPkb7xE~5AQ zv~v)RU!rIF3OaM9`!f8g}*?< zx6{sat!1t#D%o4N<;nb3MnEsp!8_!!oV>CY zbSeCPOl!%D@gT06vzMBzBWawMYIQvHXYY3pa{3$oC(`yq>PaMwCwQDnxLBnR$%%w3 z;H?5n{Y`Jx*0NC1o@9t@2SAvY5g791U>x1;3Nur$j!Nbu$jyxv&Szq+gFF;fFLDVDB zGob2q)IH_-U5r;7lhs~iwF^2fq2s*MJ*GsR!nVtK=54i2bT6j|dn0+^Gb~85JOe!~$&1!87A?dO3lk|Z(vG_i3@v45I zJp{eZpJb+YypiPs99_=dGhpQZ_z*({oyV!v=oZ&WR=VC_%gZ@fpi;s*P&uMzt>F9oA=exbaA?tn(%AA z+4nMZ{7;(h1Z>Nxdmn#DNHUcvyiw9XN`_Lri8PZ&4Te-S(m=`UMUf%pjf4gbnnxN` zgl`#|g;JDc&RB-X?D&1o+y8$&M^E>2U)SDyt#h4g?S1WQ@3l{6^&+^*7)csGE?g4BtMop_}o%dHmq5P}3mK`0V zF>N=VW$vUozZ=13+2N6Ox*0cSjZZz+ZYX-?8TwVF%9CIg%xa5YA8Bu|?<;F>2f2s% zb{Gtg0~|X?doi+z5~RIFO)uv6rYHb9n`WE z-WjR&Wxp|cr%vv?1Sf~%#s>C&hW<6hqMGhI0vETj!M}LW6k0ti#v~hMRe#NL=UY(f z<=%H)8O2BcO{0x$a3+aR{$nyljuvcau1KAo2z?>qC>*w52#%lZRYDlCpd=Yq>i6KUrMs`1})| zWgMQo`PtKRnHrX0V+qgGXZov)yN=~W zIV3($Oq=+@pS&;Yb4OxL-nZV()AsP5HSm5?EE&y`PheBd(KuJzZ(+-Dyx7nE`)U6g ze)TE+j)3SBaA~aGzeL}$ykrrNc+5Q~@bixRH9gxWY&!_k9u&_WgX=O($orK%*N%o% zWxV|br_W=_9L2nnWs(Q`ND>Xiq*<=#=;L`TbqpQ*yCd(Z?)G8sgeQKh$4;i#>DtR&cOSML4Dr#JkTKk7 zaUxm9e&&;{)So%t>)}=wFQ%~5T+yW}pRG=d`nZ_4YUS0GH7i&6l-{}@-;1{-OW8~Q zT4ZE$6MV|Bc~*e$pjFP=y3$W6+WxJ@iG1>9NMr^kIjVXTM%^EgV3VGHx(L~nJi`mu zreVY%wC|6H&B&eoAXDg-EV}*iJ=sO(k!ca1JeUW)&&%t>t*wZeJr_+#HUh({W9LMD z-t*#C)&u2zPe1Kn%43qxsTa9!RZ|rl?_NM7GgMnx`9YZERHOsR8J~Gtjd@Shk)_)k zLsfHM39=tdg8nQz3(Cp&d7F6DgEpt?IdY;?_K`Iu!Rut}#4ejiaSe>WB*Pk5{>ay| zC!nKyE~9BOeEh6zR>f4nwd~}Y?fy+i8^zZMfR7k3lOCl7it7?dq|Yipj6 zTo}KrtqQb@;6$FP&$A-X5e3xWhvZP-R>P8edEqlq&${;kERv&oER9%t&i56rlUB%ts%)LcKkDK__g&1B^d>Y_)Khfc9m}i$~ z4Lwp_^%qwzc@tWT2wCBrJ@Xm!Ox5pY+;o)QWgJa%;zLD0rOEvW9d0bd(PUBmoK$5+ 
z#PBM+d6Kcetg-YAawk_oa##;%*<;kup9TNsuN_E}QxuP8yR00`j_O-6_+6~o3CA3s zp58n;rSmTIzhu||)h2A8`RBvP(1>)+*tN8FD`3qG^}eKrchq!>o@^^V=Uk&lMX@(n zDEk_+O5_Y$SET&|N?uNa@~r(DHm!%|t6J*e-kbthM9WvX<16Uw;t5%qoeUL^7RJ)a zdpV7yHO0RnG(Jr$SGxNl5$ZOcpB0rWm5Hq$5h*s%egOU_9#7z(*ZZ!kveU_$?1-!B z)CQJ$moSJo<>_@a&927b?4HVd!ft%!e)o@Z-#4t3Y-*_~PyRW>;5b)K@?BfLxJ-$SfNk`OL-`r4`|8ei`IeXN<$ z)Q4ogONoZ`&)daQG2$$E+;-`f5HgN{5LJGghE!i zmSVA;>@$zX*Ry<1$7rRE>qxPcZoBcdn?A0q@At#`749mD|CiIGgZ8rjVh}#u!$O(= z%&MV3U0bf5FSMN*lmC!4E5Sd9(WR`FefJ~S{3sA5w|VjbaQ-i@c2-NCf(~V&?7d7j*qZLH%Km+fAZpPn z>&LUUF}N;?M!J<52z)XFpRz#(k@mO7^PGv`z-o z3)uf;eqUeIYk&_YsV#3`DwA#mKO3NqE~I#br0yG2jVz7lXw<2n^LiW76eK z-*(sYraI%m)-ZU1tvb7w^@};@D^YeY@3^0Bl0mRJbRQRk`$Oys3_FGF594qd_^j0z zJx!-4wfviFFStAVEVAk=Pnfl#)t3Z6(klD&N2xmzB=agqW6fmse8(Qih%}G=vRW-? zLCl6}F>K79oD0=Fhevc^=j+H_R&5pWV>AgC)3K`myBB(wylHR1O4$XMb-Zoh(~dU| z)^2lb%^HlHmYVg`b4BK4t)C{sWxv>NQk?|LjB>U%`B0CZw+vtL+V$edhcxL!uI$(B z2)%0js)1*VjHDjG_hf#~ine6`&1o`6u~1Hy>wvqp={1SBjmQ1f%I3VN+2U>V?Wet> zv~I7|723;crV@DATpX)OyNQtA;ohIgu!A3V;?N|12 zHLmBpj&gjZ0)Hq8o$gq4GW=fT6USme^429kN>&~=rbXTWP4!*YJPc-|u~0dSeGW7> zd02!Pja%pNw*mA^uJ7MTxW5+jwzMUiT}0NL8u})#4KMiEp}chlp7&5|PV0SKl$$C- zwcv9fXzh6NAA-3bux|FbZXj_^eMu(NM%sFVUuGq3bDYZi_UEx^s()GcFh|Qdo2eZ8 zUI(u`aWgY9pKGNLtuqsu6Jpz|{RarlRL2tEp9=Zk@Gj4wXNr_3!SfbTtBrmqXAGBM zx2$ZMnA3!siEoUmdUHh50%^dk3O0?pC1L>6=Vh@n}eY)mZ zvOSBuPKGmJk@q0I;Q6#ML(T=u`=%MVos3lnv&Fg4u4KfN_nmc>-ALZNh0e$&C&;f< z$6Orfi(6eVc%*2M_%w&K*TCj#*A6P!a4s$^g!*sT^{X241Qaj2mkv+S@&%Tf#n)!2 zXB1wQ!;ZyJd=@tA;9VWcdB^=Rq*jn5IjI^M>*jofA9&>z%AZ88_X@LW<$3?{dd!?C zILFwn1|O;n(^u#+mkeV>hK#>Px^Jr<;6xa{i!&dnHLLvd9=Vxv8GXKkYftmrK5BYQ z>r-L34HI_LFXQwf<5Ni2{RBU$-8lY}2&KwWZY zf9yBdJe6bvcx`8l+{53OkTdH`4iPU}vB~xBNVIs7hvkfcwT1rbJF+!{RR#S=JmO8| zdusU|?D!RHFBH-8Mr0R`lv7J~S-h{6g}!^ARr>M6M++l@DSkKh*VB+2z=v~cS#}a; zQOI-ijA_XIq0zS(`jsy~#QfhUYY=rJ`?JH5bar;;Ko4lDaMXQD087w*;KWY{LB;7m*# zsVAC*|Jku~GOovajxs*nLF(ir%PPac>iR!Y)V(mVQ?bTWQZj2hH-ebty zm4AFsr{pV6wuEPB_JE&t+Ik-+vJx$4y^Vw9nb4Tc#};~yK8vIWi-G-Uw*}@c@TL_j 
zpTY(?wY4qOj`RNl47msc@^-U<(LtX0c91)#^X>6F`yj7m=U4UUm+QxmpjlbJ>uUA? zTu;W)Yq3AMvAVMJRrq}a{a%GoW`Fnj$$RvJ$e5M2*-4UY=x6h-Q`~m}7Po=tE!r7v z9MKeBIqCFgyvlp_WPRW1-sH{73ZENX%ZiS{{IMko&voB?Se^&J(!MXrOU_c_pS8U^ zNV|krl+jLg%)Otq=kV=OIJOZIOK7oyeZ$r<>O2mAYqMYvHBV&qL-m`(Fr*4wH7dl` zWLH?}?^^t`291hgL(X6O%Qxf1txL3ZtIs)Id=_uLQ7x;mq8aN9EOdF~>9dM_t+H)rF1$u?!#j)fxzD<3}_&U&2+A(}TPfXXiwhu4z_U8(B zX7_h?9nYXy&NMuTZ$9Hn4?op?9*5CQc=8@tHDQ6w)n=Y{k9N+*{r72;F-2DBU5YmY z$e;W^uaUmDC{U!p@pwwsC|$*lBgCidXUl4q?)tPD%CCd=sdR2gn}e`9doM3kq8!#F z|8p`&6(d!$NyYZ#-CM2@d9$xJW10DUtQ-06=D|7bFS`yVU_coWKIfsg#f1F}44v)! z=ird{p8wKgP0}~4R@;SaU50+&@tBY3m7OEQmCdfepK){=w9_LWPs8k)&MfboLYo<_ zB)4QvbAP#Dp?~s+kA?VFeL*!+KVfW|y=EK6-2<-*&M&C*U3p@8ktY2B>oU z{scO95T(YEpVnv?EC~4~P1>|8P+)qm<=Fu;U=GfnL-3T1_o9 zEXY)x-#jmden_60`1y|hV2t|av)4f+Pac>feJUgNUgECA#d#um8?v9nud|Q4rkK?c zdn(iQe3EX&+HWA=o@Wh*V=r}=_SbEEjGm zTFe96vPu)BKEvyJr2dV)N2+PEGRcK-0ZjTTF(Q-FNHO7w#@KIa;4Yr#0$4L9|cK$whu@;>1eVucXa;B;7~4C)LvwhAq`p1q<)+dk$>!o<3QF zldt)8-~J_*^dsL*{@(=c|A{&mk?K5bco}Yw7G$Y{MQ20fMy*Zs?{W8D#6}mW=>kan zg#m5EhJ8w`E?DOpR$C6mY2x}PVn$Ke4T0V3xSJg?f8fDHKQD>Bd1HJr{fCHg%|+J< zP)bhwhgm4AERrK|0Q+Q*`uECiChu|D%}T8kVELJ!TUmE7R#)ILM?)l|0apNW)|ira_=!>%P+2N*3M zc?LN?WbyYaEccG$#LDD zz2{-)ZaNNN+395HhR5eaGWqGB!>z{jYR)?<@R>5K{3`oIldoAV`wQoZ+I!SpA1|tD zDf5)c=r)d2(=cr?E$?xEvK0NSjmy+`1nVc~#13q^6oWRYV;ehYo#V)WQn>{%jOY%a{JcY|VacB%}gH)-8SzuQr%etr&9&%w%O)zK6&XQpUa$={hN z&#s%!zU^O#WZU#>#rZ>4bBu#W_Q-ePtMkd8IoM0p-iZb|nV|<}j-zD{7}hkhSi{pU zpzR{{Co@n<+)qyLr$vf>n01|+yO1MKFB9D}UF~PH$e%P$R))M;Z-dpDJsKe9oW%;G zw0SjlWL?PdEb=1BU(>h#7w(mn7$d$6Wt*ea`<45e^TwQVvz*8OlTj~uB69L;RwZ^R zMAwQ?S*fn%4>=G+l4pA+&Hh97s z3;uT&UF(v3mD;igG;c#+;Ty@joQzd@W{scxp!@=54`9hf;_x1*?;z(6UOc1_krR8e zbN?E;CSz|qj0gj}xMvWi2gPI~x)4XFxteUHHN?TLP}rbERc)`KPcnjk%&N&1ojr%i zJ@h2WOVDMb`wwFCd2CgI+$o5++W<4@hORLpPT=MUa9lhlJTt`C-e2Ib_G z$*J?>e4CS6-h|UMd~8Ug5&m69qMc&ety)edoH-buocmeRIb1ET`Ir5CmBf-fHD~;@ zNtvZkybw~;M6s-c|B)60w3NNgZP=tg_BVsYQvSUkIhU~BZ9Y9@RCEz+@+_ZBe?R;G 
zCD(^)c{#Q&@~JQTzb_g+2BB%JnRj&`lHekmAL6b*U^l}5*>8CQJkJzIa$0QG`(|X3 z+*=uYJWk8>(Ak%MhU;0Uli8uKaAXZGCwKXWYR{P&=ko8wnX|}{wfbjb%|azJAM}a) zJ7dI~`l}X7Z}ok$$89h|%iFbM^j&l4F_S*Y1(*1i-Tx)sdji{BMyjrGj9%M$U^SjP zo^}oKZ3|h~lW&=Cv;VCUWPagys~}y1zqS*B^JLQ0oz1XjH!hwYX=|@AVExw$l)J=S$A&K(-d8jd16>Vp{SmwY<`CC2Jw+7D7`Zi~^M~U~oR!FTm~W(Z3ZB=hCyS7MJn#3XpH8r^@<_oTia=ozq0& zY50?qdUCeQO>94mCua@uIx#)#^79mSD-`BHv@PGdMs(T>kssmO$9?yxp&yOrdt!JH z%72O8S;?Mrc6y6f8Et1z#yZik1pGU*UosK3BUd~sr#QUJr;`h2JHEc?`ft8nOU|q; z%Dj5saLvZC>uGr$J~mV42zBO+#7%I_x}(f7UIqJ{vGhNmGE<%1h66>EOVpgq_sK2y z4R-!pyn2}ImDqQn*q0}b&b0U$CT+yAEi`{m>8nY7xzZ=IeQiFR%=y^?oOk^5^jo*k z<7U5S;lvHRc#2q&Hyjyr&4FqaJm~Mf>6{knv$Nr6Z=op;YsRvyM-|U=7_Oj!&b|H3@ zQqRq>nW~?7Lk!63sQI+tj|MqI=4u$8M$)5{JqtJL(z*wWFH}QLXLt%@&QWJQ_Nak{ zd2)J>owK&Dfss!u+9jjx;d-wYP#mSjV%nOgr%iVD*Xh(usq8XK2HD?OrWS^d$E2)1 zt4#XLBz$HJ`J3o^4?UI?d?~Av){wjm?33B=bF&H~VOLjOJ=h}Wl`N&(6ByRO_w8}? zYW>axm=7UiT1rNiBF0j$`sN@^S%PnE>6)A%!wGu zN3sfICPLynzuQAPJ6N(>v9elzh0ImFEhoCw_HzUdpW|9`nsNfrWE4DSz7Ln-N&dm=h-X&SvsC0)XTs-8^(QCfr+BoP{!4k+-Fl4qFdPnvWLcjGlk@{;D)}cn zCZqT!b>y9NSyF!~mi`Qj(!6f2IJ6)Dz$&1&*&j)eMY_o?&vON9}TV5ta<<`a^~K9ELEQcavI!r zs3kMTZ`hyg{goiHm+t*oxt+S>li5Yrm=-xhJL@kV^eJ!l>q72Yee!Q)sm?-KJ@h}i zwb9N1^1sZl`eN7^?75oVJE(DudcUCiUcZOqRtju+Q55BIUF-IeI!o8d4`uE+lR$eVEGG`8-?^D5}&o@Jp4dgf&Q%8c-E z&)Sn{_W~{QWcV@LH+5fSQFXlN{4N_O)5+WJ-v_DH>OLRyGT*k7&8CuKuCkTsl3At9 z{9mDt3Zl~(2>z%CugL1tjjC$V{Slmai8Y$J^0hkJ>IV}ul0mQ(tTW$z1vVuoT{%cx z#v(7MHP4q7u{f)@2C`iwZ>z*c{*j%!NAmhx^|y&~EsedhH>U&H2GgOsR+C4(30?Da zn%&7+r<|wgW5~5YPdo(!E7E8&gpOsQyzyR2;(p3yCuu3#bZ4uVMaN`e>MSy@!Q8Wb zvj;;Tay_dXioz*x7%sxLN7Qr%Jt{%4m$q)ki5fiiN)pUeBIovI$L7m;u@GMW^j2NO zkggC;Uh~X~XO1X4f~#S0_M^Oy-VKG(cJlpv z#1>frne{&1arb5QJZluP6IW`GZ4y5ns7J3QhHVwSX7H%;?yUjS!!iC#HeBhuQdrQB z=E)b9bqGD6wUyj2v3K?k&(+psk>UYt4i}2C+sk}umiCfsJ*&#!B6n%^trfX)hQYl& zBzccs)$Z-SO@81qqHW&%)q#5QjUCQPd0t8$&SXdqApyE*D|XRQLR;j&}N#>P8I7X_EbM>v1bt zQnTwh*)_5n=_$TAf`29Zd$K65f_Nz!y}}CL(D)oJW`Al+`nAQ`oc4N}64~Ro3V+&b 
zH>Ut!<+&pH&hN(Cu42L?EY%+RpK75NiMvCtg?n}u{OS+#-UHRcT+Ioh;a|@2&AIQ% zmG`*o$HOJ3U``-aqFZ-yYmNxA1b^8qBw5V?#fF@S;0c z!#ZyS%J};}_~c~a%(FD$3qR8KSbfV-u_Y^vJM)_FNb$Hf+o|gg?R17*-aj?dGj!o2 zXG5ka{O9543v9d<7RkTc1ELKel*pD$wu`kk!0#HQISOv8#k9NeDSpU)T1KlTZj(m)Km8WMXUw-L6z{TqSrR~p;S5tB!|Cpvb6(8r|O zqg3)hod}7%gYL*ldD3XbvbCUp9N8DT z{;pOJ!hlmn$n!{@vt?g~_2uk;saCrAt~BvPNb7 zR58qW1OqZR_-}X}rrjIWllM4laC1D#$EYXYBc-*PFmTb&A0Uh z50Ib;lyeeSIFk2MeaXMEz_yB7zJxTt^N{6GyNku|(GQPuy*|VaChgK ziz$1tZkFZ!?#%p^;?ezJzlogXVKzsp-7tBL1+(&G2fx3BMY8KZ{2!yg zJdZ`PA9sI|>Qf9UNBTB!9-!^V$=F4YK3Z#8`<+v~+Ct|K-Z%zx@~m?q%_n168(1&I z)S=M$+_zcVQA*#E5md5Vy-BL&G-^bu>@=ILzJY9Tn%a^R@HM<$gAHS`zBfMqiY-@> z@l;-Qx%zrS;(a(KFJAKVoaCFV9XZbhDle|P{lPW7} zcCy=eta%qgSp|^Q6v?K0tw?(Z&SbW17?iUgFgyGE@rDcNKUm$_rB_O{sqYy$@3fNx zCu@I-u-sn!&#u37;C>J$Jps*gaPlz`Z#XVof)_KC>EYUGJgz?Wo`D}l#fU4+TI8v< z1!jB!vj;KYCl;CLp7H8_jc(l__8UuOc47@3i;?9-OzaEA>Lkk!%KdP8uy(IznLQZV z4nwZzJ^wt1?ZLY2liUfH-`rnI)P4~+H$irc_OcduEqQW|VRQDsjJ$b2yM}+1WTmVp zAF16M@Oss~1O3Zv)p*w%`TZ#DKT_9=YX2JU$yuG#^~&SxEv^qB*}2Lz!>YFM-CsYw zKkmLslH`ZY$*Eb*`4XLyjrB1l&VpknSSLq!W!{qfYl%<{B9+|QkuGJ~%r<5jZQY*G7U zetDxPoV~j*Y3q9$HNwGUugQMtoO-rG49R=izr@N;B3EnDXAJwCnjSKecnorPkf9bW zdhnz1WJpe|<9Kv4b!A5TKX_kSB(K3A`{Lj0SnxF7l@f2hCF%7%ID4M5a`YHcVwd`o zuYRu2d5W6v_dheMSv!&Q1rPH(TwIL!DFfjm0r&u$EgwKbc2!%G~KJ(g3H& zu;voXED!mYjR@M%=P8)fg2RtA?#mkQeCB{M!f;~EbaJmPMvxO=cn_!BG`PXyUwOdRq-OLkjg5T{ylj*=iy{! 
zk|gtVZ}zBW{1K#gDw(VUV3%AmGd+7*7=#p zj~nYZkAz!op7+l+HAzm z&$}lx!q2gPQ5LSsI`c)ij7ZW8En~f8YB-N=`;$1CW3tw_4vS_cGTFS6cPZybmf;(J zlR9rllPRMVOHF`&V+@?6?;i`lPUIP>)(hFFHT1@iHfv#$=`+tBXY%mtX}3$M(?!Go z!8B)eWXOCa8GkPv7wy$`rzb3wq1#rTl7D9KU9nczmT=DI27yj z)wlE@SMp6hNh z%!#@m;8U{9we$Nl7AnOPM#Er*@`v-p9~OQc-qMd`P9*#JZ>MKXZIouEhEV)fkA5AlCU4R#SiZ}17i%@A55MWYb49`?TEC49 zFOs#Q(iv+P$MdWw%3F;(>^6-pa+>V~NZ+ERDfE2_CSQx{8(Cu!&8n&OY50E4!r6sA z9?LSv-iI`K8$HZj>sTWzguY92w&t#ey!Q(23&bN@ef?hDEKwkvuDt?x#YY{2S zi}?@opih0yTI%fcDxv10?68rvJ7_nGeXijV%lPF+{rl~h{hnCc1}DDcQHPLf2_~=O zVeKH7_nFVJ)LuHa7CH0Q@+j^0RptqZWXzsbirN2jEg#IOt=T`h9d9EXrAt(E7X0ZyayhyA#2Zt_mZ85%_P0wR#y@9RYXYq%%pVNW* zsck6xcA?=t{584EC(!#zwGD#R1OD#gN}lj@a>8ruauV!@@w$Di--mVb+&16uS|U@T z*QKz}{P)2HrZ)2JF17S#ol{_!5odLn_rk#GG%AUid7GAX0?8!z6|44F$5~=TW;crX zCadI6W9wwDyhCg{oF(tq<}^6uY>Aw~Q=3i=eD1@8l41NL5h(LOiS#{vTL;rJoAe8> z|DA^%2CbYGcB5-qi<%P)o@0%rn3~LeH)^k;=#;e&FOzfxG;->Bb@H8{O!5+C7e{aR zY{jwcU3oz5&%pRs3?HaRt)a(>%_h++yQZ`Abg=r8g)%1wkAwJ4?!Ak)*RwT06giXcW(tjF!#~gUN3zcekpD%!KZ^>Ni2WaG=K*Z1$WM}6JTp!|y5|Gl z_&=7I?$bvQ+NBT8PNC%f%bTlRK25^M>bRSe28YpT93QzDqmMMoX~G)6iVCg7!g{1Q zQHiXaOg^)VvFSj+_Z8+dUuD0XAhSw4*{iUR3^}*uebMbnT$ux}DoQ>|s*mWBwV*Re zbaY`pvK-%QPVaIw+`;Nu`QDub_h@mbzs?oKIL7L)GK zKP<=8)dlYy0RQ@h5kXGD+o85A={6f)l?qmBsO5R=SOI=Hv$VBu&r!$UG(4PDy5Usb z27b>M#zOj4T&@q{S7^~***j>F%+eW?JjjPfX(c&0z93U}6V>y5&UtAd9@ik*z3i5? 
z&|j$UW7j4@YohvdUT{|HRmbM6oqSRsd=Pw__%^5X-A?N4?yAfdVQ+E@9Yda`@U82f z%rkWK_kHZq2ghdeho%@kTiwlgV|!8KQT0}U=^R$i$tdyoU^CvoHm*!$vt+c*X&bXx z>=>->EjpD|Pfj?<%J3yT^KzcKnWycAP~H(_|He8vWZXK4H&y4|^mA78%UoM zIuC&TGWPo1cX>>_*?q76hjVB<^vKnQS)v#NluyG?k|Da8KoU9 ze!eM2y-mYGkj$F+7qBKLjE<)5t^BDkoRY)jJ91=xxDBapvN^5uV|Zkpe{1$?#}2R3 zbDO(&W6fHz?+q=yK!RggAz8A~T`xk-XP@4@p&c|Q^XyS@%ln5e zVp(z-83Bu1|_&Nbf_wtWmg9?~r_oZyLFcCH;IXIkv!|e(alEJjv`mLw(6+ zeUTOq#Pj43%u1fjD`lL}l`Rf+?FVhYM~A#=ssM%O-8q0ymm*)zR{zFdRn^!R?&m{g z83}G@mszCC3}SN0{0CCWarlmU4(5x=aa5JXUSxxe1y2;En$WNvT-Pg`=cjqNyn~$C z1-zeVI;ap|7qen7e^uoPwaB0I2O7I)Jx%vOeHj^dirUE?m=QrZb_H_k9iYQ&}H>99>@(7w@Ov9vsNI0DahS3l`m@mc-MH+y}B{-t*+_ zo=EhJn(~JI1o#Yw@imaT!u8ou7^Zicqpg~l)SOmpeLE8FUmKNtj0=xpD?1yyBj*rjkHg0h&dTYWUocwl zFqA}jLzQfOdHa7Ei)E(eQ1V^NGc)&6&gZOvJa|_wN5kZ>6YJ(`eSU+BsUw_AUvugcZ80BQI;l8_=Y$g0Zp4Fa( z^^rW_d|2d^m`PalqS~`^@#KP>IR|>P)(?dINO3JWrLsC`B3yD#>`i*PoJ7zRZ{JZ% zcE{w&_IQ5!5&chB<5O^1rQNgXmlMn15`n%}Qzyub^(k-NvIepQ-M7=RHB_#^ti5n+ zFOm$RVP$w8#e+AAcstye^;)&@daE8i^O4Q4C}&#DX2CP*yq*sC`P7&V?<-jBJXY$1 z8v|kc45sGka2PA!r_>?*t1Rulgj;0@rWbuo9H^uh$`jBt@Xkp?GkxlXwcA9<`*_K3 z5N=71MdJT9oG3+yWb7G$aeu<8gI>KE``!iXesKB-GwQ-^q`#jKMg9e$z8Kn5YcI0s zRJ!Ncb{!Nme*YcHyS4X4fdluj^B-iJ@8<^ne>?0Uraa`Hhux>{dxocH*V=AuNpAJ-WIsatKNtAb7%%!O)mv@-jXKxU=V>GO zFyVEQt|Lbqc&-+|i;9#zF>E@IYQpzBy6q&UijUQb_=SMK_MI*hd)c!4d zFXnMQXmKv9s$@lOE8)S{_k=n=_n~Kmmk&U)!rH)9`k-bOoiG%SVxtgbI z={DDY(RW?OvRS#A^JZ3%wE>Pb;Col_qp36;${y+WKcL@I7LZn)011pplc6k0O7bjy_dy-twJ^RXH;@xoW?p&q93u5r?vpysZ}Ag4J=bJQOFh zfBZ5sm1WiJ3CNttwf-Nf?Pqw)cQAO9AM_VZa*}%V{oAK&w7XcX9krTl{CPUenxyRX zeUW4<$ubfbZ{~qn!FMOykHw+?U~wH%wbrX7zGQ^CgoJZ=c=BaGO3%mG;tW>!FO4t2 z>YvzYu{zEZ3tAUOT;FK3E?JTrF(<)HQrjh@`!D><>!JRuh0-)0#!|`0&|FLZg3@}u zR(oxoLB_Fc-$A*ostW#D54ejgWqr=MaLf4oGFY5I&f;uYjVC7$($(y-3A*RH`~UEC zs#tU>?D9g?G!IoKp`vX zuJzp{`iv&$o&1J4xQf|7X*Fg^2nUZ_fE`IVtKA znEk>Id0Kd&@LYEt-SXxryBO~#NA?WwkL@Y(71owhXWrX{6~D3Cz5L@2v1b=wy#Q+2 zQ{In#O3>pptX>GcKgqqBRm*ojNonJXtd?yEr{q8=%LmjyAoNh;$>T~zZOKQ>C3Le_l%5YDe*Xtf3s7{Y5nTN{buF)bG 
z?aU=@5N{iMTFrZx%oS87>tb!K!O<(#n9O7UbEUobm>j>!Fq5@nHH?NPiwBQm#Aj^S zN6E|d6v=B`9gC7%wuln@vGA|_Z!Y9&!1-m?%d_4)&{#&R@6>V)4rf%fs$juWSv6}W zma}h8ib}5kZ$;+pX|3s=tRJ7uM%}cP8G~w~OP+{+#@_KTnyV+zd!_-b`~oansjC6D z)~DSB9#c`|I?Mm9dD9u}Sk?99qN;)6c|+Jj+$qYYS*g&&sHBPd8$zc$3?G4cYjS5E zyeuv!!|hf)=mn=bzR3#Cb3~~I+FA&MU26CiLJw-M8C;)d*UTCeCrM4R?ZT8be7Xut zH^#i9)m0T|^Va@daXf25x*LV`W`!Gl{s{ir?=g*xm$OPTHtdAa!LFQ$2|4F;fzj&< zc1d3T14y?}j7ZklV{v#Ho9=+fdGtu`*}=T%URFDrJb$xnO%d-W*2=Df^a43OZyEcX zA*!6B);y!nBT@3pEmcR>UFY=Uz4X{h?(Jk8DI(lW>wSDHdkT8`{6Bsf?A{cYet>8b zy-IR^Euh8kH0UNi{Idc)nYHqc;xw{lHme*T%xRE0zdgD9n`)=N9(pC44Z-}(vgDl6 z^{kjYy{D5f=LU_Wab2>0BZ>?Zhm$3vAv@)S>}N%rjMjb?S<{zhy;?C+yiJB)bpDCY zWaq16H@>}-Bz5`GXY4$d?$gvUh%{YjojmDd$#Mp3?BkOMVPEzJ%;pK*#koP) zu}u#*8n==qe3#mWveaTxwUK^3`R;O7LMPI;C)rpYdpRaOs{RdXYQ&9wFvDgx24~6GIk{y93IUP9b0owDE^0;&vJ|0M#oS2j~ zc|XJDJ8~y)>{wQLmL-Z|=CyRJN8fu~%{gup*)Z0+ingD_pfzdEXY*=Un;5Xpy>0o~ zeZ28zZD-8Wfel|!^JAh&POw8LixzzOSvmzUpqzQg6{9dp#fYT?v?di!FbN zukE#yEJUkBn5=-#vqn=EP9Bcr&QCt4h3xVjB2k`uLbYVB-X!14e`%H2_X~T^_FV_3n~I>oa)da(q7klh@!x57BZU z*~hteq$swF#_QmCnKJ7|jht@LRa83_V@evaob2yuqQ_wV-w+O&r%O(|b)wj-MtPUe z{s|GflA0Hj|6x%)(I$I~bDmjN(C#EvYv|qv&0%`M$P07F)6Ee9SDv zLi?1uP@U5td?uUSPSVV4UZVcb;hQnkk=lKO@3!I{75H0LOSf`w_M+q*ufz30+i~p& zGGr%lCk)sM)y$8aCPr?@-Zs!%!$Oz4BYn~fY8b*YACUA@8ed7Crp7l-`P*`TH-q~L zMt+Y$d@pR0Bdj$;d)zZHXK7kb*0NY+bis_ouxyyFgi-nH2KpeUQ{ z#aU@LLoCYaOMmDu-^H@k;y`cse#T3V_In5%`(sPqPxQpk^xl27oYj**(`>lWN8XkF zfG@SkGZ6c;()e^7_=FAX@WHQm_GxU9bMl^t(N6j_5i5ol;#oN{AbC(Sv(!>gof+P> zwEZu3UL+#qJeaXG%-Pdxad12?+{TW{L2;KcwUc-{Q{5d^Fa}r&v_9OKu*J!yt+w6i*Rq{0O?@MKFQ0so~zLGCz z_2o*uoT(4L3Yu+5nkUi~JTBR}PRD~2;gudF`P)9=5gBdOWW9P&JciVn$v$7(SyNTK zFgE^@=Z+=gsaTQ|i#}4*GCE~WDsO3v6voMA-7`iVH{s(I!t37qjD7 zwy5a(RP3*$maNh~T-z(v^0rbrfiQW62GBWY0B1&cCn=K?D%pCo;`IQ2*#z3jBl{}r zW$p22aB58IuG-8R)y!A_8xC2&UPK++{LYB9gt8s^`b5k+7@tcj+lCDniw-&Kt0$dS z!!iB+6QalS?m3Jn=lP_Y=(Pssc}rbKss1q7j~-`}B`Y2m(_*)l-eRqd>M5<>oYUHp zpS~vwWEbdxv>SmVSqoPg*PejKxzG+E{}`u}o3AZC9H{I* 
zXzUW-cDVXFPPBtTG7)5idXCa>K>Bwwuc2!-?cUB~2eR`?`n>&M^Dmz(!hJ9ahOov) zc;_7J%vTKI1(_SjdWO^W&RHk73k#CB_9@nGMZVJP@fw@=CDSk{Cj;4I*x5pqU4=Oh zW7Q&TImCTAIWIX#wqR)99*z`Kvg$1-0gdB#_v7yhRyqk{Q(?TG2fP50L;W7dO39=> z$n^_Jn-k(&^UK5NkhKA~YW+BJ=j@+OQ23Svm5l5@(DR>&zd0eTJa+emL3VI1()tqG zKdRI%Y}gv3p5T`mZ;z%=))4;cyB6fk-tuG++e!LuYL1P*VeOAZ*B)2Ea#j&0uJ*$Boawz-?0ZcNnCo*^iyo=| ztc1+jOK+1bne4t-^A?zmXWcQ7*-YN7X3o0mb@V+B-pfchm$XCi;d7p`)*ZK5ufj9(_9oALsj7@JQCgVWj$; z<<4ixto{0vL`(TVPB*D*6#Bgp%M47oO_>VtZAQ{(3;vUxKF8tXkF0brJLHtN(PBkb zDxA#g2k1q+@|b;WIh3};Y1^BQEpV(kDU*3KJM_&5t+9Tw{# z`XleVUYW)IKgjj0a~X(*Wt9FL*Lt%>R#7yCO&<*C4B-Rx40&HMLd(eq-G=SwvE41W z{0Jm+%3{`kPs5HL?344ICc9DxLRn3o{o}(RISX?(xa(rNa3_n^JsVrAI zvu>H$9FFf@)UpgG*YUpEkj#m^$!*ylx2DoIGl_T7_zYgShlC$0)kcXINphc9@Ta); zwQI=$(@m_(Ta`zZN$$SZ+U+hLAFn^W*xki3FKecoxw97uk`HY&uH+QNr;JVdkoa?V z)TMb?bhol|$hw_I?X|U7Ts|J3PNi4gRZnttsjK~6O>VNXJmD^$U7iK^FZAI#Jv?vC zv*Wo3u2iDcPxLQO=JUmbSV6}1~xmiF%hEg;AFC#B%{$~V$Ko1Z^_>2k8e_Udu8X6VKI#Q(RM#x zeE=EuQ%gJd?8DS0+oIiX&N*^SauK(yTlNUd-yzAZJzAQg#ULd;*uFVVZqbJ)v?0_LXL% z`H(x(=bS6}0rYY{@+I0xK9u*w#;qhOPuFl{GuguI;(FQ4c%)oT#10=G@_kmgpD#XU z*F?@;ctsp<%<_46mr+q0sI( zl?VCjVBgpR*lE%HSZEHvi7w*E`F|^W#q}Is-L!-k#LP~hqC1qoXZSk8|`JZ zHXiCSLw@>1!oBfl4GN`Z?E!Xxj@fR zUL@*+^~q70+&0Mz-V^ux(ruZTyBKy=p+A#en`t#o8_D2!>?dK|Vywx@-`Df; zk}Q?^(CYa3x3R@2etKCU5?)V+**LaI>n)*iKgRT;SBt^Lf$op5g^9#fjo%I)!&W3B9#!mwECQ`oxDtsL9Ygoh~cbdWU$jKMiumPd_}&uBv~D zHaC&`O4jbAfBKWgStmG?rB;#hY93dfkDNu1WU%``n$%_Q&zp?Xqo;*u*k@t^*Jt%X zUqJs1*PH4YKI3^?eDjI#4uJnkZR7-m57qYq`}_yO2dTY;yV48gsp&@7ThTjvw{NEH zU92>db{PYVBUf@mT|>vmp|Bac?Odx!%fbBMX&h|m)8)pkze6S~wX3OfGkJ32YkwRn zqSaR9%;1;!c=o(y_05?uy`Qh1M8~20J*WM= zCoW~~Y73jUd_8M+z??a>%Bfaa>9$zs>sVHuDONtk$HuDjarfovBsnOu>N}(0 ztX%m+beIW`yuD5alMA5r7aebhTp44jkz{LX3|*Q`=R$o6M&>DF3N4GU$O_-i^ml1G z6`@~dir@DABjm_)ZQdm%`&-Vn_*o=4M9H_s^QQFpl5gd`f8NUFty4}GyG>tn9^{L# z+juegDSgTHJ`K?KP2ef#@ZQ(pau2IKg~f|#mW=C189nsIi<&r}=rA7}pV5BSgN)`W zXOOK1WU|`tDm?g${@>E4EI-TnS@XzyBo-z2Xb~QC6NWa&xx57}&q_H3rzks~$DS|Y 
zL(Yl3RBX%2)0{60?hx)+>|#_*R&`Ypsx!=7YH+@I9hN%fRC{tTYiquW(Rz1Kk{JBylWC;X}EUsj~N z525l9N`8RMY|U104?MjL3b(3J6r zzpf3LoNV$oMDuhz7C(Pxq5kU3$wFBJ)=t!%6X&?A zEKB|kjfHxy|KdXj*8UgV^L%qLiMNwu1FQdslq*Dr%xw+8sCjhFO#ef$E2XAmaIvEn za#liji05>(Pk8=!IFjej(^-2qw1V3>XjUa#N(@s^_>&P$Z=O*GtBxdLO*&;=UrUi9 z?-IMhYp?#HxDp@exf@}^8nxzKTQb9x_x(V6AEcFJ``Rh;E`(GWS8q~p9X)#HmizfU zov-iWKgp*%w(uOZnZMnm)O^Tp5eqYameZI&hj(VPrr=6(WjbMb&IR4Z=d<5#FJEcT zYB|+lfot!O`fDg><>1*$z6H0e4VeP5AsD)#!0BE-HPLRKTvkEpVR5G|dD5@{thUKm zQqM?hwx_Ww*jiW1kBWe=WAAviRv^J=c$3vM$q3t!MtM*21?}fUex;gPL36UG(bC-= z=yoQ4lx4x@bbH?Sm0@QPpu|uB_w>@SKdrzp1mE>to3l-%M|ocRTm7N-4-* ztp3}eaEhO%xI4)8tO|J3J>&K1H(^cQw(Ta%aB=b#He1M_s`G)b@$G3XU8=GM ziM}=pgaNQEaUq4vKA}* zx&IjK%SmwSq1~3R{!!r78G4V)McAyZ-^T{;^Vl_h@;rJ2iKb)S4YVo-`c4XS)=YSeVU5Gd_5qK&j855W+!9i=_2)OS z&s{Xx0+D3(dyRbA)mL6!r}+B;HcZ}=r!i_G{PTp_QR$pwd9l)U)LWmYWQOCv+MA4% zk83USg~P;-7yMnfV9Ts>NdD5Md?mYh#`A^Ih4EAgxHc~EIpeaty=|sPcn-oxX@48k zF7e$2wwi}Q8KqQ*T`5-|F{1qo3v=@9we%ap3g@{0Syv9lxI;bL<(#6N$1$9|8HfBp zt2gvV^T_cDRFhw0DOu~0dm0ujgMA|!|IFgE3w?9)4*ZCTk0?=_*Keik%ee9p{0`=+ zJ+a~tIDY{BDws1+{K{!PAF^uvDRt(J(oA)1VfUPTdM>2Da>tM2(0S^pQLxe7Z1@xl z)X~S~J@PQj%i68);dO`_CeWlS4peskL-b6x=_4SyioJ8X;S4tTm?v#lXKz-_Tw*n~ z<;2_B%HB!KUr3erJ9WhVBO!4pzGs(RGT=4jfh%B^yy6w%op*}E*zXRgC;wC;QqJ@n zrhh4ex4X5K)gqZ)+L=(>kTl=E*s7OaA07fjh$XmJ33^y9rJ>Z5AFK5rYd{x3Pm zK2hd&z1}xs$3>#-OvoGxr-w)~lGNe*qyL{29~zgAa%X#NdI5LKiQ`%2xuAwAP&e^QNI0NfP(`gvg8i_P(FlaH0w#Ay?MbCBMoU5d48fz7?NyR%}lH&040c zpPGPq$-JCQ!+qhM_k=l{EV;R+v-;UIugta?<35LpIV<96a?K;@OlbGe+B%jxOncv` z`9;ipUY&V1Tg*yL@t_Jklfm{)ktgd^o-26!dR|fs8&1}rJP6^;diQY8I5;2dzP9Z1 z2~=Lu!g#tRH|8F(ucKIau^Z9~n(ZJ@ zBl>QE+WD|-iZR)t*2#Bo^PHSh@Ety7zs-6hv4f2gJL_wTy0TGAqiJXDl^#7Ar)xsu zFs(ib#~S!QkEh-b*Xyu)13#`z{_;@IUa{=d`-r5;yqGf)t|aICYRc24-Sod)13Ze^h_;iFa_`C!@sWUOZ}ZV^FBRpV$Vsen~e7LXg3%_i`gM3 z(I*c;&hp6&OuBumzW|4P&BTz-uHgIIrb!7q~K_McNwr@){AU0Y)7 z4(()x<>{C_1w$m$zpW_R2#C?a^BjM zwx9B;r~P#tj~NBuwS~YGTXZ>id_!-qG4LJbQ$Vqh0pZpQg>ZqRRugxt=#Y z1+k9`F|i+<2eHio^v_utJ>athcUHN22iD{bd$JH_KV^D{y%2v1dX3=wBB}4fpP%*P 
zSq0aKx4o+581`8~*A)xsstih(A^Z%hN{4rKeBz5vek@b>;0<-ofd2s=GZP{_bO0&0V6kH!EAD755P6X=s6obt0VX4l_z85EUfsH1_MO8 zWZl^3-uBqC!CmK(ZX*?z2X(@Jy3cHZQ!#rPo*ctMRtVKoX%)AJ-B zVsaIpiBIqHkz!=&%m*5>V_&wp#0W0quEFq_qg2L3rQv@j9kQ}`n(|Ab+5rN|pSv3) zz9n(8>Agq$X-0I}lXxngRl=nwv92Qh+VTD~-M_}2&G|_7@a5SsdD-gFr5Jg$Lm~Nc z=82${u;L=UN#0vN52dUuKTV5yJC%F_TOc;UH#viK1DUeEGxK88wUJYVx6)&~dY^)C zc8bgs!;`t_^1`V1|JeOHvbV&|8L-S#OF3S+ko14@=FN1T3zz@GJ~(GZ>ICeX1-lBo z@gf>;(N^+(R$%{}n4YnAJ^FQ~(W4~&REaCpb1tMC;9hUn55m8v;F{U^QCOzjaM#b6=gQ{Ve)0Y!bb%^djlHnA^9lu zlNIs;eeyi`9FH}_@HctylP#nVq#ni6AsGHW&Nn5+BlON$RHO9h86Ea#f$P+}z^G}B z7XA&TTYR32xt;VEOGJx@3SK(}_BZ+_vvb#xJw4HKHLP`aR;g!y=^MCrKO8edv<4=P z*`hIiui~|NHXo#gthuPC%p?frl)ZDYza-=~X(hWoZ-HR;NxcBY}I3vC&*kvDRQsUPbFvC zH9UTf7Hf%-S)ElBgZ_i5OV}a#Z!2lzR`Ft)JCZZuTRym+oo7KaPXbvjznZO*;khfj z{Y~;0$Z-+b$I|o&S9^-=$rYHHj+c1r$JnyJapN9Zf2++ad3omDFIMs`9NPH7&gdq3>9GI!$WA>I#<2 z4*TWoom>(VMD6TM+E2gUQi+xH%bW4&CAj( zE3*>)lZf^+hw?oxARqFxe1vxfEtEu2pC z97{D{p|m|OZk+g7mziXlIpT*ZyJ6`s+@p(fP20neby5{!2g%w zNA$Rd4y}39S@e8dTU+@+_EYYUM{^6DEUCVGmAVf<@}@TLy2ct2WQ?D=@=3~Nty@=! z=M8CQW3meJLw8<@NxgA5d%-fF@isP;B1LvxL=@aUSeARgIZ)(hmf$#Bw z7Gy|HmLV9_U;VAww5D3JlD{Vlpuyrv98>x^vSGwI!=D8rH9p#dBvQ|7YBR*V?R zg1g1FzU=Y{jqCHZnXV64^Dwn!?xZ@&vJzn@{U3#T)@~)*?GnSX2l6?3o}v6MxW@Cc z-*lS3b6KIc_!RozLG^T3GrOK$b6F>r9l&L2n?Cm{J;6*eY!bV&e!7?P$-ujx__x`2 zOYwCIrp7DIBgdb<&8Rr5*+v=dY=!#*G8E+*tss&}b{-5Lrhg6JY=Uz#@#PGo+cE2S zjQfD5e~60Lv*{ru=|uLr{{tsdwQJ;WyX_ZCHGxSqN9r^LhL zyRK2t;6oDJrRMI&ac_u3$qCcd?+P?KSgSKx<9gR7;C%A*)}#GQW3O&dyPlDMWKT#b+K$1c>~3xVhnB2+KIWCD=^AyO zum4VD?oF#JSvbC&vwf3Mqp7wtib;l_wk(#MM8)v#EW9GK;Y)s~g_X*8UK`N#C7a-&yssiyQ;gKa5qf`mCqF*0`$? 
zPro1YyRhkazSy2z$KY!Ve=WlCoU3%6h_Ok;IaJ%7X!WUDYWZ{&Sy!@VM$msjAU#}h zS2G%!qpiHn$O_TUG|TB_H;V{|!TxOje&;7$FgE!eZYTFGG`P=*=HNn@4jKK#T6IT>Ro4J%_}JCf(r+|HPG zs!w?{@z2RP3ovz!domC80X}?8@>2dj8)tJzPh+rIT3k%C9Z>xOlNv%H>sw#f>U)sv zgXx`>NnW5g_{2IRqG7yt0c$5aSu0lhNXcpZ=028fSBSfFakUZ4+hE^7 zi@oVkRh-EFsf8?(+=8bS>~^>IKUc@0WXhZ0HrV_-JKunhAF)N|(|_WTw~^>Ub{L^= zE60LeM59YtWIVi+eYBVRr;{$LDIfJ+R=^%7>YN}hHxy$=iM5-tZkd0j$(0kSUxHko zD4!91GIz2>D^HR8L@YbVofr9bCVx2wR`Knep)g%N)iJL+R^H57*%h(1Fji>HcXLYQ zT8!>XvKz>~pum}1{oj`lcY^Mg!jsNoabX2HUx&o47`uq4jPYGPXced9Ut&{s@?Nj? zoKtfIkI1<`ISX>3C)29rdX9WIYNZKuim`7aUbq7ui4Jc;a3Q;2OR{9?UsGtcJAKC# z;zD(DAEu8P&bv<&uMQ$hcEHXOrMIg6T~_#&_SuPgs6Op&T4hb*d`M(X%mJcw#`;y1 zo6Y{o(EHEKrQn#i=|&T_r}0;CB<;l^uKCV4ZVm$GN&utoe-n z+lzY3G3zoTy&bUqmbYaTw^uvaPt=M{|DfxqYDj+eo$k*Qb>3)Y-uxUKIZ#VE-@Bb2 z^LdgDao1wVpF;QSG0fSqHMGBq$9B?EX2ptXA*%v<)4VllGFN&Tyvva5bh?*d>wc`5 zEb&+1c@Oxn_5TJ;&pXPDuJ)tP!)k8HQh(sjN7_z))#I`B3RcLTs9Qp(s$1>g# zsk4@|Glsv#pO0XJ>?_}_)=$)!8Papbg0o?Mkh|KDAUS`2C(llJfO6TxQb6N`>uobvmWm{9$kaH4QP>*lXFgAB3!aF9LlCIlX?*z6jg33&n%8pGu2;- zm;D>^*+I4f-iIocdDy*hIu)`{V{uz?IVZ+{C3^mevsvx^4E89_#a5tLz!M8TQfT6tz*u}%0BqRMS_eA=EEd=?#qY* z#dvqJ429EKCs%l~2i@6=o~2@HYBnZ*EC;?g;yi7*n6G=hU+n4(DGzB|4Pf%G|%drTeLVtsl(kn8wOe3(TXgeo%K7D{UlfNsa}L=<$&{Ce!MD2-bAZ4qR%3 z0iD%2hDJB}oH>-ENS{;w>cTXqubr=^%4!|N#(C;rqMw@2mrCGX);P`31I@&`_IRE( z|NGR`mABo_zMZvOhZiPm?oW7pCx#xtzq127(X)zs%d>AzwBPJsGF@$lZBEqOjEi~S zmZy}Qb&_+cGq?1fC{UR-M)RR>Ma^1ll$^2Iy?1=UUR{;So0yyglXs!Nu})4Yzksw; zw36N1YgqDNn*2YS?gVbjX?q`dlZuljLS#4<4azZ<(jZYPN%N#gr3oFCDZ?S;P)Rc- zsg&k4n;lcARA@jl&qEo)F&&@(_j>yOyq~=9b3gmud#!6-YwdmSd+)VxBt=dENshrS zkmwHEFRcsCgyT}b~S zES6~NU}zky_8ZAroNl{eaVQMi(Ee6eii=uV6)>$3pK|(HtniL;L(ZVAu9vF@p>veT zUV?VwQX{prgTd8&uMr0SM$Y^2`WeWai?45Bej@)9xSv(z2WYbu*_Y98y6dyydpZ0z z^WWP^oi{Jp{r4mZ-h|mqIww2cRkTi4`mB#jHj`wJZldloP_K$*SZjugGrAXtaP0SFmaa z67S_1mx~q;&?e)mI{J)3Fv{ML&V^^LelT6h7dpc=d9^Y_ekgC8&Pz9nMafF@iP9@D z^;H&WjXPOy{ukz)%EAY-PG)->u=}$R+^zPn)S2CppVIAHi2b0h`IwM*sd-nD8U5^; 
z%UMfV!%-iKrC9SB{<(p?jnzECz1bb|8-Hu<^JUt~?v-RB8%U4ylx|4B?DNlaUokjj zci2_p!lg9I+t(}j&MFelQA*ZB8D}sja<~Ne;A~P@KS%wwTw+dhCy3 zwt#oH)^aWSRnclG+{%1vGE64}M^45%S8N`rd@*`#VvR{GxW?6!A@CKC+s*=6#gVgJ zv&JfGS{CC=)@O}SBI|Rr4rBqpxE9*Ua5qlR(Mo0m(vN3F z;c${A6T*`$m^?s@F<^yn@>HD}=3;6nBO2_$zdRje-E=7}yelFe2K$^lT|=*NDy>IC zzdSjz26?P#e2?f>mNrYptp0FY46zI8np2=}73a?3Q`2ddS+cBM$=bJNFt`n-CwZnh zMBo1riE`S~E;@hazQ;(EbA~q=)jh*{OAGQ{g)iu1C)sjMV>AA(|%J?mgHy8qzJ*c8J<@a^yH|)+&t7zh<)!#Dnql zd>jf*NRkyv*N9+CM8=#elv&AS(B7cj8l1m^*0V7nYYbi@Pu6TFgWPPrKvvYP6|YV% z^whIO#k+Y>W)L2v(>5BO<-TOMN`9C+?mvZA^~B}OekJDTq>Japy^J=;;pr0pHj-}+ z4Dy~ddv}k-o?)z?efwqEZw%R2LS(L*N7M8b(Yy+ezd>B>%@^(x6FT~Qi1tT{EXghU zt-sgsy!-wAB<9>CLf+5chOqN!_FiY?S%uzN1D!d&?0amW_uf#5+|5P0N9dUM(hJ~m z1^!lM?^0w)&fNKQ8LFLRjJwahIq#t!M&#LHBgJgcG-P05};$A{xzDJ|~usS+D3r0xIk`8g6)WBH5mV>_?U zTi9eQZ2_U=9lr^${&L3<-=9eDjpTS62W}@r){SIU;WGID!fPHQ^A>S0&jhctY;s|4 zhr=+IE=})bMVdjE7UF51(J$Z$J6XIv308|jmDTqw>A&*%DEIC^LvgH@`_r?Nd#7r9 zm>M2u?FGtYHn}ytcd}y{@+SA={xw_AL2MID?q!MnXRnVg_~a3;wBdF0Y5F;zo6qao z!F)Mx4nh#9U?2?I*<*Y_IGwrHsK4&pOEqO zqj>l<%Ow9n&iK9)mg~jc%)6dXlO_B)r^WnAy5t}|Uae1PuPFNsf?x7dCv#YyZC@qn zvv~NYpUe*DJfYp%8v~hS4JbpWTii1Seoe^NLR-h;dw=}Q`>!56x4L?Nc2C}#WXyOD z3*HO2Ct-F4PskpN)7Ye$&m(C27Z#pK_nc;Y6V6Pae>2v9OpHp_!2RdyZDzp7?F`*dX$kEP*1eU z$v~fyw=?~+>tQtQ8j6@D;aUp1$&EUl)_Y0SO(Z!6J2KY)50+I?WAx4H%cbm_bH_4b zO?L5C@OfCN%;XQ@IX#Vva&FEy>c~39dhVVCn`%&dvoQAl8}F^cV%L!GzoeO?p1i;6 zq3oq(7|H{3PU5ZV&3^4QV%h9MMBgPQABl;x;qW^~XWjcJT78g5RAl?JS-uVFFE2bJ zlq`%OI`EY*Nm&~Uzaq)y>c0erc|slopNsSyclm!#9w^2Z_t38e|5!riQ+&!?-3ySv zgbkn3LnY^K5ePKWH*Vl3gVa?Xl08X!CfmN~+vJwM-?v$7aX8*3%T9LT9EB}8m+9a1 z9tpt<;4~OgHPw^%h&h4dWMx-iOve39A+&}ZT}At+$-Myz=do;dROUJHb$9iJ#r{*- z>yf(#=C8p0P43D2)>U-qMeAuiiWKwr>KbWT2-ydS)hFWoEB=hUiJu*-AqFsP4a zk&gY`r`dW@ zG1)V-;;;$nr?T?JMk9GTDTfuW(*U&m~?|PB%N7u9aXNmE_SU-CqJD3d9 z;nNr2lLdb!Yi2##VGv3N@9iYIMO>Ig%ES0#-bZE+{W;L>Ua(YV_&SOhwXkKR*fx}Y zc_My8Z21!ZAH(`1>3N3O{kC?dsCh5{{)f7fC1Q`ru$t7#6&W^-qvtccai6{>`A~8S z@taN`L7;pA@B&0KbucG`B^jRSekH@B*!?=G8{{!|NsK(3i 
zY6rZt_9(dp55T3Y%d17URXp-sEjJ`d-qoxXsj@nFGX1By^1JBuGcDdzbA39@!K!&| zK7#G;(`RSj@luHI_1#wOBu7cI4`c=OP3ljd(?I#}T+d9`0)CT~2jjH55Odr3YYtDo zpL~f_jf?`H)ZQU9Ttl;!YHhB~tfHSr!zbMJwC}p(%d4!H3>RD3^E)=EFOCfnn@Wp> zFB(yNqc_VwiJYfCg5M=qMP+u%34nFoF`sXZ!j8-Z=3K{*3)Xr?OB31g{Q_r>ByBJD zyB%XMfZW4umi0~dvHYX(&koStte?5u37DO8N>(YK6?V1oGyCci6Nd2g#P(CflZ>R2 z#pN@a-puwnBRCm6mTC87u`KVSHi!}Tv-#hZKZyRl@V}Tik{N=E+ItyN+qG7c7SCxX zyLK{vm@&$A*ie>zK4tZ-wEja)54)?T+Oo=I8C~wiuF3k>b+C9JI$6!JSu5kUoPKu$ zeOibUBen4kUwH^W29xbX_tka(KiH-M#$PE4WMA1gEHRCCIb%3;NExXd$D1DKVxqE`z2bl%AwK=bsd zIb$Jn+x=+uC%H~7aPTEsr$@OE4&CTp1+MSH_YiE#$<(vlIUJVB==cN#*C>&djVFo7 z$HI1;cDwoZI>_ARYFYM9{`ZdR`;N_r`nH^sf4G0W9_erq?L<->tKNaEvk99|F++5z zzw@rC31_7sc<;LPhhGG{jw zq1o^BdWL@O;oD4o$sd#_hZnSYIb5P)dGV?UhPG4Y0e25(?Y~3uSN5;PhGE${^<)?I zT-PUvV6j7Xsa}O~S>LxsZQb$WVKE@5O1?n1<9I|?z%9Vrzx0)5=zlb(*XIwT+&K^T za)w}@{@3xJM8YC;Sx)K>lIrS9hu2_Qg5M_B+-6=f5wG*^IC{3nBMknBGC- z!R}myn{`;Sy4tcrWWI><9e@3$z|A}r9R$zcu%;iIWG~(?q-pC(KG`iZE1YcP$vfPM zpVbs4>hjhSbgoi}pUZh#GCXIDv_G%RZLV*`zj6iKdqJ!(TW4+T#qN5W*2mFqEFWpg zdf$*G+3d4^@elZ9fBb)Wd}FrC86$ORowqY%aA__qi>T*xmUvnF$B<@{Xxf=vISG0Q zoHCQrpEga{H0LVkZO;NSW_GwT|2#;vnaIC?DR@D~K7%m!T>WYlzq1OhrM{(#I>)fg z6_}cx(pgFIJ`M9WXBxD#lO*RkFD7MIyn0$h>#iTTiKo0PDwoEO%7r`g)NnHCGDlgD z&0j9qbFaU)iE(wF}F8cu%$&QgvNLF5aIUCBEH~-3b>1cAjg`;z^XfO7DEl#cC zODkAuBp$p%zr|SBk#xOCo41bb`DNDXWX5?Bt{&=Y=D`nfy}Mej6!~t(u*XSPgWOAb z^#nb3@_o(M_IyZvuJsv8+{b23d28ZTYg|wM>+Jf@Y{uJSYGxf)vF6b@_afBhh)tTqmsPEnM2$~yNf2dj;s%^Nqk-7gwp;)31JJ-|d zYoy8v$y>#_9eDNuNxxQOdmi;Dxq7p0&Z*o+misY#CTV`h)CStkNd>24#oh23tdA(p ztF!id5BojLRtJlHBQa(uJN}!D>##Gc%eERt48-Ac*yB!~o;m2`nHmYxHR|uoW_c%B z$DKXYS%jQ@$(GslPuM7Ho$gZlN;RE>=TnuQMCQ$6$30L=j_lH+#~esChEWGPB+JE# zv^X6e$!${#DwX*}c9~`sO;xpgSm<*f80(g2!K@`c zkqpPOZBM^jL1i2qmWWDkllEhGk7JcJG=2g$WwdZHzxtDob09Gf=9RQ_gunkN^c`j3 z_#01qf~_a&DRLgy6ZB6$kil5foCUHrt+evlC-swB_pxJnygv%d+ObuhCcb8)Zupn; zL2}~hLt5Id

    $?LgtY)N`{I@{Xf2YIaFHcL8iGr5%&Akmi!l$#IO&wnfFTFXpwcZ zZQQpCKgM}3?1aHLi>1wJ)R<<;NOCtuW>4uh9PLr)Js*KZHF8f91J{WVH^C)u5Esy6 zfclf^?q{R4yV(7C8o#BUhHqOGWV1{_!V5O28CG^aGv>!#1rUe`3y-8LtzodmrU|WVYa`H_#xPL5CWG1qk=#=-tTebg( z)=trWEuS(1`BY2)HZFKWTRE5HE47ux?>CiAUWL4=9FNJxq5YG3exgfL^>>Ho3FJN* zuiLP~2d<@ZoAqzKkGC&L)ldl`yPlRRUSj!=Dw{zdR_7>%})Ao;1sD)}mZ z$*P4RBESI1JdR5}-JAWy8C|s_Syl!-PopVfN*$VHCD=Hv)*^3X|K1@_a;DE0m&Vd} zff|RX|4B6_-%T=S1@RV;T?dV6czKlmIJ;r8esm$MoBKXlI-X+dS-2GMuB(LsB;LQ) zcc&s=7NNr}AWt-`92?~IXVtys1vnFUfnU;&!IqNYw0+P|@4f9YRyZ)$> z2jkEH9K08TPZyAw%y%b?Upp}6c3znk7&$Fxt^0fMhhyRVht?kwRcD-+5rAr{C|pr%2VA@4w5Er{KUs@g?h4Ym((+h<`%!o7EN1|0ns|y8kZ{ z)#DFYh2Q!A_e$MpaS|C;x-(})b$~=pk?c?Yjchn+JS;+F#q*q!p>#i5O?IQ+5kYb)^f5vWCBlcsPu{TiCv>cwe3VLwNTvc$W|X zlbiZpP%xS3~L3V$x#C8z6fu-}bn5^vUj99)^cjo_U(EL;1EY%|? zvhEh2yU^!ppK_u~#>Bhz7ystHUqOB!TkjS1zf$Lk;@?l?%dU(C_&Co0OR{JW_G!-| z+1GfO`m?4#y-Xdt=ai@92%n2xpNb>fl>E1rGGgzHBXi)J3`K)Qx!G{YI*Z1n??Co3 zP?NDkkA8;MCg9Kzu`K5xXSIIbh2BZtZwpVZlUS%9ugMv;?eT4|7IJobo@M_7i(hHA zK>dkZ$&36ax$~~899=Ke>T=p;e|%2#YUZ1V3geJGpN)g?tK!=bQXPpg$wgRP+!#Zm zHl)m3%Aer*f%bELYThL#|5sUZW$$Iq7)_dS-rk>1H^lMMKCu;@pAu!)CXMUK4rFM?Z>#lP&lzF)bV5BDAme&weJ zls_?2jorq>`C3utI&p0(U)<{Z7UWt<=A15&JdTs}ntA(~sBtw*^)u?rF6JH(_?mrl z0>u(izhy+(57%p`BRL|viWF_tbTp=~_xV&@j3+h{&06x@QGT+EI(s4x#GM&XsX)tH zMew{2%-ITmuuo;ySxbvEM5h+|?3{gA3XUH@va`EOx>DJrBMh8J6 zt9AQ{o#iq68P@MEp1;6vnv?EO9N)+WukfO}c>0p7kBiy0#FdGKp0~g2)#<%}9S((E z)?6KpNtvsdPtuBdnlX^h+oo^X@i2HjtexyGy_(f_D^-H-1N1&Qwdp?Im#39$)b=B3 z$BJ0hS-*=KPrFZjMYrWQvW0fgvw47!~>)m_8Fi%2l3jUg% z>y56Q@N1i21zM-$e{35X-{;1Ra&fEC#ZmtAl<#qL@4 zCo9{DWGstiiRJzE#dj7)VmUkD4Y4Y_5|epuKB-mgp>4h!ZX;+N2*(33g*0QGW zGHAU*%49h`6)W?Ua1wpG@Swc;IUCzobq)%a*z4duBQi4{OVt$SnN{j9~K@v$_$PsWt_V&ZVjI9&@_)x8fJ=ZXdU zw0xSH2aEoh5y(vV5g4|EgvrNvgSdMntCWQLD0Ug7t(IbU);t%1Rtqf7oJw}m+|O^a zPvSZf>`-F^Kb2tIo<4b(u>`l?!;EiWmR>9U`vfDK?4KA9{hSP&S)J_48tGbAQZyjz zW$%))iF#5!wGDSu*7=#f}y@uu{KzAo~}=wX;}kTVduSp7k@`Dv|SY=$xyrMrzBm<~;4a zOWW^7u$QlFbu3Z#}hPQ6wwheuhRx 
zHC?Fx`kAzAF!?GR&-nlivGV~aOr~GfB7VuAD)Y0NzNyY?*TA(KUZ2bgMKLiMK0YJY zX61jRWfv`t^7jUQS5G9pS%05>IM2BwvrJFv74KoioO^LH&NXF|^Nk;}YwkH-a{+GU z?BnqT8|N(S-dev_9Jm&fR+6g^kD0};wdn9A{gWMSFgpyPb9$2DYF&U`twg1YtT79x z@*X_dGBPvYO{tbxRvV628ab3C;nfBCo4cnWl={*tnWwTEzBXSALPgv&f<-oAe0o_-yFAUYyQKr_ac_o{wCH znK@_cdZn+zwXC+As{Le&S&6g%CHd>LIDjO3V3S#nyam4#Yj(l)C1@X`oj-k-ERV@? z-GLOJ^Z(xZ@XhQqK;M!%m#m)XsqGTl%PApm^1F{&J{cSG4!JHAtFitNpNAL5VG|0y z^nq~esExceJJ3C6C^ZAh7r|#EekPl4Rgw&&UG|O-H(r~}0yVUmb9hSd>GCv529?1W z^Dzv!xc($8%0WD{Q0Is(nS0IYhj|M!L+yiUmQ0CH@{fguKB9svo3H-S& z7Ga zeN0}|*0^;X`(97l`82v)xrfy_4|{G^&wLT=3$;~N#~OcEVWGu(-K@cXO%GU1{h5QP zPRC=gZWRB%7dvk7cMDilhuMRL5!OqhPDSx4&zPOG*|`u;zgKd*b~oVS6NNUOFT~Xj ztT!1eXOQntQXIw8a(-*hj@ZQ0o-FX9JTxxHm&~nX<;k&FwO2g(r&%iS}p?JoR7I#fAB7}YAZWauhc`kbzyZ3G#-G_C)nRYKQ&CN*@K$1 ze>*Fc{Jx(;@osnLIsQ{R^<CSzD;|X<7}>14hqZp)ZwmY zlr)$Z<#dC+^lVSEF-8qJe`bxgPGG-@EKx~4$MMc<_|Ro^y|&2HYWp`IyPI_eX`?o;$msU~HGZkLZ_7g-W7|A)r`HQR zAJ)P>$F>hdqSy(xPPGblLu!W>D!9d z%fzH|B&+AzC}?H{TMJR68=S5sWf#mkUj45WJgN~3lu|nTOy|L(lo}4FUCsg@P>2Sn z^Wz3Qpfioq@8$fQ7I;=2YevI0Jzz1?CfC`kq|6yHb&LyFLvbz1a^lgq`iC7X*-1ni z@Bi6FeJ*Zf1!H7Sp8E>;T3zY9fw_S6S?QnNEV)G=HS#_lo|lS2*X!x?X6PXn&+}B6 zeIoxk6e90o8o=tG3vp&DIR{urDtm)|l=_xqAgZ)a9@=7QtYeTYeWbac> zL>LX9&F=35gT-V%5yv=#-Iv zp5n4PB=1z`k!A{emSoB0>N=Ms$uM_6d_Fe|c9d_@k7v!m1QvWuKQaUIZN-U_7`_sN za$eAI7QDaU-{}>zmn@lkzfdN#HN_y?RXk00kq2@1aJY{&yg&xF)!LB7Y z*nGdM^Q6qVEq6yNvF7h^dK3DU>3If}y3_F}QfJ@59)C?S#yuBO$qiT@HixUFCCP8| zeLZ$drmCDglNs1!#H(H`yPnTaXUn7DaN+1k$P?=AGtiHZ-f zNizsO52MU^UBZf;U^tFE!_<`%$hWh4RZSGNi z0sl`PsLJq5F4?oRR#xBnKHZ;U=j_8yBrT@Sj$+zc*CT0FcX!~;TiuhD?wK(j1&OQ% z&56s&jCD3F{?6`Y*)b<5^~JaYFf4L!CifC3l%Z?pS*E!!vvL0=<=@1H_vxOMS{*Se z?^^B_dotJD4|;FlT;2&M+eXe(S%6n3s5u(n%?pwx?$83$lSOrzHvWQH_Hkc$enmZc z*3JD-kAE0#>hih=F{YU(n8!RNFLE{OGhf!m7Knr`FY&?S$k8Z^t#em{kNH5S|yIFIQ z(>n5wYX+nS6pCkK4WW1K%wRzt-hz-Eg3DCID^-juW1f5KP zVL^6OW=+-%G2kWK`;VxVj2$ofHd)EW7&*j;mZ`Z7WLjcuQ_Sz8hK7YP`F$kF8PLD@ zZXlcGOsi+0u)wwKJ03-zPNK{#IFE6?A`L4*?G!Pik6K>m$B(+_SN-*T%y?JCNp_)2 
zwYU|wKl0Y(Y59xqPgGla_SdnpES4N=Opx7_2m0$Y^3Ejf4t?gK*!~Elb1Hb&|E~7e zUurAI?l<$Tw|$#DCK(~+WQ6zF_&fS#CBzu4+2<$r{ejK4(LJ*|Q?ak5w$4`OqQbST zB~2#%yzR_c>hrK;k7#i*Zr9SrpZ*_N_tC6~-_zlj{k~_hPFC}e(Z;dr&T8Za+Rsj> zvq`!JyVpV_XG$gW=~^gMh29M}NqB(>+)O=B(fVs6z~k`s3s%o*rRCUs8mR}u?08=IlE~3WB*+ZgcpBYA zvl0dWKN}N2)BdwYt6$M>E-y-!+0%Gkp7wLXL>rML8GJWD8*Je{34d@KUr*2!J03z`6{)J z#+~fqokD_obj>IsISMiwz5#1r)-NP~Mf{-*OMHhLe?n>xDK~1PzPs{X^)6)|R$osR zTkF2JdCZ+Sb{;)5SDiN`Pq5q+S|wj~CqKy)oE=s}VU&G9t<`lItscaqQrMKI*8`xl z00u|mbuy;*B5`smzeeM%7p%l)d6LT8soGlDjT^~dyNx$x4gMAMZiq*Dmzo^@b;Y^w zFyd2@JiAN3aOGHc3?boGxF)mT5X{-9t*nIHq{cT~&Dj4=T+ADjA)>@n&|9b7e--R< zi*ZUadFFii`n)i26Ss)QXJT`*cqH?C=03AJeL~{D+Af-=(+X&HNhw?Q@wi*Z9x zemwej^3>$}Gg&fkQD0|`%jngD6uq#oiJt9Ds8-;y=kbfR1(dg`Gw=VNFSK9N|I_<- zfni2O*{?c4J?F7ce?Qyl`gvhoegjOBarp}^w-Fa^#g42X9iy&Mdi3dHXjY+hVu9!N zFWCcHmzEF0d$>9(=qFYv-w`{K&*Od!%jGfv>Y_S00)S z5E-H0hFve>M)po@S5xu;9OK^3BsrCZa$ZOgU!J6y82C#O$Vhd^>Ip5mL&$ewd@Gp{y@ zJ~bIZ^m}tv-t>#S3_5zi0(L@^Gwt9kSoCK_B-gqfGeM z8t2c_()T2~n1t_=s2yuB6+Nnn9sjq>q>Xy&@w1g$Jy@GrTQ?il9eCm-wZ(QBSFa+& z#XSB(TK8eQ$`ITP`Q*Nu$1lqBkRp1jJ!~~p1Zu?Zcj3&B+PpyIYQ)25C^cRm(Uhzg zVsr8{o{Lon`kh&pWOB($$rDJBx&2=;AsHkxul$|wXTWtghU_HWIODIpbI7@B*DAZ4 zpJohK3NJQ_g;`6HEF_7jSts3-CpKiKBJohpDXo7cJXg!BsC-LB%S5gjw zcVO$4BK0WJG-AW)?Am~w=acCuK2n8rCHZh?@-Kv8)@uLZyT#gBt;Nm${u4SmZ*q^Z z{%}&?F1GaNvxD#~^V-QdUy3DXlDU;~SF`mG+I^m{<&^U=N`74!X=SHytXRa)K-cU0 z|9q%_iOX#YOnRMU<;k)JroW1*eK0sHOf&yqp@3VJLhOH6yS2#@3mxq5oR*MT$jp^y zd@&fKDzj#0S+Z*R5fLd_E$V1-hHF0*Mzz^_k~}^;Shy$_Bs)MW{`(vLzX-*Q%{KVS zimpff|KAu>h391kETh!9kjN-!vj{f@)|r_b!d4@c{ZP4vWFIaL_k?^-vAsvhS$ug3 z&&hj+rDS@ZH2XxglH|w9bBy5jL0g%cq(b9yEE$w_i8=+-v0&D z$_0ju@Yh9!K0JFt_CRPR#=Q*p&kFr{-i>6JNlCu&4JotR?Pc;EYn;;@uNL_$=RqX{ z%Dtj|PMyqd-K}uRJLL2)y^R3=qDSIw)=Fn>P4*-vJK}s%@E`b`mC8@3ccuPrx_VYB zQ3l_3k$NsZ9EAb5!}m(#g;$~ZIN!*L3bnB9PUuw8dK-1s79zm`}yik4N_e2)IIHcfuR%zEP8aj-gt&PS?0=aTfKO=+@E5RI}r zuZ_D#sy%C;Yr^YvxLqrvUnim;#vgi-VV83Kd^=4%8Lf{g;%fG(hrQY9eX4lAmcBQ! 
z!FBY1j(44oFT=2{5oVPn&miNjR^myXIu0*LI?a`?=hxSxzpI6X9}U^dV3gfVw`0T{ z+7UYv-z9qd5C|J0oR>s3Gd98P1 z!D-5rgGA;~KNhW@r%m>Z28RcE_Guz^Hxj(7j-%Y!gU4>g#GG%Hv#*PZz1jJ5o{`mI zuy5&8P8-|=nXTlRMAKvwN_MW!KAlFMFW{Zox&=z_6DLZzKTo#p^)bm))rxid;>QGW z<0aAZQ@9@I`%d_MBOWitwTI}CS)r$KD*Jkdsdo;%?_349*YJelWFA3=F1+e# zUQ!&JYp{JATF!-GF}BPZf+y)4p24ojQj0XNs4e+wYT#s6V&*Kltcti-wCbVVC3@#; zl&Dzn(bcqA#J_U_RRh*&M(#_+fn?-Mj^{gJoVR{Ci7oF3za?d*g1l8(?;IRz>AvKg zJk9US?UWD`JG1^w9+h6dIk5cIFQ$c|JxQ7ODnb0Nf+ZHI zwKbo788-jr1<5tDveo{cuKQE@4Sb9Wv%Tn9L#xq z-}t#uH24gIe-e$ZCjA{W%UJzB$lXQX-df$H&8c{tXq*%L8t9WoD|5WR;vZevu#eKo z6F3q&#h`f)sVchbbxh7%w`)a>X1H*q);s%lH&4zpPD#AW9)=eRzWT0lPz%xGXWZ%Q z?$`MFN)txI5^X=ahCNyvNU_ zqC#?vCx^pU=Ih3;3Z)>6Or zp;k{<=L|I^a$HTfQA!-hl37RJ%E)*G&cEsEf!fH);BDFGT(Km33Ld0iMtcglm|Vc0I=ZJgNi#!{eFRyb*+&hn zu{T_=>D$`;Vih!#lPJ4}E_YYn+2(AiF0}6;-er$=&ahs|c3FLwr|fH?m9nj%pY_1W zeR~XqGVXkmMVl3P-i3q}`9nSS_yG#p$+QE5Iuu4156~lfWv|z-E!I;MEy$Xuk7OQh zXrxlx)tryfiUqs)Ht&8Ofl(FlzNHbuWV#&3FSlY+PKbFLgVN?)<(KlnfvKkKSZCSnGMWwoYWv$tTGpkc?1y)0m7}t?}ebo{$xN_w&%~bIGpuZJt;k6cv+; zbRv(+YP6go@(!P9j{#HlGuISGLbW0FqBb*9xSY(%C7$Q3`QlsF!DL47U^uPj6Ilh` z3)07uD7lQvKwvx;jZkY=k!40bZ$d89S7mo>vI?EYwg-uBnMcVPYQqaQ$m*B>@}<>y zSI4!)kK|Hc%O5*o?1g&VVx&qwv$gDye9}kIA=$yYX=gQ-wIl6`g(ykW$@xoNOyv3f1|LI-I=%Q$Ejz3#`&D-u^5Y!d3z)V zb;7>k%9a=Vw<*`(=WQhU4FC4Iv#WBe#e#(e)_eemO?;!VNIMHIt<`@LKGrCV6?2+j zo}Xs0$!)$nnm>Mpg~?mEQB-)Bp|-u`l`eDan#+AUm^%8zU5jLeAt}tF^zg_hX_|Rz7xy`}3GPLe0HM zl{GMrifecBp-)LVfF}3pfwL+q&)oa4BPW40X2YCXbsg)C(Z|1U0s;vPu_&QZLNT@T_BYF zZB<~OY&gkzoBXkPe#m(8HutBund(~BbUy>LyfGeH7|UF&{$Fq>Bhsu$&1!@CB+WRd zK1R(G6F0C+OE$Wd?3J{16X|=gd9q?PD=>5j&9VwHPe7SDK8d6oU^0g#YSHgHBh+@- znarafvD1CzsLX?Mu2B_OW~Jc2eR`Nhcj5LVT4bl=S6WJ5gb6U6Pm^TAO;)FVw78&P zjbZ*ii%h5Uhi<$o>(O$W;#%=7E7=pJ%0c&B7VZG+nZ_IU!RfaGwx5Uyt=;ir0hJF} z=XK0FfR@?E*^z9cMVU{PuO>d<0jJB^v%Y`xu;Uj|ZwuzXQjli8=w5;5zi2n>VlqZL zfh@^ruo{z3r}@3~8O{nrw4XV~=Gy7c&b?VPZ|EL_&3biY&aNmVa-w)6-yBGfkJ-8| zZF5@O8nxZ3^m3Np&i*;qwv5j?|G1jAZ&%9=aJh{($r#a??t3siYQei^(5C~h 
zzDq3&^(Y&SR=yDp@*epnEo7~6-gTG7oUh?B8`J(qrbW=*1&!{;9&`D^-{{}Q_sIj^ zO>cO;>&aSBk8l0U)v+Q?V=d-ct`EpVLaQF?j?E40Q}qT%7dck0Z^O924%4> z8Nd@$j`JyRVCG=Ou{ghz&MRS@8Il9AW&ozVW(=7X!~?Ob9j$(J#~9q6L9UDf-eAFj zFiXjS*pXb3%OEgKuaXncm*MTbP!0dvi558*U^|Z(M1$+eHbfnHznW~Pt0DA+-zj^Q z5;fUq5-a3wY4!y#<5e}FnK#sIl*uy{odd$wSC?{dP{Dt zAD~v0e{SJj_u#?n81y&DZiMal0Rv!~enGfxFG^g8KZ4k@6qfAp5!5wqEOI19DX zVzMv(Ly76E_m-OT{%|%sWaYqZ_)}RwGK5~qqI5jg{3$ZrM!(JQITVi%=8roeI3K=W zV{lE9WMy!6Uu8ANK)N4{C2Qf6=aif1{wn_DoT1mmhO5<>ST_VpNAszEWXd{^oJ4*r zZD-*|W8Ws$uE4D&usH%whha|}-kY4E<8UCE&A%c+cep%(ogcG9AIK(4Obs@>UyB*h zXKh4Hj4MOp?ihRt)+FCvP7wJgFWezk=8UI@)sa(f*0J0Mt!Dqs*Z5x6SS2|n*ODhwr8d6M$S5d+G0S?TzDRv z?i2U=)A$-PXI*s{k-0Y6zGSCOtd&#Tj%4Gk%uBX|3w_E7o>@^a(VZpKbe^`ejw)6; z2BL!?^*paOf4YNF=*lq@6O_k`CMIP@Gzl8f>vo;gl*-U;Vq#{Al+WE0I=rmPCc zllJt2#afzTD3@dmwO%u|w9hTtVlba4CCI1{#%3#OsWY7O6iu7FHFaN*U2Q zjN9k>OQd^IRL*>E&US3fm!Hy3J>!9KIF}U=H)&7G}_3UtA4y=|8w(jG2>br8XM+ zc9y%x8B@=N&)ICzkW3jnlqLND7O4)i%x$!WWjU-`hpCyXXeuUVHR|Eo+gFGi$$|fe zUM%Y>TJXeY3(>HdHl~TUtLX6#gt8(!Z^g%v{Xp?zoe0*M?iFFwnfG1g^Dlhoa+>A* zf%ba%o_ygwmT8D_mGSU#K721cA69R5cb1~fW~IjCNS+jv8)hWm%Y1)h?7SWC?$=^x zGOedmJ=W@>d@}G&DA>6OG!IjIPI2$ZR+$Sc?c1ETeY-ldr|op#|3RMbNc~uWpOYbR z9X*C4 zMu)7y%PeDNLI*+UQMUV@9lz!qH~aiHxds(b{1Uh3@#5??D9;;*YUxcqdEU5ZH&Z?B zl;<&L@W9DpNLO~gR^-VWr0kD*fd@Y5TK1CGfLxw4USZk8MBgH=JdTAI(5knm@8lK8 z&b=W@eD3!VWXf~+8ofa>BgU$IeVfeL6~)EfxW0>x?sji2b!60;-7>92yPPJF9Zfm2 zqZXcAfQ7^G^!n7PM_;5zd;x;lSN9O)zof-UERvOC^Pyi0dWYkBDSs~$+p^mwx;&uX z%mCesb8C3*Nm}?P{htthGGCU}sGZs4B9i6lx4hB~^p#l&J`9Rq@~gU7^RC{$Az43X zhgaQS)^}??F)r}^8nI;$D`b`;r)7*|+3=wasZU_>#;&$t!**(}q@O!N93BCk`9_}; zN%9eHXNl7_zT}9qzjw^hdw-C?K;(8JNpL{k?fEARkFS1=ov*Waz?3U~4F$;pv zV8(M$IfxFYyFO3!oGdO>W5pAon>m?%YRP$ikHD_Ck!AL(b|z)=SXCy~AS}r4^URRt zG?%j=lMLgh^Wf~pT>zsa#JY1JoVSHJd1MB;=i}Z1#&^kEvQ?WcSovNuuYp7{dv)?Ib0rVhquxKd$EL0=_Yg)wAp59e-7z!+GRi%=_a7MPRlGdhaS%oli{E zZk~-RYAYw2cOl&eqSz^LTdlrUu72a{3|DjhX+6>$Z@#vdYssiIl$G1)kJ9V*rfHtP zm#Dd(yQ{-8I}kJ9)5U$S(BSWElpTa!Nxu|!R~0Nclm@M6mHaSitE!%$jgl?s+XEV% 
z{JR}*^DeBjcsG=d@^*1OJ|$~N^13E+?R%C#mxpXp|9x~CjtAH9 z%gX$?1vYGh#dLLz6ywv^<^5qf9)BmFN&b@=ygEA;6Fuvxy(Rz6e0iSZ-_$$Yf}uUM zz0p{pHt91Ox{Quf`OZW<&Hl}|-Ji3&PuEvZhxmAPWIUZrgI}><^7-x04lq@(_Lv$c z;MD?>=Hq{oBztOl=@XM7YaG0C7HLiPsV_z((`;S; zvIBiGM3%XGhkBABAUVRSlkY?rETUa@-W@^bWK61QEYw!**$4AFJKYb-U&*l49n zD|E8c<`7mK43QT^xa1`5sIF!BnK62vVurEiD@ymkgYWd0ui-(mD$a+;Ij(fTp9gqB zvf!OWvia&-$BW`2r|Dx`6=wF9^0ln_tUG{MdtWY&RpM(m zi99bts0rPAVN}*Jyew+vRJktD?M=6V+CBpsXX;PRW`pE#pUK0rb9A@6UV-sv5Xw9I z2i;Sf#pXls`Sbs#NB@})o3xxUd{#i_T}gIGtf$8@JoXPfs7%uhBE@^^yNm_@>(c<7 zJ&UG!`p!Fp8t!ZDU-qr;a!*Fq``95T6eL6WHMmhzZJAFS2iXyYG3&=_t^uW1yt4tM zAN5T`f3I?_9~Pd+CufO0$uH7{4WGb`Deld#oLa9?j$Zf~b!1j`fR@hl%}%zxL{!|V-9BW_48-T`SVnL8KbXb08$fNhzPA|f z-3yhRAg~ge$vslhr&jnl3cIKI*BGiTSwB7K@nT$y0#aG8IoH$S3d|d;uDrK6jTAX= zAal3fuxqcfr;6ZthdNe`-7xJYEiYEra16|dZ9`l>i^g}ge z58qC;^eEJPC_WT5y2u;2m-u8d1s{p28JQohw@W^s^h>w8e=Ib9!L2-}Jg40%qR5ke zXKhg4$;`mmUD#7h9Us8r7#^7QKcyiv5q2Hvo&Ax0;Xl#1WH&uNq)q1HSHkpBHux2{ zzUQgQq%;&OJBeU#;m}&2e#Oh@u;fMT9`5cuji2e;xirZN%1ufYQ6hYMj(21o+aj2c zV5Qa2+N#~`3%&v#&3yL*IorFV7{9rPX4k=?f^UxCrRi(B>(?e0xHFoT$#^l9-4?*= zN1v|2wC4DABrX+qXHA}*6(fJg&L?@(BREi0RLIWn7u}VooT{!TH$(OT%_hY?EHzqW z%bVV-a5Vc+JCWc{)*i!tN4Vz=o|`qdy~z6={AR0l1~fA3pR)-%<7w6l?{aTWmC6`D zyTfProju@Lp}xyE89lu$uCB)B#_(%`3CS~9hvrN9@BUz`vL;E}Nmfix=n3a$}gE z?(S2?lYTyR5s8*5-GH{sSuyi1c@k{O7jr5{qJQQkrqN;lTDY9BoKupAD$$8lGu3gF zw$9`e+3kHa#FI1XAU$j=?c`kLR`7Zs+HJ6SlV`hHd}bUA)zxwh?WR|mM#u6zJ<+c_ zG&hjsBXxhKWF>8WMdCx@Il`xpe9kJY+xWo0SR!*zmtylSsO2PtBbCjQer5lE9Xh4h zb2K!58+F%nU+CjrGWpHx)Ur>tvSvnj~kdGw01_E-G&*Pr$zH4<5_w z-_!H`sLy&9;>iLqRBL6m1GvV-KVmAd#koH+xt?%8?x_b9*$ki z@8+}5K=mXp|BJo8RKvrCG0)e^e2={|=wFG1Bj}k~{HsLVJkw;ib91FnE^sZUjpV$^ zjz%)K!u|;uPb@^Hcwh4QzNp2l0q;SRlI}SbA1~k;J6vBV_J3R$mt4U+^HwVB(gv|` z-d1(MnY*<64sTk@f_FeZ>yVT4emK3xh*>#@`vu?kc78hID}|f3i+8^yGckQ7|e7hh^}4mNc!{@e$u{ci$FxuXTMho4%yKy;Xb( zw)2&$q(|?MEydY9`{5_Dc^jYN{RjD!6N8)JOeK9#a>g%&R!26O4Ch4Pp?b7Y>Thlw z(}ev`C-YKPpGxNCibKp+TrFy@!Qg4I zXhg43Ml#p2VKw#a#+9z(Qda+EmN&iX%lsjy&)&@2a!%Brn3&v`4~VoamC6a~dE=Jd 
z7yGdDV0OEVMfZwwCz0w>QWU3evQ5sT^<(N^>E2=ZR<$76NaeF0E~A+BqHiVNuYkmA zwcNwDZ;8?MNY}u%zg`J*CVx948L7rsrl+tRAUzl2CVl|LET0 zbjwXxS4hD-Ds9Las$TxlUHRo=biYLwOapFsyT1HjHJn2|0VQ0iHfiC#lAGC z%Pw_&6Ha6dkx}`#+DZib4>XghKLTSh7Sl7COYdXevsW9KUhzR}jTDg5C$ z*fz!IiOPLT`f_AHm__<)?Noj0GuqBc30qk{Psn+H@jK?NQm&Xf58@#iHJ)zV+Em=T zMqjW@T`Sdc1sQT4#dv<&#&_S6>|zq+jF$>{cn%8|;~5R$c7Sgl^K+v*&m-9adbfkt zdUxN216QbFIE0@R<3C{aS?-yO)j88(uj`Mn$ALWST1YkU`C}YDfVGk}^&I!)46er^ z@;VMa2A`jJRr-^GFzQ$63C@S%W%SI-#-7+y#kb>VcLgchV8s8#)kP3G8OMqi#&9dd zkIR(Gs`W1*(^SMd%9CC@Z4Bc*ufwexP4kX4JM=r#>KU!%t=AO2XU-hiOM~N-$XQSK z;r{6?SAsmrZC8x%KMkYJG}`C?nOn?zmiAg7!``*Tk>vG#6Q=F3=0bmeEJ80LQA^go zRjioJ+F5n>3;+M0JDwqB@*!nkav2=RJFjZW%sj9;w~d1@Fn*?oIy6%8b12e3JBaw7ytP>)h9h-VgGgiDb^ogOTF+5SC1S zx~w02(v_`Huj-B_5H8Ed=3#0vcaAhR+^2kPz4v2Eokij+wR@SElX=89#mhe0{TQm5 z)4f#rTktb$6mlk1RDw!`SThR0-$R`Q``?4L{&H6T>KV72s8r6DmMKe9`FlDm>a zwzNAgGa9YjnXihfD!xO&JR;(ZGQMx!9Rv5wMV_5lC8LCu$Gn&i%D5~xEO=J zV5NV#_Pz1Ot8`EHm=^pgry{-s`Q-(v+K_Ix8nVLVRP}c-nns1ETD-J7TX^moY!m)>SG zEU%?;8FsiGPkZy;gGI7D9p~KfSLxHlI49$oZ?yh&eR0 z5DSNr^IW>;#MbpVIS=2mmc0fUk7oBi>br`!>{Uw#-zI0vb!=7>CL;?yMcxND^jFR% ze_wz6IWA36<85pX!Hh%HQ{6Ukn#jFlHdD|2@DLbS7SSbY)=a{ooN$zG?Sw3!Oc!Tx)-3GolhYgOzjce33olaM=ab|{>yK@Nrvnw_y0^ASH$#l(%ke|iTCHkf_ z-P@ZF&k~y!ySG1HKBJviwD%))ZxWA(85U1gd?zOqga0wxw2MgwHWdrcyA?JTlMr) z!-&H8cooiG=-c_YGKyZu8+9Fy4LS4oe5}bHyNlRxDJjd)H(DRyS~ALHUE8l@`<&K! 
z>$?rteum#S`isr1H^_It^TgT^$Qy|~RX;%TrXp!Q$Y;etu6@Mvm%A(TmEGAOtE4l| zAIEOr(BnH?s=z1GC)V_REsPj}_t)V?X6?>{a^9i$!;NvUZtwqj_FpKT#YZwT_6W)M zVZs>@jpWtDmz-|&4{RB4r1Y>hvtIr@_;k`%)^L5ShKVBj$@=il1@;fqYs^yS3uVU_ zS}LO4UiUxFGH>#&d2F_bwg>oqsJerHW@a+_`ViL1n|lCmr~kQ6J6X{)Q@K_mO!B85 zuioUhJrq}RM)9${xPwSCU)(!Xo#XW`$(FOgJ!7Cz96mYM=nNdpO5v;Y+*t#bXQWAN zx$*o@^ymxy%)`eWker~z*YrCTPtI5C6uy(yB4c2Xd7i8`e%rlgK`HzFt}jH`;`E+H zjz_VnC2QqPZf3gXiDYGbGga$1x|+=GdCPr~|8G-YS8ep-^=JEg9pu+TGUr9*r016I zTnD+lnavqTuNET6>wNDDmd#T{GTM)T)N!3e7jja=tom!mRW#*@u`B&qF->YaGd}X3ne5GvFGnFbOElPM1lh{nTio%jx=Q=DsXqBOdJGZy zE3?WrIYuJsTziObzn%RQ4@T zd?zWB^*6}~cD#B|q1RsR?=VJ+EWOFG&3AifaRJXtU-5!i^ac6PSLZ77Iy&+fO^u%0$KlW7DM=Y5W0`kO@r^u{y ztIDFY@g})-+Nt{v|8L6Ul3n8xE$*M;&WXW!9{W}eTOgl##`$S<~woV zaIvkKHkXiQGMw(x>u=^w|A6ubO3c8FQhf9S7RoN1HA?*l7PVbl=64m69RtCdEOUDS z;W6&XKIvz$;6#=_1Cx(p)#Nj8<^Km6KV;X@G~Sif9?6k0gWo@{#uwfAQzCmSlj(mbIdj6r z``XAJhC9jng?sXz=V{k6w_MU)E22TpzgX&8PVFd)WjU*O{~DHS zXu5*@$&b1dhU;OIJzwdQcCho4te;hoHH=1cy6H}Quc*CCeEx;Dm*Zwr`sU<@#_VMS0Ac@&yo zvc?&_Bc}vrT(MG#jMzTl`8(iP37Xk?{B6NwO4BjxPWP!{91eBlCpq=18qK~}=14fV z_bC~u;`7~Ez8U;7m-Q=tC)-r^L+9MtU21)m>@C!H44IM{d@nDWuKYSZ`UTL)6Zfff z%8bz4P|f>@@)%zk)3O>cy-_m$v><)5Ha^CBd52e-c4v_wClAbUe|F-w#k0kv&T831 z*)69eB&&5Y2W7qe-|4i~H-Cz@9kjmN-#tHMs39M(rVrar!UJ4QUZ&gqTuA!4 zm|UE_^A0&V%9H;%ryHy%_YvwTs?5!#tXYV6-B~UBcWUwZmL#5`rct-k&$ zrc82KX1DUkK4<BYLG@ZKaKG>360YK4+I3^w7<;&>4$9S6gyz)ii_K6L*92Y~DMT zq&sn9IjP#XGxIDHaODwvEDn?GAxvhOn!G6Eo`qOGN=v2Hl9L{H@tB+;p4pymxYt41 z@#-tX*RLmC#z7Z(qIg@qYn85p3z?yLk)B!WeyVSC8e7hW?ZF>clk)?PL5Et?xxRwu>Iw>#zi;GG=^@4C`n-Ro{8Os6Go%F89qI$jpLF)?(*$ zvE+$L>H1p9sX?8gGDrlto|Nft-Y#$;=db1MU@|vMz`?#aKAT3x^=|`kD(@3NhtyG8 z8zSOm^?shFvU;^MALtIBjGv18HqX>KGj5kV_m6;b8q8KQc4C$L$Z@q1Z%)a~-sP=u zUXM+^M6g?w`wfdvhsz3eXD@DV7GFuvhiG~_X~wZzGRt-6&(%bvte(j^7By*DT(n)w zIyrUZdYt=061=X&rI7vDm?fEShKNM{aWFgWv)?7PPbF$O2tT>hzwqfN z{@sdv1H|L(Z@p2?UqSK&(*Bp--pASFT$$kh!F=LMzEO?@qttdc7T>1LdIdDQlD9am zl4Is3CF_u?l^RQ`a|}G*rrTm1$Xxi1^q&Q%@wiYH1H$>d$vuePm#F_j-(*GB{_Ilk 
z!+V&R+5jfcvQWlWPb!uDNqaFpYrsbrnDK{}9yJPS#7-4SIa^F=W-QUm)oD=8UbDX- zJ(2yY!?Lsp{f4?9f=5n3I+YZ8XW9ctn!B?zCJ%F8PA31x^;`8P2aD1(c=QEY%?Yhp zjXu`T88plOo9yQu#vUIQP|2t){=5lHQNUB3{zCIn3{qT8As_w}mS>|*4Uq}9yr`XID{zdv}Vq#@{JH;p^vk9%mqEgTp zE28%^zdI9Wl8-NWUb1eghTfoE0qvejRK?+CMhPE?=zYobFs-|YIcI72416l<_m!;p z2WcM>t+S$bA{m#FA@dN8M1r&YG{l2{7ueE-mDYH2=?%;D*L!(&*5LkIZQJnlU@`Y> zJop)cgN++YL#Ms?`k{Cd%V(u#UEkzcYcLIh+O@p&XPVv%_Xg^Hr4X%xMozqW92yld zE+-LJBKwoB=fv^Eg?TKKRR|r()0+%gTb`^D?~0w-4QUylIdeJqG$l! zn&QMY>e|Hq_p;MTFg?s)A7NH+_nxHYyqC?ormXuumNZ!*lvAso@O%H-sShBy(A{}T zTg5XTfpN0a_JV3H`exr)cDEhM^Y`IcaueO9p6uRh%lGy{b}G->qx@71SV5Cy|7wmm z$u_W_#6?7b>}DzJrx8Z>qGvL}W&Kh=-~L8}|FKvzo|{v_*DIeB#ztx39H?X!diG-c z%V_0hQWa&Lj1v}WFQ-cHR(sY5C9A^){HH2O#zWvn94QaEPq6$me)tu=^Zu)rD?8oy zH`eZ?m1L^S+vL;nq&&GY5-C!cf%yt5$!m4J@17Dri;(URF(8@AvI9K(Uhd^dH?YQ^ z>{uQz=cx4y=$u5Kya&o0;4X3%gX4QfO`{-jHI`<(kqoxc8s=u z^!Z_b=gB8eh*?LLkTr|rqC*&a5xU~%$r zC+A07Q8wAe--P5r_<1$`Gaj3dcT@BRSykQ-itmbxn=y2XQj^`2l|hx&u%|F$3gQb% zle5FRsy*}Dt$18^i053QtfEi0vAprhD*m48I)cY!bxrmGohBCVVY!UCo>p&Wp@yg- z>rmIK;}>@qEimOs@#kAy48vB@_GuDlz0!Y_D?-v|jB%@kfO?8A= zko=98^2EC0=qU1}pFRagm%;U)YJC_-jv{5oMXxE5C)J$v{T)1iP<|~epQY2AyfgWP zk~bhb2L2G$@?=_{C*};{?XKixp81eTexvM!`JBFMNSArL!<5?wvF%EYP+Ky}^;aTK zai8PTxU60w>}2RfaKG=LrG zgcb)%`+IK8jP%b=I&Fpob~o?dk8_{%oOADc-+Rx^rM2m_D3Nw)ec2OS)%>d>&8>qc zinh8O2^9bJO)O>}nhV(09Hu74L#sock7(78sa@+>9;UrI5BX;Bk(3b-?SKJGz0qT6&7UP9CubhE&6bWO7|WUR}G0 zXQ_d$dRqM*JCb>UvJ}-zaa^lFN0!o$4;`SSWuSc;d17IKRzk0&#u#wZ={|4bIoDFk zF&LyX>F>s0Bw5aOGIC3$|I^y~v$Xwhlyo;ed;@K}0ZBC0B($fb1l?;1m9H*R?GPG5xN+_G8Uo(oqd~$KIT%gqHq2g zZao4^`oYnUt+ar24{~UfTZ%3ksPA#)xq$YQu-iAV39a_tPpKv7P5ah!(ei4f%i{Y0 zC2qq{RbfZ6#A@W+0}{Ul3&kkcNlV&C5RD(u83d#G{XO3JJ#=&tAJB(JQ|U)@(1}j5 z*p6o2#O4FInr>z`{BDfc;A5ER@w7DHlXJT;%S$2TT zV)E$;)hXUHu!n4X%WI&g7+QTuz7Jokr_Wkj_da&{FtsP+>vq#xMK04CZAJm~=pD#i zLCt$%zwR}kqaK}m@i8T*p=IqHX`}6Nlo`VBD5~5stH*x$_6#~yzcc}Bn@R~|@qfw4 zs#O#%_~J77xRpLg&*eRotrd|v@h3o=14y7X5iyjZp8PBLzdZg{6rx>NgknkVq~){F 
z#ZPEsCc2wNt8ZiEy&dnNXB1kQueHV+JM_VkJF%A#zT)rX3{h$m`U+SpZ^P~!psan! zPgDK{q|s`cGhmmFmF(xgb~E&%u~(2c54rR_Bnqq0>5UqXcY;zo-fXb{s)zkL3qX6K zbEvl)wk7lGqBqq&VK(^W;y1Js%2i7)?*`VY|(ojmnC(kvp4mwH~Kmi3H(=7U5A9Q(kIP}CzXwlygtgXV-v@iyIP zViz*`KzOOGC4mxxuwPNkRdWG<&&L9Gllw5dQLnoI#1w~k27mtyooSX<&r~i`lFmnn zgFQOC`Caa+v>s+5eTYUIm5dVVC@%wvXYj5Rmq0_$`_(HQL8=enY8IT(IW$3P3Nz}` zZp`n3&puFl7hR!|uOm0Pa_#R$jvF&T;(5Cs=oNAkmUwoQ&^^*HHl>HKAKZd+_ z@O?fWAqL4F#6Fs-{Vq~!CQM_C3*=3QVaq^qIXEBV`!Q&-0N&Q)Yu4c}gZQpWTKge# z4aOh11y=aE{wZnhp`?9asNMYoAa{b1*a{F?i=8OSg3fBvIf!k1Ue2c;^rt;Enk~^8 zCyAh(N}e2AehT#BNvl1minFJuHacnU23|GCw}KkF>{$8x@Jlf$o3VJs09tQHoP)Df z6Oik1a(qbZwf{hS5nqR!dfI&t+K?yGxwVQiyo8i}c!M17Cij4Hg6-4#U{NeMd}7)7 zDA>PB9eSP}B**7m)vjsXcbx*WZ_|qgkXR$vHoVG7Se5|VNuVALp4oWZ7PPnp$z}Cg zyDp#eQ(Ai`>7rsySUU!pCF*sEE$PB74&&rTSo`B04_H=!|Ea}!qoI%$7sy_C*fY69mhJh>w4 zm!YQyWYgL)H>GMMeiANdjJ69Tw8k+LUB83R>Ou~!D*Fe_^df^~(|B6aDhg>H<-H7B z6ru7CY-k5%Mq$hH=+}XYytP5zRUo(kf2s&&K{)zrFm@x6_7ZmDUH0NP6l+c~i)SLW zX3d(ZTk$OBfwImi+=kY)X1WatVzG878u7uJh3Kdo37^92DJFdib$tVC5}H~QsC9m| zv{tdN@+hT>t2*(%8bf57HIaL^P3>Teq8LAWdVC&3AWE}H1k^~C*NQf!2`9lFMqApA%|MvdHV z1hHq)Ovo~S0~%7DW%RmxkWsm(Q1&-%&L?edeg7=z{|T1sM5T)s`4A;&SM6oEsOW&2 zS33x*ucC?Na9c<`hc&f>)^6BcNnYJcZU==V#$X}zAssG*C)(v1L|>O|E?bSc>%5~9#tpWd&AT$8R8?o@Q^Qo~^V}8dN%j{- zQv6s{7gkxGUv8qk=6$Ded5oXgE^{S{mvL^hk&rH?IcDg}RJU0XYeGJfa-Brw=7Q54 zaGRjpct(R%IVpWE(~_<-hjI;!HQ6rnX`Ja%h`Vy*Ni^;RQovX6x;&WnQLk}&Rm>Kbd7fnsgWvF;<1xFO z=AhH$jWx+*RoZ!<*_fnk$!_z{D6`0I&P*@?7iqn=ev0K^*BCQ%l-chzM@E??s(ERO zNpzdmc$KxmXLe3BelH^O4L}@rno^Hx9!=p~aF5ru$uWF9=P@V7gPgg4eExmmqR1q{ zTn^0ZAliM9=9>Ejgj7H5$_L?TI zD1l-~`+(~UN=$T_=?Ug+oGBSkK9qG*`FqmL46CWzy@J|!#LqJJ;hIn4Ouq}Ij|}87 zRS%|GT~ZpZj-(Yu(lOJ?`B5cBktz8uUZ>_eOTvYTB&iLrX)JCC*EWW0?+q8*3=NTt zqKKay%80Mmenf1Fgo+}!K%g1wO0Vf(beefJ z2-lpG&zD8cyc&ez=EMj96(hM0BWVbi*WjG(aO>+ukz28nEdLih|8S!+V#CBv@S&Pdn@moN$zm`3vNg@od5s; diff --git a/tests/ut/python/dataset/test_zip.py b/tests/ut/python/dataset/test_zip.py index 353b5a2471..16b61079db 100644 --- 
a/tests/ut/python/dataset/test_zip.py +++ b/tests/ut/python/dataset/test_zip.py @@ -48,7 +48,7 @@ def test_zip_01(): save_and_check_dict(dataz, parameters, filename, generate_golden=GENERATE_GOLDEN) -def skip_test_zip_02(): +def test_zip_02(): """ Test zip: zip 2 datasets, #rows-data1 < #rows-data2, #cols-data1 == #cols-data2 """ @@ -63,7 +63,7 @@ def skip_test_zip_02(): save_and_check_dict(dataz, parameters, filename, generate_golden=GENERATE_GOLDEN) -def skip_test_zip_03(): +def test_zip_03(): """ Test zip: zip 2 datasets, #rows-data1 > #rows-data2, #cols-data1 > #cols-data2 """ @@ -78,7 +78,7 @@ def skip_test_zip_03(): save_and_check_dict(dataz, parameters, filename, generate_golden=GENERATE_GOLDEN) -def skip_test_zip_04(): +def test_zip_04(): """ Test zip: zip >2 datasets """ From c080ec78746ad9db64a532b19388071ed02896fe Mon Sep 17 00:00:00 2001 From: Xiaoda Zhang Date: Tue, 31 Mar 2020 14:55:31 +0800 Subject: [PATCH 042/367] change star elimination: remove some redundant and checking works --- .../ccsrc/parallel/auto_parallel/graph_costmodel.cc | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc index d30522c2fe..292cc4f5f0 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc @@ -1210,19 +1210,16 @@ void CostGraph::CreateStarEliminationCostList(std::vector> MS_EXCEPTION_IF_NULL(succ_node_cost); for (auto& succ_edge_cost : succ_edge_clist) { MS_EXCEPTION_IF_NULL(succ_edge_cost); - if ((succ_node_cost->memory_cost_ < DEVICE_MEMORY_CAPACITY) && - (succ_edge_cost->memory_cost_ < DEVICE_MEMORY_CAPACITY)) { - succ_nodes_stras[k] = succ_node_stra; - succ_edges_costs[k] = succ_edge_cost; - succ_nodes_costs[k] = succ_node_cost; - recursive(k + 1); - } + succ_nodes_stras[k] = succ_node_stra; + succ_edges_costs[k] = succ_edge_cost; + 
succ_nodes_costs[k] = succ_node_cost; + recursive(k + 1); } } } }; - recursive(0); + recursive(1); } std::vector> CostGraph::EliminationStar(const OperatorInfoPtr& merged_op) { From 110640e2ad7dd862a5f2d9ad96f4607a481bc639 Mon Sep 17 00:00:00 2001 From: yangzhenzhang <285824651@qq.com> Date: Tue, 31 Mar 2020 15:40:43 +0800 Subject: [PATCH 043/367] add parallel ops for neg and batchmatmul --- mindspore/ccsrc/parallel/dynamic_creator.h | 2 + .../ccsrc/parallel/ops_info/activation_info.h | 7 ++ .../ccsrc/parallel/ops_info/matmul_info.h | 8 ++ mindspore/ccsrc/parallel/ops_info/ops_utils.h | 2 + .../ccsrc/parallel/step_auto_parallel.cc | 2 + tests/ut/python/parallel/test_batch_matmul.py | 93 +++++++++++++++++++ tests/ut/python/parallel/test_neg.py | 84 +++++++++++++++++ 7 files changed, 198 insertions(+) create mode 100644 tests/ut/python/parallel/test_batch_matmul.py create mode 100644 tests/ut/python/parallel/test_neg.py diff --git a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h index 59b8722435..e6e1b41d76 100644 --- a/mindspore/ccsrc/parallel/dynamic_creator.h +++ b/mindspore/ccsrc/parallel/dynamic_creator.h @@ -123,6 +123,8 @@ REGISTER(ReLUInfo); REGISTER(GatherV2Info); REGISTER(SqrtInfo); REGISTER(GetNextInfo); +REGISTER(NegInfo); +REGISTER(BatchMatMulInfo); } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.h b/mindspore/ccsrc/parallel/ops_info/activation_info.h index d8de19b328..d05f8743b0 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.h +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.h @@ -167,6 +167,13 @@ class SqrtInfo : public ActivationOther { : ActivationOther(name, inputs_shape, outputs_shape, attrs) {} ~SqrtInfo() override = default; }; + +class NegInfo : public ActivationOther { + public: + NegInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) + : 
ActivationOther(name, inputs_shape, outputs_shape, attrs) {} + ~NegInfo() override = default; +}; } // namespace parallel } // namespace mindspore #endif // MINDSPORE_CCSRC_OPTIMIZER_OPS_INFO_PARALLEL_ACTIVATION_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.h b/mindspore/ccsrc/parallel/ops_info/matmul_info.h index c9feae55b6..b434e4522d 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.h +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.h @@ -87,6 +87,14 @@ class MatMulInfo : public MatMul { : MatMul(name, inputs_shape, outputs_shape, attrs) {} ~MatMulInfo() override = default; }; + +class BatchMatMulInfo : public MatMul { + public: + BatchMatMulInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, + const PrimitiveAttrs& attrs) + : MatMul(name, inputs_shape, outputs_shape, attrs) {} + ~BatchMatMulInfo() override = default; +}; } // namespace parallel } // namespace mindspore #endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_MATMUL_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index a25200c3c1..2b8fc0ee3f 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -188,6 +188,8 @@ constexpr char SQRT[] = "Sqrt"; constexpr char ASSIGN[] = "Assign"; constexpr char GET_NEXT[] = "GetNext"; constexpr char SQUEEZE[] = "Squeeze"; +constexpr char Neg[] = "Neg"; +constexpr char BATCH_MATMUL[] = "BatchMatMul"; // Parallel don't care constexpr char TUPLE_GETITEM[] = "tuple_getitem"; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index c3e3f5893e..cf388bea40 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -101,6 +101,8 @@ std::vector splittable_op_ = {MATMUL, SQRT, GET_NEXT, CAST, + Neg, + BATCH_MATMUL, SQUEEZE}; std::vector elementwise_op_ = {ACTIVATION, GELU, TANH, 
SOFTMAX, LOG_SOFTMAX, RELU, SQRT, diff --git a/tests/ut/python/parallel/test_batch_matmul.py b/tests/ut/python/parallel/test_batch_matmul.py new file mode 100644 index 0000000000..88ba818c91 --- /dev/null +++ b/tests/ut/python/parallel/test_batch_matmul.py @@ -0,0 +1,93 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import mindspore as ms +from mindspore import context, Tensor, Parameter +from mindspore.nn import Cell, TrainOneStepCell, Momentum +from mindspore.ops import operations as P +from mindspore.common.api import _executor + + +class Net(Cell): + def __init__(self, mul_weight, batch_matmul_weight, transpose_b=False, strategy1=None, strategy2=None): + super().__init__() + self.mul = P.Mul().set_strategy(strategy1) + self.batch_matmul = P.BatchMatMul(transpose_b=transpose_b).set_strategy(strategy2) + self.mul_weight = Parameter(mul_weight, "w1") + self.batch_matmul_weight = Parameter(batch_matmul_weight, "w2") + + def construct(self, x, b): + out = self.mul(x, self.mul_weight) + out = self.batch_matmul(out, self.batch_matmul_weight) + return out + + +_x = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) +_w1 = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) +_w2 = Tensor(np.ones([128, 32, 32]), dtype=ms.float32) +_b = Tensor(np.ones([128, 64, 16]), dtype=ms.float32) + + +def compile(net): + optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) + train_net = 
TrainOneStepCell(net, optimizer) + _executor.compile(train_net, _x, _b) + context.reset_auto_parallel_context() + + +def test_batch_matmul_data_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((16, 1, 1), (16, 1, 1)) + strategy2 = ((16, 1, 1), (16, 1, 1)) + net = Net(_w1, _w2, False, strategy1, strategy2) + compile(net) + + +def test_batch_matmul_model_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((1, 1, 1), (1, 1, 1)) + strategy2 = ((1, 1, 1), (1, 1, 16)) + net = Net(_w1, _w2, False, strategy1, strategy2) + compile(net) + + +def test_batch_matmul_hybrid_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 2), (2, 2, 2)) + strategy2 = ((2, 2, 2), (2, 2, 2)) + net = Net(_w1, _w2, False, strategy1, strategy2) + compile(net) + + +def test_batch_matmul_auto_parallel(): + context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0) + net = Net(_w1, _w2, False) + compile(net) + + +def test_batch_matmul_repeat_calc(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4), (2, 2, 4)) + strategy2 = ((1, 2, 2), (1, 2, 2)) + net = Net(_w1, _w2, False, strategy1, strategy2) + compile(net) + + +def test_batch_matmul_transpose_b(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4), (2, 2, 4)) + strategy2 = ((1, 2, 2), (1, 2, 2)) + net = Net(_w1, _w2, True, strategy1, strategy2) + compile(net) diff --git a/tests/ut/python/parallel/test_neg.py b/tests/ut/python/parallel/test_neg.py new file mode 100644 index 0000000000..0e08e8c096 --- /dev/null +++ b/tests/ut/python/parallel/test_neg.py @@ -0,0 +1,84 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import mindspore as ms +from mindspore import context, Tensor, Parameter +from mindspore.nn import Cell, TrainOneStepCell, Momentum +from mindspore.ops import operations as P +from mindspore.common.api import _executor + + +class Net(Cell): + def __init__(self, mul_weight, strategy1=None, strategy2=None): + super().__init__() + self.mul = P.Mul().set_strategy(strategy1) + self.neg = P.Neg().set_strategy(strategy2) + self.mul_weight = Parameter(mul_weight, "w1") + + def construct(self, x, b): + out = self.mul(x, self.mul_weight) + out = self.neg(out) + return out + + +_x = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) +_w1 = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) +_b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) + + +def compile(net): + optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) + train_net = TrainOneStepCell(net, optimizer) + _executor.compile(train_net, _x, _b) + context.reset_auto_parallel_context() + + +def test_neg_data_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((16, 1, 1), (16, 1, 1)) + strategy2 = ((16, 1, 1), ) + net = Net(_w1, strategy1, strategy2) + compile(net) + + +def test_neg_model_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((1, 1, 16), (1, 1, 16)) + strategy2 = ((1, 1, 
16), ) + net = Net(_w1, strategy1, strategy2) + compile(net) + + +def test_neg_hybrid_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4), (2, 2, 4)) + strategy2 = ((2, 2, 4), ) + net = Net(_w1, strategy1, strategy2) + compile(net) + + +def test_neg_auto_parallel(): + context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0) + net = Net(_w1) + compile(net) + + +def test_neg_repeat_calc(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4), (2, 2, 4)) + strategy2 = ((1, 2, 2), ) + net = Net(_w1, strategy1, strategy2) + compile(net) + From 60f7a95b1c21d659fb6bfc13d1055a1097cbac63 Mon Sep 17 00:00:00 2001 From: WeibiaoYu Date: Tue, 31 Mar 2020 09:25:11 -0400 Subject: [PATCH 044/367] the size of tensor may be bigger than 2GB, should use memcpy instead of memcpy_s --- mindspore/ccsrc/transform/util.cc | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/mindspore/ccsrc/transform/util.cc b/mindspore/ccsrc/transform/util.cc index a106a20ad8..0a18763d12 100644 --- a/mindspore/ccsrc/transform/util.cc +++ b/mindspore/ccsrc/transform/util.cc @@ -361,12 +361,11 @@ MeTensorPtr TransformUtil::GenerateMeTensor(const GeTensorPtr& ge_tensor, const MS_LOG(ERROR) << "GE tensor data size is zero!"; return nullptr; } - errno_t ret = memcpy_s(me_data_ptr, me_data_size, ge_tensor->GetData(), ge_tensor->GetSize()); - if (ret != EOK) { - MS_LOG(INFO) << "GE tensor data size is " << ge_tensor->GetSize() << " bytes"; - MS_LOG(ERROR) << "Copy GE tensor data to me tensor failed"; - return nullptr; - } + + // Use memcpy here, not memcpy_s, just because the size of ge_tensor may be bigger than 2GB + // which is the size limit of memcpy_s + memcpy(me_data_ptr, ge_tensor->GetData(), ge_tensor->GetSize()); + return make_shared(me_tensor); } From 
849b84abb157c8bc66387312cf8823898338e1f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Wed, 1 Apr 2020 11:14:21 +0800 Subject: [PATCH 045/367] fix typo in formula --- mindspore/nn/layer/basic.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py index e5ed147b85..0cf4cd5e99 100644 --- a/mindspore/nn/layer/basic.py +++ b/mindspore/nn/layer/basic.py @@ -156,10 +156,10 @@ class Dense(Cell): ValueError: If weight_init or bias_init shape is incorrect. Inputs: - - **input** (Tensor) - Tensor of shape :math:`(N, in_channels)`. + - **input** (Tensor) - Tensor of shape :math:`(N, in\_channels)`. Outputs: - Tensor of shape :math:`(N, out_channels)`. + Tensor of shape :math:`(N, out\_channels)`. Examples: >>> net = nn.Dense(3, 4) From fd96ebe3ea08238a359122b9d0e640d5eafd4130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Wed, 1 Apr 2020 11:42:24 +0800 Subject: [PATCH 046/367] fix typo in formula --- mindspore/nn/layer/normalization.py | 2 +- mindspore/ops/operations/array_ops.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index fdfa25e183..d43c1c8ab4 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -249,7 +249,7 @@ class LayerNorm(Cell): 'he_uniform', etc. Default: 'zeros'. Inputs: - - **input_x** (Tensor) - The shape of 'input_x' is input_shape = `(x_1, x_2, ..., x_R)`, + - **input_x** (Tensor) - The shape of 'input_x' is input_shape = :math:`(x_1, x_2, ..., x_R)`, and `input_shape[begin_norm_axis:]` is equal to `normalized_shape`. 
Outputs: diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 8585f873e9..f6d563321c 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -1790,7 +1790,7 @@ class ScatterNd(PrimitiveWithInfer): class ResizeNearestNeighbor(PrimitiveWithInfer): - """ + r""" Resize the input tensor by using nearest neighbor algorithm. Resize input tensor to given size by using nearest neighbor algorithm. The nearest @@ -1806,7 +1806,7 @@ class ResizeNearestNeighbor(PrimitiveWithInfer): - **input_x** (Tensor) - The input tensor. The shape of the tensor is :math:`(N, C, H, W)`. Outputs: - Tensor, the shape of the output tensor is :math:`(N, NEW_C, NEW_H, W)`. + Tensor, the shape of the output tensor is :math:`(N, NEW\_C, NEW\_H, W)`. Examples: >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32) From aea6b0c97478cf8268f1edd8a06b2f65c246aec0 Mon Sep 17 00:00:00 2001 From: VectorSL Date: Wed, 1 Apr 2020 11:49:37 +0800 Subject: [PATCH 047/367] update tests/st/ops/gpu/test_tensoradd.py. 
fix pytest.mark for testcase --- tests/st/ops/gpu/test_tensoradd.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/st/ops/gpu/test_tensoradd.py b/tests/st/ops/gpu/test_tensoradd.py index a60f5dcf30..23c23f12d2 100644 --- a/tests/st/ops/gpu/test_tensoradd.py +++ b/tests/st/ops/gpu/test_tensoradd.py @@ -25,10 +25,6 @@ from mindspore.common.parameter import Parameter context.set_context(device_target='GPU') - -@pytest.mark.level0 -@pytest.mark.platform_x86_gpu_training -@pytest.mark.env_onecard class TensroAdd(nn.Cell): def __init__(self): super(TensroAdd, self).__init__() @@ -61,7 +57,9 @@ class TensroAdd(nn.Cell): self.add(self.x, self.y), self.add(self.x1, self.y1), self.add(self.x2, self.y2), self.add(self.x3, self.y3)) - +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard def test_TensroAdd(): add = TensroAdd() output = add() From 1de7011bc81d4b42cad56a08c8aeabf40cb1107b Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Wed, 1 Apr 2020 11:53:35 +0800 Subject: [PATCH 048/367] delete longtime python ut --- .../train/summary/test_summary_performance.py | 97 ------------------- tests/ut/python/train/test_amp.py | 9 +- 2 files changed, 1 insertion(+), 105 deletions(-) delete mode 100644 tests/ut/python/train/summary/test_summary_performance.py diff --git a/tests/ut/python/train/summary/test_summary_performance.py b/tests/ut/python/train/summary/test_summary_performance.py deleted file mode 100644 index 9ee9725d13..0000000000 --- a/tests/ut/python/train/summary/test_summary_performance.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -""" -@File : test_summary.py -@Author: -@Date : 2019-07-4 -@Desc : test summary function -""" -import os -import logging -import time -import numpy as np -from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data -from mindspore.common.tensor import Tensor - -CUR_DIR = os.getcwd() -SUMMARY_DIR = CUR_DIR + "/test_temp_summary_event_file/" - -log = logging.getLogger("test") -log.setLevel(level=logging.ERROR) - -def get_now_time_ns(): - """get the time of second""" - time_second = int(time.time_ns()) - return time_second - -def get_test_data(step): - """ get_test_data """ - # pylint: disable=unused-argument - test_data_list = [] - tag1 = "xt1[:Tensor]" - tag2 = "xt2[:Tensor]" - tag3 = "xt3[:Tensor]" - np1 = np.random.random((5, 4, 3, 5)) - np2 = np.random.random((5, 5, 3, 5)) - np3 = np.random.random((4, 5, 3, 5)) - - dict1 = {} - dict1["name"] = tag1 - dict1["data"] = Tensor(np1) - - dict2 = {} - dict2["name"] = tag2 - dict2["data"] = Tensor(np2) - - dict3 = {} - dict3["name"] = tag3 - dict3["data"] = Tensor(np3) - - test_data_list.append(dict1) - test_data_list.append(dict2) - - return test_data_list - - -# Test 1: summary sample of scalar -def test_summary_performance(): - """ test_summary_performance """ - log.debug("begin test_scalar_summary_sample") - current_time = time.time() - print("time = ", current_time) - # step 0: create the thread - test_writer = SummaryRecord(SUMMARY_DIR, flush_time=120) - - # step 1: create the test data for 
summary - old_time = get_now_time_ns() - # step 2: create the Event - for i in range(1, 10): - test_data = get_test_data(i) - _cache_summary_tensor_data(test_data) - test_writer.record(i) - now_time = get_now_time_ns() - consume_time = (now_time - old_time)/1000/1000 - old_time = now_time - print("step test_summary_performance conusmer time is:", consume_time) - - - # step 3: send the event to mq - - # step 4: accept the event and write the file - test_writer.flush() - test_writer.close() - current_time = time.time() - current_time - print("consume time = ", current_time) - log.debug("finished test_scalar_summary_sample") diff --git a/tests/ut/python/train/test_amp.py b/tests/ut/python/train/test_amp.py index eebd188e60..1a26c21775 100644 --- a/tests/ut/python/train/test_amp.py +++ b/tests/ut/python/train/test_amp.py @@ -66,6 +66,7 @@ def test_amp_o2(): train_network = amp.build_train_network(net, optimizer, level="O2") output = train_network(inputs, label) + def test_amp_o2_loss(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) label = Tensor(np.zeros([16, 16]).astype(np.float32)) @@ -75,14 +76,6 @@ def test_amp_o2_loss(): train_network = amp.build_train_network(net, optimizer, loss, level="O2") output = train_network(inputs, label) -def test_amp_resnet50_loss(): - inputs = Tensor(np.ones([2, 3, 224, 224]).astype(np.float32)) - label = Tensor(np.zeros([2, 10]).astype(np.float32)) - net = resnet50() - loss = nn.SoftmaxCrossEntropyWithLogits(reduction='mean') - optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) - train_network = amp.build_train_network(net, optimizer, loss, level="O2") - train_network(inputs, label) def test_amp_o0_loss(): inputs = Tensor(np.ones([16, 16]).astype(np.float32)) From 6a1b865c916871d4190f28f3619805f8bb0bc0f4 Mon Sep 17 00:00:00 2001 From: anzhengqi Date: Tue, 31 Mar 2020 20:18:40 +0800 Subject: [PATCH 049/367] check num_samples --- mindspore/dataset/engine/validators.py | 8 ++++++++ 
.../python/dataset/test_datasets_sharding.py | 6 +++--- tests/ut/python/dataset/test_exceptions.py | 19 +++++++++++++++++++ 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index adfe54a02e..b4d22a4a01 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -243,6 +243,8 @@ def check_param_type(param_list, param_dict, param_type): if param_dict.get(param_name) is not None: if param_name == 'num_parallel_workers': check_num_parallel_workers(param_dict.get(param_name)) + if param_name == 'num_samples': + check_num_samples(param_dict.get(param_name)) else: check_type(param_dict.get(param_name), param_name, param_type) @@ -262,6 +264,12 @@ def check_num_parallel_workers(value): raise ValueError("num_parallel_workers exceeds the boundary between 0 and {}!".format(cpu_count())) +def check_num_samples(value): + check_type(value, 'num_samples', int) + if value <= 0: + raise ValueError("num_samples must be greater than 0!") + + def check_dataset_dir(dataset_dir): if not os.path.isdir(dataset_dir) or not os.access(dataset_dir, os.R_OK): raise ValueError("The folder {} does not exist or permission denied!".format(dataset_dir)) diff --git a/tests/ut/python/dataset/test_datasets_sharding.py b/tests/ut/python/dataset/test_datasets_sharding.py index b178298e33..b398391fb7 100644 --- a/tests/ut/python/dataset/test_datasets_sharding.py +++ b/tests/ut/python/dataset/test_datasets_sharding.py @@ -33,14 +33,14 @@ def test_imagefolder_shardings(print_res=False): # total 44 rows in dataset assert (sharding_config(4, 0, 5, False, dict()) == [0, 0, 0, 1, 1]) # 5 rows assert (sharding_config(4, 0, 12, False, dict()) == [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3]) # 11 rows - assert (sharding_config(4, 3, 0, False, dict()) == [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]) # 11 rows + assert (sharding_config(4, 3, None, False, dict()) == [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]) # 11 
rows # total 22 in dataset rows because of class indexing which takes only 2 folders - assert (len(sharding_config(4, 0, 0, True, {"class1": 111, "class2": 999})) == 6) + assert (len(sharding_config(4, 0, None, True, {"class1": 111, "class2": 999})) == 6) assert (len(sharding_config(4, 2, 3, True, {"class1": 111, "class2": 999})) == 3) # test with repeat assert (sharding_config(4, 0, 12, False, dict(), 3) == [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3] * 3) assert (sharding_config(4, 0, 5, False, dict(), 5) == [0, 0, 0, 1, 1] * 5) - assert (len(sharding_config(5, 1, 0, True, {"class1": 111, "class2": 999}, 4)) == 20) + assert (len(sharding_config(5, 1, None, True, {"class1": 111, "class2": 999}, 4)) == 20) def test_manifest_shardings(print_res=False): diff --git a/tests/ut/python/dataset/test_exceptions.py b/tests/ut/python/dataset/test_exceptions.py index 7668eeb2a8..631f2ddcbc 100644 --- a/tests/ut/python/dataset/test_exceptions.py +++ b/tests/ut/python/dataset/test_exceptions.py @@ -18,6 +18,7 @@ import pytest import mindspore.dataset as ds DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] +SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json" def skip_test_exception(): @@ -29,5 +30,23 @@ def skip_test_exception(): assert "The shape size 1 of input tensor is invalid" in str(info.value) +def test_sample_exception(): + num_samples = 0 + with pytest.raises(ValueError) as info: + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], num_samples=num_samples) + assert "num_samples must be greater than 0" in str(info.value) + num_samples = -1 + with pytest.raises(ValueError) as info: + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], num_samples=num_samples) + assert "num_samples must be greater than 0" in str(info.value) + num_samples = 1 + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], num_samples=num_samples) + data = data.map(input_columns=["image"], 
operations=vision.Decode()) + data = data.map(input_columns=["image"], operations=vision.Resize((100, 100))) + num_iters = 0 + for item in data.create_dict_iterator(): + num_iters += 1 + assert num_iters == 1 + if __name__ == '__main__': test_exception() From fb6eed23aeefbec5a8885498fb626e2d972158ca Mon Sep 17 00:00:00 2001 From: Xiaoda Zhang Date: Wed, 1 Apr 2020 14:16:52 +0800 Subject: [PATCH 050/367] refining strategy-checking for resnet50 --- .../ut/python/parallel/test_auto_parallel_resnet.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/ut/python/parallel/test_auto_parallel_resnet.py b/tests/ut/python/parallel/test_auto_parallel_resnet.py index 667e3873a6..1e0e3570b9 100644 --- a/tests/ut/python/parallel/test_auto_parallel_resnet.py +++ b/tests/ut/python/parallel/test_auto_parallel_resnet.py @@ -295,11 +295,11 @@ def test_train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): #1048576 model.train(5, dataset, dataset_sink_mode=False) strategies = _executor._get_strategy(model._train_network) for (k, v) in strategies.items(): - if re.match(k, 'Conv2D-op') is not None: + if re.search('Conv2D-op', k) is not None: assert v[0][0] == dev_num - elif re.match(k, 'MatMul-op') is not None: + elif re.search('MatMul-op', k) is not None: assert v == [[dev_num, 1], [1, 1]] - elif re.match(k, 'ReduceSum-op') is not None: + elif re.search('ReduceSum-op', k) is not None: assert v == [[dev_num, 1]] allreduce_fusion_dict = _executor._get_allreduce_fusion(model._train_network) @@ -490,9 +490,9 @@ def test_train_64k_8p(epoch_size=3, batch_size=32, num_classes=65536): #1048576 model.train(5, dataset, dataset_sink_mode=False) strategies = _executor._get_strategy(model._train_network) for (k, v) in strategies.items(): - if re.match(k, 'Conv2D-op') is not None: + if re.search('Conv2D-op', k ) is not None: assert v[0][0] == dev_num - elif re.match(k, 'MatMul-op') is not None: + elif re.search('MatMul-op', k) is not None: assert v == [[1, 
1], [dev_num, 1]] - elif re.match(k, 'ReduceSum-op') is not None: + elif re.search('ReduceSum-op', k) is not None: assert v == [[1, dev_num]] From bffa031dbc4e99f480818aee44c689dc62db7198 Mon Sep 17 00:00:00 2001 From: dengwentao Date: Tue, 31 Mar 2020 10:57:38 +0800 Subject: [PATCH 051/367] change MS LIB CACHE setting --- cmake/utils.cmake | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 4efaad61b9..060e400820 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -16,16 +16,34 @@ function(mindspore_add_submodule_obj des_submodule_objs sub_dir submodule_name_o endfunction() -get_filename_component(_MS_LIB_CACHE ~/.mslib REALPATH) +if (DEFINED ENV{MSLIBS_CACHE_PATH}) + set(_MS_LIB_CACHE $ENV{MSLIBS_CACHE_PATH}) +else() + set(_MS_LIB_CACHE ${CMAKE_BINARY_DIR}/.mslib) +endif () +message("MS LIBS CACHE PATH: ${_MS_LIB_CACHE}") + if (NOT EXISTS ${_MS_LIB_CACHE}) file(MAKE_DIRECTORY ${_MS_LIB_CACHE}) endif () -# set(FETCHCONTENT_BASE_DIR ${_MS_LIB_CACHE}) -# set(CMAKE_PREFIX_PATH ${_MS_LIB_CACHE}) + if (DEFINED ENV{MSLIBS_SERVER}) set(LOCAL_LIBS_SERVER $ENV{MSLIBS_SERVER}) message("LOCAL_LIBS_SERVER: ${LOCAL_LIBS_SERVER}") endif () + +include(ProcessorCount) +ProcessorCount(N) +if (JOBS) + set(THNUM ${JOBS}) +else() + set(JOBS 8) + if (${JOBS} GREATER ${N}) + set(THNUM ${N}) + endif() +endif () +message("set make thread num: ${THNUM}") + if(LOCAL_LIBS_SERVER) if (NOT ENV{no_proxy}) set(ENV{no_proxy} "${LOCAL_LIBS_SERVER}") @@ -287,7 +305,7 @@ function(mindspore_add_pkg pkg_name ) -DCMAKE_INSTALL_PREFIX=${${pkg_name}_BASE_DIR} .. WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build) - __exec_cmd(COMMAND ${CMAKE_COMMAND} --build . --target install -- -j8 + __exec_cmd(COMMAND ${CMAKE_COMMAND} --build . 
--target install -- -j${THNUM} WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build) else() @@ -318,7 +336,7 @@ function(mindspore_add_pkg pkg_name ) ${${pkg_name}_MAKE_CFLAGS} ${${pkg_name}_MAKE_CXXFLAGS} ${${pkg_name}_MAKE_LDFLAGS}) endif () # build - __exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} ${${pkg_name}_BUILD_OPTION} -j8 + __exec_cmd(COMMAND ${CMAKE_MAKE_PROGRAM} ${${pkg_name}_BUILD_OPTION} -j${THNUM} WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}) if (PKG_INSTALL_INCS OR PKG_INSTALL_LIBS) From bb421b70db21eca7376f11a9d7bd37e53be2954e Mon Sep 17 00:00:00 2001 From: laiyongqiang Date: Wed, 1 Apr 2020 14:51:43 +0800 Subject: [PATCH 052/367] fix bug when dump op with trans_flag true --- config/e2e_dump_config.json | 12 ++++++------ config/e2e_dump_config_0.json | 14 +++++++------- config/e2e_dump_config_1.json | 14 +++++++------- mindspore/ccsrc/common/trans.cc | 7 ++++++- .../ccsrc/device/ascend/ascend_device_address.cc | 6 +++--- .../ccsrc/device/ascend/ascend_kernel_runtime.cc | 12 ++++++------ 6 files changed, 35 insertions(+), 30 deletions(-) diff --git a/config/e2e_dump_config.json b/config/e2e_dump_config.json index ad75c2f27f..fdba941f97 100644 --- a/config/e2e_dump_config.json +++ b/config/e2e_dump_config.json @@ -6,17 +6,17 @@ "net_name": "ResNet50", "mode": 0, "iteration": 0, - "kernels": ["TensorAdd"] + "kernels": ["Default/Conv2D-op2", "Default/TensorAdd-op10"] }, "DumpSettingsSpec": { - "enable": "true: dump enable false: dump disable", - "trans_flag": "true: trans to host format,false: not trans format", + "enable": "true: dump enable, false: dump disable", + "trans_flag": "true: trans to host format, false: not trans format", "path": "the dump file folder", "net_name": "net name eg:ResNet50", - "mode": "0: dump all kernels 1: dump kernels in kernels list", - "iteration": "0: all iteration others: specified iteration ", - "kernels": "kernel name list need to be dump" + "mode": "0: dump all kernels, 1: dump kernels in kernels list", + "iteration": "0: all 
iteration, others: specified iteration ", + "kernels": "op's full scope name which need to be dump" }, "other": {} } \ No newline at end of file diff --git a/config/e2e_dump_config_0.json b/config/e2e_dump_config_0.json index a67a4daba0..64b18b8b55 100644 --- a/config/e2e_dump_config_0.json +++ b/config/e2e_dump_config_0.json @@ -6,17 +6,17 @@ "net_name": "ResNet50", "mode": 0, "iteration": 0, - "kernels": ["AllReduce","BiasAddGrad","Conv2DBackpropFilter","SparseSoftmaxCrossEntropyWithLogits"] + "kernels": ["Default/Conv2D-op2", "Default/TensorAdd-op10"] }, "DumpSettingsSpec": { - "enable": "true: dump enable false: dump disable", - "trans_flag": "true: trans to host format,false: not trans format", + "enable": "true: dump enable, false: dump disable", + "trans_flag": "true: trans to host format, false: not trans format", "path": "the dump file folder", "net_name": "net name eg:ResNet50", - "mode": "0: dump all kernels 1: dump kernels in kernels list", - "iteration": "0: all iteration others: specified iteration ", - "kernels": "kernel name list need to be dump" + "mode": "0: dump all kernels, 1: dump kernels in kernels list", + "iteration": "0: all iteration, others: specified iteration ", + "kernels": "op's full scope name which need to be dump" }, "other": {} -} +} \ No newline at end of file diff --git a/config/e2e_dump_config_1.json b/config/e2e_dump_config_1.json index 226b91ae09..1486487799 100644 --- a/config/e2e_dump_config_1.json +++ b/config/e2e_dump_config_1.json @@ -6,17 +6,17 @@ "net_name": "ResNet50", "mode": 0, "iteration": 0, - "kernels": ["AllReduce","BiasAddGrad","Conv2DBackpropFilter","SparseSoftmaxCrossEntropyWithLogits"] + "kernels": ["Default/Conv2D-op2", "Default/TensorAdd-op10"] }, "DumpSettingsSpec": { - "enable": "true: dump enable false: dump disable", - "trans_flag": "true: trans to host format,false: not trans format", + "enable": "true: dump enable, false: dump disable", + "trans_flag": "true: trans to host format, false: not trans 
format", "path": "the dump file folder", "net_name": "net name eg:ResNet50", - "mode": "0: dump all kernels 1: dump kernels in kernels list", - "iteration": "0: all iteration others: specified iteration ", - "kernels": "kernel name list need to be dump" + "mode": "0: dump all kernels, 1: dump kernels in kernels list", + "iteration": "0: all iteration, others: specified iteration ", + "kernels": "op's full scope name which need to be dump" }, "other": {} -} +} \ No newline at end of file diff --git a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc index ea84537c1a..4748d59286 100644 --- a/mindspore/ccsrc/common/trans.cc +++ b/mindspore/ccsrc/common/trans.cc @@ -53,6 +53,7 @@ enum DataTypeTransMode { FROM_INT8_TO_FLOAT, FROM_INT8_TO_INT32, FROM_INT64_TO_INT32, + FROM_UINT16_TO_INT32, }; const std::map, DataTypeTransMode> mode_map{ @@ -68,7 +69,8 @@ const std::map, DataTypeTransMode> mode_map{ {std::pair(kNumberTypeUInt8, kNumberTypeInt32), FROM_UINT8_TO_INT32}, {std::pair(kNumberTypeInt8, kNumberTypeFloat32), FROM_INT8_TO_FLOAT}, {std::pair(kNumberTypeInt8, kNumberTypeInt32), FROM_INT8_TO_INT32}, - {std::pair(kNumberTypeInt64, kNumberTypeInt32), FROM_INT64_TO_INT32}}; + {std::pair(kNumberTypeInt64, kNumberTypeInt32), FROM_INT64_TO_INT32}, + {std::pair(kNumberTypeUInt16, kNumberTypeInt32), FROM_UINT16_TO_INT32}}; template void TransDataSrc2Dst(const TypeIdArgs &args, void *dst, const size_t data_size) { @@ -116,6 +118,9 @@ bool CastKernel(const TypeIdArgs &args, void *dst, const size_t data_size, const case FROM_INT64_TO_INT32: TransDataSrc2Dst(args, dst, data_size); break; + case FROM_UINT16_TO_INT32: + TransDataSrc2Dst(args, dst, data_size); + break; default: MS_LOG(ERROR) << "unsupported datatype trans"; return false; diff --git a/mindspore/ccsrc/device/ascend/ascend_device_address.cc b/mindspore/ccsrc/device/ascend/ascend_device_address.cc index a521f1516f..b8b7f452e3 100644 --- a/mindspore/ccsrc/device/ascend/ascend_device_address.cc +++ 
b/mindspore/ccsrc/device/ascend/ascend_device_address.cc @@ -106,13 +106,13 @@ bool AscendDeviceAddress::SyncDeviceToHost(const std::vector &shape, size_t } else { auto shape_size = trans::ShapeSize(host_shape); auto host = std::vector(size_); - const trans::TypeIdArgs type_args{ptr_, shape_size, type_id_, type}; - sync_ok = trans::TransDataType(type_args, host.data()); + SyncMemory(host.data(), ptr_, size_, RT_MEMCPY_DEVICE_TO_HOST); + const trans::TypeIdArgs type_args{host.data(), shape_size, type_id_, type}; + sync_ok = trans::TransDataType(type_args, host_ptr); if (!sync_ok) { MS_LOG(ERROR) << "trans data type failed."; return false; } - SyncMemory(host_ptr, host.data(), size, RT_MEMCPY_DEVICE_TO_HOST); } } else if (format_ == kOpFormat_NC1HWC0 || format_ == kOpFormat_FRAC_Z || format_ == kOpFormat_FRAC_NZ) { sync_ok = SyncDeviceToHostAndConvertFormat(shape, size, type, host_ptr); diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc index a7dfc96b2f..dc7eb5449b 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc @@ -150,9 +150,9 @@ void DumpOutput(mindspore::session::KernelGraph *graph, const string &dump_path, auto output_size = AnfAlgo::GetOutputTensorNum(node); for (size_t j = 0; j < output_size; ++j) { auto addr = AnfAlgo::GetOutputAddr(node, j); - auto shape = AnfAlgo::GetOutputDeviceShape(node, j); - auto type = AnfAlgo::GetOutputDeviceDataType(node, j); - auto format = AnfAlgo::GetOutputFormat(node, j); + auto shape = AnfAlgo::GetOutputInferShape(node, j); + auto type = AnfAlgo::GetOutputInferDataType(node, j); + auto format = kOpFormat_DEFAULT; string filepath = dump_path + '/' + kernel_name + '_' + "output_" + std::to_string(j); auto ascend_addr = dynamic_cast(addr); std::vector int_shapes; @@ -181,9 +181,9 @@ void DumpParameters(mindspore::session::KernelGraph *graph, const string &dump_p continue; } auto 
addr = AnfAlgo::GetOutputAddr(item, PRAMATER_OUTPUT_INDEX); - auto shape = AnfAlgo::GetOutputDeviceShape(item, PRAMATER_OUTPUT_INDEX); - auto type = AnfAlgo::GetOutputDeviceDataType(item, PRAMATER_OUTPUT_INDEX); - auto format = AnfAlgo::GetOutputFormat(item, PRAMATER_OUTPUT_INDEX); + auto shape = AnfAlgo::GetOutputInferShape(item, PRAMATER_OUTPUT_INDEX); + auto type = AnfAlgo::GetOutputInferDataType(item, PRAMATER_OUTPUT_INDEX); + auto format = kOpFormat_DEFAULT; string filepath = dump_path + '/' + parameter_name + '_' + "output_0"; auto ascend_addr = dynamic_cast(addr); std::vector int_shapes; From bed5997a141a568a3b0802428bacee9b387f83b9 Mon Sep 17 00:00:00 2001 From: zhaozhenlong Date: Sat, 28 Mar 2020 12:02:46 +0800 Subject: [PATCH 053/367] add atan2 operator --- mindspore/ccsrc/transform/convert.cc | 4 +++- mindspore/ccsrc/transform/op_declare.cc | 6 ++++++ mindspore/ccsrc/transform/op_declare.h | 2 ++ mindspore/ops/_grad/grad_math_ops.py | 13 +++++++++++++ mindspore/ops/operations/__init__.py | 5 +++-- mindspore/ops/operations/math_ops.py | 23 +++++++++++++++++++++++ tests/ut/python/ops/test_ops.py | 7 ++++++- 7 files changed, 56 insertions(+), 4 deletions(-) diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index c975b18812..251c855dbf 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -182,6 +182,7 @@ const char kNameDiag[] = "Diag"; const char kNameDiagPart[] = "DiagPart"; const char kNameSpaceToBatch[] = "SpaceToBatch"; const char kNameBatchToSpace[] = "BatchToSpace"; +const char kNameAtan2[] = "Atan2"; // -----------------OpAdapter initialization-------------- std::unordered_map &DfGraphConvertor::get_adpt_map() { @@ -365,7 +366,8 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameDiag), ADPT_DESC(Diag)}, {string(kNameDiagPart), ADPT_DESC(DiagPart)}, {string(kNameSpaceToBatch), ADPT_DESC(SpaceToBatchD)}, - {string(kNameBatchToSpace), 
ADPT_DESC(BatchToSpaceD)}}; + {string(kNameBatchToSpace), ADPT_DESC(BatchToSpaceD)}, + {string(kNameAtan2), ADPT_DESC(Atan2)}}; #ifdef ENABLE_GE adpt_map[string(kNamePrint)] = ADPT_DESC(Print); #endif diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index cc63bacd8a..f7fdcfbe56 100755 --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -1196,6 +1196,12 @@ ATTR_MAP(BatchToSpaceD) = { {"block_size", ATTR_DESC(block_size, AnyTraits())}, {"crops", ATTR_DESC(crops, AnyTraits>>(), AnyTraits>())}}; OUTPUT_MAP(BatchToSpaceD) = {{0, OUTPUT_DESC(y)}}; + +// Atan2 +INPUT_MAP(Atan2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; +ATTR_MAP(Atan2) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Atan2) = {{0, OUTPUT_DESC(y)}}; + #ifdef ENABLE_GE // Print INPUT_MAP(Print) = EMPTY_INPUT_MAP; diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 978828e16d..1924d2719b 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -443,6 +443,8 @@ DECLARE_OP_ADAPTER(SpaceToBatchD) DECLARE_OP_USE_OUTPUT(SpaceToBatchD) DECLARE_OP_ADAPTER(BatchToSpaceD) DECLARE_OP_USE_OUTPUT(BatchToSpaceD) +DECLARE_OP_ADAPTER(Atan2) +DECLARE_OP_USE_OUTPUT(Atan2) #ifdef ENABLE_GE DECLARE_OP_ADAPTER(Print) DECLARE_OP_USE_DYN_INPUT(Print) diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py index eb6ee9401d..9e90c5660c 100755 --- a/mindspore/ops/_grad/grad_math_ops.py +++ b/mindspore/ops/_grad/grad_math_ops.py @@ -738,3 +738,16 @@ def get_bprop_round(self): def bprop(x, out, dout): return (zeros_like(x),) return bprop + + +@bprop_getters.register(P.Atan2) +def get_bprop_atan2(self): + """Generate bprop for Atan2""" + + square = P.Square() + def bprop(x, y, out, dout): + tmp = dout / (square(x) + square(y)) + dx = tmp * y + dy = tmp * (-x) + return (dx, dy) + return bprop diff --git a/mindspore/ops/operations/__init__.py 
b/mindspore/ops/operations/__init__.py index 899b2f8d0f..5c98568b8a 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -37,7 +37,7 @@ from .debug_ops import (ImageSummary, InsertGradientOf, ScalarSummary, TensorSummary, Print) from .control_ops import ControlDepend, GeSwitch, Merge from .inner_ops import ScalarCast -from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, BatchMatMul, +from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul, ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd, Cos, Div, Equal, EqualCount, Exp, Floor, FloorDiv, Greater, GreaterEqual, Less, LessEqual, Log, LogicalAnd, @@ -226,7 +226,8 @@ __all__ = [ "Round", "ApplyFtrl", "SpaceToBatch", - "BatchToSpace" + "BatchToSpace", + "Atan2", ] __all__.sort() diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index ad928d792f..398a7e6f1a 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1858,3 +1858,26 @@ class Round(PrimitiveWithInfer): validator.check_subclass("x_dtype", x_type, mstype.tensor) validator.check_typename('x_dtype', x_type, mstype.number_type) return x_type + + +class Atan2(_MathBinaryOp): + r""" + Returns arctangent of input_x/input_y element-wise. + + It returns :math:`\theta\ \in\ (-\frac{\pi}{2}, \frac{\pi}{2})` + such that :math:`x = r*\sin(\theta), y = r*\cos(\theta)`, where :math:`r = \sqrt{x^2 + y^2}`. + + Inputs: + - **input_x** (Tensor) - The input tensor. + - **input_y** (Tensor) - The input tensor. + + Outputs: + Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'. + + Examples: + >>> input_x = Tensor(np.array([[0, 1]]), mstype.float32) + >>> input_y = Tensor(np.array([[1, 1]]), mstype.float32) + >>> atan2 = Atan2() + >>> atan2(input_x, input_y) + [[0. 
0.7853982]] + """ diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index f1d365e9cf..117036c37e 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -481,7 +481,12 @@ test_case_math_ops = [ ('Round', { 'block': P.Round(), 'desc_inputs': [[3]], - 'desc_bprop': [[3]]}) + 'desc_bprop': [[3]]}), + ('Atan2', { + 'block': P.Atan2(), + 'desc_inputs': [Tensor(np.array([0, 1]).astype(np.float32)), + Tensor(np.array([1, 1]).astype(np.float32))], + 'desc_bprop': [[2]]}) ] test_case_nn_ops = [ From 2fdf692c2e5973da1db0e3737832ed6f223d269c Mon Sep 17 00:00:00 2001 From: huangdongrun Date: Wed, 1 Apr 2020 15:23:40 +0800 Subject: [PATCH 054/367] fix bug of auto control depend for bert pre training add comment --- mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py b/mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py index 22a212a489..bc51ba5d48 100644 --- a/mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py +++ b/mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py @@ -403,9 +403,6 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): sens=None): """Defines the computation performed.""" weights = self.weights - # alloc status - init = self.alloc_status() - self.clear_before_grad(init) loss = self.network(input_ids, input_mask, token_type_id, @@ -417,6 +414,9 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): scaling_sens = self.loss_scale else: scaling_sens = sens + # alloc status and clear should be right before gradoperation + init = self.alloc_status() + self.clear_before_grad(init) grads = self.grad(self.network, weights)(input_ids, input_mask, token_type_id, From 3d35792877b469d00c4c106eb4cd0c3c30144e7e Mon Sep 17 00:00:00 2001 From: Xiaoda Zhang Date: Wed, 1 Apr 2020 19:24:18 +0800 Subject: [PATCH 055/367] change_star_elimination: make the non-identity 
triangle_eliminatin exact --- .../parallel/auto_parallel/graph_costmodel.cc | 97 +++---------------- .../parallel/auto_parallel/graph_costmodel.h | 11 +-- 2 files changed, 16 insertions(+), 92 deletions(-) diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc index 292cc4f5f0..f99b271894 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc @@ -948,10 +948,12 @@ OperatorInfoPtr CostGraph::EliminationContract(const OperatorInfoPtr& op) { return target_op; } -void CostGraph::CreateTriangleEliminationSubCostListForIdentity( - StrategyPtr elimi_op_stra, StrategyPtr left_op_stra, StrategyPtr right_op_stra, const CostPtr& right_op_cost, - const CostPtrList& elimi_op_clist, const CostPtrList& left_edge_clist, const CostPtr& right_edge_cost, - const CostPtrList& left_node_clist_origin, CostPtrList* left_node_clist_new) { +void CostGraph::CreateTriangleEliminationSubCostList(StrategyPtr elimi_op_stra, StrategyPtr left_op_stra, + StrategyPtr right_op_stra, const CostPtr& right_op_cost, + const CostPtrList& elimi_op_clist, + const CostPtrList& left_edge_clist, const CostPtr& right_edge_cost, + const CostPtrList& left_node_clist_origin, + CostPtrList* left_node_clist_new) { MS_EXCEPTION_IF_NULL(right_edge_cost); MS_EXCEPTION_IF_NULL(right_op_cost); MS_EXCEPTION_IF_NULL(left_node_clist_new); @@ -985,93 +987,20 @@ void CostGraph::CreateTriangleEliminationSubCostListForIdentity( } } -void CostGraph::CreateTriangleEliminationSubCostListForOthers( - StrategyPtr elimi_op_stra, StrategyPtr left_node_stra, StrategyPtr right_node_stra, const CostPtr& right_op_cost, - const CostPtrList& elimi_op_clist, const CostPtrList& left_edge_clist, const CostPtr& right_edge_cost, - const CostPtrList& left_node_clist_origin, CostPtrList* left_node_clist_new) { - CostPtr elimi_op_determined = nullptr, left_edge_determined = nullptr, init_ele = 
nullptr; - std::function LocalCompare = [&](CostPtr init, const CostPtr& cost_x) { - MS_EXCEPTION_IF_NULL(cost_x); - if ((init == nullptr) || (cost_x->memory_cost_ < DEVICE_MEMORY_CAPACITY)) { - init = cost_x; - } - return init; - }; - - // Find a feasible elimi_op_clist - elimi_op_determined = std::accumulate(elimi_op_clist.begin(), elimi_op_clist.end(), init_ele, LocalCompare); - init_ele = nullptr; - // Find a feasible left_edge_cost - left_edge_determined = std::accumulate(left_edge_clist.begin(), left_edge_clist.end(), init_ele, LocalCompare); - if ((elimi_op_determined == nullptr) || (left_edge_determined == nullptr)) { - return; - } - if ((elimi_op_determined->memory_cost_ >= DEVICE_MEMORY_CAPACITY) || - (left_edge_determined->memory_cost_ >= DEVICE_MEMORY_CAPACITY)) { - return; - } - - for (auto& left_node_cost : left_node_clist_origin) { - MS_EXCEPTION_IF_NULL(left_node_cost); - MS_EXCEPTION_IF_NULL(right_op_cost); - double new_memory_cost = left_node_cost->memory_cost_ + elimi_op_determined->memory_cost_ + - left_edge_determined->memory_cost_ + right_edge_cost->memory_cost_ + - right_op_cost->memory_cost_; - double commu_cost = left_node_cost->communication_cost_ + elimi_op_determined->communication_cost_ + - left_edge_determined->communication_cost_ + right_edge_cost->communication_cost_ + - right_op_cost->communication_cost_; - double commu_without = - left_node_cost->communication_without_parameter_ + elimi_op_determined->communication_without_parameter_ + - left_edge_determined->communication_without_parameter_ + right_edge_cost->communication_without_parameter_ + - right_op_cost->communication_without_parameter_; - auto decision = std::make_shared(elimi_op_stra, elimi_op_determined, - left_edge_determined, right_edge_cost, left_node_stra, - left_node_cost, right_node_stra, right_op_cost); - - auto new_cost = std::make_shared(new_memory_cost, commu_cost, decision); - new_cost->communication_without_parameter_ = commu_without; - 
new_cost->communication_with_partial_para_ = commu_without + COST_MODEL_GAMMA * (commu_cost - commu_without); - left_node_clist_new->emplace_back(std::move(new_cost)); - } -} - void CostGraph::CreateTriangleEliminationCostList(const OperatorInfoPtr& elimi_op, const CostPtrList& right_node_clist, const CostPtrList& right_edge_clist, const StrategyPtr& elimi_op_stra, const StrategyPtr& left_node_stra, const StrategyPtr& right_node_stra, const CostPtrList& elimi_op_clist, const CostPtrList& left_edge_clist, const CostPtrList& left_node_clist_origin, CostPtrList* left_node_clist_new) { - // The reason for separately dealing with when the 'elimi_op' is 'TMPIDENTITY_INFO' or others is that - // when 'elimi_op' is TMPIDENTITY_INFO, the computation is limited, while 'elimi_op' is others, the computation - // may be huge MS_EXCEPTION_IF_NULL(elimi_op); - if (elimi_op->name().find(TMPIDENTITY_INFO_NAME) != std::string::npos) { - for (auto& right_node_cost : right_node_clist) { - MS_EXCEPTION_IF_NULL(right_node_cost); - for (auto& right_edge_cost : right_edge_clist) { - MS_EXCEPTION_IF_NULL(right_edge_cost); - if ((right_node_cost->memory_cost_ < DEVICE_MEMORY_CAPACITY) && - (right_edge_cost->memory_cost_ < DEVICE_MEMORY_CAPACITY)) { - // Exact computation for TMPIDENTITY_INFO_NAME case - CreateTriangleEliminationSubCostListForIdentity(elimi_op_stra, left_node_stra, right_node_stra, - right_node_cost, elimi_op_clist, left_edge_clist, - right_edge_cost, left_node_clist_origin, left_node_clist_new); - } - } - } - } else { - for (auto& right_node_cost : right_node_clist) { - MS_EXCEPTION_IF_NULL(right_node_cost); - for (auto& right_edge_cost : right_edge_clist) { - MS_EXCEPTION_IF_NULL(right_edge_cost); - if ((right_node_cost->memory_cost_ < DEVICE_MEMORY_CAPACITY) && - (right_edge_cost->memory_cost_ < DEVICE_MEMORY_CAPACITY)) { - // Approximate computation for other case - CreateTriangleEliminationSubCostListForOthers(elimi_op_stra, left_node_stra, right_node_stra, 
right_node_cost, - elimi_op_clist, left_edge_clist, right_edge_cost, - left_node_clist_origin, left_node_clist_new); - } - } + for (auto& right_node_cost : right_node_clist) { + MS_EXCEPTION_IF_NULL(right_node_cost); + for (auto& right_edge_cost : right_edge_clist) { + MS_EXCEPTION_IF_NULL(right_edge_cost); + CreateTriangleEliminationSubCostList(elimi_op_stra, left_node_stra, right_node_stra, right_node_cost, + elimi_op_clist, left_edge_clist, right_edge_cost, left_node_clist_origin, + left_node_clist_new); } } } diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h index fde9514540..3b04703a47 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h @@ -163,14 +163,9 @@ class CostGraph { void CreateTriangleEliminationCostList(const OperatorInfoPtr&, const CostPtrList&, const CostPtrList&, const StrategyPtr&, const StrategyPtr&, const StrategyPtr&, const CostPtrList&, const CostPtrList&, const CostPtrList&, CostPtrList*); - // Given the relevant costlist, create the TriangleElimination cost for eliminating TmpIdentityInfo - void CreateTriangleEliminationSubCostListForIdentity(StrategyPtr, StrategyPtr, StrategyPtr, const CostPtr&, - const CostPtrList&, const CostPtrList&, const CostPtr&, - const CostPtrList&, CostPtrList*); - // Given the relevant costlist, create the TriangleElimination cost for eliminating other operators - void CreateTriangleEliminationSubCostListForOthers(StrategyPtr, StrategyPtr, StrategyPtr, const CostPtr&, - const CostPtrList&, const CostPtrList&, const CostPtr&, - const CostPtrList&, CostPtrList*); + // Given the relevant costlist, create the TriangleElimination cost + void CreateTriangleEliminationSubCostList(StrategyPtr, StrategyPtr, StrategyPtr, const CostPtr&, const CostPtrList&, + const CostPtrList&, const CostPtr&, const CostPtrList&, CostPtrList*); // Applying the Star Elimination in DP 
algorithm. Return the successive edges of this merged_op // NOTE: this elimination MUST be performed only when the above 5 operation cannot be applied. From b4b1c55ed20144eb99e549dd51d9d01cd4ec7edb Mon Sep 17 00:00:00 2001 From: ms_yan <6576637+ms_yan@user.noreply.gitee.com> Date: Wed, 1 Apr 2020 20:14:46 +0800 Subject: [PATCH 056/367] repair unsuitable warning in repeat count and epoch size check --- mindspore/train/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/train/model.py b/mindspore/train/model.py index fe655433fa..4174632cec 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -372,7 +372,7 @@ class Model: >>> model.train(2, dataset) """ repeat_count = train_dataset.get_repeat_count() - if epoch != repeat_count: + if epoch != repeat_count and dataset_sink_mode is True: logger.warning(f"The epoch_size {epoch} is not the same with dataset repeat_count {repeat_count}") check_bool(dataset_sink_mode) _device_number_check(self._parallel_mode, self._device_number) From d3f733fa25023f56695cb95cf337be8a140dddcb Mon Sep 17 00:00:00 2001 From: huangdongrun Date: Sat, 28 Mar 2020 10:33:10 +0000 Subject: [PATCH 057/367] support grad on graph with variable arguments use unpack graph primitive instead add testcases for all grad interface remove debug log format code remove dumpfuncgraph resolve clang-format resolve reviews resolve cpplint fix review --- .../ccsrc/operator/composite/composite.cc | 50 ------ .../ccsrc/operator/composite/composite.h | 14 +- .../ccsrc/operator/composite/unpack_call.cc | 94 +++++++++++ .../ccsrc/operator/composite/unpack_call.h | 54 +++++++ mindspore/ccsrc/operator/ops.h | 15 ++ mindspore/ccsrc/optimizer/irpass.cc | 6 + mindspore/ccsrc/optimizer/irpass.h | 8 + .../optimizer/irpass/grad_var_prepare.cc | 144 +++++++++++++++++ .../ccsrc/optimizer/irpass/grad_var_prepare.h | 55 +++++++ mindspore/ccsrc/pipeline/action.cc | 16 +- mindspore/ccsrc/pipeline/pass.cc | 17 ++ 
mindspore/ccsrc/pipeline/pass.h | 2 +- .../static_analysis/abstract_function.h | 1 + .../static_analysis/analysis_context.cc | 2 +- .../ccsrc/pipeline/static_analysis/prim.cc | 79 +++++++++ .../ccsrc/pipeline/static_analysis/prim.h | 15 ++ .../static_analysis/static_analysis.cc | 4 + .../python/parameter_feature/test_var_grad.py | 151 ++++++++++++++++++ 18 files changed, 660 insertions(+), 67 deletions(-) create mode 100644 mindspore/ccsrc/operator/composite/unpack_call.cc create mode 100644 mindspore/ccsrc/operator/composite/unpack_call.h create mode 100644 mindspore/ccsrc/optimizer/irpass/grad_var_prepare.cc create mode 100644 mindspore/ccsrc/optimizer/irpass/grad_var_prepare.h diff --git a/mindspore/ccsrc/operator/composite/composite.cc b/mindspore/ccsrc/operator/composite/composite.cc index 0c55b9480c..347641829d 100644 --- a/mindspore/ccsrc/operator/composite/composite.cc +++ b/mindspore/ccsrc/operator/composite/composite.cc @@ -1199,51 +1199,6 @@ FuncGraphPtr TensorSlice::GenerateFuncGraph(const AbstractBasePtrList& args_spec return ret_graph; } -FuncGraphPtr UnpackCall::GenerateFuncGraph(const AbstractBasePtrList& args_spec_list) { - // slice a tensor - // args: tensor, slice or slice tuple - const std::string op_name = std::string("UnpackCall"); - size_t arg_length = args_spec_list.size(); - if (arg_length < 2) { - MS_LOG(EXCEPTION) << "" << op_name << " requires at least two args, but got " << arg_length << "."; - } - - (void)abstract::CheckArg(op_name, args_spec_list, 0); - FuncGraphPtr ret_graph = std::make_shared(); - ret_graph->set_flags(FUNC_GRAPH_FLAG_CORE, true); - - AnfNodePtr fnNode = ret_graph->add_parameter(); - std::vector elems; - elems.push_back(fnNode); - for (size_t index = 1; index < arg_length; index++) { - MS_EXCEPTION_IF_NULL(args_spec_list[index]); - if (args_spec_list[index]->isa()) { - AbstractTuplePtr arg_tuple = dyn_cast(args_spec_list[index]); - AnfNodePtr para_tuple = ret_graph->add_parameter(); - for (size_t i = 0; i < 
arg_tuple->size(); ++i) { - elems.push_back( - ret_graph->NewCNode({NewValueNode(prim::kPrimTupleGetItem), para_tuple, NewValueNode(SizeToInt(i))})); - } - } else if (args_spec_list[index]->isa()) { - AbstractDictionaryPtr arg_dict = dyn_cast(args_spec_list[index]); - AnfNodePtr para_dict = ret_graph->add_parameter(); - auto dict_elems = arg_dict->elements(); - (void)std::transform( - dict_elems.begin(), dict_elems.end(), std::back_inserter(elems), - [ret_graph, para_dict](const AbstractAttribute& item) { - return ret_graph->NewCNode( - {NewValueNode(prim::kPrimMakeKeywordArg), NewValueNode(item.first), - ret_graph->NewCNode({NewValueNode(prim::kPrimDictGetItem), para_dict, NewValueNode(item.first)})}); - }); - } else { - MS_LOG(EXCEPTION) << "" << op_name << " require args should be tuple or dict, but got " - << args_spec_list[index]->ToString(); - } - } - ret_graph->set_output(ret_graph->NewCNode(elems)); - return ret_graph; -} - REGISTER_PYBIND_DEFINE( TupleAdd_, ([](const py::module* m) { (void)py::class_>(*m, "TupleAdd_").def(py::init()); @@ -1258,10 +1213,5 @@ REGISTER_PYBIND_DEFINE(TensorSlice_, ([](const py::module* m) { (void)py::class_>(*m, "TensorSlice_") .def(py::init()); })); - -REGISTER_PYBIND_DEFINE(UnpackCall_, ([](const py::module* m) { - (void)py::class_>(*m, "UnpackCall_") - .def(py::init()); - })); } // namespace prim } // namespace mindspore diff --git a/mindspore/ccsrc/operator/composite/composite.h b/mindspore/ccsrc/operator/composite/composite.h index 176efb0425..dc8627ba61 100644 --- a/mindspore/ccsrc/operator/composite/composite.h +++ b/mindspore/ccsrc/operator/composite/composite.h @@ -29,6 +29,7 @@ #include "operator/composite/zip_operation.h" #include "operator/composite/list_append_operation.h" #include "operator/composite/do_signature.h" +#include "operator/composite/unpack_call.h" #include "pipeline/static_analysis/static_analysis.h" #include "utils/misc.h" #include "utils/any.h" @@ -154,7 +155,7 @@ class GradOperation : public 
MetaFuncGraph { FuncGraphPtr GetGrad(AnfNodePtr ptrNode, const AnfNodePtr& weights, const std::vector& ptrParams, bool applyJ = false); FuncGraphPtr GenerateFuncGraph(const AbstractBasePtrList& args_spec_list) override; - + bool sens_param() const { return sens_param_; } bool get_all_; bool get_by_list_; bool sens_param_; @@ -208,17 +209,6 @@ class TensorSlice : public MetaFuncGraph { }; using TensorSlicePtr = std::shared_ptr; -// Expand the tuple and dict parameters generated when parsing the function call, -// and generate positional parameters and key-value pairs for function. -class UnpackCall : public MetaFuncGraph { - public: - explicit UnpackCall(const std::string& name) : MetaFuncGraph(name) {} - ~UnpackCall() override = default; - MS_DECLARE_PARENT(UnpackCall, MetaFuncGraph) - FuncGraphPtr GenerateFuncGraph(const AbstractBasePtrList& args_spec_list) override; - friend bool operator==(const UnpackCall& lhs, const UnpackCall& rhs) { return lhs.name_ == rhs.name_; } -}; -using UnpackCallPtr = std::shared_ptr; } // namespace prim } // namespace mindspore diff --git a/mindspore/ccsrc/operator/composite/unpack_call.cc b/mindspore/ccsrc/operator/composite/unpack_call.cc new file mode 100644 index 0000000000..64d6b3433b --- /dev/null +++ b/mindspore/ccsrc/operator/composite/unpack_call.cc @@ -0,0 +1,94 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "operator/composite/unpack_call.h" +#include +#include + +#include "./common.h" +#include "pipeline/static_analysis/abstract_value.h" +#include "pipeline/static_analysis/dshape.h" +#include "pipeline/static_analysis/param_validator.h" +#include "operator/cc_implementations.h" +#include "ir/anf.h" +#include "optimizer/opt.h" +#include "utils/symbolic.h" +#include "pybind_api/api_register.h" + +namespace mindspore { +// namespace to support composite operators definition +namespace prim { +using mindspore::abstract::AbstractAttribute; +using mindspore::abstract::AbstractBase; +using mindspore::abstract::AbstractDictionary; +using mindspore::abstract::AbstractDictionaryPtr; +using mindspore::abstract::AbstractFunction; +using mindspore::abstract::AbstractKeywordArg; +using mindspore::abstract::AbstractTuple; +using mindspore::abstract::AbstractTuplePtr; + +FuncGraphPtr UnpackCall::GenerateFuncGraph(const AbstractBasePtrList& args_spec_list) { + // slice a tensor + // args: tensor, slice or slice tuple + const std::string op_name = std::string("UnpackCall"); + size_t arg_length = args_spec_list.size(); + if (arg_length < 2) { + MS_LOG(EXCEPTION) << op_name << " requires at least two args, but got " << arg_length << "."; + } + + (void)abstract::CheckArg(op_name, args_spec_list, 0); + auto ret_graph = std::make_shared(); + ret_graph->set_flags(FUNC_GRAPH_FLAG_CORE, true); + + AnfNodePtr fnNode = ret_graph->add_parameter(); + std::vector elems; + elems.push_back(fnNode); + for (size_t index = 1; index < arg_length; index++) { + MS_EXCEPTION_IF_NULL(args_spec_list[index]); + if (args_spec_list[index]->isa()) { + auto arg_tuple = args_spec_list[index]->cast(); + AnfNodePtr para_tuple = ret_graph->add_parameter(); + for (size_t i = 0; i < arg_tuple->size(); ++i) { + elems.push_back( + ret_graph->NewCNode({NewValueNode(prim::kPrimTupleGetItem), para_tuple, NewValueNode(SizeToInt(i))})); + } + } else if (args_spec_list[index]->isa()) { + AbstractDictionaryPtr 
arg_dict = args_spec_list[index]->cast(); + AnfNodePtr para_dict = ret_graph->add_parameter(); + auto dict_elems = arg_dict->elements(); + (void)std::transform(dict_elems.begin(), dict_elems.end(), std::back_inserter(elems), + [ret_graph, para_dict](const AbstractAttribute& item) { + auto dict_get_item = ret_graph->NewCNode( + {NewValueNode(prim::kPrimDictGetItem), para_dict, NewValueNode(item.first)}); + return ret_graph->NewCNode( + {NewValueNode(prim::kPrimMakeKeywordArg), NewValueNode(item.first), dict_get_item}); + }); + } else { + MS_LOG(EXCEPTION) << op_name << " require args should be tuple or dict, but got " + << args_spec_list[index]->ToString(); + } + } + ret_graph->set_output(ret_graph->NewCNode(elems)); + return ret_graph; +} + +REGISTER_PYBIND_DEFINE(UnpackCall_, ([](const py::module* m) { + (void)py::class_>(*m, "UnpackCall_") + .def(py::init()); + })); + +} // namespace prim +} // namespace mindspore diff --git a/mindspore/ccsrc/operator/composite/unpack_call.h b/mindspore/ccsrc/operator/composite/unpack_call.h new file mode 100644 index 0000000000..7ec5f9ad33 --- /dev/null +++ b/mindspore/ccsrc/operator/composite/unpack_call.h @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_OPERATOR_COMPOSITE_UNPACK_CALL_H_ +#define MINDSPORE_CCSRC_OPERATOR_COMPOSITE_UNPACK_CALL_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "pipeline/static_analysis/static_analysis.h" +#include "utils/misc.h" +#include "utils/any.h" +#include "ir/dtype.h" +#include "ir/meta_func_graph.h" +#include "common/utils.h" + +namespace mindspore { +// namespace to support composite operators definition +namespace prim { + +// Expand the tuple and dict parameters generated when parsing the function call, +// and generate positional parameters and key-value pairs for function. +class UnpackCall : public MetaFuncGraph { + public: + explicit UnpackCall(const std::string& name) : MetaFuncGraph(name) {} + ~UnpackCall() override = default; + MS_DECLARE_PARENT(UnpackCall, MetaFuncGraph) + FuncGraphPtr GenerateFuncGraph(const AbstractBasePtrList& args_spec_list) override; + friend bool operator==(const UnpackCall& lhs, const UnpackCall& rhs) { return lhs.name_ == rhs.name_; } +}; +using UnpackCallPtr = std::shared_ptr; + +} // namespace prim +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_OPERATOR_COMPOSITE_UNPACK_CALL_H_ diff --git a/mindspore/ccsrc/operator/ops.h b/mindspore/ccsrc/operator/ops.h index f3f5dad5f1..727d66dfb3 100644 --- a/mindspore/ccsrc/operator/ops.h +++ b/mindspore/ccsrc/operator/ops.h @@ -246,6 +246,21 @@ class DoSignaturePrimitive : public Primitive { ValuePtr function_; }; using DoSignaturePrimitivePtr = std::shared_ptr; + +class UnpackGraphPrimitive : public Primitive { + public: + explicit UnpackGraphPrimitive(const std::string& name, const bool& with_sens, const bool& need_unpack_args) + : Primitive("UnpackGraph"), with_sens_in_args_(with_sens), need_unpack_args_(need_unpack_args) {} + ~UnpackGraphPrimitive() override = default; + MS_DECLARE_PARENT(UnpackGraphPrimitive, Primitive) + bool with_sens_in_args() const { return with_sens_in_args_; } + bool need_unpack_args() const { 
return need_unpack_args_; } + + private: + bool with_sens_in_args_; + bool need_unpack_args_; +}; +using UnpackGraphPrimitivePtr = std::shared_ptr; } // namespace prim } // namespace mindspore diff --git a/mindspore/ccsrc/optimizer/irpass.cc b/mindspore/ccsrc/optimizer/irpass.cc index ba78696b38..cdc960792f 100644 --- a/mindspore/ccsrc/optimizer/irpass.cc +++ b/mindspore/ccsrc/optimizer/irpass.cc @@ -39,6 +39,7 @@ #include "optimizer/irpass/specialize_transform.h" #include "optimizer/irpass/incorporate_getitem.h" #include "optimizer/irpass/incorporate_call.h" +#include "optimizer/irpass/grad_var_prepare.h" namespace mindspore { namespace opt { @@ -123,6 +124,11 @@ ResolveIRPassLib::ResolveIRPassLib() { resolver_resolve_ = MakeSubstitution(ResolverResolve(), "resolver_resolve", prim::kPrimResolve); resolver_getattr_ = MakeSubstitution(ResolverGetattr(), "resolver_getattr", prim::kPrimGetAttr); } + +InferenceOptPrepareLib::InferenceOptPrepareLib() { + grad_var_prepare_ = MakeSubstitution(GradVarPrepare(), "grad_var_prepare", IsCNode); +} + } // namespace irpass } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/optimizer/irpass.h b/mindspore/ccsrc/optimizer/irpass.h index c2af344e32..bdaf42b3ed 100644 --- a/mindspore/ccsrc/optimizer/irpass.h +++ b/mindspore/ccsrc/optimizer/irpass.h @@ -102,6 +102,13 @@ class ResolveIRPassLib { SubstitutionPtr resolver_getattr_; }; +class InferenceOptPrepareLib { + public: + InferenceOptPrepareLib(); + ~InferenceOptPrepareLib() = default; + SubstitutionPtr grad_var_prepare_; +}; + // predicate functions inline bool IsNode(const AnfNodePtr &) { return true; } @@ -151,6 +158,7 @@ inline bool IsCNodeDup(const AnfNodePtr &node) { } return false; } + } // namespace irpass } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/optimizer/irpass/grad_var_prepare.cc b/mindspore/ccsrc/optimizer/irpass/grad_var_prepare.cc new file mode 100644 index 0000000000..5daeced3a5 --- /dev/null +++ 
b/mindspore/ccsrc/optimizer/irpass/grad_var_prepare.cc @@ -0,0 +1,144 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "optimizer/irpass/grad_var_prepare.h" +#include +#include +#include +#include + +#include "operator/composite/composite.h" +#include "operator/ops.h" +#include "optimizer/irpass.h" +#include "optimizer/optimizer.h" +#include "ir/visitor.h" +#include "ir/func_graph.h" +#include "ir/func_graph_cloner.h" + +namespace mindspore { +namespace opt { +namespace irpass { + +static AnfNodePtr GenerateUnpackGraphNode(std::vector inputs_y, FuncGraphPtr func_graph, + AnfNodePtr func_node, bool is_unpack, bool sens_param) { + MS_EXCEPTION_IF_NULL(func_graph); + MS_EXCEPTION_IF_NULL(func_node); + std::vector nodes; + AnfNodePtr unpack_graph_node = nullptr; + if (is_unpack) { + auto unpack_graph = std::make_shared("unpack_graph", sens_param, true); + nodes.push_back(NewValueNode(unpack_graph)); + nodes.push_back(func_node); + // {unpackcall, {GradOperation, ...}, args...} + std::transform(inputs_y.begin() + 2, inputs_y.end(), std::back_inserter(nodes), + [](const AnfNodePtr& node) { return node; }); + unpack_graph_node = func_graph->NewCNode(nodes); + } else { + auto unpack_graph = std::make_shared("unpack_graph", sens_param, false); + nodes.push_back(NewValueNode(unpack_graph)); + nodes.push_back(func_node); + // {{GradOperation, ...}, args...} + std::transform(inputs_y.begin() + 1, 
inputs_y.end(), std::back_inserter(nodes), + [](const AnfNodePtr& node) { return node; }); + unpack_graph_node = func_graph->NewCNode(nodes); + } + return unpack_graph_node; +} + +// get metagraph of value node +MetaFuncGraphPtr GetMetaFuncGraphOfValueNode(const AnfNodePtr& node) { + ValuePtr value; + if (IsValueNode(node)) { + value = GetValueNode(node)->cast()->function(); + } else { + value = GetValueNode(node); + } + if (value == nullptr) { + return nullptr; + } + return value->cast(); +} + +// check if node is a specific metafuncgraph op +bool IsMetaFuncGraph(const AnfNodePtr& node, const MetaFuncGraphPtr meta_func_graph) { + if (node != nullptr) { + auto meta_func_graph_ptr = GetMetaFuncGraphOfValueNode(node); + if (meta_func_graph_ptr == nullptr) { + return false; + } + + if (meta_func_graph_ptr->type_name() == meta_func_graph->type_name()) { + return true; + } + } + return false; +} + +// {{GradOperation, g, w}, Ys} +// {UnPackCall, {GradOperation, g, w}, Ys} +AnfNodePtr GradVarPrepare::operator()(const OptimizerPtr&, const AnfNodePtr& node) { + if (!node->isa() || node->func_graph() == nullptr) { + return nullptr; + } + + // {{...}, Ys} + auto inputs_y = node->cast()->inputs(); + std::vector inputs_x; + if (IsCNode(inputs_y[0])) { + inputs_x = inputs_y[0]->cast()->inputs(); + } else if (IsMetaFuncGraph(inputs_y[0], unpack_op_) && IsCNode(inputs_y[1])) { + inputs_x = inputs_y[1]->cast()->inputs(); + } else { + return nullptr; + } + + // {{...}, Xs} + if (inputs_x.size() < 2) { + return nullptr; + } + + // {GradOperation, g, w} or {GradOperation, g} + if (!IsMetaFuncGraph(inputs_x[0], grad_op_)) { + return nullptr; + } + + auto meta_func = GetMetaFuncGraphOfValueNode(inputs_x[0]); + if (meta_func == nullptr) { + return nullptr; + } + auto grad_op_ptr = meta_func->cast(); + auto func_node = inputs_x[1]; + if (!IsValueNode(func_node)) { + return nullptr; + } + + AnfNodePtr unpack_graph_node = + GenerateUnpackGraphNode(inputs_y, node->cast()->func_graph(), 
func_node, + IsMetaFuncGraph(inputs_y[0], unpack_op_), grad_op_ptr->sens_param()); + // constuct new grad_opration + inputs_x[1] = unpack_graph_node; + auto grad_op_cnode = node->func_graph()->NewCNode(inputs_x); + if (IsMetaFuncGraph(inputs_y[0], unpack_op_)) { + inputs_y[1] = grad_op_cnode; + } else { + inputs_y[0] = grad_op_cnode; + } + auto cnode = node->func_graph()->NewCNode(inputs_y); + return cnode; +} +} // namespace irpass +} // namespace opt +} // namespace mindspore diff --git a/mindspore/ccsrc/optimizer/irpass/grad_var_prepare.h b/mindspore/ccsrc/optimizer/irpass/grad_var_prepare.h new file mode 100644 index 0000000000..599d1dca17 --- /dev/null +++ b/mindspore/ccsrc/optimizer/irpass/grad_var_prepare.h @@ -0,0 +1,55 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_OPTIMIZER_IRPASS_GRAD_VAR_PREPARE_H_ +#define MINDSPORE_CCSRC_OPTIMIZER_IRPASS_GRAD_VAR_PREPARE_H_ + +#include +#include +#include +#include + +#include "operator/composite/composite.h" +#include "operator/ops.h" +#include "optimizer/irpass.h" +#include "optimizer/optimizer.h" +#include "ir/visitor.h" +#include "ir/func_graph.h" +#include "ir/func_graph_cloner.h" + +namespace mindspore { +namespace opt { +namespace irpass { + +// {{GradOperation, g, w}, Ys} +// {UnPackCall, {GradOperation, g, w}, Ys} +class GradVarPrepare : public AnfVisitor { + public: + GradVarPrepare() + : grad_op_(std::make_shared("grad")), + unpack_op_(std::make_shared("unpack_call")) {} + ~GradVarPrepare() override = default; + + AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override; + + private: + MetaFuncGraphPtr grad_op_; + MetaFuncGraphPtr unpack_op_; +}; +} // namespace irpass +} // namespace opt +} // namespace mindspore +#endif // MINDSPORE_CCSRC_OPTIMIZER_IRPASS_GRAD_VAR_PREPARE_H_ diff --git a/mindspore/ccsrc/pipeline/action.cc b/mindspore/ccsrc/pipeline/action.cc index f3742ab654..126992cb8f 100644 --- a/mindspore/ccsrc/pipeline/action.cc +++ b/mindspore/ccsrc/pipeline/action.cc @@ -175,10 +175,10 @@ bool CombineLikeGraphs(const ResourcePtr&) { bool SymbolResolveAction(const ResourcePtr& res) { if (res->manager() == nullptr) { - MS_LOG(EXCEPTION) << "Resolve error."; + MS_LOG(EXCEPTION) << "SymbolResolve error, manager is null"; } if (res->func_graph() == nullptr) { - MS_LOG(EXCEPTION) << "Resolve error"; + MS_LOG(EXCEPTION) << "SymbolResolve error, graph is null"; } FuncGraphPtr func_graph = res->func_graph(); auto succ = parse::ResolveFuncGraph(func_graph, res); @@ -194,6 +194,16 @@ bool SymbolResolveAction(const ResourcePtr& res) { return succ; } +bool InferenceOptPrepareAction(const ResourcePtr& res) { + if (res->manager() == nullptr) { + MS_LOG(EXCEPTION) << "InferenceOptPrepare error, manager is null."; + } + if 
(res->func_graph() == nullptr) { + MS_LOG(EXCEPTION) << "InferenceOptPrepare error, graph is null."; + } + return InferenceOptPreparePass(res); +} + bool AbstractSpecializeAction(const ResourcePtr& res) { if (res->func_graph() == nullptr) { MS_LOG(EXCEPTION) << "AbstractSpecialize error"; @@ -303,7 +313,7 @@ static std::vector CommonPipeline() { // Resolve the python func actions.emplace_back(std::make_pair("symbol_resolve", SymbolResolveAction)); actions.emplace_back(std::make_pair("combine_like_graphs", CombineLikeGraphs)); - + actions.emplace_back(std::make_pair("inference_opt_prepare", InferenceOptPrepareAction)); // Evaluate type and shape, and specialize actions.emplace_back(std::make_pair("abstract_specialize", AbstractSpecializeAction)); diff --git a/mindspore/ccsrc/pipeline/pass.cc b/mindspore/ccsrc/pipeline/pass.cc index 02e8c5277b..e2626d5314 100644 --- a/mindspore/ccsrc/pipeline/pass.cc +++ b/mindspore/ccsrc/pipeline/pass.cc @@ -160,6 +160,13 @@ OptPassGroupMap GetControlPhases(const opt::irpass::OptimizeIRPassLib& irpass) { return map; } +OptPassGroupMap GetInferenceOptPreparePhases() { + opt::irpass::InferenceOptPrepareLib irpass; + auto grad_var_prepare = opt::OptPassConfig({irpass.grad_var_prepare_}); + opt::OptPassGroupMap prepare_map({{"inference_opt_prep", grad_var_prepare}}); + return prepare_map; +} + OptPassGroupMap GetPreparePhases(const opt::irpass::OptimizeIRPassLib& irpass) { opt::OptPassConfig prepare_group = opt::OptPassConfig({irpass.print_tuple_wrapper_}); OptPassGroupMap map({{"prepare_group", prepare_group}}); @@ -239,6 +246,16 @@ bool ValidatePass(const ResourcePtr& res) { return true; } +bool InferenceOptPreparePass(const ResourcePtr& res) { + FuncGraphPtr func_graph = res->func_graph(); + MS_EXCEPTION_IF_NULL(func_graph); + abstract::AbstractBasePtrList args_spec = res->args_spec(); + auto prepare_map = GetInferenceOptPreparePhases(); + auto infer_opt_prepare = opt::Optimizer::MakeOptimizer("inference_prepare", res, prepare_map); 
+ (void)infer_opt_prepare->step(func_graph, args_spec, false); + return true; +} + std::vector kVmPasses = {{"simplify_data_structures", SimplifyDataStructuresPass}, {"opt_a", OptPassAGroup}, {"opt_b", OptPassBGroup}, diff --git a/mindspore/ccsrc/pipeline/pass.h b/mindspore/ccsrc/pipeline/pass.h index 03ed8eb370..3731d7e524 100644 --- a/mindspore/ccsrc/pipeline/pass.h +++ b/mindspore/ccsrc/pipeline/pass.h @@ -34,7 +34,7 @@ bool CconvPass(const ResourcePtr& res); bool ValidatePass(const ResourcePtr& res); bool ConvertPrepareAdapt(const ResourcePtr& res); bool AddControlDependPass(const ResourcePtr& res); - +bool InferenceOptPreparePass(const ResourcePtr& res); void ReclaimOptimizer(); } // namespace pipeline } // namespace mindspore diff --git a/mindspore/ccsrc/pipeline/static_analysis/abstract_function.h b/mindspore/ccsrc/pipeline/static_analysis/abstract_function.h index 3acb22d829..133d5e99a9 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/abstract_function.h +++ b/mindspore/ccsrc/pipeline/static_analysis/abstract_function.h @@ -133,6 +133,7 @@ class FuncGraphAbstractClosure : public AbstractFuncAtom { FuncGraphPtr func_graph_; AnalysisContextPtr context_; }; +using FuncGraphAbstractClosurePtr = std::shared_ptr; class MetaFuncGraphAbstractClosure : public AbstractFuncAtom { public: diff --git a/mindspore/ccsrc/pipeline/static_analysis/analysis_context.cc b/mindspore/ccsrc/pipeline/static_analysis/analysis_context.cc index 9326ded2d5..aeaa6b17f8 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/analysis_context.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/analysis_context.cc @@ -41,7 +41,7 @@ AnalysisContextPtr AnalysisContext::NewFuncGraphContext(const FuncGraphPtr &func } else { oss << "nullptr"; } - MS_LOG(EXCEPTION) << "" << oss.str() << " NodeInfo: " << trace::GetDebugInfo(func_graph->debug_info()); + MS_LOG(EXCEPTION) << oss.str() << " NodeInfo: " << trace::GetDebugInfo(func_graph->debug_info()); } return NewContext(parent_context, func_graph, 
args_spec_list); } diff --git a/mindspore/ccsrc/pipeline/static_analysis/prim.cc b/mindspore/ccsrc/pipeline/static_analysis/prim.cc index 98d82de5d5..4110f25811 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/prim.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/prim.cc @@ -180,6 +180,85 @@ AbstractBasePtr DoSignatureEvaluator::Run(AnalysisEnginePtr engine, const Config return engine->ForwardConfig(out_conf, fn_conf); } +static AbstractBasePtrList GetUnpackGraphSpecArgsList(AbstractBasePtrList args_spec_list, bool need_unpack) { + // arg[0] is the func graph to unpack, ignore it + AbstractBasePtrList sepcialize_args_before_unpack(args_spec_list.begin() + 1, args_spec_list.end()); + AbstractBasePtrList graph_sepcialize_args; + if (need_unpack) { + for (size_t index = 0; index < sepcialize_args_before_unpack.size(); index++) { + MS_EXCEPTION_IF_NULL(sepcialize_args_before_unpack[index]); + if (sepcialize_args_before_unpack[index]->isa()) { + AbstractTuplePtr arg_tuple = sepcialize_args_before_unpack[index]->cast(); + std::transform(arg_tuple->elements().begin(), arg_tuple->elements().end(), + std::back_inserter(graph_sepcialize_args), [](AbstractBasePtr abs) { return abs; }); + } else if (sepcialize_args_before_unpack[index]->isa()) { + AbstractDictionaryPtr arg_dict = sepcialize_args_before_unpack[index]->cast(); + auto dict_elems = arg_dict->elements(); + (void)std::transform( + dict_elems.begin(), dict_elems.end(), std::back_inserter(graph_sepcialize_args), + [](const AbstractAttribute &item) { return std::make_shared(item.first, item.second); }); + } else { + MS_LOG(EXCEPTION) << "UnpackGraph require args should be tuple or dict, but got " + << sepcialize_args_before_unpack[index]->ToString(); + } + } + } else { + graph_sepcialize_args = sepcialize_args_before_unpack; + } + return graph_sepcialize_args; +} + +AbstractBasePtr UnpackGraphEvaluator::Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list, + AnfNodeConfigPtr out_conf) { + if 
(out_conf->node() == nullptr || !out_conf->node()->isa()) { + MS_LOG(EXCEPTION) << "Node of out_conf should be CNode"; + } + if (!prim_->isa()) { + MS_LOG(EXCEPTION) << "Primitive should be UnpackGraphPrimitive, but got " << prim_->ToString(); + } + + auto unpack_graph = prim_->cast(); + auto out_node = out_conf->node()->cast(); + const auto &out_node_inputs = out_node->inputs(); + if (out_node->inputs().size() == 0 || (out_node_inputs.size() - 1) != args_conf_list.size()) { + MS_LOG(EXCEPTION) << "UnpackGraphPrimitive" + << " args size should equal to inputs size minus 1, but args size " << args_conf_list.size() + << ", inputs size " << out_node_inputs.size(); + } + AnfNodePtrList args_inputs{out_node_inputs.begin() + 1, out_node_inputs.end()}; + AbstractBasePtrList args_spec_list; + (void)std::transform(args_conf_list.begin(), args_conf_list.end(), std::back_inserter(args_spec_list), + [](const ConfigPtr &ref) -> AbstractBasePtr { return ref->GetEvaluatedValue(); }); + // get the forward graph + MS_EXCEPTION_IF_NULL(args_spec_list[0]); + AbstractFunctionPtr fn = args_spec_list[0]->cast(); + if (fn == nullptr) { + MS_LOG(EXCEPTION) << "UnpackGraphPrimitive arg0 must be AbstractFunction, but " << args_spec_list[0]->ToString(); + } + auto real_fn = fn->cast(); + MS_EXCEPTION_IF_NULL(real_fn); + FuncGraphPtr forward_graph = real_fn->func_graph(); + MS_EXCEPTION_IF_NULL(forward_graph); + AbstractBasePtrList graph_sepcialize_args = + GetUnpackGraphSpecArgsList(args_spec_list, unpack_graph->need_unpack_args()); + + AbstractBasePtrList graph_sepcialize_args_without_sens; + (void)std::transform(graph_sepcialize_args.begin(), + graph_sepcialize_args.end() - (unpack_graph->with_sens_in_args() ? 
1 : 0), + std::back_inserter(graph_sepcialize_args_without_sens), [](AbstractBasePtr abs) { return abs; }); + auto new_graph = forward_graph->GenerateGraph(graph_sepcialize_args_without_sens); + engine->func_graph_manager()->AddFuncGraph(new_graph); + ScopePtr scope = kDefaultScope; + if (out_conf != nullptr) { + scope = out_conf->node()->scope(); + } + ScopeGuard scope_guard(scope); + AnfNodePtr new_vnode = NewValueNode(new_graph); + AnfNodeConfigPtr fn_conf = engine->MakeConfig(new_vnode, out_conf->context()); + + return engine->ForwardConfig(out_conf, fn_conf); +} + namespace { py::object BuildValue(const ValuePtr &value_ptr) { if (value_ptr == nullptr) { diff --git a/mindspore/ccsrc/pipeline/static_analysis/prim.h b/mindspore/ccsrc/pipeline/static_analysis/prim.h index 9dae576a4c..e154473dbb 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/prim.h +++ b/mindspore/ccsrc/pipeline/static_analysis/prim.h @@ -87,6 +87,21 @@ class DoSignatureEvaluator : public Evaluator { PrimitivePtr prim_; }; +class UnpackGraphEvaluator : public Evaluator { + public: + explicit UnpackGraphEvaluator(const PrimitivePtr primitive) : Evaluator("UnpackGraphEvaluator"), prim_(primitive) {} + ~UnpackGraphEvaluator() override = default; + AbstractBasePtr Run(AnalysisEnginePtr engine, const ConfigPtrList &argrefs, + AnfNodeConfigPtr out_config = nullptr) override; + + AbstractBasePtr Infer(AnalysisEnginePtr, const AbstractBasePtrList &) override { + MS_LOG(EXCEPTION) << "Infer() should not be called, Run() method should be called"; + } + + private: + PrimitivePtr prim_; +}; + bool IsInWhiteList(PrimitivePtr primitive); StandardPrimitiveEvalImpl GetPrimitiveInferImpl(const PrimitivePtr &primitive); diff --git a/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc b/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc index 0bfba265db..49182e8d09 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc +++ 
b/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc @@ -289,6 +289,10 @@ EvaluatorPtr GetPrimEvaluator(const PrimitivePtr &prim, const AnalysisEnginePtr evaluator = std::make_shared(prim); return evaluator; } + if (prim->isa()) { + evaluator = std::make_shared(prim); + return evaluator; + } if (prim->HasPyEvaluator()) { auto prim_py = dyn_cast(prim); if (prim_py != nullptr) { diff --git a/tests/ut/python/parameter_feature/test_var_grad.py b/tests/ut/python/parameter_feature/test_var_grad.py index d51b78ed9d..12c05d0594 100644 --- a/tests/ut/python/parameter_feature/test_var_grad.py +++ b/tests/ut/python/parameter_feature/test_var_grad.py @@ -19,6 +19,8 @@ from mindspore.nn import Cell from mindspore.ops import operations as P import mindspore.ops.composite as C from mindspore.common.api import _executor +from mindspore.common.parameter import ParameterTuple +from mindspore.common import dtype as mstype context.set_context(mode=context.GRAPH_MODE) @@ -34,3 +36,152 @@ def test_net_vargs_expand(): sens = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) net = AddNet() out = C.grad_all_with_sens(net, net.trainable_params())(x, y, sens) + +class VarNet(Cell): + def __init__(self, net): + super(VarNet, self).__init__() + self.b = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b", requires_grad=True) + self.w = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "w", requires_grad=True) + self.net = net + def construct(self, *args): + return self.net(*args)*self.w + self.b + +class SecondNet(Cell): + def __init__(self): + super(SecondNet, self).__init__() + self.b2 = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b2", requires_grad=True) + def construct(self, *args): + res = args[0] + args[1] + return res + self.b2 +def test_all_var_args_grad_with_sens(): + """"test grad_by_list_with_sens with all var args input""" + class GradNet(Cell): + def __init__(self, net): + super(GradNet, self).__init__() + self.weights = 
ParameterTuple(net.trainable_params()) + self.net = net + def construct(self, *inputs): + return C.grad_by_list_with_sens(self.net, self.weights)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + sens = Tensor(1.0, dtype=mstype.float32) + net = VarNet(SecondNet()) + grad_net = GradNet(net) + out = grad_net(x, y, sens) + +def test_grad_list_var_args(): + class GradNet(Cell): + def __init__(self, net): + super(GradNet, self).__init__() + self.weights = ParameterTuple(net.trainable_params()) + self.net = net + def construct(self, *inputs): + return C.grad_by_list(self.net, self.weights)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + net = VarNet(SecondNet()) + grad_net = GradNet(net) + out = grad_net(x, y) + +def test_grad_all_var_args(): + class GradNet(Cell): + def __init__(self, net): + super(GradNet, self).__init__() + self.weights = ParameterTuple(net.trainable_params()) + self.net = net + def construct(self, *inputs): + return C.grad_all(self.net)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + net = VarNet(SecondNet()) + grad_net = GradNet(net) + out = grad_net(x, y) + +def test_grad_all_var_args_with_sens(): + class GradNet(Cell): + def __init__(self, net): + super(GradNet, self).__init__() + self.weights = ParameterTuple(net.trainable_params()) + self.net = net + def construct(self, *inputs): + return C.grad_all_with_sens(self.net)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + sens = Tensor(1.0, dtype=mstype.float32) + net = VarNet(SecondNet()) + grad_net = GradNet(net) + out = grad_net(x, y, sens) + +def test_grad_var_args_with_sens(): + class GradNet(Cell): + def __init__(self, net): + super(GradNet, self).__init__() + self.weights = 
ParameterTuple(net.trainable_params()) + self.net = net + def construct(self, *inputs): + return C.grad_with_sens(self.net)(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + sens = Tensor(1.0, dtype=mstype.float32) + net = VarNet(SecondNet()) + grad_net = GradNet(net) + out = grad_net(x, y, sens) + +def test_var_args_grad(): + class VarNet(Cell): + def __init__(self, net): + super(VarNet, self).__init__() + self.b = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b", requires_grad=True) + self.net = net + def construct(self, *args): + return self.net(*args) + self.b + + class SecondNet(Cell): + def __init__(self): + super(SecondNet, self).__init__() + self.b2 = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b2", requires_grad=True) + def construct(self, *args): + res = args[0] + args[1] + return res + self.b2 + class GradNet(Cell): + def __init__(self, net): + super(GradNet, self).__init__() + self.net = net + self.weights = ParameterTuple(net.trainable_params()) + def construct(self, x, y, sens): + return C.grad_by_list_with_sens(self.net, self.weights)(x, y, sens) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + sens = Tensor(1.0, dtype=mstype.float32) + net = VarNet(SecondNet()) + grad_net = GradNet(net) + out = grad_net(x, y, sens) + + +def test_var_args_positional(): + """"test grad_all with var args in inner graph""" + class VarNet(Cell): + def __init__(self, net): + super(VarNet, self).__init__() + self.net = net + def construct(self, x, y): + return self.net(x, y)*x + + class SecondNet(Cell): + def __init__(self): + super(SecondNet, self).__init__() + def construct(self, *args): + return args[0] + args[1] + + class GradNet(Cell): + def __init__(self, net): + super(GradNet, self).__init__() + self.net = net + self.weights = ParameterTuple(net.trainable_params()) + def construct(self, x, y): + return 
C.grad_all(self.net)(x, y) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + net = VarNet(SecondNet()) + grad_net = GradNet(net) + out = grad_net(x, y) From 04be6a37f0b73dfa2c4d86be99539fbf14522210 Mon Sep 17 00:00:00 2001 From: kswang Date: Wed, 1 Apr 2020 21:37:02 +0800 Subject: [PATCH 058/367] add getptr for memreuse --- mindspore/ccsrc/device/kernel_runtime.cc | 33 ++++--------------- mindspore/ccsrc/device/kernel_runtime.h | 3 -- .../ccsrc/pre_activate/mem_reuse/mem_reuse.cc | 30 +++++++++++++++++ .../ccsrc/pre_activate/mem_reuse/mem_reuse.h | 4 +++ 4 files changed, 40 insertions(+), 30 deletions(-) diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index 99f5d491ac..878fe4a7f8 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -42,7 +42,6 @@ KernelRuntime::~KernelRuntime() { #ifdef ENABLE_DUMP_E2E dump_conf_ptr_ = nullptr; #endif - reuse_mem_base_ = nullptr; mem_reuse_util_ptr_ = nullptr; } @@ -476,9 +475,9 @@ void KernelRuntime::ReuseAssignDynamicMemory(session::KernelGraph *graph) { bestfit_mem_reuse->Reuse(mem_reuse_util_ptr.get()); size_t total_allocated_size = bestfit_mem_reuse->GetAllocatedSize(); MS_LOG(INFO) << "TotalReuseDynamicSize [" << total_allocated_size << "]"; - auto base_ptr = MallocDynamicMem(total_allocated_size, false); - reuse_mem_base_ = base_ptr; mem_reuse_util_ptr_ = mem_reuse_util_ptr; + auto base_ptr = MallocDynamicMem(total_allocated_size, false); + mem_reuse_util_ptr_->set_mem_base(base_ptr); auto &kernels = graph->execution_order(); for (auto &kernel : kernels) { AssignNodeOutputMem(kReuseDynamicMem, kernel, kGetAllOuts); @@ -488,22 +487,13 @@ void KernelRuntime::ReuseAssignDynamicMemory(session::KernelGraph *graph) { void KernelRuntime::AssignReuseWorkSpaceMem(const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); - auto key = node.get(); auto kernel_mod = 
AnfAlgo::GetKernelMod(node); MS_EXCEPTION_IF_NULL(kernel_mod); size_t index = 0; - auto iter = mem_reuse_util_ptr_->kernel_workspace_refs_.find(key); for (auto &size : kernel_mod->GetWorkspaceSizeList()) { - if (iter != mem_reuse_util_ptr_->kernel_workspace_refs_.end()) { - if (index >= iter->second.size()) { - MS_LOG(EXCEPTION) << "index:[" << index << "] is larger than it's workspace size:[" << iter->second.size() - << "]"; - } - auto wk_ref = iter->second[index]; - auto wk_ptr = reuse_mem_base_ + wk_ref->offset_; - AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(wk_ptr, size, "", kTypeUnknown), index, node.get()); - index++; - } + auto wk_ptr = mem_reuse_util_ptr_->GetNodeWorkSpacePtr(node, index); + AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(wk_ptr, size, "", kTypeUnknown), index, node.get()); + index++; } } @@ -554,18 +544,7 @@ uint8_t *KernelRuntime::CalDeviceMem(const AnfNodePtr &node, size_t size, int fl } else if (flag == kDynamicMem) { ptr = MallocDynamicMem(size, false); } else if (flag == kReuseDynamicMem) { - auto key = node.get(); - auto iter = mem_reuse_util_ptr_->kernel_output_refs_.find(key); - if (iter != mem_reuse_util_ptr_->kernel_output_refs_.end()) { - // private member form KernelRuntime - memreuse::KernelRefCountPtr kernel_ref_count_ptr = mem_reuse_util_ptr_->kernel_output_refs_[key][index]; - if (kernel_ref_count_ptr == nullptr) { - return ptr; - } - ptr = reuse_mem_base_ + kernel_ref_count_ptr->offset_; - } else { - MS_LOG(EXCEPTION) << "node [" << AnfAlgo::GetCNodeName(node) << "] don't exist in kernel_output_refs"; - } + ptr = mem_reuse_util_ptr_->GetNodeOutputPtr(node, index); } return ptr; } diff --git a/mindspore/ccsrc/device/kernel_runtime.h b/mindspore/ccsrc/device/kernel_runtime.h index afdb45a698..ac9a56ed4d 100644 --- a/mindspore/ccsrc/device/kernel_runtime.h +++ b/mindspore/ccsrc/device/kernel_runtime.h @@ -128,9 +128,6 @@ class KernelRuntime { size_t total_static_size_ = 0; size_t total_dynamic_size_ = 0; MemReuseUtilPtr 
mem_reuse_util_ptr_{nullptr}; - - private: - uint8_t *reuse_mem_base_{nullptr}; }; using KernelRuntimePtr = std::shared_ptr; } // namespace device diff --git a/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.cc b/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.cc index 0db3c35196..2113fec653 100644 --- a/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.cc +++ b/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.cc @@ -316,5 +316,35 @@ void MemReuseUtil::SetAllInfo(KernelGraph *graph) { MemReuseChecker::GetInstance().CheckMemReuseIR(total_refs_list_, kernel_def_ptr_list_, graph); #endif } + +uint8_t *MemReuseUtil::GetNodeOutputPtr(const AnfNodePtr &node, size_t index) const { + auto key = node.get(); + auto iter = kernel_output_refs_.find(key); + uint8_t *ptr = nullptr; + if (iter != kernel_output_refs_.end()) { + if (index >= iter->second.size()) { + MS_LOG(EXCEPTION) << "index:[" << index << "] is larger than it's workspace size:[" << iter->second.size() << "]"; + } + auto output_ref = iter->second[index]; + ptr = mem_base_ + output_ref->offset_; + } else { + MS_LOG(EXCEPTION) << "node [" << AnfAlgo::GetCNodeName(node) << "] don't exist in kernel_output_refs"; + } + return ptr; +} + +uint8_t *MemReuseUtil::GetNodeWorkSpacePtr(const AnfNodePtr &node, size_t index) const { + auto key = node.get(); + auto iter = kernel_workspace_refs_.find(key); + uint8_t *ptr = nullptr; + if (iter != kernel_workspace_refs_.end()) { + if (index >= iter->second.size()) { + MS_LOG(EXCEPTION) << "index:[" << index << "] is larger than it's workspace size:[" << iter->second.size() << "]"; + } + auto wk_ref = iter->second[index]; + ptr = mem_base_ + wk_ref->offset_; + } + return ptr; +} } // namespace memreuse } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.h b/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.h index 6ecd222688..cae0e4565f 100644 --- a/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.h +++ 
b/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.h @@ -76,6 +76,9 @@ class MemReuseUtil { void set_kernel_def_ptr_list(const KernelDefPtrMaps &kernel_def_ptr_list) { kernel_def_ptr_list_ = kernel_def_ptr_list; } + void set_mem_base(uint8_t *mem_base) { mem_base_ = mem_base; } + uint8_t *GetNodeOutputPtr(const AnfNodePtr &node, size_t index) const; + uint8_t *GetNodeWorkSpacePtr(const AnfNodePtr &node, size_t index) const; private: int util_index_; @@ -88,6 +91,7 @@ class MemReuseUtil { size_t total_dy_size_ = 0; size_t total_workspace_size_ = 0; size_t total_reuseworkspace_size_ = 0; + uint8_t *mem_base_{nullptr}; }; using MemReuseUtilPtr = std::shared_ptr; } // namespace memreuse From b8c16f97f73c77dbe4d7e64045a256aedc336056 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Wed, 1 Apr 2020 22:03:01 +0800 Subject: [PATCH 059/367] fix examples of P.Fill --- mindspore/ops/operations/array_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index f6d563321c..7b49cbf84c 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -713,7 +713,7 @@ class Fill(PrimitiveWithInfer): Examples: >>> fill = P.Fill() - >>> fill(P.DType()(x), (2, 2), 1) + >>> fill(mindspore.float32, (2, 2), 1) """ @prim_attr_register From 4feba6332de942a8e4f86b77a265cc58a773cec2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Wed, 1 Apr 2020 22:16:29 +0800 Subject: [PATCH 060/367] fix the examples of P.ReLU6 --- mindspore/ops/operations/nn_ops.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index afa4c7dfe3..ed20286bd8 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -190,7 +190,6 @@ class ReLU6(PrimitiveWithInfer): >>> input_x = Tensor(np.array([[-1.0, 
4.0, -8.0], [2.0, -5.0, 9.0]], np.float32)) >>> relu6 = ReLU6() >>> result = relu6(input_x) - >>> assert result.asnumpy() == Tensor(np.array([[0, 4.0, 0.0], [2.0, 0.0, 6.0]], np.float32)).asnumpy() """ @prim_attr_register From ed3c2d7229c1d3ca34e3ce95cb0a7acf2104564c Mon Sep 17 00:00:00 2001 From: zhaoting Date: Tue, 31 Mar 2020 09:14:08 +0800 Subject: [PATCH 061/367] add RMSProp optimizer --- mindspore/ccsrc/transform/convert.cc | 6 +- mindspore/ccsrc/transform/op_declare.cc | 16 ++ mindspore/ccsrc/transform/op_declare.h | 6 + mindspore/nn/optim/__init__.py | 3 +- mindspore/nn/optim/rmsprop.py | 187 ++++++++++++++++++ mindspore/ops/_grad/grad_math_ops.py | 4 +- mindspore/ops/operations/__init__.py | 5 +- mindspore/ops/operations/nn_ops.py | 152 ++++++++++++++ .../utils/block_util.py | 4 + tests/ut/python/ops/test_ops.py | 12 ++ 10 files changed, 390 insertions(+), 5 deletions(-) create mode 100644 mindspore/nn/optim/rmsprop.py diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index 48056c38da..fdacff7ba8 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -183,6 +183,8 @@ const char kNameDiagPart[] = "DiagPart"; const char kNameSpaceToBatch[] = "SpaceToBatch"; const char kNameBatchToSpace[] = "BatchToSpace"; const char kNameAtan2[] = "Atan2"; +const char kNameApplyRMSProp[] = "ApplyRMSProp"; +const char kNameApplyCenteredRMSProp[] = "ApplyCenteredRMSProp"; // -----------------OpAdapter initialization-------------- std::unordered_map &DfGraphConvertor::get_adpt_map() { @@ -367,7 +369,9 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameDiagPart), ADPT_DESC(DiagPart)}, {string(kNameSpaceToBatch), ADPT_DESC(SpaceToBatchD)}, {string(kNameBatchToSpace), ADPT_DESC(BatchToSpaceD)}, - {string(kNameAtan2), ADPT_DESC(Atan2)}}; + {string(kNameAtan2), ADPT_DESC(Atan2)}, + {string(kNameApplyRMSProp), ADPT_DESC(ApplyRMSPropD)}, + {string(kNameApplyCenteredRMSProp), 
ADPT_DESC(ApplyCenteredRMSProp)}}; #ifdef ENABLE_GE adpt_map[string(kNamePrint)] = ADPT_DESC(Print); #endif diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index f7fdcfbe56..9258eb08db 100755 --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -1202,6 +1202,22 @@ INPUT_MAP(Atan2) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; ATTR_MAP(Atan2) = EMPTY_ATTR_MAP; OUTPUT_MAP(Atan2) = {{0, OUTPUT_DESC(y)}}; +// ApplyRMSPropD +INPUT_MAP(ApplyRMSPropD) = { + {1, INPUT_DESC(var)}, {2, INPUT_DESC(ms)}, {3, INPUT_DESC(mom)}, {4, INPUT_DESC(grad)}, {5, INPUT_DESC(lr)}}; +INPUT_ATTR_MAP(ApplyRMSPropD) = {{6, ATTR_DESC(rho, AnyTraits())}, + {7, ATTR_DESC(momentum, AnyTraits())}, + {8, ATTR_DESC(epsilon, AnyTraits())}}; +ATTR_MAP(ApplyRMSPropD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; +OUTPUT_MAP(ApplyRMSPropD) = {{0, OUTPUT_DESC(var)}}; + +// ApplyCenteredRMSProp +INPUT_MAP(ApplyCenteredRMSProp) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(mg)}, {3, INPUT_DESC(ms)}, + {4, INPUT_DESC(mom)}, {5, INPUT_DESC(grad)}, {6, INPUT_DESC(lr)}, + {7, INPUT_DESC(rho)}, {8, INPUT_DESC(momentum)}, {9, INPUT_DESC(epsilon)}}; +ATTR_MAP(ApplyCenteredRMSProp) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; +OUTPUT_MAP(ApplyCenteredRMSProp) = {{0, OUTPUT_DESC(var)}}; + #ifdef ENABLE_GE // Print INPUT_MAP(Print) = EMPTY_INPUT_MAP; diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 1924d2719b..031ce80865 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -445,6 +445,12 @@ DECLARE_OP_ADAPTER(BatchToSpaceD) DECLARE_OP_USE_OUTPUT(BatchToSpaceD) DECLARE_OP_ADAPTER(Atan2) DECLARE_OP_USE_OUTPUT(Atan2) +DECLARE_OP_ADAPTER(ApplyRMSPropD) +DECLARE_OP_USE_INPUT_ATTR(ApplyRMSPropD) +DECLARE_OP_USE_OUTPUT(ApplyRMSPropD) +DECLARE_OP_ADAPTER(ApplyCenteredRMSProp) +DECLARE_OP_USE_OUTPUT(ApplyCenteredRMSProp) + 
#ifdef ENABLE_GE DECLARE_OP_ADAPTER(Print) DECLARE_OP_USE_DYN_INPUT(Print) diff --git a/mindspore/nn/optim/__init__.py b/mindspore/nn/optim/__init__.py index 6f7f6fbd46..8f21179893 100644 --- a/mindspore/nn/optim/__init__.py +++ b/mindspore/nn/optim/__init__.py @@ -25,6 +25,7 @@ from .lamb import Lamb from .sgd import SGD from .lars import LARS from .ftrl import FTRL +from .rmsprop import RMSProp __all__ = ['Optimizer', 'Momentum', 'LARS', 'Adam', 'AdamWeightDecay', - 'AdamWeightDecayDynamicLR', 'Lamb', 'SGD', 'FTRL'] + 'AdamWeightDecayDynamicLR', 'Lamb', 'SGD', 'FTRL', 'RMSProp'] diff --git a/mindspore/nn/optim/rmsprop.py b/mindspore/nn/optim/rmsprop.py new file mode 100644 index 0000000000..3000fdeeee --- /dev/null +++ b/mindspore/nn/optim/rmsprop.py @@ -0,0 +1,187 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""rmsprop""" +from mindspore.ops import functional as F, composite as C, operations as P +from mindspore.common.initializer import initializer +from mindspore.common.parameter import Parameter +from mindspore._checkparam import ParamValidator as validator +import mindspore.common.dtype as mstype +from .optimizer import Optimizer, grad_scale + +rmsprop_opt = C.MultitypeFuncGraph("rmsprop_opt") +centered_rmsprop_opt = C.MultitypeFuncGraph("rmsprop_opt") + + +@rmsprop_opt.register("Function", "Number", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor") +def _rmsprop_opt(opt, learning_rate, decay, epsilon, momentum, weight, ms, mom, grad): + """Apply rmsprop optimizer to the weight parameter.""" + success = True + success = F.depend(success, opt(weight, ms, mom, grad, learning_rate, decay, momentum, epsilon)) + return success + + +@rmsprop_opt.register("Function", "Tensor", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", "Tensor") +def _rmsprop_opt_dynamic_lr(opt, learning_rate, decay, epsilon, momentum, weight, ms, mom, grad): + """Apply rmsprop optimizer to the weight parameter using dynamic learning rate.""" + success = True + success = F.depend(success, opt(weight, ms, mom, grad, learning_rate, decay, momentum, epsilon)) + return success + + +@centered_rmsprop_opt.register("Function", "Number", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", + "Tensor", "Tensor") +def _centered_rmsprop_opt(opt, learning_rate, decay, epsilon, momentum, weight, mg, ms, mom, grad): + """Apply centered rmsprop optimizer to the weight parameter.""" + success = True + success = F.depend(success, opt(weight, mg, ms, mom, grad, learning_rate, decay, momentum, epsilon)) + return success + + +@centered_rmsprop_opt.register("Function", "Tensor", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor", + "Tensor", "Tensor") +def _centered_rmsprop_opt_dynamic_lr(opt, 
learning_rate, decay, epsilon, momentum, weight, mg, ms, mom, grad): + """Apply centered rmsprop optimizer to the weight parameter using dynamic learning rate.""" + success = True + success = F.depend(success, opt(weight, mg, ms, mom, grad, learning_rate, decay, momentum, epsilon)) + return success + + +class RMSProp(Optimizer): + """ + Implements Root Mean Squared Propagation (RMSProp) algorithm. + + Note: + Update `params` according to the RMSProp algorithm. + + The equation is as follows: + + .. math:: + s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2 + + .. math:: + m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} + \\epsilon}} \\nabla Q_{i}(w) + + .. math:: + w = w - m_{t} + + The first equation calculates moving average of the squared gradient for + each weight. Then dividing the gradient by :math:`\\sqrt{ms_{t} + \\epsilon}`. + + if centered is True: + + .. math:: + g_{t} = \\rho g_{t-1} + (1 - \\rho)\\nabla Q_{i}(w) + + .. math:: + s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2 + + .. math:: + m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} - g_{t}^2 + \\epsilon}} \\nabla Q_{i}(w) + + .. math:: + w = w - m_{t} + + where, :math:`w` represents `params`, which will be updated. + :math:`g_{t}` is mean gradients, :math:`g_{t-1}` is the last moment of :math:`g_{t}`. + :math:`s_{t}` is the mean square gradients, :math:`s_{t-1}` is the last moment of :math:`s_{t}`, + :math:`m_{t}` is moment, the delta of `w`, :math:`m_{t-1}` is the last moment of :math:`m_{t}`. + :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`. + :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`. + :math:`\\eta` is learning rate, represents `learning_rate`. :math:`\\nabla Q_{i}(w)` is gradientse, + represents `gradients`. + + Args: + params (list[Parameter]): A list of parameter, which will be updated. The element in `parameters` + should be class mindspore.Parameter. 
+ learning_rate (Union[float, Tensor, Iterable]): A value for the learning rate. When the learning_rate is + Iterable or a Tensor and the dims of the Tensor is 1, + use dynamic learning rate, then the i-th step will + take the i-th value as the learning rate. + When the learning_rate is float or learning_rate is a Tensor + but the dims of the Tensor is 0, use fixed learning rate. + Other cases are not supported. + decay (float): Decay rate. + momentum (float): Hyperparameter of type float, means momentum for the moving average. + epsilon (float): Term added to the denominator to improve numerical stability. Should be greater than 0. + use_locking (bool): Enable a lock to protect the update of variable and accumlation tensors. Default: False. + centered (bool): If True, gradients are normalized by the estimated variance of the gradient. Default: False + loss_scale (float): A floating point value for the loss scale. Default: 1.0. + + Inputs: + - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`. + + Outputs: + Tensor[bool], the value is True. 
+ + Examples: + >>> net = Net() + >>> loss = nn.SoftmaxCrossEntropyWithLogits() + >>> opt = RMSProp(params=net.trainable_params(), learning_rate=lr) + >>> model = Model(net, loss, opt) + """ + def __init__(self, params, learning_rate=0.1, decay=0.9, momentum=0.0, epsilon=1e-10, + use_locking=False, centered=False, loss_scale=1.0): + super(RMSProp, self).__init__(learning_rate, params) + + if isinstance(momentum, float) and momentum < 0.0: + raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum)) + + if decay < 0.0: + raise ValueError("decay should be at least 0.0, but got dampening {}".format(decay)) + self.decay = decay + self.epsilon = epsilon + + validator.check_type("use_locking", use_locking, [bool]) + validator.check_type("centered", centered, [bool]) + self.centered = centered + if centered: + self.opt = P.ApplyCenteredRMSProp(use_locking) + self.mg = self.parameters.clone(prefix="mean_grad", init='zeros') + else: + self.opt = P.ApplyRMSProp(use_locking) + + self.dynamic_lr = False + if not isinstance(learning_rate, float): + self.dynamic_lr = True + self.gather = P.GatherV2() + self.assignadd = P.AssignAdd() + self.global_step = Parameter(initializer(0, [1], mstype.int32), name="global_step") + self.axis = 0 + + self.momentum = momentum + + self.ms = self.parameters.clone(prefix="mean_square", init='zeros') + self.moment = self.parameters.clone(prefix="moment", init='zeros') + self.hyper_map = C.HyperMap() + + self.decay = decay + self.reciprocal_scale = 1.0 / loss_scale + + def construct(self, gradients): + params = self.parameters + if self.reciprocal_scale != 1.0: + gradients = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), gradients) + if self.dynamic_lr: + lr = self.gather(self.learning_rate, self.global_step, self.axis) + F.control_depend(lr, self.assignadd(self.global_step, self.one)) + else: + lr = self.learning_rate + if self.centered: + success = self.hyper_map(F.partial(centered_rmsprop_opt, 
self.opt, lr, self.decay, self.epsilon, + self.momentum), params, self.mg, self.ms, self.moment, gradients) + else: + success = self.hyper_map(F.partial(rmsprop_opt, self.opt, lr, self.decay, self.epsilon, + self.momentum), params, self.ms, self.moment, gradients) + return success diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py index 9e90c5660c..1675855c88 100755 --- a/mindspore/ops/_grad/grad_math_ops.py +++ b/mindspore/ops/_grad/grad_math_ops.py @@ -394,8 +394,8 @@ def _split_shape_index(input_shape, axis): axis = tuple([axis]) reduction_indices = tuple([(i + rank) % rank for i in axis]) other_indices = tuple(set(range(rank)) - set(reduction_indices)) - reduced_num = reduce(lambda x, y: x * y, [input_shape[i] for i in reduction_indices]) - other_num = reduce(lambda x, y: x * y, [input_shape[i] for i in other_indices]) + reduced_num = reduce(lambda x, y: x * y, [1] + [input_shape[i] for i in reduction_indices]) + other_num = reduce(lambda x, y: x * y, [1] + [input_shape[i] for i in other_indices]) perm = reduction_indices + other_indices return tuple([reduced_num, other_num]), perm diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 5c98568b8a..727ddaf88f 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -65,7 +65,8 @@ from .nn_ops import (LSTM, SGD, Adam, ApplyMomentum, BatchNorm, SmoothL1Loss, Softmax, SoftmaxCrossEntropyWithLogits, ROIAlign, SparseSoftmaxCrossEntropyWithLogits, Tanh, - TopK, BinaryCrossEntropy, SparseApplyAdagrad, LARSUpdate, ApplyFtrl) + TopK, BinaryCrossEntropy, SparseApplyAdagrad, LARSUpdate, ApplyFtrl, + ApplyRMSProp, ApplyCenteredRMSProp) from .other_ops import Assign, IOU, BoundingBoxDecode, BoundingBoxEncode, CheckValid, MakeRefKey @@ -228,6 +229,8 @@ __all__ = [ "SpaceToBatch", "BatchToSpace", "Atan2", + "ApplyRMSProp", + "ApplyCenteredRMSProp" ] __all__.sort() diff --git a/mindspore/ops/operations/nn_ops.py 
b/mindspore/ops/operations/nn_ops.py index afa4c7dfe3..b9ab7e8dc9 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -1359,6 +1359,158 @@ class SGD(PrimitiveWithInfer): validator.check_typename("stat_dtype", stat_dtype, [mstype.float16, mstype.float32]) return parameters_dtype +class ApplyRMSProp(PrimitiveWithInfer): + """ + Optimizer that implements the Root Mean Square prop(RMSProp) algorithm. + + Note: + Update `var` according to the RMSProp algorithm. + + .. math:: + s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2 + + .. math:: + m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} + \\epsilon}} \\nabla Q_{i}(w) + + .. math:: + w = w - m_{t} + + where, :math:`w` represents `var`, which will be updated. + :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`, + :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`. + :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`. + :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`. + :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`. + + Args: + use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False. + + Inputs: + - **var** (Tensor) - Weights to be update. + - **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`. + - **moment** (Tensor) - Delta of `var`, must have the same type as `var`. + - **grad** (Tensor) - Gradients, must have the same type as `var`. + - **learning_rate** (Union[Number, Tensor]) - Learning rate. + - **decay** (float) - Decay rate. + - **momentum** (float) - Momentum. + - **epsilon** (float) - Ridge term. + + Outputs: + Tensor, parameters to be update. 
+ + Examples: + >>> net = Net() + >>> loss = nn.SoftmaxCrossEntropyWithLogits() + >>> opt = RMSProp(params=net.trainable_params(), learning_rate=learning_rate) + >>> model = Model(net, loss, opt) + """ + + @prim_attr_register + def __init__(self, use_locking=False): + self.use_locking = validator.check_type("use_locking", use_locking, [bool]) + + def infer_shape(self, var_shape, mean_square_shape, moment_shape, grad_shape, learning_rate_shape, decay_shape, + momentum_shape, epsilon_shape): + validator.check_param_equal("var_shape", var_shape, "mean_square_shape", mean_square_shape) + validator.check_param_equal("var_shape", var_shape, "moment_shape", moment_shape) + validator.check_param_equal("var_shape", var_shape, "grad_shape", grad_shape) + return var_shape + + def infer_dtype(self, var_dtype, mean_square_dtype, moment_dtype, grad_dtype, learning_rate_dtype, decay_dtype, + momentum_dtype, epsilon_dtype): + validator.check_subclass("var_dtype", var_dtype, mstype.tensor) + validator.check_subclass("mean_square_dtype", mean_square_dtype, mstype.tensor) + validator.check_subclass("moment_dtype", moment_dtype, mstype.tensor) + validator.check_subclass("grad_dtype", moment_dtype, mstype.tensor) + args = {"var_dtype": var_dtype, "mean_square_dtype": mean_square_dtype, "moment_dtype": moment_dtype, + "grad_dtype": grad_dtype} + validator.check_type_same(args, mstype.number_type) + + args = {"learning_rate_dtype": learning_rate_dtype, "decay_dtype": decay_dtype, + 'momentum_dtype': momentum_dtype, "epsilon_dtype": epsilon_dtype} + validator.check_type_same(args, [mstype.float16, mstype.float32]) + return var_dtype + + +class ApplyCenteredRMSProp(PrimitiveWithInfer): + """ + Optimizer that implements the centered RMSProp algorithm. + + Note: + Update `var` according to the centered RMSProp algorithm. + + .. math:: + g_{t} = \\rho g_{t-1} + (1 - \\rho)\\nabla Q_{i}(w) + + .. math:: + s_{t} = \\rho s_{t-1} + (1 - \\rho)(\\nabla Q_{i}(w))^2 + + .. 
math:: + m_{t} = \\beta m_{t-1} + \\frac{\\eta} {\\sqrt{s_{t} - g_{t}^2 + \\epsilon}} \\nabla Q_{i}(w) + + .. math:: + w = w - m_{t} + + where, :math:`w` represents `var`, which will be updated. + :math:`g_{t}` represents `mean_gradient`, :math:`g_{t-1}` is the last momentent of :math:`g_{t}`. + :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`, + :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`. + :math:`\\rho` represents `decay`. :math:`\\beta` is the momentum term, represents `momentum`. + :math:`\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`. + :math:`\\eta` represents `learning_rate`. :math:`\\nabla Q_{i}(w)` represents `grad`. + + Args: + use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False. + + Inputs: + - **var** (Tensor) - Weights to be update. + - **mean_gradient** (Tensor) - Mean gradients, must have the same type as `var`. + - **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`. + - **moment** (Tensor) - Delta of `var`, must have the same type as `var`. + - **grad** (Tensor) - Gradients, must have the same type as `var`. + - **learning_rate** (Union[Number, Tensor]) - Learning rate. + - **decay** (float) - Decay rate. + - **momentum** (float) - Momentum. + - **epsilon** (float) - Ridge term. + + Outputs: + Tensor, parameters to be update. 
+ + Examples: + >>> net = Net() + >>> loss = nn.SoftmaxCrossEntropyWithLogits() + >>> opt = RMSProp(params=net.trainable_params(), learning_rate=learning_rate, centered=True) + >>> model = Model(net, loss, opt) + """ + + @prim_attr_register + def __init__(self, use_locking=False): + self.use_locking = validator.check_type("use_locking", use_locking, [bool]) + + def infer_shape(self, var_shape, mean_gradient_shape, mean_square_shape, moment_shape, grad_shape, + learning_rate_shape, decay_shape, momentum_shape, epsilon_shape): + validator.check_param_equal("var_shape", var_shape, "mean_gradient_shape", mean_gradient_shape) + validator.check_param_equal("var_shape", var_shape, "mean_square_shape", mean_square_shape) + validator.check_param_equal("var_shape", var_shape, "moment_shape", moment_shape) + validator.check_param_equal("var_shape", var_shape, "grad_shape", grad_shape) + return var_shape + + def infer_dtype(self, var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype, grad_dtype, + learning_rate_dtype, rho_dtype, momentum_dtype, epsilon_dtype): + validator.check_subclass("var_dtype", var_dtype, mstype.tensor) + validator.check_subclass("mean_gradient_dtype", mean_gradient_dtype, mstype.tensor) + validator.check_subclass("mean_square_dtype", mean_square_dtype, mstype.tensor) + validator.check_subclass("moment_dtype", moment_dtype, mstype.tensor) + validator.check_subclass("grad_dtype", moment_dtype, mstype.tensor) + args = {"var_dtype": var_dtype, "mean_gradient_dtype": mean_gradient_dtype, + "mean_square_dtype": mean_square_dtype, "moment_dtype": moment_dtype, "grad_dtype": grad_dtype} + validator.check_type_same(args, mstype.number_type) + + args = {"learning_rate_dtype": learning_rate_dtype, "rho_dtype": rho_dtype, 'momentum_dtype': momentum_dtype, + "epsilon_dtype": epsilon_dtype} + validator.check_type_same(args, [mstype.float16, mstype.float32]) + return var_dtype + class LayerNorm(Primitive): r""" diff --git 
a/tests/mindspore_test_framework/utils/block_util.py b/tests/mindspore_test_framework/utils/block_util.py index 9d75ae0888..b4a926c15d 100644 --- a/tests/mindspore_test_framework/utils/block_util.py +++ b/tests/mindspore_test_framework/utils/block_util.py @@ -223,6 +223,10 @@ class InputOpNet(nn.Cell): x = self.op(x1, x2, x3, x4, x5, self.c1) return x + def construct5_c4(self, x1, x2, x3, x4, x5): + x = self.op(x1, x2, x3, x4, x5, self.c1, self.c2, self.c3, self.c4) + return x + def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_fake_input=False): if isinstance(op, nn.Cell): return op diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index 117036c37e..453ef9a652 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -805,6 +805,18 @@ test_case_nn_ops = [ 'desc_inputs': [[3, 3], [3, 3], [3, 3], [3, 3]], 'desc_bprop': [3, 3], 'skip': ['backward']}), + ('ApplyRMSProp', { + 'block': P.ApplyRMSProp(), + 'desc_const': [0.9, 0.0, 1e-10, 0.001], + 'desc_inputs': [[3, 3], [3, 3], [3, 3], [3, 3]], + 'desc_bprop': [3, 3], + 'skip': ['backward']}), + ('ApplyCenteredRMSProp', { + 'block': P.ApplyCenteredRMSProp(), + 'desc_const': [0.9, 0.0, 1e-10, 0.001], + 'desc_inputs': [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3]], + 'desc_bprop': [3, 3], + 'skip': ['backward']}), ] test_case_array_ops = [ From ff4f935e400336fae2ec42b44ebd73c9397a982b Mon Sep 17 00:00:00 2001 From: lvliang Date: Wed, 1 Apr 2020 14:08:26 +0800 Subject: [PATCH 062/367] pynative-add-lenet --- tests/st/pynative/test_ascend_lenet.py | 157 +++++++++++++++++++++++++ 1 file changed, 157 insertions(+) create mode 100644 tests/st/pynative/test_ascend_lenet.py diff --git a/tests/st/pynative/test_ascend_lenet.py b/tests/st/pynative/test_ascend_lenet.py new file mode 100644 index 0000000000..4681454489 --- /dev/null +++ b/tests/st/pynative/test_ascend_lenet.py @@ -0,0 +1,157 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import pytest +import numpy as np +import time, math +import mindspore.nn as nn +from mindspore import context, Tensor, ParameterTuple +from mindspore.ops import operations as P +from mindspore.common.initializer import TruncatedNormal +from mindspore.ops import functional as F +from mindspore.ops import composite as C +from mindspore.common import dtype as mstype + +from mindspore.nn.wrap.cell_wrapper import WithLossCell +from mindspore.nn.optim import Momentum + +np.random.seed(1) + +def weight_variable(): + """weight initial""" + return TruncatedNormal(0.02) + + +def conv(in_channels, out_channels, kernel_size, stride=1, padding=0): + """weight initial for conv layer""" + weight = weight_variable() + return nn.Conv2d(in_channels, out_channels, + kernel_size=kernel_size, stride=stride, padding=padding, + weight_init=weight, has_bias=False, pad_mode="valid") + + +def fc_with_initialize(input_channels, out_channels): + """weight initial for fc layer""" + weight = weight_variable() + bias = weight_variable() + return nn.Dense(input_channels, out_channels, weight, bias) + + +class LeNet(nn.Cell): + """ + Lenet network + Args: + num_class (int): Num classes, Default: 10. 
+ Returns: + Tensor, output tensor + Examples: + >>> LeNet(num_class=10) + """ + def __init__(self, num_class=10): + super(LeNet, self).__init__() + self.num_class = num_class + self.batch_size = 32 + self.conv1 = conv(1, 6, 5) + self.conv2 = conv(6, 16, 5) + self.fc1 = fc_with_initialize(16 * 5 * 5, 120) + self.fc2 = fc_with_initialize(120, 84) + self.fc3 = fc_with_initialize(84, self.num_class) + self.relu = nn.ReLU() + self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2) + self.reshape = P.Reshape() + + def construct(self, x): + x = self.conv1(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.conv2(x) + x = self.relu(x) + x = self.max_pool2d(x) + x = self.reshape(x, (self.batch_size, -1)) + x = self.fc1(x) + x = self.relu(x) + x = self.fc2(x) + x = self.relu(x) + x = self.fc3(x) + return x + + +class CrossEntropyLoss(nn.Cell): + """ + Define loss for network + """ + def __init__(self): + super(CrossEntropyLoss, self).__init__() + self.cross_entropy = P.SoftmaxCrossEntropyWithLogits() + self.mean = P.ReduceMean() + self.one_hot = P.OneHot() + self.on_value = Tensor(1.0, mstype.float32) + self.off_value = Tensor(0.0, mstype.float32) + self.num = Tensor(32.0, mstype.float32) + + def construct(self, logits, label): + label = self.one_hot(label, F.shape(logits)[1], self.on_value, self.off_value) + loss = self.cross_entropy(logits, label)[0] + loss = P.RealDiv()(P.ReduceSum()(loss, -1), self.num) + return loss + + +class GradWrap(nn.Cell): + """ + GradWrap definition + """ + def __init__(self, network): + super(GradWrap, self).__init__() + self.network = network + self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters())) + + def construct(self, x, label): + weights = self.weights + return C.grad_by_list(self.network, weights)(x, label) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_ascend_training +@pytest.mark.env_single +def test_ascend_pynative_lenet(): + context.set_context(mode=context.PYNATIVE_MODE, 
device_target="Ascend") + + epoch_size = 20 + batch_size = 32 + inputs = Tensor(np.ones([batch_size, 1, 32, 32]).astype(np.float32)) + labels = Tensor(np.ones([batch_size]).astype(np.int32)) + + net = LeNet() + criterion = CrossEntropyLoss() + optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9) + + net_with_criterion = WithLossCell(net, criterion) + train_network = GradWrap(net_with_criterion) + train_network.set_train() + total_time = 0 + + for epoch in range(0, epoch_size): + start_time = time.time() + fw_output = net(inputs) + loss_output = criterion(fw_output, labels) + grads = train_network(inputs, labels) + success = optimizer(grads) + end_time = time.time() + cost_time = end_time - start_time + total_time = total_time + cost_time + + print("======epoch: ", epoch, " loss: ", loss_output.asnumpy(), " cost time: ", cost_time) + assert(total_time < 20.0) + assert(loss_output.asnumpy() < 0.01) + \ No newline at end of file From 7f53bb062d090dea2c6bef4b92e45aae3b1f5549 Mon Sep 17 00:00:00 2001 From: YuJianfeng Date: Wed, 1 Apr 2020 19:42:13 +0800 Subject: [PATCH 063/367] Optimize depend edge with make tuple input --- .../pre_activate/pass/optimize_dependence.cc | 93 +++++++++++++++---- .../pass/optimize_dependence_test.cc | 20 ++++ .../pre_activate/optimize_dependence_test.py | 22 +++++ 3 files changed, 116 insertions(+), 19 deletions(-) diff --git a/mindspore/ccsrc/pre_activate/pass/optimize_dependence.cc b/mindspore/ccsrc/pre_activate/pass/optimize_dependence.cc index 6ae3f3be36..db32354abf 100644 --- a/mindspore/ccsrc/pre_activate/pass/optimize_dependence.cc +++ b/mindspore/ccsrc/pre_activate/pass/optimize_dependence.cc @@ -27,6 +27,69 @@ namespace mindspore { namespace opt { constexpr auto kSingleInputIndex = 1; +namespace { +AnfNodePtr GetReplaceNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node) { + MS_EXCEPTION_IF_NULL(func_graph); + MS_EXCEPTION_IF_NULL(node); + if (!node->isa()) { + return nullptr; + } + auto 
cnode = node->cast(); + MS_EXCEPTION_IF_NULL(cnode); + string op_name = AnfAlgo::GetCNodeName(cnode); + // Currently we only eliminate transdata or cast nodes. + if (op_name != kTransDataOpName && op_name != prim::kPrimCast->name()) { + return nullptr; + } + auto manager = func_graph->manager(); + MS_EXCEPTION_IF_NULL(manager); + // Check whether the node has only one output node. + if (manager->node_users().find(cnode) == manager->node_users().end()) { + MS_LOG(EXCEPTION) << "The node should be used by at least another node's input"; + } + if (manager->node_users()[cnode].size() > 1) { + return nullptr; + } + CheckCNodeInputSize(cnode, kSingleInputIndex + 1); + return cnode->input(kSingleInputIndex); +} + +bool ReplaceMakeTuple(const FuncGraphPtr &func_graph, const CNodePtr &cnode) { + MS_EXCEPTION_IF_NULL(func_graph); + MS_EXCEPTION_IF_NULL(cnode); + if (AnfAlgo::GetCNodeName(cnode) != prim::kPrimMakeTuple->name()) { + return false; + } + std::vector new_make_tuple_inputs; + bool need_update = false; + for (const auto &input : cnode->inputs()) { + AnfNodePtr replace_input = GetReplaceNode(func_graph, input); + // If replace input is not null, it will be the input of the TransData or Cast. 
+ if (replace_input == nullptr) { + new_make_tuple_inputs.push_back(input); + continue; + } + new_make_tuple_inputs.push_back(replace_input); + need_update = true; + } + if (need_update) { + auto kernel_graph = func_graph->cast>(); + CNodePtr new_make_tuple = nullptr; + if (kernel_graph == nullptr) { + new_make_tuple = func_graph->NewCNode(new_make_tuple_inputs); + } else { + new_make_tuple = kernel_graph->NewCNode(cnode); + } + MS_EXCEPTION_IF_NULL(new_make_tuple); + new_make_tuple->set_inputs(new_make_tuple_inputs); + auto manager = func_graph->manager(); + MS_EXCEPTION_IF_NULL(manager); + manager->Replace(cnode, new_make_tuple); + } + return true; +} +} // namespace + const BaseRef OptimizeDependence::DefinePattern() const { VarPtr X = std::make_shared("X"); MS_EXCEPTION_IF_NULL(X); @@ -43,9 +106,8 @@ const AnfNodePtr OptimizeDependence::Process(const FuncGraphPtr &func_graph, con return nullptr; } auto depend_cnode = node->cast(); - if (depend_cnode->inputs().size() < kDependInputNum) { - return nullptr; - } + MS_EXCEPTION_IF_NULL(depend_cnode); + CheckCNodeInputSize(depend_cnode, kDependInputNum); auto replacing_node = depend_cnode->input(kDependInputNum - 1); MS_EXCEPTION_IF_NULL(replacing_node); if (!replacing_node->isa()) { @@ -53,36 +115,29 @@ const AnfNodePtr OptimizeDependence::Process(const FuncGraphPtr &func_graph, con } auto replacing_cnode = replacing_node->cast(); MS_EXCEPTION_IF_NULL(replacing_cnode); - // Currently we only optimize transdata or cast nodes. - string replacing_cnode_op_name = AnfAlgo::GetCNodeName(replacing_cnode); - if (replacing_cnode_op_name != kTransDataOpName && replacing_cnode_op_name != prim::kPrimCast->name()) { + // Deal with the make_tuple with TransData or Cast inputs. + if (ReplaceMakeTuple(func_graph, replacing_cnode)) { return nullptr; } - auto manager = func_graph->manager(); - MS_EXCEPTION_IF_NULL(manager); - // Check whether the replacing node has only one input and one output. 
- if (replacing_cnode->inputs().size() != kSingleInputIndex + 1) { - return nullptr; - } - if (manager->node_users().find(replacing_node) == manager->node_users().end()) { - MS_LOG(EXCEPTION) << "The node should be used by at least another node input"; - } - if (manager->node_users()[replacing_node].size() > 1) { + AnfNodePtr replace_node = GetReplaceNode(func_graph, replacing_cnode); + if (replace_node == nullptr) { + MS_LOG(DEBUG) << "Can not find the TransData or Cast with single output node. Depend node: " << node->DebugString(); return nullptr; } std::vector new_depend_inputs = {depend_cnode->input(kAnfPrimitiveIndex), - depend_cnode->input(kRealInputIndexInDepend), - replacing_cnode->input(kSingleInputIndex)}; + depend_cnode->input(kRealInputIndexInDepend), replace_node}; auto kernel_graph = func_graph->cast>(); CNodePtr new_depend; if (kernel_graph == nullptr) { new_depend = func_graph->NewCNode(new_depend_inputs); + MS_EXCEPTION_IF_NULL(new_depend); + new_depend->set_abstract(node->abstract()); + new_depend->set_scope(node->scope()); } else { new_depend = kernel_graph->NewCNode(depend_cnode); MS_EXCEPTION_IF_NULL(new_depend); new_depend->set_inputs(new_depend_inputs); } - new_depend->set_abstract(node->abstract()); return new_depend; } } // namespace opt diff --git a/tests/ut/cpp/pre_activate/pass/optimize_dependence_test.cc b/tests/ut/cpp/pre_activate/pass/optimize_dependence_test.cc index 3f59b6159a..e95d63e93e 100644 --- a/tests/ut/cpp/pre_activate/pass/optimize_dependence_test.cc +++ b/tests/ut/cpp/pre_activate/pass/optimize_dependence_test.cc @@ -48,5 +48,25 @@ TEST_F(TestHWOptimizeDependence, test_optimize_dependence) { FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_optimize_dependence", "after"); EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); } + +TEST_F(TestHWOptimizeDependence, test_optimize_dependence_with_make_tuple) { + /* + * def before(x, y, a, b): + * z = make_tuple(TransData(a), TransData(b)) + * depend_intput = depend(y, z) + * 
sum = add(x, depend_intput) + * return sum + */ + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_optimize_dependence_with_make_tuple", "before"); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(g); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_optimize_dependence_with_make_tuple", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} } // namespace opt } // namespace mindspore diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/optimize_dependence_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/optimize_dependence_test.py index 45c419d25d..05eb057327 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/optimize_dependence_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/optimize_dependence_test.py @@ -18,6 +18,8 @@ from mindspore.ops import Primitive depend = Primitive('depend') TransData = Primitive('TransData') add = P.TensorAdd() +make_tuple = Primitive('make_tuple') + class FnDict: def __init__(self): @@ -47,3 +49,23 @@ def test_optimize_dependence(tag): return sum return fns[tag] + + +def test_optimize_dependence_with_make_tuple(tag): + fns = FnDict() + + @fns + def before(x, y, a, b): + z = make_tuple(TransData(a), TransData(b)) + depend_intput = depend(y, z) + sum = add(x, depend_intput) + return sum + + @fns + def after(x, y, a, b): + z = make_tuple(a, b) + depend_intput = depend(y, z) + sum = add(x, depend_intput) + return sum + + return fns[tag] From 15d061f85281a60ef0e2e558702ebd6cf454c7ba Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Thu, 2 Apr 2020 12:05:28 +0800 Subject: [PATCH 064/367] pass str to ge.exec.EnableDump --- mindspore/ccsrc/utils/context/ms_context.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ccsrc/utils/context/ms_context.cc 
b/mindspore/ccsrc/utils/context/ms_context.cc index bf05af9858..5de28eac98 100644 --- a/mindspore/ccsrc/utils/context/ms_context.cc +++ b/mindspore/ccsrc/utils/context/ms_context.cc @@ -274,7 +274,7 @@ void MsContext::SetHcclOptions(std::map* ge_options) c void MsContext::GetGeOptions(std::map* ge_options) const { #ifdef ENABLE_GE (*ge_options)["device_id"] = "0"; - (*ge_options)["ge.exec.enableDump"] = enable_dump_; + (*ge_options)["ge.exec.enableDump"] = std::to_string(enable_dump_); (*ge_options)["ge.exec.dumpPath"] = save_dump_path_; // only not supported in ge auto tbe_plugin_path = common::GetEnv("ME_TBE_PLUGIN_PATH"); From c42e53ae5b1b4d5355469859de84d6dc57df3b51 Mon Sep 17 00:00:00 2001 From: geekun Date: Mon, 30 Mar 2020 19:20:26 +0800 Subject: [PATCH 065/367] fix custom op bug and add custom op check --- mindspore/ccsrc/transform/op_adapter.h | 25 ++++++++++++++++++++ mindspore/ccsrc/transform/op_adapter_util.cc | 8 ++++++- 2 files changed, 32 insertions(+), 1 deletion(-) mode change 100755 => 100644 mindspore/ccsrc/transform/op_adapter_util.cc diff --git a/mindspore/ccsrc/transform/op_adapter.h b/mindspore/ccsrc/transform/op_adapter.h index aa466adbb8..929117101b 100644 --- a/mindspore/ccsrc/transform/op_adapter.h +++ b/mindspore/ccsrc/transform/op_adapter.h @@ -279,6 +279,31 @@ class OpAdapter : public BaseOpAdapter { } OutHandler getOutput(const OperatorPtr& op, int index) override { + MS_EXCEPTION_IF_NULL(op); + if (IsCustomOp(op)) { + return getCustomOutput(op, index); + } + return getNormalOutput(op, index); + } + + OutHandler getCustomOutput(const OperatorPtr& op, int index) { + MS_EXCEPTION_IF_NULL(op); + auto it = cus_output_map_.find(op->GetOpType()); + if (it == cus_output_map_.end()) { + MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has both OUTPUT is not supported!"; + return OutHandler(); + } + + std::unordered_map& output_map = it->second; + + if ((output_map.find(index) != output_map.end())) { + return OutHandler(op, 
output_map[index]); + } + MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has no OUTPUT index(" << index << ")!"; + return OutHandler(); + } + + OutHandler getNormalOutput(const OperatorPtr& op, int index) { MS_EXCEPTION_IF_NULL(op); if (!dyn_output_map_.empty() && !output_map_.empty()) { MS_LOG(ERROR) << "OpAdpator(" << op->GetName() << ") has both OUTPUT and DYN_OUTPUT is not supported!"; diff --git a/mindspore/ccsrc/transform/op_adapter_util.cc b/mindspore/ccsrc/transform/op_adapter_util.cc old mode 100755 new mode 100644 index 49b8714837..d52699fa8f --- a/mindspore/ccsrc/transform/op_adapter_util.cc +++ b/mindspore/ccsrc/transform/op_adapter_util.cc @@ -223,7 +223,13 @@ bool IsCustomPrim(const PrimitivePtr& prim) { return false; } - return GetValue(flag); + bool is_custom_op = GetValue(flag); + if (!is_custom_op && prim->GetAttr("_custom_op_impl_config_path") != nullptr) { + MS_LOG(EXCEPTION) << "The custom op flag is false, but the op information config path is not null, non-custom op " + "can not assign the op information config path."; + } + + return is_custom_op; } bool IsCustomCNode(const AnfNodePtr& anf) { From 60b68a1470f43304d0b734a5e7b433a90834bee7 Mon Sep 17 00:00:00 2001 From: Su Teng Date: Thu, 2 Apr 2020 15:14:47 +0800 Subject: [PATCH 066/367] sort include file in parallel dir --- .../allreduce_fusion/allreduce_fusion.cc | 12 ++++----- .../allreduce_fusion/allreduce_fusion.h | 2 +- .../allreduce_fusion/allreduce_graph.cc | 2 +- .../allreduce_fusion/allreduce_graph.h | 6 ++--- .../allreduce_fusion/allreduce_node.h | 4 +-- .../allreduce_fusion/step_allreduce_fusion.cc | 6 ++--- .../ccsrc/parallel/auto_parallel/costmodel.cc | 4 +-- .../ccsrc/parallel/auto_parallel/costmodel.h | 4 +-- .../auto_parallel/dp_algo_costmodel.cc | 2 +- .../auto_parallel/dp_algo_costmodel.h | 2 +- .../parallel/auto_parallel/edge_costmodel.cc | 4 +-- .../parallel/auto_parallel/edge_costmodel.h | 12 ++++----- .../parallel/auto_parallel/graph_costmodel.cc | 4 +-- 
.../parallel/auto_parallel/graph_costmodel.h | 10 +++---- .../auto_parallel/operator_costmodel.h | 4 +-- .../auto_parallel/rec_core/rec_cost.cc | 8 +++--- .../auto_parallel/rec_core/rec_cost.h | 6 ++--- .../rec_core/rec_generate_strategy.cc | 6 ++--- .../rec_core/rec_generate_strategy.h | 4 +-- .../auto_parallel/rec_core/rec_graph.h | 4 +-- .../auto_parallel/rec_core/rec_parse_graph.cc | 8 +++--- .../auto_parallel/rec_core/rec_parse_graph.h | 6 ++--- .../auto_parallel/rec_core/rec_partition.cc | 8 +++--- .../auto_parallel/rec_core/rec_partition.h | 10 +++---- mindspore/ccsrc/parallel/context.cc | 8 +++--- mindspore/ccsrc/parallel/context.h | 4 +-- mindspore/ccsrc/parallel/costmodel_context.cc | 2 +- mindspore/ccsrc/parallel/costmodel_context.h | 2 +- mindspore/ccsrc/parallel/device_manager.h | 8 +++--- mindspore/ccsrc/parallel/device_matrix.cc | 8 +++--- .../parallel/graph_util/generate_graph.h | 6 ++--- .../parallel/graph_util/get_parallel_info.cc | 8 +++--- .../ccsrc/parallel/graph_util/graph_info.cc | 8 +++--- mindspore/ccsrc/parallel/group_manager.cc | 2 +- mindspore/ccsrc/parallel/group_manager.h | 2 +- mindspore/ccsrc/parallel/node_check.cc | 2 +- .../parallel/ops_info/activation_info.cc | 4 +-- .../ccsrc/parallel/ops_info/activation_info.h | 4 +-- .../parallel/ops_info/arithmetic_info.cc | 4 +-- .../ccsrc/parallel/ops_info/arithmetic_info.h | 4 +-- .../parallel/ops_info/batch_parallel_info.h | 2 +- .../ccsrc/parallel/ops_info/bias_add_info.cc | 4 +-- .../ccsrc/parallel/ops_info/bias_add_info.h | 4 +-- .../ops_info/comparison_function_info.h | 2 +- .../parallel/ops_info/dropout_do_mask_info.cc | 6 ++--- .../parallel/ops_info/dropout_do_mask_info.h | 4 +-- .../ops_info/elementary_function_info.h | 2 +- .../ccsrc/parallel/ops_info/generator_info.cc | 4 +-- .../ccsrc/parallel/ops_info/generator_info.h | 4 +-- .../ccsrc/parallel/ops_info/get_next_info.cc | 4 +-- .../ccsrc/parallel/ops_info/get_next_info.h | 4 +-- .../parallel/ops_info/l2_normalize_info.cc | 4 +-- 
.../parallel/ops_info/l2_normalize_info.h | 4 +-- .../ccsrc/parallel/ops_info/loss_info.cc | 4 +-- mindspore/ccsrc/parallel/ops_info/loss_info.h | 4 +-- .../ccsrc/parallel/ops_info/matmul_info.cc | 8 +++--- .../ccsrc/parallel/ops_info/matmul_info.h | 6 ++--- .../ccsrc/parallel/ops_info/onehot_info.cc | 8 +++--- .../ccsrc/parallel/ops_info/onehot_info.h | 4 +-- .../ccsrc/parallel/ops_info/operator_info.cc | 14 +++++----- .../ccsrc/parallel/ops_info/operator_info.h | 10 +++---- .../parallel/ops_info/ops_info_head_files.h | 22 ++++++++-------- .../ccsrc/parallel/ops_info/prelu_info.h | 2 +- .../parallel/ops_info/reduce_method_info.cc | 6 ++--- .../parallel/ops_info/reduce_method_info.h | 6 ++--- .../ccsrc/parallel/ops_info/reshape_info.h | 2 +- .../parallel/ops_info/tmp_identity_info.cc | 2 -- .../parallel/ops_info/tmp_identity_info.h | 4 +-- .../ccsrc/parallel/ops_info/transpose_info.h | 2 +- .../parallel/ops_info/virtual_dataset_info.h | 2 +- .../ccsrc/parallel/step_auto_parallel.cc | 26 +++++++++---------- mindspore/ccsrc/parallel/step_auto_parallel.h | 6 ++--- mindspore/ccsrc/parallel/step_parallel.cc | 18 ++++++------- mindspore/ccsrc/parallel/step_parallel.h | 4 +-- mindspore/ccsrc/parallel/strategy.h | 4 +-- .../parallel_strategy_checkpoint.cc | 4 +-- .../parallel_strategy_checkpoint.h | 2 +- .../parallel/tensor_layout/arrangement.cc | 8 +++--- .../parallel/tensor_layout/arrangement.h | 2 +- .../ccsrc/parallel/tensor_layout/array.cc | 2 +- .../tensor_layout/construct_operator.h | 2 +- .../parallel/tensor_layout/layout_transfer.cc | 2 +- mindspore/ccsrc/parallel/tensor_layout/map.cc | 8 +++--- mindspore/ccsrc/parallel/tensor_layout/map.h | 2 +- .../redistribution_layout_transfer.cc | 2 +- .../redistribution_operator_infer.h | 6 ++--- .../ccsrc/parallel/tensor_layout/shape_util.h | 4 +-- .../parallel/tensor_layout/tensor_info.h | 4 +-- .../parallel/tensor_layout/tensor_layout.cc | 12 ++++----- .../parallel/tensor_layout/tensor_layout.h | 2 +- 
.../tensor_layout/tensor_redistribution.cc | 2 +- .../tensor_layout/tensor_redistribution.h | 8 +++--- 92 files changed, 248 insertions(+), 250 deletions(-) diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.cc b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.cc index 37b6eb42ed..e4f54056a8 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.cc +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.cc @@ -15,16 +15,16 @@ */ #include "parallel/allreduce_fusion/allreduce_fusion.h" +#include #include -#include #include -#include -#include "utils/log_adapter.h" -#include "parallel/status.h" +#include #include "ir/func_graph.h" -#include "parallel/step_parallel.h" -#include "parallel/graph_util/node_info.h" #include "parallel/costmodel_context.h" +#include "parallel/graph_util/node_info.h" +#include "parallel/status.h" +#include "parallel/step_parallel.h" +#include "utils/log_adapter.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.h b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.h index de2844fa51..a2fea45187 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.h +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.h @@ -20,8 +20,8 @@ #include #include #include "ir/anf.h" -#include "parallel/status.h" #include "parallel/allreduce_fusion/allreduce_graph.h" +#include "parallel/status.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.cc b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.cc index 5c97eda8d8..559ce02349 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.cc +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.cc @@ -17,9 +17,9 @@ #include "parallel/allreduce_fusion/allreduce_graph.h" #include #include -#include "utils/log_adapter.h" #include "ir/anf.h" #include 
"parallel/allreduce_fusion/allreduce_node.h" +#include "utils/log_adapter.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.h b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.h index da4cbf8800..f0db78a130 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.h +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.h @@ -18,14 +18,14 @@ #define MINDSPORE_CCSRC_PARALLEL_ALLREDUCE_FUSION_ALLREDUCE_GRAPH_H_ #include -#include +#include #include #include -#include #include +#include #include "ir/anf.h" -#include "parallel/status.h" #include "parallel/allreduce_fusion/allreduce_node.h" +#include "parallel/status.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.h b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.h index f3eeb53ec7..dc6fbed4ba 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.h +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.h @@ -17,9 +17,9 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_ALLREDUCE_FUSION_ALLREDUCE_NODE_H_ #define MINDSPORE_CCSRC_PARALLEL_ALLREDUCE_FUSION_ALLREDUCE_NODE_H_ -#include -#include #include +#include +#include #include #include "ir/anf.h" #include "parallel/status.h" diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/step_allreduce_fusion.cc b/mindspore/ccsrc/parallel/allreduce_fusion/step_allreduce_fusion.cc index 4db7007448..9dbd3a0246 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/step_allreduce_fusion.cc +++ b/mindspore/ccsrc/parallel/allreduce_fusion/step_allreduce_fusion.cc @@ -17,12 +17,12 @@ #include "parallel/allreduce_fusion/step_allreduce_fusion.h" #include #include -#include "utils/log_adapter.h" -#include "parallel/status.h" -#include "parallel/context.h" #include "optimizer/optimizer.h" #include "parallel/allreduce_fusion/allreduce_fusion.h" +#include "parallel/context.h" #include 
"parallel/graph_util/graph_info.h" +#include "parallel/status.h" +#include "utils/log_adapter.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/costmodel.cc index 618e505eba..f5cf5069be 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/costmodel.cc @@ -15,9 +15,9 @@ */ #include "parallel/auto_parallel/costmodel.h" -#include -#include #include +#include +#include #include "parallel/auto_parallel/graph_costmodel.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/auto_parallel/costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/costmodel.h index 97155fb9c3..361c19573f 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/costmodel.h @@ -17,11 +17,11 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_AUTO_PARALLEL_COSTMODEL_H_ #define MINDSPORE_CCSRC_PARALLEL_AUTO_PARALLEL_COSTMODEL_H_ -#include #include -#include +#include #include #include +#include #include "parallel/strategy.h" #include "parallel/tensor_layout/tensor_info.h" diff --git a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.cc index 8e042eadab..060caa4cca 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.cc @@ -17,8 +17,8 @@ #include "parallel/auto_parallel/dp_algo_costmodel.h" #include -#include #include +#include namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h index de8ebc6fca..c9b6a07317 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h @@ -18,8 +18,8 @@ #define MINDSPORE_CCSRC_PARALLEL_AUTO_PARALLEL_DP_ALGO_COSTMODEL_H_ 
#include -#include #include +#include #include "ir/value.h" #include "parallel/auto_parallel/edge_costmodel.h" #include "parallel/auto_parallel/graph_costmodel.h" diff --git a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc index 30fa90457e..6381049f17 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc @@ -18,11 +18,11 @@ #include #include -#include #include +#include #include "parallel/auto_parallel/costmodel.h" -#include "parallel/tensor_layout/tensor_redistribution.h" #include "parallel/auto_parallel/graph_costmodel.h" +#include "parallel/tensor_layout/tensor_redistribution.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h index d2bad60215..1fa49029fa 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h @@ -17,16 +17,16 @@ #ifndef PARALLEL_AUTO_PARALLEL_EDGE_COSTMODEL_H_ #define PARALLEL_AUTO_PARALLEL_EDGE_COSTMODEL_H_ -#include -#include -#include #include +#include #include +#include +#include #include "common/utils.h" -#include "parallel/tensor_layout/tensor_layout.h" -#include "parallel/tensor_layout/tensor_info.h" -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/costmodel.h" +#include "parallel/ops_info/operator_info.h" +#include "parallel/tensor_layout/tensor_info.h" +#include "parallel/tensor_layout/tensor_layout.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc index f99b271894..59b9d9e992 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc @@ -17,11 +17,11 @@ #include 
"parallel/auto_parallel/graph_costmodel.h" #include +#include +#include #include #include #include -#include -#include #include namespace mindspore { diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h index 3b04703a47..e4cbdffb61 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h @@ -17,17 +17,17 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_AUTO_PARALLEL_GRAPH_COSTMODEL_H_ #define MINDSPORE_CCSRC_PARALLEL_AUTO_PARALLEL_GRAPH_COSTMODEL_H_ -#include -#include #include -#include +#include #include +#include +#include #include "../../common.h" #include "common/utils.h" -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/edge_costmodel.h" -#include "parallel/ops_info/tmp_identity_info.h" #include "parallel/costmodel_context.h" +#include "parallel/ops_info/operator_info.h" +#include "parallel/ops_info/tmp_identity_info.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h index ad856fc910..9fb86d467e 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h @@ -17,10 +17,10 @@ #ifndef PARALLEL_AUTO_PARALLEL_OPERATOR_COSTMODEL_H_ #define PARALLEL_AUTO_PARALLEL_OPERATOR_COSTMODEL_H_ -#include #include -#include "parallel/tensor_layout/tensor_info.h" +#include #include "parallel/device_manager.h" +#include "parallel/tensor_layout/tensor_info.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_cost.cc b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_cost.cc index 45407a89e6..4591753efe 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_cost.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_cost.cc @@ -16,13 
+16,13 @@ #include "parallel/auto_parallel/rec_core/rec_cost.h" -#include -#include -#include -#include #include +#include #include #include +#include +#include +#include #include "ir/anf.h" diff --git a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_cost.h b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_cost.h index 7e07ff9be3..af37b9178e 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_cost.h +++ b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_cost.h @@ -18,13 +18,13 @@ #define PARALLEL_AUTO_PARALLEL_REC_COST_H_ #include -#include +#include #include #include -#include +#include -#include "parallel/auto_parallel/rec_core/rec_strategy.h" #include "parallel/auto_parallel/rec_core/rec_graph.h" +#include "parallel/auto_parallel/rec_core/rec_strategy.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.cc b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.cc index 06b7bf544a..60f3003a42 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.cc @@ -16,14 +16,14 @@ #include "parallel/auto_parallel/rec_core/rec_generate_strategy.h" -#include #include #include +#include -#include "parallel/ops_info/operator_info.h" +#include "ir/value.h" #include "parallel/auto_parallel/rec_core/rec_partition.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" -#include "ir/value.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.h b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.h index 7445d3940e..4abef843a8 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.h +++ b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_generate_strategy.h @@ -17,10 +17,10 @@ #ifndef 
PARALLEL_AUTO_PARALLEL_REC_GENERATE_STRATEGY_H_ #define PARALLEL_AUTO_PARALLEL_REC_GENERATE_STRATEGY_H_ -#include +#include #include #include -#include +#include #include "parallel/auto_parallel/rec_core/rec_graph.h" #include "parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_graph.h b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_graph.h index 209ce6b13e..6ab2782cb2 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_graph.h +++ b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_graph.h @@ -18,11 +18,11 @@ #define PARALLEL_AUTO_PARALLEL_REC_GRAPH_H_ #include -#include #include +#include -#include "parallel/auto_parallel/rec_core/rec_tensor.h" #include "parallel/auto_parallel/rec_core/rec_strategy.h" +#include "parallel/auto_parallel/rec_core/rec_tensor.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.cc b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.cc index 153ce2ea17..3ff3473298 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.cc @@ -17,14 +17,14 @@ #include "parallel/auto_parallel/rec_core/rec_parse_graph.h" #include +#include #include #include -#include -#include "parallel/ops_info/operator_info.h" -#include "parallel/auto_parallel/rec_core/rec_tensor.h" -#include "parallel/auto_parallel/rec_core/rec_graph.h" #include "ir/value.h" +#include "parallel/auto_parallel/rec_core/rec_graph.h" +#include "parallel/auto_parallel/rec_core/rec_tensor.h" +#include "parallel/ops_info/operator_info.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.h b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.h index 7904d260c0..7dfca86a21 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.h +++ 
b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_parse_graph.h @@ -17,11 +17,11 @@ #ifndef PARALLEL_AUTO_PARALLEL_REC_PARSE_GRAPH_H_ #define PARALLEL_AUTO_PARALLEL_REC_PARSE_GRAPH_H_ -#include +#include +#include #include #include -#include -#include +#include #include "parallel/auto_parallel/rec_core/rec_graph.h" #include "parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.cc b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.cc index a33b861f1a..24ad8ac203 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.cc @@ -16,13 +16,13 @@ #include "parallel/auto_parallel/rec_core/rec_partition.h" +#include +#include #include -#include +#include #include #include -#include -#include -#include +#include #include "ir/anf.h" #include "parallel/status.h" diff --git a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.h b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.h index 4e49305156..4f831f4f9a 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.h +++ b/mindspore/ccsrc/parallel/auto_parallel/rec_core/rec_partition.h @@ -17,17 +17,17 @@ #ifndef PARALLEL_AUTO_PARALLEL_REC_PARTITION_H_ #define PARALLEL_AUTO_PARALLEL_REC_PARTITION_H_ +#include #include -#include -#include #include -#include -#include #include +#include +#include +#include +#include "parallel/auto_parallel/rec_core/rec_cost.h" #include "parallel/auto_parallel/rec_core/rec_graph.h" #include "parallel/auto_parallel/rec_core/rec_strategy.h" -#include "parallel/auto_parallel/rec_core/rec_cost.h" #include "parallel/status.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/context.cc b/mindspore/ccsrc/parallel/context.cc index 64cecf1669..ab216cb22c 100644 --- a/mindspore/ccsrc/parallel/context.cc +++ b/mindspore/ccsrc/parallel/context.cc @@ -16,15 +16,15 @@ #include "parallel/context.h" 
-#include #include -#include -#include +#include #include #include +#include +#include -#include "parallel/device_manager.h" #include "common/utils.h" +#include "parallel/device_manager.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/context.h b/mindspore/ccsrc/parallel/context.h index 866609fbfe..265f5bac71 100644 --- a/mindspore/ccsrc/parallel/context.h +++ b/mindspore/ccsrc/parallel/context.h @@ -18,12 +18,12 @@ #define MINDSPORE_CCSRC_PARALLEL_CONTEXT_H_ #include +#include #include #include -#include -#include "parallel/status.h" #include "parallel/ops_info/ops_utils.h" +#include "parallel/status.h" #include "utils/convert_utils.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/costmodel_context.cc b/mindspore/ccsrc/parallel/costmodel_context.cc index 45b158bde5..0ebbd2c626 100644 --- a/mindspore/ccsrc/parallel/costmodel_context.cc +++ b/mindspore/ccsrc/parallel/costmodel_context.cc @@ -18,8 +18,8 @@ #include -#include "parallel/auto_parallel/graph_costmodel.h" #include "parallel/allreduce_fusion/allreduce_fusion.h" +#include "parallel/auto_parallel/graph_costmodel.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/costmodel_context.h b/mindspore/ccsrc/parallel/costmodel_context.h index a808fc556f..04782fa366 100644 --- a/mindspore/ccsrc/parallel/costmodel_context.h +++ b/mindspore/ccsrc/parallel/costmodel_context.h @@ -17,9 +17,9 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_COSTMODEL_CONTEXT_H_ #define MINDSPORE_CCSRC_PARALLEL_COSTMODEL_CONTEXT_H_ +#include #include #include -#include #include "utils/log_adapter.h" diff --git a/mindspore/ccsrc/parallel/device_manager.h b/mindspore/ccsrc/parallel/device_manager.h index 798d99383d..e87c1d740f 100644 --- a/mindspore/ccsrc/parallel/device_manager.h +++ b/mindspore/ccsrc/parallel/device_manager.h @@ -19,19 +19,19 @@ #include #include -#include #include #include #include #include +#include -#include "utils/convert_utils.h" #include 
"common/utils.h" #include "parallel/device.h" -#include "parallel/status.h" +#include "parallel/device_matrix.h" #include "parallel/group_manager.h" +#include "parallel/status.h" #include "parallel/strategy.h" -#include "parallel/device_matrix.h" +#include "utils/convert_utils.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/device_matrix.cc b/mindspore/ccsrc/parallel/device_matrix.cc index f9f314d5a3..a581dbf275 100644 --- a/mindspore/ccsrc/parallel/device_matrix.cc +++ b/mindspore/ccsrc/parallel/device_matrix.cc @@ -16,15 +16,15 @@ #include "parallel/device_matrix.h" -#include #include -#include -#include +#include #include +#include +#include #include -#include "parallel/status.h" #include "parallel/ops_info/operator_info.h" +#include "parallel/status.h" #include "utils/log_adapter.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/graph_util/generate_graph.h b/mindspore/ccsrc/parallel/graph_util/generate_graph.h index bb1f811f2f..c829e67b6a 100644 --- a/mindspore/ccsrc/parallel/graph_util/generate_graph.h +++ b/mindspore/ccsrc/parallel/graph_util/generate_graph.h @@ -17,12 +17,12 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_GRAPH_UTIL_GENERATE_GRAPH_H_ #define MINDSPORE_CCSRC_PARALLEL_GRAPH_UTIL_GENERATE_GRAPH_H_ -#include -#include -#include #include +#include #include +#include #include +#include #include "./common.h" #include "optimizer/opt.h" diff --git a/mindspore/ccsrc/parallel/graph_util/get_parallel_info.cc b/mindspore/ccsrc/parallel/graph_util/get_parallel_info.cc index 6619f2cc9c..3006cb7680 100644 --- a/mindspore/ccsrc/parallel/graph_util/get_parallel_info.cc +++ b/mindspore/ccsrc/parallel/graph_util/get_parallel_info.cc @@ -16,16 +16,16 @@ #include "parallel/graph_util/get_parallel_info.h" -#include #include +#include #include #include -#include "parallel/tensor_layout/tensor_layout.h" -#include "parallel/strategy.h" -#include "ir/func_graph.h" #include "common/utils.h" +#include "ir/func_graph.h" #include 
"parallel/graph_util/graph_info.h" +#include "parallel/strategy.h" +#include "parallel/tensor_layout/tensor_layout.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/graph_util/graph_info.cc b/mindspore/ccsrc/parallel/graph_util/graph_info.cc index be73683c57..46c9a37960 100644 --- a/mindspore/ccsrc/parallel/graph_util/graph_info.cc +++ b/mindspore/ccsrc/parallel/graph_util/graph_info.cc @@ -15,12 +15,12 @@ */ #include "parallel/graph_util/graph_info.h" -#include "ir/func_graph.h" -#include "utils/graph_utils.h" -#include "utils/context/ms_context.h" -#include "debug/draw.h" #include "debug/anf_ir_dump.h" #include "debug/anf_ir_utils.h" +#include "debug/draw.h" +#include "ir/func_graph.h" +#include "utils/context/ms_context.h" +#include "utils/graph_utils.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/group_manager.cc b/mindspore/ccsrc/parallel/group_manager.cc index cff5877f0a..1562cbc140 100644 --- a/mindspore/ccsrc/parallel/group_manager.cc +++ b/mindspore/ccsrc/parallel/group_manager.cc @@ -16,8 +16,8 @@ #include "parallel/group_manager.h" -#include #include +#include #include "parallel/device_manager.h" #include "parallel/ops_info/ops_utils.h" diff --git a/mindspore/ccsrc/parallel/group_manager.h b/mindspore/ccsrc/parallel/group_manager.h index 9e23569b15..430d2f64ed 100644 --- a/mindspore/ccsrc/parallel/group_manager.h +++ b/mindspore/ccsrc/parallel/group_manager.h @@ -18,9 +18,9 @@ #define MINDSPORE_CCSRC_PARALLEL_GROUP_MANAGER_H_ #include -#include #include #include +#include #include "parallel/device.h" #include "parallel/status.h" diff --git a/mindspore/ccsrc/parallel/node_check.cc b/mindspore/ccsrc/parallel/node_check.cc index db53bbc59f..fc6115c3c5 100644 --- a/mindspore/ccsrc/parallel/node_check.cc +++ b/mindspore/ccsrc/parallel/node_check.cc @@ -16,8 +16,8 @@ #include "parallel/node_check.h" -#include #include +#include #include "parallel/ops_info/ops_utils.h" diff --git 
a/mindspore/ccsrc/parallel/ops_info/activation_info.cc b/mindspore/ccsrc/parallel/ops_info/activation_info.cc index 6e581d6280..13155ee4f1 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.cc @@ -17,13 +17,13 @@ #include "parallel/ops_info/activation_info.h" #include -#include #include +#include #include "ir/value.h" +#include "parallel/auto_parallel/costmodel.h" #include "parallel/device_matrix.h" #include "parallel/strategy.h" -#include "parallel/auto_parallel/costmodel.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.h b/mindspore/ccsrc/parallel/ops_info/activation_info.h index d05f8743b0..3cadad6b80 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.h +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.h @@ -18,13 +18,13 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ACTIVATION_INFO_H_ #include +#include #include #include #include -#include -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.cc b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.cc index 48741690ba..02c26ea965 100644 --- a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.cc @@ -17,9 +17,9 @@ #include "parallel/ops_info/arithmetic_info.h" #include -#include -#include #include +#include +#include #include "parallel/device_matrix.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h index 734368a533..7cd0d66b1b 100644 --- a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h +++ b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h @@ -17,14 +17,14 @@ #ifndef 
MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ARITHMETIC_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ARITHMETIC_INFO_H_ +#include #include #include #include -#include #include "ir/value.h" -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h index 0ffdea97f3..57711b5298 100644 --- a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h +++ b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h @@ -17,10 +17,10 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BATCH_PARALLEL_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_BATCH_PARALLEL_INFO_H_ +#include #include #include #include -#include #include "ir/value.h" #include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/ops_info/bias_add_info.cc b/mindspore/ccsrc/parallel/ops_info/bias_add_info.cc index f07606e4d3..005edaf7c7 100644 --- a/mindspore/ccsrc/parallel/ops_info/bias_add_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/bias_add_info.cc @@ -17,9 +17,9 @@ #include "parallel/ops_info/bias_add_info.h" #include -#include -#include #include +#include +#include #include "parallel/device_matrix.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/ops_info/bias_add_info.h b/mindspore/ccsrc/parallel/ops_info/bias_add_info.h index e5001fc0d3..07f0bc00ff 100644 --- a/mindspore/ccsrc/parallel/ops_info/bias_add_info.h +++ b/mindspore/ccsrc/parallel/ops_info/bias_add_info.h @@ -19,13 +19,13 @@ #include +#include #include #include -#include #include "ir/value.h" -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git 
a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h index 8760ce5666..110a9a6c38 100644 --- a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h +++ b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h @@ -21,8 +21,8 @@ #include #include #include "ir/value.h" -#include "parallel/ops_info/arithmetic_info.h" #include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/arithmetic_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.cc b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.cc index 1b77b913da..c6cd94b7be 100644 --- a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.cc @@ -17,15 +17,15 @@ #include "parallel/ops_info/dropout_do_mask_info.h" #include -#include -#include #include +#include +#include #include "ir/value.h" +#include "parallel/auto_parallel/costmodel.h" #include "parallel/device_matrix.h" #include "parallel/strategy.h" #include "parallel/tensor_layout/tensor_redistribution.h" -#include "parallel/auto_parallel/costmodel.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h index 45d4c28d8e..e43601355a 100644 --- a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h +++ b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h @@ -17,14 +17,14 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_DROPOUT_DO_MASK_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_DROPOUT_DO_MASK_INFO_H_ +#include #include #include #include -#include #include "ir/value.h" -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git 
a/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h b/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h index 33ad04023b..57b4650f26 100644 --- a/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h +++ b/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h @@ -22,8 +22,8 @@ #include #include "ir/value.h" #include "parallel/auto_parallel/operator_costmodel.h" -#include "parallel/strategy.h" #include "parallel/ops_info/activation_info.h" +#include "parallel/strategy.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/ops_info/generator_info.cc b/mindspore/ccsrc/parallel/ops_info/generator_info.cc index b0fd0eaa9a..a39f9faab9 100644 --- a/mindspore/ccsrc/parallel/ops_info/generator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/generator_info.cc @@ -17,9 +17,9 @@ #include "parallel/ops_info/generator_info.h" #include -#include -#include #include +#include +#include #include "ir/value.h" #include "parallel/device_matrix.h" diff --git a/mindspore/ccsrc/parallel/ops_info/generator_info.h b/mindspore/ccsrc/parallel/ops_info/generator_info.h index 1473fead67..a280fac28e 100644 --- a/mindspore/ccsrc/parallel/ops_info/generator_info.h +++ b/mindspore/ccsrc/parallel/ops_info/generator_info.h @@ -17,13 +17,13 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GENERATOR_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GENERATOR_INFO_H_ +#include #include #include #include -#include -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/get_next_info.cc b/mindspore/ccsrc/parallel/ops_info/get_next_info.cc index f38baa1e4e..ac9acff41b 100644 --- a/mindspore/ccsrc/parallel/ops_info/get_next_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/get_next_info.cc @@ -17,9 +17,9 @@ #include "parallel/ops_info/get_next_info.h" #include 
-#include -#include #include +#include +#include #include "ir/value.h" #include "parallel/device_matrix.h" diff --git a/mindspore/ccsrc/parallel/ops_info/get_next_info.h b/mindspore/ccsrc/parallel/ops_info/get_next_info.h index 3dd639ba57..32adce1165 100644 --- a/mindspore/ccsrc/parallel/ops_info/get_next_info.h +++ b/mindspore/ccsrc/parallel/ops_info/get_next_info.h @@ -17,13 +17,13 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GETNEXT_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GETNEXT_INFO_H_ +#include #include #include #include -#include -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.cc b/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.cc index 183c5dec45..2955f76506 100644 --- a/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.cc @@ -17,9 +17,9 @@ #include "parallel/ops_info/l2_normalize_info.h" #include -#include -#include #include +#include +#include #include "parallel/device_matrix.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h b/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h index 1a67073065..c0af9dbcb9 100644 --- a/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h +++ b/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h @@ -17,15 +17,15 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_L2_NORMALIZE_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_L2_NORMALIZE_INFO_H_ +#include #include #include #include -#include #include "ir/value.h" +#include "parallel/auto_parallel/operator_costmodel.h" #include "parallel/ops_info/activation_info.h" #include "parallel/strategy.h" -#include "parallel/auto_parallel/operator_costmodel.h" namespace mindspore { namespace parallel { diff --git 
a/mindspore/ccsrc/parallel/ops_info/loss_info.cc b/mindspore/ccsrc/parallel/ops_info/loss_info.cc index 5ca383ebb5..31f80e338b 100644 --- a/mindspore/ccsrc/parallel/ops_info/loss_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/loss_info.cc @@ -17,9 +17,9 @@ #include "parallel/ops_info/loss_info.h" #include -#include -#include #include +#include +#include #include "ir/value.h" #include "parallel/device_matrix.h" diff --git a/mindspore/ccsrc/parallel/ops_info/loss_info.h b/mindspore/ccsrc/parallel/ops_info/loss_info.h index 585545302f..6a9697a447 100644 --- a/mindspore/ccsrc/parallel/ops_info/loss_info.h +++ b/mindspore/ccsrc/parallel/ops_info/loss_info.h @@ -17,14 +17,14 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_LOSS_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_LOSS_INFO_H_ +#include #include #include #include -#include #include "ir/value.h" -#include "parallel/ops_info/operator_info.h" #include "parallel/ops_info/activation_info.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc index 6103087a1d..ad6409be0a 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc @@ -18,16 +18,16 @@ #include #include -#include -#include #include #include +#include +#include #include "ir/value.h" +#include "parallel/auto_parallel/graph_costmodel.h" +#include "parallel/device_manager.h" #include "parallel/device_matrix.h" #include "parallel/tensor_layout/tensor_redistribution.h" -#include "parallel/device_manager.h" -#include "parallel/auto_parallel/graph_costmodel.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.h b/mindspore/ccsrc/parallel/ops_info/matmul_info.h index b434e4522d..7ced12b14a 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.h +++ 
b/mindspore/ccsrc/parallel/ops_info/matmul_info.h @@ -17,16 +17,16 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_MATMUL_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_MATMUL_INFO_H_ +#include #include #include #include -#include +#include "common/utils.h" #include "ir/value.h" +#include "parallel/auto_parallel/operator_costmodel.h" #include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" -#include "parallel/auto_parallel/operator_costmodel.h" -#include "common/utils.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/ops_info/onehot_info.cc b/mindspore/ccsrc/parallel/ops_info/onehot_info.cc index 9b8e9071b0..e07609d3c4 100644 --- a/mindspore/ccsrc/parallel/ops_info/onehot_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/onehot_info.cc @@ -16,15 +16,15 @@ #include "parallel/ops_info/onehot_info.h" -#include -#include #include +#include +#include #include "ir/value.h" -#include "parallel/device_matrix.h" -#include "parallel/strategy.h" #include "parallel/auto_parallel/costmodel.h" +#include "parallel/device_matrix.h" #include "parallel/graph_util/generate_graph.h" +#include "parallel/strategy.h" #include "utils/log_adapter.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/onehot_info.h b/mindspore/ccsrc/parallel/ops_info/onehot_info.h index 62c66695fa..4697e201a4 100644 --- a/mindspore/ccsrc/parallel/ops_info/onehot_info.h +++ b/mindspore/ccsrc/parallel/ops_info/onehot_info.h @@ -17,14 +17,14 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ONEHOT_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ONEHOT_INFO_H_ +#include #include #include #include -#include #include "ir/value.h" -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc index 
f187d38673..8b96425bf7 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc @@ -17,21 +17,21 @@ #include "parallel/ops_info/operator_info.h" #include +#include #include -#include -#include #include -#include #include +#include +#include -#include "ir/value.h" #include "ir/dtype.h" #include "ir/meta_tensor.h" -#include "utils/context/ms_context.h" -#include "utils/log_adapter.h" -#include "parallel/context.h" +#include "ir/value.h" #include "parallel/auto_parallel/edge_costmodel.h" #include "parallel/auto_parallel/graph_costmodel.h" +#include "parallel/context.h" +#include "utils/context/ms_context.h" +#include "utils/log_adapter.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.h b/mindspore/ccsrc/parallel/ops_info/operator_info.h index 89fd73564f..cc70f1b870 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.h +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.h @@ -25,17 +25,17 @@ #include #include -#include "utils/log_adapter.h" -#include "ir/base.h" #include "common/utils.h" +#include "ir/base.h" +#include "parallel/auto_parallel/costmodel.h" +#include "parallel/auto_parallel/operator_costmodel.h" #include "parallel/device_manager.h" #include "parallel/device_matrix.h" #include "parallel/group_manager.h" +#include "parallel/ops_info/ops_utils.h" #include "parallel/strategy.h" #include "parallel/tensor_layout/tensor_info.h" -#include "parallel/auto_parallel/costmodel.h" -#include "parallel/auto_parallel/operator_costmodel.h" -#include "parallel/ops_info/ops_utils.h" +#include "utils/log_adapter.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h b/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h index 7eb24bd30a..cc13512b54 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h @@ 
-18,22 +18,22 @@ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_OPS_INFO_HEAD_FILES_H_ #include "parallel/ops_info/activation_info.h" +#include "parallel/ops_info/arithmetic_info.h" #include "parallel/ops_info/batch_parallel_info.h" +#include "parallel/ops_info/bias_add_info.h" +#include "parallel/ops_info/comparison_function_info.h" +#include "parallel/ops_info/dropout_do_mask_info.h" +#include "parallel/ops_info/elementary_function_info.h" +#include "parallel/ops_info/generator_info.h" +#include "parallel/ops_info/get_next_info.h" +#include "parallel/ops_info/l2_normalize_info.h" #include "parallel/ops_info/loss_info.h" #include "parallel/ops_info/matmul_info.h" #include "parallel/ops_info/onehot_info.h" -#include "parallel/ops_info/virtual_dataset_info.h" -#include "parallel/ops_info/arithmetic_info.h" -#include "parallel/ops_info/elementary_function_info.h" -#include "parallel/ops_info/comparison_function_info.h" -#include "parallel/ops_info/l2_normalize_info.h" -#include "parallel/ops_info/reduce_method_info.h" -#include "parallel/ops_info/transpose_info.h" #include "parallel/ops_info/prelu_info.h" +#include "parallel/ops_info/reduce_method_info.h" #include "parallel/ops_info/reshape_info.h" -#include "parallel/ops_info/generator_info.h" -#include "parallel/ops_info/dropout_do_mask_info.h" -#include "parallel/ops_info/get_next_info.h" -#include "parallel/ops_info/bias_add_info.h" +#include "parallel/ops_info/transpose_info.h" +#include "parallel/ops_info/virtual_dataset_info.h" #endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_HEAD_FILES_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/prelu_info.h b/mindspore/ccsrc/parallel/ops_info/prelu_info.h index 2e74118f80..d491ecb331 100644 --- a/mindspore/ccsrc/parallel/ops_info/prelu_info.h +++ b/mindspore/ccsrc/parallel/ops_info/prelu_info.h @@ -17,10 +17,10 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_PRELU_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_PRELU_INFO_H_ +#include #include #include #include -#include #include 
"ir/value.h" #include "parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc index 5f9a67c22d..5b07f8d0a9 100644 --- a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc @@ -17,14 +17,14 @@ #include "parallel/ops_info/reduce_method_info.h" #include -#include -#include #include +#include +#include #include "ir/value.h" +#include "parallel/device_manager.h" #include "parallel/device_matrix.h" #include "parallel/tensor_layout/tensor_redistribution.h" -#include "parallel/device_manager.h" #include "utils/log_adapter.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h index 6f26b99ffb..8e2e17af99 100644 --- a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h +++ b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h @@ -17,16 +17,16 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_REDUCE_SUM_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_REDUCE_SUM_INFO_H_ +#include #include #include #include -#include +#include "ir/meta_tensor.h" #include "ir/value.h" +#include "parallel/auto_parallel/operator_costmodel.h" #include "parallel/ops_info/activation_info.h" #include "parallel/strategy.h" -#include "parallel/auto_parallel/operator_costmodel.h" -#include "ir/meta_tensor.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/ops_info/reshape_info.h b/mindspore/ccsrc/parallel/ops_info/reshape_info.h index 982894a8e0..1d6a14b1f6 100644 --- a/mindspore/ccsrc/parallel/ops_info/reshape_info.h +++ b/mindspore/ccsrc/parallel/ops_info/reshape_info.h @@ -19,10 +19,10 @@ #include +#include #include #include #include -#include #include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" diff --git a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.cc 
b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.cc index 73b18ad473..772a4f83f6 100644 --- a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.cc @@ -1,6 +1,4 @@ /** -#include "utils/log_adapter.h" -#include "utils/log_adapter.h" #include "utils/log_adapter.h" * Copyright 2019 Huawei Technologies Co., Ltd * diff --git a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h index 304e336adf..6df5856e0c 100644 --- a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h +++ b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h @@ -17,12 +17,12 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TMP_IDENTITY_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TMP_IDENTITY_INFO_H_ -#include #include #include +#include -#include "parallel/ops_info/operator_info.h" #include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" #include "parallel/strategy.h" namespace mindspore { diff --git a/mindspore/ccsrc/parallel/ops_info/transpose_info.h b/mindspore/ccsrc/parallel/ops_info/transpose_info.h index c7c1c96675..4f6f6bb695 100644 --- a/mindspore/ccsrc/parallel/ops_info/transpose_info.h +++ b/mindspore/ccsrc/parallel/ops_info/transpose_info.h @@ -17,10 +17,10 @@ #ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TRANSPOSE_INFO_H_ #define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_TRANSPOSE_INFO_H_ +#include #include #include #include -#include #include "ir/value.h" #include "parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h index c4fdfcef04..d0278f27d9 100644 --- a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h +++ b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h @@ -17,10 +17,10 @@ #ifndef PARALLEL_OPS_INFO_DATASET_INFO_H_ #define PARALLEL_OPS_INFO_DATASET_INFO_H_ +#include #include #include #include 
-#include #include "ir/value.h" #include "parallel/ops_info/operator_info.h" diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index 883e0d9f15..b06ec383fa 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -18,30 +18,30 @@ #include #include -#include +#include +#include #include #include -#include -#include #include -#include #include +#include +#include #include "ir/anf.h" +#include "ir/meta_tensor.h" #include "optimizer/opt.h" #include "optimizer/optimizer.h" -#include "pipeline/pipeline.h" -#include "pipeline/parse/python_adapter.h" +#include "parallel/auto_parallel/dp_algo_costmodel.h" #include "parallel/auto_parallel/edge_costmodel.h" #include "parallel/auto_parallel/graph_costmodel.h" -#include "parallel/step_parallel.h" -#include "parallel/auto_parallel/dp_algo_costmodel.h" -#include "parallel/ops_info/tmp_identity_info.h" -#include "parallel/context.h" -#include "parallel/auto_parallel/rec_core/rec_partition.h" -#include "parallel/auto_parallel/rec_core/rec_parse_graph.h" #include "parallel/auto_parallel/rec_core/rec_generate_strategy.h" -#include "ir/meta_tensor.h" +#include "parallel/auto_parallel/rec_core/rec_parse_graph.h" +#include "parallel/auto_parallel/rec_core/rec_partition.h" +#include "parallel/context.h" +#include "parallel/ops_info/tmp_identity_info.h" +#include "parallel/step_parallel.h" +#include "pipeline/parse/python_adapter.h" +#include "pipeline/pipeline.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.h b/mindspore/ccsrc/parallel/step_auto_parallel.h index d0d603a4f4..5ee75ca162 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.h +++ b/mindspore/ccsrc/parallel/step_auto_parallel.h @@ -17,14 +17,14 @@ #ifndef PARALLEL_STEP_AUTO_PARALLEL_H_ #define PARALLEL_STEP_AUTO_PARALLEL_H_ -#include +#include #include #include -#include +#include #include "ir/anf.h" 
#include "optimizer/opt.h" -#include "pipeline/pipeline.h" #include "parallel/status.h" +#include "pipeline/pipeline.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index e2b4d55aad..886a0aebf4 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -22,26 +22,26 @@ #include #include +#include #include #include #include -#include -#include "parallel/graph_util/graph_info.h" #include "ir/meta_tensor.h" -#include "optimizer/optimizer.h" -#include "parallel/dynamic_creator.h" -#include "parallel/ops_info/matmul_info.h" -#include "utils/symbolic.h" #include "operator/ops.h" +#include "optimizer/optimizer.h" #include "parallel/auto_parallel/graph_costmodel.h" +#include "parallel/context.h" #include "parallel/device_manager.h" -#include "parallel/strategy_checkpoint/parallel_strategy_checkpoint.h" +#include "parallel/dynamic_creator.h" #include "parallel/graph_util/generate_graph.h" -#include "parallel/context.h" +#include "parallel/graph_util/graph_info.h" +#include "parallel/graph_util/node_info.h" #include "parallel/node_check.h" +#include "parallel/ops_info/matmul_info.h" +#include "parallel/strategy_checkpoint/parallel_strategy_checkpoint.h" #include "utils/comm_manager.h" -#include "parallel/graph_util/node_info.h" +#include "utils/symbolic.h" using mindspore::tensor::Tensor; diff --git a/mindspore/ccsrc/parallel/step_parallel.h b/mindspore/ccsrc/parallel/step_parallel.h index 2d1982dc9c..fd47a59bf5 100644 --- a/mindspore/ccsrc/parallel/step_parallel.h +++ b/mindspore/ccsrc/parallel/step_parallel.h @@ -19,10 +19,10 @@ #include -#include -#include #include +#include #include +#include #include #include "./common.h" diff --git a/mindspore/ccsrc/parallel/strategy.h b/mindspore/ccsrc/parallel/strategy.h index 68ba4962d7..acc6ca928f 100644 --- a/mindspore/ccsrc/parallel/strategy.h +++ b/mindspore/ccsrc/parallel/strategy.h @@ 
-18,10 +18,10 @@ #define MINDSPORE_CCSRC_PARALLEL_STRATEGY_H_ #include -#include -#include #include +#include #include +#include #include "parallel/status.h" diff --git a/mindspore/ccsrc/parallel/strategy_checkpoint/parallel_strategy_checkpoint.cc b/mindspore/ccsrc/parallel/strategy_checkpoint/parallel_strategy_checkpoint.cc index 4e008feee1..9e3573eee2 100644 --- a/mindspore/ccsrc/parallel/strategy_checkpoint/parallel_strategy_checkpoint.cc +++ b/mindspore/ccsrc/parallel/strategy_checkpoint/parallel_strategy_checkpoint.cc @@ -20,10 +20,10 @@ #include #include +#include "common/utils.h" +#include "utils/convert_utils.h" #include "utils/log_adapter.h" #include "utils/node_strategy.pb.h" -#include "utils/convert_utils.h" -#include "common/utils.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h b/mindspore/ccsrc/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h index 3cbb116b42..b5d3626f53 100644 --- a/mindspore/ccsrc/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h +++ b/mindspore/ccsrc/parallel/strategy_checkpoint/parallel_strategy_checkpoint.h @@ -19,8 +19,8 @@ #include #include -#include "parallel/strategy.h" #include "parallel/ops_info/ops_utils.h" +#include "parallel/strategy.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc b/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc index fea7e4ba65..68acae87f3 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc @@ -15,14 +15,14 @@ */ #include "parallel/tensor_layout/arrangement.h" -#include -#include #include +#include +#include +#include "common/utils.h" #include "parallel/status.h" -#include "utils/log_adapter.h" #include "parallel/tensor_layout/shape_util.h" #include "utils/convert_utils.h" -#include "common/utils.h" +#include "utils/log_adapter.h" namespace mindspore { 
namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/arrangement.h b/mindspore/ccsrc/parallel/tensor_layout/arrangement.h index 582beeaff2..6d64e07f03 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/arrangement.h +++ b/mindspore/ccsrc/parallel/tensor_layout/arrangement.h @@ -21,8 +21,8 @@ #include #include #include -#include #include +#include #include "parallel/status.h" #include "parallel/tensor_layout/array.h" diff --git a/mindspore/ccsrc/parallel/tensor_layout/array.cc b/mindspore/ccsrc/parallel/tensor_layout/array.cc index e073c3905c..ce1b9b8ecf 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/array.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/array.cc @@ -16,8 +16,8 @@ #include "parallel/tensor_layout/array.h" #include -#include "utils/log_adapter.h" #include "parallel/status.h" +#include "utils/log_adapter.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h b/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h index 641b0975dd..91f5236037 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h +++ b/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h @@ -21,9 +21,9 @@ #include #include +#include "ir/value.h" #include "parallel/ops_info/operator_info.h" #include "parallel/status.h" -#include "ir/value.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc index 3321d751bb..b2ee51b40b 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc @@ -15,8 +15,8 @@ */ #include "parallel/tensor_layout/layout_transfer.h" -#include "parallel/status.h" #include "common/utils.h" +#include "parallel/status.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/map.cc 
b/mindspore/ccsrc/parallel/tensor_layout/map.cc index 46c066b250..4f3f2369c7 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/map.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/map.cc @@ -15,14 +15,14 @@ */ #include "parallel/tensor_layout/map.h" -#include -#include #include +#include +#include +#include "common/utils.h" #include "parallel/status.h" -#include "utils/log_adapter.h" #include "parallel/tensor_layout/shape_util.h" #include "utils/convert_utils.h" -#include "common/utils.h" +#include "utils/log_adapter.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/map.h b/mindspore/ccsrc/parallel/tensor_layout/map.h index 55ed3e5577..f7bc061aa1 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/map.h +++ b/mindspore/ccsrc/parallel/tensor_layout/map.h @@ -23,8 +23,8 @@ #include #include #include "parallel/status.h" -#include "parallel/tensor_layout/array.h" #include "parallel/tensor_layout/arrangement.h" +#include "parallel/tensor_layout/array.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc index dea44ee60e..2ee682fad8 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc @@ -16,8 +16,8 @@ #include "parallel/tensor_layout/redistribution_layout_transfer.h" #include "parallel/status.h" -#include "parallel/tensor_layout/shape_util.h" #include "parallel/tensor_layout/reshape_layout_transfer.h" +#include "parallel/tensor_layout/shape_util.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h index 3515c7383a..13f9e7af24 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h +++ 
b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h @@ -18,13 +18,13 @@ #define MINDSPORE_CCSRC_PARALLEL_TENSOR_LAYOUT_REDISTRIBUTION_OPERATOR_INFER_H_ #include -#include #include -#include +#include #include +#include -#include "parallel/tensor_layout/redistribution_layout_transfer.h" #include "parallel/tensor_layout/construct_operator.h" +#include "parallel/tensor_layout/redistribution_layout_transfer.h" #include "utils/convert_utils.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/shape_util.h b/mindspore/ccsrc/parallel/tensor_layout/shape_util.h index 5451af063e..85ca70969b 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/shape_util.h +++ b/mindspore/ccsrc/parallel/tensor_layout/shape_util.h @@ -18,10 +18,10 @@ #define MINDSPORE_CCSRC_PARALLEL_TENSOR_LAYOUT_SHAPE_UTIL_H_ #include -#include -#include #include #include +#include +#include #include "parallel/status.h" diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h index b24df6bbf2..9fc6a229e2 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h @@ -19,12 +19,12 @@ #include #include -#include #include +#include +#include "parallel/device_matrix.h" #include "parallel/status.h" #include "parallel/tensor_layout/tensor_layout.h" -#include "parallel/device_matrix.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc index 9463b99ce6..f49b967abc 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc @@ -15,15 +15,15 @@ */ #include "parallel/tensor_layout/tensor_layout.h" -#include #include -#include "parallel/status.h" -#include "utils/log_adapter.h" -#include "parallel/tensor_layout/shape_util.h" -#include 
"parallel/tensor_layout/array.h" +#include +#include "common/utils.h" #include "ir/value.h" #include "parallel/device_matrix.h" -#include "common/utils.h" +#include "parallel/status.h" +#include "parallel/tensor_layout/array.h" +#include "parallel/tensor_layout/shape_util.h" +#include "utils/log_adapter.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h index 0db356b8b1..238c9373d9 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h @@ -22,11 +22,11 @@ #include #include #include +#include "parallel/device_manager.h" #include "parallel/status.h" #include "parallel/tensor_layout/arrangement.h" #include "parallel/tensor_layout/map.h" #include "utils/convert_utils.h" -#include "parallel/device_manager.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc index 365a2be1cb..93bda5da81 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc @@ -18,9 +18,9 @@ #include #include #include +#include "common/utils.h" #include "parallel/status.h" #include "parallel/tensor_layout/shape_util.h" -#include "common/utils.h" namespace mindspore { namespace parallel { diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h index daf474b3d7..38fb5959ad 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h @@ -18,18 +18,18 @@ #define MINDSPORE_CCSRC_PARALLEL_TENSOR_LAYOUT_TENSOR_REDISTRIBUTION_H_ #include -#include -#include #include #include +#include #include +#include #include "ir/value.h" 
-#include "parallel/status.h" -#include "parallel/tensor_layout/tensor_layout.h" #include "parallel/ops_info/operator_info.h" +#include "parallel/status.h" #include "parallel/tensor_layout/construct_operator.h" #include "parallel/tensor_layout/redistribution_operator_infer.h" +#include "parallel/tensor_layout/tensor_layout.h" namespace mindspore { namespace parallel { From 5240b1f6039e739c1b861f9eca5535ee359ffc09 Mon Sep 17 00:00:00 2001 From: lichenever Date: Thu, 2 Apr 2020 11:14:45 +0800 Subject: [PATCH 067/367] fix refkey bug for auto parallel --- mindspore/ccsrc/parallel/step_parallel.cc | 21 ++++++++++-- tests/ut/python/parallel/test_arithmetic.py | 36 ++++++++++++++++++--- 2 files changed, 50 insertions(+), 7 deletions(-) diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index e2b4d55aad..28cddd6111 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -49,6 +49,9 @@ namespace mindspore { namespace parallel { const std::set COMMUNICATION_OPS = {ALL_REDUCE, ALL_GATHER, ALL_TO_ALL, REDUCE_SCATTER}; const std::set INVALID_LOSS_OPS = {GET_NEXT, VIRTUALLOSS}; +// g_RefMap, for CNode B input i is a RefKey[Parameter C], +// it will be one item in map with key: C, and value: (B, i) +static std::map> g_RefMap; void SetCommunicationOpGroupLabel(std::vector new_node_input) { if (new_node_input.empty()) { @@ -1085,11 +1088,19 @@ std::vector ExtractShape(const CNodePtr& node) { std::vector all_inputs = node->inputs(); std::vector node_inputs{all_inputs.begin() + 1, all_inputs.end()}; - for (auto& input : node_inputs) { + size_t inputs_size = all_inputs.size(); + for (size_t i = 1; i < inputs_size; ++i) { Shapes input_shapes; + AnfNodePtr input = all_inputs[i]; if (IsValueNode(input)) { auto func_graph = node->func_graph(); MS_EXCEPTION_IF_NULL(func_graph); + std::vector parameters = FindParameterByRefKeyNode(input, func_graph); + if (parameters.size() != 1) { + 
MS_LOG(EXCEPTION) << "Find parameter by ref key node failed"; + } + std::pair node_pair = std::make_pair(node, SizeToInt(i)); + g_RefMap[parameters[0]] = node_pair; input_shapes = GetRefKeyNodeShape(input, func_graph); } else if (IsValueNode(input) || input->isa() || input->isa()) { input_shapes = GetNodeShape(input); @@ -1205,14 +1216,20 @@ void CoverSliceShape(const FuncGraphPtr& root) { auto parameters = root->parameters(); for (auto& parameter : parameters) { MS_EXCEPTION_IF_NULL(parameter->Shape()); + auto iter = g_RefMap.find(parameter); + if (iter != g_RefMap.end()) { + SetParallelShape(parameter, g_RefMap[parameter]); + continue; + } std::pair res = FindSubGraph(root, parameter); if (res.first == nullptr) { MS_LOG(INFO) << "Parameter " << parameter->ToString() << " don't need to set parallel shape"; } else { SetParallelShape(parameter, res); - MS_LOG(DEBUG) << "parameter " << parameter->ToString() << " shape " << parameter->Shape()->ToString(); + MS_LOG(DEBUG) << "Parameter " << parameter->ToString() << " shape " << parameter->Shape()->ToString(); } } + g_RefMap.clear(); } bool ParameterIsCloned(const FuncGraphPtr& root, const AnfNodePtr& parameter_node) { diff --git a/tests/ut/python/parallel/test_arithmetic.py b/tests/ut/python/parallel/test_arithmetic.py index 2c7eabc8f2..4c34c0371e 100644 --- a/tests/ut/python/parallel/test_arithmetic.py +++ b/tests/ut/python/parallel/test_arithmetic.py @@ -13,14 +13,13 @@ # limitations under the License. 
import numpy as np -from mindspore import context +import mindspore as ms +from mindspore import Parameter, Tensor, context import mindspore.nn as nn from mindspore.ops import operations as P -from mindspore import Tensor -from tests.ut.python.ops.test_math_ops import VirtualLoss -import mindspore as ms -from mindspore.common.api import _executor from mindspore.ops import composite as C +from mindspore.common.api import _executor +from tests.ut.python.ops.test_math_ops import VirtualLoss class NetWithLoss(nn.Cell): @@ -470,3 +469,30 @@ def test_matmul_floordiv_broadcast2(): y = Tensor(np.ones([32, 1]), dtype=ms.float32) b = Tensor(np.ones([1, 64]), dtype=ms.float32) _executor.compile(net, x, y, b) + + +def test_assign_sub(): + class Net(nn.Cell): + def __init__(self): + super().__init__() + self.assign_sub = P.AssignSub() + self.mul = P.Mul() + self.mul_weight = Parameter(Tensor(np.full([128, 32], + 0.5, dtype=np.float32)), + name="mul_weight") + self.assignsub_weight = Parameter(Tensor(np.full([128, 32], + 1.1, dtype=np.float32)), + name="assignsub_weight") + + def construct(self, x, y, z): + out = self.mul(x, self.mul_weight) + out = self.assign_sub(self.assignsub_weight, out) + return out + + context.set_auto_parallel_context(device_num=64, global_rank=15) + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") + net = GradWrap(NetWithLoss(Net())) + x = Tensor(np.ones([128, 32]), dtype=ms.float32) + y = Tensor(np.ones([128, 32]), dtype=ms.float32) + z = Tensor(np.ones([128, 32]), dtype=ms.float32) + _executor.compile(net, x, y, z) From 49a41bb2cc4a700573f262bdb9df518e51b4895a Mon Sep 17 00:00:00 2001 From: Yanjun Peng Date: Thu, 2 Apr 2020 16:37:39 +0800 Subject: [PATCH 068/367] fix random crop parameter check --- mindspore/dataset/transforms/vision/validators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/dataset/transforms/vision/validators.py b/mindspore/dataset/transforms/vision/validators.py index 
443232d780..caab120af4 100644 --- a/mindspore/dataset/transforms/vision/validators.py +++ b/mindspore/dataset/transforms/vision/validators.py @@ -325,7 +325,7 @@ def check_random_crop(method): @wraps(method) def new_method(self, *args, **kwargs): - args = (list(args) + 4 * [None])[:5] + args = (list(args) + 5 * [None])[:5] size, padding, pad_if_needed, fill_value, padding_mode = args if "size" in kwargs: From c3d210cc3160fcee250e9e25b06f9d615ea48a6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Thu, 2 Apr 2020 11:13:16 +0800 Subject: [PATCH 069/367] fix issue I1DBRX delete duplicated words in comments fix issue I1CJAP fix issue I1D3WS fix issue I1DBA8 fix issue I1CJ77 fix issue I1CJG3 fix review suggestion --- mindspore/nn/layer/normalization.py | 2 +- mindspore/ops/operations/array_ops.py | 3 +- mindspore/ops/operations/control_ops.py | 4 +- mindspore/ops/operations/math_ops.py | 122 ++++++++++++------------ mindspore/ops/operations/nn_ops.py | 7 +- 5 files changed, 69 insertions(+), 69 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index d43c1c8ab4..4aded20ab3 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -249,7 +249,7 @@ class LayerNorm(Cell): 'he_uniform', etc. Default: 'zeros'. Inputs: - - **input_x** (Tensor) - The shape of 'input_x' is input_shape = :math:`(x_1, x_2, ..., x_R)`, + - **input_x** (Tensor) - The shape of 'input_x' is :math:`(x_1, x_2, ..., x_R)`, and `input_shape[begin_norm_axis:]` is equal to `normalized_shape`. 
Outputs: diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index f6d563321c..e9ea30b4cd 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -443,7 +443,6 @@ class Transpose(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32) >>> perm = (0, 2, 1) - >>> expect = np.array([[[1, 4], [2, 5], [3, 6]], [[7, 10], [8, 11], [9, 12]]]) >>> transpose = Transpose() >>> output = transpose(input_tensor, perm) """ @@ -1631,7 +1630,7 @@ class Diag(PrimitiveWithInfer): Examples: >>> input_x = Tensor([1, 2, 3, 4]) >>> diag = P.Diag() - >>> diag(x) + >>> diag(input_x) [[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], diff --git a/mindspore/ops/operations/control_ops.py b/mindspore/ops/operations/control_ops.py index 242a3b155d..a7d9c3bb8a 100644 --- a/mindspore/ops/operations/control_ops.py +++ b/mindspore/ops/operations/control_ops.py @@ -107,8 +107,8 @@ class GeSwitch(PrimitiveWithInfer): >>> ret = self.merge((add_ret, sq_ret)) >>> return ret[0] >>> - >>> x = Tensor(x_init, dtype=mindspore.float32) - >>> y = Tensor(y_init, dtype=mindspore.float32) + >>> x = Tensor(10.0, dtype=mindspore.float32) + >>> y = Tensor(5.0, dtype=mindspore.float32) >>> net = Net() >>> output = net(x, y) """ diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 398a7e6f1a..d9e03a54d0 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -140,6 +140,7 @@ class AssignAdd(PrimitiveWithInfer): Examples: >>> class Net(Cell): >>> def __init__(self): + >>> super(Net, self).__init__() >>> self.AssignAdd = P.AssignAdd() >>> self.inputdata = Parameter(initializer(1, [1], mindspore.int64), name="global_step") >>> @@ -272,7 +273,7 @@ class ReduceMean(_Reduce): Examples: >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) - >>> op = ReduceMean(keep_dims=True) + 
>>> op = P.ReduceMean(keep_dims=True) >>> output = op(data, 1) """ @@ -304,7 +305,7 @@ class ReduceSum(_Reduce): Examples: >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) - >>> op = ReduceSum(keep_dims=True) + >>> op = P.ReduceSum(keep_dims=True) >>> output = op(data, 1) """ @@ -337,7 +338,7 @@ class ReduceAll(_Reduce): Examples: >>> data = Tensor(np.array([[True, False], [True, True]])) - >>> op = ReduceAll(keep_dims=True) + >>> op = P.ReduceAll(keep_dims=True) >>> output = op(data, 1) """ @@ -373,7 +374,7 @@ class ReduceMax(_Reduce): Examples: >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) - >>> op = ReduceMax(keep_dims=True) + >>> op = P.ReduceMax(keep_dims=True) >>> output = op(data, 1) """ @@ -406,7 +407,7 @@ class ReduceMin(_Reduce): Examples: >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) - >>> op = ReduceMin(keep_dims=True) + >>> op = P.ReduceMin(keep_dims=True) >>> output = op(data, 1) """ @@ -438,7 +439,7 @@ class ReduceProd(_Reduce): Examples: >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) - >>> op = ReduceProd(keep_dims=True) + >>> op = P.ReduceProd(keep_dims=True) >>> output = op(data, 1) """ @@ -460,13 +461,13 @@ class CumProd(PrimitiveWithInfer): Examples: >>> data = Tensor(np.array([a, b, c]).astype(np.float32)) - >>> op0 = CumProd() + >>> op0 = P.CumProd() >>> output = op0(data, 0) # output=[a, a * b, a * b * c] - >>> op1 = CumProd(exclusive=True) + >>> op1 = P.CumProd(exclusive=True) >>> output = op1(data, 0) # output=[1, a, a * b] - >>> op2 = CumProd(reverse=True) + >>> op2 = P.CumProd(reverse=True) >>> output = op2(data, 0) # output=[a * b * c, b * c, c] - >>> op3 = CumProd(exclusive=True, reverse=True) + >>> op3 = P.CumProd(exclusive=True, reverse=True) >>> output = op3(data, 0) # output=[b * c, c, 1] """ @prim_attr_register @@ -506,7 +507,7 @@ class MatMul(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.ones(shape=[1, 3]), mindspore.float32) >>> input_y = 
Tensor(np.ones(shape=[3, 4]), mindspore.float32) - >>> matmul = MatMul() + >>> matmul = P.MatMul() >>> output = matmul(input_x, input_y) """ @@ -582,12 +583,12 @@ class BatchMatMul(MatMul): Examples: >>> input_x = Tensor(np.ones(shape=[2, 4, 1, 3]), mindspore.float32) >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32) - >>> batmatmul = BatchMatMul() + >>> batmatmul = P.BatchMatMul() >>> output = batmatmul(input_x, input_y) >>> >>> input_x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32) >>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32) - >>> batmatmul = BatchMatMul(transpose_a=True) + >>> batmatmul = P.BatchMatMul(transpose_a=True) >>> output = batmatmul(input_x, input_y) """ @@ -621,7 +622,7 @@ class CumSum(PrimitiveWithInfer): Examples: >>> input = Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float32)) - >>> cumsum = CumSum() + >>> cumsum = P.CumSum() >>> output = cumsum(input, 1) [[ 3. 7. 13. 23.] [ 1. 7. 14. 23.] 
@@ -666,7 +667,7 @@ class AddN(PrimitiveWithInfer): >>> class NetAddN(nn.Cell): >>> def __init__(self): >>> super(NetAddN, self).__init__() - >>> self.addN = AddN() + >>> self.addN = P.AddN() >>> >>> def construct(self, *z): >>> return self.addN(z) @@ -748,7 +749,7 @@ class Sub(_MathBinaryOp): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32) - >>> sub = Sub() + >>> sub = P.Sub() >>> sub(input_x, input_y) [-3, -3, -3] """ @@ -775,7 +776,7 @@ class Mul(_MathBinaryOp): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32) - >>> mul = Mul() + >>> mul = P.Mul() >>> mul(input_x, input_y) [4, 10, 18] """ @@ -793,7 +794,7 @@ class Square(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) - >>> square = Square() + >>> square = P.Square() >>> square(input_x) [1.0, 4.0, 9.0] """ @@ -823,7 +824,7 @@ class Rsqrt(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32) - >>> rsqrt = Rsqrt() + >>> rsqrt = P.Rsqrt() >>> rsqrt(input_tensor) [[0.5, 0.5], [0.333333, 0.333333]] """ @@ -853,7 +854,7 @@ class Sqrt(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32) - >>> sqrt = Sqrt() + >>> sqrt = P.Sqrt() >>> sqrt(input_x) [1.0, 2.0, 3.0] """ @@ -883,7 +884,7 @@ class Reciprocal(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) - >>> reciprocal = Reciprocal() + >>> reciprocal = P.Reciprocal() >>> reciprocal(input_x) [1.0, 0.5, 0.25] """ @@ -916,13 +917,13 @@ class Pow(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) >>> input_y = 3.0 - >>> pow = Pow() + >>> pow = P.Pow() >>> pow(input_x, input_y) [1.0, 8.0, 64.0] >>> >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) >>> input_y = 
Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32) - >>> pow = Pow() + >>> pow = P.Pow() >>> pow(input_x, input_y) [1.0, 16.0, 64.0] """ @@ -952,7 +953,7 @@ class Exp(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) - >>> exp = Exp() + >>> exp = P.Exp() >>> exp(input_x) [ 2.71828183, 7.3890561 , 54.59815003] """ @@ -982,7 +983,7 @@ class Log(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32) - >>> log = Log() + >>> log = P.Log() >>> log(input_x) [0.0, 0.69314718, 1.38629436] """ @@ -1020,7 +1021,7 @@ class Minimum(_MathBinaryOp): Examples: >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32) >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32) - >>> minimum = Minimum() + >>> minimum = P.Minimum() >>> minimum(input_x, input_y) [1.0, 2.0, 3.0] """ @@ -1047,7 +1048,7 @@ class Maximum(_MathBinaryOp): Examples: >>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32) >>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32) - >>> maximum = Maximum() + >>> maximum = P.Maximum() >>> maximum(input_x, input_y) [4.0, 5.0, 6.0] """ @@ -1074,7 +1075,7 @@ class RealDiv(_MathBinaryOp): Examples: >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) >>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32) - >>> realdiv = RealDiv() + >>> realdiv = P.RealDiv() >>> realdiv(input_x, input_y) [0.25, 0.4, 0.5] """ @@ -1113,9 +1114,8 @@ class Div(_MathBinaryOp): Examples: >>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32) >>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32) - >>> div = Div() + >>> div = P.Div() >>> div(input_x, input_y) - [-2.0, 2.0, 2.0] """ def infer_value(self, x, y): @@ -1147,7 +1147,7 @@ class FloorDiv(_MathBinaryOp): Examples: >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32) >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32) - >>> floor_div = 
FloorDiv() + >>> floor_div = P.FloorDiv() >>> floor_div(input_x, input_y) [0, 1, -1] """ @@ -1165,7 +1165,7 @@ class Floor(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32) - >>> floor = Floor() + >>> floor = P.Floor() >>> floor(input_x) [1.0, 2.0, -2.0] """ @@ -1221,13 +1221,13 @@ class Equal(_LogicBinaryOp): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32) - >>> equal = Equal() + >>> equal = P.Equal() >>> equal(input_x, 2.0) [False, True, False] >>> >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32) - >>> equal = Equal() + >>> equal = P.Equal() >>> equal(input_x, input_y) [True, True, False] """ @@ -1252,7 +1252,7 @@ class EqualCount(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32) - >>> equal_count = EqualCount() + >>> equal_count = P.EqualCount() >>> equal_count(input_x, input_y) [2] """ @@ -1292,13 +1292,13 @@ class NotEqual(_LogicBinaryOp): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32) - >>> not_equal = NotEqual() + >>> not_equal = P.NotEqual() >>> not_equal(input_x, 2.0) [True, False, True] >>> >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32) - >>> not_equal = NotEqual() + >>> not_equal = P.NotEqual() >>> not_equal(input_x, input_y) [False, False, True] """ @@ -1328,7 +1328,7 @@ class Greater(_LogicBinaryOp): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32) - >>> greater = Greater() + >>> greater = P.Greater() >>> greater(input_x, input_y) [False, True, False] """ @@ -1355,7 +1355,7 @@ class GreaterEqual(_LogicBinaryOp): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32) - >>> 
greater_equal = GreaterEqual() + >>> greater_equal = P.GreaterEqual() >>> greater_equal(input_x, input_y) [True, True, False] """ @@ -1382,7 +1382,7 @@ class Less(_LogicBinaryOp): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32) - >>> less = Less() + >>> less = P.Less() >>> less(input_x, input_y) [False, False, True] """ @@ -1409,7 +1409,7 @@ class LessEqual(_LogicBinaryOp): Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) >>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32) - >>> less_equal = LessEqual() + >>> less_equal = P.LessEqual() >>> less_equal(input_x, input_y) [True, False, True] """ @@ -1427,7 +1427,7 @@ class LogicalNot(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_) - >>> logical_not = LogicalNot() + >>> logical_not = P.LogicalNot() >>> logical_not(input_x) [False, True, False] """ @@ -1465,7 +1465,7 @@ class LogicalAnd(_LogicBinaryOp): Examples: >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_) >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_) - >>> logical_and = LogicalAnd() + >>> logical_and = P.LogicalAnd() >>> logical_and(input_x, input_y) [True, False, False] """ @@ -1494,7 +1494,7 @@ class LogicalOr(_LogicBinaryOp): Examples: >>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_) >>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_) - >>> logical_or = LogicalOr() + >>> logical_or = P.LogicalOr() >>> logical_or(input_x, input_y) [True, True, True] """ @@ -1516,7 +1516,7 @@ class NPUAllocFloatStatus(PrimitiveWithInfer): Tensor, has the shape of `(8,)`. 
Examples: - >>> alloc_status = NPUAllocFloatStatus() + >>> alloc_status = P.NPUAllocFloatStatus() >>> init = alloc_status() Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32) """ @@ -1548,8 +1548,8 @@ class NPUGetFloatStatus(PrimitiveWithInfer): Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero. Examples: - >>> alloc_status = NPUAllocFloatStatus() - >>> get_status = NPUGetFloatStatus() + >>> alloc_status = P.NPUAllocFloatStatus() + >>> get_status = P.NPUGetFloatStatus() >>> init = alloc_status() >>> flag = get_status(init) Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32) @@ -1588,9 +1588,9 @@ class NPUClearFloatStatus(PrimitiveWithInfer): Tensor, has the same shape as `input_x`. All the elements in the tensor will be zero. Examples: - >>> alloc_status = NPUAllocFloatStatus() - >>> get_status = NPUGetFloatStatus() - >>> clear_status = NPUClearFloatStatus() + >>> alloc_status = P.NPUAllocFloatStatus() + >>> get_status = P.NPUGetFloatStatus() + >>> clear_status = P.NPUClearFloatStatus() >>> init = alloc_status() >>> flag = get_status(init) >>> clear = clear_status(init) @@ -1624,7 +1624,7 @@ class Cos(PrimitiveWithInfer): Tensor, has the same shape as `input_x`. Examples: - >>> cos = Cos() + >>> cos = P.Cos() >>> X = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), ms.float32) >>> output = cos(X) """ @@ -1653,8 +1653,8 @@ class ACos(PrimitiveWithInfer): Tensor, has the same shape as `input_x`. Examples: - >>> acos = ACos() - >>> X = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), ms.float32) + >>> acos = P.ACos() + >>> X = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32) >>> output = acos(X) """ @@ -1682,9 +1682,9 @@ class Sin(PrimitiveWithInfer): Tensor, has the same shape as `input_x`. 
Examples: - >>> sin = Sin() - >>> X = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), ms.float32) - >>> output = sin(X) + >>> sin = P.Sin() + >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), ms.float32) + >>> output = sin(input_x) """ @prim_attr_register @@ -1734,7 +1734,7 @@ class NMSWithMask(PrimitiveWithInfer): >>> bbox[:, 2] += bbox[:, 0] >>> bbox[:, 3] += bbox[:, 1] >>> inputs = Tensor(bbox) - >>> nms = NMSWithMask(0.5) + >>> nms = P.NMSWithMask(0.5) >>> output_boxes, indices, mask = nms(inputs) """ @@ -1769,7 +1769,7 @@ class Abs(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32) - >>> abs = Abs() + >>> abs = P.Abs() >>> abs(input_x) [1.0, 1.0, 0.0] """ @@ -1812,7 +1812,7 @@ class Sign(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[2.0, 0.0, -1.0]]), mindspore.float32) - >>> sign = Sign() + >>> sign = P.Sign() >>> output = sign(input_x) [[1.0, 0.0, -1.0]] """ @@ -1842,7 +1842,7 @@ class Round(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32) - >>> round = Round() + >>> round = P.Round() >>> round(input_x) [1.0, 2.0, 2.0, 2.0, -4.0] """ @@ -1877,7 +1877,7 @@ class Atan2(_MathBinaryOp): Examples: >>> input_x = Tensor(np.array([[0, 1]]), mstype.float32) >>> input_y = Tensor(np.array([[1, 1]]), mstype.float32) - >>> atan2 = Atan2() + >>> atan2 = P.Atan2() >>> atan2(input_x, input_y) [[0. 0.7853982]] """ diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index afa4c7dfe3..283d35725f 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -1090,9 +1090,10 @@ class TopK(PrimitiveWithInfer): - **indices** (Tensor) - The indices of values within the last dimension of input. 
Examples: - >>> topk = TopK(sorted=True) - >>> x = Tensor(np.array([1, 2, 3, 4, 5]).astype(np.float16)) - >>> values, indices = topk(x) + >>> topk = P.TopK(sorted=True) + >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)) + >>> k = 3 + >>> values, indices = topk(input_x, k) >>> assert values == Tensor(np.array([5, 4, 3])) >>> assert indices == Tensor(np.array([4, 3, 2])) """ From fdb47860ccfe385dbb4f756b4a39ac8075bea8fb Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Thu, 2 Apr 2020 16:29:14 +0800 Subject: [PATCH 070/367] Fix some typo errors in session and device module Signed-off-by: leonwanghui --- .../ccsrc/device/cpu/cpu_kernel_runtime.cc | 10 +- .../ccsrc/device/cpu/cpu_resource_manager.cc | 4 +- mindspore/ccsrc/device/cpu/cpu_session.cc | 14 +- mindspore/ccsrc/device/kernel_adjust.cc | 12 +- mindspore/ccsrc/device/kernel_runtime.cc | 12 +- .../ccsrc/device/kernel_runtime_manager.cc | 2 +- .../ccsrc/session/anf_runtime_algorithm.cc | 36 ++-- .../ccsrc/session/anf_runtime_algorithm.h | 4 +- mindspore/ccsrc/session/ascend_session.cc | 177 +++++++++--------- mindspore/ccsrc/session/ascend_session.h | 13 +- mindspore/ccsrc/session/gpu_session.cc | 4 +- mindspore/ccsrc/session/kernel_graph.cc | 42 ++--- mindspore/ccsrc/session/kernel_graph.h | 4 +- mindspore/ccsrc/session/session_basic.cc | 26 +-- mindspore/ccsrc/session/session_basic.h | 4 +- mindspore/ccsrc/utils/utils.h | 2 +- 16 files changed, 184 insertions(+), 182 deletions(-) diff --git a/mindspore/ccsrc/device/cpu/cpu_kernel_runtime.cc b/mindspore/ccsrc/device/cpu/cpu_kernel_runtime.cc index 5bf9689f0b..1dcddfa994 100644 --- a/mindspore/ccsrc/device/cpu/cpu_kernel_runtime.cc +++ b/mindspore/ccsrc/device/cpu/cpu_kernel_runtime.cc @@ -66,7 +66,7 @@ void CPUKernelRuntime::AssignValueNodeAddress(session::KernelGraph *kernel_graph address->ptr_ = resource_manager_.MemMalloc(tensor_size); if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(), 
tensor->data_c(false))) { - MS_LOG(EXCEPTION) << "value node sync host to device failed!"; + MS_LOG(EXCEPTION) << "Value node sync host to device failed!"; } } address->ref_count_ = INIT_NODE_REF; @@ -141,7 +141,7 @@ BaseRef CPUKernelRuntime::CreatTensorForOutput(const AnfNodePtr &input_node, siz MS_EXCEPTION_IF_NULL(node); size_t output_size = AnfAlgo::GetOutputTensorNum(node); if (index >= output_size) { - MS_LOG(EXCEPTION) << "invalid input index " << index; + MS_LOG(EXCEPTION) << "Invalid input index " << index; } auto address = AnfAlgo::GetMutableOutputAddr(node, index); MS_EXCEPTION_IF_NULL(address); @@ -157,7 +157,7 @@ BaseRef CPUKernelRuntime::CreatTensorForOutput(const AnfNodePtr &input_node, siz type_id = kNumberTypeFloat32; } if (type_id != kNumberTypeInt32 && type_id != kNumberTypeFloat32) { - MS_LOG(EXCEPTION) << "check output type failed."; + MS_LOG(EXCEPTION) << "Check output type failed."; } tensor::TensorPtr tensor = std::make_shared(type_id, temp_shape); MS_EXCEPTION_IF_NULL(tensor); @@ -181,7 +181,7 @@ void CPUKernelRuntime::BindInputOutput(const session::KernelGraph *kernel_graph, // bind input ptr auto &input_nodes = kernel_graph->inputs(); if (input_nodes.size() != inputs.size()) { - MS_LOG(EXCEPTION) << "input size not equal to input node size!"; + MS_LOG(EXCEPTION) << "Input size not equal to input node size!"; } std::unordered_map input_map; @@ -203,7 +203,7 @@ void CPUKernelRuntime::BindInputOutput(const session::KernelGraph *kernel_graph, address->ptr_ = resource_manager_.MemMalloc(tensor_size); if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(), tensor->data_c(false))) { - MS_LOG(EXCEPTION) << "parameter node sync host to device failed!"; + MS_LOG(EXCEPTION) << "Parameter node sync host to device failed!"; } tensor->set_dirty(true); } diff --git a/mindspore/ccsrc/device/cpu/cpu_resource_manager.cc b/mindspore/ccsrc/device/cpu/cpu_resource_manager.cc index 5b2b7a0750..97df7d4487 100644 --- 
a/mindspore/ccsrc/device/cpu/cpu_resource_manager.cc +++ b/mindspore/ccsrc/device/cpu/cpu_resource_manager.cc @@ -44,7 +44,7 @@ void CPUResourceManager::MemPlan(const session::KernelGraph *graph) { mem_size_ = graph_mem_size; dynamic_malloc_ = false; } else { - MS_LOG(INFO) << "switch to dynamic malloc"; + MS_LOG(INFO) << "Switch to dynamic malloc"; dynamic_malloc_ = true; } } @@ -63,7 +63,7 @@ void *CPUResourceManager::MemMalloc(size_t mem_size) { dynamic_mem_[ptr] = mem_size; return ptr; } else { - MS_LOG(EXCEPTION) << "malloc memory failed: size " << mem_size; + MS_LOG(EXCEPTION) << "Malloc memory failed: size " << mem_size; } } diff --git a/mindspore/ccsrc/device/cpu/cpu_session.cc b/mindspore/ccsrc/device/cpu/cpu_session.cc index 59d0318874..1613f9f91e 100644 --- a/mindspore/ccsrc/device/cpu/cpu_session.cc +++ b/mindspore/ccsrc/device/cpu/cpu_session.cc @@ -31,12 +31,12 @@ GraphId CPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList auto graph_id = graph_sum_; auto graph = ConstructKernelGraph(lst, outputs); MS_EXCEPTION_IF_NULL(graph); - MS_LOG(INFO) << "set kernel info"; + MS_LOG(INFO) << "Set kernel info"; SetKernelInfo(graph.get()); predictmodel::StepConvertGraph(graph); - MS_LOG(INFO) << "build kernel"; + MS_LOG(INFO) << "Build kernel"; BuildKernel(graph.get()); - MS_LOG(INFO) << "assign kernel address"; + MS_LOG(INFO) << "Assign kernel address"; runtime_.AssignKernelAddress(graph.get()); return graph_id; } @@ -44,18 +44,18 @@ GraphId CPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList void CPUSession::RunGraph(const GraphId &graph_id, const std::vector &inputs, VectorRef *outputs) { auto &kernel_graph = graphs_[graph_id]; MS_EXCEPTION_IF_NULL(kernel_graph); - MS_LOG(INFO) << "bind input output address"; + MS_LOG(INFO) << "Bind input output address"; runtime_.BindInputOutput(kernel_graph.get(), inputs, outputs); - MS_LOG(INFO) << "run graph start"; + MS_LOG(INFO) << "Run graph start"; 
predictmodel::StepConvertWeight(inputs); auto execution_order = kernel_graph->execution_order(); Reorder(&execution_order); kernel_graph->set_execution_order(execution_order); bool ret = runtime_.Run(kernel_graph.get()); if (!ret) { - MS_LOG(EXCEPTION) << "run graph failed"; + MS_LOG(EXCEPTION) << "Run graph failed"; } - MS_LOG(INFO) << "run graph end"; + MS_LOG(INFO) << "Run graph end"; } void CPUSession::SetKernelInfo(const KernelGraph *kernel_graph) { diff --git a/mindspore/ccsrc/device/kernel_adjust.cc b/mindspore/ccsrc/device/kernel_adjust.cc index a4d316d601..089813b098 100644 --- a/mindspore/ccsrc/device/kernel_adjust.cc +++ b/mindspore/ccsrc/device/kernel_adjust.cc @@ -49,7 +49,7 @@ void KernelAdjust::Reorder(const std::shared_ptr &kernel_g std::vector momentum_list; std::vector other_list; for (const auto &cnode : origin_cnode_list) { - if (kOptOpeatorSet.find(AnfAlgo::GetCNodeName(cnode)) != kOptOpeatorSet.end()) { + if (kOptOperatorSet.find(AnfAlgo::GetCNodeName(cnode)) != kOptOperatorSet.end()) { momentum_list.emplace_back(cnode); } else { other_list.emplace_back(cnode); @@ -118,7 +118,7 @@ void KernelAdjust::CreateSwitchOpParameters(const std::shared_ptrToAbstract(); if (paremeter_abstract_ptr == nullptr) { - MS_LOG(EXCEPTION) << "create abstract brfore insert switch op failed!"; + MS_LOG(EXCEPTION) << "create abstract before insert switch op failed!"; } ParameterPtr loop_count = std::make_shared(kernel_graph_ptr); @@ -371,7 +371,7 @@ bool KernelAdjust::StepLoadCtrlInputs(const std::shared_ptr &c auto tensor = inputs[i]; size_t deal_index = input_nodes.size() - input_ctrl_size + i; if (deal_index >= input_nodes.size()) { - MS_LOG(EXCEPTION) << "deak_index[" << deal_index << "] outof range"; + MS_LOG(EXCEPTION) << "deal_index[" << deal_index << "] out of range"; } auto input_node = input_nodes[deal_index]; bool need_sync = false; @@ -439,7 +439,7 @@ void KernelAdjust::LoadSwitchInputs(std::vector *inputs) { void KernelAdjust::Profiling(const 
std::shared_ptr &kernel_graph_ptr) { if (!ascend::ProfilingManager::GetInstance().IsProfiling()) { - MS_LOG(INFO) << "no need to profiling"; + MS_LOG(INFO) << "No need to profiling"; return; } ProfilingTraceInfo profiling_trace_info; @@ -452,10 +452,10 @@ void KernelAdjust::Profiling(const std::shared_ptr &kernel void KernelAdjust::InsertProfilingKernel(const std::shared_ptr &kernel_graph_ptr, const ProfilingTraceInfo &profiling_trace_info) { - MS_LOG(INFO) << "[profiling] insert profiling kernel start"; + MS_LOG(INFO) << "[profiling] Insert profiling kernel start"; MS_EXCEPTION_IF_NULL(kernel_graph_ptr); if (!profiling_trace_info.IsValid()) { - MS_LOG(WARNING) << "profiling trace point not found"; + MS_LOG(WARNING) << "Profiling trace point not found"; return; } std::vector new_cnode_list; diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index 878fe4a7f8..0a9be35fb5 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -241,7 +241,7 @@ void KernelRuntime::AssignStaticMemoryInput(const session::KernelGraph *graph) { auto output_size = AnfAlgo::GetOutputTensorNum(item); for (size_t index = 0; index < output_size; index++) { TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(item, index); - // if graph output is a weight and doesn't link to any cnode,it's data type will be unkonwn + // if graph output is a weight and doesn't link to any cnode, it's data type will be unknown if (output_type_id == kTypeUnknown) { MS_LOG(WARNING) << "It is not suggested to use a lonely weight parameter as the output of graph"; output_type_id = AnfAlgo::GetOutputInferDataType(item, index); @@ -372,7 +372,7 @@ void KernelRuntime::AssignNodeOutputMem(int flag, const AnfNodePtr &node, int in continue; } if (AnfAlgo::OutputAddrExist(node, i)) { - MS_LOG(INFO) << "already malloc index:" << i; + MS_LOG(INFO) << "Already malloc index:" << i; continue; } auto ptr = CalDeviceMem(node, 
output_sizes[i], flag, i); @@ -392,7 +392,7 @@ void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const MS_EXCEPTION_IF_NULL(node_value); auto tensor = node_value->cast(); if (tensor == nullptr) { - MS_LOG(WARNING) << "tensor is null"; + MS_LOG(WARNING) << "Tensor is null"; return; } size_t tensor_size = tensor->data().nbytes(); @@ -595,7 +595,7 @@ void KernelRuntime::GenLaunchArgs(const mindspore::kernel::KernelMod &kernel_mod void KernelRuntime::GenAddrCleanLaunchArgs(const CNodePtr &cnode, AddressPtrList *kernel_inputs) { if (cnode->inputs().size() != 2) { - MS_LOG(EXCEPTION) << "atomic Addr clean Node Input nodes not equal 2."; + MS_LOG(EXCEPTION) << "Atomic Addr clean Node Input nodes not equal 2."; } auto pre_node = cnode->inputs()[1]; // set clean output address @@ -721,11 +721,11 @@ uint8_t *KernelRuntime::MallocDynamicMem(size_t size, bool communication_mem) { bool KernelRuntime::LaunchKernel(const session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); if (!LaunchKernelMod(*graph)) { - MS_LOG(ERROR) << "LaunchKernelMod failed."; + MS_LOG(ERROR) << "LaunchKernelMod failed!"; return false; } if (!SyncStream()) { - MS_LOG(ERROR) << "SyncStream failed."; + MS_LOG(ERROR) << "SyncStream failed!"; return false; } return true; diff --git a/mindspore/ccsrc/device/kernel_runtime_manager.cc b/mindspore/ccsrc/device/kernel_runtime_manager.cc index 6303bc88a8..5a70d75cf9 100644 --- a/mindspore/ccsrc/device/kernel_runtime_manager.cc +++ b/mindspore/ccsrc/device/kernel_runtime_manager.cc @@ -67,7 +67,7 @@ KernelRuntime *KernelRuntimeManager::GetKernelRuntime(const std::string &device_ MS_EXCEPTION_IF_NULL(kernel_runtime); runtime_map_[runtime_key] = kernel_runtime; } else { - MS_LOG(EXCEPTION) << "no kernel runtime creator for " << device_name << " with device id " << device_id; + MS_LOG(EXCEPTION) << "No kernel runtime creator for " << device_name << " with device id " << device_id; } return kernel_runtime.get(); diff --git 
a/mindspore/ccsrc/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/session/anf_runtime_algorithm.cc index c0dca4522a..cc23dbbdd2 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.cc @@ -65,7 +65,7 @@ KernelWithIndex AnfRuntimeAlgorithm::VisitKernel(const AnfNodePtr &anf_node, siz return VisitKernel(node, 0); } else if (IsPrimitive(input0, prim::kPrimTupleGetItem)) { if (cnode->inputs().size() != kTupleGetItemInputSize) { - MS_LOG(EXCEPTION) << "the node tuple_get_item must have 2 inputs!"; + MS_LOG(EXCEPTION) << "The node tuple_get_item must have 2 inputs!"; } auto input2 = cnode->input(kInputNodeOutputIndexInTupleGetItem); MS_EXCEPTION_IF_NULL(input2); @@ -102,7 +102,7 @@ KernelWithIndex AnfRuntimeAlgorithm::VisitKernelWithReturnType(const AnfNodePtr MS_EXCEPTION_IF_NULL(input0); if (IsPrimitive(input0, prim::kPrimTupleGetItem)) { if (cnode->inputs().size() != kTupleGetItemInputSize) { - MS_LOG(EXCEPTION) << "the node tuple_get_item must have 2 inputs!"; + MS_LOG(EXCEPTION) << "The node tuple_get_item must have 2 inputs!"; } auto input2 = cnode->input(kInputNodeOutputIndexInTupleGetItem); MS_EXCEPTION_IF_NULL(input2); @@ -188,7 +188,7 @@ std::string AnfRuntimeAlgorithm::GetNodeDebugString(const AnfNodePtr &node) { void AnfRuntimeAlgorithm::SetNodeAttr(const std::string &key, const ValuePtr &value, const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); if (!node->isa()) { - MS_LOG(EXCEPTION) << "only cnode has attr,but this anf is " << node->DebugString(); + MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node->DebugString(); } auto primitive = AnfAlgo::GetCNodePrimitive(node); MS_EXCEPTION_IF_NULL(primitive); @@ -204,7 +204,7 @@ void AnfRuntimeAlgorithm::CopyNodeAttr(const std::string &old_key, const std::st MS_EXCEPTION_IF_NULL(from); MS_EXCEPTION_IF_NULL(to); if (!from->isa() || !to->isa()) { - MS_LOG(EXCEPTION) << "only cnode has attr,but this from_anf is " << from->DebugString() 
<< " ,to_node is " + MS_LOG(EXCEPTION) << "Only cnode has attr, but this from_anf is " << from->DebugString() << " ,to_node is " << to->DebugString(); } auto from_primitive = AnfAlgo::GetCNodePrimitive(from); @@ -218,7 +218,7 @@ void AnfRuntimeAlgorithm::CopyNodeAttrs(const AnfNodePtr &from, const AnfNodePtr MS_EXCEPTION_IF_NULL(from); MS_EXCEPTION_IF_NULL(to); if (!from->isa() || !to->isa()) { - MS_LOG(EXCEPTION) << "only cnode has attr,but this from_anf is " << from->DebugString() << ",to_node is " + MS_LOG(EXCEPTION) << "Only cnode has attr, but this from_anf is " << from->DebugString() << ",to_node is " << from->DebugString(); } auto from_primitive = AnfAlgo::GetCNodePrimitive(from); @@ -231,7 +231,7 @@ void AnfRuntimeAlgorithm::CopyNodeAttrs(const AnfNodePtr &from, const AnfNodePtr void AnfRuntimeAlgorithm::EraseNodeAttr(const std::string &key, const AnfNodePtr node) { MS_EXCEPTION_IF_NULL(node); if (!node->isa()) { - MS_LOG(EXCEPTION) << "only cnode has attr,but this anf is " << node->DebugString(); + MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node->DebugString(); } auto primitive = AnfAlgo::GetCNodePrimitive(node); MS_EXCEPTION_IF_NULL(primitive); @@ -241,7 +241,7 @@ void AnfRuntimeAlgorithm::EraseNodeAttr(const std::string &key, const AnfNodePtr bool AnfRuntimeAlgorithm::HasNodeAttr(const std::string &key, const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); if (!node->isa()) { - MS_LOG(WARNING) << "only cnode has attr,but this anf is " << node->DebugString(); + MS_LOG(WARNING) << "Only cnode has attr, but this anf is " << node->DebugString(); return false; } auto primitive = AnfAlgo::GetCNodePrimitive(node); @@ -252,7 +252,7 @@ bool AnfRuntimeAlgorithm::HasNodeAttr(const std::string &key, const AnfNodePtr & size_t AnfRuntimeAlgorithm::GetInputTensorNum(const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); if (!node->isa()) { - MS_LOG(EXCEPTION) << "only cnode has real input,but this anf is " << node->DebugString(); + MS_LOG(EXCEPTION) 
<< "Only cnode has real input, but this anf is " << node->DebugString(); } auto cnode = node->cast(); MS_EXCEPTION_IF_NULL(cnode); @@ -404,7 +404,7 @@ std::vector AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNode MS_EXCEPTION_IF_NULL(build_info); std::vector result; if (!build_info->GetInputReshapeType(input_idx, &result)) { - MS_LOG(EXCEPTION) << "filed to ge the node's[ " << node->DebugString() << "] reshape type !"; + MS_LOG(EXCEPTION) << "Failed to get the node's[ " << node->DebugString() << "] reshape type !"; } return result; } @@ -417,7 +417,7 @@ std::vector AnfRuntimeAlgorithm::GetOutputReshapeType(const AnfNod MS_EXCEPTION_IF_NULL(build_info); std::vector result; if (!build_info->GetOutputReshapeType(output_idx, &result)) { - MS_LOG(EXCEPTION) << "filed to ge the node's[ " << node->DebugString() << "] reshape type !"; + MS_LOG(EXCEPTION) << "Failed to get the node's[ " << node->DebugString() << "] reshape type !"; } return result; } @@ -593,7 +593,7 @@ void AnfRuntimeAlgorithm::SetOutputAddr(const DeviceAddressPtr &addr, size_t out auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); if (!kernel_info->SetOutputAddr(addr, output_idx)) { - MS_LOG(EXCEPTION) << "node " << node->DebugString() << "set adr" << output_idx << " fail"; + MS_LOG(EXCEPTION) << "Node " << node->DebugString() << "set adr" << output_idx << " fail"; } } @@ -603,7 +603,7 @@ void AnfRuntimeAlgorithm::SetWorkspaceAddr(const DeviceAddressPtr &addr, size_t auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); if (!kernel_info->SetWorkspaceAddr(addr, output_idx)) { - MS_LOG(EXCEPTION) << "node " << node->DebugString() << "set adr" << output_idx << " fail"; + MS_LOG(EXCEPTION) << "Node " << node->DebugString() << "set adr" << output_idx << " fail"; } } @@ -614,7 +614,7 @@ DeviceAddress *AnfRuntimeAlgorithm::GetWorkspaceAddr(const AnfNodePtr &node, siz MS_EXCEPTION_IF_NULL(kernel_info); auto addr = kernel_info->GetWorkspaceAddr(output_idx); if 
(addr == nullptr) { - MS_LOG(EXCEPTION) << "output_idx " << output_idx << " of node " << node->DebugString() + MS_LOG(EXCEPTION) << "Output_idx " << output_idx << " of node " << node->DebugString() << "] workspace addr is not exist"; } return addr; @@ -625,7 +625,7 @@ void AnfRuntimeAlgorithm::SetOutputInferTypeAndShape(const std::vector & const std::vector> &shapes, AnfNode *node) { MS_EXCEPTION_IF_NULL(node); if (types.size() != shapes.size()) { - MS_LOG(EXCEPTION) << "types size " << types.size() << "should be same with shapes size " << shapes.size(); + MS_LOG(EXCEPTION) << "Types size " << types.size() << "should be same with shapes size " << shapes.size(); } if (shapes.empty()) { MS_LOG(EXCEPTION) << "Illegal empty output_types_shapes"; @@ -636,7 +636,7 @@ void AnfRuntimeAlgorithm::SetOutputInferTypeAndShape(const std::vector & auto abstract = std::make_shared(TypeIdToType(types[0]), shape_int); node->set_abstract(abstract); } else { - // mutiple output handle + // multiple output handle std::vector abstract_list; for (size_t i = 0; i < types.size(); ++i) { std::vector shape_int; @@ -647,12 +647,12 @@ void AnfRuntimeAlgorithm::SetOutputInferTypeAndShape(const std::vector & node->set_abstract(abstract_tuple); } } -// copy a abstract of a node to another node +// copy an abstract of a node to another node void AnfRuntimeAlgorithm::CopyAbstract(const AnfNodePtr &from_node, AnfNode *to_node) { to_node->set_abstract(from_node->abstract()); } -// get KernelBuildType of node ,such as ATT,RT,FWK and so on +// get KernelBuildType of node, such as ATT,RT,FWK and so on KernelType AnfRuntimeAlgorithm::GetKernelType(const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); auto kernel_info = node->kernel_info(); @@ -846,7 +846,7 @@ size_t AnfRuntimeAlgorithm::GetRealInputIndex(const mindspore::AnfNodePtr &anf_n auto find = spec_node_list.find(node_name); if (find != spec_node_list.end()) { ret = find->second[cur_index]; - MS_LOG(INFO) << "real input index change to" << ret << 
", node name:" << node_name; + MS_LOG(INFO) << "Real input index change to" << ret << ", node name:" << node_name; } } return ret; diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.h b/mindspore/ccsrc/session/anf_runtime_algorithm.h index 60d373d5ad..2de68f0098 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.h +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.h @@ -61,7 +61,7 @@ class AnfRuntimeAlgorithm { MS_EXCEPTION_IF_NULL(node); if (!node->isa()) { std::string node_debug_log = node->DebugString(); - MS_LOG(EXCEPTION) << "only cnode has attr,but this anf is " << node_debug_log.c_str(); + MS_LOG(EXCEPTION) << "Only cnode has attr, but this anf is " << node_debug_log.c_str(); } auto primitive = GetCNodePrimitive(node); MS_EXCEPTION_IF_NULL(primitive); @@ -105,7 +105,7 @@ class AnfRuntimeAlgorithm { static TypeId GetOutputInferDataType(const AnfNodePtr &node, size_t output_idx); // get output original data type from prev node,input_index is the input index of current node related to prev node static TypeId GetPrevNodeOutputInferDataType(const AnfNodePtr &node, size_t input_idx); - // get output select data typpe of anf node + // get output select data type of anf node static TypeId GetOutputDeviceDataType(const AnfNodePtr &node, size_t output_idx); // get input select data type of anf node static TypeId GetInputDeviceDataType(const AnfNodePtr &node, size_t input_idx); diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc index 1a29450313..e0027e8fe5 100644 --- a/mindspore/ccsrc/session/ascend_session.cc +++ b/mindspore/ccsrc/session/ascend_session.cc @@ -55,7 +55,7 @@ void DumpGraphExeOrder(const std::vector &execution_order) { } void DumpGraphInputArgs(const VectorRef &args) { - MS_LOG(INFO) << "args size[%lu]" << args.size(); + MS_LOG(INFO) << "Args size[%lu]" << args.size(); for (size_t i = 0; i < args.size(); i++) { if (utils::isa(args[i])) { auto anf = utils::cast(args[i]); @@ -66,7 +66,7 @@ 
void DumpGraphInputArgs(const VectorRef &args) { MS_EXCEPTION_IF_NULL(value); MS_LOG(INFO) << "Tensor arg" << i << " = " << value->ToString(); } else { - MS_LOG(INFO) << "Unknonwn arg" << i << " = " << args[i].ToString(); + MS_LOG(INFO) << "Unknown arg" << i << " = " << args[i].ToString(); } } } @@ -95,7 +95,7 @@ GraphId GetDistinctionLabel(const KernelGraphPtr &graph) { GraphId AscendSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) { MS_LOG(INFO) << "start"; auto graph_id = graph_sum_; - // construct graph,if construct successs,graph_sum_ + 1 + // construct graph, if successfully, graph_sum_ + 1 auto graph = ConstructKernelGraph(lst, outputs); MS_EXCEPTION_IF_NULL(graph); opt::AscendBackendIRFusionOptimization(graph); @@ -127,7 +127,7 @@ void AscendSession::BuildGraph(GraphId graph_id) { // merge child graph MergeGraphExecOrder(); } else { - // set the distinciton label of single graph + // set the distinction label of single graph SetStreamDistinctionLabel(GetGraph(graph_id), graph_id, false); } // adjust execution order because merge child graph and other special operations @@ -143,7 +143,7 @@ void AscendSession::BuildGraph(GraphId graph_id) { if (ms_context->precompile_only()) { MS_LOG(INFO) << "Precompile only, stop in build kernel step"; } else { - // alloc memeory,include static memory and dynamic memory + // alloc memory, including static memory and dynamic memory MemoryAlloc(graph.get()); // generate task info for task sink mode GenerateTaskInfo(graph); @@ -158,9 +158,9 @@ void AscendSession::RunGraph(const GraphId &graph_id, const std::vectorexecutable()) { - MS_LOG(INFO) << "no child graph but has anf output"; + MS_LOG(INFO) << "No child graph has anf output"; UpdateOutputs(kernel_graph, outputs, inputs); return; } @@ -183,16 +183,16 @@ void AscendSession::RunGraph(const GraphId &graph_id, const std::vector &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start"; // data layout optimization 
opt::AscendDataLayout(kernel_graph); // mixed precision optimization opt::AscendMixPrecision(kernel_graph); - MS_LOG(INFO) << "Finish!"; + MS_LOG(INFO) << "Finish"; } void AscendSession::RunOpExecTask(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id_); MS_EXCEPTION_IF_NULL(runtime_instance); bool ret_ok = runtime_instance->LaunchKernel(kernel_graph.get()); @@ -251,26 +251,26 @@ py::tuple AscendSession::RunOp(const OpRunInfo &op_run_info, const GraphInfo &gr // compile graph steps void AscendSession::SelectKernel(const KernelGraph &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; for (const auto &cnode : kernel_graph.execution_order()) { device::ascend::SelectKernelInfo(cnode); - MS_LOG(INFO) << "select ApplyKernel: " << cnode->DebugString(); + MS_LOG(INFO) << "Select ApplyKernel: " << cnode->DebugString(); } MS_LOG(INFO) << "Finish!"; } void AscendSession::InitRuntimeResource() { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id_); MS_EXCEPTION_IF_NULL(runtime_instance); if (!runtime_instance->Init()) { - MS_LOG(EXCEPTION) << "kernel runtime init error."; + MS_LOG(EXCEPTION) << "Kernel runtime init error."; } MS_LOG(INFO) << "Finish!"; } void AscendSession::HardwareOptimize(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "HardwareOptimize start !"; + MS_LOG(INFO) << "HardwareOptimize start!"; opt::AscendBackendOptimization(kernel_graph); MS_EXCEPTION_IF_NULL(kernel_graph); kernel_graph->SetExecOrderByDefault(); @@ -278,7 +278,7 @@ void AscendSession::HardwareOptimize(const std::shared_ptr &kernel_ } void AscendSession::AdjustKernel(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; 
device::KernelAdjust::GetInstance().Reorder(kernel_graph); opt::HideNopNode(kernel_graph.get()); // Insert CLearZero op @@ -301,7 +301,7 @@ void AscendSession::AdjustKernel(const std::shared_ptr &kernel_grap } void AscendSession::RunOpAdjustKernel(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; opt::HideNopNode(kernel_graph.get()); // Insert CLearZero op // prepare for next step from json get atomic info @@ -311,18 +311,18 @@ void AscendSession::RunOpAdjustKernel(const std::shared_ptr &kernel } void AscendSession::AssignStream(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; device::ascend::AscendStreamAssign::GetInstance().AssignStreamNew(kernel_graph); MS_LOG(INFO) << "Finish!"; } void AscendSession::BuildKernel(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; struct timeval start_time, end_time; (void)gettimeofday(&start_time, nullptr); auto ret = device::ascend::KernelBuild(kernel_graph.get()); if (!ret) { - MS_LOG(EXCEPTION) << "kernel build error."; + MS_LOG(EXCEPTION) << "Kernel build error."; } (void)gettimeofday(&end_time, nullptr); const uint64_t kUSecondInSecond = 1000000; @@ -333,7 +333,7 @@ void AscendSession::BuildKernel(const std::shared_ptr &kernel_graph } void AscendSession::MemoryAlloc(KernelGraph *kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; MS_EXCEPTION_IF_NULL(kernel_graph); opt::RemoveNopNode(kernel_graph); auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id_); @@ -344,7 +344,7 @@ void AscendSession::MemoryAlloc(KernelGraph *kernel_graph) const { void AscendSession::RunOpMemoryAlloc(const std::vector &input_tensors, KernelGraph *kernel_graph) const { - MS_LOG(INFO) << "start memory alloc!"; + MS_LOG(INFO) << "Start memory alloc!"; MS_EXCEPTION_IF_NULL(kernel_graph); opt::RemoveNopNode(kernel_graph); 
auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id_); @@ -354,30 +354,30 @@ void AscendSession::RunOpMemoryAlloc(const std::vector &input } void AscendSession::GenerateTaskInfo(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; (void)device::KernelAdjust::GetInstance().StepLoadCtrlInputs(context_, kernel_graph); auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id_); MS_EXCEPTION_IF_NULL(runtime_instance); bool ret_ok = runtime_instance->GenTask(kernel_graph.get()); if (!ret_ok) { - MS_LOG(EXCEPTION) << "generate task error!"; + MS_LOG(EXCEPTION) << "Generate task error!"; } MS_LOG(INFO) << "Finish!"; } void AscendSession::LoadTask(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id_); MS_EXCEPTION_IF_NULL(runtime_instance); bool ret_ok = runtime_instance->LoadTask(kernel_graph.get()); if (!ret_ok) { - MS_LOG(EXCEPTION) << "load task error!"; + MS_LOG(EXCEPTION) << "Load task error!"; } MS_LOG(INFO) << "Finish!"; } void AscendSession::ExecTask(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id_); MS_EXCEPTION_IF_NULL(runtime_instance); bool ret_ok = runtime_instance->Run(kernel_graph.get()); @@ -388,7 +388,7 @@ void AscendSession::ExecTask(const std::shared_ptr &kernel_graph) c } void AscendSession::Dump(const std::shared_ptr &kernel_graph) const { - MS_LOG(INFO) << "start !"; + MS_LOG(INFO) << "Start!"; MS_EXCEPTION_IF_NULL(kernel_graph); auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(kAscendDevice, device_id_); MS_EXCEPTION_IF_NULL(runtime_instance); @@ -397,7 
+397,7 @@ void AscendSession::Dump(const std::shared_ptr &kernel_graph) const } GraphId AscendSession::SetFinalGraphInput(const std::vector &args) { - MS_LOG(INFO) << "start!args size " << args.size(); + MS_LOG(INFO) << "Start! Args size " << args.size(); auto final_graph = std::make_shared(); final_graph_id_ = graph_sum_++; graphs_[final_graph_id_] = final_graph; @@ -417,12 +417,12 @@ GraphId AscendSession::SetFinalGraphInput(const std::vector &args) { if (parameter_belong_graph_id == kInvalidGraphId) { parameter_backend = final_graph->NewParameter(parameter->cast()); final_graph->FrontBackendlMapAdd(parameter, parameter_backend); - MS_LOG(INFO) << "new parameter" << parameter->DebugString() << "in final_graph"; + MS_LOG(INFO) << "New parameter" << parameter->DebugString() << "in final_graph"; } else { // parametr is a parameter of child graph auto graph = GetGraph(parameter_belong_graph_id); MS_EXCEPTION_IF_NULL(graph); - MS_LOG(INFO) << "reuse parameter [" << parameter->DebugString() << "] of child graph [" + MS_LOG(INFO) << "Reuse parameter [" << parameter->DebugString() << "] of child graph [" << parameter_belong_graph_id << "]"; parameter_backend = graph->GetBackendAnfByFrontAnf(parameter); } @@ -434,7 +434,7 @@ GraphId AscendSession::SetFinalGraphInput(const std::vector &args) { MS_EXCEPTION_IF_NULL(final_graph_inputs); final_graph_inputs->push_back(parameter_backend); } - MS_LOG(INFO) << "end final_graph_id " << final_graph_id_; + MS_LOG(INFO) << "End final_graph_id " << final_graph_id_; return final_graph_id_; } @@ -453,7 +453,7 @@ void AscendSession::SetFinalGraphOutput(const BaseRef &output) { value_node->set_abstract(abstract::FromValue(value_ptr)); final_graph->set_output(final_graph->NewCNode({NewValueNode(prim::kPrimMakeTuple), value_node})); final_graph->set_executable(false); - MS_LOG(INFO) << "not anf output[" << output.ToString() << "]"; + MS_LOG(INFO) << "Not anf output[" << output.ToString() << "]"; return; } // get the backend anf node related 
to the output node of front @@ -461,12 +461,12 @@ void AscendSession::SetFinalGraphOutput(const BaseRef &output) { auto output_from_graph_id = GetGraphIdByNode(output_anf_node); auto output_from_graph = GetGraph(output_from_graph_id); MS_EXCEPTION_IF_NULL(output_anf_node); - MS_LOG(INFO) << "set the output[" << output_anf_node->DebugString() << "] of graph[" << output_from_graph_id + MS_LOG(INFO) << "Set the output[" << output_anf_node->DebugString() << "] of graph[" << output_from_graph_id << "] to final graph"; MS_EXCEPTION_IF_NULL(output_from_graph); // if output is from final graph,it remarks no child graph exist if (final_graph_id_ == output_from_graph_id) { - MS_LOG(INFO) << "no child graph,output is " << output_anf_node->DebugString(); + MS_LOG(INFO) << "No child graph,output is " << output_anf_node->DebugString(); final_graph->set_output(ConstructOutput({output_anf_node}, final_graph)); final_graph->set_executable(false); return; @@ -477,15 +477,15 @@ void AscendSession::SetFinalGraphOutput(const BaseRef &output) { KernelGraphPtr AscendSession::GetGraph(mindspore::GraphId graph_id) { auto it = graphs_.find(graph_id); if (it == graphs_.end()) { - MS_LOG(WARNING) << "can't find graph " << graph_id; + MS_LOG(WARNING) << "Can't find graph " << graph_id; return nullptr; } return it->second; } void AscendSession::InsertSwitchToGraph(GraphId condition_graph_id, GraphId true_graph_id) { - MS_LOG(INFO) << "start"; - MS_LOG(INFO) << "condition graph id[" << condition_graph_id << "],true graph id[" << true_graph_id << "]"; + MS_LOG(INFO) << "Start!"; + MS_LOG(INFO) << "Condition graph id[" << condition_graph_id << "],true graph id[" << true_graph_id << "]"; auto condition_graph = GetGraph(condition_graph_id); MS_EXCEPTION_IF_NULL(condition_graph); tensor::TensorPtr tensor = std::make_shared(kNumberTypeInt32, std::vector{1}); @@ -507,7 +507,7 @@ void AscendSession::InsertSwitchToGraph(GraphId condition_graph_id, GraphId true 
kernel_build_info_builder->SetKernelType(KernelType::RT_KERNEL); // condition graph's output must be single output if (condition_graph->outputs().size() != 1) { - MS_LOG(EXCEPTION) << "condition_graph output num " << condition_graph_id << " should be 1"; + MS_LOG(EXCEPTION) << "Condition_graph output num " << condition_graph_id << " should be 1"; } AnfNodePtr cond_output_kernel = condition_graph->outputs()[0]; std::vector inputs = {NewValueNode(switch_primitive), cond_output_kernel, counter_const}; @@ -527,7 +527,7 @@ void AscendSession::InsertSwitchToGraph(GraphId condition_graph_id, GraphId true std::vector exec_order = condition_graph->execution_order(); exec_order.push_back(switch_node); condition_graph->set_execution_order(exec_order); - MS_LOG(INFO) << "end"; + MS_LOG(INFO) << "Finish!"; } void AscendSession::CopyOutputOfIf(GraphId false_graph_id) { @@ -540,11 +540,11 @@ void AscendSession::CopyOutputOfIf(GraphId false_graph_id) { for (int i = SizeToInt(false_index) - 1; i >= 0; i--) { size_t graph_index = IntToSize(i); if (graph_index >= graph_execute_order.size()) { - MS_LOG(EXCEPTION) << "graph index[" << graph_index << "] out of range[" << graph_execute_order.size() << "]"; + MS_LOG(EXCEPTION) << "Graph index[" << graph_index << "] out of range[" << graph_execute_order.size() << "]"; } if (graph_order_type[graph_index] == COMMON_GRAPH) { auto true_last_id = graph_execute_order[graph_index]; - MS_LOG(INFO) << "the last graph of if true branch is " << true_last_id; + MS_LOG(INFO) << "The last graph of if true branch is " << true_last_id; auto true_last = GetGraph(true_last_id); auto final_graph = GetGraph(final_graph_id_); MS_EXCEPTION_IF_NULL(final_graph); @@ -552,10 +552,10 @@ void AscendSession::CopyOutputOfIf(GraphId false_graph_id) { auto false_last = GetGraph(false_last_id); MS_EXCEPTION_IF_NULL(true_last); MS_EXCEPTION_IF_NULL(false_last); - MS_LOG(INFO) << "the last graph of false branch is " << false_last_id; + MS_LOG(INFO) << "The last graph of 
false branch is " << false_last_id; // now only consider the single output InsertMultipleAssignToGraph(true_last_id, true_last->output(), false_last->output()); - // insert stream acitve for loop sink + // insert stream active for loop sink auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); if (context_ptr->enable_task_sink() && context_ptr->loop_sink_flag() && @@ -569,32 +569,32 @@ void AscendSession::CopyOutputOfIf(GraphId false_graph_id) { } void AscendSession::SwitchCompile(GraphId cond_graph_id, GraphId true_graph_id, GraphId false_graph_id) { - if (switchs_.find(cond_graph_id) != switchs_.end()) { - MS_LOG(WARNING) << "condition graph" << cond_graph_id << " has been set before "; + if (switches_.find(cond_graph_id) != switches_.end()) { + MS_LOG(WARNING) << "Condition graph" << cond_graph_id << " has been set before "; return; } - switchs_[cond_graph_id] = std::pair(true_graph_id, false_graph_id); - MS_LOG(INFO) << "new switch compile " << cond_graph_id << " " << true_graph_id << " " << false_graph_id; - // set the type of condtion graph + switches_[cond_graph_id] = std::pair(true_graph_id, false_graph_id); + MS_LOG(INFO) << "New switch compile " << cond_graph_id << " " << true_graph_id << " " << false_graph_id; + // set the type of condition graph auto cond_graph_index = ExecOrderOfChildGraph(final_graph_id_, cond_graph_id); auto &graph_order_type = GetGraphOrderType(final_graph_id_); if (cond_graph_index >= graph_order_type.size()) { MS_LOG(EXCEPTION) << "cond_graph_index " << cond_graph_index << " out of range " << graph_order_types_.size(); } graph_order_type[cond_graph_index] = CONDITION_GRAPH; - // update disinction label of false graph,update before merge to sure the distinction + // update distinction label of false graph,update before merge to sure the distinction if (false_graph_id != kInvalidGraphId) { // false graph and condition in graph same stream - auto conditon_graph = GetGraph(cond_graph_id); - 
SetStreamDistinctionLabel(GetGraph(false_graph_id), GetDistinctionLabel(conditon_graph), true); + auto condition_graph = GetGraph(cond_graph_id); + SetStreamDistinctionLabel(GetGraph(false_graph_id), GetDistinctionLabel(condition_graph), true); // if false graph is a condition graph and has been switch compiled before,it's false should be updated again - auto cond_it = switchs_.find(false_graph_id); - while (cond_it != switchs_.end() && cond_it->second.second != kInvalidGraphId) { + auto cond_it = switches_.find(false_graph_id); + while (cond_it != switches_.end() && cond_it->second.second != kInvalidGraphId) { cond_graph_id = cond_it->first; false_graph_id = cond_it->second.second; - conditon_graph = GetGraph(cond_graph_id); - SetStreamDistinctionLabel(GetGraph(false_graph_id), GetDistinctionLabel(conditon_graph), true); - cond_it = switchs_.find(false_graph_id); + condition_graph = GetGraph(cond_graph_id); + SetStreamDistinctionLabel(GetGraph(false_graph_id), GetDistinctionLabel(condition_graph), true); + cond_it = switches_.find(false_graph_id); } } } // namespace session @@ -602,11 +602,11 @@ void AscendSession::SwitchCompile(GraphId cond_graph_id, GraphId true_graph_id, void AscendSession::MergeSwitchCompile() { auto graph_execute_order = GetGraphOrder(final_graph_id_); auto &graph_order_type = GetGraphOrderType(final_graph_id_); - for (auto switch_compile : switchs_) { + for (auto switch_compile : switches_) { auto cond_graph_id = switch_compile.first; auto true_graph_id = switch_compile.second.first; auto false_graph_id = switch_compile.second.second; - MS_LOG(INFO) << "switch compile: " << cond_graph_id << " " << true_graph_id << " " << false_graph_id; + MS_LOG(INFO) << "Switch compile: " << cond_graph_id << " " << true_graph_id << " " << false_graph_id; auto condition_graph = GetGraph(cond_graph_id); auto final_graph = GetGraph(final_graph_id_); MS_EXCEPTION_IF_NULL(condition_graph); @@ -630,8 +630,8 @@ void AscendSession::MergeSwitchCompile() { 
InsertStreamActiveToGraph(prev_graph_id, GetDistinctionLabel(condition_graph)); } // if this is a 'if' condition - auto it = while_condtion_graphs_.find(cond_graph_id); - if (it == while_condtion_graphs_.end()) { + auto it = while_condition_graphs_.find(cond_graph_id); + if (it == while_condition_graphs_.end()) { CopyOutputOfIf(false_graph_id); } else { // if it is a while,insert a stream active to true graph @@ -639,17 +639,17 @@ void AscendSession::MergeSwitchCompile() { InsertStreamActiveToGraph(from_graph, GetDistinctionLabel(condition_graph)); } } - MS_LOG(INFO) << "end"; + MS_LOG(INFO) << "Finish!"; } // insert active to graph void AscendSession::SetActive(GraphId from, GraphId to) { - if (while_condtion_graphs_.find(to) != while_condtion_graphs_.end()) { + if (while_condition_graphs_.find(to) != while_condition_graphs_.end()) { MS_LOG(WARNING) << " to " << to << " has been exits in map,from " << from << ",exist from " - << while_condtion_graphs_[to]; + << while_condition_graphs_[to]; return; } - MS_LOG(INFO) << "from " << from << " to " << to; + MS_LOG(INFO) << "From " << from << " to " << to; auto &graph_order = GetGraphOrder(final_graph_id_); auto &graph_type = GetGraphOrderType(final_graph_id_); std::vector graph_order_new; @@ -668,21 +668,21 @@ void AscendSession::SetActive(GraphId from, GraphId to) { // set the graph type of condition graph graph_type[ExecOrderOfChildGraph(final_graph_id_, to)] = CONDITION_GRAPH; // record the condition graph into while condition set - while_condtion_graphs_[to] = from; + while_condition_graphs_[to] = from; } void AscendSession::SetChildGraphParameter(const AnfNodePtr &front_anf, const AnfNodePtr &backend_parameter) { - MS_LOG(INFO) << "start"; + MS_LOG(INFO) << "Start!"; MS_EXCEPTION_IF_NULL(backend_parameter); MS_EXCEPTION_IF_NULL(front_anf); if (!backend_parameter->isa()) { - MS_LOG(EXCEPTION) << "backend parameter's type is not a parameter,but is " << backend_parameter->ToString(); + MS_LOG(EXCEPTION) << "Backend 
parameter's type is not a parameter,but is " << backend_parameter->ToString(); } auto from_graph_id = GetGraphIdByNode(front_anf); auto from_graph = GetGraph(from_graph_id); MS_EXCEPTION_IF_NULL(from_graph); - MS_LOG(INFO) << "set node[" << front_anf->DebugString() << "] of graph[" << from_graph_id << "]to node[" + MS_LOG(INFO) << "Set node[" << front_anf->DebugString() << "] of graph[" << from_graph_id << "]to node[" << backend_parameter->DebugString() << "] of graph[" << AnfAlgo::GetGraphId(backend_parameter.get()) << "]"; // a node should not assign to itself @@ -696,26 +696,27 @@ void AscendSession::SetChildGraphParameter(const AnfNodePtr &front_anf, const An if (!AnfAlgo::OutputAddrExist(backend_arg, 0)) { // set parameter's addr in child graph to parameter in final graph AnfAlgo::SetOutputAddr(AnfAlgo::GetMutableOutputAddr(backend_parameter, 0), 0, backend_arg.get()); - MS_LOG(INFO) << "assign mem of node" << backend_parameter->DebugString() << " of graph " + MS_LOG(INFO) << "Assign mem of node" << backend_parameter->DebugString() << " of graph " << AnfAlgo::GetGraphId(backend_parameter.get()) << " to node" << backend_arg->DebugString() << "of graph " << AnfAlgo::GetGraphId(backend_arg.get()); return; } } InsertMultipleAssignToGraph(from_graph_id, backend_arg, backend_parameter); - // if front anf is a parameter,we can assign the value back,because backend_parameter won't be change in it's graph - // unless it's a weigth.If backend_parameter is a weight,we do should assign the value back + // if front anf is a parameter, we can assign the value back, because backend_parameter + // won't be changed in it's graph unless it's a weight. If backend_parameter is a weight, + // we do should assign the value back. 
auto to_graph_id = AnfAlgo::GetGraphId(backend_parameter.get()); auto to_graph = GetGraph(to_graph_id); MS_EXCEPTION_IF_NULL(to_graph); if (backend_arg->isa() && !to_graph->execution_order().empty()) { InsertMultipleAssignToGraph(to_graph_id, backend_parameter, backend_arg); } - MS_LOG(INFO) << "end"; + MS_LOG(INFO) << "Finish!"; } void AscendSession::SetChildGraphParameter(const tensor::TensorPtr &front_tensor, const AnfNodePtr &backend_parameter) { - MS_LOG(INFO) << "start"; + MS_LOG(INFO) << "Start!"; // sync data from host to device MS_EXCEPTION_IF_NULL(front_tensor); size_t tensor_size = front_tensor->data().nbytes(); @@ -723,9 +724,9 @@ void AscendSession::SetChildGraphParameter(const tensor::TensorPtr &front_tensor MS_EXCEPTION_IF_NULL(addr); if (!addr->SyncHostToDevice(front_tensor->shape(), tensor_size, front_tensor->data_type(), front_tensor->data_c(false))) { - MS_LOG(EXCEPTION) << "tensor SyncHostToDevice fail!"; + MS_LOG(EXCEPTION) << "Tensor SyncHostToDevice fail!"; } - MS_LOG(INFO) << "end"; + MS_LOG(INFO) << "Finish!"; } void AscendSession::UpdateGraphOrder(GraphId to_graph_id) { @@ -742,7 +743,7 @@ void AscendSession::UpdateGraphOrder(GraphId to_graph_id) { graph_order.push_back(to_graph_id); graph_type.push_back(COMMON_GRAPH); for (size_t i = 0; i < graph_order.size(); i++) { - MS_LOG(INFO) << "index " << i << ",graph_id " << graph_order[i] << ",graph_type" << graph_type[i]; + MS_LOG(INFO) << "Index " << i << ",graph_id " << graph_order[i] << ",graph_type" << graph_type[i]; } } @@ -774,10 +775,10 @@ void AscendSession::SetChildGraphInput(GraphId g, const VectorRef &args) { SetChildGraphParameter(value->cast(), graph_inputs[input_index]); input_index++; } else { - MS_LOG(EXCEPTION) << "Unxpected arg type " << args[i].ToString(); + MS_LOG(EXCEPTION) << "Unexpected arg type " << args[i].ToString(); } } - MS_LOG(INFO) << "end"; + MS_LOG(INFO) << "Finish!"; } GraphId AscendSession::GetGraphIdByNode(const AnfNodePtr &front_anf) const { @@ -795,7 +796,7 
@@ GraphId AscendSession::GetGraphIdByNode(const AnfNodePtr &front_anf) const { } void AscendSession::MergeGraphExecOrder() { - MS_LOG(INFO) << "start"; + MS_LOG(INFO) << "Start!"; // insert switch to graph MergeSwitchCompile(); // merge graph order @@ -804,7 +805,7 @@ void AscendSession::MergeGraphExecOrder() { auto final_graph = GetGraph(final_graph_id_); MS_EXCEPTION_IF_NULL(final_graph); if (graph_order.empty()) { - MS_LOG(WARNING) << "graph output is a lonely variable not linked to any op!"; + MS_LOG(WARNING) << "Graph output is a lonely variable not linked to any op!"; return; } // if first graph is common,the final graph has no label,then set the stream of final graph same with the first graph @@ -820,7 +821,7 @@ void AscendSession::MergeGraphExecOrder() { last_graph = child_graph; MS_EXCEPTION_IF_NULL(child_graph); auto exec_order = child_graph->execution_order(); - MS_LOG(INFO) << "merge graph,graph_id " << graph_id; + MS_LOG(INFO) << "Merge graph,graph_id " << graph_id; (void)std::copy(exec_order.begin(), exec_order.end(), std::back_inserter(final_exec_order)); // add all value nodes of child graphs to final graph for (auto &value_node : child_graph->graph_value_nodes()) { @@ -874,11 +875,11 @@ void AscendSession::InsertAssignToGraph(GraphId graph_id, const AnfNodePtr &from void AscendSession::InsertMultipleAssignToGraph(GraphId graph_id, const AnfNodePtr &from, const AnfNodePtr &to) { std::vector from_outputs = AnfAlgo::GetAllOutput(from, {prim::kPrimTupleGetItem}); std::vector to_outputs = AnfAlgo::GetAllOutput(to, {prim::kPrimTupleGetItem}); - MS_LOG(INFO) << "insert assigns from [" << AnfAlgo::GetGraphId(from.get()) << "] to [" + MS_LOG(INFO) << "Insert assigns from [" << AnfAlgo::GetGraphId(from.get()) << "] to [" << AnfAlgo::GetGraphId(to.get()) << "]"; if (from_outputs.size() != to_outputs.size()) { - MS_LOG(INFO) << "from[" << from->DebugString(5) << "] to[" << to->DebugString(5) << "]"; - MS_LOG(EXCEPTION) << "from outputs size[" << 
from_outputs.size() << "] is not equal to to outputs size[" + MS_LOG(INFO) << "From[" << from->DebugString(5) << "] to[" << to->DebugString(5) << "]"; + MS_LOG(EXCEPTION) << "From outputs size[" << from_outputs.size() << "] is not equal to to outputs size[" << to_outputs.size() << "]"; } for (size_t i = 0; i < from_outputs.size(); i++) { @@ -897,7 +898,7 @@ void AscendSession::InsertStreamActiveToGraph(GraphId graph_id, uint32_t actived auto kernel_build_info_builder = std::make_shared(); kernel_build_info_builder->SetKernelType(KernelType::RT_KERNEL); AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info_builder->Build(), active_node.get()); - // set the actived stream id into the attr of active node + // set the active stream id into the attr of active node std::vector active_index_value = {}; active_index_value.push_back(actived_stream); AnfAlgo::SetNodeAttr(kAttrActiveStreamList, MakeValue>(active_index_value), active_node); @@ -921,7 +922,7 @@ size_t AscendSession::ExecOrderOfChildGraph(GraphId final_graph, GraphId child_g std::vector &AscendSession::GetGraphOrder(GraphId final_graph_id) { auto graph_order_iter = graph_execute_orders_.find(final_graph_id); if (graph_order_iter == graph_execute_orders_.end()) { - MS_LOG(EXCEPTION) << "final graph" << final_graph_id << "has no child graph"; + MS_LOG(EXCEPTION) << "Final graph" << final_graph_id << "has no child graph"; } return graph_order_iter->second; } @@ -930,7 +931,7 @@ std::vector &AscendSession::GetGraphOrder(GraphId final_graph_id) { std::vector &AscendSession::GetGraphOrderType(GraphId final_graph_id) { auto graph_type_iter = graph_order_types_.find(final_graph_id); if (graph_type_iter == graph_order_types_.end()) { - MS_LOG(EXCEPTION) << "final graph" << final_graph_id << "has no graph_order_types_"; + MS_LOG(EXCEPTION) << "Final graph" << final_graph_id << "has no graph_order_types_"; } return graph_type_iter->second; } diff --git a/mindspore/ccsrc/session/ascend_session.h 
b/mindspore/ccsrc/session/ascend_session.h index e5cfb52f2a..caec4b35f7 100644 --- a/mindspore/ccsrc/session/ascend_session.h +++ b/mindspore/ccsrc/session/ascend_session.h @@ -48,13 +48,14 @@ class AscendSession : public SessionBasic { GraphId SetFinalGraphInput(const std::vector &args) override; // set output of final graph void SetFinalGraphOutput(const BaseRef &output) override; - // insert switch and set the relative acitve ops + // insert switch and set the relative active ops void SwitchCompile(GraphId cond_g, GraphId true_g, GraphId false_g) override; - // set args of child graph.the arg maybe come from a output of other child graphs,or from final graph's parameter + // set args of child graph. the arg maybe come from a output of other child graphs, + // or from final graph's parameter void SetChildGraphInput(GraphId g, const VectorRef &args) override; // get graph id in child graphs by ME front anf node pointer GraphId GetGraphIdByNode(const AnfNodePtr &front_anf) const override; - // get grpah id of final graph + // get graph id of final graph GraphId GetFinalRunGraph() const override { return final_graph_id_; } // insert active to graph void SetActive(GraphId, GraphId) override; @@ -112,9 +113,9 @@ class AscendSession : public SessionBasic { // key is final_graph_id,value is the graph types of child graphs std::unordered_map> graph_order_types_; // record condition graph of while - std::unordered_map while_condtion_graphs_; - // record all conditons - std::unordered_map> switchs_; + std::unordered_map while_condition_graphs_; + // record all conditions + std::unordered_map> switches_; // final_graph_id is used in every root graph has it's own session situation GraphId final_graph_id_; }; diff --git a/mindspore/ccsrc/session/gpu_session.cc b/mindspore/ccsrc/session/gpu_session.cc index 196a2f300f..503c98e52a 100644 --- a/mindspore/ccsrc/session/gpu_session.cc +++ b/mindspore/ccsrc/session/gpu_session.cc @@ -83,7 +83,7 @@ void GPUSession::Execute(const 
std::shared_ptr &kernel_graph) const } GraphId GPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) { - // Construct graph, if construct successs, graph_sum_ + 1 + // Construct graph, if successfully, graph_sum_ + 1 auto graph_id = graph_sum_; auto graph = ConstructKernelGraph(lst, outputs); // Select kernel build info @@ -100,7 +100,7 @@ GraphId GPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList auto execution_order = graph->execution_order(); Reorder(&execution_order); graph->set_execution_order(execution_order); - // Alloc memeory, include static memory and dynamic memory + // Alloc memory, including static memory and dynamic memory AllocateMemory(graph.get()); // Reset memory resource auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_); diff --git a/mindspore/ccsrc/session/kernel_graph.cc b/mindspore/ccsrc/session/kernel_graph.cc index b07840aa98..84ff6b81a2 100644 --- a/mindspore/ccsrc/session/kernel_graph.cc +++ b/mindspore/ccsrc/session/kernel_graph.cc @@ -34,7 +34,7 @@ void PushNoVisitedNode(const AnfNodePtr &node, std::queue *que, if (visited_nodes->find(node) == visited_nodes->end()) { que->push(node); (void)visited_nodes->insert(node); - MS_LOG(DEBUG) << "push que:" << node->DebugString(); + MS_LOG(DEBUG) << "Push que:" << node->DebugString(); } } } // namespace @@ -58,7 +58,7 @@ void KernelGraph::SetExecOrderByDefault() { auto clear_output = [&zero_output_nodes, &allreduce_nodes, &visited_nodes, this](const AnfNodePtr &input) -> void { if (node_output_num_[input] == 0 && visited_nodes.find(input) == visited_nodes.end()) { MS_EXCEPTION_IF_NULL(input); - MS_LOG(DEBUG) << "clear output num:" << input->DebugString(); + MS_LOG(DEBUG) << "Clear output num:" << input->DebugString(); (void)visited_nodes.insert(input); if (input->isa() && AnfAlgo::GetCNodeName(input) == kAllReduceOpName) { allreduce_nodes.push(input); @@ -85,21 +85,21 @@ void 
KernelGraph::SetExecOrderByDefault() { if (it == node_input_edges_.end()) { // value node and parameter has no input,no need to print log if (node->isa()) { - MS_LOG(DEBUG) << "can not find node [" << node->DebugString() << "]"; + MS_LOG(DEBUG) << "Can not find node [" << node->DebugString() << "]"; } continue; } for (const auto &input_edge : it->second) { if (node_output_num_.find(input_edge.first) == node_output_num_.end()) { MS_EXCEPTION_IF_NULL(input_edge.first); - MS_LOG(EXCEPTION) << "can't find node[" << input_edge.first->DebugString() << "]"; + MS_LOG(EXCEPTION) << "Can't find node[" << input_edge.first->DebugString() << "]"; } MS_EXCEPTION_IF_NULL(input_edge.first); - MS_LOG(DEBUG) << "decrese input:" << input_edge.first->DebugString() << ",node:" << node->DebugString() + MS_LOG(DEBUG) << "Decrease input:" << input_edge.first->DebugString() << ",node:" << node->DebugString() << ",num: " << node_output_num_[input_edge.first] << ",decrease num:" << input_edge.second; if (node_output_num_[input_edge.first] < input_edge.second) { - MS_LOG(EXCEPTION) << "input node:" << input_edge.first->DebugString() << ",node_output_num" - << node_output_num_[input_edge.first] << "depend edege:" << input_edge.second; + MS_LOG(EXCEPTION) << "Input node:" << input_edge.first->DebugString() << ",node_output_num" + << node_output_num_[input_edge.first] << "depend edge:" << input_edge.second; } node_output_num_[input_edge.first] = node_output_num_[input_edge.first] - input_edge.second; clear_output(input_edge.first); @@ -120,20 +120,20 @@ void KernelGraph::CheckLoop() { string str; auto node_output_it = node_output_edges_.find(it.first); if (node_output_it == node_output_edges_.end()) { - MS_LOG(EXCEPTION) << "can't find node [" << it.first->DebugString() << "]"; + MS_LOG(EXCEPTION) << "Can't find node [" << it.first->DebugString() << "]"; } for (const auto &output_edge : node_output_edges_[it.first]) { MS_EXCEPTION_IF_NULL(output_edge.first); str = 
str.append(output_edge.first->DebugString()).append("|"); } if (it.second != 0) { - MS_LOG(WARNING) << "node:" << it.first->DebugString() << ",outputs:" << str << ",output num:" << it.second; + MS_LOG(WARNING) << "Node:" << it.first->DebugString() << ",outputs:" << str << ",output num:" << it.second; none_zero_output[it.first] = it.second; } } // if don't consider control depend and loop exit,a exception will be throw if (!none_zero_output.empty()) { - MS_LOG(EXCEPTION) << "nodes have loop,left node num:" << none_zero_output.size(); + MS_LOG(EXCEPTION) << "Nodes have loop, left node num:" << none_zero_output.size(); } } @@ -152,7 +152,7 @@ CNodePtr KernelGraph::NewCNode(const CNodePtr &cnode) { MS_EXCEPTION_IF_NULL(cnode); auto new_cnode = std::make_shared(*cnode); // if a cnode is created not from front,this cnode won't be in map,so when replace it,we shouldn't update map - if (BakcendNodeExistInFrontBackendMap(cnode)) { + if (BackendNodeExistInFrontBackendMap(cnode)) { FrontBackendlMapUpdate(cnode, new_cnode); } AnfAlgo::SetGraphId(graph_id_, cnode.get()); @@ -299,7 +299,7 @@ AnfNodePtr KernelGraph::GetBackendAnfByFrontAnf(const AnfNodePtr &front_anf) { return front_backend_anf_map_[front_anf]; } -bool KernelGraph::BakcendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf) { +bool KernelGraph::BackendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf) { return backend_front_anf_map_.find(backend_anf) != backend_front_anf_map_.end(); } @@ -317,9 +317,9 @@ void KernelGraph::TensorValueNodeMapAdd(const tensor::TensorPtr &tensor, const V } void KernelGraph::AddDependEdge(const AnfNodePtr &node, const AnfNodePtr &input, size_t depend_edge_num) { - MS_LOG(DEBUG) << "input:" << input->DebugString() << ", node:" << node->DebugString() << ",num:" << depend_edge_num; + MS_LOG(DEBUG) << "Input:" << input->DebugString() << ", node:" << node->DebugString() << ",num:" << depend_edge_num; auto output_depend_edge = std::pair(node, depend_edge_num); - // add output 
depend eddge of input + // add output depend edge of input auto output_it = node_output_edges_.find(input); if (output_it == node_output_edges_.end()) { node_output_edges_[input] = std::vector>{output_depend_edge}; @@ -346,7 +346,7 @@ std::vector KernelGraph::GetOutputNodes(const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); auto it = node_output_edges_.find(node); if (it == node_output_edges_.end()) { - MS_LOG(EXCEPTION) << "can'f find node[" << node->DebugString() << "]"; + MS_LOG(EXCEPTION) << "Can't find node[" << node->DebugString() << "]"; } std::vector output_nodes; auto trans = [](const std::pair &pair) -> AnfNodePtr { return pair.first; }; @@ -372,7 +372,7 @@ void KernelGraph::UpdateControlDependRelations(const std::vector &de MS_EXCEPTION_IF_NULL(depend_node); std::vector prior_nodes = {prior_node}; std::vector depend_nodes = {depend_node}; - MS_LOG(INFO) << "prior node[" << prior_node->DebugString() << "],depend node[" << depend_node->DebugString() + MS_LOG(INFO) << "Prior node[" << prior_node->DebugString() << "],depend node[" << depend_node->DebugString() << "],depend_mode=[" << AnfAlgo::GetNodeAttr(cnode, "depend_mode") << "]"; if (prior_node->isa()) { prior_nodes = GetOutputNodes(prior_node); @@ -384,7 +384,7 @@ void KernelGraph::UpdateControlDependRelations(const std::vector &de for (auto &second_node : depend_nodes) { MS_EXCEPTION_IF_NULL(first_node); MS_EXCEPTION_IF_NULL(second_node); - MS_LOG(INFO) << "add first node:" << first_node->DebugString() << ",second node:" << second_node->DebugString(); + MS_LOG(INFO) << "Add first node:" << first_node->DebugString() << ",second node:" << second_node->DebugString(); AddDependEdge(second_node, first_node, 1); } } @@ -437,18 +437,18 @@ void KernelGraph::BfsToUpdateNodeOutput() { MS_EXCEPTION_IF_NULL(cnode); // handle data links for (const auto &input : cnode->inputs()) { - size_t dpend_edge_num = 1; + size_t depend_edge_num = 1; // handle control depend,all inputs of control depend has no depend edge if 
(HandleControlDependNode(input, &que, &visited_nodes)) { control_depends.push_back(input); - dpend_edge_num = 0; + depend_edge_num = 0; } // the 2rd input of depend is no depend edge if (AnfAlgo::CheckPrimitiveType(node, prim::kPrimDepend) && input == cnode->input(kDependAttachNodeIndex)) { - dpend_edge_num = 0; + depend_edge_num = 0; } PushNoVisitedNode(input, &que, &visited_nodes); - AddDependEdge(node, input, dpend_edge_num); + AddDependEdge(node, input, depend_edge_num); } } UpdateControlDependRelations(control_depends); diff --git a/mindspore/ccsrc/session/kernel_graph.h b/mindspore/ccsrc/session/kernel_graph.h index d94638aa99..e11f6807f5 100644 --- a/mindspore/ccsrc/session/kernel_graph.h +++ b/mindspore/ccsrc/session/kernel_graph.h @@ -62,8 +62,8 @@ class KernelGraph : public FuncGraph { void FrontBackendlMapUpdate(const AnfNodePtr &old_backend_anf, const AnfNodePtr &new_backend_anf); // get backend anf by front anf AnfNodePtr GetBackendAnfByFrontAnf(const AnfNodePtr &front_anf); - // check backend node wheteher exist in map - bool BakcendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf); + // check backend node whether exist in map + bool BackendNodeExistInFrontBackendMap(const AnfNodePtr &backend_anf); // get value node by tensor ValueNodePtr GetValueNodeByTensor(const tensor::TensorPtr &tensor); // add value node tensor relation map diff --git a/mindspore/ccsrc/session/session_basic.cc b/mindspore/ccsrc/session/session_basic.cc index be03f54f3c..ede3ae7419 100644 --- a/mindspore/ccsrc/session/session_basic.cc +++ b/mindspore/ccsrc/session/session_basic.cc @@ -281,7 +281,7 @@ std::vector CreateParameterFromTuple(const AnfNodePtr &node, KernelG } continue; } - // creata single parameter if is a abstract real kernel + // create single parameter if is a abstract real kernel create_parameter(out_node->abstract()); } return parameters; @@ -413,7 +413,7 @@ CNodePtr SessionBasic::CreateNewCNode(const CNodePtr &cnode, KernelGraph *graph) 
cnode_inputs.emplace_back(graph->GetBackendAnfByFrontAnf(anf)); continue; } else if (anf->isa() && !IsValueNode(anf)) { - // if input is a value ndoe, + // if input is a value node, auto new_value_node = CreateNewValueNode(anf, graph); if (new_value_node != nullptr) { cnode_inputs.emplace_back(new_value_node); @@ -549,7 +549,7 @@ void SessionBasic::Reorder(std::vector *node_list) { for (const auto &node : *node_list) { MS_EXCEPTION_IF_NULL(node); - if (kOptOpeatorSet.find(AnfAlgo::GetCNodeName(node)) != kOptOpeatorSet.end()) { + if (kOptOperatorSet.find(AnfAlgo::GetCNodeName(node)) != kOptOperatorSet.end()) { all_opt_list.emplace_back(node); } else { non_opt_list.emplace_back(node); @@ -599,7 +599,7 @@ void SessionBasic::ToTensorPtr(const OpRunInfo &op_run_info, std::vector &graph) { - MS_LOG(INFO) << "start"; + MS_LOG(INFO) << "Start!"; std::vector make_tuple_inputs; make_tuple_inputs.push_back(NewValueNode(prim::kPrimMakeTuple)); if (AnfRuntimeAlgorithm::GetOutputTensorNum(cnode) > 1) { @@ -667,14 +667,14 @@ void SessionBasic::CreateOutputNode(const CNodePtr &cnode, const std::shared_ptr // create output auto g_output = graph->NewCNode(make_tuple_inputs); graph->set_output(g_output); - // set graph manager,which now is only used to get valuendoes and hardware optimizing + // set graph manager,which now is only used to get valuenodes and hardware optimizing MS_EXCEPTION_IF_NULL(context_); FuncGraphManagerPtr manager = context_->manager(); if (manager != nullptr) { manager->AddFuncGraph(graph); graph->set_manager(manager); } - MS_LOG(INFO) << "end"; + MS_LOG(INFO) << "Finish!"; } std::shared_ptr SessionBasic::ConstructSingleOpGraph(const OpRunInfo &op_run_info) { @@ -694,9 +694,9 @@ std::shared_ptr SessionBasic::ConstructSingleOpGraph(const OpRunInf std::vector input_tensors; std::vector tensors_mask; ToTensorPtr(op_run_info, &input_tensors, &tensors_mask); - MS_LOG(INFO) << "input tensor size" << input_tensors.size(); + MS_LOG(INFO) << "Input tensor size" << 
input_tensors.size(); if (input_tensors.size() != tensors_mask.size()) { - MS_LOG(EXCEPTION) << "input tensors size " << input_tensors.size() << " should be equal to tensors mask size " + MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size() << " should be equal to tensors mask size " << tensors_mask.size(); } for (size_t i = 0; i < input_tensors.size(); ++i) { @@ -711,7 +711,7 @@ std::shared_ptr SessionBasic::ConstructSingleOpGraph(const OpRunInf cnode->set_abstract(op_run_info.abstract); // set const input to attr if value is not a tensor,such as scalar or tuple RunOpConvertConstInputToAttr(op_run_info, cnode); - // set exectuion order + // set execution order std::vector exe_order = {cnode}; graph->set_execution_order(exe_order); // set output @@ -734,14 +734,14 @@ BaseRef SessionBasic::TransformBaseRefListToTuple(const BaseRef &base_ref) { py::tuple tensor_tuple = py::cast(obj); output_tensors[i] = tensor_tuple; } else { - MS_LOG(EXCEPTION) << "The output is not a base ref list or a tensor !"; + MS_LOG(EXCEPTION) << "The output is not a base ref list or a tensor!"; } } return output_tensors; // turn tuple to py::object and store in PyObjectRef } else if (utils::isa(base_ref)) { return base_ref; } else { - MS_LOG(EXCEPTION) << "The output is not a base ref list or a tensor !"; + MS_LOG(EXCEPTION) << "The output is not a base ref list or a tensor!"; } } } // namespace session diff --git a/mindspore/ccsrc/session/session_basic.h b/mindspore/ccsrc/session/session_basic.h index f80e69bd9f..9aadb78cb2 100644 --- a/mindspore/ccsrc/session/session_basic.h +++ b/mindspore/ccsrc/session/session_basic.h @@ -56,7 +56,7 @@ class SessionBasic { virtual ~SessionBasic() { summary_callback_ = nullptr; } virtual GraphId CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) = 0; - // build graph ,used to handle mupltiple child graphs + // build graph, used to handle multiple child graphs virtual void BuildGraph(GraphId) {} virtual void RunGraph(const 
GraphId &graph_id, const std::vector &inputs, VectorRef *outputs) = 0; @@ -75,7 +75,7 @@ class SessionBasic { virtual GraphId SetFinalGraphInput(const std::vector &) { return kInvalidGraphId; } // set output of final graph virtual void SetFinalGraphOutput(const BaseRef &) {} - // insert switch and set the relative acitve ops + // insert switch and set the relative active ops virtual void SwitchCompile(GraphId, GraphId, GraphId) {} // set args of child graph.the arg maybe come from a output of other child graphs,or from final graph's parameter virtual void SetChildGraphInput(GraphId, const VectorRef &) {} diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index 229c0547cc..ee1eeaddfc 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -186,7 +186,7 @@ const std::vector> kShapeSupportFormatMap = {k1DSupportFor k4DSupportFormat}; const std::set kDefaultCompatibleFormat = {kOpFormat_ND, kOpFormat_NCHW, kOpFormat_NHWC, kOpFormat_HWCN}; -const std::set kOptOpeatorSet = { +const std::set kOptOperatorSet = { kMomentumOpName, kApplyMomentumOpName, kApplyAdadeltaOpName, kApplyAdagradOpName, kApplyAdagradDAName, kApplyAdamOpName, kApplyAdaMaxOpName, kApplyAddSignOpName, kApplyCenteredRMSPOpName, From c2fed0b7398bc4156f1455b93292de266d179ce5 Mon Sep 17 00:00:00 2001 From: ms_yan <6576637+ms_yan@user.noreply.gitee.com> Date: Thu, 2 Apr 2020 16:07:17 +0800 Subject: [PATCH 071/367] remove (feed mode) in comment --- mindspore/dataset/engine/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index ad3e7d8255..db2b5169d2 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -395,7 +395,7 @@ class Dataset: Note: The order of using repeat and batch reflects the number of batches. Recommend that repeat operation should be used after batch operation. 
- If dataset_sink_mode is False (feed mode), here repeat operation is invalid. + If dataset_sink_mode is False, here repeat operation is invalid. Args: count (int): Number of times the dataset should be repeated (default=None). From ab917a734db694d9e727b8b502ff7e77fc265dd3 Mon Sep 17 00:00:00 2001 From: c00425699 Date: Wed, 1 Apr 2020 20:47:46 +0800 Subject: [PATCH 072/367] fix bug for allreduce fusion and add resnet unit test --- .../allreduce_fusion/allreduce_fusion.cc | 2 +- .../allreduce_fusion/allreduce_fusion.h | 1 - .../allreduce_fusion/allreduce_graph.cc | 22 +- .../allreduce_fusion/allreduce_node.cc | 24 +- .../allreduce_fusion/allreduce_node.h | 7 +- .../python/parallel/test_allreduce_fusion.py | 4 +- .../parallel/test_auto_parallel_resnet.py | 223 ++++++++++++++++-- 7 files changed, 246 insertions(+), 37 deletions(-) diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.cc b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.cc index 37b6eb42ed..38a4eee9a2 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.cc +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.cc @@ -359,7 +359,7 @@ Status AllreduceFusion::SetFusionByBackwardCompAndAllreduceTime() { return FAILED; } double para_size = (tail_time_ - allreduce_inherent_time_) / allreduce_bandwidth_; - double to_cost = allreduce_graph_.max() + FUSION_COST_EPS; + double to_cost = allreduce_graph_.max(); int32_t fusion = 1; while (to_cost != 0) { MS_LOG(INFO) << "to_cost: " << to_cost << " para_size: " << para_size; diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.h b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.h index de2844fa51..9c722ee2f3 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.h +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_fusion.h @@ -38,7 +38,6 @@ constexpr double DEFAULT_COST_MODEL_ALLREDUCE_FUSION_COMPUTATION_TIME_PARAMETER constexpr char FUSION[] = "fusion"; 
constexpr char PARAMETER[] = "parameter"; const uint32_t MAX_RECURSIVE_CALL_TIMES = 100; -const double FUSION_COST_EPS = 1e-7; class AllreduceFusion { public: AllreduceFusion() diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.cc b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.cc index 5c97eda8d8..6721df6a45 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.cc +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_graph.cc @@ -24,7 +24,19 @@ namespace mindspore { namespace parallel { Status AllreduceGraph::AddNode(const CNodePtr& node, const AnfNodePtr& para) { - auto arnode = std::make_shared(AllreduceNode()); + AllreduceNodePtr arnode; + auto cnode_emplace_return = cnode_set_.emplace(node); + if (!cnode_emplace_return.second) { + MS_LOG(INFO) << "node: " << node->DebugString() << " has already been added!"; + auto cnode_arnode_pair = cnode_arnode_map_.find(node); + if (cnode_arnode_pair == cnode_arnode_map_.end()) { + MS_LOG(EXCEPTION) << "node is not in cnode_arnode_map_!"; + } + arnode = cnode_arnode_pair->second; + } else { + arnode = std::make_shared(AllreduceNode()); + } + if (arnode->Init(node) != SUCCESS) { MS_LOG(ERROR) << "AllreduceNode Init failed"; return FAILED; @@ -39,10 +51,6 @@ Status AllreduceGraph::AddNode(const CNodePtr& node, const AnfNodePtr& para) { if (!arnode_emplace_return.second) { MS_LOG(INFO) << "node: " << node->DebugString() << "'s arnode has already been added!"; } - auto cnode_emplace_return = cnode_set_.emplace(node); - if (!cnode_emplace_return.second) { - MS_LOG(INFO) << "node: " << node->DebugString() << " has already been added!"; - } cnode_emplace_return = para_cnodeset_map_[para].emplace(node); if (!cnode_emplace_return.second) { MS_LOG(INFO) << "node: " << node->DebugString() << " already in para: " << para->fullname_with_scope() @@ -75,7 +83,7 @@ Status AllreduceGraph::AddEdge(const CNodePtr& from, const CNodePtr& to, double MS_LOG(ERROR) << "from_arnode AddNext 
failed"; return FAILED; } - if (to_arnode->AddPrev(from_arnode, dist) != SUCCESS) { + if (to_arnode->AddPrev(from_arnode, dist, &max_) != SUCCESS) { MS_LOG(ERROR) << "to_arnode AddPrev failed"; return FAILED; } @@ -110,7 +118,7 @@ std::pair, double> AllreduceGraph::GetParaByParaSize(dou double cur_para_size = 0; double from = to; for (auto& arnode : arnode_vec_) { - if (arnode.depend_feat_size() >= to) { + if (arnode.depend_feat_size() != max_ && arnode.depend_feat_size() >= to) { continue; } if (para_size > 0 && cur_para_size >= para_size && arnode.depend_feat_size() < from) { diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.cc b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.cc index 8bed29f2f2..6be588928a 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.cc +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.cc @@ -15,6 +15,7 @@ */ #include "parallel/allreduce_fusion/allreduce_node.h" +#include #include "parallel/tensor_layout/tensor_layout.h" #include "utils/log_adapter.h" @@ -29,7 +30,7 @@ Status AllreduceNode::AddNext(const AllreduceNodePtr& next_node) { return SUCCESS; } -Status AllreduceNode::AddPrev(const AllreduceNodePtr& prev_node, double dist) { +Status AllreduceNode::AddPrev(const AllreduceNodePtr& prev_node, double dist, double* max) { if (prev_node == nullptr) { MS_LOG(ERROR) << "next_node is nullptr!"; return FAILED; @@ -39,7 +40,26 @@ Status AllreduceNode::AddPrev(const AllreduceNodePtr& prev_node, double dist) { return FAILED; } prev_.emplace_back(prev_node); - depend_feat_size_ += prev_node->depend_feat_size() + dist; + double add_dist = prev_node->depend_feat_size() + dist; + depend_feat_size_ += add_dist; + if (depend_feat_size_ > *max) { + *max = depend_feat_size_; + } + std::queue next_queue; + for (auto& next : next_) { + next_queue.push(next); + } + while (!next_queue.empty()) { + auto ele = next_queue.front(); + ele->AddDependFeatSize(add_dist); + if (ele->depend_feat_size() > 
*max) { + *max = ele->depend_feat_size(); + } + for (auto& next : ele->next()) { + next_queue.push(next); + } + next_queue.pop(); + } return SUCCESS; } diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.h b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.h index f3eeb53ec7..2a765dbfcd 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.h +++ b/mindspore/ccsrc/parallel/allreduce_fusion/allreduce_node.h @@ -39,9 +39,14 @@ class AllreduceNode { const std::unordered_set& paras() const { return paras_; } double curr_para_size() const { return curr_para_size_; } virtual ~AllreduceNode() = default; - Status AddPrev(const AllreduceNodePtr& prev_node, double dist); + // Add previous node + // prev_node is the previous to be added + // max is the current max depend_feat_size of the AllreduceGraph + Status AddPrev(const AllreduceNodePtr& prev_node, double dist, double* max); Status AddNext(const AllreduceNodePtr& next_node); double depend_feat_size() const { return depend_feat_size_; } + void AddDependFeatSize(double add_dist) { depend_feat_size_ += add_dist; } + const std::vector& next() const { return next_; } void ToString() const; bool operator<(const AllreduceNode& node) const { return depend_feat_size_ < node.depend_feat_size(); } bool operator>(const AllreduceNode& node) const { return depend_feat_size_ > node.depend_feat_size(); } diff --git a/tests/ut/python/parallel/test_allreduce_fusion.py b/tests/ut/python/parallel/test_allreduce_fusion.py index e59ce38051..fcbee10587 100644 --- a/tests/ut/python/parallel/test_allreduce_fusion.py +++ b/tests/ut/python/parallel/test_allreduce_fusion.py @@ -275,7 +275,7 @@ def test_allreduce_fusion5(): expect_dict = {'backbone2.fc8.weight': 3, 'backbone2.fc7.weight': 3, 'backbone2.fc6.weight': 3, - 'backbone2.fc5.weight': 2, + 'backbone2.fc5.weight': 3, 'backbone2.fc4.weight': 2, 'backbone2.fc3.weight': 2, 'backbone2.fc2.weight': 1, @@ -283,7 +283,7 @@ def test_allreduce_fusion5(): 
'backbone1.fc8.weight': 3, 'backbone1.fc7.weight': 3, 'backbone1.fc6.weight': 3, - 'backbone1.fc5.weight': 2, + 'backbone1.fc5.weight': 3, 'backbone1.fc4.weight': 2, 'backbone1.fc3.weight': 2, 'backbone1.fc2.weight': 1, diff --git a/tests/ut/python/parallel/test_auto_parallel_resnet.py b/tests/ut/python/parallel/test_auto_parallel_resnet.py index 1e0e3570b9..9b4e1fda23 100644 --- a/tests/ut/python/parallel/test_auto_parallel_resnet.py +++ b/tests/ut/python/parallel/test_auto_parallel_resnet.py @@ -273,13 +273,9 @@ class DatasetLenet(): return 1 -def test_train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): #1048576 #131072 #32768 #8192 +def train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): dev_num = 8 context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL, device_num=dev_num) - cost_model_context.set_cost_model_context(costmodel_gamma=0.001, costmodel_beta=260.0) - cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_algorithm=1) - cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_times=2) - cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_percent=0.5) set_algo_parameters(elementwise_op_strategy_follow=True) resset_op_id() np.random.seed(6) @@ -303,8 +299,16 @@ def test_train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): #1048576 assert v == [[dev_num, 1]] allreduce_fusion_dict = _executor._get_allreduce_fusion(model._train_network) - print(allreduce_fusion_dict) + return allreduce_fusion_dict + + +def test_train_32k_8p_fusion1(epoch_size=3, batch_size=32, num_classes=32768): #1048576 #131072 #32768 #8192 + cost_model_context.set_cost_model_context(costmodel_gamma=0.001, costmodel_beta=260.0) + cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_algorithm=1) + cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_times=2) + cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_percent=0.5) + 
allreduce_fusion_dict = train_32k_8p(epoch_size, batch_size, num_classes) expect_dict = {'end_point.bias': 2, 'end_point.weight': 2, 'layer4.2.bn3.beta': 2, @@ -382,11 +386,11 @@ def test_train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): #1048576 'layer3.1.bn1.beta': 2, 'layer3.1.bn1.gamma': 2, 'layer3.1.conv1.weight': 2, - 'layer3.0.bn_down_sample.beta': 1, - 'layer3.0.bn_down_sample.gamma': 1, + 'layer3.0.bn_down_sample.beta': 2, + 'layer3.0.bn_down_sample.gamma': 2, 'layer3.0.conv_down_sample.weight': 2, - 'layer3.0.bn3.beta': 1, - 'layer3.0.bn3.gamma': 1, + 'layer3.0.bn3.beta': 2, + 'layer3.0.bn3.gamma': 2, 'layer3.0.conv3.weight': 2, 'layer3.0.bn2.beta': 2, 'layer3.0.bn2.gamma': 2, @@ -412,8 +416,8 @@ def test_train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): #1048576 'layer2.2.bn1.beta': 2, 'layer2.2.bn1.gamma': 2, 'layer2.2.conv1.weight': 2, - 'layer2.1.bn3.beta': 1, - 'layer2.1.bn3.gamma': 1, + 'layer2.1.bn3.beta': 2, + 'layer2.1.bn3.gamma': 2, 'layer2.1.conv3.weight': 2, 'layer2.1.bn2.beta': 2, 'layer2.1.bn2.gamma': 2, @@ -421,11 +425,11 @@ def test_train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): #1048576 'layer2.1.bn1.beta': 2, 'layer2.1.bn1.gamma': 2, 'layer2.1.conv1.weight': 2, - 'layer2.0.bn_down_sample.beta': 1, - 'layer2.0.bn_down_sample.gamma': 1, + 'layer2.0.bn_down_sample.beta': 2, + 'layer2.0.bn_down_sample.gamma': 2, 'layer2.0.conv_down_sample.weight': 2, - 'layer2.0.bn3.beta': 1, - 'layer2.0.bn3.gamma': 1, + 'layer2.0.bn3.beta': 2, + 'layer2.0.bn3.gamma': 2, 'layer2.0.conv3.weight': 2, 'layer2.0.bn2.beta': 2, 'layer2.0.bn2.gamma': 2, @@ -442,8 +446,8 @@ def test_train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): #1048576 'layer1.2.bn1.beta': 2, 'layer1.2.bn1.gamma': 2, 'layer1.2.conv1.weight': 2, - 'layer1.1.bn3.beta': 1, - 'layer1.1.bn3.gamma': 1, + 'layer1.1.bn3.beta': 2, + 'layer1.1.bn3.gamma': 2, 'layer1.1.conv3.weight': 2, 'layer1.1.bn2.beta': 2, 'layer1.1.bn2.gamma': 2, @@ -451,11 +455,11 
@@ def test_train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): #1048576 'layer1.1.bn1.beta': 2, 'layer1.1.bn1.gamma': 2, 'layer1.1.conv1.weight': 2, - 'layer1.0.bn_down_sample.beta': 1, - 'layer1.0.bn_down_sample.gamma': 1, + 'layer1.0.bn_down_sample.beta': 2, + 'layer1.0.bn_down_sample.gamma': 2, 'layer1.0.conv_down_sample.weight': 2, - 'layer1.0.bn3.beta': 1, - 'layer1.0.bn3.gamma': 1, + 'layer1.0.bn3.beta': 2, + 'layer1.0.bn3.gamma': 2, 'layer1.0.conv3.weight': 2, 'layer1.0.bn2.beta': 2, 'layer1.0.bn2.gamma': 2, @@ -465,7 +469,180 @@ def test_train_32k_8p(epoch_size=3, batch_size=32, num_classes=32768): #1048576 'layer1.0.conv1.weight': 2, 'bn1.beta': 1, 'bn1.gamma': 1, - 'conv1.weight': 2} + 'conv1.weight': 1} + + assert (allreduce_fusion_dict == expect_dict) + cost_model_context.reset_cost_model_context() + + +def test_train_32k_8p_fusion2(epoch_size=3, batch_size=32, num_classes=32768): #1048576 #131072 #32768 #8192 + cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_algorithm=2) + cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_tail_time=0.1) + cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_allreduce_inherent_time=0.05) + cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_allreduce_bandwidth=0.000001) + cost_model_context.set_cost_model_context(costmodel_allreduce_fusion_computation_time_parameter=0.0000015) + allreduce_fusion_dict = train_32k_8p(epoch_size, batch_size, num_classes) + expect_dict = {'end_point.bias': 2, + 'end_point.weight': 2, + 'layer4.2.bn3.beta': 2, + 'layer4.2.bn3.gamma': 2, + 'layer4.2.conv3.weight': 2, + 'layer4.2.bn2.beta': 2, + 'layer4.2.bn2.gamma': 2, + 'layer4.2.conv2.weight': 2, + 'layer4.2.bn1.beta': 2, + 'layer4.2.bn1.gamma': 2, + 'layer4.2.conv1.weight': 2, + 'layer4.1.bn3.beta': 2, + 'layer4.1.bn3.gamma': 2, + 'layer4.1.conv3.weight': 2, + 'layer4.1.bn2.beta': 2, + 'layer4.1.bn2.gamma': 2, + 'layer4.1.conv2.weight': 2, + 
'layer4.1.bn1.beta': 2, + 'layer4.1.bn1.gamma': 2, + 'layer4.1.conv1.weight': 2, + 'layer4.0.bn_down_sample.beta': 2, + 'layer4.0.bn_down_sample.gamma': 2, + 'layer4.0.conv_down_sample.weight': 2, + 'layer4.0.bn3.beta': 2, + 'layer4.0.bn3.gamma': 2, + 'layer4.0.conv3.weight': 2, + 'layer4.0.bn2.beta': 2, + 'layer4.0.bn2.gamma': 2, + 'layer4.0.conv2.weight': 2, + 'layer4.0.bn1.beta': 2, + 'layer4.0.bn1.gamma': 2, + 'layer4.0.conv1.weight': 2, + 'layer3.5.bn3.beta': 2, + 'layer3.5.bn3.gamma': 2, + 'layer3.5.conv3.weight': 2, + 'layer3.5.bn2.beta': 2, + 'layer3.5.bn2.gamma': 2, + 'layer3.5.conv2.weight': 2, + 'layer3.5.bn1.beta': 2, + 'layer3.5.bn1.gamma': 2, + 'layer3.5.conv1.weight': 2, + 'layer3.4.bn3.beta': 2, + 'layer3.4.bn3.gamma': 2, + 'layer3.4.conv3.weight': 2, + 'layer3.4.bn2.beta': 2, + 'layer3.4.bn2.gamma': 2, + 'layer3.4.conv2.weight': 2, + 'layer3.4.bn1.beta': 2, + 'layer3.4.bn1.gamma': 2, + 'layer3.4.conv1.weight': 2, + 'layer3.3.bn3.beta': 2, + 'layer3.3.bn3.gamma': 2, + 'layer3.3.conv3.weight': 2, + 'layer3.3.bn2.beta': 2, + 'layer3.3.bn2.gamma': 2, + 'layer3.3.conv2.weight': 2, + 'layer3.3.bn1.beta': 2, + 'layer3.3.bn1.gamma': 2, + 'layer3.3.conv1.weight': 2, + 'layer3.2.bn3.beta': 2, + 'layer3.2.bn3.gamma': 2, + 'layer3.2.conv3.weight': 2, + 'layer3.2.bn2.beta': 2, + 'layer3.2.bn2.gamma': 2, + 'layer3.2.conv2.weight': 2, + 'layer3.2.bn1.beta': 2, + 'layer3.2.bn1.gamma': 2, + 'layer3.2.conv1.weight': 2, + 'layer3.1.bn3.beta': 2, + 'layer3.1.bn3.gamma': 2, + 'layer3.1.conv3.weight': 2, + 'layer3.1.bn2.beta': 2, + 'layer3.1.bn2.gamma': 2, + 'layer3.1.conv2.weight': 2, + 'layer3.1.bn1.beta': 2, + 'layer3.1.bn1.gamma': 2, + 'layer3.1.conv1.weight': 2, + 'layer3.0.bn_down_sample.beta': 2, + 'layer3.0.bn_down_sample.gamma': 2, + 'layer3.0.conv_down_sample.weight': 2, + 'layer3.0.bn3.beta': 2, + 'layer3.0.bn3.gamma': 2, + 'layer3.0.conv3.weight': 2, + 'layer3.0.bn2.beta': 2, + 'layer3.0.bn2.gamma': 2, + 'layer3.0.conv2.weight': 2, + 'layer3.0.bn1.beta': 2, 
+ 'layer3.0.bn1.gamma': 2, + 'layer3.0.conv1.weight': 2, + 'layer2.3.bn3.beta': 2, + 'layer2.3.bn3.gamma': 2, + 'layer2.3.conv3.weight': 2, + 'layer2.3.bn2.beta': 2, + 'layer2.3.bn2.gamma': 2, + 'layer2.3.conv2.weight': 2, + 'layer2.3.bn1.beta': 2, + 'layer2.3.bn1.gamma': 2, + 'layer2.3.conv1.weight': 2, + 'layer2.2.bn3.beta': 2, + 'layer2.2.bn3.gamma': 2, + 'layer2.2.conv3.weight': 2, + 'layer2.2.bn2.beta': 2, + 'layer2.2.bn2.gamma': 2, + 'layer2.2.conv2.weight': 2, + 'layer2.2.bn1.beta': 2, + 'layer2.2.bn1.gamma': 2, + 'layer2.2.conv1.weight': 2, + 'layer2.1.bn3.beta': 2, + 'layer2.1.bn3.gamma': 2, + 'layer2.1.conv3.weight': 2, + 'layer2.1.bn2.beta': 2, + 'layer2.1.bn2.gamma': 2, + 'layer2.1.conv2.weight': 2, + 'layer2.1.bn1.beta': 2, + 'layer2.1.bn1.gamma': 2, + 'layer2.1.conv1.weight': 2, + 'layer2.0.bn_down_sample.beta': 2, + 'layer2.0.bn_down_sample.gamma': 2, + 'layer2.0.conv_down_sample.weight': 2, + 'layer2.0.bn3.beta': 2, + 'layer2.0.bn3.gamma': 2, + 'layer2.0.conv3.weight': 2, + 'layer2.0.bn2.beta': 2, + 'layer2.0.bn2.gamma': 2, + 'layer2.0.conv2.weight': 2, + 'layer2.0.bn1.beta': 2, + 'layer2.0.bn1.gamma': 2, + 'layer2.0.conv1.weight': 2, + 'layer1.2.bn3.beta': 2, + 'layer1.2.bn3.gamma': 2, + 'layer1.2.conv3.weight': 2, + 'layer1.2.bn2.beta': 2, + 'layer1.2.bn2.gamma': 2, + 'layer1.2.conv2.weight': 2, + 'layer1.2.bn1.beta': 2, + 'layer1.2.bn1.gamma': 2, + 'layer1.2.conv1.weight': 2, + 'layer1.1.bn3.beta': 2, + 'layer1.1.bn3.gamma': 2, + 'layer1.1.conv3.weight': 2, + 'layer1.1.bn2.beta': 2, + 'layer1.1.bn2.gamma': 2, + 'layer1.1.conv2.weight': 2, + 'layer1.1.bn1.beta': 2, + 'layer1.1.bn1.gamma': 2, + 'layer1.1.conv1.weight': 2, + 'layer1.0.bn_down_sample.beta': 2, + 'layer1.0.bn_down_sample.gamma': 2, + 'layer1.0.conv_down_sample.weight': 2, + 'layer1.0.bn3.beta': 2, + 'layer1.0.bn3.gamma': 2, + 'layer1.0.conv3.weight': 2, + 'layer1.0.bn2.beta': 2, + 'layer1.0.bn2.gamma': 2, + 'layer1.0.conv2.weight': 1, + 'layer1.0.bn1.beta': 1, + 'layer1.0.bn1.gamma': 
1, + 'layer1.0.conv1.weight': 1, + 'bn1.beta': 1, + 'bn1.gamma': 1, + 'conv1.weight': 1} assert (allreduce_fusion_dict == expect_dict) cost_model_context.reset_cost_model_context() From 5a4f17bfb676982035b0e9e206ee890f0834b1a9 Mon Sep 17 00:00:00 2001 From: yanzhenxiang2020 Date: Wed, 1 Apr 2020 17:48:30 +0800 Subject: [PATCH 073/367] fix mindrecord ut long time --- mindspore/mindrecord/tools/mnist_to_mr.py | 18 +++++++++--------- .../testMnistData/t10k-images-idx3-ubyte.gz | Bin 61261 -> 407 bytes .../testMnistData/t10k-labels-idx1-ubyte.gz | Bin 92 -> 74 bytes .../testMnistData/train-images-idx3-ubyte.gz | Bin 340460 -> 474 bytes .../testMnistData/train-labels-idx1-ubyte.gz | Bin 140 -> 82 bytes .../python/mindrecord/test_mindrecord_base.py | 10 +++++----- .../ut/python/mindrecord/test_mnist_to_mr.py | 12 ++++++------ 7 files changed, 20 insertions(+), 20 deletions(-) diff --git a/mindspore/mindrecord/tools/mnist_to_mr.py b/mindspore/mindrecord/tools/mnist_to_mr.py index 462ab7fb53..046788535d 100644 --- a/mindspore/mindrecord/tools/mnist_to_mr.py +++ b/mindspore/mindrecord/tools/mnist_to_mr.py @@ -77,20 +77,20 @@ class MnistToMR: self.mnist_schema_json = {"label": {"type": "int64"}, "data": {"type": "bytes"}} - def _extract_images(self, filename, num_images): + def _extract_images(self, filename): """Extract the images into a 4D tensor [image index, y, x, channels].""" with gzip.open(filename) as bytestream: bytestream.read(16) - buf = bytestream.read(self.image_size * self.image_size * num_images * self.num_channels) + buf = bytestream.read() data = np.frombuffer(buf, dtype=np.uint8) - data = data.reshape(num_images, self.image_size, self.image_size, self.num_channels) + data = data.reshape(-1, self.image_size, self.image_size, self.num_channels) return data - def _extract_labels(self, filename, num_images): + def _extract_labels(self, filename): """Extract the labels into a vector of int64 label IDs.""" with gzip.open(filename) as bytestream: 
bytestream.read(8) - buf = bytestream.read(1 * num_images) + buf = bytestream.read() labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) return labels @@ -101,8 +101,8 @@ class MnistToMR: Yields: data (dict of list): mnist data list which contains dict. """ - train_data = self._extract_images(self.train_data_filename_, 60000) - train_labels = self._extract_labels(self.train_labels_filename_, 60000) + train_data = self._extract_images(self.train_data_filename_) + train_labels = self._extract_labels(self.train_labels_filename_) for data, label in zip(train_data, train_labels): _, img = cv2.imencode(".jpeg", data) yield {"label": int(label), "data": img.tobytes()} @@ -114,8 +114,8 @@ class MnistToMR: Yields: data (dict of list): mnist data list which contains dict. """ - test_data = self._extract_images(self.test_data_filename_, 10000) - test_labels = self._extract_labels(self.test_labels_filename_, 10000) + test_data = self._extract_images(self.test_data_filename_) + test_labels = self._extract_labels(self.test_labels_filename_) for data, label in zip(test_data, test_labels): _, img = cv2.imencode(".jpeg", data) yield {"label": int(label), "data": img.tobytes()} diff --git a/tests/ut/data/mindrecord/testMnistData/t10k-images-idx3-ubyte.gz b/tests/ut/data/mindrecord/testMnistData/t10k-images-idx3-ubyte.gz index 9fdddeebe95e6e0a9790fdaf19532d46e83647c8..d7a2ea5de2afb0a2e2af780ea309418c83b786cf 100644 GIT binary patch literal 407 zcmV;I0cidoiwFo#VuW4*19UMkYb|MQVP|D?Eoo$UGc9#ud30p}?U=Do0x=ZEU&7+7 z#L3|{He*;B82uOAU~utoaAYHKB4J@@jDybp8P4wPXiPL=t`s8-^nE?PqsMXdm)tF1 z-}~P6+FpAiCk+v4F0KoA*_ep6nfRsi<^2mqzisdK$4{O{xikSS2eN;^1 zC=tiLi}>dei6)ArGN+e-eqpD4?E!rT_Ag+6ea6?K5o7&yn)ZOf`Sn0(FzKlsg*+4c zC^Tn(cuvh&edbb6*MYVv#b|9@+pXaFO zX?rh+|AQrVsvcPq6@eOm;Xy$xTrLD{P)J(4PZ{BS8)2cJlxyep`31tV?EJ7E000xe B!vFvP literal 61261 zcmeHQ{c{xM9mk0ps8cs0V+h8*Vk?pIazUk_EIBn+?N|vvSj9oI28@7#Icy-=nB<=N z(pfMG@eOQY;Ec4yRv^&~34+Ojmt4RM9s?#(0$lEnklcmjZtrrrx83LIPG>s(2ihsq 
z`TTN|TsAv%pLw3o+dbbqQ_a+=Q>>+hQ_{9B$}L&2adXl7;?e~hUy&YMux)L{*5WDG zCzkh=jkX3io;?~{DBd>r=*dUt=AY`k^uS%~^clB4zjD^JaObNXPds_=)K6!AAIn{N z^DT=rIv;&*{{7;&7h`)`zFIx%s@|{XRgPW`ys*5aG+N_JuUzCz*bhWY@=CK)W|H7E zpP~&}B4&Na_^n`w)sT9oj(?&*6W!&2izvQw%=XNK6-VOMh=kSwW)%aHW9a$4AP7GyCk+ozruQY zEpa8lD`eQR@t|@K3!EUOv2Yt{Z;$J3Yy%yz$%&i-GQywfjB!n`A z+Fj}VTk;pU7-(Q^V5fizK#Y(Hh$dVCJQ5ru{4)Xq;u69m(lOF8(lOF8(lOHU4VRAf zDcpIBa)j1VB5q)c0vv?YqU_oFvfC@m5 z&;|%2TmdW*Y~sH>a%DNk0&KC@5^gH%5!rZ5)Lh1~4N8q4F8C(JY}P=An)TwsrP?X& zveu&~=QZ}=4}b)V0owq~08WG&KpBEHh;#`T*HV z&yvmk%A`db=wY|>9p5-CPYACFm)@0s#a~>{^VwV$*XC`glPCWyr+4Q&9fze2zJ}^+ zo>S_3>Xv!@MQMY*LfV3V0b*D%*e{?9uqDI-S_!KFrv#%2pNyD*l!TCojEsr^4G9_& zG$d$9(2$@ZK|}ICYe<%#b1-FLN2K1>*i`ejP6h&sjds;oCY626`8ioe{~Gaoa|)yG zN9?vW+Qa(`$N+noUPydwrX8{IfO~{Zc9q={c6B?HloXFm=VWQsU3{yw%J-(6$M@9; zRgVkV-l7+!AG}3%>3mXJWe$g?sav+o#h#)Xc`+}I_=?(3J90W5v-#bBlo#-Q#sEpZ z9KDybXW(YQ3`+ys39JEfgrGn-VGm%b;2dF)5h{?K5D^ip(H@{AL9vAX44osYRW!(G z-EWLH7dl7>zv4vvg|T@w-xxd5NQ&7=`Rn08!PQTRZ*59cCcjcxa&;fAjO}%c%+7k} zZ6+zRSjYS|YFL~kY9L^IqZ)UvH+F<>SeVRQ5oBY^g!c?7R8Kl8gzLsfZS}7RH`mLL zRDGd_TJa~sX{WV^a0exLB1sO(%{F=7l*G12LWr5oSjQ&cCr@8&E% zPv{xxvku!muJF0;G-C{F#9MTNbf}?ppJ%xLyBxm5a?&xKD-pCSQap>3#c%SX(NSr) zG4drX?aa^Q2J}dsY?;Yfl(IZEq~FBvHYUP6F{i^NE!C#;e{T}ITZ9Qij%V}7r60!a z#>+V+#S=#(JLDi2YVLH*;*=%4&FS!b99mheKeB(Qp+23zT|L+3C}UMV4`*L%LcTRMd^bmO0X3k;ss@38K)}_IMaUu? 
z2+SUQ_5ZW4W=dILPPBFN>d5ck*>G=e*4(q(<2Sv3#}kL%D$aQ9q0jHj{dvpu_uu{H z_KoviKL3;LyE1Ox^`|SP?do?AJC;3ZU6-Xlc_96#`%T$D?2R4j>n1z?bH_Y^O?sr1fxWy_wfqXz+ z>C9hbRHuc)4E1Eb`#1bfXMPsf)tSassw<7h>YR$Dv$$em#+{sSy(|9?PMFwt)w2ON z0!COz*dm}2FeFq20tpiVe*_N+dyEKybc6(nl#K!bB@6l^bZn>(QL&;u{*N^5#81O= zV;)({t_In%U~#gaDQT>r`pfGi?)X^KEt^R4$HeGR8l7y@BK31^JPjzRMmvd{N5zz# zwy<&L_4md|ywUOryVgW~!}ZC%Oh1rp^Ec&@TeJ@?`Vu{=#2VEm=CS9ETH+TxY=kZ% zDUmkyY_0Z@{xBD#>MZal$-!A%i6?K zE@i}R+fTbKUvJafQx5xRv~4u`I*W6h*5iDb&Ayvb3x0wUXL&-f68fjts{SFXE+{%3^04=WBCHgGE~3OirkE&oVwFPhGuE z2<6&vG0?!;z)k@bfEXbY5KXuOcqBMR_-6zJ#3h7Bq+_IGq+_IGq+_Jx8#o=mIKXbp NzBH{YqoI7tKLE^TE}Z}X diff --git a/tests/ut/data/mindrecord/testMnistData/t10k-labels-idx1-ubyte.gz b/tests/ut/data/mindrecord/testMnistData/t10k-labels-idx1-ubyte.gz index c8a685166004730bc0367208b092155ee9423b76..6925ee8ce4a67a8c8e7b6ad9575968b22273b801 100644 GIT binary patch literal 74 zcmV-Q0JZ-giwFo#VuW4*19UMkYb|VHVr6V|Eoo$UF)ejsd30p}EylG003ZOsu;8%s g|E=gyp$E($lJ@0|a-?2sFZ!>s0J+@_B{cv50R43xyZ`_I literal 92 zcmV-i0HgmOiwFn+Z*yJ%19UMkYb|VHVr6V|Eoo$UF)ejsd30p}?ZLqjfFJ+^up%n> ya|`$1&^>xdl0A~|4Nz|Y^$wuk1nQkYy$h&!1N9bAZv*unpxzHgt&brXCjbEcw<5Cu diff --git a/tests/ut/data/mindrecord/testMnistData/train-images-idx3-ubyte.gz b/tests/ut/data/mindrecord/testMnistData/train-images-idx3-ubyte.gz index 4f27a302031116543f7a3dd95617f39eafe97c0e..80c13bf9a37aaebcefd7a2af05ae2d7d17761142 100644 GIT binary patch literal 474 zcmV<00VVz)iwFovVuW4*19Wm>X>KiPZDD6+b1i9Pcrz_^VtI6B0PUI~PsBhFg(tzF zfuIm|I6|-pB!26y1P zv)%5yZnqU#T@jIupJS^$d`iUoLc-E!_4|&ZpPE>Li~ zHPmy+MXbS*HFMvgALxewL-&JqKA1upzT<~1XW7}DEX*MJ^3pbFZ6>E`k}wP>XN{%NObv54Kc=2 ziwF~Op)j|nG%vI~Q(VUZ&AQ`p473UA3<*L%&<|uhNXfE0@y9I^U6PWDrz^m?SS?R7 zhknqZADRQk3_Rb0{m6BDUo=vzCrV8_!KlA34H`_k=C5KkNhi<`&FWRq598u_H`by^ zu}U#!gr6(w-0KqhR1sk8{{zroY$>x@K@J1@!36y@hKC$A4{kKNI7!exN&}dXz2bCz%rn?0N1p3NHH{EpK?cG0MYjtJlq5y*4>z)rQ%t|4ELW{m#m) zjjKl7Z+c&gbjS#*d0z`t`1HiZkYCk?&3~|Kve-`U}^SRyB?* zpY^Ei-7VF&oK20TyYF^%`0ZKQzFW3~2M^}=UrN&2^Mn2Uj&NOmU{-amFW7ug^Um&Q 
zx>l-1<@NRz7uk&8R_CgX`|YMpeHStloGrTs2l9QNFED@UP1CI?dT?k#wl=sw^QK^5 zZ4;Jir;Cb~sC+e>Z4v$>xRkJgsH_}a6#&)EW_wmkOk$?}$YdHTHH z&d>h+y^-bN+r{a8!9(Cp00p1`6o3Ly017|>C;$bZ02F`%Pyh-*0Vn_kpa2wr0#E=7 zKmjNK1)u;FfC5ke3P1rU00p1`6o3Ly017|>C;$bZ02KW7P%v!Ikq25X##JPoJd-yR zY*h}oRK@iQ!Q9h%9>)@U!i^Ohx6X`iU!|n$YnI==RFT;0J!`Y2=>t{Wx1w!nMcTK` z#)O1!m(gQ;swPXy3wTdNi^c~Hip}e5b2Vr|wK^zEc|VE1o@CYn>aE!HUh^U4Zrp<0 zp9$IWi4I3!(|cmV+>YdqQwIU!V9=!SVG+J4TM5{oao*k3D+y=<0DlIWg*?hl;aD zPF(uhSzqE!0q}qc;7E=G4oWluY6)S8WX2ufo3MzOXL14(6v856 zG!jFyRAOTSb!G=lRTv;KT4RdHe3el$Lw5QEG%YBVP`9C3MCXcn8O1v~hO{p!no_}~ zZA=fF$~ouJ~1uBTm3yPkGE?RwhvwCict)2^po zPrIIWJ?(nh^|b40*VC@2T~E87c0KKS+V!;SY1h-Pr(I9Go_0O$dfN50>uJ~1uBTo9 zztyh4sqIp9`uyQr#^E7TwW`-VDQ;J^*5-*0dd+H!KGVnSpk@ z$y0gwfk*T0Q8P6)PM_0d@;_8qdhF$(nquroX-Us-8*DKi3=ZxO{d2v#!6JLWTWL3Y z|5bmlLUS}!1jhx75*-fh(a`_JL^Z=|$!d34LOw@+Ux^hPV+GwV#cFv~Dp4~IW_dG0 zg{gy1$4;g1qD66)sqRTaid2#!CQJ=^!^3jC%OUl-9ii!oRBw4ixcdi1XfU`OPl%0j zyiLlD=Rd)N;*A0q5C}}mdBMSoJU~YwIFZfx3tSR*65maFK+Hl|M5;#oNQg?>OlHn- zfhi2LB!+H`4wC z6c?%!Tv3hE)4P=Sm0wDHoF~Ie06bsuJ~1uBTm3yPkGE?RwhvwCict z)2^poPrIIWJ?(nh^|b40*VC@2T~E87c0KKS+V!;SY1h-Pr(I9Go_0O$dfN50>uJ~1 zuBTm3yPkGE?Rwhv--CAjnli=QY3Uv6>1wLbi5E@Xy#epaEh2^ z@&Y0gQX_&hvP0ri5@bSj<_L^Zm?kk$W2VShmB}*0b@~NVENGcfw4rN6lZqx4O)8pH zG^uD((WIhDMU#pq6-_FdR5Yn*QhjHdRQ|%ed5g;)f1*6*v#^PE3rxEsj1x1?4}EgBs{W}?_wN5tv#&~ZTa}5zBZAbg@*SQAZvrR)1)u;F zfC5ke3P1rU00p1`6o3Ly017|>C;$bZ02F`%Pyh-*0Vn_kpa2wr0#E=7KmjNK1)u;F zfC5ke3P1rU00p1`6o3Ly017|>C;$bZ02F`%Pyh-*0Vn_kpa2wr0#E=7KmjNK1)u;F zfC5ke3P1rU00p1`6o3Ly017|>C;$bZ02F`%Q1D+s!IcH#zO@M_jlsUd`e09KK=TLQ zeA}~dPT<)4->lNS!%82#th-7VvXZcp_1y_th^)bm0KNYA1G4=1{jMu=aVI;BPqy%IvBvb@w#E*oxWXmM; z3=SB(FtlQ(#zc~NF5_l~^>hhnV$fruWxM}r6x@i@> zu=JKI?Sto5MZENT<)QiiI5X|g=OdOpzjD>+$jL8^)cy0P+0!4){LA7Gmrc3*to6Ww zno3`Hg2ga4dYv>iwAbaTx;r#vt$ta2TdH$At(IeItznw*geZSgd?%`bkMTfwL%;&$ z09$e{a8M!%&`5|zP?;lci*`hvu2{cM$&%bLb(W{ag8`q_qNtZ^eqAbi z7t|Ui3whF+IPs<&QL{z4S7Uu$Y!Cf86cdC>Ssv_)6m(XD{~z+44$lZ-xzw+^Z_97F 
zlD$6`%l%VQvJNza2?pt+({ZCBUCNCRx>E;sDE2UEqk6f)lH)2XR2)m>?LDa_ZONh_ zja6d`loWG~>Q3ed<%#l+0w(AQ9_3`>2t|~jp3tKRXRHXG2^Wf8CV(I*A$uatBX%Un zC2=OhXV}2_hB*_HJ4TEQZJ91Jucuf*RfA3mEgkAa6tt+k(aodnNG+41D~((_&XlpK zx6{MtW&>9+xYWY!8}2i4@rtW#+_~cxBlkkNz{-_i?k;m1oNMbW0$^hSJ0MtA!TJyO zl(6}Q6*VmVVLK7~o>)x9sxOwOvE_}uc`W#2Wg@#B|H|7khXwz}-D{QC>ax&g&%^%DQnJpewT2zaB*EkDQm){RskclOdaafSp>2sQ zpRUN>r%V;3z5FYAmb^ru2ikxyIVdsu4Twv~iOAdt8%bG- zm&w%`C@@K3gv9KOIV1yECeMuA=^9Y7pj|?(hCUJ9EJ|s#@u(rvzod9dG zvZa$Poowl3OD9`8+0x0DPPTNirIRh4Z0Tf6CtEt%(#e)iwsf+klP#TW>HL9j=?p$5 z277}Zv(_hc>GD4?`E0|@x?rbZzM~)LHFev9SIvQBe}VRid7yN-NS~(lHLvhD=swpE zFCTtg(;xKvjX6EWhlOXvuj;~t9Cd*BCxB znq_d!9G#W{9Sj;O^mnKeQMRIrMp2I%Bdtn0r!;cuE7RttTu$|#iwRuY;8M%~<10Vz z#7&*;b=QxKwZyy|tNf_-`AH?C$G@^LW<+evzNKeJjI~OWt~RaB$^1ynJDqWU<=ijV z&5Ezz(0R)kc*Fm+xKMIugyN$_SF+U-y5QvCJ3I~E1W*79KmjNK1)u;FfC5ke3P1rU z00p1`6o3Ly017|>C;$bZ02F`%Pyh-*0Vn_kpa2wr0#E=7KmjNK1)u;FfC5ke3P1rU z00p1`6o3Ly017|>C;$bZ02F`%Pyh-*0Vn_kpa2wr0#E=7KmjNK1)u;FfC5ke3P1rU z00p1`6o3Ly0180C_Xq`AreMr>|GjePG_a>=yf$ptwHx(AO({)xD^$PD+*6rpHSJN`_M3(qV~rjq(H_(i z^$~)1!23>Vu;GjI+LL09)6o+86ZPa-V(8cV;o|#>bF<$DQOFnrED~lBk4#!XNJ3mh zN=8aXN=8aXN=8aXN=8aXN=8aXN=8aXN=8aXN=8aXN=8aXN=8aXN=8aXN=8aXN=8aX zN=8aXN=8aXN=8aXN=8aXO7@?mWbTgwYQODIfkFL+mb?1?_Mr~jh}45lw(LLKvcoQN^fYR57X;Rg5Y|6{Ct##i(LbF{&6gDWNVnX>KiSVPa)$b1i9Pcrh(?VtI6B09}S#4geqw!iulW ofA4=*jWLTFpaopOkVq@!UCJ3XK)Z-N!avjFOw2g?lwtq?0KJ$Wy8r+H literal 140 zcmb2|=HO7xFOFkiE-6aP%+t+DOiIlu*3C?*Fw`wgsw_!mcze*1u|a`>WrK6bhNVA! 
z6FYwAd4KcRXU+fu4!K~8VJkD3Uf~O-7L+l8>8;!l(Ys*EA(s&%$_u8h$bqQ^-vto= diff --git a/tests/ut/python/mindrecord/test_mindrecord_base.py b/tests/ut/python/mindrecord/test_mindrecord_base.py index 576063295a..7fdf1f0f94 100644 --- a/tests/ut/python/mindrecord/test_mindrecord_base.py +++ b/tests/ut/python/mindrecord/test_mindrecord_base.py @@ -203,9 +203,9 @@ def test_nlp_page_reader_tutorial(): os.remove("{}".format(x)) os.remove("{}.db".format(x)) -def test_cv_file_writer_shard_num_1000(): - """test file writer when shard num equals 1000.""" - writer = FileWriter(CV_FILE_NAME, 1000) +def test_cv_file_writer_shard_num_10(): + """test file writer when shard num equals 10.""" + writer = FileWriter(CV_FILE_NAME, 10) data = get_data("../data/mindrecord/testImageNetData/") cv_schema_json = {"file_name": {"type": "string"}, "label": {"type": "int64"}, "data": {"type": "bytes"}} @@ -214,8 +214,8 @@ def test_cv_file_writer_shard_num_1000(): writer.write_raw_data(data) writer.commit() - paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(3, '0')) - for x in range(1000)] + paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0')) + for x in range(10)] for x in paths: os.remove("{}".format(x)) os.remove("{}.db".format(x)) diff --git a/tests/ut/python/mindrecord/test_mnist_to_mr.py b/tests/ut/python/mindrecord/test_mnist_to_mr.py index c299a1f719..505b0d6b43 100644 --- a/tests/ut/python/mindrecord/test_mnist_to_mr.py +++ b/tests/ut/python/mindrecord/test_mnist_to_mr.py @@ -37,7 +37,7 @@ def read(train_name, test_name): count = count + 1 if count == 1: logger.info("data: {}".format(x)) - assert count == 60000 + assert count == 20 reader.close() count = 0 @@ -47,7 +47,7 @@ def read(train_name, test_name): count = count + 1 if count == 1: logger.info("data: {}".format(x)) - assert count == 10000 + assert count == 10 reader.close() @@ -102,10 +102,10 @@ def test_mnist_to_mindrecord_compare_data(): 't10k-images-idx3-ubyte.gz') test_labels_filename_ = os.path.join(MNIST_DIR, 
't10k-labels-idx1-ubyte.gz') - train_data = _extract_images(train_data_filename_, 60000) - train_labels = _extract_labels(train_labels_filename_, 60000) - test_data = _extract_images(test_data_filename_, 10000) - test_labels = _extract_labels(test_labels_filename_, 10000) + train_data = _extract_images(train_data_filename_, 20) + train_labels = _extract_labels(train_labels_filename_, 20) + test_data = _extract_images(test_data_filename_, 10) + test_labels = _extract_labels(test_labels_filename_, 10) reader = FileReader(train_name) for x, data, label in zip(reader.get_next(), train_data, train_labels): From ff808021c741e28eec2dd2161d2a4ff13e2047b6 Mon Sep 17 00:00:00 2001 From: lichenever Date: Thu, 2 Apr 2020 15:42:00 +0800 Subject: [PATCH 074/367] register not equal distributed op --- mindspore/ccsrc/parallel/ops_info/ops_utils.h | 1 + mindspore/ccsrc/parallel/step_auto_parallel.cc | 1 + mindspore/ccsrc/parallel/step_parallel.cc | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index 2b8fc0ee3f..4062847d73 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -180,6 +180,7 @@ constexpr char SIGMOID[] = "Sigmoid"; constexpr char POW[] = "Pow"; constexpr char MAXIMUM[] = "Maximum"; constexpr char EQUAL[] = "Equal"; +constexpr char NOT_EQUAL[] = "NotEqual"; constexpr char LOGICALNOT[] = "LogicalNot"; constexpr char GATHERV2[] = "GatherV2"; constexpr char STRIDEDSLICE[] = "StridedSlice"; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index b06ec383fa..a359905494 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -95,6 +95,7 @@ std::vector splittable_op_ = {MATMUL, POW, MAXIMUM, EQUAL, + NOT_EQUAL, LOGICALNOT, GATHERV2, STRIDEDSLICE, diff --git 
a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index b79198ffca..78bec00bcf 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -1226,7 +1226,7 @@ void CoverSliceShape(const FuncGraphPtr& root) { MS_LOG(INFO) << "Parameter " << parameter->ToString() << " don't need to set parallel shape"; } else { SetParallelShape(parameter, res); - MS_LOG(DEBUG) << "Parameter " << parameter->ToString() << " shape " << parameter->Shape()->ToString(); + MS_LOG(DEBUG) << "Parameter " << parameter->ToString() << " shape " << parameter->Shape()->ToString(); } } g_RefMap.clear(); From 605d980305c1631be449c659c0f0d89621d0ab55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Mon, 23 Mar 2020 15:33:01 +0800 Subject: [PATCH 075/367] 1. add Note refer to nn.SGD for detail 2. delete default value of stat 3. delete examples 4. some comments error from wangting review 5. modify comments from jinyaohui 6. modify examples from wanghao 7. modify Select operation examples --- mindspore/nn/layer/activation.py | 4 ++-- mindspore/nn/layer/basic.py | 4 ++-- mindspore/nn/layer/conv.py | 2 +- mindspore/nn/metrics/fbeta.py | 6 +++--- mindspore/ops/operations/array_ops.py | 7 +++++-- mindspore/ops/operations/comm_ops.py | 12 ++++++++---- mindspore/ops/operations/control_ops.py | 1 - mindspore/ops/operations/math_ops.py | 1 - mindspore/ops/operations/nn_ops.py | 13 +++++-------- mindspore/train/amp.py | 3 +-- 10 files changed, 27 insertions(+), 26 deletions(-) diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py index 00f2afe703..ad63dde8bc 100644 --- a/mindspore/nn/layer/activation.py +++ b/mindspore/nn/layer/activation.py @@ -40,7 +40,7 @@ class Softmax(Cell): where :math:`x_{i}` is the :math:`i`-th slice along the given dim of the input Tensor. Args: - axis (Union[int, tuple[int]]): The axis to apply Softmax operation. 
Default: -1, means the last dimension. + axis (Union[int, tuple[int]]): The axis to apply Softmax operation, -1 means the last dimension. Default: -1. Inputs: - **x** (Tensor) - The input of Softmax. @@ -70,7 +70,7 @@ class LogSoftmax(Cell): where :math:`x_{i}` is the :math:`i`-th slice along the given dim of the input Tensor. Args: - axis (int): The axis to apply LogSoftmax operation. Default: -1, means the last dimension. + axis (int): The axis to apply LogSoftmax operation, -1 means the last dimension. Default: -1. Inputs: - **x** (Tensor) - The input of LogSoftmax. diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py index 0cf4cd5e99..9dc0d5e623 100644 --- a/mindspore/nn/layer/basic.py +++ b/mindspore/nn/layer/basic.py @@ -32,13 +32,13 @@ class Dropout(Cell): r""" Dropout layer for the input. - Randomly set some elements of the input tensor to zero with probability :math:`1 - keep_prob` during training + Randomly set some elements of the input tensor to zero with probability :math:`1 - keep\_prob` during training using samples from a Bernoulli distribution. Note: Each channel will be zeroed out independently on every construct call. - The outputs are scaled by a factor of :math:`\frac{1}{keep_prob}` during training so + The outputs are scaled by a factor of :math:`\frac{1}{keep\_prob}` during training so that the output layer remains at a similar scale. During inference, this layer returns the same tensor as the input. diff --git a/mindspore/nn/layer/conv.py b/mindspore/nn/layer/conv.py index 666be93451..dfbf96e150 100644 --- a/mindspore/nn/layer/conv.py +++ b/mindspore/nn/layer/conv.py @@ -241,7 +241,7 @@ class Conv2dTranspose(_Conv): in_channels (int): The number of channels in the input space. out_channels (int): The number of channels in the output space. 
kernel_size (Union[int, tuple]): int or tuple with 2 integers, which specifies the height - and width of the 2D convolution window.Single int means the value if for both height and width of + and width of the 2D convolution window. Single int means the value is for both height and width of the kernel. A tuple of 2 ints means the first value is for the height and the other is for the width of the kernel. stride (int): Specifies the same value for all spatial dimensions. Default: 1. diff --git a/mindspore/nn/metrics/fbeta.py b/mindspore/nn/metrics/fbeta.py index f38febf3b1..6771b6ba36 100755 --- a/mindspore/nn/metrics/fbeta.py +++ b/mindspore/nn/metrics/fbeta.py @@ -26,8 +26,8 @@ class Fbeta(Metric): Fbeta score is a weighted mean of precison and recall. .. math:: - F_\beta=\frac{(1+\beta^2) \cdot true positive} - {(1+\beta^2) \cdot true positive +\beta^2 \cdot false negative + false positive} + F_\beta=\frac{(1+\beta^2) \cdot true\_positive} + {(1+\beta^2) \cdot true\_positive +\beta^2 \cdot false\_negative + false\_positive} Args: beta (float): The weight of precision. @@ -123,7 +123,7 @@ class F1(Fbeta): Refer to class `Fbeta` for more details. .. math:: - F_\beta=\frac{2\cdot true positive}{2\cdot true positive + false negative + false positive} + F_\beta=\frac{2\cdot true\_positive}{2\cdot true\_positive + false\_negative + false\_positive} Examples: >>> x = mindspore.Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index f6d563321c..36f49c00c4 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -881,7 +881,7 @@ class ScalarToTensor(PrimitiveWithInfer): Inputs: - **input_x** (Union[int, float]) - The input is a scalar. Only constant value is allowed. - **dtype** (mindspore.dtype) - The target data type. Default: mindspore.float32. Only - constant value is allowed. + constant value is allowed. Outputs: Tensor. 
0-D Tensor and the content is the input. @@ -1458,7 +1458,10 @@ class Select(PrimitiveWithInfer): Examples: >>> select = Select() - >>> select([True, False],[2,3],[1,2]) + >>> input_x = Tensor([True, False]) + >>> input_y = Tensor([2,3], mindspore.float32) + >>> input_z = Tensor([1,2], mindspore.float32) + >>> select(input_x, input_y, input_z) """ @prim_attr_register diff --git a/mindspore/ops/operations/comm_ops.py b/mindspore/ops/operations/comm_ops.py index 53a3686367..1644c5800a 100644 --- a/mindspore/ops/operations/comm_ops.py +++ b/mindspore/ops/operations/comm_ops.py @@ -66,11 +66,12 @@ class AllReduce(PrimitiveWithInfer): Examples: >>> from mindspore.communication.management import init + >>> import mindspore.ops.operations as P >>> init('nccl') >>> class Net(nn.Cell): >>> def __init__(self): >>> super(Net, self).__init__() - >>> self.allreduce_sum = AllReduce(ReduceOp.SUM, group="nccl_world_group") + >>> self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group") >>> >>> def construct(self, x): >>> return self.allreduce_sum(x) @@ -130,11 +131,12 @@ class AllGather(PrimitiveWithInfer): Examples: >>> from mindspore.communication.management import init + >>> import mindspore.ops.operations as P >>> init('nccl') >>> class Net(nn.Cell): >>> def __init__(self): >>> super(Net, self).__init__() - >>> self.allgather = AllGather(group="nccl_world_group") + >>> self.allgather = P.AllGather(group="nccl_world_group") >>> >>> def construct(self, x): >>> return self.allgather(x) @@ -184,11 +186,12 @@ class ReduceScatter(PrimitiveWithInfer): Examples: >>> from mindspore.communication.management import init + >>> import mindspore.ops.operations as P >>> init('nccl') >>> class Net(nn.Cell): >>> def __init__(self): >>> super(Net, self).__init__() - >>> self.reducescatter = ReduceScatter(ReduceOp.SUM, group="nccl_world_group") + >>> self.reducescatter = P.ReduceScatter(ReduceOp.SUM, group="nccl_world_group") >>> >>> def construct(self, x): >>> return 
self.reducescatter(x) @@ -246,11 +249,12 @@ class Broadcast(PrimitiveWithInfer): Examples: >>> from mindspore.communication.management import init + >>> import mindspore.ops.operations as P >>> init('nccl') >>> class Net(nn.Cell): >>> def __init__(self): >>> super(Net, self).__init__() - >>> self.broadcast = Broadcast(1) + >>> self.broadcast = P.Broadcast(1) >>> >>> def construct(self, x): >>> return self.broadcast((x,)) diff --git a/mindspore/ops/operations/control_ops.py b/mindspore/ops/operations/control_ops.py index 242a3b155d..1bffc09c04 100644 --- a/mindspore/ops/operations/control_ops.py +++ b/mindspore/ops/operations/control_ops.py @@ -150,7 +150,6 @@ class Merge(PrimitiveWithInfer): raise NotImplementedError def infer_shape(self, inputs): - """merge select one input as its output""" return (inputs[0], [1]) def infer_dtype(self, inputs): diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 398a7e6f1a..ba5e596027 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1263,7 +1263,6 @@ class EqualCount(PrimitiveWithInfer): self.init_prim_io_names(inputs=['x', 'y'], outputs=['output']) def infer_shape(self, x_shape, w_shape): - """Infer shape.""" output_shape = (1,) return output_shape diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index afa4c7dfe3..195d78c6c7 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -1310,6 +1310,9 @@ class SGD(PrimitiveWithInfer): Nesterov momentum is based on the formula from On the importance of initialization and momentum in deep learning. + Note: + For details, please refer to `nn.SGD` source code. + Args: dampening (float): The dampening for momentum. Default: 0.0. weight_decay (float): Weight decay (L2 penalty). Default: 0.0. @@ -1321,16 +1324,10 @@ class SGD(PrimitiveWithInfer): - **learning_rate** (Tensor) - Learning rate. e.g. Tensor(0.1, mindspore.float32). 
- **accum** (Tensor) - Accum(velocity) to be update. - **momentum** (Tensor) - Momentum. e.g. Tensor(0.1, mindspore.float32). - - **stat** (Tensor) - States to be updated with the same shape as gradient. Default: 1.0. + - **stat** (Tensor) - States to be updated with the same shape as gradient. Outputs: Tensor, parameters to be update. - - Examples: - >>> net = ResNet50() - >>> loss = SoftmaxCrossEntropyWithLogits() - >>> opt = SGD(params=net.trainable_params(), learning_rate=lr, momentum=0.9) - >>> model = Model(net, loss, opt) """ @prim_attr_register @@ -1768,7 +1765,7 @@ class LSTM(PrimitiveWithInfer): """ Performs the long short term memory(LSTM) on the input. - Detailed information, please refer to `nn.layer.LSTM`. + Detailed information, please refer to `nn.LSTM`. """ @prim_attr_register diff --git a/mindspore/train/amp.py b/mindspore/train/amp.py index 5a70a86fdd..e909b44e40 100644 --- a/mindspore/train/amp.py +++ b/mindspore/train/amp.py @@ -91,13 +91,12 @@ def build_train_network(network, optimizer, loss_fn=None, level='O0', **kwargs): loss_fn (Union[None, Cell]): Definition of the loss_fn. If None, the `network` should have the loss inside. Default: None. optimizer (Optimizer): Optimizer to update the Parameter. - level (str): Supports [O0, O2]. + level (str): Supports [O0, O2]. Default: "O0". - O0: Do not change. - O2: Cast network to float16, keep batchnorm and `loss_fn` (if set) run in float32, using dynamic loss scale. - Default: "O0" cast_model_type (:class:`mindspore.dtype`): Supports `mstype.float16` or `mstype.float32`. If set to `mstype.float16`, use `float16` mode to train. If set, overwrite the level setting. keep_batchnorm_fp32 (bool): Keep Batchnorm run in `float32`. If set, overwrite the level setting. 
From 73ba39936423629a875924842becbb74b4fca840 Mon Sep 17 00:00:00 2001 From: Wei Luning Date: Mon, 23 Mar 2020 17:33:56 +0800 Subject: [PATCH 076/367] remove ge depend in cpu --- CMakeLists.txt | 12 +- cmake/mind_expression.cmake | 2 +- mindspore/ccsrc/CMakeLists.txt | 26 +- mindspore/ccsrc/debug/e2e_dump.cc | 2 +- .../device/ascend/kernel_build_ascend.cc | 2 +- .../ascend/profiling/profiling_manager.cc | 1 + .../device/ascend/tasksink/task_generator.cc | 4 +- .../device/ascend/tasksink/task_generator.h | 2 +- .../ccsrc/device/gpu/gpu_kernel_build.cc | 1 - .../ccsrc/device/gpu/kernel_info_setter.cc | 2 +- mindspore/ccsrc/device/kernel_adjust.cc | 1 + mindspore/ccsrc/ir/anf.cc | 17 + mindspore/ccsrc/ir/anf.h | 2 + mindspore/ccsrc/ir/meta_tensor.cc | 1 - .../ccsrc/kernel/aicpu/aicpu_kernel_mod.h | 4 +- mindspore/ccsrc/kernel/akg/akgkernelbuild.cc | 5 +- mindspore/ccsrc/kernel/ascend_kernel_mod.h | 36 + mindspore/ccsrc/kernel/common_utils.cc | 3 +- .../ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h | 4 +- .../kernel/gpu/nn/lstm_grad_data_gpu_kernel.h | 6 +- .../gpu/nn/lstm_grad_weight_gpu_kernel.h | 4 +- mindspore/ccsrc/kernel/hccl/hccl_kernel.h | 4 +- mindspore/ccsrc/kernel/kernel.h | 6 - mindspore/ccsrc/kernel/mng/rt_kernel.h | 4 +- mindspore/ccsrc/kernel/oplib/oplib.cc | 4 +- mindspore/ccsrc/kernel/tbe/tbe_kernel_build.h | 33 +- mindspore/ccsrc/kernel/tbe/tbe_kernel_mod.h | 4 +- .../kernel/tbe/tbe_kernel_parallel_build.h | 5 +- .../ccsrc/kernel/tbe/tbe_kernel_select.cc | 9 +- mindspore/ccsrc/kernel/tbe/tbe_utils.cc | 1 + mindspore/ccsrc/pipeline/base.h | 64 ++ mindspore/ccsrc/pipeline/init.cc | 10 +- .../ccsrc/pipeline/parse/python_adapter.cc | 1 + .../ccsrc/pipeline/parse/python_adapter.h | 1 + mindspore/ccsrc/pipeline/pipeline.cc | 692 +++--------------- mindspore/ccsrc/pipeline/pipeline.h | 35 +- mindspore/ccsrc/pipeline/pipeline_ge.cc | 545 ++++++++++++++ mindspore/ccsrc/pipeline/pipeline_ge.h | 57 ++ mindspore/ccsrc/pipeline/resource.cc | 29 - 
mindspore/ccsrc/pipeline/resource.h | 7 +- .../ascend/ascend_backend_optimization.cc | 2 +- .../ascend/buffer_fusion/buffer_fusion.cc | 4 +- .../ir_fusion/allreduce_fusion.cc | 2 +- .../ir_fusion/allreduce_fusion.h | 6 +- .../ccsrc/predict/converter/kernel2ms.cc | 6 +- mindspore/ccsrc/pynative/base.h | 67 ++ mindspore/ccsrc/pynative/pynative_execute.cc | 285 +------- mindspore/ccsrc/pynative/pynative_execute.h | 43 +- .../ccsrc/pynative/pynative_execute_ge.cc | 311 ++++++++ .../ccsrc/pynative/pynative_execute_ge.h | 46 ++ mindspore/ccsrc/session/ascend_session.cc | 1 + mindspore/ccsrc/session/gpu_session.cc | 2 +- mindspore/ccsrc/transform/convert.cc | 18 - mindspore/ccsrc/transform/convert.h | 1 - mindspore/ccsrc/utils/callbacks.cc | 13 +- mindspore/ccsrc/utils/callbacks.h | 9 + mindspore/ccsrc/utils/context/ms_context.cc | 7 +- mindspore/ccsrc/utils/context/ms_context.h | 1 - mindspore/ccsrc/utils/convert_utils.cc | 41 ++ mindspore/ccsrc/utils/convert_utils.h | 4 + mindspore/ccsrc/vm/segment_runner.cc | 4 +- mindspore/ccsrc/vm/transform.cc | 16 +- mindspore/ccsrc/vm/transform.h | 2 - mindspore/ccsrc/vm/vmimpl.cc | 35 - mindspore/ccsrc/vm/vmimpl.h | 6 - mindspore/common/api.py | 3 +- mindspore/common/parameter.py | 12 + mindspore/common/tensor.py | 10 + mindspore/ops/functional.py | 2 +- mindspore/train/model.py | 3 + .../cpp/device/ascend_kernel_select_test.cc | 2 +- tests/ut/cpp/device/ascend_profiling_test.cc | 2 +- .../ir_fusion/allreduce_fusion_test.cc | 2 +- ...onvert_const_input_to_tensor_input_test.cc | 2 +- tests/ut/python/ir/test_tensor.py | 18 + tests/ut/python/ops/test_array_ops.py | 14 - tests/ut/python/parallel/__init__.py | 5 +- tests/ut/python/parallel/test_alltoall.py | 11 +- .../parallel/test_auto_parallel_arithmetic.py | 18 +- ...t_auto_parallel_assign_sub_with_ref_key.py | 10 +- .../parallel/test_auto_parallel_cast.py | 12 +- .../test_auto_parallel_matmul_prelu.py | 8 +- .../test_auto_parallel_parameter_cast.py | 8 +- 
.../parallel/test_auto_parallel_transpose.py | 10 +- .../parallel/test_auto_parallel_two_matmul.py | 5 +- .../python/parallel/test_dataset_interface.py | 2 +- tests/ut/python/parallel/test_one_dev.py | 20 +- .../python/pipeline/parse/test_create_obj.py | 3 + tests/ut/python/pipeline/parse/test_dtype.py | 3 +- .../ut/python/pynative_mode/ops/test_grad.py | 16 + .../test_summary_ops_params_valid_check.py | 21 +- tests/ut/python/utils/test_serialize.py | 3 +- 92 files changed, 1571 insertions(+), 1234 deletions(-) create mode 100644 mindspore/ccsrc/kernel/ascend_kernel_mod.h create mode 100644 mindspore/ccsrc/pipeline/base.h create mode 100644 mindspore/ccsrc/pipeline/pipeline_ge.cc create mode 100644 mindspore/ccsrc/pipeline/pipeline_ge.h rename mindspore/ccsrc/pre_activate/{ascend => common}/ir_fusion/allreduce_fusion.cc (97%) rename mindspore/ccsrc/pre_activate/{ascend => common}/ir_fusion/allreduce_fusion.h (87%) create mode 100644 mindspore/ccsrc/pynative/base.h create mode 100644 mindspore/ccsrc/pynative/pynative_execute_ge.cc create mode 100644 mindspore/ccsrc/pynative/pynative_execute_ge.h rename tests/ut/cpp/pre_activate/{ascend => common}/ir_fusion/allreduce_fusion_test.cc (99%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 9cb73935ee..bdac2da46e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -42,11 +42,13 @@ else() include(${CMAKE_SOURCE_DIR}/cmake/dependency_graphengine.cmake) endif() -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/inc) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/inc/external) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/inc/framework) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc/toolchain) +if (ENABLE_GE OR ENABLE_D OR ENABLE_TESTCASES) + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/inc) + 
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/inc/external) + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/inc/framework) + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc) + include_directories(${CMAKE_CURRENT_SOURCE_DIR}/graphengine/third_party/fwkacllib/inc/toolchain) +endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden") add_subdirectory(mindspore/ccsrc) diff --git a/cmake/mind_expression.cmake b/cmake/mind_expression.cmake index 103775a4c2..345fd4675e 100644 --- a/cmake/mind_expression.cmake +++ b/cmake/mind_expression.cmake @@ -40,7 +40,7 @@ if (ENABLE_GE) include_directories(${CMAKE_SOURCE_DIR}/third_party/ge/include) include_directories(${CMAKE_SOURCE_DIR}/third_party/ge/include/external) include_directories(${CMAKE_SOURCE_DIR}/third_party/ge/include/external/graph) -else() +elseif(ENABLE_D OR ENABLE_TESTCASES) include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc) include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc/ops) include_directories(${CMAKE_SOURCE_DIR}/graphengine/inc/external) diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 1c684b6736..0b4bb0d1df 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -34,6 +34,8 @@ if(ENABLE_GPU) "device/gpu/*.cu" "kernel/gpu/*.cu" "kernel/akg/gpu/*.cc" + "kernel/akg/akgkernelbuild.cc" + "kernel/akg/akg_kernel_attrs_process.cc" ) file(GLOB_RECURSE GPU_KERNEL_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "kernel/gpu/*.cc" @@ -100,14 +102,14 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "debug/*.cc" "onnx/onnx_exporter.cc" "operator/*.cc" - "transform/*.cc" "session/kernel_graph.cc" "utils/node_utils.cc" "session/session_basic.cc" "session/session_factory.cc" "session/anf_runtime_algorithm.cc" "vm/*.cc" - "pynative/*.cc" + "pynative/base.cc" + "pynative/pynative_execute.cc" "pybind_api/*.cc" "device/common/*.cc" "kernel/kernel_query.cc" 
@@ -117,7 +119,6 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "device/kernel_runtime.cc" "device/kernel_runtime_manager.cc" "device/convert_tensor_utils.cc" - "pre_activate/ascend/*.cc" "pre_activate/common/*.cc" "pre_activate/pass/*.cc" "pre_activate/gpu/*.cc" @@ -168,6 +169,15 @@ if(ENABLE_DUMP_PROTO) add_compile_definitions(ENABLE_DUMP_PROTO) endif() +if(ENABLE_GE) + file(GLOB_RECURSE GE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} + "transform/*.cc" + "pynative/pynative_execute_ge.cc" + "pipeline/pipeline_ge.cc" + ) + list(APPEND MINDSPORE_SRC_LIST ${GE_SRC_LIST}) +endif() + if(ENABLE_D) include_directories("${CMAKE_BINARY_DIR}/kernel/aicpu") file(GLOB_RECURSE PROTO_IN RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} @@ -188,6 +198,9 @@ if(ENABLE_D) "device/kernel_adjust.cc" "kernel/kernel_fusion.cc" "kernel/tbe/*.cc" + "pre_activate/ascend/*.cc" + "transform/*.cc" + "pipeline/pipeline_ge.cc" ) list(APPEND MINDSPORE_SRC_LIST ${D_SRC_LIST}) list(APPEND MINDSPORE_PROTO_AICPU_LIST ${PROTOSRCS}) @@ -246,9 +259,11 @@ if (ENABLE_GE) target_link_libraries(mindspore graph ge_client) endif() target_link_libraries(mindspore tsdclient) -else() +elseif(ENABLE_D) add_compile_definitions(NO_GE_CLIENT) target_link_libraries(mindspore graph) +else() + add_compile_definitions(NO_GE_CLIENT) endif() if(ENABLE_D) @@ -288,8 +303,6 @@ endif() set(PYTHON_MODULE_SOURCE pipeline/init.cc kernel/oplib/oplib.cc - kernel/akg/akgkernelbuild.cc - kernel/akg/akg_kernel_attrs_process.cc ${MS_STEPS_SRC_LIST} ${MS_CCE_SRC_LIST} ${MS_AICPU_SRC_LIST} ${MS_TASKINFO_LIST} ${MS_RT_SRC_LIST} ${GPU_NCCL_LIST} ${MS_HCCL_SRC_LIST} ${MS_PREDICT_SRC_LIST} ${CPU_SRC_LIST} ${MEM_REUSE_SRC_LIST} ${GPU_KERNEL_SRC_LIST}) @@ -350,6 +363,7 @@ if(ENABLE_GPU) assign_source_group("Include" ${GROUP_INCLUDE}) file(GLOB COMPILER_SRCS + "pre_activate/gpu/*.cc" ${TVM_DIR}/src/api/*.cc ${TVM_DIR}/src/arithmetic/*.cc ${TVM_DIR}/src/autotvm/*.cc diff --git a/mindspore/ccsrc/debug/e2e_dump.cc 
b/mindspore/ccsrc/debug/e2e_dump.cc index 3006bb66da..ba11eafa5f 100644 --- a/mindspore/ccsrc/debug/e2e_dump.cc +++ b/mindspore/ccsrc/debug/e2e_dump.cc @@ -49,7 +49,7 @@ bool Dump::IsKernelNeedDump(const std::string& kernel_name) { return false; } -bool Dump::ParseDumpConfig(const string& dump_config_file) { +bool Dump::ParseDumpConfig(const std::string& dump_config_file) { std::ifstream jsonFile(dump_config_file); if (!jsonFile.is_open()) { MS_LOG(ERROR) << dump_config_file << " open failed."; diff --git a/mindspore/ccsrc/device/ascend/kernel_build_ascend.cc b/mindspore/ccsrc/device/ascend/kernel_build_ascend.cc index 66ce697ffc..e7c1449360 100644 --- a/mindspore/ccsrc/device/ascend/kernel_build_ascend.cc +++ b/mindspore/ccsrc/device/ascend/kernel_build_ascend.cc @@ -94,7 +94,7 @@ static bool KernelBuildParallelCompile(const mindspore::session::KernelGraph *ke return ret; } -static vector CalCleanZerosSize(const CNodePtr &pre_node) { +static std::vector CalCleanZerosSize(const CNodePtr &pre_node) { MS_EXCEPTION_IF_NULL(pre_node); std::vector clean_size_list; // clean output diff --git a/mindspore/ccsrc/device/ascend/profiling/profiling_manager.cc b/mindspore/ccsrc/device/ascend/profiling/profiling_manager.cc index 4bc68e647a..29193e5cfa 100644 --- a/mindspore/ccsrc/device/ascend/profiling/profiling_manager.cc +++ b/mindspore/ccsrc/device/ascend/profiling/profiling_manager.cc @@ -27,6 +27,7 @@ #include "utils/log_adapter.h" #include "utils/context/ms_context.h" #include "common/utils.h" +#include "utils/convert_utils.h" using std::vector; using Json = nlohmann::json; diff --git a/mindspore/ccsrc/device/ascend/tasksink/task_generator.cc b/mindspore/ccsrc/device/ascend/tasksink/task_generator.cc index 88d00bc6c2..62cf809c21 100644 --- a/mindspore/ccsrc/device/ascend/tasksink/task_generator.cc +++ b/mindspore/ccsrc/device/ascend/tasksink/task_generator.cc @@ -121,8 +121,8 @@ bool TaskGenerator::LaunchKernel(const CNodePtr &anf_node_ptr, uint32_t stream_i 
LaunchAddrCleanKernel(anf_node_ptr, &kernel_inputs); } - std::vector task_info_ptrs = - kernel_mod->GenTask(kernel_inputs, kernel_workspaces, kernel_outputs, stream_id); + std::vector task_info_ptrs = dynamic_cast(kernel_mod) + ->GenTask(kernel_inputs, kernel_workspaces, kernel_outputs, stream_id); task_info_list->insert(task_info_list->end(), task_info_ptrs.begin(), task_info_ptrs.end()); return true; } diff --git a/mindspore/ccsrc/device/ascend/tasksink/task_generator.h b/mindspore/ccsrc/device/ascend/tasksink/task_generator.h index 0c56fcc744..ffedcd7930 100644 --- a/mindspore/ccsrc/device/ascend/tasksink/task_generator.h +++ b/mindspore/ccsrc/device/ascend/tasksink/task_generator.h @@ -24,7 +24,7 @@ #include #include "device/kernel_runtime.h" #include "ir/anf.h" -#include "kernel/kernel.h" +#include "kernel/ascend_kernel_mod.h" #include "framework/ge_runtime/task_info.h" namespace mindspore { diff --git a/mindspore/ccsrc/device/gpu/gpu_kernel_build.cc b/mindspore/ccsrc/device/gpu/gpu_kernel_build.cc index 0467b59e06..2a2a2be065 100644 --- a/mindspore/ccsrc/device/gpu/gpu_kernel_build.cc +++ b/mindspore/ccsrc/device/gpu/gpu_kernel_build.cc @@ -21,7 +21,6 @@ #include "kernel/gpu/gpu_kernel_factory.h" #include "operator/ops.h" #include "pybind11/stl.h" -#include "transform/convert.h" #include "session/anf_runtime_algorithm.h" namespace mindspore { namespace device { diff --git a/mindspore/ccsrc/device/gpu/kernel_info_setter.cc b/mindspore/ccsrc/device/gpu/kernel_info_setter.cc index 3faf7d01c8..05ecf380d1 100644 --- a/mindspore/ccsrc/device/gpu/kernel_info_setter.cc +++ b/mindspore/ccsrc/device/gpu/kernel_info_setter.cc @@ -91,7 +91,7 @@ std::string SupportedTypeList(const CNodePtr& kernel_node) { return supported_type_lists; } -bool SelectAkgKernel(const CNodePtr& kernel_node, const shared_ptr& selected_kernel_info) { +bool SelectAkgKernel(const CNodePtr& kernel_node, const std::shared_ptr& selected_kernel_info) { MS_EXCEPTION_IF_NULL(kernel_node); 
MS_EXCEPTION_IF_NULL(selected_kernel_info); std::vector> kernel_info_list; diff --git a/mindspore/ccsrc/device/kernel_adjust.cc b/mindspore/ccsrc/device/kernel_adjust.cc index a4d316d601..4d3ebf9490 100644 --- a/mindspore/ccsrc/device/kernel_adjust.cc +++ b/mindspore/ccsrc/device/kernel_adjust.cc @@ -32,6 +32,7 @@ #include "device/ascend/profiling/profiling_manager.h" #include "device/ascend/kernel_select_ascend.h" #include "device/kernel_info.h" +#include "runtime/base.h" constexpr auto kLoopCountParamName = "loop_count"; constexpr auto kIterLoopParamName = "iter_loop"; diff --git a/mindspore/ccsrc/ir/anf.cc b/mindspore/ccsrc/ir/anf.cc index c1348bf7d7..924453a7a6 100644 --- a/mindspore/ccsrc/ir/anf.cc +++ b/mindspore/ccsrc/ir/anf.cc @@ -197,6 +197,23 @@ PrimitivePtr GetCNodePrimitive(const AnfNodePtr& node) { return nullptr; } +std::string GetCNodeFuncName(const CNodePtr cnode) { + if (cnode->inputs().empty()) { + return ""; + } + + AnfNodePtr valuenode = cnode->input(0); + if (valuenode->isa()) { + auto value = GetValueNode(valuenode); + // check whether the valuenode is primitive + if (value->isa()) { + return value->cast()->name(); + } + return value->ToString(); + } + return ""; +} + bool IsPrimitive(const AnfNodePtr& node, const PrimitivePtr& value) { if (IsValueNode(node)) { PrimitivePtr fn_value = GetValueNode(node); diff --git a/mindspore/ccsrc/ir/anf.h b/mindspore/ccsrc/ir/anf.h index 9050a4ed16..e64b1329e9 100644 --- a/mindspore/ccsrc/ir/anf.h +++ b/mindspore/ccsrc/ir/anf.h @@ -384,6 +384,8 @@ static S GetValue(const ValuePtr &value) { return v; } +std::string GetCNodeFuncName(CNodePtr cnode); + // used to check whether an AnfNode is a cnode with a kind of Primitive as first input bool IsPrimitiveCNode(const AnfNodePtr &node, const PrimitivePtr &value); diff --git a/mindspore/ccsrc/ir/meta_tensor.cc b/mindspore/ccsrc/ir/meta_tensor.cc index d33bc10c27..e9221039a7 100644 --- a/mindspore/ccsrc/ir/meta_tensor.cc +++ b/mindspore/ccsrc/ir/meta_tensor.cc @@ 
-25,7 +25,6 @@ #include "device/device_address.h" #include "pybind_api/api_register.h" #include "pybind_api/export_flags.h" -#include "pynative/pynative_execute.h" #include "pipeline/static_analysis/abstract_value.h" namespace mindspore { diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_mod.h b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_mod.h index d1e6f69b23..dde2afe34a 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_mod.h +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_mod.h @@ -18,11 +18,11 @@ #include #include #include -#include "kernel/kernel.h" +#include "kernel/ascend_kernel_mod.h" #include "kernel/aicpu/aicpu_util.h" namespace mindspore { namespace kernel { -class AicpuOpKernelMod : public KernelMod { +class AicpuOpKernelMod : public AscendKernelMod { public: AicpuOpKernelMod(); ~AicpuOpKernelMod() override; diff --git a/mindspore/ccsrc/kernel/akg/akgkernelbuild.cc b/mindspore/ccsrc/kernel/akg/akgkernelbuild.cc index 8413208c4d..c0759172a5 100644 --- a/mindspore/ccsrc/kernel/akg/akgkernelbuild.cc +++ b/mindspore/ccsrc/kernel/akg/akgkernelbuild.cc @@ -35,7 +35,6 @@ #include "utils/convert_utils.h" #include "utils/any.h" #include "utils/utils.h" -#include "transform/convert.h" #include "session/anf_runtime_algorithm.h" #include "kernel/akg/akg_kernel_attrs_process.h" @@ -240,8 +239,8 @@ bool AkgKernelBuild::CreateOutputDescJson(const AnfNodePtr &anf_node, nlohmann:: return true; } -void GetJson(const AnfNodePtr &anf_node, const vector &dyn_input_sizes, const shared_ptr &op_attr, - nlohmann::json *const attr_json, const ValuePtr &attr_value) { +void GetJson(const AnfNodePtr &anf_node, const std::vector &dyn_input_sizes, + const std::shared_ptr &op_attr, nlohmann::json *const attr_json, const ValuePtr &attr_value) { MS_EXCEPTION_IF_NULL(anf_node); MS_EXCEPTION_IF_NULL(op_attr); MS_EXCEPTION_IF_NULL(attr_json); diff --git a/mindspore/ccsrc/kernel/ascend_kernel_mod.h b/mindspore/ccsrc/kernel/ascend_kernel_mod.h new file mode 100644 index 
0000000000..ff8595c1a2 --- /dev/null +++ b/mindspore/ccsrc/kernel/ascend_kernel_mod.h @@ -0,0 +1,36 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_ASCEND_KERNEL_MOD_H_ +#define MINDSPORE_CCSRC_KERNEL_ASCEND_KERNEL_MOD_H_ + +#include +#include +#include "framework/ge_runtime/task_info.h" +#include "kernel/kernel.h" + +using TaskInfoPtr = std::shared_ptr; +namespace mindspore { +namespace kernel { +class AscendKernelMod : public KernelMod { + public: + virtual std::vector GenTask(const std::vector &, const std::vector &, + const std::vector &, uint32_t) = 0; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_ASCEND_KERNEL_MOD_H_ diff --git a/mindspore/ccsrc/kernel/common_utils.cc b/mindspore/ccsrc/kernel/common_utils.cc index d610ea736d..c2f2638753 100644 --- a/mindspore/ccsrc/kernel/common_utils.cc +++ b/mindspore/ccsrc/kernel/common_utils.cc @@ -19,7 +19,6 @@ #include #include #include -#include "runtime/rt.h" #include "nlohmann/json.hpp" #include "session/anf_runtime_algorithm.h" #include "common/utils.h" @@ -490,7 +489,7 @@ void SaveJsonInfo(const std::string &json_name, const std::string &info) { if (!filewrite.is_open()) { return; } - filewrite << info << endl; + filewrite << info << std::endl; filewrite.close(); if (nullptr == realpath(path.c_str(), real_path)) { MS_LOG(DEBUG) << "dir " << path << " does not exit."; diff 
--git a/mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h index 2e284f72e8..51a2da8574 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h @@ -226,12 +226,12 @@ class LstmGpuKernel : public GpuKernel { size_t reserved_size_; // input desc - unique_ptr x_desc_; + std::unique_ptr x_desc_; cudnnTensorDescriptor_t hx_desc_; cudnnTensorDescriptor_t cx_desc_; cudnnFilterDescriptor_t w_desc_; cudnnDropoutDescriptor_t dropout_desc_; - unique_ptr y_desc_; + std::unique_ptr y_desc_; cudnnTensorDescriptor_t hy_desc_; cudnnTensorDescriptor_t cy_desc_; cudnnRNNDescriptor_t rnn_desc_; diff --git a/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h index 2fadccb8ea..a60ab78f7d 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h @@ -258,8 +258,8 @@ class LstmGradDataGpuKernel : public GpuKernel { cudnnRNNDescriptor_t rnn_desc_; // input desc - unique_ptr y_desc_; - unique_ptr dy_desc_; + std::unique_ptr y_desc_; + std::unique_ptr dy_desc_; cudnnTensorDescriptor_t dhy_desc_; cudnnTensorDescriptor_t dcy_desc_; cudnnFilterDescriptor_t w_desc_; @@ -269,7 +269,7 @@ class LstmGradDataGpuKernel : public GpuKernel { cudnnDropoutDescriptor_t dropout_desc_; // output desc - unique_ptr dx_desc_; + std::unique_ptr dx_desc_; cudnnTensorDescriptor_t dhx_desc_; cudnnTensorDescriptor_t dcx_desc_; diff --git a/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h index 6cf512f14a..b28736cc96 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h @@ -214,9 +214,9 @@ class LstmGradWeightGpuKernel : public GpuKernel { cudnnDropoutDescriptor_t dropout_desc_; // input desc - unique_ptr x_desc_; + 
std::unique_ptr x_desc_; cudnnTensorDescriptor_t hx_desc_; - unique_ptr y_desc_; + std::unique_ptr y_desc_; // output desc cudnnFilterDescriptor_t dw_desc_; diff --git a/mindspore/ccsrc/kernel/hccl/hccl_kernel.h b/mindspore/ccsrc/kernel/hccl/hccl_kernel.h index 71d9e5ba6a..24e1feec0a 100644 --- a/mindspore/ccsrc/kernel/hccl/hccl_kernel.h +++ b/mindspore/ccsrc/kernel/hccl/hccl_kernel.h @@ -23,14 +23,14 @@ #include #include #include -#include "kernel/kernel.h" +#include "kernel/ascend_kernel_mod.h" #include "kernel/hccl/hcom_util.h" #include "hccl/hcom.h" #include "common/utils.h" namespace mindspore { namespace kernel { -class HcclKernel : public KernelMod { +class HcclKernel : public AscendKernelMod { public: HcclKernel(); ~HcclKernel() override; diff --git a/mindspore/ccsrc/kernel/kernel.h b/mindspore/ccsrc/kernel/kernel.h index aecc51794c..80d831269c 100644 --- a/mindspore/ccsrc/kernel/kernel.h +++ b/mindspore/ccsrc/kernel/kernel.h @@ -25,7 +25,6 @@ #include "ir/meta_tensor.h" #include "pipeline/static_analysis/dshape.h" #include "utils/log_adapter.h" -#include "framework/ge_runtime/task_info.h" namespace mindspore { enum KernelType : int { UNKNOWN_KERNEL_TYPE = 0, AUTO_DIFF_KERNEL, AICPU_KERNEL, RT_KERNEL, HCCL_KERNEL, TBE_KERNEL }; @@ -111,7 +110,6 @@ struct Address { size_t size; }; using AddressPtr = std::shared_ptr

    ; -using TaskInfoPtr = std::shared_ptr; class KernelMod { public: @@ -120,10 +118,6 @@ class KernelMod { virtual const std::vector &GetWorkspaceSizeList() const = 0; virtual bool Launch(const std::vector &inputs, const std::vector &workspace, const std::vector &outputs, uintptr_t stream_ptr) = 0; - virtual std::vector GenTask(const std::vector &, const std::vector &, - const std::vector &, uint32_t) { - return {}; - } virtual std::vector GenParameters() { return {}; } virtual ~KernelMod() = default; diff --git a/mindspore/ccsrc/kernel/mng/rt_kernel.h b/mindspore/ccsrc/kernel/mng/rt_kernel.h index f86a86ef5d..54823f73cc 100644 --- a/mindspore/ccsrc/kernel/mng/rt_kernel.h +++ b/mindspore/ccsrc/kernel/mng/rt_kernel.h @@ -22,12 +22,12 @@ #include #include #include -#include "kernel/kernel.h" +#include "kernel/ascend_kernel_mod.h" #include "kernel/task_stream.h" namespace mindspore { namespace kernel { -class RtKernel : public KernelMod { +class RtKernel : public AscendKernelMod { public: RtKernel(); ~RtKernel() override; diff --git a/mindspore/ccsrc/kernel/oplib/oplib.cc b/mindspore/ccsrc/kernel/oplib/oplib.cc index 23e7014104..b20bd741f1 100644 --- a/mindspore/ccsrc/kernel/oplib/oplib.cc +++ b/mindspore/ccsrc/kernel/oplib/oplib.cc @@ -19,7 +19,7 @@ #include #include #include "utils/log_adapter.h" -#include "kernel/oplib/opinfo.h" +#include "utils/overload.h" #include "utils/context/ms_context.h" namespace mindspore { @@ -50,7 +50,7 @@ constexpr auto kNeedCompile = "need_compile"; constexpr auto kShape = "shape"; std::vector> OpLib::op_info_; -string ImplTypeToStr(OpImplyType impl_type) { +std::string ImplTypeToStr(OpImplyType impl_type) { switch (impl_type) { case kTBE: return kTbe; diff --git a/mindspore/ccsrc/kernel/tbe/tbe_kernel_build.h b/mindspore/ccsrc/kernel/tbe/tbe_kernel_build.h index bc4895ac6f..de5ed84e41 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_kernel_build.h +++ b/mindspore/ccsrc/kernel/tbe/tbe_kernel_build.h @@ -48,7 +48,7 @@ class TbeKernelBuild 
{ private: TbeKernelBuild() = default; ~TbeKernelBuild() = default; - static bool GenFusionDataInputJson(const shared_ptr &data_input, nlohmann::json *data_str, + static bool GenFusionDataInputJson(const std::shared_ptr &data_input, nlohmann::json *data_str, size_t *index); static bool GenFusionComputeJson(const mindspore::AnfNodePtr &compute_node, std::vector>::iterator *layer_iter, @@ -56,12 +56,13 @@ class TbeKernelBuild { static bool GenFusionComputeInputeJson(const mindspore::CNodePtr &cnode, std::vector>::iterator *layer_iter, std::vector *input_desc_list, size_t *index); - static void GenDescJson(const shared_ptr &anf_node, size_t out_idx, nlohmann::json *output_desc); - static void GenReusedOutputDesc(const shared_ptr &anf_node, size_t index, size_t output_index, - nlohmann::json *output_desc); + static void GenDescJson(const std::shared_ptr &anf_node, size_t out_idx, + nlohmann::json *output_desc); + static void GenReusedOutputDesc(const std::shared_ptr &anf_node, size_t index, + size_t output_index, nlohmann::json *output_desc); static size_t GetIOSizeImpl(const nlohmann::json &desc); - static bool GetInputLayers(const vector &input_nodes, - const vector &compute_nodes, + static bool GetInputLayers(const std::vector &input_nodes, + const std::vector &compute_nodes, std::vector> *input_layers); static bool IsDynamicInput(const CNodePtr &cnode); static size_t GetOptionalInput(const CNodePtr &cnode, bool is_dynamic_input); @@ -82,15 +83,17 @@ class TbeKernelJsonCreator { bool GenTbeAttrJson(const std::shared_ptr &anf_node, const std::shared_ptr &op_info, nlohmann::json *attrs_json); void ParseAttrValue(const std::string &type, const ValuePtr &value, nlohmann::json *attr_obj); - bool GenInputDescJson(const shared_ptr &anf_node, size_t real_input_index, bool value, - const shared_ptr &input_ptr, const string &op_input_name, size_t input_i, - vector *input_list); - bool GenOutputDescJson(const shared_ptr &anf_node, const vector> &outputs_ptr, - nlohmann::json 
*outputs_json); - bool GenInputList(const shared_ptr &anf_node, size_t input_tensor_num, const shared_ptr &input_ptr, - size_t *real_input_index, string *op_input_name, vector *input_list); - void GenOutputList(const shared_ptr &anf_node, const size_t &output_obj_num, - const shared_ptr &output_ptr, size_t *output_idx, vector *output_list); + bool GenInputDescJson(const std::shared_ptr &anf_node, size_t real_input_index, bool value, + const std::shared_ptr &input_ptr, const string &op_input_name, size_t input_i, + std::vector *input_list); + bool GenOutputDescJson(const std::shared_ptr &anf_node, + const std::vector> &outputs_ptr, nlohmann::json *outputs_json); + bool GenInputList(const std::shared_ptr &anf_node, size_t input_tensor_num, + const std::shared_ptr &input_ptr, size_t *real_input_index, string *op_input_name, + std::vector *input_list); + void GenOutputList(const std::shared_ptr &anf_node, const size_t &output_obj_num, + const std::shared_ptr &output_ptr, size_t *output_idx, + std::vector *output_list); kCreaterType creater_type_; std::string json_name_; std::string json_info_; diff --git a/mindspore/ccsrc/kernel/tbe/tbe_kernel_mod.h b/mindspore/ccsrc/kernel/tbe/tbe_kernel_mod.h index 35fc7f517d..f4fb75038e 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_kernel_mod.h +++ b/mindspore/ccsrc/kernel/tbe/tbe_kernel_mod.h @@ -21,12 +21,12 @@ #include #include #include -#include "kernel/kernel.h" +#include "kernel/ascend_kernel_mod.h" #include "kernel/tbe/tbe_utils.h" namespace mindspore { namespace kernel { -class TbeKernelMod : public KernelMod { +class TbeKernelMod : public AscendKernelMod { public: explicit TbeKernelMod(KernelPackPtr kernel_pack) : kernel_pack_(std::move(kernel_pack)) {} ~TbeKernelMod() override = default; diff --git a/mindspore/ccsrc/kernel/tbe/tbe_kernel_parallel_build.h b/mindspore/ccsrc/kernel/tbe/tbe_kernel_parallel_build.h index 45f56fdd0b..5066e9457f 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_kernel_parallel_build.h +++ 
b/mindspore/ccsrc/kernel/tbe/tbe_kernel_parallel_build.h @@ -55,8 +55,9 @@ class ParallelBuildManager { bool WaitOne(int *task_id, char **task_result) const; bool IsAllTaskFinish() const; std::pair TaskFinishProcess(int32_t task_id, bool set_kernel_mod = true); - KernelModPtr GenKernelMod(const string &json_name, const string &processor, const vector &input_size_list, - const vector &output_size_list, const KernelPackPtr &kernel_pack) const; + KernelModPtr GenKernelMod(const string &json_name, const string &processor, + const std::vector &input_size_list, const std::vector &output_size_list, + const KernelPackPtr &kernel_pack) const; private: PyObject *tbe_parallel_compiler_; diff --git a/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc b/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc index 1953fd0c72..92798aa6bc 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc @@ -168,7 +168,7 @@ bool ParseDynamicFormatJson(const std::string &jsonStr, std::vector &anf_node) { +std::string OpSelectFormat(const std::shared_ptr &anf_node) { nlohmann::json kernel_json; std::string res_json_str; TbeKernelJsonCreator creator(OP_SELECT_FORMAT); @@ -182,7 +182,7 @@ std::string OpSelectFormat(const shared_ptr &anf_node) { return res_json_str; } -void SetTidyInputsInfo(const shared_ptr &anf_node, +void SetTidyInputsInfo(const std::shared_ptr &anf_node, const std::shared_ptr &builder, const std::vector> &inputs) { std::vector inputs_type; @@ -231,7 +231,7 @@ void SetTidyInputsInfo(const shared_ptr &anf_node, builder->SetInputsFormat(inputs_format); } -void SetTidyOutputsInfo(const shared_ptr &anf_node, +void SetTidyOutputsInfo(const std::shared_ptr &anf_node, const std::shared_ptr &builder, const std::vector> &outputs) { std::vector outputs_type; @@ -268,7 +268,8 @@ void SetTidyOutputsInfo(const shared_ptr &anf_node, builder->SetOutputsFormat(outputs_format); } -void GenTidyKernelBuildInfo(const shared_ptr &anf_node, const 
std::vector> &inputs, +void GenTidyKernelBuildInfo(const std::shared_ptr &anf_node, + const std::vector> &inputs, const std::vector> &outputs) { auto builder_tmp = std::make_shared(); builder_tmp->SetKernelType(TBE_KERNEL); diff --git a/mindspore/ccsrc/kernel/tbe/tbe_utils.cc b/mindspore/ccsrc/kernel/tbe/tbe_utils.cc index ab29ca69bb..5980a0fd88 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_utils.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_utils.cc @@ -26,6 +26,7 @@ #include #include +#include "runtime/kernel.h" #include "kernel/oplib/oplib.h" #include "utils/utils.h" #include "session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/pipeline/base.h b/mindspore/ccsrc/pipeline/base.h new file mode 100644 index 0000000000..d007eac294 --- /dev/null +++ b/mindspore/ccsrc/pipeline/base.h @@ -0,0 +1,64 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_PIPELINE_BASE_H_ +#define MINDSPORE_CCSRC_PIPELINE_BASE_H_ + +#include +#include +#include +#include + +#include "ir/anf.h" +#include "pipeline/resource.h" +#include "utils/context/ms_context.h" + +namespace mindspore { +namespace pipeline { + +struct ExecutorInfo { + FuncGraphPtr func_graph; + ResourcePtr resource; + std::size_t arg_list_size; +}; + +using ExecutorInfoPtr = std::shared_ptr; + +inline std::string GetPhasePrefix(const std::string& phase) { + auto pos = phase.find('.'); + if (pos == std::string::npos) { + MS_LOG(EXCEPTION) << "phase has no . for prefix" << phase; + } + return phase.substr(0, pos); +} + +inline std::string GetFilePathName(const std::string& file_name) { + std::ostringstream oss; + auto ms_context = MsContext::GetInstance(); + if (ms_context == nullptr) { + MS_LOG(EXCEPTION) << "ms_context is nullptr"; + } + auto save_graphs_path = ms_context->save_graphs_path(); + if (save_graphs_path.empty()) { + save_graphs_path = "."; + } + oss << save_graphs_path << "/" << file_name; + return oss.str(); +} +} // namespace pipeline +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_PIPELINE_BASE_H_ diff --git a/mindspore/ccsrc/pipeline/init.cc b/mindspore/ccsrc/pipeline/init.cc index f42ae0cf7b..24ead047d3 100644 --- a/mindspore/ccsrc/pipeline/init.cc +++ b/mindspore/ccsrc/pipeline/init.cc @@ -73,7 +73,7 @@ PYBIND11_MODULE(_c_expression, m) { "Get CNode Strategy Dictionary.") .def("get_allreduce_fusion", &ExecutorPy::GetAllreduceFusion, py::arg("phase") = py::str("train"), "Get Allreduce Fusion Dictionary.") - .def("build_data_graph", &ExecutorPy::BuildDFGraph, py::arg("build_params"), py::arg("phase") = py::str("train"), + .def("build_data_graph", &ExecutorPy::BuildGraph, py::arg("build_params"), py::arg("phase") = py::str("train"), py::arg("broadcast_params") = py::dict(), "Build data graph.") .def("has_compiled", &ExecutorPy::HasCompiled, py::arg("phase") = py::str(""), "get if cell compiled.") 
.def("run_init_graph", &ExecutorPy::RunInitGraph, "Run init Graph."); @@ -86,19 +86,17 @@ PYBIND11_MODULE(_c_expression, m) { (void)m.def("generate_key", &mindspore::pipeline::GenerateKey, "Generate the function graph key."); (void)m.def("real_run_op", &mindspore::pynative::RunOp, "Run op pynatively."); - (void)m.def("initialize_distribute", &mindspore::pipeline::InitDistribute, "Initialize for Distribute.") - .def("init_ge", &mindspore::pipeline::InitGe, "Init GE"); (void)m.def("reset_op_id", &mindspore::pipeline::ResetOpId, "Reset Operator Id"); (void)m.def("init_hccl", &mindspore::pipeline::InitHccl, "Init Hccl"); - (void)m.def("finalize_ge", &mindspore::pipeline::FinalizeGe, "Finalize Ge"); (void)m.def("finalize_hccl", &mindspore::pipeline::FinalizeHccl, "Finalize Hccl"); - (void)m.def("set_ge_option", &mindspore::pipeline::SetGeOption, "API for set ge option."); (void)m.def("verify_inputs_signature", &mindspore::pipeline::VerifyInputSignature, "Verify input signature."); (void)m.def("init_exec_dataset", &mindspore::pipeline::InitExecDataset, py::arg("queue_name"), py::arg("size"), py::arg("batch_size"), py::arg("types"), py::arg("shapes"), py::arg("input_indexs"), py::arg("phase") = py::str("dataset"), "Init and exec dataset."); (void)m.def("_set_dataset_mode_config", &mindspore::ConfigManager::SetDatasetModeConfig, "API for set dataset mode."); - (void)m.def("export_graph", &mindspore::pipeline::ExportDFGraph, "Export Graph."); + (void)m.def("init_ge", &mindspore::pipeline::InitGe, "Init GE"); + + (void)m.def("export_graph", &mindspore::pipeline::ExportGraph, "Export Graph."); (void)py::class_>(m, "MSContext") .def_static("get_instance", &mindspore::MsContext::GetInstance, "Get ms context instance.") diff --git a/mindspore/ccsrc/pipeline/parse/python_adapter.cc b/mindspore/ccsrc/pipeline/parse/python_adapter.cc index 776e33235e..db40238729 100644 --- a/mindspore/ccsrc/pipeline/parse/python_adapter.cc +++ b/mindspore/ccsrc/pipeline/parse/python_adapter.cc @@ 
-27,6 +27,7 @@ static std::shared_ptr scoped_ = nullptr; // true: start process from python, false: start process from c++ static bool python_env_ = false; static bool use_signature_in_resolve_ = true; +void ResetPythonScope() { scoped_ = nullptr; } void set_use_signature_in_resolve(bool use_signature) noexcept { use_signature_in_resolve_ = use_signature; } bool UseSignatureInResolve() { return use_signature_in_resolve_; } void set_python_env_flag(bool python_env) noexcept { python_env_ = python_env; } diff --git a/mindspore/ccsrc/pipeline/parse/python_adapter.h b/mindspore/ccsrc/pipeline/parse/python_adapter.h index 4b9cbff251..12cfc27186 100644 --- a/mindspore/ccsrc/pipeline/parse/python_adapter.h +++ b/mindspore/ccsrc/pipeline/parse/python_adapter.h @@ -55,6 +55,7 @@ void set_use_signature_in_resolve(bool use_signature) noexcept; bool UseSignatureInResolve(); std::shared_ptr set_python_scoped(); +void ResetPythonScope(); bool IsPythonEnv(); void SetPythonPath(const std::string& path); void set_python_env_flag(bool python_env) noexcept; diff --git a/mindspore/ccsrc/pipeline/pipeline.cc b/mindspore/ccsrc/pipeline/pipeline.cc index 3c3478e89d..861862b849 100644 --- a/mindspore/ccsrc/pipeline/pipeline.cc +++ b/mindspore/ccsrc/pipeline/pipeline.cc @@ -27,11 +27,6 @@ #include "pipeline/pass.h" #include "pipeline/parse/data_converter.h" #include "optimizer/ad/dfunctor.h" -#include "ir/meta_tensor.h" -#include "transform/convert.h" -#include "transform/df_graph_manager.h" -#include "transform/graph_builder.h" -#include "transform/graph_runner.h" #include "debug/anf_ir_dump.h" #include "debug/anf_ir_utils.h" #include "utils/config_manager.h" @@ -44,6 +39,12 @@ #include "device/kernel_runtime_manager.h" #include "debug/trace.h" +#if (ENABLE_GE || ENABLE_D) +#include "pipeline/pipeline_ge.h" +#include "transform/convert.h" +#include "transform/df_graph_manager.h" +#endif + namespace mindspore { // namespace to support intermediate representation definition namespace 
pipeline { @@ -54,12 +55,6 @@ using mindspore::abstract::AbstractTensor; using mindspore::abstract::AbstractTensorPtr; using mindspore::abstract::AbstractTuple; using mindspore::abstract::AbstractTuplePtr; -using mindspore::transform::DfGraphConvertor; -using mindspore::transform::DfGraphManager; -using mindspore::transform::GeTensorPtr; -using mindspore::transform::MeTensorPtr; -using mindspore::transform::Status; -using mindspore::transform::TransformUtil; const char IR_TYPE_ANF[] = "anf_ir"; const char IR_TYPE_ONNX[] = "onnx_ir"; @@ -85,65 +80,8 @@ std::string GetBaseNameForIR(int stage_idx, const std::string& action_name) { oss << save_graphs_path << "/" << stage_idx << "_" << action_name; return oss.str(); } - -std::string GetFilePathName(const std::string& file_name) { - std::ostringstream oss; - auto ms_context = MsContext::GetInstance(); - if (ms_context == nullptr) { - MS_LOG(EXCEPTION) << "ms_context is nullptr"; - } - auto save_graphs_path = ms_context->save_graphs_path(); - if (save_graphs_path.empty()) { - save_graphs_path = "."; - } - oss << save_graphs_path << "/" << file_name; - return oss.str(); -} } // namespace -// We will not execute graph when output is constant or just input itself. -static bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr& output, const py::tuple& args, - const std::shared_ptr& ret_val) { - if (output->isa()) { - MS_LOG(INFO) << "Graph's output is a constant. No need to execute."; - ValuePtr value = GetValueNode(output); - *ret_val = ValuePtrToPyData(value); - return true; - } - - // Adapter will transform values in __init__() and construct() to parameters, this could cause - // inputs (a.k.a args in current function) size less than parameters'. - if (output->isa()) { - MS_LOG(INFO) << "Graph's output is a parameter. If all params are inputs, no need to execute."; - if (args.empty()) { - MS_LOG(EXCEPTION) << "Inputs size is 0, let graph to be executed."; - } - // Find the right parameter as ret_val. 
- auto func_graph = output->func_graph(); - MS_EXCEPTION_IF_NULL(func_graph); - auto params = func_graph->parameters(); - if (params.empty()) { - MS_EXCEPTION(UnknownError) << "Graph's parameters size is 0"; - } - if (args.size() != params.size()) { - MS_LOG(EXCEPTION) << "Input size " << args.size() << " not equal to params size " << params.size() - << ", let graph to be executed."; - } - - auto it = std::find(params.begin(), params.end(), output); - if (it == params.end()) { - MS_EXCEPTION(UnknownError) << "When graph output is Parameter, it should be found in graph parameters"; - } - size_t index = it - params.cbegin(); - if (index >= args.size()) { - MS_EXCEPTION(UnknownError) << "Index " << index << " equal or larger than args size " << args.size() << "."; - } - *ret_val = args[index]; - return true; - } - return false; -} - py::tuple GenerateKey(const std::string& name, const std::unordered_map& defaults) { MS_LOG(DEBUG) << "GenerateKey args size:" << defaults.size(); abstract::AbstractBasePtrList args_spec; @@ -207,11 +145,7 @@ py::bool_ VerifyInputSignature(const py::list input_signature, const py::tuple i return true; } -ExecutorPy::ExecutorPy() { - // because Ge only support one Session exist at the same time ,so we delete the old one - DfGraphManager::GetInstance().DeleteGraphRunner(); - DfGraphManager::GetInstance().DeleteGeSession(); -} +ExecutorPy::ExecutorPy() {} ResourcePtr ExecutorPy::GetResource(const std::string& phase) { MS_LOG(DEBUG) << "phase size:" << info_.size(); @@ -221,14 +155,6 @@ ResourcePtr ExecutorPy::GetResource(const std::string& phase) { return info_[phase]->resource; } -std::string GetPhasePrefix(const std::string& phase) { - auto pos = phase.find('.'); - if (pos == std::string::npos) { - MS_LOG(EXCEPTION) << "phase has no . 
for prefix" << phase; - } - return phase.substr(0, pos); -} - FuncGraphPtr ExecutorPy::GetFuncGraph(const std::string& phase) { if (info_.count(phase) == 0) { MS_LOG(EXCEPTION) << "no phase in executor:" << GetPhasePrefix(phase); @@ -323,11 +249,15 @@ void ExecutorPy::DelNetRes(const std::string& id) { } } + MS_LOG(INFO) << "Delete flag:" << flag; +#ifdef ENABLE_GE if (flag && info_.size() == 0) { - DfGraphManager::GetInstance().DeleteGraphRunner(); - DfGraphManager::GetInstance().EraseAnfGraph(); - DfGraphManager::GetInstance().DeleteGeSession(); + // because Ge only support one Session exist at the same time ,so we delete the old one + transform::DfGraphManager::GetInstance().DeleteGraphRunner(); + transform::DfGraphManager::GetInstance().EraseAnfGraph(); + transform::DfGraphManager::GetInstance().DeleteGeSession(); } +#endif } } @@ -405,7 +335,8 @@ bool ExecutorPy::CompileInner(const py::object& obj, const py::tuple& args, cons use_vm = ChangeExportGeirUseVmFlag(use_vm, phase_s); - if (use_vm) { + std::string backend = MsContext::GetInstance()->backend_policy(); + if (use_vm && backend != "ge") { // Create backend and session resource->results()[kBackend] = compile::CreateBackend(); p_actions = VmPipeline(); @@ -497,30 +428,6 @@ bool ExecutorPy::Compile(const py::object& obj, const py::tuple& args, const py: return ret_value; } -void SetGeOption(const std::map& options) { - ConfigManager::GetInstance().set_ge_initialize_options(options); -} - -bool InitDistribute(const std::map& options) { - ConfigManager::GetInstance().set_parallel_strategy(ParallelStrategy::DISTRIBUTION); - MS_LOG(INFO) << "ME run in DISTRIBUTION strategy mode"; - - SetGeOption(options); -#ifdef ENABLE_GE - auto ge_options = ConfigManager::GetInstance().ge_initialize_options(); - { - // Release GIL before calling into (potentially long-running) C++ code - py::gil_scoped_release release; - if (ge::GEInitialize(ge_options) != ge::GRAPH_SUCCESS) { - MS_LOG(ERROR) << "Initialize GE failed!"; - 
return false; - } - } -#endif - MS_LOG(DEBUG) << "Initialize Ge success"; - return true; -} - #ifdef ENABLE_LOAD_ANF_IR // get MindSpore Intermediate Representation File std::string GetMsIrFile(void) { @@ -704,9 +611,25 @@ py::object ExecutorPy::Run(const py::tuple& args, const py::object& phase) { } auto phase_s = py::cast(phase); std::string backend = MsContext::GetInstance()->backend_policy(); +#ifdef ENABLE_GE + if (backend == "ge") { + return ExecDFGraph(info_, args, phase_s); + } +#else + MS_LOG(WARNING) << "In ut test " << size << phase_s; if (backend == "ge") { - return ExecDFGraph(args, phase_s); + std::shared_ptr ret_val = std::make_shared(); + if (info_.count(phase_s) != 0 && info_[phase_s]->func_graph != nullptr) { + if (IsGraphOutputValueNodeOrParameter(info_[phase_s]->func_graph->output(), args, ret_val)) { + return *ret_val; + } + } + if (args.size() > 0) { + return args[0]; + } + return args; } +#endif std::size_t full_arg_size = ArgListSize(phase_s); if (size > full_arg_size) { MS_LOG(WARNING) << "The arg num : size = " << size << ". full_arg_size = " << full_arg_size; @@ -719,435 +642,25 @@ py::object ExecutorPy::Run(const py::tuple& args, const py::object& phase) { MS_LOG(EXCEPTION) << "Can't find run graph func for " << phase_s; } - MS_LOG(DEBUG) << "eval run"; + MS_LOG(DEBUG) << "eval run" << backend; BaseRef value = (*run)(arg_list); MS_LOG(DEBUG) << "run end"; return BaseRefToPyData(value); } -py::object ExtractGeneralCnodeRet(const AbstractBasePtr& cnode_data, const py::tuple& data, size_t* count) { - MS_EXCEPTION_IF_NULL(cnode_data); - if (*count >= data.size()) { - MS_LOG(EXCEPTION) << "The number of elements in the outputs : " << data.size() - << " less than the number of elements required. 
"; - } - - if (cnode_data->isa()) { - BaseShapePtr shape = cnode_data->BuildShape(); - auto shape_act = shape->cast()->shape(); - Tensor tensor_exp = py::cast(data[*count]); - if (shape_act != tensor_exp.shape()) { - MS_LOG(EXCEPTION) << "The shape of the tensor returned from GE is not the same as " - "the shape of the tensor derived from ME."; - } - return data[(*count)++]; - } - - if (!cnode_data->isa()) { - MS_LOG(EXCEPTION) << "The output of operator in the final anf graph could " - << "only be a tensor or a tuple of tensor, but got " << cnode_data->BuildValue()->ToString() - << "."; - } - auto data_tp = cnode_data->cast(); - auto elements = data_tp->elements(); - size_t size = data_tp->size(); - py::tuple tp = py::tuple(size); - for (size_t i = 0; i < size; i++) { - tp[i] = ExtractGeneralCnodeRet(elements[i], data, count); - } - return std::move(tp); -} - -py::object StructureOutput(const AnfNodePtr& output_node, const py::tuple& data, size_t* count) { - MS_EXCEPTION_IF_NULL(output_node); - - if (output_node->isa()) { - return ValuePtrToPyData(GetValueNode(output_node)); - } - - if (*count >= data.size()) { - MS_LOG(EXCEPTION) << "The number of elements in the outputs : " << data.size() - << " less than the number of elements required. 
"; - } - if (output_node->isa()) { - return data[(*count)++]; - } - - auto output_c = output_node->cast(); - if (output_c == nullptr) { - MS_LOG(EXCEPTION) << "The final anf graph could only have constant, parameter, and operator, but got " - << output_node->ToString(); - } - - if (output_c->IsApply(prim::kPrimMakeTuple)) { - auto input_list = output_c->inputs(); - size_t size = input_list.size(); - py::tuple tp = py::tuple(size - 1); - for (size_t i = 1; i < size; i++) { - tp[i - 1] = StructureOutput(input_list[i], data, count); - } - return std::move(tp); - } - if (output_c->IsApply(prim::kPrimDepend)) { - return StructureOutput(output_c->input(1), data, count); - } - - return ExtractGeneralCnodeRet(output_c->abstract(), data, count); -} - -std::shared_ptr DoExecGraph(const FuncGraphPtr& graph, const std::vector& inputs, - const std::string& phase) { - std::vector ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW); - if (ge_tensors.size() != inputs.size()) { - MS_LOG(ERROR) << "args convert to ge tensor error"; - return nullptr; - } - - std::vector ge_outputs; - transform::RunOptions run_options; - - run_options.name = phase; - - auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner(); - - if (graph_runner == nullptr) { - MS_LOG(ERROR) << "Can not found GraphRunner"; - return nullptr; - } - - { - // Release GIL before calling into (potentially long-running) C++ code - py::gil_scoped_release release; - MS_LOG(DEBUG) << "Run graph begin, inputs size is: " << inputs.size(); - Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs); - MS_LOG(DEBUG) << "Run graph finish, outputs size is: " << ge_outputs.size(); - if (ret != Status::SUCCESS) { - MS_LOG(ERROR) << "Exec graph failed"; - return nullptr; - } - } - - std::vector me_outputs = TransformUtil::ConvertGeTensors(ge_outputs); - if (me_outputs.size() != ge_outputs.size()) { - MS_LOG(ERROR) << "Convert output Ge tensor to Me tensor failed"; - } - - py::tuple 
outputs(me_outputs.size()); - for (std::size_t i = 0; i < outputs.size(); i++) { - outputs[i] = *me_outputs[i]; - } - - std::shared_ptr ret = nullptr; - -#ifdef ENABLE_GE - AnfNodePtr output_node = graph->get_return()->input(1); - MS_EXCEPTION_IF_NULL(output_node); - size_t count = 0; - py::object oj = StructureOutput(output_node, outputs, &count); - ret = std::make_shared(oj); +FuncGraphPtr ExecutorPy::BuildGraph(const py::dict& init_params, const std::string& phase, + const py::object& broadcast_params) { +#if (ENABLE_GE || ENABLE_D) + return BuildDFGraph(info_, init_params, phase, broadcast_params); #else - if (outputs.size() == 1) { - ret = std::make_shared(outputs[0]); - } else { - ret = std::make_shared(outputs); - } -#endif - - return ret; -} - -void DoExecNonInputGraph(const std::string& phase) { - std::vector ge_tensors; - std::vector ge_outputs; - transform::RunOptions run_options; - run_options.name = phase; - auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner(); - - if (graph_runner == nullptr) { - MS_LOG(ERROR) << "Can not found GraphRunner"; - return; - } - { - // Release GIL before calling into (potentially long-running) C++ code - py::gil_scoped_release release; - Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs); - if (ret != Status::SUCCESS) { - MS_LOG(ERROR) << "Exec graph:" << run_options.name << " failed"; - return; - } - } -} - -void ExecutorPy::ProcessGeArg(const py::tuple& args, const std::string& phase, std::vector* inputs) { - // check the arg and use the ExecutorPy args - std::size_t size = args.size(); - if (size != ArgListSize(phase)) { - MS_LOG(EXCEPTION) << "The real arg num : size = " << size << ". 
graph_arg_size = " << ArgListSize(phase); - } - - // process the first args of tensor - // only in Dataset Feed Mode, fp_bp graph need input tensors - if (ConfigManager::GetInstance().dataset_mode() == DS_FEED_MODE) { - for (std::size_t i = 0; i < size; i++) { - ValuePtr converted = nullptr; - bool succ = parse::ConvertData(args[i], &converted); - if (!succ) { - MS_LOG(EXCEPTION) << "args convert error"; - } - if (converted->isa()) { - (*inputs).push_back(converted->cast()); - } else { - MS_LOG(EXCEPTION) << "args, " << converted->ToString() << " is not tensor"; - } - } - } -} - -py::object ExecutorPy::ExecDFGraph(const py::tuple& args, const std::string& phase) { - std::string phase_prefix = GetPhasePrefix(phase); - - if (phase_prefix == "save") { - DoExecNonInputGraph(phase); - ConfigManager::GetInstance().ResetConfig(); - return py::none(); - } - - if (info_.count(phase) == 0) { - MS_LOG(EXCEPTION) << "has no phase:" << phase; - } - -#if (!defined ENABLE_GE) || (defined ENABLE_INFER) - // Now don't use the graph because the exec ge function don't take effect - MS_EXCEPTION_IF_NULL(info_[phase]->func_graph); - if (ENABLE_TRAIN != info_[phase]->func_graph->flags()["training"]) { - MS_LOG(ERROR) << "Graph training mode mismatch mode of libraries"; - ConfigManager::GetInstance().ResetConfig(); - return py::none(); - } + return nullptr; #endif - - std::shared_ptr ret_val = std::make_shared(); - if (IsGraphOutputValueNodeOrParameter(info_[phase]->func_graph->output(), args, ret_val)) { - ConfigManager::GetInstance().ResetConfig(); - return *ret_val; - } - - std::vector inputs; - ProcessGeArg(args, phase, &inputs); - - std::shared_ptr ret = DoExecGraph(GetFuncGraph(phase), inputs, phase); - ConfigManager::GetInstance().ResetConfig(); - if (ret != nullptr) { - return *ret; - } else { - MS_LOG(EXCEPTION) << "exec graph failed"; - } } void ExecutorPy::RunInitGraph(const py::dict& init_params, const std::string& phase) { - MS_LOG(DEBUG) << "ExecInitGraph start."; - 
TensorOrderMap inputs_with_name{}; - ConvertObjectToTensors(init_params, &inputs_with_name); - std::vector inputs; - (void)std::transform(inputs_with_name.begin(), inputs_with_name.end(), std::back_inserter(inputs), - [](const std::pair& item) { return item.second; }); - - std::vector ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW); - if (ge_tensors.size() != inputs.size()) { - MS_LOG(ERROR) << "Args convert to ge tensor error."; - return; - } - MS_LOG(DEBUG) << "Run graph begin, inputs size is: " << inputs.size() << "."; - - std::vector ge_outputs; - transform::RunOptions run_options; - - run_options.name = phase; - if (DfGraphManager::GetInstance().GetGraphByName(phase) == nullptr) { - MS_LOG(WARNING) << "Can not find " << phase << " sub graph, don't need data init subgraph in INFER mode."; - return; - } - auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner(); - if (graph_runner == nullptr) { - MS_LOG(EXCEPTION) << "Can not found GraphRunner."; - } - { - // Release GIL before calling into (potentially long-running) C++ code - py::gil_scoped_release release; - Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs); - if (ret != Status::SUCCESS) { - MS_LOG(EXCEPTION) << "Exec " << phase << " graph failed."; - } - - MS_LOG(INFO) << "Exec " << phase << " graph success."; - - if ((ConfigManager::GetInstance().parallel_strategy() == ParallelStrategy::DISTRIBUTION) && - (DfGraphManager::GetInstance().GetGraphByName(BROADCAST_GRAPH_NAME) != nullptr)) { - run_options.name = BROADCAST_GRAPH_NAME; - ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs); - if (ret != Status::SUCCESS) { - MS_LOG(EXCEPTION) << "Exec BROADCAST_GRAPH_NAME failed."; - } - MS_LOG(INFO) << "Exec broadcast graph success."; - } - } -} - -Status CreateSessionAndGraphRunner(bool is_training = true) { - std::shared_ptr sess = DfGraphManager::GetInstance().GetGeSession(); - if (sess == nullptr) { - transform::SessionOptions options; - 
if (is_training) { - options["ge.trainFlag"] = "1"; - options["ge.streamNum"] = "100"; - options["ge.enabledLocalFmkop"] = "1"; - options["ge.hcomParallel"] = "1"; - } else { - options["ge.trainFlag"] = "0"; - } - - options["ge.enablePrintOpPass"] = "0"; - sess = transform::GraphRunner::NewSession(options); - if (sess == nullptr) { - MS_LOG(ERROR) << "Init data graph failed, because of create Ge session failed"; - return Status::FAILED; - } else { - DfGraphManager::GetInstance().SetGeSession(sess); - } - } - - transform::GraphRunnerOptions options; - options.sess_ptr = sess; - auto graph_runner = std::make_shared(options); - if (graph_runner == nullptr) { - MS_LOG(ERROR) << "Create new graph runner failed"; - return Status::FAILED; - } else { - DfGraphManager::GetInstance().SetGraphRunner(graph_runner); - } - - return Status::SUCCESS; -} - -void ExecutorPy::ConvertObjectToTensors(const py::dict& dict, TensorOrderMap* const tensors) { - for (auto item : dict) { - if ((!py::isinstance(item.first))) { - MS_LOG(WARNING) << "Type of key of py_dict is not string, ignore it."; - continue; - } - std::shared_ptr tensor; - std::string name = py::cast(item.first); - if (py::isinstance(item.second.attr("default_input"))) { - // convert float to tensor with shape([1]) - tensor = std::make_shared(kNumberTypeFloat32, std::vector({1})); - *(static_cast(tensor->data_c(true))) = py::cast(item.second.attr("default_input")); - } else if (py::isinstance(item.second.attr("default_input"))) { - // convert int to tensor with shape([1]) - tensor = std::make_shared(kNumberTypeInt32, std::vector({1})); - *(static_cast(tensor->data_c(true))) = py::cast(item.second.attr("default_input")); - } else if (py::hasattr(item.second.attr("default_input"), PYTHON_TENSOR_FLAG)) { - // cast tensor - tensor = py::cast>(item.second.attr("default_input")); - } - - if (tensor == nullptr) { - MS_LOG(EXCEPTION) << "Get default value for " << name << " failed"; - } - (void)tensors->emplace(name, tensor); - } -} 
- -bool ExecutorPy::AddDFGraph(const py::dict& init_params, const std::string& phase, const py::object& broadcast_params) { - FuncGraphPtr anf_graph = info_[phase]->func_graph; - DfGraphConvertor convertor(anf_graph); - - size_t pos = phase.find('.'); - std::string net_id = ((pos == std::string::npos || pos == phase.size() - 1) ? phase : phase.substr(pos + 1)); - std::string phase_prefix = phase.substr(0, pos); - - if (phase_prefix == "export") { - MS_LOG(INFO) << "Set DfGraphConvertor training : false"; - convertor.set_training(false); - } - - TensorOrderMap init_tensors{}; - ConvertObjectToTensors(init_params, &init_tensors); - (void)convertor.ConvertAllNode().InitParam(init_tensors).BuildGraph(); - - if (broadcast_params != py::none()) { - if (!py::isinstance(broadcast_params)) { - MS_LOG(ERROR) << "Invalid broadcast params, it must be py::dict type"; - return false; - } - py::dict broadcast = broadcast_params.cast(); - if (broadcast.empty()) { - (void)convertor.GenerateBroadcastGraph(init_tensors); - } else { - TensorOrderMap broadcast_tensors{}; - ConvertObjectToTensors(broadcast, &broadcast_tensors); - (void)convertor.GenerateBroadcastGraph(broadcast_tensors); - } - MS_LOG(INFO) << "Generate broadcast graph with params and broadcast_empty is " << broadcast.empty(); - } - - (void)convertor.GenerateCheckpointGraph(); - if (convertor.ErrCode() != 0) { - DfGraphManager::GetInstance().ClearGraph(); - MS_LOG(ERROR) << "convert df graph failed, err:" << convertor.ErrCode(); - return false; - } - - if (MsContext::GetInstance()->save_graphs_flag()) { - convertor.DrawComputeGraph(GetFilePathName("ge_graph.dot")); // for debug - convertor.DrawInitGraph(GetFilePathName("init_graph.dot")); // for debug - convertor.DrawSaveCheckpointGraph(GetFilePathName("save_checkpoint_graph.dot")); // for debug - } - std::string init_graph = "init_subgraph." + net_id; - std::string checkpoint_name = "save." 
+ net_id; - if (phase.find("train") != std::string::npos) { - (void)DfGraphManager::GetInstance().AddGraph(phase, convertor.GetComputeGraph(), {{"ge.exec.variable_acc", "1"}}); - } else { - (void)DfGraphManager::GetInstance().AddGraph(phase, convertor.GetComputeGraph()); - } - (void)DfGraphManager::GetInstance().AddGraph(init_graph, convertor.GetInitGraph()); - (void)DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, convertor.GetBroadcastGraph()); - Status ret = DfGraphManager::GetInstance().AddGraph(checkpoint_name, convertor.GetSaveCheckpointGraph()); - if (ret == Status::SUCCESS) { - DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph); - } - - return true; -} - -FuncGraphPtr ExecutorPy::BuildDFGraph(const py::dict& init_params, const std::string& phase, - const py::object& broadcast_params) { - if (info_.count(phase) == 0) { - MS_LOG(EXCEPTION) << "no phase in executor:" << GetPhasePrefix(phase); - } - FuncGraphPtr anf_graph = info_[phase]->func_graph; - - if (MsContext::GetInstance()->save_graphs_flag()) { - draw::Draw(GetFilePathName("anf_graph.dot"), anf_graph); // for debug - DumpIR(GetFilePathName("anf_graph.ir"), anf_graph, true); - } - - if (!AddDFGraph(init_params, phase, broadcast_params)) { - MS_LOG(ERROR) << "GenConvertor failed"; - return nullptr; - } - -#if ENABLE_TRAIN - (void)setenv("GE_TRAIN", "1", 1); -#else - (void)setenv("GE_TRAIN", "0", 1); +#if ENABLE_GE + RunGEInitGraph(init_params, phase); #endif - - if (CreateSessionAndGraphRunner(static_cast(ENABLE_TRAIN)) != Status::SUCCESS) { - MS_LOG(ERROR) << "Create GE Session or GraphRunner failed."; - return nullptr; - } - - return anf_graph; } bool InitExecDataset(const std::string& queue_name, int64_t iter_num, int64_t batch_size, @@ -1156,47 +669,16 @@ bool InitExecDataset(const std::string& queue_name, int64_t iter_num, int64_t ba std::string name = MsContext::GetInstance()->backend_policy(); if (name == kMsConvert || name == kMsVm) { return 
InitExecDatasetVm(queue_name, iter_num, batch_size, types, shapes, input_indexes); - } else { - return InitExecDatasetGe(queue_name, iter_num, batch_size, types, shapes, input_indexes, phase); - } -} - -bool InitExecDatasetGe(const std::string& queue_name, int64_t size, int64_t batch_size, - const std::vector& types, const std::vector>& shapes, - const std::vector& input_indexes, const std::string& phase) { - // Convert types to GE types and TF types - std::vector ge_types; - (void)std::transform(types.begin(), types.end(), std::back_inserter(ge_types), [](const TypePtr& i) -> int64_t { - return transform::TransformUtil::ConvertDataType(i->type_id()); - }); - - ConfigManager::GetInstance().set_dataset_mode(DatasetMode::DS_GRAPH_MODE); - ConfigManager::GetInstance().set_iter_num(size); - ConfigManager::GetInstance().set_dataset_phase(phase); - - DatasetGraphParam param(queue_name, size, batch_size, ge_types, shapes, input_indexes); - ConfigManager::GetInstance().set_dataset_param(param); - - if (transform::BuildDatasetGraph(param, phase) != transform::SUCCESS) { - MS_LOG(ERROR) << "Build dateset graph failed."; - return false; } - -#if ENABLE_TRAIN - (void)setenv("GE_TRAIN", "1", 1); +#if ENABLE_GE + return InitExecDatasetGe(queue_name, iter_num, batch_size, types, shapes, input_indexes, phase); #else - (void)setenv("GE_TRAIN", "0", 1); -#endif - - if (CreateSessionAndGraphRunner(static_cast(ENABLE_TRAIN)) != Status::SUCCESS) { - MS_LOG(ERROR) << "Create GE Session or GraphRunner failed."; - return false; + std::string backend = MsContext::GetInstance()->backend_policy(); + if (backend == "ge") { + return true; } - - MS_LOG(INFO) << "DoExecNonInputGraph:" << phase; - DoExecNonInputGraph(phase); - - return true; +#endif + return false; } bool InitExecDatasetVm(const std::string& queue_name, int64_t size, int64_t batch_size, @@ -1259,25 +741,6 @@ bool InitExecDatasetVm(const std::string& queue_name, int64_t size, int64_t batc return true; } -void InitGe() { - // set 
python env flag - mindspore::parse::python_adapter::set_python_env_flag(true); - // open tsd before ge initialize - auto ms_context = MsContext::GetInstance(); - MS_EXCEPTION_IF_NULL(ms_context); - if (!ms_context->OpenTsd()) { - MS_LOG(EXCEPTION) << "open tsd failed"; - } - (void)ms_context->InitGe(); -} - -void FinalizeGe() { - auto context_ptr = MsContext::GetInstance(); - MS_EXCEPTION_IF_NULL(context_ptr); - (void)context_ptr->FinalizeGe(); - (void)context_ptr->CloseTsd(); -} - void ResetOpId() { mindspore::id_generator::reset_id(); } void InitHccl() { @@ -1309,24 +772,57 @@ void FinalizeHccl() { device::KernelRuntimeManager::Instance().ClearRuntimeResource(); #endif } -void ExportDFGraph(const std::string& file_name, const std::string&, const std::string& phase) { - MS_LOG(DEBUG) << "ExportGraph Begin"; - transform::DfGraphWrapperPtr wrap_ptr = DfGraphManager::GetInstance().GetGraphByName(phase); - if (wrap_ptr == nullptr) { - MS_LOG(ERROR) << "Get graph form DfGraphManager failed!"; - return; - } - transform::DfGraphPtr ge_graph = wrap_ptr->graph_ptr_; - if (nullptr == ge_graph) { - MS_LOG(ERROR) << "The export graph is null"; - return; +void ExportGraph(const std::string& file_name, const std::string&, const std::string& phase) { +#if (ENABLE_GE || ENABLE_D) + ExportDFGraph(file_name, phase); +#endif + MS_LOG(WARNING) << "In ut test no export_graph"; +} + +void ReleaseGeTsd() { + auto context_ptr = MsContext::GetInstance(); + if (context_ptr != nullptr) { + (void)context_ptr->FinalizeGe(true); + (void)context_ptr->CloseTsd(true); } +} - (void)ge_graph->SaveToFile(file_name); +void InitGe() { + // set python env flag + mindspore::parse::python_adapter::set_python_env_flag(true); + // open tsd before ge initialize + auto ms_context = MsContext::GetInstance(); + MS_EXCEPTION_IF_NULL(ms_context); + if (!ms_context->OpenTsd()) { + MS_LOG(EXCEPTION) << "open tsd failed"; + } + (void)ms_context->InitGe(); +} - MS_LOG(DEBUG) << "ExportGraph End"; +void FinalizeGe() 
{ + auto context_ptr = MsContext::GetInstance(); + MS_EXCEPTION_IF_NULL(context_ptr); + (void)context_ptr->FinalizeGe(); + (void)context_ptr->CloseTsd(); } +void ClearResAtexit() { + MS_LOG(DEBUG) << "Pipeline clear all resource"; + device::KernelRuntimeManager::Instance().ClearRuntimeResource(); + + ad::g_k_prims.clear(); + + abstract::ClearPrimEvaluatorMap(); + compile::ClearConvertCache(); + pipeline::GetMethodMap().clear(); + pipeline::ExecutorPy::ClearRes(); +#ifdef ENABLE_GE + transform::DfGraphManager::GetInstance().ClearGraph(); + transform::DfGraphConvertor::get_adpt_map().clear(); +#endif + ReleaseGeTsd(); + parse::python_adapter::ResetPythonScope(); +} } // namespace pipeline } // namespace mindspore diff --git a/mindspore/ccsrc/pipeline/pipeline.h b/mindspore/ccsrc/pipeline/pipeline.h index b075306682..a0d7a19198 100644 --- a/mindspore/ccsrc/pipeline/pipeline.h +++ b/mindspore/ccsrc/pipeline/pipeline.h @@ -30,6 +30,7 @@ #include "pipeline/action.h" #include "vm/segment_runner.h" #include "vm/transform.h" +#include "pipeline/base.h" namespace mindspore { extern const char kMsConvert[]; @@ -55,14 +56,6 @@ class Pipeline { std::vector actions_; }; -struct ExecutorInfo { - FuncGraphPtr func_graph; - ResourcePtr resource; - std::size_t arg_list_size; -}; - -using ExecutorInfoPtr = std::shared_ptr; - // A function pipeline. 
class ExecutorPy : public std::enable_shared_from_this { public: @@ -80,11 +73,7 @@ class ExecutorPy : public std::enable_shared_from_this { bool CompileInner(const py::object& obj, const py::tuple& args, const py::object& phase, bool use_vm); bool Compile(const py::object& obj, const py::tuple& args, const py::object& phase, bool use_vm); - // for graph mode - py::object ExecDFGraph(const py::tuple& args, const std::string& phase = "train"); - void ProcessVmArg(const py::tuple& args, const std::string& phase, VectorRef* arg_list); - void ProcessGeArg(const py::tuple& args, const std::string& phase, std::vector* inputs); // for pynative mode when use_vm is on py::object Run(const py::tuple& args, const py::object& phase); @@ -95,9 +84,8 @@ class ExecutorPy : public std::enable_shared_from_this { compile::VmEvalFuncPtr GetVmEvalFunc(const std::string& phase); bool HasCompiled(const std::string& phase) const; - bool AddDFGraph(const py::dict& init_params, const std::string& phase, const py::object& broadcast_params); - FuncGraphPtr BuildDFGraph(const py::dict& init_params, const std::string& phase, - const py::object& broadcast_params = {}); + FuncGraphPtr BuildGraph(const py::dict& init_params, const std::string& phase, + const py::object& broadcast_params = {}); void RunInitGraph(const py::dict& init_params, const std::string& phase); py::dict GetParameterLayout(const std::string& phase); py::dict GetCNodeStrategy(const std::string& phase); @@ -122,32 +110,29 @@ using ExecutorPyPtr = std::shared_ptr; py::tuple GenerateKey(const std::string& name, const std::unordered_map& defaults); py::bool_ VerifyInputSignature(const py::list input_signature, const py::tuple inputs); -void SetGeOption(const std::map& options); bool InitDistribute(const std::map& options); void ResetOpId(); -void InitGe(); -void FinalizeGe(); void InitHccl(); void FinalizeHccl(); +void InitGe(); +void FinalizeGe(); + +void ClearResAtexit(); +void ReleaseGeTsd(); + +void ExportGraph(const 
std::string& file_name, const std::string&, const std::string& phase); // init and exec dataset sub graph bool InitExecDataset(const std::string& queue_name, int64_t iter_num, int64_t batch_size, const std::vector& types, const std::vector>& shapes, const std::vector& input_indexes, const std::string& phase); -// init and exec dataset sub graph for GE backend -bool InitExecDatasetGe(const std::string& queue_name, int64_t size, int64_t batch_size, - const std::vector& types, const std::vector>& shapes, - const std::vector& input_indexes, const std::string& phase); - // Build and run dataset subgraph for ms backend bool InitExecDatasetVm(const std::string& queue_name, int64_t size, int64_t batch_size, const std::vector& types, const std::vector>& shapes, const std::vector& input_indexes); -void ExportDFGraph(const std::string& file_name, const std::string&, const std::string& phase); - } // namespace pipeline } // namespace mindspore diff --git a/mindspore/ccsrc/pipeline/pipeline_ge.cc b/mindspore/ccsrc/pipeline/pipeline_ge.cc new file mode 100644 index 0000000000..4617884ca0 --- /dev/null +++ b/mindspore/ccsrc/pipeline/pipeline_ge.cc @@ -0,0 +1,545 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "pipeline/pipeline_ge.h" + +#include +#include +#include +#include +#include + +#include "debug/anf_ir_dump.h" +#include "ir/meta_tensor.h" +#include "transform/convert.h" +#include "transform/df_graph_manager.h" +#include "transform/graph_builder.h" +#include "transform/graph_runner.h" +#include "debug/draw.h" +#include "pipeline/static_analysis/abstract_value.h" + +namespace mindspore { +namespace pipeline { +using Tensor = mindspore::tensor::Tensor; +using MetaTensor = mindspore::tensor::MetaTensor; +using TensorOrderMap = std::map>; +using mindspore::abstract::AbstractTensor; +using mindspore::abstract::AbstractTuple; +using mindspore::abstract::AbstractTuplePtr; +using mindspore::transform::DfGraphConvertor; +using mindspore::transform::DfGraphManager; +using mindspore::transform::GeTensorPtr; +using mindspore::transform::MeTensorPtr; +using mindspore::transform::Status; +using mindspore::transform::TransformUtil; + +void DoExecNonInputGraph(const std::string& phase) { + std::vector ge_tensors; + std::vector ge_outputs; + transform::RunOptions run_options; + run_options.name = phase; + auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner(); + + if (graph_runner == nullptr) { + MS_LOG(ERROR) << "Can not found GraphRunner"; + return; + } + { + // Release GIL before calling into (potentially long-running) C++ code + py::gil_scoped_release release; + Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs); + if (ret != Status::SUCCESS) { + MS_LOG(ERROR) << "Exec graph:" << run_options.name << " failed"; + return; + } + } +} + +void SetGeOption(const std::map& options) { + ConfigManager::GetInstance().set_ge_initialize_options(options); +} + +Status CreateSessionAndGraphRunner(bool is_training = true) { + std::shared_ptr sess = DfGraphManager::GetInstance().GetGeSession(); + if (sess == nullptr) { + transform::SessionOptions options; + if (is_training) { + options["ge.trainFlag"] = "1"; + options["ge.streamNum"] = 
"100"; + options["ge.enabledLocalFmkop"] = "1"; + options["ge.hcomParallel"] = "1"; + } else { + options["ge.trainFlag"] = "0"; + } + + options["ge.enablePrintOpPass"] = "0"; + sess = transform::GraphRunner::NewSession(options); + if (sess == nullptr) { + MS_LOG(ERROR) << "Init data graph failed, because of create Ge session failed"; + return Status::FAILED; + } else { + DfGraphManager::GetInstance().SetGeSession(sess); + } + } + + transform::GraphRunnerOptions options; + options.sess_ptr = sess; + auto graph_runner = std::make_shared(options); + if (graph_runner == nullptr) { + MS_LOG(ERROR) << "Create new graph runner failed"; + return Status::FAILED; + } else { + DfGraphManager::GetInstance().SetGraphRunner(graph_runner); + } + + return Status::SUCCESS; +} + +bool InitExecDatasetGe(const std::string& queue_name, int64_t size, int64_t batch_size, + const std::vector& types, const std::vector>& shapes, + const std::vector& input_indexes, const std::string& phase) { + std::vector ge_types; + (void)std::transform(types.begin(), types.end(), std::back_inserter(ge_types), [](const TypePtr& i) -> int64_t { + return transform::TransformUtil::ConvertDataType(i->type_id()); + }); + + ConfigManager::GetInstance().set_dataset_mode(DatasetMode::DS_GRAPH_MODE); + ConfigManager::GetInstance().set_iter_num(size); + ConfigManager::GetInstance().set_dataset_phase(phase); + + DatasetGraphParam param(queue_name, size, batch_size, ge_types, shapes, input_indexes); + ConfigManager::GetInstance().set_dataset_param(param); + + if (transform::BuildDatasetGraph(param, phase) != transform::SUCCESS) { + MS_LOG(ERROR) << "Build dateset graph failed."; + return false; + } + +#if ENABLE_TRAIN + (void)setenv("GE_TRAIN", "1", 1); +#else + (void)setenv("GE_TRAIN", "0", 1); +#endif + + if (CreateSessionAndGraphRunner(static_cast(ENABLE_TRAIN)) != Status::SUCCESS) { + MS_LOG(ERROR) << "Create GE Session or GraphRunner failed."; + return false; + } + + MS_LOG(INFO) << "DoExecNonInputGraph:" << 
phase; + DoExecNonInputGraph(phase); + + return true; +} + +void ConvertObjectToTensors(const py::dict& dict, TensorOrderMap* const tensors) { + for (auto item : dict) { + if ((!py::isinstance(item.first))) { + MS_LOG(WARNING) << "Type of key of py_dict is not string, ignore it."; + continue; + } + std::shared_ptr tensor; + std::string name = py::cast(item.first); + if (py::isinstance(item.second.attr("default_input"))) { + // convert float to tensor with shape([1]) + tensor = std::make_shared(kNumberTypeFloat32, std::vector({1})); + *(static_cast(tensor->data_c(true))) = py::cast(item.second.attr("default_input")); + } else if (py::isinstance(item.second.attr("default_input"))) { + // convert int to tensor with shape([1]) + tensor = std::make_shared(kNumberTypeInt32, std::vector({1})); + *(static_cast(tensor->data_c(true))) = py::cast(item.second.attr("default_input")); + } else if (py::hasattr(item.second.attr("default_input"), PYTHON_TENSOR_FLAG)) { + // cast tensor + tensor = py::cast>(item.second.attr("default_input")); + } + + if (tensor == nullptr) { + MS_LOG(EXCEPTION) << "Get default value for " << name << " failed"; + } + (void)tensors->emplace(name, tensor); + } +} + +bool AddDFGraph(const std::map& info, const py::dict& init_params, + const std::string& phase, const py::object& broadcast_params) { + FuncGraphPtr anf_graph = info.at(phase)->func_graph; + DfGraphConvertor convertor(anf_graph); + + size_t pos = phase.find('.'); + std::string net_id = ((pos == std::string::npos || pos == phase.size() - 1) ? 
phase : phase.substr(pos + 1)); + std::string phase_prefix = phase.substr(0, pos); + + if (phase_prefix == "export") { + MS_LOG(INFO) << "Set DfGraphConvertor training : false"; + convertor.set_training(false); + } + + TensorOrderMap init_tensors{}; + ConvertObjectToTensors(init_params, &init_tensors); + (void)convertor.ConvertAllNode().InitParam(init_tensors).BuildGraph(); + + if (broadcast_params != py::none()) { + if (!py::isinstance(broadcast_params)) { + MS_LOG(ERROR) << "Invalid broadcast params, it must be py::dict type"; + return false; + } + py::dict broadcast = broadcast_params.cast(); + if (broadcast.empty()) { + (void)convertor.GenerateBroadcastGraph(init_tensors); + } else { + TensorOrderMap broadcast_tensors{}; + ConvertObjectToTensors(broadcast, &broadcast_tensors); + (void)convertor.GenerateBroadcastGraph(broadcast_tensors); + } + MS_LOG(INFO) << "Generate broadcast graph with params and broadcast_empty is " << broadcast.empty(); + } + + (void)convertor.GenerateCheckpointGraph(); + if (convertor.ErrCode() != 0) { + DfGraphManager::GetInstance().ClearGraph(); + MS_LOG(ERROR) << "convert df graph failed, err:" << convertor.ErrCode(); + return false; + } + + if (MsContext::GetInstance()->save_graphs_flag()) { + convertor.DrawComputeGraph(GetFilePathName("ge_graph.dot")); // for debug + convertor.DrawInitGraph(GetFilePathName("init_graph.dot")); // for debug + convertor.DrawSaveCheckpointGraph(GetFilePathName("save_checkpoint_graph.dot")); // for debug + } + std::string init_graph = "init_subgraph." + net_id; + std::string checkpoint_name = "save." 
+ net_id; + if (phase.find("train") != std::string::npos) { + (void)DfGraphManager::GetInstance().AddGraph(phase, convertor.GetComputeGraph(), {{"ge.exec.variable_acc", "1"}}); + } else { + (void)DfGraphManager::GetInstance().AddGraph(phase, convertor.GetComputeGraph()); + } + (void)DfGraphManager::GetInstance().AddGraph(init_graph, convertor.GetInitGraph()); + (void)DfGraphManager::GetInstance().AddGraph(checkpoint_name, convertor.GetSaveCheckpointGraph()); + (void)DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, convertor.GetBroadcastGraph()); + + DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph); + + return true; +} + +FuncGraphPtr BuildDFGraph(const std::map& info, const py::dict& init_params, + const std::string& phase, const py::object& broadcast_params) { + if (info.count(phase) == 0) { + MS_LOG(EXCEPTION) << "no phase in executor:" << GetPhasePrefix(phase); + } + FuncGraphPtr anf_graph = info.at(phase)->func_graph; + + if (MsContext::GetInstance()->save_graphs_flag()) { + draw::Draw(GetFilePathName("anf_graph.dot"), anf_graph); // for debug + DumpIR(GetFilePathName("anf_graph.ir"), anf_graph, true); + } + + if (!AddDFGraph(info, init_params, phase, broadcast_params)) { + MS_LOG(ERROR) << "GenConvertor failed"; + return nullptr; + } + +#if ENABLE_TRAIN + (void)setenv("GE_TRAIN", "1", 1); +#else + (void)setenv("GE_TRAIN", "0", 1); +#endif + + if (CreateSessionAndGraphRunner(static_cast(ENABLE_TRAIN)) != Status::SUCCESS) { + MS_LOG(ERROR) << "Create GE Session or GraphRunner failed."; + return nullptr; + } + + return anf_graph; +} + +void RunGEInitGraph(const py::dict& init_params, const std::string& phase) { + MS_LOG(DEBUG) << "ExecInitGraph start."; + TensorOrderMap inputs_with_name{}; + ConvertObjectToTensors(init_params, &inputs_with_name); + std::vector inputs; + (void)std::transform(inputs_with_name.begin(), inputs_with_name.end(), std::back_inserter(inputs), + [](const std::pair& item) { return item.second; }); + + 
std::vector ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW); + if (ge_tensors.size() != inputs.size()) { + MS_LOG(ERROR) << "Args convert to ge tensor error."; + return; + } + MS_LOG(DEBUG) << "Run graph begin, inputs size is: " << inputs.size() << "."; + + std::vector ge_outputs; + transform::RunOptions run_options; + + run_options.name = phase; + if (DfGraphManager::GetInstance().GetGraphByName(phase) == nullptr) { + MS_LOG(WARNING) << "Can not find " << phase << " sub graph, don't need data init subgraph in INFER mode."; + return; + } + auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner(); + if (graph_runner == nullptr) { + MS_LOG(EXCEPTION) << "Can not found GraphRunner."; + } + { + // Release GIL before calling into (potentially long-running) C++ code + py::gil_scoped_release release; + Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs); + if (ret != Status::SUCCESS) { + MS_LOG(EXCEPTION) << "Exec " << phase << " graph failed."; + } + + MS_LOG(INFO) << "Exec " << phase << " graph success."; + + if ((ConfigManager::GetInstance().parallel_strategy() == ParallelStrategy::DISTRIBUTION) && + (DfGraphManager::GetInstance().GetGraphByName(BROADCAST_GRAPH_NAME) != nullptr)) { + run_options.name = BROADCAST_GRAPH_NAME; + ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs); + if (ret != Status::SUCCESS) { + MS_LOG(EXCEPTION) << "Exec BROADCAST_GRAPH_NAME failed."; + } + MS_LOG(INFO) << "Exec broadcast graph success."; + } + } +} + +py::object ExtractGeneralCnodeRet(const AbstractBasePtr& cnode_data, const py::tuple& data, size_t* count) { + MS_EXCEPTION_IF_NULL(cnode_data); + if (*count >= data.size()) { + MS_LOG(EXCEPTION) << "The number of elements in the outputs : " << data.size() + << " less than the number of elements required. 
"; + } + + if (cnode_data->isa()) { + BaseShapePtr shape = cnode_data->BuildShape(); + auto shape_act = shape->cast()->shape(); + Tensor tensor_exp = py::cast(data[*count]); + if (shape_act != tensor_exp.shape()) { + MS_LOG(EXCEPTION) << "The shape of the tensor returned from GE is not the same as " + "the shape of the tensor derived from ME."; + } + return data[(*count)++]; + } + + if (!cnode_data->isa()) { + MS_LOG(EXCEPTION) << "The output of operator in the final anf graph could " + << "only be a tensor or a tuple of tensor, but got " << cnode_data->BuildValue()->ToString() + << "."; + } + auto data_tp = cnode_data->cast(); + auto elements = data_tp->elements(); + size_t size = data_tp->size(); + py::tuple tp = py::tuple(size); + for (size_t i = 0; i < size; i++) { + tp[i] = ExtractGeneralCnodeRet(elements[i], data, count); + } + return std::move(tp); +} + +py::object StructureOutput(const AnfNodePtr& output_node, const py::tuple& data, size_t* count) { + MS_EXCEPTION_IF_NULL(output_node); + + if (output_node->isa()) { + return ValuePtrToPyData(GetValueNode(output_node)); + } + + if (*count >= data.size()) { + MS_LOG(EXCEPTION) << "The number of elements in the outputs : " << data.size() + << " less than the number of elements required. 
"; + } + if (output_node->isa()) { + return data[(*count)++]; + } + + auto output_c = output_node->cast(); + if (output_c == nullptr) { + MS_LOG(EXCEPTION) << "The final anf graph could only have constant, parameter, and operator, but got " + << output_node->ToString(); + } + + if (output_c->IsApply(prim::kPrimMakeTuple)) { + auto input_list = output_c->inputs(); + size_t size = input_list.size(); + py::tuple tp = py::tuple(size - 1); + for (size_t i = 1; i < size; i++) { + tp[i - 1] = StructureOutput(input_list[i], data, count); + } + return std::move(tp); + } + if (output_c->IsApply(prim::kPrimDepend)) { + return StructureOutput(output_c->input(1), data, count); + } + + return ExtractGeneralCnodeRet(output_c->abstract(), data, count); +} + +std::shared_ptr DoExecGraph(const FuncGraphPtr& graph, const std::vector& inputs, + const std::string& phase) { + std::vector ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW); + if (ge_tensors.size() != inputs.size()) { + MS_LOG(ERROR) << "args convert to ge tensor error"; + return nullptr; + } + + std::vector ge_outputs; + transform::RunOptions run_options; + + run_options.name = phase; + + auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner(); + + if (graph_runner == nullptr) { + MS_LOG(ERROR) << "Can not found GraphRunner"; + return nullptr; + } + + { + // Release GIL before calling into (potentially long-running) C++ code + py::gil_scoped_release release; + MS_LOG(DEBUG) << "Run graph begin, inputs size is: " << inputs.size(); + Status ret = graph_runner->RunGraph(run_options, ge_tensors, &ge_outputs); + MS_LOG(DEBUG) << "Run graph finish, outputs size is: " << ge_outputs.size(); + if (ret != Status::SUCCESS) { + MS_LOG(ERROR) << "Exec graph failed"; + return nullptr; + } + } + + std::vector me_outputs = TransformUtil::ConvertGeTensors(ge_outputs); + if (me_outputs.size() != ge_outputs.size()) { + MS_LOG(ERROR) << "Convert output Ge tensor to Me tensor failed"; + } + + py::tuple 
outputs(me_outputs.size()); + for (std::size_t i = 0; i < outputs.size(); i++) { + outputs[i] = *me_outputs[i]; + } + + std::shared_ptr ret = nullptr; + +#ifdef ENABLE_GE + AnfNodePtr root = graph->get_return(); + MS_EXCEPTION_IF_NULL(root); + AbstractBasePtr output = root->abstract(); + size_t count = 0; + py::object oj = StructureOutput(output, outputs, &count); + ret = std::make_shared(oj); +#else + if (outputs.size() == 1) { + ret = std::make_shared(outputs[0]); + } else { + ret = std::make_shared(outputs); + } +#endif + + return ret; +} + +void ProcessGeArg(const std::map& info, const py::tuple& args, const std::string& phase, + std::vector* inputs) { + // check the arg and use the ExecutorPy args + std::size_t size = args.size(); + + if (info.count(phase) == 0) { + MS_LOG(EXCEPTION) << "no phase in executor:" << GetPhasePrefix(phase); + } + + auto arg_size = info.at(phase)->arg_list_size; + if (size != arg_size) { + MS_LOG(EXCEPTION) << "The real arg num : size = " << size << ". graph_arg_size = " << arg_size; + } + + // process the first args of tensor + // only in Dataset Feed Mode, fp_bp graph need input tensors + if (ConfigManager::GetInstance().dataset_mode() == DS_FEED_MODE) { + for (std::size_t i = 0; i < size; i++) { + ValuePtr converted = nullptr; + bool succ = parse::ConvertData(args[i], &converted); + if (!succ) { + MS_LOG(EXCEPTION) << "args convert error"; + } + if (converted->isa()) { + (*inputs).push_back(converted->cast()); + } else { + MS_LOG(EXCEPTION) << "args, " << converted->ToString() << " is not tensor"; + } + } + } +} + +py::object ExecDFGraph(const std::map& info, const py::tuple& args, + const std::string& phase) { + std::string phase_prefix = GetPhasePrefix(phase); + + if (phase_prefix == "save") { + DoExecNonInputGraph(phase); + ConfigManager::GetInstance().ResetConfig(); + return py::none(); + } + + if (info.count(phase) == 0) { + MS_LOG(EXCEPTION) << "has no phase:" << phase; + } + + FuncGraphPtr anf_graph = 
info.at(phase)->func_graph; + +#if (!defined ENABLE_GE) || (defined ENABLE_INFER) + // Now don't use the graph because the exec ge function don't take effect + MS_EXCEPTION_IF_NULL(info.at(phase)->func_graph); + if (ENABLE_TRAIN != info.at(phase)->func_graph->flags()["training"]) { + MS_LOG(ERROR) << "Graph training mode mismatch mode of libraries"; + ConfigManager::GetInstance().ResetConfig(); + return py::none(); + } +#endif + + std::shared_ptr ret_val = std::make_shared(); + // We will not execute graph when output is constant or just input itself. + if (IsGraphOutputValueNodeOrParameter(info.at(phase)->func_graph->output(), args, ret_val)) { + ConfigManager::GetInstance().ResetConfig(); + return *ret_val; + } + + std::vector inputs; + ProcessGeArg(info, args, phase, &inputs); + + std::shared_ptr ret = DoExecGraph(anf_graph, inputs, phase); + ConfigManager::GetInstance().ResetConfig(); + if (ret != nullptr) { + return *ret; + } else { + MS_LOG(EXCEPTION) << "exec graph failed"; + } +} +void ExportDFGraph(const std::string& file_name, const std::string& phase) { + MS_LOG(DEBUG) << "ExportGraph Begin"; + transform::DfGraphWrapperPtr wrap_ptr = DfGraphManager::GetInstance().GetGraphByName(phase); + if (wrap_ptr == nullptr) { + MS_LOG(ERROR) << "Get graph form DfGraphManager failed!"; + return; + } + + transform::DfGraphPtr ge_graph = wrap_ptr->graph_ptr_; + if (nullptr == ge_graph) { + MS_LOG(ERROR) << "The export graph is null"; + return; + } + + (void)ge_graph->SaveToFile(file_name); + + MS_LOG(DEBUG) << "ExportGraph End"; +} +} // namespace pipeline +} // namespace mindspore diff --git a/mindspore/ccsrc/pipeline/pipeline_ge.h b/mindspore/ccsrc/pipeline/pipeline_ge.h new file mode 100644 index 0000000000..c3779fd982 --- /dev/null +++ b/mindspore/ccsrc/pipeline/pipeline_ge.h @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance 
with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_PIPELINE_PIPELINE_GE_H_ +#define MINDSPORE_CCSRC_PIPELINE_PIPELINE_GE_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "pybind11/pybind11.h" +#include "pipeline/base.h" +#include "operator/ops.h" + +namespace mindspore { +namespace pipeline { + +namespace py = pybind11; + +void SetGeOption(const std::map& options); + +void RunGEInitGraph(const py::dict& init_params, const std::string& phase); + +py::object ExecDFGraph(const std::map& info, const py::tuple& args, + const std::string& phase = "train"); + +FuncGraphPtr BuildDFGraph(const std::map& info, const py::dict& init_params, + const std::string& phase, const py::object& broadcast_params = {}); + +// init and exec dataset sub graph for GE backend +bool InitExecDatasetGe(const std::string& queue_name, int64_t size, int64_t batch_size, + const std::vector& types, const std::vector>& shapes, + const std::vector& input_indexes, const std::string& phase); + +void ExportDFGraph(const std::string& file_name, const std::string& phase); + +} // namespace pipeline +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_PIPELINE_PIPELINE_GE_H_ diff --git a/mindspore/ccsrc/pipeline/resource.cc b/mindspore/ccsrc/pipeline/resource.cc index 2998ff1dbb..59ee04ad15 100644 --- a/mindspore/ccsrc/pipeline/resource.cc +++ b/mindspore/ccsrc/pipeline/resource.cc @@ -25,19 +25,13 @@ #include "pipeline/parse/data_converter.h" #include "operator/ops.h" #include "utils/graph_utils.h" -#include "transform/convert.h" #include 
"optimizer/ad/dfunctor.h" #include "vm/segment_runner.h" -#include "utils/context/ms_context.h" -#include "transform/df_graph_manager.h" -#include "device/kernel_runtime_manager.h" namespace mindspore { // namespace to support opmap definition namespace pipeline { -using MethodMap = std::unordered_map>; - MethodMap& GetMethodMap() { static MethodMap method_map = {{kObjectTypeString, { @@ -255,28 +249,5 @@ void Resource::Clean() { trace::ClearTraceStack(); is_cleaned_ = true; } - -void ReleaseGeTsd() { - auto context_ptr = MsContext::GetInstance(); - if (context_ptr != nullptr) { - (void)context_ptr->FinalizeGe(true); - (void)context_ptr->CloseTsd(true); - } -} - -void ClearResAtexit() { - MS_LOG(DEBUG) << "pipeline clear all resource"; - device::KernelRuntimeManager::Instance().ClearRuntimeResource(); - transform::DfGraphManager::GetInstance().ClearGraph(); - ad::g_k_prims.clear(); - - abstract::ClearPrimEvaluatorMap(); - compile::ClearConvertCache(); - transform::DfGraphConvertor::get_adpt_map().clear(); - pipeline::GetMethodMap().clear(); - pipeline::ExecutorPy::ClearRes(); - - ReleaseGeTsd(); -} } // namespace pipeline } // namespace mindspore diff --git a/mindspore/ccsrc/pipeline/resource.h b/mindspore/ccsrc/pipeline/resource.h index 43159ddbdd..15ab70db14 100644 --- a/mindspore/ccsrc/pipeline/resource.h +++ b/mindspore/ccsrc/pipeline/resource.h @@ -44,6 +44,10 @@ const char kOutput[] = "output"; class InferenceResource; +using MethodMap = std::unordered_map>; + +MethodMap& GetMethodMap(); + class ResourceBase { public: ResourceBase() { manager_ = MakeManager(); } @@ -110,9 +114,6 @@ class Resource : public ResourceBase { using ResourcePtr = std::shared_ptr; -void ClearResAtexit(); -void ReleaseGeTsd(); - } // namespace pipeline } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc index b9b324e5dd..8212d64c27 100644 --- 
a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc @@ -21,7 +21,7 @@ #include "pre_activate/ascend/ir_fission/bn_grad_split.h" #include "pre_activate/ascend/ir_fusion/fused_batch_norm_fusion.h" #include "pre_activate/ascend/ir_fission/layer_norm_grad_split.h" -#include "pre_activate/ascend/ir_fusion/allreduce_fusion.h" +#include "pre_activate/common/ir_fusion/allreduce_fusion.h" #include "pre_activate/ascend/ir_fusion/square_sum_fusion.h" #include "pre_activate/ascend/ir_fusion/clip_by_norm_no_div_square_sum_fusion.h" #include "pre_activate/ascend/ir_fusion/lamb_update_with_lr_rule_fusion.h" diff --git a/mindspore/ccsrc/pre_activate/ascend/buffer_fusion/buffer_fusion.cc b/mindspore/ccsrc/pre_activate/ascend/buffer_fusion/buffer_fusion.cc index 49c5e89641..58b8a93516 100644 --- a/mindspore/ccsrc/pre_activate/ascend/buffer_fusion/buffer_fusion.cc +++ b/mindspore/ccsrc/pre_activate/ascend/buffer_fusion/buffer_fusion.cc @@ -237,11 +237,11 @@ CNodePtr CreateFusionOp(const std::vector &inputs_list, const std::v std::vector input_names; for (uint8_t i = 0; i < inputs_list.size(); i++) { - input_names.emplace_back("input" + to_string(i)); + input_names.emplace_back("input" + std::to_string(i)); } std::vector output_names; for (uint8_t i = 0; i < outputs_list.size(); i++) { - output_names.emplace_back("output" + to_string(i)); + output_names.emplace_back("output" + std::to_string(i)); } ValuePtr input_names_v = MakeValue(input_names); diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/allreduce_fusion.cc b/mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.cc similarity index 97% rename from mindspore/ccsrc/pre_activate/ascend/ir_fusion/allreduce_fusion.cc rename to mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.cc index 7b862b21b9..55efcf9058 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/allreduce_fusion.cc +++ 
b/mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "pre_activate/ascend/ir_fusion/allreduce_fusion.h" +#include "pre_activate/common/ir_fusion/allreduce_fusion.h" #include #include diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/allreduce_fusion.h b/mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.h similarity index 87% rename from mindspore/ccsrc/pre_activate/ascend/ir_fusion/allreduce_fusion.h rename to mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.h index c26dbc20d9..b49b8373c6 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/allreduce_fusion.h +++ b/mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.h @@ -13,8 +13,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_ALLREDUCE_FUSION_H_ -#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_ALLREDUCE_FUSION_H_ +#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_COMMON_IR_FUSION_ALLREDUCE_FUSION_H_ +#define MINDSPORE_CCSRC_PRE_ACTIVATE_COMMON_IR_FUSION_ALLREDUCE_FUSION_H_ #include #include "pre_activate/common/pass.h" @@ -46,4 +46,4 @@ class AllReduceFusion : public Pass { }; } // namespace opt } // namespace mindspore -#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_ALLREDUCE_FUSION_H_ +#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_COMMON_IR_FUSION_ALLREDUCE_FUSION_H_ diff --git a/mindspore/ccsrc/predict/converter/kernel2ms.cc b/mindspore/ccsrc/predict/converter/kernel2ms.cc index 30b1960e41..32cdee1350 100644 --- a/mindspore/ccsrc/predict/converter/kernel2ms.cc +++ b/mindspore/ccsrc/predict/converter/kernel2ms.cc @@ -16,7 +16,7 @@ #include "predict/converter/kernel2ms.h" #include -#include "transform/convert.h" +#include "ir/anf.h" #include "predict/converter/lite_model/op_attr_packer.h" #include 
"mindspore/ccsrc/operator/ops.h" @@ -135,7 +135,7 @@ void Kernel2Ms::GetRealInpoutsPtr(const AnfNodePtr &node, std::vectorisa()) { auto c_node = node->cast(); MS_EXCEPTION_IF_NULL(c_node); - std::string c_node_name = transform::GetCNodeFuncName(c_node); + std::string c_node_name = GetCNodeFuncName(c_node); if (c_node_name == prim::kPrimTupleGetItem->name()) { auto v_node = c_node->inputs()[kTupleGetItemIndex]->cast(); MS_EXCEPTION_IF_NULL(v_node); @@ -321,7 +321,7 @@ bool Kernel2Ms::SetGraphInputTensors(const KernelGraphPtr &kernel_graph_ptr, con } for (const auto &input_node : kernel_graph_ptr->inputs()) { if (input_node->isa()) { - ParameterPtr pk_node = dynamic_pointer_cast(input_node); + ParameterPtr pk_node = std::dynamic_pointer_cast(input_node); TensorPtr device_tensor; if (convert_mode_ == kConvertCpuMode) { device_tensor = predict::utils::GetParaCpuTensor(input_node); diff --git a/mindspore/ccsrc/pynative/base.h b/mindspore/ccsrc/pynative/base.h new file mode 100644 index 0000000000..7405f621cb --- /dev/null +++ b/mindspore/ccsrc/pynative/base.h @@ -0,0 +1,67 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_PYNATIVE_BASE_H_ +#define MINDSPORE_CCSRC_PYNATIVE_BASE_H_ + +#include +#include +#include +#include +#include +#include + +#include "pybind11/pybind11.h" +#include "ir/primitive.h" +#include "pipeline/static_analysis/abstract_value.h" + +namespace mindspore { +namespace pynative { + +namespace py = pybind11; + +enum PynativeStatusCode { + PYNATIVE_SUCCESS = 0, + PYNATIVE_OP_NOT_IMPLEMENTED_ERR = 1, + PYNATIVE_OP_INPUTS_ERR = 2, + PYNATIVE_OP_PARAMS_ERR = 3, + PYNATIVE_OP_ATTRS_ERR = 4, + PYNATIVE_GRAPH_MANAGER_ERR = 5, + PYNATIVE_GRAPH_GE_BUILD_ERR = 6, + PYNATIVE_GRAPH_GE_RUN_ERR = 7, + PYNATIVE_UNKNOWN_STATE = 0XFF +}; + +enum RunOpArgsEnum { PY_PRIM = 0, PY_NAME, PY_INPUTS, PY_INPUT_MASK, PY_ARGS_NUM }; + +struct OpExecInfo { + PrimitivePyPtr py_primitive; + std::string op_name; + AbstractBasePtr abstract; + + py::tuple op_inputs; + py::tuple inputs_mask; + py::dict op_attrs; +}; +using OpExecInfoPtr = std::shared_ptr; +OpExecInfoPtr GenerateOpExecInfo(const py::args& args); + +const std::unordered_set ignore_infer_prim = {"partial"}; + +} // namespace pynative +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_PYNATIVE_BASE_H_ diff --git a/mindspore/ccsrc/pynative/pynative_execute.cc b/mindspore/ccsrc/pynative/pynative_execute.cc index 27cfd89106..927e768bbe 100644 --- a/mindspore/ccsrc/pynative/pynative_execute.cc +++ b/mindspore/ccsrc/pynative/pynative_execute.cc @@ -29,16 +29,18 @@ #include "pipeline/static_analysis/prim.h" #include "session/session_factory.h" +#include "pynative/base.h" + +#ifdef ENABLE_GE +#include "pynative/pynative_execute_ge.h" +#endif + const char SINGLE_OP_GRAPH[] = "single_op_graph"; // primitive unable to infer value for constant input in pynative mode -const std::unordered_set ignore_infer_prim = {"partial"}; const std::unordered_set vm_operators = {"partial", "depend"}; namespace mindspore { namespace pynative { -using transform::GraphRunner; -using transform::GraphRunnerOptions; -using 
transform::OperatorPtr; inline ValuePtr PyAttrValue(const py::object& obj) { ValuePtr converted_ret = nullptr; bool converted = parse::ConvertData(obj, &converted_ret); @@ -48,32 +50,12 @@ inline ValuePtr PyAttrValue(const py::object& obj) { return converted_ret; } -MeTensorPtr ConvertPyObjToTensor(const py::object& obj) { - MeTensorPtr me_tensor_ptr = nullptr; - if (py::isinstance(obj)) { - me_tensor_ptr = py::cast(obj); - } else if (py::isinstance(obj)) { - me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); - } else if (py::isinstance(obj)) { - me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); - } else if (py::isinstance(obj)) { - me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); - } else if (py::isinstance(obj)) { - me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); - } else if (py::isinstance(obj)) { - me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); - } else { - MS_LOG(EXCEPTION) << "run op inputs type is invalid!"; - } - return me_tensor_ptr; -} - void PynativeInfer(const PrimitivePyPtr& prim, const py::tuple& py_args, OpExecInfo* const op_exec_info) { size_t size = py_args.size(); AbstractBasePtrList args_spec_list; for (size_t i = 0; i < size; i++) { ValuePtr input_value = PyAttrValue(py_args[i]); - if (py::isinstance(py_args[i])) { + if (py::isinstance(py_args[i])) { args_spec_list.emplace_back(abstract::FromValueInside(input_value, true)); } else { args_spec_list.emplace_back(abstract::FromValueInside(input_value, false)); @@ -140,241 +122,6 @@ std::string GetSingleOpGraphInfo(const OpExecInfoPtr& op_exec_info) { return graph_info; } -bool SetInputsForSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vector& inputs, - const OperatorPtr& op, std::vector* graph_input_nodes) { - MS_EXCEPTION_IF_NULL(op_exec_info); - MS_EXCEPTION_IF_NULL(graph_input_nodes); - auto op_inputs = op_exec_info->op_inputs; - std::string op_name = op_exec_info->op_name; - transform::OpAdapterPtr adapter = 
transform::DfGraphConvertor::FindAdapter(op_name, true); - if (adapter == nullptr) { - return false; - } - - int op_input_idx = 1; - size_t size = inputs.size(); - for (size_t i = 0; i < size; i++) { - if (inputs[i] == nullptr) { - continue; - } - auto const_op = std::make_shared(); - MS_EXCEPTION_IF_NULL(const_op); - (void)const_op->set_attr_value(*inputs[i]); - MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]); - MS_EXCEPTION_IF_NULL(me_tensor_ptr); - auto const_op_desc = - transform::TransformUtil::GetGeTensorDesc(me_tensor_ptr->shape_c(), me_tensor_ptr->data_type(), kOpFormat_NCHW); - if (const_op_desc == nullptr) { - MS_LOG(ERROR) << "Create variable " << op_name << " ouptut descriptor failed!"; - return false; - } - auto pointer_cast_const_op = std::static_pointer_cast(const_op); - MS_EXCEPTION_IF_NULL(pointer_cast_const_op); - (void)pointer_cast_const_op->update_output_desc_y(*const_op_desc); - auto& input_map = adapter->getInputMap(); - if (input_map.find(op_input_idx) == input_map.end()) { - continue; - } - if (adapter->setInput(op, op_input_idx++, const_op)) { - MS_LOG(ERROR) << "fail to set params, index is " << op_input_idx; - return false; - } - graph_input_nodes->push_back(*const_op); - } - return true; -} - -bool BuildSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vector& inputs, - const std::unordered_map& attrs, const GeGraphPtr& graph) { - MS_EXCEPTION_IF_NULL(op_exec_info); - std::string op_name = op_exec_info->op_name; - auto op_inputs = op_exec_info->op_inputs; - transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true); - if (adapter == nullptr) { - MS_LOG(ERROR) << "Unable to find Adapter for " << ((std::string)py::str(op_name)); - return false; - } - OperatorPtr op = adapter->generate(op_name); - MS_EXCEPTION_IF_NULL(op); - - std::vector graph_input_nodes; - // hold param nodes after setting input and output for the graph - // set input - if (!SetInputsForSingleOpGraph(op_exec_info, 
inputs, op, &graph_input_nodes)) { - return false; - } - // set attributes - for (auto attr : attrs) { - (void)adapter->setAttr(op, attr.first, attr.second); - } - // set default attributes - auto extra_attrs = adapter->GetExtraAttr(); - for (auto attr : extra_attrs) { - (void)adapter->setAttr(op, attr.first, attr.second); - } - // set input attributes - auto& input_attr_map = adapter->getInputAttrMap(); - for (auto& it : input_attr_map) { - if (op_inputs.size() < it.first) { - continue; - } - auto const_value = PyAttrValue(op_inputs[it.first - 1]); - if (const_value->isa()) { - continue; - } - it.second.set_attr(op, const_value); - } - // construct output data nodes - std::vector graph_outputs{*op}; - // set input and output nodes for the graph - MS_EXCEPTION_IF_NULL(graph); - (void)graph->SetInputs(graph_input_nodes).SetOutputs(graph_outputs); - MS_LOG(INFO) << "BuildSingleOpGraph done"; - return true; -} - -void ToTensorPtr(const OpExecInfoPtr op_exec_info, std::vector* const inputs) { - MS_EXCEPTION_IF_NULL(inputs); - MS_EXCEPTION_IF_NULL(op_exec_info); - auto op_inputs = op_exec_info->op_inputs; - size_t size = op_inputs.size(); - for (size_t i = 0; i < size; i++) { - if (py::isinstance(op_inputs[i])) { - inputs->emplace_back(nullptr); - continue; - } - MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]); - auto ge_tensor_ptr = transform::TransformUtil::ConvertTensor(me_tensor_ptr, kOpFormat_NCHW); - if (ge_tensor_ptr == nullptr) { - MS_LOG(EXCEPTION) << "convert inputs to GE tensor failed in op " << op_exec_info->op_name << "."; - } - // set inputs for operator to build single node graph - inputs->push_back(ge_tensor_ptr); - } -} - -PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const std::vector& inputs) { - MS_EXCEPTION_IF_NULL(op_exec_info); - auto op_attrs = op_exec_info->op_attrs; - std::unordered_map attrs{}; - - for (auto& item : op_attrs) { - if (!py::isinstance(item.first)) { - MS_LOG(ERROR) << "type error in py 
dict convert"; - return PYNATIVE_OP_ATTRS_ERR; - } - std::string name = py::cast(item.first); - auto attr_value = PyAttrValue(py::cast(item.second)); - (void)attrs.emplace(name, attr_value); - } - - // build graph - GeGraphPtr graph = std::make_shared(op_exec_info->op_name); - if (BuildSingleOpGraph(op_exec_info, inputs, attrs, graph) == false) { - MS_LOG(ERROR) << "Fail to BuildSingleOpGraph"; - return PYNATIVE_GRAPH_GE_BUILD_ERR; - } - - // add the single op graph into the graph manager, which will be iterated by session. - transform::Status ret = - transform::DfGraphManager::GetInstance().AddGraph(SINGLE_OP_GRAPH, std::shared_ptr(graph)); - if (ret != transform::SUCCESS) { - MS_LOG(ERROR) << "Fail to AddGraph into graph manager"; - return PYNATIVE_GRAPH_MANAGER_ERR; - } - - return PYNATIVE_SUCCESS; -} - -std::vector ConvertOutputTensors(const OpExecInfoPtr& op_exec_info, - const std::vector& ge_tensors) { - std::vector outputs; - AbstractBasePtr abs_base = op_exec_info->abstract; - std::vector> shapes; - if (abs_base != nullptr && abs_base->isa()) { - auto arg_tensor = dyn_cast(abs_base); - shapes.emplace_back(arg_tensor->shape()->shape()); - outputs = transform::TransformUtil::ConvertGeTensors(ge_tensors, shapes); - return outputs; - } - if (abs_base != nullptr && abs_base->isa()) { - auto arg_tuple = dyn_cast(abs_base); - size_t len = arg_tuple->size(); - - for (size_t i = 0; i < len; i++) { - if (arg_tuple->elements()[i]->isa()) { - auto arg_tensor = dyn_cast(arg_tuple->elements()[i]); - shapes.emplace_back(arg_tensor->shape()->shape()); - } - } - outputs = transform::TransformUtil::ConvertGeTensors(ge_tensors, shapes); - return outputs; - } - for (auto& it : ge_tensors) { - auto tensor = transform::TransformUtil::ConvertGeTensor(it); - if (tensor != nullptr) { - outputs.emplace_back(tensor); - } - } - return outputs; -} - -py::object RunOpInGE(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* status) { - MS_LOG(INFO) << "RunOpInGe start"; - 
MS_EXCEPTION_IF_NULL(op_exec_info); - MS_EXCEPTION_IF_NULL(status); - - // returns a null py::tuple on error - py::tuple err_ret(0); - auto op_name = op_exec_info->op_name; - transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true); - if (adapter == nullptr) { - MS_LOG(ERROR) << "Unable to find GE Adapter for " << ((std::string)py::str(op_name)); - *status = PYNATIVE_OP_NOT_IMPLEMENTED_ERR; - return std::move(err_ret); - } - - std::vector inputs{}; - ToTensorPtr(op_exec_info, &inputs); - // convert me attr to ge AttrValue - PynativeStatusCode ret = ConvertAttributes(op_exec_info, inputs); - if (ret != PYNATIVE_SUCCESS) { - *status = ret; - return std::move(err_ret); - } - // run graph - transform::RunOptions run_options; - run_options.name = SINGLE_OP_GRAPH; - std::vector ge_inputs; - std::vector ge_outputs; - transform::GraphRunnerOptions graph_runner_options; - graph_runner_options.options["ge.trainFlag"] = "1"; - auto graph_runner = std::make_shared(graph_runner_options); - transform::Status run_ret; - { - // Release GIL before calling into (potentially long-running) C++ code - py::gil_scoped_release release; - run_ret = graph_runner->RunGraph(run_options, ge_inputs, &ge_outputs); - } - if (run_ret != transform::Status::SUCCESS) { - MS_LOG(ERROR) << "GraphRunner Fails to Run Graph"; - *status = PYNATIVE_GRAPH_GE_RUN_ERR; - return std::move(err_ret); - } - - std::vector graph_outputs = ConvertOutputTensors(op_exec_info, ge_outputs); - size_t output_size = graph_outputs.size(); - py::tuple result(output_size); - for (size_t i = 0; i < output_size; i++) { - MS_EXCEPTION_IF_NULL(graph_outputs[i]); - result[i] = *graph_outputs[i]; - } - - *status = PYNATIVE_SUCCESS; - MS_LOG(INFO) << "RunOpInGe end"; - return std::move(result); -} - py::object RunOpInVM(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* status) { MS_LOG(INFO) << "RunOpInVM start"; @@ -423,12 +170,6 @@ py::object RunOpWithBackendPolicy(MsBackendPolicy 
backend_policy, const OpExecIn MS_EXCEPTION_IF_NULL(status); py::object result; switch (backend_policy) { - case kMsBackendGeOnly: { - // use GE only - MS_LOG(INFO) << "RunOp use GE only backend"; - result = RunOpInGE(op_exec_info, status); - break; - } case kMsBackendVmOnly: { // use vm only MS_LOG(INFO) << "RunOp use VM only backend"; @@ -436,22 +177,14 @@ py::object RunOpWithBackendPolicy(MsBackendPolicy backend_policy, const OpExecIn break; } case kMsBackendGePrior: { +#ifdef ENABLE_GE // use GE first, use vm when GE fails MS_LOG(INFO) << "RunOp use GE first backend"; result = RunOpInGE(op_exec_info, status); if (*status != PYNATIVE_SUCCESS) { result = RunOpInVM(op_exec_info, status); } - break; - } - case kMsBackendVmPrior: { - // GE_VM_SILENT - // (should not use this policy) use vm first, use GE when vm fails - MS_LOG(INFO) << "RunOp use VM first backend"; - result = RunOpInVM(op_exec_info, status); - if (*status != PYNATIVE_SUCCESS) { - result = RunOpInGE(op_exec_info, status); - } +#endif break; } case kMsBackendMsPrior: { diff --git a/mindspore/ccsrc/pynative/pynative_execute.h b/mindspore/ccsrc/pynative/pynative_execute.h index e3d7649106..17b5610bfd 100644 --- a/mindspore/ccsrc/pynative/pynative_execute.h +++ b/mindspore/ccsrc/pynative/pynative_execute.h @@ -25,55 +25,14 @@ #include "pybind11/pybind11.h" -#include "transform/convert.h" -#include "transform/graph_runner.h" -#include "transform/types.h" +#include "pynative/base.h" #include "utils/context/ms_context.h" namespace mindspore { namespace pynative { -using MeTensor = mindspore::tensor::Tensor; -using MeTensorPtr = mindspore::tensor::TensorPtr; -using GeTensor = ge::Tensor; -using GeTensorPtr = std::shared_ptr; -using GeGraph = ge::Graph; -using GeGraphPtr = std::shared_ptr; -using GeOperator = ge::Operator; -using GeOperatorPtr = std::shared_ptr; - namespace py = pybind11; -enum PynativeStatusCode { - PYNATIVE_SUCCESS = 0, - PYNATIVE_OP_NOT_IMPLEMENTED_ERR = 1, - PYNATIVE_OP_INPUTS_ERR = 2, - 
PYNATIVE_OP_PARAMS_ERR = 3, - PYNATIVE_OP_ATTRS_ERR = 4, - PYNATIVE_GRAPH_MANAGER_ERR = 5, - PYNATIVE_GRAPH_GE_BUILD_ERR = 6, - PYNATIVE_GRAPH_GE_RUN_ERR = 7, - PYNATIVE_UNKNOWN_STATE = 0XFF -}; - -enum RunOpArgsEnum { PY_PRIM = 0, PY_NAME, PY_INPUTS, PY_INPUT_MASK, PY_ARGS_NUM }; - -struct OpExecInfo { - PrimitivePyPtr py_primitive; - std::string op_name; - AbstractBasePtr abstract; - - py::tuple op_inputs; - py::tuple inputs_mask; - py::dict op_attrs; -}; -using OpExecInfoPtr = std::shared_ptr; -OpExecInfoPtr GenerateOpExecInfo(const py::args& args); -bool BuildSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vector& inputs, - const std::unordered_map& attrs, const GeGraphPtr& graph); - -py::object RunOpInGE(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* status); - py::object RunOpInVM(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* status); py::tuple RunOp(const py::args& args); diff --git a/mindspore/ccsrc/pynative/pynative_execute_ge.cc b/mindspore/ccsrc/pynative/pynative_execute_ge.cc new file mode 100644 index 0000000000..4ed6088494 --- /dev/null +++ b/mindspore/ccsrc/pynative/pynative_execute_ge.cc @@ -0,0 +1,311 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "pynative/pynative_execute_ge.h" + +#include +#include +#include +#include + +#include "utils/any.h" +#include "utils/utils.h" +#include "utils/context/ms_context.h" +#include "operator/ops.h" +#include "pipeline/parse/data_converter.h" +#include "pipeline/static_analysis/prim.h" +#include "session/session_factory.h" + +const char SINGLE_OP_GRAPH[] = "single_op_graph"; + +namespace mindspore { +namespace pynative { + +using MeTensor = mindspore::tensor::Tensor; +using MeTensorPtr = mindspore::tensor::TensorPtr; +using GeOperator = ge::Operator; +using GeOperatorPtr = std::shared_ptr; + +using transform::GraphRunner; +using transform::GraphRunnerOptions; +using transform::OperatorPtr; +static std::shared_ptr session = nullptr; +inline ValuePtr PyAttrValue(const py::object& obj) { + ValuePtr converted_ret = nullptr; + bool converted = parse::ConvertData(obj, &converted_ret); + if (!converted) { + MS_LOG(EXCEPTION) << "attribute convert error with type:" << std::string(py::str(obj)); + } + return converted_ret; +} + +MeTensorPtr ConvertPyObjToTensor(const py::object& obj) { + MeTensorPtr me_tensor_ptr = nullptr; + if (py::isinstance(obj)) { + me_tensor_ptr = py::cast(obj); + } else if (py::isinstance(obj)) { + me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); + } else if (py::isinstance(obj)) { + me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); + } else if (py::isinstance(obj)) { + me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); + } else if (py::isinstance(obj)) { + me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); + } else if (py::isinstance(obj)) { + me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); + } else { + MS_LOG(EXCEPTION) << "run op inputs type is invalid!"; + } + return me_tensor_ptr; +} + +bool SetInputsForSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vector& inputs, + const OperatorPtr& op, std::vector* graph_input_nodes) { + MS_EXCEPTION_IF_NULL(op_exec_info); + 
MS_EXCEPTION_IF_NULL(graph_input_nodes); + auto op_inputs = op_exec_info->op_inputs; + std::string op_name = op_exec_info->op_name; + transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true); + if (adapter == nullptr) { + return false; + } + + int op_input_idx = 1; + size_t size = inputs.size(); + for (size_t i = 0; i < size; i++) { + if (inputs[i] == nullptr) { + continue; + } + auto const_op = std::make_shared(); + MS_EXCEPTION_IF_NULL(const_op); + (void)const_op->set_attr_value(*inputs[i]); + MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]); + MS_EXCEPTION_IF_NULL(me_tensor_ptr); + auto const_op_desc = + transform::TransformUtil::GetGeTensorDesc(me_tensor_ptr->shape_c(), me_tensor_ptr->data_type(), kOpFormat_NCHW); + if (const_op_desc == nullptr) { + MS_LOG(ERROR) << "Create variable " << op_name << " ouptut descriptor failed!"; + return false; + } + auto pointer_cast_const_op = std::static_pointer_cast(const_op); + MS_EXCEPTION_IF_NULL(pointer_cast_const_op); + (void)pointer_cast_const_op->update_output_desc_y(*const_op_desc); + auto& input_map = adapter->getInputMap(); + if (input_map.find(op_input_idx) == input_map.end()) { + continue; + } + if (adapter->setInput(op, op_input_idx++, const_op)) { + MS_LOG(ERROR) << "fail to set params, index is " << op_input_idx; + return false; + } + graph_input_nodes->push_back(*const_op); + } + return true; +} + +bool BuildSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vector& inputs, + const std::unordered_map& attrs, const GeGraphPtr& graph) { + MS_EXCEPTION_IF_NULL(op_exec_info); + std::string op_name = op_exec_info->op_name; + auto op_inputs = op_exec_info->op_inputs; + transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true); + if (adapter == nullptr) { + MS_LOG(ERROR) << "Unable to find Adapter for " << ((std::string)py::str(op_name)); + return false; + } + OperatorPtr op = adapter->generate(op_name); + 
MS_EXCEPTION_IF_NULL(op); + + std::vector graph_input_nodes; + // hold param nodes after setting input and output for the graph + // set input + if (!SetInputsForSingleOpGraph(op_exec_info, inputs, op, &graph_input_nodes)) { + return false; + } + // set attributes + for (auto attr : attrs) { + (void)adapter->setAttr(op, attr.first, attr.second); + } + // set default attributes + auto extra_attrs = adapter->GetExtraAttr(); + for (auto attr : extra_attrs) { + (void)adapter->setAttr(op, attr.first, attr.second); + } + // set input attributes + auto& input_attr_map = adapter->getInputAttrMap(); + for (auto& it : input_attr_map) { + if (op_inputs.size() < it.first) { + continue; + } + auto const_value = PyAttrValue(op_inputs[it.first - 1]); + if (const_value->isa()) { + continue; + } + it.second.set_attr(op, const_value); + } + // construct output data nodes + std::vector graph_outputs{*op}; + // set input and output nodes for the graph + MS_EXCEPTION_IF_NULL(graph); + (void)graph->SetInputs(graph_input_nodes).SetOutputs(graph_outputs); + MS_LOG(INFO) << "BuildSingleOpGraph done"; + return true; +} + +void ToTensorPtr(const OpExecInfoPtr op_exec_info, std::vector* const inputs) { + MS_EXCEPTION_IF_NULL(inputs); + MS_EXCEPTION_IF_NULL(op_exec_info); + auto op_inputs = op_exec_info->op_inputs; + size_t size = op_inputs.size(); + for (size_t i = 0; i < size; i++) { + if (py::isinstance(op_inputs[i])) { + inputs->emplace_back(nullptr); + continue; + } + MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]); + auto ge_tensor_ptr = transform::TransformUtil::ConvertTensor(me_tensor_ptr, kOpFormat_NCHW); + if (ge_tensor_ptr == nullptr) { + MS_LOG(EXCEPTION) << "convert inputs to GE tensor failed in op " << op_exec_info->op_name << "."; + } + // set inputs for operator to build single node graph + inputs->push_back(ge_tensor_ptr); + } +} + +PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const std::vector& inputs) { + 
MS_EXCEPTION_IF_NULL(op_exec_info); + auto op_attrs = op_exec_info->op_attrs; + std::unordered_map attrs{}; + + for (auto& item : op_attrs) { + if (!py::isinstance(item.first)) { + MS_LOG(ERROR) << "type error in py dict convert"; + return PYNATIVE_OP_ATTRS_ERR; + } + std::string name = py::cast(item.first); + auto attr_value = PyAttrValue(py::cast(item.second)); + (void)attrs.emplace(name, attr_value); + } + + // build graph + GeGraphPtr graph = std::make_shared(op_exec_info->op_name); + if (BuildSingleOpGraph(op_exec_info, inputs, attrs, graph) == false) { + MS_LOG(ERROR) << "Fail to BuildSingleOpGraph"; + return PYNATIVE_GRAPH_GE_BUILD_ERR; + } + + // add the single op graph into the graph manager, which will be iterated by session. + transform::Status ret = + transform::DfGraphManager::GetInstance().AddGraph(SINGLE_OP_GRAPH, std::shared_ptr(graph)); + if (ret != transform::SUCCESS) { + MS_LOG(ERROR) << "Fail to AddGraph into graph manager"; + return PYNATIVE_GRAPH_MANAGER_ERR; + } + + return PYNATIVE_SUCCESS; +} + +std::vector ConvertOutputTensors(const OpExecInfoPtr& op_exec_info, + const std::vector& ge_tensors) { + std::vector outputs; + AbstractBasePtr abs_base = op_exec_info->abstract; + std::vector> shapes; + if (abs_base != nullptr && abs_base->isa()) { + auto arg_tensor = dyn_cast(abs_base); + shapes.emplace_back(arg_tensor->shape()->shape()); + outputs = transform::TransformUtil::ConvertGeTensors(ge_tensors, shapes); + return outputs; + } + if (abs_base != nullptr && abs_base->isa()) { + auto arg_tuple = dyn_cast(abs_base); + size_t len = arg_tuple->size(); + + for (size_t i = 0; i < len; i++) { + if (arg_tuple->elements()[i]->isa()) { + auto arg_tensor = dyn_cast(arg_tuple->elements()[i]); + shapes.emplace_back(arg_tensor->shape()->shape()); + } + } + outputs = transform::TransformUtil::ConvertGeTensors(ge_tensors, shapes); + return outputs; + } + for (auto& it : ge_tensors) { + auto tensor = transform::TransformUtil::ConvertGeTensor(it); + if (tensor 
!= nullptr) { + outputs.emplace_back(tensor); + } + } + return outputs; +} + +py::object RunOpInGE(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* status) { + MS_LOG(INFO) << "RunOpInGe start"; + MS_EXCEPTION_IF_NULL(op_exec_info); + MS_EXCEPTION_IF_NULL(status); + + // returns a null py::tuple on error + py::tuple err_ret(0); + auto op_name = op_exec_info->op_name; + transform::OpAdapterPtr adapter = transform::DfGraphConvertor::FindAdapter(op_name, true); + if (adapter == nullptr) { + MS_LOG(ERROR) << "Unable to find GE Adapter for " << ((std::string)py::str(op_name)); + *status = PYNATIVE_OP_NOT_IMPLEMENTED_ERR; + return std::move(err_ret); + } + + std::vector inputs{}; + ToTensorPtr(op_exec_info, &inputs); + // convert me attr to ge AttrValue + PynativeStatusCode ret = ConvertAttributes(op_exec_info, inputs); + if (ret != PYNATIVE_SUCCESS) { + *status = ret; + return std::move(err_ret); + } + // run graph + transform::RunOptions run_options; + run_options.name = SINGLE_OP_GRAPH; + std::vector ge_inputs; + std::vector ge_outputs; + transform::GraphRunnerOptions graph_runner_options; + graph_runner_options.options["ge.trainFlag"] = "1"; + auto graph_runner = std::make_shared(graph_runner_options); + transform::Status run_ret; + { + // Release GIL before calling into (potentially long-running) C++ code + py::gil_scoped_release release; + run_ret = graph_runner->RunGraph(run_options, ge_inputs, &ge_outputs); + } + if (run_ret != transform::Status::SUCCESS) { + MS_LOG(ERROR) << "GraphRunner Fails to Run Graph"; + *status = PYNATIVE_GRAPH_GE_RUN_ERR; + return std::move(err_ret); + } + + std::vector graph_outputs = ConvertOutputTensors(op_exec_info, ge_outputs); + size_t output_size = graph_outputs.size(); + py::tuple result(output_size); + for (size_t i = 0; i < output_size; i++) { + MS_EXCEPTION_IF_NULL(graph_outputs[i]); + result[i] = *graph_outputs[i]; + } + + *status = PYNATIVE_SUCCESS; + MS_LOG(INFO) << "RunOpInGe end"; + return std::move(result); +} +} 
// namespace pynative + +} // namespace mindspore diff --git a/mindspore/ccsrc/pynative/pynative_execute_ge.h b/mindspore/ccsrc/pynative/pynative_execute_ge.h new file mode 100644 index 0000000000..af0efec3e3 --- /dev/null +++ b/mindspore/ccsrc/pynative/pynative_execute_ge.h @@ -0,0 +1,46 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_PYNATIVE_PYNATIVE_EXECUTE_GE_H_ +#define MINDSPORE_CCSRC_PYNATIVE_PYNATIVE_EXECUTE_GE_H_ + +#include +#include +#include +#include +#include + +#include "pynative/base.h" +#include "transform/convert.h" +#include "transform/graph_runner.h" +#include "transform/types.h" +#include "utils/context/ms_context.h" + +using GeTensor = ge::Tensor; +using GeTensorPtr = std::shared_ptr; +using GeGraph = ge::Graph; +using GeGraphPtr = std::shared_ptr; + +namespace mindspore { +namespace pynative { +bool BuildSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vector& inputs, + const std::unordered_map& attrs, const GeGraphPtr& graph); + +py::object RunOpInGE(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* status); +} // namespace pynative +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_PYNATIVE_PYNATIVE_EXECUTE_GE_H_ diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc index 1a29450313..64647cd036 100644 --- a/mindspore/ccsrc/session/ascend_session.cc +++ 
b/mindspore/ccsrc/session/ascend_session.cc @@ -35,6 +35,7 @@ #include "pre_activate/common/helper.h" #include "device/kernel_runtime_manager.h" #include "kernel/tbe/tbe_python_funcs.h" +#include "utils/config_manager.h" namespace mindspore { namespace session { diff --git a/mindspore/ccsrc/session/gpu_session.cc b/mindspore/ccsrc/session/gpu_session.cc index 196a2f300f..293ca4f2ba 100644 --- a/mindspore/ccsrc/session/gpu_session.cc +++ b/mindspore/ccsrc/session/gpu_session.cc @@ -19,7 +19,7 @@ #include "device/gpu/gpu_kernel_runtime.h" #include "pre_activate/common/optimizer.h" #include "pre_activate/common/pass_manager.h" -#include "pre_activate/ascend/ir_fusion/allreduce_fusion.h" +#include "pre_activate/common/ir_fusion/allreduce_fusion.h" #include "device/kernel_runtime_manager.h" #include "predict/predict.h" #include "common/utils.h" diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index 2b50d2328a..bbfe60859a 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -373,24 +373,6 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma } // ---------------implement of DfGraphConvertor------------- -std::string GetCNodeFuncName(const CNodePtr cnode) { - if (cnode->inputs().empty()) { - return ""; - } - - AnfNodePtr valuenode = cnode->input(0); - if (valuenode->isa()) { - auto value = GetValueNode(valuenode); - // check whether the valuenode is primitive - if (value->isa()) { - return value->cast()->name(); - } else { - return value->ToString(); - } - } - return ""; -} - PrimType GetCNodeFuncType(const CNodePtr cnode) { if (cnode->inputs().empty()) { return kPrimTypeUnknown; diff --git a/mindspore/ccsrc/transform/convert.h b/mindspore/ccsrc/transform/convert.h index e38b0b2b3a..556db5acee 100644 --- a/mindspore/ccsrc/transform/convert.h +++ b/mindspore/ccsrc/transform/convert.h @@ -253,7 +253,6 @@ class DfGraphConvertor { bool distribute_ = false; }; -extern std::string 
GetCNodeFuncName(CNodePtr cnode); } // namespace transform } // namespace mindspore diff --git a/mindspore/ccsrc/utils/callbacks.cc b/mindspore/ccsrc/utils/callbacks.cc index ab3f7d883b..cdee0be82d 100644 --- a/mindspore/ccsrc/utils/callbacks.cc +++ b/mindspore/ccsrc/utils/callbacks.cc @@ -20,16 +20,16 @@ #include #include #include "pybind11/pybind11.h" +#ifdef ENABLE_GE #include "transform/df_graph_manager.h" #include "transform/util.h" +#endif #include "pipeline/parse/data_converter.h" #include "pipeline/parse/python_adapter.h" #include "utils/visible.h" namespace mindspore { namespace callbacks { -using mindspore::transform::Status; -using mindspore::transform::TransformUtil; const char PYTHON_MOD_CALLBACK_MODULE[] = "mindspore.train.callback"; const char PYTHON_FUN_PROCESS_CHECKPOINT[] = "_checkpoint_cb_for_save_op"; @@ -38,6 +38,10 @@ const char kSummary[] = "Summary"; const char kCheckPoint[] = "Save"; const int ONE_SHAPE = 1; +#ifdef ENABLE_GE +using mindspore::transform::Status; +using mindspore::transform::TransformUtil; + bool GetParameterShape(const FuncGraphPtr& graph, const std::string& param_name, const std::shared_ptr>& shape) { if (graph == nullptr) { @@ -181,6 +185,7 @@ uint32_t MS_EXPORT SummarySaveCallback(uint32_t graph_id, const std::map(ret); if (!bool_ret) { MS_LOG(ERROR) << "Python checkpoint return false during callback"; - return Status::FAILED; + return kCallbackFalied; } MS_LOG(DEBUG) << "End the summary save callback function."; - return Status::SUCCESS; + return kCallbackOk; } } // namespace callbacks } // namespace mindspore diff --git a/mindspore/ccsrc/utils/callbacks.h b/mindspore/ccsrc/utils/callbacks.h index 2a18b21b16..778b0a9ba2 100644 --- a/mindspore/ccsrc/utils/callbacks.h +++ b/mindspore/ccsrc/utils/callbacks.h @@ -20,8 +20,11 @@ #include #include #include +#include "ir/meta_tensor.h" +#ifdef ENABLE_GE #include "transform/types.h" #include "transform/util.h" +#endif namespace mindspore { namespace callbacks { @@ -36,10 +39,16 
@@ extern const char kSummary[]; extern const char kCheckPoint[]; extern const std::string kPythonCheckpointModuleName; extern const std::string kPythonCheckpointFuncName; + +const int kCallbackOk = 0; +const int kCallbackFalied = 1; + bool GetParameterShape(const FuncGraphPtr& anf_graph, const std::string& param_name, const std::shared_ptr>& shape); +#ifdef ENABLE_GE uint32_t CheckpointSaveCallback(uint32_t, const std::map&); uint32_t SummarySaveCallback(uint32_t, const std::map&); +#endif uint32_t SummarySaveCallback(uint32_t, const std::map&); } // namespace callbacks diff --git a/mindspore/ccsrc/utils/context/ms_context.cc b/mindspore/ccsrc/utils/context/ms_context.cc index bf05af9858..7b531536ac 100644 --- a/mindspore/ccsrc/utils/context/ms_context.cc +++ b/mindspore/ccsrc/utils/context/ms_context.cc @@ -26,13 +26,15 @@ #include "tdt/tdt_host_interface.h" #include "tdt/data_common.h" #endif +#ifdef ENABLE_GE #include "transform/df_graph_manager.h" +#endif #include "ir/meta_tensor.h" namespace mindspore { +#ifdef ENABLE_GE using mindspore::transform::DfGraphManager; -using transform::GraphRunner; -using transform::GraphRunnerOptions; +#endif std::atomic thread_1_must_end(false); @@ -81,6 +83,7 @@ MsContext::MsContext(const std::string& policy, const std::string& target) { std::shared_ptr MsContext::GetInstance() { if (inst_context_ == nullptr) { + MS_LOG(DEBUG) << "Create new mindspore context"; #ifdef ENABLE_GE inst_context_.reset(new (std::nothrow) MsContext("ge", kAscendDevice)); #elif defined(ENABLE_D) diff --git a/mindspore/ccsrc/utils/context/ms_context.h b/mindspore/ccsrc/utils/context/ms_context.h index e7d8dc769f..06704ff9c6 100644 --- a/mindspore/ccsrc/utils/context/ms_context.h +++ b/mindspore/ccsrc/utils/context/ms_context.h @@ -23,7 +23,6 @@ #include #include #include -#include "transform/graph_runner.h" #include "utils/log_adapter.h" namespace mindspore { diff --git a/mindspore/ccsrc/utils/convert_utils.cc b/mindspore/ccsrc/utils/convert_utils.cc 
index ccd21f6801..e840ff8734 100644 --- a/mindspore/ccsrc/utils/convert_utils.cc +++ b/mindspore/ccsrc/utils/convert_utils.cc @@ -373,4 +373,45 @@ AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py MS_LOG(EXCEPTION) << "Python evaluator return invalid shape or type. " << (std::string)py::str(type_obj); } } +bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args, + const std::shared_ptr &ret_val) { + if (output->isa()) { + MS_LOG(INFO) << "Graph's output is a constant. No need to execute."; + ValuePtr value = GetValueNode(output); + *ret_val = ValuePtrToPyData(value); + return true; + } + + // Adapter will transform values in __init__() and construct() to parameters, this could cause + // inputs (a.k.a args in current function) size less than parameters'. + if (output->isa()) { + MS_LOG(INFO) << "Graph's output is a parameter. If all params are inputs, no need to execute."; + if (args.empty()) { + MS_LOG(EXCEPTION) << "Inputs size is 0, let graph to be executed."; + } + // Find the right parameter as ret_val. 
+ auto func_graph = output->func_graph(); + MS_EXCEPTION_IF_NULL(func_graph); + auto params = func_graph->parameters(); + if (params.empty()) { + MS_EXCEPTION(UnknownError) << "Graph's parameters size is 0"; + } + if (args.size() != params.size()) { + MS_LOG(EXCEPTION) << "Input size " << args.size() << " not equal to params size " << params.size() + << ", let graph to be executed."; + } + + auto it = std::find(params.begin(), params.end(), output); + if (it == params.end()) { + MS_EXCEPTION(UnknownError) << "When graph output is Parameter, it should be found in graph parameters"; + } + size_t index = it - params.cbegin(); + if (index >= args.size()) { + MS_EXCEPTION(UnknownError) << "Index " << index << " equal or larger than args size " << args.size() << "."; + } + *ret_val = args[index]; + return true; + } + return false; +} } // namespace mindspore diff --git a/mindspore/ccsrc/utils/convert_utils.h b/mindspore/ccsrc/utils/convert_utils.h index f190f98e6a..fbd4485a3f 100644 --- a/mindspore/ccsrc/utils/convert_utils.h +++ b/mindspore/ccsrc/utils/convert_utils.h @@ -18,6 +18,7 @@ #define MINDSPORE_CCSRC_UTILS_CONVERT_UTILS_H_ #include +#include #include "pybind11/pybind11.h" #include "utils/any.h" @@ -120,6 +121,9 @@ inline uint8_t *AddressOffset(void *address, size_t offset) { AbstractBasePtr PyListDtype2AbstractTensor(const py::object &shape_obj, const py::object &type_obj); +bool IsGraphOutputValueNodeOrParameter(const AnfNodePtr &output, const py::tuple &args, + const std::shared_ptr &ret_val); + } // namespace mindspore #endif // MINDSPORE_CCSRC_UTILS_CONVERT_UTILS_H_ diff --git a/mindspore/ccsrc/vm/segment_runner.cc b/mindspore/ccsrc/vm/segment_runner.cc index 82a61c010d..d7d5a4c096 100644 --- a/mindspore/ccsrc/vm/segment_runner.cc +++ b/mindspore/ccsrc/vm/segment_runner.cc @@ -178,14 +178,12 @@ LinConvertResult Convert(const AnfNodePtrList& lst) { } LinkFuncType MsVmConvert = Convert; -LinkFuncType GeVmConvert = Convert; -std::unordered_map backends = 
{{kMsVm, MsVmConvert}, {kGeVm, GeVmConvert}}; +std::unordered_map backends = {{kMsVm, MsVmConvert}}; std::set backend_list = { kMsConvert, kMsVm, - kGeVm, }; } // namespace compile diff --git a/mindspore/ccsrc/vm/transform.cc b/mindspore/ccsrc/vm/transform.cc index d5933db1ab..be7aaf5baa 100644 --- a/mindspore/ccsrc/vm/transform.cc +++ b/mindspore/ccsrc/vm/transform.cc @@ -24,7 +24,9 @@ #include #include "pipeline/static_analysis/abstract_value.h" +#ifdef ENABLE_GE #include "transform/convert.h" +#endif #include "utils/graph_utils.h" #include "utils/context/ms_context.h" #include "debug/trace.h" @@ -55,7 +57,6 @@ CompileGraph::CompileGraph(const BackendPtr& backend, const std::vectorIsGraphCut()) { - return nullptr; - } -#endif - FinalVMPtr rt = Link(graph); Reset(); MS_LOG(DEBUG) << "End"; diff --git a/mindspore/ccsrc/vm/transform.h b/mindspore/ccsrc/vm/transform.h index 206fd00431..f862444a82 100644 --- a/mindspore/ccsrc/vm/transform.h +++ b/mindspore/ccsrc/vm/transform.h @@ -55,7 +55,6 @@ class CompileGraph { InstSet Run(const FuncGraphPtr& func_graph); InstSet GenMultiGraphsSinkInst(const FuncGraphPtr& graph); - bool IsGraphCut() const { return is_graph_cut; } bool IsCut(const AnfNodePtr& node); void Push(const AnfNodePtr& node); void Tie(const AnfNodePtr& n1, const AnfNodePtr& n2) { slots_[n2] = slots_[n1]; } @@ -101,7 +100,6 @@ class CompileGraph { BackendPtr backend_; LinkFuncType lin_convert_; bool is_gevm_convert_; - bool is_graph_cut; int height_{0}; int max_height_{0}; std::vector cut_list_; diff --git a/mindspore/ccsrc/vm/vmimpl.cc b/mindspore/ccsrc/vm/vmimpl.cc index e64cd16fcf..ee9a817dd8 100644 --- a/mindspore/ccsrc/vm/vmimpl.cc +++ b/mindspore/ccsrc/vm/vmimpl.cc @@ -26,8 +26,6 @@ #include #include -#include "transform/graph_runner.h" -#include "transform/convert.h" #include "ir/meta_tensor.h" #include "operator/ops.h" #include "ir/manager.h" @@ -40,39 +38,6 @@ namespace compile { using PrimitivePyPtr = std::shared_ptr; -static const char 
SEGMENT_GRAPH_NAME[] = "runnable_segment"; - -VectorRef GeVM::RunGraph(const FuncGraphPtr& anf_graph, const VectorRef& args) { - // Convert graph - transform::DfGraphConvertor convertor(anf_graph); - - (void)convertor.ConvertAllNode().BuildGraph(); - if (convertor.ErrCode() == 0) { - (void)transform::DfGraphManager::GetInstance().AddGraph(SEGMENT_GRAPH_NAME, convertor.GetComputeGraph()); - } else { - MS_LOG(EXCEPTION) << "convert df graph failed"; - } - - // Run graph - transform::GraphRunnerOptions options; - transform::GraphRunner graph_runner(options); - transform::RunOptions run_options; - run_options.name = SEGMENT_GRAPH_NAME; - - std::vector inputs; - (void)std::transform(std::begin(args), std::end(args), std::back_inserter(inputs), - [](const BaseRef& arg) -> tensor::TensorPtr { - auto value_ref = utils::cast(arg); - auto value = value_ref.object_; - return py::cast(value); - }); - std::vector outputs; - (void)graph_runner.RunGraph(run_options, inputs, &outputs); - std::vector ret; - (void)std::copy(outputs.begin(), outputs.end(), std::back_inserter(ret)); - return VectorRef(ret); -} - // Indicate a call to a new frame. struct CallWrap : public Base { explicit CallWrap(const VMFramePtr& vm_frame) : frame(vm_frame) {} diff --git a/mindspore/ccsrc/vm/vmimpl.h b/mindspore/ccsrc/vm/vmimpl.h index 8ff02ae946..4ef507af82 100644 --- a/mindspore/ccsrc/vm/vmimpl.h +++ b/mindspore/ccsrc/vm/vmimpl.h @@ -64,12 +64,6 @@ class VMImpl { virtual ~VMImpl() = default; }; -class GeVM : public VMImpl { - public: - VectorRef RunGraph(const FuncGraphPtr& fg, const VectorRef& args) override; - ~GeVM() override = default; -}; - // An execution frame. // This holds the state for an application of a graph. 
The nodes list // must contain free variables of graphs encountered before the diff --git a/mindspore/common/api.py b/mindspore/common/api.py index 8e23e9184d..9ee95ef772 100644 --- a/mindspore/common/api.py +++ b/mindspore/common/api.py @@ -22,7 +22,7 @@ from mindspore import context from mindspore import log as logger from mindspore.parallel._utils import _get_parallel_mode from .._c_expression import generate_key, Executor_, Tensor, MetaTensor -from .._c_expression import verify_inputs_signature, init_exec_dataset, export_graph, _set_dataset_mode_config, init_ge +from .._c_expression import verify_inputs_signature, init_exec_dataset, _set_dataset_mode_config, init_ge from .tensor import Tensor as MsTensor # store ms_function class compiled pipeline cache @@ -501,6 +501,7 @@ class _Executor: file_name (str): File name of model to export file_format (str): MindSpore currently support 'GEIR' and 'ONNX' format for exported model """ + from .._c_expression import export_graph phase = 'export' + '.' 
+ str(net.create_time) export_graph(file_name, file_format, phase) diff --git a/mindspore/common/parameter.py b/mindspore/common/parameter.py index b1992c8b82..c8ddf0eac6 100644 --- a/mindspore/common/parameter.py +++ b/mindspore/common/parameter.py @@ -155,6 +155,18 @@ class Parameter: def data(self): return self.default_input + def __add__(self, other): + return self.default_input + other + + def __sub__(self, other): + return self.default_input - other + + def __mul__(self, other): + return self.default_input * other + + def __truediv__(self, other): + return self.default_input / other + def set_parameter_data(self, data): if isinstance(data, (Tensor, list, int, float, np.float16, np.float32, np.int32, np.int16, np.ndarray)) and not isinstance(data, bool): diff --git a/mindspore/common/tensor.py b/mindspore/common/tensor.py index 4c7f31921b..d17661595f 100644 --- a/mindspore/common/tensor.py +++ b/mindspore/common/tensor.py @@ -89,6 +89,16 @@ class Tensor(Tensor_): out = self.__mul__(other) return out + def __truediv__(self, other): + if isinstance(other, (int, float)): + other_tensor = Tensor(other, self.dtype()) + elif isinstance(other, Tensor): + other_tensor = other + else: + raise TypeError("unsupported type for div operation") + out = tensor_operator_registry.get('__div__')(self, other_tensor) + return out + def __sub__(self, other): if not isinstance(other, Tensor): raise TypeError("input_data must be a tensor") diff --git a/mindspore/ops/functional.py b/mindspore/ops/functional.py index 5adb6fac57..b8411d42c1 100644 --- a/mindspore/ops/functional.py +++ b/mindspore/ops/functional.py @@ -125,5 +125,5 @@ shape_mul = Primitive("shape_mul") stop_gradient = Primitive("stop_gradient") tensor_operator_registry.register('__add__', tensor_add) - tensor_operator_registry.register('__mul__', tensor_mul) +tensor_operator_registry.register('__div__', tensor_div) diff --git a/mindspore/train/model.py b/mindspore/train/model.py index fe655433fa..65a9837fe6 100755 --- 
a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -161,6 +161,9 @@ class Model: def _update_metrics(self, outputs): """Update metrics local values.""" + if not isinstance(outputs, tuple): + raise ValueError("The `outputs` is not tuple.") + if self._eval_indexes is not None and len(outputs) < 3: raise ValueError("The length of `outputs` must be greater than or equal to 3, \ but got {}".format(len(outputs))) diff --git a/tests/ut/cpp/device/ascend_kernel_select_test.cc b/tests/ut/cpp/device/ascend_kernel_select_test.cc index d522a5adc0..79986d375d 100644 --- a/tests/ut/cpp/device/ascend_kernel_select_test.cc +++ b/tests/ut/cpp/device/ascend_kernel_select_test.cc @@ -231,7 +231,7 @@ void test_select(const CNodePtr &kernel_node, std::vector parent_list, std::vector> shapes, +void SetParentAbstract(std::vector parent_list, std::vector> shapes, std::vector types) { for (const auto &node : parent_list) { AnfAlgo::SetOutputInferTypeAndShape(types, shapes, node.get()); diff --git a/tests/ut/cpp/device/ascend_profiling_test.cc b/tests/ut/cpp/device/ascend_profiling_test.cc index 2bfdc9fcae..2829a5fd4a 100644 --- a/tests/ut/cpp/device/ascend_profiling_test.cc +++ b/tests/ut/cpp/device/ascend_profiling_test.cc @@ -16,10 +16,10 @@ #include #include +#include "./prof_reporter.h" #include "common/common_test.h" #include "device/ascend/profiling/profiling_manager.h" #include "./common.h" -#include "./prof_reporter.h" #define private public #include "device/ascend/profiling/plugin_impl.h" #undef private diff --git a/tests/ut/cpp/pre_activate/ascend/ir_fusion/allreduce_fusion_test.cc b/tests/ut/cpp/pre_activate/common/ir_fusion/allreduce_fusion_test.cc similarity index 99% rename from tests/ut/cpp/pre_activate/ascend/ir_fusion/allreduce_fusion_test.cc rename to tests/ut/cpp/pre_activate/common/ir_fusion/allreduce_fusion_test.cc index 1a7a103a99..79a1cf1a8a 100644 --- a/tests/ut/cpp/pre_activate/ascend/ir_fusion/allreduce_fusion_test.cc +++ 
b/tests/ut/cpp/pre_activate/common/ir_fusion/allreduce_fusion_test.cc @@ -20,7 +20,7 @@ #include "ir/manager.h" #include "debug/anf_ir_dump.h" #include "session/anf_runtime_algorithm.h" -#include "pre_activate/ascend/ir_fusion/allreduce_fusion.h" +#include "pre_activate/common/ir_fusion/allreduce_fusion.h" #include "pre_activate/common/optimizer.h" #include "device/kernel_info.h" #include "pre_activate/common/pass_manager.h" diff --git a/tests/ut/cpp/pre_activate/pass/convert_const_input_to_tensor_input_test.cc b/tests/ut/cpp/pre_activate/pass/convert_const_input_to_tensor_input_test.cc index 083e4168ce..014b0aed55 100644 --- a/tests/ut/cpp/pre_activate/pass/convert_const_input_to_tensor_input_test.cc +++ b/tests/ut/cpp/pre_activate/pass/convert_const_input_to_tensor_input_test.cc @@ -105,7 +105,7 @@ TEST_F(TestHWConstInputToTensorInput, test_value_tuple_tensor_input) { auto tensor = input1->cast()->value()->cast(); ASSERT_TRUE(tensor != nullptr); auto data = tensor->data_c(false); - EXPECT_EQ(vector((int *)data, (int *)data + 4), vector({2, 4, 2, 2})); + EXPECT_EQ(std::vector((int *)data, (int *)data + 4), std::vector({2, 4, 2, 2})); } } // namespace opt } // namespace mindspore diff --git a/tests/ut/python/ir/test_tensor.py b/tests/ut/python/ir/test_tensor.py index d4f96b54eb..1757567db5 100644 --- a/tests/ut/python/ir/test_tensor.py +++ b/tests/ut/python/ir/test_tensor.py @@ -24,6 +24,8 @@ import pytest import mindspore as ms import mindspore.common.api as me import mindspore.nn as nn +from mindspore.common.parameter import Parameter +from mindspore.common.initializer import initializer from ..ut_filter import non_graph_engine @@ -199,6 +201,21 @@ def test_sub(): z = x - y assert isinstance(z, ms.Tensor) +@non_graph_engine +def test_div(): + x = ms.Tensor(np.array([[2,6,10],[12, 4, 8]]).astype(np.float32)) + y = ms.Tensor(np.array([[2,2,5],[6, 1, 2]]).astype(np.float32)) + z = x / y + z2 = x / 2 + assert isinstance(z, ms.Tensor) + assert isinstance(z2, 
ms.Tensor) + +@non_graph_engine +def test_parameter(): + x = Parameter(initializer(1, [1], ms.float32), name="beta1_power") + z = x / 2 + print(z) + class Net(nn.Cell): """Net definition""" @@ -378,3 +395,4 @@ def test_tensor_dtype_fp32_to_bool(): input = np.random.randn(2, 3, 4, 5).astype(np.float32) input = ms.Tensor(input) input_me = ms.Tensor(input, dtype=ms.bool_) + diff --git a/tests/ut/python/ops/test_array_ops.py b/tests/ut/python/ops/test_array_ops.py index 4164ce6e8f..1c4895465f 100644 --- a/tests/ut/python/ops/test_array_ops.py +++ b/tests/ut/python/ops/test_array_ops.py @@ -97,20 +97,6 @@ def test_select(): assert np.all(output.asnumpy() == expect) -def test_scalar_cast_grad(): - """ test_scalar_cast_grad """ - input_x = 255.5 - input_t = get_py_obj_dtype(ms.int8) - - def fx_cast(x): - output = F.scalar_cast(x, input_t) - return output - - gfn = C.grad(fx_cast)(input_x) - expect_dx = 1 - assert gfn == expect_dx - - class CustomOP(PrimitiveWithInfer): __mindspore_signature__ = (sig_dtype.T, sig_dtype.T, sig_dtype.T1, sig_dtype.T1, sig_dtype.T2, sig_dtype.T2, diff --git a/tests/ut/python/parallel/__init__.py b/tests/ut/python/parallel/__init__.py index ffe03896ab..c08f8e247b 100644 --- a/tests/ut/python/parallel/__init__.py +++ b/tests/ut/python/parallel/__init__.py @@ -13,11 +13,14 @@ # limitations under the License. 
import mindspore.context as context +from mindspore.parallel._utils import _reset_op_id def setup_module(module): - context.set_context(mode=context.GRAPH_MODE) + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False) + _reset_op_id() def teardown_module(): context.reset_auto_parallel_context() + _reset_op_id() diff --git a/tests/ut/python/parallel/test_alltoall.py b/tests/ut/python/parallel/test_alltoall.py index ddceece8ec..7365dd941c 100644 --- a/tests/ut/python/parallel/test_alltoall.py +++ b/tests/ut/python/parallel/test_alltoall.py @@ -97,13 +97,10 @@ def test_all_to_all(): strategys = all_to_all_common(strategy1) print(strategys) expect_dict = {'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits' - '/SoftmaxCrossEntropyWithLogits-op43': [[8, 1], [8, 1]], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits' - '/OneHot-op44': [[8, 1], [], []], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/Transpose-op1': - [[8, 1]], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/MatMul-op0': - [[1, 1], [1, 8]]} + '/SoftmaxCrossEntropyWithLogits-op3': [[8, 1], [8, 1]], + 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits/OneHot-op4': [[8, 1], [], []], + 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/Transpose-op1': [[8, 1]], + 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/MatMul-op0': [[1, 1], [1, 8]]} assert (strategys == expect_dict) context.set_context(save_graphs=False) diff --git a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py index e6f72d8019..7c928be376 100644 --- a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py +++ 
b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py @@ -65,8 +65,8 @@ def test_auto_parallel_arithmetic(): b = Tensor(np.ones([64, 128]), dtype=ms.float32) _executor.compile(net, x, y, b, phase='train') strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-Net/FloorDiv-op2': [[2, 4], [2, 4]], - 'Default/network-Net/MatMul-op3': [[2, 1], [1, 4]]} + expected_strategies = {'Default/network-Net/FloorDiv-op0': [[2, 4], [2, 4]], + 'Default/network-Net/MatMul-op1': [[2, 1], [1, 4]]} assert strategies == expected_strategies def test_auto_parallel_arithmetic_broadcast_both(): @@ -91,8 +91,8 @@ def test_auto_parallel_arithmetic_broadcast_both(): b = Tensor(np.ones([1, 64]), dtype=ms.float32) _executor.compile(net, x, y, b, phase='train') strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-Net/FloorDiv-op2': [[8, 1], [1, 1]], - 'Default/network-Net/MatMul-op3': [[8, 1], [1, 1]]} + expected_strategies = {'Default/network-Net/FloorDiv-op0': [[8, 1], [1, 1]], + 'Default/network-Net/MatMul-op1': [[8, 1], [1, 1]]} assert strategies == expected_strategies @@ -118,8 +118,8 @@ def test_auto_parallel_arithmetic_broadcast_right(): b = Tensor(np.ones([32]), dtype=ms.float32) _executor.compile(net, x, y, b, phase='train') strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-Net/FloorDiv-op2': [[4, 2], [2]], - 'Default/network-Net/MatMul-op3': [[4, 1], [1, 2]]} + expected_strategies = {'Default/network-Net/FloorDiv-op0': [[4, 2], [2]], + 'Default/network-Net/MatMul-op1': [[4, 1], [1, 2]]} assert strategies == expected_strategies @@ -145,6 +145,6 @@ def test_auto_parallel_arithmetic_broadcast_left(): b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) _executor.compile(net, x, y, b, phase="train") strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-Net/FloorDiv-op2': [[4, 2], [1, 4, 2]], - 'Default/network-Net/MatMul-op3': [[4, 1], [1, 2]]} - assert 
strategies == expected_strategies \ No newline at end of file + expected_strategies = {'Default/network-Net/FloorDiv-op0': [[4, 2], [1, 4, 2]], + 'Default/network-Net/MatMul-op1': [[4, 1], [1, 2]]} + assert strategies == expected_strategies diff --git a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py index e436f9faf7..5364263695 100755 --- a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py +++ b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import re import numpy as np from mindspore import context import mindspore.nn as nn @@ -55,6 +56,9 @@ def test_auto_parallel_assign_sub_with_ref_key(): _executor.compile(net, x, phase="train") strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-PReLU/PReLU-op2': [[1, 1, 1, 8], [1]], - 'Default/network-PReLU/ReLU-op3': [[1]]} - assert strategies == expected_strategies + for (k, v) in strategies.items(): + if re.search('PReLU-op', k) is not None: + assert v == [[1, 1, 1, 8], [1]] + elif re.search('ReLU-op', k) is not None: + assert v == [[1]] + diff --git a/tests/ut/python/parallel/test_auto_parallel_cast.py b/tests/ut/python/parallel/test_auto_parallel_cast.py index be7b5082d6..4cfeb59fc1 100644 --- a/tests/ut/python/parallel/test_auto_parallel_cast.py +++ b/tests/ut/python/parallel/test_auto_parallel_cast.py @@ -75,9 +75,9 @@ def test_double_star_graph(): _executor.compile(net, x, y, z, w, phase='train') strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-Net/MatMul-op0': [[1, 8], [8, 1]], - 'Default/network-Net/Cast-op7': [[8, 1]], - 'Default/network-Net/MatMul-op8': [[8, 1], [1, 1]], - 'Default/network-Net/Cast-op9': [[1, 8]], - 'Default/network-Net/MatMul-op10': [[1, 1], [1, 8]]} - assert strategies == 
expected_strategies \ No newline at end of file + expected_strategies = {'Default/network-Net/Cast-op1': [[8, 1]], + 'Default/network-Net/Cast-op3': [[1, 8]], + 'Default/network-Net/MatMul-op2': [[8, 1], [1, 1]], + 'Default/network-Net/MatMul-op4': [[1, 1], [1, 8]], + 'Default/network-Net/MatMul-op0': [[1, 8], [8, 1]]} + assert strategies == expected_strategies diff --git a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py index ca9b561145..4d68a7f883 100644 --- a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py +++ b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import re import numpy as np from mindspore import context import mindspore.nn as nn @@ -66,7 +67,10 @@ def test_matmul_prelu(): _executor.compile(net, x, y, b, phase='train') strategies = _executor._get_strategy(net) - assert strategies['Default/network-Net/PReLU-op2'] == [[16, 1, 1, 1], [1]] - assert strategies['Default/network-Net/Mul-op3'] == [[16, 1, 1, 1], [16, 1, 1, 1]] + for (k, v) in strategies.items(): + if re.search('PReLU-op', k) is not None: + assert v == [[16, 1, 1, 1], [1]] + elif re.search('Mul-op', k) is not None: + assert v == [[16, 1, 1, 1], [16, 1, 1, 1]] diff --git a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py index b7a3255f7c..29e81f7f90 100644 --- a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py +++ b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py @@ -80,9 +80,9 @@ def test_common_parameter(): _executor.compile(net, x, y, z, w, phase='train') strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-Net/MatMul-op6': [[8, 1], [1, 1]], - 'Default/network-Net/MatMul-op8': [[8, 1], [1, 1]], - 'Default/network-Net/Cast-op7': [[1, 1]], + 
expected_strategies = {'Default/network-Net/MatMul-op1': [[8, 1], [1, 1]], + 'Default/network-Net/MatMul-op3': [[8, 1], [1, 1]], + 'Default/network-Net/Cast-op2': [[1, 1]], 'Default/network-Net/MatMul-op0': [[8, 1], [1, 1]], - 'Default/network-Net/Cast-op9': [[1, 1]]} + 'Default/network-Net/Cast-op4': [[1, 1]]} assert strategies == expected_strategies diff --git a/tests/ut/python/parallel/test_auto_parallel_transpose.py b/tests/ut/python/parallel/test_auto_parallel_transpose.py index 62fdc11120..2d4dcbae81 100644 --- a/tests/ut/python/parallel/test_auto_parallel_transpose.py +++ b/tests/ut/python/parallel/test_auto_parallel_transpose.py @@ -71,8 +71,8 @@ def test_two_matmul_transpose(): _executor.compile(net, x, y, b, phase='train') strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-Net/Transpose-op4': [[1, 16]], - 'Default/network-Net/Transpose-op5': [[16, 1]], - 'Default/network-Net/MatMul-op6': [[16, 1], [1, 1]], - 'Default/network-Net/MatMul-op7': [[16, 1], [1, 1]]} - assert strategies == expected_strategies \ No newline at end of file + expected_strategies = {'Default/network-Net/Transpose-op0': [[1, 16]], + 'Default/network-Net/Transpose-op1': [[16, 1]], + 'Default/network-Net/MatMul-op2': [[16, 1], [1, 1]], + 'Default/network-Net/MatMul-op3': [[16, 1], [1, 1]]} + assert strategies == expected_strategies diff --git a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py index e7beed384e..bd6639a501 100644 --- a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py +++ b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py @@ -135,7 +135,6 @@ def test_two_matmul(): _executor.compile(net, x, y, b, phase='train') strategies = _executor._get_strategy(net) - expected_strategies = {'Default/network-Net/MatMul-op2': [[16, 1], [1, 1]], - 'Default/network-Net/MatMul-op3': [[16, 1], [1, 1]]} + expected_strategies = {'Default/network-Net/MatMul-op0': [[16, 1], [1, 
1]], + 'Default/network-Net/MatMul-op1': [[16, 1], [1, 1]]} assert strategies == expected_strategies - diff --git a/tests/ut/python/parallel/test_dataset_interface.py b/tests/ut/python/parallel/test_dataset_interface.py index da8821199e..17b8d3cc6d 100644 --- a/tests/ut/python/parallel/test_dataset_interface.py +++ b/tests/ut/python/parallel/test_dataset_interface.py @@ -84,7 +84,7 @@ def loss_scale_manager_common(strategy1): opt = Momentum(net.trainable_params(), learning_rate, momentum) scale_manager = DynamicLossScaleManager(32, 2, 2000) model = Model(net, loss, opt, loss_scale_manager=scale_manager) - # if no GE exists, outputs = self._train_network(*next_element) outputs is None, TypeError is caught. + # if no GE exists, outputs = self._train_network(*next_element) outputs inputs tensor. try: model.train(epoch_size, dataset, dataset_sink_mode=False) except TypeError: diff --git a/tests/ut/python/parallel/test_one_dev.py b/tests/ut/python/parallel/test_one_dev.py index a5867eb5f4..efd4889ce6 100644 --- a/tests/ut/python/parallel/test_one_dev.py +++ b/tests/ut/python/parallel/test_one_dev.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import re from mindspore.train import Model, ParallelMode from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits from mindspore.nn.optim.momentum import Momentum @@ -89,16 +90,13 @@ def all_to_all_common(): def test_one_dev(): - _reset_op_id() - strategys = all_to_all_common() - expect_dict = {'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits' - '/SoftmaxCrossEntropyWithLogits-op9': [[1, 1], [1, 1]], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_loss_fn-SoftmaxCrossEntropyWithLogits' - '/OneHot-op10': [[1, 1], [], []], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/Transpose-op11': - [[1, 1]], - 'Default/network-_VirtualDatasetCell/_backbone-WithLossCell/_backbone-AllToAllNet/MatMul-op12': - [[1, 1], [1, 1]]} - assert (strategys == expect_dict) + strategies = all_to_all_common() + for (k, v) in strategies.items(): + if re.search('SoftmaxCrossEntropyWithLogits-op', k) is not None: + assert v == [[1, 1], [1, 1]] + elif re.search('Transpose-op', k) is not None: + assert v == [[1, 1]] + elif re.search('MatMul-op', k) is not None: + assert v == [[1, 1], [1, 1]] diff --git a/tests/ut/python/pipeline/parse/test_create_obj.py b/tests/ut/python/pipeline/parse/test_create_obj.py index a702f37e0b..370445cf99 100644 --- a/tests/ut/python/pipeline/parse/test_create_obj.py +++ b/tests/ut/python/pipeline/parse/test_create_obj.py @@ -24,6 +24,7 @@ import logging import numpy as np import mindspore.nn as nn +from mindspore import context from mindspore.ops import operations as P from mindspore.common.api import ms_function from mindspore.common.tensor import Tensor @@ -50,6 +51,7 @@ class Net(nn.Cell): def test_create_cell_object_on_construct(): """ test_create_cell_object_on_construct """ log.debug("begin test_create_object_on_construct") + context.set_context(mode=context.GRAPH_MODE) np1 = np.random.randn(2, 3, 4, 5).astype(np.float32) input_me = Tensor(np1) @@ -118,6 
+120,7 @@ class NetC(nn.Cell): def test_create_cell_object_on_construct_use_many_parameter(): """ test_create_cell_object_on_construct_use_many_parameter """ log.debug("begin test_create_object_on_construct") + context.set_context(mode=context.GRAPH_MODE) np1 = np.random.randn(2, 3, 4, 5).astype(np.float32) input_me = Tensor(np1) diff --git a/tests/ut/python/pipeline/parse/test_dtype.py b/tests/ut/python/pipeline/parse/test_dtype.py index a282c82f83..645eba8004 100644 --- a/tests/ut/python/pipeline/parse/test_dtype.py +++ b/tests/ut/python/pipeline/parse/test_dtype.py @@ -28,5 +28,4 @@ def try_type(): def test_dtype_convert(): - with pytest.raises(RuntimeError): - try_type() + try_type() diff --git a/tests/ut/python/pynative_mode/ops/test_grad.py b/tests/ut/python/pynative_mode/ops/test_grad.py index 25db6b15d2..b927da5b04 100644 --- a/tests/ut/python/pynative_mode/ops/test_grad.py +++ b/tests/ut/python/pynative_mode/ops/test_grad.py @@ -19,8 +19,10 @@ from mindspore.common.api import ms_function from mindspore import Tensor from mindspore.ops import composite as C from mindspore.ops.composite import grad_all_with_sens +from mindspore.common.dtype import get_py_obj_dtype import mindspore.nn as nn import mindspore.ops.operations as P +from mindspore.ops import functional as F from ...ut_filter import non_graph_engine @@ -78,6 +80,20 @@ def test_cast_grad(): assert np.all(gout[0].asnumpy() == expect) +def test_scalar_cast_grad(): + """ test_scalar_cast_grad """ + input_x = 255.5 + input_t = get_py_obj_dtype(ms.int8) + + def fx_cast(x): + output = F.scalar_cast(x, input_t) + return output + + gfn = C.grad(fx_cast)(input_x) + expect_dx = 1 + assert gfn == expect_dx + + @non_graph_engine def test_reshape_grad(): """ test_reshape_grad """ diff --git a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py index b6e1ab992c..98dfd6aaef 100644 --- 
a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py +++ b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py @@ -163,12 +163,7 @@ def test_scalar_summary_use_invalid_tag_None(): def test_scalar_summary_use_invalid_tag_Bool(): log.debug("begin test_scalar_summary_use_invalid_tag_Bool") net = SummaryDemoTag(True, True, True) - try: - run_case(net) - except: - assert True - else: - assert False + run_case(net) log.debug("finished test_scalar_summary_use_invalid_tag_Bool") @@ -176,12 +171,7 @@ def test_scalar_summary_use_invalid_tag_Bool(): def test_scalar_summary_use_invalid_tag_null(): log.debug("begin test_scalar_summary_use_invalid_tag_null") net = SummaryDemoTag("", "", "") - try: - run_case(net) - except: - assert True - else: - assert False + run_case(net) log.debug("finished test_scalar_summary_use_invalid_tag_null") @@ -189,12 +179,7 @@ def test_scalar_summary_use_invalid_tag_null(): def test_scalar_summary_use_invalid_tag_Int(): log.debug("begin test_scalar_summary_use_invalid_tag_Int") net = SummaryDemoTag(1, 2, 3) - try: - run_case(net) - except: - assert True - else: - assert False + run_case(net) log.debug("finished test_scalar_summary_use_invalid_tag_Int") diff --git a/tests/ut/python/utils/test_serialize.py b/tests/ut/python/utils/test_serialize.py index 12937e5a83..41da45ab25 100644 --- a/tests/ut/python/utils/test_serialize.py +++ b/tests/ut/python/utils/test_serialize.py @@ -30,7 +30,7 @@ from mindspore.nn import WithLossCell, TrainOneStepCell from mindspore.train.callback import _CheckpointManager from mindspore.train.serialization import save_checkpoint, load_checkpoint,load_param_into_net, \ _exec_save_checkpoint, export, _save_graph -from ..ut_filter import run_on_onnxruntime +from ..ut_filter import run_on_onnxruntime, non_graph_engine from mindspore import context @@ -306,6 +306,7 @@ class MYNET(nn.Cell): return out +@non_graph_engine def test_export(): net = MYNET() input_data = Tensor(np.random.randint(0, 
255, [1, 3, 224, 224]).astype(np.float32)) From 6f1747045c7fa58ff5ce06ba63ca647b98dbf328 Mon Sep 17 00:00:00 2001 From: zhangz0911gm Date: Tue, 31 Mar 2020 23:14:21 -0400 Subject: [PATCH 077/367] Add FloorMod, Acosh in ME --- mindspore/ccsrc/transform/convert.cc | 4 ++ mindspore/ccsrc/transform/op_declare.cc | 10 +++++ mindspore/ccsrc/transform/op_declare.h | 5 ++- mindspore/ops/_grad/grad_math_ops.py | 25 +++++++++++ mindspore/ops/operations/__init__.py | 4 +- mindspore/ops/operations/math_ops.py | 56 +++++++++++++++++++++++++ mindspore/ops/operations/nn_ops.py | 2 +- tests/ut/python/ops/test_ops.py | 9 ++++ 8 files changed, 112 insertions(+), 3 deletions(-) diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index fdacff7ba8..7c100f0f0e 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -171,6 +171,8 @@ const char kNameAbsGrad[] = "AbsGrad"; const char kNameBinaryCrossEntropy[] = "BinaryCrossEntropy"; const char kNameBinaryCrossEntropyGrad[] = "BinaryCrossEntropyGrad"; const char kNameSparseApplyAdagrad[] = "SparseApplyAdagrad"; +const char kNameAcosh[] = "Acosh"; +const char kNameFloorMod[] = "FloorMod"; const char kNameSpaceToDepth[] = "SpaceToDepth"; const char kNameDepthToSpace[] = "DepthToSpace"; const char kNameSign[] = "Sign"; @@ -360,6 +362,8 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameBinaryCrossEntropy), ADPT_DESC(BinaryCrossEntropy)}, {string(kNameBinaryCrossEntropyGrad), ADPT_DESC(BinaryCrossEntropyGrad)}, {string(kNameSparseApplyAdagrad), ADPT_DESC(SparseApplyAdagradD)}, + {string(kNameAcosh), ADPT_DESC(Acosh)}, + {string(kNameFloorMod), ADPT_DESC(FloorMod)}, {string(kNameSpaceToDepth), ADPT_DESC(SpaceToDepth)}, {string(kNameDepthToSpace), ADPT_DESC(DepthToSpace)}, {string(kNameSign), ADPT_DESC(Sign)}, diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index 9258eb08db..0af2923cc4 100755 --- 
a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -357,6 +357,11 @@ INPUT_MAP(AcosGrad) = {{1, INPUT_DESC(y)}, {2, INPUT_DESC(dy)}}; ATTR_MAP(AcosGrad) = EMPTY_ATTR_MAP; OUTPUT_MAP(AcosGrad) = {{0, OUTPUT_DESC(z)}}; +// Acosh +INPUT_MAP(Acosh) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(Acosh) = EMPTY_ATTR_MAP; +OUTPUT_MAP(Acosh) = {{0, OUTPUT_DESC(y)}}; + // Floor INPUT_MAP(Floor) = {{1, INPUT_DESC(x)}}; ATTR_MAP(Floor) = EMPTY_ATTR_MAP; @@ -367,6 +372,11 @@ INPUT_MAP(FloorDiv) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; ATTR_MAP(FloorDiv) = EMPTY_ATTR_MAP; OUTPUT_MAP(FloorDiv) = {{0, OUTPUT_DESC(y)}}; +// FloorMod +INPUT_MAP(FloorMod) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; +ATTR_MAP(FloorMod) = EMPTY_ATTR_MAP; +OUTPUT_MAP(FloorMod) = {{0, OUTPUT_DESC(y)}}; + // Sin INPUT_MAP(Sin) = {{1, INPUT_DESC(x)}}; ATTR_MAP(Sin) = EMPTY_ATTR_MAP; diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 031ce80865..d120c94989 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -324,11 +324,15 @@ DECLARE_OP_USE_OUTPUT(Acos) DECLARE_OP_ADAPTER(AcosGrad) DECLARE_OP_USE_OUTPUT(AcosGrad) +DECLARE_OP_ADAPTER(Acosh) +DECLARE_OP_USE_OUTPUT(Acosh) DECLARE_OP_ADAPTER(Floor) DECLARE_OP_USE_OUTPUT(Floor) DECLARE_OP_ADAPTER(FloorDiv) DECLARE_OP_USE_OUTPUT(FloorDiv) +DECLARE_OP_ADAPTER(FloorMod) +DECLARE_OP_USE_OUTPUT(FloorMod) DECLARE_OP_ADAPTER(Sin) DECLARE_OP_USE_OUTPUT(Sin) DECLARE_OP_ADAPTER(Exp) @@ -450,7 +454,6 @@ DECLARE_OP_USE_INPUT_ATTR(ApplyRMSPropD) DECLARE_OP_USE_OUTPUT(ApplyRMSPropD) DECLARE_OP_ADAPTER(ApplyCenteredRMSProp) DECLARE_OP_USE_OUTPUT(ApplyCenteredRMSProp) - #ifdef ENABLE_GE DECLARE_OP_ADAPTER(Print) DECLARE_OP_USE_DYN_INPUT(Print) diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py index 1675855c88..1863ac8fdd 100755 --- a/mindspore/ops/_grad/grad_math_ops.py +++ 
b/mindspore/ops/_grad/grad_math_ops.py @@ -251,6 +251,20 @@ def get_bprop_floordiv(self): return bprop +@bprop_getters.register(P.FloorMod) +def get_bprop_floormod(self): + """Grad definition for `FloorMod` operation.""" + div_op = P.FloorMod() + neg = P.Neg() + mul_op = P.Mul() + + def bprop(x, y, out, dout): + bc_x = div_op(dout, y) + bc_y = neg(mul_op(bc_x, out)) + return binop_grad_common(x, y, bc_x, bc_y) + return bprop + + @bprop_getters.register(P.Square) def get_bprop_square(self): """Grad definition for `Square` operation.""" @@ -690,6 +704,17 @@ def get_bprop_acos(self): return bprop +@bprop_getters.register(P.Acosh) +def get_bprop_acosh(self): + """Grad definition for `Acosh` operation.""" + input_grad = G.AcoshGrad() + + def bprop(x, out, dout): + dx = input_grad(x, dout) + return (dx,) + return bprop + + @bprop_getters.register(P.Abs) def get_bprop_abs(self): """Grad definition for `Abs` operation.""" diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 727ddaf88f..846be05c4d 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -39,7 +39,7 @@ from .control_ops import ControlDepend, GeSwitch, Merge from .inner_ops import ScalarCast from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul, ReduceMax, ReduceMin, ReduceMean, ReduceSum, ReduceAll, ReduceProd, CumProd, - Cos, Div, Equal, EqualCount, Exp, Floor, FloorDiv, + Cos, Div, Equal, EqualCount, Exp, Floor, FloorDiv, FloorMod, Acosh, Greater, GreaterEqual, Less, LessEqual, Log, LogicalAnd, LogicalNot, LogicalOr, MatMul, Maximum, Minimum, Mul, Neg, NMSWithMask, NotEqual, @@ -205,6 +205,8 @@ __all__ = [ 'Log', 'SigmoidCrossEntropyWithLogits', 'FloorDiv', + 'FloorMod', + 'Acosh', "PReLU", "Cos", "ACos", diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 398a7e6f1a..9600c736b2 100644 --- a/mindspore/ops/operations/math_ops.py +++ 
b/mindspore/ops/operations/math_ops.py @@ -1183,6 +1183,62 @@ class Floor(PrimitiveWithInfer): return x_dtype +class FloorMod(_MathBinaryOp): + """ + Compute element-wise remainder of division. + + The inputs must be two tensors or one tensor and one scalar. + When the inputs are two tensors, the shapes of them could be broadcast, + and the data types of them should be same. + When the inputs are one tensor and one scalar, the scalar cannot be a parameter, only can be a constant, + and the type of the scalar is the same as the data type of the tensor. + + Inputs: + - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number. + - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or + a number. + + Outputs: + Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'. + + Examples: + >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32) + >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32) + >>> floor_mod = FloorMod() + >>> floor_mod(input_x, input_y) + [2, 1, 2] + """ + + +class Acosh(PrimitiveWithInfer): + """ + Compute inverse hyperbolic cosine of x element-wise. + + Inputs: + - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`. + + Outputs: + Tensor, has the same shape as `input_x`. + + Examples: + >>> acosh = Acosh() + >>> X = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), ms.float32) + >>> output = acosh(X) + """ + + @prim_attr_register + def __init__(self): + """init Acosh""" + + def infer_shape(self, x): + return x + + def infer_dtype(self, x): + validator.check_subclass("x_dtype", x, mstype.tensor) + validator.check_typename('x_dtype', x, mstype.number_type) + return x + + class _LogicBinaryOp(_BinaryOp): """ Define logic binary operators. 
diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 0410d4a346..e0f8280514 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -2395,4 +2395,4 @@ class ApplyFtrl(PrimitiveWithInfer): validator.check_typename("l1", l1_type,[mstype.float16, mstype.float32]) validator.check_typename("l2", l2_type,[mstype.float16, mstype.float32]) validator.check_typename("lr_power", lr_power_type,[mstype.float16, mstype.float32]) - return var_type \ No newline at end of file + return var_type diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index 453ef9a652..0f5b716e39 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -219,6 +219,10 @@ test_case_math_ops = [ 'block': P.ACos(), 'desc_inputs': [[2, 3]], 'desc_bprop': [[2, 3]]}), + ('Acosh', { + 'block': P.Acosh(), + 'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16))], + 'skip': ['backward']}), ('Sin', { 'block': P.Sin(), 'desc_inputs': [[2, 3]], @@ -301,6 +305,11 @@ test_case_math_ops = [ 'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)), Tensor(np.random.rand(4).astype(np.float16))], 'skip': ['backward']}), + ('FloorMod', { + 'block': P.FloorMod(), + 'desc_inputs': [Tensor(np.random.rand(4).astype(np.float16)), + Tensor(np.random.rand(4).astype(np.float16))], + 'skip': ['backward']}), ('identity', { 'block': ops.functional.identity, 'desc_inputs': [[2, 2]], From bc4602b58e8a82415655cffe117e3927daecc3e6 Mon Sep 17 00:00:00 2001 From: xiefangqi Date: Mon, 30 Mar 2020 15:34:48 +0800 Subject: [PATCH 078/367] fix and remove useless import of example, st, ut --- example/Bert_NEZHA_cnwiki/train.py | 4 +- tests/st/mem_reuse/resnet_cifar_memreuse.py | 3 +- tests/st/mem_reuse/resnet_cifar_normal.py | 3 +- .../models/bert/bert_tdt_no_lossscale.py | 4 +- tests/st/ops/davinci/test_tdt_data_ms.py | 26 ++++++------- tests/ut/python/dataset/test_minddataset.py | 1 - 
tests/ut/python/dataset/test_project.py | 38 +++++++++---------- 7 files changed, 37 insertions(+), 42 deletions(-) diff --git a/example/Bert_NEZHA_cnwiki/train.py b/example/Bert_NEZHA_cnwiki/train.py index 87f425e21c..86e033fc9f 100644 --- a/example/Bert_NEZHA_cnwiki/train.py +++ b/example/Bert_NEZHA_cnwiki/train.py @@ -36,7 +36,7 @@ import os import numpy as np from config import bert_train_cfg, bert_net_cfg import mindspore.dataset.engine.datasets as de -import mindspore._c_dataengine as deMap +import mindspore.dataset.transforms.c_transforms as C from mindspore import context from mindspore.common.tensor import Tensor from mindspore.train.model import Model @@ -52,7 +52,7 @@ def create_train_dataset(batch_size): ds = de.StorageDataset([bert_train_cfg.DATA_DIR], bert_train_cfg.SCHEMA_DIR, columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"]) - type_cast_op = deMap.TypeCastOp("int32") + type_cast_op = C.TypeCast(mstype.int32) ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op) ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op) ds = ds.map(input_columns="next_sentence_labels", operations=type_cast_op) diff --git a/tests/st/mem_reuse/resnet_cifar_memreuse.py b/tests/st/mem_reuse/resnet_cifar_memreuse.py index 4699c00e73..4edcdd8fb8 100644 --- a/tests/st/mem_reuse/resnet_cifar_memreuse.py +++ b/tests/st/mem_reuse/resnet_cifar_memreuse.py @@ -24,8 +24,7 @@ import numpy as np import mindspore.ops.functional as F from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor from mindspore.train.serialization import load_checkpoint, load_param_into_net -import mindspore.dataengine as de -import mindspore._c_dataengine as deMap +import mindspore.dataset as de import mindspore.dataset.transforms.c_transforms as C import mindspore.dataset.transforms.vision.c_transforms as vision from mindspore.communication.management import 
init diff --git a/tests/st/mem_reuse/resnet_cifar_normal.py b/tests/st/mem_reuse/resnet_cifar_normal.py index bff0c2d6e6..39f6e7fe59 100644 --- a/tests/st/mem_reuse/resnet_cifar_normal.py +++ b/tests/st/mem_reuse/resnet_cifar_normal.py @@ -24,8 +24,7 @@ import numpy as np import mindspore.ops.functional as F from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor from mindspore.train.serialization import load_checkpoint, load_param_into_net -import mindspore.dataengine as de -import mindspore._c_dataengine as deMap +import mindspore.dataset as de import mindspore.dataset.transforms.c_transforms as C import mindspore.dataset.transforms.vision.c_transforms as vision from mindspore.communication.management import init diff --git a/tests/st/networks/models/bert/bert_tdt_no_lossscale.py b/tests/st/networks/models/bert/bert_tdt_no_lossscale.py index c1ca6f6499..7c50707fbd 100644 --- a/tests/st/networks/models/bert/bert_tdt_no_lossscale.py +++ b/tests/st/networks/models/bert/bert_tdt_no_lossscale.py @@ -21,7 +21,7 @@ import numpy as np from numpy import allclose import mindspore.common.dtype as mstype import mindspore.dataset.engine.datasets as de -import mindspore._c_dataengine as deMap +import mindspore.dataset.transforms.c_transforms as C from mindspore import context from mindspore.common.tensor import Tensor from mindspore.train.model import Model @@ -106,7 +106,7 @@ def me_de_train_dataset(): ds = de.StorageDataset(DATA_DIR, SCHEMA_DIR, columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"]) - type_cast_op = deMap.TypeCastOp("int32") + type_cast_op = C.TypeCast(mstype.int32) ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op) ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op) ds = ds.map(input_columns="next_sentence_labels", operations=type_cast_op) diff --git a/tests/st/ops/davinci/test_tdt_data_ms.py 
b/tests/st/ops/davinci/test_tdt_data_ms.py index 6463401d82..89f6f212d0 100644 --- a/tests/st/ops/davinci/test_tdt_data_ms.py +++ b/tests/st/ops/davinci/test_tdt_data_ms.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -import mindspore._c_dataengine as deMap import mindspore.dataset as ds +import mindspore.dataset.transforms.vision.c_transforms as vision +from mindspore.dataset.transforms.vision import Inter import numpy as np import sys -from mindspore._c_dataengine import InterpolationMode import mindspore.context as context import mindspore.nn as nn @@ -32,7 +32,7 @@ SCHEMA_DIR = "{0}/resnet_all_datasetSchema.json".format(data_path) def test_me_de_train_dataset(): data_list = ["{0}/train-00001-of-01024.data".format(data_path)] data_set = ds.StorageDataset(data_list, schema=SCHEMA_DIR, - columns_list=["image/encoded", "image/class/label"]) + columns_list=["image/encoded", "image/class/label"]) resize_height = 224 resize_width = 224 @@ -41,19 +41,17 @@ def test_me_de_train_dataset(): # define map operations - decode_op = deMap.DecodeOp() - resize_op = deMap.ResizeOp(resize_height, resize_width, - InterpolationMode.DE_INTER_LINEAR) # Bilinear as default - rescale_op = deMap.RescaleOp(rescale, shift) - changemode_op = deMap.ChangeModeOp() + decode_op = vision.Decode() + resize_op = vision.Resize(resize_height, resize_width, + Inter.LINEAR) # Bilinear as default + rescale_op = vision.Rescale(rescale, shift) # apply map operations on images - data_set = data_set.map(input_column_names="image/encoded", operation=decode_op) - data_set = data_set.map(input_column_names="image/encoded", operation=resize_op) - data_set = data_set.map(input_column_names="image/encoded", operation=rescale_op) - data_set = data_set.map(input_column_names="image/encoded", operation=changemode_op) - changeswap_op = deMap.ChannelSwapOp() - 
data_set = data_set.map(input_column_names="image/encoded", operation=changeswap_op) + data_set = data_set.map(input_columns="image/encoded", operations=decode_op) + data_set = data_set.map(input_columns="image/encoded", operations=resize_op) + data_set = data_set.map(input_columns="image/encoded", operations=rescale_op) + hwc2chw_op = vision.HWC2CHW() + data_set = data_set.map(input_columns="image/encoded", operations=hwc2chw_op) data_set = data_set.repeat(1) # apply batch operations batch_size = 32 diff --git a/tests/ut/python/dataset/test_minddataset.py b/tests/ut/python/dataset/test_minddataset.py index 8b8cbc807a..da22f5c3b7 100644 --- a/tests/ut/python/dataset/test_minddataset.py +++ b/tests/ut/python/dataset/test_minddataset.py @@ -24,7 +24,6 @@ import string import mindspore.dataset.transforms.vision.c_transforms as vision import numpy as np import pytest -from mindspore._c_dataengine import InterpolationMode from mindspore.dataset.transforms.vision import Inter from mindspore import log as logger diff --git a/tests/ut/python/dataset/test_project.py b/tests/ut/python/dataset/test_project.py index de600e07db..522788ac37 100644 --- a/tests/ut/python/dataset/test_project.py +++ b/tests/ut/python/dataset/test_project.py @@ -13,7 +13,8 @@ # limitations under the License. 
# ============================================================================== import mindspore.dataset.transforms.vision.c_transforms as vision -import mindspore._c_dataengine as de_map +import mindspore.dataset.transforms.c_transforms as C +from mindspore.common import dtype as mstype from util import ordered_save_and_check import mindspore.dataset as ds @@ -63,9 +64,8 @@ def test_case_project_map(): data1 = ds.TFRecordDataset(DATA_DIR_TF, SCHEMA_DIR_TF, shuffle=False) data1 = data1.project(columns=columns) - no_op = de_map.NoOp() - - data1 = data1.map(input_columns=["col_3d"], operations=no_op) + type_cast_op = C.TypeCast(mstype.int64) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) filename = "project_map_after_result.npz" ordered_save_and_check(data1, parameters, filename, generate_golden=GENERATE_GOLDEN) @@ -77,8 +77,8 @@ def test_case_map_project(): data1 = ds.TFRecordDataset(DATA_DIR_TF, SCHEMA_DIR_TF, shuffle=False) - no_op = de_map.NoOp() - data1 = data1.map(input_columns=["col_sint64"], operations=no_op) + type_cast_op = C.TypeCast(mstype.int64) + data1 = data1.map(input_columns=["col_sint64"], operations=type_cast_op) data1 = data1.project(columns=columns) @@ -92,19 +92,19 @@ def test_case_project_between_maps(): data1 = ds.TFRecordDataset(DATA_DIR_TF, SCHEMA_DIR_TF, shuffle=False) - no_op = de_map.NoOp() - data1 = data1.map(input_columns=["col_3d"], operations=no_op) - data1 = data1.map(input_columns=["col_3d"], operations=no_op) - data1 = data1.map(input_columns=["col_3d"], operations=no_op) - data1 = data1.map(input_columns=["col_3d"], operations=no_op) + type_cast_op = C.TypeCast(mstype.int64) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) data1 = data1.project(columns=columns) - data1 = 
data1.map(input_columns=["col_3d"], operations=no_op) - data1 = data1.map(input_columns=["col_3d"], operations=no_op) - data1 = data1.map(input_columns=["col_3d"], operations=no_op) - data1 = data1.map(input_columns=["col_3d"], operations=no_op) - data1 = data1.map(input_columns=["col_3d"], operations=no_op) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) + data1 = data1.map(input_columns=["col_3d"], operations=type_cast_op) filename = "project_between_maps_result.npz" ordered_save_and_check(data1, parameters, filename, generate_golden=GENERATE_GOLDEN) @@ -145,12 +145,12 @@ def test_case_map_project_map_project(): data1 = ds.TFRecordDataset(DATA_DIR_TF, SCHEMA_DIR_TF, shuffle=False) - no_op = de_map.NoOp() - data1 = data1.map(input_columns=["col_sint64"], operations=no_op) + type_cast_op = C.TypeCast(mstype.int64) + data1 = data1.map(input_columns=["col_sint64"], operations=type_cast_op) data1 = data1.project(columns=columns) - data1 = data1.map(input_columns=["col_2d"], operations=no_op) + data1 = data1.map(input_columns=["col_2d"], operations=type_cast_op) data1 = data1.project(columns=columns) From 86a33985470b4bf12e43a651d7660bd588024fd2 Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Fri, 3 Apr 2020 09:38:04 +0800 Subject: [PATCH 079/367] modify runtest py ut for parallel exec --- tests/ut/python/runtest.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tests/ut/python/runtest.sh b/tests/ut/python/runtest.sh index d1c3299821..035e454f24 100755 --- a/tests/ut/python/runtest.sh +++ b/tests/ut/python/runtest.sh @@ -39,9 +39,13 @@ if [ "x${ENABLE_GE}" == "xON" -o "x${ENABLE_GE}" == "xOn" -o "x${ENABLE_GE}" == fi if [ $# -gt 0 ]; then - pytest -s --ignore=$1/pynative_mode $IGNORE_EXEC 
$1 + pytest -s --ignore=$1/pynative_mode --ignore=$1/parallel --ignore=$1/train $IGNORE_EXEC $1 + pytest -n 4 --dist=loadfile -v $1/parallel + pytest -n 4 --dist=loadfile -v $1/train else - pytest --ignore=$CURRPATH/pynative_mode $IGNORE_EXEC $CURRPATH + pytest --ignore=$CURRPATH/pynative_mode --ignore=$CURRPATH/parallel --ignore=$CURRPATH/train $IGNORE_EXEC $CURRPATH + pytest -n 4 --dist=loadfile -v $CURRPATH/parallel + pytest -n 4 --dist=loadfile -v $CURRPATH/train fi RET=$? From 461d8e3a0801943bc27ca31a3d2987de5edd3d37 Mon Sep 17 00:00:00 2001 From: huangdongrun Date: Thu, 2 Apr 2020 15:33:46 +0800 Subject: [PATCH 080/367] add comparison ops fix pylint use scalar_lt primitive directly fix review --- mindspore/_extends/parse/resources.py | 10 +- .../ops/composite/multitype_ops/__init__.py | 12 +- .../ops/composite/multitype_ops/equal_impl.py | 3 +- .../multitype_ops/greater_equal_impl.py | 53 ++++ .../composite/multitype_ops/greater_impl.py | 53 ++++ .../multitype_ops/less_equal_impl.py | 3 +- .../ops/composite/multitype_ops/less_impl.py | 5 +- .../composite/multitype_ops/logic_not_impl.py | 48 ++++ .../composite/multitype_ops/not_equal_impl.py | 237 ++++++++++++++++++ .../ops/composite/multitype_ops/uadd_impl.py | 26 ++ mindspore/ops/functional.py | 4 + .../python_input/gtest_input/vm/vm_test.py | 3 +- tests/ut/python/ops/test_python_operators.py | 20 +- 13 files changed, 462 insertions(+), 15 deletions(-) create mode 100644 mindspore/ops/composite/multitype_ops/greater_equal_impl.py create mode 100644 mindspore/ops/composite/multitype_ops/greater_impl.py create mode 100644 mindspore/ops/composite/multitype_ops/logic_not_impl.py create mode 100644 mindspore/ops/composite/multitype_ops/not_equal_impl.py create mode 100644 mindspore/ops/composite/multitype_ops/uadd_impl.py diff --git a/mindspore/_extends/parse/resources.py b/mindspore/_extends/parse/resources.py index 1f6f4b91b5..5dd24ccf80 100644 --- a/mindspore/_extends/parse/resources.py +++ 
b/mindspore/_extends/parse/resources.py @@ -92,16 +92,16 @@ convert_object_map = { T.and_: multitype_ops.logical_and, T.or_: multitype_ops.logical_or, T.xor: NO_IMPLEMENT, - T.pos: F.scalar_uadd, + T.pos: multitype_ops.uadd, T.neg: multitype_ops.negative, T.invert: NO_IMPLEMENT, - T.not_: F.bool_not, + T.not_: multitype_ops.logical_not, T.eq: multitype_ops.equal, - T.ne: F.scalar_ne, + T.ne: multitype_ops.not_equal, T.lt: multitype_ops.less, - T.gt: F.scalar_gt, + T.gt: multitype_ops.greater, T.le: multitype_ops.less_equal, - T.ge: F.scalar_ge, + T.ge: multitype_ops.greater_equal, T.is_: F.is_, T.is_not: F.is_not, T.contains: NO_IMPLEMENT, diff --git a/mindspore/ops/composite/multitype_ops/__init__.py b/mindspore/ops/composite/multitype_ops/__init__.py index 0ab8527ab4..db28b1b5f6 100644 --- a/mindspore/ops/composite/multitype_ops/__init__.py +++ b/mindspore/ops/composite/multitype_ops/__init__.py @@ -23,23 +23,33 @@ from .getitem_impl import getitem from .zeros_like_impl import zeros_like from .ones_like_impl import ones_like from .equal_impl import equal +from .not_equal_impl import not_equal from .less_impl import less from .less_equal_impl import less_equal +from .greater_impl import greater +from .greater_equal_impl import greater_equal from .negative_impl import negative from .logical_and_impl import logical_and from .logical_or_impl import logical_or +from .logic_not_impl import logical_not +from .uadd_impl import uadd __all__ = [ 'add', 'sub', 'mul', 'div', + 'uadd', 'zeros_like', 'ones_like', 'equal', + 'not_equal', 'less', 'less_equal', + 'greater', + 'greater_equal', 'negative', 'getitem', 'logical_and', - 'logical_or' + 'logical_or', + 'logical_not' ] diff --git a/mindspore/ops/composite/multitype_ops/equal_impl.py b/mindspore/ops/composite/multitype_ops/equal_impl.py index 9ff7e6671e..428cdf4705 100644 --- a/mindspore/ops/composite/multitype_ops/equal_impl.py +++ b/mindspore/ops/composite/multitype_ops/equal_impl.py @@ -190,7 +190,8 @@ def 
_none_equal_tuple(x, y): """ return False - +@equal.register("Tensor", "Number") +@equal.register("Number", "Tensor") @equal.register("Tensor", "Tensor") def _tensor_equal_tensor(x, y): """ diff --git a/mindspore/ops/composite/multitype_ops/greater_equal_impl.py b/mindspore/ops/composite/multitype_ops/greater_equal_impl.py new file mode 100644 index 0000000000..2073abb762 --- /dev/null +++ b/mindspore/ops/composite/multitype_ops/greater_equal_impl.py @@ -0,0 +1,53 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""greater_equal_impl""" +from mindspore.ops.composite import base +from mindspore.ops import functional as F + +# greater_equal is a metagraph object which will determine if two objects are greater_equal according to input type +# using ".register" decorator +greater_equal = base.MultitypeFuncGraph("greater_equal") + + +@greater_equal.register("Number", "Number") +def _greater_equal_scala(x, y): + """ + Determine whether x is greater equal than y + + Args: + x(Number): Number. + y(Number): Number. + + Returns: + bool, if x >= y return true, x < y return false. 
+ """ + return F.scalar_ge(x, y) + +@greater_equal.register("Tensor", "Number") +@greater_equal.register("Number", "Tensor") +@greater_equal.register("Tensor", "Tensor") +def _greater_equal_tensor(x, y): + """ + Determine whether tensor x is greater equal than tensor y elementwise + + Args: + x(Tensor): Tensor. + y(Tensor): Tensor. + + Returns: + Tensor, return value by operator P.GreaterEqual. + """ + return F.tensor_ge(x, y) diff --git a/mindspore/ops/composite/multitype_ops/greater_impl.py b/mindspore/ops/composite/multitype_ops/greater_impl.py new file mode 100644 index 0000000000..7bbf53da49 --- /dev/null +++ b/mindspore/ops/composite/multitype_ops/greater_impl.py @@ -0,0 +1,53 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Ungreater required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""equal_impl""" +from mindspore.ops.composite import base +from mindspore.ops import functional as F + +# greater is a metafuncgraph object which will determine if two objects are greater according to input type +# using ".register" decorator +greater = base.MultitypeFuncGraph("greater") + + +@greater.register("Number", "Number") +def _greater_scala(x, y): + """ + Determine whether two numbers are greater. + + Args: + x(Number): Number. + y(Number): Number. + + Returns: + bool, if x > y return true, x <= y return false. 
+ """ + return F.scalar_gt(x, y) + +@greater.register("Tensor", "Number") +@greater.register("Number", "Tensor") +@greater.register("Tensor", "Tensor") +def _greater_tensor(x, y): + """ + Determine whether two tensor are greater by element. + + Args: + x(Tensor): Tensor. + y(Tensor): Tensor. + + Returns: + tensor, return operation of x and y by P.Greater + """ + return F.tensor_gt(x, y) diff --git a/mindspore/ops/composite/multitype_ops/less_equal_impl.py b/mindspore/ops/composite/multitype_ops/less_equal_impl.py index f02ab61da1..dc1438da2c 100644 --- a/mindspore/ops/composite/multitype_ops/less_equal_impl.py +++ b/mindspore/ops/composite/multitype_ops/less_equal_impl.py @@ -36,7 +36,8 @@ def _less_equal_scala(x, y): """ return F.scalar_le(x, y) - +@less_equal.register("Tensor", "Number") +@less_equal.register("Number", "Tensor") @less_equal.register("Tensor", "Tensor") def _less_equal_tensor(x, y): """ diff --git a/mindspore/ops/composite/multitype_ops/less_impl.py b/mindspore/ops/composite/multitype_ops/less_impl.py index c9c20657e5..6e50e54c82 100644 --- a/mindspore/ops/composite/multitype_ops/less_impl.py +++ b/mindspore/ops/composite/multitype_ops/less_impl.py @@ -36,7 +36,8 @@ def _less_scala(x, y): """ return F.scalar_lt(x, y) - +@less.register("Tensor", "Number") +@less.register("Number", "Tensor") @less.register("Tensor", "Tensor") def _less_tensor(x, y): """ @@ -47,6 +48,6 @@ def _less_tensor(x, y): y(Tensor): Tensor. Returns: - bool, if x and y are less elements by element return true, else return false. 
+ Tensor, return value of x and y by operation P.Less() """ return F.tensor_lt(x, y) diff --git a/mindspore/ops/composite/multitype_ops/logic_not_impl.py b/mindspore/ops/composite/multitype_ops/logic_not_impl.py new file mode 100644 index 0000000000..35ae766433 --- /dev/null +++ b/mindspore/ops/composite/multitype_ops/logic_not_impl.py @@ -0,0 +1,48 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""logical_not_impl""" +from mindspore.ops.composite import base +from mindspore.ops import functional as F + +# logical_not is a metagraph object which will generate function according to input type +# using ".register" decorator +logical_not = base.MultitypeFuncGraph("logical_not") + + +@logical_not.register("Number") +def _logical_not_scala(x): + """ + Return logical not operation result of x + + Args: + x(Number): Number. + + Returns: + bool, Return logical not operation result of x + """ + return F.bool_not(x.__bool__()) + + +@logical_not.register("Tensor") +def _logical_not_tensor(x): + """ + Return logical not operation result of x + Args: + x(Tensor): Tensor. 
+ Returns: + Tensor, Return logical not operation result of x + """ + return F.logical_not(x) diff --git a/mindspore/ops/composite/multitype_ops/not_equal_impl.py b/mindspore/ops/composite/multitype_ops/not_equal_impl.py new file mode 100644 index 0000000000..de099a2b8f --- /dev/null +++ b/mindspore/ops/composite/multitype_ops/not_equal_impl.py @@ -0,0 +1,237 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""not_equal_impl""" + +from ...composite import base +from ... import functional as F + + +not_equal = base.MultitypeFuncGraph("not_equal") +""" +not_equal is a metafuncgraph object which will determine if two objects are not_equal according to input type +using ".register" decorator +""" + + +@not_equal.register("Number", "Number") +def _not_equal_scalar(x, y): + """ + Determine if two numbers is not equal. + + Args: + x (Number): x + y (NUmber): y + + Returns: + bool, if x != y return true, x == y return false. + """ + return not F.scalar_eq(x, y) + + +@not_equal.register("String", "String") +def _not_equal_string(x, y): + """ + Determine if two strings are not equal. + + Args: + x: str + y: str + + Returns: + bool, if x != y return true, x == y return false. + """ + return not F.string_eq(x, y) + + +@not_equal.register("String", "None") +def _string_not_equal_none(x, y): + """ + Determine if string not equals none. 
+ + Args: + x: str. + y: None. + + Returns: + bool, return True. + """ + return True + + +@not_equal.register("None", "String") +def _none_not_equal_string(x, y): + """ + Determine if string not equals none. + + Args: + x: None. + y: str. + + Returns: + bool, return True. + """ + return True + + +@not_equal.register("None", "None") +def _none_not_equal_none(x, y): + """ + Determine if none not equals none. + + Args: + x: None. + y: None. + + Returns: + bool, return False. + """ + return False + + +@not_equal.register("Number", "None") +def _scalar_not_equal_none(x, y): + """ + Determine if number not equals none. + + Args: + x: Number. + y: None. + + Returns: + bool, return True. + """ + return True + + +@not_equal.register("None", "Number") +def _none_not_equal_scalar(x, y): + """ + Determine if number not_equals none. + + Args: + x: None. + y: NUmber. + + Returns: + bool, return True. + """ + return True + + +@not_equal.register("Tuple", "Tuple") +def _euqal_tuple(x, y): + """ + Determine if two tuples are not equal by element. + + Args: + x (tuple): x + y (tuple): y + + Returns: + bool, if x and y are not equal by element return true, else return false. + """ + return not F.tuple_equal(x, y) + + +@not_equal.register("List", "List") +def _euqal_list(x, y): + """ + Determine if two lists are not equal by element. + + Args: + x (list): x + y (list): y + + Returns: + bool, if x and y are not equal by element return true, else return false. + """ + return not F.list_equal(x, y) + + +@not_equal.register("Tuple", "None") +def _tuple_euqal_none(x, y): + """ + Determine if tuple element not equals none element. + + Args: + x: Tuple. + y: None. + + Returns: + bool, return True. + """ + return True + + +@not_equal.register("None", "Tuple") +def _none_not_equal_tuple(x, y): + """ + Determine if tuple element not equals none element. + + Args: + x: None. + y: Tuple. + + Returns: + bool, return True. 
+ """ + return True + +@not_equal.register("Tensor", "Number") +@not_equal.register("Number", "Tensor") +@not_equal.register("Tensor", "Tensor") +def _tensor_not_equal_tensor(x, y): + """ + Determine if two tensors are not_equal. + + Args: + x : Tensor. + y : Tensor. + + Returns: + bool, if x == y return true, x != y return false. + """ + return F.not_equal(x, y) + + +@not_equal.register("Tensor", "None") +def _tensor_not_equal_none(x, y): + """ + Determine if tensor not_equal none. + + Args: + x : Tensor. + y : None. + + Returns: + bool, return True. + """ + return True + + +@not_equal.register("None", "Tensor") +def _none_not_equal_tensor(x, y): + """ + Determine if tensor not equal none. + + Args: + x : None. + y : Tensor. + + Returns: + bool, return True. + """ + return True diff --git a/mindspore/ops/composite/multitype_ops/uadd_impl.py b/mindspore/ops/composite/multitype_ops/uadd_impl.py new file mode 100644 index 0000000000..163120b541 --- /dev/null +++ b/mindspore/ops/composite/multitype_ops/uadd_impl.py @@ -0,0 +1,26 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""uadd_impl""" +from mindspore.ops.composite import base + +# uadd is a metagraph object which will return operation result regarding input +# using ".register" decorator +uadd = base.MultitypeFuncGraph("uadd") + +@uadd.register("Tensor") +@uadd.register("Number") +def _uadd_scala(x): + return x diff --git a/mindspore/ops/functional.py b/mindspore/ops/functional.py index 5adb6fac57..a74c1464a5 100644 --- a/mindspore/ops/functional.py +++ b/mindspore/ops/functional.py @@ -43,12 +43,15 @@ tensor_add = P.TensorAdd() neg_tensor = P.Neg() tensor_lt = P.Less() tensor_le = P.LessEqual() +tensor_gt = P.Greater() +tensor_ge = P.GreaterEqual() tensor_sub = P.Sub() tensor_mul = P.Mul() tensor_div = P.RealDiv() strided_slice = P.StridedSlice() same_type_shape = P.SameTypeShape() equal = P.Equal() +not_equal = P.NotEqual() assign_sub = P.AssignSub() assign = P.Assign() square = P.Square() @@ -97,6 +100,7 @@ bool_or = Primitive("bool_or") bool_and = Primitive("bool_and") logical_and = P.LogicalAnd() logical_or = P.LogicalOr() +logical_not = P.LogicalNot() array_to_scalar = Primitive('array_to_scalar') is_ = Primitive("is_") is_not = Primitive("is_not") diff --git a/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py b/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py index bdd3c900d6..947e9fa2c3 100644 --- a/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py +++ b/tests/ut/cpp/python_input/gtest_input/vm/vm_test.py @@ -17,6 +17,7 @@ from mindspore.ops import Primitive scala_add = Primitive('scalar_add') scala_mul = Primitive('scalar_mul') +scalar_gt = Primitive('scalar_gt') def scalar_add(x, y): """Implement `scalar_add`.""" return scala_add(x, y) @@ -26,6 +27,6 @@ def scalar_mul(x, y): return scala_mul(x, y) def test_if(x, y): - if x > y: + if scalar_gt(x, y): return x return y diff --git a/tests/ut/python/ops/test_python_operators.py b/tests/ut/python/ops/test_python_operators.py index 
d6c6c03760..eb65a7f373 100644 --- a/tests/ut/python/ops/test_python_operators.py +++ b/tests/ut/python/ops/test_python_operators.py @@ -31,8 +31,20 @@ class ComparisonOpsNet(nn.Cell): def __init__(self): super(ComparisonOpsNet, self).__init__() def construct(self, x, y): - ret = x <= y - return ret + a = x <= y + b = x <= 1.0 + c = y >= 1.0 + d = y >= x + e = x < y + f = x < 1.0 + g = 1.0 > y + h = y > x + i = y == 3.0 + j = x != 4 + k = + x + l = + 1.0 + m = k != l + return a or b or c or d or e or f or g or h or i or j or m class LogicalNumberOpsNet(nn.Cell): def __init__(self): @@ -41,7 +53,7 @@ class LogicalNumberOpsNet(nn.Cell): self.one = 0 self.zero = 0.0 def construct(self, x, y): - if self.cond and self.one or self.zero: + if self.cond and self.one or self.zero and not self.one: return x + y return x - y @@ -51,7 +63,7 @@ class LogicalTensorOpsNet(nn.Cell): super(LogicalTensorOpsNet, self).__init__() self.const_true = Tensor(True, dtype=mstype.bool_) def construct(self, x, y): - ret = x and y and (y or self.const_true) + ret = x and y and (y or self.const_true) and (not self.const_true) return ret From 8ece432ae9a3ef85ba1b3ecefe5c8701d97f6e9d Mon Sep 17 00:00:00 2001 From: jjfeing Date: Thu, 2 Apr 2020 22:03:36 +0800 Subject: [PATCH 081/367] tr5 run package update --- cmake/dependency_graphengine.cmake | 25 ++++++++++--------- graphengine | 2 +- .../parallel_compile/tbe_compiler/common.py | 2 +- mindspore/ccsrc/CMakeLists.txt | 22 +++++++++------- 4 files changed, 28 insertions(+), 23 deletions(-) diff --git a/cmake/dependency_graphengine.cmake b/cmake/dependency_graphengine.cmake index dfe90de836..d2f80c4d04 100644 --- a/cmake/dependency_graphengine.cmake +++ b/cmake/dependency_graphengine.cmake @@ -38,19 +38,20 @@ elseif (DEFINED ENV{D_LINK_PATH}) find_library(cce libcce.so ${GE_LIB_PATH}) find_library(resource libresource.so ${GE_LIB_PATH}) else() - set(HIAI_INSTALLED_DIR /usr/local/HiAI) - set(HIAI_DRIVER_DIR ${HIAI_INSTALLED_DIR}/driver/lib64) - 
set(HIAI_RUNTIME_DIR ${HIAI_INSTALLED_DIR}/runtime/lib64) - find_library(c_sec libc_sec.so ${HIAI_DRIVER_DIR}) - find_library(slog libslog.so ${HIAI_DRIVER_DIR}) - find_library(mmpa libmmpa.so ${HIAI_DRIVER_DIR}) + # Ascend mode + set(ASCEND_PATH /usr/local/Ascend) + set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common) + set(ASCEND_RUNTIME_PATH ${ASCEND_PATH}/fwkacllib/lib64) - find_library(cce libcce.so ${HIAI_RUNTIME_DIR}) - find_library(hccl libhccl.so ${HIAI_RUNTIME_DIR}) - find_library(runtime libruntime.so ${HIAI_RUNTIME_DIR}) - find_library(msprof libmsprof.so ${HIAI_RUNTIME_DIR}) - find_library(register libregister.so ${HIAI_RUNTIME_DIR}) - find_library(resource libresource.so ${HIAI_RUNTIME_DIR}) + find_library(c_sec libc_sec.so ${ASCEND_DRIVER_PATH}) + find_library(slog libslog.so ${ASCEND_DRIVER_PATH}) + find_library(mmpa libmmpa.so ${ASCEND_DRIVER_PATH}) + find_library(cce libcce.so ${ASCEND_RUNTIME_PATH}) + find_library(hccl libhccl.so ${ASCEND_RUNTIME_PATH}) + find_library(runtime libruntime.so ${ASCEND_RUNTIME_PATH}) + find_library(msprof libmsprof.so ${ASCEND_RUNTIME_PATH}) + find_library(register libregister.so ${ASCEND_RUNTIME_PATH}) + find_library(resource libresource.so ${ASCEND_RUNTIME_PATH}) endif() # compile libraries from following directories diff --git a/graphengine b/graphengine index 49351fb73a..60b2262ce7 160000 --- a/graphengine +++ b/graphengine @@ -1 +1 @@ -Subproject commit 49351fb73ac7786b9ed9e807568a1a8e06183693 +Subproject commit 60b2262ce700db538e4c728619e1972c3687f633 diff --git a/mindspore/_extends/parallel_compile/tbe_compiler/common.py b/mindspore/_extends/parallel_compile/tbe_compiler/common.py index 6cfa2afd0d..6258cf8d45 100644 --- a/mindspore/_extends/parallel_compile/tbe_compiler/common.py +++ b/mindspore/_extends/parallel_compile/tbe_compiler/common.py @@ -50,7 +50,7 @@ def get_build_in_impl_path(): tbe_impl_path = os.environ.get("TBE_IMPL_PATH") if tbe_impl_path is None: default_install_path = 
'/usr/local/HiAI/runtime/ops/op_impl/built-in/ai_core/tbe/' - backup_install_path = '/usr/local/Ascend/Ascend/opp/op_impl/built-in/ai_core/tbe/' + backup_install_path = '/usr/local/Ascend/opp/op_impl/built-in/ai_core/tbe/' if os.path.exists(default_install_path): tbe_impl_path = default_install_path elif os.path.exists(backup_install_path): diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 0b4bb0d1df..c4deb53e68 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -279,15 +279,17 @@ if(ENABLE_D) endif() else() MESSAGE("use system default lib") - set(D_LIB_PATH "/usr/local/HiAI/runtime/lib64/") + set(ASCEND_PATH /usr/local/Ascend) + set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common) + set(ASCEND_RUNTIME_PATH ${ASCEND_PATH}/fwkacllib/lib64) endif() - MESSAGE("USE DAV LIB PATH: ${D_LIB_PATH}") - find_library(HCCL hccl ${D_LIB_PATH}) - find_library(CCE_LIB cce ${D_LIB_PATH}) - find_library(RUNTIME_LIB runtime ${D_LIB_PATH}) - find_library(TSDCLIENT tsdclient ${D_LIB_PATH}) - find_library(PROFILING msprof ${D_LIB_PATH}) + MESSAGE("USE DAV LIB PATH: ${ASCEND_PATH}") + find_library(HCCL hccl ${ASCEND_RUNTIME_PATH}) + find_library(CCE_LIB cce ${ASCEND_RUNTIME_PATH}) + find_library(RUNTIME_LIB runtime ${ASCEND_RUNTIME_PATH}) + find_library(TSDCLIENT tsdclient ${ASCEND_RUNTIME_PATH}) + find_library(PROFILING msprof ${ASCEND_DRIVER_PATH}) target_link_libraries(mindspore ge_runtime ${CCE_LIB} ${RUNTIME_LIB} ${TSDCLIENT} ${PROFILING} ${HCCL} ${TSDCLIENT}) endif() @@ -481,12 +483,14 @@ add_dependencies(add_ms_lib _c_expression) if (NOT ENABLE_GE) if (ENABLE_D) + set(ASCEND_PATH /usr/local/Ascend) + set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common) add_custom_target(add_ge_lib ALL COMMAND cp ${MS_CCSRC_BUILD_PATH}/../../graphengine/src/common/graph/libgraph.so ${MS_LIB_PATH} COMMAND cp ${MS_CCSRC_BUILD_PATH}/../../graphengine/src/ge/common/libge_common.so ${MS_LIB_PATH} COMMAND cp 
${MS_CCSRC_BUILD_PATH}/../../graphengine/src/ge/ge_runtime/libge_runtime.so ${MS_LIB_PATH} - COMMAND cp /usr/local/HiAI/driver/lib64/libslog.so ${MS_LIB_PATH} - COMMAND cp /usr/local/HiAI/driver/lib64/libc_sec.so ${MS_LIB_PATH} + COMMAND cp ${ASCEND_DRIVER_PATH}/libslog.so ${MS_LIB_PATH} + COMMAND cp ${ASCEND_DRIVER_PATH}/libc_sec.so ${MS_LIB_PATH} ) add_dependencies(add_ge_lib add_ms_lib) add_dependencies(add_ge_lib graph) From b3cd639c09129168fc74e3e52d0d8f2728efabeb Mon Sep 17 00:00:00 2001 From: y00445136 Date: Thu, 2 Apr 2020 11:39:24 +0800 Subject: [PATCH 082/367] add support for bool --- mindspore/ccsrc/utils/tensorprint_utils.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mindspore/ccsrc/utils/tensorprint_utils.cc b/mindspore/ccsrc/utils/tensorprint_utils.cc index ee58ed418c..1036b424ba 100644 --- a/mindspore/ccsrc/utils/tensorprint_utils.cc +++ b/mindspore/ccsrc/utils/tensorprint_utils.cc @@ -37,13 +37,13 @@ static std::map print_type_map = { {"int32_t", TypeId::kNumberTypeInt32}, {"uint32_t", TypeId::kNumberTypeUInt32}, {"int64_t", TypeId::kNumberTypeInt64}, {"uint64_t", TypeId::kNumberTypeUInt64}, {"float16", TypeId::kNumberTypeFloat16}, {"float", TypeId::kNumberTypeFloat32}, - {"double", TypeId::kNumberTypeFloat64}}; + {"double", TypeId::kNumberTypeFloat64}, {"bool", TypeId::kNumberTypeBool}}; static std::map type_size_map = { {"int8_t", sizeof(int8_t)}, {"uint8_t", sizeof(uint8_t)}, {"int16_t", sizeof(int16_t)}, {"uint16_t", sizeof(uint16_t)}, {"int32_t", sizeof(int32_t)}, {"uint32_t", sizeof(uint32_t)}, {"int64_t", sizeof(int64_t)}, {"uint64_t", sizeof(uint64_t)}, {"float16", sizeof(float) / 2}, - {"float", sizeof(float)}, {"double", sizeof(double)}}; + {"float", sizeof(float)}, {"double", sizeof(double)}, {"bool", sizeof(bool)}}; bool ParseTensorShape(const std::string &input_shape_str, std::vector *const tensor_shape, size_t *dims) { if (tensor_shape == nullptr) { @@ -107,7 +107,7 @@ bool ConvertDataItem2Tensor(const 
std::vector &items) { } else { auto type_iter = print_type_map.find(item.tensorType_); if (type_iter == print_type_map.end()) { - MS_LOG(ERROR) << "type of tensor need to print is not soupport" << item.tensorType_; + MS_LOG(ERROR) << "type of tensor need to print is not support " << item.tensorType_; continue; } auto type_id = type_iter->second; From 0c81759ae695d2c07339c68ba821a961df0ba3a6 Mon Sep 17 00:00:00 2001 From: zhaoting Date: Wed, 1 Apr 2020 10:22:48 +0800 Subject: [PATCH 083/367] add YOLOv3 infer scipt and change dataset to MindRecord --- example/yolov3_coco2017/config.py | 3 + example/yolov3_coco2017/dataset.py | 265 +++++++----------- example/yolov3_coco2017/eval.py | 107 +++++++ .../yolov3_coco2017/run_distribute_train.sh | 24 +- example/yolov3_coco2017/run_eval.sh | 23 ++ .../yolov3_coco2017/run_standalone_train.sh | 8 +- example/yolov3_coco2017/train.py | 114 +++++--- example/yolov3_coco2017/util.py | 146 ++++++++++ mindspore/ccsrc/kernel/tbe/tbe_adapter.cc | 1 + mindspore/model_zoo/yolov3.py | 97 ++++++- mindspore/ops/_op_impl/tbe/__init__.py | 2 + mindspore/ops/_op_impl/tbe/reduce_min.py | 76 +++++ mindspore/ops/_op_impl/tbe/round.py | 65 +++++ 13 files changed, 704 insertions(+), 227 deletions(-) create mode 100644 example/yolov3_coco2017/eval.py create mode 100644 example/yolov3_coco2017/run_eval.sh create mode 100644 example/yolov3_coco2017/util.py create mode 100644 mindspore/ops/_op_impl/tbe/reduce_min.py create mode 100644 mindspore/ops/_op_impl/tbe/round.py diff --git a/example/yolov3_coco2017/config.py b/example/yolov3_coco2017/config.py index f00eb89dc7..6d858bcbcb 100644 --- a/example/yolov3_coco2017/config.py +++ b/example/yolov3_coco2017/config.py @@ -26,6 +26,7 @@ class ConfigYOLOV3ResNet18: img_shape = [352, 640] feature_shape = [32, 3, 352, 640] num_classes = 80 + nms_max_num = 50 backbone_input_shape = [64, 64, 128, 256] backbone_shape = [64, 128, 256, 512] @@ -33,6 +34,8 @@ class ConfigYOLOV3ResNet18: backbone_stride = [1, 2, 2, 
2] ignore_threshold = 0.5 + obj_threshold = 0.3 + nms_threshold = 0.4 anchor_scales = [(10, 13), (16, 30), diff --git a/example/yolov3_coco2017/dataset.py b/example/yolov3_coco2017/dataset.py index 56520704b4..826fe16c53 100644 --- a/example/yolov3_coco2017/dataset.py +++ b/example/yolov3_coco2017/dataset.py @@ -16,16 +16,14 @@ """YOLOv3 dataset""" from __future__ import division -import abc -import io import os -import math -import json import numpy as np from PIL import Image from matplotlib.colors import rgb_to_hsv, hsv_to_rgb import mindspore.dataset as de +from mindspore.mindrecord import FileWriter import mindspore.dataset.transforms.vision.py_transforms as P +import mindspore.dataset.transforms.vision.c_transforms as C from config import ConfigYOLOV3ResNet18 iter_cnt = 0 @@ -114,6 +112,29 @@ def preprocess_fn(image, box, is_training): return y_true[0], y_true[1], y_true[2], pad_gt_box0, pad_gt_box1, pad_gt_box2 + def _infer_data(img_data, input_shape, box): + w, h = img_data.size + input_h, input_w = input_shape + scale = min(float(input_w) / float(w), float(input_h) / float(h)) + nw = int(w * scale) + nh = int(h * scale) + img_data = img_data.resize((nw, nh), Image.BICUBIC) + + new_image = np.zeros((input_h, input_w, 3), np.float32) + new_image.fill(128) + img_data = np.array(img_data) + if len(img_data.shape) == 2: + img_data = np.expand_dims(img_data, axis=-1) + img_data = np.concatenate([img_data, img_data, img_data], axis=-1) + + dh = int((input_h - nh) / 2) + dw = int((input_w - nw) / 2) + new_image[dh:(nh + dh), dw:(nw + dw), :] = img_data + new_image /= 255. 
+ new_image = np.transpose(new_image, (2, 0, 1)) + new_image = np.expand_dims(new_image, 0) + return new_image, np.array([h, w], np.float32), box + def _data_aug(image, box, is_training, jitter=0.3, hue=0.1, sat=1.5, val=1.5, image_size=(352, 640)): """Data augmentation function.""" if not isinstance(image, Image.Image): @@ -124,32 +145,7 @@ def preprocess_fn(image, box, is_training): h, w = image_size if not is_training: - image = image.resize((w, h), Image.BICUBIC) - image_data = np.array(image) / 255. - if len(image_data.shape) == 2: - image_data = np.expand_dims(image_data, axis=-1) - image_data = np.concatenate([image_data, image_data, image_data], axis=-1) - image_data = image_data.astype(np.float32) - - # correct boxes - box_data = np.zeros((max_boxes, 5)) - if len(box) >= 1: - np.random.shuffle(box) - if len(box) > max_boxes: - box = box[:max_boxes] - # xmin ymin xmax ymax - box[:, [0, 2]] = box[:, [0, 2]] * float(w) / float(iw) - box[:, [1, 3]] = box[:, [1, 3]] * float(h) / float(ih) - box_data[:len(box)] = box - else: - image_data, box_data = None, None - - # preprocess bounding boxes - bbox_true_1, bbox_true_2, bbox_true_3, gt_box1, gt_box2, gt_box3 = \ - _preprocess_true_boxes(box_data, anchors, image_size) - - return image_data, bbox_true_1, bbox_true_2, bbox_true_3, \ - ori_image_shape, gt_box1, gt_box2, gt_box3 + return _infer_data(image, image_size, box) flip = _rand() < .5 # correct boxes @@ -235,12 +231,16 @@ def preprocess_fn(image, box, is_training): return image_data, bbox_true_1, bbox_true_2, bbox_true_3, \ ori_image_shape, gt_box1, gt_box2, gt_box3 - images, bbox_1, bbox_2, bbox_3, _, gt_box1, gt_box2, gt_box3 = _data_aug(image, box, is_training) - return images, bbox_1, bbox_2, bbox_3, gt_box1, gt_box2, gt_box3 + if is_training: + images, bbox_1, bbox_2, bbox_3, _, gt_box1, gt_box2, gt_box3 = _data_aug(image, box, is_training) + return images, bbox_1, bbox_2, bbox_3, gt_box1, gt_box2, gt_box3 + + images, shape, anno = _data_aug(image, box, 
is_training) + return images, shape, anno def anno_parser(annos_str): - """Annotation parser.""" + """Parse annotation from string to list.""" annos = [] for anno_str in annos_str: anno = list(map(int, anno_str.strip().split(','))) @@ -248,142 +248,71 @@ def anno_parser(annos_str): return annos -def expand_path(path): - """Get file list from path.""" - files = [] - if os.path.isdir(path): - for file in os.listdir(path): - if os.path.isfile(os.path.join(path, file)): - files.append(file) - else: +def filter_valid_data(image_dir, anno_path): + """Filter valid image file, which both in image_dir and anno_path.""" + image_files = [] + image_anno_dict = {} + if not os.path.isdir(image_dir): raise RuntimeError("Path given is not valid.") - return files - - -def read_image(img_path): - """Read image with PIL.""" - with open(img_path, "rb") as f: - img = f.read() - data = io.BytesIO(img) - img = Image.open(data) - return np.array(img) - - -class BaseDataset(): - """BaseDataset for GeneratorDataset iterator.""" - def __init__(self, image_dir, anno_path): - self.image_dir = image_dir - self.anno_path = anno_path - self.cur_index = 0 - self.samples = [] - self.image_anno_dict = {} - self._load_samples() - - def __getitem__(self, item): - sample = self.samples[item] - return self._next_data(sample, self.image_dir, self.image_anno_dict) - - def __len__(self): - return len(self.samples) - - @staticmethod - def _next_data(sample, image_dir, image_anno_dict): - """Get next data.""" - image = read_image(os.path.join(image_dir, sample)) - annos = image_anno_dict[sample] - return [np.array(image), np.array(annos)] - - @abc.abstractmethod - def _load_samples(self): - """Base load samples.""" - - -class YoloDataset(BaseDataset): - """YoloDataset for GeneratorDataset iterator.""" - def _load_samples(self): - """Load samples.""" - image_files_raw = expand_path(self.image_dir) - self.samples = self._filter_valid_data(self.anno_path, image_files_raw) - self.dataset_size = len(self.samples) 
- if self.dataset_size == 0: - raise RuntimeError("Valid dataset is none!") - - def _filter_valid_data(self, anno_path, image_files_raw): - """Filter valid data.""" - image_files = [] - anno_dict = {} - print("Start filter valid data.") - with open(anno_path, "rb") as f: - lines = f.readlines() - for line in lines: - line_str = line.decode("utf-8") - line_split = str(line_str).split(' ') - anno_dict[line_split[0].split("/")[-1]] = line_split[1:] - anno_set = set(anno_dict.keys()) - image_set = set(image_files_raw) - for image_file in (anno_set & image_set): - image_files.append(image_file) - self.image_anno_dict[image_file] = anno_parser(anno_dict[image_file]) - image_files.sort() - print("Filter valid data done!") - return image_files - - -class DistributedSampler(): - """DistributedSampler for YOLOv3""" - def __init__(self, dataset_size, batch_size, num_replicas=None, rank=None, shuffle=True): - if num_replicas is None: - num_replicas = 1 - if rank is None: - rank = 0 - self.dataset_size = dataset_size - self.num_replicas = num_replicas - self.rank = rank % num_replicas - self.epoch = 0 - self.num_samples = max(batch_size, int(math.ceil(dataset_size * 1.0 / self.num_replicas))) - self.total_size = self.num_samples * self.num_replicas - self.shuffle = shuffle - - def __iter__(self): - # deterministically shuffle based on epoch - if self.shuffle: - indices = np.random.RandomState(seed=self.epoch).permutation(self.dataset_size) - indices = indices.tolist() - else: - indices = list(range(self.dataset_size)) - - # add extra samples to make it evenly divisible - indices += indices[:(self.total_size - len(indices))] - assert len(indices) == self.total_size - - # subsample - indices = indices[self.rank:self.total_size:self.num_replicas] - assert len(indices) == self.num_samples - - return iter(indices) - - def __len__(self): - return self.num_samples - - def set_epoch(self, epoch): - self.epoch = epoch - - -def create_yolo_dataset(image_dir, anno_path, batch_size=32, 
repeat_num=10, device_num=1, rank=0, + if not os.path.isfile(anno_path): + raise RuntimeError("Annotation file is not valid.") + + with open(anno_path, "rb") as f: + lines = f.readlines() + for line in lines: + line_str = line.decode("utf-8").strip() + line_split = str(line_str).split(' ') + file_name = line_split[0] + if os.path.isfile(os.path.join(image_dir, file_name)): + image_anno_dict[file_name] = anno_parser(line_split[1:]) + image_files.append(file_name) + return image_files, image_anno_dict + + +def data_to_mindrecord_byte_image(image_dir, anno_path, mindrecord_dir, prefix="yolo.mindrecord", file_num=8): + """Create MindRecord file by image_dir and anno_path.""" + mindrecord_path = os.path.join(mindrecord_dir, prefix) + writer = FileWriter(mindrecord_path, file_num) + image_files, image_anno_dict = filter_valid_data(image_dir, anno_path) + + yolo_json = { + "image": {"type": "bytes"}, + "annotation": {"type": "int64", "shape": [-1, 5]}, + } + writer.add_schema(yolo_json, "yolo_json") + + for image_name in image_files: + image_path = os.path.join(image_dir, image_name) + with open(image_path, 'rb') as f: + img = f.read() + annos = np.array(image_anno_dict[image_name]) + row = {"image": img, "annotation": annos} + writer.write_raw_data([row]) + writer.commit() + + +def create_yolo_dataset(mindrecord_dir, batch_size=32, repeat_num=10, device_num=1, rank=0, is_training=True, num_parallel_workers=8): - """Creatr YOLOv3 dataset with GeneratorDataset.""" - yolo_dataset = YoloDataset(image_dir=image_dir, anno_path=anno_path) - distributed_sampler = DistributedSampler(yolo_dataset.dataset_size, batch_size, device_num, rank) - ds = de.GeneratorDataset(yolo_dataset, column_names=["image", "annotation"], sampler=distributed_sampler) - ds.set_dataset_size(len(distributed_sampler)) + """Creatr YOLOv3 dataset with MindDataset.""" + ds = de.MindDataset(mindrecord_dir, columns_list=["image", "annotation"], num_shards=device_num, shard_id=rank, + 
num_parallel_workers=num_parallel_workers, shuffle=is_training) + decode = C.Decode() + ds = ds.map(input_columns=["image"], operations=decode) compose_map_func = (lambda image, annotation: preprocess_fn(image, annotation, is_training)) - hwc_to_chw = P.HWC2CHW() - ds = ds.map(input_columns=["image", "annotation"], - output_columns=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"], - columns_order=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"], - operations=compose_map_func, num_parallel_workers=num_parallel_workers) - ds = ds.map(input_columns=["image"], operations=hwc_to_chw, num_parallel_workers=num_parallel_workers) - ds = ds.shuffle(buffer_size=256) - ds = ds.batch(batch_size, drop_remainder=True) - ds = ds.repeat(repeat_num) + + if is_training: + hwc_to_chw = P.HWC2CHW() + ds = ds.map(input_columns=["image", "annotation"], + output_columns=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"], + columns_order=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"], + operations=compose_map_func, num_parallel_workers=num_parallel_workers) + ds = ds.map(input_columns=["image"], operations=hwc_to_chw, num_parallel_workers=num_parallel_workers) + ds = ds.shuffle(buffer_size=256) + ds = ds.batch(batch_size, drop_remainder=True) + ds = ds.repeat(repeat_num) + else: + ds = ds.map(input_columns=["image", "annotation"], + output_columns=["image", "image_shape", "annotation"], + columns_order=["image", "image_shape", "annotation"], + operations=compose_map_func, num_parallel_workers=num_parallel_workers) return ds diff --git a/example/yolov3_coco2017/eval.py b/example/yolov3_coco2017/eval.py new file mode 100644 index 0000000000..8347f5be37 --- /dev/null +++ b/example/yolov3_coco2017/eval.py @@ -0,0 +1,107 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# less required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Evaluation for yolo_v3""" +import os +import argparse +import time +from mindspore import context, Tensor +from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.model_zoo.yolov3 import yolov3_resnet18, YoloWithEval +from dataset import create_yolo_dataset, data_to_mindrecord_byte_image +from config import ConfigYOLOV3ResNet18 +from util import metrics + +def yolo_eval(dataset_path, ckpt_path): + """Yolov3 evaluation.""" + + ds = create_yolo_dataset(dataset_path, is_training=False) + config = ConfigYOLOV3ResNet18() + net = yolov3_resnet18(config) + eval_net = YoloWithEval(net, config) + print("Load Checkpoint!") + param_dict = load_checkpoint(ckpt_path) + load_param_into_net(net, param_dict) + + + eval_net.set_train(False) + i = 1. 
+ total = ds.get_dataset_size() + start = time.time() + pred_data = [] + print("\n========================================\n") + print("total images num: ", total) + print("Processing, please wait a moment.") + for data in ds.create_dict_iterator(): + img_np = data['image'] + image_shape = data['image_shape'] + annotation = data['annotation'] + + eval_net.set_train(False) + output = eval_net(Tensor(img_np), Tensor(image_shape)) + for batch_idx in range(img_np.shape[0]): + pred_data.append({"boxes": output[0].asnumpy()[batch_idx], + "box_scores": output[1].asnumpy()[batch_idx], + "annotation": annotation}) + percent = round(i / total * 100, 2) + + print(' %s [%d/%d]' % (str(percent) + '%', i, total), end='\r') + i += 1 + print(' %s [%d/%d] cost %d ms' % (str(100.0) + '%', total, total, int((time.time() - start) * 1000)), end='\n') + + precisions, recalls = metrics(pred_data) + print("\n========================================\n") + for i in range(config.num_classes): + print("class {} precision is {:.2f}%, recall is {:.2f}%".format(i, precisions[i] * 100, recalls[i] * 100)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Yolov3 evaluation') + parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.") + parser.add_argument("--mindrecord_dir", type=str, default="./Mindrecord_eval", + help="Mindrecord directory. If the mindrecord_dir is empty, it wil generate mindrecord file by" + "image_dir and anno_path. Note if mindrecord_dir isn't empty, it will use mindrecord_dir " + "rather than image_dir and anno_path. 
Default is ./Mindrecord_eval") + parser.add_argument("--image_dir", type=str, default="", help="Dataset directory, " + "the absolute image path is joined by the image_dir " + "and the relative path in anno_path.") + parser.add_argument("--anno_path", type=str, default="", help="Annotation path.") + parser.add_argument("--ckpt_path", type=str, required=True, help="Checkpoint path.") + args_opt = parser.parse_args() + + context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id) + context.set_context(enable_task_sink=True, enable_loop_sink=True, enable_mem_reuse=True, + enable_auto_mixed_precision=False) + + # It will generate mindrecord file in args_opt.mindrecord_dir, + # and the file name is yolo.mindrecord0, 1, ... file_num. + if not os.path.isdir(args_opt.mindrecord_dir): + os.makedirs(args_opt.mindrecord_dir) + + prefix = "yolo.mindrecord" + mindrecord_file = os.path.join(args_opt.mindrecord_dir, prefix + "0") + if not os.path.exists(mindrecord_file): + if os.path.isdir(args_opt.image_dir) and os.path.exists(args_opt.anno_path): + print("Create Mindrecord") + data_to_mindrecord_byte_image(args_opt.image_dir, + args_opt.anno_path, + args_opt.mindrecord_dir, + prefix=prefix, + file_num=8) + print("Create Mindrecord Done, at {}".format(args_opt.mindrecord_dir)) + else: + print("image_dir or anno_path not exits") + print("Start Eval!") + yolo_eval(mindrecord_file, args_opt.ckpt_path) diff --git a/example/yolov3_coco2017/run_distribute_train.sh b/example/yolov3_coco2017/run_distribute_train.sh index 98e6690b8c..0c43e776b9 100644 --- a/example/yolov3_coco2017/run_distribute_train.sh +++ b/example/yolov3_coco2017/run_distribute_train.sh @@ -14,17 +14,26 @@ # limitations under the License. 
# ============================================================================ +echo "==============================================================================================================" echo "Please run the scipt as: " -echo "sh run_distribute_train.sh DEVICE_NUM EPOCH_SIZE IMAGE_DIR ANNO_PATH MINDSPORE_HCCL_CONFIG_PATH" -echo "for example: sh run_distribute_train.sh 8 100 ./dataset/coco/train2017 ./dataset/train.txt ./hccl.json" -echo "After running the scipt, the network runs in the background. The log will be generated in LOGx/log.txt" +echo "sh run_distribute_train.sh DEVICE_NUM EPOCH_SIZE MINDRECORD_DIR IMAGE_DIR ANNO_PATH MINDSPORE_HCCL_CONFIG_PATH" +echo "for example: sh run_distribute_train.sh 8 100 /data/Mindrecord_train /data /data/train.txt /data/hccl.json" +echo "It is better to use absolute path." +echo "==============================================================================================================" -export RANK_SIZE=$1 EPOCH_SIZE=$2 -IMAGE_DIR=$3 -ANNO_PATH=$4 -export MINDSPORE_HCCL_CONFIG_PATH=$5 +MINDRECORD_DIR=$3 +IMAGE_DIR=$4 +ANNO_PATH=$5 + +# Before start distribute train, first create mindrecord files. +python train.py --only_create_dataset=1 --mindrecord_dir=$MINDRECORD_DIR --image_dir=$IMAGE_DIR \ +--anno_path=$ANNO_PATH +echo "After running the scipt, the network runs in the background. The log will be generated in LOGx/log.txt" + +export MINDSPORE_HCCL_CONFIG_PATH=$6 +export RANK_SIZE=$1 for((i=0;i log.txt 2>&1 & diff --git a/example/yolov3_coco2017/run_eval.sh b/example/yolov3_coco2017/run_eval.sh new file mode 100644 index 0000000000..4608e92589 --- /dev/null +++ b/example/yolov3_coco2017/run_eval.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +echo "==============================================================================================================" +echo "Please run the scipt as: " +echo "sh run_eval.sh DEVICE_ID CKPT_PATH MINDRECORD_DIR IMAGE_DIR ANNO_PATH" +echo "for example: sh run_eval.sh 0 yolo.ckpt ./Mindrecord_eval ./dataset ./dataset/eval.txt" +echo "==============================================================================================================" + +python eval.py --device_id=$1 --ckpt_path=$2 --mindrecord_dir=$3 --image_dir=$4 --anno_path=$5 diff --git a/example/yolov3_coco2017/run_standalone_train.sh b/example/yolov3_coco2017/run_standalone_train.sh index b3b8b0cd9a..d91d537008 100644 --- a/example/yolov3_coco2017/run_standalone_train.sh +++ b/example/yolov3_coco2017/run_standalone_train.sh @@ -14,8 +14,10 @@ # limitations under the License. 
# ============================================================================ +echo "==============================================================================================================" echo "Please run the scipt as: " -echo "sh run_standalone_train.sh DEVICE_ID EPOCH_SIZE IMAGE_DIR ANNO_PATH" -echo "for example: sh run_standalone_train.sh 0 50 ./dataset/coco/train2017 ./dataset/train.txt" +echo "sh run_standalone_train.sh DEVICE_ID EPOCH_SIZE MINDRECORD_DIR IMAGE_DIR ANNO_PATH" +echo "for example: sh run_standalone_train.sh 0 50 ./Mindrecord_train ./dataset ./dataset/train.txt" +echo "==============================================================================================================" -python train.py --device_id=$1 --epoch_size=$2 --image_dir=$3 --anno_path=$4 +python train.py --device_id=$1 --epoch_size=$2 --mindrecord_dir=$3 --image_dir=$4 --anno_path=$5 diff --git a/example/yolov3_coco2017/train.py b/example/yolov3_coco2017/train.py index a3dd8f34fe..3ac3816f4a 100644 --- a/example/yolov3_coco2017/train.py +++ b/example/yolov3_coco2017/train.py @@ -16,26 +16,30 @@ """ ######################## train YOLOv3 example ######################## train YOLOv3 and get network model files(.ckpt) : -python train.py --image_dir dataset/coco/coco/train2017 --anno_path dataset/coco/train_coco.txt +python train.py --image_dir /data --anno_path /data/coco/train_coco.txt --mindrecord_dir=/data/Mindrecord_train + +If the mindrecord_dir is empty, it wil generate mindrecord file by image_dir and anno_path. +Note if mindrecord_dir isn't empty, it will use mindrecord_dir rather than image_dir and anno_path. 
""" +import os import argparse import numpy as np import mindspore.nn as nn from mindspore import context, Tensor -from mindspore.common.initializer import initializer from mindspore.communication.management import init from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor from mindspore.train import Model, ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net +from mindspore.common.initializer import initializer from mindspore.model_zoo.yolov3 import yolov3_resnet18, YoloWithLossCell, TrainingWrapper -from dataset import create_yolo_dataset +from dataset import create_yolo_dataset, data_to_mindrecord_byte_image from config import ConfigYOLOV3ResNet18 def get_lr(learning_rate, start_step, global_step, decay_step, decay_rate, steps=False): - """Set learning rate""" + """Set learning rate.""" lr_each_step = [] lr = learning_rate for i in range(global_step): @@ -57,7 +61,9 @@ def init_net_param(net, init='ones'): if __name__ == '__main__': - parser = argparse.ArgumentParser(description="YOLOv3") + parser = argparse.ArgumentParser(description="YOLOv3 train") + parser.add_argument("--only_create_dataset", type=bool, default=False, help="If set it true, only create " + "Mindrecord, default is false.") parser.add_argument("--distribute", type=bool, default=False, help="Run distribute, default is false.") parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.") parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.") @@ -67,12 +73,19 @@ if __name__ == '__main__': parser.add_argument("--checkpoint_path", type=str, default="", help="Checkpoint file path") parser.add_argument("--save_checkpoint_epochs", type=int, default=5, help="Save checkpoint epochs, default is 5.") parser.add_argument("--loss_scale", type=int, default=1024, help="Loss scale, default is 1024.") - parser.add_argument("--image_dir", type=str, required=True, 
help="Dataset image dir.") - parser.add_argument("--anno_path", type=str, required=True, help="Dataset anno path.") + parser.add_argument("--mindrecord_dir", type=str, default="./Mindrecord_train", + help="Mindrecord directory. If the mindrecord_dir is empty, it wil generate mindrecord file by" + "image_dir and anno_path. Note if mindrecord_dir isn't empty, it will use mindrecord_dir " + "rather than image_dir and anno_path. Default is ./Mindrecord_train") + parser.add_argument("--image_dir", type=str, default="", help="Dataset directory, " + "the absolute image path is joined by the image_dir " + "and the relative path in anno_path") + parser.add_argument("--anno_path", type=str, default="", help="Annotation path.") args_opt = parser.parse_args() context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args_opt.device_id) - context.set_context(enable_task_sink=True, enable_loop_sink=True, enable_mem_reuse=True) + context.set_context(enable_task_sink=True, enable_loop_sink=True, enable_mem_reuse=True, + enable_auto_mixed_precision=False) if args_opt.distribute: device_num = args_opt.device_num context.reset_auto_parallel_context() @@ -80,36 +93,65 @@ if __name__ == '__main__': context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, mirror_mean=True, device_num=device_num) init() - rank = args_opt.device_id + rank = args_opt.device_id % device_num else: context.set_context(enable_hccl=False) rank = 0 device_num = 1 - loss_scale = float(args_opt.loss_scale) - dataset = create_yolo_dataset(args_opt.image_dir, args_opt.anno_path, repeat_num=args_opt.epoch_size, - batch_size=args_opt.batch_size, device_num=device_num, rank=rank) - dataset_size = dataset.get_dataset_size() - net = yolov3_resnet18(ConfigYOLOV3ResNet18()) - net = YoloWithLossCell(net, ConfigYOLOV3ResNet18()) - init_net_param(net, "XavierUniform") - - # checkpoint - ckpt_config = CheckpointConfig(save_checkpoint_steps=dataset_size * args_opt.save_checkpoint_epochs) 
- ckpoint_cb = ModelCheckpoint(prefix="yolov3", directory=None, config=ckpt_config) - if args_opt.checkpoint_path != "": - param_dict = load_checkpoint(args_opt.checkpoint_path) - load_param_into_net(net, param_dict) - - lr = Tensor(get_lr(learning_rate=0.001, start_step=0, global_step=args_opt.epoch_size * dataset_size, - decay_step=1000, decay_rate=0.95)) - opt = nn.Adam(filter(lambda x: x.requires_grad, net.get_parameters()), lr, loss_scale=loss_scale) - net = TrainingWrapper(net, opt, loss_scale) - callback = [TimeMonitor(data_size=dataset_size), LossMonitor(), ckpoint_cb] - - model = Model(net) - dataset_sink_mode = False - if args_opt.mode == "graph": - dataset_sink_mode = True - print("Start train YOLOv3.") - model.train(args_opt.epoch_size, dataset, callbacks=callback, dataset_sink_mode=dataset_sink_mode) + print("Start create dataset!") + + # It will generate mindrecord file in args_opt.mindrecord_dir, + # and the file name is yolo.mindrecord0, 1, ... file_num. + if not os.path.isdir(args_opt.mindrecord_dir): + os.makedirs(args_opt.mindrecord_dir) + + prefix = "yolo.mindrecord" + mindrecord_file = os.path.join(args_opt.mindrecord_dir, prefix + "0") + if not os.path.exists(mindrecord_file): + if os.path.isdir(args_opt.image_dir) and os.path.exists(args_opt.anno_path): + print("Create Mindrecord.") + data_to_mindrecord_byte_image(args_opt.image_dir, + args_opt.anno_path, + args_opt.mindrecord_dir, + prefix=prefix, + file_num=8) + print("Create Mindrecord Done, at {}".format(args_opt.mindrecord_dir)) + else: + print("image_dir or anno_path not exits.") + + if not args_opt.only_create_dataset: + loss_scale = float(args_opt.loss_scale) + + # When create MindDataset, using the fitst mindrecord file, such as yolo.mindrecord0. 
+ dataset = create_yolo_dataset(mindrecord_file, repeat_num=args_opt.epoch_size, + batch_size=args_opt.batch_size, device_num=device_num, rank=rank) + dataset_size = dataset.get_dataset_size() + print("Create dataset done!") + + net = yolov3_resnet18(ConfigYOLOV3ResNet18()) + net = YoloWithLossCell(net, ConfigYOLOV3ResNet18()) + init_net_param(net, "XavierUniform") + + # checkpoint + ckpt_config = CheckpointConfig(save_checkpoint_steps=dataset_size * args_opt.save_checkpoint_epochs) + ckpoint_cb = ModelCheckpoint(prefix="yolov3", directory=None, config=ckpt_config) + + lr = Tensor(get_lr(learning_rate=0.001, start_step=0, global_step=args_opt.epoch_size * dataset_size, + decay_step=1000, decay_rate=0.95)) + opt = nn.Adam(filter(lambda x: x.requires_grad, net.get_parameters()), lr, loss_scale=loss_scale) + net = TrainingWrapper(net, opt, loss_scale) + + if args_opt.checkpoint_path != "": + param_dict = load_checkpoint(args_opt.checkpoint_path) + load_param_into_net(net, param_dict) + + callback = [TimeMonitor(data_size=dataset_size), LossMonitor(), ckpoint_cb] + + model = Model(net) + dataset_sink_mode = False + if args_opt.mode == "graph": + print("In graph mode, one epoch return a loss.") + dataset_sink_mode = True + print("Start train YOLOv3, the first epoch will be slower because of the graph compilation.") + model.train(args_opt.epoch_size, dataset, callbacks=callback, dataset_sink_mode=dataset_sink_mode) diff --git a/example/yolov3_coco2017/util.py b/example/yolov3_coco2017/util.py new file mode 100644 index 0000000000..62e15afe38 --- /dev/null +++ b/example/yolov3_coco2017/util.py @@ -0,0 +1,146 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""metrics utils""" + +import numpy as np +from config import ConfigYOLOV3ResNet18 + + +def calc_iou(bbox_pred, bbox_ground): + """Calculate iou of predicted bbox and ground truth.""" + x1 = bbox_pred[0] + y1 = bbox_pred[1] + width1 = bbox_pred[2] - bbox_pred[0] + height1 = bbox_pred[3] - bbox_pred[1] + + x2 = bbox_ground[0] + y2 = bbox_ground[1] + width2 = bbox_ground[2] - bbox_ground[0] + height2 = bbox_ground[3] - bbox_ground[1] + + endx = max(x1 + width1, x2 + width2) + startx = min(x1, x2) + width = width1 + width2 - (endx - startx) + + endy = max(y1 + height1, y2 + height2) + starty = min(y1, y2) + height = height1 + height2 - (endy - starty) + + if width <= 0 or height <= 0: + iou = 0 + else: + area = width * height + area1 = width1 * height1 + area2 = width2 * height2 + iou = area * 1. 
/ (area1 + area2 - area) + + return iou + + +def apply_nms(all_boxes, all_scores, thres, max_boxes): + """Apply NMS to bboxes.""" + x1 = all_boxes[:, 0] + y1 = all_boxes[:, 1] + x2 = all_boxes[:, 2] + y2 = all_boxes[:, 3] + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + + order = all_scores.argsort()[::-1] + keep = [] + + while order.size > 0: + i = order[0] + keep.append(i) + + if len(keep) >= max_boxes: + break + + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= thres)[0] + + order = order[inds + 1] + return keep + + +def metrics(pred_data): + """Calculate precision and recall of predicted bboxes.""" + config = ConfigYOLOV3ResNet18() + num_classes = config.num_classes + count_corrects = [1e-6 for _ in range(num_classes)] + count_grounds = [1e-6 for _ in range(num_classes)] + count_preds = [1e-6 for _ in range(num_classes)] + + for i, sample in enumerate(pred_data): + gt_anno = sample["annotation"] + box_scores = sample['box_scores'] + boxes = sample['boxes'] + mask = box_scores >= config.obj_threshold + boxes_ = [] + scores_ = [] + classes_ = [] + max_boxes = config.nms_max_num + for c in range(num_classes): + class_boxes = np.reshape(boxes, [-1, 4])[np.reshape(mask[:, c], [-1])] + class_box_scores = np.reshape(box_scores[:, c], [-1])[np.reshape(mask[:, c], [-1])] + nms_index = apply_nms(class_boxes, class_box_scores, config.nms_threshold, max_boxes) + class_boxes = class_boxes[nms_index] + class_box_scores = class_box_scores[nms_index] + classes = np.ones_like(class_box_scores, 'int32') * c + boxes_.append(class_boxes) + scores_.append(class_box_scores) + classes_.append(classes) + + boxes = np.concatenate(boxes_, axis=0) + classes = np.concatenate(classes_, axis=0) + + + # metric + 
count_correct = [1e-6 for _ in range(num_classes)] + count_ground = [1e-6 for _ in range(num_classes)] + count_pred = [1e-6 for _ in range(num_classes)] + + for anno in gt_anno: + count_ground[anno[4]] += 1 + + for box_index, box in enumerate(boxes): + bbox_pred = [box[1], box[0], box[3], box[2]] + count_pred[classes[box_index]] += 1 + + for anno in gt_anno: + class_ground = anno[4] + + if classes[box_index] == class_ground: + iou = calc_iou(bbox_pred, anno) + if iou >= 0.5: + count_correct[class_ground] += 1 + break + + count_corrects = [count_corrects[i] + count_correct[i] for i in range(num_classes)] + count_preds = [count_preds[i] + count_pred[i] for i in range(num_classes)] + count_grounds = [count_grounds[i] + count_ground[i] for i in range(num_classes)] + + precision = np.array([count_corrects[ix] / count_preds[ix] for ix in range(num_classes)]) + recall = np.array([count_corrects[ix] / count_grounds[ix] for ix in range(num_classes)]) + return precision, recall diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc index 50fed77a9a..c0416f648b 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc @@ -34,6 +34,7 @@ static std::map tbe_func_adapter_map = { {"tensor_add", "add"}, {"reduce_mean", "reduce_mean_d"}, {"reduce_max", "reduce_max_d"}, + {"reduce_min", "reduce_min_d"}, {"conv2d_backprop_filter", "conv2d_backprop_filter_d"}, {"conv2d_backprop_input", "conv2d_backprop_input_d"}, {"top_kv2", "top_k"}, diff --git a/mindspore/model_zoo/yolov3.py b/mindspore/model_zoo/yolov3.py index 5ac3b67086..0ac6b21070 100644 --- a/mindspore/model_zoo/yolov3.py +++ b/mindspore/model_zoo/yolov3.py @@ -15,6 +15,7 @@ """YOLOv3 based on ResNet18.""" +import numpy as np import mindspore as ms import mindspore.nn as nn from mindspore import context, Tensor @@ -31,19 +32,14 @@ def weight_variable(): return TruncatedNormal(0.02) -class _conv_with_pad(nn.Cell): +class _conv2d(nn.Cell): 
"""Create Conv2D with padding.""" def __init__(self, in_channels, out_channels, kernel_size, stride=1): - super(_conv_with_pad, self).__init__() - total_pad = kernel_size - 1 - pad_begin = total_pad // 2 - pad_end = total_pad - pad_begin - self.pad = P.Pad(((0, 0), (0, 0), (pad_begin, pad_end), (pad_begin, pad_end))) + super(_conv2d, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, - kernel_size=kernel_size, stride=stride, padding=0, pad_mode='valid', + kernel_size=kernel_size, stride=stride, padding=0, pad_mode='same', weight_init=weight_variable()) def construct(self, x): - x = self.pad(x) x = self.conv(x) return x @@ -101,15 +97,15 @@ class BasicBlock(nn.Cell): momentum=0.99): super(BasicBlock, self).__init__() - self.conv1 = _conv_with_pad(in_channels, out_channels, 3, stride=stride) + self.conv1 = _conv2d(in_channels, out_channels, 3, stride=stride) self.bn1 = _fused_bn(out_channels, momentum=momentum) - self.conv2 = _conv_with_pad(out_channels, out_channels, 3) + self.conv2 = _conv2d(out_channels, out_channels, 3) self.bn2 = _fused_bn(out_channels, momentum=momentum) self.relu = P.ReLU() self.down_sample_layer = None self.downsample = (in_channels != out_channels) if self.downsample: - self.down_sample_layer = _conv_with_pad(in_channels, out_channels, 1, stride=stride) + self.down_sample_layer = _conv2d(in_channels, out_channels, 1, stride=stride) self.add = P.TensorAdd() def construct(self, x): @@ -166,7 +162,7 @@ class ResNet(nn.Cell): raise ValueError("the length of " "layer_num, inchannel, outchannel list must be 4!") - self.conv1 = _conv_with_pad(3, 64, 7, stride=2) + self.conv1 = _conv2d(3, 64, 7, stride=2) self.bn1 = _fused_bn(64) self.relu = P.ReLU() self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same') @@ -452,7 +448,7 @@ class DetectionBlock(nn.Cell): if self.training: return grid, prediction, box_xy, box_wh - return self.concat((box_xy, box_wh, box_confidence, box_probs)) + return box_xy, box_wh, box_confidence, 
box_probs class Iou(nn.Cell): @@ -675,3 +671,78 @@ class TrainingWrapper(nn.Cell): # apply grad reducer on grads grads = self.grad_reducer(grads) return F.depend(loss, self.optimizer(grads)) + + +class YoloBoxScores(nn.Cell): + """ + Calculate the boxes of the original picture size and the score of each box. + + Args: + config (Class): YOLOv3 config. + + Returns: + Tensor, the boxes of the original picture size. + Tensor, the score of each box. + """ + def __init__(self, config): + super(YoloBoxScores, self).__init__() + self.input_shape = Tensor(np.array(config.img_shape), ms.float32) + self.num_classes = config.num_classes + + def construct(self, box_xy, box_wh, box_confidence, box_probs, image_shape): + batch_size = F.shape(box_xy)[0] + x = box_xy[:, :, :, :, 0:1] + y = box_xy[:, :, :, :, 1:2] + box_yx = P.Concat(-1)((y, x)) + w = box_wh[:, :, :, :, 0:1] + h = box_wh[:, :, :, :, 1:2] + box_hw = P.Concat(-1)((h, w)) + + new_shape = P.Round()(image_shape * P.ReduceMin()(self.input_shape / image_shape)) + offset = (self.input_shape - new_shape) / 2.0 / self.input_shape + scale = self.input_shape / new_shape + box_yx = (box_yx - offset) * scale + box_hw = box_hw * scale + + box_min = box_yx - box_hw / 2.0 + box_max = box_yx + box_hw / 2.0 + boxes = P.Concat(-1)((box_min[:, :, :, :, 0:1], + box_min[:, :, :, :, 1:2], + box_max[:, :, :, :, 0:1], + box_max[:, :, :, :, 1:2])) + image_scale = P.Tile()(image_shape, (1, 2)) + boxes = boxes * image_scale + boxes = F.reshape(boxes, (batch_size, -1, 4)) + boxes_scores = box_confidence * box_probs + boxes_scores = F.reshape(boxes_scores, (batch_size, -1, self.num_classes)) + return boxes, boxes_scores + + +class YoloWithEval(nn.Cell): + """ + Encapsulation class of YOLOv3 evaluation. + + Args: + network (Cell): The training network. Note that loss function and optimizer must not be added. + config (Class): YOLOv3 config. + + Returns: + Tensor, the boxes of the original picture size. + Tensor, the score of each box. 
+ Tensor, the original picture size. + """ + def __init__(self, network, config): + super(YoloWithEval, self).__init__() + self.yolo_network = network + self.box_score_0 = YoloBoxScores(config) + self.box_score_1 = YoloBoxScores(config) + self.box_score_2 = YoloBoxScores(config) + + def construct(self, x, image_shape): + yolo_output = self.yolo_network(x) + boxes_0, boxes_scores_0 = self.box_score_0(*yolo_output[0], image_shape) + boxes_1, boxes_scores_1 = self.box_score_1(*yolo_output[1], image_shape) + boxes_2, boxes_scores_2 = self.box_score_2(*yolo_output[2], image_shape) + boxes = P.Concat(1)((boxes_0, boxes_1, boxes_2)) + boxes_scores = P.Concat(1)((boxes_scores_0, boxes_scores_1, boxes_scores_2)) + return boxes, boxes_scores, image_shape diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py index b9b9af4396..0b79ae845b 100644 --- a/mindspore/ops/_op_impl/tbe/__init__.py +++ b/mindspore/ops/_op_impl/tbe/__init__.py @@ -85,7 +85,9 @@ from .logical_and import _logical_and_tbe from .logical_not import _logical_not_tbe from .logical_or import _logical_or_tbe from .reduce_max import _reduce_max_tbe +from .reduce_min import _reduce_min_tbe from .reduce_sum import _reduce_sum_tbe +from .round import _round_tbe from .tanh import _tanh_tbe from .tanh_grad import _tanh_grad_tbe from .softmax import _softmax_tbe diff --git a/mindspore/ops/_op_impl/tbe/reduce_min.py b/mindspore/ops/_op_impl/tbe/reduce_min.py new file mode 100644 index 0000000000..f440b1fff6 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/reduce_min.py @@ -0,0 +1,76 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""ReduceMin op""" +from mindspore.ops.op_info_register import op_info_register + + +@op_info_register("""{ + "op_name": "ReduceMin", + "imply_type": "TBE", + "fusion_type": "OPAQUE", + "async_flag": false, + "binfile_name": "reduce_min_d.so", + "compute_cost": 10, + "kernel_name": "reduce_min_d", + "partial_flag": true, + "attr": [ + { + "name": "axis", + "param_type": "required", + "type": "listInt", + "value": "all" + }, + { + "name": "keep_dims", + "param_type": "required", + "type": "bool", + "value": "all" + } + ], + "inputs": [ + { + "index": 0, + "dtype": [ + "float16", "float16", "float", "float", "int8", "int8", "uint8", "uint8" + ], + "format": [ + "DefaultFormat", "FracZ", "DefaultFormat", "FracZ", "DefaultFormat", "FracZ", "DefaultFormat", "FracZ" + ], + "name": "x", + "need_compile": false, + "param_type": "required", + "shape": "all" + } + ], + "outputs": [ + { + "index": 0, + "dtype": [ + "float16", "float16", "float", "float", "int8", "int8", "uint8", "uint8" + ], + "format": [ + "DefaultFormat", "FracZ", "DefaultFormat", "FracZ", "DefaultFormat", "FracZ", "DefaultFormat", "FracZ" + ], + "name": "y", + "need_compile": false, + "param_type": "required", + "shape": "all" + } + ] +}""") +def _reduce_min_tbe(): + """ReduceMin TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/round.py b/mindspore/ops/_op_impl/tbe/round.py new file mode 100644 index 0000000000..1368100a8e --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/round.py @@ -0,0 +1,65 @@ +# 
Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Round op""" +from mindspore.ops.op_info_register import op_info_register + + +@op_info_register("""{ + "op_name": "Round", + "imply_type": "TBE", + "fusion_type": "ELEMWISE", + "async_flag": false, + "binfile_name": "round.so", + "compute_cost": 10, + "kernel_name": "round", + "partial_flag": true, + "attr": [ + + ], + "inputs": [ + { + "index": 0, + "dtype": [ + "float16", "float16", "float16", "float", "float", "float" + ], + "format": [ + "DefaultFormat", "NC1HWC0", "FracZ", "DefaultFormat", "NC1HWC0", "FracZ" + ], + "name": "x", + "need_compile": false, + "param_type": "required", + "shape": "all" + } + ], + "outputs": [ + { + "index": 0, + "dtype": [ + "float16", "float16", "float16", "float", "float", "float" + ], + "format": [ + "DefaultFormat", "NC1HWC0", "FracZ", "DefaultFormat", "NC1HWC0", "FracZ" + ], + "name": "y", + "need_compile": false, + "param_type": "required", + "shape": "all" + } + ] +}""") +def _round_tbe(): + """Round TBE register""" + return From 1b4041a8f1fc2a8e438a04a58d84ba3e092ac5e3 Mon Sep 17 00:00:00 2001 From: zhaoting Date: Fri, 3 Apr 2020 11:45:49 +0800 Subject: [PATCH 084/367] add weight decay in RMSProp optimizer --- mindspore/nn/optim/rmsprop.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/mindspore/nn/optim/rmsprop.py 
b/mindspore/nn/optim/rmsprop.py index 3000fdeeee..faaeacfaa8 100644 --- a/mindspore/nn/optim/rmsprop.py +++ b/mindspore/nn/optim/rmsprop.py @@ -18,7 +18,8 @@ from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore._checkparam import ParamValidator as validator import mindspore.common.dtype as mstype -from .optimizer import Optimizer, grad_scale +from mindspore.common import Tensor +from .optimizer import Optimizer, grad_scale, apply_decay rmsprop_opt = C.MultitypeFuncGraph("rmsprop_opt") centered_rmsprop_opt = C.MultitypeFuncGraph("rmsprop_opt") @@ -118,6 +119,9 @@ class RMSProp(Optimizer): use_locking (bool): Enable a lock to protect the update of variable and accumlation tensors. Default: False. centered (bool): If True, gradients are normalized by the estimated variance of the gradient. Default: False loss_scale (float): A floating point value for the loss scale. Default: 1.0. + weight_decay (float): Weight decay (L2 penalty). Default: 0.0. + decay_filter (Function): A function to determine whether to apply weight decay on parameters. Default: + lambda x: 'beta' not in x.name and 'gamma' not in x.name. Inputs: - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`. 
@@ -132,7 +136,8 @@ class RMSProp(Optimizer): >>> model = Model(net, loss, opt) """ def __init__(self, params, learning_rate=0.1, decay=0.9, momentum=0.0, epsilon=1e-10, - use_locking=False, centered=False, loss_scale=1.0): + use_locking=False, centered=False, loss_scale=1.0, weight_decay=0.0, + decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name): super(RMSProp, self).__init__(learning_rate, params) if isinstance(momentum, float) and momentum < 0.0: @@ -159,6 +164,7 @@ class RMSProp(Optimizer): self.assignadd = P.AssignAdd() self.global_step = Parameter(initializer(0, [1], mstype.int32), name="global_step") self.axis = 0 + self.one = Tensor(1, mstype.int32) self.momentum = momentum @@ -167,10 +173,14 @@ class RMSProp(Optimizer): self.hyper_map = C.HyperMap() self.decay = decay + self.decay_tf = tuple(decay_filter(x) for x in self.parameters) self.reciprocal_scale = 1.0 / loss_scale + self.weight_decay = weight_decay * loss_scale def construct(self, gradients): params = self.parameters + if self.weight_decay > 0: + gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, gradients) if self.reciprocal_scale != 1.0: gradients = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), gradients) if self.dynamic_lr: From da123c5b3e8a5217030ffbf4e03c232d667d7547 Mon Sep 17 00:00:00 2001 From: wanghua Date: Fri, 3 Apr 2020 14:55:25 +0800 Subject: [PATCH 085/367] fix bert precison bug --- .../device/ascend/kernel_select_ascend.cc | 20 ++++++++++++------- mindspore/ccsrc/utils/utils.h | 3 +++ mindspore/ops/_op_impl/tbe/gelu.py | 8 ++++---- .../models/bert/bert_tdt_no_lossscale.py | 9 ++++----- 4 files changed, 24 insertions(+), 16 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc index 347d63be39..a7c8d131fb 100644 --- a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc +++ 
b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc @@ -35,6 +35,7 @@ enum MatchCountPriority : int { MATCH_COUNT_PRIORITY_BEGIN = 0, MATCH_DTYPE_COUNT = MATCH_COUNT_PRIORITY_BEGIN, MATCH_FORMAT_COUNT, + MATCH_SPECIAL_FORMAT_COUNT, MATCH_5D_FORMAT_COUNT, MATCH_OUTPUT_DTYPE_COUNT, MATCH_COUNT_PRIORITY_END @@ -81,6 +82,12 @@ bool IsValidKernelInfo(const std::shared_ptr &kernel_node, const kernel:: } return true; }; + if (AnfAlgo::GetCNodeName(kernel_node) == "LayerNormBetaGammaBackprop" || + AnfAlgo::GetCNodeName(kernel_node) == "LayerNormXBackprop") { + if (AnfAlgo::GetPrevNodeOutputFormat(kernel_node, 0) != kernel_build_info.GetInputFormat(0)) { + return true; + } + } if (AnfAlgo::GetCNodeName(kernel_node) == prim::kPrimCast->name()) { return AnfAlgo::GetOutputInferDataType(kernel_node, 0) == kernel_build_info.GetOutputDeviceType(0) && AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0) == kernel_build_info.GetInputDeviceType(0); @@ -154,7 +161,7 @@ bool PriorityChooseItem(const std::vector &cur_item, std::vector *best return false; } } - return false; + return true; } void UpdateCurMatchCounts(const kernel::KernelBuildInfo &kernel_build_info, const std::shared_ptr &kernel_node, @@ -174,12 +181,11 @@ void UpdateCurMatchCounts(const kernel::KernelBuildInfo &kernel_build_info, cons continue; } } - if (input_anf_node->isa()) { - if (AnfAlgo::GetOutputDeviceDataType(input_anf_node, 0) == kTypeUnknown) { - continue; - } - } if (kernel_build_info.GetInputFormat(input_index) == AnfAlgo::GetPrevNodeOutputFormat(kernel_node, input_index)) { + if (AnfAlgo::IsFeatureMapInput(kernel_node, input_index) && + kSpecialFormatSet.find(kernel_build_info.GetInputFormat(input_index)) != kSpecialFormatSet.end()) { + (*cur_kernelinfo_match_counts)[MATCH_SPECIAL_FORMAT_COUNT]++; + } (*cur_kernelinfo_match_counts)[MATCH_FORMAT_COUNT]++; } if (kernel_build_info.GetInputDeviceType(input_index) == @@ -203,7 +209,7 @@ void UpdateCurMatchCounts(const kernel::KernelBuildInfo 
&kernel_build_info, cons (*cur_kernelinfo_match_counts)[MATCH_OUTPUT_DTYPE_COUNT]++; } } -} +} // namespace void SetTensorDeviceInfo(const kernel::KernelBuildInfo &selected_kernel_info, const CNodePtr &kernel_node) { MS_EXCEPTION_IF_NULL(kernel_node); diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index ee1eeaddfc..ea5e969e52 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -195,6 +195,9 @@ const std::set kOptOperatorSet = { kApplyRMSPropOpName, }; +const std::set kSpecialFormatSet = {kOpFormat_FRAC_Z, kOpFormat_NC1KHKWHWC0, kOpFormat_NC1HWC0, + kOpFormat_FRAC_NZ, kOpFormat_C1HWNCoC0}; + static inline void ChangeFileMode(const std::string& file_name, mode_t mode) { if (access(file_name.c_str(), F_OK) != 0) { MS_LOG(DEBUG) << "File `" << file_name << "` does not exist."; diff --git a/mindspore/ops/_op_impl/tbe/gelu.py b/mindspore/ops/_op_impl/tbe/gelu.py index 8093312547..171d97c043 100644 --- a/mindspore/ops/_op_impl/tbe/gelu.py +++ b/mindspore/ops/_op_impl/tbe/gelu.py @@ -32,10 +32,10 @@ from mindspore.ops.op_info_register import op_info_register { "index": 0, "dtype": [ - "float16","float","float16","float16","float16","float16","float","float","float","float" + "float16","float","float16","float","float16","float16","float16","float16","float","float","float","float" ], "format": [ - "FracZ","FracZ","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat" + "FRACTAL_NZ","FRACTAL_NZ","FracZ","FracZ","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat" ], "name": "x", "need_compile": false, @@ -47,10 +47,10 @@ from mindspore.ops.op_info_register import op_info_register { "index": 0, "dtype": [ - "float16","float","float16","float16","float16","float16","float","float","float","float" + 
"float16","float","float16","float","float16","float16","float16","float16","float","float","float","float" ], "format": [ - "FracZ","FracZ","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat" + "FRACTAL_NZ","FRACTAL_NZ","FracZ","FracZ","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat" ], "name": "y", "need_compile": true, diff --git a/tests/st/networks/models/bert/bert_tdt_no_lossscale.py b/tests/st/networks/models/bert/bert_tdt_no_lossscale.py index 7c50707fbd..6f3ffc7dad 100644 --- a/tests/st/networks/models/bert/bert_tdt_no_lossscale.py +++ b/tests/st/networks/models/bert/bert_tdt_no_lossscale.py @@ -153,8 +153,7 @@ def test_bert_tdt(): batch_size = int(os.getenv('BATCH_SIZE', '16')) config = get_config(version=version, batch_size=batch_size) netwithloss = BertNetworkWithLoss(config, True) - optimizer = Lamb(netwithloss.trainable_params(), decay_steps=10000, start_learning_rate=1e-4, - end_learning_rate=0.0, power=10.0, warmup_steps=0, decay_filter=lambda x: False) + optimizer = Momentum(netwithloss.trainable_params(), learning_rate=2e-5, momentum=0.9) netwithgrads = BertTrainOneStepCell(netwithloss, optimizer=optimizer) netwithgrads.set_train(True) model = Model(netwithgrads) @@ -178,10 +177,10 @@ def test_bert_tdt(): param.default_input = weight_variable(value.asnumpy().shape) model.train(ds.get_repeat_count(), ds, callbacks=parallel_callback, dataset_sink_mode=False) loss_value = np.array(parallel_callback.loss_list) - expect_out = [12.191790, 11.739655, 11.523477, 11.320723, 11.113152, 11.203759, 10.841681, 10.826849, - 10.616718, 10.486609] + expect_out = [12.19179, 11.965041, 11.969687, 11.97815, 11.969171, 12.603289, 12.165594, + 12.824818, 12.38842, 12.604046] logger.info("expected loss value output: {}".format(expect_out)) - assert allclose(loss_value, expect_out, 0.001, 0.001) + assert allclose(loss_value, expect_out, 
0.00001, 0.00001) if __name__ == '__main__': test_bert_tdt() From 01822c54d9928a08e0b688bb2493f0fe22276741 Mon Sep 17 00:00:00 2001 From: kingfo Date: Fri, 3 Apr 2020 10:53:46 +0800 Subject: [PATCH 086/367] fix ME+GE compile error --- CMakeLists.txt | 2 +- mindspore/ccsrc/CMakeLists.txt | 2 +- mindspore/ccsrc/pipeline/pipeline_ge.cc | 17 ++++------------- 3 files changed, 6 insertions(+), 15 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bdac2da46e..d11314408e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,7 +38,7 @@ set(MS_CCSRC_BUILD_PATH ${BUILD_PATH}/mindspore/mindspore/ccsrc) if (ENABLE_GE) link_directories(${CMAKE_SOURCE_DIR}/third_party/ge/lib) -else() +elseif(ENABLE_D OR ENABLE_TESTCASES) include(${CMAKE_SOURCE_DIR}/cmake/dependency_graphengine.cmake) endif() diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 0b4bb0d1df..ef9b7e951c 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -491,7 +491,7 @@ if (NOT ENABLE_GE) add_dependencies(add_ge_lib add_ms_lib) add_dependencies(add_ge_lib graph) add_dependencies(add_ge_lib ge_runtime) - else() + elseif(ENABLE_TESTCASES) add_custom_target(add_ge_lib ALL COMMAND cp ${MS_CCSRC_BUILD_PATH}/../../graphengine/src/common/graph/libgraph.so ${MS_LIB_PATH} COMMAND cp ${CMAKE_SOURCE_DIR}/graphengine/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR}/libslog.so ${MS_LIB_PATH} diff --git a/mindspore/ccsrc/pipeline/pipeline_ge.cc b/mindspore/ccsrc/pipeline/pipeline_ge.cc index 4617884ca0..5a2a8039dd 100644 --- a/mindspore/ccsrc/pipeline/pipeline_ge.cc +++ b/mindspore/ccsrc/pipeline/pipeline_ge.cc @@ -429,20 +429,11 @@ std::shared_ptr DoExecGraph(const FuncGraphPtr& graph, const std::ve std::shared_ptr ret = nullptr; -#ifdef ENABLE_GE - AnfNodePtr root = graph->get_return(); - MS_EXCEPTION_IF_NULL(root); - AbstractBasePtr output = root->abstract(); + AnfNodePtr output_node = graph->get_return()->input(1); + 
MS_EXCEPTION_IF_NULL(output_node); size_t count = 0; - py::object oj = StructureOutput(output, outputs, &count); + py::object oj = StructureOutput(output_node, outputs, &count); ret = std::make_shared(oj); -#else - if (outputs.size() == 1) { - ret = std::make_shared(outputs[0]); - } else { - ret = std::make_shared(outputs); - } -#endif return ret; } @@ -495,7 +486,7 @@ py::object ExecDFGraph(const std::map& info, const FuncGraphPtr anf_graph = info.at(phase)->func_graph; -#if (!defined ENABLE_GE) || (defined ENABLE_INFER) +#ifdef ENABLE_INFER // Now don't use the graph because the exec ge function don't take effect MS_EXCEPTION_IF_NULL(info.at(phase)->func_graph); if (ENABLE_TRAIN != info.at(phase)->func_graph->flags()["training"]) { From dd0d4e6b84bb80908b920421b5877562c7cf3d39 Mon Sep 17 00:00:00 2001 From: yangzhenzhang <285824651@qq.com> Date: Thu, 2 Apr 2020 15:18:34 +0800 Subject: [PATCH 087/367] add parallel ops for expand dims --- mindspore/ccsrc/parallel/dynamic_creator.h | 1 + .../parallel/ops_info/activation_info.cc | 163 ++++++++++++++++++ .../ccsrc/parallel/ops_info/activation_info.h | 20 +++ mindspore/ccsrc/parallel/ops_info/ops_utils.h | 4 + .../ccsrc/parallel/step_auto_parallel.cc | 1 + tests/ut/python/parallel/test_expand_dims.py | 110 ++++++++++++ 6 files changed, 299 insertions(+) create mode 100644 tests/ut/python/parallel/test_expand_dims.py diff --git a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h index e6e1b41d76..1270116f50 100644 --- a/mindspore/ccsrc/parallel/dynamic_creator.h +++ b/mindspore/ccsrc/parallel/dynamic_creator.h @@ -125,6 +125,7 @@ REGISTER(SqrtInfo); REGISTER(GetNextInfo); REGISTER(NegInfo); REGISTER(BatchMatMulInfo); +REGISTER(ExpandDimsInfo); } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.cc b/mindspore/ccsrc/parallel/ops_info/activation_info.cc index 13155ee4f1..9ba3624b01 100644 --- 
a/mindspore/ccsrc/parallel/ops_info/activation_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.cc @@ -381,5 +381,168 @@ Status CastInfo::InferMirrorOps() { return SUCCESS; } + +Status ExpandDimsInfo::GetAttrs() { + if (input_value_.size() != EXPANDDIMS_INPUT_SIZE) { + MS_LOG(ERROR) << name_ << ": Invalid inputs size " << input_value_.size(); + return FAILED; + } + + if (!input_value_.back()->isa()) { + MS_LOG(ERROR) << name_ << ": The type of axis is not int"; + return FAILED; + } + + int32_t axis = GetValue(input_value_.back()); + + if (inputs_shape_.empty()) { + MS_LOG(ERROR) << name_ << ": The inputs shape is empty"; + return FAILED; + } + + int32_t dim = SizeToInt(inputs_shape_[0].size()); + if ((axis > dim) || (axis < -dim - 1)) { + MS_LOG(ERROR) << name_ << ": The axis(" << axis << ") is out of range[" << -dim - 1 << ", " << dim << "]"; + return FAILED; + } + + if (axis < 0) { + positive_axis_ = dim + axis + 1; + } else { + positive_axis_ = axis; + } + MS_LOG(INFO) << name_ << ": The axis is " << axis << ", and the positive axis is " << positive_axis_; + return SUCCESS; +} + +Status ExpandDimsInfo::InferTensorMap() { + if (inputs_shape_.empty()) { + MS_LOG(ERROR) << name_ << ": The inputs shape is empty"; + return FAILED; + } + + // for example: if the dimension of input is 3, and the axis is 2, + // then the input_tensor_map is [2, 1, 0], the output_tensor_map is [2, 1, -1, 0] + std::vector input_tensor_map, output_tensor_map; + size_t size = inputs_shape_[0].size(); + for (size_t i = 0; i < size; ++i) { + input_tensor_map.push_back(SizeToInt(size - i - 1)); + } + + inputs_tensor_map_.push_back(input_tensor_map); + + output_tensor_map = input_tensor_map; + if ((positive_axis_ < 0) || (positive_axis_ > SizeToInt(size))) { + MS_LOG(ERROR) << name_ << ": Invalid positive axis " << positive_axis_; + return FAILED; + } + (void)output_tensor_map.insert(output_tensor_map.begin() + positive_axis_, NO_SPLIT_MAP); + 
outputs_tensor_map_.push_back(output_tensor_map); + + MS_LOG(INFO) << name_ << ": The tensor map of input is " << ShapeToString(input_tensor_map) + << ", and the tensor map of output is " << ShapeToString(output_tensor_map); + return SUCCESS; +} + +Status ExpandDimsInfo::InferTensorStrategy() { + if (strategy_ == nullptr) { + MS_LOG(ERROR) << name_ << ": The strategy is null"; + return FAILED; + } + + inputs_strategy_ = strategy_->GetInputDim(); + if (inputs_strategy_.empty()) { + MS_LOG(ERROR) << name_ << ": The strategy is empty"; + return FAILED; + } + + Shape output_strategy = inputs_strategy_[0]; + if ((positive_axis_ < 0) || (positive_axis_ > SizeToInt(output_strategy.size()))) { + MS_LOG(ERROR) << name_ << ": Invalid positive axis " << positive_axis_; + return FAILED; + } + (void)output_strategy.insert(output_strategy.begin() + positive_axis_, NO_SPLIT_STRATEGY); + outputs_strategy_ = {output_strategy}; + return SUCCESS; +} + +Status ExpandDimsInfo::InferTensorInfo() { + if (inputs_shape_.empty() || outputs_shape_.empty()) { + MS_LOG(ERROR) << name_ << ": The shape of inputs or outputs is empty"; + return FAILED; + } + + if (inputs_tensor_map_.empty() || outputs_tensor_map_.empty()) { + MS_LOG(ERROR) << name_ << ": The tensor map of inputs or outputs is empty"; + return FAILED; + } + + Shape input_shape = inputs_shape_[0]; + Shape output_shape = outputs_shape_[0]; + + // infer slice shape + if (InferTensorStrategy() != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Infer tensor strategy failed"; + return FAILED; + } + Shapes inputs_slice_shape, outputs_slice_shape; + if (InferSliceShape(inputs_strategy_, outputs_strategy_, &inputs_slice_shape, &outputs_slice_shape) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Infer slice shape failed"; + return FAILED; + } + + if (inputs_slice_shape.empty() || outputs_slice_shape.empty()) { + MS_LOG(ERROR) << name_ << ": The slice shape of inputs or outputs is empty"; + return FAILED; + } + + Shape input_slice_shape = 
inputs_slice_shape[0]; + Shape output_slice_shape = outputs_slice_shape[0]; + + TensorLayout input_tensor_layout, output_tensor_layout; + if (input_tensor_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], input_shape) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Init tensor layout for input failed"; + return FAILED; + } + + if (output_tensor_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[0], output_shape) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Init tensor layout for output failed"; + return FAILED; + } + + TensorInfo input_tensor_info(input_tensor_layout, input_shape, input_slice_shape); + TensorInfo output_tensor_info(output_tensor_layout, output_shape, output_slice_shape); + + inputs_tensor_info_.push_back(input_tensor_info); + outputs_tensor_info_.push_back(output_tensor_info); + return SUCCESS; +} + +Status ExpandDimsInfo::InferMirrorOps() { + mirror_ops_.clear(); + + if (inputs_tensor_map_.empty()) { + MS_LOG(ERROR) << name_ << ": The tensor map of inputs is empty"; + return FAILED; + } + + std::vector group; + if (CreateGroupByTensorMap(inputs_tensor_map_[0], &group) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Create group failed"; + return FAILED; + } + + if (group.empty()) { + MS_LOG(INFO) << name_ << ": No need to create mirror ops"; + return SUCCESS; + } + + OperatorVector mirror_op, placeholder_op; + mirror_op = CreateMirrorOps(group[0].name(), group[0].GetDevNum()); + mirror_ops_.push_back(mirror_op); + mirror_ops_.push_back(placeholder_op); + MS_LOG(INFO) << name_ << ": Create mirror ops success, the group name is " << group[0].name(); + return SUCCESS; +} } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.h b/mindspore/ccsrc/parallel/ops_info/activation_info.h index 3cadad6b80..183b593e23 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.h +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.h @@ -174,6 +174,26 @@ class NegInfo : public 
ActivationOther { : ActivationOther(name, inputs_shape, outputs_shape, attrs) {} ~NegInfo() override = default; }; + +class ExpandDimsInfo : public ActivationOther { + public: + ExpandDimsInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, + const PrimitiveAttrs& attrs) + : ActivationOther(name, inputs_shape, outputs_shape, attrs) {} + ~ExpandDimsInfo() override = default; + + protected: + Status GetAttrs() override; + Status InferTensorMap() override; + Status InferTensorInfo() override; + Status InferMirrorOps() override; + Status InferTensorStrategy(); + + private: + int32_t positive_axis_ = -1; + Strategys inputs_strategy_; + Strategys outputs_strategy_; +}; } // namespace parallel } // namespace mindspore #endif // MINDSPORE_CCSRC_OPTIMIZER_OPS_INFO_PARALLEL_ACTIVATION_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index 4062847d73..fe2a5d2c86 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -24,6 +24,8 @@ constexpr size_t PRELU_OUTPUTS_SIZE = 1; constexpr size_t PRELU_SECOND_INPUT_SIZE = 1; constexpr int32_t PRELU_CHANNEL_INDEX = 1; constexpr int32_t PRELU_CHANNEL_STRATEGY = 1; +constexpr int32_t NO_SPLIT_MAP = -1; +constexpr int32_t NO_SPLIT_STRATEGY = 1; constexpr size_t MATMUL_ATTRS_SIZE = 2; constexpr size_t MATMUL_INPUTS_SIZE = 2; constexpr size_t MATMUL_OUTPUTS_SIZE = 1; @@ -31,6 +33,7 @@ constexpr size_t ACTIVATION_ATTR_SIZE = 1; constexpr size_t SOFTMAX_ATTR_SIZE = 1; constexpr size_t ACTIVATION_INPUTS_SIZE = 1; constexpr size_t ACTIVATION_OUTPUTS_SIZE = 1; +constexpr size_t EXPANDDIMS_INPUT_SIZE = 2; constexpr size_t SoftmaxCrossEntropyWithLogitsAttrSize = 1; constexpr size_t SoftmaxCrossEntropyWithLogitsInputsSize = 2; constexpr size_t SoftmaxCrossEntropyWithLogitsOutputsSize = 2; @@ -191,6 +194,7 @@ constexpr char GET_NEXT[] = "GetNext"; constexpr char SQUEEZE[] = "Squeeze"; 
constexpr char Neg[] = "Neg"; constexpr char BATCH_MATMUL[] = "BatchMatMul"; +constexpr char EXPAND_DIMS[] = "ExpandDims"; // Parallel don't care constexpr char TUPLE_GETITEM[] = "tuple_getitem"; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index a359905494..50e6a1e84e 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -104,6 +104,7 @@ std::vector splittable_op_ = {MATMUL, CAST, Neg, BATCH_MATMUL, + EXPAND_DIMS, SQUEEZE}; std::vector elementwise_op_ = {ACTIVATION, GELU, TANH, SOFTMAX, LOG_SOFTMAX, RELU, SQRT, diff --git a/tests/ut/python/parallel/test_expand_dims.py b/tests/ut/python/parallel/test_expand_dims.py new file mode 100644 index 0000000000..676e9ed523 --- /dev/null +++ b/tests/ut/python/parallel/test_expand_dims.py @@ -0,0 +1,110 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +import mindspore as ms +from mindspore import context, Tensor, Parameter +from mindspore.nn import Cell, TrainOneStepCell, Momentum +from mindspore.ops import operations as P +from mindspore.common.api import _executor + + +class Net(Cell): + def __init__(self, mul_weight, strategy1=None, strategy2=None, strategy3=None): + super().__init__() + self.mul = P.Mul().set_strategy(strategy1) + self.expand_dims = P.ExpandDims().set_strategy(strategy2) + self.mul2 = P.Mul().set_strategy(strategy3) + self.mul_weight = Parameter(mul_weight, "w1") + + def construct(self, x, b): + out = self.mul(x, self.mul_weight) + out = self.expand_dims(out, -1) + out = self.mul2(out, b) + return out + + +class Net2(Cell): + def __init__(self, mul_weight, strategy1=None, strategy2=None): + super().__init__() + self.expand_dims = P.ExpandDims().set_strategy(strategy1) + self.mul = P.Mul().set_strategy(strategy2) + self.mul_weight = Parameter(mul_weight, "w1") + + def construct(self, x, b): + out = self.expand_dims(self.mul_weight, -1) + out = self.mul(out, b) + return out + + +_x = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) +_w1 = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) +_b = Tensor(np.ones([128, 64, 32, 1]), dtype=ms.float32) + + +def compile(net): + optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) + train_net = TrainOneStepCell(net, optimizer) + _executor.compile(train_net, _x, _b) + context.reset_auto_parallel_context() + + +def test_expand_dims_data_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((16, 1, 1), (16, 1, 1)) + strategy2 = ((16, 1, 1), ) + strategy3 = ((16, 1, 1, 1), (16, 1, 1, 1)) + net = Net(_w1, strategy1, strategy2, strategy3) + compile(net) + + +def test_expand_dims_model_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((1, 1, 16), (1, 1, 16)) + 
strategy2 = ((1, 1, 16), ) + strategy3 = ((1, 1, 16, 1), (1, 1, 16, 1)) + net = Net(_w1, strategy1, strategy2, strategy3) + compile(net) + + +def test_expand_dims_hybrid_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4), (2, 2, 4)) + strategy2 = ((2, 2, 4), ) + strategy3 = ((2, 2, 4, 1), (2, 2, 4, 1)) + net = Net(_w1, strategy1, strategy2, strategy3) + compile(net) + + +def test_expand_dims_auto_parallel(): + context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0) + net = Net(_w1) + compile(net) + + +def test_expand_dims_repeat_calc(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4), (2, 2, 4)) + strategy2 = ((1, 2, 2), ) + strategy3 = ((2, 2, 4, 1), (2, 2, 4, 1)) + net = Net(_w1, strategy1, strategy2, strategy3) + compile(net) + + +def test_expand_dims_parameter(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((1, 2, 2), ) + strategy2 = ((2, 2, 4, 1), (2, 2, 4, 1)) + net = Net2(_w1, strategy1, strategy2) + compile(net) From 0eb72d76f076d5bdeb08093721b9104a40e1ad28 Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Wed, 1 Apr 2020 21:08:40 +0800 Subject: [PATCH 088/367] import comment and function of op print --- mindspore/ccsrc/transform/op_adapter.h | 2 +- mindspore/ops/operations/debug_ops.py | 14 +++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/mindspore/ccsrc/transform/op_adapter.h b/mindspore/ccsrc/transform/op_adapter.h index 3dd299f83d..524cdfb4aa 100644 --- a/mindspore/ccsrc/transform/op_adapter.h +++ b/mindspore/ccsrc/transform/op_adapter.h @@ -513,7 +513,7 @@ class OpAdapter : public BaseOpAdapter { return; } } else { - MS_LOG(ERROR) << "Update output desc failed, unknow output shape type"; + MS_LOG(WARNING) << "Update output desc 
failed, unknow output shape type"; return; } MS_EXCEPTION_IF_NULL(node); diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py index 6640ef87ca..a69dcc2df1 100644 --- a/mindspore/ops/operations/debug_ops.py +++ b/mindspore/ops/operations/debug_ops.py @@ -14,6 +14,7 @@ # ============================================================================ """debug_ops""" +from ..._checkparam import ParamValidator as validator from ...common import dtype as mstype from ..primitive import Primitive, prim_attr_register, PrimitiveWithInfer @@ -157,19 +158,20 @@ class InsertGradientOf(PrimitiveWithInfer): class Print(PrimitiveWithInfer): """ - Output tensor to stdout. + Output tensor or string to stdout. Inputs: - - **input_x** (Tensor) - The graph node to attach to. + - **input_x** (Union[Tensor, str]) - The graph node to attach to. The input supports + multiple strings and tensors which are separated by ','. Examples: >>> class PrintDemo(nn.Cell): - >>> def __init__(self,): + >>> def __init__(self): >>> super(PrintDemo, self).__init__() >>> self.print = P.Print() >>> - >>> def construct(self, x): - >>> self.print(x) + >>> def construct(self, x, y): + >>> self.print('Print Tensor x and Tensor y:', x, y) >>> return x """ @@ -181,4 +183,6 @@ class Print(PrimitiveWithInfer): return [1] def infer_dtype(self, *inputs): + for dtype in inputs: + validator.check_subclass("input", dtype, (mstype.tensor, mstype.string)) return mstype.int32 From 651d9aae4022e97fe6e647f0b3362116d1571eec Mon Sep 17 00:00:00 2001 From: zjun Date: Wed, 1 Apr 2020 19:41:21 +0800 Subject: [PATCH 089/367] add new mode for operator info register --- mindspore/ccsrc/kernel/oplib/opinfo.h | 9 + mindspore/ccsrc/kernel/oplib/oplib.cc | 61 ++- mindspore/ccsrc/kernel/oplib/oplib.h | 4 +- mindspore/ops/__init__.py | 4 +- .../_op_impl/tbe/adam_apply_one_with_decay.py | 231 ++------- mindspore/ops/op_info_register.py | 441 +++++++++++++++++- 6 files changed, 535 insertions(+), 215 
deletions(-) diff --git a/mindspore/ccsrc/kernel/oplib/opinfo.h b/mindspore/ccsrc/kernel/oplib/opinfo.h index 7861da34d9..56abea9269 100644 --- a/mindspore/ccsrc/kernel/oplib/opinfo.h +++ b/mindspore/ccsrc/kernel/oplib/opinfo.h @@ -61,6 +61,7 @@ class OpIOInfo { std::string name() const { return name_; } bool need_compile() const { return need_compile_; } std::string param_type() const { return param_type_; } + std::string reshape_type() const { return reshape_type_; } std::string shape() const { return shape_; } std::vector dtypes() const { return dtypes_; } std::vector formats() const { return formats_; } @@ -69,6 +70,7 @@ class OpIOInfo { void set_name(const std::string& name) { name_ = name; } void set_need_compile(const bool need_compile) { need_compile_ = need_compile; } void set_param_type(const std::string& param_type) { param_type_ = param_type; } + void set_reshape_type(const std::string& reshape_type) { reshape_type_ = reshape_type; } void set_shape(const std::string& shape) { shape_ = shape; } void set_dtypes(const std::vector& dtype) { dtypes_ = dtype; } void set_formats(const std::vector& formats) { formats_ = formats; } @@ -78,6 +80,7 @@ class OpIOInfo { std::string name_; bool need_compile_ = false; std::string param_type_; + std::string reshape_type_; std::string shape_; std::vector dtypes_; std::vector formats_; @@ -96,6 +99,8 @@ class OpInfo { int compute_cost() const { return compute_cost_; } std::string kernel_name() const { return kernel_name_; } bool partial_flag() const { return partial_flag_; } + bool dynamic_format() const { return dynamic_format_; } + std::string op_pattern() const { return op_pattern_; } std::vector> attrs_ptr() const { return attrs_ptr_; } std::vector> inputs_ptr() const { return inputs_ptr_; } std::vector> outputs_ptr() const { return outputs_ptr_; } @@ -110,6 +115,8 @@ class OpInfo { void set_compute_cost(const int compute_cost) { compute_cost_ = compute_cost; } void set_kernel_name(const std::string& kernel_name) { 
kernel_name_ = kernel_name; } void set_partial_flag(const bool partial_flag) { partial_flag_ = partial_flag; } + void set_dynamic_format(const bool dynamic_format) { dynamic_format_ = dynamic_format; } + void set_op_pattern(const std::string op_pattern) { op_pattern_ = op_pattern; } void add_attrs_ptr(const std::shared_ptr& attr) { attrs_ptr_.push_back(attr); } void add_inputs_ptr(const std::shared_ptr& input) { inputs_ptr_.push_back(input); } void add_outputs_ptr(const std::shared_ptr& output) { outputs_ptr_.push_back(output); } @@ -129,6 +136,8 @@ class OpInfo { int compute_cost_ = 0; std::string kernel_name_; bool partial_flag_ = false; + bool dynamic_format_ = false; + std::string op_pattern_; std::vector> attrs_ptr_; std::vector> inputs_ptr_; std::vector> outputs_ptr_; diff --git a/mindspore/ccsrc/kernel/oplib/oplib.cc b/mindspore/ccsrc/kernel/oplib/oplib.cc index b20bd741f1..4059b8e246 100644 --- a/mindspore/ccsrc/kernel/oplib/oplib.cc +++ b/mindspore/ccsrc/kernel/oplib/oplib.cc @@ -26,18 +26,22 @@ namespace mindspore { namespace kernel { constexpr auto kImplyType = "imply_type"; constexpr auto kOpName = "op_name"; -constexpr auto kTbe = "TBE"; -constexpr auto kAkg = "akg"; -constexpr auto kAutodiff = "AutoDiff"; constexpr auto kFusionType = "fusion_type"; constexpr auto kAsyncFlag = "async_flag"; constexpr auto kBinfileName = "binfile_name"; constexpr auto kComputeCost = "compute_cost"; constexpr auto kKernelName = "kernel_name"; constexpr auto kPartialFlag = "partial_flag"; +constexpr auto kReshapeType = "reshape_type"; +constexpr auto kOpPattern = "op_pattern"; +constexpr auto kDynamicFormat = "dynamic_format"; +constexpr auto kDtypeFormat = "dtype_format"; constexpr auto kAttr = "attr"; constexpr auto kIputs = "inputs"; constexpr auto kOutputs = "outputs"; +constexpr auto kTbe = "TBE"; +constexpr auto kAkg = "akg"; +constexpr auto kAutodiff = "AutoDiff"; constexpr auto kName = "name"; constexpr auto kParamType = "param_type"; constexpr auto kDtype = 
"dtype"; @@ -89,8 +93,8 @@ bool OpLib::DecodeOpInfo(const nlohmann::json& obj, const mindspore::kernel::OpI std::shared_ptr op_info = std::make_shared(); MS_EXCEPTION_IF_NULL(op_info); op_info->set_op_name(obj.at(kOpName)); - op_info->set_imply_type(imply_type); op_info->set_impl_path(impl_path); + op_info->set_imply_type(imply_type); op_info->set_fusion_type(obj.at(kFusionType)); if (imply_type == kTBE) { op_info->set_async_flag(obj.at(kAsyncFlag)); @@ -98,6 +102,12 @@ bool OpLib::DecodeOpInfo(const nlohmann::json& obj, const mindspore::kernel::OpI op_info->set_compute_cost(obj.at(kComputeCost)); op_info->set_kernel_name(obj.at(kKernelName)); op_info->set_partial_flag(obj.at(kPartialFlag)); + if (obj.find(kOpPattern) != obj.end()) { + op_info->set_op_pattern(obj.at(kOpPattern)); + } + if (obj.find(kDynamicFormat) != obj.end()) { + op_info->set_dynamic_format(obj.at(kDynamicFormat)); + } } auto attrs = obj.at(kAttr); for (const auto& attr : attrs) { @@ -106,16 +116,20 @@ bool OpLib::DecodeOpInfo(const nlohmann::json& obj, const mindspore::kernel::OpI return false; } } + nlohmann::json dtype_format; + if (obj.find(kDtypeFormat) != obj.end()) { + dtype_format = obj.at(kDtypeFormat); + } auto inputs = obj.at(kIputs); for (const auto& input : inputs) { - if (!DecodeInputOutput(input, imply_type, kInput, op_info)) { + if (!DecodeInputOutput(input, imply_type, kInput, op_info, dtype_format)) { MS_LOG(DEBUG) << "DecodeInputOutput Failed"; return false; } } auto outputs = obj.at(kOutputs); for (const auto& output : outputs) { - if (!DecodeInputOutput(output, imply_type, kOutput, op_info)) { + if (!DecodeInputOutput(output, imply_type, kOutput, op_info, dtype_format)) { MS_LOG(DEBUG) << "DecodeInputOutput Failed"; return false; } @@ -156,16 +170,42 @@ bool OpLib::DecodeAttr(const nlohmann::json& obj, const OpImplyType imply_type, return ret; } +bool OpLib::DecodeDtypeFormat(const nlohmann::json& dtype_format, const std::shared_ptr& op_io, + size_t index) { + bool ret = 
true; + try { + std::vector dtype; + std::vector format; + for (const auto& it : dtype_format) { + dtype.emplace_back(it[index][0]); + format.emplace_back(it[index][1]); + } + op_io->set_dtypes(dtype); + op_io->set_formats(format); + } catch (const std::exception& e) { + MS_LOG(ERROR) << "DecodeDtypeFormat falied" << e.what(); + ret = false; + } + return ret; +} + bool OpLib::DecodeInputOutput(const nlohmann::json& obj, const OpImplyType imply_type, const OpIOType io_type, - const std::shared_ptr& op_info) { + const std::shared_ptr& op_info, const nlohmann::json& dtype_format) { bool ret = true; try { std::shared_ptr op_io = std::make_shared(); MS_EXCEPTION_IF_NULL(op_io); op_io->set_index(obj.at(kIndex)); op_io->set_name(obj.at(kName)); - op_io->set_dtypes(obj.at(kDtype)); - op_io->set_formats(obj.at(kFormat)); + if (!dtype_format.empty()) { + if (!DecodeDtypeFormat(dtype_format, op_io, op_info->inputs_ptr().size() + op_info->outputs_ptr().size())) { + MS_LOG(ERROR) << "Decode dtype format failed"; + return false; + } + } else { + op_io->set_dtypes(obj.at(kDtype)); + op_io->set_formats(obj.at(kFormat)); + } if (op_io->dtypes().size() != op_io->formats().size()) { MS_LOG(DEBUG) << "op" << op_io->name() << "dtype size:" << op_io->dtypes() << "is not equal to format size:" << op_io->formats(); @@ -181,6 +221,9 @@ bool OpLib::DecodeInputOutput(const nlohmann::json& obj, const OpImplyType imply if (obj.find(kShape) != obj.end()) { op_io->set_shape(obj.at(kShape)); } + if (obj.find(kReshapeType) != obj.end()) { + op_io->set_reshape_type(obj.at(kReshapeType)); + } } if (io_type == kInput) { diff --git a/mindspore/ccsrc/kernel/oplib/oplib.h b/mindspore/ccsrc/kernel/oplib/oplib.h index 37c3fdcfc7..a4c5e04bb1 100644 --- a/mindspore/ccsrc/kernel/oplib/oplib.h +++ b/mindspore/ccsrc/kernel/oplib/oplib.h @@ -38,8 +38,10 @@ class OpLib { static bool DecodeOpInfo(const nlohmann::json& obj, const OpImplyType imply_type, const std::string& impl_path); static bool DecodeAttr(const 
nlohmann::json& obj, const OpImplyType imply_type, const std::shared_ptr& op_info); + static bool DecodeDtypeFormat(const nlohmann::json& dtype_format, const std::shared_ptr& op_io, + size_t index); static bool DecodeInputOutput(const nlohmann::json& obj, const OpImplyType imply_type, const OpIOType io_type, - const std::shared_ptr& op_info); + const std::shared_ptr& op_info, const nlohmann::json& dtype_format); static bool GetRefInfo(const std::shared_ptr& op_info); static bool CheckRepetition(const std::shared_ptr& op_info); }; diff --git a/mindspore/ops/__init__.py b/mindspore/ops/__init__.py index 23109b386a..6f4f680672 100644 --- a/mindspore/ops/__init__.py +++ b/mindspore/ops/__init__.py @@ -30,7 +30,7 @@ Note: from .primitive import Primitive, PrimitiveWithInfer, prim_attr_register from .vm_impl_registry import get_vm_impl_fn, vm_impl_registry -from .op_info_register import op_info_register +from .op_info_register import op_info_register, TBERegOp, DataType from .primitive import constexpr from .._c_expression import signature_rw, signature_kind @@ -40,6 +40,6 @@ __primitive__ = [ ] __all__ = ["get_vm_impl_fn", "vm_impl_registry", - "op_info_register", + "op_info_register", "TBERegOp", "DataType", "constexpr"] __all__.extend(__primitive__) diff --git a/mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py b/mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py index d1c43ca957..a8911e81bd 100644 --- a/mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +++ b/mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py @@ -14,208 +14,41 @@ # ============================================================================ """AdamApplyOneWithDecay op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +adam_apply_one_with_decay_op_info = TBERegOp("AdamApplyOneWithDecay") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("adam_apply_one_with_decay.so") \ 
+ .compute_cost(10) \ + .kernel_name("adam_apply_one_with_decay") \ + .partial_flag(True) \ + .input(0, "input0", False, "required", "all") \ + .input(1, "input1", False, "required", "all") \ + .input(2, "input2", False, "required", "all") \ + .input(3, "input3", False, "required", "all") \ + .input(4, "input4", False, "required", "all") \ + .input(5, "mul0_x", False, "required", "all") \ + .input(6, "mul1_x", False, "required", "all") \ + .input(7, "mul2_x", False, "required", "all") \ + .input(8, "mul3_x", False, "required", "all") \ + .input(9, "mul4_x", False, "required", "all") \ + .input(10, "add2_y", False, "required", "all") \ + .output(0, "output0", False, "required", "all") \ + .output(1, "output1", False, "required", "all") \ + .output(2, "output2", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "AdamApplyOneWithDecay", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "adam_apply_one_with_decay.so", - "compute_cost": 10, - "kernel_name": "adam_apply_one_with_decay", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input0", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - 
"index": 1, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input2", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input3", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input4", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 5, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "mul0_x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 6, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "mul1_x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 7, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "mul2_x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 8, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "mul3_x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 9, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "mul4_x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 10, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "add2_y", - "need_compile": 
false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output0", - "need_compile": true, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output1", - "need_compile": true, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output2", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(adam_apply_one_with_decay_op_info) def _adam_apply_one_with_decay_tbe(): """AdamApplyOneWithDecay TBE register""" return diff --git a/mindspore/ops/op_info_register.py b/mindspore/ops/op_info_register.py index 80f40ff1d6..6a42099c89 100644 --- a/mindspore/ops/op_info_register.py +++ b/mindspore/ops/op_info_register.py @@ -16,6 +16,7 @@ """Operators info register.""" import os +import json import inspect from mindspore._c_expression import Oplib from mindspore._checkparam import ParamValidator as validator @@ -32,21 +33,453 @@ def op_info_register(op_info): 'op_info' must be a str of json format represent the op info, the op info will be added into oplib. Args: - op_info (str): op info of json format. + op_info (str or dict): op info of json format. Returns: Function, returns a decorator for op info register. """ def register_decorator(func): - validator.check_type("op_info", op_info, [str]) + if isinstance(op_info, dict): + op_info_real = json.dumps(op_info) + else: + op_info_real = op_info + validator.check_type("op_info", op_info_real, [str]) op_lib = Oplib() file_path = os.path.realpath(inspect.getfile(func)) # keep the path custom ops implementation. 
imply_path = "" if BUILT_IN_OPS_REGISTER_PATH in file_path else file_path - if not op_lib.reg_op(op_info, imply_path): - raise ValueError('Invalid op info {}:\n{}\n'.format(file_path, op_info)) + if not op_lib.reg_op(op_info_real, imply_path): + raise ValueError('Invalid op info {}:\n{}\n'.format(file_path, op_info_real)) def wrapped_function(*args, **kwargs): return func(*args, **kwargs) return wrapped_function return register_decorator + + +class RegOp(): + """ + Base class for op info register. + + Args: + op_name (str): Name of op. + inputs (list): Inputs inoformation of the op. + outputs (list): Outputs information of the op. + attr_ (list): Attribute information of the op. + dtype_format_ (list): Dtype and format information of the op. + """ + + def __init__(self, op_name=""): + if not isinstance(op_name, str): + raise ValueError("op name value must be string") + if not op_name.strip(): + raise ValueError("op name is empty") + self.op_name = op_name + self.inputs = [] + self.outputs = [] + self.attr_ = [] + self.dtype_format_ = [] + + def is_string(self, value): + """ + Check if the value is a str type. + + Args: + value: Parameter to to check. + + Raises: + TypeError: If the type of value is not a str. + """ + if not isinstance(value, str): + raise TypeError("%s value must be str" % str(value)) + + def is_int(self, value): + """ + Check if the value is a int. + + Args: + value: Parameter to to check. + + Raises: + TypeError: If the type of value is not a int. + """ + if not isinstance(value, int): + raise TypeError("%s value must be int" % str(value)) + + def is_bool(self, value): + """ + Check if the value is a bool. + + Args: + value: Parameter to to check. + + Raises: + TypeError: If the type of value is not a bool. + """ + if not isinstance(value, bool): + raise TypeError("%s value must be bool" % str(value)) + + def dtype_format(self, *args): + """ + Register dtype and format. + + Args: + args (tuple): Value of dtype and format. 
+ + Raises: + ValueError: If the size of args not equal to input size add output size. + TypeError: If the type of args is not tuple. + """ + if len(self.inputs) + len(self.outputs) != len(args): + raise ValueError("input size add output size must be equal to detype format size") + dtype_format = [] + for arg in args: + if not isinstance(arg, tuple) or len(arg) != 2: + raise ValueError("dtype and format value must be tuple of two elements") + self.is_string(arg[0]) + self.is_string(arg[1]) + dtype_format.append(arg) + self.dtype_format_.append(tuple(dtype_format)) + return self + + def get_op_info(self): + """ + Return all registration information for this instance. + + The '_' character ending the key is removed here for compatibility with previous version. + + Key will be unified into an underlined form later. + """ + op_info = {} + for key, value in self.__dict__.items(): + if isinstance(key, str) and key.endswith('_'): + op_info[key.rstrip('_')] = value + else: + op_info[key] = value + return op_info + + +class TBERegOp(RegOp): + """Class for TBE op info register.""" + + def __init__(self, op_name=""): + super(TBERegOp, self).__init__(op_name) + self.imply_type = "TBE" + self.fusion_type_ = '' + self.async_flag_ = False + self.binfile_name_ = '' + self.compute_cost_ = 10 + self.kernel_name_ = '' + self.partial_flag_ = False + self.reshape_type_ = '' + self.dynamic_format_ = False + self.op_pattern_ = "" + + def fusion_type(self, fusion_type): + """ + Register fusion type. + + Args: + fusion_type (str): Value of fusion type. + """ + self.is_string(fusion_type) + self.fusion_type_ = fusion_type + return self + + def async_flag(self, async_flag): + """ + Register async flag. + + Args: + async_flag (bool): Value of async flag. + """ + self.is_bool(async_flag) + self.async_flag_ = async_flag + return self + + def binfile_name(self, binfile_name): + """ + Register binfile name. + + Args: + binfile_name (str): Name of op binfile. 
+ """ + self.is_string(binfile_name) + self.binfile_name_ = binfile_name + return self + + def compute_cost(self, compute_cost): + """ + Register compute cost. + + Args: + compute_cost (int): Value of compute cost. + """ + self.is_int(compute_cost) + self.compute_cost_ = compute_cost + return self + + def kernel_name(self, kernel_name): + """ + Register kernel name. + + Args: + kernel_name (str): Name of op kernel. + """ + self.is_string(kernel_name) + self.kernel_name_ = kernel_name + return self + + def partial_flag(self, partial_flag): + """ + Register partial flag. + + Args: + partial_flag (bool): Value of partial flag. + """ + self.is_bool(partial_flag) + self.partial_flag_ = partial_flag + return self + + def reshape_type(self, reshape_type): + """ + Register reshape type. + + Args: + reshape_type (str): Value of reshape type. + """ + self.is_string(reshape_type) + self.reshape_type_ = reshape_type + return self + + def dynamic_format(self, dynamic_format): + """ + Register dynamic format. + + Args: + reshape_type (bool): Value of dynamic format. + """ + self.is_bool(dynamic_format) + self.dynamic_format_ = dynamic_format + return self + + def op_pattern(self, pattern=None): + """ + Register op pattern information. + + Args: + pattern (str): Value of op pattern. + """ + if pattern is not None and self.istring(pattern): + self.op_pattern_ = pattern + return self + + def attr(self, name=None, param_type=None, value_type=None, value=None, default_value=None, **kwargs): + """ + Register op attribute information. + + Args: + name (str): Name of the attribute. Default: None. + param_type (str): Param type of the attribute. Default: None. + type (str): Type of the attribute. Default: None. + value (str): Value of the attribute. Default: None. + default_value (str): Default value of attribute. Default: None. + kwargs (dict): Other information for the attribute. 
+ """ + param_list = [name, param_type, value_type, value, default_value] + attr_dict = {} + for index, element in enumerate(param_list): + if element is not None: + self.is_string(element) + if index == 0: + attr_dict["name"] = element + elif index == 1: + attr_dict["param_type"] = element + elif index == 2: + attr_dict["type"] = element + elif index == 3: + attr_dict["value"] = element + elif index == 4: + attr_dict["default_value"] = element + if kwargs: + attr_dict = dict(attr_dict, **kwargs) + self.attr_.append(attr_dict) + return self + + def input(self, index=None, name=None, need_compile=None, param_type=None, shape=None, **kwargs): + """ + Register op input information. + + Args: + index (int): Order of the input. Default: None. + name (str): Name of the input. Default: None. + need_compile (bool): The input need compile whether or not. Default: None. + param_type (str): Type of the input. Default: None. + shape (str): Shape of the input. Default: None. + kwargs (dict): Other information for the input. + """ + param_list = [index, name, need_compile, param_type, shape] + input_dict = {} + for idx, element in enumerate(param_list): + if element is not None: + if idx == 0: + self.is_int(element) + input_dict["index"] = element + elif idx == 1: + self.is_string(element) + input_dict["name"] = element + elif idx == 2: + self.is_bool(element) + input_dict["need_compile"] = element + elif idx == 3: + self.is_string(element) + input_dict["param_type"] = element + elif idx == 4: + self.is_string(element) + input_dict["shape"] = element + if kwargs: + input_dict = dict(input_dict, **kwargs) + self.inputs.append(input_dict) + return self + + def output(self, index=None, name=None, need_compile=None, param_type=None, shape=None, **kwargs): + """ + Register op output information. + + Args: + index (int): Order of the output. Default: None. + name (str): Name of the output. Default: None. + need_compile (bool): The output need compile whether or not. Default: None. 
+ param_type (str): Type of the output. Default: None. + shape (str): Shape of the output. Default: None. + kwargs (dict): Other information for the output. + """ + param_list = [index, name, need_compile, param_type, shape] + output_dict = {} + for idx, element in enumerate(param_list): + if element is not None: + if idx == 0: + self.is_int(element) + output_dict["index"] = element + elif idx == 1: + self.is_string(element) + output_dict["name"] = element + elif idx == 2: + self.is_bool(element) + output_dict["need_compile"] = element + elif idx == 3: + self.is_string(element) + output_dict["param_type"] = element + elif idx == 4: + self.is_string(element) + output_dict["shape"] = element + if kwargs: + output_dict = dict(output_dict, **kwargs) + self.outputs.append(output_dict) + return self + +class DataType(): + """ + Various combinations of dtype and formatself. + + The current list below maybe not completed. If necessary, please add it. + """ + + BOOL_None = ("bool", "") + BOOL_Default = ("bool", "DefaultFormat") + BOOL_5HD = ("bool", "NC1HWC0") + BOOL_NCHW = ("bool", "NCHW") + BOOL_NHWC = ("bool", "NHWC") + BOOL_HWCN = ("bool", "HWCN") + + I8_None = ("int8", "") + I8_Default = ("int8", "DefaultFormat") + I8_5HD = ("int8", "NC1HWC0") + I8_FracZ = ("int8", "Fracz") + I8_FracNZ = ("int8", "FRACTAL_NZ") + I8_NCHW = ("int8", "NCHW") + I8_NHWC = ("int8", "NHWC") + I8_HWCN = ("int8", "HWCN") + + U8_None = ("uint8", "") + U8_Default = ("uint8", "DefaultFormat") + U8_5HD = ("uint8", "NC1HWC0") + U8_FracZ = ("uint8", "Fracz") + U8_FracNZ = ("uint8", "FRACTAL_NZ") + U8_NCHW = ("uint8", "NCHW") + U8_NHWC = ("uint8", "NHWC") + U8_HWCN = ("uint8", "HWCN") + + I16_None = ("int16", "") + I16_Default = ("int16", "DefaultFormat") + I16_5HD = ("int16", "NC1HWC0") + I16_FracZ = ("int16", "Fracz") + I16_FracNZ = ("int16", "FRACTAL_NZ") + I16_NCHW = ("int16", "NCHW") + I16_NHWC = ("int16", "NHWC") + I16_HWCN = ("int16", "HWCN") + + U16_None = ("uint16", "") + U16_Default = 
("uint16", "DefaultFormat") + U16_5HD = ("uint16", "NC1HWC0") + U16_FracZ = ("uint16", "Fracz") + U16_FracNZ = ("uint16", "FRACTAL_NZ") + U16_NCHW = ("uint16", "NCHW") + U16_NHWC = ("uint16", "NHWC") + U16_HWCN = ("uint16", "HWCN") + + I32_None = ("int32", "") + I32_Default = ("int32", "DefaultFormat") + I32_5HD = ("int32", "NC1HWC0") + I32_FracZ = ("int32", "Fracz") + I32_FracNZ = ("int32", "FRACTAL_NZ") + I32_NCHW = ("int32", "NCHW") + I32_NHWC = ("int32", "NHWC") + I32_HWCN = ("int32", "HWCN") + + U32_None = ("uint32", "") + U32_Default = ("uint32", "DefaultFormat") + U32_5HD = ("uint32", "NC1HWC0") + U32_FracZ = ("uint32", "Fracz") + U32_FracNZ = ("uint32", "FRACTAL_NZ") + U32_NCHW = ("uint32", "NCHW") + U32_NHWC = ("uint32", "NHWC") + U32_HWCN = ("uint32", "HWCN") + + I64_None = ("int64", "") + I64_Default = ("int64", "DefaultFormat") + I64_5HD = ("int64", "NC1HWC0") + I64_FracZ = ("int64", "Fracz") + I64_FracNZ = ("int64", "FRACTAL_NZ") + I64_NCHW = ("int64", "NCHW") + I64_NHWC = ("int64", "NHWC") + I64_HWCN = ("int64", "HWCN") + + U64_None = ("uint64", "") + U64_Default = ("uint64", "DefaultFormat") + U64_5HD = ("uint64", "NC1HWC0") + U64_FracZ = ("uint64", "Fracz") + U64_FracNZ = ("uint64", "FRACTAL_NZ") + U64_NCHW = ("uint64", "NCHW") + U64_NHWC = ("uint64", "NHWC") + U64_HWCN = ("uint64", "HWCN") + + F16_None = ("float16", "") + F16_Default = ("float16", "DefaultFormat") + F16_5HD = ("float16", "NC1HWC0") + F16_FracZ = ("float16", "Fracz") + F16_FracNZ = ("float16", "FRACTAL_NZ") + F16_C1HWNCoC0 = ("float16", "C1HWNCoC0") + F16_NCHW = ("float16", "NCHW") + F16_NHWC = ("float16", "NHWC") + F16_HWCN = ("float16", "HWCN") + + F32_None = ("float32", "") + F32_Default = ("float32", "DefaultFormat") + F32_5HD = ("float32", "NC1HWC0") + F32_FracZ = ("float32", "Fracz") + F32_FracNZ = ("float32", "FRACTAL_NZ") + F32_C1HWNCoC0 = ("float32", "C1HWNCoC0") + F32_NCHW = ("float32", "NCHW") + F32_NHWC = ("float32", "NHWC") + F32_HWCN = ("float32", "HWCN") From 
144a636b514733b9abca3d4ede90e77ff7cceaa0 Mon Sep 17 00:00:00 2001 From: zhongligeng Date: Fri, 3 Apr 2020 10:45:03 +0800 Subject: [PATCH 090/367] resolve some issue in nn comments --- mindspore/nn/layer/basic.py | 23 +++++++++------------ mindspore/nn/layer/container.py | 5 ++--- mindspore/nn/layer/conv.py | 7 ++----- mindspore/nn/layer/embedding.py | 2 +- mindspore/nn/layer/lstm.py | 6 +++--- mindspore/nn/layer/normalization.py | 4 ++-- mindspore/nn/layer/pooling.py | 16 +++++++-------- mindspore/nn/loss/loss.py | 31 ++++++++++++++--------------- mindspore/nn/metrics/accuracy.py | 17 ++++++++-------- mindspore/nn/metrics/error.py | 8 ++++---- mindspore/nn/metrics/fbeta.py | 11 +++++----- mindspore/nn/metrics/loss.py | 3 +-- mindspore/nn/metrics/precision.py | 18 ++++++++--------- mindspore/nn/metrics/recall.py | 18 ++++++++--------- mindspore/nn/metrics/topk.py | 15 +++++++------- mindspore/nn/optim/adam.py | 6 +++--- mindspore/nn/optim/ftrl.py | 2 +- mindspore/nn/optim/lamb.py | 2 +- mindspore/nn/optim/lars.py | 4 ++-- mindspore/nn/optim/momentum.py | 2 +- mindspore/nn/optim/rmsprop.py | 2 +- mindspore/nn/optim/sgd.py | 2 +- mindspore/nn/wrap/cell_wrapper.py | 29 +++------------------------ mindspore/nn/wrap/grad_reducer.py | 23 +++++++++++++++------ mindspore/nn/wrap/loss_scale.py | 22 ++++++++++---------- 25 files changed, 128 insertions(+), 150 deletions(-) diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py index 9dc0d5e623..30b94c738d 100644 --- a/mindspore/nn/layer/basic.py +++ b/mindspore/nn/layer/basic.py @@ -65,7 +65,7 @@ class Dropout(Cell): Tensor, output tensor with the same shape as the input. 
Examples: - >>> x = mindspore.Tensor(np.ones([20, 16, 50]), mindspore.float32) + >>> x = Tensor(np.ones([20, 16, 50]), mindspore.float32) >>> net = nn.Dropout(keep_prob=0.8) >>> net(x) """ @@ -111,7 +111,7 @@ class Flatten(Cell): Examples: >>> net = nn.Flatten() - >>> input = mindspore.Tensor(np.array([[[1.2, 1.2], [2.1, 2.1]], [[2.2, 2.2], [3.2, 3.2]]]), mindspore.float32) + >>> input = Tensor(np.array([[[1.2, 1.2], [2.1, 2.1]], [[2.2, 2.2], [3.2, 3.2]]]), mindspore.float32) >>> input.shape() (2, 2, 2) >>> net(input) @@ -149,9 +149,6 @@ class Dense(Cell): has_bias (bool): Specifies whether the layer uses a bias vector. Default: True. activation (str): Regularizer function applied to the output of the layer, eg. 'relu'. Default: None. - Returns: - Tensor, output tensor. - Raises: ValueError: If weight_init or bias_init shape is incorrect. @@ -163,7 +160,7 @@ class Dense(Cell): Examples: >>> net = nn.Dense(3, 4) - >>> input = mindspore.Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32) + >>> input = Tensor(np.random.randint(0, 255, [2, 3]), mindspore.float32) >>> net(input) [[ 2.5246444 2.2738023 0.5711005 -3.9399147 ] [ 1.0739875 4.0155234 0.94188046 -5.459526 ]] @@ -243,8 +240,8 @@ class ClipByNorm(Cell): Examples: >>> net = nn.ClipByNorm() - >>> input = mindspore.Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32) - >>> clip_norm = mindspore.Tensor(np.array([100]).astype(np.float32)) + >>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32) + >>> clip_norm = Tensor(np.array([100]).astype(np.float32)) >>> net(input, clip_norm) """ @@ -290,9 +287,6 @@ class Norm(Cell): keep_dims (bool): If True, the axis indicated in `axis` are kept with size 1. Otherwise, the dimensions in `axis` are removed from the output shape. Default: False. - Returns: - Tensor, a Tensor of the same type as input, containing the vector or matrix norms. - Inputs: - **input** (Tensor) - Tensor which is not empty. 
@@ -302,7 +296,7 @@ class Norm(Cell): Examples: >>> net = nn.Norm(axis=0) - >>> input = mindspore.Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32) + >>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32) >>> net(input) """ def __init__(self, axis=(), keep_dims=False): @@ -344,7 +338,8 @@ class OneHot(Cell): when indices[j] = i. Default: 1.0. off_value (float): A scalar defining the value to fill in output[i][j] when indices[j] != i. Default: 0.0. - dtype (:class:`mindspore.dtype`): Default: mindspore.float32. + dtype (:class:`mindspore.dtype`): Data type of 'on_value' and 'off_value', not the + data type of indices. Default: mindspore.float32. Inputs: - **indices** (Tensor) - A tensor of indices of data type mindspore.int32 and arbitrary shape. @@ -355,7 +350,7 @@ class OneHot(Cell): Examples: >>> net = nn.OneHot(depth=4, axis=1) - >>> indices = mindspore.Tensor([[1, 3], [0, 2]], dtype=mindspore.int32) + >>> indices = Tensor([[1, 3], [0, 2]], dtype=mindspore.int32) >>> net(indices) [[[0. 0.] [1. 0.] diff --git a/mindspore/nn/layer/container.py b/mindspore/nn/layer/container.py index 76c72ce421..709b3ef8ef 100644 --- a/mindspore/nn/layer/container.py +++ b/mindspore/nn/layer/container.py @@ -86,7 +86,7 @@ class SequentialCell(Cell): >>> relu = nn.ReLU() >>> seq = nn.SequentialCell([conv, bn, relu]) >>> - >>> x = mindspore.Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32) + >>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32) >>> seq(x) [[[[0.02531557 0. 
] [0.04933941 0.04880078]] @@ -138,7 +138,6 @@ class SequentialCell(Cell): return len(self._cells) def construct(self, input_data): - """Processes the input with the defined sequence of Cells.""" for cell in self.cell_list: input_data = cell(input_data) return input_data @@ -161,7 +160,7 @@ class CellList(_CellListBase, Cell): >>> cell_ls = nn.CellList([bn]) >>> cell_ls.insert(0, conv) >>> cell_ls.append(relu) - >>> x = mindspore.Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32) + >>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32) >>> # not same as nn.SequentialCell, `cell_ls(x)` is not correct >>> cell_ls CellList< (0): Conv2d diff --git a/mindspore/nn/layer/conv.py b/mindspore/nn/layer/conv.py index dfbf96e150..eb73a9ce5a 100644 --- a/mindspore/nn/layer/conv.py +++ b/mindspore/nn/layer/conv.py @@ -146,9 +146,6 @@ class Conv2d(_Conv): Initializer and string are the same as 'weight_init'. Refer to the values of Initializer for more details. Default: 'zeros'. - Returns: - Tensor, output tensor. - Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. 
@@ -157,7 +154,7 @@ class Conv2d(_Conv): Examples: >>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal') - >>> input = mindspore.Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32) + >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32) >>> net(input).shape() (1, 240, 1024, 640) """ @@ -277,7 +274,7 @@ class Conv2dTranspose(_Conv): Examples: >>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal') - >>> input = Tensor(np.ones([1, 3, 16, 50]), mstype.float32) + >>> input = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32) >>> net(input) """ def __init__(self, diff --git a/mindspore/nn/layer/embedding.py b/mindspore/nn/layer/embedding.py index 9579f35226..dfa8e66469 100755 --- a/mindspore/nn/layer/embedding.py +++ b/mindspore/nn/layer/embedding.py @@ -50,7 +50,7 @@ class Embedding(Cell): Examples: >>> net = nn.Embedding(20000, 768, True) - >>> input_data = mindspore.Tensor(np.ones([8, 128]), mindspore.int32) + >>> input_data = Tensor(np.ones([8, 128]), mindspore.int32) >>> >>> # Maps the input word IDs to word embedding. 
>>> output = net(input_data) diff --git a/mindspore/nn/layer/lstm.py b/mindspore/nn/layer/lstm.py index 317f754f67..cef926d365 100755 --- a/mindspore/nn/layer/lstm.py +++ b/mindspore/nn/layer/lstm.py @@ -96,9 +96,9 @@ class LSTM(Cell): >>> return self.lstm(inp, (h0, c0)) >>> >>> net = LstmNet(10, 12, 2, has_bias=True, batch_first=True, bidirectional=False) - >>> input = mindspore.Tensor(np.ones([3, 5, 10]).astype(np.float32)) - >>> h0 = mindspore.Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32)) - >>> c0 = mindspore.Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32)) + >>> input = Tensor(np.ones([3, 5, 10]).astype(np.float32)) + >>> h0 = Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32)) + >>> c0 = Tensor(np.ones([1 * 2, 3, 12]).astype(np.float32)) >>> output, (hn, cn) = net(input, h0, c0) """ def __init__(self, diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index d43c1c8ab4..646db0db8c 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -159,7 +159,7 @@ class BatchNorm1d(_BatchNorm): Examples: >>> net = nn.BatchNorm1d(num_features=16) - >>> input = mindspore.Tensor(np.random.randint(0, 255, [3, 16]), mindspore.float32) + >>> input = Tensor(np.random.randint(0, 255, [3, 16]), mindspore.float32) >>> net(input) """ def _check_data_dim(self, x): @@ -258,7 +258,7 @@ class LayerNorm(Cell): Examples: >>> x = Tensor(np.ones([20, 5, 10, 10], np.float32)) >>> shape1 = x.shape()[1:] - >>> m = LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1) + >>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1) >>> m(x) """ def __init__(self, diff --git a/mindspore/nn/layer/pooling.py b/mindspore/nn/layer/pooling.py index 0a4bd1662b..6ff28dd362 100644 --- a/mindspore/nn/layer/pooling.py +++ b/mindspore/nn/layer/pooling.py @@ -63,8 +63,8 @@ class MaxPool2d(_PoolNd): pad_mode for training only supports "same" and "valid". Args: - kernel_size (int): Size of the window to take a max over. 
- stride (int): Stride size of the window. Default: None. + kernel_size (int): Size of the window to take a max over. Default 1. + stride (int): Stride size of the window. Default: 1. pad_mode (str): Select the mode of the pad. The optional values are "same" and "valid". Default: "valid". @@ -75,7 +75,7 @@ class MaxPool2d(_PoolNd): - valid: Adopts the way of discarding. The possibly largest height and width of output will be return without padding. Extra pixels will be discarded. - padding (int): Now is not supported, mplicit zero padding to be added on both sides. Default: 0. + padding (int): Implicit zero padding to be added on both sides. Default: 0. Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. @@ -85,7 +85,7 @@ class MaxPool2d(_PoolNd): Examples: >>> pool = MaxPool2d(kernel_size=3, stride=1) - >>> x = mindspore.Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32) + >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32) [[[[1. 5. 5. 1.] [0. 3. 4. 8.] [4. 2. 7. 6.] @@ -149,8 +149,8 @@ class AvgPool2d(_PoolNd): pad_mode for training only supports "same" and "valid". Args: - kernel_size (int): Size of the window to take a max over. - stride (int): Stride size of the window. Default: None. + kernel_size (int): Size of the window to take a max over. Default: 1. + stride (int): Stride size of the window. Default: 1. pad_mode (str): Select the mode of the pad. The optional values are "same", "valid". Default: "valid". @@ -161,7 +161,7 @@ class AvgPool2d(_PoolNd): - valid: Adopts the way of discarding. The possibly largest height and width of output will be return without padding. Extra pixels will be discarded. - padding (int): Now is not supported, implicit zero padding to be added on both sides. Default: 0. + padding (int): Implicit zero padding to be added on both sides. Default: 0. Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. 
@@ -171,7 +171,7 @@ class AvgPool2d(_PoolNd): Examples: >>> pool = AvgPool2d(kernel_size=3, stride=1) - >>> x = mindspore.Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32) + >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32) [[[[5. 5. 9. 9.] [8. 4. 3. 0.] [2. 7. 1. 2.] diff --git a/mindspore/nn/loss/loss.py b/mindspore/nn/loss/loss.py index 340cbe73d8..806456e561 100644 --- a/mindspore/nn/loss/loss.py +++ b/mindspore/nn/loss/loss.py @@ -86,9 +86,9 @@ class L1Loss(_Loss): Tensor, loss float tensor. Examples: - >>> loss = L1Loss() - >>> input_data = Tensor(np.array([1, 2, 3]), mstype.float32) - >>> target_data = Tensor(np.array([1, 2, 2]), mstype.float32) + >>> loss = nn.L1Loss() + >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32) + >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32) >>> loss(input_data, target_data) """ def __init__(self, reduction='mean'): @@ -126,9 +126,9 @@ class MSELoss(_Loss): Tensor, weighted loss float tensor. Examples: - >>> loss = MSELoss() - >>> input_data = Tensor(np.array([1, 2, 3]), mstype.float32) - >>> target_data = Tensor(np.array([1, 2, 2]), mstype.float32) + >>> loss = nn.MSELoss() + >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32) + >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32) >>> loss(input_data, target_data) """ def construct(self, base, target): @@ -171,9 +171,9 @@ class SmoothL1Loss(_Loss): Tensor, loss float tensor. 
Examples: - >>> loss = SmoothL1Loss() - >>> input_data = Tensor(np.array([1, 2, 3]), mstype.float32) - >>> target_data = Tensor(np.array([1, 2, 2]), mstype.float32) + >>> loss = nn.SmoothL1Loss() + >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32) + >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32) >>> loss(input_data, target_data) """ def __init__(self, sigma=1.0): @@ -219,17 +219,16 @@ class SoftmaxCrossEntropyWithLogits(_Loss): Inputs: - **logits** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`. - **labels** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`. If `sparse` is True, The type of - `labels` is mstype.int32. If `sparse` is False, the type of `labels` is same as the type of `logits`. + `labels` is mindspore.int32. If `sparse` is False, the type of `labels` is same as the type of `logits`. Outputs: Tensor, a tensor of the same shape as logits with the component-wise logistic losses. Examples: - >>> loss = SoftmaxCrossEntropyWithLogits(sparse=True) - >>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mstype.float32) - >>> labels_np = np.zeros([1, 10]).astype(np.int32) - >>> labels_np[0][0] = 1 + >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True) + >>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32) + >>> labels_np = np.ones([1,]).astype(np.int32) >>> labels = Tensor(labels_np) >>> loss(logits, labels) """ @@ -286,8 +285,8 @@ class SoftmaxCrossEntropyExpand(Cell): Examples: >>> loss = SoftmaxCrossEntropyExpand(sparse=True) - >>> input_data = Tensor(np.ones([64, 512]), dtype=mstype.float32) - >>> label = Tensor(np.ones([64]), dtype=mstype.int32) + >>> input_data = Tensor(np.ones([64, 512]), dtype=mindspore.float32) + >>> label = Tensor(np.ones([64]), dtype=mindspore.int32) >>> loss(input_data, label) """ def __init__(self, sparse=False): diff --git a/mindspore/nn/metrics/accuracy.py b/mindspore/nn/metrics/accuracy.py index 5a11fa9d08..f131432ddd 100644 --- 
a/mindspore/nn/metrics/accuracy.py +++ b/mindspore/nn/metrics/accuracy.py @@ -35,8 +35,8 @@ class Accuracy(EvaluationBase): Default: 'classification'. Examples: - >>> x = mindspore.Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mindspore.float32) - >>> y = mindspore.Tensor(np.array([1, 0, 1]), mindspore.float32) + >>> x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mindspore.float32) + >>> y = Tensor(np.array([1, 0, 1]), mindspore.float32) >>> metric = nn.Accuracy('classification') >>> metric.clear() >>> metric.update(x, y) @@ -58,13 +58,14 @@ class Accuracy(EvaluationBase): Args: inputs: Input `y_pred` and `y`. `y_pred` and `y` are a `Tensor`, a list or an array. - `y_pred` is in most cases (not strictly) a list of floating numbers in range :math:`[0, 1]` + For 'classification' evaluation type, `y_pred` is in most cases (not strictly) a list + of floating numbers in range :math:`[0, 1]` and the shape is :math:`(N, C)`, where :math:`N` is the number of cases and :math:`C` - is the number of categories. For 'multilabel' evaluation type, `y_pred` can only be one-hot - encoding with values 0 or 1. Indices with 1 indicate positive category. `y` contains values - of integers. The shape is :math:`(N, C)` if one-hot encoding is used. One-hot encoding - should be used when 'eval_type' is 'multilabel'. Shape can also be :math:`(N, 1)` if category - index is used in 'classification' evaluation type. + is the number of categories. Shape of `y` can be :math:`(N, C)` with values 0 and 1 if one-hot + encoding is used or the shape is :math:`(N,)` with integer values if index of category is used. + For 'multilabel' evaluation type, `y_pred` and `y` can only be one-hot encoding with + values 0 or 1. Indices with 1 indicate positive category. The shape of `y_pred` and `y` + are both :math:`(N, C)`. Raises: ValueError: If the number of the input is not 2. 
diff --git a/mindspore/nn/metrics/error.py b/mindspore/nn/metrics/error.py index 5dbd83645b..c803000192 100644 --- a/mindspore/nn/metrics/error.py +++ b/mindspore/nn/metrics/error.py @@ -33,8 +33,8 @@ class MAE(Metric): The method `update` must be called with the form `update(y_pred, y)`. Examples: - >>> x = mindspore.Tensor(np.array([0.1, 0.2, 0.6, 0.9]), mindspore.float32) - >>> y = mindspore.Tensor(np.array([0.1, 0.25, 0.7, 0.9]), mindspore.float32) + >>> x = Tensor(np.array([0.1, 0.2, 0.6, 0.9]), mindspore.float32) + >>> y = Tensor(np.array([0.1, 0.25, 0.7, 0.9]), mindspore.float32) >>> error = nn.MAE() >>> error.clear() >>> error.update(x, y) @@ -95,8 +95,8 @@ class MSE(Metric): where :math:`n` is batch size. Examples: - >>> x = mindspore.Tensor(np.array([0.1, 0.2, 0.6, 0.9]), mindspore.float32) - >>> y = mindspore.Tensor(np.array([0.1, 0.25, 0.5, 0.9]), mindspore.float32) + >>> x = Tensor(np.array([0.1, 0.2, 0.6, 0.9]), mindspore.float32) + >>> y = Tensor(np.array([0.1, 0.25, 0.5, 0.9]), mindspore.float32) >>> error = MSE() >>> error.clear() >>> error.update(x, y) diff --git a/mindspore/nn/metrics/fbeta.py b/mindspore/nn/metrics/fbeta.py index 6771b6ba36..68df4318b0 100755 --- a/mindspore/nn/metrics/fbeta.py +++ b/mindspore/nn/metrics/fbeta.py @@ -33,12 +33,11 @@ class Fbeta(Metric): beta (float): The weight of precision. Examples: - >>> x = mindspore.Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) - >>> y = mindspore.Tensor(np.array([1, 0, 1])) + >>> x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) + >>> y = Tensor(np.array([1, 0, 1])) >>> metric = nn.Fbeta(1) >>> metric.update(x, y) >>> fbeta = metric.eval() - [0.66666667 0.66666667] """ def __init__(self, beta): super(Fbeta, self).__init__() @@ -64,7 +63,7 @@ class Fbeta(Metric): `y_pred` is in most cases (not strictly) a list of floating numbers in range :math:`[0, 1]` and the shape is :math:`(N, C)`, where :math:`N` is the number of cases and :math:`C` is the number of categories. 
y contains values of integers. The shape is :math:`(N, C)` - if one-hot encoding is used. Shape can also be :math:`(N, 1)` if category index is used. + if one-hot encoding is used. Shape can also be :math:`(N,)` if category index is used. """ if len(inputs) != 2: raise ValueError('Fbeta need 2 inputs (y_pred, y), but got {}'.format(len(inputs))) @@ -126,8 +125,8 @@ class F1(Fbeta): F_\beta=\frac{2\cdot true\_positive}{2\cdot true\_positive + false\_negative + false\_positive} Examples: - >>> x = mindspore.Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) - >>> y = mindspore.Tensor(np.array([1, 0, 1])) + >>> x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) + >>> y = Tensor(np.array([1, 0, 1])) >>> metric = nn.F1() >>> metric.update(x, y) >>> fbeta = metric.eval() diff --git a/mindspore/nn/metrics/loss.py b/mindspore/nn/metrics/loss.py index bc4c58ef2f..3828fcdef5 100644 --- a/mindspore/nn/metrics/loss.py +++ b/mindspore/nn/metrics/loss.py @@ -25,12 +25,11 @@ class Loss(Metric): loss = \frac{\sum_{k=1}^{n}loss_k}{n} Examples: - >>> x = mindspore.Tensor(np.array(0.2), mindspore.float32) + >>> x = Tensor(np.array(0.2), mindspore.float32) >>> loss = nn.Loss() >>> loss.clear() >>> loss.update(x) >>> result = loss.eval() - 0.20000000298023224 """ def __init__(self): super(Loss, self).__init__() diff --git a/mindspore/nn/metrics/precision.py b/mindspore/nn/metrics/precision.py index a2c8502002..ad7b6c576f 100644 --- a/mindspore/nn/metrics/precision.py +++ b/mindspore/nn/metrics/precision.py @@ -41,13 +41,12 @@ class Precision(EvaluationBase): multilabel. Default: 'classification'. Examples: - >>> x = mindspore.Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) - >>> y = mindspore.Tensor(np.array([1, 0, 1])) + >>> x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) + >>> y = Tensor(np.array([1, 0, 1])) >>> metric = nn.Precision('classification') >>> metric.clear() >>> metric.update(x, y) >>> precision = metric.eval() - [0.5 1. 
] """ def __init__(self, eval_type='classification'): super(Precision, self).__init__(eval_type) @@ -72,13 +71,14 @@ class Precision(EvaluationBase): Args: inputs: Input `y_pred` and `y`. `y_pred` and `y` are Tensor, list or numpy.ndarray. - `y_pred` is in most cases (not strictly) a list of floating numbers in range :math:`[0, 1]` + For 'classification' evaluation type, `y_pred` is in most cases (not strictly) a list + of floating numbers in range :math:`[0, 1]` and the shape is :math:`(N, C)`, where :math:`N` is the number of cases and :math:`C` - is the number of categories. For 'multilabel' evaluation type, `y_pred` can only be one-hot - encoding with values 0 or 1. Indices with 1 indicate positive category. `y` contains values - of integers. The shape is :math:`(N, C)` if one-hot encoding is used. One-hot encoding - should be used when 'eval_type' is 'multilabel'. Shape can also be :math:`(N, 1)` if category - index is used in 'classification' evaluation type. + is the number of categories. Shape of `y` can be :math:`(N, C)` with values 0 and 1 if one-hot + encoding is used or the shape is :math:`(N,)` with integer values if index of category is used. + For 'multilabel' evaluation type, `y_pred` and `y` can only be one-hot encoding with + values 0 or 1. Indices with 1 indicate positive category. The shape of `y_pred` and `y` + are both :math:`(N, C)`. Raises: ValueError: If the number of input is not 2. diff --git a/mindspore/nn/metrics/recall.py b/mindspore/nn/metrics/recall.py index 2ea284ec41..45ebf0d7db 100644 --- a/mindspore/nn/metrics/recall.py +++ b/mindspore/nn/metrics/recall.py @@ -41,13 +41,12 @@ class Recall(EvaluationBase): multilabel. Default: 'classification'. 
Examples: - >>> x = mindspore.Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) - >>> y = mindspore.Tensor(np.array([1, 0, 1])) + >>> x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])) + >>> y = Tensor(np.array([1, 0, 1])) >>> metric = nn.Recall('classification') >>> metric.clear() >>> metric.update(x, y) >>> recall = metric.eval() - [1. 0.5] """ def __init__(self, eval_type='classification'): super(Recall, self).__init__(eval_type) @@ -72,13 +71,14 @@ class Recall(EvaluationBase): Args: inputs: Input `y_pred` and `y`. `y_pred` and `y` are a `Tensor`, a list or an array. - `y_pred` is in most cases (not strictly) a list of floating numbers in range :math:`[0, 1]` + For 'classification' evaluation type, `y_pred` is in most cases (not strictly) a list + of floating numbers in range :math:`[0, 1]` and the shape is :math:`(N, C)`, where :math:`N` is the number of cases and :math:`C` - is the number of categories. For 'multilabel' evaluation type, `y_pred` can only be one-hot - encoding with values 0 or 1. Indices with 1 indicate positive category. `y` contains values - of integers. The shape is :math:`(N, C)` if one-hot encoding is used. One-hot encoding - should be used when 'eval_type' is 'multilabel'. Shape can also be :math:`(N, 1)` if category - index is used in 'classification' evaluation type. + is the number of categories. Shape of `y` can be :math:`(N, C)` with values 0 and 1 if one-hot + encoding is used or the shape is :math:`(N,)` with integer values if index of category is used. + For 'multilabel' evaluation type, `y_pred` and `y` can only be one-hot encoding with + values 0 or 1. Indices with 1 indicate positive category. The shape of `y_pred` and `y` + are both :math:`(N, C)`. 
Raises: diff --git a/mindspore/nn/metrics/topk.py b/mindspore/nn/metrics/topk.py index 6afa631940..eab08a498d 100644 --- a/mindspore/nn/metrics/topk.py +++ b/mindspore/nn/metrics/topk.py @@ -33,14 +33,13 @@ class TopKCategoricalAccuracy(Metric): ValueError: If `k` is less than 1. Examples: - >>> x = mindspore.Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.], + >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.], >>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32) - >>> y = mindspore.Tensor(np.array([2, 0, 1]), mindspore.float32) + >>> y = Tensor(np.array([2, 0, 1]), mindspore.float32) >>> topk = nn.TopKCategoricalAccuracy(3) >>> topk.clear() >>> topk.update(x, y) >>> result = topk.eval() - 0.6666666666666666 """ def __init__(self, k): super(TopKCategoricalAccuracy, self).__init__() @@ -65,7 +64,7 @@ class TopKCategoricalAccuracy(Metric): y_pred is in most cases (not strictly) a list of floating numbers in range :math:`[0, 1]` and the shape is :math:`(N, C)`, where :math:`N` is the number of cases and :math:`C` is the number of categories. y contains values of integers. The shape is :math:`(N, C)` - if one-hot encoding is used. Shape can also be :math:`(N, 1)` if category index is used. + if one-hot encoding is used. Shape can also be :math:`(N,)` if category index is used. """ if len(inputs) != 2: raise ValueError('Topk need 2 inputs (y_pred, y), but got {}'.format(len(inputs))) @@ -98,9 +97,9 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy): Refer to class 'TopKCategoricalAccuracy' for more details. 
Examples: - >>> x = mindspore.Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.], + >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.], >>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32) - >>> y = mindspore.Tensor(np.array([2, 0, 1]), mindspore.float32) + >>> y = Tensor(np.array([2, 0, 1]), mindspore.float32) >>> topk = nn.Top1CategoricalAccuracy() >>> topk.clear() >>> topk.update(x, y) @@ -116,9 +115,9 @@ class Top5CategoricalAccuracy(TopKCategoricalAccuracy): Refer to class 'TopKCategoricalAccuracy' for more details. Examples: - >>> x = mindspore.Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.], + >>> x = Tensor(np.array([[0.2, 0.5, 0.3, 0.6, 0.2], [0.1, 0.35, 0.5, 0.2, 0.], >>> [0.9, 0.6, 0.2, 0.01, 0.3]]), mindspore.float32) - >>> y = mindspore.Tensor(np.array([2, 0, 1]), mindspore.float32) + >>> y = Tensor(np.array([2, 0, 1]), mindspore.float32) >>> topk = nn.Top5CategoricalAccuracy() >>> topk.clear() >>> topk.update(x, y) diff --git a/mindspore/nn/optim/adam.py b/mindspore/nn/optim/adam.py index 2c901ae081..86ce2b2147 100755 --- a/mindspore/nn/optim/adam.py +++ b/mindspore/nn/optim/adam.py @@ -161,7 +161,7 @@ class Adam(Optimizer): Examples: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> optim = Adam(params=net.trainable_params()) + >>> optim = nn.Adam(params=net.trainable_params()) >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) """ @@ -252,7 +252,7 @@ class AdamWeightDecay(Optimizer): Examples: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> optim = AdamWeightDecay(params=net.trainable_params()) + >>> optim = nn.AdamWeightDecay(params=net.trainable_params()) >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) """ def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): @@ -306,7 +306,7 @@ class AdamWeightDecayDynamicLR(Optimizer): Examples: >>> net = Net() >>> loss = 
nn.SoftmaxCrossEntropyWithLogits() - >>> optim = AdamWeightDecayDynamicLR(params=net.trainable_params(), decay_steps=10) + >>> optim = nn.AdamWeightDecayDynamicLR(params=net.trainable_params(), decay_steps=10) >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) """ def __init__(self, diff --git a/mindspore/nn/optim/ftrl.py b/mindspore/nn/optim/ftrl.py index 3f4da483ea..ee8fc9355f 100644 --- a/mindspore/nn/optim/ftrl.py +++ b/mindspore/nn/optim/ftrl.py @@ -87,7 +87,7 @@ class FTRL(Optimizer): Examples: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> opt = FTRL(net.trainable_params()) + >>> opt = nn.FTRL(net.trainable_params()) >>> model = Model(net, loss_fn=loss, optimizer=opt, metrics=None) """ def __init__(self, params, initial_accum=0.1, learning_rate=0.001, lr_power=-0.5, l1=0.0, l2=0.0, diff --git a/mindspore/nn/optim/lamb.py b/mindspore/nn/optim/lamb.py index e4fd3bf1d7..e74d6fc6a8 100755 --- a/mindspore/nn/optim/lamb.py +++ b/mindspore/nn/optim/lamb.py @@ -163,7 +163,7 @@ class Lamb(Optimizer): Examples: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> optim = Lamb(params=net.trainable_params(), decay_steps=10) + >>> optim = nn.Lamb(params=net.trainable_params(), decay_steps=10) >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) """ diff --git a/mindspore/nn/optim/lars.py b/mindspore/nn/optim/lars.py index a69057215d..c0cb71cfa6 100755 --- a/mindspore/nn/optim/lars.py +++ b/mindspore/nn/optim/lars.py @@ -90,8 +90,8 @@ class LARS(Cell): Examples: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> opt = Momentum(net.trainable_params(), 0.1, 0.9) - >>> opt_lars = LARS(opt, epsilon=1e-08, hyperpara=0.02) + >>> opt = nn.Momentum(net.trainable_params(), 0.1, 0.9) + >>> opt_lars = nn.LARS(opt, epsilon=1e-08, hyperpara=0.02) >>> model = Model(net, loss_fn=loss, optimizer=opt_lars, metrics=None) """ diff --git a/mindspore/nn/optim/momentum.py b/mindspore/nn/optim/momentum.py index 
2cc6d76a86..21d3cc864e 100755 --- a/mindspore/nn/optim/momentum.py +++ b/mindspore/nn/optim/momentum.py @@ -83,7 +83,7 @@ class Momentum(Optimizer): Examples: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9) + >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9) >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) """ def __init__(self, params, learning_rate, momentum, weight_decay=0.0, loss_scale=1.0, diff --git a/mindspore/nn/optim/rmsprop.py b/mindspore/nn/optim/rmsprop.py index faaeacfaa8..b17a101708 100644 --- a/mindspore/nn/optim/rmsprop.py +++ b/mindspore/nn/optim/rmsprop.py @@ -132,7 +132,7 @@ class RMSProp(Optimizer): Examples: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> opt = RMSProp(params=net.trainable_params(), learning_rate=lr) + >>> opt = nn.RMSProp(params=net.trainable_params(), learning_rate=lr) >>> model = Model(net, loss, opt) """ def __init__(self, params, learning_rate=0.1, decay=0.9, momentum=0.0, epsilon=1e-10, diff --git a/mindspore/nn/optim/sgd.py b/mindspore/nn/optim/sgd.py index 92e9a11070..dbc81ecdd6 100755 --- a/mindspore/nn/optim/sgd.py +++ b/mindspore/nn/optim/sgd.py @@ -77,7 +77,7 @@ class SGD(Optimizer): Examples: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> optim = SGD(params=net.trainable_params()) + >>> optim = nn.SGD(params=net.trainable_params()) >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) """ def __init__(self, params, learning_rate=0.1, momentum=0.0, dampening=0.0, weight_decay=0.0, nesterov=False, diff --git a/mindspore/nn/wrap/cell_wrapper.py b/mindspore/nn/wrap/cell_wrapper.py index efdfc9367e..53a535781d 100644 --- a/mindspore/nn/wrap/cell_wrapper.py +++ b/mindspore/nn/wrap/cell_wrapper.py @@ -50,8 +50,8 @@ class WithLossCell(Cell): >>> net_with_criterion = nn.WithLossCell(net, loss_fn) >>> >>> batch_size 
= 2 - >>> data = mindspore.Tensor(np.ones([batch_size, 3, 64, 64]).astype(np.float32) * 0.01) - >>> label = mindspore.Tensor(np.ones([batch_size, 1, 1, 1]).astype(np.int32)) + >>> data = Tensor(np.ones([batch_size, 3, 64, 64]).astype(np.float32) * 0.01) + >>> label = Tensor(np.ones([batch_size, 1, 1, 1]).astype(np.int32)) >>> >>> net_with_criterion(data, label) """ @@ -62,16 +62,6 @@ class WithLossCell(Cell): self._loss_fn = loss_fn def construct(self, data, label): - """ - Computes loss based on the wrapped loss cell. - - Args: - data (Tensor): Tensor data to train. - label (Tensor): Tensor label data. - - Returns: - Tensor, compute result. - """ out = self._backbone(data) return self._loss_fn(out, label) @@ -137,19 +127,6 @@ class WithGradCell(Cell): self.network_with_loss.set_train() def construct(self, data, label): - """ - Computes gradients based on the wrapped gradients cell. - - Note: - Run in PyNative mode. - - Args: - data (Tensor): Tensor data to train. - label (Tensor): Tensor label data. - - Returns: - Tensor, return compute gradients. - """ weights = self.weights if self.sens is None: grads = self.grad(self.network_with_loss, weights)(data, label) @@ -355,7 +332,7 @@ class ParameterUpdate(Cell): >>> param = network.parameters_dict()['learning_rate'] >>> update = nn.ParameterUpdate(param) >>> update.phase = "update_param" - >>> lr = mindspore.Tensor(0.001, mindspore.float32) + >>> lr = Tensor(0.001, mindspore.float32) >>> update(lr) """ diff --git a/mindspore/nn/wrap/grad_reducer.py b/mindspore/nn/wrap/grad_reducer.py index 8b34abc47b..01346698ee 100644 --- a/mindspore/nn/wrap/grad_reducer.py +++ b/mindspore/nn/wrap/grad_reducer.py @@ -120,25 +120,36 @@ class DistributedGradReducer(Cell): ValueError: If degree is not a int or less than 0. 
Examples: - >>> from mindspore.communication import get_group_size + >>> from mindspore.communication import init, get_group_size >>> from mindspore.ops import composite as C >>> from mindspore.ops import operations as P >>> from mindspore.ops import functional as F >>> from mindspore import context + >>> from mindspore import nn + >>> from mindspore import ParallelMode, ParameterTuple + >>> + >>> device_id = int(os.environ["DEVICE_ID"]) + >>> context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True, + >>> device_id=int(device_id), enable_hccl=True) + >>> init() + >>> context.reset_auto_parallel_context() + >>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL) + >>> >>> >>> class TrainingWrapper(nn.Cell): >>> def __init__(self, network, optimizer, sens=1.0): >>> super(TrainingWrapper, self).__init__(auto_prefix=False) >>> self.network = network - >>> self.weights = mindspore.ParameterTuple(network.trainable_params()) + >>> self.network.add_flags(defer_inline=True) + >>> self.weights = ParameterTuple(network.trainable_params()) >>> self.optimizer = optimizer >>> self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) >>> self.sens = sens >>> self.reducer_flag = False >>> self.grad_reducer = None >>> self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - >>> if self.parallel_mode in [mindspore.ParallelMode.DATA_PARALLEL, - >>> mindspore.ParallelMode.HYBRID_PARALLEL]: + >>> if self.parallel_mode in [ParallelMode.DATA_PARALLEL, + >>> ParallelMode.HYBRID_PARALLEL]: >>> self.reducer_flag = True >>> if self.reducer_flag: >>> mean = context.get_auto_parallel_context("mirror_mean") @@ -161,8 +172,8 @@ class DistributedGradReducer(Cell): >>> network = Net() >>> optimizer = nn.Momentum(network.trainable_params(), learning_rate=0.1, momentum=0.9) >>> train_cell = TrainingWrapper(network, optimizer) - >>> inputs = mindspore.Tensor(np.ones([16, 16]).astype(np.float32)) - >>> label = 
mindspore.Tensor(np.zeros([16, 16]).astype(np.float32)) + >>> inputs = Tensor(np.ones([16, 16]).astype(np.float32)) + >>> label = Tensor(np.zeros([16, 16]).astype(np.float32)) >>> grads = train_cell(inputs, label) """ diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index a11c753eda..1ce3179273 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -65,9 +65,10 @@ class DynamicLossScaleUpdateCell(Cell): >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=manager) >>> train_network.set_train() >>> - >>> inputs = mindspore.Tensor(np.ones([16, 16]).astype(np.float32)) - >>> label = mindspore.Tensor(np.zeros([16, 16]).astype(np.float32)) - >>> output = train_network(inputs, label) + >>> inputs = Tensor(np.ones([16, 16]).astype(np.float32)) + >>> label = Tensor(np.zeros([16, 16]).astype(np.float32)) + >>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32) + >>> output = train_network(inputs, label, scaling_sens) """ def __init__(self, @@ -126,13 +127,14 @@ class FixedLossScaleUpdateCell(Cell): Examples: >>> net_with_loss = Net() >>> optimizer = nn.Momentum(net_with_loss.trainable_params(), learning_rate=0.1, momentum=0.9) - >>> manager = nn.FixedLossScaleUpdateCell(loss_scale_value=2**12, scale_factor=2, scale_window=1000) + >>> manager = nn.FixedLossScaleUpdateCell(loss_scale_value=2**12) >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=manager) >>> train_network.set_train() >>> - >>> inputs = mindspore.Tensor(np.ones([16, 16]).astype(np.float32)) - >>> label = mindspore.Tensor(np.zeros([16, 16]).astype(np.float32)) - >>> output = train_network(inputs, label) + >>> inputs = Tensor(np.ones([16, 16]).astype(np.float32)) + >>> label = Tensor(np.zeros([16, 16]).astype(np.float32)) + >>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32) + >>> output = 
train_network(inputs, label, scaling_sens) """ def __init__(self, loss_scale_value): @@ -181,9 +183,9 @@ class TrainOneStepWithLossScaleCell(Cell): >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_update_cell=manager) >>> train_network.set_train() >>> - >>> inputs = mindspore.Tensor(np.ones([16, 16]).astype(np.float32)) - >>> label = mindspore.Tensor(np.zeros([16, 16]).astype(np.float32)) - >>> scaling_sens = mindspore.Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32) + >>> inputs = Tensor(np.ones([16, 16]).astype(np.float32)) + >>> label = Tensor(np.zeros([16, 16]).astype(np.float32)) + >>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mindspore.float32) >>> output = train_network(inputs, label, scaling_sens) """ From 2649bffb46ffa6faca6363eda3f511cc50fe323d Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Fri, 3 Apr 2020 15:20:52 +0800 Subject: [PATCH 091/367] Update some required system packages in dockerfile Signed-off-by: leonwanghui --- docker/mindspore-cpu/Dockerfile | 23 +++++++++++++---------- docker/mindspore-cuda10.1/Dockerfile | 23 +++++++++++++---------- docker/mindspore-cuda9.2/Dockerfile | 23 +++++++++++++---------- 3 files changed, 39 insertions(+), 30 deletions(-) diff --git a/docker/mindspore-cpu/Dockerfile b/docker/mindspore-cpu/Dockerfile index dea2fd2149..d24d23cf6b 100644 --- a/docker/mindspore-cpu/Dockerfile +++ b/docker/mindspore-cpu/Dockerfile @@ -12,20 +12,22 @@ RUN apt update \ && DEBIAN_FRONTEND=noninteractive apt install -y \ vim \ wget \ + curl \ xz-utils \ net-tools \ openssh-client \ git \ - subversion \ ntpdate \ tzdata \ tcl \ - sudo + sudo \ + bash-completion # Install compile tools RUN DEBIAN_FRONTEND=noninteractive apt install -y \ gcc \ g++ \ + zlibc \ make \ libgmp-dev \ patch \ @@ -39,7 +41,8 @@ RUN echo "dash dash/sh boolean false" | debconf-set-selections RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash # Install python (v3.7.5) -RUN apt 
install -y --no-install-recommends libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev libgdbm-dev liblzma-dev libreadline-dev \ +RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ + libgdbm-dev libgdbm-compat-dev liblzma-dev libreadline-dev libsqlite3-dev \ && cd /tmp \ && wget https://github.com/python/cpython/archive/v3.7.5.tar.gz \ && tar -xvf v3.7.5.tar.gz \ @@ -62,12 +65,12 @@ RUN mkdir -pv /root/.pip \ && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf # Install pip package -RUN pip install numpy \ - && pip install wheel \ - && pip install nose \ - && pip install pytest \ - && pip install pytest-xdist \ - && pip list +RUN pip install --no-cache-dir \ + numpy \ + wheel \ + nose \ + pytest \ + pytest-xdist # Install cmake (v3.14.1) RUN cd /tmp \ @@ -77,4 +80,4 @@ RUN cd /tmp \ && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh # Install MindSpore cpu whl package -RUN pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/cpu/ubuntu-x86/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl +RUN pip install --no-cache-dir https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/cpu/ubuntu-x86/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl diff --git a/docker/mindspore-cuda10.1/Dockerfile b/docker/mindspore-cuda10.1/Dockerfile index 83b23ff878..e2a1ee955a 100644 --- a/docker/mindspore-cuda10.1/Dockerfile +++ b/docker/mindspore-cuda10.1/Dockerfile @@ -12,20 +12,22 @@ RUN apt update \ && DEBIAN_FRONTEND=noninteractive apt install -y \ vim \ wget \ + curl \ xz-utils \ net-tools \ openssh-client \ git \ - subversion \ ntpdate \ tzdata \ tcl \ - sudo + sudo \ + bash-completion # Install compile tools RUN DEBIAN_FRONTEND=noninteractive apt install -y \ gcc \ g++ \ + zlibc \ make \ libgmp-dev \ patch \ @@ -39,7 +41,8 @@ RUN echo "dash dash/sh boolean false" | debconf-set-selections RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash # Install python (v3.7.5) -RUN apt 
install -y --no-install-recommends libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev libgdbm-dev liblzma-dev libreadline-dev \ +RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ + libgdbm-dev libgdbm-compat-dev liblzma-dev libreadline-dev libsqlite3-dev \ && cd /tmp \ && wget https://github.com/python/cpython/archive/v3.7.5.tar.gz \ && tar -xvf v3.7.5.tar.gz \ @@ -62,12 +65,12 @@ RUN mkdir -pv /root/.pip \ && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf # Install pip package -RUN pip install numpy \ - && pip install wheel \ - && pip install nose \ - && pip install pytest \ - && pip install pytest-xdist \ - && pip list +RUN pip install --no-cache-dir \ + numpy \ + wheel \ + nose \ + pytest \ + pytest-xdist # Install cmake (v3.14.1) RUN cd /tmp \ @@ -77,4 +80,4 @@ RUN cd /tmp \ && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh # Install MindSpore cuda-10.1 whl package -RUN pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/gpu/cuda-10.1/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl +RUN pip install --no-cache-dir https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/gpu/cuda-10.1/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl diff --git a/docker/mindspore-cuda9.2/Dockerfile b/docker/mindspore-cuda9.2/Dockerfile index cbfcfd42ac..6e40106396 100644 --- a/docker/mindspore-cuda9.2/Dockerfile +++ b/docker/mindspore-cuda9.2/Dockerfile @@ -12,20 +12,22 @@ RUN apt update \ && DEBIAN_FRONTEND=noninteractive apt install -y \ vim \ wget \ + curl \ xz-utils \ net-tools \ openssh-client \ git \ - subversion \ ntpdate \ tzdata \ tcl \ - sudo + sudo \ + bash-completion # Install compile tools RUN DEBIAN_FRONTEND=noninteractive apt install -y \ gcc \ g++ \ + zlibc \ make \ libgmp-dev \ patch \ @@ -39,7 +41,8 @@ RUN echo "dash dash/sh boolean false" | debconf-set-selections RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash # Install python (v3.7.5) -RUN apt 
install -y --no-install-recommends libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev libgdbm-dev liblzma-dev libreadline-dev \ +RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ + libgdbm-dev libgdbm-compat-dev liblzma-dev libreadline-dev libsqlite3-dev \ && cd /tmp \ && wget https://github.com/python/cpython/archive/v3.7.5.tar.gz \ && tar -xvf v3.7.5.tar.gz \ @@ -62,12 +65,12 @@ RUN mkdir -pv /root/.pip \ && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf # Install pip package -RUN pip install numpy \ - && pip install wheel \ - && pip install nose \ - && pip install pytest \ - && pip install pytest-xdist \ - && pip list +RUN pip install --no-cache-dir \ + numpy \ + wheel \ + nose \ + pytest \ + pytest-xdist # Install cmake (v3.14.1) RUN cd /tmp \ @@ -77,4 +80,4 @@ RUN cd /tmp \ && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh # Install MindSpore cuda-9.2 whl package -RUN pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/gpu/cuda-9.2/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl +RUN pip install --no-cache-dir https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/gpu/cuda-9.2/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl From bb5320be13e4f0df95cdb80a8185ad7b3542c96c Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Tue, 31 Mar 2020 16:12:43 +0800 Subject: [PATCH 092/367] iterfaces change: _Constant to Constant --- mindspore/common/initializer.py | 21 +++++++++++++++------ tests/ut/python/utils/test_initializer.py | 9 +++++++-- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/mindspore/common/initializer.py b/mindspore/common/initializer.py index 99b4501307..bdc3418129 100644 --- a/mindspore/common/initializer.py +++ b/mindspore/common/initializer.py @@ -180,18 +180,18 @@ class HeUniform(Initializer): _assignment(arr, data) -class _Constant(Initializer): +class Constant(Initializer): """ Initialize a constant. 
Args: - value (int or numpy.ndarray): The value to initialize. + value (Union[int, numpy.ndarray]): The value to initialize. Returns: Array, initialize array. """ def __init__(self, value): - super(_Constant, self).__init__(value=value) + super(Constant, self).__init__(value=value) self.value = value def _initialize(self, arr): @@ -266,8 +266,16 @@ def initializer(init, shape=None, dtype=mstype.float32): Args: init (Union[Tensor, str, Initializer, numbers.Number]): Initialize value. + + - `str`: The `init` should be the alias of the class inheriting from `Initializer` and the corresponding + class will be called. + + - `Initializer`: The `init` should be the class inheriting from `Initializer` to initialize tensor. + + - `numbers.Number`: The `Constant` will be called to initialize tensor. + shape (Union[tuple, list, int]): A list of integers, a tuple of integers or an integer as the shape of - output. Default: None. + output. Default: None. dtype (:class:`mindspore.dtype`): The type of data in initialized tensor. Default: mstype.float32. 
Returns: @@ -295,7 +303,7 @@ def initializer(init, shape=None, dtype=mstype.float32): raise ValueError(msg) if isinstance(init, numbers.Number): - init_obj = _Constant(init) + init_obj = Constant(init) elif isinstance(init, str): init_obj = _INITIALIZER_ALIAS[init.lower()]() else: @@ -314,4 +322,5 @@ __all__ = [ 'HeUniform', 'XavierUniform', 'One', - 'Zero'] + 'Zero', + 'Constant'] diff --git a/tests/ut/python/utils/test_initializer.py b/tests/ut/python/utils/test_initializer.py index ff7ab8d119..31d2434341 100644 --- a/tests/ut/python/utils/test_initializer.py +++ b/tests/ut/python/utils/test_initializer.py @@ -37,8 +37,8 @@ def _check_value(tensor, value_min, value_max): for ele in nd.flatten(): if value_min <= ele <= value_max: continue - raise TypeError('value_min = %d, ele = %d, value_max = %d' - % (value_min, ele, value_max)) + raise ValueError('value_min = %d, ele = %d, value_max = %d' + % (value_min, ele, value_max)) def _check_uniform(tensor, boundary_a, boundary_b): @@ -92,6 +92,11 @@ def test_init_one_alias(): _check_value(tensor, 1, 1) +def test_init_constant(): + tensor = init.initializer(init.Constant(1), [2, 2], ms.float32) + _check_value(tensor, 1, 1) + + def test_init_uniform(): scale = 10 tensor = init.initializer(init.Uniform(scale=scale), [5, 4], ms.float32) From 2ff6f0de460986d3864e05a589963dfbe0e119a5 Mon Sep 17 00:00:00 2001 From: VectorSL Date: Fri, 3 Apr 2020 16:55:37 +0800 Subject: [PATCH 093/367] edit loss_scale for gpu --- mindspore/nn/wrap/loss_scale.py | 40 ++++++++++---- mindspore/ops/operations/__init__.py | 6 +- mindspore/ops/operations/math_ops.py | 83 ++++++++++++++++++++++++++++ 3 files changed, 117 insertions(+), 12 deletions(-) diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index a11c753eda..a294dbba18 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -25,6 +25,7 @@ from ...ops import operations as P from ...ops.operations import NPUGetFloatStatus, 
NPUAllocFloatStatus, NPUClearFloatStatus, ReduceSum, LessEqual, \ ControlDepend from ...common import dtype as mstype +import mindspore.context as context _grad_scale = C.MultitypeFuncGraph("grad_scale") reciprocal = P.Reciprocal() @@ -34,6 +35,12 @@ reciprocal = P.Reciprocal() def tensor_grad_scale(scale, grad): return grad * F.cast(reciprocal(scale), F.dtype(grad)) +_grad_overflow = C.MultitypeFuncGraph("_grad_overflow") +grad_overflow = P.FloatStatus() + +@_grad_overflow.register("Tensor") +def _tensor_grad_overflow(grad): + return grad_overflow(grad) class DynamicLossScaleUpdateCell(Cell): r""" @@ -195,9 +202,15 @@ class TrainOneStepWithLossScaleCell(Cell): self.optimizer = optimizer self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) self.hyper_map = C.HyperMap() - self.alloc_status = NPUAllocFloatStatus() - self.get_status = NPUGetFloatStatus() - self.clear_status = NPUClearFloatStatus() + if context.get_context("device_target") == "GPU": + self.gpu_target = True + self.float_status = P.FloatStatus() + self.addn = P.AddN() + else: + self.gpu_target = False + self.alloc_status = NPUAllocFloatStatus() + self.get_status = NPUGetFloatStatus() + self.clear_status = NPUClearFloatStatus() self.reduce_sum = ReduceSum(keep_dims=False) self.base = Tensor(1, mstype.float32) self.less_equal = LessEqual() @@ -222,10 +235,11 @@ class TrainOneStepWithLossScaleCell(Cell): def construct(self, data, label, sens=None): weights = self.weights loss = self.network(data, label) - # init overflow buffer - init = self.alloc_status() - # clear overflow buffer - self.clear_status(init) + if not self.gpu_target: + # init overflow buffer + init = self.alloc_status() + # clear overflow buffer + self.clear_status(init) if sens is None: scaling_sens = self.loss_scale else: @@ -235,10 +249,14 @@ class TrainOneStepWithLossScaleCell(Cell): if self.reducer_flag: # apply grad reducer on grads grads = self.grad_reducer(grads) - # get the overflow buffer - self.get_status(init) 
- # sum overflow buffer elements, 0:not overflow , >0:overflow - flag_sum = self.reduce_sum(init, (0,)) + if not self.gpu_target: + # get the overflow buffer + self.get_status(init) + # sum overflow buffer elements, 0:not overflow , >0:overflow + flag_sum = self.reduce_sum(init, (0,)) + else: + flag_sum = self.hyper_map(F.partial(_grad_overflow), grads) + flag_sum = self.addn(flag_sum) if self.is_distributed: # sum overflow flag over devices flag_reduce = self.allreduce(flag_sum) diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 846be05c4d..bc300cb670 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -44,7 +44,7 @@ from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul LogicalNot, LogicalOr, MatMul, Maximum, Minimum, Mul, Neg, NMSWithMask, NotEqual, NPUAllocFloatStatus, NPUClearFloatStatus, - NPUGetFloatStatus, Pow, RealDiv, + NPUGetFloatStatus, Pow, RealDiv, IsNan, IsInf, IsFinite, FloatStatus, Reciprocal, CumSum, Sin, Sqrt, Rsqrt, Square, Sub, TensorAdd, Sign, Round) @@ -151,6 +151,10 @@ __all__ = [ 'Neg', 'Slice', 'DType', + 'IsNan', + 'IsInf', + 'IsFinite', + 'FloatStatus', 'NPUAllocFloatStatus', 'NPUGetFloatStatus', 'NPUClearFloatStatus', diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index b7373416f9..f7f2cb83c0 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1557,6 +1557,89 @@ class LogicalOr(_LogicBinaryOp): def infer_dtype(self, x_dtype, y_dtype): return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,)) +class IsNan(PrimitiveWithInfer): + """ + Judging which elements are nan for each position + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, has the same shape of input. 
+ """ + + @prim_attr_register + def __init__(self): + """init IsNan""" + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, x_shape): + return x_shape + + def infer_dtype(self, x_dtype): + return mstype.bool_ + +class IsInf(PrimitiveWithInfer): + """ + Judging which elements are inf or -inf for each position + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, has the same shape of input. + """ + + @prim_attr_register + def __init__(self): + """init IsInf""" + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, x_shape): + return x_shape + + def infer_dtype(self, x_dtype): + return mstype.bool_ + +class IsFinite(PrimitiveWithInfer): + """ + Judging which elements are finite for each position + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, has the same shape of input. + """ + + @prim_attr_register + def __init__(self): + """init IsFinite""" + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, x_shape): + return x_shape + + def infer_dtype(self, x_dtype): + return mstype.bool_ + +class FloatStatus(PrimitiveWithInfer): + """ + Determine if the elements contains nan, inf or -inf + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, has the shape of `(1,)`. 
+ """ + + @prim_attr_register + def __init__(self): + """init FloatStatus""" + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, x_shape): + return [1] + + def infer_dtype(self, x_dtype): + return x_dtype class NPUAllocFloatStatus(PrimitiveWithInfer): """ From 5fc6e5be22b2028cc5b52881e5c548aba011c16d Mon Sep 17 00:00:00 2001 From: yanghaoran Date: Fri, 3 Apr 2020 17:13:09 +0800 Subject: [PATCH 094/367] GraphEngine now downloads thirdparty software into build directory too --- graphengine | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphengine b/graphengine index 60b2262ce7..5369646b48 160000 --- a/graphengine +++ b/graphengine @@ -1 +1 @@ -Subproject commit 60b2262ce700db538e4c728619e1972c3687f633 +Subproject commit 5369646b489114b380a7b5208ddd6e632acb447f From 7a57b15b7ce67fd65e538cda3620e27668e853f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Mon, 23 Mar 2020 15:33:01 +0800 Subject: [PATCH 095/367] modify comments --- mindspore/ops/operations/nn_ops.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index f5637e69d2..fedb33d283 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -1219,14 +1219,14 @@ class ApplyMomentum(PrimitiveWithInfer): gradient_scale (float): The scale of the gradient. Default: 1.0. Inputs: - - **variable** (Tensor) - Weights to be update. + - **variable** (Tensor) - Weights to be updated. - **accumulation** (Tensor) - Accumulated gradient value by moment weight. - **learning_rate** (float) - Learning rate. - **gradient** (Tensor) - Gradients. - **momentum** (float) - Momentum. Outputs: - Tensor, parameters to be update. + Tensor, parameters to be updated. Examples: >>> net = ResNet50() @@ -1318,15 +1318,15 @@ class SGD(PrimitiveWithInfer): nesterov (bool): Enable Nesterov momentum. 
Default: False. Inputs: - - **parameters** (Tensor) - Parameters to be update. + - **parameters** (Tensor) - Parameters to be updated. - **gradient** (Tensor) - Gradients. - **learning_rate** (Tensor) - Learning rate. e.g. Tensor(0.1, mindspore.float32). - - **accum** (Tensor) - Accum(velocity) to be update. + - **accum** (Tensor) - Accum(velocity) to be updated. - **momentum** (Tensor) - Momentum. e.g. Tensor(0.1, mindspore.float32). - **stat** (Tensor) - States to be updated with the same shape as gradient. Outputs: - Tensor, parameters to be update. + Tensor, parameters to be updated. """ @prim_attr_register @@ -2141,7 +2141,7 @@ class Adam(PrimitiveWithInfer): If False, updates the gradients without using NAG. Default: False. Inputs: - - **var** (Tensor) - Weights to be update. + - **var** (Tensor) - Weights to be updated. - **m** (Tensor) - The 1st moment vector in the updating formula. - **v** (Tensor) - the 2nd moment vector in the updating formula. - **beta1_power** (float) - :math:`beta_1^t` in the updating formula. @@ -2251,8 +2251,8 @@ class SparseApplyAdagrad(PrimitiveWithInfer): use_locking (bool): If True, updating of the var and accum tensors will be protected. Default: False. Inputs: - - **var** (Tensor) - Variable to be update. The type must be float32. - - **accum** (Tensor) - Accum to be update. The shape must be the same as `var`'s shape, + - **var** (Tensor) - Variable to be updated. The type must be float32. + - **accum** (Tensor) - Accum to be updated. The shape must be the same as `var`'s shape, the type must be float32. - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension, the type must be float32. @@ -2299,7 +2299,7 @@ class LARSUpdate(PrimitiveWithInfer): use_clip (bool): Whether to use clip operation for calculating the local learning rate. Default: False. Inputs: - - **weight** (Tensor) - The weight to be update. + - **weight** (Tensor) - The weight to be updated. 
- **gradient** (Tensor) - The gradient of weight, which has the same shape and dtype with weight. - **norm_weight** (Tensor) - A scalar tensor, representing the square sum of weight. - **norm_gradient** (Tensor) - A scalar tensor, representing the square sum of gradient. From 5b176f258bd05ad2f84ad690a88865975b023671 Mon Sep 17 00:00:00 2001 From: wanghua Date: Fri, 3 Apr 2020 17:51:56 +0800 Subject: [PATCH 096/367] modify bert test file --- mindspore/ccsrc/device/ascend/kernel_select_ascend.cc | 8 +------- tests/st/networks/models/bert/bert_tdt_no_lossscale.py | 2 +- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc index a7c8d131fb..d05b9fafa1 100644 --- a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc +++ b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc @@ -82,12 +82,6 @@ bool IsValidKernelInfo(const std::shared_ptr &kernel_node, const kernel:: } return true; }; - if (AnfAlgo::GetCNodeName(kernel_node) == "LayerNormBetaGammaBackprop" || - AnfAlgo::GetCNodeName(kernel_node) == "LayerNormXBackprop") { - if (AnfAlgo::GetPrevNodeOutputFormat(kernel_node, 0) != kernel_build_info.GetInputFormat(0)) { - return true; - } - } if (AnfAlgo::GetCNodeName(kernel_node) == prim::kPrimCast->name()) { return AnfAlgo::GetOutputInferDataType(kernel_node, 0) == kernel_build_info.GetOutputDeviceType(0) && AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0) == kernel_build_info.GetInputDeviceType(0); @@ -161,7 +155,7 @@ bool PriorityChooseItem(const std::vector &cur_item, std::vector *best return false; } } - return true; + return false; } void UpdateCurMatchCounts(const kernel::KernelBuildInfo &kernel_build_info, const std::shared_ptr &kernel_node, diff --git a/tests/st/networks/models/bert/bert_tdt_no_lossscale.py b/tests/st/networks/models/bert/bert_tdt_no_lossscale.py index 6f3ffc7dad..9cc11997e6 100644 --- 
a/tests/st/networks/models/bert/bert_tdt_no_lossscale.py +++ b/tests/st/networks/models/bert/bert_tdt_no_lossscale.py @@ -27,7 +27,7 @@ from mindspore.common.tensor import Tensor from mindspore.train.model import Model from mindspore.train.callback import Callback from mindspore.model_zoo.Bert_NEZHA import BertConfig, BertNetworkWithLoss, BertTrainOneStepCell -from mindspore.nn.optim import Lamb +from mindspore.nn.optim import Momentum from mindspore import log as logger _current_dir = os.path.dirname(os.path.realpath(__file__)) DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"] From 1fb776fe09e772671dabec1eb8015e98c8163d01 Mon Sep 17 00:00:00 2001 From: panyifeng Date: Fri, 3 Apr 2020 17:09:04 +0800 Subject: [PATCH 097/367] fix grad missing due to indirect dependent free morphism --- mindspore/ccsrc/optimizer/ad/dfunctor.cc | 43 +++++++++++++------ mindspore/ccsrc/optimizer/ad/dfunctor.h | 1 + mindspore/ccsrc/pipeline/pass.cc | 2 +- .../python/pynative_mode/test_cell_bprop.py | 3 +- .../python/pynative_mode/test_framstruct.py | 22 ++++++++++ 5 files changed, 54 insertions(+), 17 deletions(-) diff --git a/mindspore/ccsrc/optimizer/ad/dfunctor.cc b/mindspore/ccsrc/optimizer/ad/dfunctor.cc index 128e4463e6..3e1aa6e555 100644 --- a/mindspore/ccsrc/optimizer/ad/dfunctor.cc +++ b/mindspore/ccsrc/optimizer/ad/dfunctor.cc @@ -185,19 +185,32 @@ AdjointPtr DFunctor::MapMorphism(const AnfNodePtr &morph) { return node_adjoint; } +bool DFunctor::IsFreeMorphism(const AnfNodePtr &node) { + // Do not care about non-CNode + if (!node->isa()) { + return false; + } + // Do not care about kPrimReturn + if (IsPrimitiveCNode(node, prim::kPrimReturn)) { + return false; + } + auto &users = primal_graph_->manager()->node_users()[node]; + // Do not care about isolated morphisms + if (users.empty()) { + return false; + } + // Not free if it's used by some node in primal_graph + bool nonfree = std::any_of(std::begin(users), std::end(users), [&](const auto &kv) { 
+ auto &user = kv.first; + return user->func_graph() == primal_graph_; + }); + return !nonfree; +} + void DFunctor::MapFreeMorphism() { // Handle cnode not attached to output, that might be refered in other functions. for (auto &node : primal_graph_->nodes()) { - auto adjoint = FindAdjoint(node); - if (adjoint != nullptr) { - continue; - } - if (!node->isa()) { - MS_LOG(DEBUG) << "MapFreeMorphism noncnode not mapped after MapMorphism " << node->ToString() << " " - << node->type_name() << "."; - continue; - } - if (IsPrimitiveCNode(node, prim::kPrimReturn)) { + if (!IsFreeMorphism(node)) { continue; } MS_LOG(DEBUG) << "MapFreeMorphism map nonoutput cnode after MapMorphism " << node->ToString() << "."; @@ -256,9 +269,10 @@ void DFunctor::MapMorphism() { // Set stop_gradient before MapMorphism. BroadCastStopFlag(); + // Handle free morphism before output, because in some case, free morphism might depend on output's fv tangent + MapFreeMorphism(); // Handle morphism from output. (void)MapMorphism(primal_graph_->output()); - MapFreeMorphism(); // Construct K for primal_graph_ auto output_adjoint = anfnode_to_adjoin_.find(primal_graph_->output()); @@ -298,9 +312,10 @@ FuncGraphPtr DFunctor::KUserDefined(const FuncGraphPtr &primal) { const size_t param_diff = 1; if (bprop_graph->output()->isa() && bprop_graph->output()->cast()->size() + param_diff != bprop_graph->parameters().size()) { - MS_LOG(EXCEPTION) << "User defined Cell bprop " << primal->ToString() << " in scope " - << primal->output()->scope()->name() - << " output must be a tuple and output number should be the same with inputs."; + // It does not matter with the final tangents, just a tip for debugging + MS_LOG(DEBUG) << "User defined Cell bprop " << primal->ToString() << " in scope " + << primal->output()->scope()->name() + << " output must be a tuple and output number should be the same with inputs."; } resources_->manager()->AddFuncGraph(bprop_graph); diff --git a/mindspore/ccsrc/optimizer/ad/dfunctor.h 
b/mindspore/ccsrc/optimizer/ad/dfunctor.h index f50f866efa..3059736171 100644 --- a/mindspore/ccsrc/optimizer/ad/dfunctor.h +++ b/mindspore/ccsrc/optimizer/ad/dfunctor.h @@ -61,6 +61,7 @@ class DFunctor { private: // Map one morphism. AdjointPtr MapMorphism(const AnfNodePtr &morph); + bool IsFreeMorphism(const AnfNodePtr &node); // Map morphism that's not attached to output. void MapFreeMorphism(); void BackPropagateFv(const AnfNodePtr &fv, const AnfNodePtr &din); diff --git a/mindspore/ccsrc/pipeline/pass.cc b/mindspore/ccsrc/pipeline/pass.cc index e2626d5314..16250aae51 100644 --- a/mindspore/ccsrc/pipeline/pass.cc +++ b/mindspore/ccsrc/pipeline/pass.cc @@ -111,7 +111,7 @@ OptPassGroupMap GetOptPassesA(const opt::irpass::OptimizeIRPassLib& irpass) { irpass.replace_applicator_, }); opt::OptPassConfig virtual_dataset = opt::OptPassConfig({irpass.virtual_dataset_eliminate_}); - opt::OptPassConfig grad = opt::OptPassConfig({irpass.inline_, irpass.expand_jprim_}, true); + opt::OptPassConfig grad = opt::OptPassConfig({irpass.expand_jprim_}, true); OptPassGroupMap map_a({{"a_1", a_1}, {"a_2", a_2}, diff --git a/tests/ut/python/pynative_mode/test_cell_bprop.py b/tests/ut/python/pynative_mode/test_cell_bprop.py index 03ae1affa5..054afe36c9 100644 --- a/tests/ut/python/pynative_mode/test_cell_bprop.py +++ b/tests/ut/python/pynative_mode/test_cell_bprop.py @@ -304,5 +304,4 @@ class MulAddWithWrongOutputNum(nn.Cell): def test_grad_mul_add_with_wrong_output_num(): mul_add = MulAddWithWrongOutputNum() - with pytest.raises(RuntimeError): - C.grad_all(mul_add)(1, 2) + C.grad_all(mul_add)(1, 2) diff --git a/tests/ut/python/pynative_mode/test_framstruct.py b/tests/ut/python/pynative_mode/test_framstruct.py index 2939337211..ff7cf67f52 100644 --- a/tests/ut/python/pynative_mode/test_framstruct.py +++ b/tests/ut/python/pynative_mode/test_framstruct.py @@ -15,6 +15,7 @@ """ test_framstruct """ import pytest import numpy as np +import mindspore as ms import mindspore.nn as nn from 
mindspore import context from mindspore.ops import composite as C @@ -706,3 +707,24 @@ def grad_refactor_14(a, b): return inner1(b) + inner2(a) + inner3(a) def test_grad_refactor_14(): assert C.grad_all(grad_refactor_14)(2, 3) == (3, 9) + + +class IfDeferInline(nn.Cell): + def __init__(self, mul_size): + super().__init__() + self.mul_weight = Tensor(np.full(mul_size, 0.6, dtype=np.float32)) + self.mul = P.Mul() + + def construct(self, inputs): + x = self.mul(inputs, self.mul_weight) + if True: + x = x + return x + +def test_grad_if_defer_inline(): + """ test_grad_if_defer_inline """ + network = IfDeferInline([128, 96]) + network.add_flags(defer_inline=False) + inp = Tensor(np.ones([128, 96]).astype(np.float32)) + grads = C.grad_all(network)(inp) + assert grads == (Tensor(np.full([128, 96], 0.6, dtype=np.float32)),) From d37b8c4f2dc8f994b891868b939923f20d735136 Mon Sep 17 00:00:00 2001 From: Alexey Shevlyakov Date: Thu, 2 Apr 2020 14:17:46 -0400 Subject: [PATCH 098/367] fix RandomCropDecodeResize test --- tests/ut/cpp/dataset/CMakeLists.txt | 2 +- .../dataset/random_crop_and_resize_op_test.cc | 36 ++---- ...c => random_crop_decode_resize_op_test.cc} | 105 +++++++++--------- 3 files changed, 62 insertions(+), 81 deletions(-) rename tests/ut/cpp/dataset/{random_crop_decode_resizeOp_test.cc => random_crop_decode_resize_op_test.cc} (56%) diff --git a/tests/ut/cpp/dataset/CMakeLists.txt b/tests/ut/cpp/dataset/CMakeLists.txt index 0da470ac89..086a67c7d7 100644 --- a/tests/ut/cpp/dataset/CMakeLists.txt +++ b/tests/ut/cpp/dataset/CMakeLists.txt @@ -32,7 +32,7 @@ SET(DE_UT_SRCS project_op_test.cc queue_test.cc random_crop_op_test.cc - random_crop_decode_resizeOp_test.cc + random_crop_decode_resize_op_test.cc random_crop_and_resize_op_test.cc random_color_adjust_op_test.cc random_horizontal_flip_op_test.cc diff --git a/tests/ut/cpp/dataset/random_crop_and_resize_op_test.cc b/tests/ut/cpp/dataset/random_crop_and_resize_op_test.cc index 864d713ed3..7be18fb02c 100644 --- 
a/tests/ut/cpp/dataset/random_crop_and_resize_op_test.cc +++ b/tests/ut/cpp/dataset/random_crop_and_resize_op_test.cc @@ -20,35 +20,17 @@ #include "utils/log_adapter.h" using namespace mindspore::dataset; -using mindspore::MsLogLevel::INFO; -using mindspore::ExceptionType::NoExceptionType; using mindspore::LogStream; +using mindspore::ExceptionType::NoExceptionType; +using mindspore::MsLogLevel::INFO; class MindDataTestRandomCropAndResizeOp : public UT::CVOP::CVOpCommon { public: MindDataTestRandomCropAndResizeOp() : CVOpCommon() {} }; -TEST_F(MindDataTestRandomCropAndResizeOp, TestOpDefault) { - MS_LOG(INFO) << "Doing testRandomCropAndResize."; - TensorShape s_in = input_tensor_->shape(); - std::shared_ptr output_tensor; - int h_out = 512; - int w_out = 512; - - TensorShape s_out({(uint32_t) h_out, (uint32_t) w_out, (uint32_t) s_in[2]}); - - std::unique_ptr op(new RandomCropAndResizeOp(h_out, w_out)); - Status s; - for (auto i = 0; i < 100; i++) { - s = op->Compute(input_tensor_, &output_tensor); - } - EXPECT_TRUE(s.IsOk()); - MS_LOG(INFO) << "testRandomCropAndResize end."; -} - -TEST_F(MindDataTestRandomCropAndResizeOp, TestOpExtended) { - MS_LOG(INFO) << "Doing testRandomCropAndResize."; +TEST_F(MindDataTestRandomCropAndResizeOp, TestOpSimpleTest) { + MS_LOG(INFO) << " starting RandomCropAndResizeOp simple test"; TensorShape s_in = input_tensor_->shape(); std::shared_ptr output_tensor; int h_out = 1024; @@ -58,14 +40,14 @@ TEST_F(MindDataTestRandomCropAndResizeOp, TestOpExtended) { float scale_lb = 0.0001; float scale_ub = 1.0; - TensorShape s_out({(uint32_t) h_out, (uint32_t) w_out, (uint32_t) s_in[2]}); + TensorShape s_out({h_out, w_out, s_in[2]}); - std::unique_ptr op( - new RandomCropAndResizeOp(h_out, w_out, scale_lb, scale_ub, aspect_lb, aspect_ub)); + auto op = std::make_unique(h_out, w_out, scale_lb, scale_ub, aspect_lb, aspect_ub); Status s; for (auto i = 0; i < 100; i++) { s = op->Compute(input_tensor_, &output_tensor); + EXPECT_TRUE(s.IsOk()); } - 
EXPECT_TRUE(s.IsOk()); - MS_LOG(INFO) << "testRandomCropAndResize end."; + + MS_LOG(INFO) << "RandomCropAndResizeOp simple test finished"; } diff --git a/tests/ut/cpp/dataset/random_crop_decode_resizeOp_test.cc b/tests/ut/cpp/dataset/random_crop_decode_resize_op_test.cc similarity index 56% rename from tests/ut/cpp/dataset/random_crop_decode_resizeOp_test.cc rename to tests/ut/cpp/dataset/random_crop_decode_resize_op_test.cc index facd35c4f7..d7e0b16aff 100644 --- a/tests/ut/cpp/dataset/random_crop_decode_resizeOp_test.cc +++ b/tests/ut/cpp/dataset/random_crop_decode_resize_op_test.cc @@ -23,9 +23,10 @@ #include "utils/log_adapter.h" using namespace mindspore::dataset; -using mindspore::MsLogLevel::INFO; -using mindspore::ExceptionType::NoExceptionType; using mindspore::LogStream; +using mindspore::ExceptionType::NoExceptionType; +using mindspore::MsLogLevel::INFO; +constexpr double kMseThreshold = 2.0; class MindDataTestRandomCropDecodeResizeOp : public UT::CVOP::CVOpCommon { public: @@ -33,39 +34,38 @@ class MindDataTestRandomCropDecodeResizeOp : public UT::CVOP::CVOpCommon { }; TEST_F(MindDataTestRandomCropDecodeResizeOp, TestOp2) { - MS_LOG(INFO) << "Doing testRandomCropDecodeResizeOp Test"; + MS_LOG(INFO) << "starting RandomCropDecodeResizeOp test 1"; - std::shared_ptr output_tensor1; - std::shared_ptr output_tensor2; + std::shared_ptr decode_and_crop_output; + std::shared_ptr crop_and_decode_output; - int target_height = 884; - int target_width = 718; - float scale_lb = 0.08; - float scale_ub = 1.0; - float aspect_lb = 0.75; - float aspect_ub = 1.333333; - InterpolationMode interpolation = InterpolationMode::kLinear; - uint32_t max_iter = 10; - std::unique_ptr op1(new RandomCropAndResizeOp( - target_height, target_width, scale_lb, scale_ub, aspect_lb, aspect_ub, interpolation, max_iter)); - EXPECT_TRUE(op1->OneToOne()); - std::unique_ptr op2(new RandomCropDecodeResizeOp( - target_height, target_width, scale_lb, scale_ub, aspect_lb, aspect_ub, interpolation, 
max_iter)); - EXPECT_TRUE(op2->OneToOne()); - Status s1, s2; + constexpr int target_height = 884; + constexpr int target_width = 718; + constexpr float scale_lb = 0.08; + constexpr float scale_ub = 1.0; + constexpr float aspect_lb = 0.75; + constexpr float aspect_ub = 1.333333; + const InterpolationMode interpolation = InterpolationMode::kLinear; + constexpr uint32_t max_iter = 10; + auto crop_and_decode = RandomCropDecodeResizeOp(target_height, target_width, scale_lb, scale_ub, aspect_lb, aspect_ub, + interpolation, max_iter); + auto crop_and_decode_copy = crop_and_decode; + auto decode_and_crop = static_cast(crop_and_decode_copy); + EXPECT_TRUE(crop_and_decode.OneToOne()); + GlobalContext::config_manager()->set_seed(42); for (int i = 0; i < 100; i++) { - s1 = op1->Compute(input_tensor_, &output_tensor1); - s2 = op2->Compute(raw_input_tensor_, &output_tensor2); - cv::Mat output1(target_height, target_width, CV_8UC3, output_tensor1->StartAddr()); - cv::Mat output2(target_height, target_width, CV_8UC3, output_tensor2->StartAddr()); + (void)crop_and_decode.Compute(raw_input_tensor_, &crop_and_decode_output); + (void)decode_and_crop.Compute(input_tensor_, &decode_and_crop_output); + cv::Mat output1(target_height, target_width, CV_8UC3, crop_and_decode_output->StartAddr()); + cv::Mat output2(target_height, target_width, CV_8UC3, decode_and_crop_output->StartAddr()); long int mse_sum = 0; long int count = 0; int a, b; - for (int i = 0; i < target_height; i++) { - for (int j = 0; j < target_width; j++) { - a = (int)output1.at(i, j)[1]; - b = (int)output2.at(i, j)[1]; + for (int j = 0; j < target_height; j++) { + for (int k = 0; k < target_width; k++) { + a = static_cast(output1.at(i, j)[1]); + b = static_cast(output2.at(i, j)[1]); mse_sum += sqrt((a - b) * (a - b)); if (a != b) { count++; @@ -73,24 +73,22 @@ TEST_F(MindDataTestRandomCropDecodeResizeOp, TestOp2) { } } double mse; - if (count > 0) { - mse = (double) mse_sum / count; - } else { - mse = mse_sum; - } - 
MS_LOG(DEBUG) << "mse: " << mse << std::endl; + mse = count > 0 ? static_cast(mse_sum) / count : mse_sum; + MS_LOG(INFO) << "mse: " << mse << std::endl; + EXPECT_LT(mse, kMseThreshold); } - MS_LOG(INFO) << "MindDataTestRandomCropDecodeResizeOp end!"; + + MS_LOG(INFO) << "RandomCropDecodeResizeOp test 1 finished"; } TEST_F(MindDataTestRandomCropDecodeResizeOp, TestOp1) { - MS_LOG(INFO) << "Doing MindDataTestRandomCropDecodeResizeOp"; - const unsigned int h = 884; - const unsigned int w = 718; - const float scale_lb = 0.1; - const float scale_ub = 1; - const float aspect_lb = 0.1; - const float aspect_ub = 10; + MS_LOG(INFO) << "starting RandomCropDecodeResizeOp test 2"; + constexpr int h = 884; + constexpr int w = 718; + constexpr float scale_lb = 0.1; + constexpr float scale_ub = 1; + constexpr float aspect_lb = 0.1; + constexpr float aspect_ub = 10; std::shared_ptr decoded, decoded_and_cropped, cropped_and_decoded; std::mt19937 rd; @@ -98,14 +96,14 @@ TEST_F(MindDataTestRandomCropDecodeResizeOp, TestOp1) { std::uniform_real_distribution rd_aspect(aspect_lb, aspect_ub); DecodeOp op(true); op.Compute(raw_input_tensor_, &decoded); - Status s1, s2; + Status crop_and_decode_status, decode_and_crop_status; float scale, aspect; int crop_width, crop_height; bool crop_success = false; - unsigned int mse_sum, m1, m2, count; - float mse; + int mse_sum, m1, m2, count; + double mse; - for (unsigned int k = 0; k < 100; ++k) { + for (int k = 0; k < 100; ++k) { mse_sum = 0; count = 0; for (auto i = 0; i < 100; i++) { @@ -132,13 +130,13 @@ TEST_F(MindDataTestRandomCropDecodeResizeOp, TestOp1) { int y = rd_y(rd); op.Compute(raw_input_tensor_, &decoded); - s1 = Crop(decoded, &decoded_and_cropped, x, y, crop_width, crop_height); - s2 = JpegCropAndDecode(raw_input_tensor_, &cropped_and_decoded, x, y, crop_width, crop_height); + crop_and_decode_status = Crop(decoded, &decoded_and_cropped, x, y, crop_width, crop_height); + decode_and_crop_status = JpegCropAndDecode(raw_input_tensor_, 
&cropped_and_decoded, x, y, crop_width, crop_height); { cv::Mat M1(crop_height, crop_width, CV_8UC3, decoded_and_cropped->StartAddr()); cv::Mat M2(crop_height, crop_width, CV_8UC3, cropped_and_decoded->StartAddr()); - for (unsigned int i = 0; i < crop_height; ++i) { - for (unsigned int j = 0; j < crop_width; ++j) { + for (int i = 0; i < crop_height; ++i) { + for (int j = 0; j < crop_width; ++j) { m1 = M1.at(i, j)[1]; m2 = M2.at(i, j)[1]; mse_sum += sqrt((m1 - m2) * (m1 - m2)); @@ -149,8 +147,9 @@ TEST_F(MindDataTestRandomCropDecodeResizeOp, TestOp1) { } } - mse = (count == 0) ? mse_sum : static_cast(mse_sum) / count; - MS_LOG(DEBUG) << "mse: " << mse << std::endl; + mse = count > 0 ? static_cast(mse_sum) / count : mse_sum; + MS_LOG(INFO) << "mse: " << mse << std::endl; + EXPECT_LT(mse, kMseThreshold); } - MS_LOG(INFO) << "MindDataTestRandomCropDecodeResizeOp end!"; + MS_LOG(INFO) << "RandomCropDecodeResizeOp test 2 finished"; } From ad76e1c42a4d73f65f906f95a2444c41e5ef5969 Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Sat, 4 Apr 2020 13:00:18 +0800 Subject: [PATCH 099/367] Fix some typo errors in pipeline module Signed-off-by: leonwanghui --- mindspore/ccsrc/ir/func_graph.cc | 12 ++-- mindspore/ccsrc/ir/func_graph.h | 4 +- mindspore/ccsrc/pipeline/action.cc | 2 +- mindspore/ccsrc/pipeline/base.h | 2 +- .../ccsrc/pipeline/parse/data_converter.cc | 21 +++--- .../ccsrc/pipeline/parse/function_block.cc | 4 +- mindspore/ccsrc/pipeline/parse/parse.cc | 50 ++++++------- mindspore/ccsrc/pipeline/parse/parse.h | 6 +- mindspore/ccsrc/pipeline/parse/parse_base.h | 4 +- .../ccsrc/pipeline/parse/python_adapter.cc | 2 + mindspore/ccsrc/pipeline/parse/resolve.cc | 6 +- mindspore/ccsrc/pipeline/pass.cc | 4 +- mindspore/ccsrc/pipeline/pipeline.cc | 70 +++++++++---------- mindspore/ccsrc/pipeline/pipeline_ge.cc | 16 ++--- mindspore/ccsrc/pipeline/resource.cc | 2 +- .../static_analysis/abstract_value.cc | 2 +- .../pipeline/static_analysis/abstract_value.h | 4 +- 
.../pipeline/static_analysis/evaluator.cc | 2 +- .../static_analysis/param_validator.cc | 10 +-- .../ccsrc/pipeline/static_analysis/prim.cc | 46 ++++++------ .../static_analysis/program_specialize.cc | 10 +-- .../static_analysis/static_analysis.cc | 12 ++-- 22 files changed, 149 insertions(+), 142 deletions(-) diff --git a/mindspore/ccsrc/ir/func_graph.cc b/mindspore/ccsrc/ir/func_graph.cc index f515367635..7404db4af0 100644 --- a/mindspore/ccsrc/ir/func_graph.cc +++ b/mindspore/ccsrc/ir/func_graph.cc @@ -64,14 +64,14 @@ AbstractFunctionPtr FuncGraph::abstract() { for (auto& p : parameters_) { MS_EXCEPTION_IF_NULL(p); if (p->abstract() == nullptr) { - MS_LOG(ERROR) << "error!!"; + MS_LOG(ERROR) << "Error!!"; return nullptr; } args_spec_list.push_back(p->abstract()); } if (nullptr == output()) { - MS_LOG(ERROR) << "error func graph no output"; + MS_LOG(ERROR) << "Error func graph no output"; return nullptr; } @@ -543,6 +543,7 @@ void FuncGraph::GenerateKwargReplNode(const FuncGraphPtr& specialized_graph, TraceManager::EndTrace(); } } + bool FuncGraph::NeedGenerate(const std::vector& kwarg_list) { // if the function does not have any vararg/kwarg/kwonly/default value/kw args input // return the original graph @@ -556,6 +557,7 @@ bool FuncGraph::NeedGenerate(const std::vector& } return true; } + void FuncGraph::GenerateDefaultValue(const FuncGraphPtr& specialized_graph, const std::vector& specialized_parameter_list, std::unordered_map* repl_nodes) { @@ -664,7 +666,7 @@ void FuncGraph::EraseUnusedNodeInOrder() { auto mng = manager_.lock(); if (mng) { auto nodes = mng->nodes()[shared_from_base()]; - // Erase unusued cnode. + // Erase unused cnode. 
for (auto it = order_.begin(); it != order_.end();) { if (nodes.count(*it)) { (void)it++; @@ -695,7 +697,7 @@ void FuncGraph::CheckOrder() { if (found == it) { DumpCNodeList(); MS_LOG(EXCEPTION) << "The cnode " << (*it)->DebugString() << " order in " << ToString() - << " doesn't obey the input denpency, " + << " doesn't obey the input dependency, " << "as input " << input_node->DebugString() << " is not ahead of itself."; } } @@ -842,5 +844,5 @@ void FuncGraph::SetEffectDepends(const std::vector& depend_inputs) { } const PrimitivePtr FuncGraphTransform::func_graph_prim_ = std::make_shared("FuncGraph"); -const char kFuncGraphFlagUndetermin[] = "Undeterminate"; +const char kFuncGraphFlagUndetermined[] = "Undeterminate"; } // namespace mindspore diff --git a/mindspore/ccsrc/ir/func_graph.h b/mindspore/ccsrc/ir/func_graph.h index 95b26de473..13e8c9dc8f 100644 --- a/mindspore/ccsrc/ir/func_graph.h +++ b/mindspore/ccsrc/ir/func_graph.h @@ -96,7 +96,7 @@ class FuncGraphBase : public Value { MS_DECLARE_PARENT(FuncGraphBase, Value); }; -extern const char kFuncGraphFlagUndetermin[]; +extern const char kFuncGraphFlagUndetermined[]; class FuncGraph : public FuncGraphBase { public: @@ -200,7 +200,7 @@ class FuncGraph : public FuncGraphBase { // get all func graphs directly used by this func graph const FuncGraphCounterMap &func_graphs_used(); - // get all func graphs nestedly used by this func graph + // get all func graphs nested used by this func graph const FuncGraphSet &func_graphs_used_total(); // get all users of this func graph diff --git a/mindspore/ccsrc/pipeline/action.cc b/mindspore/ccsrc/pipeline/action.cc index 83e524e5df..baf4bea7ec 100644 --- a/mindspore/ccsrc/pipeline/action.cc +++ b/mindspore/ccsrc/pipeline/action.cc @@ -183,7 +183,7 @@ bool SymbolResolveAction(const ResourcePtr& res) { FuncGraphPtr func_graph = res->func_graph(); auto succ = parse::ResolveFuncGraph(func_graph, res); - // Remove usued nodes in cnode order list. 
+ // Remove unused nodes in cnode order list. func_graph->EraseUnusedNodeInOrder(); func_graph->ReleaseFullOrderToEffectOrder(); for (auto fg : func_graph->func_graphs_used_total()) { diff --git a/mindspore/ccsrc/pipeline/base.h b/mindspore/ccsrc/pipeline/base.h index d007eac294..30524e84f6 100644 --- a/mindspore/ccsrc/pipeline/base.h +++ b/mindspore/ccsrc/pipeline/base.h @@ -40,7 +40,7 @@ using ExecutorInfoPtr = std::shared_ptr; inline std::string GetPhasePrefix(const std::string& phase) { auto pos = phase.find('.'); if (pos == std::string::npos) { - MS_LOG(EXCEPTION) << "phase has no . for prefix" << phase; + MS_LOG(EXCEPTION) << "Phase has no . for prefix" << phase; } return phase.substr(0, pos); } diff --git a/mindspore/ccsrc/pipeline/parse/data_converter.cc b/mindspore/ccsrc/pipeline/parse/data_converter.cc index aee7c35ba6..d25a202afc 100644 --- a/mindspore/ccsrc/pipeline/parse/data_converter.cc +++ b/mindspore/ccsrc/pipeline/parse/data_converter.cc @@ -39,7 +39,7 @@ using TensorPtr = mindspore::tensor::TensorPtr; namespace { bool ConvertTuple(const py::object& obj, ValuePtr* const data, bool use_signature) { - MS_LOG(DEBUG) << "converting python tuple"; + MS_LOG(DEBUG) << "Converting python tuple"; py::tuple tuple = obj.cast(); std::vector value_list; for (size_t it = 0; it < tuple.size(); ++it) { @@ -56,7 +56,7 @@ bool ConvertTuple(const py::object& obj, ValuePtr* const data, bool use_signatur } bool ConvertList(const py::object& obj, ValuePtr* const data, bool use_signature) { - MS_LOG(DEBUG) << "converting python list"; + MS_LOG(DEBUG) << "Converting python list"; py::list list = obj.cast(); std::vector value_list; @@ -73,7 +73,7 @@ bool ConvertList(const py::object& obj, ValuePtr* const data, bool use_signature } bool ConvertCellList(const py::object& obj, ValuePtr* const data, bool use_signature) { - MS_LOG(DEBUG) << "converting cell list"; + MS_LOG(DEBUG) << "Converting cell list"; py::sequence list = obj; std::vector value_list; for (size_t it = 0; it 
< list.size(); ++it) { @@ -89,7 +89,7 @@ bool ConvertCellList(const py::object& obj, ValuePtr* const data, bool use_signa } bool ConvertDict(const py::object& obj, ValuePtr* data, bool use_signature) { - MS_LOG(DEBUG) << "converting python dict"; + MS_LOG(DEBUG) << "Converting python dict"; py::dict dict_values = obj.cast(); std::vector> key_values; @@ -110,14 +110,14 @@ bool ConvertDict(const py::object& obj, ValuePtr* data, bool use_signature) { } void ConvertNameSpace(const py::object& obj, ValuePtr* const data) { - MS_LOG(DEBUG) << "converting python module"; + MS_LOG(DEBUG) << "Converting python module"; py::module mod = python_adapter::GetPyModule(PYTHON_MOD_PARSE_MODULE); py::object module_namespace = python_adapter::CallPyModFn(mod, PYTHON_MOD_GET_MODULE_NAMESPACE, obj); *data = std::make_shared(RESOLVE_NAMESPACE_NAME_MODULE, py::cast(module_namespace)); } void ConvertDataClass(py::object obj, ValuePtr* const data) { - MS_LOG(DEBUG) << "converting dataclass"; + MS_LOG(DEBUG) << "Converting dataclass"; // Maybe the obj is dataclass define auto desc = py::cast(python_adapter::CallPyObjMethod(obj, PYTHON_GET_OBJ_DESC, obj)); // desc has format "", strip the '<' and '>' by offset 1; @@ -247,7 +247,7 @@ bool ConvertOtherObj(py::object obj, ValuePtr* const data) { bool ConvertData(const py::object& obj, ValuePtr* const data, bool use_signature) { // check parameter valid if (data == nullptr) { - MS_LOG(ERROR) << " data is null pointer"; + MS_LOG(ERROR) << "Data is null pointer"; return false; } @@ -386,9 +386,9 @@ py::object CreatePythonObject(const py::object& type, const py::tuple& params) { py::module mod = python_adapter::GetPyModule(PYTHON_MOD_PARSE_MODULE); py::object obj; if (params.size() == 0) { - obj = python_adapter::CallPyModFn(mod, PYTHON_MOD_CRETAE_OBJ_INSTANCE, type); + obj = python_adapter::CallPyModFn(mod, PYTHON_MOD_CREATE_OBJ_INSTANCE, type); } else { - obj = python_adapter::CallPyModFn(mod, PYTHON_MOD_CRETAE_OBJ_INSTANCE, type, params); + obj 
= python_adapter::CallPyModFn(mod, PYTHON_MOD_CREATE_OBJ_INSTANCE, type, params); } return obj; } @@ -421,6 +421,7 @@ ValuePtr PyDataToValue(const py::object& obj) { (void)ConvertData(to_convert, &value); return value; } + void ClearObjectCache() { object_map_.clear(); object_graphs_map_.clear(); @@ -445,7 +446,7 @@ ClassPtr ParseDataClass(const py::object& cls_obj) { for (auto& item : names) { TypePtr type_value = item.second.cast(); MS_EXCEPTION_IF_NULL(type_value); - MS_LOG(DEBUG) << "(name: " << py::cast(item.first) << ", type: " << type_value->ToString() << ")"; + MS_LOG(DEBUG) << "(Name: " << py::cast(item.first) << ", type: " << type_value->ToString() << ")"; attributes.push_back(std::make_pair(py::cast(item.first), type_value)); } diff --git a/mindspore/ccsrc/pipeline/parse/function_block.cc b/mindspore/ccsrc/pipeline/parse/function_block.cc index 25cc3ab4d8..423e76c1d8 100644 --- a/mindspore/ccsrc/pipeline/parse/function_block.cc +++ b/mindspore/ccsrc/pipeline/parse/function_block.cc @@ -254,7 +254,7 @@ void FunctionBlock::Mature() { matured_ = true; } -// Force the conditon node to bool using bool operation +// Force the conditIon node to bool using bool operation CNodePtr FunctionBlock::ForceToBoolNode(const AnfNodePtr& cond) { TraceManager::DebugTrace(std::make_shared(cond->debug_info())); CNodePtr op_apply_node = func_graph()->NewCNode({MakeResolveOperation(NAMED_PRIMITIVE_BOOL), cond}); @@ -347,7 +347,7 @@ void FunctionBlock::InsertDependItemsBeforeReturn() { auto return_node = func_graph()->get_return(); if (return_node) { if (return_node->inputs().size() < 1) { - MS_LOG(EXCEPTION) << "length of inputs of output node is less than 2"; + MS_LOG(EXCEPTION) << "Length of inputs of output node is less than 2"; } old_ret = return_node->input(1); } else { diff --git a/mindspore/ccsrc/pipeline/parse/parse.cc b/mindspore/ccsrc/pipeline/parse/parse.cc index 60cc00a307..231b98ab00 100644 --- a/mindspore/ccsrc/pipeline/parse/parse.cc +++ 
b/mindspore/ccsrc/pipeline/parse/parse.cc @@ -294,14 +294,14 @@ FunctionBlockPtr Parser::ParseStatement(const FunctionBlockPtr &block, const py: TraceManager::EndTrace(); return stmt_block; } else { - errcode_ = PARSE_NODE_METHOD_UNSUPPORT; + errcode_ = PARSE_NODE_METHOD_UNSUPPORTED; py::list location = ast_->CallParserObjMethod(PYTHON_PARSE_GET_LOCATION, node); if (location.size() < 2) { MS_LOG(EXCEPTION) << "List size should not be less than 2."; } auto filename = location[0].cast(); auto line_no = location[1].cast(); - MS_LOG(EXCEPTION) << "unsupported syntax '" << node_name << "' at " << filename << ":" << line_no; + MS_LOG(EXCEPTION) << "Unsupported syntax '" << node_name << "' at " << filename << ":" << line_no; } } @@ -324,11 +324,11 @@ AnfNodePtr Parser::ParseExprNode(const FunctionBlockPtr &block, const py::object TraceManager::EndTrace(); return expr_node; } else { - errcode_ = PARSE_NODE_METHOD_UNSUPPORT; + errcode_ = PARSE_NODE_METHOD_UNSUPPORTED; py::list ret = ast_->CallParserObjMethod(PYTHON_PARSE_GET_LOCATION, node); auto filename = ret[0].cast(); auto line_no = ret[1].cast(); - MS_LOG(EXCEPTION) << "unsupported syntax '" << node_name << "' at " << filename << ":" << line_no; + MS_LOG(EXCEPTION) << "Unsupported syntax '" << node_name << "' at " << filename << ":" << line_no; } } @@ -339,7 +339,7 @@ FunctionBlockPtr Parser::ParseExpr(const FunctionBlockPtr &block, const py::obje // Expr only have value , no target py::tuple expand_info = ast_->CallParserObjMethod(PYTHON_PARSE_EXPAND_EXPR_STATEMENT, node); - // refer pypthon function expand_expr_statement, expand_info is one of the following: + // refer python function expand_expr_statement, expand_info is one of the following: // True, expr.value, x // True, expr.value // False, None, None @@ -453,8 +453,8 @@ AnfNodePtr Parser::ParseNum(const FunctionBlockPtr &, const py::object &node) { return NewValueNode(data); } else { // no else actually - MS_LOG(ERROR) << "unsupported Num type : " << 
(std::string)py::str(obj) << GetLocation(node)->ToString(); - errcode_ = PARSE_NODE_TYPE_UNKONW; + MS_LOG(ERROR) << "Unsupported Num type : " << (std::string)py::str(obj) << GetLocation(node)->ToString(); + errcode_ = PARSE_NODE_TYPE_UNKOWN; return nullptr; } } @@ -478,8 +478,8 @@ AnfNodePtr Parser::ParseNameConstant(const FunctionBlockPtr &, const py::object return NewValueNode(kNone); } else { // no else actually - MS_LOG(ERROR) << "unsupported NameConstant type: " << (std::string)py::str(obj) << GetLocation(node)->ToString(); - errcode_ = PARSE_NODE_TYPE_UNKONW; + MS_LOG(ERROR) << "Unsupported NameConstant type: " << (std::string)py::str(obj) << GetLocation(node)->ToString(); + errcode_ = PARSE_NODE_TYPE_UNKOWN; return nullptr; } } @@ -497,7 +497,7 @@ AnfNodePtr Parser::ParseCall(const FunctionBlockPtr &block, const py::object &no // process function call py::object function_ast_node = python_adapter::GetPyObjAttr(node, "func"); AnfNodePtr call_function_anf_node = ParseExprNode(block, function_ast_node); - // function call arguments should be passed in as groups and upacked later using unpack call + // function call arguments should be passed in as groups and unpacked later using unpack call py::list args = python_adapter::GetPyObjAttr(node, "args"); std::vector packed_arguments; std::vector group_arguments; @@ -614,7 +614,7 @@ AnfNodePtr Parser::ParseAttribute(const FunctionBlockPtr &block, const py::objec py::object value_body = python_adapter::GetPyObjAttr(node, "value"); AnfNodePtr value_node = ParseExprNode(block, value_body); if (value_node == nullptr) { - MS_LOG(WARNING) << "Parse Attribut failed"; + MS_LOG(WARNING) << "Parse attribute failed"; return nullptr; } @@ -637,7 +637,7 @@ AnfNodePtr Parser::ParseCompare(const FunctionBlockPtr &block, const py::object // which there is two ops , but we only support one now py::list ops = python_adapter::GetPyObjAttr(node, "ops"); if (ops.size() > MAX_COMPARISON_OPS_SUPPORTED) { - MS_LOG(ERROR) << "mindspore does 
not support comparison with operators more than one now, ops size =" << ops.size(); + MS_LOG(ERROR) << "MindSpore does not support comparison with operators more than one now, ops size =" << ops.size(); return nullptr; } @@ -817,7 +817,7 @@ AnfNodePtr Parser::ParseIndex(const FunctionBlockPtr &block, const py::object &n // process a UnaryOp, +a, -b AnfNodePtr Parser::ParseUnaryOp(const FunctionBlockPtr &block, const py::object &node) { - MS_LOG(DEBUG) << "process ast UnaryOp"; + MS_LOG(DEBUG) << "Process ast UnaryOp"; py::object op = python_adapter::GetPyObjAttr(node, "op"); MS_EXCEPTION_IF_NULL(block); @@ -831,7 +831,7 @@ AnfNodePtr Parser::ParseUnaryOp(const FunctionBlockPtr &block, const py::object // process a dict ast node expression AnfNodePtr Parser::ParseDict(const FunctionBlockPtr &block, const py::object &node) { - MS_LOG(DEBUG) << "process ast Dict"; + MS_LOG(DEBUG) << "Process ast Dict"; py::list keys = node.attr("keys"); py::list values = node.attr("values"); std::vector key_nodes; @@ -849,7 +849,7 @@ AnfNodePtr Parser::ParseDict(const FunctionBlockPtr &block, const py::object &no // process a augment assign such as a += b; FunctionBlockPtr Parser::ParseAugAssign(const FunctionBlockPtr &block, const py::object &node) { - MS_LOG(DEBUG) << "process ast AugAssign"; + MS_LOG(DEBUG) << "Process ast AugAssign"; py::object op = python_adapter::GetPyObjAttr(node, "op"); MS_EXCEPTION_IF_NULL(block); @@ -864,10 +864,10 @@ FunctionBlockPtr Parser::ParseAugAssign(const FunctionBlockPtr &block, const py: } else if (ast_->IsClassMember(target_node)) { read_node = ParseAttribute(block, target_node); } else { - MS_LOG(EXCEPTION) << "not supported augassign"; + MS_LOG(EXCEPTION) << "Not supported augassign"; } if (read_node == nullptr) { - MS_LOG(EXCEPTION) << "can not get target node "; + MS_LOG(EXCEPTION) << "Can not get target node "; } py::object value = python_adapter::GetPyObjAttr(node, "value"); @@ -879,7 +879,7 @@ FunctionBlockPtr Parser::ParseAugAssign(const 
FunctionBlockPtr &block, const py: // process global declaration such as 'global x'; FunctionBlockPtr Parser::ParseGlobal(const FunctionBlockPtr &block, const py::object &node) { - MS_LOG(DEBUG) << "process ast Global"; + MS_LOG(DEBUG) << "Process ast Global"; MS_EXCEPTION_IF_NULL(block); py::list vars = python_adapter::GetPyObjAttr(node, "names"); for (auto &item : vars) { @@ -890,7 +890,7 @@ FunctionBlockPtr Parser::ParseGlobal(const FunctionBlockPtr &block, const py::ob // process a if statement FunctionBlockPtr Parser::ParseIf(const FunctionBlockPtr &block, const py::object &node) { - MS_LOG(DEBUG) << "process ast If"; + MS_LOG(DEBUG) << "Process ast If"; py::object test_node = python_adapter::GetPyObjAttr(node, "test"); AnfNodePtr condition_node = ParseExprNode(block, test_node); MS_EXCEPTION_IF_NULL(block); @@ -934,7 +934,7 @@ FunctionBlockPtr Parser::ParseIf(const FunctionBlockPtr &block, const py::object } FunctionBlockPtr Parser::ParseWhile(const FunctionBlockPtr &block, const py::object &node) { - MS_LOG(DEBUG) << "process ast While"; + MS_LOG(DEBUG) << "Process ast While"; MS_EXCEPTION_IF_NULL(block); MS_LOG(INFO) << "Parse while statement"; TraceManager::DebugTrace(std::make_shared(block->func_graph()->debug_info())); @@ -999,7 +999,7 @@ FunctionBlockPtr Parser::GenerateBlockInFor(const TraceInfoPtr &trace_info) { // x, it = next(it) // body FunctionBlockPtr Parser::ParseFor(const FunctionBlockPtr &block, const py::object &node) { - MS_LOG(DEBUG) << "process ast For"; + MS_LOG(DEBUG) << "Process ast For"; MS_EXCEPTION_IF_NULL(block); AnfNodePtr op_iter = block->MakeResolveOperation(NAMED_PRIMITIVE_ITER); AnfNodePtr op_next = block->MakeResolveOperation(NAMED_PRIMITIVE_NEXT); @@ -1054,7 +1054,7 @@ FunctionBlockPtr Parser::ParseFor(const FunctionBlockPtr &block, const py::objec return after_block; } AnfNodePtr Parser::ParseIfExp(const FunctionBlockPtr &block, const py::object &node) { - MS_LOG(DEBUG) << "process ast IfExp"; + MS_LOG(DEBUG) << "Process ast 
IfExp"; MS_EXCEPTION_IF_NULL(block); py::object test_node = python_adapter::GetPyObjAttr(node, "test"); AnfNodePtr condition_node = ParseExprNode(block, test_node); @@ -1163,7 +1163,7 @@ void Parser::HandleAssignSubscript(const FunctionBlockPtr &block, const py::obje void Parser::WriteAssignVars(const FunctionBlockPtr &block, const py::object &targ, const AnfNodePtr &value_node) { MS_EXCEPTION_IF_NULL(value_node); - MS_LOG(DEBUG) << "process WriteAssignVars"; + MS_LOG(DEBUG) << "Process WriteAssignVars"; auto ast_type = AstSubType(py::cast(ast_->CallParserObjMethod(PYTHON_PARSE_GET_AST_TYPE, targ))); if (ast_type == AST_SUB_TYPE_NAME) { HandleAssignName(block, targ, value_node); @@ -1174,7 +1174,7 @@ void Parser::WriteAssignVars(const FunctionBlockPtr &block, const py::object &ta } else if (ast_->IsClassMember(targ)) { HandleAssignClassMember(block, targ, value_node); } else { - MS_LOG(EXCEPTION) << "not supported assign type: " << ast_type + MS_LOG(EXCEPTION) << "Not supported assign type: " << ast_type << " NodeInfo: " << trace::GetDebugInfo(value_node->debug_info()); } } @@ -1340,7 +1340,7 @@ bool ParseAst::UpdateFuncGraphFlags(const FuncGraphPtr &func_graph) { py::dict flags = python_adapter::GetPyObjAttr(obj_, PYTHON_EXTERN_MINDSPORE_FLAG); for (auto &item : flags) { if (!py::isinstance(item.first) || !py::isinstance(item.second)) { - MS_LOG(ERROR) << "type error in flags dict convert"; + MS_LOG(ERROR) << "Type error in flags dict convert"; return false; } auto name = py::cast(item.first); diff --git a/mindspore/ccsrc/pipeline/parse/parse.h b/mindspore/ccsrc/pipeline/parse/parse.h index 3e891e47dd..4dd1bc62aa 100644 --- a/mindspore/ccsrc/pipeline/parse/parse.h +++ b/mindspore/ccsrc/pipeline/parse/parse.h @@ -40,8 +40,8 @@ enum ParseStatusCode : int { PARSE_PARAMETER_INVALID, // parameter is invalid PARSE_NO_RETURN, // function no return node PARSE_NODE_TYPE_NO_MATCH, // ast node type is error - PARSE_NODE_TYPE_UNKONW, // node type is unkonw - 
PARSE_NODE_METHOD_UNSUPPORT, // no method to parse the node + PARSE_NODE_TYPE_UNKOWN, // node type is unkown + PARSE_NODE_METHOD_UNSUPPORTED, // no method to parse the node PARSE_DONT_RESOLVE_SYMBOL, // can't resolve the string PARSE_NOT_SUPPORTED_COMPARE_EXPR, // the comparison is not supported PARSE_FAILURE = 0xFF @@ -102,7 +102,7 @@ class Parser { AnfNodePtr ParseCall(const FunctionBlockPtr &block, const py::object &node); // process the if expression AnfNodePtr ParseIfExp(const FunctionBlockPtr &block, const py::object &node); - // process calss type define + // process class type define AnfNodePtr ParseAttribute(const FunctionBlockPtr &block, const py::object &node); // process a compare expression AnfNodePtr ParseCompare(const FunctionBlockPtr &block, const py::object &node); diff --git a/mindspore/ccsrc/pipeline/parse/parse_base.h b/mindspore/ccsrc/pipeline/parse/parse_base.h index 9f92687b6f..df2d1968a5 100644 --- a/mindspore/ccsrc/pipeline/parse/parse_base.h +++ b/mindspore/ccsrc/pipeline/parse/parse_base.h @@ -45,7 +45,7 @@ enum AstSubType : int { // define the parse target type enum ParseTargetTypeDef { - PARSE_TARGET_FUNCTION = 0, // funciton + PARSE_TARGET_FUNCTION = 0, // function PARSE_TARGET_METHOD = 1, // method PARSE_TARGET_OBJECT_INSTANCE = 2, // object instance PARSE_TARGET_UNKNOW = 0xFF // ERROR TYPE @@ -59,7 +59,7 @@ const char PYTHON_MOD_RESOLVE_GET_OBJ_KEY[] = "get_object_key"; const char PYTHON_MOD_PARSE_CHECK_IS_CLASS_MEMBER[] = "is_class_member"; const char PYTHON_MOD_RESOLVE_GET_OBJ_TYPE[] = "get_obj_type"; const char PYTHON_MOD_GET_CLASS_INSTANCE_TYPE[] = "get_class_instance_type"; -const char PYTHON_MOD_CRETAE_OBJ_INSTANCE[] = "create_obj_instance"; +const char PYTHON_MOD_CREATE_OBJ_INSTANCE[] = "create_obj_instance"; const char PYTHON_MOD_GET_DATACLASS_ATTRS[] = "get_dataclass_attributes"; const char PYTHON_MOD_GET_DATACLASS_METHODS[] = "get_dataclass_methods"; const char PYTHON_MOD_GET_MODULE_NAMESPACE[] = "get_module_namespace"; 
diff --git a/mindspore/ccsrc/pipeline/parse/python_adapter.cc b/mindspore/ccsrc/pipeline/parse/python_adapter.cc index db40238729..e2c86164d4 100644 --- a/mindspore/ccsrc/pipeline/parse/python_adapter.cc +++ b/mindspore/ccsrc/pipeline/parse/python_adapter.cc @@ -50,6 +50,7 @@ void SetPythonPath(const std::string& path) { (void)sys_path.attr("append")(path.c_str()); } } + std::shared_ptr set_python_scoped() { // if start process from python, no need set the python scope. if (!python_env_) { @@ -79,6 +80,7 @@ py::object GetPyObjAttr(const py::object& obj, const std::string& attr) { } return py::none(); } + py::object GetPyFn(const std::string& module, const std::string& name) { (void)python_adapter::set_python_scoped(); if (!module.empty() && !name.empty()) { diff --git a/mindspore/ccsrc/pipeline/parse/resolve.cc b/mindspore/ccsrc/pipeline/parse/resolve.cc index 976c474aa4..ebc1f65486 100644 --- a/mindspore/ccsrc/pipeline/parse/resolve.cc +++ b/mindspore/ccsrc/pipeline/parse/resolve.cc @@ -53,6 +53,7 @@ abstract::AbstractBasePtr ClassType::ToAbstract() { ret_val->set_value_desc(ToString()); return ret_val; } + // call python PYTHON_MOD_RESOLVE_FUNCTION interface to resolve the symbol in corresponding namespace bool SymbolResolver::Resolve() { py::module mod = python_adapter::GetPyModule(PYTHON_MOD_PARSE_MODULE); @@ -127,7 +128,7 @@ bool ResolveObjectToNode(const FuncGraphPtr& func_graph, const py::object& obj, MS_LOG(ERROR) << "Resolve parameter object failed, got nullptr"; return false; } - MS_LOG(DEBUG) << "add param graph:" << func_graph->ToString() << ", " << param->DebugString(); + MS_LOG(DEBUG) << "Add param graph:" << func_graph->ToString() << ", " << param->DebugString(); output = param; } else if (py::hasattr(obj, "__parameter_tuple__")) { @@ -160,6 +161,7 @@ bool ResolveObjectToNode(const FuncGraphPtr& func_graph, const py::object& obj, *node = output; return true; } + // transform the ValueTuple or ValueList of graph node to make tuple of const graph node 
bool TransformVectorGraphValueNode(const FuncGraphManagerPtr& manager, const AnfNodePtr& node, const ValueNodePtr& value_node, AnfNodePtr* const transformed) { @@ -175,7 +177,7 @@ bool TransformVectorGraphValueNode(const FuncGraphManagerPtr& manager, const Anf continue; } if (has_graph_in_list) { - MS_LOG(EXCEPTION) << "list has graph in it , but not all is graph"; + MS_LOG(EXCEPTION) << "List has graph in it, but not all is graph"; } } // The celllist or ordered_cell will be parsed as valuetuple of const graph in it, diff --git a/mindspore/ccsrc/pipeline/pass.cc b/mindspore/ccsrc/pipeline/pass.cc index e2626d5314..d89a0090a7 100644 --- a/mindspore/ccsrc/pipeline/pass.cc +++ b/mindspore/ccsrc/pipeline/pass.cc @@ -195,13 +195,13 @@ void ReclaimOptimizer() { bool OptPassGroup(const ResourcePtr& res, const std::string& name) { if (res->func_graph() == nullptr) { - MS_LOG(ERROR) << "opt passes int error"; + MS_LOG(ERROR) << "Opt passes int error"; return false; } abstract::AbstractBasePtrList args = res->args_spec(); FuncGraphPtr func_graph = res->func_graph(); - MS_LOG(DEBUG) << "start " << name << " func graph:" << func_graph->ToString() << ", " + MS_LOG(DEBUG) << "Start " << name << " func graph:" << func_graph->ToString() << ", " << func_graph->get_return()->DebugString(true); InitOpt(res); if (g_pass_opts.find(name) != g_pass_opts.end()) { diff --git a/mindspore/ccsrc/pipeline/pipeline.cc b/mindspore/ccsrc/pipeline/pipeline.cc index 861862b849..0d7790fb36 100644 --- a/mindspore/ccsrc/pipeline/pipeline.cc +++ b/mindspore/ccsrc/pipeline/pipeline.cc @@ -98,7 +98,7 @@ py::tuple GenerateKey(const std::string& name, const std::unordered_mapfunc_graph; } std::size_t ExecutorPy::ArgListSize(const std::string& phase) { if (info_.count(phase) == 0) { - MS_LOG(EXCEPTION) << "no phase in executor:" << GetPhasePrefix(phase); + MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase); } return info_[phase]->arg_list_size; } @@ -243,7 +243,7 @@ void 
ExecutorPy::DelNetRes(const std::string& id) { auto tmp_info = info_; for (auto& item : tmp_info) { if (item.first.find(id) != string::npos) { - MS_LOG(INFO) << "delete network res:" << item.first; + MS_LOG(INFO) << "Delete network res:" << item.first; (void)info_.erase(item.first); flag = true; } @@ -262,7 +262,7 @@ void ExecutorPy::DelNetRes(const std::string& id) { } void ExecutorPy::ClearRes() { - MS_LOG(INFO) << "clean executor Resrouce!"; + MS_LOG(INFO) << "Clean executor resource!"; executor_ = nullptr; } @@ -278,27 +278,27 @@ void ExecutorPy::SaveCompiledGraph(const std::string& phase_s) { MS_EXCEPTION_IF_NULL(parallel::ParallelContext::GetInstance()); std::string parallel_mode = parallel::ParallelContext::GetInstance()->parallel_mode(); - MS_LOG(INFO) << "save compiled func graph(" << func_graph->ToString() << ") phase(" << phase_s << ")!"; + MS_LOG(INFO) << "Save compiled func graph(" << func_graph->ToString() << ") phase(" << phase_s << ")!"; info_[phase_s]->func_graph = func_graph; if ((func_graph != nullptr) && ((parallel_mode == parallel::AUTO_PARALLEL) || (parallel_mode == parallel::SEMI_AUTO_PARALLEL))) { - MS_LOG(DEBUG) << "save model parallel parameter layout graph!"; + MS_LOG(DEBUG) << "Save model parallel parameter layout graph!"; func_graph = info_[phase_s]->resource->results()[kStepParallelGraph].cast(); - ExecutorInfoPtr excutor_info = std::make_shared(); + ExecutorInfoPtr executor_info = std::make_shared(); std::string layout_graph = phase_s + kStepParallelGraph; - excutor_info->func_graph = func_graph; - info_[layout_graph] = excutor_info; + executor_info->func_graph = func_graph; + info_[layout_graph] = executor_info; } else { - MS_LOG(DEBUG) << "save model parallel parameter layout graph null!"; + MS_LOG(DEBUG) << "Save model parallel parameter layout graph null!"; } - MS_LOG(INFO) << "end save compiled func graph!"; + MS_LOG(INFO) << "End save compiled func graph!"; } bool ExecutorPy::ChangeExportGeirUseVmFlag(bool use_vm, const 
std::string& phase_s) const { std::string phase_prefix = GetPhasePrefix(phase_s); if (use_vm && phase_prefix == "export") { - MS_LOG(INFO) << "use ge backend to export geir"; + MS_LOG(INFO) << "Use ge backend to export geir"; use_vm = false; } return use_vm; @@ -316,7 +316,7 @@ void ExecutorPy::GetGeBackendPolicy() const { bool ExecutorPy::CompileInner(const py::object& obj, const py::tuple& args, const py::object& phase, bool use_vm) { MS_LOG(DEBUG) << "Start ExecutorPy compile!"; if ((!py::isinstance(phase))) { - MS_LOG(ERROR) << "arg phase must be string."; + MS_LOG(ERROR) << "Arg phase must be string."; return false; } // check the arg valid? @@ -327,7 +327,7 @@ bool ExecutorPy::CompileInner(const py::object& obj, const py::tuple& args, cons #ifdef ENABLE_GE GetGeBackendPolicy(); #endif - ExecutorInfoPtr excutor_info = std::make_shared(); + ExecutorInfoPtr executor_info = std::make_shared(); std::string phase_s = py::cast(phase); MS_LOG(INFO) << "ExecutorPy compile phase:" << phase_s << "!"; ResourcePtr resource = std::make_shared(obj); @@ -353,16 +353,16 @@ bool ExecutorPy::CompileInner(const py::object& obj, const py::tuple& args, cons ValuePtr converted = nullptr; bool succ = parse::ConvertData(args[i], &converted); if (!succ) { - MS_LOG(EXCEPTION) << "args convert error"; + MS_LOG(EXCEPTION) << "Args convert error"; } bool broaden = true; args_spec.push_back(abstract::FromValue(converted, broaden)); } resource->set_args_spec(args_spec); - excutor_info->arg_list_size = size; - excutor_info->resource = resource; - info_[phase_s] = excutor_info; + executor_info->arg_list_size = size; + executor_info->resource = resource; + info_[phase_s] = executor_info; pip->Run(); // save the run graph func to MsPipeLine @@ -439,7 +439,7 @@ std::string GetMsIrFile(void) { char real_path[PATH_MAX] = {0}; if (realpath(path, real_path) == nullptr) { - MS_LOG(ERROR) << "MS IR Path error, " << path; + MS_LOG(ERROR) << "MS IR path error, " << path; return file; } file = real_path; 
@@ -485,7 +485,7 @@ void RunPipelineAction(const ActionItem& action, pipeline::ResourcePtr resource, #endif void Pipeline::Run() { - MS_LOG(INFO) << "pipeline run"; + MS_LOG(INFO) << "Pipeline run"; MS_EXCEPTION_IF_NULL(resource_); FuncGraphPtr user_graph = nullptr; @@ -507,7 +507,7 @@ void Pipeline::Run() { MS_LOG(DEBUG) << "Action " << action.first << " end."; }; if (!result) { - MS_LOG(EXCEPTION) << "pipeline running to end, failed in step:" << action.first; + MS_LOG(EXCEPTION) << "Pipeline running to end, failed in step:" << action.first; } if (MsContext::GetInstance()->save_graphs_flag() && resource_->func_graph() != nullptr) { auto graph = resource_->func_graph(); @@ -555,7 +555,7 @@ void Pipeline::Run() { if (MsContext::GetInstance()->save_graphs_flag() && (user_graph != nullptr)) { std::string user_graph_file = GetFilePathName("ModelDigraph.dot"); - MS_LOG(DEBUG) << "save user graph to: " << user_graph_file; + MS_LOG(DEBUG) << "Save user graph to: " << user_graph_file; draw::DrawUserFuncGraph(user_graph_file, user_graph); #ifdef ENABLE_DUMP_IR @@ -572,7 +572,7 @@ void Pipeline::Run() { ChangeFileMode(filename, S_IRUSR); #endif } - MS_LOG(INFO) << "end"; + MS_LOG(INFO) << "End"; } void ExecutorPy::ProcessVmArg(const py::tuple& args, const std::string& phase, VectorRef* arg_list) { @@ -582,7 +582,7 @@ void ExecutorPy::ProcessVmArg(const py::tuple& args, const std::string& phase, V py::object arg = args[i]; auto ms_context = MsContext::GetInstance(); if (ms_context->backend_policy() == kMsConvert && py::isinstance(arg)) { - MS_LOG(EXCEPTION) << "args[" << i << "] is numpy array, not tensor"; + MS_LOG(EXCEPTION) << "Args[" << i << "] is numpy array, not tensor"; } (*arg_list).push_back(arg); } @@ -642,9 +642,9 @@ py::object ExecutorPy::Run(const py::tuple& args, const py::object& phase) { MS_LOG(EXCEPTION) << "Can't find run graph func for " << phase_s; } - MS_LOG(DEBUG) << "eval run" << backend; + MS_LOG(DEBUG) << "Eval run" << backend; BaseRef value = 
(*run)(arg_list); - MS_LOG(DEBUG) << "run end"; + MS_LOG(DEBUG) << "Run end"; return BaseRefToPyData(value); } @@ -704,9 +704,9 @@ bool InitExecDatasetVm(const std::string& queue_name, int64_t size, int64_t batc p_init->set_attr("shapes", MakeValue(int_shapes)); p_init->set_attr("input_indexes", MakeValue(int_input_indexes)); - const std::vector emply_str_list; - p_init->set_attr("input_names", MakeValue(emply_str_list)); - p_init->set_attr("output_names", MakeValue(emply_str_list)); + const std::vector empty_str_list; + p_init->set_attr("input_names", MakeValue(empty_str_list)); + p_init->set_attr("output_names", MakeValue(empty_str_list)); FuncGraphPtr func_graph = std::make_shared(); auto app_init = std::make_shared(AnfNodePtrList{NewValueNode(p_init)}, func_graph); @@ -730,7 +730,7 @@ bool InitExecDatasetVm(const std::string& queue_name, int64_t size, int64_t batc if (!(*runner.run)) { // empty function - MS_LOG(EXCEPTION) << "Backend " << backend->name() << " unsupports tdt dataset."; + MS_LOG(EXCEPTION) << "Backend " << backend->name() << " unsupported tdt dataset."; } // launch init dataset runner without inputs and outputs @@ -758,7 +758,7 @@ void InitHccl() { auto runtime_instance = device::KernelRuntimeManager::Instance().GetKernelRuntime(device_name, device_id); MS_EXCEPTION_IF_NULL(runtime_instance); if (!runtime_instance->Init()) { - MS_LOG(ERROR) << "kernel runtime init error."; + MS_LOG(ERROR) << "Kernel runtime init error."; return; } } @@ -795,7 +795,7 @@ void InitGe() { auto ms_context = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(ms_context); if (!ms_context->OpenTsd()) { - MS_LOG(EXCEPTION) << "open tsd failed"; + MS_LOG(EXCEPTION) << "Open tsd failed"; } (void)ms_context->InitGe(); } diff --git a/mindspore/ccsrc/pipeline/pipeline_ge.cc b/mindspore/ccsrc/pipeline/pipeline_ge.cc index 5a2a8039dd..abf55bb2d8 100644 --- a/mindspore/ccsrc/pipeline/pipeline_ge.cc +++ b/mindspore/ccsrc/pipeline/pipeline_ge.cc @@ -210,7 +210,7 @@ bool 
AddDFGraph(const std::map& info, const py::di (void)convertor.GenerateCheckpointGraph(); if (convertor.ErrCode() != 0) { DfGraphManager::GetInstance().ClearGraph(); - MS_LOG(ERROR) << "convert df graph failed, err:" << convertor.ErrCode(); + MS_LOG(ERROR) << "Convert df graph failed, err:" << convertor.ErrCode(); return false; } @@ -238,7 +238,7 @@ bool AddDFGraph(const std::map& info, const py::di FuncGraphPtr BuildDFGraph(const std::map& info, const py::dict& init_params, const std::string& phase, const py::object& broadcast_params) { if (info.count(phase) == 0) { - MS_LOG(EXCEPTION) << "no phase in executor:" << GetPhasePrefix(phase); + MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase); } FuncGraphPtr anf_graph = info.at(phase)->func_graph; @@ -389,7 +389,7 @@ std::shared_ptr DoExecGraph(const FuncGraphPtr& graph, const std::ve const std::string& phase) { std::vector ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW); if (ge_tensors.size() != inputs.size()) { - MS_LOG(ERROR) << "args convert to ge tensor error"; + MS_LOG(ERROR) << "Args convert to ge tensor error"; return nullptr; } @@ -444,7 +444,7 @@ void ProcessGeArg(const std::map& info, const py:: std::size_t size = args.size(); if (info.count(phase) == 0) { - MS_LOG(EXCEPTION) << "no phase in executor:" << GetPhasePrefix(phase); + MS_LOG(EXCEPTION) << "No phase in executor:" << GetPhasePrefix(phase); } auto arg_size = info.at(phase)->arg_list_size; @@ -459,12 +459,12 @@ void ProcessGeArg(const std::map& info, const py:: ValuePtr converted = nullptr; bool succ = parse::ConvertData(args[i], &converted); if (!succ) { - MS_LOG(EXCEPTION) << "args convert error"; + MS_LOG(EXCEPTION) << "Args convert error"; } if (converted->isa()) { (*inputs).push_back(converted->cast()); } else { - MS_LOG(EXCEPTION) << "args, " << converted->ToString() << " is not tensor"; + MS_LOG(EXCEPTION) << "Args " << converted->ToString() << " is not tensor"; } } } @@ -481,7 +481,7 @@ py::object 
ExecDFGraph(const std::map& info, const } if (info.count(phase) == 0) { - MS_LOG(EXCEPTION) << "has no phase:" << phase; + MS_LOG(EXCEPTION) << "There is no phase:" << phase; } FuncGraphPtr anf_graph = info.at(phase)->func_graph; @@ -511,7 +511,7 @@ py::object ExecDFGraph(const std::map& info, const if (ret != nullptr) { return *ret; } else { - MS_LOG(EXCEPTION) << "exec graph failed"; + MS_LOG(EXCEPTION) << "Exec graph failed"; } } void ExportDFGraph(const std::string& file_name, const std::string& phase) { diff --git a/mindspore/ccsrc/pipeline/resource.cc b/mindspore/ccsrc/pipeline/resource.cc index 59ee04ad15..18695518be 100644 --- a/mindspore/ccsrc/pipeline/resource.cc +++ b/mindspore/ccsrc/pipeline/resource.cc @@ -189,7 +189,7 @@ Resource::~Resource() { // If exit normally, these global variables will be cleaned // in Resource::Clean call by MsPipeline::Compile, but if exit with MS_LOGEXCEPTION, // these global variables may not being cleaned, it may - // cause segmentfault when free python object inside these global varaibles + // cause segmentfault when free python object inside these global variables // after python interpreter got freed, so these global variables // are cleaned here. 
// So if exit normally, these global variable will be cleaned twice, diff --git a/mindspore/ccsrc/pipeline/static_analysis/abstract_value.cc b/mindspore/ccsrc/pipeline/static_analysis/abstract_value.cc index eef4e8b4ad..555a6d87c0 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/abstract_value.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/abstract_value.cc @@ -443,7 +443,7 @@ bool AbstractTensor::operator==(const AbstractTensor &other) const { auto v1 = GetValueTrack(); auto v2 = other.GetValueTrack(); if (v1 == nullptr || v2 == nullptr) { - MS_LOG(EXCEPTION) << "the value of AbstractTensor is nullptr"; + MS_LOG(EXCEPTION) << "The value of AbstractTensor is nullptr"; } bool is_value_equal = (v1 == v2); diff --git a/mindspore/ccsrc/pipeline/static_analysis/abstract_value.h b/mindspore/ccsrc/pipeline/static_analysis/abstract_value.h index 9d9585bba3..9e0dd82003 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/abstract_value.h +++ b/mindspore/ccsrc/pipeline/static_analysis/abstract_value.h @@ -77,7 +77,7 @@ class AbstractBase : public Base { } protected: - // default implementation, it can be overrided by subclass; + // default implementation, it can be overwritten by subclass; virtual ValuePtr RealBuildValue() const { return kAnyValue; } private: @@ -495,7 +495,7 @@ class AbstractNone : public AbstractBase { }; using AbstractNonePtr = std::shared_ptr; -// the un assgined state value for variable, which means the variable is not assigned +// the un assigned state value for variable, which means the variable is not assigned class AbstractNull : public AbstractBase { public: AbstractNull() : AbstractBase(kNullObj) { set_type(std::make_shared()); } diff --git a/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc b/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc index 251f218145..9b120f731c 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc @@ -116,7 +116,7 @@ 
AbstractBasePtrList FuncGraphEvaluator::NormalizeArgs(const AbstractBasePtrList return broaded_list; } - if (func_graph_->has_flag(kFuncGraphFlagUndetermin)) { + if (func_graph_->has_flag(kFuncGraphFlagUndetermined)) { if (parent_context_) { MS_LOG(DEBUG) << "Undeterminate FuncGraphEvaluator " << ToString() << ", context: " << parent_context_->ToString(); diff --git a/mindspore/ccsrc/pipeline/static_analysis/param_validator.cc b/mindspore/ccsrc/pipeline/static_analysis/param_validator.cc index 1b70e2fe22..69f6af0dc0 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/param_validator.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/param_validator.cc @@ -56,7 +56,7 @@ TypePtr CheckTensorDType(const AbstractTensorPtr &tensor, const TypePtrList &acc } TypePtr ele_type = tensor->element()->BuildType(); if (ele_type == nullptr) { - MS_LOG(EXCEPTION) << "abstract tensor element type nullptr"; + MS_LOG(EXCEPTION) << "Abstract tensor element type nullptr"; } return CheckType(ele_type, accepts, error_message_prefix); } @@ -64,7 +64,7 @@ TypePtr CheckTensorDType(const AbstractTensorPtr &tensor, const TypePtrList &acc TypePtr CheckTensorsDTypeSame(const AbstractTensorPtrList &tensor_list, const TypePtrList &accepts, const std::string &error_message_prefix) { if (tensor_list.empty()) { - MS_LOG(EXCEPTION) << "array list is empty"; + MS_LOG(EXCEPTION) << "Array list is empty"; } auto sample_tensor = tensor_list[0]; @@ -78,7 +78,7 @@ TypePtr CheckTensorsDTypeSame(const AbstractTensorPtrList &tensor_list, const Ty auto aType = tensor_list[index]->element()->BuildType(); loginfoBuffer << " " << aType->ToString(); if (sample_type->type_id() != aType->type_id()) { - MS_LOG(EXCEPTION) << "expected type " << sample_type->ToString() << ", but got " << aType->ToString() + MS_LOG(EXCEPTION) << "Expected type " << sample_type->ToString() << ", but got " << aType->ToString() << ", index " << index; } } @@ -89,11 +89,11 @@ TypePtr CheckTensorsDTypeSame(const AbstractTensorPtrList 
&tensor_list, const Ty TypePtr CheckScalarType(const AbstractScalarPtr &scalar, const TypePtrList &accepts, const std::string &error_message_prefix) { if (scalar == nullptr) { - MS_LOG(EXCEPTION) << "scalar nullptr"; + MS_LOG(EXCEPTION) << "Scalar nullptr"; } auto type = scalar->BuildType(); if (type == nullptr) { - MS_LOG(EXCEPTION) << "scalar value nullptr"; + MS_LOG(EXCEPTION) << "Scalar value nullptr"; } return CheckType(type, accepts, error_message_prefix); diff --git a/mindspore/ccsrc/pipeline/static_analysis/prim.cc b/mindspore/ccsrc/pipeline/static_analysis/prim.cc index 4110f25811..56bcd77f67 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/prim.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/prim.cc @@ -182,30 +182,30 @@ AbstractBasePtr DoSignatureEvaluator::Run(AnalysisEnginePtr engine, const Config static AbstractBasePtrList GetUnpackGraphSpecArgsList(AbstractBasePtrList args_spec_list, bool need_unpack) { // arg[0] is the func graph to unpack, ignore it - AbstractBasePtrList sepcialize_args_before_unpack(args_spec_list.begin() + 1, args_spec_list.end()); - AbstractBasePtrList graph_sepcialize_args; + AbstractBasePtrList specialize_args_before_unpack(args_spec_list.begin() + 1, args_spec_list.end()); + AbstractBasePtrList graph_specialize_args; if (need_unpack) { - for (size_t index = 0; index < sepcialize_args_before_unpack.size(); index++) { - MS_EXCEPTION_IF_NULL(sepcialize_args_before_unpack[index]); - if (sepcialize_args_before_unpack[index]->isa()) { - AbstractTuplePtr arg_tuple = sepcialize_args_before_unpack[index]->cast(); + for (size_t index = 0; index < specialize_args_before_unpack.size(); index++) { + MS_EXCEPTION_IF_NULL(specialize_args_before_unpack[index]); + if (specialize_args_before_unpack[index]->isa()) { + AbstractTuplePtr arg_tuple = specialize_args_before_unpack[index]->cast(); std::transform(arg_tuple->elements().begin(), arg_tuple->elements().end(), - std::back_inserter(graph_sepcialize_args), [](AbstractBasePtr abs) { 
return abs; }); - } else if (sepcialize_args_before_unpack[index]->isa()) { - AbstractDictionaryPtr arg_dict = sepcialize_args_before_unpack[index]->cast(); + std::back_inserter(graph_specialize_args), [](AbstractBasePtr abs) { return abs; }); + } else if (specialize_args_before_unpack[index]->isa()) { + AbstractDictionaryPtr arg_dict = specialize_args_before_unpack[index]->cast(); auto dict_elems = arg_dict->elements(); (void)std::transform( - dict_elems.begin(), dict_elems.end(), std::back_inserter(graph_sepcialize_args), + dict_elems.begin(), dict_elems.end(), std::back_inserter(graph_specialize_args), [](const AbstractAttribute &item) { return std::make_shared(item.first, item.second); }); } else { MS_LOG(EXCEPTION) << "UnpackGraph require args should be tuple or dict, but got " - << sepcialize_args_before_unpack[index]->ToString(); + << specialize_args_before_unpack[index]->ToString(); } } } else { - graph_sepcialize_args = sepcialize_args_before_unpack; + graph_specialize_args = specialize_args_before_unpack; } - return graph_sepcialize_args; + return graph_specialize_args; } AbstractBasePtr UnpackGraphEvaluator::Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list, @@ -239,14 +239,14 @@ AbstractBasePtr UnpackGraphEvaluator::Run(AnalysisEnginePtr engine, const Config MS_EXCEPTION_IF_NULL(real_fn); FuncGraphPtr forward_graph = real_fn->func_graph(); MS_EXCEPTION_IF_NULL(forward_graph); - AbstractBasePtrList graph_sepcialize_args = + AbstractBasePtrList graph_specialize_args = GetUnpackGraphSpecArgsList(args_spec_list, unpack_graph->need_unpack_args()); - AbstractBasePtrList graph_sepcialize_args_without_sens; - (void)std::transform(graph_sepcialize_args.begin(), - graph_sepcialize_args.end() - (unpack_graph->with_sens_in_args() ? 
1 : 0), - std::back_inserter(graph_sepcialize_args_without_sens), [](AbstractBasePtr abs) { return abs; }); - auto new_graph = forward_graph->GenerateGraph(graph_sepcialize_args_without_sens); + AbstractBasePtrList graph_specialize_args_without_sens; + (void)std::transform(graph_specialize_args.begin(), + graph_specialize_args.end() - (unpack_graph->with_sens_in_args() ? 1 : 0), + std::back_inserter(graph_specialize_args_without_sens), [](AbstractBasePtr abs) { return abs; }); + auto new_graph = forward_graph->GenerateGraph(graph_specialize_args_without_sens); engine->func_graph_manager()->AddFuncGraph(new_graph); ScopePtr scope = kDefaultScope; if (out_conf != nullptr) { @@ -635,8 +635,8 @@ AbstractBasePtr GetEvaluatedValueForClassAttrOrMethod(const AnalysisEnginePtr &e MS_LOG(EXCEPTION) << "Attribute type error"; } std::string item_name = item_v->cast()->value(); - MS_LOG(DEBUG) << "Resovle name: " << cls->tag().name(); - MS_LOG(DEBUG) << "Resovle item: " << item_name; + MS_LOG(DEBUG) << "Resolve name: " << cls->tag().name(); + MS_LOG(DEBUG) << "Resolve item: " << item_name; AbstractBasePtr attr = cls->GetAttribute(item_name); if (attr != nullptr) { @@ -720,7 +720,7 @@ class EmbedEvaluator : public SymbolicPrimEvaluator { ~EmbedEvaluator() override = default; MS_DECLARE_PARENT(EmbedEvaluator, SymbolicPrimEvaluator); AbstractBasePtr EvalPrim(const ConfigPtrList &args_conf_list) override { - // arg: free variable to be embeded + // arg: free variable to be embedded if (args_conf_list.size() != 1) { MS_LOG(EXCEPTION) << "EmbedEvaluator requires 1 parameter, but got " << args_conf_list.size(); } @@ -939,7 +939,7 @@ class PartialEvaluator : public Evaluator { AbstractBasePtr Run(AnalysisEnginePtr engine, const ConfigPtrList &args_conf_list, AnfNodeConfigPtr out_conf = nullptr) override { if (args_conf_list.size() == 0) { - MS_LOG(EXCEPTION) << "args size should be greater than 0"; + MS_LOG(EXCEPTION) << "Args size should be greater than 0"; } auto arg0_value = 
args_conf_list[0]->GetEvaluatedValue(); AbstractBasePtrList args_spec_list{arg0_value}; diff --git a/mindspore/ccsrc/pipeline/static_analysis/program_specialize.cc b/mindspore/ccsrc/pipeline/static_analysis/program_specialize.cc index bfa1e43ceb..987c5d1db0 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/program_specialize.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/program_specialize.cc @@ -190,7 +190,7 @@ void FuncGraphSpecializer::FirstPass() { } if (node->func_graph() != func_graph_) { if (parent_ == nullptr) { - MS_LOG(EXCEPTION) << "parent must not null NodeInfo: " << trace::GetDebugInfo(node->debug_info()); + MS_LOG(EXCEPTION) << "Parent must not null NodeInfo: " << trace::GetDebugInfo(node->debug_info()); } parent_->AddTodoItem(node); parent_->FirstPass(); @@ -365,16 +365,16 @@ AnfNodePtr FuncGraphSpecializer::BuildSpecializedNodeInner(const AbstractBasePtr } if (!eval->isa()) { - MS_LOG(EXCEPTION) << "eval is not BaseGraphEvaluator, but " << eval->ToString(); + MS_LOG(EXCEPTION) << "Eval is not BaseGraphEvaluator, but " << eval->ToString(); } auto real_eval = dyn_cast(eval); if (func->context() != nullptr) { if (!IsVisible(func_graph_, func->context()->func_graph())) { - MS_LOG(EXCEPTION) << "func is not visible NodeInfo: " << trace::GetDebugInfo(func_graph_->debug_info()); + MS_LOG(EXCEPTION) << "Func is not visible NodeInfo: " << trace::GetDebugInfo(func_graph_->debug_info()); } } else { - MS_LOG(EXCEPTION) << "func context is nullptr NodeInfo: " << trace::GetDebugInfo(func_graph_->debug_info()); + MS_LOG(EXCEPTION) << "Func context is nullptr NodeInfo: " << trace::GetDebugInfo(func_graph_->debug_info()); } AnalysisContextPtr context = real_eval->MakeContext(engine_, argvals); MS_LOG(DEBUG) << "Specialize function graph: " << context->func_graph()->ToString() << ", args: " << argvals.size() @@ -556,7 +556,7 @@ SpecializeStatusCode FuncGraphSpecializer::FindUniqueArgvals(const AbstractFunct if (!result->first.empty()) { return 
kSpecializeSuccess; } - MS_LOG(DEBUG) << "Find POLY code, it may be unused code or unresoved polymorphism."; + MS_LOG(DEBUG) << "Find POLY code, it may be unused code or unresolved polymorphism."; return kSpecializeFindUniqueArgvalPoly; } } diff --git a/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc b/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc index 49182e8d09..4ab7b9d20b 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/static_analysis.cc @@ -89,7 +89,7 @@ std::size_t AnfNodeConfigHasher::operator()(const AnfNodeConfigPtr conf) const { MS_EXCEPTION_IF_NULL(conf->node()); std::size_t hash_value = hash_combine(conf->node()->hash(), conf->context()->hash()); if (conf->context() != nullptr && conf->context()->func_graph() != nullptr) { - MS_LOG(DEBUG) << "NodeConfgHasher Node: " << conf->node()->DebugString() + MS_LOG(DEBUG) << "NodeConfigHasher Node: " << conf->node()->DebugString() << ", Graph: " << conf->context()->func_graph()->ToString() << " ### , hash value: " << hash_value; } else { MS_LOG(DEBUG) << "NodeConfigHasher Node: " << conf->node()->DebugString() << " ### , hash value: " << hash_value; @@ -456,13 +456,13 @@ AbstractBasePtr AnalysisEngine::ExecuteMultipleEvaluators(const std::vectorcast(); if (fg_eval) { - auto undetermin_fgs = fg_eval->func_graph()->recursive_graphs(); - if (undetermin_fgs) { - for (auto undetermin_fg : *undetermin_fgs) { - MS_LOG(DEBUG) << "Set graph undetermin: " << undetermin_fg->ToString(); + auto undetermined_fgs = fg_eval->func_graph()->recursive_graphs(); + if (undetermined_fgs) { + for (auto undetermined_fg : *undetermined_fgs) { + MS_LOG(DEBUG) << "Set graph undetermined: " << undetermined_fg->ToString(); // As the current evaluator has multiple possibles, all the func_graphs which // are recursive with the current func_graph are undetermined in control flow. 
- undetermin_fg->set_flags(kFuncGraphFlagUndetermin, true); + undetermined_fg->set_flags(kFuncGraphFlagUndetermined, true); } } } From fc9c3c6af47d5727478cce4f84b57b5066611001 Mon Sep 17 00:00:00 2001 From: Jonathan Yan Date: Sat, 4 Apr 2020 06:48:58 -0400 Subject: [PATCH 100/367] remove ENABLE_MINDRECORD flag --- mindspore/ccsrc/dataset/CMakeLists.txt | 2 -- mindspore/ccsrc/dataset/api/de_pipeline.cc | 12 +++--------- mindspore/ccsrc/dataset/api/de_pipeline.h | 4 ---- mindspore/ccsrc/dataset/api/python_bindings.cc | 6 ------ .../engine/datasetops/source/mindrecord_op.cc | 3 --- .../dataset/engine/datasetops/source/mindrecord_op.h | 2 -- tests/ut/cpp/CMakeLists.txt | 1 - tests/ut/cpp/dataset/mind_record_op_test.cc | 2 -- 8 files changed, 3 insertions(+), 29 deletions(-) diff --git a/mindspore/ccsrc/dataset/CMakeLists.txt b/mindspore/ccsrc/dataset/CMakeLists.txt index d6791f2b9b..477d37051e 100644 --- a/mindspore/ccsrc/dataset/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/CMakeLists.txt @@ -17,8 +17,6 @@ if (ENABLE_TDTQUE) message(STATUS "TDT queue is enabled") endif () -add_definitions(-D ENABLE_MINDRECORD) - # conde coverage # option(ENABLE_COVERAGE "Enable code coverage report" OFF) # if (ENABLE_COVERAGE) diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.cc b/mindspore/ccsrc/dataset/api/de_pipeline.cc index d51204f659..65ec8d30f2 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.cc +++ b/mindspore/ccsrc/dataset/api/de_pipeline.cc @@ -29,11 +29,9 @@ #include "dataset/engine/datasetops/source/manifest_op.h" #include "dataset/engine/datasetops/source/cifar_op.h" #include "dataset/engine/datasetops/source/celeba_op.h" -#ifdef ENABLE_MINDRECORD -#include "./shard_category.h" -#include "./shard_sample.h" -#include "./shard_shuffle.h" -#endif +#include "mindrecord/include/shard_category.h" +#include "mindrecord/include/shard_sample.h" +#include "mindrecord/include/shard_shuffle.h" #include "dataset/util/random.h" #include "dataset/util/status.h" @@ -46,9 +44,7 @@ 
using pFunction = Status (DEPipeline::*)(const py::dict &, std::shared_ptr g_parse_op_func_ = {{kStorage, &DEPipeline::ParseStorageOp}, {kShuffle, &DEPipeline::ParseShuffleOp}, -#ifdef ENABLE_MINDRECORD {kMindrecord, &DEPipeline::ParseMindRecordOp}, -#endif {kMap, &DEPipeline::ParseMapOp}, {kBatch, &DEPipeline::ParseBatchOp}, {kRepeat, &DEPipeline::ParseRepeatOp}, @@ -364,7 +360,6 @@ Status DEPipeline::ParseShuffleOp(const py::dict &args, std::shared_ptr *in_partitions) { if (args["partitions"].is_none()) { std::string err_msg = "Error: partitions is not set (None)"; @@ -450,7 +445,6 @@ Status DEPipeline::ParseMindRecordOp(const py::dict &args, std::shared_ptr *ptr) { std::shared_ptr builder = std::make_shared(); diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.h b/mindspore/ccsrc/dataset/api/de_pipeline.h index e8dde85a77..acffc390cc 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.h +++ b/mindspore/ccsrc/dataset/api/de_pipeline.h @@ -38,9 +38,7 @@ using DsOpPtr = std::shared_ptr; enum OpName { kStorage = 0, kShuffle, -#ifdef ENABLE_MINDRECORD kMindrecord, -#endif kBatch, kCache, kRepeat, @@ -101,11 +99,9 @@ class DEPipeline { Status ParseShuffleOp(const py::dict &args, std::shared_ptr *ptr); -#ifdef ENABLE_MINDRECORD Status CheckMindRecordPartitionInfo(const py::dict &args, std::vector *ptr); Status ParseMindRecordOp(const py::dict &args, std::shared_ptr *ptr); -#endif Status ParseMapOp(const py::dict &args, std::shared_ptr *ptr); diff --git a/mindspore/ccsrc/dataset/api/python_bindings.cc b/mindspore/ccsrc/dataset/api/python_bindings.cc index 86b0a5d66a..e6c2691281 100644 --- a/mindspore/ccsrc/dataset/api/python_bindings.cc +++ b/mindspore/ccsrc/dataset/api/python_bindings.cc @@ -44,9 +44,7 @@ #include "dataset/engine/datasetops/source/io_block.h" #include "dataset/engine/datasetops/source/mnist_op.h" #include "dataset/engine/datasetops/source/manifest_op.h" -#ifdef ENABLE_MINDRECORD #include "dataset/engine/datasetops/source/mindrecord_op.h" -#endif 
#include "dataset/engine/datasetops/source/sampler/distributed_sampler.h" #include "dataset/engine/datasetops/source/sampler/pk_sampler.h" #include "dataset/engine/datasetops/source/sampler/random_sampler.h" @@ -146,14 +144,12 @@ void bindDatasetOps(py::module *m) { return py::make_tuple(count, num_classes); }); -#ifdef ENABLE_MINDRECORD (void)py::class_>(*m, "MindRecordOp") .def_static("get_num_rows", [](const std::string &path) { int64_t count = 0; THROW_IF_ERROR(MindRecordOp::CountTotalRows(path, &count)); return count; }); -#endif (void)py::class_>(*m, "ManifestOp") .def_static("get_num_rows_and_classes", @@ -424,9 +420,7 @@ PYBIND11_MODULE(_c_dataengine, m) { .value("STORAGE", OpName::kStorage) .value("SHUFFLE", OpName::kShuffle) .value("BATCH", OpName::kBatch) -#ifdef ENABLE_MINDRECORD .value("MINDRECORD", OpName::kMindrecord) -#endif .value("CACHE", OpName::kCache) .value("REPEAT", OpName::kRepeat) .value("TAKE", OpName::kTake) diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc index b062371d7f..b5bea5416c 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc @@ -13,8 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#ifdef ENABLE_MINDRECORD - #include "dataset/engine/datasetops/source/mindrecord_op.h" #include @@ -665,4 +663,3 @@ Status MindRecordOp::CountTotalRows(const std::string dataset_path, int64_t *cou } } // namespace dataset } // namespace mindspore -#endif diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h index 2535acbc50..0b16391b20 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h @@ -15,7 +15,6 @@ */ #ifndef DATASET_ENGINE_DATASETOPS_SOURCE_MINDRECORD_OP_H_ #define DATASET_ENGINE_DATASETOPS_SOURCE_MINDRECORD_OP_H_ -#ifdef ENABLE_MINDRECORD #pragma once #include @@ -276,5 +275,4 @@ class MindRecordOp : public ParallelOp { }; } // namespace dataset } // namespace mindspore -#endif #endif // DATASET_ENGINE_DATASETOPS_SOURCE_MINDRECORD_OP_H_ diff --git a/tests/ut/cpp/CMakeLists.txt b/tests/ut/cpp/CMakeLists.txt index 5f4bd41b3b..8d3f8a8138 100644 --- a/tests/ut/cpp/CMakeLists.txt +++ b/tests/ut/cpp/CMakeLists.txt @@ -26,7 +26,6 @@ MESSAGE("check ut_test ${CMAKE_BINARY_DIR}") link_directories(${MS_CCSRC_BUILD_PATH}) if(ENABLE_MINDDATA) - add_definitions(-D ENABLE_MINDRECORD) add_definitions(-D ENABLE_MINDDATA) link_directories(${MS_CCSRC_BUILD_PATH}/dataset) link_directories(${MS_CCSRC_BUILD_PATH}/mindrecord) diff --git a/tests/ut/cpp/dataset/mind_record_op_test.cc b/tests/ut/cpp/dataset/mind_record_op_test.cc index abe7faef14..3d5c80b3f4 100644 --- a/tests/ut/cpp/dataset/mind_record_op_test.cc +++ b/tests/ut/cpp/dataset/mind_record_op_test.cc @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#ifdef ENABLE_MINDRECORD #include #include #include @@ -480,4 +479,3 @@ TEST_F(MindDataTestMindRecordOp, TestMindRecordBlockReaderRepeat) { row_count++; } } -#endif From fc9d0887948fd67371f6a47dedc2675e4c899ff0 Mon Sep 17 00:00:00 2001 From: ms_yan <6576637+ms_yan@user.noreply.gitee.com> Date: Thu, 2 Apr 2020 21:56:48 +0800 Subject: [PATCH 101/367] add parameter check for Class Schema --- mindspore/dataset/engine/datasets.py | 20 +++++++---- mindspore/dataset/engine/validators.py | 49 ++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 6 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index db2b5169d2..2d5c219b71 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -38,7 +38,7 @@ from .iterators import DictIterator, TupleIterator from .validators import check, check_batch, check_shuffle, check_map, check_repeat, check_zip, check_rename, \ check_project, check_imagefolderdatasetv2, check_mnist_cifar_dataset, check_manifestdataset, \ check_tfrecorddataset, check_vocdataset, check_celebadataset, check_minddataset, check_generatordataset, \ - check_zip_dataset + check_zip_dataset, check_add_column, check_columns from ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist try: @@ -2334,13 +2334,20 @@ class Schema: self.dataset_type = '' self.num_rows = 0 else: + if not os.path.isfile(schema_file) or not os.access(schema_file, os.R_OK): + raise ValueError("The file %s does not exist or permission denied!" 
% schema_file) try: with open(schema_file, 'r') as load_f: json_obj = json.load(load_f) - self.from_json(json_obj) except json.decoder.JSONDecodeError: - raise RuntimeError("Schema file failed to load") + raise RuntimeError("Schema file failed to load.") + except UnicodeDecodeError: + raise RuntimeError("Schema file failed to decode.") + except Exception: + raise RuntimeError("Schema file failed to open.") + self.from_json(json_obj) + @check_add_column def add_column(self, name, de_type, shape=None): """ Add new column to the schema. @@ -2359,10 +2366,8 @@ class Schema: if isinstance(de_type, typing.Type): de_type = mstype_to_detype(de_type) new_column["type"] = str(de_type) - elif isinstance(de_type, str): - new_column["type"] = str(DataType(de_type)) else: - raise ValueError("Unknown column type") + new_column["type"] = str(DataType(de_type)) if shape is not None: new_column["shape"] = shape @@ -2399,6 +2404,7 @@ class Schema: RuntimeError: If column's name field is missing. RuntimeError: If column's type field is missing. """ + check_columns(columns, columns) self.columns = [] for col in columns: name = None @@ -2443,6 +2449,8 @@ class Schema: RuntimeError: if dataset type is missing in the object. RuntimeError: if columns are missing in the object. """ + if not isinstance(json_obj, dict) or json_obj is None: + raise ValueError("Expected non-empty dict.") for k, v in json_obj.items(): if k == "datasetType": self.dataset_type = v diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index b4d22a4a01..1c374ae879 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -19,10 +19,15 @@ import inspect as ins import os from functools import wraps from multiprocessing import cpu_count +from mindspore._c_expression import typing from . import samplers from . 
import datasets INT32_MAX = 2147483647 +valid_detype = [ + "bool", "int8", "int16", "int32", "int64", "uint8", "uint16", + "uint32", "uint64", "float16", "float32", "float64" +] def check(method): @@ -188,6 +193,12 @@ def check(method): return wrapper +def check_valid_detype(type_): + if type_ not in valid_detype: + raise ValueError("Unknown column type") + return True + + def check_filename(path): """ check the filename in the path @@ -743,3 +754,41 @@ def check_project(method): return method(*args, **kwargs) return new_method + + +def check_shape(shape, name): + if isinstance(shape, list): + for element in shape: + if not isinstance(element, int): + raise TypeError( + "Each element in {0} should be of type int. Got {1}.".format(name, type(element))) + else: + raise TypeError("Expected int list.") + + +def check_add_column(method): + """check the input arguments of add_column.""" + @wraps(method) + def new_method(*args, **kwargs): + param_dict = make_param_dict(method, args, kwargs) + + # check name; required argument + name = param_dict.get("name") + if not isinstance(name, str) or not name: + raise TypeError("Expected non-empty string.") + + # check type; required argument + de_type = param_dict.get("de_type") + if not isinstance(de_type, str) or not de_type: + raise TypeError("Expected non-empty string.") + if not isinstance(de_type, typing.Type) and not check_valid_detype(de_type): + raise ValueError("Unknown column type.") + + # check shape + shape = param_dict.get("shape") + if shape is not None: + check_shape(shape, "shape") + + return method(*args, **kwargs) + + return new_method From 322ffef3e49dc43161baead55cad433f5fcaf530 Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Mon, 6 Apr 2020 16:14:59 +0800 Subject: [PATCH 102/367] Fix some typo errors in mindspore ir module Signed-off-by: leonwanghui --- mindspore/ccsrc/common/trans.cc | 90 +++++++++---------- mindspore/ccsrc/debug/anf_ir_dump.cc | 16 ++-- mindspore/ccsrc/debug/anf_ir_utils.cc | 36 ++++---- 
mindspore/ccsrc/debug/draw.cc | 2 +- mindspore/ccsrc/debug/dump_proto.cc | 2 +- mindspore/ccsrc/debug/e2e_dump.cc | 24 ++--- mindspore/ccsrc/debug/info.cc | 4 +- mindspore/ccsrc/debug/trace.cc | 10 +-- mindspore/ccsrc/debug/trace_info.cc | 2 +- mindspore/ccsrc/ir/anf.h | 6 +- mindspore/ccsrc/ir/dtype/number.cc | 6 +- mindspore/ccsrc/ir/dtype/type.cc | 6 +- mindspore/ccsrc/ir/func_graph.h | 2 +- mindspore/ccsrc/ir/manager.cc | 4 +- mindspore/ccsrc/ir/manager.h | 2 +- mindspore/ccsrc/ir/meta_tensor.h | 8 +- mindspore/ccsrc/ir/visitor.cc | 7 +- mindspore/ccsrc/pynative/pynative_execute.cc | 14 +-- .../ccsrc/pynative/pynative_execute_ge.cc | 18 ++-- 19 files changed, 129 insertions(+), 130 deletions(-) diff --git a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc index 4748d59286..380c51bcf9 100644 --- a/mindspore/ccsrc/common/trans.cc +++ b/mindspore/ccsrc/common/trans.cc @@ -122,7 +122,7 @@ bool CastKernel(const TypeIdArgs &args, void *dst, const size_t data_size, const TransDataSrc2Dst(args, dst, data_size); break; default: - MS_LOG(ERROR) << "unsupported datatype trans"; + MS_LOG(ERROR) << "Unsupported datatype trans"; return false; } return true; @@ -132,7 +132,7 @@ size_t CubeSizeByType(const TypeId data_type) { const size_t default_error = 0; auto dt_size = TypeIdSize(data_type); if (dt_size < 1) { - MS_LOG(ERROR) << "illegal dtype."; + MS_LOG(ERROR) << "Illegal dtype."; return default_error; } else if (dt_size == 1) { return kCubeSize * 2; @@ -146,12 +146,12 @@ size_t ShapeSize(const std::vector &shape) { } size_t TypeIdSize(const TypeId data_type) { - const size_t unsupport_type_error = 0; + const size_t unsupported_type_error = 0; auto iter = type_map.find(data_type); if (iter != type_map.end()) { return iter->second; } - return unsupport_type_error; + return unsupported_type_error; } std::vector TransShapeTo4d(const std::vector &shape) { @@ -174,7 +174,7 @@ std::vector TransShapeTo4d(const std::vector &shape) { } break; default: - 
MS_LOG(EXCEPTION) << "Unexpeted shape size = " << shape.size(); + MS_LOG(EXCEPTION) << "Unexpected shape size = " << shape.size(); } return shape_4d; } @@ -183,7 +183,7 @@ std::vector TransShapeToDevice(const std::vector &shape, const s std::vector device_shape; if (format == kOpFormat_FRAC_NZ) { if (shape.size() < 2) { - MS_EXCEPTION(NotSupportError) << "format " << format << " is not support shape " << shape.size(); + MS_EXCEPTION(NotSupportError) << "Format " << format << " is not support shape " << shape.size(); } if (shape.size() > 2) { (void)std::copy(shape.begin(), shape.end() - 2, std::back_inserter(device_shape)); @@ -231,37 +231,37 @@ std::vector TransShapeToDevice(const std::vector &shape, const s } bool TransDataType(const TypeIdArgs &args, void *result) { - MS_LOG(DEBUG) << "begin trans datatype from " << TypeIdLabel(args.host_data_type) << " to " + MS_LOG(DEBUG) << "Begin trans datatype from " << TypeIdLabel(args.host_data_type) << " to " << TypeIdLabel(args.device_data_type); MS_EXCEPTION_IF_NULL(result); std::pair type_info(args.host_data_type, args.device_data_type); auto iter = mode_map.find(type_info); if (iter == mode_map.end()) { - MS_LOG(ERROR) << "unsupported datatype trans. src_type :" << TypeIdLabel(args.host_data_type) + MS_LOG(ERROR) << "Unsupported datatype trans. 
src_type :" << TypeIdLabel(args.host_data_type) << ", dst_type:" << TypeIdLabel(args.device_data_type); return false; } auto trans_mode = iter->second; auto type_size = TypeIdSize(args.device_data_type); if (type_size < 1) { - MS_LOG(ERROR) << "invalid host data type."; + MS_LOG(ERROR) << "Invalid host data type."; return false; } if (args.host_shape_size < 1) { - MS_LOG(ERROR) << "invalid host data size."; + MS_LOG(ERROR) << "Invalid host data size."; return false; } if (!CastKernel(args, result, args.host_shape_size, trans_mode)) { - MS_LOG(ERROR) << "failed to trans datatype.."; + MS_LOG(ERROR) << "Failed to trans datatype.."; return false; } return true; } bool TransFormat(const FormatArgs &args, void *result) { - MS_LOG(DEBUG) << "start trans format."; + MS_LOG(DEBUG) << "Start trans format."; if (TypeIdSize(args.src_data_type) < 1) { - MS_LOG(ERROR) << "invalid datatype.."; + MS_LOG(ERROR) << "Invalid datatype.."; return false; } if ((args.host_format == kOpFormat_NCHW || args.host_format == kOpFormat_ND) && @@ -276,9 +276,9 @@ bool TransFormat(const FormatArgs &args, void *result) { } bool TransFormatFromDeviceToHost(const FormatArgs &args, void *result) { - MS_LOG(DEBUG) << "start trans format."; + MS_LOG(DEBUG) << "Start trans format."; if (TypeIdSize(args.src_data_type) < 1) { - MS_LOG(ERROR) << "invalid datatype.."; + MS_LOG(ERROR) << "Invalid datatype.."; return false; } if ((args.host_format == kOpFormat_NCHW || args.host_format == kOpFormat_ND) && @@ -293,15 +293,15 @@ bool TransFormatFromDeviceToHost(const FormatArgs &args, void *result) { } bool NchwToFracZ(const FormatArgs &args, void *result) { - MS_LOG(DEBUG) << "trans format from nchw to frac_z"; + MS_LOG(DEBUG) << "Trans format from nchw to frac_z"; MS_EXCEPTION_IF_NULL(result); if (args.host_shape.size() != kNchwDims) { - MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims; + MS_LOG(ERROR) << "Invalid host shape, host shape dims:" 
<< args.host_shape.size() << ", expect dims:" << kNchwDims; return false; } size_t size = TypeIdSize(args.src_data_type); if (size < 1) { - MS_LOG(ERROR) << "illegal dtype."; + MS_LOG(ERROR) << "Illegal dtype."; return false; } auto n = args.host_shape[0]; @@ -311,7 +311,7 @@ bool NchwToFracZ(const FormatArgs &args, void *result) { size_t c0 = CubeSizeByType(args.src_data_type); if (c0 < 1) { - MS_LOG(ERROR) << "illegal dtype."; + MS_LOG(ERROR) << "Illegal dtype."; return false; } size_t c1 = Ceil(c, c0); @@ -327,7 +327,7 @@ bool NchwToFracZ(const FormatArgs &args, void *result) { size_t dst_size = total_ele_cnt * size; if (dst_size != args.device_size) { - MS_LOG(ERROR) << "illegal total data size." + MS_LOG(ERROR) << "Illegal total data size." << "dst size is :" << dst_size << "device size is :" << args.device_size; return false; } @@ -369,20 +369,20 @@ bool NchwToFracZ(const FormatArgs &args, void *result) { } bool FracZToNchw(const FormatArgs &args, void *result) { - MS_LOG(DEBUG) << "trans format from frac_z to nchw"; + MS_LOG(DEBUG) << "Trans format from frac_z to nchw"; MS_EXCEPTION_IF_NULL(result); if (args.host_shape.size() != kNchwDims) { - MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims; + MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims; return false; } size_t size = TypeIdSize(args.src_data_type); if (size < 1) { - MS_LOG(ERROR) << "illegal dtype."; + MS_LOG(ERROR) << "Illegal dtype."; return false; } size_t total_size = ShapeSize(args.device_shape) * size; if (total_size != args.device_size) { - MS_LOG(ERROR) << "illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size; + MS_LOG(ERROR) << "Illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size; return false; } @@ -435,7 +435,7 @@ bool FracZToNchw(const FormatArgs &args, void *result) { bool 
TransShapeToNz(const std::vector &host_shape, std::vector *hw_shape) { MS_EXCEPTION_IF_NULL(hw_shape); if (host_shape.empty()) { - MS_LOG(ERROR) << "size of vector is 0."; + MS_LOG(ERROR) << "Size of vector is 0."; return false; } switch (host_shape.size()) { @@ -447,7 +447,7 @@ bool TransShapeToNz(const std::vector &host_shape, std::vector * default: auto size = host_shape.size(); if (size < 2) { - MS_LOG(ERROR) << "illegal size."; + MS_LOG(ERROR) << "Illegal size."; return false; } size_t times = 1; @@ -462,26 +462,26 @@ bool TransShapeToNz(const std::vector &host_shape, std::vector * } bool NchwToFracNz(const FormatArgs &args, void *result) { - MS_LOG(DEBUG) << "trans format from nchw to frac_nz."; + MS_LOG(DEBUG) << "Trans format from nchw to frac_nz."; MS_EXCEPTION_IF_NULL(result); std::vector hw_shape; if (!TransShapeToNz(args.host_shape, &hw_shape)) { - MS_LOG(ERROR) << "trans shape failed.."; + MS_LOG(ERROR) << "Trans shape failed.."; return false; } if (hw_shape.size() < 3 || args.device_shape.size() < 4) { - MS_LOG(ERROR) << "invalid shape size."; + MS_LOG(ERROR) << "Invalid shape size."; return false; } auto size = TypeIdSize(args.src_data_type); if (size < 1) { - MS_LOG(ERROR) << "illegal dtype"; + MS_LOG(ERROR) << "Illegal dtype"; return false; } auto dst_size = ShapeSize(args.device_shape) * size; if (dst_size != args.device_size) { - MS_LOG(ERROR) << "illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size; + MS_LOG(ERROR) << "Illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size; return false; } auto times = hw_shape.at(0); @@ -538,26 +538,26 @@ bool NchwToFracNz(const FormatArgs &args, void *result) { } bool FracNzToNchw(const FormatArgs &args, void *result) { - MS_LOG(DEBUG) << "trans format from frac_nz to nchw"; + MS_LOG(DEBUG) << "Trans format from frac_nz to nchw"; MS_EXCEPTION_IF_NULL(result); std::vector hw_shape; if (!TransShapeToNz(args.host_shape, &hw_shape)) { - 
MS_LOG(ERROR) << "trans shape failed.."; + MS_LOG(ERROR) << "Trans shape failed.."; return false; } if (hw_shape.size() < 3 || args.device_shape.size() < 4) { - MS_LOG(ERROR) << "invalid shape size."; + MS_LOG(ERROR) << "Invalid shape size."; return false; } auto size = TypeIdSize(args.src_data_type); if (size < 1) { - MS_LOG(ERROR) << "illegal dtype"; + MS_LOG(ERROR) << "Illegal dtype"; return false; } auto dst_size = ShapeSize(args.device_shape) * size; if (dst_size != args.device_size) { - MS_LOG(ERROR) << "illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size; + MS_LOG(ERROR) << "Illegal total data size, total_size:" << dst_size << ", device_size:" << args.device_size; return false; } auto times = hw_shape.at(0); @@ -614,20 +614,20 @@ bool FracNzToNchw(const FormatArgs &args, void *result) { } bool NchwToNc1hwc0(const FormatArgs &args, void *result) { - MS_LOG(DEBUG) << "trans format from nchw to Nc1h1wc0"; + MS_LOG(DEBUG) << "Trans format from nchw to Nc1h1wc0"; MS_EXCEPTION_IF_NULL(result); if (args.host_shape.size() != kNchwDims) { - MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims; + MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims; return false; } size_t size = TypeIdSize(args.src_data_type); if (size < 1) { - MS_LOG(ERROR) << "illegal dtype."; + MS_LOG(ERROR) << "Illegal dtype."; return false; } auto total_size = ShapeSize(args.device_shape) * size; if (total_size != args.device_size) { - MS_LOG(ERROR) << "illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size; + MS_LOG(ERROR) << "Illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size; return false; } @@ -637,7 +637,7 @@ bool NchwToNc1hwc0(const FormatArgs &args, void *result) { auto w = args.host_shape[3]; size_t c0 = CubeSizeByType(args.src_data_type); if (c0 < 1) { 
- MS_LOG(ERROR) << "illegal dtype."; + MS_LOG(ERROR) << "Illegal dtype."; return false; } size_t c1 = Ceil(c, c0); @@ -687,20 +687,20 @@ bool NchwToNc1hwc0(const FormatArgs &args, void *result) { } bool Nc1hwc0ToNchw(const FormatArgs &args, void *result) { - MS_LOG(DEBUG) << "trans format from nc1h1wc0 to nchw"; + MS_LOG(DEBUG) << "Trans format from nc1h1wc0 to nchw"; MS_EXCEPTION_IF_NULL(result); if (args.host_shape.size() != kNchwDims) { - MS_LOG(ERROR) << "invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims; + MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims; return false; } size_t size = TypeIdSize(args.src_data_type); if (size < 1) { - MS_LOG(ERROR) << "illegal dtype."; + MS_LOG(ERROR) << "Illegal dtype."; return false; } size_t total_size = ShapeSize(args.device_shape) * size; if (total_size != args.device_size) { - MS_LOG(ERROR) << "illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size; + MS_LOG(ERROR) << "Illegal total data size, total_size:" << total_size << ", device_size:" << args.device_size; return false; } diff --git a/mindspore/ccsrc/debug/anf_ir_dump.cc b/mindspore/ccsrc/debug/anf_ir_dump.cc index 9e24a17ede..9eb0a376cc 100644 --- a/mindspore/ccsrc/debug/anf_ir_dump.cc +++ b/mindspore/ccsrc/debug/anf_ir_dump.cc @@ -141,7 +141,7 @@ void DumpKernelInfo(const CNodePtr &node, const std::shared_ptr void DumpParams(const FuncGraphPtr &graph, std::ostringstream &buffer, OrderedMap *para_map) { if (graph == nullptr) { - MS_LOG(INFO) << "param graph is nullptr."; + MS_LOG(INFO) << "Param graph is nullptr."; return; } std::vector parameters = graph->parameters(); @@ -175,17 +175,17 @@ void DumpParams(const FuncGraphPtr &graph, std::ostringstream &buffer, OrderedMa if (para_map != nullptr) { (*para_map)[p] = para++; } - MS_LOG(DEBUG) << "record param: " << p->ToString() << " graph belong : " << 
p->func_graph()->ToString(); + MS_LOG(DEBUG) << "Record param: " << p->ToString() << " graph belong : " << p->func_graph()->ToString(); } } void DumpOperator(const AnfNodePtr &op, const std::shared_ptr &gsub) { if (op == nullptr) { - MS_LOG(INFO) << "param op is nullptr"; + MS_LOG(INFO) << "Param op is nullptr"; return; } if (gsub == nullptr) { - MS_LOG(INFO) << "param gsub is nullptr"; + MS_LOG(INFO) << "Param gsub is nullptr"; return; } @@ -338,7 +338,7 @@ void DumpCNode(const CNodePtr &nd, const FuncGraphPtr &sub_graph, OrderedMapinputs().empty()) { - MS_LOG(EXCEPTION) << "input of apply node is empty"; + MS_LOG(EXCEPTION) << "Input of apply node is empty"; } // print operator @@ -376,7 +376,7 @@ void DumpIRInSubgraph(const std::vector &nodes, OrderedMapfunc_graph(); if (sub_graph == nullptr) { - MS_LOG(DEBUG) << "node[" << nd->ToString() << "] belongs to no graph!"; + MS_LOG(DEBUG) << "Node[" << nd->ToString() << "] belongs to no graph!"; continue; } std::shared_ptr gsub = (*sub_graphs)[sub_graph]; @@ -430,12 +430,12 @@ void DumpIR(const std::string &filename, const FuncGraphPtr &graph, bool dump_fu return; } if (filename.size() > PATH_MAX) { - MS_LOG(ERROR) << "file path " << filename << " is too long."; + MS_LOG(ERROR) << "File path " << filename << " is too long."; return; } char real_path[PATH_MAX] = {0}; if (nullptr == realpath(filename.c_str(), real_path)) { - MS_LOG(DEBUG) << "dir " << filename << " does not exit."; + MS_LOG(DEBUG) << "Dir " << filename << " does not exit."; } OrderedMap para_map; diff --git a/mindspore/ccsrc/debug/anf_ir_utils.cc b/mindspore/ccsrc/debug/anf_ir_utils.cc index 98cd2f4b2f..c25ad862df 100644 --- a/mindspore/ccsrc/debug/anf_ir_utils.cc +++ b/mindspore/ccsrc/debug/anf_ir_utils.cc @@ -49,7 +49,7 @@ std::string GetMsIrPath(void) { path = path_ptr; char real_path[PATH_MAX] = {0}; if (path.size() > PATH_MAX || nullptr == realpath(path.c_str(), real_path)) { - MS_LOG(EXCEPTION) << "MS IR Path error, " << path_ptr; + 
MS_LOG(EXCEPTION) << "MS IR path error, " << path_ptr; } path = real_path; } @@ -144,8 +144,8 @@ std::string AnfExporter::GetValueNodeText(const FuncGraphPtr& fg, const ValueNod } std::string AnfExporter::GetMultitypeFuncGraphText(const prim::MultitypeFuncGraphPtr& mt_func_graph) { - auto py_funs = mt_func_graph->GetPyFunctions(); - if (py_funs.empty()) { + auto py_funcs = mt_func_graph->GetPyFunctions(); + if (py_funcs.empty()) { return ""; } @@ -153,7 +153,7 @@ std::string AnfExporter::GetMultitypeFuncGraphText(const prim::MultitypeFuncGrap oss << "{"; bool is_first = true; - for (const auto& py_func : py_funs) { + for (const auto& py_func : py_funcs) { if (is_first) { is_first = false; } else { @@ -626,7 +626,7 @@ void AnfExporter::ExportFuncGraph(const std::string& filename, const FuncGraphPt ofs << "\n\n"; (void)func_graph_set.erase(fg); } - ofs << "# num of total funcgraphs: " << exported.size(); + ofs << "# num of total function graphs: " << exported.size(); ofs.close(); } @@ -651,7 +651,7 @@ void AnfExporter::ExportFuncGraph(const std::string& filename, const std::vector ofs << "\n\n"; } - ofs << "# num of total funcgraphs: " << graphs.size(); + ofs << "# num of total function graphs: " << graphs.size(); ofs.close(); } @@ -763,7 +763,7 @@ class Lexer { fin.close(); } } catch (const std::exception& e) { - MS_LOG(ERROR) << "exception when closing file"; + MS_LOG(ERROR) << "Exception when closing file"; } catch (...) { std::string exName(abi::__cxa_current_exception_type()->name()); MS_LOG(ERROR) << "Error occurred when closing file. Exception name: " << exName; @@ -802,7 +802,7 @@ class Lexer { Token token = GetNextTokenInner(); const char* str = token_text[token]; std::string text = (str == nullptr ? 
GetTokenText() : str); - MS_LOG(DEBUG) << "------parse token] " << text; + MS_LOG(DEBUG) << "------Parse token] " << text; return token; } @@ -1642,7 +1642,7 @@ class IrParser { MS_LOG(EXCEPTION) << "Expect @file at line " << lexer_.GetLineNo(); } - // load prameter default value from serialized file + // load parameter default value from serialized file py::object default_obj = LoadObject(lexer_.GetTokenText()); param->set_default_param(default_obj); @@ -1950,7 +1950,7 @@ class IrParser { return TOK_ERROR; } - // restore python funciton of PrimitivePy from serialized file + // restore python function of PrimitivePy from serialized file py::object py_obj = LoadObject(lexer_.GetTokenText()); PrimitivePyPtr ptr = nullptr; if (py::hasattr(py_obj, "__setattr_flag__") && py::hasattr(py_obj, "_clone")) { @@ -1958,7 +1958,7 @@ class IrParser { py::object new_obj = clone_fn(); ptr = new_obj.cast(); if (ptr == nullptr) { - MS_LOG(EXCEPTION) << "cast to type 'PrimitivePyPtr' error"; + MS_LOG(EXCEPTION) << "Cast to type 'PrimitivePyPtr' error"; } } else { ptr = std::make_shared(id.substr(strlen("PrimitivePy::")), py_obj); @@ -2221,15 +2221,15 @@ class IrParser { }; std::vector ImportIR(const std::string& filename) { - IrParser paser(filename.c_str()); - paser.ParseFile(); - return paser.GetFuncGraphs(); + IrParser parser(filename.c_str()); + parser.ParseFile(); + return parser.GetFuncGraphs(); } #ifdef ENABLE_DUMP_IR void DumpIRProto(const FuncGraphPtr& func_graph, const std::string& suffix) { if (func_graph == nullptr) { - MS_LOG(ERROR) << "func graph is nullptr"; + MS_LOG(ERROR) << "Func graph is nullptr"; return; } auto ms_context = MsContext::GetInstance(); @@ -2243,16 +2243,16 @@ void DumpIRProto(const FuncGraphPtr& func_graph, const std::string& suffix) { } std::string file_path = save_graphs_path + "/" + "ms_output_" + suffix + ".pb"; if (file_path.size() > PATH_MAX) { - MS_LOG(ERROR) << "file path " << file_path << " is too long."; + MS_LOG(ERROR) << "File path " << 
file_path << " is too long."; return; } char real_path[PATH_MAX] = {0}; if (nullptr == realpath(file_path.c_str(), real_path)) { - MS_LOG(DEBUG) << "dir " << file_path << " does not exit."; + MS_LOG(DEBUG) << "Dir " << file_path << " does not exit."; } else { std::string path_string = real_path; if (chmod(common::SafeCStr(path_string), S_IRUSR | S_IWUSR) == -1) { - MS_LOG(ERROR) << "modify file:" << real_path << " to rw fail."; + MS_LOG(ERROR) << "Modify file:" << real_path << " to rw fail."; return; } } diff --git a/mindspore/ccsrc/debug/draw.cc b/mindspore/ccsrc/debug/draw.cc index e0949c0419..3e8cbfba19 100644 --- a/mindspore/ccsrc/debug/draw.cc +++ b/mindspore/ccsrc/debug/draw.cc @@ -362,7 +362,7 @@ Digraph::~Digraph() { fout_.close(); } } catch (const std::exception& e) { - MS_LOG(ERROR) << "exception when closing file " << filename_; + MS_LOG(ERROR) << "Exception when closing file " << filename_; } } diff --git a/mindspore/ccsrc/debug/dump_proto.cc b/mindspore/ccsrc/debug/dump_proto.cc index a12cf8b47f..a7a1e208a4 100644 --- a/mindspore/ccsrc/debug/dump_proto.cc +++ b/mindspore/ccsrc/debug/dump_proto.cc @@ -208,7 +208,7 @@ void ProtoExporter::SetValueToProto(const ValuePtr& val, irpb::ValueProto* value TypePtr elem_type = dyn_cast(val)->element(); type_proto->mutable_tensor_type()->set_elem_type(GetNumberDataType(elem_type)); } else { - MS_LOG(WARNING) << "Not supported type " << val->type_name(); + MS_LOG(WARNING) << "Unsupported type " << val->type_name(); } } diff --git a/mindspore/ccsrc/debug/e2e_dump.cc b/mindspore/ccsrc/debug/e2e_dump.cc index ba11eafa5f..fbe76cdc47 100644 --- a/mindspore/ccsrc/debug/e2e_dump.cc +++ b/mindspore/ccsrc/debug/e2e_dump.cc @@ -101,7 +101,7 @@ bool Dump::IsConfigValid(const nlohmann::json& dumpSettings) { auto kernels = dumpSettings.at("kernels"); if (!(enable.is_boolean() && trans_flag.is_boolean() && mode.is_number() && path.is_string() && net_name.is_string() && iteration.is_number() && kernels.is_array())) { - 
MS_LOG(ERROR) << "element's type in Dump config json is invalid."; + MS_LOG(ERROR) << "Element's type in Dump config json is invalid."; dump_enable_ = false; return false; } @@ -121,7 +121,7 @@ bool Dump::IsConfigValid(const nlohmann::json& dumpSettings) { bool Dump::SetDumpConfFromJsonFile() { const char* config_path_str = std::getenv("MINDSPORE_CONFIG_PATH"); if (config_path_str != nullptr) { - MS_LOG(INFO) << "getenv MINDSPORE_CONFIG_PATH :" << config_path_str; + MS_LOG(INFO) << "Getenv MINDSPORE_CONFIG_PATH :" << config_path_str; } else { MS_LOG(INFO) << "No need E2E Dump. please export MINDSPORE_CONFIG_PATH eg: MINDSPORE_CONFIG_PATH=/etc"; dump_enable_ = false; @@ -132,7 +132,7 @@ bool Dump::SetDumpConfFromJsonFile() { auto id = context_ptr->device_id(); char real_path[PATH_MAX] = {0}; if (nullptr == realpath(config_path_str, real_path)) { - MS_LOG(ERROR) << "env e2e dump path error, " << config_path_str; + MS_LOG(ERROR) << "Env e2e dump path error, " << config_path_str; dump_enable_ = false; return false; } @@ -150,20 +150,20 @@ bool Dump::SetDumpConfFromJsonFile() { bool Dump::DumpToFile(const std::string& filename, const void* data, size_t len) { if (filename.empty() || data == nullptr || len == 0) { - MS_LOG(ERROR) << "incorrect parameter."; + MS_LOG(ERROR) << "Incorrect parameter."; return false; } std::string realpath; bool ret = GetRealPath(filename, &realpath); if (!ret) { - MS_LOG(ERROR) << "get real path failed."; + MS_LOG(ERROR) << "Get real path failed."; return false; } std::ofstream fd; fd.open(realpath, std::ios::binary | std::ios::out); if (!fd.is_open()) { - MS_LOG(ERROR) << "open file " << realpath << " fail."; + MS_LOG(ERROR) << "Open file " << realpath << " fail."; return false; } (void)fd.write(reinterpret_cast(data), SizeToLong(len)); @@ -182,7 +182,7 @@ bool Dump::GetRealPath(const std::string& inpath, std::string* outpath) { if (path_split_pos != std::string::npos) { std::string prefix_path = inpath.substr(0, path_split_pos); if 
(prefix_path.length() >= PATH_MAX) { - MS_LOG(ERROR) << "prefix path is too longer!"; + MS_LOG(ERROR) << "Prefix path is too longer!"; return false; } std::string last_path = inpath.substr(path_split_pos, inpath.length() - path_split_pos); @@ -201,11 +201,11 @@ bool Dump::GetRealPath(const std::string& inpath, std::string* outpath) { if (path_split_pos == std::string::npos) { if (inpath.length() >= PATH_MAX) { - MS_LOG(ERROR) << "prefix path is too longer!"; + MS_LOG(ERROR) << "Prefix path is too longer!"; return false; } if (nullptr == realpath(inpath.c_str(), real_path)) { - MS_LOG(ERROR) << "file " << inpath << " does not exit, it will be created."; + MS_LOG(ERROR) << "File " << inpath << " does not exit, it will be created."; } *outpath = std::string(real_path); } @@ -218,7 +218,7 @@ bool Dump::CreateNotExistDirs(const std::string& path) { MS_EXCEPTION_IF_NULL(fs); char temp_path[PATH_MAX] = {0}; if (path.length() > PATH_MAX) { - MS_LOG(ERROR) << "path lens is max than " << PATH_MAX; + MS_LOG(ERROR) << "Path lens is max than " << PATH_MAX; return false; } for (uint32_t i = 0; i < path.length(); i++) { @@ -229,7 +229,7 @@ bool Dump::CreateNotExistDirs(const std::string& path) { temp_path[i] = '\0'; std::string path_handle(temp_path); if (!fs->FileExist(temp_path)) { - MS_LOG(INFO) << "dir " << path_handle << " does not exit, creating..."; + MS_LOG(INFO) << "Dir " << path_handle << " does not exit, creating..."; if (!fs->CreateDir(temp_path)) { MS_LOG(ERROR) << "Create " << path_handle << " dir error"; return false; @@ -241,7 +241,7 @@ bool Dump::CreateNotExistDirs(const std::string& path) { } if (!fs->FileExist(path)) { - MS_LOG(INFO) << "dir " << path << " does not exit, creating..."; + MS_LOG(INFO) << "Dir " << path << " does not exit, creating..."; if (!fs->CreateDir(path)) { MS_LOG(ERROR) << "Create " << path << " dir error"; return false; diff --git a/mindspore/ccsrc/debug/info.cc b/mindspore/ccsrc/debug/info.cc index 6f966e335c..5c1fc372c5 100644 --- 
a/mindspore/ccsrc/debug/info.cc +++ b/mindspore/ccsrc/debug/info.cc @@ -193,7 +193,7 @@ void TraceManager::DebugTrace(const TraceInfoPtr& trace_info) { } TraceContextPtr context = std::make_shared(trace_info); if (trace_info->debug_info() == nullptr) { - MS_LOG(EXCEPTION) << "trace debug info is null"; + MS_LOG(EXCEPTION) << "Trace debug info is null"; } TraceManager::trace_context_stack_.push(context); } @@ -205,7 +205,7 @@ void TraceManager::DebugTrace(const DebugInfoPtr& debug_info, const TraceInfoPtr auto cloned_info = trace_info->clone(); cloned_info->set_debug_info(debug_info); if (cloned_info->debug_info() == nullptr) { - MS_LOG(EXCEPTION) << "trace debug info is null with cloned trace"; + MS_LOG(EXCEPTION) << "Trace debug info is null with cloned trace"; } TraceContextPtr context = std::make_shared(cloned_info); TraceManager::trace_context_stack_.push(context); diff --git a/mindspore/ccsrc/debug/trace.cc b/mindspore/ccsrc/debug/trace.cc index 7ce13052c5..51cdd34946 100644 --- a/mindspore/ccsrc/debug/trace.cc +++ b/mindspore/ccsrc/debug/trace.cc @@ -89,7 +89,7 @@ std::string GetDebugInfo(const DebugInfoPtr& info, SourceLineTip tip) { return ""; } -// a trace info identifys a node transform, so we can trace the node transform through +// a trace info identifies a node transform, so we can trace the node transform through // a link of trace info and debug info std::string GetInfoWithAction(const std::vector& info_vec, SourceLineTip tip) { if (info_vec.size() < 1) { @@ -173,7 +173,7 @@ void DumpInferStack(std::ostringstream& oss) { } auto graph_context = graph_infer->graph_context(); if (graph_context == nullptr) { - MS_LOG(INFO) << "null context continue"; + MS_LOG(INFO) << "Null context continue"; continue; } auto graph = graph_context->func_graph(); @@ -264,7 +264,7 @@ void AnalyzedFuncGraphExporter::ExportFuncGraph(const std::string& filename, param_index = 1; auto tagged_func_graphs = CalcTaggedFuncGraphs(); - // first output grapn on the analysis stack + 
// first output graph on the analysis stack for (const auto& node_cfg : node_cfgs) { auto fg = node_cfg->context()->func_graph(); // the graph is already output, skip it @@ -291,7 +291,7 @@ void AnalyzedFuncGraphExporter::ExportFuncGraph(const std::string& filename, ofs << "\n\n"; (void)func_graph_set.erase(fg); } - ofs << "# num of total funcgraphs: " << exported.size(); + ofs << "# num of total function graphs: " << exported.size(); ofs.close(); } @@ -332,7 +332,7 @@ void GetInferStackInfo(std::ostringstream& oss) { MS_LOG(INFO) << "Get graph analysis information *end*"; } -// trace the graph evaluator statck +// trace the graph evaluator stack static std::stack> graph_infer_stack; // trace the cnode infer debug info static std::vector cnode_debug_stack{}; diff --git a/mindspore/ccsrc/debug/trace_info.cc b/mindspore/ccsrc/debug/trace_info.cc index e2da4ffcce..b01cd15010 100644 --- a/mindspore/ccsrc/debug/trace_info.cc +++ b/mindspore/ccsrc/debug/trace_info.cc @@ -36,6 +36,6 @@ std::string TraceInfo::GetActionBetweenNode(const DebugInfoPtr& info) { } else if (debug_info()->trace_info() != nullptr) { return act_name + debug_info()->trace_info()->GetActionBetweenNode(info); } - return "not in the traced info"; + return "Not in the traced info"; } } // namespace mindspore diff --git a/mindspore/ccsrc/ir/anf.h b/mindspore/ccsrc/ir/anf.h index e64b1329e9..16ccb15c43 100644 --- a/mindspore/ccsrc/ir/anf.h +++ b/mindspore/ccsrc/ir/anf.h @@ -83,7 +83,7 @@ class AnfVisitor; // Methods: // func_graph: return FuncGraph that this AnfNode belongs to. // scope: return the scope namespace of this AnfNode. Set it using set_scope. -// abstract: return the cached inferred abstract value. It cantains type, shape +// abstract: return the cached inferred abstract value. It contains type, shape // value. Set New cache using set_abstract. // intermediate_abstract: return the cached inferring abstract value. // Type/Shape: return the related info of this AnfNode. 
When this AnfNode is an @@ -284,7 +284,7 @@ class Parameter : public ANode { }; using ParameterPtr = std::shared_ptr; -// Value is used to represent the atomic expression metioned in BNF. +// Value is used to represent the atomic expression mentioned in BNF. // It mainly be stored in ValueNode. Value and ValueNode is related definition. class Value : public Base { public: @@ -313,7 +313,7 @@ using ValuePtr = std::shared_ptr; using ValuePtrList = std::vector; // ValueNode is used to hold value. Unlike CNode and Parameter, ValueNode -// do not belong to any particular function graph. +// does not belong to any particular function graph. class ValueNode : public ANode { public: explicit ValueNode(const ValuePtr &value) : value_(value) {} diff --git a/mindspore/ccsrc/ir/dtype/number.cc b/mindspore/ccsrc/ir/dtype/number.cc index 70d0aaeeaa..d9ef6bb3bd 100644 --- a/mindspore/ccsrc/ir/dtype/number.cc +++ b/mindspore/ccsrc/ir/dtype/number.cc @@ -34,19 +34,19 @@ bool Number::operator==(const Type& other) const { Int::Int(const int nbits) : Number(IntBitsToTypeId(nbits), nbits, false) { if (nbits != 8 && nbits != 16 && nbits != 32 && nbits != 64) { - MS_LOG(EXCEPTION) << "wrong number of bits."; + MS_LOG(EXCEPTION) << "Wrong number of bits."; } } UInt::UInt(const int nbits) : Number(UIntBitsToTypeId(nbits), nbits, false) { if (nbits != 8 && nbits != 16 && nbits != 32 && nbits != 64) { - MS_LOG(EXCEPTION) << "wrong number of bits."; + MS_LOG(EXCEPTION) << "Wrong number of bits."; } } Float::Float(const int nbits) : Number(FloatBitsToTypeId(nbits), nbits, false) { if (nbits != 16 && nbits != 32 && nbits != 64) { - MS_LOG(EXCEPTION) << "wrong number of bits."; + MS_LOG(EXCEPTION) << "Wrong number of bits."; } } diff --git a/mindspore/ccsrc/ir/dtype/type.cc b/mindspore/ccsrc/ir/dtype/type.cc index 6169acabbc..6fbd7f8111 100644 --- a/mindspore/ccsrc/ir/dtype/type.cc +++ b/mindspore/ccsrc/ir/dtype/type.cc @@ -37,7 +37,7 @@ TypeId IntBitsToTypeId(const int nbits) { case 64: return 
kNumberTypeInt64; default: - MS_LOG(EXCEPTION) << "wrong number of bits."; + MS_LOG(EXCEPTION) << "Wrong number of bits."; } } @@ -52,7 +52,7 @@ TypeId UIntBitsToTypeId(const int nbits) { case 64: return kNumberTypeUInt64; default: - MS_LOG(EXCEPTION) << "wrong number of bits."; + MS_LOG(EXCEPTION) << "Wrong number of bits."; } } @@ -65,7 +65,7 @@ TypeId FloatBitsToTypeId(const int nbits) { case 64: return kNumberTypeFloat64; default: - MS_LOG(EXCEPTION) << "wrong number of bits."; + MS_LOG(EXCEPTION) << "Wrong number of bits."; } } diff --git a/mindspore/ccsrc/ir/func_graph.h b/mindspore/ccsrc/ir/func_graph.h index 13e8c9dc8f..1d58c90755 100644 --- a/mindspore/ccsrc/ir/func_graph.h +++ b/mindspore/ccsrc/ir/func_graph.h @@ -174,7 +174,7 @@ class FuncGraph : public FuncGraphBase { GraphDebugInfoPtr debug_info(); void set_debug_info(const GraphDebugInfoPtr &info) { if (info == nullptr) { - MS_LOG(EXCEPTION) << "graph set null debug info"; + MS_LOG(EXCEPTION) << "Graph set null debug info"; } this->debug_info_ = info; } diff --git a/mindspore/ccsrc/ir/manager.cc b/mindspore/ccsrc/ir/manager.cc index 09bc7127c1..c1459014bb 100644 --- a/mindspore/ccsrc/ir/manager.cc +++ b/mindspore/ccsrc/ir/manager.cc @@ -817,7 +817,7 @@ void FuncGraphChildDirect::OnMoveAllCNode(FuncGraphPtr src, FuncGraphPtr dst) { void FuncGraphParentsDirectCollector::OnModEdge(AnfNodePtr node, int, AnfNodePtr inp, EdgeProcessDirection direction) { MS_EXCEPTION_IF_NULL(node); FuncGraphPtr fg1 = node->func_graph(); - // possible chirld parent + // possible child parent if (IsValueNode(inp)) { FuncGraphPtr fg2 = GetValueNode(inp); if (Mod(fg1, ParentProxy(fg2), direction)) { @@ -1181,7 +1181,7 @@ bool FuncGraphJTotalComputer::SeekJ(const FuncGraphPtr& fg, const FuncGraphSetPt } path->add(fg); - // checkg if func graphs used contains J(func_graph); + // check if func graphs used contains J(func_graph); auto& used = this->manager_->func_graphs_used(); for (auto& item : used[fg]) { auto used_g = 
item.first; diff --git a/mindspore/ccsrc/ir/manager.h b/mindspore/ccsrc/ir/manager.h index 8036bd68c0..dc8f656ae7 100644 --- a/mindspore/ccsrc/ir/manager.h +++ b/mindspore/ccsrc/ir/manager.h @@ -650,7 +650,7 @@ class FuncGraphTransaction { explicit FuncGraphTransaction(FuncGraphManager* manager) : manager_(manager), changes_() { MS_EXCEPTION_IF_NULL(manager_); if (!manager_->IsManaged()) { - MS_LOG(DEBUG) << "the manager is not managed yet"; + MS_LOG(DEBUG) << "The manager is not managed yet"; } } diff --git a/mindspore/ccsrc/ir/meta_tensor.h b/mindspore/ccsrc/ir/meta_tensor.h index 57c37851b6..3e28f29f37 100644 --- a/mindspore/ccsrc/ir/meta_tensor.h +++ b/mindspore/ccsrc/ir/meta_tensor.h @@ -148,7 +148,7 @@ class MetaTensor : public Value { // // The constructed MetaTensor object has the same type and shape with meta_tensor. // - // param meta_tensor An exisiting MetaTensor object. + // param meta_tensor An existing MetaTensor object. virtual MetaTensor& operator=(const MetaTensor& meta_tensor); // brief Compares two MetaTensor objects. @@ -166,7 +166,7 @@ class MetaTensor : public Value { TypeId data_type() const { return data_type_; } std::string ToString() const override; std::string DumpText() const override; - // bried Sets the data type of a tensor in its MetaTensor. + // brief Sets the data type of a tensor in its MetaTensor. // // param data_type The data type of the tensor to be set. virtual TypeId set_data_type(const TypeId data_type) { @@ -314,7 +314,7 @@ class Tensor : public MetaTensor { // // The constructed Tensor object has the same type and shape with tensor. // - // param tensor An exisiting Tensor object. + // param tensor An existing Tensor object. Tensor& operator=(const Tensor& tensor); // brief Compares two Tensor objects. @@ -383,7 +383,7 @@ class Tensor : public MetaTensor { // return The [TypeId] of the tensor data. TypeId GetDataType(const py::buffer_info& buf) const; - // bried Sets the data type of a tensor. 
+ // brief Sets the data type of a tensor. // // param data_type The data type of the tensor to be set. // diff --git a/mindspore/ccsrc/ir/visitor.cc b/mindspore/ccsrc/ir/visitor.cc index 70e22b2808..efebe3124a 100644 --- a/mindspore/ccsrc/ir/visitor.cc +++ b/mindspore/ccsrc/ir/visitor.cc @@ -43,14 +43,13 @@ VisitFuncType AnfVisitor::Match(const PrimitivePtr &prim, const std::vectorcast()->inputs(); - // infact, funcs_size == inps_size - 1 auto funcs_size = funcs.size(); - auto inps_size = inputs.size(); + auto inputs_size = inputs.size(); // check the inputs are matched with the predicate functions if (funcs_size > 0) { // use the predicate function list to check the number of inputs - if (funcs_size != (inps_size - 1)) { + if (funcs_size != (inputs_size - 1)) { return; } @@ -63,7 +62,7 @@ VisitFuncType AnfVisitor::Match(const PrimitivePtr &prim, const std::vectorVisit(inputs[i]); } }; diff --git a/mindspore/ccsrc/pynative/pynative_execute.cc b/mindspore/ccsrc/pynative/pynative_execute.cc index 927e768bbe..e5fb0c6949 100644 --- a/mindspore/ccsrc/pynative/pynative_execute.cc +++ b/mindspore/ccsrc/pynative/pynative_execute.cc @@ -36,7 +36,7 @@ #endif const char SINGLE_OP_GRAPH[] = "single_op_graph"; -// primitive unable to infer value for constant input in pynative mode +// primitive unable to infer value for constant input in PyNative mode const std::unordered_set vm_operators = {"partial", "depend"}; namespace mindspore { @@ -45,7 +45,7 @@ inline ValuePtr PyAttrValue(const py::object& obj) { ValuePtr converted_ret = nullptr; bool converted = parse::ConvertData(obj, &converted_ret); if (!converted) { - MS_LOG(EXCEPTION) << "attribute convert error with type:" << std::string(py::str(obj)); + MS_LOG(EXCEPTION) << "Attribute convert error with type:" << std::string(py::str(obj)); } return converted_ret; } @@ -67,7 +67,7 @@ void PynativeInfer(const PrimitivePyPtr& prim, const py::tuple& py_args, OpExecI OpExecInfoPtr GenerateOpExecInfo(const py::args& args) { if 
(args.size() != PY_ARGS_NUM) { - MS_LOG(ERROR) << "four args are needed by RunOp"; + MS_LOG(ERROR) << "Four args are needed by RunOp"; return nullptr; } auto op_exec_info = std::make_shared(); @@ -145,13 +145,13 @@ py::object RunOpInVM(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* stat py::object RunOpInMs(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* status) { MS_EXCEPTION_IF_NULL(op_exec_info); - MS_LOG(INFO) << "start run op[" << op_exec_info->op_name << "] with backend policy ms"; + MS_LOG(INFO) << "Start run op[" << op_exec_info->op_name << "] with backend policy ms"; auto ms_context = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(ms_context); ms_context->set_enable_pynative_infer(true); std::string device_target = ms_context->device_target(); if (device_target != kAscendDevice && device_target != kGPUDevice) { - MS_EXCEPTION(ArgumentError) << "device target [" << device_target << "] is not supported in Pynative mode"; + MS_EXCEPTION(ArgumentError) << "Device target [" << device_target << "] is not supported in Pynative mode"; } std::shared_ptr session = session::SessionFactory::Get().Create(device_target); MS_EXCEPTION_IF_NULL(session); @@ -197,7 +197,7 @@ py::object RunOpWithBackendPolicy(MsBackendPolicy backend_policy, const OpExecIn break; } default: - MS_LOG(ERROR) << "No backend configed for run op"; + MS_LOG(ERROR) << "No backend configured for run op"; } return result; } @@ -240,7 +240,7 @@ py::tuple RunOp(const py::args& args) { } result = RunOpWithBackendPolicy(backend_policy, op_exec_info, &status); if (status != PYNATIVE_SUCCESS) { - MS_LOG(ERROR) << "Fail to run " << op_exec_info->op_name; + MS_LOG(ERROR) << "Failed to run " << op_exec_info->op_name; return err_ret; } diff --git a/mindspore/ccsrc/pynative/pynative_execute_ge.cc b/mindspore/ccsrc/pynative/pynative_execute_ge.cc index 4ed6088494..180b0006ff 100644 --- a/mindspore/ccsrc/pynative/pynative_execute_ge.cc +++ b/mindspore/ccsrc/pynative/pynative_execute_ge.cc @@ -47,7 
+47,7 @@ inline ValuePtr PyAttrValue(const py::object& obj) { ValuePtr converted_ret = nullptr; bool converted = parse::ConvertData(obj, &converted_ret); if (!converted) { - MS_LOG(EXCEPTION) << "attribute convert error with type:" << std::string(py::str(obj)); + MS_LOG(EXCEPTION) << "Attribute convert error with type:" << std::string(py::str(obj)); } return converted_ret; } @@ -67,7 +67,7 @@ MeTensorPtr ConvertPyObjToTensor(const py::object& obj) { } else if (py::isinstance(obj)) { me_tensor_ptr = std::make_shared(py::cast(obj), nullptr); } else { - MS_LOG(EXCEPTION) << "run op inputs type is invalid!"; + MS_LOG(EXCEPTION) << "Run op inputs type is invalid!"; } return me_tensor_ptr; } @@ -97,7 +97,7 @@ bool SetInputsForSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vec auto const_op_desc = transform::TransformUtil::GetGeTensorDesc(me_tensor_ptr->shape_c(), me_tensor_ptr->data_type(), kOpFormat_NCHW); if (const_op_desc == nullptr) { - MS_LOG(ERROR) << "Create variable " << op_name << " ouptut descriptor failed!"; + MS_LOG(ERROR) << "Create variable " << op_name << " output descriptor failed!"; return false; } auto pointer_cast_const_op = std::static_pointer_cast(const_op); @@ -108,7 +108,7 @@ bool SetInputsForSingleOpGraph(const OpExecInfoPtr& op_exec_info, const std::vec continue; } if (adapter->setInput(op, op_input_idx++, const_op)) { - MS_LOG(ERROR) << "fail to set params, index is " << op_input_idx; + MS_LOG(ERROR) << "Failed to set params, index is " << op_input_idx; return false; } graph_input_nodes->push_back(*const_op); @@ -178,7 +178,7 @@ void ToTensorPtr(const OpExecInfoPtr op_exec_info, std::vector* con MeTensorPtr me_tensor_ptr = ConvertPyObjToTensor(op_inputs[i]); auto ge_tensor_ptr = transform::TransformUtil::ConvertTensor(me_tensor_ptr, kOpFormat_NCHW); if (ge_tensor_ptr == nullptr) { - MS_LOG(EXCEPTION) << "convert inputs to GE tensor failed in op " << op_exec_info->op_name << "."; + MS_LOG(EXCEPTION) << "Convert inputs to GE tensor 
failed in op " << op_exec_info->op_name << "."; } // set inputs for operator to build single node graph inputs->push_back(ge_tensor_ptr); @@ -192,7 +192,7 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const st for (auto& item : op_attrs) { if (!py::isinstance(item.first)) { - MS_LOG(ERROR) << "type error in py dict convert"; + MS_LOG(ERROR) << "Type error in py dict convert"; return PYNATIVE_OP_ATTRS_ERR; } std::string name = py::cast(item.first); @@ -203,7 +203,7 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const st // build graph GeGraphPtr graph = std::make_shared(op_exec_info->op_name); if (BuildSingleOpGraph(op_exec_info, inputs, attrs, graph) == false) { - MS_LOG(ERROR) << "Fail to BuildSingleOpGraph"; + MS_LOG(ERROR) << "Failed to BuildSingleOpGraph"; return PYNATIVE_GRAPH_GE_BUILD_ERR; } @@ -211,7 +211,7 @@ PynativeStatusCode ConvertAttributes(const OpExecInfoPtr& op_exec_info, const st transform::Status ret = transform::DfGraphManager::GetInstance().AddGraph(SINGLE_OP_GRAPH, std::shared_ptr(graph)); if (ret != transform::SUCCESS) { - MS_LOG(ERROR) << "Fail to AddGraph into graph manager"; + MS_LOG(ERROR) << "Failed to AddGraph into graph manager"; return PYNATIVE_GRAPH_MANAGER_ERR; } @@ -289,7 +289,7 @@ py::object RunOpInGE(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* stat run_ret = graph_runner->RunGraph(run_options, ge_inputs, &ge_outputs); } if (run_ret != transform::Status::SUCCESS) { - MS_LOG(ERROR) << "GraphRunner Fails to Run Graph"; + MS_LOG(ERROR) << "GraphRunner fails to run graph"; *status = PYNATIVE_GRAPH_GE_RUN_ERR; return std::move(err_ret); } From b94556e9a6bdfd7542816d9ac0d6802cec2fb5fa Mon Sep 17 00:00:00 2001 From: jjfeing Date: Tue, 7 Apr 2020 09:11:45 +0800 Subject: [PATCH 103/367] adapt graphengine upgrade --- cmake/dependency_graphengine.cmake | 1 - 1 file changed, 1 deletion(-) diff --git a/cmake/dependency_graphengine.cmake b/cmake/dependency_graphengine.cmake 
index d2f80c4d04..2420f47736 100644 --- a/cmake/dependency_graphengine.cmake +++ b/cmake/dependency_graphengine.cmake @@ -42,7 +42,6 @@ else() set(ASCEND_PATH /usr/local/Ascend) set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common) set(ASCEND_RUNTIME_PATH ${ASCEND_PATH}/fwkacllib/lib64) - find_library(c_sec libc_sec.so ${ASCEND_DRIVER_PATH}) find_library(slog libslog.so ${ASCEND_DRIVER_PATH}) find_library(mmpa libmmpa.so ${ASCEND_DRIVER_PATH}) From 3202fc0df9bafcd9065460b780b6bae30d2a3bde Mon Sep 17 00:00:00 2001 From: kingfo Date: Fri, 3 Apr 2020 12:02:41 +0800 Subject: [PATCH 104/367] refactor callback for ge backend --- mindspore/ccsrc/CMakeLists.txt | 18 ++- mindspore/ccsrc/pipeline/pipeline.cc | 1 - mindspore/ccsrc/transform/graph_runner.cc | 3 + mindspore/ccsrc/utils/callbacks.cc | 153 ------------------ mindspore/ccsrc/utils/callbacks.h | 8 - mindspore/ccsrc/utils/callbacks_ge.cc | 182 ++++++++++++++++++++++ mindspore/ccsrc/utils/callbacks_ge.h | 38 +++++ mindspore/ccsrc/vm/backend.cc | 3 + tests/ut/cpp/utils/callback_test.cc | 3 + 9 files changed, 246 insertions(+), 163 deletions(-) create mode 100644 mindspore/ccsrc/utils/callbacks_ge.cc create mode 100644 mindspore/ccsrc/utils/callbacks_ge.h diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index fdefdce6a2..befe86f3c0 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -87,7 +87,22 @@ ms_build_flatbuffers("${FLATBUFFER_IN}" "${FLATBUFFER_IN}" GENERATED_OUTPUT_DIR file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "ir/*.cc" "ir/dtype/*.cc" - "utils/*.cc" + "utils/context/ms_context.cc" + "utils/symbolic.cc" + "utils/tensorprint_utils.cc" + "utils/convert_utils.cc" + "utils/graph_utils.cc" + "utils/misc.cc" + "utils/callbacks.cc" + "utils/profile.cc" + "utils/base_ref.cc" + "utils/summary/event_writer.cc" + "utils/log_adapter.cc" + "utils/comm_manager.cc" + "utils/any.cc" + "utils/config_manager.cc" + 
"utils/system/file_system.cc" + "utils/system/crc32c.cc" "common/*.cc" "parallel/*.cc" "pipeline/pipeline.cc" @@ -173,6 +188,7 @@ if(ENABLE_GE) file(GLOB_RECURSE GE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "transform/*.cc" "pynative/pynative_execute_ge.cc" + "utils/callbacks_ge.cc" "pipeline/pipeline_ge.cc" ) list(APPEND MINDSPORE_SRC_LIST ${GE_SRC_LIST}) diff --git a/mindspore/ccsrc/pipeline/pipeline.cc b/mindspore/ccsrc/pipeline/pipeline.cc index 0d7790fb36..003d4c15e9 100644 --- a/mindspore/ccsrc/pipeline/pipeline.cc +++ b/mindspore/ccsrc/pipeline/pipeline.cc @@ -616,7 +616,6 @@ py::object ExecutorPy::Run(const py::tuple& args, const py::object& phase) { return ExecDFGraph(info_, args, phase_s); } #else - MS_LOG(WARNING) << "In ut test " << size << phase_s; if (backend == "ge") { std::shared_ptr ret_val = std::make_shared(); if (info_.count(phase_s) != 0 && info_[phase_s]->func_graph != nullptr) { diff --git a/mindspore/ccsrc/transform/graph_runner.cc b/mindspore/ccsrc/transform/graph_runner.cc index e77b1bcd73..f1f270cdb6 100644 --- a/mindspore/ccsrc/transform/graph_runner.cc +++ b/mindspore/ccsrc/transform/graph_runner.cc @@ -24,6 +24,9 @@ #include "utils/callbacks.h" #include "utils/utils.h" #include "./common.h" +#ifdef ENABLE_GE +#include "utils/callbacks_ge.h" +#endif #ifdef NO_GE_CLIENT namespace ge { diff --git a/mindspore/ccsrc/utils/callbacks.cc b/mindspore/ccsrc/utils/callbacks.cc index cdee0be82d..03c6322afe 100644 --- a/mindspore/ccsrc/utils/callbacks.cc +++ b/mindspore/ccsrc/utils/callbacks.cc @@ -20,10 +20,6 @@ #include #include #include "pybind11/pybind11.h" -#ifdef ENABLE_GE -#include "transform/df_graph_manager.h" -#include "transform/util.h" -#endif #include "pipeline/parse/data_converter.h" #include "pipeline/parse/python_adapter.h" #include "utils/visible.h" @@ -38,155 +34,6 @@ const char kSummary[] = "Summary"; const char kCheckPoint[] = "Save"; const int ONE_SHAPE = 1; -#ifdef ENABLE_GE -using mindspore::transform::Status; -using 
mindspore::transform::TransformUtil; - -bool GetParameterShape(const FuncGraphPtr& graph, const std::string& param_name, - const std::shared_ptr>& shape) { - if (graph == nullptr) { - MS_LOG(ERROR) << "Graph is null, can not get graph parameter"; - return false; - } - - auto parameter_nodes = graph->parameters(); - for (auto& node : parameter_nodes) { - ParameterPtr param_node = std::static_pointer_cast(node); - if (param_node == nullptr) { - MS_LOG(ERROR) << "Parameter node is null, can not get graph parameter"; - return false; - } - if (param_node->name() == param_name) { - py::object parameter = param_node->default_param(); - ValuePtr value = parse::data_converter::PyDataToValue(parameter); - TensorPtr tensor = std::dynamic_pointer_cast(value); - if (tensor == nullptr) { - shape->push_back(ONE_SHAPE); - } else { - *shape = tensor->shape(); - } - return true; - } - } - MS_LOG(ERROR) << "Can not find parameter of name:" << param_name; - return false; -} - -static TensorPtr GetMeTensorTransformed(uint32_t graph_id, const std::string& parameter_name, - const std::shared_ptr& ge_tensor_ptr) { - FuncGraphPtr anf_graph = transform::DfGraphManager::GetInstance().GetAnfGraph(graph_id); - if (anf_graph == nullptr) { - MS_LOG(ERROR) << "Get anf graph failed during callback"; - return nullptr; - } - - std::shared_ptr> parameter_shape_ptr = std::make_shared>(); - if (!GetParameterShape(anf_graph, parameter_name, parameter_shape_ptr)) { - MS_LOG(ERROR) << "Can not get parameter shape during callback"; - return nullptr; - } - - return TransformUtil::ConvertGeTensor(ge_tensor_ptr, *parameter_shape_ptr); -} - -uint32_t CheckpointSaveCallback(uint32_t graph_id, const std::map& params_list) { - // Acquire GIL before calling Python code - py::gil_scoped_acquire acquire; - - MS_LOG(DEBUG) << "Start the checkpoint save callback function in checkpoint save process."; - py::list parameter_list = py::list(); - for (auto& item : params_list) { - std::string name = item.first; - 
std::shared_ptr ge_tensor_ptr = std::make_shared(item.second); - TensorPtr tensor_ptr = GetMeTensorTransformed(graph_id, name, ge_tensor_ptr); - if (tensor_ptr == nullptr) { - MS_LOG(EXCEPTION) << "Transform ge tensor to me tensor failed"; - } - py::dict param_dict; - param_dict["name"] = name; - param_dict["data"] = tensor_ptr; - parameter_list.append(param_dict); - } - py::bool_ ret = - parse::python_adapter::CallPyFn(PYTHON_MOD_CALLBACK_MODULE, PYTHON_FUN_PROCESS_CHECKPOINT, parameter_list); - auto bool_ret = py::cast(ret); - - uint32_t status = Status::SUCCESS; - if (!bool_ret) { - status = Status::FAILED; - MS_LOG(ERROR) << "python checkpoint return false during callback"; - } - return status; -} - -static TensorPtr GetMeTensorForSummary(const std::string& name, const std::shared_ptr& ge_tensor_ptr) { - // confirm the type by name - // Format: xxx[:Scalar] xxx[:Image] xxx[:Tensor] - if (name.empty()) { - MS_LOG(EXCEPTION) << "The summary name is empty."; - } - auto bpos = name.rfind("[:"); - if (bpos >= name.size()) { - MS_LOG(EXCEPTION) << "The summary name(" << name << ") is invalid."; - } - auto tname = name.substr(bpos); - if (tname == "[:Scalar]") { - MS_LOG(DEBUG) << "The summary(" << name << ") is Scalar"; - // process the scalar type summary - // Because the ge tensor is dim = 4, so set the (1,1,1,1)-->(1,) - // We do the (1,) shape is scalar - auto shape = std::vector({ONE_SHAPE}); - return TransformUtil::ConvertGeTensor(ge_tensor_ptr, shape); - } - if (tname == "[:Tensor]") { - MS_LOG(DEBUG) << "The summary(" << name << ") is Tensor"; - // process the tensor summary - // Now we can't get the real shape, so we keep same shape with GE - return TransformUtil::ConvertGeTensor(ge_tensor_ptr); - } - if (tname == "[:Image]") { - MS_LOG(DEBUG) << "The summary(" << name << ") is Image"; - // process the Image summary - // Image dim = 4, is same with ge, so we keep same shape with GE - return TransformUtil::ConvertGeTensor(ge_tensor_ptr); - } - - 
MS_LOG(EXCEPTION) << "The summary name(" << name << ") is invalid."; -} - -// Cache the summary callback data -// Output Format: [{"name": tag_name, "data": tensor}, {"name": tag_name, "data": tensor},...] -uint32_t MS_EXPORT SummarySaveCallback(uint32_t graph_id, const std::map& params_list) { - // Acquire GIL before calling Python code - py::gil_scoped_acquire acquire; - - MS_LOG(DEBUG) << "Start the summary save callback function for graph " << graph_id << "."; - py::list summary_list = py::list(); - MS_LOG(DEBUG) << "Param list size = " << params_list.size(); - for (auto& item : params_list) { - std::string tag_name = item.first; - std::shared_ptr ge_tensor_ptr = std::make_shared(item.second); - TensorPtr tensor_ptr = GetMeTensorForSummary(tag_name, ge_tensor_ptr); - if (tensor_ptr == nullptr) { - MS_LOG(EXCEPTION) << "ConvertGeTensor return tensor is null"; - } - py::dict summary_value_dict; - summary_value_dict["name"] = tag_name; - summary_value_dict["data"] = tensor_ptr; - summary_list.append(summary_value_dict); - } - - py::bool_ ret = parse::python_adapter::CallPyFn(PYTHON_MOD_CALLBACK_MODULE, PYTHON_FUN_PROCESS_SUMMARY, summary_list); - auto bool_ret = py::cast(ret); - if (!bool_ret) { - MS_LOG(ERROR) << "Python checkpoint return false during callback"; - return Status::FAILED; - } - MS_LOG(DEBUG) << "End the summary save callback function."; - return Status::SUCCESS; -} -#endif - // Cache the summary callback data from ME session // Remove the GE module on new architecture // Output Format: [{"name": tag_name, "data": tensor}, {"name": tag_name, "data": tensor},...] 
diff --git a/mindspore/ccsrc/utils/callbacks.h b/mindspore/ccsrc/utils/callbacks.h index 778b0a9ba2..a1e4e75d5b 100644 --- a/mindspore/ccsrc/utils/callbacks.h +++ b/mindspore/ccsrc/utils/callbacks.h @@ -21,10 +21,6 @@ #include #include #include "ir/meta_tensor.h" -#ifdef ENABLE_GE -#include "transform/types.h" -#include "transform/util.h" -#endif namespace mindspore { namespace callbacks { @@ -45,10 +41,6 @@ const int kCallbackFalied = 1; bool GetParameterShape(const FuncGraphPtr& anf_graph, const std::string& param_name, const std::shared_ptr>& shape); -#ifdef ENABLE_GE -uint32_t CheckpointSaveCallback(uint32_t, const std::map&); -uint32_t SummarySaveCallback(uint32_t, const std::map&); -#endif uint32_t SummarySaveCallback(uint32_t, const std::map&); } // namespace callbacks diff --git a/mindspore/ccsrc/utils/callbacks_ge.cc b/mindspore/ccsrc/utils/callbacks_ge.cc new file mode 100644 index 0000000000..50fd2f0b11 --- /dev/null +++ b/mindspore/ccsrc/utils/callbacks_ge.cc @@ -0,0 +1,182 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "utils/callbacks_ge.h" +#include "pybind11/pybind11.h" +#include "transform/df_graph_manager.h" +#include "transform/util.h" +#include "pipeline/parse/data_converter.h" +#include "pipeline/parse/python_adapter.h" +#include "utils/visible.h" + +namespace mindspore { +namespace callbacks { + +const char PYTHON_MOD_CALLBACK_MODULE[] = "mindspore.train.callback"; +const char PYTHON_FUN_PROCESS_CHECKPOINT[] = "_checkpoint_cb_for_save_op"; +const char PYTHON_FUN_PROCESS_SUMMARY[] = "_summary_cb_for_save_op"; +const char kSummary[] = "Summary"; +const char kCheckPoint[] = "Save"; +const int ONE_SHAPE = 1; + +using mindspore::transform::Status; +using mindspore::transform::TransformUtil; + +bool GetParameterShape(const FuncGraphPtr& graph, const std::string& param_name, + const std::shared_ptr>& shape) { + if (graph == nullptr) { + MS_LOG(ERROR) << "Graph is null, can not get graph parameter"; + return false; + } + + auto parameter_nodes = graph->parameters(); + for (auto& node : parameter_nodes) { + ParameterPtr param_node = std::static_pointer_cast(node); + if (param_node == nullptr) { + MS_LOG(ERROR) << "Parameter node is null, can not get graph parameter"; + return false; + } + if (param_node->name() == param_name) { + py::object parameter = param_node->default_param(); + ValuePtr value = parse::data_converter::PyDataToValue(parameter); + TensorPtr tensor = std::dynamic_pointer_cast(value); + if (tensor == nullptr) { + shape->push_back(ONE_SHAPE); + } else { + *shape = tensor->shape(); + } + return true; + } + } + MS_LOG(ERROR) << "Can not find parameter of name:" << param_name; + return false; +} + +static TensorPtr GetMeTensorTransformed(uint32_t graph_id, const std::string& parameter_name, + const std::shared_ptr& ge_tensor_ptr) { + FuncGraphPtr anf_graph = transform::DfGraphManager::GetInstance().GetAnfGraph(graph_id); + if (anf_graph == nullptr) { + MS_LOG(ERROR) << "Get anf graph failed during callback"; + return nullptr; + } + + std::shared_ptr> 
parameter_shape_ptr = std::make_shared>(); + if (!GetParameterShape(anf_graph, parameter_name, parameter_shape_ptr)) { + MS_LOG(ERROR) << "Can not get parameter shape during callback"; + return nullptr; + } + + return TransformUtil::ConvertGeTensor(ge_tensor_ptr, *parameter_shape_ptr); +} + +uint32_t CheckpointSaveCallback(uint32_t graph_id, const std::map& params_list) { + // Acquire GIL before calling Python code + py::gil_scoped_acquire acquire; + + MS_LOG(DEBUG) << "Start the checkpoint save callback function in checkpoint save process."; + py::list parameter_list = py::list(); + for (auto& item : params_list) { + std::string name = item.first; + std::shared_ptr ge_tensor_ptr = std::make_shared(item.second); + TensorPtr tensor_ptr = GetMeTensorTransformed(graph_id, name, ge_tensor_ptr); + if (tensor_ptr == nullptr) { + MS_LOG(EXCEPTION) << "Transform ge tensor to me tensor failed"; + } + py::dict param_dict; + param_dict["name"] = name; + param_dict["data"] = tensor_ptr; + parameter_list.append(param_dict); + } + py::bool_ ret = + parse::python_adapter::CallPyFn(PYTHON_MOD_CALLBACK_MODULE, PYTHON_FUN_PROCESS_CHECKPOINT, parameter_list); + auto bool_ret = py::cast(ret); + + uint32_t status = Status::SUCCESS; + if (!bool_ret) { + status = Status::FAILED; + MS_LOG(ERROR) << "Python checkpoint return false during callback"; + } + return status; +} + +static TensorPtr GetMeTensorForSummary(const std::string& name, const std::shared_ptr& ge_tensor_ptr) { + // confirm the type by name + // Format: xxx[:Scalar] xxx[:Image] xxx[:Tensor] + if (name.empty()) { + MS_LOG(EXCEPTION) << "The summary name is empty."; + } + auto bpos = name.rfind("[:"); + if (bpos >= name.size()) { + MS_LOG(EXCEPTION) << "The summary name(" << name << ") is invalid."; + } + auto tname = name.substr(bpos); + if (tname == "[:Scalar]") { + MS_LOG(DEBUG) << "The summary(" << name << ") is Scalar"; + // process the scalar type summary + // Because the ge tensor is dim = 4, so set the 
(1,1,1,1)-->(1,) + // We do the (1,) shape is scalar + auto shape = std::vector({ONE_SHAPE}); + return TransformUtil::ConvertGeTensor(ge_tensor_ptr, shape); + } + if (tname == "[:Tensor]") { + MS_LOG(DEBUG) << "The summary(" << name << ") is Tensor"; + // process the tensor summary + // Now we can't get the real shape, so we keep same shape with GE + return TransformUtil::ConvertGeTensor(ge_tensor_ptr); + } + if (tname == "[:Image]") { + MS_LOG(DEBUG) << "The summary(" << name << ") is Image"; + // process the Image summary + // Image dim = 4, is same with ge, so we keep same shape with GE + return TransformUtil::ConvertGeTensor(ge_tensor_ptr); + } + + MS_LOG(EXCEPTION) << "The summary name(" << name << ") is invalid."; +} + +// Cache the summary callback data +// Output Format: [{"name": tag_name, "data": tensor}, {"name": tag_name, "data": tensor},...] +uint32_t MS_EXPORT SummarySaveCallback(uint32_t graph_id, const std::map& params_list) { + // Acquire GIL before calling Python code + py::gil_scoped_acquire acquire; + + MS_LOG(DEBUG) << "Start the summary save callback function for graph " << graph_id << "."; + py::list summary_list = py::list(); + MS_LOG(DEBUG) << "Param list size = " << params_list.size(); + for (auto& item : params_list) { + std::string tag_name = item.first; + std::shared_ptr ge_tensor_ptr = std::make_shared(item.second); + TensorPtr tensor_ptr = GetMeTensorForSummary(tag_name, ge_tensor_ptr); + if (tensor_ptr == nullptr) { + MS_LOG(EXCEPTION) << "ConvertGeTensor return tensor is null"; + } + py::dict summary_value_dict; + summary_value_dict["name"] = tag_name; + summary_value_dict["data"] = tensor_ptr; + summary_list.append(summary_value_dict); + } + + py::bool_ ret = parse::python_adapter::CallPyFn(PYTHON_MOD_CALLBACK_MODULE, PYTHON_FUN_PROCESS_SUMMARY, summary_list); + auto bool_ret = py::cast(ret); + if (!bool_ret) { + MS_LOG(ERROR) << "Python checkpoint return false during callback"; + return Status::FAILED; + } + MS_LOG(DEBUG) << "End 
the summary save callback function."; + return Status::SUCCESS; +} +} // namespace callbacks +} // namespace mindspore diff --git a/mindspore/ccsrc/utils/callbacks_ge.h b/mindspore/ccsrc/utils/callbacks_ge.h new file mode 100644 index 0000000000..750ec74666 --- /dev/null +++ b/mindspore/ccsrc/utils/callbacks_ge.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_UTILS_CALLBACKS_GE_H_ +#define MINDSPORE_CCSRC_UTILS_CALLBACKS_GE_H_ + +#include +#include +#include +#include +#include "transform/types.h" +#include "transform/util.h" +#include "ir/meta_tensor.h" + +namespace mindspore { +namespace callbacks { + +using mindspore::tensor::TensorPtr; + +uint32_t CheckpointSaveCallback(uint32_t, const std::map&); +uint32_t SummarySaveCallback(uint32_t, const std::map&); + +} // namespace callbacks +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_UTILS_CALLBACKS_GE_H_ diff --git a/mindspore/ccsrc/vm/backend.cc b/mindspore/ccsrc/vm/backend.cc index 28609abfa9..9355cca99c 100644 --- a/mindspore/ccsrc/vm/backend.cc +++ b/mindspore/ccsrc/vm/backend.cc @@ -24,6 +24,9 @@ #include "utils/graph_utils.h" #include "session/session_factory.h" #include "common/utils.h" +#ifdef ENABLE_GE +#include "utils/callbacks_ge.h" +#endif namespace mindspore { namespace compile { diff --git a/tests/ut/cpp/utils/callback_test.cc b/tests/ut/cpp/utils/callback_test.cc index 
758e99ff59..c63f68f000 100644 --- a/tests/ut/cpp/utils/callback_test.cc +++ b/tests/ut/cpp/utils/callback_test.cc @@ -22,6 +22,9 @@ #include "pipeline/parse/python_adapter.h" #include "transform/df_graph_manager.h" #include "debug/draw.h" +#ifdef ENABLE_GE +#include "utils/callbacks_ge.h" +#endif namespace mindspore { namespace python_adapter = mindspore::parse::python_adapter; From e88bf9ca7912c53785301c4d725e248edc30daba Mon Sep 17 00:00:00 2001 From: chenhaozhe Date: Sat, 4 Apr 2020 15:52:58 +0800 Subject: [PATCH 105/367] change log level in pipeline_ge --- mindspore/ccsrc/pipeline/pipeline_ge.cc | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mindspore/ccsrc/pipeline/pipeline_ge.cc b/mindspore/ccsrc/pipeline/pipeline_ge.cc index abf55bb2d8..2f68935591 100644 --- a/mindspore/ccsrc/pipeline/pipeline_ge.cc +++ b/mindspore/ccsrc/pipeline/pipeline_ge.cc @@ -227,10 +227,12 @@ bool AddDFGraph(const std::map& info, const py::di (void)DfGraphManager::GetInstance().AddGraph(phase, convertor.GetComputeGraph()); } (void)DfGraphManager::GetInstance().AddGraph(init_graph, convertor.GetInitGraph()); - (void)DfGraphManager::GetInstance().AddGraph(checkpoint_name, convertor.GetSaveCheckpointGraph()); (void)DfGraphManager::GetInstance().AddGraph(BROADCAST_GRAPH_NAME, convertor.GetBroadcastGraph()); - DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph); + Status ret = DfGraphManager::GetInstance().AddGraph(checkpoint_name, convertor.GetSaveCheckpointGraph()); + if (ret == Status::SUCCESS) { + DfGraphManager::GetInstance().SetAnfGraph(checkpoint_name, anf_graph); + } return true; } @@ -389,8 +391,7 @@ std::shared_ptr DoExecGraph(const FuncGraphPtr& graph, const std::ve const std::string& phase) { std::vector ge_tensors = TransformUtil::ConvertInputTensors(inputs, kOpFormat_NCHW); if (ge_tensors.size() != inputs.size()) { - MS_LOG(ERROR) << "Args convert to ge tensor error"; - return nullptr; + MS_LOG(EXCEPTION) << "Convert me args 
to ge tensor error."; } std::vector ge_outputs; @@ -401,8 +402,7 @@ std::shared_ptr DoExecGraph(const FuncGraphPtr& graph, const std::ve auto graph_runner = DfGraphManager::GetInstance().GetGraphRunner(); if (graph_runner == nullptr) { - MS_LOG(ERROR) << "Can not found GraphRunner"; - return nullptr; + MS_LOG(EXCEPTION) << "Can not found GraphRunner."; } { @@ -419,7 +419,7 @@ std::shared_ptr DoExecGraph(const FuncGraphPtr& graph, const std::ve std::vector me_outputs = TransformUtil::ConvertGeTensors(ge_outputs); if (me_outputs.size() != ge_outputs.size()) { - MS_LOG(ERROR) << "Convert output Ge tensor to Me tensor failed"; + MS_LOG(WARNING) << "Convert output Ge tensor to Me tensor failed"; } py::tuple outputs(me_outputs.size()); From 69ed72f10da5f64a63ae78669e2cddf4e4270b59 Mon Sep 17 00:00:00 2001 From: fary86 Date: Thu, 2 Apr 2020 07:50:13 +0800 Subject: [PATCH 106/367] Add primitive name to param error message for math_ops.py --- mindspore/_checkparam.py | 126 ++- mindspore/ops/_utils/broadcast.py | 6 +- mindspore/ops/operations/array_ops.py | 13 +- mindspore/ops/operations/math_ops.py | 206 +++-- mindspore/ops/primitive.py | 3 + .../components/executor/check_exceptions.py | 11 +- .../utils/config_util.py | 5 +- .../utils/facade_util.py | 8 +- .../mindspore_test_framework/utils/keyword.py | 1 + tests/ut/python/ops/test_array_ops.py | 2 +- tests/ut/python/ops/test_math_ops_check.py | 751 ++++++++++++++++++ 11 files changed, 1003 insertions(+), 129 deletions(-) create mode 100755 tests/ut/python/ops/test_math_ops_check.py diff --git a/mindspore/_checkparam.py b/mindspore/_checkparam.py index 61b7cc9818..d553bcd364 100644 --- a/mindspore/_checkparam.py +++ b/mindspore/_checkparam.py @@ -15,6 +15,7 @@ """Check parameters.""" import re from enum import Enum +from functools import reduce from itertools import repeat from collections import Iterable @@ -93,8 +94,131 @@ rel_strs = { } +class Validator: + """validator for checking input parameters""" + + @staticmethod + 
def check(arg_name, arg_value, value_name, value, rel=Rel.EQ, prim_name=None): + """ + Method for judging relation between two int values or list/tuple made up of ints. + + This method is not suitable for judging relation between floats, since it does not consider float error. + """ + + rel_fn = Rel.get_fns(rel) + if not rel_fn(arg_value, value): + rel_str = Rel.get_strs(rel).format(f'{value_name}: {value}') + msg_prefix = f'For {prim_name} the' if prim_name else "The" + raise ValueError(f'{msg_prefix} `{arg_name}` should be {rel_str}, but got {arg_value}.') + + @staticmethod + def check_integer(arg_name, arg_value, value, rel, prim_name): + """Integer value judgment.""" + rel_fn = Rel.get_fns(rel) + type_mismatch = not isinstance(arg_value, int) or isinstance(arg_value, bool) + if type_mismatch or not rel_fn(arg_value, value): + rel_str = Rel.get_strs(rel).format(value) + raise ValueError(f'For {prim_name} the `{arg_name}` should be an int and must {rel_str},' + f' but got {arg_value}.') + return arg_value + + @staticmethod + def check_int_range(arg_name, arg_value, lower_limit, upper_limit, rel, prim_name): + """Method for checking whether an int value is in some range.""" + rel_fn = Rel.get_fns(rel) + type_mismatch = not isinstance(arg_value, int) + if type_mismatch or not rel_fn(arg_value, lower_limit, upper_limit): + rel_str = Rel.get_strs(rel).format(lower_limit, upper_limit) + raise ValueError(f'For \'{prim_name}\' the `{arg_name}` should be an int in range {rel_str},' + f' but got {arg_value}.') + return arg_value + + @staticmethod + def check_subclass(arg_name, type_, template_type, prim_name): + """Check whether some type is sublcass of another type""" + if not isinstance(template_type, Iterable): + template_type = (template_type,) + if not any([mstype.issubclass_(type_, x) for x in template_type]): + type_str = (type(type_).__name__ if isinstance(type_, (tuple, list)) else "") + str(type_) + raise TypeError(f'For \'{prim_name}\' the type of `{arg_name}` 
should be subclass' + f' of {",".join((str(x) for x in template_type))}, but got {type_str}.') + + @staticmethod + def check_tensor_type_same(args, valid_values, prim_name): + """check whether the element types of input tensors are the same.""" + def _check_tensor_type(arg): + arg_key, arg_val = arg + Validator.check_subclass(arg_key, arg_val, mstype.tensor, prim_name) + elem_type = arg_val.element_type() + if not elem_type in valid_values: + raise TypeError(f'For \'{prim_name}\' element type of `{arg_key}` should be in {valid_values},' + f' but `{arg_key}` is {elem_type}.') + return (arg_key, elem_type) + + def _check_types_same(arg1, arg2): + arg1_name, arg1_type = arg1 + arg2_name, arg2_type = arg2 + if arg1_type != arg2_type: + raise TypeError(f'For \'{prim_name}\' element type of `{arg2_name}` should be same as `{arg1_name}`,' + f' but `{arg1_name}` is {arg1_type} and `{arg2_name}` is {arg2_type}.') + return arg1 + + elem_types = map(_check_tensor_type, args.items()) + reduce(_check_types_same, elem_types) + + + @staticmethod + def check_scalar_or_tensor_type_same(args, valid_values, prim_name): + """check whether the types of inputs are the same. 
if the input args are tensors, check their element types""" + def _check_argument_type(arg): + arg_key, arg_val = arg + if isinstance(arg_val, type(mstype.tensor)): + arg_val = arg_val.element_type() + if not arg_val in valid_values: + raise TypeError(f'For \'{prim_name}\' the `{arg_key}` should be in {valid_values},' + f' but `{arg_key}` is {arg_val}.') + return arg + + def _check_types_same(arg1, arg2): + arg1_name, arg1_type = arg1 + arg2_name, arg2_type = arg2 + excp_flag = False + if isinstance(arg1_type, type(mstype.tensor)) and isinstance(arg2_type, type(mstype.tensor)): + arg1_type = arg1_type.element_type() + arg2_type = arg2_type.element_type() + elif not (isinstance(arg1_type, type(mstype.tensor)) or isinstance(arg2_type, type(mstype.tensor))): + pass + else: + excp_flag = True + + if excp_flag or arg1_type != arg2_type: + raise TypeError(f'For \'{prim_name}\' type of `{arg2_name}` should be same as `{arg1_name}`,' + f' but `{arg1_name}` is {arg1_type} and `{arg2_name}` is {arg2_type}.') + return arg1 + reduce(_check_types_same, map(_check_argument_type, args.items())) + + @staticmethod + def check_value_type(arg_name, arg_value, valid_types, prim_name): + """Check whether a values is instance of some types.""" + def raise_error_msg(): + """func for raising error message when check failed""" + type_names = [t.__name__ for t in valid_types] + num_types = len(valid_types) + raise TypeError(f'For \'{prim_name}\' the type of `{arg_name}` should be ' + f'{"one of " if num_types > 1 else ""}' + f'{type_names if num_types > 1 else type_names[0]}, but got {type(arg_value).__name__}.') + + # Notice: bool is subclass of int, so `check_value_type('x', True, [int])` will check fail, and + # `check_value_type('x', True, [bool, int])` will check pass + if isinstance(arg_value, bool) and bool not in tuple(valid_types): + raise_error_msg() + if isinstance(arg_value, tuple(valid_types)): + return arg_value + raise_error_msg() + + class ParamValidator: - """Parameter 
validator.""" + """Parameter validator. NOTICE: this class will be replaced by `class Validator`""" @staticmethod def equal(arg_name, arg_value, cond_str, cond): diff --git a/mindspore/ops/_utils/broadcast.py b/mindspore/ops/_utils/broadcast.py index 2c9eb8a54b..c71158de57 100644 --- a/mindspore/ops/_utils/broadcast.py +++ b/mindspore/ops/_utils/broadcast.py @@ -16,13 +16,14 @@ """broadcast""" -def _get_broadcast_shape(x_shape, y_shape): +def _get_broadcast_shape(x_shape, y_shape, prim_name): """ Doing broadcast between tensor x and tensor y. Args: x_shape (list): The shape of tensor x. y_shape (list): The shape of tensor y. + prim_name (str): Primitive name. Returns: List, the shape that broadcast between tensor x and tensor y. @@ -50,7 +51,8 @@ def _get_broadcast_shape(x_shape, y_shape): elif x_shape[i] == y_shape[i]: broadcast_shape_back.append(x_shape[i]) else: - raise ValueError("The x_shape {} and y_shape {} can not broadcast.".format(x_shape, y_shape)) + raise ValueError("For '{}' the x_shape {} and y_shape {} can not broadcast.".format( + prim_name, x_shape, y_shape)) broadcast_shape_front = y_shape[0: y_len - length] if length == x_len else x_shape[0: x_len - length] broadcast_shape = broadcast_shape_front + broadcast_shape_back diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 3dabf2ab0a..53997ecce0 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -28,9 +28,16 @@ from ..._checkparam import ParamValidator as validator from ..._checkparam import Rel from ...common import dtype as mstype from ...common.tensor import Tensor -from ..operations.math_ops import _check_infer_attr_reduce, _infer_shape_reduce +from ..operations.math_ops import _infer_shape_reduce from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register +def _check_infer_attr_reduce(axis, keep_dims): + validator.check_type('keep_dims', keep_dims, [bool]) + validator.check_type('axis', 
axis, [int, tuple]) + if isinstance(axis, tuple): + for index, value in enumerate(axis): + validator.check_type('axis[%d]' % index, value, [int]) + class ExpandDims(PrimitiveWithInfer): """ @@ -1091,7 +1098,7 @@ class ArgMaxWithValue(PrimitiveWithInfer): axis = self.axis x_rank = len(x_shape) validator.check_int_range("axis", axis, -x_rank, x_rank, Rel.INC_LEFT) - ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims) + ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.prim_name()) return ouput_shape, ouput_shape def infer_dtype(self, x_dtype): @@ -1137,7 +1144,7 @@ class ArgMinWithValue(PrimitiveWithInfer): axis = self.axis x_rank = len(x_shape) validator.check_int_range("axis", axis, -x_rank, x_rank, Rel.INC_LEFT) - ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims) + ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.prim_name()) return ouput_shape, ouput_shape def infer_dtype(self, x_dtype): diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index b7373416f9..1411bcb8e6 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -19,7 +19,7 @@ import numpy as np from ..._c_expression import signature_rw as sig_rw from ..._c_expression import signature_kind as sig_kind from ..._c_expression import signature_dtype as sig_dtype -from ..._checkparam import ParamValidator as validator +from ..._checkparam import Validator as validator from ..._checkparam import Rel from ...common import dtype as mstype from ...common.tensor import Tensor @@ -27,16 +27,16 @@ from .._utils import _get_broadcast_shape from ..primitive import PrimitiveWithInfer, prim_attr_register -def _infer_shape_reduce(x, axis, keep_dims): +def _infer_shape_reduce(x, axis, keep_dims, prim_name): """Common infer for reduce operator""" def reduce_one_axis(one_axis): - validator.check_int_range('axis', one_axis, -dim, dim, Rel.INC_LEFT) + 
validator.check_int_range('axis', one_axis, -dim, dim, Rel.INC_LEFT, prim_name) if one_axis < 0: one_axis += dim axis_reduce.add(one_axis) - validator.check_type('axis', axis, [int, tuple, list]) + validator.check_value_type('axis', axis, [int, tuple, list], prim_name) dim = len(x) axis_reduce = set() @@ -48,7 +48,7 @@ def _infer_shape_reduce(x, axis, keep_dims): return [1] * dim return [] for index, one_axis in enumerate(axis): - validator.check_type('axis[%d]' % index, one_axis, [int]) + validator.check_value_type('axis[%d]' % index, one_axis, [int], prim_name) reduce_one_axis(one_axis) out_shape = [] @@ -61,14 +61,6 @@ def _infer_shape_reduce(x, axis, keep_dims): return out_shape -def _check_infer_attr_reduce(axis, keep_dims): - validator.check_type('keep_dims', keep_dims, [bool]) - validator.check_type('axis', axis, [int, tuple]) - if isinstance(axis, tuple): - for index, value in enumerate(axis): - validator.check_type('axis[%d]' % index, value, [int]) - - class _BinaryOp(PrimitiveWithInfer): """ Define binary operators. 
@@ -82,7 +74,7 @@ class _BinaryOp(PrimitiveWithInfer): self.init_prim_io_names(inputs=['x', 'y'], outputs=['output']) def infer_shape(self, x_shape, y_shape): - return _get_broadcast_shape(x_shape, y_shape) + return _get_broadcast_shape(x_shape, y_shape, self.prim_name()) class _MathBinaryOp(_BinaryOp): @@ -91,15 +83,13 @@ class _MathBinaryOp(_BinaryOp): """ @staticmethod - def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type): + def do_infer_dtype(x_dtype, y_dtype, valid_dtype=mstype.number_type, prim_name=None): args_type = {"x": x_dtype, "y": y_dtype} - validator.check_args_tensor(args_type) - args_dtype = {"x_dtype": x_dtype, "y_dtype": y_dtype} - validator.check_type_same(args_dtype, valid_dtype) + validator.check_tensor_type_same(args_type, valid_dtype, prim_name) return x_dtype def infer_dtype(self, x_dtype, y_dtype): - return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype) + return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.prim_name()) class TensorAdd(_MathBinaryOp): @@ -166,7 +156,7 @@ class AssignAdd(PrimitiveWithInfer): def infer_dtype(self, variable, value): args = {"value": value} - validator.check_type_same(args, mstype.number_type) + validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.prim_name()) return value @@ -207,7 +197,7 @@ class AssignSub(PrimitiveWithInfer): def infer_dtype(self, variable, value): args = {"value": value} - validator.check_type_same(args, mstype.number_type) + validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.prim_name()) return value @@ -228,15 +218,16 @@ class _Reduce(PrimitiveWithInfer): @prim_attr_register def __init__(self, keep_dims=False): """init Reduce""" - validator.check_type('keep_dims', keep_dims, [bool]) + validator.check_value_type('keep_dims', keep_dims, [bool], self.prim_name()) self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y']) def do_infer(self, input_x, axis, valid_dtype=mstype.number_type): axis_v = 
axis['value'] input_shp = input_x['shape'] - validator.check_subclass('input_x', input_x['dtype'], mstype.tensor) - validator.check_typename('input_x', input_x['dtype'], valid_dtype) - input_shp = _infer_shape_reduce(input_shp, axis_v, self.keep_dims) + args = {'input_x': input_x['dtype']} + validator.check_tensor_type_same(args, valid_dtype, self.prim_name()) + + input_shp = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.prim_name()) return {'shape': input_shp, 'dtype': input_x['dtype'], 'value': None} @@ -471,16 +462,17 @@ class CumProd(PrimitiveWithInfer): """ @prim_attr_register def __init__(self, exclusive=False, reverse=False): - self.exclusive = validator.check_type("exclusive", exclusive, [bool]) - self.reverse = validator.check_type("reverse", reverse, [bool]) + cls_name = self.prim_name() + self.exclusive = validator.check_value_type("exclusive", exclusive, [bool], cls_name) + self.reverse = validator.check_value_type("reverse", reverse, [bool], cls_name) def infer_shape(self, x_shape, axis_shape): return x_shape def infer_dtype(self, x_type, axis_type): - validator.check_subclass('x_type', x_type, mstype.tensor) - validator.check_typename('x_type', x_type, mstype.number_type) - validator.check_subclass("axis_type", axis_type, mstype.int_) + cls_name = self.prim_name() + validator.check_tensor_type_same({'x': x_type}, mstype.number_type, cls_name) + validator.check_subclass("axis", axis_type, mstype.int_, cls_name) return x_type @@ -514,8 +506,9 @@ class MatMul(PrimitiveWithInfer): def __init__(self, transpose_a=False, transpose_b=False): self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output']) self.__setattr_flag__ = True - validator.check_type("transpose_a", transpose_a, [bool]) - validator.check_type("transpose_b", transpose_b, [bool]) + cls_name = self.prim_name() + validator.check_value_type("transpose_a", transpose_a, [bool], cls_name) + validator.check_value_type("transpose_b", transpose_b, [bool], cls_name) def 
check_shape_size(self, x, y): if len(x) != 2 or len(y) != 2: @@ -524,11 +517,11 @@ class MatMul(PrimitiveWithInfer): def infer_shape(self, x, y): self.check_shape_size(x, y) - cls_name = self.__class__.__name__ + cls_name = self.prim_name() # expected dimension of x, y, x:[...,a,b] y:[..., c,d], the dim size should be the same except the last two for i in range(len(x) - 2): if x[i] != y[i]: - raise ValueError(f'{cls_name} shape in dim[{i}] not the same, while x is {x[i]}, y is {y[i]}') + raise ValueError(f'For \'{cls_name}\' shape in dim[{i}] not the same, while x is {x[i]}, y is {y[i]}') # validate whether last two dims satifing matrix multiply x_last = x[-2:] @@ -537,8 +530,8 @@ class MatMul(PrimitiveWithInfer): x_col = x_last[not self.transpose_a] # x_col = x_last[1] if (not transpose_a) else x_last[0] y_row = y_last[self.transpose_b] # y_row = y_last[0] if (not transpose_b) else y_last[1] if x_col != y_row: - raise ValueError(f'{cls_name} evaluator shapes of inputs can not do this operator, got {x_col} and {y_row}' - + f' for {cls_name}, with x shape {x}(transpose_a={self.transpose_a})' + raise ValueError(f'For \'{cls_name}\' evaluator shapes of inputs can not do this operator,' + + f' got {x_col} and {y_row}, with x shape {x}(transpose_a={self.transpose_a})' + f', y shape {y}(transpose_b={self.transpose_b}).') # set attribute self.add_prim_attr('transpose_x1', self.transpose_a) @@ -548,10 +541,8 @@ class MatMul(PrimitiveWithInfer): return ret_dims def infer_dtype(self, x, y): - validator.check_subclass("x", x, mstype.tensor) - validator.check_subclass("y", y, mstype.tensor) - args = {"x dtype": x, "y dtype": y} - validator.check_type_same(args, mstype.float_type + mstype.int_type) + args = {"x": x, "y": y} + validator.check_tensor_type_same(args, mstype.float_type + mstype.int_type, self.prim_name()) return x @@ -595,12 +586,13 @@ class BatchMatMul(MatMul): def __init__(self, transpose_a=False, transpose_b=False): self.init_prim_io_names(inputs=['x1', 'x2'], 
outputs=['output']) self.__setattr_flag__ = True - validator.check_type("transpose_a", transpose_a, [bool]) - validator.check_type("transpose_b", transpose_b, [bool]) + cls_name = self.prim_name() + validator.check_value_type("transpose_a", transpose_a, [bool], cls_name) + validator.check_value_type("transpose_b", transpose_b, [bool], cls_name) def check_shape_size(self, x, y): if len(x) != len(y) or len(x) < 3: - raise ValueError('BatchMatMul input x, y should be the same dimension size and should be ' + raise ValueError('For \'BatchMatMul\' input x, y should be the same dimension size and should be ' 'greater or equal to 3,' + f' while x size = {len(x)}, y size= {len(y)}') @@ -632,18 +624,17 @@ class CumSum(PrimitiveWithInfer): @prim_attr_register def __init__(self, exclusive=False, reverse=False): """init cumsum""" - self.exclusive = validator.check_type('exclusive', exclusive, [bool]) - self.add_prim_attr("exclusive", self.exclusive) - self.reverse = validator.check_type('reverse', reverse, [bool]) - self.add_prim_attr("reverse", self.reverse) + cls_name = self.prim_name() + validator.check_value_type('exclusive', exclusive, [bool], cls_name) + validator.check_value_type('reverse', reverse, [bool], cls_name) self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y']) def __infer__(self, x, axis): + cls_name = self.prim_name() x_shp = x['shape'] - validator.check_type('axis', axis['value'], [int]) - validator.check_subclass('x', x['dtype'], mstype.tensor) - validator.check_typename('x', x['dtype'], [mstype.uint8, mstype.int8, - mstype.int32, mstype.float16, mstype.float32]) + validator.check_value_type('axis', axis['value'], [int], cls_name) + valid_types = [mstype.uint8, mstype.int8, mstype.int32, mstype.float16, mstype.float32] + validator.check_tensor_type_same({'x': x['dtype']}, valid_types, cls_name) return {'shape': x_shp, 'dtype': x['dtype'], 'value': None} @@ -684,21 +675,22 @@ class AddN(PrimitiveWithInfer): self.init_prim_io_names(inputs=["inputs"], 
outputs=["sum"]) def infer_shape(self, inputs): - validator.check_integer("inputs", len(inputs), 1, Rel.GE) + cls_name = self.prim_name() + validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name) self.add_prim_attr('n', len(inputs)) shp0 = inputs[0] for i, shp in enumerate(inputs): - validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0) + validator.check(f"shape of inputs[{i}]", shp, 'shape of inputs[0]', shp0, Rel.EQ, cls_name) return shp0 def infer_dtype(self, inputs): - validator.check_type("inputs", inputs, [tuple, list]) - validator.check_integer("inputs", len(inputs), 1, Rel.GE) + cls_name = self.prim_name() + validator.check_value_type("inputs", inputs, [tuple, list], cls_name) + validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name) args = {} for i, dtype in enumerate(inputs): - validator.check_subclass(f"inputs[{i}]", dtype, mstype.tensor) args[f"inputs[{i}]"] = dtype - validator.check_type_same(args, mstype.number_type + (mstype.bool_,)) + validator.check_tensor_type_same(args, mstype.number_type + (mstype.bool_,), cls_name) return inputs[0] @@ -722,8 +714,7 @@ class Neg(PrimitiveWithInfer): return input_x def infer_dtype(self, input_x): - validator.check_subclass("input_x", input_x, mstype.tensor) - validator.check_typename("input_x", input_x, mstype.number_type) + validator.check_tensor_type_same({"input_x": input_x}, mstype.number_type, self.prim_name()) return input_x @@ -806,8 +797,7 @@ class Square(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_subclass("x", x_type, mstype.tensor) - validator.check_typename("x_dtype", x_type, mstype.number_type) + validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.prim_name()) return x_type @@ -836,8 +826,7 @@ class Rsqrt(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_subclass("x", x_type, mstype.tensor) - validator.check_typename("x_dtype", x_type, mstype.number_type) + 
validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.prim_name()) return x_type @@ -866,8 +855,7 @@ class Sqrt(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_subclass("x", x_type, mstype.tensor) - validator.check_typename("x_dtype", x_type, mstype.number_type) + validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.prim_name()) return x_type @@ -897,7 +885,7 @@ class Reciprocal(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_subclass("x", x, mstype.tensor) + validator.check_subclass("x", x, mstype.tensor, self.prim_name()) return x @@ -935,8 +923,7 @@ class Pow(PrimitiveWithInfer): return x def infer_dtype(self, x, power): - validator.check_subclass("x", x, mstype.tensor) - validator.check_typename("power", power, mstype.number_type) + validator.check_tensor_type_same({"x": x}, mstype.number_type, self.prim_name()) return x @@ -966,7 +953,7 @@ class Exp(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_subclass("x", x_type, mstype.tensor) + validator.check_subclass("x", x_type, mstype.tensor, self.prim_name()) return x_type @@ -995,7 +982,7 @@ class Log(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_subclass("x", x, mstype.tensor) + validator.check_subclass("x", x, mstype.tensor, self.prim_name()) return x @@ -1178,8 +1165,7 @@ class Floor(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - validator.check_subclass("x", x_dtype, mstype.tensor) - validator.check_typename("x_dtype", x_dtype, mstype.float_type) + validator.check_tensor_type_same({"x": x_dtype}, mstype.float_type, self.prim_name()) return x_dtype @@ -1234,8 +1220,7 @@ class Acosh(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_subclass("x_dtype", x, mstype.tensor) - validator.check_typename('x_dtype', x, mstype.number_type) + validator.check_tensor_type_same({'x': x}, mstype.number_type, 
self.prim_name()) return x @@ -1245,15 +1230,13 @@ class _LogicBinaryOp(_BinaryOp): """ @staticmethod - def do_infer_dtype(x_dtype, y_dtype, valid_type=mstype.number_type): - args_type = {"x": x_dtype, "y": y_dtype} - validator.check_args_tensor(args_type) - args_dtype = {"x_dtype": x_dtype, "y_dtype": y_dtype} - validator.check_type_same(args_dtype, valid_type) + def do_infer_dtype(x_dtype, y_dtype, valid_type=mstype.number_type, prim_name=None): + args_dtype = {"x": x_dtype, "y": y_dtype} + validator.check_tensor_type_same(args_dtype, valid_type, prim_name) return mstype.tensor_type(mstype.bool_) def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.prim_name()) class Equal(_LogicBinaryOp): @@ -1289,7 +1272,7 @@ class Equal(_LogicBinaryOp): """ def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,)) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.prim_name()) class EqualCount(PrimitiveWithInfer): @@ -1318,11 +1301,13 @@ class EqualCount(PrimitiveWithInfer): """init EqualCount""" self.init_prim_io_names(inputs=['x', 'y'], outputs=['output']) - def infer_shape(self, x_shape, w_shape): + def infer_shape(self, x_shape, y_shape): output_shape = (1,) return output_shape - def infer_dtype(self, x_dtype, w_dtype): + def infer_dtype(self, x_dtype, y_dtype): + args = {'x': x_dtype, 'y': y_dtype} + validator.check_tensor_type_same(args, mstype.number_type + (mstype.bool_,), self.prim_name()) return x_dtype @@ -1359,7 +1344,7 @@ class NotEqual(_LogicBinaryOp): """ def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,)) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.prim_name()) class 
Greater(_LogicBinaryOp): @@ -1495,8 +1480,7 @@ class LogicalNot(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - validator.check_subclass("x", x_dtype, mstype.tensor) - validator.check_typename("x_dtype", x_dtype, [mstype.bool_]) + validator.check_tensor_type_same({"x": x_dtype}, [mstype.bool_], self.prim_name()) return mstype.tensor_type(mstype.bool_) @@ -1526,7 +1510,7 @@ class LogicalAnd(_LogicBinaryOp): """ def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,)) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.prim_name()) class LogicalOr(_LogicBinaryOp): @@ -1555,7 +1539,7 @@ class LogicalOr(_LogicBinaryOp): """ def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,)) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.prim_name()) class NPUAllocFloatStatus(PrimitiveWithInfer): @@ -1616,13 +1600,13 @@ class NPUGetFloatStatus(PrimitiveWithInfer): self.add_prim_attr("_side_effect_flag", True) def infer_shape(self, x_shape): - validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ) - validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ) + cls_name = self.prim_name() + validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ, cls_name) + validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ, cls_name) return [8] def infer_dtype(self, x_dtype): - args = {"x_dtype": x_dtype} - validator.check_type_same(args, [mstype.float32]) + validator.check_tensor_type_same({'x': x_dtype}, [mstype.float32], self.prim_name()) return mstype.float32 @@ -1658,13 +1642,13 @@ class NPUClearFloatStatus(PrimitiveWithInfer): self.add_prim_attr("_side_effect_flag", True) def infer_shape(self, x_shape): - validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ) - validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ) + cls_name = self.prim_name() + 
validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ, cls_name) + validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ, cls_name) return [8] def infer_dtype(self, x_dtype): - args = {"x_dtype": x_dtype} - validator.check_type_same(args, [mstype.float32]) + validator.check_tensor_type_same({'x': x_dtype}, [mstype.float32], self.prim_name()) return mstype.float32 @@ -1692,8 +1676,7 @@ class Cos(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_subclass("x_dtype", x, mstype.tensor) - validator.check_typename('x_dtype', x, mstype.number_type) + validator.check_tensor_type_same({'x': x}, mstype.number_type, self.prim_name()) return x @@ -1721,8 +1704,7 @@ class ACos(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_subclass("x_dtype", x, mstype.tensor) - validator.check_typename('x_dtype', x, mstype.number_type) + validator.check_tensor_type_same({'x': x}, mstype.number_type, self.prim_name()) return x @@ -1750,8 +1732,7 @@ class Sin(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_subclass("x_dtype", x, mstype.tensor) - validator.check_typename('x_dtype', x, mstype.number_type) + validator.check_tensor_type_same({'x': x}, mstype.number_type, self.prim_name()) return x @@ -1796,19 +1777,19 @@ class NMSWithMask(PrimitiveWithInfer): @prim_attr_register def __init__(self, iou_threshold=0.5): """Init NMSWithMask""" - validator.check_type("iou_threshold", iou_threshold, [float]) + validator.check_value_type("iou_threshold", iou_threshold, [float], self.prim_name()) self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask']) def infer_shape(self, bboxes_shape): - validator.check_integer("bboxes rank", len(bboxes_shape), 2, Rel.EQ) - validator.check_integer("bboxes.shape()[0]", bboxes_shape[0], 0, Rel.GT) - validator.check_integer("bboxes.shape()[1]", bboxes_shape[1], 5, Rel.EQ) + cls_name = self.prim_name() + validator.check_integer("bboxes 
rank", len(bboxes_shape), 2, Rel.EQ, cls_name) + validator.check_integer("bboxes.shape()[0]", bboxes_shape[0], 0, Rel.GT, cls_name) + validator.check_integer("bboxes.shape()[1]", bboxes_shape[1], 5, Rel.EQ, cls_name) num = bboxes_shape[0] return (bboxes_shape, (num,), (num,)) def infer_dtype(self, bboxes_dtype): - validator.check_subclass("bboxes_dtype", bboxes_dtype, mstype.tensor) - validator.check_typename("bboxes_dtype", bboxes_dtype, [mstype.float16, mstype.float32]) + validator.check_tensor_type_same({"bboxes": bboxes_dtype}, [mstype.float16, mstype.float32], self.prim_name()) return (bboxes_dtype, mstype.int32, mstype.bool_) @@ -1837,8 +1818,7 @@ class Abs(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_subclass("x_dtype", x_type, mstype.tensor) - validator.check_typename('x_dtype', x_type, mstype.number_type) + validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.prim_name()) return x_type def infer_value(self, x): @@ -1880,8 +1860,7 @@ class Sign(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - validator.check_subclass('x', x_dtype, mstype.tensor) - validator.check_typename('x_dtype', x_dtype, mstype.number_type) + validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.prim_name()) return x_dtype @@ -1910,8 +1889,7 @@ class Round(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_subclass("x_dtype", x_type, mstype.tensor) - validator.check_typename('x_dtype', x_type, mstype.number_type) + validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.prim_name()) return x_type diff --git a/mindspore/ops/primitive.py b/mindspore/ops/primitive.py index d281b4f76c..96e754f5f7 100644 --- a/mindspore/ops/primitive.py +++ b/mindspore/ops/primitive.py @@ -194,6 +194,9 @@ class PrimitiveWithInfer(Primitive): Primitive.__init__(self, name) self.set_prim_type(prim_type.py_infer_shape) + def prim_name(self): + return 
self.__class__.__name__ + def _clone(self): """ Deeply clones the primitive object. diff --git a/tests/mindspore_test_framework/components/executor/check_exceptions.py b/tests/mindspore_test_framework/components/executor/check_exceptions.py index eca0578b7f..fe57a3d287 100644 --- a/tests/mindspore_test_framework/components/executor/check_exceptions.py +++ b/tests/mindspore_test_framework/components/executor/check_exceptions.py @@ -23,20 +23,25 @@ from ...utils import keyword class CheckExceptionsEC(IExectorComponent): """ - Check if the function raises the expected Exception. + Check if the function raises the expected Exception and the error message contains specified keywords if not None. Examples: { 'block': f, - 'exception': Exception + 'exception': Exception, + 'error_keywords': ['TensorAdd', 'shape'] } """ def run_function(self, function, inputs, verification_set): f = function[keyword.block] args = inputs[keyword.desc_inputs] e = function.get(keyword.exception, Exception) + error_kws = function.get(keyword.error_keywords, None) try: - with pytest.raises(e): + with pytest.raises(e) as exec_info: f(*args) except: raise Exception(f"Expect {e}, but got {sys.exc_info()[0]}") + if error_kws and any(keyword not in str(exec_info.value) for keyword in error_kws): + raise ValueError('Error message `{}` does not contain all keywords `{}`'.format( + str(exec_info.value), error_kws)) diff --git a/tests/mindspore_test_framework/utils/config_util.py b/tests/mindspore_test_framework/utils/config_util.py index d2eb91d042..b7be15ac23 100644 --- a/tests/mindspore_test_framework/utils/config_util.py +++ b/tests/mindspore_test_framework/utils/config_util.py @@ -87,8 +87,9 @@ def get_function_config(function): init_param_with = function.get(keyword.init_param_with, None) split_outputs = function.get(keyword.split_outputs, True) exception = function.get(keyword.exception, Exception) + error_keywords = function.get(keyword.error_keywords, None) return delta, max_error, 
input_selector, output_selector, sampling_times, \ - reduce_output, init_param_with, split_outputs, exception + reduce_output, init_param_with, split_outputs, exception, error_keywords def get_grad_checking_options(function, inputs): """ @@ -104,6 +105,6 @@ def get_grad_checking_options(function, inputs): """ f = function[keyword.block] args = inputs[keyword.desc_inputs] - delta, max_error, input_selector, output_selector, sampling_times, reduce_output, _, _, _ = \ + delta, max_error, input_selector, output_selector, sampling_times, reduce_output, _, _, _, _ = \ get_function_config(function) return f, args, delta, max_error, input_selector, output_selector, sampling_times, reduce_output diff --git a/tests/mindspore_test_framework/utils/facade_util.py b/tests/mindspore_test_framework/utils/facade_util.py index bbbf1dd375..4c5896b3df 100644 --- a/tests/mindspore_test_framework/utils/facade_util.py +++ b/tests/mindspore_test_framework/utils/facade_util.py @@ -54,11 +54,12 @@ def fill_block_config(ret, block_config, tid, group, desc_inputs, desc_bprop, ex block = block_config delta, max_error, input_selector, output_selector, \ - sampling_times, reduce_output, init_param_with, split_outputs, exception = get_function_config({}) + sampling_times, reduce_output, init_param_with, split_outputs, exception, error_keywords = get_function_config({}) if isinstance(block_config, tuple) and isinstance(block_config[-1], dict): block = block_config[0] delta, max_error, input_selector, output_selector, \ - sampling_times, reduce_output, init_param_with, split_outputs, exception = get_function_config(block_config[-1]) + sampling_times, reduce_output, init_param_with, \ + split_outputs, exception, error_keywords = get_function_config(block_config[-1]) if block: func_list.append({ @@ -78,7 +79,8 @@ def fill_block_config(ret, block_config, tid, group, desc_inputs, desc_bprop, ex keyword.const_first: const_first, keyword.add_fake_input: add_fake_input, keyword.split_outputs: 
split_outputs, - keyword.exception: exception + keyword.exception: exception, + keyword.error_keywords: error_keywords }) if desc_inputs or desc_const: diff --git a/tests/mindspore_test_framework/utils/keyword.py b/tests/mindspore_test_framework/utils/keyword.py index 16618be70a..79f1f91d8f 100644 --- a/tests/mindspore_test_framework/utils/keyword.py +++ b/tests/mindspore_test_framework/utils/keyword.py @@ -73,5 +73,6 @@ keyword.const_first = "const_first" keyword.add_fake_input = "add_fake_input" keyword.fake_input_type = "fake_input_type" keyword.exception = "exception" +keyword.error_keywords = "error_keywords" sys.modules[__name__] = keyword diff --git a/tests/ut/python/ops/test_array_ops.py b/tests/ut/python/ops/test_array_ops.py index 1c4895465f..faaa9d5402 100644 --- a/tests/ut/python/ops/test_array_ops.py +++ b/tests/ut/python/ops/test_array_ops.py @@ -234,7 +234,7 @@ raise_set = [ 'block': (lambda x: P.Squeeze(axis=((1.2, 1.3))), {'exception': ValueError}), 'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}), ('ReduceSum_Error', { - 'block': (lambda x: P.ReduceSum(keep_dims=1), {'exception': ValueError}), + 'block': (lambda x: P.ReduceSum(keep_dims=1), {'exception': TypeError}), 'desc_inputs': [Tensor(np.ones(shape=[3, 1, 5]))]}), ] diff --git a/tests/ut/python/ops/test_math_ops_check.py b/tests/ut/python/ops/test_math_ops_check.py new file mode 100755 index 0000000000..be6e5691ea --- /dev/null +++ b/tests/ut/python/ops/test_math_ops_check.py @@ -0,0 +1,751 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" test ops """ +import functools +import numpy as np +from mindspore import ops +from mindspore.ops import functional as F +from mindspore.ops import operations as P +from mindspore.ops.operations import _grad_ops as G +import mindspore.ops.composite as C +import mindspore.nn as nn +from mindspore import Tensor +from mindspore.common import dtype as mstype +from mindspore.common.parameter import Parameter +from ..ut_filter import non_graph_engine +from mindspore.common.api import _executor + +from ....mindspore_test_framework.mindspore_test import mindspore_test +from ....mindspore_test_framework.pipeline.forward.compile_forward\ + import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config, + pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) +from ....mindspore_test_framework.pipeline.gradient.compile_gradient\ + import pipeline_for_compile_grad_ge_graph_for_case_by_case_config + + +class AssignAddNet(nn.Cell): + def __init__(self,): + super(AssignAddNet, self).__init__() + self.op = P.AssignAdd() + self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_add1") + + def construct(self, x): + self.op(self.inputdata, x) + return self.inputdata + + +class AssignSubNet(nn.Cell): + def __init__(self,): + super(AssignSubNet, self).__init__() + self.op = P.AssignSub() + self.inputdata = Parameter(Tensor(np.zeros([1]).astype(np.bool_), mstype.bool_), name="assign_sub1") + + def construct(self, x): + self.op(self.inputdata, x) + return self.inputdata + + +class ReduceNet(nn.Cell): + def __init__(self, op_class, keep_dims, axis): + super(ReduceNet, self).__init__() + self.axis = axis + self.op = op_class(keep_dims=keep_dims) + + def construct(self, x): + return self.op(x, self.axis) + + +class CumProdNet(nn.Cell): + def 
__init__(self): + super(CumProdNet, self).__init__() + self.op = P.CumProd() + + def construct(self, x, axis): + return self.op(x, axis) + + +class CumSumNet(nn.Cell): + def __init__(self, axis): + super(CumSumNet, self).__init__() + self.axis = axis + self.op = P.CumSum() + + def construct(self, x): + return self.op(x, self.axis) + + +raise_set = [ + # one input is scalar, and another is Tensor(float32) + ('TensorAdd0', { + 'block': (P.TensorAdd(), {'exception': TypeError, 'error_keywords': ['TensorAdd']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, but element types are not same + ('TensorAdd1', { + 'block': (P.TensorAdd(), {'exception': TypeError, 'error_keywords': ['TensorAdd']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('TensorAdd2', { + 'block': (P.TensorAdd(), {'exception': ValueError, 'error_keywords': ['TensorAdd']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + + # check input Tensor(bool_) + ('AssignAdd', { + 'block': (AssignAddNet(), {'exception': TypeError, 'error_keywords': ['AssignAdd']}), + 'desc_inputs': [Tensor(np.ones([1]).astype(np.bool_), mstype.bool_)], + 'skip': ['backward']}), + + # check input Tensor(bool_) + ('AssignSub', { + 'block': (AssignSubNet(), {'exception': TypeError, 'error_keywords': ['AssignSub']}), + 'desc_inputs': [Tensor(np.ones([1]).astype(np.bool_), mstype.bool_)], + 'skip': ['backward']}), + + # type of axis is float, not int + ('ReduceMean1', { + 'block': (ReduceNet(P.ReduceMean, keep_dims=True, axis=5.0), + {'exception': TypeError, 'error_keywords': ['ReduceMean']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + # axis is out of range + ('ReduceMean2', { + 'block': 
(ReduceNet(P.ReduceMean, keep_dims=True, axis=5), + {'exception': ValueError, 'error_keywords': ['ReduceMean']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + + # type of axis is float, not int + ('ReduceSum1', { + 'block': (ReduceNet(P.ReduceSum, keep_dims=True, axis=5.0), + {'exception': TypeError, 'error_keywords': ['ReduceSum']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + # axis is out of range + ('ReduceSum2', { + 'block': (ReduceNet(P.ReduceSum, keep_dims=True, axis=5), + {'exception': ValueError, 'error_keywords': ['ReduceSum']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + + # type of axis is float, not int + ('ReduceAll1', { + 'block': (ReduceNet(P.ReduceAll, keep_dims=True, axis=5.0), + {'exception': TypeError, 'error_keywords': ['ReduceAll']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool_))], + 'skip': ['backward']}), + # axis is out of range + ('ReduceAll2', { + 'block': (ReduceNet(P.ReduceAll, keep_dims=True, axis=5), + {'exception': ValueError, 'error_keywords': ['ReduceAll']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool_))], + 'skip': ['backward']}), + + # type of axis is float, not int + ('ReduceMax1', { + 'block': (ReduceNet(P.ReduceMax, keep_dims=True, axis=5.0), + {'exception': TypeError, 'error_keywords': ['ReduceMax']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + # axis is out of range + ('ReduceMax2', { + 'block': (ReduceNet(P.ReduceMax, keep_dims=True, axis=5), + {'exception': ValueError, 'error_keywords': ['ReduceMax']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + + # type of axis is float, not int + ('ReduceMin1', { + 'block': (ReduceNet(P.ReduceMin, keep_dims=True, axis=5.0), + {'exception': TypeError, 'error_keywords': ['ReduceMin']}), + 'desc_inputs': 
[Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + # axis is out of range + ('ReduceMin2', { + 'block': (ReduceNet(P.ReduceMin, keep_dims=True, axis=5), + {'exception': ValueError, 'error_keywords': ['ReduceMin']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + + # type of axis is float, not int + ('ReduceProd1', { + 'block': (ReduceNet(P.ReduceProd, keep_dims=True, axis=5.0), + {'exception': TypeError, 'error_keywords': ['ReduceProd']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + # axis is out of range + ('ReduceProd2', { + 'block': (ReduceNet(P.ReduceProd, keep_dims=True, axis=5), + {'exception': ValueError, 'error_keywords': ['ReduceProd']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32))], + 'skip': ['backward']}), + + # type of x is Tensor(bool) + ('CumProd1', { + 'block': (CumProdNet(), + {'exception': TypeError, 'error_keywords': ['CumProd']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool)), 1], + 'skip': ['backward']}), + # type of axis is float, not int + ('CumProd2', { + 'block': (CumProdNet(), + {'exception': TypeError, 'error_keywords': ['CumProd']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.float32)), 5.0], + 'skip': ['backward']}), + + # type of x and y are Tensor(uint32) + ('MatMul1', { + 'block': (P.MatMul(), + {'exception': TypeError, 'error_keywords': ['MatMul']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.uint32)), Tensor(np.ones([3, 2]).astype(np.uint32))], + 'skip': ['backward']}), + # type of x and y not match + ('MatMul2', { + 'block': (P.MatMul(), + {'exception': TypeError, 'error_keywords': ['MatMul']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.int32))], + 'skip': ['backward']}), + # shape of x and y not match + ('MatMul3', { + 'block': (P.MatMul(), + {'exception': ValueError, 'error_keywords': ['MatMul']}), + 
'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.float32)), Tensor(np.ones([2, 3]).astype(np.float32))], + 'skip': ['backward']}), + + # dims of x and y are less than 3 + ('BatchMatMul1', { + 'block': (P.BatchMatMul(), + {'exception': ValueError, 'error_keywords': ['BatchMatMul']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32)), Tensor(np.ones([3, 2]).astype(np.int32))], + 'skip': ['backward']}), + + # type of x is Tensor(bool) + ('CumSum1', { + 'block': (CumSumNet(axis=1), + {'exception': TypeError, 'error_keywords': ['CumSum']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool))], + 'skip': ['backward']}), + # type of axis is float, not int + ('CumSum2', { + 'block': (CumSumNet(axis=1.0), + {'exception': TypeError, 'error_keywords': ['CumSum']}), + 'desc_inputs': [Tensor(np.ones([2, 3, 5]).astype(np.bool))], + 'skip': ['backward']}), + + # input is not tuple or list + ('AddN1', { + 'block': (P.AddN(), + {'exception': TypeError, 'error_keywords': ['AddN']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.uint32))], + 'skip': ['backward']}), + # type not match + ('AddN2', { + 'block': (P.AddN(), + {'exception': TypeError, 'error_keywords': ['AddN']}), + 'desc_inputs': [(Tensor(np.ones([2, 3]).astype(np.uint32)), Tensor(np.ones([3, 2]).astype(np.int32)))], + 'skip': ['backward']}), + # shape not match + ('AddN3', { + 'block': (P.AddN(), + {'exception': ValueError, 'error_keywords': ['AddN']}), + 'desc_inputs': [(Tensor(np.ones([2, 3]).astype(np.int32)), Tensor(np.ones([3, 2]).astype(np.int32)))], + 'skip': ['backward']}), + + # input is Tensor(bool) + ('Neg1', { + 'block': (P.Neg(), + {'exception': TypeError, 'error_keywords': ['Neg']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], + 'skip': ['backward']}), + + # one input is scalar, and another is Tensor(float32) + ('Sub0', { + 'block': (P.Sub(), {'exception': TypeError, 'error_keywords': ['Sub']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 
'skip': ['backward']}), + # input two tensors, but element types are not same + ('Sub1', { + 'block': (P.Sub(), {'exception': TypeError, 'error_keywords': ['Sub']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('Sub2', { + 'block': (P.Sub(), {'exception': ValueError, 'error_keywords': ['Sub']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + + # one input is scalar, and another is Tensor(float32) + ('Mul0', { + 'block': (P.Mul(), {'exception': TypeError, 'error_keywords': ['Mul']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, but element types are not same + ('Mul1', { + 'block': (P.Mul(), {'exception': TypeError, 'error_keywords': ['Mul']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('Mul2', { + 'block': (P.Mul(), {'exception': ValueError, 'error_keywords': ['Mul']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + + # input is Tensor(bool) + ('Square1', { + 'block': (P.Square(), + {'exception': TypeError, 'error_keywords': ['Square']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is Tensor(bool) + ('Rsqrt1', { + 'block': (P.Rsqrt(), + {'exception': TypeError, 'error_keywords': ['Rsqrt']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is Tensor(bool) + ('Sqrt1', { + 'block': (P.Sqrt(), + {'exception': TypeError, 'error_keywords': ['Sqrt']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is 
not Tensor + ('Reciprocal1', { + 'block': (P.Reciprocal(), + {'exception': TypeError, 'error_keywords': ['Reciprocal']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + + # input x is Tensor(bool) + ('Pow1', { + 'block': (P.Pow(), + {'exception': TypeError, 'error_keywords': ['Pow']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_)), 2.0], + 'skip': ['backward']}), + + # input is not Tensor + ('Exp1', { + 'block': (P.Exp(), + {'exception': TypeError, 'error_keywords': ['Exp']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + + # input is not Tensor + ('Log1', { + 'block': (P.Log(), + {'exception': TypeError, 'error_keywords': ['Log']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + + # one input is scalar, and another is Tensor(float32) + ('Minimum0', { + 'block': (P.Minimum(), {'exception': TypeError, 'error_keywords': ['Minimum']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, but element types are not same + ('Minimum1', { + 'block': (P.Minimum(), {'exception': TypeError, 'error_keywords': ['Minimum']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('Minimum2', { + 'block': (P.Minimum(), {'exception': ValueError, 'error_keywords': ['Minimum']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + + # one input is scalar, and another is Tensor(float32) + ('Maximum0', { + 'block': (P.Maximum(), {'exception': TypeError, 'error_keywords': ['Maximum']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, but element types are not same + ('Maximum1', { + 'block': (P.Maximum(), {'exception': TypeError, 'error_keywords': ['Maximum']}), + 'desc_inputs': [Tensor(np.ones([3, 
4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('Maximum2', { + 'block': (P.Maximum(), {'exception': ValueError, 'error_keywords': ['Maximum']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + + # one input is scalar, and another is Tensor(float32) + ('RealDiv0', { + 'block': (P.RealDiv(), {'exception': TypeError, 'error_keywords': ['RealDiv']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, but element types are not same + ('RealDiv1', { + 'block': (P.RealDiv(), {'exception': TypeError, 'error_keywords': ['RealDiv']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('RealDiv2', { + 'block': (P.RealDiv(), {'exception': ValueError, 'error_keywords': ['RealDiv']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + + # one input is scalar, and another is Tensor(float32) + ('Div0', { + 'block': (P.Div(), {'exception': TypeError, 'error_keywords': ['Div']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, but element types are not same + ('Div1', { + 'block': (P.Div(), {'exception': TypeError, 'error_keywords': ['Div']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('Div2', { + 'block': (P.Div(), {'exception': ValueError, 'error_keywords': ['Div']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + + # one input is 
scalar, and another is Tensor(float32) + ('FloorDiv0', { + 'block': (P.FloorDiv(), {'exception': TypeError, 'error_keywords': ['FloorDiv']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, but element types are not same + ('FloorDiv1', { + 'block': (P.FloorDiv(), {'exception': TypeError, 'error_keywords': ['FloorDiv']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('FloorDiv2', { + 'block': (P.FloorDiv(), {'exception': ValueError, 'error_keywords': ['FloorDiv']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + + # input x is Tensor(int32), not Tensor(float) + ('Floor1', { + 'block': (P.Floor(), + {'exception': TypeError, 'error_keywords': ['Floor']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))], + 'skip': ['backward']}), + + # one input is scalar, and another is Tensor(float32) + ('FloorMod0', { + 'block': (P.FloorMod(), {'exception': TypeError, 'error_keywords': ['FloorMod']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, but element types are not same + ('FloorMod1', { + 'block': (P.FloorMod(), {'exception': TypeError, 'error_keywords': ['FloorMod']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('FloorMod2', { + 'block': (P.FloorMod(), {'exception': ValueError, 'error_keywords': ['FloorMod']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + + # input x is Tensor(bool), not Tensor(float) + ('Acosh1', { + 'block': (P.Acosh(), + {'exception': TypeError, 
'error_keywords': ['Acosh']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is not tensor + ('Equal0', { + 'block': (P.Equal(), {'exception': TypeError, 'error_keywords': ['Equal']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # type of x and y not match + ('Equal1', { + 'block': (P.Equal(), {'exception': TypeError, 'error_keywords': ['Equal']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # shape of x and y not match + ('Equal2', { + 'block': (P.Equal(), {'exception': ValueError, 'error_keywords': ['Equal']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))], + 'skip': ['backward']}), + + # input is not tensor + ('EqualCount0', { + 'block': (P.EqualCount(), {'exception': TypeError, 'error_keywords': ['EqualCount']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # type of x and y not match + ('EqualCount1', { + 'block': (P.EqualCount(), {'exception': TypeError, 'error_keywords': ['EqualCount']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # shape of x and y not match + + # input is not tensor + ('NotEqual0', { + 'block': (P.NotEqual(), {'exception': TypeError, 'error_keywords': ['NotEqual']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # type of x and y not match + ('NotEqual1', { + 'block': (P.NotEqual(), {'exception': TypeError, 'error_keywords': ['NotEqual']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # shape of x and y not match + ('NotEqual2', { + 'block': (P.NotEqual(), {'exception': ValueError, 'error_keywords': ['NotEqual']}), + 
'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))], + 'skip': ['backward']}), + + # input is not tensor + ('Greater0', { + 'block': (P.Greater(), {'exception': TypeError, 'error_keywords': ['Greater']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # type of x and y not match + ('Greater1', { + 'block': (P.Greater(), {'exception': TypeError, 'error_keywords': ['Greater']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # shape of x and y not match + ('Greater2', { + 'block': (P.Greater(), {'exception': ValueError, 'error_keywords': ['Greater']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))], + 'skip': ['backward']}), + + # input is not tensor + ('GreaterEqual0', { + 'block': (P.GreaterEqual(), {'exception': TypeError, 'error_keywords': ['GreaterEqual']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # type of x and y not match + ('GreaterEqual1', { + 'block': (P.GreaterEqual(), {'exception': TypeError, 'error_keywords': ['GreaterEqual']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # shape of x and y not match + ('GreaterEqual2', { + 'block': (P.GreaterEqual(), {'exception': ValueError, 'error_keywords': ['GreaterEqual']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))], + 'skip': ['backward']}), + + # input is not tensor + ('Less0', { + 'block': (P.Less(), {'exception': TypeError, 'error_keywords': ['Less']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # type of x and y not match + ('Less1', { + 'block': (P.Less(), {'exception': TypeError, 'error_keywords': ['Less']}), 
+ 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # shape of x and y not match + ('Less2', { + 'block': (P.Less(), {'exception': ValueError, 'error_keywords': ['Less']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))], + 'skip': ['backward']}), + + # input is not tensor + ('LessEqual0', { + 'block': (P.LessEqual(), {'exception': TypeError, 'error_keywords': ['LessEqual']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # type of x and y not match + ('LessEqual1', { + 'block': (P.LessEqual(), {'exception': TypeError, 'error_keywords': ['LessEqual']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # shape of x and y not match + ('LessEqual2', { + 'block': (P.LessEqual(), {'exception': ValueError, 'error_keywords': ['LessEqual']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32)), Tensor(np.ones([3, 2]).astype(np.float32))], + 'skip': ['backward']}), + + # input x is not Tensor(bool) + ('LogicalNot1', { + 'block': (P.LogicalNot(), + {'exception': TypeError, 'error_keywords': ['LogicalNot']}), + 'desc_inputs': [Tensor(np.ones([2, 3]).astype(np.int32))], + 'skip': ['backward']}), + + # type of x and y not match + ('LogicalAnd1', { + 'block': (P.LogicalAnd(), {'exception': TypeError, 'error_keywords': ['LogicalAnd']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.bool_))], + 'skip': ['backward']}), + # shape of x and y not match + ('LogicalAnd2', { + 'block': (P.LogicalAnd(), {'exception': ValueError, 'error_keywords': ['LogicalAnd']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_)), Tensor(np.ones([3, 2]).astype(np.bool_))], + 'skip': ['backward']}), + + # type of x and y not match + ('LogicalOr1', { + 'block': 
(P.LogicalOr(), {'exception': TypeError, 'error_keywords': ['LogicalOr']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 4]).astype(np.bool_))], + 'skip': ['backward']}), + # shape of x and y not match + ('LogicalOr2', { + 'block': (P.LogicalOr(), {'exception': ValueError, 'error_keywords': ['LogicalOr']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_)), Tensor(np.ones([3, 2]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is not tensor + ('NPUGetFloatStatus0', { + 'block': (P.NPUGetFloatStatus(), {'exception': TypeError, 'error_keywords': ['NPUGetFloatStatus']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + # input is Tensor(int32), not Tensor(float32) + ('NPUGetFloatStatus1', { + 'block': (P.NPUGetFloatStatus(), {'exception': TypeError, 'error_keywords': ['NPUGetFloatStatus']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))], + 'skip': ['backward']}), + # dims is not 1 + ('NPUGetFloatStatus2', { + 'block': (P.NPUGetFloatStatus(), {'exception': ValueError, 'error_keywords': ['NPUGetFloatStatus']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # shape[0] is not 8 + ('NPUGetFloatStatus3', { + 'block': (P.NPUGetFloatStatus(), {'exception': ValueError, 'error_keywords': ['NPUGetFloatStatus']}), + 'desc_inputs': [Tensor(np.ones([3]).astype(np.float32))], + 'skip': ['backward']}), + + # input is not tensor + ('NPUClearFloatStatus0', { + 'block': (P.NPUClearFloatStatus(), {'exception': TypeError, 'error_keywords': ['NPUClearFloatStatus']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + # input is Tensor(int32), not Tensor(float32) + ('NPUClearFloatStatus1', { + 'block': (P.NPUClearFloatStatus(), {'exception': TypeError, 'error_keywords': ['NPUClearFloatStatus']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))], + 'skip': ['backward']}), + # dims is not 1 + ('NPUClearFloatStatus2', { + 'block': (P.NPUClearFloatStatus(), 
{'exception': ValueError, 'error_keywords': ['NPUClearFloatStatus']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # shape[0] is not 8 + ('NPUClearFloatStatus3', { + 'block': (P.NPUClearFloatStatus(), {'exception': ValueError, 'error_keywords': ['NPUClearFloatStatus']}), + 'desc_inputs': [Tensor(np.ones([3]).astype(np.float32))], + 'skip': ['backward']}), + + # input is not tensor + ('Cos0', { + 'block': (P.Cos(), {'exception': TypeError, 'error_keywords': ['Cos']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + # input is Tensor(bool) + ('Cos1', { + 'block': (P.Cos(), {'exception': TypeError, 'error_keywords': ['Cos']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is not tensor + ('ACos0', { + 'block': (P.ACos(), {'exception': TypeError, 'error_keywords': ['ACos']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + # input is Tensor(bool) + ('ACos1', { + 'block': (P.ACos(), {'exception': TypeError, 'error_keywords': ['ACos']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is not tensor + ('Sin0', { + 'block': (P.Sin(), {'exception': TypeError, 'error_keywords': ['Sin']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + # input is Tensor(bool) + ('Sin1', { + 'block': (P.Sin(), {'exception': TypeError, 'error_keywords': ['Sin']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is not tensor + ('NMSWithMask0', { + 'block': (P.NMSWithMask(), {'exception': TypeError, 'error_keywords': ['NMSWithMask']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + # input is not Tensor(float16) or Tensor(float32) + ('NMSWithMask1', { + 'block': (P.NMSWithMask(), {'exception': TypeError, 'error_keywords': ['NMSWithMask']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))], + 'skip': ['backward']}), + # dims is not 2 + ('NMSWithMask2', { + 'block': 
(P.NMSWithMask(), {'exception': ValueError, 'error_keywords': ['NMSWithMask']}), + 'desc_inputs': [Tensor(np.ones([3, 4, 2]).astype(np.float32))], + 'skip': ['backward']}), + # shape[1] is not 5 + ('NMSWithMask3', { + 'block': (P.NMSWithMask(), {'exception': ValueError, 'error_keywords': ['NMSWithMask']}), + 'desc_inputs': [Tensor(np.ones([3, 2]).astype(np.float32))], + 'skip': ['backward']}), + + # input is not tensor + ('Abs0', { + 'block': (P.Abs(), {'exception': TypeError, 'error_keywords': ['Abs']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + # input is Tensor(bool) + ('Abs1', { + 'block': (P.Abs(), {'exception': TypeError, 'error_keywords': ['Abs']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is not tensor + ('Sign0', { + 'block': (P.Sign(), {'exception': TypeError, 'error_keywords': ['Sign']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + # input is Tensor(bool) + ('Sign1', { + 'block': (P.Sign(), {'exception': TypeError, 'error_keywords': ['Sign']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], + 'skip': ['backward']}), + + # input is not tensor + ('Round0', { + 'block': (P.Round(), {'exception': TypeError, 'error_keywords': ['Round']}), + 'desc_inputs': [5.0], + 'skip': ['backward']}), + # input is Tensor(bool) + ('Round1', { + 'block': (P.Round(), {'exception': TypeError, 'error_keywords': ['Round']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.bool_))], + 'skip': ['backward']}), + + # one input is scalar, and another is Tensor(float32) + ('Atan20', { + 'block': (P.Atan2(), {'exception': TypeError, 'error_keywords': ['Atan2']}), + 'desc_inputs': [5.0, Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, but element types are not same + ('Atan21', { + 'block': (P.Atan2(), {'exception': TypeError, 'error_keywords': ['Atan2']}), + 'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32)), Tensor(np.ones([3, 
4]).astype(np.float32))], + 'skip': ['backward']}), + # input two tensors, their shapes do not match + ('Atan22', { + 'block': (P.Atan2(), {'exception': ValueError, 'error_keywords': ['Atan2']}), + 'desc_inputs': [Tensor(np.ones([3, 5]).astype(np.float32)), Tensor(np.ones([3, 4]).astype(np.float32))], + 'skip': ['backward']}), +] + + +@mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) +def test_check_exception(): + return raise_set From 113c0d8cd28f5d637e458dfe46dc618f43adfd92 Mon Sep 17 00:00:00 2001 From: Wei Luning Date: Sun, 5 Apr 2020 09:27:29 +0000 Subject: [PATCH 107/367] fix InsertGradientOf with class method --- mindspore/ccsrc/pipeline/parse/resolve.cc | 8 ++++ mindspore/ccsrc/pipeline/pass.cc | 8 ++++ .../ccsrc/pipeline/static_analysis/prim.cc | 37 +++---------------- .../pynative_mode/test_insert_grad_of.py | 36 +++++++++++++++++- 4 files changed, 57 insertions(+), 32 deletions(-) diff --git a/mindspore/ccsrc/pipeline/parse/resolve.cc b/mindspore/ccsrc/pipeline/parse/resolve.cc index ebc1f65486..f90fc5039c 100644 --- a/mindspore/ccsrc/pipeline/parse/resolve.cc +++ b/mindspore/ccsrc/pipeline/parse/resolve.cc @@ -103,6 +103,14 @@ AnfNodePtr ResolveParameterObj(const FuncGraphPtr& func_graph, const py::object& if (para_node == nullptr) { ParameterPtr node = top_graph->AddWeightParameter(param_name); node->set_default_param(obj); + + // set_abstract for parameter + auto to_convert = py::cast(python_adapter::GetPyObjAttr(obj, "default_input")); + ValuePtr converted = nullptr; + (void)ConvertData(to_convert, &converted); + bool broaden = true; + node->set_abstract(abstract::FromValue(converted, broaden)); + para_node = node; } auto iter = func_graph->make_ref_params().find(para_node); diff --git a/mindspore/ccsrc/pipeline/pass.cc b/mindspore/ccsrc/pipeline/pass.cc index e0336443e6..a58ecf41b6 100644 --- a/mindspore/ccsrc/pipeline/pass.cc +++ b/mindspore/ccsrc/pipeline/pass.cc @@ -112,6 +112,13 @@ OptPassGroupMap 
GetOptPassesA(const opt::irpass::OptimizeIRPassLib& irpass) { }); opt::OptPassConfig virtual_dataset = opt::OptPassConfig({irpass.virtual_dataset_eliminate_}); opt::OptPassConfig grad = opt::OptPassConfig({irpass.expand_jprim_}, true); + opt::irpass::ResolveIRPassLib resolve_irpass; + + opt::OptPassConfig resolve_pass = opt::OptPassConfig({ + resolve_irpass.resolver_resolve_, + resolve_irpass.resolver_getattr_, + irpass.get_make_ref_eliminate_, + }); OptPassGroupMap map_a({{"a_1", a_1}, {"a_2", a_2}, @@ -120,6 +127,7 @@ OptPassGroupMap GetOptPassesA(const opt::irpass::OptimizeIRPassLib& irpass) { {"allreduce_fusion", opt::OptPassConfig(parallel::StepAllreduceFusion)}, {"virtual_dataset", virtual_dataset}, {"grad", grad}, + {"resolve", resolve_pass}, {"renormalize", opt::OptPassConfig::Renormalize()}, {"cse", opt::OptPassConfig(opt::CSE(false))}, {"a_3", a_3}}); diff --git a/mindspore/ccsrc/pipeline/static_analysis/prim.cc b/mindspore/ccsrc/pipeline/static_analysis/prim.cc index 56bcd77f67..d71e098009 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/prim.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/prim.cc @@ -554,24 +554,6 @@ AbstractBasePtr StaticGetterInferred(const ValuePtr &value, const ConfigPtr &dat return eng->ForwardConfig(old_conf, fn_conf); } -AbstractBasePtr GenerateResolveAbstract(const AnfNodeConfigPtr &out_conf, const py::object &obj, - const ValuePtr &converted_ret) { - if (py::hasattr(obj, PYTHON_DATACLASS_FIELDS)) { - TypePtr cls_ptr = parse::ParseDataClass(converted_ret->cast>()->obj()); - - std::vector input = {NewValueNode(prim::kPrimPartial), NewValueNode(prim::kPrimMakeRecord), - NewValueNode(cls_ptr)}; - MS_EXCEPTION_IF_NULL(out_conf); - FuncGraphPtr func_graph = out_conf->node()->func_graph(); - CNodePtr new_cnode = func_graph->NewCNode(input); - AnalysisEnginePtr eng = out_conf->engine(); - AnfNodeConfigPtr fn_conf = eng->MakeConfig(new_cnode, out_conf->context()); - return eng->ForwardConfig(out_conf, fn_conf); - } else { - 
return ToAbstract(converted_ret, AnalysisContext::DummyContext(), out_conf); - } -} - AbstractBasePtr GetEvaluatedValueForNameSpaceString(const AnalysisEnginePtr &engine, const AbstractBasePtrList &args_spec_list, const AnfNodeConfigPtr &out_conf) { @@ -602,23 +584,16 @@ AbstractBasePtr GetEvaluatedValueForNameSpaceString(const AnalysisEnginePtr &eng // item_name to func addr from obj_map parse::SymbolPtr symbol = item_v->cast(); parse::NameSpacePtr name_space = data_v->cast(); + FuncGraphPtr func_graph = out_conf->node()->func_graph(); - parse::SymbolResolverPtr symbol_resolver = - std::make_shared(name_space, symbol, out_conf->node()); - if (!symbol_resolver->Resolve()) { + auto new_node = parse::ResolveSymbol(func_graph->manager(), name_space, symbol, out_conf->node()); + if (new_node == nullptr) { MS_LOG(EXCEPTION) << "Resolve node failed"; } - py::object obj = symbol_resolver->result(); - ValuePtr converted_ret = nullptr; - bool converted = parse::ConvertData(obj, &converted_ret, true); - if (!converted) { - MS_LOG(EXCEPTION) << "Convert data failed"; - } - if (converted_ret->isa()) { - AddToManager(engine, converted_ret->cast()); - } - return GenerateResolveAbstract(out_conf, obj, converted_ret); + AnalysisEnginePtr eng = out_conf->engine(); + AnfNodeConfigPtr fn_conf = eng->MakeConfig(new_node, out_conf->context()); + return eng->ForwardConfig(out_conf, fn_conf); } AbstractBasePtr GetEvaluatedValueForClassAttrOrMethod(const AnalysisEnginePtr &engine, diff --git a/tests/ut/python/pynative_mode/test_insert_grad_of.py b/tests/ut/python/pynative_mode/test_insert_grad_of.py index 38432d79f3..104ac4d1c7 100644 --- a/tests/ut/python/pynative_mode/test_insert_grad_of.py +++ b/tests/ut/python/pynative_mode/test_insert_grad_of.py @@ -17,13 +17,14 @@ import numpy as np import mindspore.nn as nn from mindspore.ops import composite as C from mindspore.ops import operations as P +from mindspore.ops import functional as F from mindspore.common.api import ms_function from 
....mindspore_test_framework.utils.bprop_util import bprop from ....mindspore_test_framework.utils.debug_util import PrintShapeTypeCell, PrintGradShapeTypeCell from mindspore import Tensor from mindspore import context - +import mindspore def setup_module(module): context.set_context(mode=context.PYNATIVE_MODE) @@ -107,3 +108,36 @@ def test_print_shape_type(): return z bprop(Mul(), Tensor(np.ones([2, 2]).astype(np.float32)), Tensor(np.ones([2, 2]).astype(np.float32))) + +def test_cell_assign(): + context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + class GradNetWrap(nn.Cell): + """ GradNetWrap definition """ + def __init__(self, net): + super(GradNetWrap, self).__init__() + self.net = net + self.weights = mindspore.ParameterTuple(net.get_parameters()) + + def construct(self, x, y): + return C.grad_by_list(self.net, self.weights)(x, y) + + class Mul(nn.Cell): + def __init__(self): + super(Mul, self).__init__() + self.get_g = P.InsertGradientOf(self.save_gradient) + self.matrix_w = mindspore.Parameter(Tensor(np.ones([2, 2], np.float32)), name="matrix_w") + self.matrix_g = mindspore.Parameter(Tensor(np.ones([2, 2], np.float32)), name="matrix_g") + + def save_gradient(self, dout): + self.matrix_g = dout + return dout + + def construct(self, x, y): + z = x * self.matrix_w + z = self.get_g(z) + z = z * y + return z + + input_x = Tensor(np.ones([2, 2], np.float32)) + input_y = Tensor(np.ones([2, 2], np.float32)) + GradNetWrap(Mul())(input_x, input_y) From 7696c21e65df2d478c6ab0f48732329b65b96b22 Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Tue, 7 Apr 2020 11:39:10 +0800 Subject: [PATCH 108/367] fix runtest.sh for python ut --- mindspore/dataset/engine/datasets.py | 20 ++++------- mindspore/dataset/engine/validators.py | 49 -------------------------- tests/ut/python/ops/test_ops_check.py | 4 +-- tests/ut/python/runtest.sh | 16 ++++++--- 4 files changed, 20 insertions(+), 69 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py 
b/mindspore/dataset/engine/datasets.py index 2d5c219b71..db2b5169d2 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -38,7 +38,7 @@ from .iterators import DictIterator, TupleIterator from .validators import check, check_batch, check_shuffle, check_map, check_repeat, check_zip, check_rename, \ check_project, check_imagefolderdatasetv2, check_mnist_cifar_dataset, check_manifestdataset, \ check_tfrecorddataset, check_vocdataset, check_celebadataset, check_minddataset, check_generatordataset, \ - check_zip_dataset, check_add_column, check_columns + check_zip_dataset from ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist try: @@ -2334,20 +2334,13 @@ class Schema: self.dataset_type = '' self.num_rows = 0 else: - if not os.path.isfile(schema_file) or not os.access(schema_file, os.R_OK): - raise ValueError("The file %s does not exist or permission denied!" % schema_file) try: with open(schema_file, 'r') as load_f: json_obj = json.load(load_f) + self.from_json(json_obj) except json.decoder.JSONDecodeError: - raise RuntimeError("Schema file failed to load.") - except UnicodeDecodeError: - raise RuntimeError("Schema file failed to decode.") - except Exception: - raise RuntimeError("Schema file failed to open.") - self.from_json(json_obj) + raise RuntimeError("Schema file failed to load") - @check_add_column def add_column(self, name, de_type, shape=None): """ Add new column to the schema. @@ -2366,8 +2359,10 @@ class Schema: if isinstance(de_type, typing.Type): de_type = mstype_to_detype(de_type) new_column["type"] = str(de_type) - else: + elif isinstance(de_type, str): new_column["type"] = str(DataType(de_type)) + else: + raise ValueError("Unknown column type") if shape is not None: new_column["shape"] = shape @@ -2404,7 +2399,6 @@ class Schema: RuntimeError: If column's name field is missing. RuntimeError: If column's type field is missing. 
""" - check_columns(columns, columns) self.columns = [] for col in columns: name = None @@ -2449,8 +2443,6 @@ class Schema: RuntimeError: if dataset type is missing in the object. RuntimeError: if columns are missing in the object. """ - if not isinstance(json_obj, dict) or json_obj is None: - raise ValueError("Expected non-empty dict.") for k, v in json_obj.items(): if k == "datasetType": self.dataset_type = v diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index 1c374ae879..b4d22a4a01 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -19,15 +19,10 @@ import inspect as ins import os from functools import wraps from multiprocessing import cpu_count -from mindspore._c_expression import typing from . import samplers from . import datasets INT32_MAX = 2147483647 -valid_detype = [ - "bool", "int8", "int16", "int32", "int64", "uint8", "uint16", - "uint32", "uint64", "float16", "float32", "float64" -] def check(method): @@ -193,12 +188,6 @@ def check(method): return wrapper -def check_valid_detype(type_): - if type_ not in valid_detype: - raise ValueError("Unknown column type") - return True - - def check_filename(path): """ check the filename in the path @@ -754,41 +743,3 @@ def check_project(method): return method(*args, **kwargs) return new_method - - -def check_shape(shape, name): - if isinstance(shape, list): - for element in shape: - if not isinstance(element, int): - raise TypeError( - "Each element in {0} should be of type int. 
Got {1}.".format(name, type(element))) - else: - raise TypeError("Expected int list.") - - -def check_add_column(method): - """check the input arguments of add_column.""" - @wraps(method) - def new_method(*args, **kwargs): - param_dict = make_param_dict(method, args, kwargs) - - # check name; required argument - name = param_dict.get("name") - if not isinstance(name, str) or not name: - raise TypeError("Expected non-empty string.") - - # check type; required argument - de_type = param_dict.get("de_type") - if not isinstance(de_type, str) or not de_type: - raise TypeError("Expected non-empty string.") - if not isinstance(de_type, typing.Type) and not check_valid_detype(de_type): - raise ValueError("Unknown column type.") - - # check shape - shape = param_dict.get("shape") - if shape is not None: - check_shape(shape, "shape") - - return method(*args, **kwargs) - - return new_method diff --git a/tests/ut/python/ops/test_ops_check.py b/tests/ut/python/ops/test_ops_check.py index 5083878dae..a7e1b41c4a 100644 --- a/tests/ut/python/ops/test_ops_check.py +++ b/tests/ut/python/ops/test_ops_check.py @@ -62,7 +62,7 @@ def test_net_without_construct(): try: _executor.compile(net, inp) except RuntimeError as err: - if str(err).find("unsupported syntax 'Raise' at ") >= 0: + if str(err).find("Unsupported syntax 'Raise' at ") >= 0: print(str(err)) else: raise err @@ -86,7 +86,7 @@ def test_net_with_raise(): try: _executor.compile(net, inp) except RuntimeError as err: - if str(err).find("unsupported syntax 'Raise' at ") >= 0: + if str(err).find("Unsupported syntax 'Raise' at ") >= 0: print(str(err)) else: raise err diff --git a/tests/ut/python/runtest.sh b/tests/ut/python/runtest.sh index 035e454f24..6502d3a1bf 100755 --- a/tests/ut/python/runtest.sh +++ b/tests/ut/python/runtest.sh @@ -40,12 +40,8 @@ fi if [ $# -gt 0 ]; then pytest -s --ignore=$1/pynative_mode --ignore=$1/parallel --ignore=$1/train $IGNORE_EXEC $1 - pytest -n 4 --dist=loadfile -v $1/parallel - pytest -n 4 
--dist=loadfile -v $1/train else pytest --ignore=$CURRPATH/pynative_mode --ignore=$CURRPATH/parallel --ignore=$CURRPATH/train $IGNORE_EXEC $CURRPATH - pytest -n 4 --dist=loadfile -v $CURRPATH/parallel - pytest -n 4 --dist=loadfile -v $CURRPATH/train fi RET=$? @@ -57,6 +53,18 @@ if [ ${RET} -ne 0 ]; then exit ${RET} fi +if [ $# -gt 0 ]; then + pytest -n 4 --dist=loadfile -v $1/parallel $1/train +else + pytest -n 4 --dist=loadfile -v $CURRPATH/parallel $CURRPATH/train +fi + +RET=$? + +if [ ${RET} -ne 0 ]; then + exit ${RET} +fi + if [ $# -gt 0 ]; then pytest -s $1/pynative_mode else From 2303719352a8f264523253e931a4c6e607441fd8 Mon Sep 17 00:00:00 2001 From: dengwentao Date: Tue, 7 Apr 2020 11:22:53 +0800 Subject: [PATCH 109/367] updata mkl-dnn link and md5 --- Third_Party_Open_Source_Software_Notice | 2 +- cmake/external_libs/mkl_dnn.cmake | 18 +++++++++--------- cmake/utils.cmake | 2 ++ mindspore/ccsrc/CMakeLists.txt | 2 +- 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/Third_Party_Open_Source_Software_Notice b/Third_Party_Open_Source_Software_Notice index 498b5b8d1b..60ad7cf47c 100644 --- a/Third_Party_Open_Source_Software_Notice +++ b/Third_Party_Open_Source_Software_Notice @@ -368,7 +368,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-Software: MKL-DNN 1.1.2 +Software: oneDNN 1.1.2 Copyright (c) 2009-2018 The MathJax Consortium Copyright 2018 Intel Corporation Copyright 2019 Intel Corporation diff --git a/cmake/external_libs/mkl_dnn.cmake b/cmake/external_libs/mkl_dnn.cmake index 17d8020d3a..6f033fa565 100644 --- a/cmake/external_libs/mkl_dnn.cmake +++ b/cmake/external_libs/mkl_dnn.cmake @@ -1,11 +1,11 @@ -set(mkl_dnn_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2") -set(mkl_dnn_CFLAGS "-D_FORTIFY_SOURCE=2 -O2") -mindspore_add_pkg(mkl_dnn - VER 1.1.1 +set(onednn_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2") +set(onednn_CFLAGS "-D_FORTIFY_SOURCE=2 -O2") +mindspore_add_pkg(onednn + VER 1.1.2 LIBS dnnl mkldnn - URL https://github.com/intel/mkl-dnn/archive/v1.1.1.tar.gz - MD5 d6a422b00459600bdc22242590953f38 + URL https://github.com/oneapi-src/oneDNN/archive/v1.1.2.tar.gz + MD5 ab40d52230f3ad1d7a6f06ce0f6bc17a CMAKE_OPTION -DDNNL_ARCH_OPT_FLAGS='' -DDNNL_CPU_RUNTIME='SEQ' -DDNNL_BUILD_EXAMPLES=OFF -DDNNL_BUILD_TESTS=OFF) -include_directories(${mkl_dnn_INC}) -add_library(mindspore::dnnl ALIAS mkl_dnn::dnnl) -add_library(mindspore::mkldnn ALIAS mkl_dnn::mkldnn) +include_directories(${onednn_INC}) +add_library(mindspore::dnnl ALIAS onednn::dnnl) +add_library(mindspore::mkldnn ALIAS onednn::mkldnn) diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 060e400820..99c064fdd4 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -40,6 +40,8 @@ else() set(JOBS 8) if (${JOBS} GREATER ${N}) set(THNUM ${N}) + else() + set(THNUM ${JOBS}) endif() endif () message("set make thread num: ${THNUM}") diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 1c684b6736..a979a84ac1 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -508,7 +508,7 @@ endif() if (ENABLE_CPU) add_custom_target(add_cpu_lib ALL - COMMAND cp ${mkl_dnn_LIBPATH}/libdnnl.so.1.1 ${MS_LIB_PATH}/libdnnl.so.1 + COMMAND cp ${onednn_LIBPATH}/libdnnl.so.1.1 ${MS_LIB_PATH}/libdnnl.so.1 ) 
add_dependencies(add_cpu_lib add_ms_lib) endif() From cc9a0e1310280930c88a0f261cf233d9d801e341 Mon Sep 17 00:00:00 2001 From: chengang Date: Tue, 7 Apr 2020 16:56:33 +0800 Subject: [PATCH 110/367] =?UTF-8?q?=E5=9B=9E=E9=80=80=20'Pull=20Request=20?= =?UTF-8?q?!133=20:=20Edit=20loss=5Fscale=20to=20fit=20GPU'?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- mindspore/nn/wrap/loss_scale.py | 40 ++++---------- mindspore/ops/operations/__init__.py | 6 +- mindspore/ops/operations/math_ops.py | 83 ---------------------------- 3 files changed, 12 insertions(+), 117 deletions(-) diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index 4d929352b3..1ce3179273 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -25,7 +25,6 @@ from ...ops import operations as P from ...ops.operations import NPUGetFloatStatus, NPUAllocFloatStatus, NPUClearFloatStatus, ReduceSum, LessEqual, \ ControlDepend from ...common import dtype as mstype -import mindspore.context as context _grad_scale = C.MultitypeFuncGraph("grad_scale") reciprocal = P.Reciprocal() @@ -35,12 +34,6 @@ reciprocal = P.Reciprocal() def tensor_grad_scale(scale, grad): return grad * F.cast(reciprocal(scale), F.dtype(grad)) -_grad_overflow = C.MultitypeFuncGraph("_grad_overflow") -grad_overflow = P.FloatStatus() - -@_grad_overflow.register("Tensor") -def _tensor_grad_overflow(grad): - return grad_overflow(grad) class DynamicLossScaleUpdateCell(Cell): r""" @@ -204,15 +197,9 @@ class TrainOneStepWithLossScaleCell(Cell): self.optimizer = optimizer self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) self.hyper_map = C.HyperMap() - if context.get_context("device_target") == "GPU": - self.gpu_target = True - self.float_status = P.FloatStatus() - self.addn = P.AddN() - else: - self.gpu_target = False - self.alloc_status = NPUAllocFloatStatus() - self.get_status = NPUGetFloatStatus() - self.clear_status = 
NPUClearFloatStatus() + self.alloc_status = NPUAllocFloatStatus() + self.get_status = NPUGetFloatStatus() + self.clear_status = NPUClearFloatStatus() self.reduce_sum = ReduceSum(keep_dims=False) self.base = Tensor(1, mstype.float32) self.less_equal = LessEqual() @@ -237,11 +224,10 @@ class TrainOneStepWithLossScaleCell(Cell): def construct(self, data, label, sens=None): weights = self.weights loss = self.network(data, label) - if not self.gpu_target: - # init overflow buffer - init = self.alloc_status() - # clear overflow buffer - self.clear_status(init) + # init overflow buffer + init = self.alloc_status() + # clear overflow buffer + self.clear_status(init) if sens is None: scaling_sens = self.loss_scale else: @@ -251,14 +237,10 @@ class TrainOneStepWithLossScaleCell(Cell): if self.reducer_flag: # apply grad reducer on grads grads = self.grad_reducer(grads) - if not self.gpu_target: - # get the overflow buffer - self.get_status(init) - # sum overflow buffer elements, 0:not overflow , >0:overflow - flag_sum = self.reduce_sum(init, (0,)) - else: - flag_sum = self.hyper_map(F.partial(_grad_overflow), grads) - flag_sum = self.addn(flag_sum) + # get the overflow buffer + self.get_status(init) + # sum overflow buffer elements, 0:not overflow , >0:overflow + flag_sum = self.reduce_sum(init, (0,)) if self.is_distributed: # sum overflow flag over devices flag_reduce = self.allreduce(flag_sum) diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index bc300cb670..846be05c4d 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -44,7 +44,7 @@ from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul LogicalNot, LogicalOr, MatMul, Maximum, Minimum, Mul, Neg, NMSWithMask, NotEqual, NPUAllocFloatStatus, NPUClearFloatStatus, - NPUGetFloatStatus, Pow, RealDiv, IsNan, IsInf, IsFinite, FloatStatus, + NPUGetFloatStatus, Pow, RealDiv, Reciprocal, CumSum, Sin, Sqrt, Rsqrt, Square, Sub, 
TensorAdd, Sign, Round) @@ -151,10 +151,6 @@ __all__ = [ 'Neg', 'Slice', 'DType', - 'IsNan', - 'IsInf', - 'IsFinite', - 'FloatStatus', 'NPUAllocFloatStatus', 'NPUGetFloatStatus', 'NPUClearFloatStatus', diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 5f8c24d78b..175b72560f 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1541,89 +1541,6 @@ class LogicalOr(_LogicBinaryOp): def infer_dtype(self, x_dtype, y_dtype): return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.prim_name()) -class IsNan(PrimitiveWithInfer): - """ - Judging which elements are nan for each position - Inputs: - - **input_x** (Tensor) - The input tensor. - - Outputs: - Tensor, has the same shape of input. - """ - - @prim_attr_register - def __init__(self): - """init IsNan""" - self.init_prim_io_names(inputs=['x'], outputs=['output']) - - def infer_shape(self, x_shape): - return x_shape - - def infer_dtype(self, x_dtype): - return mstype.bool_ - -class IsInf(PrimitiveWithInfer): - """ - Judging which elements are inf or -inf for each position - Inputs: - - **input_x** (Tensor) - The input tensor. - - Outputs: - Tensor, has the same shape of input. - """ - - @prim_attr_register - def __init__(self): - """init IsInf""" - self.init_prim_io_names(inputs=['x'], outputs=['output']) - - def infer_shape(self, x_shape): - return x_shape - - def infer_dtype(self, x_dtype): - return mstype.bool_ - -class IsFinite(PrimitiveWithInfer): - """ - Judging which elements are finite for each position - Inputs: - - **input_x** (Tensor) - The input tensor. - - Outputs: - Tensor, has the same shape of input. 
- """ - - @prim_attr_register - def __init__(self): - """init IsFinite""" - self.init_prim_io_names(inputs=['x'], outputs=['output']) - - def infer_shape(self, x_shape): - return x_shape - - def infer_dtype(self, x_dtype): - return mstype.bool_ - -class FloatStatus(PrimitiveWithInfer): - """ - Determine if the elements contains nan, inf or -inf - Inputs: - - **input_x** (Tensor) - The input tensor. - - Outputs: - Tensor, has the shape of `(1,)`. - """ - - @prim_attr_register - def __init__(self): - """init FloatStatus""" - self.init_prim_io_names(inputs=['x'], outputs=['output']) - - def infer_shape(self, x_shape): - return [1] - - def infer_dtype(self, x_dtype): - return x_dtype class NPUAllocFloatStatus(PrimitiveWithInfer): """ From c68826567176b88e78d46788249b2d3a8566bba4 Mon Sep 17 00:00:00 2001 From: jonyguo Date: Fri, 3 Apr 2020 16:53:45 +0800 Subject: [PATCH 111/367] fix: when use MindDataset block_reade=True hung --- mindspore/ccsrc/mindrecord/io/shard_reader.cc | 2 ++ mindspore/mindrecord/filewriter.py | 1 + mindspore/mindrecord/tools/cifar100_to_mr.py | 9 ++++--- tests/ut/python/dataset/test_minddataset.py | 27 ++++++++++++++++--- .../mindrecord/test_cifar100_to_mindrecord.py | 4 ++- .../mindrecord/test_mindrecord_exception.py | 8 +++++- 6 files changed, 43 insertions(+), 8 deletions(-) diff --git a/mindspore/ccsrc/mindrecord/io/shard_reader.cc b/mindspore/ccsrc/mindrecord/io/shard_reader.cc index 791de6c60b..32825fd9df 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_reader.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_reader.cc @@ -785,6 +785,8 @@ vector ShardReader::GetAllColumns() { MSRStatus ShardReader::CreateTasksByBlock(const std::vector> &row_group_summary, const std::vector> &operators) { + vector columns = GetAllColumns(); + CheckIfColumnInIndex(columns); for (const auto &rg : row_group_summary) { auto shard_id = std::get<0>(rg); auto group_id = std::get<1>(rg); diff --git a/mindspore/mindrecord/filewriter.py b/mindspore/mindrecord/filewriter.py 
index d1471f47cb..4056825ff3 100644 --- a/mindspore/mindrecord/filewriter.py +++ b/mindspore/mindrecord/filewriter.py @@ -143,6 +143,7 @@ class FileWriter: ParamTypeError: If index field is invalid. MRMDefineIndexError: If index field is not primitive type. MRMAddIndexError: If failed to add index field. + MRMGetMetaError: If the schema is not set or get meta failed. """ if not index_fields or not isinstance(index_fields, list): raise ParamTypeError('index_fields', 'list') diff --git a/mindspore/mindrecord/tools/cifar100_to_mr.py b/mindspore/mindrecord/tools/cifar100_to_mr.py index a359de853d..c011c8f4b0 100644 --- a/mindspore/mindrecord/tools/cifar100_to_mr.py +++ b/mindspore/mindrecord/tools/cifar100_to_mr.py @@ -24,7 +24,7 @@ from mindspore import log as logger from .cifar100 import Cifar100 from ..common.exceptions import PathNotExistsError from ..filewriter import FileWriter -from ..shardutils import check_filename +from ..shardutils import check_filename, SUCCESS try: cv2 = import_module("cv2") except ModuleNotFoundError: @@ -98,8 +98,11 @@ class Cifar100ToMR: data_list = _construct_raw_data(images, fine_labels, coarse_labels) test_data_list = _construct_raw_data(test_images, test_fine_labels, test_coarse_labels) - _generate_mindrecord(self.destination, data_list, fields, "img_train") - _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") + if _generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS: + return FAILED + if _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS: + return FAILED + return SUCCESS def _construct_raw_data(images, fine_labels, coarse_labels): """ diff --git a/tests/ut/python/dataset/test_minddataset.py b/tests/ut/python/dataset/test_minddataset.py index da22f5c3b7..460a728b5c 100644 --- a/tests/ut/python/dataset/test_minddataset.py +++ b/tests/ut/python/dataset/test_minddataset.py @@ -47,7 +47,9 @@ def add_and_remove_cv_file(): 
os.remove("{}.db".format(x)) if os.path.exists("{}.db".format(x)) else None writer = FileWriter(CV_FILE_NAME, FILES_NUM) data = get_data(CV_DIR_NAME) - cv_schema_json = {"file_name": {"type": "string"}, "label": {"type": "int32"}, + cv_schema_json = {"id": {"type": "int32"}, + "file_name": {"type": "string"}, + "label": {"type": "int32"}, "data": {"type": "bytes"}} writer.add_schema(cv_schema_json, "img_schema") writer.add_index(["file_name", "label"]) @@ -226,6 +228,24 @@ def test_cv_minddataset_blockreader_tutorial(add_and_remove_cv_file): num_iter += 1 assert num_iter == 20 +def test_cv_minddataset_blockreader_some_field_not_in_index_tutorial(add_and_remove_cv_file): + """tutorial for cv minddataset.""" + columns_list = ["id", "data", "label"] + num_readers = 4 + data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, shuffle=False, + block_reader=True) + assert data_set.get_dataset_size() == 10 + repeat_num = 2 + data_set = data_set.repeat(repeat_num) + num_iter = 0 + for item in data_set.create_dict_iterator(): + logger.info("-------------- block reader repeat tow {} -----------------".format(num_iter)) + logger.info("-------------- item[id]: {} ----------------------------".format(item["id"])) + logger.info("-------------- item[label]: {} ----------------------------".format(item["label"])) + logger.info("-------------- item[data]: {} -----------------------------".format(item["data"])) + num_iter += 1 + assert num_iter == 20 + def test_cv_minddataset_reader_basic_tutorial(add_and_remove_cv_file): """tutorial for cv minderdataset.""" @@ -359,13 +379,14 @@ def get_data(dir_name): lines = file_reader.readlines() data_list = [] - for line in lines: + for i, line in enumerate(lines): try: filename, label = line.split(",") label = label.strip("\n") with open(os.path.join(img_dir, filename), "rb") as file_reader: img = file_reader.read() - data_json = {"file_name": filename, + data_json = {"id": i, + "file_name": filename, "data": img, "label": 
int(label)} data_list.append(data_json) diff --git a/tests/ut/python/mindrecord/test_cifar100_to_mindrecord.py b/tests/ut/python/mindrecord/test_cifar100_to_mindrecord.py index b3a8d94589..e95f25aae4 100644 --- a/tests/ut/python/mindrecord/test_cifar100_to_mindrecord.py +++ b/tests/ut/python/mindrecord/test_cifar100_to_mindrecord.py @@ -18,6 +18,7 @@ import pytest from mindspore.mindrecord import Cifar100ToMR from mindspore.mindrecord import FileReader from mindspore.mindrecord import MRMOpenError +from mindspore.mindrecord import SUCCESS from mindspore import log as logger CIFAR100_DIR = "../data/mindrecord/testCifar100Data" @@ -26,7 +27,8 @@ MINDRECORD_FILE = "./cifar100.mindrecord" def test_cifar100_to_mindrecord_without_index_fields(): """test transform cifar100 dataset to mindrecord without index fields.""" cifar100_transformer = Cifar100ToMR(CIFAR100_DIR, MINDRECORD_FILE) - cifar100_transformer.transform() + ret = cifar100_transformer.transform() + assert ret == SUCCESS, "Failed to tranform from cifar100 to mindrecord" assert os.path.exists(MINDRECORD_FILE) assert os.path.exists(MINDRECORD_FILE + "_test") read() diff --git a/tests/ut/python/mindrecord/test_mindrecord_exception.py b/tests/ut/python/mindrecord/test_mindrecord_exception.py index 0a51fbf4e7..1f7a3f859d 100644 --- a/tests/ut/python/mindrecord/test_mindrecord_exception.py +++ b/tests/ut/python/mindrecord/test_mindrecord_exception.py @@ -16,7 +16,7 @@ import os import pytest from mindspore.mindrecord import FileWriter, FileReader, MindPage -from mindspore.mindrecord import MRMOpenError, MRMGenerateIndexError, ParamValueError +from mindspore.mindrecord import MRMOpenError, MRMGenerateIndexError, ParamValueError, MRMGetMetaError from mindspore import log as logger from utils import get_data @@ -280,3 +280,9 @@ def test_cv_file_writer_shard_num_greater_than_1000(): with pytest.raises(ParamValueError) as err: FileWriter(CV_FILE_NAME, 1001) assert 'Shard number should between' in str(err.value) + +def 
test_add_index_without_add_schema(): + with pytest.raises(MRMGetMetaError) as err: + fw = FileWriter(CV_FILE_NAME) + fw.add_index(["label"]) + assert 'Failed to get meta info' in str(err.value) From 22c6baeea2b1b5f26ead36dabbcafc456da62aba Mon Sep 17 00:00:00 2001 From: WeibiaoYu Date: Thu, 2 Apr 2020 08:43:13 -0400 Subject: [PATCH 112/367] Support to config whether to save integeated checkpoint, in auto model parallel scene --- mindspore/common/api.py | 35 -------------------------- mindspore/train/callback.py | 16 +++++++++--- mindspore/train/serialization.py | 7 +++--- tests/ut/python/utils/test_callback.py | 4 +-- 4 files changed, 19 insertions(+), 43 deletions(-) diff --git a/mindspore/common/api.py b/mindspore/common/api.py index 8e23e9184d..371ebbb445 100644 --- a/mindspore/common/api.py +++ b/mindspore/common/api.py @@ -374,9 +374,6 @@ class _Executor: obj.parameter_layout_dict = self._executor.get_parameter_layout(phase) obj.load_parameter_slice(params) - if _get_parallel_mode() in ["hybrid_parallel"]: - obj.parameter_layout_dict = self._build_parameter_layout(obj) - # the following GE init process is not needed when use vm or ms backend if enable_ge: # decide whether to sink based on whether the inputs is virtual or not @@ -449,38 +446,6 @@ class _Executor: return self._exec_pip(obj, *args, phase=phase_real) raise KeyError('{} graph is not exist.'.format(phase_real)) - def _build_parameter_layout(self, obj): - """ - Build parameter layout, for layerwise_parallel parameter. - - Args: - obj (Function or Cell): The function or cell instance need to be compiled. - - Returns: - Dictionary, parameter layout info. 
- """ - parameter_layout_dict = {} - layerwise_parallel_parameters = [] - for key in obj.parameters_dict(): - if obj.parameters_dict()[key].layerwise_parallel is True: - layerwise_parallel_parameters.append(key) - - if not layerwise_parallel_parameters: - return parameter_layout_dict - - from ..communication.management import get_group_size - group_size = [get_group_size()] - for key in layerwise_parallel_parameters: - tensor_map = [0] - shape = obj.parameters_dict()[key].data.shape() - for x in range(len(shape)): # dim 0 set 0, others set -1 - if x: - tensor_map.append(-1) - layout = [group_size, tensor_map] - parameter_layout_dict[key] = layout - - return parameter_layout_dict - def del_net_res(self, net_id): self._executor.del_net_res(net_id) diff --git a/mindspore/train/callback.py b/mindspore/train/callback.py index 62f847089d..dcf630342c 100644 --- a/mindspore/train/callback.py +++ b/mindspore/train/callback.py @@ -24,7 +24,7 @@ import mindspore.context as context from mindspore.train.serialization import _exec_save_checkpoint, _fill_param_into_net, _save_graph from mindspore.train._utils import _make_directory from mindspore import log as logger -from mindspore._checkparam import check_int_non_negative +from mindspore._checkparam import check_int_non_negative, check_bool from mindspore.common.tensor import Tensor from .summary.summary_record import _cache_summary_tensor_data @@ -150,6 +150,8 @@ class CheckpointConfig: keep_checkpoint_max (int): Maximum step to save checkpoint. Default: 5. keep_checkpoint_per_n_minutes (int): Keep one checkpoint every n minutes. Default: 0. Can't be used with keep_checkpoint_max at the same time. + integrated_save (bool): Whether to intergrated save in automatic model parall scene. Default: True. + Integrated save function is only supported in automatic parall scene, not supported in manual parallel. Raises: ValueError: If the input_param is None or 0. 
@@ -163,7 +165,8 @@ class CheckpointConfig: save_checkpoint_steps=1, save_checkpoint_seconds=0, keep_checkpoint_max=5, - keep_checkpoint_per_n_minutes=0): + keep_checkpoint_per_n_minutes=0, + integrated_save=True): if not save_checkpoint_steps and not save_checkpoint_seconds and \ not keep_checkpoint_max and not keep_checkpoint_per_n_minutes: @@ -191,6 +194,8 @@ class CheckpointConfig: if not self._keep_checkpoint_per_n_minutes or self._keep_checkpoint_per_n_minutes == 0: self._keep_checkpoint_max = 1 + self._integrated_save = check_bool(integrated_save) + @property def save_checkpoint_steps(self): """Get the value of _save_checkpoint_steps.""" @@ -211,6 +216,11 @@ class CheckpointConfig: """Get the value of _keep_checkpoint_per_n_minutes.""" return self._keep_checkpoint_per_n_minutes + @property + def integrated_save(self): + """Get the value of _integrated_save.""" + return self._integrated_save + def get_checkpoint_policy(self): """Get the policy of checkpoint.""" checkpoint_policy = {'save_checkpoint_steps': self._save_checkpoint_steps, @@ -619,7 +629,7 @@ class ModelCheckpoint(Callback): _set_cur_net(cb_params.train_network) cb_params.train_network.exec_checkpoint_graph() - _exec_save_checkpoint(cb_params.train_network, gen_file) + _exec_save_checkpoint(cb_params.train_network, gen_file, self._config.integrated_save) if os.path.exists(gen_file): shutil.move(gen_file, cur_file) diff --git a/mindspore/train/serialization.py b/mindspore/train/serialization.py index 0478bbc071..b334c3e9d8 100644 --- a/mindspore/train/serialization.py +++ b/mindspore/train/serialization.py @@ -279,13 +279,14 @@ def _save_graph(network, file_name): os.chmod(file_name, stat.S_IWUSR | stat.S_IRUSR) -def _exec_save_checkpoint(train_network, ckpoint_file_name): +def _exec_save_checkpoint(train_network, ckpoint_file_name, integrated_save=True): """ Saves checkpoint for 'ms' backend. Args: train_network (Network): The train network for training. 
ckpoint_file_name (str): The name of checkpoint file. + integrated_save (bool): Whether to intergrated save in automatic model parallel scene. """ param_dict = {} @@ -300,9 +301,9 @@ def _exec_save_checkpoint(train_network, ckpoint_file_name): else: param_data = Tensor(value.data) - # in model parallel scenario, some parameters were spliteds to all the devices, + # in automatic model parallel scenario, some parameters were spliteds to all the devices, # which should be combined before saving - if key in train_network.parameter_layout_dict: + if integrated_save and key in train_network.parameter_layout_dict: param_data = _get_merged_param_data(train_network, key, param_data) each_param["data"] = param_data diff --git a/tests/ut/python/utils/test_callback.py b/tests/ut/python/utils/test_callback.py index 60e4c6527a..43cf827330 100644 --- a/tests/ut/python/utils/test_callback.py +++ b/tests/ut/python/utils/test_callback.py @@ -308,10 +308,10 @@ def test_RunContext(): def test_Checkpoint_Config(): """Test CheckpointConfig all None or 0.""" with pytest.raises(ValueError): - CheckpointConfig(0, 0, 0, 0) + CheckpointConfig(0, 0, 0, 0, True) with pytest.raises(ValueError): - CheckpointConfig(0, None, 0, 0) + CheckpointConfig(0, None, 0, 0, True) def test_step_end_save_graph(): From ff38eff9ae94adb72604be0ae47ee07dab5c1498 Mon Sep 17 00:00:00 2001 From: ms_yan <6576637+ms_yan@user.noreply.gitee.com> Date: Thu, 2 Apr 2020 21:56:48 +0800 Subject: [PATCH 113/367] add parameter check for Class Schema --- mindspore/dataset/engine/datasets.py | 23 ++++++++---- mindspore/dataset/engine/validators.py | 50 ++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 7 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index db2b5169d2..de604a67e9 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -38,7 +38,7 @@ from .iterators import DictIterator, TupleIterator from .validators import 
check, check_batch, check_shuffle, check_map, check_repeat, check_zip, check_rename, \ check_project, check_imagefolderdatasetv2, check_mnist_cifar_dataset, check_manifestdataset, \ check_tfrecorddataset, check_vocdataset, check_celebadataset, check_minddataset, check_generatordataset, \ - check_zip_dataset + check_zip_dataset, check_add_column from ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist try: @@ -2334,13 +2334,20 @@ class Schema: self.dataset_type = '' self.num_rows = 0 else: + if not os.path.isfile(schema_file) or not os.access(schema_file, os.R_OK): + raise ValueError("The file %s does not exist or permission denied!" % schema_file) try: with open(schema_file, 'r') as load_f: json_obj = json.load(load_f) - self.from_json(json_obj) except json.decoder.JSONDecodeError: - raise RuntimeError("Schema file failed to load") + raise RuntimeError("Schema file failed to load.") + except UnicodeDecodeError: + raise RuntimeError("Schema file failed to decode.") + except Exception: + raise RuntimeError("Schema file failed to open.") + self.from_json(json_obj) + @check_add_column def add_column(self, name, de_type, shape=None): """ Add new column to the schema. @@ -2359,10 +2366,8 @@ class Schema: if isinstance(de_type, typing.Type): de_type = mstype_to_detype(de_type) new_column["type"] = str(de_type) - elif isinstance(de_type, str): - new_column["type"] = str(DataType(de_type)) else: - raise ValueError("Unknown column type") + new_column["type"] = str(DataType(de_type)) if shape is not None: new_column["shape"] = shape @@ -2391,7 +2396,7 @@ class Schema: Parse the columns and add it to self. Args: - columns (list[str]): names of columns. + columns (dict or list[str]): names of columns. Raises: RuntimeError: If failed to parse schema file. @@ -2399,6 +2404,8 @@ class Schema: RuntimeError: If column's name field is missing. RuntimeError: If column's type field is missing. 
""" + if columns is None: + raise TypeError("Expected non-empty dict or string list.") self.columns = [] for col in columns: name = None @@ -2443,6 +2450,8 @@ class Schema: RuntimeError: if dataset type is missing in the object. RuntimeError: if columns are missing in the object. """ + if not isinstance(json_obj, dict) or json_obj is None: + raise ValueError("Expected non-empty dict.") for k, v in json_obj.items(): if k == "datasetType": self.dataset_type = v diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index b4d22a4a01..26d6241945 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -19,10 +19,15 @@ import inspect as ins import os from functools import wraps from multiprocessing import cpu_count +from mindspore._c_expression import typing from . import samplers from . import datasets INT32_MAX = 2147483647 +valid_detype = [ + "bool", "int8", "int16", "int32", "int64", "uint8", "uint16", + "uint32", "uint64", "float16", "float32", "float64" +] def check(method): @@ -188,6 +193,12 @@ def check(method): return wrapper +def check_valid_detype(type_): + if type_ not in valid_detype: + raise ValueError("Unknown column type") + return True + + def check_filename(path): """ check the filename in the path @@ -743,3 +754,42 @@ def check_project(method): return method(*args, **kwargs) return new_method + + +def check_shape(shape, name): + if isinstance(shape, list): + for element in shape: + if not isinstance(element, int): + raise TypeError( + "Each element in {0} should be of type int. 
Got {1}.".format(name, type(element))) + else: + raise TypeError("Expected int list.") + + +def check_add_column(method): + """check the input arguments of add_column.""" + @wraps(method) + def new_method(*args, **kwargs): + param_dict = make_param_dict(method, args, kwargs) + + # check name; required argument + name = param_dict.get("name") + if not isinstance(name, str) or not name: + raise TypeError("Expected non-empty string.") + + # check type; required argument + de_type = param_dict.get("de_type") + if de_type is not None: + if not isinstance(de_type, typing.Type) and not check_valid_detype(de_type): + raise ValueError("Unknown column type.") + else: + raise TypeError("Expected non-empty string.") + + # check shape + shape = param_dict.get("shape") + if shape is not None: + check_shape(shape, "shape") + + return method(*args, **kwargs) + + return new_method From c4f9230f037a23d4a66470e76319af6204dbf569 Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Tue, 7 Apr 2020 20:39:51 +0800 Subject: [PATCH 114/367] usr mindspore. instead of mstype. 
--- mindspore/common/api.py | 4 ++-- mindspore/common/initializer.py | 2 +- mindspore/dataset/engine/datasets.py | 4 ++-- mindspore/ops/operations/array_ops.py | 7 ++++--- mindspore/ops/operations/math_ops.py | 4 ++-- mindspore/train/model.py | 2 +- 6 files changed, 12 insertions(+), 11 deletions(-) diff --git a/mindspore/common/api.py b/mindspore/common/api.py index 9ee95ef772..d392b33515 100644 --- a/mindspore/common/api.py +++ b/mindspore/common/api.py @@ -230,8 +230,8 @@ def ms_function(fn=None, obj=None, input_signature=None): >>> z = F.tensor_add(x, y) >>> return z >>> - >>> @ms_function(input_signature=(MetaTensor(mstype.float32, (1, 1, 3, 3)), - >>> MetaTensor(mstype.float32, (1, 1, 3, 3)))) + >>> @ms_function(input_signature=(MetaTensor(mindspore.float32, (1, 1, 3, 3)), + >>> MetaTensor(mindspore.float32, (1, 1, 3, 3)))) >>> def tensor_add_with_sig(x, y): >>> z = F.tensor_add(x, y) >>> return z diff --git a/mindspore/common/initializer.py b/mindspore/common/initializer.py index bdc3418129..4261621272 100644 --- a/mindspore/common/initializer.py +++ b/mindspore/common/initializer.py @@ -282,7 +282,7 @@ def initializer(init, shape=None, dtype=mstype.float32): Tensor, initialized tensor. 
Examples: - >>> tensor = initializer('ones', [1, 2, 3], mstype.float32) + >>> tensor = initializer('ones', [1, 2, 3], mindspore.float32) """ if not isinstance(init, (Tensor, numbers.Number, str, Initializer)): raise TypeError('Unsupported init type.') diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index db2b5169d2..658cc3ff49 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -1814,7 +1814,7 @@ class TFRecordDataset(SourceDataset): >>> tfdataset = ds.TFRecordDataset(dataset_files=dataset_files) >>> # 2) get all rows from dataset_files with user-defined schema: >>> schema = ds.Schema() - >>> schema.add_column('col_1d', de_type=mstype.int64, shape=[2]) + >>> schema.add_column('col_1d', de_type=mindspore.int64, shape=[2]) >>> tfdataset = ds.TFRecordDataset(dataset_files=dataset_files, schema=schema) >>> # 3) get all rows from dataset_files with schema file "./schema.json": >>> tfdataset = ds.TFRecordDataset(dataset_files=dataset_files, schema="./schema.json") @@ -2325,7 +2325,7 @@ class Schema: >>> import mindspore.common.dtype as mstype >>> # create schema, specify column name, mindspore.dtype and shape of the column >>> schema = ds.Schema() - >>> schema.add_column('col1', de_type=mstype.int64, shape=[2]) + >>> schema.add_column('col1', de_type=mindspore.int64, shape=[2]) """ def __init__(self, schema_file=None): diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index fdad46a31f..b91c2cbc7d 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -1535,7 +1535,8 @@ class StridedSlice(PrimitiveWithInfer): - Finally, the output is [3, 3, 3]. 
Examples - >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]]) + >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], + >>> [[5, 5, 5], [6, 6, 6]]], mindspore.float32) >>> slice = StridedSlice() >>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1)) >>> output.shape() @@ -2067,7 +2068,7 @@ class SpaceToBatch(PrimitiveWithInfer): >>> block_size = 2 >>> paddings = [[0, 0], [0, 0]] >>> space_to_batch = P.SpaceToBatch(block_size, paddings) - >>> x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mstype.float32) + >>> x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32) >>> space_to_batch(x) [[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]] @@ -2135,7 +2136,7 @@ class BatchToSpace(PrimitiveWithInfer): >>> block_size = 2 >>> crops = [[0, 0], [0, 0]] >>> op = P.BatchToSpace(block_size, crops) - >>> x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mstype.float32) + >>> x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32) >>> output = op(x) [[[[1., 2.], [3., 4.]]]] diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 175b72560f..9b87560735 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1908,8 +1908,8 @@ class Atan2(_MathBinaryOp): Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'. Examples: - >>> input_x = Tensor(np.array([[0, 1]]), mstype.float32) - >>> input_y = Tensor(np.array([[1, 1]]), mstype.float32) + >>> input_x = Tensor(np.array([[0, 1]]), mindspore.float32) + >>> input_y = Tensor(np.array([[1, 1]]), mindspore.float32) >>> atan2 = P.Atan2() >>> atan2(input_x, input_y) [[0. 0.7853982]] diff --git a/mindspore/train/model.py b/mindspore/train/model.py index bcfd897f58..41b372f85a 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -528,7 +528,7 @@ class Model: Tensor, array(s) of predictions. 
Examples: - >>> input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]), mstype.float32) + >>> input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]), mindspore.float32) >>> model = Model(Net()) >>> model.predict(input_data) """ From 84d780c1a45d1d2c672fe65518478a524eddae2f Mon Sep 17 00:00:00 2001 From: Alexey Shevlyakov Date: Tue, 7 Apr 2020 13:32:52 -0400 Subject: [PATCH 115/367] remove make_unique.h --- mindspore/ccsrc/dataset/api/de_pipeline.cc | 7 ++- .../ccsrc/dataset/core/global_context.cc | 6 +-- mindspore/ccsrc/dataset/core/tensor.cc | 5 +-- mindspore/ccsrc/dataset/engine/data_buffer.cc | 2 +- mindspore/ccsrc/dataset/engine/data_schema.cc | 8 ++-- .../dataset/engine/datasetops/batch_op.cc | 16 +++---- .../dataset/engine/datasetops/dataset_op.cc | 10 ++--- .../engine/datasetops/device_queue_op.cc | 2 +- .../ccsrc/dataset/engine/datasetops/map_op.cc | 2 +- .../dataset/engine/datasetops/parallel_op.cc | 2 +- .../dataset/engine/datasetops/project_op.cc | 2 +- .../dataset/engine/datasetops/rename_op.cc | 4 +- .../dataset/engine/datasetops/shuffle_op.cc | 12 +++--- .../engine/datasetops/source/celeba_op.cc | 24 +++++------ .../engine/datasetops/source/cifar_op.cc | 24 +++++------ .../engine/datasetops/source/generator_op.cc | 8 ++-- .../datasetops/source/image_folder_op.cc | 26 +++++------ .../engine/datasetops/source/manifest_op.cc | 22 +++++----- .../engine/datasetops/source/mindrecord_op.cc | 43 ++++++++++--------- .../engine/datasetops/source/mnist_op.cc | 26 +++++------ .../source/sampler/distributed_sampler.cc | 6 +-- .../datasetops/source/sampler/pk_sampler.cc | 6 +-- .../source/sampler/random_sampler.cc | 8 ++-- .../datasetops/source/sampler/sampler.cc | 2 +- .../datasetops/source/sampler/sampler.h | 1 - .../source/sampler/sequential_sampler.cc | 6 +-- .../source/sampler/subset_random_sampler.cc | 6 +-- .../source/sampler/weighted_random_sampler.cc | 10 ++--- .../datasetops/source/storage_client.cc | 9 ++-- 
.../engine/datasetops/source/storage_op.cc | 8 ++-- .../engine/datasetops/source/tf_buffer.cc | 7 ++- .../engine/datasetops/source/tf_reader_op.cc | 37 ++++++++-------- .../engine/datasetops/source/voc_op.cc | 22 +++++----- .../ccsrc/dataset/engine/datasetops/zip_op.cc | 14 +++--- mindspore/ccsrc/dataset/engine/db_connector.h | 2 +- .../ccsrc/dataset/engine/execution_tree.cc | 2 +- .../dataset/kernels/image/image_utils.cc | 3 +- mindspore/ccsrc/dataset/kernels/py_func_op.cc | 1 - mindspore/ccsrc/dataset/util/arena.cc | 1 - mindspore/ccsrc/dataset/util/circular_pool.cc | 4 +- mindspore/ccsrc/dataset/util/de_error.h | 7 +++ mindspore/ccsrc/dataset/util/list.h | 3 +- mindspore/ccsrc/dataset/util/lock.cc | 1 + mindspore/ccsrc/dataset/util/lock.h | 1 - mindspore/ccsrc/dataset/util/make_unique.h | 37 ---------------- mindspore/ccsrc/dataset/util/queue.h | 2 +- mindspore/ccsrc/dataset/util/task.h | 1 - mindspore/ccsrc/device/gpu/blocking_queue.cc | 3 +- .../kernel/gpu/math/bias_add_gpu_kernel.h | 5 +-- .../kernel/gpu/nn/bias_add_grad_gpu_kenel.h | 5 +-- .../ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h | 5 +-- .../kernel/gpu/nn/lstm_grad_data_gpu_kernel.h | 7 ++- .../gpu/nn/lstm_grad_weight_gpu_kernel.h | 5 +-- tests/ut/cpp/dataset/celeba_op_test.cc | 2 +- tests/ut/cpp/dataset/cifar_op_test.cc | 2 +- tests/ut/cpp/dataset/image_folder_op_test.cc | 14 +++--- tests/ut/cpp/dataset/manifest_op_test.cc | 2 +- tests/ut/cpp/dataset/project_op_test.cc | 2 +- .../cpp/dataset/stand_alone_samplers_test.cc | 2 +- tests/ut/cpp/dataset/tfReader_op_test.cc | 20 ++++----- 60 files changed, 241 insertions(+), 291 deletions(-) delete mode 100644 mindspore/ccsrc/dataset/util/make_unique.h diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.cc b/mindspore/ccsrc/dataset/api/de_pipeline.cc index d51204f659..53ec8e5565 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.cc +++ b/mindspore/ccsrc/dataset/api/de_pipeline.cc @@ -23,7 +23,6 @@ #include "dataset/engine/datasetops/source/image_folder_op.h" 
#include "dataset/engine/datasetops/source/mnist_op.h" #include "dataset/engine/datasetops/source/voc_op.h" -#include "dataset/util/make_unique.h" #include "dataset/core/tensor.h" #include "dataset/engine/dataset_iterator.h" #include "dataset/engine/datasetops/source/manifest_op.h" @@ -123,7 +122,7 @@ Status DEPipeline::AssignRootNode(const DsOpPtr &dataset_op) { return (tree_->As Status DEPipeline::LaunchTreeExec() { RETURN_IF_NOT_OK(tree_->Prepare()); RETURN_IF_NOT_OK(tree_->Launch()); - iterator_ = make_unique(tree_); + iterator_ = std::make_unique(tree_); if (iterator_ == nullptr) RETURN_STATUS_UNEXPECTED("Cannot create an Iterator."); return Status::OK(); } @@ -311,7 +310,7 @@ Status DEPipeline::ParseStorageOp(const py::dict &args, std::shared_ptrSetSchemaFile(ToString(args["schema"])); } else if (!args["schema_json_string"].is_none()) { - std::unique_ptr schema = make_unique(); + std::unique_ptr schema = std::make_unique(); std::string s = ToString(args["schema_json_string"]); RETURN_IF_NOT_OK(schema->LoadSchemaString(s, std::vector())); (void)builder->SetNumRows(schema->num_rows()); @@ -689,7 +688,7 @@ Status DEPipeline::ParseTFReaderOp(const py::dict &args, std::shared_ptr schema = make_unique(); + std::unique_ptr schema = std::make_unique(); if (args.contains("schema_file_path")) { RETURN_IF_NOT_OK(schema->LoadSchemaFile(ToString(args["schema_file_path"]), columns_to_load)); } else { diff --git a/mindspore/ccsrc/dataset/core/global_context.cc b/mindspore/ccsrc/dataset/core/global_context.cc index 7e361a1f2c..3de8e0fcd8 100644 --- a/mindspore/ccsrc/dataset/core/global_context.cc +++ b/mindspore/ccsrc/dataset/core/global_context.cc @@ -55,9 +55,9 @@ Status GlobalContext::Init() { // For testing we can use Dummy pool instead // Create some tensor allocators for the different types and hook them into the pool. 
- tensor_allocator_ = mindspore::make_unique>(mem_pool_); - cv_tensor_allocator_ = mindspore::make_unique>(mem_pool_); - int_allocator_ = mindspore::make_unique(mem_pool_); + tensor_allocator_ = std::make_unique>(mem_pool_); + cv_tensor_allocator_ = std::make_unique>(mem_pool_); + int_allocator_ = std::make_unique(mem_pool_); return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/core/tensor.cc b/mindspore/ccsrc/dataset/core/tensor.cc index 6aa34fa342..8f0eae459a 100644 --- a/mindspore/ccsrc/dataset/core/tensor.cc +++ b/mindspore/ccsrc/dataset/core/tensor.cc @@ -28,7 +28,6 @@ #include "dataset/core/global_context.h" #include "dataset/core/pybind_support.h" #include "dataset/core/tensor_shape.h" -#include "dataset/util/make_unique.h" namespace py = pybind11; namespace mindspore { @@ -53,7 +52,7 @@ namespace dataset { Tensor::Tensor(const TensorShape &shape, const DataType &type) : shape_(shape), type_(type), data_(nullptr) { // grab the mem pool from global context and create the allocator for char data area std::shared_ptr global_pool = GlobalContext::Instance()->mem_pool(); - data_allocator_ = mindspore::make_unique>(global_pool); + data_allocator_ = std::make_unique>(global_pool); } Tensor::Tensor(const TensorShape &shape, const DataType &type, const unsigned char *data) : Tensor(shape, type) { @@ -137,7 +136,7 @@ Status Tensor::CreateTensor(std::shared_ptr *ptr, py::array arr) { if ((*ptr)->type_ == DataType::DE_UNKNOWN) RETURN_STATUS_UNEXPECTED("Invalid data type."); std::shared_ptr global_pool = GlobalContext::Instance()->mem_pool(); - (*ptr)->data_allocator_ = mindspore::make_unique>(global_pool); + (*ptr)->data_allocator_ = std::make_unique>(global_pool); static_cast((*ptr)->StartAddr()); int64_t byte_size = (*ptr)->SizeInBytes(); unsigned char *data = static_cast(arr.request().ptr); diff --git a/mindspore/ccsrc/dataset/engine/data_buffer.cc b/mindspore/ccsrc/dataset/engine/data_buffer.cc index a0f47512ec..4aed994d3c 100644 --- 
a/mindspore/ccsrc/dataset/engine/data_buffer.cc +++ b/mindspore/ccsrc/dataset/engine/data_buffer.cc @@ -40,7 +40,7 @@ Status DataBuffer::CreateDataBuffer( case DatasetType::kTf: { // This type of buffer is for TF record data. // Allocate derived class version for a TF buffers - new_data_buffer = mindspore::make_unique(id, kDeBFlagNone, storage_client); + new_data_buffer = std::make_unique(id, kDeBFlagNone, storage_client); break; } default: { diff --git a/mindspore/ccsrc/dataset/engine/data_schema.cc b/mindspore/ccsrc/dataset/engine/data_schema.cc index 68666796be..4fe5d665c6 100644 --- a/mindspore/ccsrc/dataset/engine/data_schema.cc +++ b/mindspore/ccsrc/dataset/engine/data_schema.cc @@ -26,8 +26,8 @@ #include "common/utils.h" #include "dataset/util/status.h" #include "dataset/core/tensor_shape.h" -#include "dataset/util/make_unique.h" #include "utils/log_adapter.h" +#include "dataset/util/de_error.h" namespace mindspore { namespace dataset { @@ -58,7 +58,7 @@ ColDescriptor::ColDescriptor(const std::string &col_name, DataType col_type, Ten // our shape. Otherwise, set our shape to be empty. if (in_shape != nullptr) { // Create a shape and copy construct it into our column's shape. - tensor_shape_ = mindspore::make_unique(*in_shape); + tensor_shape_ = std::make_unique(*in_shape); } else { tensor_shape_ = nullptr; } @@ -75,7 +75,7 @@ ColDescriptor::ColDescriptor(const std::string &col_name, DataType col_type, Ten ColDescriptor::ColDescriptor(const ColDescriptor &in_cd) : type_(in_cd.type_), rank_(in_cd.rank_), tensor_impl_(in_cd.tensor_impl_), col_name_(in_cd.col_name_) { // If it has a tensor shape, make a copy of it with our own unique_ptr. - tensor_shape_ = in_cd.hasShape() ? mindspore::make_unique(in_cd.shape()) : nullptr; + tensor_shape_ = in_cd.hasShape() ? 
std::make_unique(in_cd.shape()) : nullptr; } // Assignment overload @@ -86,7 +86,7 @@ ColDescriptor &ColDescriptor::operator=(const ColDescriptor &in_cd) { tensor_impl_ = in_cd.tensor_impl_; col_name_ = in_cd.col_name_; // If it has a tensor shape, make a copy of it with our own unique_ptr. - tensor_shape_ = in_cd.hasShape() ? mindspore::make_unique(in_cd.shape()) : nullptr; + tensor_shape_ = in_cd.hasShape() ? std::make_unique(in_cd.shape()) : nullptr; } return *this; } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc index 7c5d4bd4c8..8778fe1b45 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc @@ -59,8 +59,8 @@ Status BatchOp::operator()() { TaskManager::FindMe()->Post(); int32_t epoch_num = 0, batch_num = 0, cnt = 0; TensorRow new_row; - std::unique_ptr table = make_unique(); - child_iterator_ = mindspore::make_unique(this, 0, 0); + std::unique_ptr table = std::make_unique(); + child_iterator_ = std::make_unique(this, 0, 0); RETURN_IF_NOT_OK(child_iterator_->FetchNextTensorRow(&new_row)); column_name_map_ = child_iterator_->col_name_id_map(); int32_t cur_batch_size = 0; @@ -72,7 +72,7 @@ Status BatchOp::operator()() { if (table->size() == static_cast(cur_batch_size)) { RETURN_IF_NOT_OK(worker_queues_[cnt++ % num_workers_]->EmplaceBack( std::make_pair(std::move(table), CBatchInfo(epoch_num, batch_num++, cnt - epoch_num)))); - table = make_unique(); + table = std::make_unique(); RETURN_IF_NOT_OK(GetBatchSize(&cur_batch_size, CBatchInfo(epoch_num, batch_num, cnt - epoch_num))); } RETURN_IF_NOT_OK(child_iterator_->FetchNextTensorRow(&new_row)); @@ -82,7 +82,7 @@ Status BatchOp::operator()() { RETURN_IF_NOT_OK(worker_queues_[cnt++ % num_workers_]->EmplaceBack( std::make_pair(std::move(table), CBatchInfo(epoch_num, batch_num++, cnt - epoch_num)))); } - table = make_unique(); // this drops when drop == true + table = 
std::make_unique(); // this drops when drop == true // end of the current epoch, batch_num should start from 0 again batch_num = 0; epoch_num++; @@ -153,9 +153,9 @@ Status BatchOp::WorkerEntry(int32_t workerId) { RETURN_IF_NOT_OK(worker_queues_[workerId]->PopFront(&table_pair)); while (table_pair.second.ctrl_ != batchCtrl::kQuit) { if (table_pair.second.ctrl_ == batchCtrl::kEOE) { - RETURN_IF_NOT_OK(out_connector_->Add(workerId, make_unique(0, DataBuffer::kDeBFlagEOE))); + RETURN_IF_NOT_OK(out_connector_->Add(workerId, std::make_unique(0, DataBuffer::kDeBFlagEOE))); } else if (table_pair.second.ctrl_ == batchCtrl::kEOF) { - RETURN_IF_NOT_OK(out_connector_->Add(workerId, make_unique(0, DataBuffer::kDeBFlagEOF))); + RETURN_IF_NOT_OK(out_connector_->Add(workerId, std::make_unique(0, DataBuffer::kDeBFlagEOF))); } else if (table_pair.second.ctrl_ == batchCtrl::kNoCtrl) { std::unique_ptr db = nullptr; RETURN_IF_NOT_OK(MakeBatchedBuffer(std::move(table_pair), &db)); @@ -170,8 +170,8 @@ Status BatchOp::MakeBatchedBuffer(std::pair, CBatc std::unique_ptr *db) { RETURN_UNEXPECTED_IF_NULL(table_pair.first); if (!input_column_names_.empty()) RETURN_IF_NOT_OK(MapColumns(&table_pair)); // pass it through pyfunc - (*db) = make_unique(table_pair.second.batch_num_, DataBuffer::kDeBFlagNone); - std::unique_ptr dest_table = make_unique(); + (*db) = std::make_unique(table_pair.second.batch_num_, DataBuffer::kDeBFlagNone); + std::unique_ptr dest_table = std::make_unique(); RETURN_IF_NOT_OK(BatchRows(&table_pair.first, &dest_table, table_pair.first->size())); (*db)->set_tensor_table(std::move(dest_table)); (*db)->set_column_name_map(column_name_map_); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc index d3b85b84fb..f51c2a1539 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc @@ -80,9 +80,9 @@ void 
DatasetOp::CreateConnector(int32_t num_producers, int32_t num_consumers) { MS_LOG(INFO) << "Creating connector in tree operator: " << operator_id_ << ". Producer: " << num_producers << ". Consumer: " << num_consumers << "."; if (oc_queue_size_ > 0) { - out_connector_ = mindspore::make_unique(num_producers, // The number of producers - num_consumers, // Only one consumer (the training App) - oc_queue_size_); + out_connector_ = std::make_unique(num_producers, // The number of producers + num_consumers, // Only one consumer (the training App) + oc_queue_size_); } else { // Some op's may choose not to have an output connector MS_LOG(INFO) << "Bypassed connector creation for tree operator: " << operator_id_ << "."; @@ -149,7 +149,7 @@ Status DatasetOp::GetNextInput(std::unique_ptr *p_buffer, int32_t wo // The base class implementation simply flows the eoe message to output. Derived classes // may override if they need to perform special eoe handling. Status DatasetOp::EoeReceived(int32_t worker_id) { - std::unique_ptr eoe_buffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOE); + std::unique_ptr eoe_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOE); return (out_connector_->Add(static_cast(worker_id), std::move(eoe_buffer))); } @@ -157,7 +157,7 @@ Status DatasetOp::EoeReceived(int32_t worker_id) { // The base class implementation simply flows the eof message to output. Derived classes // may override if they need to perform special eof handling. 
Status DatasetOp::EofReceived(int32_t worker_id) { - std::unique_ptr eof_buffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOF); + std::unique_ptr eof_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOF); return (out_connector_->Add(static_cast(worker_id), std::move(eof_buffer))); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/device_queue_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/device_queue_op.cc index 3c2eea16ee..71e4ce64a4 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/device_queue_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/device_queue_op.cc @@ -225,7 +225,7 @@ Status DeviceQueueOp::SendDataToCPU() { MS_LOG(INFO) << "Device queue, sending data to CPU."; int64_t total_batch = 0; - std::unique_ptr child_iterator = mindspore::make_unique(this, 0, 0); + std::unique_ptr child_iterator = std::make_unique(this, 0, 0); while (!(child_iterator->eof_handled())) { TensorRow curr_row; RETURN_IF_NOT_OK(child_iterator->FetchNextTensorRow(&curr_row)); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/map_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/map_op.cc index c9f1f98ae0..b6d603bac9 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/map_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/map_op.cc @@ -179,7 +179,7 @@ Status MapOp::WorkerEntry(int32_t worker_id) { RETURN_IF_NOT_OK(WorkerEntryInit(in_buffer.get(), &keep_input_columns, &to_process_indices, &final_col_name_id_map, &input_columns, &output_columns)); - std::unique_ptr new_tensor_table(mindspore::make_unique()); + std::unique_ptr new_tensor_table(std::make_unique()); // Perform the compute function of TensorOp(s) and store the result in new_tensor_table. 
RETURN_IF_NOT_OK(WorkerCompute(in_buffer.get(), to_process_indices, new_tensor_table.get(), keep_input_columns, &input_columns, &output_columns)); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/parallel_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/parallel_op.cc index d9792312a3..4b2af2250a 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/parallel_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/parallel_op.cc @@ -48,7 +48,7 @@ Status ParallelOp::CreateWorkerConnector(int32_t worker_connector_size) { // Instantiate the worker connector. This is the internal connector, not the operators // output connector. It has single master consuming from it (num producers is 1), and the number // of workers is the defined count from the op. - worker_connector_ = mindspore::make_unique(num_workers_, num_producers_, worker_connector_size); + worker_connector_ = std::make_unique(num_workers_, num_producers_, worker_connector_size); return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/project_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/project_op.cc index 11296f84f4..b87967dde8 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/project_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/project_op.cc @@ -79,7 +79,7 @@ Status ProjectOp::Project(std::unique_ptr *data_buffer) { new_column_name_mapping[current_column] = i; projected_column_indices.push_back(column_name_mapping[current_column]); } - std::unique_ptr new_tensor_table = mindspore::make_unique(); + std::unique_ptr new_tensor_table = std::make_unique(); while ((*data_buffer)->NumRows() > 0) { TensorRow current_row; RETURN_IF_NOT_OK((*data_buffer)->PopRow(¤t_row)); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/rename_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/rename_op.cc index c09f56141e..725476bf91 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/rename_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/rename_op.cc @@ -84,13 +84,13 @@ 
Status RenameOp::operator()() { // we got eoe, now try again until we get eof MS_LOG(INFO) << "Rename operator EOE Received."; - RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(mindspore::make_unique(0, DataBuffer::kDeBFlagEOE)))); + RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique(0, DataBuffer::kDeBFlagEOE)))); MS_LOG(DEBUG) << "Rename operator fetching buffer after EOE."; RETURN_IF_NOT_OK(GetNextInput(&curr_buffer)); } // end of while eof loop MS_LOG(INFO) << "Rename opeerator EOF Received."; - RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(mindspore::make_unique(0, DataBuffer::kDeBFlagEOF)))); + RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique(0, DataBuffer::kDeBFlagEOF)))); return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc index 5dae48ad73..2afafe2128 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc @@ -70,7 +70,7 @@ ShuffleOp::ShuffleOp(int32_t shuffle_size, uint32_t shuffle_seed, int32_t op_con rng_(shuffle_seed), buffer_counter_(0), rows_per_buffer_(rows_per_buffer), - shuffle_buffer_(mindspore::make_unique()), + shuffle_buffer_(std::make_unique()), shuffle_last_row_idx_(0), shuffle_buffer_state_(kShuffleStateInit) {} @@ -90,7 +90,7 @@ Status ShuffleOp::SelfReset() { shuffle_seed_ = distribution(random_device); rng_ = std::mt19937_64(shuffle_seed_); } - shuffle_buffer_ = mindspore::make_unique(); + shuffle_buffer_ = std::make_unique(); buffer_counter_ = 0; shuffle_last_row_idx_ = 0; shuffle_buffer_state_ = kShuffleStateInit; @@ -142,7 +142,7 @@ Status ShuffleOp::operator()() { // Create the child iterator to fetch our data from. 
int32_t worker_id = 0; int32_t child_idx = 0; - child_iterator_ = mindspore::make_unique(this, worker_id, child_idx); + child_iterator_ = std::make_unique(this, worker_id, child_idx); // Main operator loop while (true) { @@ -161,7 +161,7 @@ Status ShuffleOp::operator()() { // Step 1) // Create an output tensor table if one is not created yet. if (!new_buffer_table) { - new_buffer_table = mindspore::make_unique(); + new_buffer_table = std::make_unique(); } // Step 2) @@ -176,7 +176,7 @@ Status ShuffleOp::operator()() { // and send this buffer on it's way up the pipeline. Special case is if this is the // last row then we also send it. if (new_buffer_table->size() == rows_per_buffer_ || shuffle_last_row_idx_ == 0) { - auto new_buffer = mindspore::make_unique(buffer_counter_, DataBuffer::kDeBFlagNone); + auto new_buffer = std::make_unique(buffer_counter_, DataBuffer::kDeBFlagNone); new_buffer->set_tensor_table(std::move(new_buffer_table)); new_buffer->set_column_name_map(column_name_map_); buffer_counter_++; @@ -218,7 +218,7 @@ Status ShuffleOp::operator()() { // Since we overloaded eoeReceived function, we are responsible to flow the EOE up the // pipepline manually now that we are done draining the shuffle buffer MS_LOG(INFO) << "Shuffle operator sending EOE."; - auto eoe_buffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOE); + auto eoe_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOE); RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer))); // Do not wait for any reset to be flown down from operators above us. 
diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc index 570fc9f454..0c2e20729e 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc @@ -40,7 +40,7 @@ Status CelebAOp::Builder::Build(std::shared_ptr *op) { builder_sampler_ = std::make_shared(); } - builder_schema_ = make_unique(); + builder_schema_ = std::make_unique(); RETURN_IF_NOT_OK( builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); // label is like this:0 1 0 0 1...... @@ -83,7 +83,7 @@ CelebAOp::CelebAOp(int32_t num_workers, int32_t rows_per_buffer, const std::stri col_name_map_[data_schema_->column(index).name()] = index; } - attr_info_queue_ = make_unique>>(queue_size); + attr_info_queue_ = std::make_unique>>(queue_size); io_block_queues_.Init(num_workers_, queue_size); } @@ -311,7 +311,7 @@ Status CelebAOp::AddIOBlock(std::unique_ptr *data_buffer) { row_count++; if (row_count % rows_per_buffer_ == 0) { RETURN_IF_NOT_OK(io_block_queues_[buff_count++ % num_workers_]->Add( - make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); keys.clear(); } } @@ -320,21 +320,21 @@ Status CelebAOp::AddIOBlock(std::unique_ptr *data_buffer) { if (!keys.empty()) { RETURN_IF_NOT_OK(io_block_queues_[(buff_count++) % num_workers_]->Add( - make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); } if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) { RETURN_IF_NOT_OK( - io_block_queues_[(buff_count++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buff_count++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK( - io_block_queues_[(buff_count++) % 
num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEof))); + io_block_queues_[(buff_count++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEof))); for (int32_t i = 0; i < num_workers_; i++) { RETURN_IF_NOT_OK( - io_block_queues_[i]->Add(std::move(make_unique(std::vector(), IOBlock::kDeIoBlockNone)))); + io_block_queues_[i]->Add(std::make_unique(std::vector(), IOBlock::kDeIoBlockNone))); } return Status::OK(); } else { // not the last repeat. Acquire lock, sleeps master thread, wait for the wake-up from reset RETURN_IF_NOT_OK( - io_block_queues_[(buff_count++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buff_count++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK(wp_.Wait()); // Master thread goes to sleep after it has made all the IOBlocks wp_.Clear(); RETURN_IF_NOT_OK(sampler_->GetNextBuffer(data_buffer)); @@ -349,17 +349,17 @@ Status CelebAOp::WorkerEntry(int32_t worker_id) { RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block)); while (io_block != nullptr) { if (io_block->eoe() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(make_unique(0, DataBuffer::kDeBFlagEOE)))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOE))); buffer_id = worker_id; } else if (io_block->eof() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(make_unique(0, DataBuffer::kDeBFlagEOF)))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOF))); } else { std::vector keys; RETURN_IF_NOT_OK(io_block->GetKeys(&keys)); if (keys.empty()) { return Status::OK(); // empty key is a quit signal for workers } - std::unique_ptr db = make_unique(buffer_id, DataBuffer::kDeBFlagNone); + std::unique_ptr db = std::make_unique(buffer_id, DataBuffer::kDeBFlagNone); RETURN_IF_NOT_OK(LoadBuffer(keys, &db)); RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db))); 
buffer_id += num_workers_; @@ -370,7 +370,7 @@ Status CelebAOp::WorkerEntry(int32_t worker_id) { } Status CelebAOp::LoadBuffer(const std::vector &keys, std::unique_ptr *db) { - std::unique_ptr deq = make_unique(); + std::unique_ptr deq = std::make_unique(); for (const auto &key : keys) { TensorRow row; RETURN_IF_NOT_OK(LoadTensorRow(image_labels_vec_[key], &row)); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc index 260a4a4dc5..3e64c8a3e6 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc @@ -47,7 +47,7 @@ Status CifarOp::Builder::Build(std::shared_ptr *ptr) { if (sampler_ == nullptr) { sampler_ = std::make_shared(); } - schema_ = make_unique(); + schema_ = std::make_unique(); TensorShape scalar = TensorShape::CreateScalar(); RETURN_IF_NOT_OK(schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); if (cifar_type_ == kCifar10) { @@ -91,7 +91,7 @@ CifarOp::CifarOp(CifarType type, int32_t num_works, int32_t rows_per_buf, const col_name_map_[data_schema_->column(i).name()] = i; } constexpr uint64_t kUtilQueueSize = 512; - cifar_raw_data_block_ = make_unique>>(kUtilQueueSize); + cifar_raw_data_block_ = std::make_unique>>(kUtilQueueSize); io_block_queues_.Init(num_workers_, queue_size); } @@ -114,7 +114,7 @@ Status CifarOp::operator()() { if (row_cnt_ >= num_samples_) break; // enough row read, break for loop if (row_cnt_ % rows_per_buffer_ == 0) { RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add( - make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); keys.clear(); } } @@ -122,21 +122,21 @@ Status CifarOp::operator()() { } if (keys.empty() == false) { RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add( - make_unique(IOBlock(keys, 
IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); } if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) { RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEof))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEof))); for (int32_t i = 0; i < num_workers_; i++) { RETURN_IF_NOT_OK( - io_block_queues_[i]->Add(make_unique(std::vector(), IOBlock::kDeIoBlockNone))); + io_block_queues_[i]->Add(std::make_unique(std::vector(), IOBlock::kDeIoBlockNone))); } return Status::OK(); } else { // not the last repeat. Acquire lock, sleeps master thread, wait for the wake-up from reset RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK(wp_.Wait()); // Master thread goes to sleep after it has made all the IOBlocks wp_.Clear(); RETURN_IF_NOT_OK(sampler_->GetNextBuffer(&sampler_buffer)); @@ -169,17 +169,17 @@ Status CifarOp::WorkerEntry(int32_t worker_id) { RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block)); while (io_block != nullptr) { if (io_block->eoe() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique(0, DataBuffer::kDeBFlagEOE))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOE))); buffer_id = worker_id; } else if (io_block->eof() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique(0, DataBuffer::kDeBFlagEOF))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOF))); } 
else { std::vector keys; RETURN_IF_NOT_OK(io_block->GetKeys(&keys)); if (keys.empty() == true) { return Status::OK(); // empty key is a quit signal for workers } - std::unique_ptr db = make_unique(buffer_id, DataBuffer::kDeBFlagNone); + std::unique_ptr db = std::make_unique(buffer_id, DataBuffer::kDeBFlagNone); RETURN_IF_NOT_OK(LoadBuffer(keys, &db)); RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db))); buffer_id += num_workers_; @@ -213,7 +213,7 @@ Status CifarOp::LoadTensorRow(uint64_t index, TensorRow *trow) { // Looping over LoadTensorRow to make 1 DataBuffer. 1 function call produces 1 buffer Status CifarOp::LoadBuffer(const std::vector &keys, std::unique_ptr *db) { - std::unique_ptr deq = make_unique(); + std::unique_ptr deq = std::make_unique(); for (const int64_t &key : keys) { TensorRow trow; RETURN_IF_NOT_OK(LoadTensorRow(key, &trow)); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/generator_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/generator_op.cc index ceb88ceb0e..37a74f019a 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/generator_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/generator_op.cc @@ -173,9 +173,9 @@ Status GeneratorOp::operator()() { bool eof = false; while (!eof) { // Create new buffer each iteration - fetched_buffer = mindspore::make_unique(buffer_id_++, DataBuffer::kDeBFlagNone); + fetched_buffer = std::make_unique(buffer_id_++, DataBuffer::kDeBFlagNone); fetched_buffer->set_column_name_map(column_names_map_); - std::unique_ptr fetched_table = mindspore::make_unique(); + std::unique_ptr fetched_table = std::make_unique(); bool eoe = false; { py::gil_scoped_acquire gil_acquire; @@ -201,12 +201,12 @@ Status GeneratorOp::operator()() { if (eoe) { // Push out EOE upon StopIteration exception from generator MS_LOG(INFO) << "Generator operator sends out EOE."; - std::unique_ptr eoe_buffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOE); + std::unique_ptr eoe_buffer = 
std::make_unique(0, DataBuffer::kDeBFlagEOE); RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer))); if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) { // If last repeat or not repeated, push out EOF and exit master loop MS_LOG(INFO) << "Generator operator sends out EOF."; - std::unique_ptr eof_buffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOF); + std::unique_ptr eof_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOF); RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eof_buffer))); MS_LOG(INFO) << "Generator operator main execution loop complete."; eof = true; diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc index b8044fb38a..f6cf377666 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc @@ -39,7 +39,7 @@ Status ImageFolderOp::Builder::Build(std::shared_ptr *ptr) { if (builder_sampler_ == nullptr) { builder_sampler_ = std::make_shared(); } - builder_schema_ = make_unique(); + builder_schema_ = std::make_unique(); TensorShape scalar = TensorShape::CreateScalar(); RETURN_IF_NOT_OK( builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); @@ -82,8 +82,8 @@ ImageFolderOp::ImageFolderOp(int32_t num_wkrs, int32_t rows_per_buffer, std::str for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) { col_name_map_[data_schema_->column(i).name()] = i; } - folder_name_queue_ = make_unique>(num_wkrs * queue_size); - image_name_queue_ = make_unique>(num_wkrs * queue_size); + folder_name_queue_ = std::make_unique>(num_wkrs * queue_size); + image_name_queue_ = std::make_unique>(num_wkrs * queue_size); io_block_queues_.Init(num_workers_, queue_size); } @@ -143,7 +143,7 @@ Status ImageFolderOp::operator()() { row_cnt_++; if (row_cnt_ % rows_per_buffer_ == 0) { 
RETURN_IF_NOT_OK( - io_block_queues_[buf_cnt_++ % num_workers_]->Add(make_unique(keys, IOBlock::kDeIoBlockNone))); + io_block_queues_[buf_cnt_++ % num_workers_]->Add(std::make_unique(keys, IOBlock::kDeIoBlockNone))); keys.clear(); } } @@ -151,21 +151,21 @@ Status ImageFolderOp::operator()() { } if (keys.empty() == false) { RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(keys, IOBlock::kDeIoBlockNone))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(keys, IOBlock::kDeIoBlockNone))); } if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) { - std::unique_ptr eoe_block = make_unique(IOBlock::kDeIoBlockFlagEoe); - std::unique_ptr eof_block = make_unique(IOBlock::kDeIoBlockFlagEof); + std::unique_ptr eoe_block = std::make_unique(IOBlock::kDeIoBlockFlagEoe); + std::unique_ptr eof_block = std::make_unique(IOBlock::kDeIoBlockFlagEof); RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::move(eoe_block))); RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::move(eof_block))); for (int32_t i = 0; i < num_workers_; ++i) { RETURN_IF_NOT_OK( - io_block_queues_[i]->Add(make_unique(std::vector(), IOBlock::kDeIoBlockNone))); + io_block_queues_[i]->Add(std::make_unique(std::vector(), IOBlock::kDeIoBlockNone))); } return Status::OK(); } else { // not the last repeat. 
Sleep master thread, wait for the wake-up from reset RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK(wp_.Wait()); // Master thread goes to sleep after it has made all the IOBlocks wp_.Clear(); RETURN_IF_NOT_OK(sampler_->GetNextBuffer(&sampler_buffer)); @@ -182,15 +182,15 @@ Status ImageFolderOp::WorkerEntry(int32_t worker_id) { RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block)); while (io_block != nullptr) { if (io_block->eoe() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique(0, DataBuffer::kDeBFlagEOE))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOE))); buffer_id = worker_id; } else if (io_block->eof() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique(0, DataBuffer::kDeBFlagEOF))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOF))); } else { std::vector keys; RETURN_IF_NOT_OK(io_block->GetKeys(&keys)); if (keys.empty() == true) return Status::OK(); // empty key is a quit signal for workers - std::unique_ptr db = make_unique(buffer_id, DataBuffer::kDeBFlagNone); + std::unique_ptr db = std::make_unique(buffer_id, DataBuffer::kDeBFlagNone); RETURN_IF_NOT_OK(LoadBuffer(keys, &db)); RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db))); buffer_id += num_workers_; @@ -231,7 +231,7 @@ Status ImageFolderOp::LoadTensorRow(ImageLabelPair pairPtr, TensorRow *trow) { // Looping over LoadTensorRow to make 1 DataBuffer. 
1 function call produces 1 buffer Status ImageFolderOp::LoadBuffer(const std::vector &keys, std::unique_ptr *db) { - std::unique_ptr deq = make_unique(); + std::unique_ptr deq = std::make_unique(); TensorRow trow; for (const int64_t &key : keys) { RETURN_IF_NOT_OK(this->LoadTensorRow(image_label_pairs_[key], &trow)); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc index 52db199e5b..6907647952 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc @@ -40,7 +40,7 @@ Status ManifestOp::Builder::Build(std::shared_ptr *ptr) { if (builder_sampler_ == nullptr) { builder_sampler_ = std::make_shared(); } - builder_schema_ = make_unique(); + builder_schema_ = std::make_unique(); RETURN_IF_NOT_OK( builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); RETURN_IF_NOT_OK( @@ -105,7 +105,7 @@ Status ManifestOp::AddIoBlock(std::unique_ptr *sampler_buffer) { row_cnt_++; if (row_cnt_ % rows_per_buffer_ == 0) { RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add( - make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); keys.clear(); } } @@ -113,21 +113,21 @@ Status ManifestOp::AddIoBlock(std::unique_ptr *sampler_buffer) { } if (keys.empty() == false) { RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add( - make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); } if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) { RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK( - 
io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEof))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEof))); for (int32_t i = 0; i < num_workers_; i++) { RETURN_IF_NOT_OK( - io_block_queues_[i]->Add(make_unique(std::vector(), IOBlock::kDeIoBlockNone))); + io_block_queues_[i]->Add(std::make_unique(std::vector(), IOBlock::kDeIoBlockNone))); } return Status::OK(); } else { RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK(wp_.Wait()); // Master thread goes to sleep after it has made all the IOBlocks wp_.Clear(); RETURN_IF_NOT_OK(sampler_->GetNextBuffer(sampler_buffer)); @@ -160,17 +160,17 @@ Status ManifestOp::WorkerEntry(int32_t worker_id) { RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block)); while (io_block != nullptr) { if (io_block->eoe() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique(0, DataBuffer::kDeBFlagEOE))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOE))); buffer_id = worker_id; } else if (io_block->eof() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique(0, DataBuffer::kDeBFlagEOF))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOF))); } else { std::vector keys; RETURN_IF_NOT_OK(io_block->GetKeys(&keys)); if (keys.empty()) { return Status::OK(); // empty key is a quit signal for workers } - std::unique_ptr db = make_unique(buffer_id, DataBuffer::kDeBFlagNone); + std::unique_ptr db = std::make_unique(buffer_id, DataBuffer::kDeBFlagNone); RETURN_IF_NOT_OK(LoadBuffer(keys, &db)); RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db))); buffer_id += num_workers_; @@ -227,7 +227,7 @@ Status ManifestOp::LoadTensorRow(const std::pair 
&keys, std::unique_ptr *db) { - std::unique_ptr deq = make_unique(); + std::unique_ptr deq = std::make_unique(); for (const auto &key : keys) { TensorRow trow; RETURN_IF_NOT_OK(LoadTensorRow(image_labelname_[static_cast(key)], &trow)); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc index b062371d7f..f490c9ff0b 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc @@ -30,7 +30,6 @@ #include "dataset/engine/datasetops/dataset_op.h" #include "dataset/engine/db_connector.h" #include "dataset/engine/execution_tree.h" -#include "dataset/util/make_unique.h" #include "utils/log_adapter.h" namespace mindspore { @@ -96,18 +95,18 @@ MindRecordOp::MindRecordOp(int32_t num_mind_record_workers, int32_t rows_per_buf io_blk_queues_.Init(num_workers_, op_connector_queue_size); if (!block_reader_) return; for (int32_t i = 0; i < num_workers_; ++i) { - block_buffer_.emplace_back(make_unique>(std::vector{})); + block_buffer_.emplace_back(std::make_unique>(std::vector{})); } } // Private helper method to encapsulate some common construction/reset tasks Status MindRecordOp::Init() { - shard_reader_ = mindspore::make_unique(); + shard_reader_ = std::make_unique(); auto rc = shard_reader_->Open(dataset_file_, num_mind_record_workers_, columns_to_load_, operators_, block_reader_); CHECK_FAIL_RETURN_UNEXPECTED(rc != MSRStatus::FAILED, "MindRecordOp init failed."); - data_schema_ = mindspore::make_unique(); + data_schema_ = std::make_unique(); std::vector> schema_vec = shard_reader_->get_shard_header()->get_schemas(); // check whether schema exists, if so use the first one @@ -144,7 +143,7 @@ Status MindRecordOp::Init() { } if (!load_all_cols) { - std::unique_ptr tmp_schema = make_unique(); + std::unique_ptr tmp_schema = std::make_unique(); for (std::string colname : columns_to_load_) { 
CHECK_FAIL_RETURN_UNEXPECTED(colname_to_ind.find(colname) != colname_to_ind.end(), colname + ": doesn't exist"); RETURN_IF_NOT_OK(tmp_schema->AddColumn(data_schema_->column(colname_to_ind[colname]))); @@ -298,7 +297,7 @@ Status MindRecordOp::LoadFloat(TensorShape *new_shape, std::unique_ptr *arr RETURN_IF_NOT_OK(GetFloat(&value, columns_json[column_name], use_double)); *new_shape = TensorShape::CreateScalar(); - *array_data = mindspore::make_unique(1); + *array_data = std::make_unique(1); (*array_data)[0] = value; } else { if (column.hasShape()) { @@ -309,7 +308,7 @@ Status MindRecordOp::LoadFloat(TensorShape *new_shape, std::unique_ptr *arr } int idx = 0; - *array_data = mindspore::make_unique(new_shape->NumOfElements()); + *array_data = std::make_unique(new_shape->NumOfElements()); for (auto &element : columns_json[column_name]) { T value = 0; RETURN_IF_NOT_OK(GetFloat(&value, element, use_double)); @@ -350,7 +349,7 @@ Status MindRecordOp::LoadInt(TensorShape *new_shape, std::unique_ptr *array RETURN_IF_NOT_OK(GetInt(&value, columns_json[column_name])); *new_shape = TensorShape::CreateScalar(); - *array_data = mindspore::make_unique(1); + *array_data = std::make_unique(1); (*array_data)[0] = value; } else { if (column.hasShape()) { @@ -361,7 +360,7 @@ Status MindRecordOp::LoadInt(TensorShape *new_shape, std::unique_ptr *array } int idx = 0; - *array_data = mindspore::make_unique(new_shape->NumOfElements()); + *array_data = std::make_unique(new_shape->NumOfElements()); for (auto &element : columns_json[column_name]) { T value = 0; RETURN_IF_NOT_OK(GetInt(&value, element)); @@ -431,12 +430,14 @@ Status MindRecordOp::WorkerEntry(int32_t worker_id) { RETURN_IF_NOT_OK(io_blk_queues_[worker_id]->PopFront(&io_block)); while (io_block != nullptr) { if (io_block->eoe() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(make_unique(0, DataBuffer::kDeBFlagEOE)))); + RETURN_IF_NOT_OK( + out_connector_->Add(worker_id, std::move(std::make_unique(0, 
DataBuffer::kDeBFlagEOE)))); RETURN_IF_NOT_OK(io_blk_queues_[worker_id]->PopFront(&io_block)); continue; } if (io_block->eof() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(make_unique(0, DataBuffer::kDeBFlagEOF)))); + RETURN_IF_NOT_OK( + out_connector_->Add(worker_id, std::move(std::make_unique(0, DataBuffer::kDeBFlagEOF)))); RETURN_IF_NOT_OK(io_blk_queues_[worker_id]->PopFront(&io_block)); continue; } @@ -486,9 +487,9 @@ Status MindRecordOp::WorkerEntry(int32_t worker_id) { Status MindRecordOp::GetBufferFromReader(std::unique_ptr *fetched_buffer, int64_t buffer_id, int32_t worker_id) { - *fetched_buffer = mindspore::make_unique(buffer_id, DataBuffer::kDeBFlagNone); + *fetched_buffer = std::make_unique(buffer_id, DataBuffer::kDeBFlagNone); (*fetched_buffer)->set_column_name_map(column_name_mapping_); - std::unique_ptr tensor_table = mindspore::make_unique(); + std::unique_ptr tensor_table = std::make_unique(); for (int32_t i = 0; i < rows_per_buffer_; ++i) { ShardTuple tupled_buffer; if (block_reader_) { @@ -597,22 +598,22 @@ Status MindRecordOp::operator()() { for (int32_t i = 0; i < buffers_needed_; ++i) { if (block_reader_) RETURN_IF_NOT_OK(FetchBlockBuffer(i)); std::vector keys(1, i); - RETURN_IF_NOT_OK( - io_blk_queues_[buf_cnt_++ % num_workers_]->Add(make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); + RETURN_IF_NOT_OK(io_blk_queues_[buf_cnt_++ % num_workers_]->Add( + std::make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); } if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) { RETURN_IF_NOT_OK( - io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK( - io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEof))); + io_blk_queues_[(buf_cnt_++) % 
num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEof))); for (int32_t i = 0; i < num_workers_; i++) { - RETURN_IF_NOT_OK( - io_blk_queues_[i]->Add(std::move(make_unique(std::vector(), IOBlock::kDeIoBlockNone)))); + RETURN_IF_NOT_OK(io_blk_queues_[i]->Add( + std::move(std::make_unique(std::vector(), IOBlock::kDeIoBlockNone)))); } return Status::OK(); } else { // not the last repeat. Acquire lock, sleeps master thread, wait for the wake-up from reset RETURN_IF_NOT_OK( - io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_blk_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); // reset our buffer count and go to loop again. RETURN_IF_NOT_OK(shard_reader_wait_post_.Wait()); @@ -656,7 +657,7 @@ Status MindRecordOp::LaunchThreadAndInitOp() { } Status MindRecordOp::CountTotalRows(const std::string dataset_path, int64_t *count) { - std::unique_ptr shard_reader = mindspore::make_unique(); + std::unique_ptr shard_reader = std::make_unique(); MSRStatus rc = shard_reader->CountTotalRows(dataset_path, count); if (rc == MSRStatus::FAILED) { RETURN_STATUS_UNEXPECTED("MindRecordOp count total rows failed."); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc index f76fb9314d..3431e58aea 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc @@ -43,7 +43,7 @@ Status MnistOp::Builder::Build(std::shared_ptr *ptr) { if (builder_sampler_ == nullptr) { builder_sampler_ = std::make_shared(); } - builder_schema_ = make_unique(); + builder_schema_ = std::make_unique(); RETURN_IF_NOT_OK( builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kCv, 1))); TensorShape scalar = TensorShape::CreateScalar(); @@ -89,7 +89,7 @@ Status MnistOp::TraversalSampleIds(const std::shared_ptr &sample_ids, st 
row_cnt_++; if (row_cnt_ % rows_per_buffer_ == 0) { RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add( - make_unique(IOBlock(*keys, IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(*keys, IOBlock::kDeIoBlockNone)))); keys->clear(); } } @@ -115,21 +115,21 @@ Status MnistOp::operator()() { } if (keys.empty() == false) { RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add( - make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); } if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) { RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEof))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEof))); for (int32_t i = 0; i < num_workers_; ++i) { RETURN_IF_NOT_OK( - io_block_queues_[i]->Add(make_unique(std::vector(), IOBlock::kDeIoBlockNone))); + io_block_queues_[i]->Add(std::make_unique(std::vector(), IOBlock::kDeIoBlockNone))); } return Status::OK(); } else { RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK(wp_.Wait()); // Master thread goes to sleep after it has made all the IOBlocks wp_.Clear(); RETURN_IF_NOT_OK(sampler_->GetNextBuffer(&sampler_buffer)); @@ -145,15 +145,15 @@ Status MnistOp::WorkerEntry(int32_t worker_id) { RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&iOBlock)); while (iOBlock != nullptr) { if (iOBlock->eoe() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique(0, DataBuffer::kDeBFlagEOE))); + 
RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOE))); buffer_id = worker_id; } else if (iOBlock->eof() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique(0, DataBuffer::kDeBFlagEOF))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOF))); } else { std::vector keys; RETURN_IF_NOT_OK(iOBlock->GetKeys(&keys)); if (keys.empty() == true) return Status::OK(); // empty key is a quit signal for workers - std::unique_ptr db = make_unique(buffer_id, DataBuffer::kDeBFlagNone); + std::unique_ptr db = std::make_unique(buffer_id, DataBuffer::kDeBFlagNone); RETURN_IF_NOT_OK(LoadBuffer(keys, &db)); RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db))); buffer_id += num_workers_; @@ -178,7 +178,7 @@ Status MnistOp::LoadTensorRow(const MnistLabelPair &mnist_pair, TensorRow *trow) // Looping over LoadTensorRow to make 1 DataBuffer. 1 function call produces 1 buffer Status MnistOp::LoadBuffer(const std::vector &keys, std::unique_ptr *db) { - std::unique_ptr deq = make_unique(); + std::unique_ptr deq = std::make_unique(); TensorRow trow; for (const int64_t &key : keys) { RETURN_IF_NOT_OK(this->LoadTensorRow(image_label_pairs_[key], &trow)); @@ -309,8 +309,8 @@ Status MnistOp::ReadImageAndLabel(std::ifstream *image_reader, std::ifstream *la CHECK_FAIL_RETURN_UNEXPECTED((num_images == num_labels), "num_images != num_labels"); // The image size of the Mnist dataset is fixed at [28,28] int64_t size = kMnistImageRows * kMnistImageCols; - auto images_buf = mindspore::make_unique(size * num_images); - auto labels_buf = mindspore::make_unique(num_images); + auto images_buf = std::make_unique(size * num_images); + auto labels_buf = std::make_unique(num_images); if (images_buf == nullptr || labels_buf == nullptr) { std::string err_msg = "Fail to allocate memory for MNIST Buffer."; MS_LOG(ERROR) << err_msg.c_str(); diff --git 
a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc index 51ad71e8cf..28a5705648 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc @@ -52,9 +52,9 @@ Status DistributedSampler::GetNextBuffer(std::unique_ptr *out_buffer if (cnt_ > samples_per_buffer_) { RETURN_STATUS_UNEXPECTED("Distributed Sampler Error"); } else if (cnt_ == samples_per_buffer_) { - (*out_buffer) = mindspore::make_unique(0, DataBuffer::kDeBFlagEOE); + (*out_buffer) = std::make_unique(0, DataBuffer::kDeBFlagEOE); } else { - (*out_buffer) = mindspore::make_unique(cnt_, DataBuffer::kDeBFlagNone); + (*out_buffer) = std::make_unique(cnt_, DataBuffer::kDeBFlagNone); std::shared_ptr sample_ids; RETURN_IF_NOT_OK(CreateSamplerTensor(&sample_ids, samples_per_buffer_)); int64_t *id_ptr = reinterpret_cast(sample_ids->StartAddr()); @@ -63,7 +63,7 @@ Status DistributedSampler::GetNextBuffer(std::unique_ptr *out_buffer *(id_ptr++) = shuffle_ ? 
shuffle_vec_[static_cast(next_id)] : next_id; } TensorRow row(1, sample_ids); - (*out_buffer)->set_tensor_table(make_unique(1, row)); + (*out_buffer)->set_tensor_table(std::make_unique(1, row)); } return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc index 04a6ad17a2..8c8c12fce2 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc @@ -53,9 +53,9 @@ Status PKSampler::GetNextBuffer(std::unique_ptr *out_buffer) { if (next_id_ > num_pk_samples_ || num_pk_samples_ == 0) { RETURN_STATUS_UNEXPECTED("Index out of bound in PKSampler"); } else if (next_id_ == num_pk_samples_) { - (*out_buffer) = mindspore::make_unique(0, DataBuffer::kDeBFlagEOE); + (*out_buffer) = std::make_unique(0, DataBuffer::kDeBFlagEOE); } else { - (*out_buffer) = mindspore::make_unique(next_id_, DataBuffer::kDeBFlagNone); + (*out_buffer) = std::make_unique(next_id_, DataBuffer::kDeBFlagNone); std::shared_ptr sample_ids; int64_t last_id = (samples_per_buffer_ + next_id_ > num_pk_samples_) ? 
num_pk_samples_ : samples_per_buffer_ + next_id_; @@ -68,7 +68,7 @@ Status PKSampler::GetNextBuffer(std::unique_ptr *out_buffer) { *(id_ptr++) = samples[rnd_ind]; } TensorRow row(1, sample_ids); - (*out_buffer)->set_tensor_table(make_unique(1, row)); + (*out_buffer)->set_tensor_table(std::make_unique(1, row)); } return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc index de4d89d950..216f322052 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc @@ -32,9 +32,9 @@ Status RandomSampler::GetNextBuffer(std::unique_ptr *out_buffer) { if (next_id_ > num_samples_) { RETURN_STATUS_UNEXPECTED("RandomSampler Internal Error"); } else if (next_id_ == num_samples_) { - (*out_buffer) = make_unique(0, DataBuffer::kDeBFlagEOE); + (*out_buffer) = std::make_unique(0, DataBuffer::kDeBFlagEOE); } else { - (*out_buffer) = make_unique(next_id_, DataBuffer::kDeBFlagNone); + (*out_buffer) = std::make_unique(next_id_, DataBuffer::kDeBFlagNone); std::shared_ptr sampleIds; int64_t last_id = samples_per_buffer_ + next_id_ > num_samples_ ? 
num_samples_ : samples_per_buffer_ + next_id_; RETURN_IF_NOT_OK(CreateSamplerTensor(&sampleIds, last_id - next_id_)); @@ -44,7 +44,7 @@ Status RandomSampler::GetNextBuffer(std::unique_ptr *out_buffer) { } next_id_ = last_id; TensorRow row(1, sampleIds); - (*out_buffer)->set_tensor_table(make_unique(1, row)); + (*out_buffer)->set_tensor_table(std::make_unique(1, row)); } return Status::OK(); } @@ -61,7 +61,7 @@ Status RandomSampler::Init(const RandomAccessOp *op) { } std::shuffle(shuffled_ids_.begin(), shuffled_ids_.end(), rnd_); } else { - dist = make_unique>(0, num_rows_ - 1); + dist = std::make_unique>(0, num_rows_ - 1); } rnd_.seed(seed_++); return Status::OK(); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc index 9818cd8a17..aa3838f8b5 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc @@ -35,7 +35,7 @@ Status Sampler::CreateSamplerTensor(std::shared_ptr *sample_ids, int64_t } if (col_desc_ == nullptr) { // a ColDescriptor for Tensor that holds SampleIds - col_desc_ = make_unique("sampleIds", DataType(DataType::DE_INT64), TensorImpl::kFlexible, 1); + col_desc_ = std::make_unique("sampleIds", DataType(DataType::DE_INT64), TensorImpl::kFlexible, 1); } TensorShape shape(std::vector(1, num_elements)); RETURN_IF_NOT_OK(Tensor::CreateTensor(sample_ids, col_desc_->tensorImpl(), shape, col_desc_->type())); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h index d9a20f9170..801565508b 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h @@ -27,7 +27,6 @@ #include "dataset/engine/data_buffer.h" #include "dataset/engine/data_schema.h" #include 
"dataset/engine/datasetops/dataset_op.h" -#include "dataset/util/make_unique.h" namespace mindspore { namespace dataset { diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc index 71c3dd07c4..72131a6de1 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc @@ -25,9 +25,9 @@ Status SequentialSampler::GetNextBuffer(std::unique_ptr *out_buffer) if (next_id_ > num_samples_) { RETURN_STATUS_UNEXPECTED("Sequential Sampler Internal Error"); } else if (next_id_ == num_samples_) { - (*out_buffer) = make_unique(0, DataBuffer::kDeBFlagEOE); + (*out_buffer) = std::make_unique(0, DataBuffer::kDeBFlagEOE); } else { - (*out_buffer) = make_unique(next_id_, DataBuffer::kDeBFlagNone); + (*out_buffer) = std::make_unique(next_id_, DataBuffer::kDeBFlagNone); std::shared_ptr sampleIds; int64_t lastId = (samples_per_buffer_ + next_id_ > num_samples_) ? 
num_samples_ : samples_per_buffer_ + next_id_; RETURN_IF_NOT_OK(CreateSamplerTensor(&sampleIds, lastId - next_id_)); @@ -36,7 +36,7 @@ Status SequentialSampler::GetNextBuffer(std::unique_ptr *out_buffer) *(idPtr++) = next_id_++; } TensorRow row(1, sampleIds); - (*out_buffer)->set_tensor_table(make_unique(1, row)); + (*out_buffer)->set_tensor_table(std::make_unique(1, row)); } return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc index 4f727fcd04..16603939b3 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc @@ -64,9 +64,9 @@ Status SubsetRandomSampler::Reset() { Status SubsetRandomSampler::GetNextBuffer(std::unique_ptr *out_buffer) { // All samples have been drawn if (sample_id_ == indices_.size()) { - (*out_buffer) = make_unique(buffer_id_++, DataBuffer::kDeBFlagEOE); + (*out_buffer) = std::make_unique(buffer_id_++, DataBuffer::kDeBFlagEOE); } else { - (*out_buffer) = make_unique(buffer_id_++, DataBuffer::kDeBFlagNone); + (*out_buffer) = std::make_unique(buffer_id_++, DataBuffer::kDeBFlagNone); std::shared_ptr outputIds; int64_t last_id = sample_id_ + samples_per_buffer_; @@ -92,7 +92,7 @@ Status SubsetRandomSampler::GetNextBuffer(std::unique_ptr *out_buffe } // Create a TensorTable from that single tensor and push into DataBuffer - (*out_buffer)->set_tensor_table(make_unique(1, TensorRow(1, outputIds))); + (*out_buffer)->set_tensor_table(std::make_unique(1, TensorRow(1, outputIds))); } return Status::OK(); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc index f305474182..f2957e74be 100644 --- 
a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc @@ -46,10 +46,10 @@ Status WeightedRandomSampler::Init(const RandomAccessOp *op) { CHECK_FAIL_RETURN_UNEXPECTED(num_samples_ > 0 && samples_per_buffer_ > 0, "Fail to init WeightedRandomSampler"); if (!replacement_) { - exp_dist_ = mindspore::make_unique>(1); + exp_dist_ = std::make_unique>(1); InitOnePassSampling(); } else { - discrete_dist_ = mindspore::make_unique>(weights_.begin(), weights_.end()); + discrete_dist_ = std::make_unique>(weights_.begin(), weights_.end()); } return Status::OK(); @@ -96,9 +96,9 @@ Status WeightedRandomSampler::GetNextBuffer(std::unique_ptr *out_buf } if (sample_id_ == num_samples_) { - (*out_buffer) = make_unique(buffer_id_++, DataBuffer::kDeBFlagEOE); + (*out_buffer) = std::make_unique(buffer_id_++, DataBuffer::kDeBFlagEOE); } else { - (*out_buffer) = make_unique(buffer_id_++, DataBuffer::kDeBFlagNone); + (*out_buffer) = std::make_unique(buffer_id_++, DataBuffer::kDeBFlagNone); std::shared_ptr outputIds; int64_t last_id = sample_id_ + samples_per_buffer_; @@ -132,7 +132,7 @@ Status WeightedRandomSampler::GetNextBuffer(std::unique_ptr *out_buf } // Create a TensorTable from that single tensor and push into DataBuffer - (*out_buffer)->set_tensor_table(make_unique(1, TensorRow(1, outputIds))); + (*out_buffer)->set_tensor_table(std::make_unique(1, TensorRow(1, outputIds))); } return Status::OK(); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/storage_client.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/storage_client.cc index 95720a97be..862edcf63a 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/storage_client.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/storage_client.cc @@ -24,7 +24,6 @@ #include "dataset/engine/datasetops/source/storage_client.h" #include "dataset/engine/datasetops/source/storage_op.h" #include 
"dataset/engine/datasetops/source/tf_client.h" -#include "dataset/util/make_unique.h" #include "dataset/util/status.h" namespace mindspore { @@ -57,7 +56,7 @@ static Status CreateStorageClientSwitch( case DatasetType::kTf: { // Construct the derived class TFClient, stored as base class StorageClient store_op->set_rows_per_buffer(32); - *out_client = mindspore::make_unique(std::move(schema), store_op); + *out_client = std::make_unique(std::move(schema), store_op); break; } case DatasetType::kUnknown: @@ -83,7 +82,7 @@ Status StorageClient::CreateStorageClient( std::shared_ptr *out_client) { // Out: the created storage client // Make a new schema first. This only assigns the dataset type. It does not // create the columns yet. - auto new_schema = mindspore::make_unique(); + auto new_schema = std::make_unique(); RETURN_IF_NOT_OK(new_schema->LoadDatasetType(dataset_schema_path)); RETURN_IF_NOT_OK(CreateStorageClientSwitch(std::move(new_schema), store_op, out_client)); return Status::OK(); @@ -99,7 +98,7 @@ Status StorageClient::CreateStorageClient( std::shared_ptr *out_client) { // Out: the created storage client // The dataset type is passed in by the user. Create an empty schema with only // only the dataset type filled in and then create the client with it. - auto new_schema = mindspore::make_unique(); + auto new_schema = std::make_unique(); new_schema->set_dataset_type(in_type); RETURN_IF_NOT_OK(CreateStorageClientSwitch(std::move(new_schema), store_op, out_client)); return Status::OK(); @@ -147,7 +146,7 @@ Status StorageClient::AssignDatasetLayout(uint32_t num_rows, // In: Th // The current schema was just an empty one with only the dataset field populated. // Let's copy construct a new one that will be a copy of the input schema (releasing the old // one) and then set the number of rows that the user requested. 
- data_schema_ = mindspore::make_unique(schema); + data_schema_ = std::make_unique(schema); CHECK_FAIL_RETURN_UNEXPECTED(num_rows <= MAX_INTEGER_INT32, "numRows exceeds the boundary numRows>2147483647"); num_rows_in_dataset_ = num_rows; diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/storage_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/storage_op.cc index 9da27eac04..2ca957ae6d 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/storage_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/storage_op.cc @@ -303,7 +303,7 @@ Status StorageOp::init() { // For simplicity, we'll make both of them 3 so they are the same size. int32_t action_queue_size = (buffers_needed / num_workers_) + 1; for (int32_t i = 0; i < num_workers_; ++i) { - auto new_queue = mindspore::make_unique>(action_queue_size); + auto new_queue = std::make_unique>(action_queue_size); action_queue_.push_back(std::move(new_queue)); } } @@ -483,10 +483,10 @@ Status StorageOp::operator()() { // Post the control message to tell the workers to stop waiting on action queue // because we are done! RETURN_IF_NOT_OK(this->PostEndOfData()); - std::unique_ptr eoeBuffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOE); + std::unique_ptr eoeBuffer = std::make_unique(0, DataBuffer::kDeBFlagEOE); RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoeBuffer))); MS_LOG(INFO) << "StorageOp master: Flow end-of-data eof message."; - std::unique_ptr eofBuffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOF); + std::unique_ptr eofBuffer = std::make_unique(0, DataBuffer::kDeBFlagEOF); RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eofBuffer))); MS_LOG(INFO) << "StorageOp master: Main execution loop complete."; done = true; // while loop exit @@ -496,7 +496,7 @@ Status StorageOp::operator()() { // RepeatOp above us somewhere in the tree will re-init us with the data to fetch again // once it gets the end-of-epoch message. 
MS_LOG(INFO) << "StorageOp master: Flow end-of-epoch eoe message."; - std::unique_ptr eoe_buffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOE); + std::unique_ptr eoe_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOE); RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer))); // reset our buffer count and go to loop again. diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/tf_buffer.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/tf_buffer.cc index 766c2149c4..372dcd2c1c 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/tf_buffer.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/tf_buffer.cc @@ -27,7 +27,6 @@ #include "dataset/core/data_type.h" #include "dataset/engine/datasetops/source/storage_client.h" #include "dataset/engine/data_schema.h" -#include "dataset/util/make_unique.h" namespace mindspore { namespace dataset { @@ -72,7 +71,7 @@ Status TFBuffer::Load() { } // Construct the Tensor table for this buffer. - tensor_table_ = mindspore::make_unique(); + tensor_table_ = std::make_unique(); // At each position in the tensor table, instantiate the shared pointer to it's Tensor. 
uint32_t row = 0; @@ -272,7 +271,7 @@ Status TFBuffer::LoadFloatList(const ColDescriptor ¤t_col, const dataengin // Identify how many values we have and then create a local array of these // to deserialize into *num_elements = float_list.value_size(); - *float_array = mindspore::make_unique(*num_elements); + *float_array = std::make_unique(*num_elements); for (int i = 0; i < float_list.value_size(); i++) { (*float_array)[i] = float_list.value(i); } @@ -294,7 +293,7 @@ Status TFBuffer::LoadIntList(const ColDescriptor ¤t_col, const dataengine: // Identify how many values we have and then create a local array of these // to deserialize into *num_elements = int64_list.value_size(); - *int_array = mindspore::make_unique(*num_elements); + *int_array = std::make_unique(*num_elements); for (int i = 0; i < int64_list.value_size(); i++) { (*int_array)[i] = int64_list.value(i); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc index c872c02015..0764d7e0ad 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/tf_reader_op.cc @@ -36,7 +36,6 @@ #include "dataset/engine/db_connector.h" #include "dataset/engine/execution_tree.h" #include "dataset/engine/jagged_connector.h" -#include "dataset/util/make_unique.h" #include "dataset/util/path.h" #include "dataset/util/queue.h" #include "dataset/util/random.h" @@ -54,7 +53,7 @@ TFReaderOp::Builder::Builder() builder_op_connector_size_ = config_manager->op_connector_size(); builder_rows_per_buffer_ = config_manager->rows_per_buffer(); builder_shuffle_files_ = false; - builder_data_schema_ = make_unique(); + builder_data_schema_ = std::make_unique(); } Status TFReaderOp::Builder::ValidateInputs() const { @@ -103,7 +102,7 @@ TFReaderOp::TFReaderOp(int32_t num_workers, int32_t worker_connector_size, int64 finished_reading_dataset_(false), 
shuffle_files_(shuffle_files), data_schema_(std::move(data_schema)), - filename_index_(make_unique()), + filename_index_(std::make_unique()), load_io_block_queue_(true), load_jagged_connector_(true), num_rows_(0), @@ -129,7 +128,7 @@ Status TFReaderOp::Init() { // parallel op base. RETURN_IF_NOT_OK(ParallelOp::CreateWorkerConnector(worker_connector_size_)); - jagged_buffer_connector_ = mindspore::make_unique(num_workers_, 1, worker_connector_size_); + jagged_buffer_connector_ = std::make_unique(num_workers_, 1, worker_connector_size_); // temporary: make size large enough to hold all files + EOE to avoid hangs int32_t safe_queue_size = static_cast(std::ceil(dataset_files_list_.size() / num_workers_)) + 1; @@ -229,7 +228,7 @@ Status TFReaderOp::operator()() { } // all workers finished reading for this epoch, and we have read all the data from all workers - std::unique_ptr eoe_buffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOE); + std::unique_ptr eoe_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOE); RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eoe_buffer))); if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat)) { @@ -241,7 +240,7 @@ Status TFReaderOp::operator()() { } } - std::unique_ptr eof_buffer = mindspore::make_unique(0, DataBuffer::kDeBFlagEOF); + std::unique_ptr eof_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOF); RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(eof_buffer))); RETURN_IF_NOT_OK(PostEndOfData()); @@ -274,7 +273,7 @@ Status TFReaderOp::WorkerEntry(int32_t worker_id) { MS_LOG(INFO) << "TFReader operator worker " << worker_id << " loaded file " << filename << "."; } } else { - std::unique_ptr eoe_buffer = mindspore::make_unique(1, DataBuffer::kDeBFlagEOE); + std::unique_ptr eoe_buffer = std::make_unique(1, DataBuffer::kDeBFlagEOE); RETURN_IF_NOT_OK(jagged_buffer_connector_->Add(worker_id, std::move(eoe_buffer))); } @@ -288,7 +287,7 @@ Status TFReaderOp::WorkerEntry(int32_t worker_id) { // 
When the worker pops this control indicator, it will shut itself down gracefully. Status TFReaderOp::PostEndOfData() { for (int i = 0; i < num_workers_; ++i) { - std::unique_ptr eof = mindspore::make_unique(IOBlock::kDeIoBlockFlagEof); + std::unique_ptr eof = std::make_unique(IOBlock::kDeIoBlockFlagEof); RETURN_IF_NOT_OK(PushIoBlockQueue(i, std::move(eof))); } @@ -299,7 +298,7 @@ Status TFReaderOp::PostEndOfData() { // pops this control indicator, it will wait until the next epoch starts and then resume execution. Status TFReaderOp::PostEndOfEpoch(int32_t queue_index) { for (int i = 0; i < num_workers_; ++i) { - std::unique_ptr eoe = mindspore::make_unique(IOBlock::kDeIoBlockFlagEoe); + std::unique_ptr eoe = std::make_unique(IOBlock::kDeIoBlockFlagEoe); RETURN_IF_NOT_OK(PushIoBlockQueue((queue_index + i) % num_workers_, std::move(eoe))); } @@ -358,7 +357,7 @@ Status TFReaderOp::FillIOBlockShuffle(const std::vector &i_keys) { } if (!equal_rows_per_shard_) { if (key_index++ % num_devices_ == device_id_) { - auto ioBlock = make_unique(*it, kInvalidOffset, kInvalidOffset, IOBlock::kDeIoBlockNone); + auto ioBlock = std::make_unique(*it, kInvalidOffset, kInvalidOffset, IOBlock::kDeIoBlockNone); RETURN_IF_NOT_OK(PushIoBlockQueue(queue_index, std::move(ioBlock))); queue_index = (queue_index + 1) % num_workers_; } @@ -367,7 +366,7 @@ Status TFReaderOp::FillIOBlockShuffle(const std::vector &i_keys) { auto file_it = filename_index_->Search(*it); std::string file_name = file_it.value(); if (NeedPushFileToblockQueue(file_name, &start_offset, &end_offset, pre_count)) { - auto ioBlock = make_unique(*it, start_offset, end_offset, IOBlock::kDeIoBlockNone); + auto ioBlock = std::make_unique(*it, start_offset, end_offset, IOBlock::kDeIoBlockNone); RETURN_IF_NOT_OK(PushIoBlockQueue(queue_index, std::move(ioBlock))); MS_LOG(DEBUG) << "File name " << *it << " start offset " << start_offset << " end_offset " << end_offset; queue_index = (queue_index + 1) % num_workers_; @@ -404,14 
+403,15 @@ Status TFReaderOp::FillIOBlockNoShuffle() { } if (!equal_rows_per_shard_) { if (key_index++ % num_devices_ == device_id_) { - auto ioBlock = make_unique(it.key(), kInvalidOffset, kInvalidOffset, IOBlock::kDeIoBlockNone); + auto ioBlock = + std::make_unique(it.key(), kInvalidOffset, kInvalidOffset, IOBlock::kDeIoBlockNone); RETURN_IF_NOT_OK(PushIoBlockQueue(queue_index, std::move(ioBlock))); queue_index = (queue_index + 1) % num_workers_; } } else { std::string file_name = it.value(); if (NeedPushFileToblockQueue(file_name, &start_offset, &end_offset, pre_count)) { - auto ioBlock = make_unique(it.key(), start_offset, end_offset, IOBlock::kDeIoBlockNone); + auto ioBlock = std::make_unique(it.key(), start_offset, end_offset, IOBlock::kDeIoBlockNone); RETURN_IF_NOT_OK(PushIoBlockQueue(queue_index, std::move(ioBlock))); queue_index = (queue_index + 1) % num_workers_; } @@ -490,14 +490,13 @@ Status TFReaderOp::LoadFile(const std::string &filename, const int64_t start_off int64_t rows_read = 0; int64_t rows_total = 0; - std::unique_ptr current_buffer = - mindspore::make_unique(0, DataBuffer::BufferFlags::kDeBFlagNone); + std::unique_ptr current_buffer = std::make_unique(0, DataBuffer::BufferFlags::kDeBFlagNone); std::unordered_map column_name_map; for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) { column_name_map[data_schema_->column(i).name()] = i; } current_buffer->set_column_name_map(column_name_map); - std::unique_ptr new_tensor_table = make_unique(); + std::unique_ptr new_tensor_table = std::make_unique(); while (reader.peek() != EOF) { if (!load_jagged_connector_) { @@ -532,9 +531,9 @@ Status TFReaderOp::LoadFile(const std::string &filename, const int64_t start_off current_buffer->set_tensor_table(std::move(new_tensor_table)); RETURN_IF_NOT_OK(jagged_buffer_connector_->Add(worker_id, std::move(current_buffer))); - current_buffer = make_unique(0, DataBuffer::BufferFlags::kDeBFlagNone); + current_buffer = std::make_unique(0, 
DataBuffer::BufferFlags::kDeBFlagNone); current_buffer->set_column_name_map(column_name_map); - new_tensor_table = make_unique(); + new_tensor_table = std::make_unique(); rows_read = 0; } } @@ -742,7 +741,7 @@ Status TFReaderOp::LoadFloatList(const ColDescriptor ¤t_col, const dataeng // Identify how many values we have and then create a local array of these // to deserialize into *num_elements = float_list.value_size(); - *float_array = mindspore::make_unique(*num_elements); + *float_array = std::make_unique(*num_elements); for (int i = 0; i < float_list.value_size(); ++i) { (*float_array)[i] = float_list.value(i); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc index d3785f4660..71b4c47cf5 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc @@ -38,7 +38,7 @@ Status VOCOp::Builder::Build(std::shared_ptr *ptr) { if (builder_sampler_ == nullptr) { builder_sampler_ = std::make_shared(); } - builder_schema_ = make_unique(); + builder_schema_ = std::make_unique(); RETURN_IF_NOT_OK( builder_schema_->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); RETURN_IF_NOT_OK( @@ -85,7 +85,7 @@ Status VOCOp::TraverseSampleIds(const std::shared_ptr &sample_ids, std:: row_cnt_++; if (row_cnt_ % rows_per_buffer_ == 0) { RETURN_IF_NOT_OK(io_block_queues_[buf_cnt_++ % num_workers_]->Add( - make_unique(IOBlock(*keys, IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(*keys, IOBlock::kDeIoBlockNone)))); keys->clear(); } } @@ -110,21 +110,21 @@ Status VOCOp::operator()() { } if (keys.empty() == false) { RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add( - make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); + std::make_unique(IOBlock(keys, IOBlock::kDeIoBlockNone)))); } if (!BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, 
kDeOpLastRepeat)) { - std::unique_ptr eoe_block = make_unique(IOBlock::kDeIoBlockFlagEoe); - std::unique_ptr eof_block = make_unique(IOBlock::kDeIoBlockFlagEof); + std::unique_ptr eoe_block = std::make_unique(IOBlock::kDeIoBlockFlagEoe); + std::unique_ptr eof_block = std::make_unique(IOBlock::kDeIoBlockFlagEof); RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::move(eoe_block))); RETURN_IF_NOT_OK(io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::move(eof_block))); for (int32_t i = 0; i < num_workers_; i++) { RETURN_IF_NOT_OK( - io_block_queues_[i]->Add(make_unique(std::vector(), IOBlock::kDeIoBlockNone))); + io_block_queues_[i]->Add(std::make_unique(std::vector(), IOBlock::kDeIoBlockNone))); } return Status::OK(); } else { RETURN_IF_NOT_OK( - io_block_queues_[(buf_cnt_++) % num_workers_]->Add(make_unique(IOBlock::kDeIoBlockFlagEoe))); + io_block_queues_[(buf_cnt_++) % num_workers_]->Add(std::make_unique(IOBlock::kDeIoBlockFlagEoe))); RETURN_IF_NOT_OK(wp_.Wait()); wp_.Clear(); RETURN_IF_NOT_OK(sampler_->GetNextBuffer(&sampler_buffer)); @@ -164,7 +164,7 @@ Status VOCOp::LoadTensorRow(const std::string &image_id, TensorRow *trow) { } Status VOCOp::LoadBuffer(const std::vector &keys, std::unique_ptr *db) { - std::unique_ptr deq = make_unique(); + std::unique_ptr deq = std::make_unique(); TensorRow trow; for (const uint64_t &key : keys) { RETURN_IF_NOT_OK(this->LoadTensorRow(image_ids_[key], &trow)); @@ -182,15 +182,15 @@ Status VOCOp::WorkerEntry(int32_t worker_id) { RETURN_IF_NOT_OK(io_block_queues_[worker_id]->PopFront(&io_block)); while (io_block != nullptr) { if (io_block->eoe() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, make_unique(0, DataBuffer::kDeBFlagEOE))); + RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::make_unique(0, DataBuffer::kDeBFlagEOE))); buffer_id = worker_id; } else if (io_block->eof() == true) { - RETURN_IF_NOT_OK(out_connector_->Add(worker_id, (make_unique(0, DataBuffer::kDeBFlagEOF)))); + 
RETURN_IF_NOT_OK(out_connector_->Add(worker_id, (std::make_unique(0, DataBuffer::kDeBFlagEOF)))); } else { std::vector keys; RETURN_IF_NOT_OK(io_block->GetKeys(&keys)); if (keys.empty() == true) return Status::OK(); - std::unique_ptr db = make_unique(buffer_id, DataBuffer::kDeBFlagNone); + std::unique_ptr db = std::make_unique(buffer_id, DataBuffer::kDeBFlagNone); RETURN_IF_NOT_OK(LoadBuffer(keys, &db)); RETURN_IF_NOT_OK(out_connector_->Add(worker_id, std::move(db))); buffer_id += num_workers_; diff --git a/mindspore/ccsrc/dataset/engine/datasetops/zip_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/zip_op.cc index 716c853488..ec771740c1 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/zip_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/zip_op.cc @@ -65,13 +65,13 @@ Status ZipOp::operator()() { // initialize the iterators for (int32_t i = 0; i < children_num_; ++i) { // magic number 0 since Zip is not a parallel Op - child_iterators_.push_back(mindspore::make_unique(this, 0, i)); + child_iterators_.push_back(std::make_unique(this, 0, i)); } // Loop until eof is true while (!eof_) { // Create tensor table and prepare it by fetching and packing the first zipped row into it. - std::unique_ptr curr_table = mindspore::make_unique(); + std::unique_ptr curr_table = std::make_unique(); RETURN_IF_NOT_OK(prepare(curr_table.get())); // If an eof got picked up during the above prepare, then we're done @@ -81,7 +81,7 @@ Status ZipOp::operator()() { while (!draining_) { // 1. If a previous loop iteration sent the current table out, then create a new one. if (curr_table == nullptr) { - curr_table = mindspore::make_unique(); + curr_table = std::make_unique(); } // 2 fill the table. 
Note: draining mode might get turned on if any of the child inputs were done @@ -89,8 +89,7 @@ Status ZipOp::operator()() { // 3 create and update buffer and send it to the out connector if (!curr_table->empty()) { - std::unique_ptr curr_buffer = - mindspore::make_unique(buffer_id_, DataBuffer::kDeBFlagNone); + std::unique_ptr curr_buffer = std::make_unique(buffer_id_, DataBuffer::kDeBFlagNone); curr_buffer->set_tensor_table(std::move(curr_table)); curr_buffer->set_column_name_map(col_name_id_map_); MS_LOG(DEBUG) << "Zip operator finished one buffer, pushing, rows " << curr_buffer->NumRows() << ", cols " @@ -105,15 +104,14 @@ Status ZipOp::operator()() { MS_LOG(DEBUG) << "Zip operator is now draining child inputs."; RETURN_IF_NOT_OK(drainPipeline()); // Now that we have drained child inputs, send the eoe up. - RETURN_IF_NOT_OK( - out_connector_->Add(0, std::move(mindspore::make_unique(0, DataBuffer::kDeBFlagEOE)))); + RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique(0, DataBuffer::kDeBFlagEOE)))); } } // 5 handle eof // propagate eof here. MS_LOG(INFO) << "Zip operator got EOF, propagating."; - RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(mindspore::make_unique(0, DataBuffer::kDeBFlagEOF)))); + RETURN_IF_NOT_OK(out_connector_->Add(0, std::move(std::make_unique(0, DataBuffer::kDeBFlagEOF)))); return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/db_connector.h b/mindspore/ccsrc/dataset/engine/db_connector.h index 243e77e6ac..7ea9837c44 100644 --- a/mindspore/ccsrc/dataset/engine/db_connector.h +++ b/mindspore/ccsrc/dataset/engine/db_connector.h @@ -65,7 +65,7 @@ class DbConnector : public Connector> { RETURN_IF_NOT_OK(cv_.Wait(&lk, [this, worker_id]() { return expect_consumer_ == worker_id; })); // Once an EOF message is encountered this flag will be set and we can return early. 
if (end_of_file_) { - *result = mindspore::make_unique(0, DataBuffer::kDeBFlagEOF); + *result = std::make_unique(0, DataBuffer::kDeBFlagEOF); } else { RETURN_IF_NOT_OK(queues_[pop_from_]->PopFront(result)); if (*result == nullptr) { diff --git a/mindspore/ccsrc/dataset/engine/execution_tree.cc b/mindspore/ccsrc/dataset/engine/execution_tree.cc index 3dbeaa5ed1..20fcb836c5 100644 --- a/mindspore/ccsrc/dataset/engine/execution_tree.cc +++ b/mindspore/ccsrc/dataset/engine/execution_tree.cc @@ -24,7 +24,7 @@ namespace mindspore { namespace dataset { // Constructor ExecutionTree::ExecutionTree() : id_count_(0) { - tg_ = mindspore::make_unique(); + tg_ = std::make_unique(); tree_state_ = kDeTStateInit; prepare_flags_ = kDePrepNone; } diff --git a/mindspore/ccsrc/dataset/kernels/image/image_utils.cc b/mindspore/ccsrc/dataset/kernels/image/image_utils.cc index 0412f07636..8735cf7a05 100644 --- a/mindspore/ccsrc/dataset/kernels/image/image_utils.cc +++ b/mindspore/ccsrc/dataset/kernels/image/image_utils.cc @@ -24,7 +24,6 @@ #include "dataset/core/cv_tensor.h" #include "dataset/core/tensor.h" #include "dataset/core/tensor_shape.h" -#include "dataset/util/make_unique.h" #include "dataset/util/random.h" #define MAX_INT_PRECISION 16777216 // float int precision is 16777216 @@ -376,7 +375,7 @@ Status HwcToChw(std::shared_ptr input, std::shared_ptr *output) int width = input_cv->shape()[1]; int num_channels = input_cv->shape()[2]; - auto output_cv = mindspore::make_unique(TensorShape{num_channels, height, width}, input_cv->type()); + auto output_cv = std::make_unique(TensorShape{num_channels, height, width}, input_cv->type()); for (int i = 0; i < num_channels; ++i) { cv::Mat mat; RETURN_IF_NOT_OK(output_cv->Mat({i}, &mat)); diff --git a/mindspore/ccsrc/dataset/kernels/py_func_op.cc b/mindspore/ccsrc/dataset/kernels/py_func_op.cc index 69bd3443c4..c9e5d5b169 100644 --- a/mindspore/ccsrc/dataset/kernels/py_func_op.cc +++ b/mindspore/ccsrc/dataset/kernels/py_func_op.cc @@ -20,7 
+20,6 @@ #include "dataset/core/tensor.h" #include "dataset/kernels/tensor_op.h" -#include "dataset/util/make_unique.h" #include "dataset/util/status.h" namespace mindspore { diff --git a/mindspore/ccsrc/dataset/util/arena.cc b/mindspore/ccsrc/dataset/util/arena.cc index 856f7fef24..68673529ff 100644 --- a/mindspore/ccsrc/dataset/util/arena.cc +++ b/mindspore/ccsrc/dataset/util/arena.cc @@ -16,7 +16,6 @@ #include "dataset/util/arena.h" #include #include -#include "dataset/util/make_unique.h" #include "dataset/util/system_pool.h" #include "dataset/util/de_error.h" #include "./securec.h" diff --git a/mindspore/ccsrc/dataset/util/circular_pool.cc b/mindspore/ccsrc/dataset/util/circular_pool.cc index f6e43f35bf..92b169c94a 100644 --- a/mindspore/ccsrc/dataset/util/circular_pool.cc +++ b/mindspore/ccsrc/dataset/util/circular_pool.cc @@ -18,10 +18,8 @@ #include #include #include - #include "./securec.h" - -#include "dataset/util/make_unique.h" +#include "dataset/util/de_error.h" #include "dataset/util/system_pool.h" namespace mindspore { diff --git a/mindspore/ccsrc/dataset/util/de_error.h b/mindspore/ccsrc/dataset/util/de_error.h index 07d7c125f7..d4988c58db 100644 --- a/mindspore/ccsrc/dataset/util/de_error.h +++ b/mindspore/ccsrc/dataset/util/de_error.h @@ -16,6 +16,13 @@ #ifndef DATASET_UTIL_DE_ERROR_H_ #define DATASET_UTIL_DE_ERROR_H_ +#ifdef DEBUG +#include +#define DS_ASSERT(f) assert(f) +#else +#define DS_ASSERT(f) ((void)0) +#endif + #include #include "utils/error_code.h" diff --git a/mindspore/ccsrc/dataset/util/list.h b/mindspore/ccsrc/dataset/util/list.h index f01201e34d..5a08f4514e 100644 --- a/mindspore/ccsrc/dataset/util/list.h +++ b/mindspore/ccsrc/dataset/util/list.h @@ -18,8 +18,7 @@ #include #include - -#include "dataset/util/make_unique.h" +#include "dataset/util/de_error.h" namespace mindspore { namespace dataset { diff --git a/mindspore/ccsrc/dataset/util/lock.cc b/mindspore/ccsrc/dataset/util/lock.cc index 7e92a1e53f..13a43e3e84 100644 --- 
a/mindspore/ccsrc/dataset/util/lock.cc +++ b/mindspore/ccsrc/dataset/util/lock.cc @@ -14,6 +14,7 @@ * limitations under the License. */ #include "dataset/util/lock.h" +#include "dataset/util/de_error.h" namespace mindspore { namespace dataset { diff --git a/mindspore/ccsrc/dataset/util/lock.h b/mindspore/ccsrc/dataset/util/lock.h index 8fef6a143a..9492d34bdf 100644 --- a/mindspore/ccsrc/dataset/util/lock.h +++ b/mindspore/ccsrc/dataset/util/lock.h @@ -19,7 +19,6 @@ #include #include #include -#include "dataset/util/make_unique.h" namespace mindspore { namespace dataset { diff --git a/mindspore/ccsrc/dataset/util/make_unique.h b/mindspore/ccsrc/dataset/util/make_unique.h deleted file mode 100644 index 2fe0bf4550..0000000000 --- a/mindspore/ccsrc/dataset/util/make_unique.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef DATASET_UTIL_MAKE_UNIQUE_H_ -#define DATASET_UTIL_MAKE_UNIQUE_H_ - -#ifdef DEBUG -#include -#define DS_ASSERT(f) assert(f) -#else -#define DS_ASSERT(f) ((void)0) -#endif - -#include -#include -#include -#include "dataset/util/de_error.h" -#include "utils/log_adapter.h" - -namespace mindspore { -using std::make_unique; -} // namespace mindspore - -#endif // DATASET_UTIL_MAKE_UNIQUE_H_ diff --git a/mindspore/ccsrc/dataset/util/queue.h b/mindspore/ccsrc/dataset/util/queue.h index 4048deb86b..f0b087cf6d 100644 --- a/mindspore/ccsrc/dataset/util/queue.h +++ b/mindspore/ccsrc/dataset/util/queue.h @@ -212,7 +212,7 @@ class QueueList { void Init(int num_queues, int capacity) { queue_list_.reserve(num_queues); for (int i = 0; i < num_queues; i++) { - queue_list_.emplace_back(mindspore::make_unique>(capacity)); + queue_list_.emplace_back(std::make_unique>(capacity)); } } diff --git a/mindspore/ccsrc/dataset/util/task.h b/mindspore/ccsrc/dataset/util/task.h index aaf2f80a3d..d6149caec8 100644 --- a/mindspore/ccsrc/dataset/util/task.h +++ b/mindspore/ccsrc/dataset/util/task.h @@ -27,7 +27,6 @@ #include #include #include "dataset/util/de_error.h" -#include "dataset/util/make_unique.h" #include "dataset/util/intrp_resource.h" #include "dataset/util/list.h" #include "dataset/util/memory_pool.h" diff --git a/mindspore/ccsrc/device/gpu/blocking_queue.cc b/mindspore/ccsrc/device/gpu/blocking_queue.cc index 7417115ae9..c36b1cdbf5 100644 --- a/mindspore/ccsrc/device/gpu/blocking_queue.cc +++ b/mindspore/ccsrc/device/gpu/blocking_queue.cc @@ -17,7 +17,6 @@ #include "device/gpu/blocking_queue.h" #include #include "device/gpu/gpu_common.h" -#include "dataset/util/make_unique.h" #include "common/utils.h" namespace mindspore { @@ -32,7 +31,7 @@ GpuQueue::GpuQueue(void *addr, size_t feature_size, size_t label_size, size_t ca stream_(0), node_info_(nullptr) { CHECK_CUDA_RET_WITH_ERROR(cudaStreamCreate(&stream_), "Cuda Create Stream Failed"); - node_info_ = 
mindspore::make_unique(capacity); + node_info_ = std::make_unique(capacity); } GpuQueue::~GpuQueue() { buffer_ = nullptr; } diff --git a/mindspore/ccsrc/kernel/gpu/math/bias_add_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/math/bias_add_gpu_kernel.h index 265180afe6..90609c3be5 100644 --- a/mindspore/ccsrc/kernel/gpu/math/bias_add_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/math/bias_add_gpu_kernel.h @@ -23,7 +23,6 @@ #include #include "kernel/gpu/gpu_kernel.h" #include "kernel/gpu/gpu_kernel_factory.h" -#include "dataset/util/make_unique.h" #include "kernel/gpu/kernel_constants.h" namespace mindspore { @@ -74,8 +73,8 @@ class BiasAddGpuKernel : public GpuKernel { // Expand to 4 dims for cudnnSetTensorNdDescriptorEx. auto cudnn_dims = std::max(num_dims, 4UL); - std::unique_ptr x_dims = mindspore::make_unique(cudnn_dims); - std::unique_ptr b_dims = mindspore::make_unique(cudnn_dims); + std::unique_ptr x_dims = std::make_unique(cudnn_dims); + std::unique_ptr b_dims = std::make_unique(cudnn_dims); for (size_t i = 0; i < cudnn_dims; i++) { x_dims[i] = (i < num_dims) ? SizeToInt(x_shape[i]) : 1; b_dims[i] = (i == pos) ? SizeToInt(x_shape[i]) : 1; diff --git a/mindspore/ccsrc/kernel/gpu/nn/bias_add_grad_gpu_kenel.h b/mindspore/ccsrc/kernel/gpu/nn/bias_add_grad_gpu_kenel.h index b0e8102ee3..fd73f378d8 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/bias_add_grad_gpu_kenel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/bias_add_grad_gpu_kenel.h @@ -26,7 +26,6 @@ #include "kernel/gpu/gpu_kernel.h" #include "kernel/gpu/gpu_kernel_factory.h" #include "kernel/gpu/kernel_constants.h" -#include "dataset/util/make_unique.h" namespace mindspore { namespace kernel { @@ -84,8 +83,8 @@ class BiasAddGradGpuKernel : public GpuKernel { // Expand to 4 dims for cudnnSetTensorNdDescriptorEx. 
auto cudnn_dims = std::max(num_dims, 4UL); - std::unique_ptr dy_dims = mindspore::make_unique(cudnn_dims); - std::unique_ptr db_dims = mindspore::make_unique(cudnn_dims); + std::unique_ptr dy_dims = std::make_unique(cudnn_dims); + std::unique_ptr db_dims = std::make_unique(cudnn_dims); for (size_t i = 0; i < cudnn_dims; i++) { dy_dims[i] = (i < num_dims) ? SizeToInt(dy_shape[i]) : 1; db_dims[i] = (i == pos) ? SizeToInt(dy_shape[i]) : 1; diff --git a/mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h index 51a2da8574..c3e839b9c5 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/lstm_gpu_kernel.h @@ -22,7 +22,6 @@ #include #include "kernel/gpu/gpu_kernel.h" #include "kernel/gpu/gpu_kernel_factory.h" -#include "dataset/util/make_unique.h" #include "kernel/gpu/kernel_constants.h" namespace mindspore { @@ -144,8 +143,8 @@ class LstmGpuKernel : public GpuKernel { int x_dims[3]{batch_size_, input_size_, 1}; int y_dims[3]{batch_size_, hidden_size_ * (bidirectional_ ? 
2 : 1), 1}; - x_desc_ = mindspore::make_unique(seq_len_); - y_desc_ = mindspore::make_unique(seq_len_); + x_desc_ = std::make_unique(seq_len_); + y_desc_ = std::make_unique(seq_len_); for (size_t i = 0; i < IntToSize(seq_len_); ++i) { CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&x_desc_[i]), "create x_desc failed"); diff --git a/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h index a60ab78f7d..b12fa3bea5 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_data_gpu_kernel.h @@ -23,7 +23,6 @@ #include "kernel/gpu/gpu_kernel.h" #include "kernel/gpu/gpu_kernel_factory.h" #include "kernel/gpu/kernel_constants.h" -#include "dataset/util/make_unique.h" namespace mindspore { namespace kernel { @@ -212,9 +211,9 @@ class LstmGradDataGpuKernel : public GpuKernel { int x_dims[3]{batch_size_, input_size_, 1}; int y_dims[3]{batch_size_, hidden_size_ * (bidirectional_ ? 
2 : 1), 1}; - dx_desc_ = mindspore::make_unique(seq_len_); - y_desc_ = mindspore::make_unique(seq_len_); - dy_desc_ = mindspore::make_unique(seq_len_); + dx_desc_ = std::make_unique(seq_len_); + y_desc_ = std::make_unique(seq_len_); + dy_desc_ = std::make_unique(seq_len_); for (size_t i = 0; i < IntToSize(seq_len_); ++i) { CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&dx_desc_[i]), "create x_desc failed"); diff --git a/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h index b28736cc96..e081b9d070 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/lstm_grad_weight_gpu_kernel.h @@ -22,7 +22,6 @@ #include #include "kernel/gpu/gpu_kernel.h" #include "kernel/gpu/gpu_kernel_factory.h" -#include "dataset/util/make_unique.h" #include "kernel/gpu/kernel_constants.h" namespace mindspore { namespace kernel { @@ -169,8 +168,8 @@ class LstmGradWeightGpuKernel : public GpuKernel { int x_dims[3]{batch_size_, input_size_, 1}; int y_dims[3]{batch_size_, hidden_size_ * (bidirectional_ ? 
2 : 1), 1}; - x_desc_ = mindspore::make_unique(seq_len_); - y_desc_ = mindspore::make_unique(seq_len_); + x_desc_ = std::make_unique(seq_len_); + y_desc_ = std::make_unique(seq_len_); for (size_t i = 0; i < IntToSize(seq_len_); ++i) { CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&x_desc_[i]), "create x_desc failed"); diff --git a/tests/ut/cpp/dataset/celeba_op_test.cc b/tests/ut/cpp/dataset/celeba_op_test.cc index 69314771a3..35be4d7378 100644 --- a/tests/ut/cpp/dataset/celeba_op_test.cc +++ b/tests/ut/cpp/dataset/celeba_op_test.cc @@ -116,7 +116,7 @@ TEST_F(MindDataTestCelebaDataset, TestCelebaRepeat) { TEST_F(MindDataTestCelebaDataset, TestSubsetRandomSamplerCeleba) { std::vector indices({1}); - std::unique_ptr sampler = mindspore::make_unique(indices); + std::unique_ptr sampler = std::make_unique(indices); uint32_t expect_labels[1][40] = {{0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,0,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1}}; std::string dir = datasets_root_path_ + "/testCelebAData/"; uint32_t count = 0; diff --git a/tests/ut/cpp/dataset/cifar_op_test.cc b/tests/ut/cpp/dataset/cifar_op_test.cc index 0cd1db65b5..dcbea83df4 100644 --- a/tests/ut/cpp/dataset/cifar_op_test.cc +++ b/tests/ut/cpp/dataset/cifar_op_test.cc @@ -92,7 +92,7 @@ TEST_F(MindDataTestCifarOp, TestSequentialSamplerCifar10) { TEST_F(MindDataTestCifarOp, TestRandomSamplerCifar10) { uint32_t original_seed = GlobalContext::config_manager()->seed(); GlobalContext::config_manager()->set_seed(0); - std::unique_ptr sampler = mindspore::make_unique(true, 12); + std::unique_ptr sampler = std::make_unique(true, 12); std::string folder_path = datasets_root_path_ + "/testCifar10Data/"; auto tree = Build({Cifarop(16, 2, 32, folder_path, std::move(sampler), 100)}); tree->Prepare(); diff --git a/tests/ut/cpp/dataset/image_folder_op_test.cc b/tests/ut/cpp/dataset/image_folder_op_test.cc index 5b118a629a..e149e687c6 100644 --- a/tests/ut/cpp/dataset/image_folder_op_test.cc +++ 
b/tests/ut/cpp/dataset/image_folder_op_test.cc @@ -138,7 +138,7 @@ TEST_F(MindDataTestImageFolderSampler, TestRandomImageFolder) { TEST_F(MindDataTestImageFolderSampler, TestRandomSamplerImageFolder) { int32_t original_seed = GlobalContext::config_manager()->seed(); GlobalContext::config_manager()->set_seed(0); - std::unique_ptr sampler = mindspore::make_unique(true, 12); + std::unique_ptr sampler = std::make_unique(true, 12); int32_t res[] = {2, 2, 2, 3, 2, 3, 2, 3, 1, 2, 2, 1}; // ground truth label std::string folder_path = datasets_root_path_ + "/testPK/data"; auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler))}); @@ -200,7 +200,7 @@ TEST_F(MindDataTestImageFolderSampler, TestSequentialImageFolderWithRepeatBatch) TEST_F(MindDataTestImageFolderSampler, TestSubsetRandomSamplerImageFolder) { // id range 0 - 10 is label 0, and id range 11 - 21 is label 1 std::vector indices({0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 11}); - std::unique_ptr sampler = mindspore::make_unique(indices); + std::unique_ptr sampler = std::make_unique(indices); std::string folder_path = datasets_root_path_ + "/testPK/data"; // Expect 6 samples for label 0 and 1 int res[2] = {6, 6}; @@ -238,7 +238,7 @@ TEST_F(MindDataTestImageFolderSampler, TestWeightedRandomSamplerImageFolder) { // create sampler with replacement = replacement std::unique_ptr sampler = - mindspore::make_unique(weights, num_samples, true, samples_per_buffer); + std::make_unique(weights, num_samples, true, samples_per_buffer); std::string folder_path = datasets_root_path_ + "/testPK/data"; auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler))}); @@ -295,7 +295,7 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderClassIndex) { } TEST_F(MindDataTestImageFolderSampler, TestDistributedSampler) { - std::unique_ptr sampler = mindspore::make_unique(11, 10, false); + std::unique_ptr sampler = std::make_unique(11, 10, false); std::string folder_path = datasets_root_path_ + 
"/testPK/data"; auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler)), Repeat(4)}); tree->Prepare(); @@ -322,7 +322,7 @@ TEST_F(MindDataTestImageFolderSampler, TestDistributedSampler) { } TEST_F(MindDataTestImageFolderSampler, TestPKSamplerImageFolder) { - std::unique_ptr sampler = mindspore::make_unique(3, false, 4); + std::unique_ptr sampler = std::make_unique(3, false, 4); int32_t res[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3}; // ground truth label std::string folder_path = datasets_root_path_ + "/testPK/data"; auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler))}); @@ -431,7 +431,7 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderDatasetSize) { } TEST_F(MindDataTestImageFolderSampler, TestImageFolderSharding1) { - std::unique_ptr sampler = mindspore::make_unique(4, 0, false); + std::unique_ptr sampler = std::make_unique(4, 0, false); std::string folder_path = datasets_root_path_ + "/testPK/data"; // numWrks, rows, conns, path, shuffle, sampler, map, numSamples, decode auto tree = Build({ImageFolder(16, 2, 32, folder_path, false, std::move(sampler), {}, 5)}); @@ -460,7 +460,7 @@ TEST_F(MindDataTestImageFolderSampler, TestImageFolderSharding1) { } TEST_F(MindDataTestImageFolderSampler, TestImageFolderSharding2) { - std::unique_ptr sampler = mindspore::make_unique(4, 3, false); + std::unique_ptr sampler = std::make_unique(4, 3, false); std::string folder_path = datasets_root_path_ + "/testPK/data"; // numWrks, rows, conns, path, shuffle, sampler, map, numSamples, decode auto tree = Build({ImageFolder(16, 16, 32, folder_path, false, std::move(sampler), {}, 12)}); diff --git a/tests/ut/cpp/dataset/manifest_op_test.cc b/tests/ut/cpp/dataset/manifest_op_test.cc index 9e36f8c747..f662f98fc8 100644 --- a/tests/ut/cpp/dataset/manifest_op_test.cc +++ b/tests/ut/cpp/dataset/manifest_op_test.cc @@ -86,7 +86,7 @@ TEST_F(MindDataTestManifest, TestSequentialManifestWithRepeat) { TEST_F(MindDataTestManifest, 
TestSubsetRandomSamplerManifest) { std::vector indices({1}); - std::unique_ptr sampler = mindspore::make_unique(indices); + std::unique_ptr sampler = std::make_unique(indices); std::string file = datasets_root_path_ + "/testManifestData/cpp.json"; // Expect 6 samples for label 0 and 1 auto tree = Build({Manifest(16, 2, 32, file, "train", std::move(sampler))}); diff --git a/tests/ut/cpp/dataset/project_op_test.cc b/tests/ut/cpp/dataset/project_op_test.cc index 1df2ce05bb..484396321c 100644 --- a/tests/ut/cpp/dataset/project_op_test.cc +++ b/tests/ut/cpp/dataset/project_op_test.cc @@ -45,7 +45,7 @@ TEST_F(MindDataTestProjectOp, TestProjectProject) { .SetRowsPerBuffer(16) .SetWorkerConnectorSize(16) .SetNumWorkers(16); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {}); builder.SetDataSchema(std::move(schema)); Status rc = builder.Build(&my_tfreader_op); ASSERT_TRUE(rc.IsOk()); diff --git a/tests/ut/cpp/dataset/stand_alone_samplers_test.cc b/tests/ut/cpp/dataset/stand_alone_samplers_test.cc index c686a9486b..48cc811615 100644 --- a/tests/ut/cpp/dataset/stand_alone_samplers_test.cc +++ b/tests/ut/cpp/dataset/stand_alone_samplers_test.cc @@ -74,7 +74,7 @@ TEST_F(MindDataTestStandAloneSampler, TestDistributedSampler) { std::unique_ptr db; std::shared_ptr tensor; for (int i = 0; i < 6; i++) { - std::unique_ptr sampler = mindspore::make_unique(3, i % 3, (i < 3 ? false : true)); + std::unique_ptr sampler = std::make_unique(3, i % 3, (i < 3 ? 
false : true)); sampler->Init(&mock); sampler->GetNextBuffer(&db); db->GetTensor(&tensor, 0, 0); diff --git a/tests/ut/cpp/dataset/tfReader_op_test.cc b/tests/ut/cpp/dataset/tfReader_op_test.cc index c70d5fb6ee..5fb1f4e909 100644 --- a/tests/ut/cpp/dataset/tfReader_op_test.cc +++ b/tests/ut/cpp/dataset/tfReader_op_test.cc @@ -48,7 +48,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderBasic1) { builder.SetDatasetFilesList({dataset_path}) .SetRowsPerBuffer(16) .SetNumWorkers(16); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {}); builder.SetDataSchema(std::move(schema)); Status rc = builder.Build(&my_tfreader_op); @@ -102,7 +102,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderLargeRowsPerBuffer) { builder.SetDatasetFilesList({dataset_path}) .SetRowsPerBuffer(500) .SetNumWorkers(16); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {}); builder.SetDataSchema(std::move(schema)); Status rc = builder.Build(&my_tfreader_op); @@ -156,7 +156,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderSmallRowsPerBuffer) { builder.SetDatasetFilesList({dataset_path}) .SetRowsPerBuffer(1) .SetNumWorkers(16); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {}); builder.SetDataSchema(std::move(schema)); Status rc = builder.Build(&my_tfreader_op); @@ -211,7 +211,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderLargeQueueSize) { .SetWorkerConnectorSize(1) .SetRowsPerBuffer(16) .SetNumWorkers(16); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {}); 
builder.SetDataSchema(std::move(schema)); Status rc = builder.Build(&my_tfreader_op); @@ -265,7 +265,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderOneThread) { builder.SetDatasetFilesList({dataset_path}) .SetRowsPerBuffer(16) .SetNumWorkers(1); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {}); builder.SetDataSchema(std::move(schema)); Status rc = builder.Build(&my_tfreader_op); @@ -321,7 +321,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderRepeat) { .SetRowsPerBuffer(16) .SetWorkerConnectorSize(16) .SetNumWorkers(16); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {}); builder.SetDataSchema(std::move(schema)); Status rc= builder.Build(&my_tfreader_op); @@ -379,7 +379,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderSchemaConstructor) { std::string dataset_path; dataset_path = datasets_root_path_ + "/testTFTestAllTypes"; - std::unique_ptr data_schema = mindspore::make_unique(); + std::unique_ptr data_schema = std::make_unique(); std::vector columns_to_load; columns_to_load.push_back("col_sint32"); columns_to_load.push_back("col_binary"); @@ -445,7 +445,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderTake1Row) { std::shared_ptr my_tfreader_op; TFReaderOp::Builder builder; builder.SetDatasetFilesList({dataset_path + "/test.data"}).SetRowsPerBuffer(5).SetNumWorkers(16); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema1Row.json", {}); builder.SetDataSchema(std::move(schema)); @@ -503,7 +503,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderTake1Buffer) { std::shared_ptr my_tfreader_op; TFReaderOp::Builder builder; builder.SetDatasetFilesList({dataset_path + 
"/test.data"}).SetRowsPerBuffer(5).SetNumWorkers(16); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema5Rows.json", {}); builder.SetDataSchema(std::move(schema)); @@ -561,7 +561,7 @@ TEST_F(MindDataTestTFReaderOp, TestTFReaderTake7Rows) { std::shared_ptr my_tfreader_op; TFReaderOp::Builder builder; builder.SetDatasetFilesList({dataset_path + "/test.data"}).SetRowsPerBuffer(5).SetNumWorkers(16); - std::unique_ptr schema = mindspore::make_unique(); + std::unique_ptr schema = std::make_unique(); schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema7Rows.json", {}); builder.SetDataSchema(std::move(schema)); From 9d0fde29f4bdad550087fd6a7447b50db6c1bf23 Mon Sep 17 00:00:00 2001 From: Jonathan Yan Date: Sat, 4 Apr 2020 06:48:58 -0400 Subject: [PATCH 116/367] remove ENABLE_MINDRECORD flag --- mindspore/ccsrc/dataset/CMakeLists.txt | 2 -- mindspore/ccsrc/dataset/api/de_pipeline.cc | 12 +++--------- mindspore/ccsrc/dataset/api/de_pipeline.h | 4 ---- mindspore/ccsrc/dataset/api/python_bindings.cc | 6 ------ .../engine/datasetops/source/mindrecord_op.cc | 3 --- .../dataset/engine/datasetops/source/mindrecord_op.h | 2 -- tests/ut/cpp/CMakeLists.txt | 1 - tests/ut/cpp/dataset/mind_record_op_test.cc | 2 -- 8 files changed, 3 insertions(+), 29 deletions(-) diff --git a/mindspore/ccsrc/dataset/CMakeLists.txt b/mindspore/ccsrc/dataset/CMakeLists.txt index d6791f2b9b..477d37051e 100644 --- a/mindspore/ccsrc/dataset/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/CMakeLists.txt @@ -17,8 +17,6 @@ if (ENABLE_TDTQUE) message(STATUS "TDT queue is enabled") endif () -add_definitions(-D ENABLE_MINDRECORD) - # conde coverage # option(ENABLE_COVERAGE "Enable code coverage report" OFF) # if (ENABLE_COVERAGE) diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.cc b/mindspore/ccsrc/dataset/api/de_pipeline.cc index d51204f659..65ec8d30f2 100644 
--- a/mindspore/ccsrc/dataset/api/de_pipeline.cc +++ b/mindspore/ccsrc/dataset/api/de_pipeline.cc @@ -29,11 +29,9 @@ #include "dataset/engine/datasetops/source/manifest_op.h" #include "dataset/engine/datasetops/source/cifar_op.h" #include "dataset/engine/datasetops/source/celeba_op.h" -#ifdef ENABLE_MINDRECORD -#include "./shard_category.h" -#include "./shard_sample.h" -#include "./shard_shuffle.h" -#endif +#include "mindrecord/include/shard_category.h" +#include "mindrecord/include/shard_sample.h" +#include "mindrecord/include/shard_shuffle.h" #include "dataset/util/random.h" #include "dataset/util/status.h" @@ -46,9 +44,7 @@ using pFunction = Status (DEPipeline::*)(const py::dict &, std::shared_ptr g_parse_op_func_ = {{kStorage, &DEPipeline::ParseStorageOp}, {kShuffle, &DEPipeline::ParseShuffleOp}, -#ifdef ENABLE_MINDRECORD {kMindrecord, &DEPipeline::ParseMindRecordOp}, -#endif {kMap, &DEPipeline::ParseMapOp}, {kBatch, &DEPipeline::ParseBatchOp}, {kRepeat, &DEPipeline::ParseRepeatOp}, @@ -364,7 +360,6 @@ Status DEPipeline::ParseShuffleOp(const py::dict &args, std::shared_ptr *in_partitions) { if (args["partitions"].is_none()) { std::string err_msg = "Error: partitions is not set (None)"; @@ -450,7 +445,6 @@ Status DEPipeline::ParseMindRecordOp(const py::dict &args, std::shared_ptr *ptr) { std::shared_ptr builder = std::make_shared(); diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.h b/mindspore/ccsrc/dataset/api/de_pipeline.h index e8dde85a77..acffc390cc 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.h +++ b/mindspore/ccsrc/dataset/api/de_pipeline.h @@ -38,9 +38,7 @@ using DsOpPtr = std::shared_ptr; enum OpName { kStorage = 0, kShuffle, -#ifdef ENABLE_MINDRECORD kMindrecord, -#endif kBatch, kCache, kRepeat, @@ -101,11 +99,9 @@ class DEPipeline { Status ParseShuffleOp(const py::dict &args, std::shared_ptr *ptr); -#ifdef ENABLE_MINDRECORD Status CheckMindRecordPartitionInfo(const py::dict &args, std::vector *ptr); Status ParseMindRecordOp(const py::dict 
&args, std::shared_ptr *ptr); -#endif Status ParseMapOp(const py::dict &args, std::shared_ptr *ptr); diff --git a/mindspore/ccsrc/dataset/api/python_bindings.cc b/mindspore/ccsrc/dataset/api/python_bindings.cc index 86b0a5d66a..e6c2691281 100644 --- a/mindspore/ccsrc/dataset/api/python_bindings.cc +++ b/mindspore/ccsrc/dataset/api/python_bindings.cc @@ -44,9 +44,7 @@ #include "dataset/engine/datasetops/source/io_block.h" #include "dataset/engine/datasetops/source/mnist_op.h" #include "dataset/engine/datasetops/source/manifest_op.h" -#ifdef ENABLE_MINDRECORD #include "dataset/engine/datasetops/source/mindrecord_op.h" -#endif #include "dataset/engine/datasetops/source/sampler/distributed_sampler.h" #include "dataset/engine/datasetops/source/sampler/pk_sampler.h" #include "dataset/engine/datasetops/source/sampler/random_sampler.h" @@ -146,14 +144,12 @@ void bindDatasetOps(py::module *m) { return py::make_tuple(count, num_classes); }); -#ifdef ENABLE_MINDRECORD (void)py::class_>(*m, "MindRecordOp") .def_static("get_num_rows", [](const std::string &path) { int64_t count = 0; THROW_IF_ERROR(MindRecordOp::CountTotalRows(path, &count)); return count; }); -#endif (void)py::class_>(*m, "ManifestOp") .def_static("get_num_rows_and_classes", @@ -424,9 +420,7 @@ PYBIND11_MODULE(_c_dataengine, m) { .value("STORAGE", OpName::kStorage) .value("SHUFFLE", OpName::kShuffle) .value("BATCH", OpName::kBatch) -#ifdef ENABLE_MINDRECORD .value("MINDRECORD", OpName::kMindrecord) -#endif .value("CACHE", OpName::kCache) .value("REPEAT", OpName::kRepeat) .value("TAKE", OpName::kTake) diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc index b062371d7f..b5bea5416c 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc @@ -13,8 +13,6 @@ * See the License for the specific language governing permissions and * 
limitations under the License. */ -#ifdef ENABLE_MINDRECORD - #include "dataset/engine/datasetops/source/mindrecord_op.h" #include @@ -665,4 +663,3 @@ Status MindRecordOp::CountTotalRows(const std::string dataset_path, int64_t *cou } } // namespace dataset } // namespace mindspore -#endif diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h index 2535acbc50..0b16391b20 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h @@ -15,7 +15,6 @@ */ #ifndef DATASET_ENGINE_DATASETOPS_SOURCE_MINDRECORD_OP_H_ #define DATASET_ENGINE_DATASETOPS_SOURCE_MINDRECORD_OP_H_ -#ifdef ENABLE_MINDRECORD #pragma once #include @@ -276,5 +275,4 @@ class MindRecordOp : public ParallelOp { }; } // namespace dataset } // namespace mindspore -#endif #endif // DATASET_ENGINE_DATASETOPS_SOURCE_MINDRECORD_OP_H_ diff --git a/tests/ut/cpp/CMakeLists.txt b/tests/ut/cpp/CMakeLists.txt index 5f4bd41b3b..8d3f8a8138 100644 --- a/tests/ut/cpp/CMakeLists.txt +++ b/tests/ut/cpp/CMakeLists.txt @@ -26,7 +26,6 @@ MESSAGE("check ut_test ${CMAKE_BINARY_DIR}") link_directories(${MS_CCSRC_BUILD_PATH}) if(ENABLE_MINDDATA) - add_definitions(-D ENABLE_MINDRECORD) add_definitions(-D ENABLE_MINDDATA) link_directories(${MS_CCSRC_BUILD_PATH}/dataset) link_directories(${MS_CCSRC_BUILD_PATH}/mindrecord) diff --git a/tests/ut/cpp/dataset/mind_record_op_test.cc b/tests/ut/cpp/dataset/mind_record_op_test.cc index abe7faef14..3d5c80b3f4 100644 --- a/tests/ut/cpp/dataset/mind_record_op_test.cc +++ b/tests/ut/cpp/dataset/mind_record_op_test.cc @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#ifdef ENABLE_MINDRECORD #include #include #include @@ -480,4 +479,3 @@ TEST_F(MindDataTestMindRecordOp, TestMindRecordBlockReaderRepeat) { row_count++; } } -#endif From d0c24fb706666085a6eabfa5b279d0c37fa61211 Mon Sep 17 00:00:00 2001 From: VectorSL Date: Tue, 7 Apr 2020 19:48:34 +0800 Subject: [PATCH 117/367] update lossscale for gpu --- mindspore/nn/wrap/loss_scale.py | 40 +++++++++---- mindspore/ops/operations/__init__.py | 6 +- mindspore/ops/operations/math_ops.py | 88 ++++++++++++++++++++++++++++ 3 files changed, 123 insertions(+), 11 deletions(-) diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index 1ce3179273..6a1f15a402 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -13,6 +13,7 @@ # limitations under the License. # ============================================================================ """Loss scale cell for loss scale training.""" +import mindspore.context as context from mindspore.nn.wrap.grad_reducer import DistributedGradReducer from mindspore.train.parallel_utils import ParallelMode from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean @@ -34,6 +35,13 @@ reciprocal = P.Reciprocal() def tensor_grad_scale(scale, grad): return grad * F.cast(reciprocal(scale), F.dtype(grad)) +_grad_overflow = C.MultitypeFuncGraph("_grad_overflow") +grad_overflow = P.FloatStatus() + + +@_grad_overflow.register("Tensor") +def _tensor_grad_overflow(grad): + return grad_overflow(grad) class DynamicLossScaleUpdateCell(Cell): r""" @@ -197,9 +205,15 @@ class TrainOneStepWithLossScaleCell(Cell): self.optimizer = optimizer self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) self.hyper_map = C.HyperMap() - self.alloc_status = NPUAllocFloatStatus() - self.get_status = NPUGetFloatStatus() - self.clear_status = NPUClearFloatStatus() + if context.get_context("device_target") == "GPU": + self.gpu_target = True + self.float_status = P.FloatStatus() + 
self.addn = P.AddN() + else: + self.gpu_target = False + self.alloc_status = NPUAllocFloatStatus() + self.get_status = NPUGetFloatStatus() + self.clear_status = NPUClearFloatStatus() self.reduce_sum = ReduceSum(keep_dims=False) self.base = Tensor(1, mstype.float32) self.less_equal = LessEqual() @@ -224,10 +238,12 @@ class TrainOneStepWithLossScaleCell(Cell): def construct(self, data, label, sens=None): weights = self.weights loss = self.network(data, label) - # init overflow buffer - init = self.alloc_status() - # clear overflow buffer - self.clear_status(init) + init = False + if not self.gpu_target: + # init overflow buffer + init = self.alloc_status() + # clear overflow buffer + self.clear_status(init) if sens is None: scaling_sens = self.loss_scale else: @@ -238,9 +254,13 @@ class TrainOneStepWithLossScaleCell(Cell): # apply grad reducer on grads grads = self.grad_reducer(grads) # get the overflow buffer - self.get_status(init) - # sum overflow buffer elements, 0:not overflow , >0:overflow - flag_sum = self.reduce_sum(init, (0,)) + if not self.gpu_target: + self.get_status(init) + # sum overflow buffer elements, 0:not overflow , >0:overflow + flag_sum = self.reduce_sum(init, (0,)) + else: + flag_sum = self.hyper_map(F.partial(_grad_overflow), grads) + flag_sum = self.addn(flag_sum) if self.is_distributed: # sum overflow flag over devices flag_reduce = self.allreduce(flag_sum) diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 846be05c4d..89a5ea0249 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -44,7 +44,7 @@ from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul LogicalNot, LogicalOr, MatMul, Maximum, Minimum, Mul, Neg, NMSWithMask, NotEqual, NPUAllocFloatStatus, NPUClearFloatStatus, - NPUGetFloatStatus, Pow, RealDiv, + NPUGetFloatStatus, Pow, RealDiv, IsNan, IsInf, IsFinite, FloatStatus, Reciprocal, CumSum, Sin, Sqrt, Rsqrt, Square, Sub, 
TensorAdd, Sign, Round) @@ -154,6 +154,10 @@ __all__ = [ 'NPUAllocFloatStatus', 'NPUGetFloatStatus', 'NPUClearFloatStatus', + 'IsNan', + 'IsFinite', + 'IsInf', + 'FloatStatus', 'Reciprocal', 'SmoothL1Loss', 'ReduceAll', diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 175b72560f..127d3c513c 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1541,6 +1541,94 @@ class LogicalOr(_LogicBinaryOp): def infer_dtype(self, x_dtype, y_dtype): return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.prim_name()) +class IsNan(PrimitiveWithInfer): + """ + Judging which elements are nan for each position + + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, has the same shape of input, and the dtype is bool. + """ + + @prim_attr_register + def __init__(self): + """init IsNan""" + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, x_shape): + return x_shape + + def infer_dtype(self, x_dtype): + return mstype.bool_ + +class IsInf(PrimitiveWithInfer): + """ + Judging which elements are inf or -inf for each position + + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, has the same shape of input, and the dtype is bool. + """ + + @prim_attr_register + def __init__(self): + """init IsInf""" + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, x_shape): + return x_shape + + def infer_dtype(self, x_dtype): + return mstype.bool_ + +class IsFinite(PrimitiveWithInfer): + """ + Judging which elements are finite for each position + + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, has the same shape of input, and the dtype is bool. 
+ """ + + @prim_attr_register + def __init__(self): + """init IsFinite""" + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, x_shape): + return x_shape + + def infer_dtype(self, x_dtype): + return mstype.bool_ + +class FloatStatus(PrimitiveWithInfer): + """ + Determine if the elements contains nan, inf or -inf. `0` for normal, `1` for overflow. + + Inputs: + - **input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, has the shape of `(1,)`, and has the same dtype of input `mindspore.dtype.float32` or + `mindspore.dtype.float16`. + """ + + @prim_attr_register + def __init__(self): + """init FloatStatus""" + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, x_shape): + return [1] + + def infer_dtype(self, x_dtype): + return x_dtype class NPUAllocFloatStatus(PrimitiveWithInfer): """ From 27e49d141501478ba14c252fa68b64b288020422 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Mon, 6 Apr 2020 12:17:43 +0800 Subject: [PATCH 118/367] Distinguish package name according to hardware platform --- build.sh | 6 ++++-- package.sh | 17 ++++++++++++++++- setup_package.py | 2 +- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/build.sh b/build.sh index 9d812d6dcc..e0d0d45da2 100755 --- a/build.sh +++ b/build.sh @@ -452,8 +452,10 @@ if [[ "X$INC_BUILD" = "Xoff" ]]; then bash "${PROJECT_PATH}/package.sh" ge elif [[ "X$ENABLE_GPU" = "Xon" ]]; then bash "${PROJECT_PATH}/package.sh" ms gpu - elif [[ "X$ENABLE_D" = "Xon" ]] || [[ "X$ENABLE_CPU" = "Xon" ]]; then - bash "${PROJECT_PATH}/package.sh" ms + elif [[ "X$ENABLE_D" = "Xon" ]]; then + bash "${PROJECT_PATH}/package.sh" ms ascend + elif [[ "X$ENABLE_CPU" = "Xon" ]]; then + bash "${PROJECT_PATH}/package.sh" ms cpu else bash "${PROJECT_PATH}/package.sh" debug fi diff --git a/package.sh b/package.sh index 0d4147c9f6..67f4761f37 100755 --- a/package.sh +++ b/package.sh @@ -96,14 +96,29 @@ if [ -n "$1" ];then else export BACKEND_POLICY="ms" fi + +# 
package name +if [[ "X$1" = "Xge" ]]; then + export MS_PACKAGE_NAME="mindspore" +elif [[ "X$1" = "Xms" && "X$2" = "Xgpu" ]]; then + export MS_PACKAGE_NAME="mindspore-gpu" +elif [[ "X$1" = "Xms" && "X$2" = "Xascend" ]]; then + export MS_PACKAGE_NAME="mindspore-ascend" +elif [[ "X$1" = "Xms" && "X$2" = "Xcpu" ]]; then + export MS_PACKAGE_NAME="mindspore" +else + export MS_PACKAGE_NAME="mindspore" +fi + ${PYTHON} "${BASEPATH}/setup_package.py" bdist_wheel chmod -R 700 ${PACKAGE_PATH}/mindspore/ -chmod -R 700 ${PACKAGE_PATH}/mindspore.egg-info/ +chmod -R 700 ${PACKAGE_PATH}/${MS_PACKAGE_NAME//-/_}.egg-info/ # rename package PACKAGE_FULL_NAME=$(find "${PACKAGE_PATH}" -iname "*.whl") PACKAGE_BASE_NAME=$(echo ${PACKAGE_FULL_NAME} | awk -F / '{print $NF}' | awk -F - '{print $1"-"$2}') +PACKAGE_BASE_NAME=${PACKAGE_BASE_NAME//_*-/-} PACKAGE_NEW_NAME="${PACKAGE_BASE_NAME}-${PY_TAGS}-${PLATFORM_TAG}.whl" cp -rf "${PACKAGE_PATH}/dist"/*.whl "${PACKAGE_PATH}/${PACKAGE_NEW_NAME}" diff --git a/setup_package.py b/setup_package.py index 8b6889cd34..87b5718de2 100644 --- a/setup_package.py +++ b/setup_package.py @@ -21,7 +21,6 @@ from setuptools import setup, find_packages from setuptools.command.egg_info import egg_info from setuptools.command.build_py import build_py -package_name = 'mindspore' version = '0.1.0' author = 'The MindSpore Authors' author_email = 'contact@mindspore.cn' @@ -29,6 +28,7 @@ home_page = 'https://www.mindspore.cn' backend_policy = os.getenv('BACKEND_POLICY') commit_id = os.getenv('COMMIT_ID').replace("\n", "") +package_name = os.getenv('MS_PACKAGE_NAME').replace("\n", "") pwd = os.path.dirname(os.path.realpath(__file__)) pkg_dir = os.path.join(pwd, 'build/package') From d88dbbb138da9fd3447748ae2f97f95f44470091 Mon Sep 17 00:00:00 2001 From: chenhaozhe Date: Tue, 7 Apr 2020 20:48:26 +0800 Subject: [PATCH 119/367] pass auto mixed precision flag to ge init options --- mindspore/ccsrc/utils/context/ms_context.cc | 4 +++- 1 file changed, 3 insertions(+), 1 
deletion(-) diff --git a/mindspore/ccsrc/utils/context/ms_context.cc b/mindspore/ccsrc/utils/context/ms_context.cc index cdc7af62e6..6c15e16714 100644 --- a/mindspore/ccsrc/utils/context/ms_context.cc +++ b/mindspore/ccsrc/utils/context/ms_context.cc @@ -358,7 +358,9 @@ void MsContext::GetGeOptions(std::map* ge_options) con MS_LOG(ERROR) << "Set proto lib path failed!"; } - // Disbale the global variable acc, only enable it whlie adding training graph in pipeline + // Enable auto mixed precision according to the context options + (*ge_options)["ge.exec.auto_mix_precision"] = std::to_string(auto_mixed_precision_flag_); + // Disable the global variable acc, only enable it whlie adding training graph in pipeline (*ge_options)["ge.exec.variable_acc"] = "0"; #endif } From f9d180d413ba1dddfc512765a66ababb7af37a89 Mon Sep 17 00:00:00 2001 From: zhaozhenlong Date: Fri, 3 Apr 2020 15:37:42 +0800 Subject: [PATCH 120/367] add api image gradients --- mindspore/nn/layer/__init__.py | 4 +- mindspore/nn/layer/basic.py | 45 ++++++++++++++ tests/st/ops/davinci/test_image_gradients.py | 62 ++++++++++++++++++++ tests/ut/python/nn/test_image_gradients.py | 49 ++++++++++++++++ 4 files changed, 158 insertions(+), 2 deletions(-) create mode 100644 tests/st/ops/davinci/test_image_gradients.py create mode 100644 tests/ut/python/nn/test_image_gradients.py diff --git a/mindspore/nn/layer/__init__.py b/mindspore/nn/layer/__init__.py index bb29935602..dae18fe663 100644 --- a/mindspore/nn/layer/__init__.py +++ b/mindspore/nn/layer/__init__.py @@ -22,7 +22,7 @@ from .normalization import BatchNorm1d, BatchNorm2d, LayerNorm from .container import SequentialCell, CellList from .conv import Conv2d, Conv2dTranspose from .lstm import LSTM -from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot +from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot, ImageGradients from .embedding import Embedding from .pooling import AvgPool2d, MaxPool2d @@ -31,7 +31,7 @@ __all__ = 
['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'SequentialCell', 'CellList', 'Conv2d', 'Conv2dTranspose', 'LSTM', - 'Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', + 'Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'ImageGradients', 'Embedding', 'AvgPool2d', 'MaxPool2d', ] diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py index 30b94c738d..de49685dac 100644 --- a/mindspore/nn/layer/basic.py +++ b/mindspore/nn/layer/basic.py @@ -370,3 +370,48 @@ class OneHot(Cell): def construct(self, indices): return self.onehot(indices, self.depth, self.on_value, self.off_value) + + +class ImageGradients(Cell): + r""" + Returns two tensors, the first is along the height dimension and the second is along the width dimension. + + Assume an image shape is :math:`h*w`. The gradients along the height and the width are :math:`dy` and :math:`dx`, + respectively. + + .. math:: + dy[i] = \begin{cases} image[i+1, :]-image[i, :], &if\ 0<=i>> net = nn.ImageGradients() + >>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32) + >>> net(image) + [[[[2,2] + [0,0]]]] + [[[[1,0] + [1,0]]]] + """ + def __init__(self): + super(ImageGradients, self).__init__() + + def construct(self, images): + batch_size, depth, height, width = P.Shape()(images) + dy = images[:, :, 1:, :] - images[:, :, :height - 1, :] + dy_last = P.Fill()(P.DType()(images), (batch_size, depth, 1, width), 0) + dy = P.Concat(2)((dy, dy_last)) + + dx = images[:, :, :, 1:] - images[:, :, :, :width - 1] + dx_last = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0) + dx = P.Concat(3)((dx, dx_last)) + return dy, dx diff --git a/tests/st/ops/davinci/test_image_gradients.py b/tests/st/ops/davinci/test_image_gradients.py new file mode 100644 index 0000000000..ea385158c9 --- /dev/null +++ b/tests/st/ops/davinci/test_image_gradients.py @@ -0,0 +1,62 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +import numpy as np +import mindspore.nn as nn +import mindspore.context as context +import mindspore.common.dtype as mstype +from mindspore import Tensor +from mindspore.common.api import ms_function + +context.set_context(device_target="Ascend") +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.image_gradients = nn.ImageGradients() + + @ms_function + def construct(self, x): + return self.image_gradients(x) + + +def test_image_gradients(): + image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32) + expected_dy = np.array([[[[2,2],[0,0]]]]).astype(np.int32) + expected_dx = np.array([[[[1,0],[1,0]]]]).astype(np.int32) + net = Net() + dy, dx = net(image) + assert np.any(dx.asnumpy()-expected_dx) == False + assert np.any(dy.asnumpy()-expected_dy) == False + + +def test_image_gradients_multi_channel_depth(): + # 4 x 2 x 2 x 2 + dtype = mstype.int32 + image = Tensor(np.array([[[[1,2],[3,4]], [[5,6],[7,8]]], + [[[3,5],[7,9]], [[11,13],[15,17]]], + [[[5,10],[15,20]], [[25,30],[35,40]]], + [[[10,20],[30,40]], [[50,60],[70,80]]]]), dtype=dtype) + expected_dy = Tensor(np.array([[[[2,2],[0,0]], [[2,2],[0,0]]], + [[[4,4],[0,0]], [[4,4],[0,0]]], + [[[10,10],[0,0]], [[10,10],[0,0]]], + [[[20,20],[0,0]], [[20,20],[0,0]]]]), dtype=dtype) + expected_dx = Tensor(np.array([[[[1,0],[1,0]], [[1,0],[1,0]]], + [[[2,0],[2,0]], [[2,0],[2,0]]], + [[[5,0],[5,0]], [[5,0],[5,0]]], + 
[[[10,0],[10,0]], [[10,0],[10,0]]]]), dtype=dtype) + net = Net() + dy, dx = net(image) + + assert np.any(dx.asnumpy()-expected_dx.asnumpy()) == False + assert np.any(dy.asnumpy()-expected_dy.asnumpy()) == False diff --git a/tests/ut/python/nn/test_image_gradients.py b/tests/ut/python/nn/test_image_gradients.py new file mode 100644 index 0000000000..f65f38ec0a --- /dev/null +++ b/tests/ut/python/nn/test_image_gradients.py @@ -0,0 +1,49 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" test loss """ +import numpy as np +import mindspore.nn as nn +import mindspore.context as context +import mindspore.common.dtype as mstype +from mindspore import Tensor +from mindspore.common.api import _executor +from mindspore.common.api import ms_function + +context.set_context(device_target="Ascend") +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.image_gradients = nn.ImageGradients() + + @ms_function + def construct(self, x): + return self.image_gradients(x) + +def test_compile(): + # input shape 1 x 1 x 2 x 2 + image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32) + net = Net() + _executor.compile(net, image) + + +def test_compile_multi_channel(): + # input shape 4 x 2 x 2 x 2 + dtype = mstype.int32 + image = Tensor(np.array([[[[1,2],[3,4]], [[5,6],[7,8]]], + [[[3,5],[7,9]], [[11,13],[15,17]]], + [[[5,10],[15,20]], [[25,30],[35,40]]], + [[[10,20],[30,40]], [[50,60],[70,80]]]]), dtype=dtype) + net = Net() + _executor.compile(net, image) From a153fad874ec3c3608999defdeb34daa550bbb12 Mon Sep 17 00:00:00 2001 From: Xiaoda Zhang Date: Fri, 3 Apr 2020 14:33:04 +0800 Subject: [PATCH 121/367] This commit is to separate the computation cost and memory cost in auto_parallel. Some related memory correction is removed. 
--- .../ccsrc/parallel/auto_parallel/costmodel.cc | 12 +- .../ccsrc/parallel/auto_parallel/costmodel.h | 10 +- .../auto_parallel/dp_algo_costmodel.h | 2 +- .../parallel/auto_parallel/edge_costmodel.cc | 51 ++-- .../parallel/auto_parallel/edge_costmodel.h | 2 +- .../parallel/auto_parallel/graph_costmodel.cc | 105 +++----- .../parallel/auto_parallel/graph_costmodel.h | 8 +- .../auto_parallel/operator_costmodel.cc | 114 ++++---- .../auto_parallel/operator_costmodel.h | 244 +++++++++--------- .../ccsrc/parallel/ops_info/matmul_info.cc | 8 +- .../ccsrc/parallel/ops_info/operator_info.cc | 25 +- .../ccsrc/parallel/ops_info/operator_info.h | 6 +- .../ccsrc/parallel/step_auto_parallel.cc | 30 +-- .../tensor_layout/tensor_redistribution.cc | 24 +- .../tensor_layout/tensor_redistribution.h | 9 +- .../auto_parallel/graph_costmodel_test.cc | 4 +- .../auto_parallel/operator_costmodel_test.cc | 12 +- .../cpp/parallel/ops_info/activation_test.cc | 8 +- .../cpp/parallel/ops_info/matmul_info_test.cc | 8 +- .../parallel/ops_info/tensor_add_info_test.cc | 8 +- .../cpp/parallel/ops_info/tmpidentity_test.cc | 4 +- 21 files changed, 322 insertions(+), 372 deletions(-) diff --git a/mindspore/ccsrc/parallel/auto_parallel/costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/costmodel.cc index f5cf5069be..190f589bb5 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/costmodel.cc @@ -23,8 +23,8 @@ namespace mindspore { namespace parallel { void Simplify(CostPtrList* clist_ptrs) { - // Sort the cost_list with the memory_cost increasing, and communication_cost decreasing order. This method - // excludes the cost with greater memory_cost and greater communication_cost. + // Sort the cost_list with the computation_cost_ increasing, and communication_cost decreasing order. This method + // excludes the cost with greater computation_cost_ and greater communication_cost. // E.g. clist_ptrs = {<100, 20>, <200, 10>, <300, 50>}. 
After this method, clist_ptrs = {<200, 10>, <100, 20>} if (!COST_MODEL_SIMPLIFY_CALCULATION) { return; @@ -33,7 +33,7 @@ void Simplify(CostPtrList* clist_ptrs) { std::vector id(clist_ptrs->size()); std::iota(id.begin(), id.end(), size_t(0)); std::sort(id.begin(), id.end(), [&clist_ptrs](size_t x, size_t y) { - return clist_ptrs->at(x)->memory_cost_ < clist_ptrs->at(y)->memory_cost_; + return clist_ptrs->at(x)->computation_cost_ < clist_ptrs->at(y)->computation_cost_; }); CostPtrList ret; for (size_t i = 0; i < clist_ptrs->size(); ++i) { @@ -45,8 +45,8 @@ void Simplify(CostPtrList* clist_ptrs) { } void SimplifyForDreasingCommunicationWithPartialPara(CostPtrList* clist_ptrs) { - // Sort the cost_list with the memory_cost increasing, and communication_with_partial_para_cost decreasing order. - // This method excludes the cost with greater memory_cost and greater communication_without_para_cost. + // Sort the cost_list with the computation_cost_ increasing, and communication_with_partial_para_cost decreasing + // order. This method excludes the cost with greater computation_cost_ and greater communication_without_para_cost. 
if (!COST_MODEL_SIMPLIFY_CALCULATION) { return; } @@ -54,7 +54,7 @@ void SimplifyForDreasingCommunicationWithPartialPara(CostPtrList* clist_ptrs) { std::vector id(clist_ptrs->size()); std::iota(id.begin(), id.end(), size_t(0)); std::sort(id.begin(), id.end(), [&clist_ptrs](size_t x, size_t y) { - return clist_ptrs->at(x)->memory_cost_ < clist_ptrs->at(y)->memory_cost_; + return clist_ptrs->at(x)->computation_cost_ < clist_ptrs->at(y)->computation_cost_; }); CostPtrList ret; for (size_t i = 0; i < clist_ptrs->size(); ++i) { diff --git a/mindspore/ccsrc/parallel/auto_parallel/costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/costmodel.h index 361c19573f..229f0fbf5e 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/costmodel.h @@ -44,14 +44,18 @@ using RedistributionOpListPtr = std::shared_ptr& decision_ = nullptr) - : memory_cost_(memory), communication_cost_(commuication), decision_ptr_(std::move(decision_)) { + Cost(double computation, double commuication, const std::shared_ptr& decision_ = nullptr) + : computation_cost_(computation), communication_cost_(commuication), decision_ptr_(std::move(decision_)) { + memory_with_reuse_ = 0.0; communication_without_parameter_ = 0.0; communication_with_partial_para_ = 0.0; communication_redis_forward_ = 0.0; communication_redis_backward_ = 0.0; } - double memory_cost_; + // 'memory_with_reuse_' calculates the peak memory usage in a training phase + double memory_with_reuse_; + // 'computation_cost_' models the training time of an iteration in a training phase + double computation_cost_; // 'communication_cost_' includes communications from operators (forward and backward) and edges double communication_cost_; // communication_without_parameter_ = communication_cost_ - (backward communication from operators) diff --git a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h index 
c9b6a07317..0cb58c49da 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h @@ -35,7 +35,7 @@ namespace parallel { // interpretation of 6 operations in costmodel.h. // Phase 2: Search the cost_list in the final graph, and determine the optimal one // Create the cost_list for the final graph, and choose the optimal one: one the minimum quantity -// COST_MODEL_ALPHA * memory_cost + COST_MODEL_BETA * communication_cost +// COST_MODEL_ALPHA * computation_cost + COST_MODEL_BETA * communication_cost // Phase 3: Recover the original CostGraph, the determine strategy for each operator // After determining the optimal cost for the final graph, the algorithm recovers the original graph by applying // the 4 operations in the reverse order in the Phase 1. Because each operation decision contains the strategy, diff --git a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc index 6381049f17..653f6c903d 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc @@ -69,7 +69,7 @@ Status Edge::InitEdgeCost() { MS_LOG(EXCEPTION) << "Failure: redistribution cost calculation failed"; } MS_EXCEPTION_IF_NULL(cost); - MS_LOG(DEBUG) << "The redistribution cost: memory_cost: " << cost->memory_cost_ + MS_LOG(DEBUG) << "The redistribution cost: computation_cost: " << cost->computation_cost_ << ", communication_cost: " << cost->communication_cost_ << ", communication_without_parameter_: " << cost->communication_without_parameter_ << ", communication_with_partial_para_: " << cost->communication_with_partial_para_ << "."; @@ -117,9 +117,9 @@ Status Edge::GetRedistributionCost(const TensorLayout& prev_op_output_layout, co double comm_cost = tensor_redistribution.comm_cost(); double forward_comm_cost = tensor_redistribution.forward_comm_cost(); double backward_comm_cost = 
tensor_redistribution.backward_comm_cost(); - double mem_cost = tensor_redistribution.mem_cost(); + double computation_cost = tensor_redistribution.computation_cost(); - *cost = std::make_shared(type_length * mem_cost, type_length * comm_cost); + *cost = std::make_shared(type_length * computation_cost, type_length * comm_cost); (*cost)->communication_without_parameter_ = type_length * comm_cost; (*cost)->communication_with_partial_para_ = (*cost)->communication_without_parameter_ + @@ -150,26 +150,26 @@ CostPtrList Edge::CreateEdgeEliminationCostList(const StrategyPtr& output_st_ptr (void)std::transform(edges.begin(), edges.end(), all_cost_list.begin(), LocalGetCostList); CostPtrList selected_cost_list(all_cost_list.size(), nullptr); - std::function recursive = [&](size_t k, double memory, double communication, - double communication_without_para) { - if (k == edges.size()) { - auto decision = std::make_shared(selected_cost_list); - CostPtr new_cost = std::make_shared(memory, communication); - MS_EXCEPTION_IF_NULL(new_cost); - new_cost->communication_without_parameter_ = communication_without_para; - new_cost->communication_with_partial_para_ = - communication_without_para + COST_MODEL_GAMMA * (communication - communication_without_para); - new_cost->decision_ptr_ = decision; - result.push_back(new_cost); - return; - } - for (auto& c : all_cost_list[k]) { - MS_EXCEPTION_IF_NULL(c); - selected_cost_list[k] = c; - recursive(k + 1, memory + c->memory_cost_, communication + c->communication_cost_, - communication_without_para + c->communication_without_parameter_); - } - }; + std::function recursive = + [&](size_t k, double computation, double communication, double communication_without_para) { + if (k == edges.size()) { + auto decision = std::make_shared(selected_cost_list); + CostPtr new_cost = std::make_shared(computation, communication); + MS_EXCEPTION_IF_NULL(new_cost); + new_cost->communication_without_parameter_ = communication_without_para; + 
new_cost->communication_with_partial_para_ = + communication_without_para + COST_MODEL_GAMMA * (communication - communication_without_para); + new_cost->decision_ptr_ = decision; + result.push_back(new_cost); + return; + } + for (auto& c : all_cost_list[k]) { + MS_EXCEPTION_IF_NULL(c); + selected_cost_list[k] = c; + recursive(k + 1, computation + c->computation_cost_, communication + c->communication_cost_, + communication_without_para + c->communication_without_parameter_); + } + }; recursive(0, 0, 0, 0); SimplifyForDreasingCommunicationWithPartialPara(&result); return result; @@ -203,7 +203,8 @@ void Edge::CreateOpEliminationSubCostList(StrategyPtr op_strategy, const CostPtr MS_EXCEPTION_IF_NULL(middle_cost); for (auto& right_cost : right_cost_list) { MS_EXCEPTION_IF_NULL(right_cost); - double memory = left_cost->memory_cost_ + middle_cost->memory_cost_ + right_cost->memory_cost_; + double computation = + left_cost->computation_cost_ + middle_cost->computation_cost_ + right_cost->computation_cost_; double communication = left_cost->communication_cost_ + middle_cost->communication_cost_ + right_cost->communication_cost_; double communication_without_para = left_cost->communication_without_parameter_ + @@ -211,7 +212,7 @@ void Edge::CreateOpEliminationSubCostList(StrategyPtr op_strategy, const CostPtr right_cost->communication_without_parameter_; auto decision = std::make_shared(op_strategy, left_cost, middle_cost, right_cost); - auto cost = std::make_shared(memory, communication, decision); + auto cost = std::make_shared(computation, communication, decision); MS_EXCEPTION_IF_NULL(cost); cost->communication_without_parameter_ = communication_without_para; cost->communication_with_partial_para_ = diff --git a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h index 1fa49029fa..eb89466d7c 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h +++ 
b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h @@ -133,7 +133,7 @@ class Edge { void set_parameter_involve(int para_invol) { is_output_parameter_involve_ = para_invol; } // When the input of a operator contains WEIGHT or a output from other operators involving WEIGHT, then these input // should stay in memory until it is used in the backward phase, which is kept in memory at the end of forward phase. - Status CorrectStrategyCostForMemoryReuse() const { return SUCCESS; } + Status CalculateMemoryCost() const { return SUCCESS; } private: std::string edge_name_; diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc index 59b9d9e992..88a54662d3 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc @@ -247,7 +247,7 @@ CostPtrList CostGraph::CreateFinalCostList(const OperatorInfoPtr& u, const std:: MS_EXCEPTION_IF_NULL(cost1); MS_EXCEPTION_IF_NULL(cost2); MS_EXCEPTION_IF_NULL(cost3); - double memory = cost1->memory_cost_ + cost2->memory_cost_ + cost3->memory_cost_; + double computation = cost1->computation_cost_ + cost2->computation_cost_ + cost3->computation_cost_; double commmunication = cost1->communication_cost_ + cost2->communication_cost_ + cost3->communication_cost_; double communication_without_para = cost1->communication_without_parameter_ + @@ -255,7 +255,7 @@ CostPtrList CostGraph::CreateFinalCostList(const OperatorInfoPtr& u, const std:: cost3->communication_without_parameter_; auto decision = std::make_shared(u_strategy->strategy_ptr, v_strategy->strategy_ptr, cost1, cost2, cost3); - auto cost = std::make_shared(memory, commmunication, decision); + auto cost = std::make_shared(computation, commmunication, decision); MS_EXCEPTION_IF_NULL(cost); cost->communication_without_parameter_ = communication_without_para; cost->communication_with_partial_para_ = @@ -282,7 +282,7 @@ CostPtrList 
CostGraph::CreateFinalSingleCostList(const OperatorInfoPtr& u) { for (const auto& cost1 : clist1) { MS_EXCEPTION_IF_NULL(cost1); auto decision = std::make_shared(u_strategy_ptr, cost1); - auto new_cost = std::make_shared(cost1->memory_cost_, cost1->communication_cost_, decision); + auto new_cost = std::make_shared(cost1->computation_cost_, cost1->communication_cost_, decision); MS_EXCEPTION_IF_NULL(new_cost); new_cost->communication_without_parameter_ = cost1->communication_without_parameter_; new_cost->communication_with_partial_para_ = @@ -297,12 +297,12 @@ CostPtrList CostGraph::CreateFinalSingleCostList(const OperatorInfoPtr& u) { } CostPtr CostGraph::SelectCostWithMemoryConstraint(const CostPtrList& cost_list, double memory) { - if (cost_list.empty() || cost_list[0]->memory_cost_ >= memory) { + if (cost_list.empty() || cost_list[0]->computation_cost_ >= memory) { return nullptr; } std::function LocalCompare = [&](CostPtr init, const CostPtr& cost_x) { MS_EXCEPTION_IF_NULL(cost_x); - if (init == nullptr || cost_x->memory_cost_ < memory) { + if (init == nullptr || cost_x->computation_cost_ < memory) { init = cost_x; } return init; @@ -313,36 +313,36 @@ CostPtr CostGraph::SelectCostWithMemoryConstraint(const CostPtrList& cost_list, CostPtr CostGraph::SelectCostWithMinTrainingTime(const CostPtrList& cost_list, double memory) { // Select the cost with minimum training time. 
Currently, the training time is modeled as = - // costmodel_alpha_ * memory_cost + costmodel_beta_ * communication_with_partial_para_ + // costmodel_alpha_ * computation_cost + costmodel_beta_ * communication_with_partial_para_ if (cost_list.empty()) { MS_LOG(ERROR) << "Final cost list is null."; return nullptr; } CostPtr ret = cost_list[0]; MS_EXCEPTION_IF_NULL(ret); - if (ret->memory_cost_ >= memory) { - MS_LOG(ERROR) << "No available cost; the minimum cost is " << ret->memory_cost_ + if (ret->computation_cost_ >= memory) { + MS_LOG(ERROR) << "No available cost; the minimum cost is " << ret->computation_cost_ << ", the memory capacity is: " << memory << "."; return nullptr; } - double minimum = costmodel_alpha_ * ret->memory_cost_ + costmodel_beta_ * ret->communication_with_partial_para_; - MS_LOG(INFO) << "minimum: " << minimum << ", memory_cost_: " << ret->memory_cost_ + double minimum = costmodel_alpha_ * ret->computation_cost_ + costmodel_beta_ * ret->communication_with_partial_para_; + MS_LOG(INFO) << "minimum: " << minimum << ", computation_cost_: " << ret->computation_cost_ << ", communication_with_partial_para_: " << ret->communication_with_partial_para_ << ", communication_cost_: " << ret->communication_cost_ << ", communication_without_parameter_: " << ret->communication_without_parameter_ << "."; for (size_t i = 1; i < cost_list.size(); ++i) { MS_EXCEPTION_IF_NULL(cost_list[i]); - if (cost_list[i]->memory_cost_ >= memory) { - MS_LOG(INFO) << "cost_list " << i << " memory_cost_: " << cost_list[i]->memory_cost_ + if (cost_list[i]->computation_cost_ >= memory) { + MS_LOG(INFO) << "cost_list " << i << " computation_cost_: " << cost_list[i]->computation_cost_ << ", is larger than the memory capacity: " << memory << "."; break; } - MS_LOG(INFO) << "cost_list " << i << " memory_cost_: " << cost_list[i]->memory_cost_ + MS_LOG(INFO) << "cost_list " << i << " computation_cost_: " << cost_list[i]->computation_cost_ << ", communication_with_partial_para_: " << 
cost_list[i]->communication_with_partial_para_ << ", communication_cost_: " << cost_list[i]->communication_cost_ << ", communication_without_parameter_: " << cost_list[i]->communication_without_parameter_ << "."; - auto tmp = - costmodel_alpha_ * cost_list[i]->memory_cost_ + costmodel_beta_ * cost_list[i]->communication_with_partial_para_; + auto tmp = costmodel_alpha_ * cost_list[i]->computation_cost_ + + costmodel_beta_ * cost_list[i]->communication_with_partial_para_; MS_LOG(INFO) << "tmp: " << tmp; if (minimum > tmp) { minimum = tmp; @@ -363,8 +363,8 @@ CostPtrList CostGraph::SelectCostListWithMinTrainingTimeMultiple(const std::vect MS_LOG(ERROR) << "The cost list " << i << " is empty."; return ret; } else { - total_memory += all_cost_list[i][0]->memory_cost_; - minimum += costmodel_alpha_ * all_cost_list[i][0]->memory_cost_ + + total_memory += all_cost_list[i][0]->computation_cost_; + minimum += costmodel_alpha_ * all_cost_list[i][0]->computation_cost_ + costmodel_beta_ * all_cost_list[i][0]->communication_with_partial_para_; ret[i] = all_cost_list[i][0]; } @@ -381,8 +381,8 @@ CostPtrList CostGraph::SelectCostListWithMinTrainingTimeMultiple(const std::vect double tmp_memory = 0.0, tmp_minimum = 0.0; for (size_t i = 0; i < selected_cost_list.size(); ++i) { MS_EXCEPTION_IF_NULL(selected_cost_list[i]); - tmp_memory += selected_cost_list[i]->memory_cost_; - tmp_minimum += costmodel_alpha_ * selected_cost_list[i]->memory_cost_ + + tmp_memory += selected_cost_list[i]->computation_cost_; + tmp_minimum += costmodel_alpha_ * selected_cost_list[i]->computation_cost_ + costmodel_beta_ * selected_cost_list[i]->communication_with_partial_para_; } MS_LOG(INFO) << "tmp_memory: " << tmp_memory << ", tmp_minimum: " << tmp_minimum << ", minimum: " << minimum @@ -394,6 +394,7 @@ CostPtrList CostGraph::SelectCostListWithMinTrainingTimeMultiple(const std::vect } return; } + MS_LOG(DEBUG) << "The value minimum: " << minimum << ", available_memory: " << available_memory << "."; for 
(auto& c : all_cost_list[k]) { selected_cost_list[k] = c; @@ -814,7 +815,7 @@ void CostGraph::CreateMergeEliminationSubCostList(StrategyPtr op_strategy, const for (size_t k = 0; k < tar_cost_list.size(); ++k) { auto& tar_cost = tar_cost_list[k]; MS_EXCEPTION_IF_NULL(tar_cost); - double memory = op_cost->memory_cost_ + edge_cost->memory_cost_ + tar_cost->memory_cost_; + double computation = op_cost->computation_cost_ + edge_cost->computation_cost_ + tar_cost->computation_cost_; double communication = op_cost->communication_cost_ + edge_cost->communication_cost_ + tar_cost->communication_cost_; double communication_without_para = op_cost->communication_without_parameter_ + @@ -823,7 +824,7 @@ void CostGraph::CreateMergeEliminationSubCostList(StrategyPtr op_strategy, const auto decision = std::make_shared(op_strategy, op_cost, edge_cost, tar_op_strategy, tar_cost); - auto new_cost = std::make_shared(memory, communication, decision); + auto new_cost = std::make_shared(computation, communication, decision); MS_EXCEPTION_IF_NULL(new_cost); new_cost->communication_without_parameter_ = communication_without_para; new_cost->communication_with_partial_para_ = @@ -891,7 +892,8 @@ void CostGraph::CreateContractEliminationSubCostList(StrategyPtr contract_op_str for (size_t k = 0; k < tar_cost_list.size(); ++k) { auto& tar_cost = tar_cost_list[k]; MS_EXCEPTION_IF_NULL(tar_cost); - double memory = contract_op_cost->memory_cost_ + edge_cost->memory_cost_ + tar_cost->memory_cost_; + double computation = + contract_op_cost->computation_cost_ + edge_cost->computation_cost_ + tar_cost->computation_cost_; double communication = contract_op_cost->communication_cost_ + edge_cost->communication_cost_ + tar_cost->communication_cost_; double communication_without_para = contract_op_cost->communication_without_parameter_ + @@ -900,7 +902,7 @@ void CostGraph::CreateContractEliminationSubCostList(StrategyPtr contract_op_str auto decision = std::make_shared(contract_op_stra, contract_op_cost, 
edge_cost, target_op_stra, tar_cost); - auto new_cost = std::make_shared(memory, communication, decision); + auto new_cost = std::make_shared(computation, communication, decision); new_cost->communication_without_parameter_ = communication_without_para; new_cost->communication_with_partial_para_ = communication_without_para + COST_MODEL_GAMMA * (communication - communication_without_para); @@ -963,9 +965,9 @@ void CostGraph::CreateTriangleEliminationSubCostList(StrategyPtr elimi_op_stra, MS_EXCEPTION_IF_NULL(left_edge_cost); for (auto& left_node_cost : left_node_clist_origin) { MS_EXCEPTION_IF_NULL(left_node_cost); - double new_memory_cost = elimi_op_cost->memory_cost_ + left_edge_cost->memory_cost_ + - left_node_cost->memory_cost_ + right_edge_cost->memory_cost_ + - right_op_cost->memory_cost_; + double new_computation = elimi_op_cost->computation_cost_ + left_edge_cost->computation_cost_ + + left_node_cost->computation_cost_ + right_edge_cost->computation_cost_ + + right_op_cost->computation_cost_; double new_commu_cost = elimi_op_cost->communication_cost_ + left_edge_cost->communication_cost_ + left_node_cost->communication_cost_ + right_edge_cost->communication_cost_ + right_op_cost->communication_cost_; @@ -977,7 +979,7 @@ void CostGraph::CreateTriangleEliminationSubCostList(StrategyPtr elimi_op_stra, auto decision = std::make_shared(elimi_op_stra, elimi_op_cost, left_edge_cost, right_edge_cost, left_op_stra, left_node_cost, right_op_stra, right_op_cost); - auto new_cost = std::make_shared(new_memory_cost, new_commu_cost, decision); + auto new_cost = std::make_shared(new_computation, new_commu_cost, decision); new_cost->communication_without_parameter_ = new_commu_without; new_cost->communication_with_partial_para_ = new_commu_without + COST_MODEL_GAMMA * (new_commu_cost - new_commu_without); @@ -1082,11 +1084,12 @@ void CostGraph::CreateStarEliminationSubCostList(const StrategyPtr& first_succ_n succ_edges_costs[0] = first_succ_edge_cost; succ_nodes_costs[0] = 
first_succ_node_cost; - double memory_cost = merged_node_cost->memory_cost_, commu_cost = merged_node_cost->communication_cost_, + double computation_cost = merged_node_cost->computation_cost_, + commu_cost = merged_node_cost->communication_cost_, commu_without = merged_node_cost->communication_without_parameter_; for (size_t i = 0; i < succ_nodes_stras.size(); ++i) { MS_EXCEPTION_IF_NULL(succ_edges_costs[i]); - memory_cost += succ_edges_costs[i]->memory_cost_ + succ_nodes_costs[i]->memory_cost_; + computation_cost += succ_edges_costs[i]->computation_cost_ + succ_nodes_costs[i]->computation_cost_; commu_cost += succ_edges_costs[i]->communication_cost_ + succ_nodes_costs[i]->communication_cost_; commu_without += succ_edges_costs[i]->communication_without_parameter_ + succ_nodes_costs[i]->communication_without_parameter_; @@ -1094,7 +1097,7 @@ void CostGraph::CreateStarEliminationSubCostList(const StrategyPtr& first_succ_n auto decision = std::make_shared(merged_op_stra, merged_node_cost, succ_edges_costs, succ_nodes_stras, succ_nodes_costs); - auto new_cost = std::make_shared(memory_cost, commu_cost, decision); + auto new_cost = std::make_shared(computation_cost, commu_cost, decision); new_cost->communication_without_parameter_ = commu_without; new_cost->communication_with_partial_para_ = commu_without + COST_MODEL_GAMMA * (commu_cost - commu_without); first_succ_node_clist_new->emplace_back(std::move(new_cost)); @@ -1210,36 +1213,6 @@ Status CostGraph::InitSelectedStrategy() { return SUCCESS; } -Status CostGraph::CorrectOpsStrategyCostForMultiOutputUse() { - for (auto& op : ops_) { - MS_EXCEPTION_IF_NULL(op); - if (op->GetAliveSuccEdges().size() > 1) { - // Filter out the case of a output being used by multiple operators - std::map output_count; - for (size_t i = 0; i < op->GetAliveSuccEdges().size(); ++i) { - auto output_index = op->GetAliveSuccEdges()[i]->prev_op_output_index(); - output_count[output_index]++; - } - for (size_t i = 0; i < 
op->GetAliveSuccEdges().size(); ++i) { - auto output_index = op->GetAliveSuccEdges()[i]->prev_op_output_index(); - if (output_count[output_index] <= 1) { - continue; - } - auto next_op = op->GetAliveSuccEdges()[i]->next_operator(); - MS_EXCEPTION_IF_NULL(next_op); - auto input_index = op->GetAliveSuccEdges()[i]->next_op_input_index(); - if (next_op->CorrectStrategyCostForMultiOutputUse(input_index) != SUCCESS) { - MS_LOG(ERROR) << "The operator name: " << op->name() << ", the next operator name: " << next_op->name() - << ", the output_index: " << output_index << ", the input_index: " << input_index << "."; - return FAILED; - } - output_count[output_index]--; - } - } - } - return SUCCESS; -} - Status CostGraph::ComputeOpsAndEdgesParameterInvolved() { for (auto& op : ops_) { MS_EXCEPTION_IF_NULL(op); @@ -1252,23 +1225,23 @@ Status CostGraph::ComputeOpsAndEdgesParameterInvolved() { return SUCCESS; } -Status CostGraph::CorrectOpsStrategyCostForMemoryReuse() { +Status CostGraph::CalculateOpsMemoryCost() { for (auto& op : ops_) { MS_EXCEPTION_IF_NULL(op); - if (op->CorrectStrategyCostForMemoryReuse() != SUCCESS) { - MS_LOG(ERROR) << "Correcting Operator: " << op->name() << " cost for memory reuse failed."; + if (op->CalculateMemoryCost() != SUCCESS) { + MS_LOG(ERROR) << "Calculate Operator: " << op->name() << " cost for memory usage failed."; return FAILED; } } return SUCCESS; } -Status CostGraph::CorrectEdgesStrategyCostForMemoryReuse() { +Status CostGraph::CalculateEdgesMemoryCost() { for (auto& edge_pair : edges_) { const auto& edges = edge_pair.second; for (auto& one_edge : edges) { - if (one_edge->CorrectStrategyCostForMemoryReuse() != SUCCESS) { - MS_LOG(ERROR) << "Correcting Edge: " << one_edge->edge_name() << " cost for memory reuse failed."; + if (one_edge->CalculateMemoryCost() != SUCCESS) { + MS_LOG(ERROR) << "Calculate Edge: " << one_edge->edge_name() << " cost for memory usage failed."; return FAILED; } } diff --git 
a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h index e4cbdffb61..c149534826 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h @@ -175,16 +175,12 @@ class CostGraph { void CreateStarEliminationSubCostList(const StrategyPtr&, const CostPtrList&, const CostPtrList&, const StrategyPtr&, const CostPtrList&, std::vector, CostPtrList&, CostPtrList&, CostPtrList*); - - // When a output of a operator is being used by multiple operators, the memory cost of this part should be calculated - // only once. This method is for correcting the 'strategy_cost_' for operators - Status CorrectOpsStrategyCostForMultiOutputUse(); // When the input of a operator is neither a WEIGHT, nor a output of a subsequent operator involving WEIGHT, then // the memory cost can be resused. - Status CorrectOpsStrategyCostForMemoryReuse(); + Status CalculateOpsMemoryCost(); // When the input of the edge is neither a WEIGHT, nor a output of a subsequent operator involving WEIGHT, then // the memory cost can be resused. - Status CorrectEdgesStrategyCostForMemoryReuse(); + Status CalculateEdgesMemoryCost(); Status ComputeOpsAndEdgesParameterInvolved(); std::vector GetOperators() const { return ops_; } diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc index 6958932fd6..7c17b499b1 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc @@ -74,8 +74,8 @@ double MatMulCost::GetBackwardCommCost(const std::vector& inputs, co // Return the per device memory cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses -double MatMulCost::GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t&) const { +double MatMulCost::GetForwardComputationCost(const std::vector& inputs, + const std::vector& outputs, const int32_t&) const { // In forward phase, the memory cost = slice(A) + slice(B) + (0 or 1) allreduce(slice(C)) double result = 0.0; TensorInfo output0 = outputs[0]; @@ -93,8 +93,8 @@ double MatMulCost::GetForwardMemoryCost(const std::vector& inputs, c // Return the per device memory cost in the forward phase. The cost is calculated according to the bytes // this operator uses -double MatMulCost::GetBackwardMemoryCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { +double MatMulCost::GetBackwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t& stage_id) const { // In backward phase, the memory cost = (0 or 1) allreduce(slice(B)) double result = 0.0; if (is_parameter_[1]) { @@ -147,8 +147,8 @@ double ActivationCost::GetBackwardCommCost(const std::vector& inputs // Return the per memory cost in the forward phase. The cost is calculated according to the bytes // this operator uses -double ActivationCost::GetForwardMemoryCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { +double ActivationCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t&) const { TensorInfo input0_info = inputs[0]; Shape input0_slice_shape = input0_info.slice_shape(); return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); @@ -156,8 +156,8 @@ double ActivationCost::GetForwardMemoryCost(const std::vector& input // Return the per memory cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses -double ActivationCost::GetBackwardMemoryCost(const std::vector&, const std::vector&, - const int32_t&) const { +double ActivationCost::GetBackwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const { return 0.0; } @@ -191,8 +191,8 @@ double SoftmaxCost::GetBackwardCommCost(const std::vector& inputs, c // Return the per memory cost in the forward phase. The cost is calculated according to the bytes // this operator uses -double SoftmaxCost::GetForwardMemoryCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { +double SoftmaxCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t&) const { // In the forward phase, the memory cost = slice(A) TensorInfo input0 = inputs[0]; Shape input0_slice_shape = input0.slice_shape(); @@ -201,8 +201,9 @@ double SoftmaxCost::GetForwardMemoryCost(const std::vector& inputs, // Return the per memory cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses -double SoftmaxCost::GetBackwardMemoryCost(const std::vector&, - const std::vector&, const int32_t&) const { +double SoftmaxCost::GetBackwardComputationCost(const std::vector&, + const std::vector&, + const int32_t&) const { return 0.0; } @@ -222,9 +223,9 @@ double TmpIdentityCost::GetBackwardCommCost(const std::vector& inputs, - const std::vector&, - const int32_t&) const { +double TmpIdentityCost::GetForwardComputationCost(const std::vector& inputs, + const std::vector&, + const int32_t&) const { TensorInfo input0_info = inputs[0]; Shape input0_slice_shape = input0_info.slice_shape(); return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); @@ -232,15 +233,15 @@ double TmpIdentityCost::GetForwardMemoryCost(const std::vector&, - const std::vector&, - const int32_t&) const { +double TmpIdentityCost::GetBackwardComputationCost(const std::vector&, + const std::vector&, + const int32_t&) const { return 0.0; } -double BatchParallelCost::GetForwardMemoryCost(const std::vector& inputs, - const std::vector&, - const int32_t&) const { +double BatchParallelCost::GetForwardComputationCost(const std::vector& inputs, + const std::vector&, + const int32_t&) const { double cost = 0.0; for (size_t i = 0; i < inputs.size(); ++i) { cost += ListProduct(inputs[i].slice_shape()) * static_cast(inputs_type_lengths_[i]); @@ -248,9 +249,9 @@ double BatchParallelCost::GetForwardMemoryCost(const std::vector&, - const std::vector&, - const int32_t&) const { +double BatchParallelCost::GetBackwardComputationCost(const std::vector&, + const std::vector&, + const int32_t&) const { return 0.0; } @@ -285,8 +286,8 @@ double PReLUCost::GetBackwardCommCost(const std::vector& inputs, con // Return the per memory cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses -double PReLUCost::GetForwardMemoryCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { +double PReLUCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t&) const { // In forward phase, the memory cost = slice(A) + slice(B) Shape input0_slice_shape = inputs[0].slice_shape(); Shape input1_slice_shape = inputs[1].slice_shape(); @@ -297,9 +298,9 @@ double PReLUCost::GetForwardMemoryCost(const std::vector& inputs, co // Return the per memory cost in the backward phase. The cost is calculated according to the bytes // this operator uses -double PReLUCost::GetBackwardMemoryCost(const std::vector& inputs, - const std::vector&, - const int32_t& stage_id) const { +double PReLUCost::GetBackwardComputationCost(const std::vector& inputs, + const std::vector&, + const int32_t& stage_id) const { // In backward phase, the memory cost = (0 or 1) allreduce(slice(B)) double result = 0.0; if (is_parameter_[1]) { @@ -338,8 +339,8 @@ double OneHotCost::GetBackwardCommCost(const std::vector&, const std // Return the per memory cost in the forward phase. The cost is calculated according to the bytes // this operator uses -double OneHotCost::GetForwardMemoryCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { +double OneHotCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t&) const { // In onehot's forward phase, the memory cost = slice(A) Shape input0_slice_shape = inputs[0].slice_shape(); return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); @@ -347,8 +348,8 @@ double OneHotCost::GetForwardMemoryCost(const std::vector& inputs, c // Return the per memory cost in the backward phase. 
The cost is calculated according to the bytes // this operator uses -double OneHotCost::GetBackwardMemoryCost(const std::vector&, const std::vector&, - const int32_t&) const { +double OneHotCost::GetBackwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const { return 0.0; } @@ -368,8 +369,9 @@ double SoftmaxCrossEntropyWithLogitsCost::GetBackwardCommCost(const std::vector< // Return the per memory cost in the forward phase. The cost is calculated according to the bytes // this operator uses -double SoftmaxCrossEntropyWithLogitsCost::GetForwardMemoryCost(const std::vector& inputs, - const std::vector&, const int32_t&) const { +double SoftmaxCrossEntropyWithLogitsCost::GetForwardComputationCost(const std::vector& inputs, + const std::vector&, + const int32_t&) const { // In forward phase, the memory cost = slice(A) + slice(B) Shape input0_slice_shape = inputs[0].slice_shape(); Shape input1_slice_shape = inputs[1].slice_shape(); @@ -380,8 +382,9 @@ double SoftmaxCrossEntropyWithLogitsCost::GetForwardMemoryCost(const std::vector // Return the per memory cost in the backward phase. The cost is calculated according to the bytes // this operator uses -double SoftmaxCrossEntropyWithLogitsCost::GetBackwardMemoryCost(const std::vector&, - const std::vector&, const int32_t&) const { +double SoftmaxCrossEntropyWithLogitsCost::GetBackwardComputationCost(const std::vector&, + const std::vector&, + const int32_t&) const { return 0.0; } @@ -409,8 +412,8 @@ double ReshapeCost::GetBackwardCommCost(const std::vector&, const st // Return the per memory cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses -double ReshapeCost::GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const { +double ReshapeCost::GetForwardComputationCost(const std::vector& inputs, + const std::vector& outputs, const int32_t& stage_id) const { CheckGlobalDeviceManager(); MS_EXCEPTION_IF_NULL(g_device_manager); RankList dev_list = g_device_manager->GetDeviceListByStageId(stage_id); @@ -421,26 +424,27 @@ double ReshapeCost::GetForwardMemoryCost(const std::vector& inputs, if (tensor_redistribution.ComputeCost() == FAILED) { MS_LOG(EXCEPTION) << "Failure: tensor_redistribution ComputeCost failed."; } - return (inputs_type_lengths_[0] * tensor_redistribution.mem_cost()); + return (inputs_type_lengths_[0] * tensor_redistribution.computation_cost()); } // Return the per memory cost in the backward phase. The cost is calculated according to the bytes // this operator uses -double ReshapeCost::GetBackwardMemoryCost(const std::vector&, - const std::vector&, const int32_t&) const { +double ReshapeCost::GetBackwardComputationCost(const std::vector&, + const std::vector&, + const int32_t&) const { return 0.0; } -double ArithmeticCost::GetForwardMemoryCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { +double ArithmeticCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t&) const { double result; result = ListProduct(inputs[0].slice_shape()) * static_cast(inputs_type_lengths_[0]) + ListProduct(inputs[1].slice_shape()) * static_cast(inputs_type_lengths_[1]); return result; } -double ArithmeticCost::GetBackwardMemoryCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { +double ArithmeticCost::GetBackwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t& stage_id) const { double result = 0.0; CheckGlobalDeviceManager(); MS_EXCEPTION_IF_NULL(g_device_manager); @@ 
-533,15 +537,15 @@ double L2NormalizeCost::GetBackwardCommCost(const std::vector& input return result; } -double L2NormalizeCost::GetForwardMemoryCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { +double L2NormalizeCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t&) const { TensorInfo input0_info = inputs[0]; Shape input0_slice_shape = input0_info.slice_shape(); return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); } -double L2NormalizeCost::GetBackwardMemoryCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { +double L2NormalizeCost::GetBackwardComputationCost(const std::vector& inputs, + const std::vector&, const int32_t& stage_id) const { double result = 0.0; if (is_parameter_[0]) { @@ -618,8 +622,9 @@ double ReduceMethodCost::GetBackwardCommCost(const std::vector& inpu return result; } -double ReduceMethodCost::GetForwardMemoryCost(const std::vector& inputs, - const std::vector& outputs, const int32_t& stage_id) const { +double ReduceMethodCost::GetForwardComputationCost(const std::vector& inputs, + const std::vector& outputs, + const int32_t& stage_id) const { double result = 0.0; TensorInfo input0 = inputs[0]; TensorInfo output0 = outputs[0]; @@ -640,8 +645,9 @@ double ReduceMethodCost::GetForwardMemoryCost(const std::vector& inp return result; } -double ReduceMeanCost::GetForwardMemoryCost(const std::vector& inputs, - const std::vector& outputs, const int32_t& stage_id) const { +double ReduceMeanCost::GetForwardComputationCost(const std::vector& inputs, + const std::vector& outputs, + const int32_t& stage_id) const { double result = 0.0; TensorInfo input0 = inputs[0]; TensorInfo output0 = outputs[0]; diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h index 9fb86d467e..8f0099bba3 100644 --- 
a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h @@ -65,12 +65,12 @@ class OperatorCost { virtual double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const = 0; // per device computation cost - virtual double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const = 0; - virtual double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const = 0; - virtual double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const = 0; + virtual double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const = 0; + virtual double GetForwardComputationCost(const std::vector& inputs, + const std::vector& outputs, const int32_t& stage_id) const = 0; + virtual double GetBackwardComputationCost(const std::vector& inputs, + const std::vector& outputs, const int32_t& stage_id) const = 0; protected: // for each input in 'inputs_', there is a bool variable indicating whether that the corresponding input is parameter @@ -96,14 +96,14 @@ class MatMulCost : public OperatorCost { const int32_t& stage_id) const override; // per device computation cost - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); - } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + 
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using MatMulCostPtr = std::shared_ptr; @@ -121,14 +121,14 @@ class ActivationCost : public OperatorCost { const int32_t& stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const override; - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); - } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using ActivationCostPtr = std::shared_ptr; @@ -146,14 +146,14 @@ class SoftmaxCost : public OperatorCost { const int32_t& stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const override; - double GetMemoryCost(const std::vector& inputs, const 
std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); - } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t&) const override; + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t&) const override; }; using SoftmaxCostPtr = std::shared_ptr; @@ -171,14 +171,14 @@ class TmpIdentityCost : public OperatorCost { const int32_t& stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const override; - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); - } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const 
std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using TmpIdentityCostPtr = std::shared_ptr; @@ -199,14 +199,14 @@ class BatchParallelCost : public OperatorCost { const int32_t&) const override { return 0.0; } - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using BatchParallelCostPtr = std::shared_ptr; @@ -227,16 +227,16 @@ class VirtualDatasetCost : public OperatorCost { const int32_t&) const override { return 0.0; } - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + 
GetBackwardComputationCost(inputs, outputs, stage_id); } - double GetForwardMemoryCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetForwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const override { return 0.0; } - double GetBackwardMemoryCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetBackwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const override { return 0.0; } }; @@ -259,18 +259,18 @@ class GeneratorBaseCost : public OperatorCost { const int32_t&) const override { return 0.0; } - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } // Inputs vector is empty for generator ops. - double GetForwardMemoryCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetForwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const override { return 0.0; } // Generator ops don't have backward steps. 
- double GetBackwardMemoryCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetBackwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const override { return 0.0; } }; @@ -292,14 +292,14 @@ class PReLUCost : public OperatorCost { const int32_t& stage_id) const override; // per device computation cost - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); - } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using PReLUCostPtr = std::shared_ptr; @@ -319,14 +319,14 @@ class OneHotCost : public OperatorCost { const int32_t& stage_id) const override; // per device computation cost - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); - } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& 
outputs, - const int32_t& stage_id) const override; + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using OneHotCostPtr = std::shared_ptr; @@ -346,14 +346,14 @@ class SoftmaxCrossEntropyWithLogitsCost : public OperatorCost { const int32_t& stage_id) const override; // per device computation cost - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); - } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using SoftmaxCrossEntropyWithLogitsCostPtr = std::shared_ptr; @@ -376,16 +376,16 @@ class ReshapeCost : public OperatorCost { const int32_t& stage_id) const override; // per device computation cost - double 
GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using ReshapeCostPtr = std::shared_ptr; @@ -405,14 +405,14 @@ class ArithmeticCost : public OperatorCost { double GetBackwardCommCost(const std::vector&, const std::vector&, const int32_t&) const override; - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetForwardComputationCost(const std::vector& 
inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using ArithmeticCostPtr = std::shared_ptr; @@ -431,14 +431,14 @@ class L2NormalizeCost : public OperatorCost { } double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const override; - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); - } - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using L2NormalizeCostPtr = std::shared_ptr; @@ -455,14 +455,14 @@ class ReduceMethodCost : public OperatorCost { const int32_t& stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const override; - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); - } - double 
GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardMemoryCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const override { return 0.0; } void set_cross_batch(bool cb) { cross_batch_ = cb; } @@ -477,8 +477,8 @@ class ReduceMeanCost : public ReduceMethodCost { ReduceMeanCost() = default; ~ReduceMeanCost() override = default; - double GetForwardMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; }; using ReduceMeanCostPtr = std::shared_ptr; @@ -499,18 +499,18 @@ class GetNextCost : public OperatorCost { const int32_t&) const override { return 0.0; } - double GetMemoryCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardMemoryCost(inputs, outputs, stage_id) + GetBackwardMemoryCost(inputs, outputs, stage_id); + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } // Inputs vector is empty for generator ops. 
- double GetForwardMemoryCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetForwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const override { return 0.0; } // Generator ops don't have backward steps. - double GetBackwardMemoryCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetBackwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const override { return 0.0; } }; diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc index ad6409be0a..2b02dc100d 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc @@ -592,10 +592,10 @@ Status MatMulBase::SetCostUnderStrategy(const mindspore::parallel::StrategyPtr& int32_t stage_id = strategy->GetInputStage(); // Here, we use the origin outputs_, because we only use the slice size of the output tensor. // It does not matter whether the output tensor is transposed or not. 
- double memory_cost = - matmulcost_ptr->GetForwardMemoryCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); + double computation_cost = + matmulcost_ptr->GetForwardComputationCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); double communication_cost = matmulcost_ptr->GetCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); - std::shared_ptr result = std::make_shared(memory_cost, communication_cost); + std::shared_ptr result = std::make_shared(computation_cost, communication_cost); result->communication_without_parameter_ = matmulcost_ptr->GetForwardCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); result->communication_with_partial_para_ = @@ -604,7 +604,7 @@ Status MatMulBase::SetCostUnderStrategy(const mindspore::parallel::StrategyPtr& // Breaking ties for preferring data parallelization BreakingTiesForPerferringDataParallel(strategy, result); - MS_LOG(DEBUG) << name_ << " : memory_cost: " << result->memory_cost_ + MS_LOG(DEBUG) << name_ << " : computation_cost: " << result->computation_cost_ << ", communication_cost: " << result->communication_cost_ << ", communication_without_parameter_: " << result->communication_without_parameter_ << ", communication_with_partial_para_: " << result->communication_with_partial_para_; diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc index 8b96425bf7..11c518d844 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc @@ -1034,9 +1034,10 @@ Status OperatorInfo::SetCostUnderStrategyBase(const StrategyPtr& strategy) { return FAILED; } int32_t stage_id = strategy->GetInputStage(); - double memory_cost = GetOperatorCost()->GetForwardMemoryCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); + double computation_cost = + GetOperatorCost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); double 
communication_cost = GetOperatorCost()->GetCommCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); - std::shared_ptr result = std::make_shared(memory_cost, communication_cost); + std::shared_ptr result = std::make_shared(computation_cost, communication_cost); result->communication_without_parameter_ = GetOperatorCost()->GetForwardCommCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); result->communication_with_partial_para_ = @@ -1056,22 +1057,6 @@ Status OperatorInfo::SetCostUnderStrategyBase(const StrategyPtr& strategy) { return SUCCESS; } -Status OperatorInfo::CorrectStrategyCostForMultiOutputUse(size_t input_index) { - for (auto& swc : strategy_cost_) { - double parameter_memory_cost = ListProduct(swc->inputs_ptr[input_index].slice_shape()) * - static_cast(GetOperatorCost()->inputs_type_lengths()[input_index]); - // remove the parameter memory cost - swc->cost_list[0]->memory_cost_ -= parameter_memory_cost; - if (swc->cost_list[0]->memory_cost_ < -1) { - MS_LOG(ERROR) << "The memory cost after correction is " << swc->cost_list[0]->memory_cost_ - << ", the parameter_memory_cost is " << parameter_memory_cost; - return FAILED; - } - } - corrected_input_indices_.push_back(input_index); - return SUCCESS; -} - int OperatorInfo::ComputeOpAndPrevEdgeParameterInvolved() { if (is_output_parameter_involve_ != -1) { return is_output_parameter_involve_; @@ -1217,7 +1202,7 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra CheckGlobalDeviceManager(); auto total_device_num = g_device_manager->GetDeviceListByStageId(stra->GetInputStage()).size(); if (IntToSize(stra->GetInputDim()[0][0]) == total_device_num) { - cost->memory_cost_ -= 1.0; + cost->computation_cost_ -= 1.0; cost->communication_cost_ -= 1.0; cost->communication_with_partial_para_ -= 1.0; cost->communication_without_parameter_ -= 1.0; @@ -1226,7 +1211,7 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra } double 
OperatorInfo::GetForwardMemoryCostFromCNode() { - return GetOperatorCost()->GetForwardMemoryCost(inputs_tensor_info_, outputs_tensor_info_, 0); + return GetOperatorCost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0); } } // namespace parallel diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.h b/mindspore/ccsrc/parallel/ops_info/operator_info.h index cc70f1b870..e7b8af0a7e 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.h +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.h @@ -87,13 +87,9 @@ class OperatorInfo { // is checked Status SetCostUnderStrategyBase(const StrategyPtr& strategy); std::vector> GetStrategyCost() { return strategy_cost_; } - // In the case of a Parameter (or a output) being used by multiple operators, the memory cost induced by - // the parameter (or a output) should be calculated only once. This method is used to - // remove this part from the 'strategy_cost_'. - Status CorrectStrategyCostForMultiOutputUse(size_t input_index); // When the input of a operator contains WEIGHT or a output from other operators involving WEIGHT, then these input // should stay in memory until it is used in the backward phase, which is kept in memory at the end of forward phase. 
- Status CorrectStrategyCostForMemoryReuse() const { return SUCCESS; } + Status CalculateMemoryCost() const { return SUCCESS; } int ComputeOpAndPrevEdgeParameterInvolved(); ForwardOp forward_op() const { return forward_op_; } diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index 50e6a1e84e..d7d48c35bb 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -387,7 +387,7 @@ OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr & operator_info->set_outputs_dtype(cnode->Type()); operator_info->set_cnode(cnode); // If no strategy has been configured for this operator, then candidate strategies are generated for - // auto-strategy searchingm if this primitive is Cast, we ignore the user-specified strategy + // auto-strategy searching; if this primitive is CAST, we ignore the user-specified strategy if (!StrategyFound(attrs) || prim->name() == CAST) { // Compute split_flag_list_, indicating which input has batch dimension. This is ONLY used for preparation for // BatchParallelInfo operator @@ -600,13 +600,7 @@ void ConstructCostGraphEdges(const std::vector &all_nodes) { } MS_LOG(INFO) << "Successfully created " << edge_count << " edges for: " << cnode->operator_info()->name(); } - // For the case of a output being used by multiple subsequent operators, the output induced memory cost should be - // calculated only once. This method is for correct the operators' memory cost calculation. 
- if (entire_costgraph->CorrectOpsStrategyCostForMultiOutputUse() != SUCCESS) { - MS_LOG(EXCEPTION) << "Correcting strategy_cost_ for operators failed."; - } else { - MS_LOG(INFO) << "Correcting strategy_cost_ for operators succeeded."; - } + MS_LOG(INFO) << "Constructing edges for cost graph ends."; } @@ -803,14 +797,6 @@ void AugmentCostGraph(const std::vector &all_nodes) { std::shared_ptr edge_ptr = std::make_shared( edge_name, tmp_identity_ptr, target_cnode->operator_info(), 0, input_index - 1, false, true); - // Correct the memory calculation for a parameter being used by multiple operators. The parameter is calculated - // only once - if (target_cnode->operator_info()->CorrectStrategyCostForMultiOutputUse(IntToSize(input_index - 1)) != SUCCESS) { - MS_LOG(EXCEPTION) << "Correcting strategy_cost_ failed : " << prim->name(); - } else { - MS_LOG(INFO) << "Correcting strategy_cost_ succeeded. " << prim->name(); - } - if (edge_ptr->InitEdgeCost() != SUCCESS) { MS_LOG(EXCEPTION) << "Edge cost initialization failed"; } @@ -840,7 +826,7 @@ Status ParallelStrategySearch(const std::vector &all_nodes, const Fu // taking care for the case of a single Parameter being used by multiple operators. Create a TmpIdentity // operator for this Parameter, and add an edge for the use of this Parameter by each // subsequent operator; - // Step 3.1: Correct the memory calculation for memory reuse + // Step 3.1: Calculate memory usage // Step 4: Run the Dynamic Programming algorithm: // in this process, cost is calculated based on not only the operators, but also the edges. 
Here, the edge // cost is caused by the redistribution of a operator's output tensor layout to the next operator's input @@ -867,14 +853,14 @@ Status ParallelStrategySearch(const std::vector &all_nodes, const Fu MS_LOG(INFO) << "After the augmenting procedure, there are " << entire_costgraph->GetOperators().size() << " operators, and " << entire_costgraph->GetNumPairs() << " edges."; - // Step 3.1: Correcting calculation for memory reuse + // Step 3.1: Calculate the memory usage if (entire_costgraph->ComputeOpsAndEdgesParameterInvolved() == SUCCESS) { - // Correcting operators' memory usage - if (entire_costgraph->CorrectOpsStrategyCostForMemoryReuse() != SUCCESS) { + // Calculate operators' memory usage + if (entire_costgraph->CalculateOpsMemoryCost() != SUCCESS) { MS_LOG(EXCEPTION) << "Correcting operators' cost for memory reuse failed."; } - // Correcting edges' memory usage - if (entire_costgraph->CorrectEdgesStrategyCostForMemoryReuse() != SUCCESS) { + // Calculate edges' memory usage + if (entire_costgraph->CalculateEdgesMemoryCost() != SUCCESS) { MS_LOG(EXCEPTION) << "Correcting edges' cost for memory reuse failed."; } } else { diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc index 93bda5da81..55e6a300e0 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc @@ -144,7 +144,7 @@ Status TensorRedistribution::ComputeCost() { MS_LOG(ERROR) << "Failure: InferTensorRedistribution failed"; return Status::FAILED; } - // Compute redistribution communication cost and memory cost + // Compute redistribution communication cost and computation cost for (auto& op_cost : operator_list_) { OperatorR op = op_cost.first; Shape slice_shape = op_cost.second; @@ -154,14 +154,14 @@ Status TensorRedistribution::ComputeCost() { if (str == PERMUTE_BY_AXIS) { // The shape does not change after 
PermuteByAxis operation. // communication cost = all_to_all + all_to_all = 2 * slice_shape - // memory cost = slice_shape + // computation cost = slice_shape forward_comm_cost_ += prod; backward_comm_cost_ += prod; comm_cost_ += 2.0 * prod; - mem_cost_ += prod; + computation_cost_ += prod; } else if (str == CONCAT_BY_AXIS) { // communication cost = all_gather + reduce_scatter = before_slice_shape + after_slice_shape - // memory cost = before_slice_shape + // computation cost = before_slice_shape if (op.second.size() < 3) { MS_LOG(ERROR) << "op.second size should not be less than 3!"; return Status::FAILED; @@ -173,22 +173,22 @@ Status TensorRedistribution::ComputeCost() { comm_cost_ += prod * (dev_num + 1.0); int32_t concat_dim = op.second[0]; if (concat_dim == 0) { - // memory cost = all_gather - mem_cost_ += prod; + // computation cost = all_gather + computation_cost_ += prod; } else { - // memory cost = all_gather + split + concat - mem_cost_ += (prod + prod * dev_num + prod * dev_num); + // computation cost = all_gather + split + concat + computation_cost_ += (prod + prod * dev_num + prod * dev_num); } } else { - // There is only memory cost in SplitByAxis. - // memory cost = before_slice_shape - mem_cost_ += prod; + // There is only computation cost in SplitByAxis. 
+ // computation cost = before_slice_shape + computation_cost_ += prod; } } if (reshape_flag()) { Shape prev_slice_shape = from_.slice_shape().array(); double prev_prod = std::accumulate(prev_slice_shape.begin(), prev_slice_shape.end(), 1, std::multiplies()); - mem_cost_ += 2.0 * prev_prod; + computation_cost_ += 2.0 * prev_prod; } return Status::SUCCESS; } diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h index 38fb5959ad..e933b9b8eb 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h @@ -41,7 +41,7 @@ class TensorRedistribution { comm_cost_(0.0), forward_comm_cost_(0.0), backward_comm_cost_(0.0), - mem_cost_(0.0), + computation_cost_(0.0), construct_op_flag_(construct_op_flag), keep_reshape_(keep_reshape) {} Status Init(const TensorLayout& from, const TensorLayout& to, const RankList& dev_list); @@ -51,7 +51,7 @@ class TensorRedistribution { bool reshape_flag() const { return reshape_flag_; } Status ComputeCost(); double comm_cost() const { return comm_cost_; } - double mem_cost() const { return mem_cost_; } + double computation_cost() const { return computation_cost_; } double forward_comm_cost() const { return forward_comm_cost_; } double backward_comm_cost() const { return backward_comm_cost_; } @@ -66,10 +66,13 @@ class TensorRedistribution { RankList dev_list_; OperatorList operator_list_; bool reshape_flag_; + // communication cost double comm_cost_; + // forward communication cost double forward_comm_cost_; + // backward communication cost double backward_comm_cost_; - double mem_cost_; + double computation_cost_; bool construct_op_flag_; bool keep_reshape_; }; diff --git a/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc b/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc index 83a9eceacc..415a1fdd55 100644 --- 
a/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc @@ -322,8 +322,8 @@ TEST_F(TestCostGraph, test_SelectCostListWithMinTrainingTimeMultiple) { auto ret_list = entire_cost_graph.SelectCostListWithMinTrainingTimeMultiple(all_list, memory); ASSERT_EQ(ret_list.size(), 2); - ASSERT_DOUBLE_EQ(ret_list[0]->memory_cost_, 10); - ASSERT_DOUBLE_EQ(ret_list[1]->memory_cost_, 1010); + ASSERT_DOUBLE_EQ(ret_list[0]->computation_cost_, 10); + ASSERT_DOUBLE_EQ(ret_list[1]->computation_cost_, 1010); } TEST_F(TestCostGraph, test_CheckOpElimination) { diff --git a/tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc b/tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc index 3bd65c049c..919c5b43ec 100644 --- a/tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/operator_costmodel_test.cc @@ -76,8 +76,8 @@ TEST_F(TestMatMulCost, test_CostGeneration) { mmcost_.SetInputAndOutputTypeLength(inputs_length, outputs_length); mmcost_.GetForwardCommCost(inputs, outputs, 0); mmcost_.GetBackwardCommCost(inputs, outputs, 0); - mmcost_.GetForwardMemoryCost(inputs, outputs, 0); - mmcost_.GetBackwardMemoryCost(inputs, outputs, 0); + mmcost_.GetForwardComputationCost(inputs, outputs, 0); + mmcost_.GetForwardComputationCost(inputs, outputs, 0); } class TestActivationCost : public UT::Common { @@ -128,8 +128,8 @@ TEST_F(TestActivationCost, test_CostGeneration) { std::vector inputs_length = {4, 4}; std::vector outputs_length = {4}; ac_cost_.SetInputAndOutputTypeLength(inputs_length, outputs_length); - ac_cost_.GetForwardMemoryCost(inputs, outputs, 0); - ac_cost_.GetBackwardMemoryCost(inputs, outputs, 0); + ac_cost_.GetForwardComputationCost(inputs, outputs, 0); + ac_cost_.GetBackwardComputationCost(inputs, outputs, 0); } class TestPReLUCost : public UT::Common { @@ -184,8 +184,8 @@ TEST_F(TestPReLUCost, test_CostGeneration) { 
prelu_cost_.SetInputAndOutputTypeLength(inputs_length, outputs_length); double BCC, FMC, GMC; BCC = prelu_cost_.GetBackwardCommCost(inputs, outputs, 0); - FMC = prelu_cost_.GetForwardMemoryCost(inputs, outputs, 0); - GMC = prelu_cost_.GetBackwardMemoryCost(inputs, outputs, 0); + FMC = prelu_cost_.GetForwardComputationCost(inputs, outputs, 0); + GMC = prelu_cost_.GetBackwardComputationCost(inputs, outputs, 0); ASSERT_EQ(BCC, 32 * 4); ASSERT_EQ(FMC, 8 * 32 * 8 * 8 * 4 + 32 * 4); ASSERT_EQ(GMC, 128); diff --git a/tests/ut/cpp/parallel/ops_info/activation_test.cc b/tests/ut/cpp/parallel/ops_info/activation_test.cc index 149aa9d5af..5d18c5372f 100644 --- a/tests/ut/cpp/parallel/ops_info/activation_test.cc +++ b/tests/ut/cpp/parallel/ops_info/activation_test.cc @@ -84,8 +84,8 @@ TEST_F(TestActivation, test_activation_strategies) { act_ptr_->InitForCostModel(sp); std::vector inputs_info = act_ptr_->inputs_tensor_info(); std::vector outputs_info = act_ptr_->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(act_ptr_->GetOperatorCost()->GetMemoryCost(inputs_info, outputs_info, sp->GetInputStage()), - cost.memory_cost_); + ASSERT_DOUBLE_EQ(act_ptr_->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + cost.computation_cost_); ASSERT_DOUBLE_EQ(act_ptr_->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), cost.communication_cost_); } @@ -109,8 +109,8 @@ TEST_F(TestActivation, test_softmax_strategies) { soft_ptr_->InitForCostModel(sp); std::vector inputs_info = soft_ptr_->inputs_tensor_info(); std::vector outputs_info = soft_ptr_->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(soft_ptr_->GetOperatorCost()->GetMemoryCost(inputs_info, outputs_info, sp->GetInputStage()), - cost.memory_cost_); + ASSERT_DOUBLE_EQ(soft_ptr_->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + cost.computation_cost_); ASSERT_DOUBLE_EQ(soft_ptr_->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, 
sp->GetInputStage()), cost.communication_cost_); } diff --git a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc index 978b792a0c..99ca9f8e0e 100644 --- a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc @@ -569,8 +569,8 @@ TEST_F(TestMatmulInfo, test_GenerateStrategies1) { matmul1->InitForCostModel(sp); std::vector inputs_info = matmul1->inputs_tensor_info(); std::vector outputs_info = matmul1->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(matmul1->GetOperatorCost()->GetMemoryCost(inputs_info, outputs_info, sp->GetInputStage()), - cost.memory_cost_); + ASSERT_DOUBLE_EQ(matmul1->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + cost.computation_cost_); break; } } @@ -599,8 +599,8 @@ TEST_F(TestMatmulInfo, test_GenerateStrategies2) { TensorInfo replica_input1_info(tly, input1_shape, input1_slice_shape); replica_inputs_info.push_back(replica_input1_info); - ASSERT_DOUBLE_EQ(matmul3->GetOperatorCost()->GetMemoryCost(replica_inputs_info, outputs_info, sp->GetInputStage()), - cost.memory_cost_); + ASSERT_DOUBLE_EQ(matmul3->GetOperatorCost()->GetComputationCost(replica_inputs_info, outputs_info, sp->GetInputStage()), + cost.computation_cost_); break; } } diff --git a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc index e7736a4b3e..6cb9739b1c 100644 --- a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc @@ -188,8 +188,8 @@ TEST_F(TestTensorAddInfo, GenerateStrategies) { tensor_add->InitForCostModel(sp); std::vector inputs_info = tensor_add->inputs_tensor_info(); std::vector outputs_info = tensor_add->outputs_tensor_info(); - double memory_cost0 = tensor_add->GetOperatorCost()->GetMemoryCost(inputs_info, outputs_info, sp->GetInputStage()); - double memory_cost1 = cost.memory_cost_; + double 
memory_cost0 = tensor_add->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); + double memory_cost1 = cost.computation_cost_; bool memory = memory_cost0 - memory_cost1 <= 1.0; double comm_cost0 = tensor_add->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()); @@ -210,8 +210,8 @@ TEST_F(TestTensorAddInfo, GenerateStrategies1) { tensor_add1->InitForCostModel(sp); std::vector inputs_info = tensor_add1->inputs_tensor_info(); std::vector outputs_info = tensor_add1->outputs_tensor_info(); - double memory_cost0 = tensor_add1->GetOperatorCost()->GetMemoryCost(inputs_info, outputs_info, sp->GetInputStage()); - double memory_cost1 = cost.memory_cost_; + double memory_cost0 = tensor_add1->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); + double memory_cost1 = cost.computation_cost_; bool memory = memory_cost0 - memory_cost1 <= 1.0; double comm_cost0 = tensor_add1->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()); diff --git a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc index ce1238baeb..043746498f 100644 --- a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc @@ -145,8 +145,8 @@ TEST_F(TestTmpIdentityInfo, test_generate_strategies) { identity_ptr->Init(sp); std::vector inputs_info = identity_ptr->inputs_tensor_info(); std::vector outputs_info = identity_ptr->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(identity_ptr->GetOperatorCost()->GetMemoryCost(inputs_info, outputs_info, sp->GetInputStage()), - cost.memory_cost_); + ASSERT_DOUBLE_EQ(identity_ptr->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + cost.computation_cost_); ASSERT_DOUBLE_EQ(identity_ptr->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), cost.communication_cost_); } From 
c5bfbc35569895721a48ed81dea9f0e55e158217 Mon Sep 17 00:00:00 2001 From: yoonlee666 Date: Wed, 8 Apr 2020 14:31:18 +0800 Subject: [PATCH 122/367] use TFRecordDataset in bert ci script and add absolute position embedding code in bert model --- mindspore/model_zoo/Bert_NEZHA/bert_model.py | 14 ++++++++++++++ .../networks/models/bert/bert_tdt_no_lossscale.py | 4 ++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/mindspore/model_zoo/Bert_NEZHA/bert_model.py b/mindspore/model_zoo/Bert_NEZHA/bert_model.py index f20c57dd75..d7f9355b3c 100644 --- a/mindspore/model_zoo/Bert_NEZHA/bert_model.py +++ b/mindspore/model_zoo/Bert_NEZHA/bert_model.py @@ -165,6 +165,7 @@ class EmbeddingPostprocessor(nn.Cell): def __init__(self, embedding_size, embedding_shape, + use_relative_positions=False, use_token_type=False, token_type_vocab_size=16, use_one_hot_embeddings=False, @@ -192,6 +193,13 @@ class EmbeddingPostprocessor(nn.Cell): self.layernorm = nn.LayerNorm(embedding_size) self.dropout = nn.Dropout(1 - dropout_prob) self.gather = P.GatherV2() + self.use_relative_positions = use_relative_positions + self.slice = P.Slice() + self.full_position_embeddings = Parameter(initializer + (TruncatedNormal(initializer_range), + [max_position_embeddings, + embedding_size]), + name='full_position_embeddings') def construct(self, token_type_ids, word_embeddings): output = word_embeddings @@ -206,6 +214,11 @@ class EmbeddingPostprocessor(nn.Cell): token_type_embeddings = self.gather(self.embedding_table, flat_ids, 0) token_type_embeddings = self.reshape(token_type_embeddings, self.shape) output += token_type_embeddings + if not self.use_relative_positions: + _, seq, width = self.shape + position_embeddings = self.slice(self.full_position_embeddings, [0, 0], [seq, width]) + position_embeddings = self.reshape(position_embeddings, (1, seq, width)) + output += position_embeddings output = self.layernorm(output) output = self.dropout(output) return output @@ -853,6 +866,7 @@ class 
BertModel(nn.Cell): self.bert_embedding_postprocessor = EmbeddingPostprocessor( embedding_size=self.embedding_size, embedding_shape=output_embedding_shape, + use_relative_positions=config.use_relative_positions, use_token_type=True, token_type_vocab_size=config.type_vocab_size, use_one_hot_embeddings=use_one_hot_embeddings, diff --git a/tests/st/networks/models/bert/bert_tdt_no_lossscale.py b/tests/st/networks/models/bert/bert_tdt_no_lossscale.py index 9cc11997e6..5b6268505b 100644 --- a/tests/st/networks/models/bert/bert_tdt_no_lossscale.py +++ b/tests/st/networks/models/bert/bert_tdt_no_lossscale.py @@ -103,9 +103,9 @@ def me_de_train_dataset(): """test me de train dataset""" # apply repeat operations repeat_count = 1 - ds = de.StorageDataset(DATA_DIR, SCHEMA_DIR, columns_list=["input_ids", "input_mask", "segment_ids", + ds = de.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", "masked_lm_positions", - "masked_lm_ids", "masked_lm_weights"]) + "masked_lm_ids", "masked_lm_weights"], shuffle=False) type_cast_op = C.TypeCast(mstype.int32) ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op) ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op) From fb4e84c0ee6105badd4c5efd92cc1bcd919ffa03 Mon Sep 17 00:00:00 2001 From: anzhengqi Date: Wed, 8 Apr 2020 14:47:16 +0800 Subject: [PATCH 123/367] modify part of comments --- .../ccsrc/dataset/kernels/image/image_utils.h | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/mindspore/ccsrc/dataset/kernels/image/image_utils.h b/mindspore/ccsrc/dataset/kernels/image/image_utils.h index d289f6f56e..a4ddef40d6 100644 --- a/mindspore/ccsrc/dataset/kernels/image/image_utils.h +++ b/mindspore/ccsrc/dataset/kernels/image/image_utils.h @@ -84,18 +84,8 @@ Status Resize(const std::shared_ptr &input, std::shared_ptr *out // Returns Decoded image // Supported images: -// - Windows bitmaps - \*.bmp, \*.dib 
(always supported) -// - JPEG files - \*.jpeg, \*.jpg, \*.jpe (see the *Note* section) -// - JPEG 2000 files - \*.jp2 (see the *Note* section) -// - Portable Network Graphics - \*.png (see the *Note* section) -// - WebP - \*.webp (see the *Note* section) -// - Portable image format - \*.pbm, \*.pgm, \*.ppm \*.pxm, \*.pnm (always supported) -// - PFM files - \*.pfm (see the *Note* section) -// - Sun rasters - \*.sr, \*.ras (always supported) -// - TIFF files - \*.tiff, \*.tif (see the *Note* section) -// - OpenEXR Image files - \*.exr (see the *Note* section) -// - Radiance HDR - \*.hdr, \*.pic (always supported) -// - Raster and Vector geospatial data supported by GDAL (see the *Note* section) +// BMP JPEG JPG PNG TIFF +// supported by opencv, if user need more image analysis capabilities, please compile opencv particularly. // @param input: CVTensor containing the not decoded image 1D bytes // @param output: Decoded image Tensor of shape and type DE_UINT8. Pixel order is RGB Status Decode(const std::shared_ptr &input, std::shared_ptr *output); From effdb483d6166f1440fe6c1d00302b3847ad994c Mon Sep 17 00:00:00 2001 From: jojobugfree Date: Tue, 7 Apr 2020 19:59:36 +0800 Subject: [PATCH 124/367] profiling feature enhancement --- .../device/ascend/ascend_stream_assign.cc | 2 +- .../ascend/profiling/profiling_manager.h | 4 - .../ascend/profiling/profiling_utils.cc | 229 ++++++++---------- .../device/ascend/profiling/profiling_utils.h | 93 ++++--- mindspore/ccsrc/device/kernel_adjust.cc | 31 +-- mindspore/ccsrc/device/kernel_adjust.h | 6 +- .../ascend/ascend_backend_optimization.cc | 2 +- mindspore/ccsrc/session/ascend_session.cc | 2 +- .../tasksink/ascend_stream_assign_stub.cc | 2 +- 9 files changed, 190 insertions(+), 181 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/ascend_stream_assign.cc b/mindspore/ccsrc/device/ascend/ascend_stream_assign.cc index 4f16c596c7..8c4d1f4a8f 100644 --- a/mindspore/ccsrc/device/ascend/ascend_stream_assign.cc +++
b/mindspore/ccsrc/device/ascend/ascend_stream_assign.cc @@ -702,7 +702,7 @@ void AscendStreamAssign::PrintGraphExeOrders(const shared_ptr(primitive->GetAttr(kAttrEventId)) << "]"; } else { - MS_LOG(INFO) << "node name[" << AnfAlgo::GetCNodeName(cur_cnode_ptr) << "], logic id[" + MS_LOG(INFO) << "node name[" << cur_cnode_ptr->fullname_with_scope() << "], logic id[" << AnfAlgo::GetStreamDistinctionLabel(cur_cnode_ptr.get()) << "], stream id[" << AnfAlgo::GetStreamId(cur_cnode_ptr) << "]"; } diff --git a/mindspore/ccsrc/device/ascend/profiling/profiling_manager.h b/mindspore/ccsrc/device/ascend/profiling/profiling_manager.h index de8f6a7d0a..b826c4cf36 100644 --- a/mindspore/ccsrc/device/ascend/profiling/profiling_manager.h +++ b/mindspore/ccsrc/device/ascend/profiling/profiling_manager.h @@ -29,10 +29,6 @@ namespace ascend { // PROFILING_CUSTOM_LOGID_START 3 const uint64_t kProfilingFpStartLogId = 1; const uint64_t kProfilingBpEndLogId = 2; -const uint64_t kProfilingAllReduce1Start = 3; -const uint64_t kProfilingAllReduce1End = 4; -const uint64_t kProfilingAllReduce2Start = 5; -const uint64_t kProfilingAllReduce2End = 6; const uint64_t kProfilingIterEndLogId = 255; class ProfilingEngineImpl; diff --git a/mindspore/ccsrc/device/ascend/profiling/profiling_utils.cc b/mindspore/ccsrc/device/ascend/profiling/profiling_utils.cc index 0d7088300e..aa71aa0566 100644 --- a/mindspore/ccsrc/device/ascend/profiling/profiling_utils.cc +++ b/mindspore/ccsrc/device/ascend/profiling/profiling_utils.cc @@ -14,10 +14,8 @@ * limitations under the License. 
*/ -#include "device/ascend/profiling/profiling_utils.h" - #include - +#include "device/ascend/profiling/profiling_utils.h" #include "kernel/kernel.h" #include "device/ascend/profiling/profiling_manager.h" #include "session/anf_runtime_algorithm.h" @@ -27,82 +25,61 @@ namespace mindspore { namespace device { namespace ascend { -const char ProfilingUtils::kProfiling[] = "Profiling"; -const char ProfilingUtils::kNotify[] = "notify"; -const char ProfilingUtils::kProfilerTraceId[] = "profiler_trace_id"; -const char ProfilingUtils::kFlags[] = "flags"; +constexpr uint32_t kMaxProfilingNodeNum = 100; +constexpr char kCustomNode[] = "PROFILING_CUSTOM_"; +constexpr char kFpStartNode[] = "PROFILING_FP_START"; +constexpr char kBpEndNode[] = "PROFILING_BP_END"; +constexpr char kIterEndNode[] = "PROFILING_ITER_END"; std::unordered_map> ProfilingUtils::graph_kernel_name_; -bool ProfilingUtils::GetProfilingTraceInfo(const std::shared_ptr &graph_ptr, - ProfilingTraceInfo *profiling_trace_info) { - MS_EXCEPTION_IF_NULL(profiling_trace_info); - MS_EXCEPTION_IF_NULL(graph_ptr); - bool find_begin = false; - bool first_allreduce = true; - for (const auto &anf_node : graph_ptr->execution_order()) { - if (anf_node->isa()) { - const std::string kernel_name = AnfAlgo::GetCNodeName(anf_node); - if ((kernel_name == "Cast" || kernel_name == "Four2Five") && !find_begin) { - profiling_trace_info->profiling_trace_begin = anf_node->fullname_with_scope(); - find_begin = true; - } - if (kernel_name == "Conv2DBackpropFilter") { - profiling_trace_info->profiling_trace_bp_end = anf_node->fullname_with_scope(); - } - if (kernel_name == kFusedMulApplyMomentumOpName || kernel_name == kApplyMomentumOpName) { - profiling_trace_info->profiling_trace_netoutput = anf_node->fullname_with_scope(); - } - if (kernel_name == kAllReduceOpName) { - if (first_allreduce) { - profiling_trace_info->profiling_allreduce1_start = anf_node->fullname_with_scope(); - profiling_trace_info->profiling_allreduce1_end = 
anf_node->fullname_with_scope(); - first_allreduce = false; - } else { - profiling_trace_info->profiling_allreduce2_start = anf_node->fullname_with_scope(); - profiling_trace_info->profiling_allreduce2_end = anf_node->fullname_with_scope(); - } - } +uint32_t ProfilingUtils::custom_node_index_ = 1; + +ProfilingTraceInfo ProfilingUtils::GetProfilingTraceFromEnv(NotNull graph_ptr) { + MS_LOG(INFO) << "get env start"; + custom_node_index_ = 1; + auto &cnode_exec_order = graph_ptr->execution_order(); + ProfilingTraceInfo profiling_trace; + profiling_trace.trace_begin = GetTraceBegin(cnode_exec_order); + profiling_trace.trace_bp_end = GetTraceBpEnd(); + profiling_trace.trace_netoutput = GetTraceNetoutput(cnode_exec_order); + + MS_LOG(INFO) << "[profiling] trace_begin:" << profiling_trace.trace_begin + << " trace_bp_end:" << profiling_trace.trace_bp_end + << " trace_netoutput:" << profiling_trace.trace_netoutput; + + for (uint32_t i = 1; i <= kMaxProfilingNodeNum; ++i) { + std::string env_str = std::string(kCustomNode) + std::to_string(i); + const char *node_full_name = std::getenv(env_str.c_str()); + if (node_full_name == nullptr) { + break; } + MS_LOG(INFO) << "Get profiling node:" << node_full_name; + profiling_trace.trace_custom_node.insert(node_full_name); } - MS_LOG(INFO) << "[profiling]begin:" << profiling_trace_info->profiling_trace_begin - << ", net_output:" << profiling_trace_info->profiling_trace_netoutput - << ", end:" << profiling_trace_info->profiling_trace_bp_end - << ", allreduce1:" << profiling_trace_info->profiling_allreduce1_start - << ", allreduce2:" << profiling_trace_info->profiling_allreduce2_start; - return profiling_trace_info->IsValid(); + MS_LOG(INFO) << "get env end"; + return profiling_trace; } -bool ProfilingUtils::GetNetOutput(AnfNodePtr anf_node, std::string *profiling_trace_net_output) { - MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(profiling_trace_net_output); - MS_LOG(INFO) << "[profiling]Anf node's full name with scope:" << 
anf_node->fullname_with_scope(); - if (!profiling_trace_net_output->empty()) { - MS_LOG(INFO) << "[profiling]Has got the net_output:" << profiling_trace_net_output->c_str(); - return true; - } - - if (AnfAlgo::IsRealKernel(anf_node)) { - *profiling_trace_net_output = anf_node->fullname_with_scope(); - return true; - } +std::string ProfilingUtils::GetTraceBegin(const std::vector &cnode_exec_order) { + const char *trace_begin = std::getenv(kFpStartNode); + auto &first_cnode = cnode_exec_order.front(); + MS_EXCEPTION_IF_NULL(first_cnode); + return trace_begin == nullptr ? first_cnode->fullname_with_scope() : std::string(trace_begin); +} - auto cnode = anf_node->cast(); - if (cnode == nullptr) { - MS_LOG(ERROR) << "[profiling]Anf node should be a CNode"; - return false; - } +std::string ProfilingUtils::GetTraceBpEnd() { + const char *trace_bp_end = std::getenv(kBpEndNode); + return trace_bp_end == nullptr ? "" : std::string(trace_bp_end); +} - auto inputs = cnode->inputs(); - auto input_size = inputs.size(); - if (input_size < 2) { - MS_LOG(ERROR) << "[profiling]Anf node' input size(" << input_size << ") < 2, don't support get apply kernel node."; - return false; - } - return GetNetOutput(inputs[1], profiling_trace_net_output); +std::string ProfilingUtils::GetTraceNetoutput(const std::vector &cnode_exec_order) { + const char *trace_netoutput = std::getenv(kIterEndNode); + auto &last_cnode = cnode_exec_order.back(); + MS_EXCEPTION_IF_NULL(last_cnode); + return trace_netoutput == nullptr ? 
last_cnode->fullname_with_scope() : std::string(trace_netoutput); } -CNodePtr ProfilingUtils::CreateProfilingCNode(const std::shared_ptr &graph_ptr, bool notify, - uint64_t profiler_trace_id, uint32_t flags) { - MS_EXCEPTION_IF_NULL(graph_ptr); +NotNull ProfilingUtils::CreateProfilingCNode(const ProfilingContent &profiling_content, + NotNull graph_ptr) { kernel::KernelBuildInfo::KernelBuildInfoBuilder selected_kernel_builder; selected_kernel_builder.SetInputsFormat({kOpFormat_DEFAULT, kOpFormat_DEFAULT}); selected_kernel_builder.SetInputsDeviceType({TypeId::kNumberTypeInt32, TypeId::kNumberTypeInt32}); @@ -118,75 +95,79 @@ CNodePtr ProfilingUtils::CreateProfilingCNode(const std::shared_ptrset_abstract(type_none_abstract); // set attr - ValuePtr notify_value = MakeValue(notify); - ValuePtr trace_id_value = MakeValue(profiler_trace_id); - ValuePtr flags_value = MakeValue(flags); + ValuePtr notify_value = MakeValue(profiling_content.notify); + ValuePtr trace_id_value = MakeValue(profiling_content.profiler_trace_id); + ValuePtr flags_value = MakeValue(profiling_content.flags); AnfAlgo::SetNodeAttr(ProfilingUtils::kNotify, notify_value, cnode_ptr); AnfAlgo::SetNodeAttr(ProfilingUtils::kProfilerTraceId, trace_id_value, cnode_ptr); AnfAlgo::SetNodeAttr(ProfilingUtils::kFlags, flags_value, cnode_ptr); - return cnode_ptr; + return NOT_NULL(cnode_ptr); } -void ProfilingUtils::ProfilingTraceFpStart(const std::shared_ptr &graph_ptr, - const mindspore::AnfNodePtr &anf_node, - const mindspore::device::ascend::ProfilingTraceInfo &profiling_trace_info, - std::vector *kernel_list) { - if (profiling_trace_info.IsValid() && profiling_trace_info.profiling_trace_begin == anf_node->fullname_with_scope()) { - if (graph_ptr == nullptr || kernel_list == nullptr || anf_node == nullptr) { - MS_LOG(ERROR) << "[profiling]input param invalid"; - return; - } +void ProfilingUtils::ProfilingTraceFpStart(const mindspore::AnfNodePtr &anf_node, + const ProfilingTraceInfo &profiling_trace_info, + 
NotNull graph_ptr, + NotNull *> kernel_list) { + if (profiling_trace_info.trace_begin == anf_node->fullname_with_scope()) { auto job_id = ProfilingManager::GetInstance().GetJobId(); - // job task info - CNodePtr job_kernel_ptr = CreateProfilingCNode(graph_ptr, false, job_id, 0); - AnfAlgo::SetStreamDistinctionLabel(AnfAlgo::GetStreamDistinctionLabel(anf_node.get()), job_kernel_ptr.get()); - AnfAlgo::SetStreamId(AnfAlgo::GetStreamId(anf_node), job_kernel_ptr.get()); - // fp task info - CNodePtr start_kernel_ptr = CreateProfilingCNode(graph_ptr, false, kProfilingFpStartLogId, 0); - AnfAlgo::SetStreamDistinctionLabel(AnfAlgo::GetStreamDistinctionLabel(anf_node.get()), start_kernel_ptr.get()); - AnfAlgo::SetStreamId(AnfAlgo::GetStreamId(anf_node), start_kernel_ptr.get()); - kernel_list->emplace_back(job_kernel_ptr); - kernel_list->emplace_back(start_kernel_ptr); + ProfilingContent job_profiling_context = {false, job_id, 0}; + auto job_profiling_node = CreateProfilingCNodeWithStream(anf_node, job_profiling_context, graph_ptr); + kernel_list->emplace_back(job_profiling_node); + + ProfilingContent fp_profiling_content = {false, kProfilingFpStartLogId, 0}; + auto fp_profiling_node = CreateProfilingCNodeWithStream(anf_node, fp_profiling_content, graph_ptr); + kernel_list->emplace_back(fp_profiling_node); } } -void ProfilingUtils::ProfilingAllReduce(const std::shared_ptr &graph_ptr, - const AnfNodePtr &anf_node, int job_id, const std::string &profiling_node_name, - std::vector *kernel_list) { - MS_EXCEPTION_IF_NULL(graph_ptr); +CNodePtr ProfilingUtils::CreateProfilingCNodeWithStream(const mindspore::AnfNodePtr &anf_node, + const ProfilingContent &profiling_content, + NotNull graph_ptr) { + CNodePtr profiling_node = CreateProfilingCNode(profiling_content, graph_ptr); + AnfAlgo::SetStreamDistinctionLabel(AnfAlgo::GetStreamDistinctionLabel(anf_node.get()), profiling_node.get()); + AnfAlgo::SetStreamId(AnfAlgo::GetStreamId(anf_node), profiling_node.get()); + return 
profiling_node; +} + +void ProfilingUtils::ProfilingCustomOp(const AnfNodePtr &anf_node, const ProfilingTraceInfo &profiling_trace_info, + NotNull graph_ptr, + NotNull *> kernel_list) { MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(kernel_list); - auto full_scope_name = anf_node->fullname_with_scope(); - if (profiling_node_name == full_scope_name) { - CNodePtr allreduce_kernel_ptr = CreateProfilingCNode(graph_ptr, false, job_id, 0); - AnfAlgo::SetStreamDistinctionLabel(AnfAlgo::GetStreamDistinctionLabel(anf_node.get()), allreduce_kernel_ptr.get()); - AnfAlgo::SetStreamId(AnfAlgo::GetStreamId(anf_node), allreduce_kernel_ptr.get()); - kernel_list->emplace_back(allreduce_kernel_ptr); + auto iter = profiling_trace_info.trace_custom_node.find(anf_node->fullname_with_scope()); + if (iter == profiling_trace_info.trace_custom_node.end()) { + return; } + // custom op profiling job start from 3. + ProfilingContent front_profiling_content = {false, 2 * custom_node_index_ + 1, 0}; + CNodePtr front_node = CreateProfilingCNodeWithStream(anf_node, front_profiling_content, graph_ptr); + kernel_list->insert(kernel_list->end() - 1, front_node); + + ProfilingContent back_profiling_content = {false, 2 * custom_node_index_ + 2, 0}; + CNodePtr back_node = CreateProfilingCNodeWithStream(anf_node, back_profiling_content, graph_ptr); + kernel_list->insert(kernel_list->end(), back_node); + ++custom_node_index_; } -void ProfilingUtils::ProfilingTraceEnd(const std::shared_ptr &graph_ptr, - const mindspore::AnfNodePtr &anf_node, - const mindspore::device::ascend::ProfilingTraceInfo &profiling_trace_info, - std::vector *kernel_list) { - MS_EXCEPTION_IF_NULL(graph_ptr); +void ProfilingUtils::ProfilingTraceBpEnd(const AnfNodePtr &anf_node, const ProfilingTraceInfo &profiling_trace_info, + NotNull graph_ptr, + NotNull *> kernel_list) { MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(kernel_list); - if (profiling_trace_info.IsValid()) { - auto full_scope_name = 
anf_node->fullname_with_scope(); - if (profiling_trace_info.profiling_trace_netoutput == full_scope_name) { - CNodePtr bp_kernel_ptr = CreateProfilingCNode(graph_ptr, true, kProfilingIterEndLogId, 0); - AnfAlgo::SetStreamDistinctionLabel(AnfAlgo::GetStreamDistinctionLabel(anf_node.get()), bp_kernel_ptr.get()); - AnfAlgo::SetStreamId(AnfAlgo::GetStreamId(anf_node), bp_kernel_ptr.get()); - kernel_list->emplace_back(bp_kernel_ptr); - } + if (profiling_trace_info.trace_bp_end == anf_node->fullname_with_scope()) { + ProfilingContent bp_end_profiling_content = {false, kProfilingBpEndLogId, 0}; + CNodePtr bp_end_node = CreateProfilingCNodeWithStream(anf_node, bp_end_profiling_content, graph_ptr); + kernel_list->emplace_back(bp_end_node); + } +} - if (profiling_trace_info.profiling_trace_bp_end == full_scope_name) { - CNodePtr end_task_info = CreateProfilingCNode(graph_ptr, false, kProfilingBpEndLogId, 0); - AnfAlgo::SetStreamDistinctionLabel(AnfAlgo::GetStreamDistinctionLabel(anf_node.get()), end_task_info.get()); - AnfAlgo::SetStreamId(AnfAlgo::GetStreamId(anf_node), end_task_info.get()); - kernel_list->emplace_back(end_task_info); - } +void ProfilingUtils::ProfilingTraceEnd(const AnfNodePtr &anf_node, const ProfilingTraceInfo &profiling_trace_info, + NotNull graph_ptr, + NotNull *> kernel_list) { + MS_EXCEPTION_IF_NULL(anf_node); + auto full_scope_name = anf_node->fullname_with_scope(); + if (profiling_trace_info.trace_netoutput == full_scope_name) { + ProfilingContent bp_end_profiling_content = {true, kProfilingIterEndLogId, 0}; + CNodePtr bp_kernel_ptr = CreateProfilingCNodeWithStream(anf_node, bp_end_profiling_content, graph_ptr); + kernel_list->emplace_back(bp_kernel_ptr); } } diff --git a/mindspore/ccsrc/device/ascend/profiling/profiling_utils.h b/mindspore/ccsrc/device/ascend/profiling/profiling_utils.h index ca0ef6f1f0..c59e856249 100644 --- a/mindspore/ccsrc/device/ascend/profiling/profiling_utils.h +++ b/mindspore/ccsrc/device/ascend/profiling/profiling_utils.h 
@@ -19,63 +19,102 @@ #include #include #include +#include #include #include "session/kernel_graph.h" +#include "utils/contract.h" namespace mindspore { namespace device { namespace ascend { struct ProfilingTraceInfo { // execute order's first execute op(like: Cast or Four2Five ...), except tdt op(GetNext ...) - std::string profiling_trace_begin; + std::string trace_begin; // get first net_output(apply kernel) from graph outputs: fp ->net_output<- bp - std::string profiling_trace_bp_end; + std::string trace_bp_end; // execute order's end execute (like: Conv2DBackpropFilter) - std::string profiling_trace_netoutput; + std::string trace_netoutput; - std::string profiling_allreduce1_start; - - std::string profiling_allreduce1_end; - - std::string profiling_allreduce2_start; - - std::string profiling_allreduce2_end; + // profiling specific op, such as AllReduce; + std::set trace_custom_node; // 1. insert profiling_trace_begin if profiling_trace_bp_end is not empty. // 2. op lanuch get task info with callback func. // 3. insert profiling_trace_bp_end. // 4. insert profiling_trace_net_output if profiling_trace_bp_end is not empty. 
- bool IsValid() const { return !(profiling_trace_begin.empty() || profiling_trace_bp_end.empty()); } + bool IsValid() const { return !(trace_begin.empty() || trace_bp_end.empty() || trace_netoutput.empty()); } +}; + +struct ProfilingContent { + // true -send data from device to host and finish profiling + bool notify; + uint64_t profiler_trace_id; + uint32_t flags; }; class ProfilingUtils { public: ProfilingUtils() = default; ~ProfilingUtils() = default; - static bool GetProfilingTraceInfo(const std::shared_ptr &graph_ptr, - ProfilingTraceInfo *profiling_trace_info); - static void ProfilingTraceFpStart(const std::shared_ptr &graph_ptr, const AnfNodePtr &anf_node, - const ProfilingTraceInfo &profiling_trace_info, std::vector *kernel_list); - static void ProfilingAllReduce(const std::shared_ptr &graph_ptr, const AnfNodePtr &anf_node, - int job_id, const std::string &profiling_node_name, - std::vector *kernel_list); - static void ProfilingTraceEnd(const std::shared_ptr &graph_ptr, const AnfNodePtr &anf_node, - const ProfilingTraceInfo &profiling_trace_info, std::vector *kernel_list); + + // Insert job_id profiling node and fp_start profiling node. + // Job_id is got from envs, which should be a number greater than 255 + // Fp_start node should be inserted in the start of a network, and the log_id is hard code to 1. + static void ProfilingTraceFpStart(const AnfNodePtr &anf_node, const ProfilingTraceInfo &profiling_trace_info, + NotNull graph_ptr, + NotNull *> kernel_list); + + // Insert net output profiling node, which tells the device to stop profiling. + // The notify in struct ProfilingContent should be 'true', which tells the device to send data to host. + static void ProfilingTraceEnd(const AnfNodePtr &anf_node, const ProfilingTraceInfo &profiling_trace_info, + NotNull graph_ptr, + NotNull *> kernel_list); + + // Insert bp_end profiling node, which should be inserted after the last backpropagation CNode in the network.
+ static void ProfilingTraceBpEnd(const mindspore::AnfNodePtr &anf_node, const ProfilingTraceInfo &profiling_trace_info, + NotNull graph_ptr, + NotNull *> kernel_list); + + // Mapping graph id and the kernels' name in the graph static void SetGraphKernelName(uint32_t graph_id, const std::vector &kernel_names); + + // Mapping task_id and kernel name for device to generate the time cost of specific kernel. + // Device calculate the time cost of the task which is marked by task id. + // But we need data of (kernel name , time cost) static void ReportProfilingData(uint32_t graph_id, const std::vector &task_ids); - static const char kProfiling[]; - static const char kNotify[]; - static const char kProfilerTraceId[]; - static const char kFlags[]; + // Get profiling trace point from envs. + // export PROFILING_FP_START='full name of the first cnode to execute' + // export PROFILING_BP_END='full name of the last backpropagation cnode to execute' + // export PROFILING_ITER_END='full name of last cnode in graph to execute' + // And other cnode, like AllReduce, export PROFILING_CUSTOM_1='full name of AllReduce cnode' + // GetNext, export PROFILING_CUSTOM_2='full name of GetNext cnode' + // The variable i in PROFILING_CUSTOM_i should start from 1 without interruption.
+ static ProfilingTraceInfo GetProfilingTraceFromEnv(NotNull graph_ptr); + + // Insert two profiling trace points, one in front and one behind + static void ProfilingCustomOp(const mindspore::AnfNodePtr &anf_node, const ProfilingTraceInfo &profiling_trace_info, + NotNull graph_ptr, + NotNull *> kernel_list); + + inline static constexpr char kProfiling[] = "Profiling"; + inline static constexpr char kNotify[] = "notify"; + inline static constexpr char kProfilerTraceId[] = "profiler_trace_id"; + inline static constexpr char kFlags[] = "flags"; private: - static bool GetNetOutput(AnfNodePtr anf_node, std::string *profiling_trace_net_output); - static CNodePtr CreateProfilingCNode(const std::shared_ptr &graph_ptr, bool notify, - uint64_t profiler_trace_id, uint32_t flags); + static NotNull CreateProfilingCNode(const ProfilingContent &profiling_content, + NotNull graph_ptr); + static CNodePtr CreateProfilingCNodeWithStream(const AnfNodePtr &anf_node, const ProfilingContent &profiling_content, + NotNull graph_ptr); + static std::string GetTraceBegin(const std::vector &cnode_exec_order); + static std::string GetTraceBpEnd(); + static std::string GetTraceNetoutput(const std::vector &cnode_exec_order); + // graph id --> (kernel name list) static std::unordered_map> graph_kernel_name_; + static uint32_t custom_node_index_; }; } // namespace ascend } // namespace device diff --git a/mindspore/ccsrc/device/kernel_adjust.cc b/mindspore/ccsrc/device/kernel_adjust.cc index c422d50b51..9a6f48025f 100644 --- a/mindspore/ccsrc/device/kernel_adjust.cc +++ b/mindspore/ccsrc/device/kernel_adjust.cc @@ -438,23 +438,22 @@ void KernelAdjust::LoadSwitchInputs(std::vector *inputs) { MS_LOG(INFO) << "---------------- LoadSwitchInputs End--"; } -void KernelAdjust::Profiling(const std::shared_ptr &kernel_graph_ptr) { +void KernelAdjust::Profiling(NotNull kernel_graph_ptr) { if (!ascend::ProfilingManager::GetInstance().IsProfiling()) { MS_LOG(INFO) << "No need to profiling"; return; } - 
ProfilingTraceInfo profiling_trace_info; - if (ProfilingUtils::GetProfilingTraceInfo(kernel_graph_ptr, &profiling_trace_info)) { - InsertProfilingKernel(kernel_graph_ptr, profiling_trace_info); - } else { - MS_LOG(WARNING) << "[profiling] GetProfilingTraceInfo failed"; + ProfilingTraceInfo profiling_trace_info = ProfilingUtils::GetProfilingTraceFromEnv(kernel_graph_ptr); + if (!profiling_trace_info.IsValid()) { + MS_LOG(WARNING) << "[profiling] no profiling node found!"; + return; } + InsertProfilingKernel(profiling_trace_info, kernel_graph_ptr); } -void KernelAdjust::InsertProfilingKernel(const std::shared_ptr &kernel_graph_ptr, - const ProfilingTraceInfo &profiling_trace_info) { +void KernelAdjust::InsertProfilingKernel(const ProfilingTraceInfo &profiling_trace_info, + NotNull kernel_graph_ptr) { MS_LOG(INFO) << "[profiling] Insert profiling kernel start"; - MS_EXCEPTION_IF_NULL(kernel_graph_ptr); if (!profiling_trace_info.IsValid()) { MS_LOG(WARNING) << "Profiling trace point not found"; return; @@ -462,18 +461,12 @@ void KernelAdjust::InsertProfilingKernel(const std::shared_ptr new_cnode_list; std::vector cnode_ptr_list = kernel_graph_ptr->execution_order(); for (const auto &cnode_ptr : cnode_ptr_list) { - ProfilingUtils::ProfilingTraceFpStart(kernel_graph_ptr, cnode_ptr, profiling_trace_info, &new_cnode_list); - ProfilingUtils::ProfilingAllReduce(kernel_graph_ptr, cnode_ptr, ascend::kProfilingAllReduce1Start, - profiling_trace_info.profiling_allreduce1_start, &new_cnode_list); - ProfilingUtils::ProfilingAllReduce(kernel_graph_ptr, cnode_ptr, ascend::kProfilingAllReduce2Start, - profiling_trace_info.profiling_allreduce2_start, &new_cnode_list); + ProfilingUtils::ProfilingTraceFpStart(cnode_ptr, profiling_trace_info, kernel_graph_ptr, NOT_NULL(&new_cnode_list)); new_cnode_list.emplace_back(cnode_ptr); - ProfilingUtils::ProfilingAllReduce(kernel_graph_ptr, cnode_ptr, ascend::kProfilingAllReduce1End, - profiling_trace_info.profiling_allreduce1_end, 
&new_cnode_list); - ProfilingUtils::ProfilingAllReduce(kernel_graph_ptr, cnode_ptr, ascend::kProfilingAllReduce2End, - profiling_trace_info.profiling_allreduce2_end, &new_cnode_list); - ProfilingUtils::ProfilingTraceEnd(kernel_graph_ptr, cnode_ptr, profiling_trace_info, &new_cnode_list); + ProfilingUtils::ProfilingCustomOp(cnode_ptr, profiling_trace_info, kernel_graph_ptr, NOT_NULL(&new_cnode_list)); + ProfilingUtils::ProfilingTraceBpEnd(cnode_ptr, profiling_trace_info, kernel_graph_ptr, NOT_NULL(&new_cnode_list)); + ProfilingUtils::ProfilingTraceEnd(cnode_ptr, profiling_trace_info, kernel_graph_ptr, NOT_NULL(&new_cnode_list)); } kernel_graph_ptr->set_execution_order(new_cnode_list); } diff --git a/mindspore/ccsrc/device/kernel_adjust.h b/mindspore/ccsrc/device/kernel_adjust.h index 62c64d98b9..ca01d51e54 100644 --- a/mindspore/ccsrc/device/kernel_adjust.h +++ b/mindspore/ccsrc/device/kernel_adjust.h @@ -48,7 +48,7 @@ class KernelAdjust { void SetStreamSwitchOps(const std::shared_ptr &kernel_graph_ptr); bool StepLoadCtrlInputs(const std::shared_ptr &context, const std::shared_ptr &kernel_graph_ptr); - void Profiling(const std::shared_ptr &kernel_graph_ptr); + void Profiling(NotNull kernel_graph_ptr); static bool NeedInsertSwitch(); CNodePtr CreateSteamActiveOp(const std::shared_ptr &kernel_graph_ptr); @@ -66,8 +66,8 @@ class KernelAdjust { kernel::KernelBuildInfo::KernelBuildInfoBuilder CreateMngKernelBuilder(const std::vector &formats, const std::vector &type_ids); void LoadSwitchInputs(std::vector *inputs); - void InsertProfilingKernel(const std::shared_ptr &kernel_graph_ptr, - const ProfilingTraceInfo &profiling_trace_info); + void InsertProfilingKernel(const ProfilingTraceInfo &profiling_trace_info, + NotNull kernel_graph_ptr); }; } // namespace device } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc index 8212d64c27..432d88e7a4 100644 
--- a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc @@ -246,7 +246,7 @@ void AscendBackendOptimization(const std::shared_ptr &kern kernel_graph->SetExecOrderByDefault(); if (save_graphs) { std::string file_path = save_graphs_path + "/" + "hwopt_d_end.ir"; - DumpIR(file_path, kernel_graph); + DumpIR(file_path, kernel_graph, true); DumpIRProto(kernel_graph, "after_hwopt"); } } diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc index 9d351f3199..34c05aed08 100644 --- a/mindspore/ccsrc/session/ascend_session.cc +++ b/mindspore/ccsrc/session/ascend_session.cc @@ -136,7 +136,7 @@ void AscendSession::BuildGraph(GraphId graph_id) { // Assign streams for control sink and hccl and so on AssignStream(graph); - device::KernelAdjust::GetInstance().Profiling(graph); + device::KernelAdjust::GetInstance().Profiling(NOT_NULL(graph.get())); // build kernel if node is cnode BuildKernel(graph); auto ms_context = MsContext::GetInstance(); diff --git a/tests/ut/cpp/stub/tasksink/ascend_stream_assign_stub.cc b/tests/ut/cpp/stub/tasksink/ascend_stream_assign_stub.cc index ebd2ac8b46..e0b5ab0d61 100755 --- a/tests/ut/cpp/stub/tasksink/ascend_stream_assign_stub.cc +++ b/tests/ut/cpp/stub/tasksink/ascend_stream_assign_stub.cc @@ -42,6 +42,6 @@ bool KernelAdjust::StepLoadCtrlInputs(const std::shared_ptr &c return true; } bool KernelAdjust::NeedInsertSwitch() { return true; } -void KernelAdjust::Profiling(const std::shared_ptr &kernel_graph_ptr) { return; } +void KernelAdjust::Profiling(NotNull kernel_graph_ptr) { return; } } // namespace device } // namespace mindspore From 20d1b6444353e806ad37989f7a7c8b2c10418536 Mon Sep 17 00:00:00 2001 From: jonyguo Date: Wed, 8 Apr 2020 15:52:53 +0800 Subject: [PATCH 125/367] fix: error info is not exactly when column list invalid --- .../engine/datasetops/source/mindrecord_op.cc | 3 +- 
.../engine/datasetops/source/mindrecord_op.h | 1 + .../ccsrc/mindrecord/common/shard_error.cc | 178 ++++++++++++++++++ .../ccsrc/mindrecord/include/shard_error.h | 102 +++++----- mindspore/ccsrc/mindrecord/io/shard_reader.cc | 2 +- tests/ut/cpp/dataset/mind_record_op_test.cc | 36 ++++ .../ut/cpp/mindrecord/ut_shard_reader_test.cc | 2 +- 7 files changed, 271 insertions(+), 53 deletions(-) create mode 100644 mindspore/ccsrc/mindrecord/common/shard_error.cc diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc index b5bea5416c..cb0f135a0d 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.cc @@ -103,7 +103,8 @@ Status MindRecordOp::Init() { shard_reader_ = mindspore::make_unique(); auto rc = shard_reader_->Open(dataset_file_, num_mind_record_workers_, columns_to_load_, operators_, block_reader_); - CHECK_FAIL_RETURN_UNEXPECTED(rc != MSRStatus::FAILED, "MindRecordOp init failed."); + CHECK_FAIL_RETURN_UNEXPECTED(rc != MSRStatus::FAILED, + "MindRecordOp init failed. 
Error message: " + ErrnoToMessage(rc)); data_schema_ = mindspore::make_unique(); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h index 0b16391b20..aca5c86c2c 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/mindrecord_op.h @@ -32,6 +32,7 @@ #include "dataset/engine/datasetops/source/io_block.h" #include "dataset/util/queue.h" #include "dataset/util/status.h" +#include "mindrecord/include/shard_error.h" #include "mindrecord/include/shard_reader.h" #include "mindrecord/include/common/shard_utils.h" #include "dataset/util/wait_post.h" diff --git a/mindspore/ccsrc/mindrecord/common/shard_error.cc b/mindspore/ccsrc/mindrecord/common/shard_error.cc new file mode 100644 index 0000000000..cf43dcb315 --- /dev/null +++ b/mindspore/ccsrc/mindrecord/common/shard_error.cc @@ -0,0 +1,178 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "mindrecord/include/shard_error.h" + +namespace mindspore { +namespace mindrecord { +std::string ErrnoToMessage(MSRStatus status) { + switch (status) { + case FAILED: + return "operator failed"; + break; + case SUCCESS: + return "operator success"; + break; + case OPEN_FILE_FAILED: + return "open file failed"; + break; + case CLOSE_FILE_FAILED: + return "close file failed"; + break; + case WRITE_METADATA_FAILED: + return "write metadata failed"; + break; + case WRITE_RAWDATA_FAILED: + return "write rawdata failed"; + break; + case GET_SCHEMA_FAILED: + return "get schema failed"; + break; + case ILLEGAL_RAWDATA: + return "illegal raw data"; + break; + case PYTHON_TO_JSON_FAILED: + return "pybind: python object to json failed"; + break; + case DIR_CREATE_FAILED: + return "directory create failed"; + break; + case OPEN_DIR_FAILED: + return "open directory failed"; + break; + case INVALID_STATISTICS: + return "invalid statistics object"; + break; + case OPEN_DATABASE_FAILED: + return "open database failed"; + break; + case CLOSE_DATABASE_FAILED: + return "close database failed"; + break; + case DATABASE_OPERATE_FAILED: + return "database operate failed"; + break; + case BUILD_SCHEMA_FAILED: + return "build schema failed"; + break; + case DIVISOR_IS_ILLEGAL: + return "divisor is illegal"; + break; + case INVALID_FILE_PATH: + return "file path is invalid"; + break; + case SECURE_FUNC_FAILED: + return "secure function failed"; + break; + case ALLOCATE_MEM_FAILED: + return "allocate memory failed"; + break; + case ILLEGAL_FIELD_NAME: + return "illegal field name"; + break; + case ILLEGAL_FIELD_TYPE: + return "illegal field type"; + break; + case SET_METADATA_FAILED: + return "set metadata failed"; + break; + case ILLEGAL_SCHEMA_DEFINITION: + return "illegal schema definition"; + break; + case ILLEGAL_COLUMN_LIST: + return "illegal column list"; + break; + case SQL_ERROR: + return "sql error"; + break; + case ILLEGAL_SHARD_COUNT: + return "illegal shard 
count"; + break; + case ILLEGAL_SCHEMA_COUNT: + return "illegal schema count"; + break; + case VERSION_ERROR: + return "data version is not matched"; + break; + case ADD_SCHEMA_FAILED: + return "add schema failed"; + break; + case ILLEGAL_Header_SIZE: + return "illegal header size"; + break; + case ILLEGAL_Page_SIZE: + return "illegal page size"; + break; + case ILLEGAL_SIZE_VALUE: + return "illegal size value"; + break; + case INDEX_FIELD_ERROR: + return "add index fields failed"; + break; + case GET_CANDIDATE_CATEGORYFIELDS_FAILED: + return "get candidate category fields failed"; + break; + case GET_CATEGORY_INFO_FAILED: + return "get category information failed"; + break; + case ILLEGAL_CATEGORY_ID: + return "illegal category id"; + break; + case ILLEGAL_ROWNUMBER_OF_PAGE: + return "illegal row number of page"; + break; + case ILLEGAL_SCHEMA_ID: + return "illegal schema id"; + break; + case DESERIALIZE_SCHEMA_FAILED: + return "deserialize schema failed"; + break; + case DESERIALIZE_STATISTICS_FAILED: + return "deserialize statistics failed"; + break; + case ILLEGAL_DB_FILE: + return "illegal db file"; + break; + case OVERWRITE_DB_FILE: + return "overwrite db file"; + break; + case OVERWRITE_MINDRECORD_FILE: + return "overwrite mindrecord file"; + break; + case ILLEGAL_MINDRECORD_FILE: + return "illegal mindrecord file"; + break; + case PARSE_JSON_FAILED: + return "parse json failed"; + break; + case ILLEGAL_PARAMETERS: + return "illegal parameters"; + break; + case GET_PAGE_BY_GROUP_ID_FAILED: + return "get page by group id failed"; + break; + case GET_SYSTEM_STATE_FAILED: + return "get system state failed"; + break; + case IO_FAILED: + return "io operate failed"; + break; + default: + return "invalid error no"; + } +} +} // namespace mindrecord +} // namespace mindspore diff --git a/mindspore/ccsrc/mindrecord/include/shard_error.h b/mindspore/ccsrc/mindrecord/include/shard_error.h index 026ee836e3..b85eeb71c0 100644 --- 
a/mindspore/ccsrc/mindrecord/include/shard_error.h +++ b/mindspore/ccsrc/mindrecord/include/shard_error.h @@ -18,63 +18,65 @@ #define MINDRECORD_INCLUDE_SHARD_ERROR_H_ #include -#include "utils/error_code.h" +#include namespace mindspore { namespace mindrecord { -DE_ERRORNO_MINDRECORD(OPEN_FILE_FAILED, 0, "open file failed"); -DE_ERRORNO_MINDRECORD(CLOSE_FILE_FAILED, 1, "close file failed"); -DE_ERRORNO_MINDRECORD(WRITE_METADATA_FAILED, 2, "write metadata failed"); -DE_ERRORNO_MINDRECORD(WRITE_RAWDATA_FAILED, 3, "write rawdata failed"); -DE_ERRORNO_MINDRECORD(GET_SCHEMA_FAILED, 4, "get schema failed"); -DE_ERRORNO_MINDRECORD(ILLEGAL_RAWDATA, 5, "illegal raw data"); -DE_ERRORNO_MINDRECORD(PYTHON_TO_JSON_FAILED, 6, "pybind: python object to json failed"); -DE_ERRORNO_MINDRECORD(DIR_CREATE_FAILED, 7, "directory create failed"); -DE_ERRORNO_MINDRECORD(OPEN_DIR_FAILED, 8, "open directory failed"); -DE_ERRORNO_MINDRECORD(INVALID_STATISTICS, 9, "invalid statistics object"); -DE_ERRORNO_MINDRECORD(OPEN_DATABASE_FAILED, 10, "open database failed"); -DE_ERRORNO_MINDRECORD(CLOSE_DATABASE_FAILED, 11, "close database failed"); -DE_ERRORNO_MINDRECORD(DATABASE_OPERATE_FAILED, 12, "database operate failed"); -DE_ERRORNO_MINDRECORD(BUILD_SCHEMA_FAILED, 13, "build schema failed"); -DE_ERRORNO_MINDRECORD(DIVISOR_IS_ILLEGAL, 14, "divisor is illegal"); -DE_ERRORNO_MINDRECORD(INVALID_FILE_PATH, 15, "file path is invalid"); -DE_ERRORNO_MINDRECORD(SECURE_FUNC_FAILED, 16, "secure function failed"); -DE_ERRORNO_MINDRECORD(ALLOCATE_MEM_FAILED, 17, "allocate memory failed"); -DE_ERRORNO_MINDRECORD(ILLEGAL_FIELD_NAME, 18, "illegal field name"); -DE_ERRORNO_MINDRECORD(ILLEGAL_FIELD_TYPE, 19, "illegal field type"); -DE_ERRORNO_MINDRECORD(SET_METADATA_FAILED, 20, "set metadata failed"); -DE_ERRORNO_MINDRECORD(ILLEGAL_SCHEMA_DEFINITION, 21, "illegal schema definition"); -DE_ERRORNO_MINDRECORD(ILLEGAL_COLUMN_LIST, 22, "illegal column list"); -DE_ERRORNO_MINDRECORD(SQL_ERROR, 23, "sql error"); 
-DE_ERRORNO_MINDRECORD(ILLEGAL_SHARD_COUNT, 24, "illegal shard count"); -DE_ERRORNO_MINDRECORD(ILLEGAL_SCHEMA_COUNT, 25, "illegal schema count"); -DE_ERRORNO_MINDRECORD(VERSION_ERROR, 26, "data version is not matched"); -DE_ERRORNO_MINDRECORD(ADD_SCHEMA_FAILED, 27, "add schema failed"); -DE_ERRORNO_MINDRECORD(ILLEGAL_Header_SIZE, 28, "illegal header size"); -DE_ERRORNO_MINDRECORD(ILLEGAL_Page_SIZE, 29, "illegal page size"); -DE_ERRORNO_MINDRECORD(ILLEGAL_SIZE_VALUE, 30, "illegal size value"); -DE_ERRORNO_MINDRECORD(INDEX_FIELD_FAILED, 31, "add index fields failed"); -DE_ERRORNO_MINDRECORD(GET_CANDIDATE_CATEGORYFIELDS_FAILED, 32, "get candidate categoryFields failed"); -DE_ERRORNO_MINDRECORD(GET_CATEGORY_INFO, 33, "get category information failed"); -DE_ERRORNO_MINDRECORD(ILLEGAL_CATEGORY_ID, 34, "illegal category id"); -DE_ERRORNO_MINDRECORD(ILLEGAL_ROWNUMBER_OF_PAGE, 35, "illegal row number of page"); -DE_ERRORNO_MINDRECORD(ILLEGAL_SCHEMA_ID, 36, "illegal schema id"); -DE_ERRORNO_MINDRECORD(DESERIALIZE_SCHEMA_FAILED, 37, "deserialize schema failed"); -DE_ERRORNO_MINDRECORD(DESERIALIZE_STATISTICS_FAILED, 38, "deserialize statistics failed"); -DE_ERRORNO_MINDRECORD(ILLEGAL_DB_FILE, 39, "illegal db file."); -DE_ERRORNO_MINDRECORD(OVERWRITE_DB_FILE, 40, "overwrite db file."); -DE_ERRORNO_MINDRECORD(OVERWRITE_MINDRECORD_FILE, 41, "overwrite mindrecord file."); -DE_ERRORNO_MINDRECORD(ILLEGAL_MINDRECORD_FILE, 42, "illegal mindrecord file."); -DE_ERRORNO_MINDRECORD(PARSE_JSON_FAILED, 43, "parse json failed."); -DE_ERRORNO_MINDRECORD(ILLEGAL_PARAMETERS, 44, "illegal parameters."); -DE_ERRORNO_MINDRECORD(GET_PAGE_BY_GROUP_ID_FAILED, 46, "get page by group id failed."); -DE_ERRORNO_MINDRECORD(GET_SYSTEM_STATE_FAILED, 47, "get system state failed."); -DE_ERRORNO_MINDRECORD(IO_FAILED, 48, "io operate failed."); - enum MSRStatus { SUCCESS = 0, FAILED = 1, + OPEN_FILE_FAILED, + CLOSE_FILE_FAILED, + WRITE_METADATA_FAILED, + WRITE_RAWDATA_FAILED, + GET_SCHEMA_FAILED, + 
ILLEGAL_RAWDATA, + PYTHON_TO_JSON_FAILED, + DIR_CREATE_FAILED, + OPEN_DIR_FAILED, + INVALID_STATISTICS, + OPEN_DATABASE_FAILED, + CLOSE_DATABASE_FAILED, + DATABASE_OPERATE_FAILED, + BUILD_SCHEMA_FAILED, + DIVISOR_IS_ILLEGAL, + INVALID_FILE_PATH, + SECURE_FUNC_FAILED, + ALLOCATE_MEM_FAILED, + ILLEGAL_FIELD_NAME, + ILLEGAL_FIELD_TYPE, + SET_METADATA_FAILED, + ILLEGAL_SCHEMA_DEFINITION, + ILLEGAL_COLUMN_LIST, + SQL_ERROR, + ILLEGAL_SHARD_COUNT, + ILLEGAL_SCHEMA_COUNT, + VERSION_ERROR, + ADD_SCHEMA_FAILED, + ILLEGAL_Header_SIZE, + ILLEGAL_Page_SIZE, + ILLEGAL_SIZE_VALUE, + INDEX_FIELD_ERROR, + GET_CANDIDATE_CATEGORYFIELDS_FAILED, + GET_CATEGORY_INFO_FAILED, + ILLEGAL_CATEGORY_ID, + ILLEGAL_ROWNUMBER_OF_PAGE, + ILLEGAL_SCHEMA_ID, + DESERIALIZE_SCHEMA_FAILED, + DESERIALIZE_STATISTICS_FAILED, + ILLEGAL_DB_FILE, + OVERWRITE_DB_FILE, + OVERWRITE_MINDRECORD_FILE, + ILLEGAL_MINDRECORD_FILE, + PARSE_JSON_FAILED, + ILLEGAL_PARAMETERS, + GET_PAGE_BY_GROUP_ID_FAILED, + GET_SYSTEM_STATE_FAILED, + IO_FAILED }; + +// convert error no to string message +std::string ErrnoToMessage(MSRStatus status); } // namespace mindrecord } // namespace mindspore diff --git a/mindspore/ccsrc/mindrecord/io/shard_reader.cc b/mindspore/ccsrc/mindrecord/io/shard_reader.cc index 32825fd9df..f91d28544e 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_reader.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_reader.cc @@ -676,7 +676,7 @@ MSRStatus ShardReader::Open(const std::string &file_path, int n_consumer, if (CheckColumnList(selected_columns_) == FAILED) { MS_LOG(ERROR) << "Illegal column list"; - return FAILED; + return ILLEGAL_COLUMN_LIST; } // Initialize argument diff --git a/tests/ut/cpp/dataset/mind_record_op_test.cc b/tests/ut/cpp/dataset/mind_record_op_test.cc index 3d5c80b3f4..90f41fdeb9 100644 --- a/tests/ut/cpp/dataset/mind_record_op_test.cc +++ b/tests/ut/cpp/dataset/mind_record_op_test.cc @@ -21,6 +21,7 @@ #include "common/utils.h" #include "gtest/gtest.h" #include 
"mindrecord/include/shard_category.h" +#include "mindrecord/include/shard_error.h" #include "mindrecord/include/shard_sample.h" #include "mindrecord/include/shard_shuffle.h" #include "utils/log_adapter.h" @@ -479,3 +480,38 @@ TEST_F(MindDataTestMindRecordOp, TestMindRecordBlockReaderRepeat) { row_count++; } } + +TEST_F(MindDataTestMindRecordOp, TestMindRecordInvalidColumnList) { + // single MindRecord op and nothing else + // + // MindRecordOp + + MS_LOG(INFO) << "UT test TestMindRecordInvalidColumnList"; + + Status rc; + + // Start with an empty execution tree + auto my_tree = std::make_shared(); + + // Test info: + // Dataset from testDataset1 has 10 rows, 2 columns. + // RowsPerBuffer buffer setting of 3 yields 4 buffers with the last buffer having single row + // only. 2 workers. + // Test a column selection instead of all columns as well. + + std::vector column_list; + std::string label_col_name("file_name_2"); + column_list.push_back(label_col_name); + label_col_name = "label"; + column_list.push_back(label_col_name); + + std::shared_ptr my_mindrecord_op; + MindRecordOp::Builder builder; + builder.SetDatasetFile(mindrecord_root_path_ + "/testMindDataSet/testImageNetData/imagenet.mindrecord0") + .SetRowsPerBuffer(3) + .SetNumMindRecordWorkers(4) + .SetColumnsToLoad(column_list); + rc = builder.Build(&my_mindrecord_op); + ASSERT_TRUE(rc.IsError()); + ASSERT_TRUE(rc.ToString().find_first_of("illegal column list") != std::string::npos); +} diff --git a/tests/ut/cpp/mindrecord/ut_shard_reader_test.cc b/tests/ut/cpp/mindrecord/ut_shard_reader_test.cc index fd63373e20..f7ed39a006 100644 --- a/tests/ut/cpp/mindrecord/ut_shard_reader_test.cc +++ b/tests/ut/cpp/mindrecord/ut_shard_reader_test.cc @@ -155,7 +155,7 @@ TEST_F(TestShardReader, TestShardReaderColumnNotInSchema) { auto column_list = std::vector{"file_namex"}; ShardReader dataset; MSRStatus ret = dataset.Open(file_name, 4, column_list); - ASSERT_EQ(ret, FAILED); + ASSERT_EQ(ret, ILLEGAL_COLUMN_LIST); } 
TEST_F(TestShardReader, TestShardVersion) { From ee5b406b3746d01422000df3abf2b92fe1742d2b Mon Sep 17 00:00:00 2001 From: simson <526422051@qq.com> Date: Wed, 8 Apr 2020 15:19:57 +0800 Subject: [PATCH 126/367] rebuild graph before rungraph if needed --- mindspore/ccsrc/transform/graph_runner.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mindspore/ccsrc/transform/graph_runner.cc b/mindspore/ccsrc/transform/graph_runner.cc index f1f270cdb6..51ab7b9922 100644 --- a/mindspore/ccsrc/transform/graph_runner.cc +++ b/mindspore/ccsrc/transform/graph_runner.cc @@ -138,6 +138,13 @@ Status GraphRunner::RunGraph(const RunOptions& options, const std::vectorIsGraphNeedRebuild(wrap_ptr->id_)) { + sess_->RemoveGraph(wrap_ptr->id_); + sess_->AddGraph(wrap_ptr->id_, *(it->graph_ptr_), it->options_); + } + ge::Status ret = sess_->RunGraph(wrap_ptr->id_, ge_inputs, ge_outputs); if (ret != ge::GRAPH_SUCCESS) { MS_LOG(ERROR) << "Call GE RunGraph Failed, ret is: " << ret; From 47d903ff57f720b283dc092482318dd674fcd9ce Mon Sep 17 00:00:00 2001 From: liuxiao Date: Mon, 6 Apr 2020 10:22:47 +0800 Subject: [PATCH 127/367] Add pack and unpack --- mindspore/ccsrc/operator/ops.h | 1 + mindspore/ccsrc/transform/convert.cc | 6 +- mindspore/ops/_grad/grad_array_ops.py | 24 +++++ mindspore/ops/operations/__init__.py | 4 +- mindspore/ops/operations/array_ops.py | 144 ++++++++++++++++++++++++++ tests/ut/python/ops/test_ops.py | 53 ++++++++++ 6 files changed, 229 insertions(+), 3 deletions(-) diff --git a/mindspore/ccsrc/operator/ops.h b/mindspore/ccsrc/operator/ops.h index 727d66dfb3..5fbf2b7067 100644 --- a/mindspore/ccsrc/operator/ops.h +++ b/mindspore/ccsrc/operator/ops.h @@ -135,6 +135,7 @@ extern const PrimitivePtr kPrimGatherV2; extern const PrimitivePtr kPrimSize; extern const PrimitivePtr kPrimArgMax; extern const PrimitivePtr kPrimPack; +extern const PrimitivePtr kPrimUnpack; extern const PrimitivePtr kPrimUnsortedSegmentSum; extern const PrimitivePtr kPrimConcatOffset; extern 
const PrimitivePtr kPrimReshape; diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index c400d1c573..d1c4a3d42e 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -148,7 +148,8 @@ const char kNameSlice[] = "Slice"; const char kNameAddN[] = "AddN"; const char kNameLess[] = "Less"; const char kNameGreater[] = "Greater"; -const char kNamePack[] = "Stack"; +const char kNameStack[] = "Stack"; +const char kNameUnstack[] = "Unstack"; const char kNameMerge[] = "Merge"; const char kNameGeSwitch[] = "GeSwitch"; @@ -199,7 +200,8 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameMaxPool), ADPT_DESC(MaxPool)}, {string(kNameAvgPool), ADPT_DESC(AvgPool)}, {string(kNameTopK), ADPT_DESC(TopKV2)}, - {string(kNamePack), ADPT_DESC(Pack)}, + {string(kNameStack), ADPT_DESC(Pack)}, + {string(kNameUnstack), ADPT_DESC(Unpack)}, {string(kNameSplitD), ADPT_DESC(SplitD)}, {string(kNameAllReduce), ADPT_DESC(HcomAllReduce)}, {string(kNameBroadcast), ADPT_DESC(HcomBroadcast)}, diff --git a/mindspore/ops/_grad/grad_array_ops.py b/mindspore/ops/_grad/grad_array_ops.py index 81d38a1e1e..0a0caf471e 100644 --- a/mindspore/ops/_grad/grad_array_ops.py +++ b/mindspore/ops/_grad/grad_array_ops.py @@ -266,6 +266,30 @@ def get_bprop_gather_v2(self): return bprop +@bprop_getters.register(P.Stack) +def get_bprop_stack(self): + """Generate bprop for Stack""" + axis = self.axis + + def bprop(x, out, dout): + stack_grad = P.Unstack(axis) + out = stack_grad(dout) + return (out,) + return bprop + + +@bprop_getters.register(P.Unstack) +def get_bprop_unstack(self): + """Generate bprop for Unstack""" + axis = self.axis + + def bprop(x, out, dout): + unstack_grad = P.Stack(axis) + out = unstack_grad(dout) + return (out,) + return bprop + + @bprop_getters.register(P.StridedSlice) def get_bprop_strided_slice(self): """Generate bprop for StridedSlice""" diff --git a/mindspore/ops/operations/__init__.py 
b/mindspore/ops/operations/__init__.py index 89a5ea0249..7a8655b46c 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -19,7 +19,7 @@ Primitive operator classes. A collection of operators to build nerual networks or computing functions. """ -from .array_ops import (Argmax, Argmin, Cast, ConcatOffset, Concat, +from .array_ops import (Argmax, Argmin, Cast, ConcatOffset, Concat, Stack, Unstack, Diag, DiagPart, DType, ExpandDims, Eye, Fill, GatherNd, GatherV2, InvertPermutation, IsInstance, IsSubClass, ArgMaxWithValue, OnesLike, ZerosLike, @@ -112,6 +112,8 @@ __all__ = [ 'OneHot', 'GatherV2', 'Concat', + 'Stack', + 'Unstack', 'Tile', 'BiasAdd', 'Gelu', diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index b91c2cbc7d..59d3083c5d 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -1350,6 +1350,150 @@ class Concat(PrimitiveWithInfer): return out +def _get_stack_shape(x_shape, x_type, axis): + """for satck output shape""" + validator.check_type("shape", x_shape, [tuple]) + validator.check_integer("len of input_x shape", len(x_shape), 0, Rel.GT) + validator.check_subclass("shape0", x_type[0], mstype.tensor) + validator.check_integer("len of input_x0 shape", len(x_shape[0]), 0, Rel.GT) + rank_base = len(x_shape[0]) + N = len(x_shape) + out_shape = x_shape[0] + validator.check_int_range('axis', axis, -rank_base - 1, rank_base, Rel.INC_BOTH) + if axis < 0: + axis = axis + rank_base + 1 + for i in range(1, N): + v = x_shape[i] + validator.check('len of x_shape[%d]' % i, len(v), 'len of rank_base', rank_base) + validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0]) + for j in range(rank_base): + if v[j] != x_shape[0][j]: + raise ValueError("Stack evaluator element %d shape in input can not stack with first element" % i) + out_shape.insert(axis, N) + return out_shape + +class Stack(PrimitiveWithInfer): + r""" + Stacks a list of rank-`R` 
tensors into one rank-`(R+1)` tensor. + + Packs the list of tensors in `input_x` into a tensor with rank one higher than + each tensor in `input_x`, by packing them along the `axis` dimension. + Given a list of length `N` of tensors of shape `(A, B, C)`; + + If `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. + + If `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. Etc. + + Args: + axis (int): The axis to stack along. Negative values wrap around, + so the valid range is [-(R+1), R+1). Default: 0. + + Inputs: + - **input_x** (Union[tuple, list]) - A Tuple or list of Tensor objects with the same shape and type. + + Outputs: + Tensor. A stacked Tensor with the same type as values. + + Examples: + >>> data1 = Tensor(np.array([0, 1]).astype(np.float32)) + >>> data2 = Tensor(np.array([2, 3]).astype(np.float32)) + >>> op = P.Stack() + >>> output = op([data1, data2]) + [[0, 1], [2, 3]] + """ + + @prim_attr_register + def __init__(self, axis=0): + """init Stack""" + self.__setattr_flag__ = True + validator.check_type("axis", axis, [int]) + self.axis = axis + + def __infer__(self, value): + x_shape = value['shape'] + x_type = value['dtype'] + self.add_prim_attr('num', len(x_shape)) + all_shape = _get_stack_shape(x_shape, x_type, self.axis) + out = {'shape': all_shape, + 'dtype': x_type[0], + 'value': None} + return out + + +class Unstack(PrimitiveWithInfer): + r""" + Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors. + + Unpacks num tensors from value by chipping it along the axis dimension. + If num is not specified (the default), it is inferred from value's shape. + If value.shape[axis] is not known, ValueError is raised. + + For example, given a tensor of shape (A, B, C, D); + + If axis == 0 then the i'th tensor in output is the slice value[i, :, :, :] and + each tensor in output will have shape (B, C, D). (Note that the dimension unpacked along is gone, unlike split). 
+ + If axis == 1 then the i'th tensor in output is the slice value[:, i, :, :] and + each tensor in output will have shape (A, C, D). Etc. + + This is the opposite of stack. + + Args: + axis (int): The axis to unstack along. Defaults to the first dimension. + Negative values wrap around, so the valid range is [-R, R). + + Inputs: + - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`. + A rank R > 0 Tensor to be unstacked. + + Outputs: + A tuple of Tensors, the shape of each objects is same. + + Raises: + ValueError: If axis is out of the range [-len(input_x.shape()), len(input_x.shape())), + or if len(input_x.shape[axis]) not equal to num. + + Examples: + >>> unstack = P.Unstack() + >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]])) + >>> output = unstack(x) + ([1, 1, 1, 1], [2, 2, 2, 2]) + """ + + @prim_attr_register + def __init__(self, axis=0): + """init Unstack""" + self.__setattr_flag__ = True + validator.check_type("axis", axis, [int]) + self.axis = axis + + def __infer__(self, x): + validator.check_subclass("x", x['dtype'], mstype.tensor) + x_shape = list(x['shape']) + dim = len(x_shape) + validator.check_int_range('axis value', self.axis, -dim, dim, Rel.INC_LEFT) + if self.axis < 0: + self.axis = self.axis + dim + output_num = x_shape[self.axis] + validator.check_type("num", output_num, [int]) + validator.check_integer("output_num", output_num, 0, Rel.GT) + self.add_prim_attr('num', output_num) + output_valid_check = x_shape[self.axis] - output_num + validator.check_integer("the dimension which to unstack divides output_num", output_valid_check, 0, Rel.EQ) + out_shapes = [] + out_dtypes = [] + out_shape = x_shape[:self.axis] + x_shape[self.axis + 1:] + for _ in range(output_num): + out_shapes.append(tuple(out_shape)) + out_dtypes.append(x['dtype']) + out_shapes = tuple(out_shapes) + out_dtypes = tuple(out_dtypes) + out = {'shape': out_shapes, + 'dtype': out_dtypes, + 'value': None} + return out + + class Slice(PrimitiveWithInfer): """ 
Slice a tensor in specified shape. diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index 0f5b716e39..5dcd2d553a 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -80,6 +80,29 @@ class NetForConcat1(nn.Cell): return self.concat((x1, x2)) +class NetForStackInput(nn.Cell): + def __init__(self, op): + super(NetForStackInput, self).__init__() + self.op = op + self.mul = P.Mul() + + def construct(self, *args): + t = () + for i in range(len(args)): + t = t + (self.mul(args[i], args[i]),) + return self.op(t) + + +class NetForUnstackInput(nn.Cell): + def __init__(self, op): + super(NetForUnstackInput, self).__init__() + self.op = op + self.mul = P.Mul() + + def construct(self, x1): + return self.op((self.mul(x1, x1))) + + class NetForFlatten(nn.Cell): def __init__(self): super(NetForFlatten, self).__init__() @@ -968,6 +991,36 @@ test_case_array_ops = [ Tensor(np.array([1], np.float32)), Tensor(np.array([1], np.float32)))], 'desc_bprop': [[3,]]}), + ('StackV2_0', { + 'block': NetForStackInput(P.Stack()), + 'desc_inputs':[[2, 2], [2, 2], [2, 2]], + 'desc_bprop':[[3, 2, 2]], + }), + ('StackV2_1', { + 'block': NetForStackInput(P.Stack(axis=-2)), + 'desc_inputs':[[3, 2, 3], [3, 2, 3], [3, 2, 3]], + 'desc_bprop':[[3, 2, 3, 3]], + }), + ('StackV2_2', { + 'block': NetForStackInput(P.Stack()), + 'desc_inputs':[[2, 2]], + 'desc_bprop':[[2, 2, 2]], + }), + ('StackV2_3', { + 'block': NetForStackInput(P.Stack()), + 'desc_inputs':[[128, 128], [128, 128]], + 'desc_bprop':[[2, 128, 128]], + }), + ('UnstackV2_0', { + 'block': NetForUnstackInput(P.Unstack(axis=0)), + 'desc_inputs':[[2, 4]], + 'desc_bprop':[[4], [4]], + }), + ('UnstackV2_1', { + 'block': NetForUnstackInput(P.Unstack(axis=-1)), + 'desc_inputs':[Tensor(np.array([[1, 1, 1]], np.float32))], + 'desc_bprop':[[1], [1], [1]], + }), ('Diag', { 'block': P.Diag(), 'desc_inputs': [[4]], From a8bc8bfecbc52ab48cc1caa93f071230324ca4be Mon Sep 17 00:00:00 2001 From: simson 
<526422051@qq.com> Date: Wed, 8 Apr 2020 17:48:42 +0800 Subject: [PATCH 128/367] fix compile error --- mindspore/ccsrc/transform/graph_runner.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ccsrc/transform/graph_runner.cc b/mindspore/ccsrc/transform/graph_runner.cc index 51ab7b9922..8b0ddfd18d 100644 --- a/mindspore/ccsrc/transform/graph_runner.cc +++ b/mindspore/ccsrc/transform/graph_runner.cc @@ -142,7 +142,7 @@ Status GraphRunner::RunGraph(const RunOptions& options, const std::vectorIsGraphNeedRebuild(wrap_ptr->id_)) { sess_->RemoveGraph(wrap_ptr->id_); - sess_->AddGraph(wrap_ptr->id_, *(it->graph_ptr_), it->options_); + sess_->AddGraph(wrap_ptr->id_, *(wrap_ptr->graph_ptr_), wrap_ptr->options_); } ge::Status ret = sess_->RunGraph(wrap_ptr->id_, ge_inputs, ge_outputs); From e17e08618649522a37d83d011bbb613b0df3675c Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Wed, 8 Apr 2020 18:00:33 +0800 Subject: [PATCH 129/367] unified tensor and mindspore.type --- mindspore/common/tensor.py | 8 ++++---- mindspore/ops/operations/math_ops.py | 6 +++--- mindspore/ops/operations/random_ops.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/mindspore/common/tensor.py b/mindspore/common/tensor.py index d17661595f..709b2ae280 100644 --- a/mindspore/common/tensor.py +++ b/mindspore/common/tensor.py @@ -42,14 +42,14 @@ class Tensor(Tensor_): Examples: >>> # init a tensor with input data - >>> t1 = mindspore.Tensor(np.zeros([1, 2, 3]), mindspore.float32) - >>> assert isinstance(t1, mindspore.Tensor) + >>> t1 = Tensor(np.zeros([1, 2, 3]), mindspore.float32) + >>> assert isinstance(t1, Tensor) >>> assert t1.shape() == (1, 2, 3) >>> assert t1.dtype() == mindspore.float32 >>> >>> # init a tensor with a float scalar - >>> t2 = mindspore.Tensor(0.1) - >>> assert isinstance(t2, mindspore.Tensor) + >>> t2 = Tensor(0.1) + >>> assert isinstance(t2, Tensor) >>> assert t2.dtype() == mindspore.float64 """ diff --git 
a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 47b9e490f1..d003f6ee8b 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1208,7 +1208,7 @@ class Acosh(PrimitiveWithInfer): Examples: >>> acosh = Acosh() - >>> X = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), ms.float32) + >>> X = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32) >>> output = acosh(X) """ @@ -1752,7 +1752,7 @@ class Cos(PrimitiveWithInfer): Examples: >>> cos = P.Cos() - >>> X = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), ms.float32) + >>> X = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32) >>> output = cos(X) """ @@ -1808,7 +1808,7 @@ class Sin(PrimitiveWithInfer): Examples: >>> sin = P.Sin() - >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), ms.float32) + >>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32) >>> output = sin(input_x) """ diff --git a/mindspore/ops/operations/random_ops.py b/mindspore/ops/operations/random_ops.py index 9ef5b301f9..95692a622e 100644 --- a/mindspore/ops/operations/random_ops.py +++ b/mindspore/ops/operations/random_ops.py @@ -45,7 +45,7 @@ class RandomChoiceWithMask(PrimitiveWithInfer): Examples: >>> rnd_choice_mask = RandomChoiceWithMask() - >>> input_x = Tensor(np.ones(shape=[240000, 4]), ms.bool_) + >>> input_x = Tensor(np.ones(shape=[240000, 4]), mindspore.bool_) >>> output_y, output_mask = rnd_choice_mask(input_x) """ From ac62faa38886b36670a58ac9c1c0eb9a8158c004 Mon Sep 17 00:00:00 2001 From: jinyaohui Date: Tue, 7 Apr 2020 17:23:17 +0800 Subject: [PATCH 130/367] modify set_dataset_mode_config api param --- example/yolov3_coco2017/train.py | 6 +++--- mindspore/ccsrc/pipeline/pipeline_ge.cc | 6 +++--- mindspore/ccsrc/transform/convert.cc | 12 ++++++------ mindspore/ccsrc/utils/config_manager.cc | 4 ++-- mindspore/ccsrc/utils/config_manager.h | 4 ++-- mindspore/common/api.py | 4 ++-- mindspore/nn/wrap/loss_scale.py | 2 +- 
tests/ut/python/utils/test_callback.py | 16 +++++++++------- 8 files changed, 28 insertions(+), 26 deletions(-) diff --git a/example/yolov3_coco2017/train.py b/example/yolov3_coco2017/train.py index 3ac3816f4a..0a32a6d30d 100644 --- a/example/yolov3_coco2017/train.py +++ b/example/yolov3_coco2017/train.py @@ -67,7 +67,7 @@ if __name__ == '__main__': parser.add_argument("--distribute", type=bool, default=False, help="Run distribute, default is false.") parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.") parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.") - parser.add_argument("--mode", type=str, default="graph", help="Run graph mode or feed mode, default is graph") + parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or non-sink mode, default is sink") parser.add_argument("--epoch_size", type=int, default=10, help="Epoch size, default is 10") parser.add_argument("--batch_size", type=int, default=32, help="Batch size, default is 32.") parser.add_argument("--checkpoint_path", type=str, default="", help="Checkpoint file path") @@ -150,8 +150,8 @@ if __name__ == '__main__': model = Model(net) dataset_sink_mode = False - if args_opt.mode == "graph": - print("In graph mode, one epoch return a loss.") + if args_opt.mode == "sink": + print("In sink mode, one epoch return a loss.") dataset_sink_mode = True print("Start train YOLOv3, the first epoch will be slower because of the graph compilation.") model.train(args_opt.epoch_size, dataset, callbacks=callback, dataset_sink_mode=dataset_sink_mode) diff --git a/mindspore/ccsrc/pipeline/pipeline_ge.cc b/mindspore/ccsrc/pipeline/pipeline_ge.cc index 2f68935591..60960a2eb7 100644 --- a/mindspore/ccsrc/pipeline/pipeline_ge.cc +++ b/mindspore/ccsrc/pipeline/pipeline_ge.cc @@ -116,7 +116,7 @@ bool InitExecDatasetGe(const std::string& queue_name, int64_t size, int64_t batc return 
transform::TransformUtil::ConvertDataType(i->type_id()); }); - ConfigManager::GetInstance().set_dataset_mode(DatasetMode::DS_GRAPH_MODE); + ConfigManager::GetInstance().set_dataset_mode(DatasetMode::DS_SINK_MODE); ConfigManager::GetInstance().set_iter_num(size); ConfigManager::GetInstance().set_dataset_phase(phase); @@ -453,8 +453,8 @@ void ProcessGeArg(const std::map& info, const py:: } // process the first args of tensor - // only in Dataset Feed Mode, fp_bp graph need input tensors - if (ConfigManager::GetInstance().dataset_mode() == DS_FEED_MODE) { + // only in Dataset non-sink Mode, fp_bp graph need input tensors + if (ConfigManager::GetInstance().dataset_mode() == DS_NORMAL_MODE) { for (std::size_t i = 0; i < size; i++) { ValuePtr converted = nullptr; bool succ = parse::ConvertData(args[i], &converted); diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index c400d1c573..250e83432a 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -440,10 +440,10 @@ void DfGraphConvertor::InitLoopVar(std::vector *init_input) { int64_t value = 0; auto const_iter_num = std::make_shared("const/npu_runconfig/iterations_per_loop"); - if (ConfigManager::GetInstance().dataset_mode() == DS_GRAPH_MODE) { + if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) { value = ConfigManager::GetInstance().iter_num(); } else { - MS_LOG(INFO) << "Run with feed mode, the iterator number will always be 1"; + MS_LOG(INFO) << "Run with non-sink mode, the iterator number will always be 1"; value = 1; ConfigManager::GetInstance().set_iter_num(value); } @@ -574,7 +574,7 @@ void DfGraphConvertor::SetupParamInitSubGraph(const TensorOrderMap &tensors, std void DfGraphConvertor::MakeDatasetHandler(const std::string &name, const size_t &input_idx, const AnfNodePtr &it) { MS_LOG(INFO) << "The " << name << " is the " << input_idx << "(st/nd/th) input"; - if (ConfigManager::GetInstance().dataset_mode() == 
DS_GRAPH_MODE) { + if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) { auto getnext_idx = static_cast(input_idx); DatasetGraphParam param = ConfigManager::GetInstance().dataset_param(); if (!param.input_indexes().empty() && input_idx <= param.input_indexes().size()) { @@ -866,7 +866,7 @@ DfGraphConvertor &DfGraphConvertor::ConvertAllNode() { } // Create dataset iterator and iterator_getnext node - if (ConfigManager::GetInstance().dataset_mode() == DS_GRAPH_MODE) { + if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) { DatasetGraphParam param = ConfigManager::GetInstance().dataset_param(); MS_LOG(INFO) << "Dataset param is " << param.ToString() << "."; // GetNext @@ -975,7 +975,7 @@ void DfGraphConvertor::TraceOutputFromParameter(const AnfNodePtr &anf_out) { } void SetupDatasetIterGetNextNode(const OperatorPtr &op) { - if (ConfigManager::GetInstance().dataset_mode() == DS_GRAPH_MODE) { + if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) { DatasetGraphParam param = ConfigManager::GetInstance().dataset_param(); size_t output_num = param.ge_types().size(); MS_LOG(INFO) << "Set iterator_getnext op's output num = " << output_num << "."; @@ -1034,7 +1034,7 @@ DfGraphConvertor &DfGraphConvertor::BuildGraph() { // set graph input according to the order from anf graph std::vector inputs; - if (ConfigManager::GetInstance().dataset_mode() == DS_GRAPH_MODE) { + if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) { inputs.push_back(*dataset_iter_getnext_); } else { auto params = anf_graph_->parameters(); diff --git a/mindspore/ccsrc/utils/config_manager.cc b/mindspore/ccsrc/utils/config_manager.cc index ac8a965878..6d66b37436 100644 --- a/mindspore/ccsrc/utils/config_manager.cc +++ b/mindspore/ccsrc/utils/config_manager.cc @@ -28,7 +28,7 @@ ConfigManager& ConfigManager::GetInstance() noexcept { } void ConfigManager::SetDatasetModeConfig(const std::string& mode) { - static const std::map mode_map = {{"feed", DS_FEED_MODE}, 
{"graph", DS_GRAPH_MODE}}; + static const std::map mode_map = {{"normal", DS_NORMAL_MODE}, {"sink", DS_SINK_MODE}}; if (mode_map.find(mode) == mode_map.end()) { MS_LOG(ERROR) << "Invalid dataset mode:" << mode; return; @@ -38,7 +38,7 @@ void ConfigManager::SetDatasetModeConfig(const std::string& mode) { void ConfigManager::ResetConfig() noexcept { parallel_strategy_ = ONE_DEVICE; - dataset_mode_ = DS_FEED_MODE; + dataset_mode_ = DS_NORMAL_MODE; dataset_param_ = DatasetGraphParam("", 0, 0, {}, {}, {}); iter_num_ = 1; } diff --git a/mindspore/ccsrc/utils/config_manager.h b/mindspore/ccsrc/utils/config_manager.h index 31137f6243..db7d7d0c14 100644 --- a/mindspore/ccsrc/utils/config_manager.h +++ b/mindspore/ccsrc/utils/config_manager.h @@ -33,7 +33,7 @@ enum ParallelStrategy { DISTRIBUTION, }; -enum DatasetMode { DS_FEED_MODE = 0, DS_GRAPH_MODE }; +enum DatasetMode { DS_NORMAL_MODE = 0, DS_SINK_MODE }; class DatasetGraphParam { public: @@ -106,7 +106,7 @@ class ConfigManager { ~ConfigManager() = default; ParallelStrategy parallel_strategy_{ONE_DEVICE}; - DatasetMode dataset_mode_{DS_FEED_MODE}; + DatasetMode dataset_mode_{DS_NORMAL_MODE}; DatasetGraphParam dataset_param_{"", 0, 0, {}, {}, {}}; int64_t iter_num_{1}; std::string dataset_phase_{""}; diff --git a/mindspore/common/api.py b/mindspore/common/api.py index 9ee95ef772..5af17bbd78 100644 --- a/mindspore/common/api.py +++ b/mindspore/common/api.py @@ -381,9 +381,9 @@ class _Executor: if enable_ge: # decide whether to sink based on whether the inputs is virtual or not if args_list and isinstance(args_list[0], Tensor) and args_list[0].virtual_flag: - _set_dataset_mode_config('graph') + _set_dataset_mode_config('sink') else: - _set_dataset_mode_config('feed') + _set_dataset_mode_config('normal') self._build_data_graph(obj, params, phase) diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index 1ce3179273..5cdb34cf11 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ 
b/mindspore/nn/wrap/loss_scale.py @@ -43,7 +43,7 @@ class DynamicLossScaleUpdateCell(Cell): In every training step, the loss scaling value will be updated by loss scaling value/`scale_factor` when there is overflow. And it will be increased by loss scaling value * `scale_factor` if there is no overflow for a continuous `scale_window` steps. This cell is used for Graph mode training in which all - logic will be executed on device side(Another training mode is feed mode in which some logic will be + logic will be executed on device side(Another training mode is non-sink mode in which some logic will be executed on host). Args: diff --git a/tests/ut/python/utils/test_callback.py b/tests/ut/python/utils/test_callback.py index 60e4c6527a..c6fea04231 100644 --- a/tests/ut/python/utils/test_callback.py +++ b/tests/ut/python/utils/test_callback.py @@ -24,11 +24,12 @@ from mindspore import context from mindspore.common.tensor import Tensor from mindspore.nn.optim import Momentum from mindspore.nn import TrainOneStepCell, WithLossCell -from mindspore.train.callback import ModelCheckpoint, _check_file_name_prefix, RunContext,_checkpoint_cb_for_save_op,\ - LossMonitor, _InternalCallbackParam, _chg_ckpt_file_name_if_same_exist,\ - _build_callbacks, CheckpointConfig, _set_cur_net +from mindspore.train.callback import ModelCheckpoint, _check_file_name_prefix, RunContext, _checkpoint_cb_for_save_op, \ + LossMonitor, _InternalCallbackParam, _chg_ckpt_file_name_if_same_exist, \ + _build_callbacks, CheckpointConfig, _set_cur_net from mindspore.common.api import ms_function + class Net(nn.Cell): """Net definition.""" @@ -52,6 +53,7 @@ class Net(nn.Cell): class LossNet(nn.Cell): """ LossNet definition """ + def __init__(self): super(LossNet, self).__init__() self.conv = nn.Conv2d(3, 64, 3, has_bias=False, weight_init='normal', pad_mode='valid') @@ -110,8 +112,8 @@ def test_save_checkpoint(): os.remove('./test_files/test_ckpt-model.pkl') -def test_loss_monitor_graph_model(): - """Test 
lossmonitor Graph model.""" +def test_loss_monitor_sink_model(): + """Test loss monitor sink model.""" cb_params = _InternalCallbackParam() cb_params.cur_epoch_num = 4 cb_params.cur_step_num = 2 @@ -129,8 +131,8 @@ def test_loss_monitor_graph_model(): callbacklist.end(run_context) -def test_Loss_Monitor_feed_feed_model(): - """Test Loss Monitor feed feed mode.""" +def test_loss_monitor_feed_model(): + """Test loss monitor non-sink mode.""" cb_params = _InternalCallbackParam() run_context = RunContext(cb_params) loss_cb = LossMonitor(1) From b5e3fa959346bc7598ca6bbef24193d9f5ef2546 Mon Sep 17 00:00:00 2001 From: yao_yf Date: Wed, 8 Apr 2020 17:24:22 +0800 Subject: [PATCH 131/367] fix auto parallel prelu --- mindspore/ccsrc/parallel/ops_info/prelu_info.cc | 2 +- tests/ut/cpp/parallel/ops_info/prelu_test.cc | 6 ++---- tests/ut/python/parallel/test_prelu.py | 17 +++++++++++++++++ 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/mindspore/ccsrc/parallel/ops_info/prelu_info.cc b/mindspore/ccsrc/parallel/ops_info/prelu_info.cc index 9aa8513331..1a44501f42 100644 --- a/mindspore/ccsrc/parallel/ops_info/prelu_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/prelu_info.cc @@ -52,7 +52,7 @@ Status PReLUInfo::CheckStrategy(const StrategyPtr& strategy) { } return FAILED; } - if ((stra[0][PRELU_CHANNEL_INDEX] != PRELU_CHANNEL_STRATEGY) || (stra[1][0] != PRELU_CHANNEL_STRATEGY)) { + if (stra[0][PRELU_CHANNEL_INDEX] != stra[1][0]) { if (is_auto_parallel_) { MS_LOG(DEBUG) << name_ << ": Invalid channel strategy."; } else { diff --git a/tests/ut/cpp/parallel/ops_info/prelu_test.cc b/tests/ut/cpp/parallel/ops_info/prelu_test.cc index 5ff261234f..d6db1b8460 100644 --- a/tests/ut/cpp/parallel/ops_info/prelu_test.cc +++ b/tests/ut/cpp/parallel/ops_info/prelu_test.cc @@ -146,11 +146,10 @@ TEST_F(TestPReLUInfo, CheckStrategy1) { } TEST_F(TestPReLUInfo, CheckStrategy2) { - // Success: {{2,1,8,16},{1}} std::vector inputs = {{2, 4, 8, 16}, {4}}; StrategyPtr strategy = 
NewStrategy(0, inputs); Status ret = prelu->Init(strategy); - ASSERT_EQ(ret, FAILED); + ASSERT_EQ(ret, SUCCESS); } TEST_F(TestPReLUInfo, AutoStrategy1) { @@ -252,11 +251,10 @@ TEST_F(TestPReLUInfo, CheckStrategy_2d1) { } TEST_F(TestPReLUInfo, CheckStrategy_2d2) { - // Success: {{2,1,8,16},{1}} std::vector inputs = {{128, 4}, {4}}; StrategyPtr strategy = NewStrategy(0, inputs); Status ret = prelu_2d->Init(strategy); - ASSERT_EQ(ret, FAILED); + ASSERT_EQ(ret, SUCCESS); } TEST_F(TestPReLUInfo, AutoStrategy_2d1) { diff --git a/tests/ut/python/parallel/test_prelu.py b/tests/ut/python/parallel/test_prelu.py index c601045491..d3ad1cc710 100755 --- a/tests/ut/python/parallel/test_prelu.py +++ b/tests/ut/python/parallel/test_prelu.py @@ -149,3 +149,20 @@ def test_prelu_parallel_success3(): w = Tensor(np.random.rand(16),dtype=ms.float32) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) _executor.compile(net, x, y, w) + +def test_prelu_parallel_success4(): + class Net(nn.Cell): + def __init__(self, strategy): + super().__init__() + self.prelu = P.PReLU().set_strategy(strategy) + def construct(self, x, y): + out = self.prelu(x, y) + return out + context.reset_auto_parallel_context() + context.set_auto_parallel_context(device_num=64, global_rank=0) + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") + strategy = ((2, 4, 4, 2), (4, )) + x = Tensor(np.random.rand(4, 16, 32, 64),dtype=ms.float32) + w = Tensor(np.random.rand(16),dtype=ms.float32) + net = GradWrap(NetWithLoss(Net(strategy))) + _executor.compile(net, x, w) From 7541d3b067e67a79df3674638cbff9176853978b Mon Sep 17 00:00:00 2001 From: buxue Date: Thu, 2 Apr 2020 11:58:45 +0800 Subject: [PATCH 132/367] Develop op MaxPoolWithArgMax --- mindspore/ccsrc/kernel/tbe/tbe_adapter.cc | 44 --- mindspore/ccsrc/transform/convert.cc | 2 + mindspore/ccsrc/transform/op_declare.cc | 18 +- mindspore/ccsrc/transform/op_declare.h | 4 +- mindspore/model_zoo/resnet.py | 2 +- mindspore/nn/layer/pooling.py | 143 
++++---- mindspore/ops/_grad/grad_nn_ops.py | 11 +- .../_op_impl/tbe/max_pool_grad_with_argmax.py | 6 +- .../ops/_op_impl/tbe/max_pool_with_argmax.py | 6 +- mindspore/ops/operations/_grad_ops.py | 121 +++---- mindspore/ops/operations/nn_ops.py | 338 +++++++++--------- tests/perf_test/resnet_example.py | 2 +- tests/st/networks/test_cpu_lenet.py | 6 +- tests/st/networks/test_gpu_alexnet.py | 2 +- .../ops/davinci/test_maxpool_with_argmax.py | 15 +- .../davinci/test_maxpool_with_argmax_grad.py | 6 +- tests/st/tbe_networks/resnet.py | 2 +- .../gtest_input/pre_activate/hw_opt_test.py | 2 +- .../pre_activate/insert_trans_op_test.py | 2 +- .../pre_activate/mixed_precision_test.py | 2 +- .../pre_activate/transdata_split_test.py | 2 +- .../transpose_transdata_fusion_test.py | 2 +- .../gtest_input/session/session_test.py | 2 +- .../test_data_parallel_resnet.py | 4 +- tests/ut/python/exec/resnet_example.py | 2 +- tests/ut/python/exec/test_pooling.py | 6 +- tests/ut/python/model/res18_example.py | 4 +- tests/ut/python/nn/test_cell.py | 2 +- tests/ut/python/nn/test_pooling.py | 3 +- tests/ut/python/ops/test_nn_ops.py | 105 +++--- tests/ut/python/ops/test_ops.py | 2 +- tests/ut/python/ops/test_ops_check.py | 8 +- .../pynative_mode/ge/ops/test_pooling.py | 4 +- tests/ut/python/pynative_mode/nn/test_cell.py | 2 +- .../python/pynative_mode/nn/test_pooling.py | 11 +- tests/ut/python/pynative_mode/vm/test_vm.py | 8 +- tests/ut/python/utils/test_serialize.py | 2 +- tests/vm_impl/nn_ops_vm_impl.py | 90 ++++- tests/vm_impl/vm_me.py | 46 ++- 39 files changed, 535 insertions(+), 504 deletions(-) mode change 100755 => 100644 mindspore/ccsrc/transform/op_declare.cc diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc index c0416f648b..229a3eb34a 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc @@ -148,8 +148,6 @@ void TbeAdapter::InputOrderPass(const std::string &op_name, std::vector 
TbeAdapter::build_json_attr_pass_map_ = { - {"MaxPoolWithArgmax", TbeAdapter::MaxPoolWithArgmaxAttrJsonPass}, - {"MaxPoolGradWithArgmax", TbeAdapter::MaxPoolGradWithArgmaxAttrJsonPass}, {"Conv2D", TbeAdapter::Conv2DAttrJsonPass}, {"Conv2DBackpropFilter", TbeAdapter::Conv2DBackpropFilterAttrJsonPass}, {"Conv2DBackpropInput", TbeAdapter::Conv2DBackpropInputAttrJsonPass}, @@ -170,48 +168,6 @@ bool TbeAdapter::RunAttrPass(const mindspore::AnfNodePtr &anf_node, return false; } -void TbeAdapter::MaxPoolWithArgmaxAttrJsonPass( - const mindspore::AnfNodePtr &anf_node, const std::vector> &op_info_attrs, - nlohmann::json *attrs_json) { - MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(attrs_json); - auto attr_num = op_info_attrs.size(); - auto primitive = AnfAlgo::GetCNodePrimitive(anf_node); - MS_EXCEPTION_IF_NULL(primitive); - for (size_t i = 0; i < attr_num; i++) { - nlohmann::json attr_obj; - MS_EXCEPTION_IF_NULL(op_info_attrs[i]); - std::string attr_name = op_info_attrs[i]->name(); - if (primitive->GetAttr(attr_name) != nullptr) { - auto value = primitive->GetAttr(attr_name); - if (attr_name == "pad_mode") { - std::string attr_value = GetValue(value); - (void)transform(attr_value.begin(), attr_value.end(), attr_value.begin(), ::toupper); - attr_obj["value"] = attr_value; - } else { - std::vector attr_value; - int data = GetValue(value); - attr_value.push_back(1); - attr_value.push_back(data); - attr_value.push_back(data); - attr_value.push_back(1); - attr_obj["value"] = attr_value; - } - attr_obj["valid"] = true; - } else { - attr_obj["valid"] = false; - } - attr_obj["name"] = attr_name; - attrs_json->push_back(attr_obj); - } -} - -void TbeAdapter::MaxPoolGradWithArgmaxAttrJsonPass( - const mindspore::AnfNodePtr &anf_node, const std::vector> &op_info_attrs, - nlohmann::json *attrs_json) { - MaxPoolWithArgmaxAttrJsonPass(anf_node, op_info_attrs, attrs_json); -} - void TbeAdapter::Conv2DAttrJsonPass(const mindspore::AnfNodePtr &anf_node, const std::vector> 
&op_info_attrs, nlohmann::json *attrs_json) { diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index c400d1c573..bee460d84a 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -161,6 +161,7 @@ const char kNameTopK[] = "TopK"; const char kNameSoftmaxGrad[] = "SoftmaxGrad"; const char kNameMaxPool[] = "MaxPool"; const char kNameAvgPool[] = "AvgPool"; +const char kNameMaxPoolWithArgmax[] = "MaxPoolWithArgmax"; const char kNameBatchNorm[] = "BatchNorm"; const char kNameBatchNormGrad[] = "BatchNormGrad"; const char kNameROIAlign[] = "ROIAlign"; @@ -198,6 +199,7 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameApplyMomentum), ADPT_DESC(ApplyMomentum)}, {string(kNameMaxPool), ADPT_DESC(MaxPool)}, {string(kNameAvgPool), ADPT_DESC(AvgPool)}, + {string(kNameMaxPoolWithArgmax), ADPT_DESC(MaxPoolWithArgmax)}, {string(kNameTopK), ADPT_DESC(TopKV2)}, {string(kNamePack), ADPT_DESC(Pack)}, {string(kNameSplitD), ADPT_DESC(SplitD)}, diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc old mode 100755 new mode 100644 index 0af2923cc4..419805c37f --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -734,14 +734,22 @@ ATTR_MAP(AvgPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits< OUTPUT_MAP(AvgPoolGrad) = {{0, OUTPUT_DESC(out_grad)}}; // MaxPoolWithArgmax +INPUT_MAP(MaxPoolWithArgmax) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, + {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, + {"padding", ATTR_DESC(padding, AnyTraits())}}; +OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}}; + +// MaxPoolGradWithArgmax INPUT_MAP(MaxPoolGradWithArgmax) = { {1, INPUT_DESC(x)}, - {2, INPUT_DESC(argmax)}, - {3, INPUT_DESC(grad)}, + {2, INPUT_DESC(grad)}, + {3, INPUT_DESC(argmax)}, }; 
-ATTR_MAP(MaxPoolGradWithArgmax) = {{"pad_mode", ATTR_DESC(padding, AnyTraits())}, - {"window", ATTR_DESC(ksize, "window", AnyTraits>())}, - {"stride", ATTR_DESC(strides, "stride", AnyTraits>())}}; +ATTR_MAP(MaxPoolGradWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, + {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, + {"padding", ATTR_DESC(padding, AnyTraits())}}; +OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}}; // Conv2D INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}}; diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index d120c94989..e4d4101127 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -88,8 +88,10 @@ DECLARE_OP_ADAPTER(FusedBatchNormGrad) DECLARE_OP_USE_OUTPUT(FusedBatchNormGrad) DECLARE_OP_ADAPTER(BiasAddGrad) DECLARE_OP_USE_OUTPUT(BiasAddGrad) +DECLARE_OP_ADAPTER(MaxPoolWithArgmax) +DECLARE_OP_USE_OUTPUT(MaxPoolWithArgmax) DECLARE_OP_ADAPTER(MaxPoolGradWithArgmax) -DECLARE_OP_USE_ENUM(MaxPoolGradWithArgmax) +DECLARE_OP_USE_OUTPUT(MaxPoolGradWithArgmax) DECLARE_OP_ADAPTER(Conv2D) DECLARE_OP_USE_ENUM(Conv2D) DECLARE_OP_USE_OUTPUT(Conv2D) diff --git a/mindspore/model_zoo/resnet.py b/mindspore/model_zoo/resnet.py index 403f66e415..9d010eede1 100755 --- a/mindspore/model_zoo/resnet.py +++ b/mindspore/model_zoo/resnet.py @@ -168,7 +168,7 @@ class ResNet(nn.Cell): self.conv1 = _conv7x7(3, 64, stride=2) self.bn1 = _bn(64) self.relu = P.ReLU() - self.maxpool = P.MaxPoolWithArgmax(pad_mode='same', window=3, stride=2) + self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2) self.layer1 = self._make_layer(block, layer_nums[0], diff --git a/mindspore/nn/layer/pooling.py b/mindspore/nn/layer/pooling.py index 6ff28dd362..bf90fcc9de 100644 --- a/mindspore/nn/layer/pooling.py +++ b/mindspore/nn/layer/pooling.py @@ -13,36 +13,52 @@ # limitations under the License. 
# ============================================================================ """pooling""" - from mindspore.ops import operations as P from mindspore._checkparam import ParamValidator as validator from mindspore._checkparam import Rel +from ... import context from ..cell import Cell class _PoolNd(Cell): """N-D AvgPool""" - def __init__(self, - kernel_size, - stride, - pad_mode, - padding=0, - pool=None): + def __init__(self, kernel_size, stride, pad_mode): + name = self.__class__.__name__ super(_PoolNd, self).__init__() + validator.check_type('kernel_size', kernel_size, [int, tuple]) + validator.check_type('stride', stride, [int, tuple]) + self.pad_mode = validator.check_string('pad_mode', pad_mode.upper(), ['VALID', 'SAME']) + + if isinstance(kernel_size, int): + validator.check_integer("kernel_size", kernel_size, 1, Rel.GE) + else: + if (len(kernel_size) != 2 or + (not isinstance(kernel_size[0], int)) or + (not isinstance(kernel_size[1], int)) or + kernel_size[0] <= 0 or + kernel_size[1] <= 0): + raise ValueError(f'The kernel_size passed to cell {name} should be an positive int number or' + f'a tuple of two positive int numbers, but got {kernel_size}') self.kernel_size = kernel_size + + if isinstance(stride, int): + validator.check_integer("stride", stride, 1, Rel.GE) + else: + if (len(stride) != 2 or + (not isinstance(stride[0], int)) or + (not isinstance(stride[1], int)) or + stride[0] <= 0 or + stride[1] <= 0): + raise ValueError(f'The stride passed to cell {name} should be an positive int number or' + f'a tuple of two positive int numbers, but got {stride}') self.stride = stride - self.pad_mode = pad_mode - self.padding = validator.check_integer('padding', padding, 0, Rel.GE) - self.pool = pool - if self.pool is None: - raise NotImplementedError - def construct(self, x): - return self.pool(x) + def construct(self, *inputs): + pass def extend_repr(self): - return 'kernel_size={kernel_size}, stride={stride}, pad_mode={pad_mode}'.format(**self.__dict__) + 
return 'kernel_size={kernel_size}, strides={strides}, pad_mode={pad_mode}'.format(**self.__dict__) class MaxPool2d(_PoolNd): @@ -63,19 +79,23 @@ class MaxPool2d(_PoolNd): pad_mode for training only supports "same" and "valid". Args: - kernel_size (int): Size of the window to take a max over. Default 1. - stride (int): Stride size of the window. Default: 1. - pad_mode (str): Select the mode of the pad. The optional values are - "same" and "valid". Default: "valid". + kernel_size (Union[int, tuple[int]]): The size of kernel used to take the max value, + is an int number that represents height and width are both kernel_size, + or a tuple of two int numbers that represent height and width respectively. + Default: 1. + stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents + the height and width of movement are both strides, or a tuple of two int numbers that + represent height and width of movement respectively. Default: 1. + pad_mode (str): The optional values for pad mode, is "same" or "valid", not case sensitive. + Default: "valid". - same: Adopts the way of completion. Output height and width will be the same as the input. Total number of padding will be calculated for horizontal and vertical - direction and evenly distributed to top and bottom, left and right if possible. Otherwise, the - last extra padding will be done from the bottom and the right side. + direction and evenly distributed to top and bottom, left and right if possible. + Otherwise, the last extra padding will be done from the bottom and the right side. - - valid: Adopts the way of discarding. The possibly largest height and width of output will be return - without padding. Extra pixels will be discarded. - padding (int): Implicit zero padding to be added on both sides. Default: 0. + - valid: Adopts the way of discarding. The possibly largest height and width of output + will be return without padding. Extra pixels will be discarded. 
Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. @@ -103,31 +123,22 @@ class MaxPool2d(_PoolNd): [[7. 8.] [8. 8.]]]] """ - def __init__(self, - kernel_size=1, - stride=1, - pad_mode="VALID", - padding=0): - max_pool = P.MaxPool(ksize=kernel_size, - strides=stride, - padding=pad_mode) - self.is_autodiff_backend = False - if self.is_autodiff_backend: - - # At present, pad mode of max pool is not unified, so it is a temporarily avoided - pad_mode = validator.check_string('pad_mode', pad_mode.lower(), ['valid', 'same']) - - max_pool = P.MaxPoolWithArgmax(window=kernel_size, - stride=stride, - pad_mode=pad_mode, - pad=padding) - super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode, padding, max_pool) + + def __init__(self, kernel_size=1, stride=1, pad_mode="valid"): + super(MaxPool2d, self).__init__(kernel_size, stride, pad_mode) + self.max_pool = P.MaxPool(ksize=self.kernel_size, + strides=self.stride, + padding=self.pad_mode) + self.max_pool_with_arg_max = P.MaxPoolWithArgmax(ksize=self.kernel_size, + strides=self.stride, + padding=self.pad_mode) + self.is_tbe = context.get_context("device_target") == "Ascend" def construct(self, x): - if self.is_autodiff_backend: - out = self.pool(x)[0] + if self.is_tbe and self.training: + out = self.max_pool_with_arg_max(x)[0] else: - out = self.pool(x) + out = self.max_pool(x) return out @@ -149,19 +160,24 @@ class AvgPool2d(_PoolNd): pad_mode for training only supports "same" and "valid". Args: - kernel_size (int): Size of the window to take a max over. Default: 1. - stride (int): Stride size of the window. Default: 1. - pad_mode (str): Select the mode of the pad. The optional values are - "same", "valid". Default: "valid". + kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value, + is an int number that represents height and width are both kernel_size, + or a tuple of two int numbers that represent height and width respectively. + Default: 1. 
+ stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents + the height and width of movement are both strides, or a tuple of two int numbers that + represent height and width of movement respectively. Default: 1. + pad_mode (str): The optional values for pad mode, is "same" or "valid", not case sensitive. + Default: "valid". - same: Adopts the way of completion. Output height and width will be the same as the input. Total number of padding will be calculated for horizontal and vertical - direction and evenly distributed to top and bottom, left and right if possible. Otherwise, the - last extra padding will be done from the bottom and the right side. + direction and evenly distributed to top and bottom, left and right if possible. + Otherwise, the last extra padding will be done from the bottom and the right side. + + - valid: Adopts the way of discarding. The possibly largest height and width of output + will be return without padding. Extra pixels will be discarded. - - valid: Adopts the way of discarding. The possibly largest height and width of output will be return - without padding. Extra pixels will be discarded. - padding (int): Implicit zero padding to be added on both sides. Default: 0. Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. @@ -170,7 +186,7 @@ class AvgPool2d(_PoolNd): Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`. Examples: - >>> pool = AvgPool2d(kernel_size=3, stride=1) + >>> pool = AvgPool2d(kernel_size=3, strides=1) >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32) [[[[5. 5. 9. 9.] [8. 4. 3. 0.] 
@@ -189,12 +205,15 @@ class AvgPool2d(_PoolNd): [[4.2222223 4.5555553] [3.2222223 4.5555553]]]] """ + def __init__(self, kernel_size=1, stride=1, - pad_mode="VALID", - padding=0): - avg_pool = P.AvgPool(ksize=kernel_size, - strides=stride, - padding=pad_mode) - super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode, padding, avg_pool) + pad_mode="valid"): + super(AvgPool2d, self).__init__(kernel_size, stride, pad_mode) + self.avg_pool = P.AvgPool(ksize=self.kernel_size, + strides=self.stride, + padding=self.pad_mode) + + def construct(self, x): + return self.avg_pool(x) diff --git a/mindspore/ops/_grad/grad_nn_ops.py b/mindspore/ops/_grad/grad_nn_ops.py index bad99351a5..fbe48aff97 100755 --- a/mindspore/ops/_grad/grad_nn_ops.py +++ b/mindspore/ops/_grad/grad_nn_ops.py @@ -76,14 +76,9 @@ def get_bprop_depthwise_conv2d_native(self): def get_bprop_max_pool_with_argmax(self): """Grad definition for `MaxPoolWithArgmax` operation.""" maxpool_grad = G.MaxPoolGradWithArgmax( - pad_mode=self.pad_mode, - window=self.window, - pad=self.pad, - stride=self.stride, - data_mode=self.data_mode, - ceil_mode=self.ceil_mode, - alpha=self.alpha, - beta=self.beta) + ksize=self.ksize, + strides=self.strides, + padding=self.padding,) def bprop(x, out, dout): dx = maxpool_grad(x, dout[0], out[1]) diff --git a/mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py b/mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py index a167ef85f8..3730ee1b93 100644 --- a/mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +++ b/mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py @@ -28,19 +28,19 @@ from mindspore.ops.op_info_register import op_info_register "partial_flag": true, "attr": [ { - "name": "window", + "name": "ksize", "param_type": "required", "type": "listInt", "value": "all" }, { - "name": "stride", + "name": "strides", "param_type": "required", "type": "listInt", "value": "all" }, { - "name": "pad_mode", + "name": "padding", "param_type": "required", "type": "str", 
"value": "all" diff --git a/mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py b/mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py index 04d0eeb92c..2e081c1082 100644 --- a/mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +++ b/mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py @@ -28,19 +28,19 @@ from mindspore.ops.op_info_register import op_info_register "partial_flag": true, "attr": [ { - "name": "window", + "name": "ksize", "param_type": "required", "type": "listInt", "value": "all" }, { - "name": "stride", + "name": "strides", "param_type": "required", "type": "listInt", "value": "all" }, { - "name": "pad_mode", + "name": "padding", "param_type": "required", "type": "str", "value": "all" diff --git a/mindspore/ops/operations/_grad_ops.py b/mindspore/ops/operations/_grad_ops.py index a699c23adc..f38044ab6a 100644 --- a/mindspore/ops/operations/_grad_ops.py +++ b/mindspore/ops/operations/_grad_ops.py @@ -15,7 +15,6 @@ """Operators for gradients.""" -import math from ..._c_expression import signature_rw as sig_rw from ..._c_expression import signature_kind as sig_kind from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register @@ -340,59 +339,60 @@ class _PoolGrad(PrimitiveWithInfer): """Gradients of the max/avg pool operation.""" @prim_attr_register - def __init__(self, ksize=1, strides=1, padding="VALID"): + def __init__(self, ksize, strides, padding="VALID"): self.init_prim_io_names(inputs=['x_origin', 'out_origin', 'grad'], outputs=['output']) - self.ksize = ksize - self.strides = strides - self.padding = padding - - self.ksize = validator.check_type('ksize', self.ksize, [int, tuple]) - self.strides = validator.check_type('strides', self.strides, [int, tuple]) - validator.check_type('padding', self.padding, [str]) - self.padding = validator.check_string('padding', self.padding, ['VALID', 'SAME']) + validator.check_type('ksize', ksize, [int, tuple]) + validator.check_type('strides', strides, [int, tuple]) + self.padding = 
validator.check_string('padding', padding.upper(), ['VALID', 'SAME']) self.add_prim_attr("padding", self.padding) - self.add_prim_attr('data_format', "NCHW") - - if isinstance(self.ksize, int): - self.pool_h = validator.check_integer("ksize", self.ksize, 1, Rel.GE) - self.pool_w = self.pool_h - self.add_prim_attr("ksize", (1, 1, self.ksize, self.ksize)) - elif isinstance(self.ksize, tuple): - if (len(self.ksize) != 2 and len(self.ksize) != 4): - raise ValueError('Attr \'ksize\' of \'Pool\' Op passed ' + - str(self.ksize)+', should be a int or a tuple of length 2 or 4.') - for ksize_val in self.ksize: - if (not isinstance(ksize_val, int)) or (ksize_val <= 0): - raise ValueError('Each value of attr \'ksize\' of \'MaxPool\' Op passed ' + - str(self.ksize)+', should be int and greater than 0.') - self.pool_h = self.ksize[-2] - self.pool_w = self.ksize[-1] - self.add_prim_attr("ksize", (1, 1, self.ksize[-2], self.ksize[-1])) - - if isinstance(self.strides, int): - self.stride_h = validator.check_integer("strides", self.strides, 1, Rel.GE) - self.stride_w = self.stride_h - self.add_prim_attr("strides", (1, 1, self.strides, self.strides)) - elif isinstance(self.strides, tuple): - if (len(self.strides) != 2 and len(self.strides) != 4): - raise ValueError('Attr \'strides\' of \'MaxPool\' Op passed ' + - str(self.strides)+', should be a int or a tuple of length 2 or 4.') - for stride_val in self.strides: - if (not isinstance(stride_val, int)) or (stride_val <= 0): - raise ValueError('Each value of attr \'strides\' of \'MaxPool\' Op passed ' + - str(self.strides)+', should be int and greater than 0.') - self.stride_h = self.strides[-2] - self.stride_w = self.strides[-1] - self.add_prim_attr("strides", (1, 1, self.strides[-2], self.strides[-1])) - - if self.padding == "VALID": - self.pad = 0 - elif self.padding == "SAME": - self.pad = math.floor((self.pool_h - 1) / 2) + self.is_maxpoolgradwithargmax = (self.name == "MaxPoolGradWithArgmax") + if not 
self.is_maxpoolgradwithargmax: + self.add_prim_attr('data_format', "NCHW") + + if isinstance(ksize, int): + validator.check_integer("ksize", ksize, 1, Rel.GE) + if self.is_maxpoolgradwithargmax: + self.ksize = (1, ksize, ksize, 1) + else: + self.ksize = (1, 1, ksize, ksize) else: - raise ValueError('The padding should be str and must be SAME or VALID,' - ' but got {}.'.format(self.padding)) + ksize_error = ValueError(f"The 'ksize' passed to operator {self.name} should be an positive int number" + f"or a tuple of two or four positive int numbers, but got {ksize}") + if len(ksize) != 2 and len(ksize) != 4: + raise ksize_error + for ksize_val in ksize: + if not isinstance(ksize_val, int) or (ksize_val <= 0): + raise ksize_error + if len(ksize) == 2 and self.is_maxpoolgradwithargmax: + self.ksize = (1, ksize[0], ksize[1], 1) + elif len(ksize) == 2 and not self.is_maxpoolgradwithargmax: + self.ksize = (1, 1, ksize[0], ksize[1]) + else: + self.ksize = ksize + self.add_prim_attr("ksize", self.ksize) + + if isinstance(strides, int): + validator.check_integer("strides", strides, 1, Rel.GE) + if self.is_maxpoolgradwithargmax: + self.strides = (1, strides, strides, 1) + else: + self.strides = (1, 1, strides, strides) + else: + strides_error = ValueError(f"The 'strides' passed to operator {self.name} should be an positive int number" + f"or a tuple of two or four positive int numbers, but got {strides}") + if len(strides) != 2 and len(strides) != 4: + raise strides_error + for strides_val in strides: + if not isinstance(strides_val, int) or (strides_val <= 0): + raise strides_error + if len(strides) == 2 and self.is_maxpoolgradwithargmax: + self.strides = (1, strides[0], strides[1], 1) + elif len(strides) == 2 and not self.is_maxpoolgradwithargmax: + self.strides = (1, 1, strides[0], strides[1]) + else: + self.strides = strides + self.add_prim_attr("strides", self.strides) class AvgPoolGrad(_PoolGrad): @@ -451,28 +451,13 @@ class MaximumGrad(Primitive): raise 
NotImplementedError -class MaxPoolGradWithArgmax(PrimitiveWithInfer): +class MaxPoolGradWithArgmax(_PoolGrad): """Computes the gradients of MaxPoolWithArgmax.""" @prim_attr_register - def __init__(self, - pad_mode="valid", - window=0, - pad=0, - stride=1, - data_mode=1, - ceil_mode=0, - alpha=1.0, - beta=0.0): + def __init__(self, ksize=1, strides=1, padding="VALID",): self.init_prim_io_names(inputs=['x', 'grad', 'argmax'], outputs=['output']) - - self.window = window - self.pool_h = self.pool_w = window - self.pad = pad - self.pad_mode = pad_mode - self.stride = stride - self.data_mode = data_mode - self.ceil_mode = ceil_mode + super(MaxPoolGradWithArgmax, self).__init__(ksize, strides, padding) def infer_shape(self, x_shape, grad_shape, argmax_shape): if not grad_shape: diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 21effd4bd3..9ee98d174e 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -682,186 +682,83 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): return x_dtype -class MaxPoolWithArgmax(PrimitiveWithInfer): - r""" - Performs max pooling on the input Tensor and return both max values and indices. - - Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs - regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size - :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows. - - .. math:: - \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1} - \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n) - - Args: - pad_mode (str): "valid", "same", "pad" the mode to fill padding. Default: "valid". - window (Union[int, tuple[int]]): The size of window, which is the kernel size, two `int` for width - and height. Default: 1. - pad (Union[int, tuple[int]]): If `pad_mode` is `pad`, the pad value to fill, two `int` for width - and height. Default: 0. 
- stride (Union[int, tuple[int]]): The stride of the window, that should be a tuple of two `int` for - width and height. Default: 1. - - Inputs: - - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. - - Outputs: - Tuple of 2 Tensor, the maxpool result and where max values from. - - - **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, H_{out}, W_{out})`. - - **mask** (Tensor) - Max values' index represented by the mask. - """ - - @prim_attr_register - def __init__(self, - pad_mode="valid", - window=1, - pad=0, - stride=1, - data_mode=1, - ceil_mode=0, - alpha=1.0, - beta=0.0): - self.init_prim_io_names(inputs=['x'], outputs=['output', 'argmax']) - self.window = validator.check_type('window', window, [int, tuple]) - if isinstance(window, int) and window <= 0: - raise ValueError('Attr \'window\' of \'MaxPoolWithArgmax\' Op passed ' - + str(self.window)+', should be a int or tuple and greater than 0.') - if isinstance(window, tuple) and (len(window) != 2 or - (not isinstance(window[0], int)) or - (not isinstance(window[1], int)) or - window[0] <= 0 or window[1] <= 0): - raise ValueError('Attr \'window\' of \'MaxPoolWithArgmax\' Op passed ' - + str(self.window)+', should be a int or tuple and greater than 0.') - self.pool_h = self.pool_w = window - self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad']) - if self.pad_mode == "valid": - self.pad = 0 - elif self.pad_mode == "same": - self.pad = math.floor((self.window - 1) / 2) - elif self.pad_mode == "pad": - self.pad = validator.check_integer('pad', pad, 0, Rel.GE) - - self.data_mode = validator.check_integer('data_mode', data_mode, 1, Rel.EQ) - self.ceil_mode = validator.check_integer('ceil_mode', ceil_mode, 0, Rel.EQ) - self.stride = validator.check_integer('stride', stride, 1, Rel.GE) - self.alpha = validator.check_type('alpha', alpha, [int, float]) - self.beta = validator.check_type('beta', beta, [int, float]) - self.is_tbe = not 
context.get_context("enable_ge") and context.get_context("device_target") == "Ascend" - - def infer_shape(self, x_shape): - validator.check_integer("x_shape", len(x_shape), 4, Rel.EQ) - pad = self.pad - h_input = x_shape[2] - w_input = x_shape[3] - h_out = (h_input + 2 * pad - (self.window - 1) - 1) / self.stride + 1 - h_out = math.floor(h_out) - w_out = (w_input + 2 * pad - (self.window - 1) - 1) / self.stride + 1 - w_out = math.floor(w_out) - out_shape = [x_shape[0], x_shape[1], h_out, w_out] - for shape_value in out_shape: - if shape_value <= 0: - raise ValueError("The kernel size is not valid please check it if is larger than data's shape size.") - k_size_vec = [1, self.window, self.window, 1] - argmax_shape = [] - if self.is_tbe: - for i in range(4): - if i == 2: - dim = k_size_vec[i - 1] * k_size_vec[i] - argmax_shape.append(dim) - elif i == 3: - dim = math.ceil(out_shape[i - 1] * out_shape[i] / 16) + 1 - argmax_shape.append(dim) - else: - argmax_shape.append(x_shape[i]) - else: - argmax_shape = out_shape - return out_shape, argmax_shape - - def infer_dtype(self, x_dtype): - out_dtype = x_dtype - validator.check_typename("x_type", x_dtype, (mstype.float16, mstype.float32)) - argmax_dtype = mstype.int32 - return out_dtype, argmax_dtype - - class _Pool(PrimitiveWithInfer): r""" Performs max/avg pooling operation. Args: - ksize (Union[int, tuple[int]]): The size of the window to take a max over, that should be a tuple - of two `int` for width and height. Default: 1. - stride (Union[int, tuple[int]]): The stride of the window, that should be a tuple of two `int` for - width and height. Default: 1. - padding (str): The optional values for pad mode "SAME", "VALID". Default: "VALID". + ksize (Union[int, tuple[int]]): The size of the kernel, that should be a tuple + of two `int` for height and width. Default: 1. + strides (Union[int, tuple[int]]): The stride of the window, that should be + a tuple of two `int` for height and width. Default: 1. 
+ padding (str): The optional values for pad mode, is "same" or "valid", not case sensitive. + Default: "valid". """ @prim_attr_register - def __init__(self, ksize=1, strides=1, padding="VALID"): - self.init_prim_io_names(inputs=['x'], outputs=['output']) - validator.check_type('padding', padding, [str]) - self.ksize = ksize - self.strides = strides - self.padding = padding.upper() - self.ksize = validator.check_type('ksize', self.ksize, [int, tuple]) - self.strides = validator.check_type('strides', self.strides, [int, tuple]) - self.padding = validator.check_string('padding', self.padding, ['VALID', 'SAME']) + def __init__(self, ksize=1, strides=1, padding="valid"): self.init_prim_io_names(inputs=['x'], outputs=['output']) + validator.check_type('ksize', ksize, [int, tuple]) + validator.check_type('strides', strides, [int, tuple]) + self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME']) self.add_prim_attr("padding", self.padding) - self.add_prim_attr('data_format', "NCHW") - - if isinstance(self.ksize, int): - self.pool_h = validator.check_integer("ksize", self.ksize, 1, Rel.GE) - self.pool_w = self.pool_h - self.add_prim_attr("ksize", (1, 1, self.ksize, self.ksize)) - elif isinstance(self.ksize, tuple): - if (len(self.ksize) != 2 or (not isinstance(self.ksize[0], int)) or (not isinstance(self.ksize[1], int)) - or self.ksize[0] <= 0 or self.ksize[1] <= 0): - raise ValueError('Each value of attr \'ksize\' of \'MaxPool\' Op passed ' + - str(self.ksize) + ', should be a int or a tuple of length 2 and greater than 0.') - self.pool_h = self.ksize[0] - self.pool_w = self.ksize[1] - self.add_prim_attr("ksize", (1, 1, self.ksize[0], self.ksize[1])) - - if isinstance(self.strides, int): - self.stride_h = validator.check_integer("strides", self.strides, 1, Rel.GE) - self.stride_w = self.stride_h - self.add_prim_attr("strides", (1, 1, self.strides, self.strides)) - elif isinstance(self.strides, tuple): - if (len(self.strides) != 2 or (not 
isinstance(self.strides[0], int)) or - (not isinstance(self.strides[1], int)) or self.strides[0] <= 0 or self.strides[1] <= 0): - raise ValueError('Each value of attr \'strides\' of \'MaxPool\' Op passed ' + - str(self.strides) + ', should be a int or a tuple of length 2 and greater than 0.') - self.stride_h = self.strides[0] - self.stride_w = self.strides[1] - self.add_prim_attr("strides", (1, 1, self.strides[0], self.strides[1])) + self.is_maxpoolwithargmax = (self.name == "MaxPoolWithArgmax") + if not self.is_maxpoolwithargmax: + self.add_prim_attr('data_format', "NCHW") - if self.padding == "VALID": - self.pad = 0 - elif self.padding == "SAME": - self.pad = math.floor((self.pool_h - 1) / 2) + if isinstance(ksize, int): + validator.check_integer("ksize", ksize, 1, Rel.GE) + self.ksize = (1, 1, ksize, ksize) else: - raise ValueError('The padding should be str and must be SAME or VALID,' - ' but got {}.'.format(self.padding)) - self.add_prim_attr('pad', self.pad) + if (len(ksize) != 2 or + (not isinstance(ksize[0], int)) or + (not isinstance(ksize[1], int)) or + ksize[0] <= 0 or + ksize[1] <= 0): + raise ValueError(f"The 'ksize' passed to operator {self.name} should be an positive int number or" + f"a tuple of two positive int numbers, but got {ksize}") + self.ksize = (1, 1, ksize[0], ksize[1]) + if self.is_maxpoolwithargmax: + self.ksize = (1, self.ksize[-2], self.ksize[-1], 1) + self.add_prim_attr("ksize", self.ksize) + + if isinstance(strides, int): + validator.check_integer("strides", strides, 1, Rel.GE) + self.strides = (1, 1, strides, strides) + else: + if (len(strides) != 2 or + (not isinstance(strides[0], int)) or + (not isinstance(strides[1], int)) or + strides[0] <= 0 or + strides[1] <= 0): + raise ValueError(f"The 'strides' passed to operator {self.name} should be an positive int number or" + f"a tuple of two positive int numbers, but got {strides}") + self.strides = (1, 1, strides[0], strides[1]) + if self.is_maxpoolwithargmax: + self.strides = (1, 
self.strides[-2], self.strides[-1], 1) + self.add_prim_attr("strides", self.strides) def infer_shape(self, x_shape): validator.check_integer("x_shape", len(x_shape), 4, Rel.EQ) - h_input = x_shape[2] - w_input = x_shape[3] + batch, channel, input_h, input_w = x_shape + if self.is_maxpoolwithargmax: + _, kernel_h, kernel_w, _ = self.ksize + _, stride_h, stride_w, _ = self.strides + else: + _, _, kernel_h, kernel_w = self.ksize + _, _, stride_h, stride_w = self.strides + if self.padding == "VALID": - h_out = math.ceil((h_input - (self.pool_h - 1)) / self.stride_h) - w_out = math.ceil((w_input - (self.pool_w - 1)) / self.stride_w) + out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h) + out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w) elif self.padding == "SAME": - h_out = math.ceil(h_input / self.stride_h) - w_out = math.ceil(w_input / self.stride_w) + out_h = math.ceil(input_h / stride_h) + out_w = math.ceil(input_w / stride_w) else: - raise ValueError('The padding should be str and must be SAME or VALID,' - ' but got {}.'.format(self.padding)) + raise ValueError(f"The padding of operator {self.name} should be a str and must be 'SAME' or 'VALID', " + f"but got {self.padding}.") + out_shape = [batch, channel, out_h, out_w] - out_shape = [x_shape[0], x_shape[1], h_out, w_out] for shape_value in out_shape: if shape_value <= 0: raise ValueError("The kernel size is not valid please check it if is larger than data's shape size.") @@ -887,11 +784,22 @@ class MaxPool(_Pool): \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n) Args: - ksize (Union[int, tuple[int]]): The size of the window to take a max over, that should be a tuple - of two `int` for width and height. Default: 1. - stride (Union[int, tuple[int]]): The stride of the window, that should be a tuple of two `int` for - width and height. Default: 1. - padding (str): The optional values for pad mode "SAME", "VALID". Default: "VALID". 
+ ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value, + is an int number that represents height and width are both ksize, or a tuple + of two int numbers that represent height and width respectively. Default: 1. + strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents + the height and width of movement are both strides, or a tuple of two int numbers that + represent height and width of movement respectively. Default: 1. + padding (str): The optional values for pad mode, is "same" or "valid", not case sensitive. + Default: "valid". + + - same: Adopts the way of completion. Output height and width will be the same as + the input. Total number of padding will be calculated for horizontal and vertical + direction and evenly distributed to top and bottom, left and right if possible. + Otherwise, the last extra padding will be done from the bottom and the right side. + + - valid: Adopts the way of discarding. The possibly largest height and width of output + will be return without padding. Extra pixels will be discarded. Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. @@ -901,10 +809,83 @@ class MaxPool(_Pool): """ @prim_attr_register - def __init__(self, ksize=1, strides=1, padding="VALID"): + def __init__(self, ksize=1, strides=1, padding="valid"): super(MaxPool, self).__init__(ksize, strides, padding) +class MaxPoolWithArgmax(_Pool): + r""" + Performs max pooling on the input Tensor and return both max values and indices. + + Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs + regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size + :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows. + + .. 
math:: + \text{output}(N_i, C_j, h, w) = \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1} + \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n) + + Args: + ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value and arg value, + is an int number that represents height and width are both ksize, or a tuple of + two int numbers that represent height and width respectively. Default: 1. + strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents + the height and width of movement are both strides, or a tuple of two int numbers that + represent height and width of movement respectively. Default: 1. + padding (str): The optional values for pad mode, is "same" or "valid", not case sensitive. + Default: "valid". + + - same: Adopts the way of completion. Output height and width will be the same as + the input. Total number of padding will be calculated for horizontal and vertical + direction and evenly distributed to top and bottom, left and right if possible. + Otherwise, the last extra padding will be done from the bottom and the right side. + + - valid: Adopts the way of discarding. The possibly largest height and width of output + will be return without padding. Extra pixels will be discarded. + + + Inputs: + - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. + + Outputs: + Tuple of 2 Tensor, the maxpool result and where max values from. + + - **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, H_{out}, W_{out})`. + - **mask** (Tensor) - Max values' index represented by the mask. 
+ """ + + def __init__(self, ksize=1, strides=1, padding="valid"): + super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding) + self.is_tbe = context.get_context("device_target") == "Ascend" + + def infer_shape(self, x_shape): + out_shape = _Pool.infer_shape(self, x_shape) + _, _, out_h, out_w = out_shape + _, kernel_h, kernel_w, _ = self.ksize + + argmax_shape = [] + if self.is_tbe: + for i in range(4): + if i == 2: + dim = kernel_h * kernel_w + argmax_shape.append(dim) + elif i == 3: + dim = math.ceil(out_h * out_w / 16) + 1 + argmax_shape.append(dim) + else: + argmax_shape.append(x_shape[i]) + else: + argmax_shape = out_shape + + return out_shape, argmax_shape + + def infer_dtype(self, x_dtype): + out_dtype = x_dtype + validator.check_typename("x_type", x_dtype, (mstype.float16, mstype.float32)) + argmax_dtype = mstype.uint16 + return out_dtype, argmax_dtype + + class AvgPool(_Pool): r""" Average pooling operation. @@ -919,11 +900,22 @@ class AvgPool(_Pool): \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n) Args: - ksize (Union[int, tuple[int]]): The size of the window to take a average over, that should be a tuple - of two `int` for width and height. Default: 1. - stride (Union[int, tuple[int]]): The stride of the window, that should be a tuple of two `int` for - width and height. Default: 1. - padding (str): The optional values for pad mode "SAME", "VALID". Default: "VALID". + ksize (Union[int, tuple[int]]): The size of kernel used to take the average value, + is an int number that represents height and width are both ksize, or a tuple + of two int numbers that represent height and width respectively. Default: 1. + strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents + the height and width of movement are both strides, or a tuple of two int numbers that + represent height and width of movement respectively. Default: 1. 
+ padding (str): The optional values for pad mode, is "same" or "valid", not case sensitive. + Default: "valid". + + - same: Adopts the way of completion. Output height and width will be the same as + the input. Total number of padding will be calculated for horizontal and vertical + direction and evenly distributed to top and bottom, left and right if possible. + Otherwise, the last extra padding will be done from the bottom and the right side. + + - valid: Adopts the way of discarding. The possibly largest height and width of output + will be return without padding. Extra pixels will be discarded. Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. @@ -933,7 +925,7 @@ class AvgPool(_Pool): """ @prim_attr_register - def __init__(self, ksize=1, strides=1, padding="VALID"): + def __init__(self, ksize=1, strides=1, padding="valid"): if context.get_context("device_target") == "GPU": self.target = "GPU" else: diff --git a/tests/perf_test/resnet_example.py b/tests/perf_test/resnet_example.py index 19d235c2b1..34413109de 100644 --- a/tests/perf_test/resnet_example.py +++ b/tests/perf_test/resnet_example.py @@ -103,7 +103,7 @@ class ResNet50(nn.Cell): self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad') self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='valid') + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='valid') self.layer1 = self.MakeLayer( block, 3, in_channels=64, out_channels=256, stride=1) diff --git a/tests/st/networks/test_cpu_lenet.py b/tests/st/networks/test_cpu_lenet.py index a3105721d3..9fd50f5d9b 100644 --- a/tests/st/networks/test_cpu_lenet.py +++ b/tests/st/networks/test_cpu_lenet.py @@ -21,6 +21,7 @@ import mindspore.nn as nn from mindspore.ops import operations as P from mindspore import Tensor + class LeNet(nn.Cell): def __init__(self): super(LeNet, self).__init__() @@ -50,8 +51,10 @@ class 
LeNet(nn.Cell): output = self.fc3(output) return output + context.set_context(mode=context.GRAPH_MODE, device_target="CPU") + def train(net, data, label): learning_rate = 0.01 momentum = 0.9 @@ -67,11 +70,12 @@ def train(net, data, label): print("+++++++++++++++++++++++++++") assert res + @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_onecard def test_lenet(): - data = Tensor(np.ones([32, 1 ,32, 32]).astype(np.float32) * 0.01) + data = Tensor(np.ones([32, 1, 32, 32]).astype(np.float32) * 0.01) label = Tensor(np.ones([32]).astype(np.int32)) net = LeNet() train(net, data, label) diff --git a/tests/st/networks/test_gpu_alexnet.py b/tests/st/networks/test_gpu_alexnet.py index 3b193e17d6..9f92fc630e 100644 --- a/tests/st/networks/test_gpu_alexnet.py +++ b/tests/st/networks/test_gpu_alexnet.py @@ -38,7 +38,7 @@ class AlexNet(nn.Cell): self.conv4 = nn.Conv2d(384, 384, 3, stride=1, pad_mode="same") self.conv5 = nn.Conv2d(384, 256, 3, stride=1, pad_mode="same") self.relu = nn.ReLU() - self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2,pad_mode="valid",padding=0) + self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid") self.flatten = nn.Flatten() self.fc1 = nn.Dense(6*6*256, 4096) self.fc2 = nn.Dense(4096, 4096) diff --git a/tests/st/ops/davinci/test_maxpool_with_argmax.py b/tests/st/ops/davinci/test_maxpool_with_argmax.py index c9312d666c..a6c875a9e8 100644 --- a/tests/st/ops/davinci/test_maxpool_with_argmax.py +++ b/tests/st/ops/davinci/test_maxpool_with_argmax.py @@ -20,26 +20,29 @@ import numpy as np import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter + context.set_context(device_target="Ascend") + + class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.maxpool = P.MaxPoolWithArgmax(pad_mode="same", - window=3, - stride=2) + self.maxpool = P.MaxPoolWithArgmax(padding="same", + ksize=3, + strides=2) self.x = 
Parameter(initializer( - 'normal', [1, 64, 112, 112]), name='w') + 'normal', [1, 64, 112, 112]), name='w') self.add = P.TensorAdd() - @ms_function def construct(self): output = self.maxpool(self.x) return output[0] + def test_net(): - x = np.random.randn(1,64,112,112).astype(np.float32) + x = np.random.randn(1, 64, 112, 112).astype(np.float32) maxpool = Net() output = maxpool() print("***********output output*********") diff --git a/tests/st/ops/davinci/test_maxpool_with_argmax_grad.py b/tests/st/ops/davinci/test_maxpool_with_argmax_grad.py index d97e2a06f8..3bbc835c1b 100644 --- a/tests/st/ops/davinci/test_maxpool_with_argmax_grad.py +++ b/tests/st/ops/davinci/test_maxpool_with_argmax_grad.py @@ -37,9 +37,9 @@ class Net(nn.Cell): def __init__(self): super(Net, self).__init__() - self.maxpool = P.MaxPoolWithArgmax(pad_mode="same", - window=3, - stride=2) + self.maxpool = P.MaxPoolWithArgmax(padding="same", + ksize=3, + strides=2) @ms_function def construct(self, x): diff --git a/tests/st/tbe_networks/resnet.py b/tests/st/tbe_networks/resnet.py index a1ece6556e..2024286b8f 100644 --- a/tests/st/tbe_networks/resnet.py +++ b/tests/st/tbe_networks/resnet.py @@ -267,7 +267,7 @@ class ResNet(nn.Cell): self.bn1 = bn_with_initialize(64) self.relu = P.ReLU() - self.maxpool = P.MaxPoolWithArgmax(window=3, stride=2, pad_mode="same") + self.maxpool = P.MaxPoolWithArgmax(ksize=3, strides=2, padding="SAME") self.layer1 = MakeLayer0(block, layer_num[0], in_channels=64, out_channels=256, stride=1) self.layer2 = MakeLayer1(block, layer_num[1], in_channels=256, out_channels=512, stride=2) diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py index 0afffc99df..2877bc7c7a 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/hw_opt_test.py @@ -21,7 +21,7 @@ addn = P.AddN() add = P.TensorAdd() sub = P.Sub() mul = 
P.Mul() -max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2) +max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2) make_tuple = Primitive('make_tuple') four2five = Primitive('Four2Five') five2four = Primitive('Five2Four') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py index 57bd2000c4..a24501e8b1 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_trans_op_test.py @@ -17,7 +17,7 @@ from mindspore.ops import Primitive tuple_getitem = Primitive('tuple_getitem') add = P.TensorAdd() -max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2) +max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2) make_tuple = Primitive('make_tuple') transdata = Primitive("TransData") diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py index 8cbad52db1..7d3985376b 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/mixed_precision_test.py @@ -21,7 +21,7 @@ addn = P.AddN() add = P.TensorAdd() sub = P.Sub() mul = P.Mul() -max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2) +max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2) make_tuple = Primitive('make_tuple') cast = Primitive('Cast') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py index 8cd18d1ac3..e353cf8fbe 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/transdata_split_test.py @@ -17,7 +17,7 @@ from mindspore.ops import 
Primitive tuple_getitem = Primitive('tuple_getitem') add = P.TensorAdd() -max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2) +max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2) make_tuple = Primitive('make_tuple') four2five = Primitive('Four2Five') five2four = Primitive('Five2Four') diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py index ea3def743d..c4fc50e0da 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/transpose_transdata_fusion_test.py @@ -17,7 +17,7 @@ from mindspore.ops import Primitive tuple_getitem = Primitive('tuple_getitem') add = P.TensorAdd() -max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2) +max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2) make_tuple = Primitive('make_tuple') transdata = Primitive("TransData") Transpose = P.Transpose() diff --git a/tests/ut/cpp/python_input/gtest_input/session/session_test.py b/tests/ut/cpp/python_input/gtest_input/session/session_test.py index ed074fc8d6..ee034a1ae0 100644 --- a/tests/ut/cpp/python_input/gtest_input/session/session_test.py +++ b/tests/ut/cpp/python_input/gtest_input/session/session_test.py @@ -22,7 +22,7 @@ add = P.TensorAdd() reshape = P.Reshape() cast = P.Cast() tuple_getitem = Primitive('tuple_getitem') -max_pool = P.MaxPoolWithArgmax(pad_mode="same", window=3, stride=2) +max_pool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2) def test_addn_cast(x, y, z): sum = addn((x, y)) diff --git a/tests/ut/python/communication/test_data_parallel_resnet.py b/tests/ut/python/communication/test_data_parallel_resnet.py index 037152a0b7..220e553b4f 100644 --- a/tests/ut/python/communication/test_data_parallel_resnet.py +++ b/tests/ut/python/communication/test_data_parallel_resnet.py @@ -107,7 
+107,7 @@ class ResNet18(nn.Cell): self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad') self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='pad') + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same') self.layer1 = self.MakeLayer( block, 2, in_channels=64, out_channels=256, stride=1) @@ -176,7 +176,7 @@ class ResNet9(nn.Cell): self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad') self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='same') + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same') self.layer1 = self.MakeLayer( block, 1, in_channels=64, out_channels=256, stride=1) diff --git a/tests/ut/python/exec/resnet_example.py b/tests/ut/python/exec/resnet_example.py index bfbb64f732..913e90a0bb 100644 --- a/tests/ut/python/exec/resnet_example.py +++ b/tests/ut/python/exec/resnet_example.py @@ -189,7 +189,7 @@ class ResNet50(nn.Cell): self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, weight_init=weight_conv) self.bn1 = bn_with_initialize(64) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2) self.layer1 = MakeLayer3( block, in_channels=64, out_channels=256, stride=1) diff --git a/tests/ut/python/exec/test_pooling.py b/tests/ut/python/exec/test_pooling.py index 9c378c15c2..0e526ff8d6 100644 --- a/tests/ut/python/exec/test_pooling.py +++ b/tests/ut/python/exec/test_pooling.py @@ -23,12 +23,10 @@ class MaxNet(nn.Cell): """MaxNet definition""" def __init__(self, kernel_size, - stride=None, - padding=0): + stride=None): super(MaxNet, self).__init__() self.maxpool = nn.MaxPool2d(kernel_size, - stride, - padding=padding) + stride) def construct(self, input_x): return self.maxpool(input_x) diff --git 
a/tests/ut/python/model/res18_example.py b/tests/ut/python/model/res18_example.py index eaf8bbc387..8875333465 100644 --- a/tests/ut/python/model/res18_example.py +++ b/tests/ut/python/model/res18_example.py @@ -106,7 +106,7 @@ class ResNet18(nn.Cell): self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad') self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, pad_mode='pad') + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same') self.layer1 = self.MakeLayer( block, 2, in_channels=64, out_channels=256, stride=1) @@ -175,7 +175,7 @@ class ResNet9(nn.Cell): self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2) self.layer1 = self.MakeLayer( block, 1, in_channels=64, out_channels=256, stride=1) diff --git a/tests/ut/python/nn/test_cell.py b/tests/ut/python/nn/test_cell.py index 882756f3d2..c583b27c1d 100644 --- a/tests/ut/python/nn/test_cell.py +++ b/tests/ut/python/nn/test_cell.py @@ -87,7 +87,7 @@ class ConvNet(nn.Cell): self.conv1 = nn.Conv2d(3, ConvNet.output_ch, kernel_size=7, stride=2, pad_mode="pad", padding=3) self.bn1 = nn.BatchNorm2d(ConvNet.output_ch) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="pad", padding=1) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") self.flatten = nn.Flatten() self.fc = nn.Dense( int(ConvNet.image_h*ConvNet.image_w*ConvNet.output_ch/(4*4)), diff --git a/tests/ut/python/nn/test_pooling.py b/tests/ut/python/nn/test_pooling.py index 694d202d13..10bb7632b2 100644 --- a/tests/ut/python/nn/test_pooling.py +++ b/tests/ut/python/nn/test_pooling.py @@ -46,8 +46,7 @@ class MaxNet(nn.Cell): padding=0): super(MaxNet, self).__init__() self.maxpool = nn.MaxPool2d(kernel_size, - stride, - 
padding=padding) + stride) def construct(self, x): return self.maxpool(x) diff --git a/tests/ut/python/ops/test_nn_ops.py b/tests/ut/python/ops/test_nn_ops.py index 5b9f37864c..cadac6dfb4 100644 --- a/tests/ut/python/ops/test_nn_ops.py +++ b/tests/ut/python/ops/test_nn_ops.py @@ -108,6 +108,7 @@ class ResidualBlock(nn.Cell): class VirtualLossGrad(PrimitiveWithInfer): """ VirtualLossGrad definition """ + @prim_attr_register def __init__(self): """init VirtualLossGrad""" @@ -124,6 +125,7 @@ class VirtualLossGrad(PrimitiveWithInfer): class VirtualLoss(PrimitiveWithInfer): """ VirtualLoss definition """ + @prim_attr_register def __init__(self): """init VirtualLoss""" @@ -138,6 +140,7 @@ class VirtualLoss(PrimitiveWithInfer): # pylint: disable=unused-argument dx = loss_grad(x, out, dout) return (dx,) + return bprop def infer_shape(self, x_shape): @@ -149,6 +152,7 @@ class VirtualLoss(PrimitiveWithInfer): class VirtualNetWithLoss(nn.Cell): """ VirtualNetWithLoss definition """ + def __init__(self, network): super(VirtualNetWithLoss, self).__init__() self.loss = VirtualLoss() @@ -161,6 +165,7 @@ class VirtualNetWithLoss(nn.Cell): class SoftMaxGrad(nn.Cell): """ SoftMaxGrad definition """ + def __init__(self, network): super(SoftMaxGrad, self).__init__() self.network = network @@ -171,6 +176,7 @@ class SoftMaxGrad(nn.Cell): class DropoutGrad(nn.Cell): """ DropoutGrad definition """ + def __init__(self, network): super(DropoutGrad, self).__init__() self.network = network @@ -181,6 +187,7 @@ class DropoutGrad(nn.Cell): class ScalarSummaryNet(nn.Cell): """ ScalarSummaryNet definition """ + def __init__(self): super(ScalarSummaryNet, self).__init__() self.summary = P.ScalarSummary() @@ -193,6 +200,7 @@ class ScalarSummaryNet(nn.Cell): class FusedBatchNormGrad(nn.Cell): """ FusedBatchNormGrad definition """ + def __init__(self, network): super(FusedBatchNormGrad, self).__init__() self.grad = C.GradOperation(name="get_all", get_all=True, sens_param=True) @@ -204,6 +212,7 @@ 
class FusedBatchNormGrad(nn.Cell): class NetWithLoss(nn.Cell): """ NetWithLoss definition """ + def __init__(self, network): super(NetWithLoss, self).__init__() self.loss = P.SmoothL1Loss() @@ -216,6 +225,7 @@ class NetWithLoss(nn.Cell): class Grad(nn.Cell): """ GradWrap definition """ + def __init__(self, network): super(Grad, self).__init__() self.network = network @@ -227,6 +237,7 @@ class Grad(nn.Cell): class BatchnormNet(nn.Cell): """ BatchnormNet definition """ + def __init__(self): super(BatchnormNet, self).__init__() self.conv1 = nn.Conv2d(3, 4, kernel_size=8, stride=2, pad_mode="pad", padding=3) @@ -247,6 +258,7 @@ class BatchnormNet(nn.Cell): class NetWithLossClass(nn.Cell): """ NetWithLossClass definition """ + def __init__(self, network): super(NetWithLossClass, self).__init__(auto_prefix=False) self.loss = nn.SoftmaxCrossEntropyWithLogits() @@ -259,12 +271,13 @@ class NetWithLossClass(nn.Cell): class BlockNet(nn.Cell): """ BlockNet definition """ + def __init__(self): super(BlockNet, self).__init__() self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, pad_mode="pad", padding=3) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2) self.block_down_sample = ResidualBlock( 64, 256, stride=1, down_sample=True ) @@ -281,6 +294,7 @@ class BlockNet(nn.Cell): class Conv2dWithBiasNet(nn.Cell): """ Conv2dWithBiasNet definition """ + def __init__(self): super(Conv2dWithBiasNet, self).__init__() self.conv = nn.Conv2d(3, 10, 1, bias_init='zeros') @@ -292,6 +306,7 @@ class Conv2dWithBiasNet(nn.Cell): class Conv2dNativeNet(nn.Cell): """ Conv2dNativeNet definition """ + def __init__(self): super(Conv2dNativeNet, self).__init__() self.conv = P.DepthwiseConv2dNative(channel_multiplier=3, kernel_size=(3, 3)) @@ -309,9 +324,10 @@ class Conv2dNativeNet(nn.Cell): class MakeRefKeyNet(nn.Cell): """ MakeRefKeyNet definition """ + def __init__(self): 
super(MakeRefKeyNet, self).__init__() - self.y= Parameter(Tensor([1.0], mindspore.float32), name="y") + self.y = Parameter(Tensor([1.0], mindspore.float32), name="y") def construct(self, x): key = P.MakeRefKey("y")() @@ -321,6 +337,7 @@ class MakeRefKeyNet(nn.Cell): class StateNet(nn.Cell): """ StateTestTensor definition """ + def __init__(self): super(StateNet, self).__init__() weight = Tensor(np.ones([2, 1, 2, 2], np.float32)) @@ -347,6 +364,24 @@ class ComparisonNet(nn.Cell): return ret +def test_max_pool_with_arg_max(): + class NetMaxPoolWithArgMax(nn.Cell): + def __init__(self): + """ ComparisonNet definition """ + super(NetMaxPoolWithArgMax, self).__init__() + self.max_pool_with_arg_max = P.MaxPoolWithArgmax(padding="valid", ksize=2, strides=1) + + def construct(self, x): + ret = self.max_pool_with_arg_max(x) + return ret + + x = Tensor(np.ones([1, 1, 3, 3], np.float32)) + net = NetMaxPoolWithArgMax() + context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + ret = net(x) + print(ret) + + test_cases = [ ('SoftMaxGrad', { 'block': SoftMaxGrad(VirtualNetWithLoss(P.Softmax())), @@ -382,7 +417,7 @@ test_cases = [ 'desc_inputs': [Tensor(np.ones([1, 3, 8, 8], np.float32)), Tensor(np.zeros([1, 64, 4, 4], np.float32))], }), ('Conv2dWithBiasGrad', { - 'block': Grad(NetWithLossClass(Conv2dWithBiasNet())), + 'block': Grad(NetWithLossClass(Conv2dWithBiasNet())), 'desc_inputs': [Tensor(np.ones([1, 3, 16, 16], np.float32)), Tensor(np.zeros([1, 2560], np.float32))], }), ('Conv2dNativeGrad', { @@ -407,114 +442,93 @@ test_cases = [ }), ] - test_cases_for_verify_exception = [ ('Conv2d_ValueError_1', { - 'block': (lambda _ : P.Conv2D(3, 4, mode=-2.0), {'exception': ValueError}), + 'block': (lambda _: P.Conv2D(3, 4, mode=-2.0), {'exception': ValueError}), 'desc_inputs': [0], }), ('Conv2d_ValueError_2', { - 'block': (lambda _ : P.Conv2D(3, 4, mode=-2), {'exception': ValueError}), + 'block': (lambda _: P.Conv2D(3, 4, mode=-2), {'exception': ValueError}), 'desc_inputs': 
[0], }), ('MaxPoolWithArgmax_ValueError_1', { - 'block': (lambda _ : P.MaxPoolWithArgmax(pad_mode='sane'), {'exception': ValueError}), + 'block': (lambda _: P.MaxPoolWithArgmax(padding='sane'), {'exception': ValueError}), 'desc_inputs': [0], }), ('MaxPoolWithArgmax_ValueError_2', { - 'block': (lambda _ : P.MaxPoolWithArgmax(data_mode=2), {'exception': ValueError}), + 'block': (lambda _: P.MaxPoolWithArgmax(ksize='1'), {'exception': ValueError}), 'desc_inputs': [0], }), ('MaxPoolWithArgmax_ValueError_3', { - 'block': (lambda _ : P.MaxPoolWithArgmax(ceil_mode=2), {'exception': ValueError}), + 'block': (lambda _: P.MaxPoolWithArgmax(ksize=-2), {'exception': ValueError}), 'desc_inputs': [0], }), ('MaxPoolWithArgmax_ValueError_4', { - 'block': (lambda _ : P.MaxPoolWithArgmax(pad_mode="pad", pad=-1), {'exception': ValueError}), - 'desc_inputs': [0], - }), - ('MaxPoolWithArgmax_ValueError_5', { - 'block': (lambda _ : P.MaxPoolWithArgmax(pad_mode="pad", pad='1'), {'exception': ValueError}), - 'desc_inputs': [0], - }), - ('MaxPoolWithArgmax_ValueError_6', { - 'block': (lambda _ : P.MaxPoolWithArgmax(window='1'), {'exception': ValueError}), - 'desc_inputs': [0], - }), - ('MaxPoolWithArgmax_ValueError_7', { - 'block': (lambda _ : P.MaxPoolWithArgmax(window=-2), {'exception': ValueError}), - 'desc_inputs': [0], - }), - ('MaxPoolWithArgmax_ValueError_8', { - 'block': (lambda _ : P.MaxPoolWithArgmax(stride=-1), {'exception': ValueError}), - 'desc_inputs': [0], - }), - ('MaxPoolWithArgmax_ValueError_9', { - 'block': (lambda _ : P.MaxPoolWithArgmax(alpha='1'), {'exception': ValueError}), + 'block': (lambda _: P.MaxPoolWithArgmax(strides=-1), {'exception': ValueError}), 'desc_inputs': [0], }), ('FusedBatchNorm_ValueError_1', { - 'block': (lambda _ : P.FusedBatchNorm(mode="1", epsilon=1e-5, momentum=0.1), {'exception': ValueError}), + 'block': (lambda _: P.FusedBatchNorm(mode="1", epsilon=1e-5, momentum=0.1), {'exception': ValueError}), 'desc_inputs': [0], }), 
('FusedBatchNorm_ValueError_2', { - 'block': (lambda _ : P.FusedBatchNorm(mode=2, epsilon=1e-5, momentum=0.1), {'exception': ValueError}), + 'block': (lambda _: P.FusedBatchNorm(mode=2, epsilon=1e-5, momentum=0.1), {'exception': ValueError}), 'desc_inputs': [0], }), ('FusedBatchNorm_ValueError_3', { - 'block': (lambda _ : P.FusedBatchNorm(mode=0, epsilon=-1e-5, momentum=0.1), {'exception': ValueError}), + 'block': (lambda _: P.FusedBatchNorm(mode=0, epsilon=-1e-5, momentum=0.1), {'exception': ValueError}), 'desc_inputs': [0], }), ('FusedBatchNorm_ValueError_4', { - 'block': (lambda _ : P.FusedBatchNorm(mode=0, epsilon=1e-5, momentum=-0.1), {'exception': ValueError}), + 'block': (lambda _: P.FusedBatchNorm(mode=0, epsilon=1e-5, momentum=-0.1), {'exception': ValueError}), 'desc_inputs': [0], }), ('FusedBatchNorm_ValueError_5', { - 'block': (lambda _ : P.FusedBatchNorm(mode=1, epsilon=-0.001, momentum=0.0), {'exception': ValueError}), + 'block': (lambda _: P.FusedBatchNorm(mode=1, epsilon=-0.001, momentum=0.0), {'exception': ValueError}), 'desc_inputs': [0], }), ('Softmax_ValueError_1', { - 'block': (lambda _ : P.Softmax("1"), {'exception': ValueError}), + 'block': (lambda _: P.Softmax("1"), {'exception': ValueError}), 'desc_inputs': [0], }), ('Softmax_ValueError_2', { - 'block': (lambda _ : P.Softmax(1.1), {'exception': ValueError}), + 'block': (lambda _: P.Softmax(1.1), {'exception': ValueError}), 'desc_inputs': [0], }), ('Softmax_ValueError_3', { - 'block': (lambda _ : P.Softmax(axis="1"), {'exception': ValueError}), + 'block': (lambda _: P.Softmax(axis="1"), {'exception': ValueError}), 'desc_inputs': [0], }), ('DropoutGenMask_ValueError_1', { - 'block': (lambda _ : P.DropoutGenMask(Seed0="seed0"), {'exception': ValueError}), + 'block': (lambda _: P.DropoutGenMask(Seed0="seed0"), {'exception': ValueError}), 'desc_inputs': [0], }), ('DropoutGenMask_ValueError_2', { - 'block': (lambda _ : P.DropoutGenMask(Seed0=1.0), {'exception': ValueError}), + 'block': (lambda _: 
P.DropoutGenMask(Seed0=1.0), {'exception': ValueError}), 'desc_inputs': [0], }), ('DropoutGenMask_ValueError_3', { - 'block': (lambda _ : P.DropoutGenMask(Seed1="seed1"), {'exception': ValueError}), + 'block': (lambda _: P.DropoutGenMask(Seed1="seed1"), {'exception': ValueError}), 'desc_inputs': [0], }), ('DropoutGenMask_ValueError_4', { - 'block': (lambda _ : P.DropoutGenMask(Seed1=2.0), {'exception': ValueError}), + 'block': (lambda _: P.DropoutGenMask(Seed1=2.0), {'exception': ValueError}), 'desc_inputs': [0], }), ('MaxPool2d_ValueError_1', { - 'block': (nn.MaxPool2d(kernel_size=120, stride=1, pad_mode="valid", padding=0), {'exception': ValueError}), + 'block': (nn.MaxPool2d(kernel_size=120, stride=1, pad_mode="valid"), {'exception': ValueError}), 'desc_inputs': [Tensor(np.random.randn(32, 3, 112, 112).astype(np.float32).transpose(0, 3, 1, 2))], }), ('MaxPool2d_ValueError_2', { 'block': ( - lambda _ : nn.MaxPool2d(kernel_size=120, stride=True, pad_mode="valid", padding=0), + lambda _: nn.MaxPool2d(kernel_size=120, stride=True, pad_mode="valid"), {'exception': ValueError}, ), 'desc_inputs': [Tensor(np.random.randn(32, 3, 112, 112).astype(np.float32).transpose(0, 3, 1, 2))], }), ('MaxPool2d_ValueError_3', { 'block': ( - lambda _ : nn.MaxPool2d(kernel_size=3, stride=True, pad_mode="valid", padding=0), + lambda _: nn.MaxPool2d(kernel_size=3, stride=True, pad_mode="valid"), {'exception': ValueError}, ), 'desc_inputs': [Tensor(np.random.randn(32, 3, 112, 112).astype(np.float32).transpose(0, 3, 1, 2))], @@ -532,4 +546,3 @@ def test_compile(): @mindspore_test(pipeline_for_verify_exception_for_case_by_case_config) def test_check_exception(): return test_cases_for_verify_exception - diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index 0f5b716e39..092d6e32be 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -571,7 +571,7 @@ test_case_nn_ops = [ 'desc_bprop': [[3, 4, 6, 6]], 'skip': ['backward']}), 
('MaxPoolWithArgmax', { - 'block': P.MaxPoolWithArgmax(window=2, stride=2), + 'block': P.MaxPoolWithArgmax(ksize=2, strides=2), 'desc_inputs': [[128, 32, 32, 64]], 'desc_bprop': [[128, 32, 8, 16], [128, 32, 8, 16]]}), ('SoftmaxCrossEntropyWithLogits', { diff --git a/tests/ut/python/ops/test_ops_check.py b/tests/ut/python/ops/test_ops_check.py index a7e1b41c4a..aa379cc64e 100644 --- a/tests/ut/python/ops/test_ops_check.py +++ b/tests/ut/python/ops/test_ops_check.py @@ -160,16 +160,16 @@ test_case_check_ops = [ 'block': nn.Dense(1, 6, has_bias=False, bias_init=Tensor(np.ones([6]).astype(np.float32))), 'desc_inputs': [Tensor(np.ones(shape=[6, 1]).astype(np.float32))]}), ('MaxPool2d_1', { - 'block': nn.MaxPool2d(5, pad_mode='same', padding=0), + 'block': nn.MaxPool2d(5, pad_mode='same'), 'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}), ('MaxPool2d_2', { - 'block': nn.MaxPool2d(5, pad_mode='valid', padding=0), + 'block': nn.MaxPool2d(5, pad_mode='valid'), 'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}), ('AvgPool2d_1', { - 'block': nn.AvgPool2d(5, pad_mode='same', padding=0), + 'block': nn.AvgPool2d(5, pad_mode='same'), 'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}), ('AvgPool2d_2', { - 'block': nn.AvgPool2d(5, pad_mode='valid', padding=0), + 'block': nn.AvgPool2d(5, pad_mode='valid'), 'desc_inputs': [Tensor(np.ones(shape=[5, 5, 8, 8]).astype(np.float32))]}), ('Conv2D_1', { 'block': P.Conv2D(1, 6, pad_mode='same', pad=0), diff --git a/tests/ut/python/pynative_mode/ge/ops/test_pooling.py b/tests/ut/python/pynative_mode/ge/ops/test_pooling.py index e6cf88a9ca..d5b90b6edd 100644 --- a/tests/ut/python/pynative_mode/ge/ops/test_pooling.py +++ b/tests/ut/python/pynative_mode/ge/ops/test_pooling.py @@ -42,12 +42,10 @@ def test_maxpool2d(): """ test_maxpool2d """ kernel_size = 3 stride = 3 - padding = 0 - max_pool = nn.MaxPool2d(kernel_size, stride, padding=padding) + max_pool = 
nn.MaxPool2d(kernel_size, stride) assert max_pool.kernel_size == 3 assert max_pool.stride == 3 - assert max_pool.padding == 0 input_data = Tensor(np.random.randint(0, 255, [1, 3, 6, 6]).astype(np.float32)) output = max_pool(input_data) output_np = output.asnumpy() diff --git a/tests/ut/python/pynative_mode/nn/test_cell.py b/tests/ut/python/pynative_mode/nn/test_cell.py index 16adcd6119..2d5196b80d 100644 --- a/tests/ut/python/pynative_mode/nn/test_cell.py +++ b/tests/ut/python/pynative_mode/nn/test_cell.py @@ -89,7 +89,7 @@ class ConvNet(nn.Cell): self.conv1 = nn.Conv2d(3, ConvNet.output_ch, kernel_size=7, stride=2, pad_mode='pad', padding=3) self.bn1 = nn.BatchNorm2d(ConvNet.output_ch) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='pad', padding=1) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") self.flatten = nn.Flatten() self.fc = nn.Dense( int(ConvNet.image_h*ConvNet.image_w*ConvNet.output_ch/(4*4)), diff --git a/tests/ut/python/pynative_mode/nn/test_pooling.py b/tests/ut/python/pynative_mode/nn/test_pooling.py index ab95fec091..bb1822f8a8 100644 --- a/tests/ut/python/pynative_mode/nn/test_pooling.py +++ b/tests/ut/python/pynative_mode/nn/test_pooling.py @@ -49,23 +49,14 @@ def test_maxpool2d(): """ test_maxpool2d """ kernel_size = 3 stride = 3 - padding = 2 - max_pool = nn.MaxPool2d(kernel_size, stride, pad_mode='SAME', padding=padding) + max_pool = nn.MaxPool2d(kernel_size, stride, pad_mode='SAME') assert max_pool.kernel_size == 3 assert max_pool.stride == 3 - assert max_pool.padding == 2 input_data = Tensor(np.random.randint(0, 255, [1, 3, 6, 6])*0.1) output = max_pool(input_data) output_np = output.asnumpy() assert isinstance(output_np[0][0][0][0], (np.float32, np.float64)) -def test_maxpool2d_error_padding(): - """ test_maxpool2d_error_padding """ - kernel_size = 3.5 - stride = 3 - padding = 1 - with pytest.raises(ValueError): - nn.MaxPool2d(kernel_size, stride, padding=padding) diff --git 
a/tests/ut/python/pynative_mode/vm/test_vm.py b/tests/ut/python/pynative_mode/vm/test_vm.py index 4ea0abd753..77510337b0 100644 --- a/tests/ut/python/pynative_mode/vm/test_vm.py +++ b/tests/ut/python/pynative_mode/vm/test_vm.py @@ -23,7 +23,7 @@ def test_avg_pooling(): [-9., -1., 3., 4.], [1., -1., -3., -6.], [-2., -1., -2., -15.]]]]).astype(np.float32) - out = vm.avg_pooling(input_data, pool_h=2, pool_w=2, stride=1, pad=0) + out = vm.avg_pooling(input_data, pool_h=2, pool_w=2, stride=1) expect_out = [[[[-4.25, 0.0, 4.25], [-2.5, -0.5, -0.5], [-0.75, -1.75, -6.5]]]] @@ -37,9 +37,9 @@ def test_avg_pool_grad(): [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]]]).astype(np.float32) - dout = vm.avg_pooling(input_data, pool_h=2, pool_w=2, stride=1, pad=0) + dout = vm.avg_pooling(input_data, pool_h=2, pool_w=2, stride=1) print("vm.avg_pooling dout: ", dout) - out = vm.avg_pool_grad(dout, input_data.shape, 2, 2, 1, 0) + out = vm.avg_pool_grad(dout, input_data.shape, 2, 2, 1) print("vm.avg_pool_grad: ", out) assert True @@ -202,7 +202,7 @@ def test_max_pooling(): [-9., -1., 3., 4.], [1., -1., -3., -6.], [-2., -1., -2., -15.]]]]).astype(np.float32) - out = vm.max_pooling(input_data, pool_h=2, pool_w=2, stride=1, pad=0) + out = vm.max_pooling(input_data, pool_h=2, pool_w=2, stride=1) expect_out = [[[[-1., 3., 9.], [1., 3., 4.], [1., -1., -2.]]]] diff --git a/tests/ut/python/utils/test_serialize.py b/tests/ut/python/utils/test_serialize.py index 41da45ab25..cc6f346b77 100644 --- a/tests/ut/python/utils/test_serialize.py +++ b/tests/ut/python/utils/test_serialize.py @@ -44,7 +44,7 @@ class Net(nn.Cell): self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, weight_init="zeros") self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU() - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2) self.flatten = nn.Flatten() self.fc = nn.Dense(int(224*224*64/16), num_classes) diff --git 
a/tests/vm_impl/nn_ops_vm_impl.py b/tests/vm_impl/nn_ops_vm_impl.py index f6bbdca55a..fc1fa95024 100644 --- a/tests/vm_impl/nn_ops_vm_impl.py +++ b/tests/vm_impl/nn_ops_vm_impl.py @@ -19,66 +19,82 @@ from mindspore.ops.operations import _grad_ops as G from mindspore.common.tensor import Tensor from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters from .vm_interface import vm + + # pylint: disable=unused-argument @vm_impl_getters.register(P.ScalarSummary) def vm_impl_scalar_summary(self): """Generate vm_impl function for ScalarSummary""" + def vm_impl(string_in, scalar): """Implement by vm mode.""" return scalar + return vm_impl @vm_impl_getters.register(P.ReLU) def vm_impl_relu(self): """Generate vm_impl function for ReLU""" + def vm_impl(x): x = x.asnumpy() output = Tensor(vm.relu(x)) return output + return vm_impl + @vm_impl_getters.register(P.Flatten) def vm_impl_flatten(self): """Generate vm_impl function for Flatten""" + def vm_impl(x): x = x.asnumpy() return Tensor(vm.flatten_batch(x)) + return vm_impl @vm_impl_getters.register(P.Softmax) def vm_impl_softmax(self): """Generate vm_impl function for Softmax""" + def vm_impl(x): x = x.asnumpy() return Tensor(vm.softmax(x)) + return vm_impl @vm_impl_getters.register(P.LogSoftmax) def vm_impl_log_softmax(self): """Generate vm_impl function for LogSoftmax""" + def vm_impl(x): x = x.asnumpy() return Tensor(vm.logsoftmax(x)) + return vm_impl @vm_impl_getters.register(P.Tanh) def vm_impl_tanh(self): """Generate vm_impl function for Tanh""" + def vm_impl(x): x = x.asnumpy() return Tensor(vm.tanh(x)) + return vm_impl @vm_impl_getters.register(P.FusedBatchNorm) def vm_impl_fused_batch_norm(self): """Generate vm_impl function for FusedBatchNorm""" + def vm_impl(x, scale, b, mean, variance): # pylint: disable=unused-argument x = x.asnumpy() @@ -92,12 +108,14 @@ def vm_impl_fused_batch_norm(self): momentum=self.momentum) return Tensor(out), Tensor(x_mean), Tensor(x_var), \ Tensor(running_mean), 
Tensor(running_var) + return vm_impl @vm_impl_getters.register(P.BatchNorm) def vm_impl_batch_norm(self): """Generate vm_impl function for BatchNorm""" + def vm_impl(x, scale, b, mean, variance): # pylint: disable=unused-argument x = x.asnumpy() @@ -110,83 +128,106 @@ def vm_impl_batch_norm(self): eps=self.epsilon) return Tensor(out), Tensor(x_mean), Tensor(x_var), \ Tensor(running_mean), Tensor(running_var) + return vm_impl @vm_impl_getters.register(P.Conv2D) def vm_impl_conv2d(self): """Generate vm_impl function for Conv2D""" + def vm_impl(x, w): x = x.asnumpy() weight = w.asnumpy() bias = None out = vm.conv2d(x, weight, bias, self.stride, self.pad, self.dilation) return Tensor(out) + return vm_impl @vm_impl_getters.register(G.MaxPoolGradWithArgmax) def vm_impl_max_pool_grad_with_argmax(self): """Generate vm_impl function for MaxPoolGradWithArgmax""" - def vm_impl(x, argmax, dout): + + def vm_impl(x, dout, argmax): + print("buxue") + print(argmax) x = x.asnumpy() dout = dout.asnumpy() arg_max = argmax.asnumpy() - dx = vm.max_pool_grad_with_argmax(x, arg_max, dout, self.pool_h, self.pool_w, self.stride, self.pad) + dx = vm.max_pool_grad_with_argmax(x, dout, arg_max, + self.ksize[1], self.ksize[2], self.strides[1]) return Tensor(dx) + return vm_impl @vm_impl_getters.register(P.MaxPoolWithArgmax) def vm_impl_max_pool_with_argmax(self): """Generate vm_impl function for MaxPoolWithArgmax""" + def vm_impl(x): x = x.asnumpy() - out, out_argmax = vm.max_pool_with_argmax(x, self.pool_h, self.pool_w, self.stride, self.pad) + out, out_argmax = vm.max_pool_with_argmax(x, self.ksize[1], self.ksize[2], self.strides[1]) return Tensor(out), Tensor(out_argmax) + return vm_impl + @vm_impl_getters.register(P.MaxPool) def vm_impl_max_pool(self): """Generate vm_impl function for MaxPool""" + def vm_impl(x): x = x.asnumpy() - out = vm.max_pooling(x, self.pool_h, self.pool_w, self.stride_h, self.pad) + out = vm.max_pooling(x, self.ksize[-2], self.ksize[-1], self.strides[-2]) return 
Tensor(out) + return vm_impl + @vm_impl_getters.register(G.MaxPoolGrad) def vm_impl_max_pool_grad(self): """Generate vm_impl function for MaxPoolGrad""" + def vm_impl(x, out, dout): x = x.asnumpy() dout = dout.asnumpy() - out = vm.max_pool_grad(x, dout, self.pool_h, self.pool_w, self.stride_h, self.pad) + out = vm.max_pool_grad(x, dout, self.ksize[-2], self.ksize[-1], self.strides[-2]) return Tensor(out) + return vm_impl + @vm_impl_getters.register(P.AvgPool) -def vm_impl_max_pool(self): +def vm_impl_avg_pool(self): """Generate vm_impl function for AvgPool""" + def vm_impl(x): x = x.asnumpy() - out = vm.avg_pooling(x, self.pool_h, self.pool_w, self.stride_h, self.pad) + out = vm.avg_pooling(x, self.ksize[-2], self.ksize[-1], self.strides[-2]) return Tensor(out) + return vm_impl + @vm_impl_getters.register(G.AvgPoolGrad) def vm_impl_avg_pool_grad(self): """Generate vm_impl function for AvgPoolGrad""" + def vm_impl(dout, origin_shape): dout = dout.asnumpy() - out = vm.avg_pool_grad(dout, origin_shape, self.pool_h, self.pool_w, self.stride_h, self.pad) + out = vm.avg_pool_grad(dout, origin_shape, self.ksize[-2], self.ksize[-1], self.strides[-2]) return Tensor(out) + return vm_impl @vm_impl_getters.register(G.FusedBatchNormGrad) def vm_impl_fused_batch_norm_grad(self): """Generate vm_impl function for FusedBatchNormGrad""" + def vm_impl(dy, x, scale, save_mean, save_inv_variance): dy = dy.asnumpy() x = x.asnumpy() @@ -195,11 +236,14 @@ def vm_impl_fused_batch_norm_grad(self): save_inv_variance = save_inv_variance.asnumpy() dx, dscale, dshift = vm.batch_norm_grad(dy, x, scale, save_mean, save_inv_variance) return (Tensor(dx), Tensor(dscale), Tensor(dshift)) + return vm_impl + @vm_impl_getters.register(G.BatchNormGrad) def vm_impl_fused_batch_norm_grad(self): """Generate vm_impl function for BatchNormGrad""" + def vm_impl(dy, x, scale, save_mean, save_inv_variance): dy = dy.asnumpy() x = x.asnumpy() @@ -208,104 +252,123 @@ def vm_impl_fused_batch_norm_grad(self): 
save_inv_variance = save_inv_variance.asnumpy() dx, dscale, dshift = vm.batch_norm_grad(dy, x, scale, save_mean, save_inv_variance) return (Tensor(dx), Tensor(dscale), Tensor(dshift)) + return vm_impl @vm_impl_getters.register(G.ReluGrad) def vm_impl_relu_grad(self): """Generate vm_impl function for ReluGrad""" + def vm_impl(y_backprop, x): x = x.asnumpy() y_backprop = y_backprop.asnumpy() - y_backprop = vm.relu_grad(x.copy())*y_backprop + y_backprop = vm.relu_grad(x.copy()) * y_backprop return Tensor(y_backprop) + return vm_impl @vm_impl_getters.register(P.Conv2DBackpropInput) def vm_impl_conv2d_backprop_input(self): """Generate vm_impl function for Conv2DBackpropInput""" + def vm_impl(dout, w, x_size): dout = dout.asnumpy() w = w.asnumpy() dx = vm.conv2d_backprop_input(dout, x_size, w, self.stride, self.pad) return Tensor(dx) + return vm_impl @vm_impl_getters.register(G.Conv2DBackpropFilter) def vm_impl_conv2d_backprop_filter(self): """Generate vm_impl function for Conv2DBackpropFilter""" + def vm_impl(dout, x, w_size): x = x.asnumpy() dout = dout.asnumpy() dw = vm.conv2d_backprop_filter(dout, x, w_size, self.stride, self.pad) return Tensor(dw) + return vm_impl @vm_impl_getters.register(G.FlattenGrad) def vm_impl_flatten_grad(self): """Generate vm_impl function for FlattenGrad""" + def vm_impl(dout, x): dout = dout.asnumpy() dout = vm.flatten_grad(dout, x) return Tensor(dout) + return vm_impl @vm_impl_getters.register(P.BiasAdd) def vm_impl_bias_add(self): """Generate vm_impl function for BiasAdd""" + def vm_impl(wx, bias): wx = wx.asnumpy() bias = bias.asnumpy() out = wx + bias return Tensor(out) + return vm_impl @vm_impl_getters.register(G.BiasAddGrad) def vm_impl_bias_add_grad(self): """Generate vm_impl function for BiasAddGrad""" + def vm_impl(dout): dout = dout.asnumpy() shape = np.shape(dout) return Tensor(np.add.reduce(dout, axis=tuple(range(len(shape) - 1)))) + return vm_impl @vm_impl_getters.register(P.SoftmaxCrossEntropyWithLogits) def 
vm_impl_softmax_cross_entropy_with_logits(self): """Generate vm_impl function for SoftmaxCrossEntropyWithLogits""" + def vm_impl(logits, labels): logits = logits.asnumpy() labels = labels.asnumpy() loss, dx = vm.softmax_cross_entropy_with_logits(logits, labels) return (Tensor(np.array(loss)), Tensor(dx)) + return vm_impl @vm_impl_getters.register(P.SparseSoftmaxCrossEntropyWithLogits) def vm_impl_sparse_softmax_cross_entropy_with_logits(self): """Generate vm_impl function for SparseSoftmaxCrossEntropyWithLogits""" + def vm_impl(logits, labels): logits = logits.asnumpy() labels = labels.asnumpy() n_class = labels.max() + 1 n_sample = labels.shape[0] - one_hot_label = np.zeros((n_sample, n_class))#3个样本,4个类别 - one_hot_label[:, labels] = 1#非零列赋值为1 + one_hot_label = np.zeros((n_sample, n_class)) # 3个样本,4个类别 + one_hot_label[:, labels] = 1 # 非零列赋值为1 loss, dx = vm.softmax_cross_entropy_with_logits(logits, one_hot_label) if self.is_grad: return (Tensor(dx),) return (Tensor(np.array(loss)),) + return vm_impl + @vm_impl_getters.register(P.ApplyMomentum) def vm_impl_momentum(self): """Generate vm_impl function for Momentum""" + def vm_impl(variable, accumulation, learning_rate, @@ -327,19 +390,24 @@ def vm_impl_momentum(self): return vm_impl + @vm_impl_getters.register(P.ResizeBilinear) def vm_impl_resize_bilinear(self): """Generate vm_impl function for ResizeBilinear""" + def vm_impl(x): out = vm.ResizeBilinear(x) return Tensor(out) + return vm_impl @vm_impl_getters.register(G.ResizeBilinearGrad) def vm_impl_resize_bilinear_grad(self): """Generate vm_impl function for ResizeBilinearGrad""" + def vm_impl(dout, original_image): out = vm.ResizeBilinearGrad(dout, original_image) return Tensor(out) + return vm_impl diff --git a/tests/vm_impl/vm_me.py b/tests/vm_impl/vm_me.py index 03a0e1a885..ba51a3b13b 100644 --- a/tests/vm_impl/vm_me.py +++ b/tests/vm_impl/vm_me.py @@ -19,7 +19,7 @@ from mindspore._checkparam import Rel from mindspore._checkparam import ParamValidator as 
validator -def avg_pooling(x, pool_h, pool_w, stride, pad): +def avg_pooling(x, pool_h, pool_w, stride): """ Applies average pooling over an input array. @@ -28,26 +28,25 @@ def avg_pooling(x, pool_h, pool_w, stride, pad): pool_h (int): Height of the pooling window. pool_w (int): Width of the pooling window. stride (int): The stride of the sliding window. - pad (int): Padding to be added on height and width. Returns: numpy.ndarray, an output array after applying average pooling on input array. """ validator.check_integer("stride", stride, 0, Rel.GT) num, channel, height, width = x.shape - out_h = (height + 2*pad - pool_h)//stride + 1 - out_w = (width + 2*pad - pool_w)//stride + 1 + out_h = (height - pool_h)//stride + 1 + out_w = (width - pool_w)//stride + 1 - col = im2col(x, pool_h, pool_w, stride, pad) + col = im2col(x, pool_h, pool_w, stride) col = col.reshape(-1, pool_h*pool_w) out = np.mean(col, axis=1) - out = out.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2) + out = out.reshape((num, out_h, out_w, channel)).transpose(0, 3, 1, 2) return out -def avg_pool_grad(dout, origin_shape, pool_h, pool_w, stride, pad): +def avg_pool_grad(dout, origin_shape, pool_h, pool_w, stride): """ Gets grad of average pooling. @@ -57,7 +56,6 @@ def avg_pool_grad(dout, origin_shape, pool_h, pool_w, stride, pad): pool_h (int): Height of the pooling window. pool_w (int): Width of the pooling window. stride (int): The stride of the sliding window. - pad (int): Padding to be added on height and width. Returns: numpy.ndarray, grad of avgerage pooling. 
@@ -324,38 +322,38 @@ def matmul(x, w, b=None): return y -def max_pooling(x, pool_h, pool_w, stride, pad): +def max_pooling(x, pool_h, pool_w, stride): """Max pooling.""" validator.check_integer("stride", stride, 0, Rel.GT) num, channel, height, width = x.shape - out_h = (height + 2*pad - pool_h)//stride + 1 - out_w = (width + 2*pad - pool_w)//stride + 1 + out_h = (height - pool_h)//stride + 1 + out_w = (width - pool_w)//stride + 1 - col = im2col(x, pool_h, pool_w, stride, pad) + col = im2col(x, pool_h, pool_w, stride) col = col.reshape(-1, pool_h*pool_w) out = np.max(col, axis=1) - out = out.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2) + out = out.reshape((num, out_h, out_w, channel)).transpose(0, 3, 1, 2) return out -def max_pool_grad(x, dout, pool_h, pool_w, stride, pad): +def max_pool_grad(x, dout, pool_h, pool_w, stride): """Grad of max pooling.""" dout = dout.transpose(0, 2, 3, 1) pool_size = pool_h * pool_w dmax = np.zeros((dout.size, pool_size)) - col = im2col(x, pool_h, pool_w, stride, pad) + col = im2col(x, pool_h, pool_w, stride) col = col.reshape(-1, pool_h*pool_w) arg_max = np.argmax(col, axis=1) dmax[np.arange(arg_max.size), arg_max.flatten()] = dout.flatten() dmax = dmax.reshape(dout.shape + (pool_size,)) dcol = dmax.reshape(dmax.shape[0]*dmax.shape[1]*dmax.shape[2], -1) - dx = col2im(dcol, x.shape, pool_h, pool_w, stride, pad) + dx = col2im(dcol, x.shape, pool_h, pool_w, stride) return dx -def max_pool_grad_with_argmax(x, arg_max, dout, pool_h, pool_w, stride, pad): +def max_pool_grad_with_argmax(x, dout, arg_max, pool_h, pool_w, stride): """Grad of max pooling with argmax.""" dout = dout.transpose(0, 2, 3, 1) pool_size = pool_h * pool_w @@ -363,22 +361,22 @@ def max_pool_grad_with_argmax(x, arg_max, dout, pool_h, pool_w, stride, pad): dmax[np.arange(arg_max.size), arg_max.flatten()] = dout.flatten() dmax = dmax.reshape(dout.shape + (pool_size,)) dcol = dmax.reshape(dmax.shape[0]*dmax.shape[1]*dmax.shape[2], -1) - dx = col2im(dcol, 
x.shape, pool_h, pool_w, stride, pad) + dx = col2im(dcol, x.shape, pool_h, pool_w, stride) return dx -def max_pool_with_argmax(x, pool_h, pool_w, stride, pad): +def max_pool_with_argmax(x, pool_h, pool_w, stride): """Max pooling with argmax.""" validator.check_integer("stride", stride, 0, Rel.GT) num, channel, height, width = x.shape - out_h = (height + 2*pad - pool_h)//stride + 1 - out_w = (width + 2*pad - pool_w)//stride + 1 - col = im2col(x, pool_h, pool_w, stride, pad) + out_h = (height - pool_h)//stride + 1 + out_w = (width - pool_w)//stride + 1 + col = im2col(x, pool_h, pool_w, stride) col = col.reshape(-1, pool_h*pool_w) out = np.max(col, axis=1) out_argmax = np.argmax(col, axis=1) - out = out.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2) - out_argmax = out_argmax.reshape(num, out_h, out_w, channel).transpose(0, 3, 1, 2) + out = out.reshape((num, out_h, out_w, channel)).transpose(0, 3, 1, 2) + out_argmax = out_argmax.reshape((num, out_h, out_w, channel)).transpose(0, 3, 1, 2) return out, out_argmax From 5637f80692854eeae6e1fd4a439b5194905fb465 Mon Sep 17 00:00:00 2001 From: Zirui Wu Date: Wed, 1 Apr 2020 11:24:25 -0400 Subject: [PATCH 133/367] implemented multi-thread index writer for mindrecord num threads cannot be more than num shards minor fix clang style fix address review comments --- .../include/shard_index_generator.h | 10 +- .../mindrecord/io/shard_index_generator.cc | 98 ++++++++++++------- 2 files changed, 72 insertions(+), 36 deletions(-) diff --git a/mindspore/ccsrc/mindrecord/include/shard_index_generator.h b/mindspore/ccsrc/mindrecord/include/shard_index_generator.h index f59dbe9bf0..1febd28fc2 100644 --- a/mindspore/ccsrc/mindrecord/include/shard_index_generator.h +++ b/mindspore/ccsrc/mindrecord/include/shard_index_generator.h @@ -85,14 +85,14 @@ class ShardIndexGenerator { /// \param sql /// \param data /// \return - MSRStatus BindParamaterExecuteSQL( + MSRStatus BindParameterExecuteSQL( sqlite3 *db, const std::string &sql, 
const std::vector>> &data); INDEX_FIELDS GenerateIndexFields(const std::vector &schema_detail); - MSRStatus ExcuteTransaction(const int &shard_no, const std::pair &db, - const std::vector &raw_page_ids, const std::map &blob_id_to_page_id); + MSRStatus ExecuteTransaction(const int &shard_no, const std::pair &db, + const std::vector &raw_page_ids, const std::map &blob_id_to_page_id); MSRStatus CreateShardNameTable(sqlite3 *db, const std::string &shard_name); @@ -103,12 +103,16 @@ class ShardIndexGenerator { void AddIndexFieldByRawData(const std::vector &schema_detail, std::vector> &row_data); + void DatabaseWriter(); // worker thread + std::string file_path_; bool append_; ShardHeader shard_header_; uint64_t page_size_; uint64_t header_size_; int schema_count_; + std::atomic_int task_; + std::atomic_bool write_success_; std::vector> fields_; }; } // namespace mindrecord diff --git a/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc b/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc index 1c14d30f30..c0108241a1 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc @@ -13,6 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +#include #include "mindrecord/include/shard_index_generator.h" #include "common/utils.h" @@ -26,7 +27,13 @@ using mindspore::MsLogLevel::INFO; namespace mindspore { namespace mindrecord { ShardIndexGenerator::ShardIndexGenerator(const std::string &file_path, bool append) - : file_path_(file_path), append_(append), page_size_(0), header_size_(0), schema_count_(0) {} + : file_path_(file_path), + append_(append), + page_size_(0), + header_size_(0), + schema_count_(0), + task_(0), + write_success_(true) {} MSRStatus ShardIndexGenerator::Build() { ShardHeader header = ShardHeader(); @@ -284,7 +291,7 @@ std::pair ShardIndexGenerator::GenerateRawSQL( return {SUCCESS, sql}; } -MSRStatus ShardIndexGenerator::BindParamaterExecuteSQL( +MSRStatus ShardIndexGenerator::BindParameterExecuteSQL( sqlite3 *db, const std::string &sql, const std::vector>> &data) { sqlite3_stmt *stmt = nullptr; @@ -471,9 +478,9 @@ INDEX_FIELDS ShardIndexGenerator::GenerateIndexFields(const std::vector &s return {SUCCESS, std::move(fields)}; } -MSRStatus ShardIndexGenerator::ExcuteTransaction(const int &shard_no, const std::pair &db, - const std::vector &raw_page_ids, - const std::map &blob_id_to_page_id) { +MSRStatus ShardIndexGenerator::ExecuteTransaction(const int &shard_no, const std::pair &db, + const std::vector &raw_page_ids, + const std::map &blob_id_to_page_id) { // Add index data to database std::string shard_address = shard_header_.get_shard_address_by_id(shard_no); if (shard_address.empty()) { @@ -493,7 +500,7 @@ MSRStatus ShardIndexGenerator::ExcuteTransaction(const int &shard_no, const std: if (data.first != SUCCESS) { return FAILED; } - if (BindParamaterExecuteSQL(db.second, sql.second, data.second) == FAILED) { + if (BindParameterExecuteSQL(db.second, sql.second, data.second) == FAILED) { return FAILED; } MS_LOG(INFO) << "Insert " << data.second.size() << " rows to index db."; @@ -514,37 +521,62 @@ MSRStatus ShardIndexGenerator::WriteToDatabase() { page_size_ = 
shard_header_.get_page_size(); header_size_ = shard_header_.get_header_size(); schema_count_ = shard_header_.get_schema_count(); - if (shard_header_.get_shard_count() <= kMaxShardCount) { - // Create one database per shard - for (int shard_no = 0; shard_no < shard_header_.get_shard_count(); ++shard_no) { - // Create database - auto db = CreateDatabase(shard_no); - if (db.first != SUCCESS || db.second == nullptr) { - return FAILED; - } - MS_LOG(INFO) << "Init index db for shard: " << shard_no << " successfully."; - - // Pre-processing page information - auto total_pages = shard_header_.GetLastPageId(shard_no) + 1; - - std::map blob_id_to_page_id; - std::vector raw_page_ids; - for (uint64_t i = 0; i < total_pages; ++i) { - std::shared_ptr cur_page = shard_header_.GetPage(shard_no, i).first; - if (cur_page->get_page_type() == "RAW_DATA") { - raw_page_ids.push_back(i); - } else if (cur_page->get_page_type() == "BLOB_DATA") { - blob_id_to_page_id[cur_page->get_page_type_id()] = i; - } - } + if (shard_header_.get_shard_count() > kMaxShardCount) { + MS_LOG(ERROR) << "num shards: " << shard_header_.get_shard_count() << " exceeds max count:" << kMaxSchemaCount; + return FAILED; + } + task_ = 0; // set two atomic vars to initial value + write_success_ = true; - if (ExcuteTransaction(shard_no, db, raw_page_ids, blob_id_to_page_id) != SUCCESS) { - return FAILED; + // spawn half the physical threads or total number of shards whichever is smaller + const unsigned int num_workers = + std::min(std::thread::hardware_concurrency() / 2 + 1, static_cast(shard_header_.get_shard_count())); + + std::vector threads; + threads.reserve(num_workers); + + for (size_t t = 0; t < threads.capacity(); t++) { + threads.emplace_back(std::thread(&ShardIndexGenerator::DatabaseWriter, this)); + } + + for (size_t t = 0; t < threads.capacity(); t++) { + threads[t].join(); + } + return write_success_ ? 
SUCCESS : FAILED; +} + +void ShardIndexGenerator::DatabaseWriter() { + int shard_no = task_++; + while (shard_no < shard_header_.get_shard_count()) { + auto db = CreateDatabase(shard_no); + if (db.first != SUCCESS || db.second == nullptr || write_success_ == false) { + write_success_ = false; + return; + } + + MS_LOG(INFO) << "Init index db for shard: " << shard_no << " successfully."; + + // Pre-processing page information + auto total_pages = shard_header_.GetLastPageId(shard_no) + 1; + + std::map blob_id_to_page_id; + std::vector raw_page_ids; + for (uint64_t i = 0; i < total_pages; ++i) { + std::shared_ptr cur_page = shard_header_.GetPage(shard_no, i).first; + if (cur_page->get_page_type() == "RAW_DATA") { + raw_page_ids.push_back(i); + } else if (cur_page->get_page_type() == "BLOB_DATA") { + blob_id_to_page_id[cur_page->get_page_type_id()] = i; } - MS_LOG(INFO) << "Generate index db for shard: " << shard_no << " successfully."; } + + if (ExecuteTransaction(shard_no, db, raw_page_ids, blob_id_to_page_id) != SUCCESS) { + write_success_ = false; + return; + } + MS_LOG(INFO) << "Generate index db for shard: " << shard_no << " successfully."; + shard_no = task_++; } - return SUCCESS; } } // namespace mindrecord } // namespace mindspore From 917aa02e2af3905d081edc37128cfc022b9fe35c Mon Sep 17 00:00:00 2001 From: Cathy Wong Date: Wed, 8 Apr 2020 16:36:06 -0400 Subject: [PATCH 134/367] Correct dataset error checking --- mindspore/dataset/engine/datasets.py | 2 -- mindspore/dataset/engine/validators.py | 8 ++++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index ab2290c13c..2058bbf826 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -82,8 +82,6 @@ def zip(datasets): if len(datasets) <= 1: raise ValueError( "Can't zip empty or just one dataset!") - if not isinstance(datasets, tuple): - raise TypeError("The zip function %s type 
error!" % (datasets)) return ZipDataset(datasets) diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index 26d6241945..4c84cfe354 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -105,13 +105,13 @@ def check(method): "The %s function %s exceeds the boundary!" % ( func_name, param_name)) if isinstance(arg, int) and param_name == "num_parallel_workers" and ( - arg <= 0 or arg > cpu_count()): + arg < 1 or arg > cpu_count()): raise ValueError( "The %s function %s exceeds the boundary(%s)!" % ( func_name, param_name, cpu_count())) if isinstance(arg, int) and param_name != "seed" \ and param_name != "count" and param_name != "prefetch_size" \ - and param_name != "num_parallel_workers" and (arg <= 0 or arg > 2147483647): + and param_name != "num_parallel_workers" and (arg < 1 or arg > 2147483647): raise ValueError( "The %s function %s exceeds the boundary!" % ( func_name, param_name)) @@ -271,8 +271,8 @@ def check_interval_closed(param, param_name, valid_range): def check_num_parallel_workers(value): check_type(value, 'num_parallel_workers', int) - if value <= 0 or value > cpu_count(): - raise ValueError("num_parallel_workers exceeds the boundary between 0 and {}!".format(cpu_count())) + if value < 1 or value > cpu_count(): + raise ValueError("num_parallel_workers exceeds the boundary between 1 and {}!".format(cpu_count())) def check_num_samples(value): From 0790ef8a85d624040e444650f681cce82e5d1a20 Mon Sep 17 00:00:00 2001 From: jinyaohui Date: Thu, 9 Apr 2020 09:23:39 +0800 Subject: [PATCH 135/367] modify comment --- example/yolov3_coco2017/train.py | 2 +- mindspore/ccsrc/pipeline/pipeline_ge.cc | 2 +- mindspore/ccsrc/transform/convert.cc | 2 +- mindspore/nn/wrap/loss_scale.py | 2 +- tests/ut/python/utils/test_callback.py | 8 ++++---- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/example/yolov3_coco2017/train.py b/example/yolov3_coco2017/train.py index 
0a32a6d30d..121e2aa810 100644 --- a/example/yolov3_coco2017/train.py +++ b/example/yolov3_coco2017/train.py @@ -67,7 +67,7 @@ if __name__ == '__main__': parser.add_argument("--distribute", type=bool, default=False, help="Run distribute, default is false.") parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.") parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.") - parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or non-sink mode, default is sink") + parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or not, default is sink") parser.add_argument("--epoch_size", type=int, default=10, help="Epoch size, default is 10") parser.add_argument("--batch_size", type=int, default=32, help="Batch size, default is 32.") parser.add_argument("--checkpoint_path", type=str, default="", help="Checkpoint file path") diff --git a/mindspore/ccsrc/pipeline/pipeline_ge.cc b/mindspore/ccsrc/pipeline/pipeline_ge.cc index 60960a2eb7..6ce0ea5316 100644 --- a/mindspore/ccsrc/pipeline/pipeline_ge.cc +++ b/mindspore/ccsrc/pipeline/pipeline_ge.cc @@ -453,7 +453,7 @@ void ProcessGeArg(const std::map& info, const py:: } // process the first args of tensor - // only in Dataset non-sink Mode, fp_bp graph need input tensors + // only in dataset normal(non-sink) mode, fp_bp graph need input tensors if (ConfigManager::GetInstance().dataset_mode() == DS_NORMAL_MODE) { for (std::size_t i = 0; i < size; i++) { ValuePtr converted = nullptr; diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index 24b0188fa1..c9a27a2607 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -447,7 +447,7 @@ void DfGraphConvertor::InitLoopVar(std::vector *init_input) { if (ConfigManager::GetInstance().dataset_mode() == DS_SINK_MODE) { value = ConfigManager::GetInstance().iter_num(); } else { - MS_LOG(INFO) << "Run with 
non-sink mode, the iterator number will always be 1"; + MS_LOG(INFO) << "Run with normal(non-sink) mode, the iterator number will always be 1"; value = 1; ConfigManager::GetInstance().set_iter_num(value); } diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index fd1c22be1f..c6d61e6983 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -51,7 +51,7 @@ class DynamicLossScaleUpdateCell(Cell): In every training step, the loss scaling value will be updated by loss scaling value/`scale_factor` when there is overflow. And it will be increased by loss scaling value * `scale_factor` if there is no overflow for a continuous `scale_window` steps. This cell is used for Graph mode training in which all - logic will be executed on device side(Another training mode is non-sink mode in which some logic will be + logic will be executed on device side(Another training mode is normal(non-sink) mode in which some logic will be executed on host). 
Args: diff --git a/tests/ut/python/utils/test_callback.py b/tests/ut/python/utils/test_callback.py index 7e7b893e0c..8c10c8886d 100644 --- a/tests/ut/python/utils/test_callback.py +++ b/tests/ut/python/utils/test_callback.py @@ -112,8 +112,8 @@ def test_save_checkpoint(): os.remove('./test_files/test_ckpt-model.pkl') -def test_loss_monitor_sink_model(): - """Test loss monitor sink model.""" +def test_loss_monitor_sink_mode(): + """Test loss monitor sink mode.""" cb_params = _InternalCallbackParam() cb_params.cur_epoch_num = 4 cb_params.cur_step_num = 2 @@ -131,8 +131,8 @@ def test_loss_monitor_sink_model(): callbacklist.end(run_context) -def test_loss_monitor_feed_model(): - """Test loss monitor non-sink mode.""" +def test_loss_monitor_normal_mode(): + """Test loss monitor normal(non-sink) mode.""" cb_params = _InternalCallbackParam() run_context = RunContext(cb_params) loss_cb = LossMonitor(1) From fb343bd6073d3498b08c74468f74d421e2de30b0 Mon Sep 17 00:00:00 2001 From: kswang Date: Wed, 8 Apr 2020 17:05:17 +0800 Subject: [PATCH 136/367] add mem manager --- mindspore/ccsrc/CMakeLists.txt | 1 + .../device/ascend/ascend_kernel_runtime.cc | 57 +---- .../device/ascend/ascend_kernel_runtime.h | 5 +- .../device/ascend/ascend_memory_manager.cc | 65 +++++ .../device/ascend/ascend_memory_manager.h | 35 +++ mindspore/ccsrc/device/device_address.h | 5 + .../ccsrc/device/gpu/gpu_kernel_runtime.cc | 108 ++------ .../ccsrc/device/gpu/gpu_kernel_runtime.h | 9 +- .../ccsrc/device/gpu/gpu_memory_manager.cc | 88 +++++++ .../ccsrc/device/gpu/gpu_memory_manager.h | 40 +++ mindspore/ccsrc/device/kernel_runtime.cc | 231 +++--------------- mindspore/ccsrc/device/kernel_runtime.h | 38 +-- mindspore/ccsrc/device/memory_manager.cc | 170 +++++++++++++ mindspore/ccsrc/device/memory_manager.h | 71 ++++++ .../ccsrc/session/anf_runtime_algorithm.cc | 10 + .../ccsrc/session/anf_runtime_algorithm.h | 1 + mindspore/ccsrc/session/gpu_session.cc | 4 - tests/ut/cpp/CMakeLists.txt | 2 + 18 files 
changed, 562 insertions(+), 378 deletions(-) create mode 100644 mindspore/ccsrc/device/ascend/ascend_memory_manager.cc create mode 100644 mindspore/ccsrc/device/ascend/ascend_memory_manager.h create mode 100644 mindspore/ccsrc/device/gpu/gpu_memory_manager.cc create mode 100644 mindspore/ccsrc/device/gpu/gpu_memory_manager.h create mode 100644 mindspore/ccsrc/device/memory_manager.cc create mode 100644 mindspore/ccsrc/device/memory_manager.h diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 9f559a51eb..1d104148c3 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -132,6 +132,7 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "kernel/kash/*.cc" "device/kernel_info.cc" "device/kernel_runtime.cc" + "device/memory_manager.cc" "device/kernel_runtime_manager.cc" "device/convert_tensor_utils.cc" "pre_activate/common/*.cc" diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc index dc7eb5449b..0c2a97a5a6 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc @@ -37,6 +37,7 @@ #include "kernel/tbe/tbe_utils.h" #include "kernel/tbe/tbe_python_funcs.h" #include "pre_activate/mem_reuse/mem_reuse_checker.h" +#include "device/ascend/ascend_memory_manager.h" using mindspore::device::ascend::ProfilingManager; using mindspore::device::ascend::ProfilingUtils; @@ -47,8 +48,6 @@ using std::vector; namespace mindspore { namespace device { namespace ascend { -static const uint64_t ASCEND_MEM_SIZE = 20; -static const uint64_t ASCEND_MEM_SIZE_BYTE = (ASCEND_MEM_SIZE << 30); static const size_t PRAMATER_OUTPUT_INDEX = 0; AscendKernelRuntime::~AscendKernelRuntime() { graph_model_map_.clear(); } @@ -86,7 +85,8 @@ void AscendKernelRuntime::ReleaseDeviceRes() { MS_EXCEPTION(DeviceProcessError) << "rtSetDevice, ret[" << static_cast(ret) << "]"; } - 
FreeDeviceMemory(); + MS_EXCEPTION_IF_NULL(mem_manager_); + mem_manager_->FreeDeviceMemory(); (void)DestroyHccl(); (void)ResetDevice(); (void)ProfilingManager::GetInstance().StopProfiling(); @@ -109,11 +109,9 @@ bool AscendKernelRuntime::Init() { if (!ret) { return ret; } - - ret = MallocDeviceMemory(); - if (!ret) { - return ret; - } + mem_manager_ = std::make_shared(); + MS_EXCEPTION_IF_NULL(mem_manager_); + mem_manager_->MallocDeviceMemory(); ret = ProfilingManager::GetInstance().StartupProfiling(device_id_); if (!ret) { @@ -239,13 +237,6 @@ DeviceAddressPtr AscendKernelRuntime::CreateDeviceAddress(void *device_ptr, size return std::make_shared(device_ptr, device_size, format, type_id); } -void AscendKernelRuntime::MallocOpMemory(const DeviceAddressPtr address, size_t size, int) { - auto device_ptr = AscendMemoryAllocator::GetInstance().AllocTensorMem(size); - MS_EXCEPTION_IF_NULL(device_ptr); - address->ptr_ = device_ptr; - address->mem_dynamic_alloc_ = true; -} - bool AscendKernelRuntime::GenTask(const session::KernelGraph *graph) { auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); @@ -474,42 +465,6 @@ bool AscendKernelRuntime::DestroyHccl() { context_ptr->set_enable_hccl(false); return true; } - -bool AscendKernelRuntime::MallocDeviceMemory() { - device_mem_size_ = ASCEND_MEM_SIZE_BYTE; - static_mem_offset_ = FloatToSize(device_mem_size_ * GRAPH_INIT_ASCEND_MEM_RATIO); - auto ret = rtMalloc(reinterpret_cast(&device_mem_base_), static_mem_offset_, RT_MEMORY_HBM); - if (ret != RT_ERROR_NONE) { - MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << static_mem_offset_ << "] fail, ret[" << ret << "]"; - } - device_mem_pool_size_ = FloatToSize(device_mem_size_ * (1 - GRAPH_INIT_ASCEND_MEM_RATIO)); - ret = rtMalloc(reinterpret_cast(&device_mem_pool_base_), device_mem_pool_size_, RT_MEMORY_HBM); - if (ret != RT_ERROR_NONE) { - MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << device_mem_pool_size_ << "] fail, ret[" << 
ret << "]"; - } - AscendMemoryAllocator::GetInstance().set_device_mem_pool_base(device_mem_pool_base_); - AscendMemoryAllocator::GetInstance().set_device_mem_pool_size(device_mem_pool_size_); - return true; -} - -void AscendKernelRuntime::FreeDeviceMemory() { - if (device_mem_base_ != nullptr) { - auto ret = rtFree(device_mem_base_); - if (ret != RT_ERROR_NONE) { - MS_LOG(ERROR) << "rtFree mem size[" << device_mem_size_ << "] fail, ret[" << ret << "]"; - } - device_mem_base_ = nullptr; - } - if (device_mem_pool_base_ != nullptr) { - auto ret = rtFree(device_mem_pool_base_); - if (ret != RT_ERROR_NONE) { - MS_LOG(ERROR) << "rtFree mem size[" << device_mem_pool_size_ << "] fail, ret[" << ret << "]"; - } - device_mem_pool_base_ = nullptr; - } -} - -void AscendKernelRuntime::FreeHostMemory() { dynamic_mem_offset_ = 0; } } // namespace ascend } // namespace device } // namespace mindspore diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h index dbd1460d24..0eedad3d2b 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h @@ -39,13 +39,11 @@ class AscendKernelRuntime : public KernelRuntime { bool GenTask(const session::KernelGraph *graph) override; bool RunTask(const session::KernelGraph *graph) override; bool LoadTask(const session::KernelGraph *graph) override; - void FreeHostMemory() override; protected: DeviceAddressPtr CreateDeviceAddress(void *device_ptr, size_t device_size, const string &format, TypeId type_id) override; bool SyncStream() override; - void MallocOpMemory(const DeviceAddressPtr address, size_t size, int flag) override; private: bool InitDevice(); @@ -53,8 +51,7 @@ class AscendKernelRuntime : public KernelRuntime { bool HcclInit(); bool NeedDestroyHccl(); bool DestroyHccl(); - bool MallocDeviceMemory(); - void FreeDeviceMemory(); + void ClearGraphModelMap(); void ReleaseDeviceRes() override; uint32_t 
GetGraphModelId(const session::KernelGraph *kernel_graph); diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_manager.cc b/mindspore/ccsrc/device/ascend/ascend_memory_manager.cc new file mode 100644 index 0000000000..f033d81d82 --- /dev/null +++ b/mindspore/ccsrc/device/ascend/ascend_memory_manager.cc @@ -0,0 +1,65 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "device/ascend/ascend_memory_manager.h" +#include "device/ascend/ascend_memory_allocator.h" +#include "utils/context/ms_context.h" +#include "runtime/mem.h" +namespace mindspore { +namespace device { +namespace ascend { +static const uint64_t ASCEND_MEM_SIZE = 20; +static const uint64_t ASCEND_MEM_SIZE_BYTE = (ASCEND_MEM_SIZE << 30); + +void AscendMemoryManager::MallocDeviceMemory() { + device_mem_size_ = ASCEND_MEM_SIZE_BYTE; + static_mem_offset_ = FloatToSize(device_mem_size_ * GRAPH_INIT_ASCEND_MEM_RATIO); + auto ret = rtMalloc(reinterpret_cast(&device_mem_base_), static_mem_offset_, RT_MEMORY_HBM); + if (ret != RT_ERROR_NONE) { + MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << static_mem_offset_ << "] fail, ret[" << ret << "]"; + } + device_mem_pool_size_ = FloatToSize(device_mem_size_ * (1 - GRAPH_INIT_ASCEND_MEM_RATIO)); + ret = rtMalloc(reinterpret_cast(&device_mem_pool_base_), device_mem_pool_size_, RT_MEMORY_HBM); + if (ret != RT_ERROR_NONE) { + MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" 
<< device_mem_pool_size_ << "] fail, ret[" << ret << "]"; + } + AscendMemoryAllocator::GetInstance().set_device_mem_pool_base(device_mem_pool_base_); + AscendMemoryAllocator::GetInstance().set_device_mem_pool_size(device_mem_pool_size_); +} + +void AscendMemoryManager::FreeDeviceMemory() { + if (device_mem_base_ != nullptr) { + auto ret = rtFree(device_mem_base_); + if (ret != RT_ERROR_NONE) { + MS_LOG(ERROR) << "rtFree mem size[" << device_mem_size_ << "] fail, ret[" << ret << "]"; + } + device_mem_base_ = nullptr; + } + if (device_mem_pool_base_ != nullptr) { + auto ret = rtFree(device_mem_pool_base_); + if (ret != RT_ERROR_NONE) { + MS_LOG(ERROR) << "rtFree mem size[" << device_mem_pool_size_ << "] fail, ret[" << ret << "]"; + } + device_mem_pool_base_ = nullptr; + } +} + +void *AscendMemoryManager::AllocTensorMemDynamic(size_t size) { + return AscendMemoryAllocator::GetInstance().AllocTensorMem(size); +} +} // namespace ascend +} // namespace device +} // namespace mindspore diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_manager.h b/mindspore/ccsrc/device/ascend/ascend_memory_manager.h new file mode 100644 index 0000000000..8639fb5c72 --- /dev/null +++ b/mindspore/ccsrc/device/ascend/ascend_memory_manager.h @@ -0,0 +1,35 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_MANAGER_H_ +#define MINDSPORE_MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_MANAGER_H_ +#include "device/memory_manager.h" +namespace mindspore { +namespace device { +namespace ascend { +class AscendMemoryManager : public MemoryManager { + public: + AscendMemoryManager() = default; + virtual ~AscendMemoryManager() = default; + + void MallocDeviceMemory() override; + void FreeDeviceMemory() override; + void *AllocTensorMemDynamic(size_t size) override; +}; +} // namespace ascend +} // namespace device +} // namespace mindspore +#endif // MINDSPORE_MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_MANAGER_H_ diff --git a/mindspore/ccsrc/device/device_address.h b/mindspore/ccsrc/device/device_address.h index 1610d43372..cb022427e3 100644 --- a/mindspore/ccsrc/device/device_address.h +++ b/mindspore/ccsrc/device/device_address.h @@ -33,12 +33,14 @@ class CPUKernelRuntime; } // namespace cpu namespace ascend { class AscendKernelRuntime; +class AscendMemoryManager; namespace tasksink { class TaskGenerator; } // namespace tasksink } // namespace ascend namespace gpu { class GPUKernelRuntime; +class GPUMemoryManager; } // namespace gpu } // namespace device } // namespace mindspore @@ -70,12 +72,15 @@ class DeviceAddress { TypeId type_id_{kNumberTypeFloat16}; bool mem_dynamic_alloc_{false}; friend class KernelRuntime; + friend class MemoryManager; friend class mindspore::device::ascend::tasksink::TaskGenerator; friend class mindspore::device::cpu::CPUSimpleMemPlan; friend class mindspore::device::cpu::CPUResourceManager; friend class mindspore::device::cpu::CPUKernelRuntime; friend class mindspore::device::gpu::GPUKernelRuntime; + friend class mindspore::device::gpu::GPUMemoryManager; friend class mindspore::device::ascend::AscendKernelRuntime; + friend class mindspore::device::ascend::AscendMemoryManager; }; using DeviceAddressPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc 
b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc index 9eeb1062f7..597e188e9d 100644 --- a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc +++ b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc @@ -26,6 +26,7 @@ #include "device/kernel_runtime_manager.h" #include "device/gpu/gpu_common.h" #include "common/utils.h" +#include "device/gpu/gpu_memory_manager.h" namespace mindspore { namespace device { @@ -36,26 +37,14 @@ bool GPUKernelRuntime::Init() { if (device_init_ == true) { return true; } - auto ret = InitDevice(); if (!ret) { MS_LOG(ERROR) << "InitDevice error."; return ret; } - - auto context_ptr = MsContext::GetInstance(); - MS_EXCEPTION_IF_NULL(context_ptr); - // If use the dynamic memory pool, then alloc the first memory block to init. - if (context_ptr->enable_dynamic_mem_pool()) { - auto device_addr = AllocTensorMemDynamic(1); - if (!device_addr) { - MS_LOG(ERROR) << "Dynamic memory pool init error."; - return false; - } - } else { - MallocDeviceMemory(); - } - + mem_manager_ = std::make_shared(); + MS_EXCEPTION_IF_NULL(mem_manager_); + mem_manager_->MallocDeviceMemory(); const void *collective_handle_ = CollectiveInitializer::instance().collective_handle(); bool collective_inited = CollectiveInitializer::instance().collective_inited(); if (collective_inited && collective_handle_ != nullptr) { @@ -101,16 +90,6 @@ bool GPUKernelRuntime::InitDevice() { return true; } -void GPUKernelRuntime::MallocDeviceMemory() { - // Need to reserve 20% space for dynamic memory - const float init_gpu_mem_ratio = 0.8; - size_t mem_size = FloatToSize(GPUMemoryAllocator::GetInstance().free_mem_size() * init_gpu_mem_ratio); - auto alloc_size = - GPUMemoryAllocator::GetInstance().AllocDeviceMem(mem_size, reinterpret_cast(&device_mem_base_)); - device_mem_size_ = alloc_size; - static_mem_offset_ = device_mem_size_; -} - void GPUKernelRuntime::ReleaseDeviceRes() { // For dataset mode. 
if (GpuBufferMgr::GetInstance().IsInit()) { @@ -122,39 +101,22 @@ void GPUKernelRuntime::ReleaseDeviceRes() { CHECK_OP_RET_WITH_EXCEPT(GpuBufferMgr::GetInstance().Destroy(), "Could not destroy gpu data queue."); } GPUDeviceManager::GetInstance().ReleaseDevice(); - if (device_mem_base_ != nullptr) { - if (!GPUMemoryAllocator::GetInstance().FreeDeviceMem(device_mem_base_)) { - MS_LOG(EXCEPTION) << "Could not free gpu device memory."; - } - } - GPUMemoryAllocator::GetInstance().ReleaseDeviceRes(); -} - -void GPUKernelRuntime::FreeHostMemory() { dynamic_mem_offset_ = 0; } - -void *GPUKernelRuntime::AllocTensorMemDynamic(size_t size) { - return GPUMemoryAllocator::GetInstance().AllocTensorMem(size); -} - -void GPUKernelRuntime::FreeTensorMemDynamic(void *device_ptr) { - GPUMemoryAllocator::GetInstance().FreeTensorMem(device_ptr); + MS_EXCEPTION_IF_NULL(mem_manager_); + mem_manager_->FreeDeviceMemory(); } void GPUKernelRuntime::AssignMemory(session::KernelGraph *graph) { auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); + MS_EXCEPTION_IF_NULL(mem_manager_); + mem_manager_->ResetDynamicMemory(); AssignStaticMemory(graph); - bool is_enable_mem_reuse = context_ptr->enable_mem_reuse(); bool is_enable_dynamic_mem = context_ptr->enable_dynamic_mem_pool(); if (is_enable_dynamic_mem) { // Use the dynamic memory pool. InitKernelRefCount(graph); InitKernelOutputAddress(graph); - } else if (is_enable_mem_reuse) { - // Use the memory reuse. - ReuseAssignDynamicMemory(graph); } else { - // Normal way. 
AssignDynamicMemory(graph); } } @@ -179,32 +141,6 @@ bool GPUKernelRuntime::Run(session::KernelGraph *graph) { return ret; } -uint8_t *GPUKernelRuntime::MallocStaticMem(size_t size, bool) { - auto context_ptr = MsContext::GetInstance(); - MS_EXCEPTION_IF_NULL(context_ptr); - if (context_ptr->enable_dynamic_mem_pool()) { - auto device_ptr = AllocTensorMemDynamic(size); - MS_EXCEPTION_IF_NULL(device_ptr); - return AddressOffset(device_ptr, 0); - } - - auto align_size = GetCommonAlignSize(size); - if (static_mem_offset_ < align_size) { - MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ - << "] static[" << total_static_size_ << "])" - << " malloc [" << align_size << "] failed!"; - } - auto offset = static_mem_offset_ - align_size; - if (dynamic_mem_offset_ > offset) { - MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ - << "] static[" << total_static_size_ << "])" - << " malloc [" << align_size << "] failed!"; - } - total_static_size_ += align_size; - static_mem_offset_ = offset; - return device_mem_base_ + offset; -} - void GPUKernelRuntime::InitKernelRefCount(const session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); MemReuseUtilPtr mem_reuse_util_ptr = std::make_shared(); @@ -273,6 +209,7 @@ void GPUKernelRuntime::AllocKernelDynamicRes(const mindspore::kernel::KernelMod MS_EXCEPTION_IF_NULL(kernel_inputs); MS_EXCEPTION_IF_NULL(kernel_workspaces); MS_EXCEPTION_IF_NULL(kernel_outputs); + MS_EXCEPTION_IF_NULL(mem_manager_); for (size_t i = 0; i < AnfAlgo::GetInputTensorNum(kernel); ++i) { auto device_address = AnfAlgo::GetPrevNodeOutputAddr(kernel, i); MS_EXCEPTION_IF_NULL(device_address); @@ -290,7 +227,7 @@ void GPUKernelRuntime::AllocKernelDynamicRes(const mindspore::kernel::KernelMod MS_EXCEPTION_IF_NULL(device_address); auto device_ptr = device_address->ptr_; if (device_ptr == nullptr) { - device_ptr = AllocTensorMemDynamic(output_sizes[i]); + 
device_ptr = mem_manager_->AllocTensorMemDynamic(output_sizes[i]); MS_EXCEPTION_IF_NULL(device_ptr); device_address->ptr_ = device_ptr; } @@ -307,7 +244,7 @@ void GPUKernelRuntime::AllocKernelDynamicRes(const mindspore::kernel::KernelMod kernel_workspaces->emplace_back(nullptr); continue; } - auto device_ptr = AllocTensorMemDynamic(workspace_sizes[i]); + auto device_ptr = mem_manager_->AllocTensorMemDynamic(workspace_sizes[i]); MS_EXCEPTION_IF_NULL(device_ptr); kernel::AddressPtr workspace = std::make_shared(); MS_EXCEPTION_IF_NULL(workspace); @@ -333,6 +270,7 @@ void GPUKernelRuntime::AllocCommunicationOpDynamicRes(const session::KernelGraph void GPUKernelRuntime::AllocCommunicationOpInputDynamicRes(const mindspore::AnfNodePtr &kernel) { MS_EXCEPTION_IF_NULL(kernel); + MS_EXCEPTION_IF_NULL(mem_manager_); // The reference count of communication kernel input is not 0. if (communication_op_input_ref_count_ != 0) { MS_LOG(ERROR) << "The reference count of communication kernel input is not 0."; @@ -354,7 +292,7 @@ void GPUKernelRuntime::AllocCommunicationOpInputDynamicRes(const mindspore::AnfN addr_size.emplace_back(device_address.get(), output_size); } - auto device_mem_ptr = AllocTensorMemDynamic(total); + auto device_mem_ptr = mem_manager_->AllocTensorMemDynamic(total); MS_EXCEPTION_IF_NULL(device_mem_ptr); for (const auto &iter : addr_size) { MS_EXCEPTION_IF_NULL(iter.first); @@ -366,6 +304,7 @@ void GPUKernelRuntime::AllocCommunicationOpInputDynamicRes(const mindspore::AnfN void GPUKernelRuntime::AllocCommunicationOpOutputDynamicRes(const mindspore::AnfNodePtr &kernel) { MS_EXCEPTION_IF_NULL(kernel); + MS_EXCEPTION_IF_NULL(mem_manager_); // The reference count of communication kernel output is not 0. 
if (communication_op_output_ref_count_ != 0) { MS_LOG(ERROR) << "The reference count of communication kernel output is not 0."; @@ -389,7 +328,7 @@ void GPUKernelRuntime::AllocCommunicationOpOutputDynamicRes(const mindspore::Anf addr_size.emplace_back(device_address.get(), output_sizes[i]); } - auto device_mem_ptr = AllocTensorMemDynamic(total); + auto device_mem_ptr = mem_manager_->AllocTensorMemDynamic(total); MS_EXCEPTION_IF_NULL(device_mem_ptr); for (const auto &iter : addr_size) { MS_EXCEPTION_IF_NULL(iter.first); @@ -402,6 +341,7 @@ void GPUKernelRuntime::AllocCommunicationOpOutputDynamicRes(const mindspore::Anf void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, const AddressPtrList &kernel_workspaces) { MS_EXCEPTION_IF_NULL(kernel); + MS_EXCEPTION_IF_NULL(mem_manager_); auto cnode = kernel->cast(); MS_EXCEPTION_IF_NULL(cnode); // Free the input of kernel by reference count. @@ -421,7 +361,7 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, i); MS_EXCEPTION_IF_NULL(device_address); MS_EXCEPTION_IF_NULL(device_address->ptr_); - FreeTensorMemDynamic(device_address->ptr_); + mem_manager_->FreeTensorMemDynamic(device_address->ptr_); device_address->ptr_ = nullptr; } } @@ -432,7 +372,7 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, auto workspace = kernel_workspaces[i]; if (workspace != nullptr) { MS_EXCEPTION_IF_NULL(workspace->addr); - FreeTensorMemDynamic(workspace->addr); + mem_manager_->FreeTensorMemDynamic(workspace->addr); workspace->addr = nullptr; } } @@ -441,6 +381,7 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, void GPUKernelRuntime::FreeCommunicationOpDynamicRes(const mindspore::AnfNodePtr &kernel, size_t input_idx, bool *is_communication_op) { MS_EXCEPTION_IF_NULL(kernel); + MS_EXCEPTION_IF_NULL(mem_manager_); // The inputs memory of communication 
kernel is one piece memory, need release together. if (AnfAlgo::GetCNodeName(kernel) == kAllReduceOpName) { communication_op_input_ref_count_--; @@ -448,7 +389,7 @@ void GPUKernelRuntime::FreeCommunicationOpDynamicRes(const mindspore::AnfNodePtr auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, 0); MS_EXCEPTION_IF_NULL(device_address); MS_EXCEPTION_IF_NULL(device_address->ptr_); - FreeTensorMemDynamic(device_address->ptr_); + mem_manager_->FreeTensorMemDynamic(device_address->ptr_); device_address->ptr_ = nullptr; } *is_communication_op = true; @@ -470,19 +411,12 @@ void GPUKernelRuntime::FreeCommunicationOpDynamicRes(const mindspore::AnfNodePtr auto device_address = AnfAlgo::GetMutableOutputAddr(kernel_input.first, 0); MS_EXCEPTION_IF_NULL(device_address); MS_EXCEPTION_IF_NULL(device_address->ptr_); - FreeTensorMemDynamic(device_address->ptr_); + mem_manager_->FreeTensorMemDynamic(device_address->ptr_); device_address->ptr_ = nullptr; } *is_communication_op = true; } } - -void GPUKernelRuntime::MallocOpMemory(const DeviceAddressPtr address, size_t size, int) { - auto device_ptr = AllocTensorMemDynamic(size); - MS_EXCEPTION_IF_NULL(device_ptr); - address->ptr_ = device_ptr; - address->mem_dynamic_alloc_ = true; -} } // namespace gpu } // namespace device } // namespace mindspore diff --git a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.h b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.h index f3fdb5fa98..6f761342d3 100644 --- a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.h +++ b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.h @@ -33,7 +33,6 @@ class GPUKernelRuntime : public KernelRuntime { ~GPUKernelRuntime() override = default; bool Init() override; void ReleaseDeviceRes() override; - void FreeHostMemory() override; void AssignMemory(session::KernelGraph *graph) override; bool Run(session::KernelGraph *graph) override; @@ -41,18 +40,11 @@ class GPUKernelRuntime : public KernelRuntime { DeviceAddressPtr CreateDeviceAddress(void *device_ptr, 
size_t device_size, const string &format, TypeId type_id) override; bool SyncStream() override; - // Alloc memory use the dynamic memory pool. - void *AllocTensorMemDynamic(size_t size) override; - // Free memory use the dynamic memory pool. - void FreeTensorMemDynamic(void *device_ptr) override; - void MallocOpMemory(const DeviceAddressPtr address, size_t size, int flag) override; - uint8_t *MallocStaticMem(size_t size, bool communication_mem) override; private: GPUKernelRuntime(const GPUKernelRuntime &); GPUKernelRuntime &operator=(const GPUKernelRuntime &); bool InitDevice(); - void MallocDeviceMemory(); bool device_init_{false}; // The related functions and members for using dynamic memory pool. @@ -69,6 +61,7 @@ class GPUKernelRuntime : public KernelRuntime { void FreeCommunicationOpDynamicRes(const mindspore::AnfNodePtr &kernel, size_t input_idx, bool *is_communication_op); size_t communication_op_input_ref_count_{0}; size_t communication_op_output_ref_count_{0}; + MemReuseUtilPtr mem_reuse_util_ptr_{nullptr}; }; MS_REG_KERNEL_RUNTIME(kGPUDevice, GPUKernelRuntime); } // namespace gpu diff --git a/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc b/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc new file mode 100644 index 0000000000..3944b504e4 --- /dev/null +++ b/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc @@ -0,0 +1,88 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "device/gpu/gpu_memory_manager.h" +#include "device/gpu/gpu_memory_allocator.h" +#include "utils/context/ms_context.h" +#include "utils/convert_utils.h" +namespace mindspore { +namespace device { +namespace gpu { +void *GPUMemoryManager::AllocTensorMemDynamic(size_t size) { + return GPUMemoryAllocator::GetInstance().AllocTensorMem(size); +} + +void GPUMemoryManager::FreeTensorMemDynamic(void *device_ptr) { + GPUMemoryAllocator::GetInstance().FreeTensorMem(device_ptr); +} + +void GPUMemoryManager::MallocDeviceMemory() { + auto context_ptr = MsContext::GetInstance(); + MS_EXCEPTION_IF_NULL(context_ptr); + // If use the dynamic memory pool, then alloc the first memory block to init. + if (context_ptr->enable_dynamic_mem_pool()) { + auto device_addr = AllocTensorMemDynamic(1); + if (!device_addr) { + MS_LOG(ERROR) << "Dynamic memory pool init error."; + } + } else { + // Need to reserve 20% space for dynamic memory + const float init_gpu_mem_ratio = 0.8; + size_t mem_size = FloatToSize(GPUMemoryAllocator::GetInstance().free_mem_size() * init_gpu_mem_ratio); + auto alloc_size = + GPUMemoryAllocator::GetInstance().AllocDeviceMem(mem_size, reinterpret_cast(&device_mem_base_)); + device_mem_size_ = alloc_size; + static_mem_offset_ = device_mem_size_; + } +} + +void GPUMemoryManager::FreeDeviceMemory() { + if (device_mem_base_ != nullptr) { + if (!GPUMemoryAllocator::GetInstance().FreeDeviceMem(device_mem_base_)) { + MS_LOG(EXCEPTION) << "Could not free gpu device memory."; + } + } + GPUMemoryAllocator::GetInstance().ReleaseDeviceRes(); +} + +uint8_t *GPUMemoryManager::MallocStaticMem(size_t size, bool) { + auto context_ptr = MsContext::GetInstance(); + MS_EXCEPTION_IF_NULL(context_ptr); + if (context_ptr->enable_dynamic_mem_pool()) { + auto device_ptr = AllocTensorMemDynamic(size); + MS_EXCEPTION_IF_NULL(device_ptr); + return AddressOffset(device_ptr, 0); + } + + auto align_size = GetCommonAlignSize(size); + if (static_mem_offset_ < align_size) { + 
MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ + << "] static[" << total_static_size_ << "])" + << " malloc [" << align_size << "] failed!"; + } + auto offset = static_mem_offset_ - align_size; + if (dynamic_mem_offset_ > offset) { + MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ + << "] static[" << total_static_size_ << "])" + << " malloc [" << align_size << "] failed!"; + } + total_static_size_ += align_size; + static_mem_offset_ = offset; + return device_mem_base_ + offset; +} +} // namespace gpu +} // namespace device +} // namespace mindspore diff --git a/mindspore/ccsrc/device/gpu/gpu_memory_manager.h b/mindspore/ccsrc/device/gpu/gpu_memory_manager.h new file mode 100644 index 0000000000..a18226bdf3 --- /dev/null +++ b/mindspore/ccsrc/device/gpu/gpu_memory_manager.h @@ -0,0 +1,40 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_MINDSPORE_CCSRC_DEVICE_GPU_GPU_MEMORY_MANAGER_H_ +#define MINDSPORE_MINDSPORE_CCSRC_DEVICE_GPU_GPU_MEMORY_MANAGER_H_ +#include "device/memory_manager.h" +namespace mindspore { +namespace device { +namespace gpu { +class GPUMemoryManager : public MemoryManager { + public: + GPUMemoryManager() = default; + virtual ~GPUMemoryManager() = default; + + void MallocDeviceMemory() override; + void FreeDeviceMemory() override; + + void *AllocTensorMemDynamic(size_t size) override; + void FreeTensorMemDynamic(void *device_ptr) override; + + protected: + uint8_t *MallocStaticMem(size_t size, bool communication_mem); +}; +} // namespace gpu +} // namespace device +} // namespace mindspore +#endif // MINDSPORE_MINDSPORE_CCSRC_DEVICE_GPU_GPU_MEMORY_MANAGER_H_ diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index 0a9be35fb5..16025ed8a4 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -31,18 +31,13 @@ #include "ir/value.h" using mindspore::kernel::Address; using mindspore::kernel::AddressPtr; -using mindspore::memreuse::BestFitMemReuse; -using mindspore::memreuse::MemReuseUtilPtr; namespace mindspore { namespace device { KernelRuntime::~KernelRuntime() { - device_mem_base_ = nullptr; - device_mem_pool_base_ = nullptr; #ifdef ENABLE_DUMP_E2E dump_conf_ptr_ = nullptr; #endif - mem_reuse_util_ptr_ = nullptr; } bool KernelRuntime::Run(session::KernelGraph *graph) { @@ -88,11 +83,6 @@ bool KernelRuntime::LoadTask(const session::KernelGraph *graph) { return false; } -void KernelRuntime::FreeHostMemory() { - dynamic_mem_offset_ = 0; - static_mem_offset_ = 0; -} - // for D to impl bool KernelRuntime::RunTask(const session::KernelGraph *graph) { if (graph != nullptr) { @@ -126,13 +116,11 @@ size_t KernelRuntime::CountNodeDeviceMemorySize(const mindspore::AnfNodePtr &nod void KernelRuntime::AssignMemory(session::KernelGraph *graph) { auto context_ptr = 
MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); + MS_EXCEPTION_IF_NULL(mem_manager_); + mem_manager_->ResetDynamicMemory(); AssignStaticMemory(graph); - bool is_enable_mem_reuse = context_ptr->enable_mem_reuse(); - if (is_enable_mem_reuse) { - ReuseAssignDynamicMemory(graph); - } else { - AssignDynamicMemory(graph); - } + AssignDynamicMemory(graph); + UpdateRefNodeOutputMem(graph); } @@ -159,6 +147,7 @@ void KernelRuntime::AssignStaticMemory(session::KernelGraph *graph) { void KernelRuntime::RunOpAssignInputMemory(const std::vector &input_tensors, const session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(mem_manager_); for (size_t input_index = 0; input_index < graph->inputs().size(); ++input_index) { auto item = graph->inputs()[input_index]; MS_EXCEPTION_IF_NULL(item); @@ -180,7 +169,7 @@ void KernelRuntime::RunOpAssignInputMemory(const std::vector auto device_address = CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id); MS_EXCEPTION_IF_NULL(device_address); - MallocOpMemory(device_address, tensor_size, kStaticMem); + mem_manager_->MallocOpMemory(device_address, tensor_size); AnfAlgo::SetOutputAddr(device_address, index, item.get()); } } @@ -188,6 +177,7 @@ void KernelRuntime::RunOpAssignInputMemory(const std::vector void KernelRuntime::RunOpAssignOutputMemory(const AnfNodePtr &kernel) { MS_EXCEPTION_IF_NULL(kernel); + MS_EXCEPTION_IF_NULL(mem_manager_); auto kernel_mod = AnfAlgo::GetKernelMod(kernel); MS_EXCEPTION_IF_NULL(kernel_mod); auto output_sizes = kernel_mod->GetOutputSizeList(); @@ -208,13 +198,14 @@ void KernelRuntime::RunOpAssignOutputMemory(const AnfNodePtr &kernel) { auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel, i); auto device_address = CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type); MS_EXCEPTION_IF_NULL(device_address); - MallocOpMemory(device_address, output_sizes[i], kDynamicMem); + 
mem_manager_->MallocOpMemory(device_address, output_sizes[i]); AnfAlgo::SetOutputAddr(device_address, i, kernel.get()); } } void KernelRuntime::RunOpAssignWorkSpaceMemory(const AnfNodePtr &kernel) { MS_EXCEPTION_IF_NULL(kernel); + MS_EXCEPTION_IF_NULL(mem_manager_); if (kernel->isa()) { auto kernel_mod = AnfAlgo::GetKernelMod(kernel); MS_EXCEPTION_IF_NULL(kernel_mod); @@ -222,7 +213,7 @@ void KernelRuntime::RunOpAssignWorkSpaceMemory(const AnfNodePtr &kernel) { for (size_t i = 0; i < workspace_lists.size(); ++i) { auto device_address = CreateDeviceAddress(nullptr, workspace_lists[i], "", kTypeUnknown); MS_EXCEPTION_IF_NULL(device_address); - MallocOpMemory(device_address, workspace_lists[i], kDynamicMem); + mem_manager_->MallocOpMemory(device_address, workspace_lists[i]); AnfAlgo::SetWorkspaceAddr(device_address, i, kernel.get()); } } @@ -230,6 +221,7 @@ void KernelRuntime::RunOpAssignWorkSpaceMemory(const AnfNodePtr &kernel) { void KernelRuntime::AssignStaticMemoryInput(const session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(mem_manager_); for (auto &item : graph->inputs()) { MS_EXCEPTION_IF_NULL(item); if (!item->isa()) { @@ -247,7 +239,7 @@ void KernelRuntime::AssignStaticMemoryInput(const session::KernelGraph *graph) { output_type_id = AnfAlgo::GetOutputInferDataType(item, index); } auto tensor_size = CountNodeDeviceMemorySize(item, index); - auto ptr = MallocStaticMem(tensor_size, false); + auto ptr = mem_manager_->MallocMem(kStaticMem, tensor_size); auto address = CreateDeviceAddress(ptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id); AnfAlgo::SetOutputAddr(address, index, item.get()); } @@ -301,6 +293,7 @@ void KernelRuntime::UpdateRefNodeOutputMem(const session::KernelGraph *graph) { void KernelRuntime::AssignCommunicationNodeOutputMem(int flag, const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); + MS_EXCEPTION_IF_NULL(mem_manager_); auto kernel_mod = AnfAlgo::GetKernelMod(node); 
MS_EXCEPTION_IF_NULL(kernel_mod); auto output_sizes = kernel_mod->GetOutputSizeList(); @@ -314,12 +307,12 @@ void KernelRuntime::AssignCommunicationNodeOutputMem(int flag, const AnfNodePtr std::vector align_size_list; for (uint64_t mem_size : output_sizes) { if (context_ptr->enable_hccl()) { - mem_size = GetCommonAlignSize(mem_size); + mem_size = mem_manager_->GetCommonAlignSize(mem_size); } total_size += mem_size; align_size_list.emplace_back(mem_size); } - uint8_t *output_ptr = CalDeviceMem(node, total_size, flag, 0); + uint8_t *output_ptr = mem_manager_->MallocOutputMem(node, 0, flag, total_size); for (size_t j = 0; j < align_size_list.size(); ++j) { std::string output_format = AnfAlgo::GetOutputFormat(node, j); auto output_type = AnfAlgo::GetOutputDeviceDataType(node, j); @@ -333,6 +326,7 @@ void KernelRuntime::UpdateCommunicationOpInputMem(const AnfNodePtr &node) { auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); MS_EXCEPTION_IF_NULL(node); + MS_EXCEPTION_IF_NULL(mem_manager_); size_t total_size = 0; std::vector> addr_size; for (size_t i = 0; i < AnfAlgo::GetInputTensorNum(node); ++i) { @@ -340,12 +334,12 @@ void KernelRuntime::UpdateCommunicationOpInputMem(const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(address); auto mem_size = address->size(); if (context_ptr->enable_hccl()) { - mem_size = GetCommonAlignSize(mem_size); + mem_size = mem_manager_->GetCommonAlignSize(mem_size); } total_size += mem_size; addr_size.emplace_back(address.get(), mem_size); } - uint8_t *input_ptr = CalDeviceMem(node, total_size, kDynamicMem, 0); + uint8_t *input_ptr = mem_manager_->MallocOutputMem(node, 0, kDynamicMem, total_size); for (const auto &iter : addr_size) { MS_EXCEPTION_IF_NULL(iter.first); iter.first->set_ptr(input_ptr); @@ -355,7 +349,8 @@ void KernelRuntime::UpdateCommunicationOpInputMem(const AnfNodePtr &node) { void KernelRuntime::AssignNodeOutputMem(int flag, const AnfNodePtr &node, int index) { MS_EXCEPTION_IF_NULL(node); - if 
(IsCommunicationOp(node)) { + MS_EXCEPTION_IF_NULL(mem_manager_); + if (AnfAlgo::IsCommunicationOp(node)) { UpdateCommunicationOpInputMem(node); AssignCommunicationNodeOutputMem(flag, node); return; @@ -375,7 +370,7 @@ void KernelRuntime::AssignNodeOutputMem(int flag, const AnfNodePtr &node, int in MS_LOG(INFO) << "Already malloc index:" << i; continue; } - auto ptr = CalDeviceMem(node, output_sizes[i], flag, i); + auto ptr = mem_manager_->MallocOutputMem(node, i, flag, output_sizes[i]); if (ptr == nullptr) { // reused ptr, no need alloc, continue; continue; @@ -390,6 +385,7 @@ void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const size_t output_idx) { MS_EXCEPTION_IF_NULL(value_node); MS_EXCEPTION_IF_NULL(node_value); + MS_EXCEPTION_IF_NULL(mem_manager_); auto tensor = node_value->cast(); if (tensor == nullptr) { MS_LOG(WARNING) << "Tensor is null"; @@ -397,7 +393,7 @@ void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const } size_t tensor_size = tensor->data().nbytes(); auto node_size = CountNodeDeviceMemorySize(value_node, output_idx); - auto ptr = MallocStaticMem(node_size, false); + auto ptr = mem_manager_->MallocMem(kStaticMem, node_size); TypeId output_type_id = AnfAlgo::GetOutputDeviceDataType(value_node, output_idx); if (output_type_id == kTypeUnknown) { output_type_id = AnfAlgo::GetOutputInferDataType(value_node, output_idx); @@ -414,6 +410,7 @@ void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const void KernelRuntime::AssignStaticMemoryValueNode(session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(mem_manager_); for (auto &value_node : graph->graph_value_nodes()) { MS_EXCEPTION_IF_NULL(value_node); if (AnfAlgo::OutputAddrExist(value_node, 0)) { @@ -440,7 +437,7 @@ void KernelRuntime::AssignStaticMemoryValueNode(session::KernelGraph *graph) { } else if (node_value->isa()) { auto value = GetValue(node_value); size_t tensor_size = value.size(); - auto ptr 
= MallocStaticMem(tensor_size, false); + auto ptr = mem_manager_->MallocMem(kStaticMem, tensor_size); auto address = CreateDeviceAddress(ptr, tensor_size, kOpFormat_DEFAULT, kNumberTypeUInt8); MS_EXCEPTION_IF_NULL(address); AnfAlgo::SetOutputAddr(address, 0, value_node.get()); @@ -452,103 +449,37 @@ void KernelRuntime::AssignStaticMemoryValueNode(session::KernelGraph *graph) { } } -void KernelRuntime::AssignDynamicMemory(const session::KernelGraph *graph) { +void KernelRuntime::AssignDynamicMemory(session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); - // reset dynamic mem offset - dynamic_mem_offset_ = 0; - auto &kernels = graph->execution_order(); - for (auto &kernel : kernels) { - AssignNodeOutputMem(kDynamicMem, kernel, kGetAllOuts); - AssignWorkSpaceMem(kernel); + MS_EXCEPTION_IF_NULL(mem_manager_); + auto context_ptr = MsContext::GetInstance(); + MS_EXCEPTION_IF_NULL(context_ptr); + bool is_enable_mem_reuse = context_ptr->enable_mem_reuse(); + auto mem_flag = kDynamicMem; + if (is_enable_mem_reuse) { + mem_manager_->InitReuseDynamicMemory(graph); + mem_flag = kReuseDynamicMem; } -} - -void KernelRuntime::ReuseAssignDynamicMemory(session::KernelGraph *graph) { - MS_EXCEPTION_IF_NULL(graph); - dynamic_mem_offset_ = 0; - MemReuseUtilPtr mem_reuse_util_ptr = std::make_shared(); - MS_EXCEPTION_IF_NULL(mem_reuse_util_ptr); - // set all infos - mem_reuse_util_ptr->SetAllInfo(graph); - auto bestfit_mem_reuse = std::make_shared(); - MS_EXCEPTION_IF_NULL(bestfit_mem_reuse); - bestfit_mem_reuse->Reuse(mem_reuse_util_ptr.get()); - size_t total_allocated_size = bestfit_mem_reuse->GetAllocatedSize(); - MS_LOG(INFO) << "TotalReuseDynamicSize [" << total_allocated_size << "]"; - mem_reuse_util_ptr_ = mem_reuse_util_ptr; - auto base_ptr = MallocDynamicMem(total_allocated_size, false); - mem_reuse_util_ptr_->set_mem_base(base_ptr); auto &kernels = graph->execution_order(); for (auto &kernel : kernels) { - AssignNodeOutputMem(kReuseDynamicMem, kernel, kGetAllOuts); - 
AssignReuseWorkSpaceMem(kernel); + AssignNodeOutputMem(mem_flag, kernel, kGetAllOuts); + AssignWorkSpaceMem(mem_flag, kernel); } } -void KernelRuntime::AssignReuseWorkSpaceMem(const AnfNodePtr &node) { +void KernelRuntime::AssignWorkSpaceMem(int flag, const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(node); + MS_EXCEPTION_IF_NULL(mem_manager_); auto kernel_mod = AnfAlgo::GetKernelMod(node); MS_EXCEPTION_IF_NULL(kernel_mod); size_t index = 0; for (auto &size : kernel_mod->GetWorkspaceSizeList()) { - auto wk_ptr = mem_reuse_util_ptr_->GetNodeWorkSpacePtr(node, index); - AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(wk_ptr, size, "", kTypeUnknown), index, node.get()); + auto ptr = mem_manager_->MallocWorkSpaceMem(node, flag, index, size); + AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(ptr, size, "", kTypeUnknown), index, node.get()); index++; } } -void KernelRuntime::AssignWorkSpaceMem(const AnfNodePtr &node) { - MS_EXCEPTION_IF_NULL(node); - if (node->isa()) { - auto kernel_mod = AnfAlgo::GetKernelMod(node); - MS_EXCEPTION_IF_NULL(kernel_mod); - size_t index = 0; - for (auto &size : kernel_mod->GetWorkspaceSizeList()) { - auto ptr = MallocDynamicMem(size, false); - AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(ptr, size, "", kTypeUnknown), index, node.get()); - index++; - } - } -} - -bool KernelRuntime::IsCommunicationOp(const AnfNodePtr &node) { - MS_EXCEPTION_IF_NULL(node); - auto kernel_name = AnfAlgo::GetCNodeName(node); - auto kernel_type = AnfAlgo::GetKernelType(node); - if (kernel_name == kAllReduceOpName || kernel_type == HCCL_KERNEL) { - return true; - } - return false; -} - -uint8_t *KernelRuntime::CalDeviceMem(const AnfNodePtr &node, size_t size, int flag, size_t index) { - MS_EXCEPTION_IF_NULL(node); - auto context_ptr = MsContext::GetInstance(); - MS_EXCEPTION_IF_NULL(context_ptr); - uint8_t *ptr = nullptr; - if (IsCommunicationOp(node)) { - bool communication_mem = false; - if (context_ptr->enable_hccl()) { - communication_mem = true; - } - if (flag == 
kStaticMem) { - ptr = MallocStaticMem(size, communication_mem); - } else { - ptr = MallocDynamicMem(size, communication_mem); - } - return ptr; - } - - if (flag == kStaticMem) { - ptr = MallocStaticMem(size, false); - } else if (flag == kDynamicMem) { - ptr = MallocDynamicMem(size, false); - } else if (flag == kReuseDynamicMem) { - ptr = mem_reuse_util_ptr_->GetNodeOutputPtr(node, index); - } - return ptr; -} - void KernelRuntime::GenLaunchArgs(const mindspore::kernel::KernelMod &kernel_mod, const mindspore::AnfNodePtr &kernel, AddressPtrList *kernel_inputs, AddressPtrList *const kernel_workspaces, AddressPtrList *kernel_outputs) { @@ -659,65 +590,6 @@ bool KernelRuntime::LaunchKernelMod(const session::KernelGraph &graph) { return true; } -size_t KernelRuntime::GetCommonAlignSize(size_t input_size) const { - return (input_size + mem_align_size_ + 31) / mem_align_size_ * mem_align_size_; -} - -size_t KernelRuntime::GetCommunicationAlignSize(size_t input_size) const { - return (input_size + mem_align_size_ - 1) / mem_align_size_ * mem_align_size_ + 2 * mem_align_size_; -} - -uint8_t *KernelRuntime::MallocStaticMem(size_t size, bool communication_mem) { - size_t align_size = 0; - if (communication_mem) { - align_size = GetCommunicationAlignSize(size); - } else { - align_size = GetCommonAlignSize(size); - } - if (static_mem_offset_ < align_size) { - MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ - << "] static[" << total_static_size_ << "])" - << " malloc [" << align_size << "] failed!"; - } - total_static_size_ += align_size; - auto offset = static_mem_offset_ - align_size; - if (dynamic_mem_offset_ > offset) { - MS_LOG(EXCEPTION) << "Out of memory!!! 
total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ - << "] static[" << total_static_size_ << "])" - << " malloc [" << align_size << "] failed!"; - } - static_mem_offset_ = offset; - if (communication_mem) { - return device_mem_base_ + offset + mem_align_size_; - } else { - return device_mem_base_ + offset; - } -} - -uint8_t *KernelRuntime::MallocDynamicMem(size_t size, bool communication_mem) { - size_t align_size = 0; - if (communication_mem) { - align_size = GetCommunicationAlignSize(size); - } else { - align_size = GetCommonAlignSize(size); - } - uint64_t offset = dynamic_mem_offset_; - auto new_offset = dynamic_mem_offset_ + align_size; - if (new_offset > static_mem_offset_) { - MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ - << "] static[" << total_static_size_ << "])" - << " malloc [" << align_size << "] failed!"; - } - total_dynamic_size_ += align_size; - dynamic_mem_offset_ = new_offset; - - if (communication_mem) { - return device_mem_base_ + offset + mem_align_size_; - } else { - return device_mem_base_ + offset; - } -} - bool KernelRuntime::LaunchKernel(const session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); if (!LaunchKernelMod(*graph)) { @@ -731,29 +603,6 @@ bool KernelRuntime::LaunchKernel(const session::KernelGraph *graph) { return true; } -void KernelRuntime::MallocOpMemory(const DeviceAddressPtr address, size_t size, int flag) { - if (flag == kStaticMem) { - address->ptr_ = MallocStaticMem(size, false); - } else if (flag == kDynamicMem) { - address->ptr_ = MallocDynamicMem(size, false); - } else { - MS_LOG(EXCEPTION) << "Unknown memory type!"; - } -} - -void *KernelRuntime::AllocTensorMemDynamic(size_t size) { - if (size == 0) { - MS_LOG(ERROR) << "AllocTensorMemDynamic size is 0."; - } - return nullptr; -} - -void KernelRuntime::FreeTensorMemDynamic(void *device_ptr) { - if (device_ptr == nullptr) { - MS_LOG(ERROR) << "FreeTensorMemDynamic device_ptr is null."; - 
} -} - #ifdef ENABLE_DUMP_E2E bool KernelRuntime::SetDumpConf() { dump_conf_ptr_ = std::make_shared(); diff --git a/mindspore/ccsrc/device/kernel_runtime.h b/mindspore/ccsrc/device/kernel_runtime.h index ac9a56ed4d..1224bf14eb 100644 --- a/mindspore/ccsrc/device/kernel_runtime.h +++ b/mindspore/ccsrc/device/kernel_runtime.h @@ -20,8 +20,7 @@ #include #include #include -#include "pre_activate/mem_reuse/mem_reuse.h" -#include "pre_activate/mem_reuse/mem_reuse_allocator.h" + #include "device/device_address.h" #include "ir/meta_tensor.h" #include "predict/generator/utils/ir_model_util.h" @@ -32,21 +31,16 @@ #include "session/anf_runtime_algorithm.h" #include "kernel/kernel.h" #include "utils/context/ms_context.h" +#include "device/memory_manager.h" // using mindspore::session::KernelGraph; using mindspore::tensor::Tensor; using TensorPtr = std::shared_ptr; -using MemReuseUtilPtr = mindspore::memreuse::MemReuseUtilPtr; using mindspore::kernel::AddressPtr; using AddressPtrList = std::vector; namespace mindspore { namespace device { -const int kStaticMem = 0; -const int kDynamicMem = 1; -const int kReuseDynamicMem = 2; -const int kGetAllOuts = -1; - class KernelRuntime { public: KernelRuntime() = default; @@ -65,7 +59,6 @@ class KernelRuntime { DumpConfPtr GetDumpConf(); #endif virtual bool LoadTask(const session::KernelGraph *graph); - virtual void FreeHostMemory(); // for GPU and D to impl virtual void ReleaseDeviceRes() {} void set_device_id(uint32_t device_id) { device_id_ = device_id; } @@ -75,29 +68,17 @@ class KernelRuntime { TypeId type_id) = 0; virtual bool SyncStream() = 0; void AssignStaticMemory(session::KernelGraph *graph); - void AssignDynamicMemory(const session::KernelGraph *graph); + void AssignDynamicMemory(session::KernelGraph *graph); void ReuseAssignDynamicMemory(session::KernelGraph *graph); void AssignNodeOutputMem(int flag, const AnfNodePtr &node, int index); - void AssignWorkSpaceMem(const AnfNodePtr &node); + void AssignWorkSpaceMem(int flag, 
const AnfNodePtr &node); void AssignReuseWorkSpaceMem(const AnfNodePtr &node); void AssignCommunicationNodeOutputMem(int flag, const AnfNodePtr &node); void UpdateRefNodeOutputMem(const session::KernelGraph *graph); void UpdateCommunicationOpInputMem(const AnfNodePtr &node); - bool IsCommunicationOp(const AnfNodePtr &node); - size_t GetCommonAlignSize(size_t input_size) const; - size_t GetCommunicationAlignSize(size_t input_size) const; - - uint8_t *CalDeviceMem(const AnfNodePtr &node, size_t size, int flag, size_t index); - virtual uint8_t *MallocStaticMem(size_t size, bool communication_mem); - uint8_t *MallocDynamicMem(size_t size, bool communication_mem); #ifdef ENABLE_DUMP_E2E bool SetDumpConf(); #endif - // Alloc memory use the dynamic memory pool. - virtual void *AllocTensorMemDynamic(size_t size); - // Free memory use the dynamic memory pool. - virtual void FreeTensorMemDynamic(void *device_ptr); - virtual void MallocOpMemory(const DeviceAddressPtr address, size_t size, int flag); private: void AssignStaticMemoryOutput(const session::KernelGraph *graph); @@ -114,20 +95,11 @@ class KernelRuntime { protected: uint32_t device_id_{0}; - uint8_t *device_mem_base_{nullptr}; - uint8_t *device_mem_pool_base_{nullptr}; - uint64_t device_mem_size_{0}; - uint64_t device_mem_pool_size_{0}; - uint64_t dynamic_mem_offset_{0}; - uint64_t static_mem_offset_{0}; - const uint64_t mem_align_size_ = 512; #ifdef ENABLE_DUMP_E2E DumpConfPtr dump_conf_ptr_; #endif void *stream_ = nullptr; - size_t total_static_size_ = 0; - size_t total_dynamic_size_ = 0; - MemReuseUtilPtr mem_reuse_util_ptr_{nullptr}; + std::shared_ptr mem_manager_{nullptr}; }; using KernelRuntimePtr = std::shared_ptr; } // namespace device diff --git a/mindspore/ccsrc/device/memory_manager.cc b/mindspore/ccsrc/device/memory_manager.cc new file mode 100644 index 0000000000..3c1ddee6bc --- /dev/null +++ b/mindspore/ccsrc/device/memory_manager.cc @@ -0,0 +1,170 @@ +/** + * Copyright 2019 Huawei Technologies Co., 
Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "device/memory_manager.h" +#include "session/anf_runtime_algorithm.h" +#include "utils/context/ms_context.h" +using mindspore::memreuse::BestFitMemReuse; +using mindspore::memreuse::MemReuseUtilPtr; +namespace mindspore { +namespace device { +MemoryManager::~MemoryManager() { + device_mem_base_ = nullptr; + device_mem_pool_base_ = nullptr; + mem_reuse_util_ptr_ = nullptr; +} + +size_t MemoryManager::GetCommonAlignSize(size_t input_size) const { + return (input_size + kMemAlignSize + 31) / kMemAlignSize * kMemAlignSize; +} + +size_t MemoryManager::GetCommunicationAlignSize(size_t input_size) const { + return (input_size + kMemAlignSize - 1) / kMemAlignSize * kMemAlignSize + 2 * kMemAlignSize; +} + +void MemoryManager::InitReuseDynamicMemory(session::KernelGraph *graph) { + MS_EXCEPTION_IF_NULL(graph); + MemReuseUtilPtr mem_reuse_util_ptr = std::make_shared(); + MS_EXCEPTION_IF_NULL(mem_reuse_util_ptr); + // set all infos + mem_reuse_util_ptr->SetAllInfo(graph); + auto bestfit_mem_reuse = std::make_shared(); + MS_EXCEPTION_IF_NULL(bestfit_mem_reuse); + bestfit_mem_reuse->Reuse(mem_reuse_util_ptr.get()); + size_t total_allocated_size = bestfit_mem_reuse->GetAllocatedSize(); + MS_LOG(INFO) << "TotalReuseDynamicSize [" << total_allocated_size << "]"; + mem_reuse_util_ptr_ = mem_reuse_util_ptr; + auto base_ptr = MallocDynamicMem(total_allocated_size, false); + 
mem_reuse_util_ptr_->set_mem_base(base_ptr); +} + +uint8_t *MemoryManager::MallocOutputMem(const AnfNodePtr &node, size_t index, int flag, size_t size) { + MS_EXCEPTION_IF_NULL(node); + auto context_ptr = MsContext::GetInstance(); + MS_EXCEPTION_IF_NULL(context_ptr); + uint8_t *ptr = nullptr; + if (AnfAlgo::IsCommunicationOp(node)) { + bool communication_mem = false; + if (context_ptr->enable_hccl()) { + communication_mem = true; + } + if (flag == kStaticMem) { + ptr = MallocStaticMem(size, communication_mem); + } else { + ptr = MallocDynamicMem(size, communication_mem); + } + return ptr; + } + + if (flag == kStaticMem) { + ptr = MallocStaticMem(size, false); + } else if (flag == kDynamicMem) { + ptr = MallocDynamicMem(size, false); + } else if (flag == kReuseDynamicMem) { + ptr = mem_reuse_util_ptr_->GetNodeOutputPtr(node, index); + } + return ptr; +} + +uint8_t *MemoryManager::MallocWorkSpaceMem(const AnfNodePtr &node, size_t index, int flag, size_t size) { + if (flag == kReuseDynamicMem) { + return mem_reuse_util_ptr_->GetNodeWorkSpacePtr(node, index); + } + return MallocDynamicMem(size, false); +} + +uint8_t *MemoryManager::MallocMem(int flag, size_t size) { + uint8_t *ptr = nullptr; + if (flag == kStaticMem) { + ptr = MallocStaticMem(size, false); + } else if (flag == kDynamicMem) { + ptr = MallocDynamicMem(size, false); + } + return ptr; +} + +uint8_t *MemoryManager::MallocStaticMem(size_t size, bool communication_mem) { + size_t align_size = 0; + if (communication_mem) { + align_size = GetCommunicationAlignSize(size); + } else { + align_size = GetCommonAlignSize(size); + } + if (static_mem_offset_ < align_size) { + MS_LOG(EXCEPTION) << "Out of memory!!! 
total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ + << "] static[" << total_static_size_ << "])" + << " malloc [" << align_size << "] failed!"; + } + total_static_size_ += align_size; + auto offset = static_mem_offset_ - align_size; + if (dynamic_mem_offset_ > offset) { + MS_LOG(EXCEPTION) << "Out of memory!!! total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ + << "] static[" << total_static_size_ << "])" + << " malloc [" << align_size << "] failed!"; + } + static_mem_offset_ = offset; + if (communication_mem) { + return device_mem_base_ + offset + kMemAlignSize; + } else { + return device_mem_base_ + offset; + } +} + +uint8_t *MemoryManager::MallocDynamicMem(size_t size, bool communication_mem) { + size_t align_size = 0; + if (communication_mem) { + align_size = GetCommunicationAlignSize(size); + } else { + align_size = GetCommonAlignSize(size); + } + uint64_t offset = dynamic_mem_offset_; + auto new_offset = dynamic_mem_offset_ + align_size; + if (new_offset > static_mem_offset_) { + MS_LOG(EXCEPTION) << "Out of memory!!! 
total[" << device_mem_size_ << "](dynamic[" << total_dynamic_size_ + << "] static[" << total_static_size_ << "])" + << " malloc [" << align_size << "] failed!"; + } + total_dynamic_size_ += align_size; + dynamic_mem_offset_ = new_offset; + + if (communication_mem) { + return device_mem_base_ + offset + kMemAlignSize; + } else { + return device_mem_base_ + offset; + } +} + +void MemoryManager::MallocOpMemory(const DeviceAddressPtr address, size_t size) { + auto device_ptr = AllocTensorMemDynamic(size); + MS_EXCEPTION_IF_NULL(device_ptr); + address->ptr_ = device_ptr; + address->mem_dynamic_alloc_ = true; +} + +void *MemoryManager::AllocTensorMemDynamic(size_t size) { + if (size == 0) { + MS_LOG(ERROR) << "AllocTensorMemDynamic size is 0."; + } + return nullptr; +} + +void MemoryManager::FreeTensorMemDynamic(void *device_ptr) { + if (device_ptr == nullptr) { + MS_LOG(ERROR) << "FreeTensorMemDynamic device_ptr is null."; + } +} +} // namespace device +} // namespace mindspore diff --git a/mindspore/ccsrc/device/memory_manager.h b/mindspore/ccsrc/device/memory_manager.h new file mode 100644 index 0000000000..2e47237def --- /dev/null +++ b/mindspore/ccsrc/device/memory_manager.h @@ -0,0 +1,71 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_MINDSPORE_CCSRC_DEVICE_MEMORY_MANAGER_H_ +#define MINDSPORE_MINDSPORE_CCSRC_DEVICE_MEMORY_MANAGER_H_ +#include +#include "pre_activate/mem_reuse/mem_reuse.h" +#include "pre_activate/mem_reuse/mem_reuse_allocator.h" +namespace mindspore { +namespace device { +const int kStaticMem = 0; +const int kDynamicMem = 1; +const int kReuseDynamicMem = 2; +const int kGetAllOuts = -1; +const uint64_t kMemAlignSize = 512; +using MemReuseUtilPtr = mindspore::memreuse::MemReuseUtilPtr; + +class MemoryManager { + public: + MemoryManager() = default; + virtual ~MemoryManager(); + + virtual void MallocDeviceMemory() = 0; + virtual void FreeDeviceMemory() = 0; + void ResetDynamicMemory() { + total_dynamic_size_ = 0; + dynamic_mem_offset_ = 0; + } + + void InitReuseDynamicMemory(session::KernelGraph *graph); + uint8_t *MallocOutputMem(const AnfNodePtr &node, size_t index, int flag, size_t size); + uint8_t *MallocWorkSpaceMem(const AnfNodePtr &node, size_t index, int flag, size_t size); + virtual uint8_t *MallocMem(int flag, size_t size); + + // Alloc memory use the dynamic memory pool. + virtual void *AllocTensorMemDynamic(size_t size); + // Free memory use the dynamic memory pool. 
+ virtual void FreeTensorMemDynamic(void *device_ptr); + virtual void MallocOpMemory(const DeviceAddressPtr address, size_t size); + size_t GetCommonAlignSize(size_t input_size) const; + size_t GetCommunicationAlignSize(size_t input_size) const; + + protected: + virtual uint8_t *MallocStaticMem(size_t size, bool communication_mem); + virtual uint8_t *MallocDynamicMem(size_t size, bool communication_mem); + uint8_t *device_mem_base_{nullptr}; + uint8_t *device_mem_pool_base_{nullptr}; + uint64_t device_mem_size_{0}; + uint64_t device_mem_pool_size_{0}; + uint64_t dynamic_mem_offset_{0}; + uint64_t static_mem_offset_{0}; + size_t total_static_size_ = 0; + size_t total_dynamic_size_ = 0; + MemReuseUtilPtr mem_reuse_util_ptr_{nullptr}; +}; +} // namespace device +} // namespace mindspore +#endif // MINDSPORE_MINDSPORE_CCSRC_DEVICE_MEMORY_MANAGER_H_ diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/session/anf_runtime_algorithm.cc index cc23dbbdd2..78922448af 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.cc @@ -857,5 +857,15 @@ void AnfRuntimeAlgorithm::SetNodeInput(const CNodePtr &node, const AnfNodePtr &i MS_EXCEPTION_IF_NULL(input_node); node->set_input(index + 1, input_node); } + +bool AnfRuntimeAlgorithm::IsCommunicationOp(const AnfNodePtr &node) { + MS_EXCEPTION_IF_NULL(node); + auto kernel_name = AnfAlgo::GetCNodeName(node); + auto kernel_type = AnfAlgo::GetKernelType(node); + if (kernel_name == kAllReduceOpName || kernel_type == HCCL_KERNEL) { + return true; + } + return false; +} } // namespace session } // namespace mindspore diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.h b/mindspore/ccsrc/session/anf_runtime_algorithm.h index 2de68f0098..55650ac31e 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.h +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.h @@ -166,6 +166,7 @@ class AnfRuntimeAlgorithm { static bool IsFeatureMapInput(const 
AnfNodePtr &node, size_t input_index); // get real input index for some tbe ops which input order is different between me and tbe impl static size_t GetRealInputIndex(const AnfNodePtr &anf_node, const size_t cur_index); + static bool IsCommunicationOp(const AnfNodePtr &node); }; } // namespace session using AnfAlgo = session::AnfRuntimeAlgorithm; diff --git a/mindspore/ccsrc/session/gpu_session.cc b/mindspore/ccsrc/session/gpu_session.cc index 29330fb193..bbcf2228cc 100644 --- a/mindspore/ccsrc/session/gpu_session.cc +++ b/mindspore/ccsrc/session/gpu_session.cc @@ -102,10 +102,6 @@ GraphId GPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList graph->set_execution_order(execution_order); // Alloc memory, including static memory and dynamic memory AllocateMemory(graph.get()); - // Reset memory resource - auto runtime_instance = device::KernelRuntimeManager::Instance().GetSingleKernelRuntime(kGPUDevice, device_id_); - MS_EXCEPTION_IF_NULL(runtime_instance); - runtime_instance->FreeHostMemory(); return graph_id; } diff --git a/tests/ut/cpp/CMakeLists.txt b/tests/ut/cpp/CMakeLists.txt index 8d3f8a8138..3c1351a857 100644 --- a/tests/ut/cpp/CMakeLists.txt +++ b/tests/ut/cpp/CMakeLists.txt @@ -85,6 +85,7 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "../../../mindspore/ccsrc/kernel/oplib/*.cc" "../../../mindspore/ccsrc/kernel/tbe/*.cc" "../../../mindspore/ccsrc/device/kernel_runtime.cc" + "../../../mindspore/ccsrc/device/memory_manager.cc" "../../../mindspore/ccsrc/device/kernel_runtime_manager.cc" "../../../mindspore/ccsrc/device/kernel_info.cc" "../../../mindspore/ccsrc/device/ascend/profiling/*.cc" @@ -92,6 +93,7 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "../../../mindspore/ccsrc/device/convert_tensor_utils.cc" "../../../mindspore/ccsrc/device/ascend/kernel_build_ascend.cc" "../../../mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc" + 
"../../../mindspore/ccsrc/device/ascend/ascend_memory_manager.cc" "../../../mindspore/ccsrc/device/ascend/ascend_device_address.cc" "../../../mindspore/ccsrc/device/ascend/ascend_memory_allocator.cc" "../../../mindspore/ccsrc/predict/generator/utils/ir_model_util.cc" From 99f22c5b8583b74dd8cdf81772e11773f481495c Mon Sep 17 00:00:00 2001 From: zhousiyi Date: Tue, 7 Apr 2020 01:52:36 +0000 Subject: [PATCH 137/367] fix children issue: missing users of the children's direct users. --- mindspore/ccsrc/ir/manager.cc | 38 +++---------------- mindspore/ccsrc/ir/manager.h | 14 +------ .../python/pynative_mode/test_framstruct.py | 27 +++++-------- 3 files changed, 18 insertions(+), 61 deletions(-) diff --git a/mindspore/ccsrc/ir/manager.cc b/mindspore/ccsrc/ir/manager.cc index 09bc7127c1..ac3e256703 100644 --- a/mindspore/ccsrc/ir/manager.cc +++ b/mindspore/ccsrc/ir/manager.cc @@ -985,40 +985,14 @@ void ParentComputer::RealRecompute(FuncGraphPtr fg) { } } -// children include: -// A. func graphs which use variables in fg as free variables; (child_direct_) -// B. func graphs which call func func graph in A. 
(all_users_) -FuncGraphSetPtr ChildrenComputer::SeekChildren(const FuncGraphPtr& fg, const FuncGraphSetPtr& path) { - if (path == nullptr || path->contains(fg)) { - return std::make_shared(); - } - std::shared_ptr children = std::make_shared(); - auto& deps = *child_direct_; - auto& users = *all_users_; - MS_LOG(DEBUG) << "" << fg->ToString() << " start func graph dep size:" << deps[fg].size(); - for (auto& dep : deps[fg]) { - FuncGraphPtr child = dep.first; - children->add(child); - path->add(child); - MS_LOG(DEBUG) << "Child func graph:" << fg->ToString() << " child " << child->ToString(); - for (auto& user : users[child]) { - auto user_func_graph = user.first; - MS_LOG(DEBUG) << "Func graph:" << fg->ToString() << " user " << user_func_graph->ToString(); - children->add(user_func_graph); - path->add(user_func_graph); - } - children->update(SeekChildren(child, path)); - } - (void)children->erase(fg); - MS_LOG(DEBUG) << "End in children: " << children->size(); - return children; -} - void ChildrenComputer::RealRecompute(FuncGraphPtr fg) { MS_EXCEPTION_IF_NULL(manager_); - child_direct_ = &manager_->func_graph_child_direct(); - all_users_ = &manager_->func_graph_users(); - children_analysis_[fg].update(SeekChildren(fg)); + auto used_fg_total = manager_->func_graphs_used_total(fg); + for (auto& used_fg : used_fg_total) { + if (manager_->parent(used_fg) == fg) { + children_analysis_[fg].add(used_fg); + } + } } void ScopeComputer::RealRecompute(FuncGraphPtr fg) { diff --git a/mindspore/ccsrc/ir/manager.h b/mindspore/ccsrc/ir/manager.h index 8036bd68c0..f3092613a1 100644 --- a/mindspore/ccsrc/ir/manager.h +++ b/mindspore/ccsrc/ir/manager.h @@ -398,11 +398,8 @@ class ParentComputer final : public DepComputer { // graph's children graph except self class ChildrenComputer final : public DepComputer { public: - explicit ChildrenComputer(const FuncGraphManager* m) : DepComputer(m), all_users_(nullptr), child_direct_(nullptr) {} - ~ChildrenComputer() override { - all_users_ = 
nullptr; - child_direct_ = nullptr; - } + explicit ChildrenComputer(const FuncGraphManager* m) : DepComputer(m) {} + ~ChildrenComputer() override = default; FuncGraphToFuncGraphSetMap& children_analysis() { return children_analysis_; } @@ -414,13 +411,6 @@ class ChildrenComputer final : public DepComputer { void ExtraReset() override { children_analysis_.clear(); } void RealRecompute(FuncGraphPtr fg) override; - - private: - FuncGraphSetPtr SeekChildren(const FuncGraphPtr& fg, const FuncGraphSetPtr& path = std::make_shared()); - // when SeekChildren calls itself recursively, it can access these variables by class member - // other than pass by formal parameters, it can save 2 parameters for SeekChildren(). - FuncGraphToFuncGraphCounterMap* all_users_; - FuncGraphToFuncGraphCounterMap* child_direct_; }; // graph's children graph include self diff --git a/tests/ut/python/pynative_mode/test_framstruct.py b/tests/ut/python/pynative_mode/test_framstruct.py index ff7cf67f52..eb3b76765a 100644 --- a/tests/ut/python/pynative_mode/test_framstruct.py +++ b/tests/ut/python/pynative_mode/test_framstruct.py @@ -38,16 +38,6 @@ def setup_module(module): context.set_context(mode=context.PYNATIVE_MODE) -@ms_function -def refactor_fac(n): - """ grad_refactor_fac """ - if n == 0: - return 1 - return n * refactor_fac(n-1) -def test_refactor(): - res = refactor_fac(3) - assert res == 6 - @ms_function def while_upper_bound(upper): rval = 2 @@ -386,16 +376,19 @@ def test_grad_while(): assert grad_while(5) == (60,) @ms_function -def fac(n): - """ fac """ +def factorial(n): + """ factorial """ if n == 0: return 1 - return n * fac(n-1) + return n * factorial(n-1) + +def test_factorial(): + res = factorial(3) + assert res == 6 -def test_fac(): - """ test_fac """ - res = fac(4) - assert res == 24 +def test_grad_factorial(): + res = C.grad(factorial)(3) + assert res == 11 def _for(x): """ _for """ From f385f2a48b1a277b73785ef224a526847faf49f3 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Mon, 
6 Apr 2020 11:22:47 +0800 Subject: [PATCH 138/367] default build command "-z" (minddata) and "-M on" (gpu) by default in build.sh --- build.sh | 24 +++++++++++++++++------- cmake/mind_expression.cmake | 8 ++++---- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/build.sh b/build.sh index 9d812d6dcc..8bb5f3d95a 100755 --- a/build.sh +++ b/build.sh @@ -26,7 +26,7 @@ usage() echo "Usage:" echo "bash build.sh [-d] [-r] [-v] [-c on|off] [-t on|off] [-g on|off] [-h] [-s] [-b ge|cpu] [-m infer|train] \\" echo " [-a on|off] [-g on|off] [-p on|off] [-i] [-L] [-R] [-D on|off] [-j[n]] [-e gpu|d|cpu] \\" - echo " [-P on|off] [-z] [-M on|off] [-V 9.2|10.1] [-I] [-K]" + echo " [-P on|off] [-z [on|off]] [-M on|off] [-V 9.2|10.1] [-I] [-K]" echo "" echo "Options:" echo " -d Debug mode" @@ -50,8 +50,8 @@ usage() echo " -P Enable dump anf graph to file in ProtoBuffer format, default on" echo " -Q Enable dump end to end, default off" echo " -D Enable dumping of function graph ir, default on" - echo " -z Compile dataset & mindrecord, default off" - echo " -M Enable MPI and NCCL for GPU training, default off" + echo " -z Compile dataset & mindrecord, default on" + echo " -M Enable MPI and NCCL for GPU training, default on" echo " -V Specify the minimum required cuda version, default CUDA 9.2" echo " -I Compile predict, default off" echo " -K Compile with AKG, default off" @@ -88,8 +88,8 @@ checkopts() ENABLE_DUMP2PROTO="on" ENABLE_DUMPE2E="off" ENABLE_DUMP_IR="on" - COMPILE_MINDDATA="off" - ENABLE_MPI="off" + COMPILE_MINDDATA="on" + ENABLE_MPI="on" CUDA_VERSION="9.2" COMPILE_PREDICT="off" USE_GLOG="on" @@ -177,7 +177,7 @@ checkopts() if [[ "X$OPTARG" == "Xgpu" ]]; then ENABLE_GPU="on" ENABLE_CPU="on" - elif [[ "X$OPTARG" == "Xd" ]]; then + elif [[ "X$OPTARG" == "Xd" || "X$OPTARG" == "Xascend" ]]; then ENABLE_D="on" ENABLE_CPU="on" elif [[ "X$OPTARG" == "Xcpu" ]]; then @@ -216,7 +216,17 @@ checkopts() echo "enable dump function graph ir" ;; z) - COMPILE_MINDDATA="on" + 
eval ARG=\$\{$OPTIND\} + if [[ -n $ARG && $ARG != -* ]]; then + OPTARG=$ARG + check_on_off $OPTARG z + OPTIND=$((OPTIND + 1)) + else + OPTARG="" + fi + if [[ "X$OPTARG" == "Xoff" ]]; then + COMPILE_MINDDATA="off" + fi ;; I) COMPILE_PREDICT="on" diff --git a/cmake/mind_expression.cmake b/cmake/mind_expression.cmake index 345fd4675e..af122d4117 100644 --- a/cmake/mind_expression.cmake +++ b/cmake/mind_expression.cmake @@ -29,11 +29,11 @@ if (ENABLE_GPU) include(${CMAKE_SOURCE_DIR}/cmake/external_libs/dmlc_core.cmake) include(${CMAKE_SOURCE_DIR}/cmake/external_libs/rang.cmake) include(${CMAKE_SOURCE_DIR}/cmake/external_libs/tvm_gpu.cmake) -endif() -if (ENABLE_MPI) - include(${CMAKE_SOURCE_DIR}/cmake/external_libs/nccl.cmake) - include(${CMAKE_SOURCE_DIR}/cmake/external_libs/ompi.cmake) + if (ENABLE_MPI) + include(${CMAKE_SOURCE_DIR}/cmake/external_libs/nccl.cmake) + include(${CMAKE_SOURCE_DIR}/cmake/external_libs/ompi.cmake) + endif() endif() if (ENABLE_GE) From d9f7e56b434daa29c075a41e446ef9ac36690ebf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Tue, 7 Apr 2020 14:56:05 +0800 Subject: [PATCH 139/367] fix review opinions in doc/comments --- mindspore/common/initializer.py | 2 +- mindspore/ops/operations/array_ops.py | 86 ++++++++++----------- mindspore/ops/operations/math_ops.py | 105 +++++++++++++------------- mindspore/ops/operations/nn_ops.py | 44 +++++------ mindspore/train/serialization.py | 2 +- 5 files changed, 116 insertions(+), 123 deletions(-) diff --git a/mindspore/common/initializer.py b/mindspore/common/initializer.py index 4261621272..d55e03314d 100644 --- a/mindspore/common/initializer.py +++ b/mindspore/common/initializer.py @@ -276,7 +276,7 @@ def initializer(init, shape=None, dtype=mstype.float32): shape (Union[tuple, list, int]): A list of integers, a tuple of integers or an integer as the shape of output. Default: None. - dtype (:class:`mindspore.dtype`): The type of data in initialized tensor. 
Default: mstype.float32. + dtype (:class:`mindspore.dtype`): The type of data in initialized tensor. Default: mindspore.float32. Returns: Tensor, initialized tensor. diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 59d3083c5d..dda490566f 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -62,7 +62,7 @@ class ExpandDims(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) - >>> expand_dims = ExpandDims() + >>> expand_dims = P.ExpandDims() >>> output = expand_dims(input_tensor, 0) """ @@ -101,7 +101,7 @@ class DType(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) - >>> type = DType()(input_tensor) + >>> type = P.DType()(input_tensor) """ @prim_attr_register @@ -134,7 +134,7 @@ class SameTypeShape(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) >>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) - >>> out = SameTypeShape()(input_x, input_y) + >>> out = P.SameTypeShape()(input_x, input_y) """ @prim_attr_register @@ -175,7 +175,7 @@ class Cast(PrimitiveWithInfer): >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32) >>> input_x = Tensor(input_np) >>> type_dst = mindspore.int32 - >>> cast = Cast() + >>> cast = P.Cast() >>> result = cast(input_x, type_dst) >>> expect = input_np.astype(type_dst) """ @@ -227,7 +227,7 @@ class IsSubClass(PrimitiveWithInfer): bool, the check result. 
Examples: - >>> result = IsSubClass()(mindspore.int32, mindspore.intc) + >>> result = P.IsSubClass()(mindspore.int32, mindspore.intc) """ @prim_attr_register @@ -262,7 +262,7 @@ class IsInstance(PrimitiveWithInfer): Examples: >>> a = 1 - >>> result = IsInstance()(a, mindspore.int32) + >>> result = P.IsInstance()(a, mindspore.int32) """ @prim_attr_register @@ -303,7 +303,7 @@ class Reshape(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32) - >>> reshape = Reshape() + >>> reshape = P.Reshape() >>> output = reshape(input_tensor, (3, 2)) """ @@ -366,7 +366,7 @@ class Shape(Primitive): Examples: >>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32) - >>> shape = Shape() + >>> shape = P.Shape() >>> output = shape(input_tensor) """ @@ -398,7 +398,7 @@ class Squeeze(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32) - >>> squeeze = Squeeze(2) + >>> squeeze = P.Squeeze(2) >>> output = squeeze(input_tensor) """ @@ -450,7 +450,7 @@ class Transpose(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]), mindspore.float32) >>> perm = (0, 2, 1) - >>> transpose = Transpose() + >>> transpose = P.Transpose() >>> output = transpose(input_tensor, perm) """ @@ -504,10 +504,10 @@ class GatherV2(PrimitiveWithInfer): Tensor, the shape of tensor is :math:`(z_1, z_2, ..., z_N)`. 
Examples: - >>> params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32) - >>> indices = Tensor(np.array([1, 2]), mindspore.int32) + >>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32) + >>> input_indices = Tensor(np.array([1, 2]), mindspore.int32) >>> axis = 1 - >>> out = GatherV2()(params, indices, axis) + >>> out = P.GatherV2()(input_params, input_indices, axis) """ @prim_attr_register @@ -556,7 +556,7 @@ class Split(PrimitiveWithInfer): :math:`(y_1, y_2, ..., y_S)`. Examples: - >>> split = Split(1, 2) + >>> split = P.Split(1, 2) >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]])) >>> output = split(x) """ @@ -606,7 +606,7 @@ class Rank(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) - >>> rank = Rank() + >>> rank = P.Rank() >>> rank(input_tensor) """ @@ -640,7 +640,7 @@ class TruncatedNormal(PrimitiveWithInfer): Examples: >>> input_shape = Tensor(np.array([1, 2, 3])) - >>> truncated_normal = TruncatedNormal() + >>> truncated_normal = P.TruncatedNormal() >>> output = truncated_normal(input_shape) """ @@ -681,7 +681,7 @@ class Size(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32) - >>> size = Size() + >>> size = P.Size() >>> output = size(input_tensor) """ @@ -826,7 +826,7 @@ class TupleToArray(PrimitiveWithInfer): Tensor, if the input tuple contain `N` numbers, then the output tensor shape is (N,). Examples: - >>> type = TupleToArray()((1,2,3)) + >>> type = P.TupleToArray()((1,2,3)) """ @prim_attr_register @@ -861,7 +861,7 @@ class ScalarToArray(PrimitiveWithInfer): Tensor. 0-D Tensor and the content is the input. Examples: - >>> op = ScalarToArray() + >>> op = P.ScalarToArray() >>> data = 1.0 >>> output = op(data) """ @@ -893,7 +893,7 @@ class ScalarToTensor(PrimitiveWithInfer): Tensor. 0-D Tensor and the content is the input. 
Examples: - >>> op = ScalarToTensor() + >>> op = P.ScalarToTensor() >>> data = 1 >>> output = op(data, mindspore.float32) """ @@ -934,7 +934,7 @@ class InvertPermutation(PrimitiveWithInfer): tuple[int]. the lenth is same as input. Examples: - >>> invert = InvertPermutation() + >>> invert = P.InvertPermutation() >>> input_data = (3, 4, 0, 2, 1) >>> output = invert(input_data) >>> output == (2, 4, 3, 0, 1) @@ -982,8 +982,8 @@ class Argmax(PrimitiveWithInfer): Tensor, indices of the max value of input tensor across the axis. Examples: - >>> input = Tensor(np.array([2.0, 3.1, 1.2])) - >>> index = Argmax()(input) + >>> input_x = Tensor(np.array([2.0, 3.1, 1.2])) + >>> index = P.Argmax()(input_x) >>> assert index == Tensor(1, mindspore.int64) """ @@ -1030,8 +1030,8 @@ class Argmin(PrimitiveWithInfer): Tensor, indices of the min value of input tensor across the axis. Examples: - >>> input = Tensor(np.array([2.0, 3.1, 1.2])) - >>> index = Argmin()(input) + >>> input_x = Tensor(np.array([2.0, 3.1, 1.2])) + >>> index = P.Argmin()(input_x) >>> assert index == Tensor(2, mindspore.int64) """ @@ -1082,8 +1082,8 @@ class ArgMaxWithValue(PrimitiveWithInfer): :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`. Examples: - >>> input = Tensor(np.random.rand(5)) - >>> index, output = ArgMaxWithValue()(input) + >>> input_x = Tensor(np.random.rand(5)) + >>> index, output = P.ArgMaxWithValue()(input_x) """ @prim_attr_register @@ -1129,8 +1129,8 @@ class ArgMinWithValue(PrimitiveWithInfer): :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)`. 
Examples: - >>> input = Tensor(np.random.rand(5)) - >>> index, output = ArgMinWithValue()(input) + >>> input_x = Tensor(np.random.rand(5)) + >>> index, output = P.ArgMinWithValue()(input_x) """ @prim_attr_register def __init__(self, axis=0, keep_dims=False): @@ -1325,7 +1325,7 @@ class Concat(PrimitiveWithInfer): Examples: >>> data1 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32)) >>> data2 = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32)) - >>> op = Concat() + >>> op = P.Concat() >>> output = op((data1, data2)) """ @@ -1607,7 +1607,7 @@ class Select(PrimitiveWithInfer): Tensor, has the same shape as input_y. The shape is :math:`(x_1, x_2, ..., x_N, ..., x_R)`. Examples: - >>> select = Select() + >>> select = P.Select() >>> input_x = Tensor([True, False]) >>> input_y = Tensor([2,3], mindspore.float32) >>> input_z = Tensor([1,2], mindspore.float32) @@ -1681,7 +1681,7 @@ class StridedSlice(PrimitiveWithInfer): Examples >>> input_x = Tensor([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], >>> [[5, 5, 5], [6, 6, 6]]], mindspore.float32) - >>> slice = StridedSlice() + >>> slice = P.StridedSlice() >>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1)) >>> output.shape() (1, 1, 3) @@ -1913,9 +1913,9 @@ class ScatterNd(PrimitiveWithInfer): Tensor, the new tensor, has the same type as `update` and the same shape as `shape`. 
Examples: - >>> op = ScatterNd() - >>> update = Tensor(np.array([3.2, 1.1]), mindspore.float32) + >>> op = P.ScatterNd() >>> indices = Tensor(np.array([[0, 1], [1, 1]]), mindspore.int32) + >>> update = Tensor(np.array([3.2, 1.1]), mindspore.float32) >>> shape = (3, 3) >>> output = op(indices, update, shape) """ @@ -1964,7 +1964,7 @@ class ResizeNearestNeighbor(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32) - >>> resize = ResizeNearestNeighbor((2, 2)) + >>> resize = P.ResizeNearestNeighbor((2, 2)) >>> output = resize(input_tensor) """ @@ -1997,7 +1997,7 @@ class GatherNd(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32) >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32) - >>> op = GatherNd() + >>> op = P.GatherNd() >>> output = op(input_x, indices) """ @@ -2039,7 +2039,7 @@ class ScatterNdUpdate(PrimitiveWithInfer): >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32) >>> indices = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32) >>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32) - >>> op = ScatterNdUpdate() + >>> op = P.ScatterNdUpdate() >>> output = op(input_x, indices, update) """ @@ -2090,7 +2090,7 @@ class SpaceToDepth(PrimitiveWithInfer): Examples: >>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32) >>> block_size = 2 - >>> op = SpaceToDepth(block_size) + >>> op = P.SpaceToDepth(block_size) >>> output = op(x) >>> output.asnumpy().shape == (1,12,1,1) """ @@ -2148,7 +2148,7 @@ class DepthToSpace(PrimitiveWithInfer): Examples: >>> x = Tensor(np.random.rand(1,12,1,1), mindspore.float32) >>> block_size = 2 - >>> op = DepthToSpace(block_size) + >>> op = P.DepthToSpace(block_size) >>> output = op(x) >>> output.asnumpy().shape == (1,3,2,2) """ @@ -2212,8 +2212,8 @@ class SpaceToBatch(PrimitiveWithInfer): >>> block_size = 2 >>> paddings = [[0, 0], [0, 0]] 
>>> space_to_batch = P.SpaceToBatch(block_size, paddings) - >>> x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32) - >>> space_to_batch(x) + >>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32) + >>> space_to_batch(input_x) [[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]] """ @@ -2280,8 +2280,8 @@ class BatchToSpace(PrimitiveWithInfer): >>> block_size = 2 >>> crops = [[0, 0], [0, 0]] >>> op = P.BatchToSpace(block_size, crops) - >>> x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32) - >>> output = op(x) + >>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32) + >>> output = op(input_x) [[[[1., 2.], [3., 4.]]]] """ diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index d003f6ee8b..1294a65d02 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -112,9 +112,9 @@ class TensorAdd(_MathBinaryOp): Examples: >>> add = P.TensorAdd() - >>> x = Tensor(np.array([1,2,3]).astype(np.float32)) - >>> y = Tensor(np.array([4,5,6]).astype(np.float32)) - >>> add(x, y) + >>> input_x = Tensor(np.array([1,2,3]).astype(np.float32)) + >>> input_y = Tensor(np.array([4,5,6]).astype(np.float32)) + >>> add(input_x, input_y) [5,7,9] """ @@ -124,23 +124,24 @@ class AssignAdd(PrimitiveWithInfer): Updates a `Parameter` by adding a value to it. Inputs: - - **input_x** (Parameter) - The `Parameter`. - - **input_y** (Union[scalar, Tensor]) - Has the same shape as `input_x`. + - **variable** (Parameter) - The `Parameter`. + - **value** (Union[numbers.Number, Tensor]) - The value to be added to the `variable`. + It should have the same shape as `variable` if it is a Tensor. 
Examples: >>> class Net(Cell): >>> def __init__(self): >>> super(Net, self).__init__() >>> self.AssignAdd = P.AssignAdd() - >>> self.inputdata = Parameter(initializer(1, [1], mindspore.int64), name="global_step") + >>> self.variable = Parameter(initializer(1, [1], mindspore.int64), name="global_step") >>> >>> def construct(self, x): - >>> self.AssignAdd(self.inputdata, x) - >>> return self.inputdata + >>> self.AssignAdd(self.variable, x) + >>> return self.variable >>> >>> net = Net() - >>> x = Tensor(np.ones([1]).astype(np.int64)*100) - >>> net(x) + >>> value = Tensor(np.ones([1]).astype(np.int64)*100) + >>> net(value) """ __mindspore_signature__ = ( ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD), @@ -166,22 +167,24 @@ class AssignSub(PrimitiveWithInfer): Updates a `Parameter` by subtracting a value from it. Inputs: - - **input_x** (Parameter) - The `Parameter`. - - **input_y** (Union[scalar, Tensor]) - Has the same shape as `input_x`. + - **variable** (Parameter) - The `Parameter`. + - **value** (Union[numbers.Number, Tensor]) - The value to be subtracted from the `variable`. + It should have the same shape as `variable` if it is a Tensor. Examples: >>> class Net(Cell): >>> def __init__(self): + >>> super(Net, self).__init__() >>> self.AssignSub = P.AssignSub() - >>> self.inputdata = Parameter(initializer(1, [1], mindspore.int64), name="global_step") + >>> self.variable = Parameter(initializer(1, [1], mindspore.int64), name="global_step") >>> >>> def construct(self, x): - >>> self.AssignSub(self.inputdata, x) - >>> return self.inputdata + >>> self.AssignSub(self.variable, x) + >>> return self.variable >>> >>> net = Net() - >>> x = Tensor(np.ones([1]).astype(np.int64)*100) - >>> net(x) + >>> value = Tensor(np.ones([1]).astype(np.int64)*100) + >>> net(value) """ __mindspore_signature__ = ( @@ -263,9 +266,9 @@ class ReduceMean(_Reduce): the shape of output is :math:`(x_1, x_4, ..., x_R)`. 
Examples: - >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) + >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = P.ReduceMean(keep_dims=True) - >>> output = op(data, 1) + >>> output = op(input_x, 1) """ @@ -295,9 +298,9 @@ class ReduceSum(_Reduce): the shape of output is :math:`(x_1, x_4, ..., x_R)`. Examples: - >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) + >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = P.ReduceSum(keep_dims=True) - >>> output = op(data, 1) + >>> output = op(input_x, 1) """ @@ -328,9 +331,9 @@ class ReduceAll(_Reduce): the shape of output is :math:`(x_1, x_4, ..., x_R)`. Examples: - >>> data = Tensor(np.array([[True, False], [True, True]])) + >>> input_x = Tensor(np.array([[True, False], [True, True]])) >>> op = P.ReduceAll(keep_dims=True) - >>> output = op(data, 1) + >>> output = op(input_x, 1) """ def __infer__(self, input_x, axis): @@ -364,9 +367,9 @@ class ReduceMax(_Reduce): the shape of output is :math:`(x_1, x_4, ..., x_R)`. Examples: - >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) + >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = P.ReduceMax(keep_dims=True) - >>> output = op(data, 1) + >>> output = op(input_x, 1) """ @@ -397,9 +400,9 @@ class ReduceMin(_Reduce): the shape of output is :math:`(x_1, x_4, ..., x_R)`. Examples: - >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) + >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = P.ReduceMin(keep_dims=True) - >>> output = op(data, 1) + >>> output = op(input_x, 1) """ @@ -429,9 +432,9 @@ class ReduceProd(_Reduce): the shape of output is :math:`(x_1, x_4, ..., x_R)`. 
Examples: - >>> data = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) + >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32)) >>> op = P.ReduceProd(keep_dims=True) - >>> output = op(data, 1) + >>> output = op(input_x, 1) """ @@ -451,15 +454,15 @@ class CumProd(PrimitiveWithInfer): Tensor, has the same shape and dtype as the 'input_x'. Examples: - >>> data = Tensor(np.array([a, b, c]).astype(np.float32)) + >>> input_x = Tensor(np.array([a, b, c]).astype(np.float32)) >>> op0 = P.CumProd() - >>> output = op0(data, 0) # output=[a, a * b, a * b * c] + >>> output = op0(input_x, 0) # output=[a, a * b, a * b * c] >>> op1 = P.CumProd(exclusive=True) - >>> output = op1(data, 0) # output=[1, a, a * b] + >>> output = op1(input_x, 0) # output=[1, a, a * b] >>> op2 = P.CumProd(reverse=True) - >>> output = op2(data, 0) # output=[a * b * c, b * c, c] + >>> output = op2(input_x, 0) # output=[a * b * c, b * c, c] >>> op3 = P.CumProd(exclusive=True, reverse=True) - >>> output = op3(data, 0) # output=[b * c, c, 1] + >>> output = op3(input_x, 0) # output=[b * c, c, 1] """ @prim_attr_register def __init__(self, exclusive=False, reverse=False): @@ -1190,7 +1193,7 @@ class FloorMod(_MathBinaryOp): Examples: >>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32) >>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32) - >>> floor_mod = FloorMod() + >>> floor_mod = P.FloorMod() >>> floor_mod(input_x, input_y) [2, 1, 2] """ @@ -1207,9 +1210,9 @@ class Acosh(PrimitiveWithInfer): Tensor, has the same shape as `input_x`. Examples: - >>> acosh = Acosh() - >>> X = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32) - >>> output = acosh(X) + >>> acosh = P.Acosh() + >>> input_x = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32) + >>> output = acosh(input_x) """ @prim_attr_register @@ -1286,7 +1289,7 @@ class EqualCount(PrimitiveWithInfer): - **input_y** (Tensor) - The second input tensor. Outputs: - Tensor, has the same shape as the `input_x`. 
+ Tensor, with the type as `mindspore.int32` and size as (1,). Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) @@ -1324,7 +1327,7 @@ class NotEqual(_LogicBinaryOp): Inputs: - **input_x** (Union[Tensor, Number, bool]) - The first input is a tensor whose data type is number or bool, or a number or a bool object. - - **input_y** (Union[Tensor, Number, bool]) - The second input tensor whose data type is same as 'input_x' or + - **input_y** (Union[Tensor, Number, bool]) - The second input tensor whose data type is same as `input_x` or a number or a bool object. Outputs: @@ -1359,11 +1362,11 @@ class Greater(_LogicBinaryOp): Inputs: - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number. - - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or + - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as `input_x` or a number. Outputs: - Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'. + Tensor, the shape is same as the shape after broadcasting, and the data type is bool. Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) @@ -1386,11 +1389,11 @@ class GreaterEqual(_LogicBinaryOp): Inputs: - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number. - - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or + - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as `input_x` or a number. Outputs: - Tensor, the shape is same as the shape after broadcasting, and the data type is bool'. + Tensor, the shape is same as the shape after broadcasting, and the data type is bool. 
Examples: >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) @@ -1413,7 +1416,7 @@ class Less(_LogicBinaryOp): Inputs: - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number. - - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or + - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as `input_x` or a number. Outputs: @@ -1440,7 +1443,7 @@ class LessEqual(_LogicBinaryOp): Inputs: - **input_x** (Union[Tensor, Number]) - The first input is a tensor whose data type is number or a number. - - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or + - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as `input_x` or a number. Outputs: @@ -1752,8 +1755,8 @@ class Cos(PrimitiveWithInfer): Examples: >>> cos = P.Cos() - >>> X = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32) - >>> output = cos(X) + >>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32) + >>> output = cos(input_x) """ @prim_attr_register @@ -1780,8 +1783,8 @@ class ACos(PrimitiveWithInfer): Examples: >>> acos = P.ACos() - >>> X = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32) - >>> output = acos(X) + >>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32) + >>> output = acos(input_x) """ @prim_attr_register @@ -1993,7 +1996,7 @@ class Atan2(_MathBinaryOp): - **input_y** (Tensor) - The input tensor. Outputs: - Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'. + Tensor, the shape is same as the shape after broadcasting, and the data type is same as `input_x`. 
Examples: >>> input_x = Tensor(np.array([[0, 1]]), mindspore.float32) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 9ee98d174e..83f76455e0 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -41,7 +41,7 @@ class Flatten(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32) - >>> flatten = Flatten() + >>> flatten = P.Flatten() >>> output = flatten(input_tensor) >>> assert output.shape() == (1, 24) """ @@ -155,7 +155,7 @@ class ReLU(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32)) - >>> relu = ReLU() + >>> relu = P.ReLU() >>> result = relu(input_x) [[0, 4.0, 0.0], [2.0, 0.0, 9.0]] """ @@ -188,7 +188,7 @@ class ReLU6(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32)) - >>> relu6 = ReLU6() + >>> relu6 = P.ReLU6() >>> result = relu6(input_x) """ @@ -222,10 +222,10 @@ class Elu(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32)) - >>> elu = Elu() + >>> elu = P.Elu() >>> result = elu(input_x) Tensor([[-0.632 4.0 -0.999] - [2.0 -0.993 9.0 ]], shape=(2, 3), dtype=ms.float32) + [2.0 -0.993 9.0 ]], shape=(2, 3), dtype=mindspore.float32) """ @prim_attr_register @@ -1082,7 +1082,7 @@ class TopK(PrimitiveWithInfer): Examples: >>> topk = P.TopK(sorted=True) - >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)) + >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16) >>> k = 3 >>> values, indices = topk(input_x, k) >>> assert values == Tensor(np.array([5, 4, 3])) @@ -1223,8 +1223,8 @@ class ApplyMomentum(PrimitiveWithInfer): Examples: >>> net = ResNet50() - >>> loss = SoftmaxCrossEntropyWithLogits() - >>> opt = ApplyMomentum(Tensor(np.array([0.001])), Tensor(np.array([0.9])), + >>> loss = nn.SoftmaxCrossEntropyWithLogits() + >>> opt = 
P.ApplyMomentum(Tensor(np.array([0.001])), Tensor(np.array([0.9])), filter(lambda x: x.requires_grad, net.get_parameters())) >>> model = Model(net, loss, opt) """ @@ -1351,6 +1351,7 @@ class SGD(PrimitiveWithInfer): class ApplyRMSProp(PrimitiveWithInfer): """ Optimizer that implements the Root Mean Square prop(RMSProp) algorithm. + Please refer to the usage in source code of `nn.RMSProp`. Note: Update `var` according to the RMSProp algorithm. @@ -1386,12 +1387,6 @@ class ApplyRMSProp(PrimitiveWithInfer): Outputs: Tensor, parameters to be update. - - Examples: - >>> net = Net() - >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> opt = RMSProp(params=net.trainable_params(), learning_rate=learning_rate) - >>> model = Model(net, loss, opt) """ @prim_attr_register @@ -1424,6 +1419,7 @@ class ApplyRMSProp(PrimitiveWithInfer): class ApplyCenteredRMSProp(PrimitiveWithInfer): """ Optimizer that implements the centered RMSProp algorithm. + Please refer to the usage in source code of `nn.RMSProp`. Note: Update `var` according to the centered RMSProp algorithm. @@ -1464,12 +1460,6 @@ class ApplyCenteredRMSProp(PrimitiveWithInfer): Outputs: Tensor, parameters to be update. - - Examples: - >>> net = Net() - >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> opt = RMSProp(params=net.trainable_params(), learning_rate=learning_rate, centered=True) - >>> model = Model(net, loss, opt) """ @prim_attr_register @@ -1596,7 +1586,7 @@ class DropoutGenMask(Primitive): Tensor, the value of generated mask for input shape. 
Examples: - >>> dropout_gen_mask = DropoutGenMask() + >>> dropout_gen_mask = P.DropoutGenMask() >>> shape = (20, 16, 50) >>> keep_prob = Tensor(0.5, mindspore.float32) >>> mask = dropout_gen_mask(shape, keep_prob) @@ -1631,8 +1621,8 @@ class DropoutDoMask(PrimitiveWithInfer): >>> x = Tensor(np.ones([20, 16, 50]), mindspore.float32) >>> shape = (20, 16, 50) >>> keep_prob = Tensor(0.5, mindspore.float32) - >>> dropout_gen_mask = DropoutGenMask() - >>> dropout_do_mask = DropoutDoMask() + >>> dropout_gen_mask = P.DropoutGenMask() + >>> dropout_do_mask = P.DropoutDoMask() >>> mask = dropout_gen_mask(shape, keep_prob) >>> output = dropout_do_mask(x, mask, keep_prob) >>> assert output.shape() == (20, 16, 50) @@ -1737,7 +1727,7 @@ class OneHot(PrimitiveWithInfer): Examples: >>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32) >>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32) - >>> onehot = OneHot() + >>> onehot = P.OneHot() >>> result = onehot(indices, depth, on_value, off_value) [[1, 0, 0], [0, 1, 0], [0, 0, 1]] """ @@ -1793,7 +1783,7 @@ class Gelu(PrimitiveWithInfer): Examples: >>> tensor = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32) - >>> gelu = Gelu() + >>> gelu = P.Gelu() >>> result = gelu(tensor) """ @@ -1834,7 +1824,7 @@ class GetNext(PrimitiveWithInfer): and the type is described is `types`. Examples: - >>> get_next = GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 'shared_name') + >>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 'shared_name') >>> feature, label = get_next() """ @@ -2015,7 +2005,7 @@ class Pad(PrimitiveWithInfer): Examples: >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32) - >>> pad_op = Pad(((1, 2), (2, 1))) + >>> pad_op = P.Pad(((1, 2), (2, 1))) >>> output_tensor = pad_op(input_tensor) >>> assert output_tensor == Tensor(np.array([[ 0. , 0. , 0. , 0. , 0. , 0. ], >>> [ 0. , 0. 
, -0.1, 0.3, 3.6, 0. ], diff --git a/mindspore/train/serialization.py b/mindspore/train/serialization.py index b334c3e9d8..90d8816094 100644 --- a/mindspore/train/serialization.py +++ b/mindspore/train/serialization.py @@ -406,7 +406,7 @@ def export(net, *inputs, file_name, file_format='GEIR'): file_format (str): MindSpore currently supports 'GEIR', 'ONNX' and 'LITE' format for exported model. - GEIR: Graph Engine Intermidiate Representation. An intermidiate representation format of - Ascend model. + Ascend model. - ONNX: Open Neural Network eXchange. An open format built to represent machine learning models. - LITE: Huawei model format for mobile. """ From ede8a987299df4757100bcc8e8af70ef619429ec Mon Sep 17 00:00:00 2001 From: jojobugfree Date: Thu, 9 Apr 2020 14:47:05 +0800 Subject: [PATCH 140/367] change logging to mindspore.log --- mindspore/context.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mindspore/context.py b/mindspore/context.py index 89365f3d1c..2938b87119 100644 --- a/mindspore/context.py +++ b/mindspore/context.py @@ -17,16 +17,14 @@ The context of mindspore, used to configure the current execution environment, including execution mode, execution backend and other feature switchs. 
""" import threading -import logging from collections import namedtuple from types import FunctionType +from mindspore import log as logger from mindspore._c_expression import MSContext from mindspore._extends.pynative_helper import args_type_check from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context, _get_auto_parallel_context, \ _reset_auto_parallel_context -logger = logging.getLogger('Context') - __all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_auto_parallel_context', 'get_auto_parallel_context', 'reset_auto_parallel_context'] From b410a2f0a4e67bd6324dd3cface1df28e1fbb306 Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Thu, 9 Apr 2020 15:58:36 +0800 Subject: [PATCH 141/367] Update setuptool info Signed-off-by: leonwanghui --- CONTRIBUTING.md | 4 +- README.md | 2 +- RELEASE.md | 2 +- package.sh | 2 +- setup_package.py => setup.py | 129 +++++++++++++++++++++++------------ 5 files changed, 90 insertions(+), 49 deletions(-) rename setup_package.py => setup.py (53%) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 85fee704c2..105c620942 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -105,11 +105,11 @@ When reporting issues, refer to this format: * If it is a new feature that needs lots of design details, a design proposal should also be submitted. * After reaching consensus in the issue discussions and design proposal reviews, complete the development on the forked repo and submit a PR. * None of PRs is not permitted until it receives **2+ LGTM** from approvers. Please NOTICE that approver is NOT allowed to add *LGTM* on his own PR. -* After PR is sufficiently discussed, it will get merged, abondoned or rejected depending on the outcome of the discussion. +* After PR is sufficiently discussed, it will get merged, abandoned or rejected depending on the outcome of the discussion. **PRs advisory:** - Any irrelevant changes should be avoided. - Make sure your commit history being ordered. 
- Always keep your branch up with the master branch. -- For bug-fix PRs, make sure all related issues being linked. +- For bug-fix PRs, make sure all related issues being linked. diff --git a/README.md b/README.md index 925c22591d..be8ca5189a 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ Check out how MindSpore Open Governance [works](https://gitee.com/mindspore/comm - [MindSpore Slack](https://join.slack.com/t/mindspore/shared_invite/enQtOTcwMTIxMDI3NjM0LTNkMWM2MzI5NjIyZWU5ZWQ5M2EwMTQ5MWNiYzMxOGM4OWFhZjI4M2E5OGI2YTg3ODU1ODE2Njg1MThiNWI3YmQ) - Communication platform for developers. - IRC channel at `#mindspore` (only for meeting minutes logging purpose) - Video Conferencing: meet.jit.si -- Mailing-list: https://mailweb.mindspore.cn/postorius/lists +- Mailing-list: https://mailweb.mindspore.cn/postorius/lists ## Contributing diff --git a/RELEASE.md b/RELEASE.md index 8920095bb5..ce9064e4b1 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -70,4 +70,4 @@ * [MindSpore Official Website] (https://www.mindspore.cn/) * [MindInsight Visualization Debugging and Optimization] (https://gitee.com/mindspore/mindinsight) * [MindArmour Model Security Hardening Package] (https://gitee.com/mindspore/mindarmour) -* [GraphEngine Computational Graph Engine] (https://gitee.com/mindspore/graphengine) \ No newline at end of file +* [GraphEngine Computational Graph Engine] (https://gitee.com/mindspore/graphengine) diff --git a/package.sh b/package.sh index 67f4761f37..0c75a1bbfd 100755 --- a/package.sh +++ b/package.sh @@ -110,7 +110,7 @@ else export MS_PACKAGE_NAME="mindspore" fi -${PYTHON} "${BASEPATH}/setup_package.py" bdist_wheel +${PYTHON} "${BASEPATH}/setup.py" bdist_wheel chmod -R 700 ${PACKAGE_PATH}/mindspore/ chmod -R 700 ${PACKAGE_PATH}/${MS_PACKAGE_NAME//-/_}.egg-info/ diff --git a/setup_package.py b/setup.py similarity index 53% rename from setup_package.py rename to setup.py index 87b5718de2..e009a9b312 100644 --- a/setup_package.py +++ b/setup.py @@ -14,17 +14,15 @@ # 
See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -"""setup_package.""" +"""setup package.""" import os import stat + from setuptools import setup, find_packages from setuptools.command.egg_info import egg_info from setuptools.command.build_py import build_py version = '0.1.0' -author = 'The MindSpore Authors' -author_email = 'contact@mindspore.cn' -home_page = 'https://www.mindspore.cn' backend_policy = os.getenv('BACKEND_POLICY') commit_id = os.getenv('COMMIT_ID').replace("\n", "") @@ -33,56 +31,70 @@ package_name = os.getenv('MS_PACKAGE_NAME').replace("\n", "") pwd = os.path.dirname(os.path.realpath(__file__)) pkg_dir = os.path.join(pwd, 'build/package') -def write_version(file): + +def _read_file(filename): + with open(os.path.join(pwd, filename)) as f: + return f.read() + + +readme = _read_file('README.md') +release = _read_file('RELEASE.md') + + +def _write_version(file): file.write("__version__ = '{}'\n".format(version)) -def write_config(file): + +def _write_config(file): file.write("__backend__ = '{}'\n".format(backend_policy)) -def write_commit_file(file): + +def _write_commit_file(file): file.write("__commit_id__ = '{}'\n".format(commit_id)) -def build_depends(): + +def build_dependencies(): """generate python file""" - version_file = os.path.join(pwd, 'build/package/mindspore', 'version.py') + version_file = os.path.join(pkg_dir, 'mindspore', 'version.py') with open(version_file, 'w') as f: - write_version(f) + _write_version(f) - version_file = os.path.join(pwd, 'mindspore/', 'version.py') + version_file = os.path.join(pwd, 'mindspore', 'version.py') with open(version_file, 'w') as f: - write_version(f) + _write_version(f) - config_file = os.path.join(pwd, 'build/package/mindspore', 'default_config.py') + config_file = os.path.join(pkg_dir, 'mindspore', 'default_config.py') with open(config_file, 'w') as f: - write_config(f) 
+ _write_config(f) - config_file = os.path.join(pwd, 'mindspore/', 'default_config.py') + config_file = os.path.join(pwd, 'mindspore', 'default_config.py') with open(config_file, 'w') as f: - write_config(f) + _write_config(f) - commit_file = os.path.join(pwd, 'build/package/mindspore', '.commit_id') + commit_file = os.path.join(pkg_dir, 'mindspore', '.commit_id') with open(commit_file, 'w') as f: - write_commit_file(f) + _write_commit_file(f) - commit_file = os.path.join(pwd, 'mindspore/', '.commit_id') + commit_file = os.path.join(pwd, 'mindspore', '.commit_id') with open(commit_file, 'w') as f: - write_commit_file(f) - -descriptions = 'An AI computing framework that supports development for AI applications in all scenarios.' - -requires = [ - 'numpy >= 1.17.0', - 'protobuf >= 3.8.0', - 'asttokens >= 1.1.13', - 'pillow >= 6.2.0', - 'scipy == 1.3.3', - 'easydict >= 1.9', - 'sympy >= 1.4', - 'cffi >= 1.13.2', - 'decorator >= 4.4.0' - ], + _write_commit_file(f) + -package_datas = { +build_dependencies() + +required_package = [ + 'numpy >= 1.17.0', + 'protobuf >= 3.8.0', + 'asttokens >= 1.1.13', + 'pillow >= 6.2.0', + 'scipy == 1.3.3', + 'easydict >= 1.9', + 'sympy >= 1.4', + 'cffi >= 1.13.2', + 'decorator >= 4.4.0' +] + +package_data = { '': [ '*.so*', 'lib/*.so*', @@ -91,7 +103,6 @@ package_datas = { ] } -build_depends() def update_permissions(path): """ @@ -103,20 +114,25 @@ def update_permissions(path): for dirpath, dirnames, filenames in os.walk(path): for dirname in dirnames: dir_fullpath = os.path.join(dirpath, dirname) - os.chmod(dir_fullpath, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC | stat.S_IRGRP | stat.S_IXGRP) + os.chmod(dir_fullpath, stat.S_IREAD | stat.S_IWRITE | + stat.S_IEXEC | stat.S_IRGRP | stat.S_IXGRP) for filename in filenames: file_fullpath = os.path.join(dirpath, filename) os.chmod(file_fullpath, stat.S_IREAD) + class EggInfo(egg_info): """Egg info.""" + def run(self): super().run() egg_info_dir = os.path.join(pkg_dir, 'mindspore.egg-info') 
update_permissions(egg_info_dir) + class BuildPy(build_py): """BuildPy.""" + def run(self): super().run() mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'mindspore') @@ -124,21 +140,46 @@ class BuildPy(build_py): mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'akg') update_permissions(mindspore_dir) + setup( - python_requires='>=3.7', name=package_name, version=version, - author=author, - author_email=author_email, - url=home_page, + author='The MindSpore Authors', + author_email='contact@mindspore.cn', + url='https://www.mindspore.cn', + download_url='https://gitee.com/mindspore/mindspore/tags', + project_urls={ + 'Sources': 'https://gitee.com/mindspore/mindspore', + 'Issue Tracker': 'https://gitee.com/mindspore/mindspore/issues', + }, + description='MindSpore is a new open source deep learning training/inference ' + 'framework that could be used for mobile, edge and cloud scenarios.', + long_description="\n\n".join([readme, release]), packages=find_packages(), - package_data=package_datas, + package_data=package_data, include_package_data=True, cmdclass={ 'egg_info': EggInfo, 'build_py': BuildPy, }, - install_requires=requires, - description=descriptions, + python_requires='>=3.7', + install_requires=required_package, + classifiers=[ + 'Development Status :: 4 - Beta', + 'Environment :: Console', + 'Intended Audience :: Science/Research', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: C++', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Software Development', + 'Topic :: Software Development :: Libraries', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], license='Apache 2.0', + keywords='mindspore machine learning', ) From 
bef62db128d02ad7df6a5ec9de7dcb72ba384972 Mon Sep 17 00:00:00 2001 From: kswang Date: Thu, 9 Apr 2020 15:08:47 +0800 Subject: [PATCH 142/367] add ascend mem pool --- .../device/ascend/ascend_device_address.cc | 4 +-- .../device/ascend/ascend_device_address.h | 2 +- .../device/ascend/ascend_kernel_runtime.cc | 2 +- .../device/ascend/ascend_memory_manager.cc | 22 +++++++------ .../device/ascend/ascend_memory_manager.h | 6 +++- ...ory_allocator.cc => ascend_memory_pool.cc} | 31 +++++++------------ ...emory_allocator.h => ascend_memory_pool.h} | 31 ++++++++++--------- mindspore/ccsrc/device/device_address.h | 2 +- .../ccsrc/device/gpu/gpu_device_address.cc | 2 +- .../ccsrc/device/gpu/gpu_kernel_runtime.cc | 16 +++++----- .../ccsrc/device/gpu/gpu_memory_manager.cc | 8 ++--- .../ccsrc/device/gpu/gpu_memory_manager.h | 6 ++-- mindspore/ccsrc/device/kernel_runtime.cc | 8 ++--- mindspore/ccsrc/device/kernel_runtime.h | 1 - mindspore/ccsrc/device/memory_manager.cc | 22 +++++-------- mindspore/ccsrc/device/memory_manager.h | 15 ++++----- tests/ut/cpp/CMakeLists.txt | 2 +- 17 files changed, 84 insertions(+), 96 deletions(-) rename mindspore/ccsrc/device/ascend/{ascend_memory_allocator.cc => ascend_memory_pool.cc} (62%) rename mindspore/ccsrc/device/ascend/{ascend_memory_allocator.h => ascend_memory_pool.h} (67%) diff --git a/mindspore/ccsrc/device/ascend/ascend_device_address.cc b/mindspore/ccsrc/device/ascend/ascend_device_address.cc index b8b7f452e3..93f039af0e 100644 --- a/mindspore/ccsrc/device/ascend/ascend_device_address.cc +++ b/mindspore/ccsrc/device/ascend/ascend_device_address.cc @@ -262,8 +262,8 @@ AscendDeviceAddress::~AscendDeviceAddress() { if (ptr_ == nullptr) { return; } - if (mem_dynamic_alloc_) { - AscendMemoryAllocator::GetInstance().FreeTensorMem(ptr_); + if (from_mem_pool_) { + AscendMemoryPool::GetInstance().FreeTensorMem(ptr_); ptr_ = nullptr; } } diff --git a/mindspore/ccsrc/device/ascend/ascend_device_address.h 
b/mindspore/ccsrc/device/ascend/ascend_device_address.h index 60cc64cca7..93746082c1 100644 --- a/mindspore/ccsrc/device/ascend/ascend_device_address.h +++ b/mindspore/ccsrc/device/ascend/ascend_device_address.h @@ -21,7 +21,7 @@ #include #include #include "device/device_address.h" -#include "device/ascend/ascend_memory_allocator.h" +#include "device/ascend/ascend_memory_pool.h" #include "ir/dtype.h" namespace mindspore { diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc index 0c2a97a5a6..0c6861e21f 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc @@ -29,7 +29,7 @@ #include "hccl/hcom.h" #include "runtime/context.h" #include "device/ascend/ascend_stream_assign.h" -#include "device/ascend/ascend_memory_allocator.h" +#include "device/ascend/ascend_memory_pool.h" #include "framework/ge_runtime/model_runner.h" #include "device/ascend/tasksink/task_generator.h" #include "session/anf_runtime_algorithm.h" diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_manager.cc b/mindspore/ccsrc/device/ascend/ascend_memory_manager.cc index f033d81d82..42830f54fa 100644 --- a/mindspore/ccsrc/device/ascend/ascend_memory_manager.cc +++ b/mindspore/ccsrc/device/ascend/ascend_memory_manager.cc @@ -15,29 +15,31 @@ */ #include "device/ascend/ascend_memory_manager.h" -#include "device/ascend/ascend_memory_allocator.h" +#include "device/ascend/ascend_memory_pool.h" #include "utils/context/ms_context.h" #include "runtime/mem.h" namespace mindspore { namespace device { namespace ascend { -static const uint64_t ASCEND_MEM_SIZE = 20; -static const uint64_t ASCEND_MEM_SIZE_BYTE = (ASCEND_MEM_SIZE << 30); +const uint64_t kAscendDeviceMemGB = 20; +const uint64_t kAscendMemPoolGB = 5; +const uint64_t kAscendDeviceMemSize = (kAscendDeviceMemGB << 30); +const uint64_t kAscendMemPoolSize = (kAscendMemPoolGB << 30); void 
AscendMemoryManager::MallocDeviceMemory() { - device_mem_size_ = ASCEND_MEM_SIZE_BYTE; - static_mem_offset_ = FloatToSize(device_mem_size_ * GRAPH_INIT_ASCEND_MEM_RATIO); + device_mem_size_ = kAscendDeviceMemSize; + static_mem_offset_ = device_mem_size_; auto ret = rtMalloc(reinterpret_cast(&device_mem_base_), static_mem_offset_, RT_MEMORY_HBM); if (ret != RT_ERROR_NONE) { MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << static_mem_offset_ << "] fail, ret[" << ret << "]"; } - device_mem_pool_size_ = FloatToSize(device_mem_size_ * (1 - GRAPH_INIT_ASCEND_MEM_RATIO)); + device_mem_pool_size_ = kAscendMemPoolSize; ret = rtMalloc(reinterpret_cast(&device_mem_pool_base_), device_mem_pool_size_, RT_MEMORY_HBM); if (ret != RT_ERROR_NONE) { MS_EXCEPTION(DeviceProcessError) << "rtMalloc mem size[" << device_mem_pool_size_ << "] fail, ret[" << ret << "]"; } - AscendMemoryAllocator::GetInstance().set_device_mem_pool_base(device_mem_pool_base_); - AscendMemoryAllocator::GetInstance().set_device_mem_pool_size(device_mem_pool_size_); + AscendMemoryPool::GetInstance().set_device_mem_pool_base(device_mem_pool_base_); + AscendMemoryPool::GetInstance().set_device_mem_pool_size(device_mem_pool_size_); } void AscendMemoryManager::FreeDeviceMemory() { @@ -57,8 +59,8 @@ void AscendMemoryManager::FreeDeviceMemory() { } } -void *AscendMemoryManager::AllocTensorMemDynamic(size_t size) { - return AscendMemoryAllocator::GetInstance().AllocTensorMem(size); +void *AscendMemoryManager::MallocMemFromMemPool(size_t size) { + return AscendMemoryPool::GetInstance().AllocTensorMem(size); } } // namespace ascend } // namespace device diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_manager.h b/mindspore/ccsrc/device/ascend/ascend_memory_manager.h index 8639fb5c72..dea88ac10a 100644 --- a/mindspore/ccsrc/device/ascend/ascend_memory_manager.h +++ b/mindspore/ccsrc/device/ascend/ascend_memory_manager.h @@ -27,7 +27,11 @@ class AscendMemoryManager : public MemoryManager { void 
MallocDeviceMemory() override; void FreeDeviceMemory() override; - void *AllocTensorMemDynamic(size_t size) override; + void *MallocMemFromMemPool(size_t size) override; + + private: + uint8_t *device_mem_pool_base_{nullptr}; + uint64_t device_mem_pool_size_{0}; }; } // namespace ascend } // namespace device diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_allocator.cc b/mindspore/ccsrc/device/ascend/ascend_memory_pool.cc similarity index 62% rename from mindspore/ccsrc/device/ascend/ascend_memory_allocator.cc rename to mindspore/ccsrc/device/ascend/ascend_memory_pool.cc index 08a30a28b7..2c38e4290d 100644 --- a/mindspore/ccsrc/device/ascend/ascend_memory_allocator.cc +++ b/mindspore/ccsrc/device/ascend/ascend_memory_pool.cc @@ -14,24 +14,15 @@ * limitations under the License. */ -#include "device/ascend/ascend_memory_allocator.h" +#include "device/ascend/ascend_memory_pool.h" #include "device/ascend/ascend_kernel_runtime.h" #include "utils/log_adapter.h" namespace mindspore { namespace device { namespace ascend { -const uint64_t MEM_SIZE = 20; -const uint64_t MEM_SIZE_BYTE = (MEM_SIZE << 30); - -AscendMemoryAllocator::AscendMemoryAllocator() { - hasMalloc_ = false; - free_mem_size_ = FloatToSize(MEM_SIZE_BYTE * (1 - GRAPH_INIT_ASCEND_MEM_RATIO)); - total_mem_size_ = free_mem_size_; -} - -size_t AscendMemoryAllocator::AllocDeviceMem(size_t size, DeviceMemPtr* addr) { - if (hasMalloc_) { +size_t AscendMemoryPool::AllocDeviceMem(size_t size, DeviceMemPtr* addr) { + if (has_malloc_) { MS_LOG(EXCEPTION) << "Has alloc memory pool memory !"; } if (size == 0 || size > free_mem_size_) { @@ -41,35 +32,35 @@ size_t AscendMemoryAllocator::AllocDeviceMem(size_t size, DeviceMemPtr* addr) { if (*addr == nullptr) { MS_LOG(EXCEPTION) << "Device memory pool base is nullptr, failed to alloc memory pool memory!"; } - hasMalloc_ = true; + has_malloc_ = true; free_mem_size_ -= size; return size; } -bool AscendMemoryAllocator::FreeDeviceMem(const DeviceMemPtr& addr) { +bool 
AscendMemoryPool::FreeDeviceMem(const DeviceMemPtr& addr) { MS_EXCEPTION_IF_NULL(addr); - hasMalloc_ = false; + has_malloc_ = false; free_mem_size_ = total_mem_size_; return true; } -size_t AscendMemoryAllocator::AlignMemorySize(size_t size) const { +size_t AscendMemoryPool::AlignMemorySize(size_t size) const { if (size == 0) { return DYNAMIC_MEM_ALIGN_SIZE; } return ((size + DYNAMIC_MEM_ALIGN_SIZE + 31) / DYNAMIC_MEM_ALIGN_SIZE) * DYNAMIC_MEM_ALIGN_SIZE; } -size_t AscendMemoryAllocator::mem_alloc_unit_size() const { return free_mem_size_ - 512; } +size_t AscendMemoryPool::mem_alloc_unit_size() const { return free_mem_size_ - 512; } -void AscendMemoryAllocator::set_device_mem_pool_base(uint8_t* device_mem_pool_base) { +void AscendMemoryPool::set_device_mem_pool_base(uint8_t* device_mem_pool_base) { MS_EXCEPTION_IF_NULL(device_mem_pool_base); device_mem_pool_base_ = device_mem_pool_base; } -size_t AscendMemoryAllocator::free_mem_size() { return free_mem_size_; } +size_t AscendMemoryPool::free_mem_size() { return free_mem_size_; } -size_t AscendMemoryAllocator::total_mem_size() { return total_mem_size_; } +size_t AscendMemoryPool::total_mem_size() { return total_mem_size_; } } // namespace ascend } // namespace device } // namespace mindspore diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_allocator.h b/mindspore/ccsrc/device/ascend/ascend_memory_pool.h similarity index 67% rename from mindspore/ccsrc/device/ascend/ascend_memory_allocator.h rename to mindspore/ccsrc/device/ascend/ascend_memory_pool.h index 8b0f89a9b8..c2a29725f4 100644 --- a/mindspore/ccsrc/device/ascend/ascend_memory_allocator.h +++ b/mindspore/ccsrc/device/ascend/ascend_memory_pool.h @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#ifndef MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_ALLOCATOR_H_ -#define MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_ALLOCATOR_H_ +#ifndef MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_POOL_H_ +#define MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_POOL_H_ #include #include "pre_activate/mem_reuse/mem_dynamic_allocator.h" @@ -23,22 +23,23 @@ namespace mindspore { namespace device { namespace ascend { -// The fraction of total ascend memory used to compute the graph. -static const float GRAPH_INIT_ASCEND_MEM_RATIO = 0.8; - -class AscendMemoryAllocator : public DynamicMemPoolBestFit { +class AscendMemoryPool : public DynamicMemPoolBestFit { public: - ~AscendMemoryAllocator() override = default; + ~AscendMemoryPool() override = default; size_t AllocDeviceMem(size_t size, DeviceMemPtr* addr) override; bool FreeDeviceMem(const DeviceMemPtr& addr) override; void set_device_mem_pool_base(uint8_t* device_mem_pool_base); - void set_device_mem_pool_size(uint64_t device_mem_pool_size) { device_mem_pool_size_ = device_mem_pool_size; } + void set_device_mem_pool_size(uint64_t device_mem_pool_size) { + device_mem_pool_size_ = device_mem_pool_size; + free_mem_size_ = device_mem_pool_size_; + total_mem_size_ = free_mem_size_; + } size_t free_mem_size() override; size_t total_mem_size() override; - static AscendMemoryAllocator& GetInstance() { - static AscendMemoryAllocator instance; + static AscendMemoryPool& GetInstance() { + static AscendMemoryPool instance; return instance; } @@ -49,10 +50,10 @@ class AscendMemoryAllocator : public DynamicMemPoolBestFit { size_t mem_alloc_unit_size() const override; private: - AscendMemoryAllocator(); - AscendMemoryAllocator(const AscendMemoryAllocator&) = delete; - AscendMemoryAllocator& operator=(const AscendMemoryAllocator&) = delete; - bool hasMalloc_; + AscendMemoryPool() = default; + AscendMemoryPool(const AscendMemoryPool&) = delete; + AscendMemoryPool& operator=(const AscendMemoryPool&) = delete; + bool has_malloc_{false}; uint8_t* 
device_mem_pool_base_{nullptr}; uint64_t device_mem_pool_size_{0}; size_t free_mem_size_; @@ -62,4 +63,4 @@ class AscendMemoryAllocator : public DynamicMemPoolBestFit { } // namespace device } // namespace mindspore -#endif // MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_ALLOCATOR_H_ +#endif // MINDSPORE_CCSRC_DEVICE_ASCEND_ASCEND_MEMORY_POOL_H_ diff --git a/mindspore/ccsrc/device/device_address.h b/mindspore/ccsrc/device/device_address.h index cb022427e3..2d43963934 100644 --- a/mindspore/ccsrc/device/device_address.h +++ b/mindspore/ccsrc/device/device_address.h @@ -70,7 +70,7 @@ class DeviceAddress { size_t ref_count_{0}; string format_{"DefaultFormat"}; TypeId type_id_{kNumberTypeFloat16}; - bool mem_dynamic_alloc_{false}; + bool from_mem_pool_{false}; friend class KernelRuntime; friend class MemoryManager; friend class mindspore::device::ascend::tasksink::TaskGenerator; diff --git a/mindspore/ccsrc/device/gpu/gpu_device_address.cc b/mindspore/ccsrc/device/gpu/gpu_device_address.cc index 36391d27db..c27a1aa65b 100644 --- a/mindspore/ccsrc/device/gpu/gpu_device_address.cc +++ b/mindspore/ccsrc/device/gpu/gpu_device_address.cc @@ -46,7 +46,7 @@ GPUDeviceAddress::~GPUDeviceAddress() { } auto ms_context = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(ms_context); - if (mem_dynamic_alloc_) { + if (from_mem_pool_) { GPUMemoryAllocator::GetInstance().FreeTensorMem(ptr_); ptr_ = nullptr; } diff --git a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc index 597e188e9d..2ec1a5df29 100644 --- a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc +++ b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc @@ -227,7 +227,7 @@ void GPUKernelRuntime::AllocKernelDynamicRes(const mindspore::kernel::KernelMod MS_EXCEPTION_IF_NULL(device_address); auto device_ptr = device_address->ptr_; if (device_ptr == nullptr) { - device_ptr = mem_manager_->AllocTensorMemDynamic(output_sizes[i]); + device_ptr = 
mem_manager_->MallocMemFromMemPool(output_sizes[i]); MS_EXCEPTION_IF_NULL(device_ptr); device_address->ptr_ = device_ptr; } @@ -244,7 +244,7 @@ void GPUKernelRuntime::AllocKernelDynamicRes(const mindspore::kernel::KernelMod kernel_workspaces->emplace_back(nullptr); continue; } - auto device_ptr = mem_manager_->AllocTensorMemDynamic(workspace_sizes[i]); + auto device_ptr = mem_manager_->MallocMemFromMemPool(workspace_sizes[i]); MS_EXCEPTION_IF_NULL(device_ptr); kernel::AddressPtr workspace = std::make_shared(); MS_EXCEPTION_IF_NULL(workspace); @@ -292,7 +292,7 @@ void GPUKernelRuntime::AllocCommunicationOpInputDynamicRes(const mindspore::AnfN addr_size.emplace_back(device_address.get(), output_size); } - auto device_mem_ptr = mem_manager_->AllocTensorMemDynamic(total); + auto device_mem_ptr = mem_manager_->MallocMemFromMemPool(total); MS_EXCEPTION_IF_NULL(device_mem_ptr); for (const auto &iter : addr_size) { MS_EXCEPTION_IF_NULL(iter.first); @@ -328,7 +328,7 @@ void GPUKernelRuntime::AllocCommunicationOpOutputDynamicRes(const mindspore::Anf addr_size.emplace_back(device_address.get(), output_sizes[i]); } - auto device_mem_ptr = mem_manager_->AllocTensorMemDynamic(total); + auto device_mem_ptr = mem_manager_->MallocMemFromMemPool(total); MS_EXCEPTION_IF_NULL(device_mem_ptr); for (const auto &iter : addr_size) { MS_EXCEPTION_IF_NULL(iter.first); @@ -361,7 +361,7 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, i); MS_EXCEPTION_IF_NULL(device_address); MS_EXCEPTION_IF_NULL(device_address->ptr_); - mem_manager_->FreeTensorMemDynamic(device_address->ptr_); + mem_manager_->FreeMemFromMemPool(device_address->ptr_); device_address->ptr_ = nullptr; } } @@ -372,7 +372,7 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, auto workspace = kernel_workspaces[i]; if (workspace != nullptr) { MS_EXCEPTION_IF_NULL(workspace->addr); - 
mem_manager_->FreeTensorMemDynamic(workspace->addr); + mem_manager_->FreeMemFromMemPool(workspace->addr); workspace->addr = nullptr; } } @@ -389,7 +389,7 @@ void GPUKernelRuntime::FreeCommunicationOpDynamicRes(const mindspore::AnfNodePtr auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, 0); MS_EXCEPTION_IF_NULL(device_address); MS_EXCEPTION_IF_NULL(device_address->ptr_); - mem_manager_->FreeTensorMemDynamic(device_address->ptr_); + mem_manager_->FreeMemFromMemPool(device_address->ptr_); device_address->ptr_ = nullptr; } *is_communication_op = true; @@ -411,7 +411,7 @@ void GPUKernelRuntime::FreeCommunicationOpDynamicRes(const mindspore::AnfNodePtr auto device_address = AnfAlgo::GetMutableOutputAddr(kernel_input.first, 0); MS_EXCEPTION_IF_NULL(device_address); MS_EXCEPTION_IF_NULL(device_address->ptr_); - mem_manager_->FreeTensorMemDynamic(device_address->ptr_); + mem_manager_->FreeMemFromMemPool(device_address->ptr_); device_address->ptr_ = nullptr; } *is_communication_op = true; diff --git a/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc b/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc index 3944b504e4..7d042264b6 100644 --- a/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc +++ b/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc @@ -21,11 +21,11 @@ namespace mindspore { namespace device { namespace gpu { -void *GPUMemoryManager::AllocTensorMemDynamic(size_t size) { +void *GPUMemoryManager::MallocMemFromMemPool(size_t size) { return GPUMemoryAllocator::GetInstance().AllocTensorMem(size); } -void GPUMemoryManager::FreeTensorMemDynamic(void *device_ptr) { +void GPUMemoryManager::FreeMemFromMemPool(void *device_ptr) { GPUMemoryAllocator::GetInstance().FreeTensorMem(device_ptr); } @@ -34,7 +34,7 @@ void GPUMemoryManager::MallocDeviceMemory() { MS_EXCEPTION_IF_NULL(context_ptr); // If use the dynamic memory pool, then alloc the first memory block to init. 
if (context_ptr->enable_dynamic_mem_pool()) { - auto device_addr = AllocTensorMemDynamic(1); + auto device_addr = MallocMemFromMemPool(1); if (!device_addr) { MS_LOG(ERROR) << "Dynamic memory pool init error."; } @@ -62,7 +62,7 @@ uint8_t *GPUMemoryManager::MallocStaticMem(size_t size, bool) { auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); if (context_ptr->enable_dynamic_mem_pool()) { - auto device_ptr = AllocTensorMemDynamic(size); + auto device_ptr = MallocMemFromMemPool(size); MS_EXCEPTION_IF_NULL(device_ptr); return AddressOffset(device_ptr, 0); } diff --git a/mindspore/ccsrc/device/gpu/gpu_memory_manager.h b/mindspore/ccsrc/device/gpu/gpu_memory_manager.h index a18226bdf3..cc5dac2a5e 100644 --- a/mindspore/ccsrc/device/gpu/gpu_memory_manager.h +++ b/mindspore/ccsrc/device/gpu/gpu_memory_manager.h @@ -28,11 +28,11 @@ class GPUMemoryManager : public MemoryManager { void MallocDeviceMemory() override; void FreeDeviceMemory() override; - void *AllocTensorMemDynamic(size_t size) override; - void FreeTensorMemDynamic(void *device_ptr) override; + void *MallocMemFromMemPool(size_t size) override; + void FreeMemFromMemPool(void *device_ptr) override; protected: - uint8_t *MallocStaticMem(size_t size, bool communication_mem); + uint8_t *MallocStaticMem(size_t size, bool communication_mem) override; }; } // namespace gpu } // namespace device diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index 16025ed8a4..eebc650347 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -169,7 +169,7 @@ void KernelRuntime::RunOpAssignInputMemory(const std::vector auto device_address = CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id); MS_EXCEPTION_IF_NULL(device_address); - mem_manager_->MallocOpMemory(device_address, tensor_size); + mem_manager_->MallocMemFromMemPool(device_address, tensor_size); 
AnfAlgo::SetOutputAddr(device_address, index, item.get()); } } @@ -198,7 +198,7 @@ void KernelRuntime::RunOpAssignOutputMemory(const AnfNodePtr &kernel) { auto output_type = AnfAlgo::GetOutputDeviceDataType(kernel, i); auto device_address = CreateDeviceAddress(nullptr, output_sizes[i], output_format, output_type); MS_EXCEPTION_IF_NULL(device_address); - mem_manager_->MallocOpMemory(device_address, output_sizes[i]); + mem_manager_->MallocMemFromMemPool(device_address, output_sizes[i]); AnfAlgo::SetOutputAddr(device_address, i, kernel.get()); } } @@ -213,7 +213,7 @@ void KernelRuntime::RunOpAssignWorkSpaceMemory(const AnfNodePtr &kernel) { for (size_t i = 0; i < workspace_lists.size(); ++i) { auto device_address = CreateDeviceAddress(nullptr, workspace_lists[i], "", kTypeUnknown); MS_EXCEPTION_IF_NULL(device_address); - mem_manager_->MallocOpMemory(device_address, workspace_lists[i]); + mem_manager_->MallocMemFromMemPool(device_address, workspace_lists[i]); AnfAlgo::SetWorkspaceAddr(device_address, i, kernel.get()); } } @@ -457,7 +457,7 @@ void KernelRuntime::AssignDynamicMemory(session::KernelGraph *graph) { bool is_enable_mem_reuse = context_ptr->enable_mem_reuse(); auto mem_flag = kDynamicMem; if (is_enable_mem_reuse) { - mem_manager_->InitReuseDynamicMemory(graph); + mem_manager_->MallocReusedDynamicMem(graph); mem_flag = kReuseDynamicMem; } auto &kernels = graph->execution_order(); diff --git a/mindspore/ccsrc/device/kernel_runtime.h b/mindspore/ccsrc/device/kernel_runtime.h index 1224bf14eb..61b43fd5c0 100644 --- a/mindspore/ccsrc/device/kernel_runtime.h +++ b/mindspore/ccsrc/device/kernel_runtime.h @@ -33,7 +33,6 @@ #include "utils/context/ms_context.h" #include "device/memory_manager.h" -// using mindspore::session::KernelGraph; using mindspore::tensor::Tensor; using TensorPtr = std::shared_ptr; using mindspore::kernel::AddressPtr; diff --git a/mindspore/ccsrc/device/memory_manager.cc b/mindspore/ccsrc/device/memory_manager.cc index 3c1ddee6bc..6977628eb1 
100644 --- a/mindspore/ccsrc/device/memory_manager.cc +++ b/mindspore/ccsrc/device/memory_manager.cc @@ -21,12 +21,6 @@ using mindspore::memreuse::BestFitMemReuse; using mindspore::memreuse::MemReuseUtilPtr; namespace mindspore { namespace device { -MemoryManager::~MemoryManager() { - device_mem_base_ = nullptr; - device_mem_pool_base_ = nullptr; - mem_reuse_util_ptr_ = nullptr; -} - size_t MemoryManager::GetCommonAlignSize(size_t input_size) const { return (input_size + kMemAlignSize + 31) / kMemAlignSize * kMemAlignSize; } @@ -35,7 +29,7 @@ size_t MemoryManager::GetCommunicationAlignSize(size_t input_size) const { return (input_size + kMemAlignSize - 1) / kMemAlignSize * kMemAlignSize + 2 * kMemAlignSize; } -void MemoryManager::InitReuseDynamicMemory(session::KernelGraph *graph) { +void MemoryManager::MallocReusedDynamicMem(session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); MemReuseUtilPtr mem_reuse_util_ptr = std::make_shared(); MS_EXCEPTION_IF_NULL(mem_reuse_util_ptr); @@ -147,23 +141,23 @@ uint8_t *MemoryManager::MallocDynamicMem(size_t size, bool communication_mem) { } } -void MemoryManager::MallocOpMemory(const DeviceAddressPtr address, size_t size) { - auto device_ptr = AllocTensorMemDynamic(size); +void MemoryManager::MallocMemFromMemPool(const DeviceAddressPtr address, size_t size) { + auto device_ptr = MallocMemFromMemPool(size); MS_EXCEPTION_IF_NULL(device_ptr); address->ptr_ = device_ptr; - address->mem_dynamic_alloc_ = true; + address->from_mem_pool_ = true; } -void *MemoryManager::AllocTensorMemDynamic(size_t size) { +void *MemoryManager::MallocMemFromMemPool(size_t size) { if (size == 0) { - MS_LOG(ERROR) << "AllocTensorMemDynamic size is 0."; + MS_LOG(ERROR) << "MallocMemFromMemPool size is 0."; } return nullptr; } -void MemoryManager::FreeTensorMemDynamic(void *device_ptr) { +void MemoryManager::FreeMemFromMemPool(void *device_ptr) { if (device_ptr == nullptr) { - MS_LOG(ERROR) << "FreeTensorMemDynamic device_ptr is null."; + 
MS_LOG(ERROR) << "FreeMemFromMemPool device_ptr is null."; } } } // namespace device diff --git a/mindspore/ccsrc/device/memory_manager.h b/mindspore/ccsrc/device/memory_manager.h index 2e47237def..82c22f4548 100644 --- a/mindspore/ccsrc/device/memory_manager.h +++ b/mindspore/ccsrc/device/memory_manager.h @@ -31,7 +31,7 @@ using MemReuseUtilPtr = mindspore::memreuse::MemReuseUtilPtr; class MemoryManager { public: MemoryManager() = default; - virtual ~MemoryManager(); + virtual ~MemoryManager() = default; virtual void MallocDeviceMemory() = 0; virtual void FreeDeviceMemory() = 0; @@ -40,16 +40,15 @@ class MemoryManager { dynamic_mem_offset_ = 0; } - void InitReuseDynamicMemory(session::KernelGraph *graph); + void MallocReusedDynamicMem(session::KernelGraph *graph); uint8_t *MallocOutputMem(const AnfNodePtr &node, size_t index, int flag, size_t size); uint8_t *MallocWorkSpaceMem(const AnfNodePtr &node, size_t index, int flag, size_t size); virtual uint8_t *MallocMem(int flag, size_t size); - // Alloc memory use the dynamic memory pool. - virtual void *AllocTensorMemDynamic(size_t size); - // Free memory use the dynamic memory pool. 
- virtual void FreeTensorMemDynamic(void *device_ptr); - virtual void MallocOpMemory(const DeviceAddressPtr address, size_t size); + virtual void MallocMemFromMemPool(const DeviceAddressPtr address, size_t size); + virtual void *MallocMemFromMemPool(size_t size); + virtual void FreeMemFromMemPool(void *device_ptr); + size_t GetCommonAlignSize(size_t input_size) const; size_t GetCommunicationAlignSize(size_t input_size) const; @@ -57,9 +56,7 @@ class MemoryManager { virtual uint8_t *MallocStaticMem(size_t size, bool communication_mem); virtual uint8_t *MallocDynamicMem(size_t size, bool communication_mem); uint8_t *device_mem_base_{nullptr}; - uint8_t *device_mem_pool_base_{nullptr}; uint64_t device_mem_size_{0}; - uint64_t device_mem_pool_size_{0}; uint64_t dynamic_mem_offset_{0}; uint64_t static_mem_offset_{0}; size_t total_static_size_ = 0; diff --git a/tests/ut/cpp/CMakeLists.txt b/tests/ut/cpp/CMakeLists.txt index 3c1351a857..f5bc07ff69 100644 --- a/tests/ut/cpp/CMakeLists.txt +++ b/tests/ut/cpp/CMakeLists.txt @@ -95,7 +95,7 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "../../../mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc" "../../../mindspore/ccsrc/device/ascend/ascend_memory_manager.cc" "../../../mindspore/ccsrc/device/ascend/ascend_device_address.cc" - "../../../mindspore/ccsrc/device/ascend/ascend_memory_allocator.cc" + "../../../mindspore/ccsrc/device/ascend/ascend_memory_pool.cc" "../../../mindspore/ccsrc/predict/generator/utils/ir_model_util.cc" "../../../mindspore/ccsrc/predict/predict.cc" "../../../mindspore/ccsrc/predict/converter/*.cc" From 16296da5c786b5adb73a11c7d42825d91b49ccca Mon Sep 17 00:00:00 2001 From: zjun Date: Tue, 7 Apr 2020 21:52:28 +0800 Subject: [PATCH 143/367] add aicpu opinfo register --- .../ccsrc/kernel/aicpu/aicpu_kernel_build.cc | 116 +++++---- .../kernel/aicpu/aicpu_kernel_metadata.cc | 78 ++---- mindspore/ccsrc/kernel/aicpu/aicpu_util.h | 3 +- mindspore/ccsrc/kernel/common_utils.cc | 9 
+ mindspore/ccsrc/kernel/oplib/opinfo.h | 2 +- mindspore/ccsrc/kernel/oplib/oplib.cc | 21 +- mindspore/ops/__init__.py | 4 +- mindspore/ops/_op_impl/__init__.py | 1 + mindspore/ops/_op_impl/aicpu/__init__.py | 19 ++ .../ops/_op_impl/aicpu/dropout_genmask.py | 32 +++ mindspore/ops/_op_impl/aicpu/get_next.py | 39 +++ .../ops/_op_impl/aicpu/init_data_set_queue.py | 27 +++ mindspore/ops/_op_impl/aicpu/print_tensor.py | 39 +++ mindspore/ops/op_info_register.py | 225 +++++++++++------- 14 files changed, 409 insertions(+), 206 deletions(-) create mode 100644 mindspore/ops/_op_impl/aicpu/__init__.py create mode 100644 mindspore/ops/_op_impl/aicpu/dropout_genmask.py create mode 100644 mindspore/ops/_op_impl/aicpu/get_next.py create mode 100644 mindspore/ops/_op_impl/aicpu/init_data_set_queue.py create mode 100644 mindspore/ops/_op_impl/aicpu/print_tensor.py diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_build.cc b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_build.cc index c89e27c8ce..cf23779415 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_build.cc +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_build.cc @@ -39,45 +39,7 @@ namespace mindspore { namespace kernel { using FNodeAttrHandle = std::function &anf_node, mindspore::NodeDef *proto)>; -const std::vector local_framework_op_vec = {kInitDataSetQueue, kGetNext, kDropoutGenMask, kPrint}; - -void InitDataSetQueueAttr(const std::shared_ptr &anf_node, mindspore::NodeDef *proto) { - MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(proto); - - ::google::protobuf::Map<::std::string, ::mindspore::AttrValue> *node_attr = proto->mutable_attrs(); - MS_EXCEPTION_IF_NULL(node_attr); - std::string channel_name = AnfAlgo::GetNodeAttr(anf_node, kQueueName); - (*node_attr)[kChannelName].set_s(channel_name); -} - -void GetNextAttr(const std::shared_ptr &anf_node, mindspore::NodeDef *proto) { - MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(proto); - - ::google::protobuf::Map<::std::string, 
::mindspore::AttrValue> *node_attr = proto->mutable_attrs(); - MS_EXCEPTION_IF_NULL(node_attr); - std::string shared_name = AnfAlgo::GetNodeAttr(anf_node, kSharedName); - (*node_attr)[kChannelName].set_s(shared_name); -} - -void DropoutGenMaskAttr(const std::shared_ptr &anf_node, mindspore::NodeDef *proto) { - MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(proto); - - ::google::protobuf::Map<::std::string, ::mindspore::AttrValue> *node_attr = proto->mutable_attrs(); - MS_EXCEPTION_IF_NULL(node_attr); - int seed = AnfAlgo::GetNodeAttr(anf_node, kSeed); - int seed2 = AnfAlgo::GetNodeAttr(anf_node, kSeed2); - (*node_attr)["seed"].set_i(seed); - (*node_attr)["seed2"].set_i(seed2); -} - -void CreateAttrFuncMap(std::map *mOpAttrFuncMap) { - (void)mOpAttrFuncMap->emplace(std::pair(kInitDataSetQueue, InitDataSetQueueAttr)); - (void)mOpAttrFuncMap->emplace(std::pair(kGetNext, GetNextAttr)); - (void)mOpAttrFuncMap->emplace(std::pair(kDropoutGenMask, DropoutGenMaskAttr)); -} +const std::vector local_framework_op_vec = {kInitData, kGetNext, kDropoutGenMask, kPrint}; bool SetIOIputSize(const std::shared_ptr &anf_node, const size_t &input_num, std::vector *input_size_list) { @@ -147,24 +109,74 @@ bool SetIOSize(const std::shared_ptr &anf_node, const std::shared_ptr *node_attr) { + MS_EXCEPTION_IF_NULL(node_attr); + if (type == "int") { + auto attr_value = GetValue(value); + (*node_attr)[attr_name].set_i(attr_value); + } else if (type == "str") { + auto attr_value = GetValue(value); + (*node_attr)[attr_name].set_s(attr_value); + } else if (type == "bool") { + auto attr_value = GetValue(value); + (*node_attr)[attr_name].set_b(attr_value); + } else if (type == "float") { + auto attr_value = GetValue(value); + (*node_attr)[attr_name].set_f(attr_value); + } else if (type == "listInt") { + std::vector attr_value; + auto value_type = value->type(); + MS_EXCEPTION_IF_NULL(value_type); + auto value_type_str = value_type->ToString(); + if (value_type_str == "Int32") { + int data = 
GetValue(value); + attr_value.push_back(data); + } else { + attr_value = GetValue>(value); + } + mindspore::AttrValue input_shape_attr; + mindspore::AttrValue_ArrayValue *input_shape_attr_list = input_shape_attr.mutable_array(); + MS_EXCEPTION_IF_NULL(input_shape_attr_list); + for (const auto shape : attr_value) { + input_shape_attr_list->add_i(shape); + } + (*node_attr)[attr_name] = input_shape_attr; + } else { + MS_LOG(EXCEPTION) << "type: " << type << "not support"; + } +} + void SetNodeAttr(const std::shared_ptr &anf_node, mindspore::NodeDef *proto) { std::string op_name = AnfAlgo::GetCNodeName(anf_node); - if (op_name == "InitDataSetQueue") { - op_name = "InitData"; + if (op_name == kInitDataSetQueue) { + op_name = kInitData; } - if (op_name == "Print") { + if (op_name == kPrint) { return; } - std::map mOpAttrFuncMap; - CreateAttrFuncMap(&mOpAttrFuncMap); - FNodeAttrHandle func_ptr = nullptr; - auto iter = mOpAttrFuncMap.find(op_name); - if (iter != mOpAttrFuncMap.end()) { - func_ptr = iter->second; - MS_EXCEPTION_IF_NULL(func_ptr); - func_ptr(anf_node, proto); - } else { - MS_LOG(ERROR) << "Don't support node [" << op_name << "] to set nodedef of attr"; + + auto op_info_ptr = mindspore::kernel::OpLib::FindOp(op_name, OpImplyType::kAICPU); + MS_EXCEPTION_IF_NULL(op_info_ptr); + auto attrs_ptr = op_info_ptr->attrs_ptr(); + auto primitive = AnfAlgo::GetCNodePrimitive(anf_node); + MS_EXCEPTION_IF_NULL(primitive); + ::google::protobuf::Map<::std::string, ::mindspore::AttrValue> *node_attr = proto->mutable_attrs(); + for (const auto &attr_ptr : attrs_ptr) { + std::string attr_name = attr_ptr->name(); + std::string real_name; + auto value = primitive->GetAttr(attr_name); + if (value != nullptr) { + if (attr_name == kQueueName || attr_name == kSharedName) { + real_name = kChannelName; + } else if (attr_name == kSeed) { + real_name = "seed"; + } else if (attr_name == kSeed2) { + real_name = "seed2"; + } + std::string type = attr_ptr->type(); + ParseAttrValue(type, 
real_name, value, node_attr); + } } MS_LOG(INFO) << "Set node attr end!"; } diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc index ac0b0d9f7a..6675051069 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc @@ -17,68 +17,27 @@ #include "kernel/aicpu/aicpu_kernel_metadata.h" #include #include +#include "kernel/oplib/oplib.h" +#include "kernel/common_utils.h" +#include "kernel/aicpu/aicpu_util.h" #include "session/anf_runtime_algorithm.h" namespace mindspore { namespace kernel { -constexpr auto kInitDataSetQueueOpName = "InitDataSetQueue"; -constexpr auto kGetNext = "GetNext"; -constexpr auto kDropoutGenMask = "DropoutGenMask"; -constexpr auto kPrint = "Print"; -const std::vector AICPU_OPS = {kInitDataSetQueueOpName, kGetNext, kDropoutGenMask, kPrint}; - -std::shared_ptr CreateKernelInfo(const std::vector &inputs_format, - const std::vector &inputs_device_type, - const std::vector &outputs_format, - const std::vector &outputs_device_type) { - auto builder = KernelBuildInfo::KernelBuildInfoBuilder(); - builder.SetInputsFormat(inputs_format); - builder.SetInputsDeviceType(inputs_device_type); - builder.SetOutputsFormat(outputs_format); - builder.SetOutputsDeviceType(outputs_device_type); - builder.SetProcessor(AICPU); - builder.SetKernelType(AICPU_KERNEL); - builder.SetFusionType(OPAQUE); - return builder.Build(); -} - -bool CheckIfExistAicpuMeta(const std::string &op_name) { - if (std::find(AICPU_OPS.begin(), AICPU_OPS.end(), op_name) != AICPU_OPS.end()) { - return false; - } - return true; -} - void AicpuMetadataInfo(const CNodePtr &kernel_node, std::vector> *kernel_info_list) { MS_LOG(INFO) << "AicpuMetadataInfo."; MS_EXCEPTION_IF_NULL(kernel_node); MS_EXCEPTION_IF_NULL(kernel_info_list); std::string op_name = AnfAlgo::GetCNodeName(kernel_node); - if (CheckIfExistAicpuMeta(op_name)) { - MS_LOG(DEBUG) << "Aicpu doesn't 
have metadata of op [" << op_name << "]."; - return; - } - - if (op_name == kInitDataSetQueueOpName) { - kernel_info_list->push_back(CreateKernelInfo({}, {}, {}, {})); + if (op_name == kInitDataSetQueue) { + op_name = kInitData; } - - if (op_name == kGetNext) { - std::vector outputs_format; - std::vector outputs_type; - for (size_t output_index = 0; output_index < AnfAlgo::GetOutputTensorNum(kernel_node); ++output_index) { - outputs_format.emplace_back(kOpFormat_DEFAULT); - outputs_type.push_back(AnfAlgo::GetOutputInferDataType(kernel_node, output_index)); - } - kernel_info_list->push_back(CreateKernelInfo({}, {}, outputs_format, outputs_type)); - } - - if (op_name == kDropoutGenMask) { - kernel_info_list->push_back(CreateKernelInfo({kOpFormat_NCHW, kOpFormat_NCHW}, - {kInt32->type_id(), kFloat16->type_id()}, {kOpFormat_NCHW}, - {kUInt8->type_id()})); + auto op_info_ptr = mindspore::kernel::OpLib::FindOp(op_name, OpImplyType::kAICPU); + if (op_info_ptr == nullptr) { + MS_LOG(WARNING) << "Aicpu doestn't have metadata of op [" << op_name << "]"; + return; } - + // For compatibility with the current framework if (op_name == kPrint) { std::vector inputs_format; std::vector inputs_type; @@ -92,11 +51,20 @@ void AicpuMetadataInfo(const CNodePtr &kernel_node, std::vectorpush_back(CreateKernelInfo(inputs_format, inputs_type, outputs_format, outputs_type)); + auto builder = KernelBuildInfo::KernelBuildInfoBuilder(); + builder.SetInputsFormat(inputs_format); + builder.SetInputsDeviceType(inputs_type); + builder.SetOutputsFormat(outputs_format); + builder.SetOutputsDeviceType(outputs_type); + builder.SetProcessor(AICPU); + builder.SetKernelType(AICPU_KERNEL); + builder.SetFusionType(OPAQUE); + kernel_info_list->push_back(builder.Build()); + return; } - - if (kernel_info_list->empty()) { - MS_LOG(INFO) << "Aicpu dose not has metadata of op[ " << op_name << "]."; + if (!ParseMetadata(kernel_node, op_info_ptr, AICPU, kernel_info_list)) { + MS_LOG(WARNING) << "Aicpu parsed 
metadata op [" << op_name << "] failed"; + return; } } } // namespace kernel diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_util.h b/mindspore/ccsrc/kernel/aicpu/aicpu_util.h index f521418f6b..08fca16a3b 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_util.h +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_util.h @@ -24,7 +24,8 @@ namespace mindspore { namespace kernel { -constexpr auto kInitDataSetQueue = "InitData"; +constexpr auto kInitDataSetQueue = "InitDataSetQueue"; +constexpr auto kInitData = "InitData"; constexpr auto kGetNext = "GetNext"; constexpr auto kDropoutGenMask = "DropoutGenMask"; constexpr auto kPrint = "Print"; diff --git a/mindspore/ccsrc/kernel/common_utils.cc b/mindspore/ccsrc/kernel/common_utils.cc index c2f2638753..137ae65414 100644 --- a/mindspore/ccsrc/kernel/common_utils.cc +++ b/mindspore/ccsrc/kernel/common_utils.cc @@ -417,6 +417,8 @@ void SetKernelBuildInfo(const std::shared_ptrSetKernelType(AUTO_DIFF_KERNEL); + } else if (imply_type == kAICPU) { + builder->SetKernelType(AICPU_KERNEL); } else { builder->SetKernelType(TBE_KERNEL); } @@ -471,6 +473,13 @@ bool ParseMetadata(const CNodePtr &kernel_node, const std::shared_ptrpush_back(builder->Build()); + } + } else { + if (processor == AICPU) { + auto builder = std::make_shared(); + MS_EXCEPTION_IF_NULL(builder); + SetKernelBuildInfo(builder, processor, op_info_ptr); kernel_info_list->push_back(builder->Build()); } } diff --git a/mindspore/ccsrc/kernel/oplib/opinfo.h b/mindspore/ccsrc/kernel/oplib/opinfo.h index 56abea9269..215df21776 100644 --- a/mindspore/ccsrc/kernel/oplib/opinfo.h +++ b/mindspore/ccsrc/kernel/oplib/opinfo.h @@ -24,7 +24,7 @@ namespace mindspore { namespace kernel { -enum OpImplyType { kAKG = 0, kTBE }; +enum OpImplyType { kAKG = 0, kTBE = 1, kAICPU }; enum OpIOType { kInput = 0, kOutput }; class OpAttr { diff --git a/mindspore/ccsrc/kernel/oplib/oplib.cc b/mindspore/ccsrc/kernel/oplib/oplib.cc index 4059b8e246..d2464bce47 100644 --- a/mindspore/ccsrc/kernel/oplib/oplib.cc 
+++ b/mindspore/ccsrc/kernel/oplib/oplib.cc @@ -39,6 +39,7 @@ constexpr auto kDtypeFormat = "dtype_format"; constexpr auto kAttr = "attr"; constexpr auto kIputs = "inputs"; constexpr auto kOutputs = "outputs"; +constexpr auto kAiCPU = "AiCPU"; constexpr auto kTbe = "TBE"; constexpr auto kAkg = "akg"; constexpr auto kAutodiff = "AutoDiff"; @@ -60,6 +61,8 @@ std::string ImplTypeToStr(OpImplyType impl_type) { return kTbe; case kAKG: return kAkg; + case kAICPU: + return kAiCPU; default: return "unknow"; } @@ -76,6 +79,9 @@ bool OpLib::RegOp(const std::string& json_string, const std::string& impl_path) } else if (imply_type_string == kAutodiff) { OpImplyType imply_type = kAKG; ret = DecodeOpInfo(op_json, imply_type, impl_path); + } else if (imply_type_string == kAiCPU) { + OpImplyType imply_type = kAICPU; + ret = DecodeOpInfo(op_json, imply_type, impl_path); } else { MS_LOG(DEBUG) << "Not support imply_type"; } @@ -154,7 +160,9 @@ bool OpLib::DecodeAttr(const nlohmann::json& obj, const OpImplyType imply_type, std::shared_ptr op_attr = std::make_shared(); MS_EXCEPTION_IF_NULL(op_attr); op_attr->set_name(obj.at(kName)); - op_attr->set_param_type(obj.at(kParamType)); + if (imply_type != kAICPU) { + op_attr->set_param_type(obj.at(kParamType)); + } op_attr->set_type(obj.at(kType)); if (imply_type == kTBE) { op_attr->set_value(obj.at(kValue)); @@ -242,9 +250,10 @@ std::shared_ptr OpLib::FindOp(const std::string& op_name, OpImplyType im auto context = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context); bool is_gpu = (context->device_target() == kGPUDevice); - if ((is_gpu && imply_type == kTBE) || (!is_gpu && imply_type != kTBE)) { - MS_LOG(DEBUG) << "FindOp failed: opname:" << op_name << "imply_type:" << ImplTypeToStr(imply_type) - << "current op num:" << op_info_.size(); + if ((is_gpu && (imply_type == kTBE || imply_type == kAICPU)) || + (!is_gpu && (imply_type != kTBE && imply_type != kAICPU))) { + MS_LOG(ERROR) << "FindOp failed: opname:" << op_name << ", imply_type:" 
<< ImplTypeToStr(imply_type) + << ", current op num:" << op_info_.size(); return nullptr; } for (const auto& op_info : op_info_) { @@ -253,8 +262,8 @@ std::shared_ptr OpLib::FindOp(const std::string& op_name, OpImplyType im return op_info; } } - MS_LOG(DEBUG) << "FindOp failed: opname:" << op_name << "imply_type:" << ImplTypeToStr(imply_type) - << "current op num:" << op_info_.size(); + MS_LOG(DEBUG) << "FindOp failed: opname:" << op_name << ", imply_type:" << ImplTypeToStr(imply_type) + << ", current op num:" << op_info_.size(); return nullptr; } diff --git a/mindspore/ops/__init__.py b/mindspore/ops/__init__.py index 6f4f680672..0e6c114566 100644 --- a/mindspore/ops/__init__.py +++ b/mindspore/ops/__init__.py @@ -30,7 +30,7 @@ Note: from .primitive import Primitive, PrimitiveWithInfer, prim_attr_register from .vm_impl_registry import get_vm_impl_fn, vm_impl_registry -from .op_info_register import op_info_register, TBERegOp, DataType +from .op_info_register import op_info_register, AiCPURegOp, TBERegOp, DataType from .primitive import constexpr from .._c_expression import signature_rw, signature_kind @@ -40,6 +40,6 @@ __primitive__ = [ ] __all__ = ["get_vm_impl_fn", "vm_impl_registry", - "op_info_register", "TBERegOp", "DataType", + "op_info_register", "AiCPURegOp", "TBERegOp", "DataType", "constexpr"] __all__.extend(__primitive__) diff --git a/mindspore/ops/_op_impl/__init__.py b/mindspore/ops/_op_impl/__init__.py index b8370cc64e..76444881cc 100644 --- a/mindspore/ops/_op_impl/__init__.py +++ b/mindspore/ops/_op_impl/__init__.py @@ -16,5 +16,6 @@ from .akg.gpu import * from .tbe import * +from .aicpu import * __all__ = [] diff --git a/mindspore/ops/_op_impl/aicpu/__init__.py b/mindspore/ops/_op_impl/aicpu/__init__.py new file mode 100644 index 0000000000..b0f90a629b --- /dev/null +++ b/mindspore/ops/_op_impl/aicpu/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""aicpu ops""" +from .init_data_set_queue import _init_data_set_queue_aicpu +from .dropout_genmask import _dropout_genmask_aicpu +from .get_next import _get_next_aicpu +from .print_tensor import _print_aicpu diff --git a/mindspore/ops/_op_impl/aicpu/dropout_genmask.py b/mindspore/ops/_op_impl/aicpu/dropout_genmask.py new file mode 100644 index 0000000000..96707a5010 --- /dev/null +++ b/mindspore/ops/_op_impl/aicpu/dropout_genmask.py @@ -0,0 +1,32 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""InitDataSetQueue op""" +from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType + +dropout_genmask_op_info = AiCPURegOp("DropoutGenMask") \ + .fusion_type("OPAQUE") \ + .input(0, "x1", "required") \ + .input(1, "x2", "required") \ + .output(0, "y", "required") \ + .attr("Seed0", "int") \ + .attr("Seed1", "int") \ + .dtype_format(DataType.I32_NCHW, DataType.F16_NCHW, DataType.U8_NCHW) \ + .get_op_info() + +@op_info_register(dropout_genmask_op_info) +def _dropout_genmask_aicpu(): + """Dropout AiCPU register""" + return diff --git a/mindspore/ops/_op_impl/aicpu/get_next.py b/mindspore/ops/_op_impl/aicpu/get_next.py new file mode 100644 index 0000000000..ce32014211 --- /dev/null +++ b/mindspore/ops/_op_impl/aicpu/get_next.py @@ -0,0 +1,39 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""InitDataSetQueue op""" +from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType + +get_next_op_info = AiCPURegOp("GetNext") \ + .fusion_type("OPAQUE") \ + .output(0, "y", "dynamic") \ + .attr("shared_name", "str") \ + .dtype_format(DataType.BOOL_Default) \ + .dtype_format(DataType.I8_Default) \ + .dtype_format(DataType.I16_Default) \ + .dtype_format(DataType.I32_Default) \ + .dtype_format(DataType.I64_Default) \ + .dtype_format(DataType.F16_Default) \ + .dtype_format(DataType.U8_Default) \ + .dtype_format(DataType.U16_Default) \ + .dtype_format(DataType.U32_Default) \ + .dtype_format(DataType.U64_Default) \ + .dtype_format(DataType.F32_Default) \ + .get_op_info() + +@op_info_register(get_next_op_info) +def _get_next_aicpu(): + """GetNext AiCPU register""" + return diff --git a/mindspore/ops/_op_impl/aicpu/init_data_set_queue.py b/mindspore/ops/_op_impl/aicpu/init_data_set_queue.py new file mode 100644 index 0000000000..a48e01eced --- /dev/null +++ b/mindspore/ops/_op_impl/aicpu/init_data_set_queue.py @@ -0,0 +1,27 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""InitDataSetQueue op""" +from mindspore.ops.op_info_register import op_info_register, AiCPURegOp + +init_data_set_queue_op_info = AiCPURegOp("InitData") \ + .fusion_type("OPAQUE") \ + .attr("queue_name", "str") \ + .get_op_info() + +@op_info_register(init_data_set_queue_op_info) +def _init_data_set_queue_aicpu(): + """InitDataSetQueue AiCPU register""" + return diff --git a/mindspore/ops/_op_impl/aicpu/print_tensor.py b/mindspore/ops/_op_impl/aicpu/print_tensor.py new file mode 100644 index 0000000000..011f4a3d9d --- /dev/null +++ b/mindspore/ops/_op_impl/aicpu/print_tensor.py @@ -0,0 +1,39 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""InitDataSetQueue op""" +from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType + +print_op_info = AiCPURegOp("Print") \ + .fusion_type("OPAQUE") \ + .input(0, "x", "dynamic") \ + .output(0, "y", "required") \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I16_Default, DataType.I16_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U16_Default, DataType.U16_Default) \ + .dtype_format(DataType.U32_Default, DataType.U32_Default) \ + .dtype_format(DataType.U64_Default, DataType.U64_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + +@op_info_register(print_op_info) +def _print_aicpu(): + """Print AiCPU register""" + return diff --git a/mindspore/ops/op_info_register.py b/mindspore/ops/op_info_register.py index 6a42099c89..0750094e18 100644 --- a/mindspore/ops/op_info_register.py +++ b/mindspore/ops/op_info_register.py @@ -78,14 +78,15 @@ class RegOp(): self.inputs = [] self.outputs = [] self.attr_ = [] + self.fusion_type_ = '' self.dtype_format_ = [] - def is_string(self, value): + def _is_string(self, value): """ Check if the value is a str type. Args: - value: Parameter to to check. + value: Parameter to be checked. Raises: TypeError: If the type of value is not a str. @@ -93,12 +94,12 @@ class RegOp(): if not isinstance(value, str): raise TypeError("%s value must be str" % str(value)) - def is_int(self, value): + def _is_int(self, value): """ Check if the value is a int. Args: - value: Parameter to to check. + value: Parameter to be checked. 
Raises: TypeError: If the type of value is not a int. @@ -106,12 +107,12 @@ class RegOp(): if not isinstance(value, int): raise TypeError("%s value must be int" % str(value)) - def is_bool(self, value): + def _is_bool(self, value): """ Check if the value is a bool. Args: - value: Parameter to to check. + value: Parameter to be checked. Raises: TypeError: If the type of value is not a bool. @@ -119,6 +120,51 @@ class RegOp(): if not isinstance(value, bool): raise TypeError("%s value must be bool" % str(value)) + def _check_param(self, param_list, key_list, fn_list, kwargs): + """ + Check if the parameter type is correct. + + Args: + param_list (list): Parameter list to be checked. + key_list (list): The keys of output dict. + fn_list (list): Function used for parameter checking. If the function list has only one element, + all parameters will use the same function. + kwargs (dict): Other parameter information. + + Raises: + TypeError: If the type of value is not list. + ValueError: If the size of param list is not equal to the size of key list, or + the size of param list is not equal to the size of funtion list. + """ + for i in [param_list, key_list, fn_list]: + if not isinstance(i, list): + raise TypeError("%s value must be list type" % str(i)) + if len(param_list) != len(key_list) or (len(fn_list) != 1 and len(param_list) != len(fn_list)): + raise ValueError("param_list size {}, key_list size {}, must be equal.And fn_list size {}.". + format(len(param_list), len(key_list), len(fn_list))) + out_dict = {} + for idx, element in enumerate(param_list): + if element is not None: + if len(fn_list) == 1: + fn_list[0](element) + else: + fn_list[idx](element) + out_dict[key_list[idx]] = element + if kwargs: + out_dict = dict(out_dict, kwargs) + return out_dict + + def fusion_type(self, fusion_type): + """ + Register fusion type. + + Args: + fusion_type (str): Value of fusion type. 
+ """ + self._is_string(fusion_type) + self.fusion_type_ = fusion_type + return self + def dtype_format(self, *args): """ Register dtype and format. @@ -136,8 +182,8 @@ class RegOp(): for arg in args: if not isinstance(arg, tuple) or len(arg) != 2: raise ValueError("dtype and format value must be tuple of two elements") - self.is_string(arg[0]) - self.is_string(arg[1]) + self._is_string(arg[0]) + self._is_string(arg[1]) dtype_format.append(arg) self.dtype_format_.append(tuple(dtype_format)) return self @@ -159,13 +205,71 @@ class RegOp(): return op_info +class AiCPURegOp(RegOp): + """Class for AiCPU op info register""" + + def __init__(self, op_name): + super(AiCPURegOp, self).__init__(op_name) + self.imply_type = "AiCPU" + + def input(self, index=None, name=None, param_type=None, **kwargs): + """ + Register AiCPU op input information. + + Args: + index (int): Order of the input. Default: None. + name (str): Name of the input. Default: None. + param_type (str): Param type of the input. Default: None. + kwargs (dict): Other information for the input. + """ + param_list = [index, name, param_type] + key_list = ["index", "name", "param_type"] + fn_list = [self._is_int, self._is_string, self._is_string] + input_dict = self._check_param(param_list, key_list, fn_list, kwargs) + self.inputs.append(input_dict) + return self + + def output(self, index=None, name=None, param_type=None, **kwargs): + """ + Register AiCPU op output information. + + Args: + index (int): Order of the output. Default: None. + name (str): Name of the output. Default: None. + param_type (str): Param type of the output. Default: None. + kwargs (dict): Other information for the output. 
+ """ + param_list = [index, name, param_type] + key_list = ["index", "name", "param_type"] + fn_list = [self._is_int, self._is_string, self._is_string] + output_dict = self._check_param(param_list, key_list, fn_list, kwargs) + self.outputs.append(output_dict) + return self + + def attr(self, name=None, value_type=None, value=None, **kwargs): + """ + Register AiCPU op attribute information. + + Args: + name (str): Name of the attribute. Default: None. + value_type (str): Value type of the attribute. Default: None. + value (str): Value type of the attribute. Default: None. + kwargs (dict): Other information for the attribute. + """ + param_list = [name, value_type, value] + key_list = ["name", "type", "value"] + fn_list = [self._is_string] + attr_dict = self._check_param(param_list, key_list, fn_list, kwargs) + self.attr_.append(attr_dict) + return self + + class TBERegOp(RegOp): """Class for TBE op info register.""" def __init__(self, op_name=""): super(TBERegOp, self).__init__(op_name) self.imply_type = "TBE" - self.fusion_type_ = '' self.async_flag_ = False self.binfile_name_ = '' self.compute_cost_ = 10 @@ -175,17 +279,6 @@ class TBERegOp(RegOp): self.dynamic_format_ = False self.op_pattern_ = "" - def fusion_type(self, fusion_type): - """ - Register fusion type. - - Args: - fusion_type (str): Value of fusion type. - """ - self.is_string(fusion_type) - self.fusion_type_ = fusion_type - return self - def async_flag(self, async_flag): """ Register async flag. @@ -193,7 +286,7 @@ class TBERegOp(RegOp): Args: async_flag (bool): Value of async flag. """ - self.is_bool(async_flag) + self._is_bool(async_flag) self.async_flag_ = async_flag return self @@ -204,7 +297,7 @@ class TBERegOp(RegOp): Args: binfile_name (str): Name of op binfile. """ - self.is_string(binfile_name) + self._is_string(binfile_name) self.binfile_name_ = binfile_name return self @@ -215,7 +308,7 @@ class TBERegOp(RegOp): Args: compute_cost (int): Value of compute cost. 
""" - self.is_int(compute_cost) + self._is_int(compute_cost) self.compute_cost_ = compute_cost return self @@ -226,7 +319,7 @@ class TBERegOp(RegOp): Args: kernel_name (str): Name of op kernel. """ - self.is_string(kernel_name) + self._is_string(kernel_name) self.kernel_name_ = kernel_name return self @@ -237,7 +330,7 @@ class TBERegOp(RegOp): Args: partial_flag (bool): Value of partial flag. """ - self.is_bool(partial_flag) + self._is_bool(partial_flag) self.partial_flag_ = partial_flag return self @@ -248,7 +341,7 @@ class TBERegOp(RegOp): Args: reshape_type (str): Value of reshape type. """ - self.is_string(reshape_type) + self._is_string(reshape_type) self.reshape_type_ = reshape_type return self @@ -259,56 +352,43 @@ class TBERegOp(RegOp): Args: reshape_type (bool): Value of dynamic format. """ - self.is_bool(dynamic_format) + self._is_bool(dynamic_format) self.dynamic_format_ = dynamic_format return self def op_pattern(self, pattern=None): """ - Register op pattern information. + Register TBE op pattern information. Args: pattern (str): Value of op pattern. """ - if pattern is not None and self.istring(pattern): + if pattern is not None and self._is_string(pattern): self.op_pattern_ = pattern return self def attr(self, name=None, param_type=None, value_type=None, value=None, default_value=None, **kwargs): """ - Register op attribute information. + Register TBE op attribute information. Args: name (str): Name of the attribute. Default: None. param_type (str): Param type of the attribute. Default: None. - type (str): Type of the attribute. Default: None. + value_type (str): Type of the attribute. Default: None. value (str): Value of the attribute. Default: None. default_value (str): Default value of attribute. Default: None. kwargs (dict): Other information for the attribute. 
""" param_list = [name, param_type, value_type, value, default_value] - attr_dict = {} - for index, element in enumerate(param_list): - if element is not None: - self.is_string(element) - if index == 0: - attr_dict["name"] = element - elif index == 1: - attr_dict["param_type"] = element - elif index == 2: - attr_dict["type"] = element - elif index == 3: - attr_dict["value"] = element - elif index == 4: - attr_dict["default_value"] = element - if kwargs: - attr_dict = dict(attr_dict, **kwargs) + key_list = ["name", "param_type", "type", "value", "default_value"] + fn_list = [self._is_string] + attr_dict = self._check_param(param_list, key_list, fn_list, kwargs) self.attr_.append(attr_dict) return self def input(self, index=None, name=None, need_compile=None, param_type=None, shape=None, **kwargs): """ - Register op input information. + Register TBE op input information. Args: index (int): Order of the input. Default: None. @@ -319,32 +399,15 @@ class TBERegOp(RegOp): kwargs (dict): Other information for the input. 
""" param_list = [index, name, need_compile, param_type, shape] - input_dict = {} - for idx, element in enumerate(param_list): - if element is not None: - if idx == 0: - self.is_int(element) - input_dict["index"] = element - elif idx == 1: - self.is_string(element) - input_dict["name"] = element - elif idx == 2: - self.is_bool(element) - input_dict["need_compile"] = element - elif idx == 3: - self.is_string(element) - input_dict["param_type"] = element - elif idx == 4: - self.is_string(element) - input_dict["shape"] = element - if kwargs: - input_dict = dict(input_dict, **kwargs) + key_list = ["index", "name", "need_compile", "param_type", "shape"] + fn_list = [self._is_int, self._is_string, self._is_bool, self._is_string, self._is_string] + input_dict = self._check_param(param_list, key_list, fn_list, kwargs) self.inputs.append(input_dict) return self def output(self, index=None, name=None, need_compile=None, param_type=None, shape=None, **kwargs): """ - Register op output information. + Register TBE op output information. Args: index (int): Order of the output. Default: None. @@ -355,29 +418,13 @@ class TBERegOp(RegOp): kwargs (dict): Other information for the output. 
""" param_list = [index, name, need_compile, param_type, shape] - output_dict = {} - for idx, element in enumerate(param_list): - if element is not None: - if idx == 0: - self.is_int(element) - output_dict["index"] = element - elif idx == 1: - self.is_string(element) - output_dict["name"] = element - elif idx == 2: - self.is_bool(element) - output_dict["need_compile"] = element - elif idx == 3: - self.is_string(element) - output_dict["param_type"] = element - elif idx == 4: - self.is_string(element) - output_dict["shape"] = element - if kwargs: - output_dict = dict(output_dict, **kwargs) + key_list = ["index", "name", "need_compile", "param_type", "shape"] + fn_list = [self._is_int, self._is_string, self._is_bool, self._is_string, self._is_string] + output_dict = self._check_param(param_list, key_list, fn_list, kwargs) self.outputs.append(output_dict) return self + class DataType(): """ Various combinations of dtype and formatself. From 6fdcc245856446e575c743edc488531d014e6358 Mon Sep 17 00:00:00 2001 From: yao_yf Date: Thu, 9 Apr 2020 15:02:33 +0800 Subject: [PATCH 144/367] Integrate two allreduce fusion set interfaces into one --- mindspore/parallel/__init__.py | 4 +--- mindspore/parallel/_auto_parallel_context.py | 14 ++++++++++---- ...allreduce_fusion.py => _dp_allreduce_fusion.py} | 4 ++-- 3 files changed, 13 insertions(+), 9 deletions(-) rename mindspore/parallel/{dp_allreduce_fusion.py => _dp_allreduce_fusion.py} (94%) diff --git a/mindspore/parallel/__init__.py b/mindspore/parallel/__init__.py index c79704f110..79d8e67a8d 100644 --- a/mindspore/parallel/__init__.py +++ b/mindspore/parallel/__init__.py @@ -15,9 +15,7 @@ """ This interface is ONLY used in Auto-parallel procedure. 
""" -from .dp_allreduce_fusion import set_fusion_strategy_by_idx, set_fusion_strategy_by_size from .algo_parameter_config import get_algo_parameters, reset_algo_parameters, \ set_algo_parameters -__all__ = ["set_fusion_strategy_by_idx", "set_fusion_strategy_by_size", "get_algo_parameters", - "reset_algo_parameters", "set_algo_parameters"] +__all__ = ["get_algo_parameters", "reset_algo_parameters", "set_algo_parameters"] diff --git a/mindspore/parallel/_auto_parallel_context.py b/mindspore/parallel/_auto_parallel_context.py index 3564ad4395..c99ac4a3c7 100644 --- a/mindspore/parallel/_auto_parallel_context.py +++ b/mindspore/parallel/_auto_parallel_context.py @@ -14,6 +14,8 @@ # ============================================================================ """Context of auto parallel""" import threading +import mindspore.context as context +from mindspore.parallel._dp_allreduce_fusion import _set_fusion_strategy_by_idx, _set_fusion_strategy_by_size from mindspore._c_expression import AutoParallelContext from mindspore._extends.pynative_helper import args_type_check @@ -219,13 +221,15 @@ class _AutoParallelContext: indices (list): Indices list. Raises: - ValueError: If type of indices item is not int. + TypeError: If type of indices item is not int. """ self.check_context_handle() for index in indices: if not isinstance(index, int): raise TypeError('indices has invalid value') - return self._context_handle.set_all_reduce_fusion_split_indices(indices) + self._context_handle.set_all_reduce_fusion_split_indices(indices) + if context.get_context("device_target") == "Ascend": + _set_fusion_strategy_by_idx(indices) def get_all_reduce_fusion_split_indices(self): """Get allreduce fusion split indices.""" @@ -240,13 +244,15 @@ class _AutoParallelContext: sizes (list): Sizes list. Raises: - ValueError: If type of sizes item is not int. + TypeError: If type of sizes item is not int. 
""" self.check_context_handle() for size in sizes: if not isinstance(size, int): raise TypeError('sizes has invalid value') - return self._context_handle.set_all_reduce_fusion_split_sizes(sizes) + self._context_handle.set_all_reduce_fusion_split_sizes(sizes) + if context.get_context("device_target") == "Ascend": + _set_fusion_strategy_by_size(sizes) def get_all_reduce_fusion_split_sizes(self): """Get allreduce fusion split sizes.""" diff --git a/mindspore/parallel/dp_allreduce_fusion.py b/mindspore/parallel/_dp_allreduce_fusion.py similarity index 94% rename from mindspore/parallel/dp_allreduce_fusion.py rename to mindspore/parallel/_dp_allreduce_fusion.py index 979823bd80..3c7039dbd6 100644 --- a/mindspore/parallel/dp_allreduce_fusion.py +++ b/mindspore/parallel/_dp_allreduce_fusion.py @@ -43,7 +43,7 @@ def _c_array(ctype, values): return (ctype * len(values))(*values) -def set_fusion_strategy_by_idx(idxList, group="hccl_world_group"): +def _set_fusion_strategy_by_idx(idxList, group="hccl_world_group"): """ A function set gradient segment strategy according to the index list. @@ -100,7 +100,7 @@ def set_fusion_strategy_by_idx(idxList, group="hccl_world_group"): raise RuntimeError('Allreduce split error') -def set_fusion_strategy_by_size(dataSizeList, group="hccl_world_group"): +def _set_fusion_strategy_by_size(dataSizeList, group="hccl_world_group"): """ A function set gradient segment strategy according to the data size percentage list. From ead50a21700a0204589ac23e58bf0caf5b7498c2 Mon Sep 17 00:00:00 2001 From: seatea Date: Thu, 9 Apr 2020 18:24:21 +0800 Subject: [PATCH 145/367] Define the default decay_filter for `Adam` optimizer. 
--- mindspore/nn/optim/adam.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mindspore/nn/optim/adam.py b/mindspore/nn/optim/adam.py index 86ce2b2147..521510fa58 100755 --- a/mindspore/nn/optim/adam.py +++ b/mindspore/nn/optim/adam.py @@ -166,7 +166,8 @@ class Adam(Optimizer): """ def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, - use_nesterov=False, weight_decay=0.0, loss_scale=1.0): + use_nesterov=False, weight_decay=0.0, loss_scale=1.0, + decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name): super(Adam, self).__init__(learning_rate, params) _check_param_value(beta1, beta2, eps, weight_decay) validator.check_type("use_locking", use_locking, [bool]) @@ -192,6 +193,7 @@ class Adam(Optimizer): self.moment1 = self.parameters.clone(prefix="moment1", init='zeros') self.moment2 = self.parameters.clone(prefix="moment2", init='zeros') + self.decay_tf = tuple(decay_filter(x) for x in self.parameters) self.hyper_map = C.HyperMap() self.opt = P.Adam(use_locking, use_nesterov) self.weight_decay = weight_decay * loss_scale From 0f0f83e292ff9dbacf51aaf1a1d10787ff06dabc Mon Sep 17 00:00:00 2001 From: liuxiao Date: Thu, 9 Apr 2020 14:37:00 +0800 Subject: [PATCH 146/367] modified api name Stack -> Pack, Unstack -> Unpack --- mindspore/ccsrc/transform/convert.cc | 8 +-- mindspore/ops/_grad/grad_array_ops.py | 20 ++++---- mindspore/ops/operations/__init__.py | 6 +-- mindspore/ops/operations/array_ops.py | 70 ++++++++++++--------------- tests/ut/python/ops/test_ops.py | 32 ++++++------ 5 files changed, 64 insertions(+), 72 deletions(-) diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index c9a27a2607..c53367a20f 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -148,8 +148,8 @@ const char kNameSlice[] = "Slice"; const char kNameAddN[] = "AddN"; const char kNameLess[] = "Less"; const char kNameGreater[] = 
"Greater"; -const char kNameStack[] = "Stack"; -const char kNameUnstack[] = "Unstack"; +const char kNamePack[] = "Pack"; +const char kNameUnpack[] = "Unpack"; const char kNameMerge[] = "Merge"; const char kNameGeSwitch[] = "GeSwitch"; @@ -202,8 +202,8 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameAvgPool), ADPT_DESC(AvgPool)}, {string(kNameMaxPoolWithArgmax), ADPT_DESC(MaxPoolWithArgmax)}, {string(kNameTopK), ADPT_DESC(TopKV2)}, - {string(kNameStack), ADPT_DESC(Pack)}, - {string(kNameUnstack), ADPT_DESC(Unpack)}, + {string(kNamePack), ADPT_DESC(Pack)}, + {string(kNameUnpack), ADPT_DESC(Unpack)}, {string(kNameSplitD), ADPT_DESC(SplitD)}, {string(kNameAllReduce), ADPT_DESC(HcomAllReduce)}, {string(kNameBroadcast), ADPT_DESC(HcomBroadcast)}, diff --git a/mindspore/ops/_grad/grad_array_ops.py b/mindspore/ops/_grad/grad_array_ops.py index 0a0caf471e..abad030ae9 100644 --- a/mindspore/ops/_grad/grad_array_ops.py +++ b/mindspore/ops/_grad/grad_array_ops.py @@ -266,26 +266,26 @@ def get_bprop_gather_v2(self): return bprop -@bprop_getters.register(P.Stack) -def get_bprop_stack(self): - """Generate bprop for Stack""" +@bprop_getters.register(P.Pack) +def get_bprop_pack(self): + """Generate bprop for Pack""" axis = self.axis def bprop(x, out, dout): - stack_grad = P.Unstack(axis) - out = stack_grad(dout) + pack_grad = P.Unpack(axis) + out = pack_grad(dout) return (out,) return bprop -@bprop_getters.register(P.Unstack) -def get_bprop_unstack(self): - """Generate bprop for Unstack""" +@bprop_getters.register(P.Unpack) +def get_bprop_unpack(self): + """Generate bprop for Unpack""" axis = self.axis def bprop(x, out, dout): - unstack_grad = P.Stack(axis) - out = unstack_grad(dout) + unpack_grad = P.Pack(axis) + out = unpack_grad(dout) return (out,) return bprop diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 7a8655b46c..45cd856298 100644 --- a/mindspore/ops/operations/__init__.py +++ 
b/mindspore/ops/operations/__init__.py @@ -19,7 +19,7 @@ Primitive operator classes. A collection of operators to build nerual networks or computing functions. """ -from .array_ops import (Argmax, Argmin, Cast, ConcatOffset, Concat, Stack, Unstack, +from .array_ops import (Argmax, Argmin, Cast, ConcatOffset, Concat, Pack, Unpack, Diag, DiagPart, DType, ExpandDims, Eye, Fill, GatherNd, GatherV2, InvertPermutation, IsInstance, IsSubClass, ArgMaxWithValue, OnesLike, ZerosLike, @@ -112,8 +112,8 @@ __all__ = [ 'OneHot', 'GatherV2', 'Concat', - 'Stack', - 'Unstack', + 'Pack', + 'Unpack', 'Tile', 'BiasAdd', 'Gelu', diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index dda490566f..ac7f8ed699 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -1350,8 +1350,8 @@ class Concat(PrimitiveWithInfer): return out -def _get_stack_shape(x_shape, x_type, axis): - """for satck output shape""" +def _get_pack_shape(x_shape, x_type, axis): + """for pack output shape""" validator.check_type("shape", x_shape, [tuple]) validator.check_integer("len of input_x shape", len(x_shape), 0, Rel.GT) validator.check_subclass("shape0", x_type[0], mstype.tensor) @@ -1368,43 +1368,40 @@ def _get_stack_shape(x_shape, x_type, axis): validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0]) for j in range(rank_base): if v[j] != x_shape[0][j]: - raise ValueError("Stack evaluator element %d shape in input can not stack with first element" % i) + raise ValueError("Pack evaluator element %d shape in input can not pack with first element" % i) out_shape.insert(axis, N) return out_shape -class Stack(PrimitiveWithInfer): +class Pack(PrimitiveWithInfer): r""" - Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor. + Packs a list of tensors in specified axis. - Packs the list of tensors in `input_x` into a tensor with rank one higher than - each tensor in `input_x`, by packing them along the `axis` dimension. 
- Given a list of length `N` of tensors of shape `(A, B, C)`; + Packs the list of input tensors with the same rank `R`, output is a tensor of rank `(R+1)`. - If `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. - - If `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. Etc. + Given input tensors of shape :math:`(x_1, x_2, ..., x_R)`. Set the number of input tensors as `N`. + If :math:`0 \le axis`, the output tensor shape is :math:`(x_1, x_2, ..., x_{axis}, N, x_{axis+1}, ..., x_R)`. Args: - axis (int): The axis to stack along. Negative values wrap around, - so the valid range is [-(R+1), R+1). Default: 0. + axis (int): Dimension along which to pack. Default: 0. + Negative values wrap around. The range is [-(R+1), R+1). Inputs: - **input_x** (Union[tuple, list]) - A Tuple or list of Tensor objects with the same shape and type. Outputs: - Tensor. A stacked Tensor with the same type as values. + Tensor. A packed Tensor with the same type as `input_x`. Examples: >>> data1 = Tensor(np.array([0, 1]).astype(np.float32)) >>> data2 = Tensor(np.array([2, 3]).astype(np.float32)) - >>> op = P.Stack() - >>> output = op([data1, data2]) + >>> pack = P.Pack() + >>> output = pack([data1, data2]) [[0, 1], [2, 3]] """ @prim_attr_register def __init__(self, axis=0): - """init Stack""" + """init Pack""" self.__setattr_flag__ = True validator.check_type("axis", axis, [int]) self.axis = axis @@ -1413,38 +1410,33 @@ class Stack(PrimitiveWithInfer): x_shape = value['shape'] x_type = value['dtype'] self.add_prim_attr('num', len(x_shape)) - all_shape = _get_stack_shape(x_shape, x_type, self.axis) + all_shape = _get_pack_shape(x_shape, x_type, self.axis) out = {'shape': all_shape, 'dtype': x_type[0], 'value': None} return out -class Unstack(PrimitiveWithInfer): +class Unpack(PrimitiveWithInfer): r""" - Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors. 
- - Unpacks num tensors from value by chipping it along the axis dimension. - If num is not specified (the default), it is inferred from value's shape. - If value.shape[axis] is not known, ValueError is raised. + Unpacks tensor in specified axis. - For example, given a tensor of shape (A, B, C, D); + Unpacks a tensor of rank `R` along axis dimension, output tensors will have rank `(R-1)`. - If axis == 0 then the i'th tensor in output is the slice value[i, :, :, :] and - each tensor in output will have shape (B, C, D). (Note that the dimension unpacked along is gone, unlike split). + Given a tensor of shape :math:`(x_1, x_2, ..., x_R)`. If :math:`0 \le axis`, + the shape of tensor in output is :math:`(x_1, x_2, ..., x_{axis}, x_{axis+2}, ..., x_R)`. - If axis == 1 then the i'th tensor in output is the slice value[:, i, :, :] and - each tensor in output will have shape (A, C, D). Etc. - - This is the opposite of stack. + This is the opposite of pack. Args: - axis (int): The axis to unstack along. Defaults to the first dimension. - Negative values wrap around, so the valid range is [-R, R). + axis (int): Dimension along which to pack. Default: 0. + Negative values wrap around. The range is [-R, R). + num (int): The number of tensors to be unpacked to. Default : "None". + If `num` is not specified, it is inferred from the shape of `input_x`. Inputs: - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`. - A rank R > 0 Tensor to be unstacked. + A rank R > 0 Tensor to be unpacked. Outputs: A tuple of Tensors, the shape of each objects is same. @@ -1454,15 +1446,15 @@ class Unstack(PrimitiveWithInfer): or if len(input_x.shape[axis]) not equal to num. 
Examples: - >>> unstack = P.Unstack() - >>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]])) - >>> output = unstack(x) + >>> unpack = P.Unpack() + >>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]])) + >>> output = unpack(input_x) ([1, 1, 1, 1], [2, 2, 2, 2]) """ @prim_attr_register def __init__(self, axis=0): - """init Unstack""" + """init Unpack""" self.__setattr_flag__ = True validator.check_type("axis", axis, [int]) self.axis = axis @@ -1479,7 +1471,7 @@ class Unstack(PrimitiveWithInfer): validator.check_integer("output_num", output_num, 0, Rel.GT) self.add_prim_attr('num', output_num) output_valid_check = x_shape[self.axis] - output_num - validator.check_integer("the dimension which to unstack divides output_num", output_valid_check, 0, Rel.EQ) + validator.check_integer("The dimension which to unpack divides output_num", output_valid_check, 0, Rel.EQ) out_shapes = [] out_dtypes = [] out_shape = x_shape[:self.axis] + x_shape[self.axis + 1:] diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index a650309d98..a6b064bdb0 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -80,9 +80,9 @@ class NetForConcat1(nn.Cell): return self.concat((x1, x2)) -class NetForStackInput(nn.Cell): +class NetForPackInput(nn.Cell): def __init__(self, op): - super(NetForStackInput, self).__init__() + super(NetForPackInput, self).__init__() self.op = op self.mul = P.Mul() @@ -93,9 +93,9 @@ class NetForStackInput(nn.Cell): return self.op(t) -class NetForUnstackInput(nn.Cell): +class NetForUnpackInput(nn.Cell): def __init__(self, op): - super(NetForUnstackInput, self).__init__() + super(NetForUnpackInput, self).__init__() self.op = op self.mul = P.Mul() @@ -991,33 +991,33 @@ test_case_array_ops = [ Tensor(np.array([1], np.float32)), Tensor(np.array([1], np.float32)))], 'desc_bprop': [[3,]]}), - ('StackV2_0', { - 'block': NetForStackInput(P.Stack()), + ('Pack_0', { + 'block': NetForPackInput(P.Pack()), 
'desc_inputs':[[2, 2], [2, 2], [2, 2]], 'desc_bprop':[[3, 2, 2]], }), - ('StackV2_1', { - 'block': NetForStackInput(P.Stack(axis=-2)), + ('Pack_1', { + 'block': NetForPackInput(P.Pack(axis=-2)), 'desc_inputs':[[3, 2, 3], [3, 2, 3], [3, 2, 3]], 'desc_bprop':[[3, 2, 3, 3]], }), - ('StackV2_2', { - 'block': NetForStackInput(P.Stack()), + ('Pack_2', { + 'block': NetForPackInput(P.Pack()), 'desc_inputs':[[2, 2]], 'desc_bprop':[[2, 2, 2]], }), - ('StackV2_3', { - 'block': NetForStackInput(P.Stack()), + ('Pack_3', { + 'block': NetForPackInput(P.Pack()), 'desc_inputs':[[128, 128], [128, 128]], 'desc_bprop':[[2, 128, 128]], }), - ('UnstackV2_0', { - 'block': NetForUnstackInput(P.Unstack(axis=0)), + ('Unpack_0', { + 'block': NetForUnpackInput(P.Unpack(axis=0)), 'desc_inputs':[[2, 4]], 'desc_bprop':[[4], [4]], }), - ('UnstackV2_1', { - 'block': NetForUnstackInput(P.Unstack(axis=-1)), + ('Unpack_1', { + 'block': NetForUnpackInput(P.Unpack(axis=-1)), 'desc_inputs':[Tensor(np.array([[1, 1, 1]], np.float32))], 'desc_bprop':[[1], [1], [1]], }), From 576f65ad489f99eb271bd36acfa1b322b18431a7 Mon Sep 17 00:00:00 2001 From: yanghaoran Date: Thu, 9 Apr 2020 19:07:01 +0800 Subject: [PATCH 147/367] add custom environment variable ASCEND_CUSTOM_PATH for customized Ascend software installation --- cmake/dependency_graphengine.cmake | 6 +++++- graphengine | 2 +- mindspore/ccsrc/CMakeLists.txt | 12 ++++++++++-- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/cmake/dependency_graphengine.cmake b/cmake/dependency_graphengine.cmake index 2420f47736..2a90cc1458 100644 --- a/cmake/dependency_graphengine.cmake +++ b/cmake/dependency_graphengine.cmake @@ -39,7 +39,11 @@ elseif (DEFINED ENV{D_LINK_PATH}) find_library(resource libresource.so ${GE_LIB_PATH}) else() # Ascend mode - set(ASCEND_PATH /usr/local/Ascend) + if(DEFINED ENV{ASCEND_CUSTOM_PATH}) + set(ASCEND_PATH $ENV{ASCEND_CUSTOM_PATH}) + else() + set(ASCEND_PATH /usr/local/Ascend) + endif() set(ASCEND_DRIVER_PATH 
${ASCEND_PATH}/driver/lib64/common) set(ASCEND_RUNTIME_PATH ${ASCEND_PATH}/fwkacllib/lib64) find_library(c_sec libc_sec.so ${ASCEND_DRIVER_PATH}) diff --git a/graphengine b/graphengine index 5369646b48..5bd0dc1ed5 160000 --- a/graphengine +++ b/graphengine @@ -1 +1 @@ -Subproject commit 5369646b489114b380a7b5208ddd6e632acb447f +Subproject commit 5bd0dc1ed59a9ec4ea6a602bf1385c59d845f922 diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 1d104148c3..c49c962bdd 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -296,7 +296,11 @@ if(ENABLE_D) endif() else() MESSAGE("use system default lib") - set(ASCEND_PATH /usr/local/Ascend) + if(DEFINED ENV{ASCEND_CUSTOM_PATH}) + set(ASCEND_PATH $ENV{ASCEND_CUSTOM_PATH}) + else() + set(ASCEND_PATH /usr/local/Ascend) + endif() set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common) set(ASCEND_RUNTIME_PATH ${ASCEND_PATH}/fwkacllib/lib64) endif() @@ -500,7 +504,11 @@ add_dependencies(add_ms_lib _c_expression) if (NOT ENABLE_GE) if (ENABLE_D) - set(ASCEND_PATH /usr/local/Ascend) + if(DEFINED ENV{ASCEND_CUSTOM_PATH}) + set(ASCEND_PATH $ENV{ASCEND_CUSTOM_PATH}) + else() + set(ASCEND_PATH /usr/local/Ascend) + endif() set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common) add_custom_target(add_ge_lib ALL COMMAND cp ${MS_CCSRC_BUILD_PATH}/../../graphengine/src/common/graph/libgraph.so ${MS_LIB_PATH} From 363632ca9d2fa88278a235c477ae6395ae23a18f Mon Sep 17 00:00:00 2001 From: Yanjun Peng Date: Thu, 9 Apr 2020 11:04:13 +0800 Subject: [PATCH 148/367] fix dataset para validator check --- mindspore/dataset/engine/samplers.py | 1 - mindspore/dataset/engine/validators.py | 5 +++++ mindspore/dataset/transforms/vision/validators.py | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/mindspore/dataset/engine/samplers.py b/mindspore/dataset/engine/samplers.py index ed36e72b65..62a3dbed18 100644 --- a/mindspore/dataset/engine/samplers.py +++ 
b/mindspore/dataset/engine/samplers.py @@ -127,7 +127,6 @@ class RandomSampler(): Raises: ValueError: If replacement is not boolean. - ValueError: If num_samples is not None and replacement is false. ValueError: If num_samples is not positive. """ diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index 26d6241945..b5ebc24b39 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -556,6 +556,11 @@ def check_generatordataset(method): if column_names is None: raise ValueError("column_names is not provided.") + # check prefetch_size range + prefetch_size = param_dict.get('prefetch_size') + if prefetch_size is not None and (prefetch_size <= 0 or prefetch_size > 1024): + raise ValueError("prefetch_size exceeds the boundary.") + check_param_type(nreq_param_int, param_dict, int) check_param_type(nreq_param_list, param_dict, list) diff --git a/mindspore/dataset/transforms/vision/validators.py b/mindspore/dataset/transforms/vision/validators.py index caab120af4..ef4b879f8c 100644 --- a/mindspore/dataset/transforms/vision/validators.py +++ b/mindspore/dataset/transforms/vision/validators.py @@ -104,6 +104,10 @@ def check_padding(padding): raise ValueError("The size of the padding list or tuple should be 2 or 4.") else: raise TypeError("Padding can be any of: a number, a tuple or list of size 2 or 4.") + if not (isinstance(left, int) and isinstance(top, int) and isinstance(right, int) and isinstance(bottom, int)): + raise TypeError("Padding value should be integer.") + if left < 0 or top < 0 or right < 0 or bottom < 0: + raise ValueError("Padding value could not be negative.") return left, top, right, bottom From b413638f2328288ce3b693ed161dd99f210f58d9 Mon Sep 17 00:00:00 2001 From: c00425699 Date: Thu, 9 Apr 2020 14:40:43 +0800 Subject: [PATCH 149/367] refactor OperatorCostPtr in OperatorInfo --- .../auto_parallel/operator_costmodel.cc | 54 ------------------- 
.../auto_parallel/operator_costmodel.h | 30 ++--------- .../ccsrc/parallel/ops_info/activation_info.h | 18 ++----- .../ccsrc/parallel/ops_info/arithmetic_info.h | 6 +-- .../parallel/ops_info/batch_parallel_info.h | 6 +-- .../ccsrc/parallel/ops_info/bias_add_info.h | 6 +-- .../parallel/ops_info/dropout_do_mask_info.h | 8 +-- .../ccsrc/parallel/ops_info/generator_info.h | 6 +-- .../ccsrc/parallel/ops_info/get_next_info.h | 6 +-- .../parallel/ops_info/l2_normalize_info.h | 6 +-- mindspore/ccsrc/parallel/ops_info/loss_info.h | 6 +-- .../ccsrc/parallel/ops_info/matmul_info.cc | 6 +-- .../ccsrc/parallel/ops_info/matmul_info.h | 7 +-- .../ccsrc/parallel/ops_info/onehot_info.h | 6 +-- .../ccsrc/parallel/ops_info/operator_info.cc | 13 +++-- .../ccsrc/parallel/ops_info/operator_info.h | 13 +++-- .../ccsrc/parallel/ops_info/prelu_info.h | 6 +-- .../parallel/ops_info/reduce_method_info.cc | 8 ++- .../parallel/ops_info/reduce_method_info.h | 8 +-- .../ccsrc/parallel/ops_info/reshape_info.h | 8 +-- .../parallel/ops_info/tmp_identity_info.h | 8 +-- .../ccsrc/parallel/ops_info/transpose_info.h | 6 +-- .../parallel/ops_info/virtual_dataset_info.h | 8 +-- .../cpp/parallel/ops_info/activation_test.cc | 8 +-- .../cpp/parallel/ops_info/matmul_info_test.cc | 4 +- .../parallel/ops_info/tensor_add_info_test.cc | 8 +-- .../cpp/parallel/ops_info/tmpidentity_test.cc | 4 +- 27 files changed, 62 insertions(+), 211 deletions(-) diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc index 7c17b499b1..93d7dc56c5 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc @@ -514,60 +514,6 @@ double ArithmeticCost::GetBackwardCommCost(const std::vector& inputs return result; } -double L2NormalizeCost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { - double result = 0.0; - if 
(is_parameter_[0]) { - TensorInfo input_tensor_info = inputs[0]; - CheckGlobalDeviceManager(); - MS_EXCEPTION_IF_NULL(g_device_manager); - auto total_device_num = g_device_manager->GetDeviceListByStageId(stage_id).size(); - - Shape input_shape = input_tensor_info.shape(); - Shape input_slice_shape = input_tensor_info.slice_shape(); - int32_t used_device_num = 1; - for (size_t i = 0; i < input_shape.size(); ++i) { - used_device_num *= input_shape[i] / input_slice_shape[i]; - } - - if (total_device_num != IntToSize(used_device_num)) - result += ListProduct(input_slice_shape) * static_cast(inputs_type_lengths_[0]); - } - - return result; -} - -double L2NormalizeCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { - TensorInfo input0_info = inputs[0]; - Shape input0_slice_shape = input0_info.slice_shape(); - return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); -} - -double L2NormalizeCost::GetBackwardComputationCost(const std::vector& inputs, - const std::vector&, const int32_t& stage_id) const { - double result = 0.0; - - if (is_parameter_[0]) { - TensorInfo input_tensor_info = inputs[0]; - CheckGlobalDeviceManager(); - MS_EXCEPTION_IF_NULL(g_device_manager); - auto total_device_num = g_device_manager->GetDeviceListByStageId(stage_id).size(); - - Shape input_shape = input_tensor_info.shape(); - Shape input_slice_shape = input_tensor_info.slice_shape(); - int32_t used_device_num = 1; - for (size_t i = 0; i < input_shape.size(); ++i) { - used_device_num *= input_shape[i] / input_slice_shape[i]; - } - - if (total_device_num != IntToSize(used_device_num)) - result += ListProduct(input_slice_shape) * static_cast(inputs_type_lengths_[0]); - } - - return result; -} - bool IsDataParallel(const Shape& shape, const Shape& slice_shape, const int32_t& stage_id) { CheckGlobalDeviceManager(); MS_EXCEPTION_IF_NULL(g_device_manager); diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h 
b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h index 8f0099bba3..73f3ff139f 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h @@ -132,6 +132,8 @@ class ActivationCost : public OperatorCost { }; using ActivationCostPtr = std::shared_ptr; +using TransposeCost = ActivationCost; +using TransposeCostPtr = std::shared_ptr; class SoftmaxCost : public OperatorCost { public: @@ -415,32 +417,8 @@ class ArithmeticCost : public OperatorCost { const int32_t& stage_id) const override; }; using ArithmeticCostPtr = std::shared_ptr; - -class L2NormalizeCost : public OperatorCost { - public: - L2NormalizeCost() = default; - ~L2NormalizeCost() override = default; - - double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); - } - double GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { - return 0.0; - } - double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { - return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); - } - double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; - double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; -}; -using L2NormalizeCostPtr = std::shared_ptr; +using BiasAddCost = ArithmeticCost; +using BiasAddCostPtr = std::shared_ptr; class ReduceMethodCost : public OperatorCost { public: diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.h 
b/mindspore/ccsrc/parallel/ops_info/activation_info.h index 183b593e23..21774c43ee 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.h +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.h @@ -32,8 +32,8 @@ namespace parallel { class ActivationBase : public OperatorInfo { public: ActivationBase(const std::string& operator_name, const Shapes& inputs_shape, const Shapes& outputs_shape, - const PrimitiveAttrs& attrs) - : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs) {} + const PrimitiveAttrs& attrs, OperatorCostPtr cost) + : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, cost) {} ~ActivationBase() override = default; Status Init(const StrategyPtr& strategy) override; @@ -51,19 +51,13 @@ class Activation : public ActivationBase { public: Activation(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ActivationBase(name, inputs_shape, outputs_shape, attrs) { - ac_cost_ptr_ = std::make_shared(); - } + : ActivationBase(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~Activation() override = default; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return ac_cost_ptr_; } protected: Status CheckStrategy(const StrategyPtr& strategy) override; - - private: - ActivationCostPtr ac_cost_ptr_; }; class ActivationInfo : public Activation { @@ -108,13 +102,10 @@ class Softmax : public ActivationBase { public: explicit Softmax(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ActivationBase(name, inputs_shape, outputs_shape, attrs) { - sm_cost_ptr_ = std::make_shared(); - } + : ActivationBase(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~Softmax() override = default; Status GenerateStrategies(int32_t stage_id) override; Status 
SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return sm_cost_ptr_; } protected: Status CheckStrategy(const StrategyPtr& strategy) override; @@ -122,7 +113,6 @@ class Softmax : public ActivationBase { private: std::vector axis_; - SoftmaxCostPtr sm_cost_ptr_; }; class SoftmaxInfo : public Softmax { diff --git a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h index 7cd0d66b1b..daa2ad595c 100644 --- a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h +++ b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h @@ -33,15 +33,12 @@ class ArithmeticBase : public OperatorInfo { public: ArithmeticBase(const std::string& operator_name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs) { - arithmeticcost_ptr_ = std::make_shared(); - } + : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~ArithmeticBase() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t) override; Status SetCostUnderStrategy(const StrategyPtr&) override; - OperatorCostPtr GetOperatorCost() const override { return arithmeticcost_ptr_; } void ReComputeBatchSplitFlagList() override; protected: @@ -54,7 +51,6 @@ class ArithmeticBase : public OperatorInfo { Status InferTensorMap() override; Status InferTensorLayout(TensorLayouts* inputs_layout, TensorLayouts* outputs_layout, const Shape& dev_matrix_array); Shapes InferExpendShape(); - ArithmeticCostPtr arithmeticcost_ptr_; }; class SubInfo : public ArithmeticBase { diff --git a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h index 57711b5298..fae96dcab5 100644 --- a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h 
+++ b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h @@ -31,16 +31,13 @@ class BatchParallelInfo : public OperatorInfo { public: BatchParallelInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs), dev_num_(1) { - bp_cost_ptr_ = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()), dev_num_(1) {} ~BatchParallelInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return bp_cost_ptr_; } protected: Status CheckStrategy(const StrategyPtr& strategy) override; @@ -55,7 +52,6 @@ class BatchParallelInfo : public OperatorInfo { private: int32_t dev_num_; - BatchParallelCostPtr bp_cost_ptr_; }; class SparseSoftmaxCrossEntropyWithLogitsInfo : public BatchParallelInfo { diff --git a/mindspore/ccsrc/parallel/ops_info/bias_add_info.h b/mindspore/ccsrc/parallel/ops_info/bias_add_info.h index 07f0bc00ff..dea5c90c88 100644 --- a/mindspore/ccsrc/parallel/ops_info/bias_add_info.h +++ b/mindspore/ccsrc/parallel/ops_info/bias_add_info.h @@ -34,16 +34,13 @@ class BiasAddInfo : public OperatorInfo { public: BiasAddInfo(const std::string& operator_name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs) { - biasaddcost_ptr_ = std::make_shared(); - } + : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~BiasAddInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t) override; Status 
SetCostUnderStrategy(const StrategyPtr&) override; - OperatorCostPtr GetOperatorCost() const override { return biasaddcost_ptr_; } void ReComputeBatchSplitFlagList() override; protected: @@ -55,7 +52,6 @@ class BiasAddInfo : public OperatorInfo { Status InferDevMatrixShape() override; Status InferTensorMap() override; Status InferTensorLayout(TensorLayouts* inputs_layout, TensorLayouts* outputs_layout, const Shape& dev_matrix_array); - ArithmeticCostPtr biasaddcost_ptr_; }; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h index e43601355a..859b3e06a4 100644 --- a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h +++ b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h @@ -33,15 +33,12 @@ class DropoutDoMaskInfo : public OperatorInfo { public: DropoutDoMaskInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs) { - bpcost_ptr_ = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~DropoutDoMaskInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return bpcost_ptr_; } Status InitForCostModel(const StrategyPtr& strategy) override; std::shared_ptr>> GenerateBatchStrategies() override; @@ -53,9 +50,6 @@ class DropoutDoMaskInfo : public OperatorInfo { Status GetAttrs() override { return SUCCESS; } Status InferTensorInfo() override; Status InferDevMatrixShape() override; - - private: - BatchParallelCostPtr bpcost_ptr_; }; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/generator_info.h 
b/mindspore/ccsrc/parallel/ops_info/generator_info.h index a280fac28e..68024593f3 100644 --- a/mindspore/ccsrc/parallel/ops_info/generator_info.h +++ b/mindspore/ccsrc/parallel/ops_info/generator_info.h @@ -32,15 +32,12 @@ class GeneratorBase : public OperatorInfo { public: GeneratorBase(const std::string &operator_name, const Shapes &inputs_shape, const Shapes &outputs_shape, const PrimitiveAttrs &attrs) - : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs) { - generatorbasecost_ptr_ = std::make_shared(); - } + : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~GeneratorBase() override = default; Status Init(const StrategyPtr &strategy) override; Status SetCostUnderStrategy(const StrategyPtr &strategy) override; - OperatorCostPtr GetOperatorCost() const override { return generatorbasecost_ptr_; } Status InitForCostModel(const StrategyPtr &strategy) override; protected: @@ -52,7 +49,6 @@ class GeneratorBase : public OperatorInfo { Status InferMirrorOps() override { return SUCCESS; } Status InferForwardCommunication() override { return SUCCESS; } virtual Status InferReplaceOps(const StrategyPtr &strategy) = 0; - GeneratorBaseCostPtr generatorbasecost_ptr_; }; class DropoutGenMaskInfo : public GeneratorBase { diff --git a/mindspore/ccsrc/parallel/ops_info/get_next_info.h b/mindspore/ccsrc/parallel/ops_info/get_next_info.h index 32adce1165..9a65eff035 100644 --- a/mindspore/ccsrc/parallel/ops_info/get_next_info.h +++ b/mindspore/ccsrc/parallel/ops_info/get_next_info.h @@ -32,14 +32,11 @@ class GetNextInfo : public OperatorInfo { public: GetNextInfo(const std::string &operator_name, const Shapes &inputs_shape, const Shapes &outputs_shape, const PrimitiveAttrs &attrs) - : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs) { - getnextcost_ptr_ = std::make_shared(); - } + : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~GetNextInfo() override = default; Status 
Init(const StrategyPtr &strategy) override; Status SetCostUnderStrategy(const StrategyPtr &strategy) override; - OperatorCostPtr GetOperatorCost() const override { return getnextcost_ptr_; } Status InitForCostModel(const StrategyPtr &strategy) override; Status GenerateStrategies(int32_t stage_id) override; @@ -65,7 +62,6 @@ class GetNextInfo : public OperatorInfo { Shapes shapes_; int32_t output_num_ = 0; std::string shared_name_; - GetNextCostPtr getnextcost_ptr_; }; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h b/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h index c0af9dbcb9..22ed5a965b 100644 --- a/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h +++ b/mindspore/ccsrc/parallel/ops_info/l2_normalize_info.h @@ -33,12 +33,9 @@ class L2NormalizeInfo : public Activation { public: L2NormalizeInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : Activation(name, inputs_shape, outputs_shape, attrs) { - l2normalizecost_ptr_ = std::make_shared(); - } + : Activation(name, inputs_shape, outputs_shape, attrs) {} ~L2NormalizeInfo() override = default; Status GenerateStrategies(int32_t stage_id) override; - OperatorCostPtr GetOperatorCost() const override { return l2normalizecost_ptr_; } protected: Status GetAttrs() override; @@ -47,7 +44,6 @@ class L2NormalizeInfo : public Activation { private: int32_t axis_ = 0; // Default value = 0 - L2NormalizeCostPtr l2normalizecost_ptr_; }; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/loss_info.h b/mindspore/ccsrc/parallel/ops_info/loss_info.h index 6a9697a447..f1c2537a39 100644 --- a/mindspore/ccsrc/parallel/ops_info/loss_info.h +++ b/mindspore/ccsrc/parallel/ops_info/loss_info.h @@ -36,16 +36,13 @@ class SoftmaxCrossEntropyWithLogitsInfo : public OperatorInfo { public: SoftmaxCrossEntropyWithLogitsInfo(const std::string& name, const 
Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs) { - softmax_loss_cost_ptr_ = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~SoftmaxCrossEntropyWithLogitsInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return softmax_loss_cost_ptr_; } void ReComputeBatchSplitFlagList() override; protected: @@ -59,7 +56,6 @@ class SoftmaxCrossEntropyWithLogitsInfo : public OperatorInfo { // There are two outputs for SoftmaxCrossEntropyWithLogits, and outputs[1] is used for grad and overload // the InferAsLossDivisor. Status InferAsLossDivisor() override; - SoftmaxCrossEntropyWithLogitsCostPtr softmax_loss_cost_ptr_; private: int32_t axis_ = -1; // default -1 diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc index 2b02dc100d..848116d68a 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc @@ -593,11 +593,11 @@ Status MatMulBase::SetCostUnderStrategy(const mindspore::parallel::StrategyPtr& // Here, we use the origin outputs_, because we only use the slice size of the output tensor. // It does not matter whether the output tensor is transposed or not. 
double computation_cost = - matmulcost_ptr->GetForwardComputationCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); - double communication_cost = matmulcost_ptr->GetCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); + cost()->GetForwardComputationCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); + double communication_cost = cost()->GetCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); std::shared_ptr result = std::make_shared(computation_cost, communication_cost); result->communication_without_parameter_ = - matmulcost_ptr->GetForwardCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); + cost()->GetForwardCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); result->communication_with_partial_para_ = result->communication_without_parameter_ + COST_MODEL_GAMMA * (communication_cost - result->communication_without_parameter_); diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.h b/mindspore/ccsrc/parallel/ops_info/matmul_info.h index 7ced12b14a..2d3312774d 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.h +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.h @@ -34,9 +34,7 @@ class MatMulBase : public OperatorInfo { public: MatMulBase(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs) { - matmulcost_ptr = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~MatMulBase() override = default; Status Init(const StrategyPtr& strategy) override; @@ -48,7 +46,6 @@ class MatMulBase : public OperatorInfo { Status PrepareStrategy(int32_t stage_id, size_t dev_num, Dimensions combined_partitions, size_t input0_shape_size, size_t input1_shape_size, StrategyPtr* sp); - OperatorCostPtr GetOperatorCost() const override { return matmulcost_ptr; } Status SwapLastTwoElements(Shape* shape); 
protected: @@ -66,8 +63,6 @@ class MatMulBase : public OperatorInfo { bool transpose_b_ = false; size_t mat_a_dimension_ = 0; size_t mat_b_dimension_ = 0; - - MatMulCostPtr matmulcost_ptr; }; class MatMul : public MatMulBase { diff --git a/mindspore/ccsrc/parallel/ops_info/onehot_info.h b/mindspore/ccsrc/parallel/ops_info/onehot_info.h index 4697e201a4..a54d8479b3 100644 --- a/mindspore/ccsrc/parallel/ops_info/onehot_info.h +++ b/mindspore/ccsrc/parallel/ops_info/onehot_info.h @@ -33,16 +33,13 @@ class OneHotInfo : public OperatorInfo { public: OneHotInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs) { - onehot_cost_ptr_ = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~OneHotInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return onehot_cost_ptr_; } ReplaceGraphPtr replace_graph(const CNodePtr& cnode) override; std::shared_ptr>> GenerateBatchStrategies() override; @@ -60,7 +57,6 @@ class OneHotInfo : public OperatorInfo { Status ComputeReplaceGraph(const CNodePtr& cnode); int axis_ = -1; - OneHotCostPtr onehot_cost_ptr_; int32_t rank_ = 0; int32_t total_class_number_ = 1; int32_t classes_each_device_ = 1; diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc index 11c518d844..a24f3e616b 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc @@ -1034,12 +1034,11 @@ Status OperatorInfo::SetCostUnderStrategyBase(const StrategyPtr& strategy) { return FAILED; } int32_t stage_id = 
strategy->GetInputStage(); - double computation_cost = - GetOperatorCost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); - double communication_cost = GetOperatorCost()->GetCommCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); + double computation_cost = cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); + double communication_cost = cost()->GetCommCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); std::shared_ptr result = std::make_shared(computation_cost, communication_cost); result->communication_without_parameter_ = - GetOperatorCost()->GetForwardCommCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); + cost()->GetForwardCommCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); result->communication_with_partial_para_ = result->communication_without_parameter_ + COST_MODEL_GAMMA * (communication_cost - result->communication_without_parameter_); @@ -1096,7 +1095,7 @@ Status OperatorInfo::set_is_parameter(const std::vector& is_parameter) { return FAILED; } is_parameter_ = is_parameter; - GetOperatorCost()->set_is_parameter(is_parameter); + cost()->set_is_parameter(is_parameter); return SUCCESS; } @@ -1193,7 +1192,7 @@ Status OperatorInfo::SetInputAndOutputTypeLength(const std::vector& inpu } inputs_type_lengths_ = input_lengths; outputs_type_lengths_ = output_lengths; - GetOperatorCost()->SetInputAndOutputTypeLength(input_lengths, output_lengths); + cost()->SetInputAndOutputTypeLength(input_lengths, output_lengths); return SUCCESS; } @@ -1211,7 +1210,7 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra } double OperatorInfo::GetForwardMemoryCostFromCNode() { - return GetOperatorCost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0); + return cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0); } } // namespace parallel diff --git 
a/mindspore/ccsrc/parallel/ops_info/operator_info.h b/mindspore/ccsrc/parallel/ops_info/operator_info.h index e7b8af0a7e..8fcae8ad33 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.h +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.h @@ -53,12 +53,13 @@ class Edge; class OperatorInfo { public: - OperatorInfo(std::string name, Shapes inputs_shape, Shapes outputs_shape, PrimitiveAttrs attrs) + OperatorInfo(std::string name, Shapes inputs_shape, Shapes outputs_shape, PrimitiveAttrs attrs, OperatorCostPtr cost) : name_(std::move(name)), inputs_shape_(std::move(inputs_shape)), outputs_shape_(std::move(outputs_shape)), attrs_(std::move(attrs)), - is_alive_(true) { + is_alive_(true), + cost_(cost) { std::vector not_parameteter(inputs_shape_.size(), false); is_parameter_ = not_parameteter; refkey_parameter_name_ = ""; @@ -75,7 +76,8 @@ class OperatorInfo { // Given the stage_id (which indicates the number of devices), // generate all strategies for this operator virtual Status GenerateStrategies(int32_t stage_id) = 0; - virtual OperatorCostPtr GetOperatorCost() const = 0; + const OperatorCostPtr& cost() const { return cost_; } + void set_cost(const OperatorCostPtr& cost) { cost_ = cost; } virtual Status SetCostUnderStrategy(const StrategyPtr& strategy) = 0; virtual std::shared_ptr>> GenerateBatchStrategies(); @@ -115,7 +117,7 @@ class OperatorInfo { void ReplaceSuccEdge(const std::shared_ptr& op, const std::shared_ptr& new_edge); void ReplacePreEdges(const std::shared_ptr& op, const std::shared_ptr& new_edge); void ReplaceSuccEdges(const std::shared_ptr& op, const std::shared_ptr& new_edge); - std::vector GetOutputTypeLengths() const { return GetOperatorCost()->outputs_type_lengths(); } + std::vector GetOutputTypeLengths() const { return cost()->outputs_type_lengths(); } void SetSelectedStrategyAndCost(const StrategyPtr& s_strategy, const CostPtr& cost) { selected_strategy_ = s_strategy; selected_cost_ = cost; @@ -221,6 +223,9 @@ class OperatorInfo { 
std::string refkey_parameter_name_; CNodePtr cnode_; int32_t used_devices_ = -1; + + private: + OperatorCostPtr cost_; }; Shape GetSliceShape(const Shape& tensor_shape, const Dimensions& strategy); diff --git a/mindspore/ccsrc/parallel/ops_info/prelu_info.h b/mindspore/ccsrc/parallel/ops_info/prelu_info.h index d491ecb331..bdfb11550b 100644 --- a/mindspore/ccsrc/parallel/ops_info/prelu_info.h +++ b/mindspore/ccsrc/parallel/ops_info/prelu_info.h @@ -35,15 +35,12 @@ class PReLUInfo : public OperatorInfo { public: PReLUInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs) { - prelucost_ptr = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~PReLUInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t stage_id) override; - OperatorCostPtr GetOperatorCost() const override { return prelucost_ptr; } Status SetCostUnderStrategy(const StrategyPtr& strategy) override; protected: @@ -59,7 +56,6 @@ class PReLUInfo : public OperatorInfo { private: Dimensions input_strategy_; - PReLUCostPtr prelucost_ptr; }; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc index 5b07f8d0a9..aa64e72d05 100644 --- a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc @@ -109,8 +109,12 @@ Status ReduceMethod::GetAttrs() { } cross_batch_ = cross_batch_iter->second->cast()->value(); } - reducemethodcost_ptr_->set_cross_batch(cross_batch_); - + auto reducemethodcost = std::dynamic_pointer_cast(cost()); + if (reducemethodcost == nullptr) { + MS_LOG(ERROR) << "Cost cast to ReduceMethodCostPtr failed!"; + return FAILED; + } 
+ reducemethodcost->set_cross_batch(cross_batch_); return SUCCESS; } diff --git a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h index 8e2e17af99..c2ddbc87ce 100644 --- a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h +++ b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h @@ -34,9 +34,7 @@ class ReduceMethod : public OperatorInfo { public: ReduceMethod(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape, const PrimitiveAttrs &attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs) { - reducemethodcost_ptr_ = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~ReduceMethod() override = default; Status Init(const StrategyPtr &strategy) override; @@ -44,13 +42,11 @@ class ReduceMethod : public OperatorInfo { Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr &strategy) override; - OperatorCostPtr GetOperatorCost() const override { return reducemethodcost_ptr_; } protected: std::string reduce_method_; bool keepdims_ = false; bool cross_batch_ = false; - ReduceMethodCostPtr reducemethodcost_ptr_; Status CheckStrategy(const StrategyPtr &strategy) override; Status GetAttrs() override; Dimensions InferOutputStrategy(); @@ -110,7 +106,7 @@ class ReduceMeanInfo : public ReduceMethod { ReduceMeanInfo(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape, const PrimitiveAttrs &attrs) : ReduceMethod(name, inputs_shape, outputs_shape, attrs) { - reducemethodcost_ptr_ = std::make_shared(); + set_cost(std::make_shared()); } ~ReduceMeanInfo() override = default; diff --git a/mindspore/ccsrc/parallel/ops_info/reshape_info.h b/mindspore/ccsrc/parallel/ops_info/reshape_info.h index 1d6a14b1f6..38192a5d01 100644 --- a/mindspore/ccsrc/parallel/ops_info/reshape_info.h +++ b/mindspore/ccsrc/parallel/ops_info/reshape_info.h @@ -36,12 +36,10 @@ 
class ReshapeInfo : public OperatorInfo { public: ReshapeInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs), + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()), dev_num_(0), input_layout_set_flag_(false), - output_layout_set_flag_(false) { - reshape_cost_ptr_ = std::make_shared(); - } + output_layout_set_flag_(false) {} ~ReshapeInfo() override = default; Status Init(const StrategyPtr& strategy) override; void SetInputLayout(const TensorLayout& input_layout) { @@ -55,7 +53,6 @@ class ReshapeInfo : public OperatorInfo { Status InitForCostModel(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return reshape_cost_ptr_; } protected: Status CheckStrategy(const StrategyPtr& strategy) override; @@ -67,7 +64,6 @@ class ReshapeInfo : public OperatorInfo { Status InferTensorLayout(TensorLayouts* inputs_layout, TensorLayouts* outputs_layout); Status GetAttrs() override; Strategys GetOutputsStrategy(); - ReshapeCostPtr reshape_cost_ptr_; private: Status GetParameterInput(); diff --git a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h index 6df5856e0c..cf850683a6 100644 --- a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h +++ b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h @@ -34,9 +34,7 @@ class TmpIdentityInfo : public OperatorInfo { public: TmpIdentityInfo(const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs, const std::string& name = IDENTITY_INFO) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs) { - id_cost_ptr_ = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~TmpIdentityInfo() override = 
default; Status Init(const StrategyPtr& strategy) override; @@ -44,7 +42,6 @@ class TmpIdentityInfo : public OperatorInfo { Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return id_cost_ptr_; } protected: Status CheckStrategy(const StrategyPtr& strategy) override; @@ -54,9 +51,6 @@ class TmpIdentityInfo : public OperatorInfo { Status InferTensorInfo() override; Status InferDevMatrixShape() override; Status InferTensorMap() override; - - private: - TmpIdentityCostPtr id_cost_ptr_; }; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/transpose_info.h b/mindspore/ccsrc/parallel/ops_info/transpose_info.h index 4f6f6bb695..2714b352b6 100644 --- a/mindspore/ccsrc/parallel/ops_info/transpose_info.h +++ b/mindspore/ccsrc/parallel/ops_info/transpose_info.h @@ -35,15 +35,12 @@ class TransposeInfo : public OperatorInfo { public: TransposeInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs) { - transpose_cost_ptr_ = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~TransposeInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return transpose_cost_ptr_; } protected: Status CheckStrategy(const StrategyPtr& strategy) override; @@ -60,7 +57,6 @@ class TransposeInfo : public OperatorInfo { Status ComputeAxis(); std::vector axis_v_; Dimensions input_strategy_; - ActivationCostPtr transpose_cost_ptr_; }; } // namespace parallel } // namespace mindspore diff --git 
a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h index d0278f27d9..b958adeabe 100644 --- a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h +++ b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h @@ -32,16 +32,13 @@ class VirtualDatasetInfo : public OperatorInfo { public: VirtualDatasetInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs) { - vd_cost_ptr_ = std::make_shared(); - } + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~VirtualDatasetInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; - OperatorCostPtr GetOperatorCost() const override { return vd_cost_ptr_; } void ReComputeBatchSplitFlagList() override; protected: @@ -53,9 +50,6 @@ class VirtualDatasetInfo : public OperatorInfo { Status InferTensorMap() override; Status GetAttrs() override; Status InferAsLossDivisor() override; - - private: - VirtualDatasetCostPtr vd_cost_ptr_; }; } // namespace parallel diff --git a/tests/ut/cpp/parallel/ops_info/activation_test.cc b/tests/ut/cpp/parallel/ops_info/activation_test.cc index 5d18c5372f..a8f8425ae9 100644 --- a/tests/ut/cpp/parallel/ops_info/activation_test.cc +++ b/tests/ut/cpp/parallel/ops_info/activation_test.cc @@ -84,9 +84,9 @@ TEST_F(TestActivation, test_activation_strategies) { act_ptr_->InitForCostModel(sp); std::vector inputs_info = act_ptr_->inputs_tensor_info(); std::vector outputs_info = act_ptr_->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(act_ptr_->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(act_ptr_->cost()->GetComputationCost(inputs_info, 
outputs_info, sp->GetInputStage()), cost.computation_cost_); - ASSERT_DOUBLE_EQ(act_ptr_->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(act_ptr_->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), cost.communication_cost_); } } @@ -109,9 +109,9 @@ TEST_F(TestActivation, test_softmax_strategies) { soft_ptr_->InitForCostModel(sp); std::vector inputs_info = soft_ptr_->inputs_tensor_info(); std::vector outputs_info = soft_ptr_->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(soft_ptr_->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(soft_ptr_->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), cost.computation_cost_); - ASSERT_DOUBLE_EQ(soft_ptr_->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(soft_ptr_->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), cost.communication_cost_); } } diff --git a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc index 99ca9f8e0e..2fece098e8 100644 --- a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc @@ -569,7 +569,7 @@ TEST_F(TestMatmulInfo, test_GenerateStrategies1) { matmul1->InitForCostModel(sp); std::vector inputs_info = matmul1->inputs_tensor_info(); std::vector outputs_info = matmul1->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(matmul1->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(matmul1->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), cost.computation_cost_); break; } @@ -599,7 +599,7 @@ TEST_F(TestMatmulInfo, test_GenerateStrategies2) { TensorInfo replica_input1_info(tly, input1_shape, input1_slice_shape); replica_inputs_info.push_back(replica_input1_info); - 
ASSERT_DOUBLE_EQ(matmul3->GetOperatorCost()->GetComputationCost(replica_inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(matmul3->cost()->GetComputationCost(replica_inputs_info, outputs_info, sp->GetInputStage()), cost.computation_cost_); break; } diff --git a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc index 6cb9739b1c..8c956328a7 100644 --- a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc @@ -188,11 +188,11 @@ TEST_F(TestTensorAddInfo, GenerateStrategies) { tensor_add->InitForCostModel(sp); std::vector inputs_info = tensor_add->inputs_tensor_info(); std::vector outputs_info = tensor_add->outputs_tensor_info(); - double memory_cost0 = tensor_add->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); + double memory_cost0 = tensor_add->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); double memory_cost1 = cost.computation_cost_; bool memory = memory_cost0 - memory_cost1 <= 1.0; - double comm_cost0 = tensor_add->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()); + double comm_cost0 = tensor_add->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()); double comm_cost1 = cost.communication_cost_; bool comm = comm_cost0 - comm_cost1 <= 1.0; @@ -210,11 +210,11 @@ TEST_F(TestTensorAddInfo, GenerateStrategies1) { tensor_add1->InitForCostModel(sp); std::vector inputs_info = tensor_add1->inputs_tensor_info(); std::vector outputs_info = tensor_add1->outputs_tensor_info(); - double memory_cost0 = tensor_add1->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); + double memory_cost0 = tensor_add1->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); double memory_cost1 = cost.computation_cost_; bool memory = memory_cost0 - memory_cost1 <= 1.0; - double comm_cost0 = 
tensor_add1->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()); + double comm_cost0 = tensor_add1->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()); double comm_cost1 = cost.communication_cost_; bool comm = comm_cost0 - comm_cost1 <= 1.0; diff --git a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc index 043746498f..3971a2b471 100644 --- a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc @@ -145,9 +145,9 @@ TEST_F(TestTmpIdentityInfo, test_generate_strategies) { identity_ptr->Init(sp); std::vector inputs_info = identity_ptr->inputs_tensor_info(); std::vector outputs_info = identity_ptr->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(identity_ptr->GetOperatorCost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(identity_ptr->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), cost.computation_cost_); - ASSERT_DOUBLE_EQ(identity_ptr->GetOperatorCost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(identity_ptr->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), cost.communication_cost_); } } From 62bbf560c66e7dee5a8c555feb8aa3fa13d118ca Mon Sep 17 00:00:00 2001 From: biffex Date: Thu, 9 Apr 2020 15:04:24 +0800 Subject: [PATCH 150/367] constant duplicate mul for momentum --- mindspore/ccsrc/optimizer/irpass.cc | 6 +-- .../optimizer/irpass/arithmetic_simplify.h | 54 ++++++++++++++++++- mindspore/ccsrc/utils/graph_utils.cc | 2 + mindspore/ops/operations/math_ops.py | 8 +++ tests/ut/cpp/optimizer/lib_test.cc | 13 +++++ .../gtest_input/optimizer/opt_test.py | 33 ++++++++++++ 6 files changed, 112 insertions(+), 4 deletions(-) diff --git a/mindspore/ccsrc/optimizer/irpass.cc b/mindspore/ccsrc/optimizer/irpass.cc index cdc960792f..0991c31b00 100644 --- a/mindspore/ccsrc/optimizer/irpass.cc +++ 
b/mindspore/ccsrc/optimizer/irpass.cc @@ -45,9 +45,9 @@ namespace mindspore { namespace opt { namespace irpass { OptimizeIRPassLib::OptimizeIRPassLib() { - arithmetic_simplify_ = MakeSubstitution( - ArithmeticSimplify(), "arithmetic_simplify", - {prim::kPrimScalarAdd, prim::kPrimScalarMul, prim::kPrimTensorAdd, prim::kPrimIdentity, prim::kPrimMomentum}); + arithmetic_simplify_ = MakeSubstitution(ArithmeticSimplify(), "arithmetic_simplify", + {prim::kPrimScalarAdd, prim::kPrimScalarMul, prim::kPrimTensorAdd, + prim::kPrimIdentity, prim::kPrimMomentum, prim::kPrimMul}); special_op_eliminate_ = MakeSubstitution(SpecialOpEliminater(), "special_op_eliminate", {prim::kPrimInsertGradientOf, prim::kPrimPrintShapeType, prim::kPrimGetRefKey, prim::kPrimMirror, prim::kPrimVirtualDiv}); diff --git a/mindspore/ccsrc/optimizer/irpass/arithmetic_simplify.h b/mindspore/ccsrc/optimizer/irpass/arithmetic_simplify.h index 8c5610ed1b..ab191aab20 100644 --- a/mindspore/ccsrc/optimizer/irpass/arithmetic_simplify.h +++ b/mindspore/ccsrc/optimizer/irpass/arithmetic_simplify.h @@ -179,6 +179,55 @@ class OptUpdateZeroTensor : public AnfVisitor { } }; +// {prim::kPrimMul, Tensor1, {orim::kPrimMul, Tensor2, {...}}} -> +// {prim::kPrimMul, {...}, {prim::kPrimMul, Tensor1, Tensor2}} +class ConstantDuplicateMul : public AnfVisitor { + public: + AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override { + Reset(); + // {prim::kPrimMul, Tensor1, {...}} + AnfVisitor::Match(prim::kPrimMul, {IsNode, IsNode})(node); + if (vnode_ == nullptr || cnode_ == nullptr) { + return nullptr; + } + auto tensor1 = vnode_; + auto mul = cnode_; + + Reset(); + // {prim::kPrimMul, Tensor2, {...}} + AnfVisitor::Match(prim::kPrimMul, {IsNode, IsNode})(mul); + if (vnode_ == nullptr || cnode_ == nullptr) { + return nullptr; + } + auto tensor2 = vnode_; + auto cnode = cnode_; + + auto PrimMul = GetValueNode(mul->input(0)); + auto fg = node->func_graph(); + auto ttmul = NewCNode({NewValueNode(PrimMul), 
tensor1, tensor2}, fg); + return NewCNode({NewValueNode(PrimMul), cnode, ttmul}, fg); + } + + void Visit(const AnfNodePtr &node) override { + if (IsValueNode(node)) { + vnode_ = node; + } + + if (IsCNode(node)) { + cnode_ = node->cast(); + } + } + + void Reset() { + vnode_ = nullptr; + cnode_ = nullptr; + } + + private: + AnfNodePtr vnode_; + CNodePtr cnode_; +}; + class ArithmeticSimplify { public: ArithmeticSimplify() @@ -186,12 +235,14 @@ class ArithmeticSimplify { add_by_zero_(), tensor_add_by_zero_(), identity_(prim::kPrimIdentity), - opt_update_zero_tensor_() { + opt_update_zero_tensor_(), + constant_duplicate_mul_() { eliminaters_.emplace_back(multiply_by_zero_or_one_); eliminaters_.emplace_back(add_by_zero_); eliminaters_.emplace_back(tensor_add_by_zero_); eliminaters_.emplace_back(identity_); eliminaters_.emplace_back(opt_update_zero_tensor_); + eliminaters_.emplace_back(constant_duplicate_mul_); } ~ArithmeticSimplify() = default; @@ -212,6 +263,7 @@ class ArithmeticSimplify { TensorAddByZero tensor_add_by_zero_; PrimEliminater identity_; OptUpdateZeroTensor opt_update_zero_tensor_; + ConstantDuplicateMul constant_duplicate_mul_; std::vector eliminaters_{}; }; } // namespace irpass diff --git a/mindspore/ccsrc/utils/graph_utils.cc b/mindspore/ccsrc/utils/graph_utils.cc index 938df2c291..55ef8dc3d5 100644 --- a/mindspore/ccsrc/utils/graph_utils.cc +++ b/mindspore/ccsrc/utils/graph_utils.cc @@ -400,6 +400,8 @@ static bool SameNodeShallow(const AnfNodePtr& node1, const AnfNodePtr& node2, Fu auto a2 = GetValueNode(node2); if (a1->isa() && a2->isa()) { return a1->cast()->name() == a2->cast()->name(); + } else if (a1->isa() && a2->isa()) { + return a1->cast()->ValueEqual(*(a2->cast())); } else { return *a1 == *a2; } diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index d003f6ee8b..e5e89615df 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -771,6 +771,14 @@ class 
Mul(_MathBinaryOp): >>> mul(input_x, input_y) [4, 10, 18] """ + def infer_value(self, x, y): + if x is not None and y is not None: + x = x.asnumpy() + y = y.asnumpy() + out = x * y + out = np.array(out, x.dtype) + return Tensor(out) + return None class Square(PrimitiveWithInfer): diff --git a/tests/ut/cpp/optimizer/lib_test.cc b/tests/ut/cpp/optimizer/lib_test.cc index ff3c00d37a..2d4cf0e78e 100644 --- a/tests/ut/cpp/optimizer/lib_test.cc +++ b/tests/ut/cpp/optimizer/lib_test.cc @@ -543,5 +543,18 @@ TEST_F(TestOptLib, test_print_tuple_wrapper) { ASSERT_TRUE(CheckOpt(before2, after2, patterns)); ASSERT_TRUE(CheckOpt(before3, before3, patterns)); } + +TEST_F(TestOptLib, test_constant_duplicate_mul) { + FuncGraphPtr beforell = getPyFun.CallAndParseRet("test_constant_duplicate_mul", "beforell"); + FuncGraphPtr beforelr = getPyFun.CallAndParseRet("test_constant_duplicate_mul", "beforelr"); + FuncGraphPtr beforerl = getPyFun.CallAndParseRet("test_constant_duplicate_mul", "beforerl"); + FuncGraphPtr beforerr = getPyFun.CallAndParseRet("test_constant_duplicate_mul", "beforerr"); + FuncGraphPtr after = getPyFun.CallAndParseRet("test_constant_duplicate_mul", "after"); + auto patterns = std::vector({irpass.arithmetic_simplify_}); + ASSERT_TRUE(CheckOpt(beforell, after, patterns)); + ASSERT_TRUE(CheckOpt(beforelr, after, patterns)); + ASSERT_TRUE(CheckOpt(beforerl, after, patterns)); + ASSERT_TRUE(CheckOpt(beforerr, after, patterns)); +} } // namespace opt } // namespace mindspore diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py index 53eb2130f0..d494ad27d3 100644 --- a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py +++ b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py @@ -16,6 +16,8 @@ from mindspore.ops import Primitive, PrimitiveWithInfer from mindspore.ops import operations as P from mindspore.ops.operations import _grad_ops as G +from mindspore import Tensor 
+import numpy as np # pylint: disable=unused-variable @@ -903,3 +905,34 @@ def test_print_tuple_wrapper(tag): return print_(make_tuple(x, y, z)) return fns[tag] + +def test_constant_duplicate_mul(tag): + fns = FnDict() + Mul = Primitive('Mul'); + Sqrt = Primitive('Sqrt'); + + x = Tensor(np.array([[2, 2], [2, 3]]).astype('float32')) + tensor1 = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]]).astype('float32')) + tensor2 = Tensor(np.array([[2.2, 3.1], [3.2, 4.2]]).astype('float32')) + + @fns + def beforell(): + return Mul(tensor1, Mul(tensor2, Sqrt(x))) + + @fns + def beforelr(): + return Mul(tensor1, Mul(Sqrt(x), tensor2)) + + @fns + def beforerl(): + return Mul(Mul(Sqrt(x), tensor2), tensor1) + + @fns + def beforerr(): + return Mul(Mul(Sqrt(x), tensor2), tensor1) + + @fns + def after(): + return Mul(Sqrt(x), Mul(tensor1, tensor2)) + + return fns[tag] From 19f2ea6d41f9a6299eab4b325fac23ab41954de9 Mon Sep 17 00:00:00 2001 From: Zirui Wu Date: Thu, 9 Apr 2020 10:44:14 -0400 Subject: [PATCH 151/367] change int32 to int64 to avoid overflow in batch fix clang pybind fix --- mindspore/ccsrc/dataset/api/python_bindings.cc | 2 +- .../ccsrc/dataset/engine/datasetops/batch_op.cc | 2 +- .../ccsrc/dataset/engine/datasetops/batch_op.h | 14 +++++++------- .../dataset/engine/datasetops/source/voc_op.h | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/mindspore/ccsrc/dataset/api/python_bindings.cc b/mindspore/ccsrc/dataset/api/python_bindings.cc index e6c2691281..1b0d913f3e 100644 --- a/mindspore/ccsrc/dataset/api/python_bindings.cc +++ b/mindspore/ccsrc/dataset/api/python_bindings.cc @@ -406,7 +406,7 @@ void bindSamplerOps(py::module *m) { void bindInfoObjects(py::module *m) { (void)py::class_(*m, "CBatchInfo") - .def(py::init()) + .def(py::init()) .def("get_epoch_num", &BatchOp::CBatchInfo::get_epoch_num) .def("get_batch_num", &BatchOp::CBatchInfo::get_batch_num); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc 
b/mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc index 8778fe1b45..c80078cb44 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/batch_op.cc @@ -57,7 +57,7 @@ BatchOp::BatchOp(int32_t batch_size, bool drop, int32_t op_queue_size, int32_t n Status BatchOp::operator()() { RETURN_IF_NOT_OK(LaunchThreadsAndInitOp()); TaskManager::FindMe()->Post(); - int32_t epoch_num = 0, batch_num = 0, cnt = 0; + int64_t epoch_num = 0, batch_num = 0, cnt = 0; TensorRow new_row; std::unique_ptr table = std::make_unique(); child_iterator_ = std::make_unique(this, 0, 0); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/batch_op.h b/mindspore/ccsrc/dataset/engine/datasetops/batch_op.h index 9037b8e94e..32d386e3c9 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/batch_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/batch_op.h @@ -124,17 +124,17 @@ class BatchOp : public ParallelOp { // This struct is used for both internal control and python callback. // This struct is bound to python with read-only access. struct CBatchInfo { - CBatchInfo(int32_t ep, int32_t bat, int32_t cur, batchCtrl ctrl) + CBatchInfo(int64_t ep, int64_t bat, int64_t cur, batchCtrl ctrl) : epoch_num_(ep), batch_num_(bat), total_batch_num_(cur), ctrl_(ctrl) {} - CBatchInfo(int32_t ep, int32_t bat, int32_t cur) : CBatchInfo(ep, bat, cur, batchCtrl::kNoCtrl) {} + CBatchInfo(int64_t ep, int64_t bat, int64_t cur) : CBatchInfo(ep, bat, cur, batchCtrl::kNoCtrl) {} CBatchInfo() : CBatchInfo(0, 0, 0, batchCtrl::kNoCtrl) {} explicit CBatchInfo(batchCtrl ctrl) : CBatchInfo(0, 0, 0, ctrl) {} - int32_t epoch_num_; // i-th epoch. i starts from 0 - int32_t batch_num_; // i-th batch since the start of current epoch. i starts from 0 - int32_t total_batch_num_; // i-th batch since the start of first epoch. i starts from 0 + int64_t epoch_num_; // i-th epoch. i starts from 0 + int64_t batch_num_; // i-th batch since the start of current epoch. 
i starts from 0 + int64_t total_batch_num_; // i-th batch since the start of first epoch. i starts from 0 batchCtrl ctrl_; // No control=0, EOE=1, EOF=2, Quit=3 - const int32_t get_batch_num() const { return batch_num_; } - const int32_t get_epoch_num() const { return epoch_num_; } + const int64_t get_batch_num() const { return batch_num_; } + const int64_t get_epoch_num() const { return epoch_num_; } }; // BatchOp constructor diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.h b/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.h index a267ef866a..5751388519 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.h @@ -201,8 +201,8 @@ class VOCOp : public ParallelOp, public RandomAccessOp { Status Reset() override; bool decode_; - uint64_t row_cnt_; - uint64_t buf_cnt_; + int64_t row_cnt_; + int64_t buf_cnt_; int64_t num_rows_; int64_t num_samples_; std::string folder_path_; From 59a714c654dbbab090cc8af0012a6b1fc1e4e3a1 Mon Sep 17 00:00:00 2001 From: Cathy Wong Date: Thu, 9 Apr 2020 15:22:33 -0400 Subject: [PATCH 152/367] Correct shuffle UT buffer_size > #dataset-row as valid --- .../data/dataset/golden/shuffle_05_result.npz | Bin 0 -> 1507 bytes tests/ut/python/dataset/test_shuffle.py | 39 +++++++++--------- 2 files changed, 20 insertions(+), 19 deletions(-) create mode 100644 tests/ut/data/dataset/golden/shuffle_05_result.npz diff --git a/tests/ut/data/dataset/golden/shuffle_05_result.npz b/tests/ut/data/dataset/golden/shuffle_05_result.npz new file mode 100644 index 0000000000000000000000000000000000000000..27eb0a470d370fab9d4938e5fb9e3eb50d603bd1 GIT binary patch literal 1507 zcmbW1OH&hB6vw+00!c(c4LUmWtAJtD)+OVZ!lkD_Ooy{uH(5UT@6!HZoH}d``mL*e)pVH>6?PTC89M~ul9TG z?dO$%rm1g8i)dNL$qdDd<(jT_vQs6hpjvN}i*u=EU3;$m94*+Dyc3-;qA%v7F(bNK za;i?Ym?=30Te)Ym8x@=Um9=cyro9-C8N(w3G2<8GyH~jG&lk5g%e8pEZTT|wXz+S zZA(6CbAC()p+SEwLklu2;9t_o`$FzYCXf0b 
zj85?;sAfj14xJo(1@>V-8T$kd;2;_M1@@qWjEF!B0%UXvv|~3J2Lyr$k#SHUf-W)+ z3ACb(jKcz9>>{IEz(6nhaDp~R7~!TI{mSkI>AIpj#sBTIS#Pf0IggLD*Yq+`gFj^hdG1Z>hvct&~|1=1^6CB15y z{nj4_{h??xX-$6dHm3F0=SJi2X5^Y>M*scfZmwI_c4W#j`#${nxS2L>nE?+lW0^hw zyfm7TS(izJLF*8M$GZ9bVc^%k1_d2?VJqKqtMG(CUo_8FL7cCg&Qz(qkTN zq$&7FZ=;oT0WG9=5GK6~Kj|U@r1zM0pIJ)@I=N2HWoA84Z=Ry9Fzq4J(oB2Av<%Z8 zGcC)s9MkelD{Q}##IPB;`X;DeXyDERm#Z%CI5&yUDWg3FJ>gp=mzNJ$Lv?_;ApHX_g H`G6I{m literal 0 HcmV?d00001 diff --git a/tests/ut/python/dataset/test_shuffle.py b/tests/ut/python/dataset/test_shuffle.py index 2b7a251d2c..4a823c5fb7 100644 --- a/tests/ut/python/dataset/test_shuffle.py +++ b/tests/ut/python/dataset/test_shuffle.py @@ -98,6 +98,25 @@ def test_shuffle_04(): save_and_check(data1, parameters, filename, generate_golden=GENERATE_GOLDEN) +def test_shuffle_05(): + """ + Test shuffle: buffer_size > number-of-rows-in-dataset + """ + logger.info("test_shuffle_05") + # define parameters + buffer_size = 13 + seed = 1 + parameters = {"params": {'buffer_size': buffer_size, "seed": seed}} + + # apply dataset operations + data1 = ds.TFRecordDataset(DATA_DIR, shuffle=ds.Shuffle.FILES) + ds.config.set_seed(seed) + data1 = data1.shuffle(buffer_size=buffer_size) + + filename = "shuffle_05_result.npz" + save_and_check(data1, parameters, filename, generate_golden=GENERATE_GOLDEN) + + def test_shuffle_exception_01(): """ Test shuffle exception: buffer_size<0 @@ -152,24 +171,6 @@ def test_shuffle_exception_03(): assert "buffer_size" in str(e) -def test_shuffle_exception_04(): - """ - Test shuffle exception: buffer_size > number-of-rows-in-dataset - """ - logger.info("test_shuffle_exception_04") - - # apply dataset operations - data1 = ds.TFRecordDataset(DATA_DIR) - ds.config.set_seed(1) - try: - data1 = data1.shuffle(buffer_size=13) - sum([1 for _ in data1]) - - except BaseException as e: - logger.info("Got an exception in DE: {}".format(str(e))) - assert "buffer_size" in str(e) - - def test_shuffle_exception_05(): """ Test shuffle 
exception: Missing mandatory buffer_size input parameter @@ -229,10 +230,10 @@ if __name__ == '__main__': test_shuffle_02() test_shuffle_03() test_shuffle_04() + test_shuffle_05() test_shuffle_exception_01() test_shuffle_exception_02() test_shuffle_exception_03() - test_shuffle_exception_04() test_shuffle_exception_05() test_shuffle_exception_06() test_shuffle_exception_07() From b0fc7b7289f37529aa9ae92e64355e4802c694e8 Mon Sep 17 00:00:00 2001 From: yoonlee666 Date: Fri, 10 Apr 2020 10:51:54 +0800 Subject: [PATCH 153/367] change op Slice to StridedSlice in bert model --- mindspore/model_zoo/Bert_NEZHA/bert_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/model_zoo/Bert_NEZHA/bert_model.py b/mindspore/model_zoo/Bert_NEZHA/bert_model.py index d7f9355b3c..b9c6e8c4a1 100644 --- a/mindspore/model_zoo/Bert_NEZHA/bert_model.py +++ b/mindspore/model_zoo/Bert_NEZHA/bert_model.py @@ -194,7 +194,7 @@ class EmbeddingPostprocessor(nn.Cell): self.dropout = nn.Dropout(1 - dropout_prob) self.gather = P.GatherV2() self.use_relative_positions = use_relative_positions - self.slice = P.Slice() + self.slice = P.StridedSlice() self.full_position_embeddings = Parameter(initializer (TruncatedNormal(initializer_range), [max_position_embeddings, @@ -216,7 +216,7 @@ class EmbeddingPostprocessor(nn.Cell): output += token_type_embeddings if not self.use_relative_positions: _, seq, width = self.shape - position_embeddings = self.slice(self.full_position_embeddings, [0, 0], [seq, width]) + position_embeddings = self.slice(self.full_position_embeddings, (0, 0), (seq, width), (1, 1)) position_embeddings = self.reshape(position_embeddings, (1, seq, width)) output += position_embeddings output = self.layernorm(output) From d1a0ded6c2f5c9077464ff67e6d3231680cba00f Mon Sep 17 00:00:00 2001 From: chenfei Date: Fri, 27 Mar 2020 14:49:16 +0800 Subject: [PATCH 154/367] use first depend create parameter --- mindspore/ccsrc/operator/ops.cc | 3 + 
mindspore/ccsrc/operator/ops.h | 3 + mindspore/ccsrc/session/ascend_session.cc | 59 +++++++++++---- mindspore/ccsrc/session/ascend_session.h | 6 +- mindspore/ccsrc/session/kernel_graph.cc | 3 +- mindspore/ccsrc/session/kernel_graph.h | 5 ++ mindspore/ccsrc/session/session_basic.cc | 90 ++++++++++++++++------- mindspore/ccsrc/session/session_basic.h | 5 +- mindspore/ccsrc/vm/backend.cc | 2 +- 9 files changed, 128 insertions(+), 48 deletions(-) mode change 100644 => 100755 mindspore/ccsrc/operator/ops.cc mode change 100644 => 100755 mindspore/ccsrc/operator/ops.h mode change 100644 => 100755 mindspore/ccsrc/session/ascend_session.cc mode change 100644 => 100755 mindspore/ccsrc/session/ascend_session.h mode change 100644 => 100755 mindspore/ccsrc/session/kernel_graph.cc mode change 100644 => 100755 mindspore/ccsrc/session/kernel_graph.h mode change 100644 => 100755 mindspore/ccsrc/session/session_basic.cc mode change 100644 => 100755 mindspore/ccsrc/session/session_basic.h mode change 100644 => 100755 mindspore/ccsrc/vm/backend.cc diff --git a/mindspore/ccsrc/operator/ops.cc b/mindspore/ccsrc/operator/ops.cc old mode 100644 new mode 100755 index 12e6b70a6f..f3053cac7d --- a/mindspore/ccsrc/operator/ops.cc +++ b/mindspore/ccsrc/operator/ops.cc @@ -154,6 +154,9 @@ const PrimitivePtr kPrimMul = std::make_shared("Mul"); const PrimitivePtr kPrimMinimum = std::make_shared("Minimum"); const PrimitivePtr kPrimMaximum = std::make_shared("Maximum"); const PrimitivePtr kPrimSquare = std::make_shared("Square"); +const PrimitivePtr kPrimEqual = std::make_shared("Equal"); +const PrimitivePtr kPrimLess = std::make_shared("Less"); +const PrimitivePtr kPrimLessEqual = std::make_shared("LessEqual"); // NN const PrimitivePtr kPrimFlatten = std::make_shared("Flatten"); diff --git a/mindspore/ccsrc/operator/ops.h b/mindspore/ccsrc/operator/ops.h old mode 100644 new mode 100755 index 5fbf2b7067..2dc7072972 --- a/mindspore/ccsrc/operator/ops.h +++ b/mindspore/ccsrc/operator/ops.h @@ -160,6 
+160,9 @@ extern const PrimitivePtr kPrimMul; extern const PrimitivePtr kPrimMinimum; extern const PrimitivePtr kPrimMaximum; extern const PrimitivePtr kPrimSquare; +extern const PrimitivePtr kPrimEqual; +extern const PrimitivePtr kPrimLess; +extern const PrimitivePtr kPrimLessEqual; // NN extern const PrimitivePtr kPrimFlatten; diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc old mode 100644 new mode 100755 index 34c05aed08..f255b2f15f --- a/mindspore/ccsrc/session/ascend_session.cc +++ b/mindspore/ccsrc/session/ascend_session.cc @@ -506,11 +506,13 @@ void AscendSession::InsertSwitchToGraph(GraphId condition_graph_id, GraphId true kernel_build_info_builder->SetFusionType(kernel::FusionType::OPAQUE); kernel_build_info_builder->SetProcessor(kernel::Processor::AICORE); kernel_build_info_builder->SetKernelType(KernelType::RT_KERNEL); - // condition graph's output must be single output - if (condition_graph->outputs().size() != 1) { - MS_LOG(EXCEPTION) << "Condition_graph output num " << condition_graph_id << " should be 1"; + auto cond_output_it = condition_output_.find(condition_graph_id); + if (cond_output_it == condition_output_.end()) { + MS_LOG(EXCEPTION) << "Can't find condition graph" << condition_graph_id; } - AnfNodePtr cond_output_kernel = condition_graph->outputs()[0]; + auto cond_output_kernel = + AnfAlgo::VisitKernel(condition_graph->GetBackendAnfByFrontAnf(cond_output_it->second), 0).first; + MS_EXCEPTION_IF_NULL(cond_output_kernel); std::vector inputs = {NewValueNode(switch_primitive), cond_output_kernel, counter_const}; CNodePtr switch_node = condition_graph->NewCNode(inputs); AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info_builder->Build(), switch_node.get()); @@ -569,12 +571,14 @@ void AscendSession::CopyOutputOfIf(GraphId false_graph_id) { } } -void AscendSession::SwitchCompile(GraphId cond_graph_id, GraphId true_graph_id, GraphId false_graph_id) { +void AscendSession::SwitchCompile(GraphId 
cond_graph_id, GraphId true_graph_id, GraphId false_graph_id, + const AnfNodePtr &output) { if (switches_.find(cond_graph_id) != switches_.end()) { MS_LOG(WARNING) << "Condition graph" << cond_graph_id << " has been set before "; return; } switches_[cond_graph_id] = std::pair(true_graph_id, false_graph_id); + condition_output_[cond_graph_id] = output; MS_LOG(INFO) << "New switch compile " << cond_graph_id << " " << true_graph_id << " " << false_graph_id; // set the type of condition graph auto cond_graph_index = ExecOrderOfChildGraph(final_graph_id_, cond_graph_id); @@ -682,12 +686,14 @@ void AscendSession::SetChildGraphParameter(const AnfNodePtr &front_anf, const An auto from_graph_id = GetGraphIdByNode(front_anf); auto from_graph = GetGraph(from_graph_id); MS_EXCEPTION_IF_NULL(from_graph); - + auto to_graph_id = AnfAlgo::GetGraphId(backend_parameter.get()); + auto to_graph = GetGraph(to_graph_id); + auto backend_arg = from_graph->GetBackendAnfByFrontAnf(front_anf); + MS_EXCEPTION_IF_NULL(to_graph); MS_LOG(INFO) << "Set node[" << front_anf->DebugString() << "] of graph[" << from_graph_id << "]to node[" << backend_parameter->DebugString() << "] of graph[" << AnfAlgo::GetGraphId(backend_parameter.get()) << "]"; // a node should not assign to itself - auto backend_arg = from_graph->GetBackendAnfByFrontAnf(front_anf); if (backend_arg.get() == backend_parameter.get()) { return; } @@ -703,15 +709,16 @@ void AscendSession::SetChildGraphParameter(const AnfNodePtr &front_anf, const An return; } } - InsertMultipleAssignToGraph(from_graph_id, backend_arg, backend_parameter); - // if front anf is a parameter, we can assign the value back, because backend_parameter - // won't be changed in it's graph unless it's a weight. If backend_parameter is a weight, - // we do should assign the value back. 
- auto to_graph_id = AnfAlgo::GetGraphId(backend_parameter.get()); - auto to_graph = GetGraph(to_graph_id); - MS_EXCEPTION_IF_NULL(to_graph); + // if a parameter is a weight and not linked to any executable node,device type will be kTypeUnknown,set it's device + // type same to arg + if (AnfAlgo::GetOutputDeviceDataType(backend_parameter, 0) == kTypeUnknown) { + AnfAlgo::SetSelectKernelBuildInfo(AnfAlgo::GetSelectKernelBuildInfo(backend_arg), backend_parameter.get()); + } + InsertAssignToGraph(from_graph_id, backend_arg, backend_parameter); + // if front anf is a parameter,we can assign the value back,because backend_parameter won't be change in it's graph + // unless it's a weigth.If backend_parameter is a weight,we do should assign the value back if (backend_arg->isa() && !to_graph->execution_order().empty()) { - InsertMultipleAssignToGraph(to_graph_id, backend_parameter, backend_arg); + InsertAssignToGraph(to_graph_id, backend_parameter, backend_arg); } MS_LOG(INFO) << "Finish!"; } @@ -755,7 +762,25 @@ void AscendSession::SetChildGraphInput(GraphId g, const VectorRef &args) { DumpGraphInputArgs(args); UpdateGraphOrder(g); std::vector graph_inputs = to_graph->inputs(); + auto valid_inputs = to_graph->ValidInputs(); + size_t real_args_size = 0; + for (size_t i = 0; i < args.size(); i++) { + real_args_size += AnfAlgo::GetAllOutput(utils::cast(args[i]), {prim::kPrimTupleGetItem}).size(); + } + if (real_args_size != graph_inputs.size()) { + for (size_t j = 0; j < valid_inputs.size(); j++) { + if (valid_inputs[j]) { + MS_LOG(INFO) << "index: " << j << ", nodes: " << graph_inputs[j]->DebugString(); + } + } + MS_LOG(WARNING) << "real_args_size: " << real_args_size << ", graph_inputs.size(): " << graph_inputs.size() + << " not equal"; + } size_t input_index = 0; + if (graph_inputs.size() != valid_inputs.size()) { + MS_LOG(EXCEPTION) << "graph_inputs.size(): " << graph_inputs.size() + << ", valid_inputs.size(): " << valid_inputs.size() << " not equal"; + } for (size_t i = 
0; i < args.size(); i++) { if (input_index >= graph_inputs.size()) { MS_LOG(EXCEPTION) << "input_index " << input_index << " out of range size " << graph_inputs.size(); @@ -763,6 +788,10 @@ void AscendSession::SetChildGraphInput(GraphId g, const VectorRef &args) { if (utils::isa(args[i])) { // arg is a anf node for (const auto &real_arg : AnfAlgo::GetAllOutput(utils::cast(args[i]), {prim::kPrimTupleGetItem})) { + if (!valid_inputs[input_index]) { + MS_LOG(DEBUG) << "Invalid input arg" << real_arg->DebugString(); + continue; + } SetChildGraphParameter(real_arg, graph_inputs[input_index]); input_index++; } diff --git a/mindspore/ccsrc/session/ascend_session.h b/mindspore/ccsrc/session/ascend_session.h old mode 100644 new mode 100755 index caec4b35f7..c45ab6630a --- a/mindspore/ccsrc/session/ascend_session.h +++ b/mindspore/ccsrc/session/ascend_session.h @@ -49,9 +49,8 @@ class AscendSession : public SessionBasic { // set output of final graph void SetFinalGraphOutput(const BaseRef &output) override; // insert switch and set the relative active ops - void SwitchCompile(GraphId cond_g, GraphId true_g, GraphId false_g) override; - // set args of child graph. 
the arg maybe come from a output of other child graphs, - // or from final graph's parameter + void SwitchCompile(GraphId cond_g, GraphId true_g, GraphId false_g, const AnfNodePtr &condition_output) override; + // set args of child graph.the arg maybe come from a output of other child graphs,or from final graph's parameter void SetChildGraphInput(GraphId g, const VectorRef &args) override; // get graph id in child graphs by ME front anf node pointer GraphId GetGraphIdByNode(const AnfNodePtr &front_anf) const override; @@ -116,6 +115,7 @@ class AscendSession : public SessionBasic { std::unordered_map while_condition_graphs_; // record all conditions std::unordered_map> switches_; + std::unordered_map condition_output_; // final_graph_id is used in every root graph has it's own session situation GraphId final_graph_id_; }; diff --git a/mindspore/ccsrc/session/kernel_graph.cc b/mindspore/ccsrc/session/kernel_graph.cc old mode 100644 new mode 100755 index 84ff6b81a2..dbf6e07e7e --- a/mindspore/ccsrc/session/kernel_graph.cc +++ b/mindspore/ccsrc/session/kernel_graph.cc @@ -372,8 +372,7 @@ void KernelGraph::UpdateControlDependRelations(const std::vector &de MS_EXCEPTION_IF_NULL(depend_node); std::vector prior_nodes = {prior_node}; std::vector depend_nodes = {depend_node}; - MS_LOG(INFO) << "Prior node[" << prior_node->DebugString() << "],depend node[" << depend_node->DebugString() - << "],depend_mode=[" << AnfAlgo::GetNodeAttr(cnode, "depend_mode") << "]"; + MS_LOG(INFO) << "Prior node[" << prior_node->DebugString() << "], depend node[" << depend_node->DebugString(); if (prior_node->isa()) { prior_nodes = GetOutputNodes(prior_node); } diff --git a/mindspore/ccsrc/session/kernel_graph.h b/mindspore/ccsrc/session/kernel_graph.h old mode 100644 new mode 100755 index e11f6807f5..ff964482bb --- a/mindspore/ccsrc/session/kernel_graph.h +++ b/mindspore/ccsrc/session/kernel_graph.h @@ -86,6 +86,9 @@ class KernelGraph : public FuncGraph { bool executable() const { return 
executable_; } // set executable of graph void set_executable(bool executable) { executable_ = executable; } + // set invalid inputs for control sink + std::vector *MutableValidInputs() { return &valid_inputs_; } + std::vector ValidInputs() { return valid_inputs_; } private: // remove value node form graph @@ -118,6 +121,8 @@ class KernelGraph : public FuncGraph { std::unordered_map>> node_output_edges_; // graph needn't execute bool executable_; + // valid inputs + std::vector valid_inputs_; }; } // namespace session using KernelGraphPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/session/session_basic.cc b/mindspore/ccsrc/session/session_basic.cc old mode 100644 new mode 100755 index ede3ae7419..d2a255229d --- a/mindspore/ccsrc/session/session_basic.cc +++ b/mindspore/ccsrc/session/session_basic.cc @@ -243,29 +243,38 @@ ValueNodePtr CreateNewValueNode(const AnfNodePtr &anf, KernelGraph *graph) { return new_value_node; } -ParameterPtr CreateNewParameterFromParameter(const AnfNodePtr &anf, KernelGraph *graph) { +ParameterPtr CreateNewParameterFromParameter(const AnfNodePtr &anf, bool valid_input, KernelGraph *graph) { MS_EXCEPTION_IF_NULL(anf); if (!anf->isa()) { MS_LOG(EXCEPTION) << "anf[" << anf->DebugString() << "] is not a parameter"; } auto graph_inputs = graph->MutableInputs(); MS_EXCEPTION_IF_NULL(graph_inputs); + auto valid_inputs = graph->MutableValidInputs(); + MS_EXCEPTION_IF_NULL(valid_inputs); ParameterPtr new_parameter = graph->NewParameter(anf->cast()); - graph->FrontBackendlMapAdd(anf, new_parameter); graph_inputs->push_back(new_parameter); + valid_inputs->push_back(valid_input); return new_parameter; } -std::vector CreateParameterFromTuple(const AnfNodePtr &node, KernelGraph *graph) { +std::vector CreateParameterFromTuple(const AnfNodePtr &node, bool valid_input, KernelGraph *graph) { MS_EXCEPTION_IF_NULL(node); MS_EXCEPTION_IF_NULL(graph); std::vector parameters; std::vector pre_graph_out = AnfAlgo::GetAllOutput(node, 
{prim::kPrimTupleGetItem}); + auto valid_inputs = graph->MutableValidInputs(); + MS_EXCEPTION_IF_NULL(valid_inputs); + auto graph_inputs = graph->MutableInputs(); + MS_EXCEPTION_IF_NULL(graph_inputs); auto create_parameter = [&](const AbstractBasePtr &abstract) -> void { auto parameter = graph->NewParameter(); MS_EXCEPTION_IF_NULL(parameter); parameter->set_abstract(abstract); - parameters.push_back(graph->NewParameter(parameter)); + auto new_parameter = graph->NewParameter(parameter); + parameters.push_back(new_parameter); + valid_inputs->push_back(valid_input); + graph_inputs->push_back(new_parameter); }; for (const auto &out_node : pre_graph_out) { MS_EXCEPTION_IF_NULL(out_node); @@ -287,18 +296,15 @@ std::vector CreateParameterFromTuple(const AnfNodePtr &node, KernelG return parameters; } -AnfNodePtr CreateNewParameterFromCNode(const AnfNodePtr &anf, KernelGraph *graph) { +AnfNodePtr CreateNewParameterFromCNode(const AnfNodePtr &anf, bool valid_input, KernelGraph *graph) { MS_EXCEPTION_IF_NULL(anf); if (!anf->isa()) { - MS_LOG(EXCEPTION) << "anf[" << anf->DebugString() << "] is not a cnode"; + MS_LOG(EXCEPTION) << "Anf[" << anf->DebugString() << "] is not a cnode"; } - MS_LOG(INFO) << "create a new parameter from cnode[" << anf->DebugString() << "]"; - auto parameters = CreateParameterFromTuple(anf, graph); - auto graph_inputs = graph->MutableInputs(); - MS_EXCEPTION_IF_NULL(graph_inputs); - (void)std::copy(parameters.begin(), parameters.end(), std::back_inserter(*graph_inputs)); + MS_LOG(INFO) << "Create a new parameter from cnode[" << anf->DebugString() << "]"; + auto parameters = CreateParameterFromTuple(anf, valid_input, graph); if (parameters.empty()) { - MS_LOG(EXCEPTION) << "no parameter exist!!"; + MS_LOG(EXCEPTION) << "No parameter exist!!"; } if (parameters.size() == 1) { return parameters[0]; @@ -307,7 +313,7 @@ AnfNodePtr CreateNewParameterFromCNode(const AnfNodePtr &anf, KernelGraph *graph (void)std::copy(parameters.begin(), parameters.end(), 
std::back_inserter(make_tuple_input)); auto make_tuple = graph->NewCNode(make_tuple_input); MS_EXCEPTION_IF_NULL(make_tuple); - MS_LOG(INFO) << "new make tuple [" << make_tuple->DebugString() << "] of parameters"; + MS_LOG(INFO) << "New make tuple [" << make_tuple->DebugString() << "] of parameters"; return make_tuple; } @@ -397,14 +403,20 @@ void DumpGraphOutput(const Any &any, size_t recurse_level = 0) { GraphId SessionBasic::graph_sum_ = 0; -CNodePtr SessionBasic::CreateNewCNode(const CNodePtr &cnode, KernelGraph *graph) { +CNodePtr SessionBasic::CreateNewCNode(const CNodePtr &cnode, bool valid_input, KernelGraph *graph, + bool *from_other_graph, + std::unordered_map *other_graph_cnode) { MS_EXCEPTION_IF_NULL(cnode); MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(from_other_graph); + MS_EXCEPTION_IF_NULL(other_graph_cnode); + *from_other_graph = false; // get primitive of old node auto prim = AnfAlgo::GetCNodePrimitive(cnode); MS_EXCEPTION_IF_NULL(prim); // push attr to inputs[0] of new cnode std::vector cnode_inputs = {std::make_shared(std::make_shared(*prim))}; + // if has multiple depends,only select first depend as parameter for (size_t input_idx = 1; input_idx < cnode->inputs().size(); input_idx++) { auto anf = cnode->inputs()[input_idx]; MS_EXCEPTION_IF_NULL(anf); @@ -412,6 +424,9 @@ CNodePtr SessionBasic::CreateNewCNode(const CNodePtr &cnode, KernelGraph *graph) if (graph->GetBackendAnfByFrontAnf(anf) != nullptr) { cnode_inputs.emplace_back(graph->GetBackendAnfByFrontAnf(anf)); continue; + } else if (other_graph_cnode->find(anf) != other_graph_cnode->end()) { + cnode_inputs.push_back((*other_graph_cnode)[anf]); + continue; } else if (anf->isa() && !IsValueNode(anf)) { // if input is a value node, auto new_value_node = CreateNewValueNode(anf, graph); @@ -421,38 +436,60 @@ CNodePtr SessionBasic::CreateNewCNode(const CNodePtr &cnode, KernelGraph *graph) continue; } else if (anf->isa()) { // if anf is a parameter - 
cnode_inputs.emplace_back(CreateNewParameterFromParameter(anf, graph)); + auto new_parameter = CreateNewParameterFromParameter(anf, valid_input, graph); + cnode_inputs.push_back(new_parameter); + if (GetGraphIdByNode(anf) == kInvalidGraphId) { + graph->FrontBackendlMapAdd(anf, new_parameter); + } else { + (*other_graph_cnode)[anf] = new_parameter; + } continue; } else if (anf->isa()) { + *from_other_graph = true; // the input node is a cnode from other graph - cnode_inputs.emplace_back(CreateNewParameterFromCNode(anf, graph)); + auto parameter_from_cnode = CreateNewParameterFromCNode(anf, valid_input, graph); + cnode_inputs.push_back(parameter_from_cnode); + (*other_graph_cnode)[anf] = parameter_from_cnode; continue; } - MS_LOG(EXCEPTION) << "unexpected input[" << anf->DebugString() << "]"; + MS_LOG(EXCEPTION) << "Unexpected input[" << anf->DebugString() << "]"; } - return graph->NewCNode(cnode_inputs); + TraceManager::DebugTrace(std::make_shared(cnode->debug_info())); + auto new_cnode = graph->NewCNode(cnode_inputs); + TraceManager::EndTrace(); + return new_cnode; } KernelGraphPtr SessionBasic::ConstructKernelGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) { + std::unordered_map other_graph_cnode; auto graph = std::make_shared(); graph->set_graph_id(graph_sum_); + MS_LOG(INFO) << "Create graph: " << graph_sum_; + size_t from_other_graph_depend_num = 0; for (const auto &node : lst) { MS_EXCEPTION_IF_NULL(node); - MS_LOG(DEBUG) << "start create new cnode,node = " << node->DebugString(); + MS_LOG(DEBUG) << "Start create new cnode, node = " << node->DebugString(); if (!node->isa()) { - MS_LOG(EXCEPTION) << "Inst node " << node->DebugString() << " is not CNode"; + MS_LOG(EXCEPTION) << "Node " << node->DebugString() << " is not CNode"; } auto cnode = node->cast(); MS_EXCEPTION_IF_NULL(cnode); - TraceManager::DebugTrace(std::make_shared(cnode->debug_info())); // create a new cnode object - auto new_cnode = CreateNewCNode(cnode, graph.get()); + bool 
from_other_graph = false; + // only first depend from other graph can create + bool valid_input = true; + if (from_other_graph_depend_num != 0 && AnfAlgo::CheckPrimitiveType(node, prim::kPrimDepend)) { + valid_input = false; + } + auto new_cnode = CreateNewCNode(cnode, valid_input, graph.get(), &from_other_graph, &other_graph_cnode); + if (AnfAlgo::CheckPrimitiveType(node, prim::kPrimDepend) && from_other_graph) { + from_other_graph_depend_num++; + } MS_EXCEPTION_IF_NULL(new_cnode); new_cnode->set_abstract(cnode->abstract()); new_cnode->set_scope(cnode->scope()); // record map relations between anf from ME and new anf node used in backend graph->FrontBackendlMapAdd(node, new_cnode); - TraceManager::EndTrace(); } // add a make_tuple at the end of graph as output graph->set_output(ConstructOutput(outputs, graph)); @@ -631,12 +668,15 @@ void SessionBasic::ToTensorPtr(const OpRunInfo &op_run_info, std::vector &graph) { MS_EXCEPTION_IF_NULL(graph); std::vector output_args; - auto FindEqu = [graph](const AnfNodePtr &out) -> AnfNodePtr { + auto FindEqu = [graph, outputs](const AnfNodePtr &out) -> AnfNodePtr { auto backend_anf = graph->GetBackendAnfByFrontAnf(out); if (backend_anf != nullptr) { return backend_anf; } - MS_LOG(EXCEPTION) << "Can not find the node in the equiv map!"; + for (const auto &output : outputs) { + MS_LOG(INFO) << "output:" << output->DebugString(); + } + MS_LOG(EXCEPTION) << "Can't find the node in the equiv map!"; }; output_args.push_back(NewValueNode(prim::kPrimMakeTuple)); (void)std::transform(outputs.begin(), outputs.end(), std::back_inserter(output_args), diff --git a/mindspore/ccsrc/session/session_basic.h b/mindspore/ccsrc/session/session_basic.h old mode 100644 new mode 100755 index 9aadb78cb2..f1872e375c --- a/mindspore/ccsrc/session/session_basic.h +++ b/mindspore/ccsrc/session/session_basic.h @@ -69,14 +69,15 @@ class SessionBasic { std::shared_ptr ConstructKernelGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs); - CNodePtr 
CreateNewCNode(const CNodePtr &cnode, KernelGraph *graph); + CNodePtr CreateNewCNode(const CNodePtr &cnode, bool valid_input, KernelGraph *graph, bool *from_other_graph, + std::unordered_map *other_graph_cnode); // set parameters of final graph virtual GraphId SetFinalGraphInput(const std::vector &) { return kInvalidGraphId; } // set output of final graph virtual void SetFinalGraphOutput(const BaseRef &) {} // insert switch and set the relative active ops - virtual void SwitchCompile(GraphId, GraphId, GraphId) {} + virtual void SwitchCompile(GraphId, GraphId, GraphId, const AnfNodePtr &) {} // set args of child graph.the arg maybe come from a output of other child graphs,or from final graph's parameter virtual void SetChildGraphInput(GraphId, const VectorRef &) {} // get graph id in child graphs by ME front anf node pointer diff --git a/mindspore/ccsrc/vm/backend.cc b/mindspore/ccsrc/vm/backend.cc old mode 100644 new mode 100755 index 9355cca99c..e69d25d2dc --- a/mindspore/ccsrc/vm/backend.cc +++ b/mindspore/ccsrc/vm/backend.cc @@ -136,7 +136,7 @@ void MsBackend::SetSwitchGraph() { MS_LOG(EXCEPTION) << "cond not a anf node:" << curr_switch_.ToString(); } MS_LOG(DEBUG) << "switch compile:" << cond_g << ", " << true_g << ", " << false_g; - sess_->SwitchCompile(cond_g, true_g, false_g); + sess_->SwitchCompile(cond_g, true_g, false_g, utils::cast(curr_switch_)); } is_switch_call_ = false; MS_LOG(DEBUG) << "end SetSwitchGraph:" << curr_cond << ", " << is_switch_call_; From 2bef22d8a379aa7342517895dabacbf13174fc68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B9=89=E5=B3=B0=E6=BD=98?= Date: Fri, 10 Apr 2020 10:54:31 +0800 Subject: [PATCH 155/367] fix bprop cache caused error with variable params --- mindspore/ccsrc/optimizer/ad/kprim.cc | 9 +++++++-- tests/ut/python/pynative_mode/test_stop_gradient.py | 12 ++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/optimizer/ad/kprim.cc b/mindspore/ccsrc/optimizer/ad/kprim.cc index 
4576cc1ea9..2c8ddbfa82 100644 --- a/mindspore/ccsrc/optimizer/ad/kprim.cc +++ b/mindspore/ccsrc/optimizer/ad/kprim.cc @@ -92,9 +92,11 @@ FuncGraphPtr KPrim::KPrimitive(const ValueNodePtr &value_node, const pipeline::R return nullptr; } + bool is_faked_bprop = false; auto bprop_fg = GetBprop(prim); if (bprop_fg == nullptr) { bprop_fg = FakeBprop(value_node, resources); + is_faked_bprop = true; } auto expanded_fg = BpropToK(prim, bprop_fg); @@ -104,8 +106,11 @@ FuncGraphPtr KPrim::KPrimitive(const ValueNodePtr &value_node, const pipeline::R << trace::GetDebugInfo(bprop_fg->debug_info()); } - // Set bprop_g graph cache - bprop_registry_[prim] = expanded_fg; + // To support primitives with variable params, do not cache faked bprop + if (!is_faked_bprop) { + // Set bprop_g graph cache + bprop_registry_[prim] = expanded_fg; + } return expanded_fg; } diff --git a/tests/ut/python/pynative_mode/test_stop_gradient.py b/tests/ut/python/pynative_mode/test_stop_gradient.py index b274b3988a..a26d635aad 100644 --- a/tests/ut/python/pynative_mode/test_stop_gradient.py +++ b/tests/ut/python/pynative_mode/test_stop_gradient.py @@ -366,3 +366,15 @@ def test_stop_gradient_11(): with pytest.raises(RuntimeError): bprop(PrimWithNoBprop_(), Tensor(np.ones([2]).astype(np.float32)), Tensor(np.ones([2]).astype(np.float32))) + +def test_stop_print(): + class StopPrint(nn.Cell): + def __init__(self): + super(StopPrint, self).__init__() + self.printm = P.Print() + def construct(self, x, y): + self.printm("StopPrint", x) + self.printm(y) + return x, y + C.grad_all(StopPrint())(Tensor(np.ones([2]).astype(np.float32)), + Tensor(np.ones([2]).astype(np.float32))) From d8b9442ab862b1af3a4b302c7e1bae29d63d6e7b Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Tue, 7 Apr 2020 17:44:07 +0800 Subject: [PATCH 156/367] dataset_sink_mode is supported in model.eval() and not in model.train() in pynative mode --- mindspore/train/model.py | 17 +++++++++++++---- 1 file changed, 13 
insertions(+), 4 deletions(-) diff --git a/mindspore/train/model.py b/mindspore/train/model.py index bcfd897f58..657c84de65 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -206,6 +206,8 @@ class Model: function respectively. callbacks (list): List of callback object. Callbacks which should be executed while training. Default: None. dataset_sink_mode (bool): Determines whether to pass the data through dataset channel. Default: True. + Configure pynative mode, the training process will be performed with + dataset not sink. """ epoch = check_int_positive(epoch) self._train_network.set_train() @@ -227,8 +229,13 @@ class Model: cb_params.train_dataset = train_dataset cb_params.list_callback = list_callback - if dataset_sink_mode and context.get_context("mode") == context.GRAPH_MODE: - self._train_dataset_sink_process(epoch, train_dataset, list_callback, cb_params) + if dataset_sink_mode: + if context.get_context("mode") == context.PYNATIVE_MODE: + logger.warning("The pynative mode cannot support dataset sink mode currently." + "So the training process will be performed with dataset not sink.") + self._train_process(epoch, train_dataset, list_callback, cb_params) + else: + self._train_dataset_sink_process(epoch, train_dataset, list_callback, cb_params) else: self._train_process(epoch, train_dataset, list_callback, cb_params) @@ -349,7 +356,7 @@ class Model: """ Training API where the iteration is controlled by python front-end. - Configure to pynative mode, the training will be performed with dataset non-sink mode. + When setting pynative mode, the training process will be performed with dataset not sink. Note: CPU is not supported when dataset_sink_mode is true. @@ -363,6 +370,8 @@ class Model: function respectively. callbacks (list): List of callback object. Callbacks which should be excuted while training. Default: None. dataset_sink_mode (bool): Determines whether to pass the data through dataset channel. Default: True. 
+ Configure pynative mode, the training process will be performed with + dataset not sink. Examples: @@ -508,7 +517,7 @@ class Model: self._clear_metrics() - if dataset_sink_mode and context.get_context("mode") == context.GRAPH_MODE: + if dataset_sink_mode: return self._eval_dataset_sink_process(valid_dataset, list_callback, cb_params) return self._eval_process(valid_dataset, list_callback, cb_params) From c1ad92ff4dca0a225203edc899d85b578fbc7395 Mon Sep 17 00:00:00 2001 From: jojobugfree Date: Fri, 10 Apr 2020 14:26:21 +0800 Subject: [PATCH 157/367] remove python3.6 support in package.sh --- package.sh | 8 +++----- requirements.txt | 1 - 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/package.sh b/package.sh index 0c75a1bbfd..f49f15632f 100755 --- a/package.sh +++ b/package.sh @@ -45,18 +45,16 @@ PYTHON_VERSION=$("${PYTHON}" -V 2>&1 | awk '{print $2}' | cut -d. -f-2) if [[ $(uname) == "Linux" ]]; then if [[ "${PYTHON_VERSION}" == "3.7" ]]; then PY_TAGS="cp37-cp37m" - elif [[ "${PYTHON_VERSION}" == "3.6" ]]; then - PY_TAGS="cp36-cp36m" else - echo "Could not find 'Python 3.6' or 'Python 3.7'" + echo "Could not find 'Python 3.7'" exit 1 fi PLATFORM_TAG=$(to_lower "$(uname)_$(uname -m)") elif [[ $(uname) == "Darwin" ]]; then - if [[ "${PYTHON_VERSION}" == "3.7" || "${PYTHON_VERSION}" == "3.6" ]]; then + if [[ "${PYTHON_VERSION}" == "3.7" ]]; then PY_TAGS="py3-none" else - echo "Could not find 'Python 3.6' or 'Python 3.7'" + echo "Could not find 'Python 3.7'" exit 1 fi PLATFORM_TAG="any" diff --git a/requirements.txt b/requirements.txt index e4b61f2b6f..e182cd7a3b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,6 @@ protobuf >= 3.8.0 asttokens >= 1.1.13 pillow >= 6.2.0 scipy >= 1.3.3 -dataclasses >= 0.6 easydict >= 1.9 sympy >= 1.4 cffi >= 1.13.2 From f5ee197b6c26eecc58cac055bf8adeb3c4efd100 Mon Sep 17 00:00:00 2001 From: zjun Date: Fri, 10 Apr 2020 15:24:57 +0800 Subject: [PATCH 158/367] Modify custom op register --- 
.../st/ops/custom_ops_tbe/cus_conv2d_impl.py | 109 ++++-------------- tests/st/ops/custom_ops_tbe/square_impl.py | 61 +++------- 2 files changed, 38 insertions(+), 132 deletions(-) diff --git a/tests/st/ops/custom_ops_tbe/cus_conv2d_impl.py b/tests/st/ops/custom_ops_tbe/cus_conv2d_impl.py index 54f6954a18..04ac7c2ff7 100644 --- a/tests/st/ops/custom_ops_tbe/cus_conv2d_impl.py +++ b/tests/st/ops/custom_ops_tbe/cus_conv2d_impl.py @@ -13,95 +13,28 @@ # limitations under the License. # ============================================================================ from tests.st.ops.custom_ops_tbe.conv2d import conv2d -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType -@op_info_register("""{ - "op_name": "Cus_Conv2D", - "imply_type": "TBE", - "fusion_type": "CONVLUTION", - "async_flag": false, - "binfile_name": "conv2d.so", - "compute_cost": 10, - "kernel_name": "Cus_Conv2D", - "partial_flag": true, - "attr": [ - { - "name": "stride", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "pad_list", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "dilation", - "param_type": "required", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16" - ], - "format": [ - "FracZ" - ], - "name": "filter", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16" - ], - "format": [ - "DefaultFormat" - ], - "name": "bias", - "need_compile": false, - "param_type": "optional", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "y", - "need_compile": true, - "param_type": "required", 
- "shape": "all" - } - ] -}""") +cus_conv2D_op_info = TBERegOp("Cus_Conv2D") \ + .fusion_type("CONVLUTION") \ + .async_flag(False) \ + .binfile_name("conv2d.so") \ + .compute_cost(10) \ + .kernel_name("Cus_Conv2D") \ + .partial_flag(True) \ + .attr("stride", "required", "listInt", "all") \ + .attr("pad_list", "required", "listInt", "all") \ + .attr("dilation", "required", "listInt", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "filter", False, "required", "all") \ + .input(2, "bias", False, "optional", "all") \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_FracZ, DataType.F32_Default, DataType.F16_5HD) \ + .get_op_info() + + +@op_info_register(cus_conv2D_op_info) def Cus_Conv2D(inputs, weights, bias, outputs, strides, pads, dilations, kernel_name="conv2d"): conv2d(inputs, weights, bias, outputs, strides, pads, dilations, - kernel_name) \ No newline at end of file + kernel_name) diff --git a/tests/st/ops/custom_ops_tbe/square_impl.py b/tests/st/ops/custom_ops_tbe/square_impl.py index e5992eff1c..f3a1e0751d 100644 --- a/tests/st/ops/custom_ops_tbe/square_impl.py +++ b/tests/st/ops/custom_ops_tbe/square_impl.py @@ -18,11 +18,12 @@ from topi import generic import te.lang.cce from topi.cce import util from te.platform.fusion_manager import fusion_manager -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType # shape size limit for aicore is 2**31 SHAPE_SIZE_LIMIT = 200000000 + @fusion_manager.register("square") def square_compute(input_x, output_y, kernel_name="square"): """ @@ -46,49 +47,21 @@ def square_compute(input_x, output_y, kernel_name="square"): res = te.lang.cce.vmul(input_x, input_x) return res -@op_info_register("""{ - "op_name": "CusSquare", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "square.so", - "compute_cost": 10, - "kernel_name": "CusSquare", - 
"partial_flag": true, - "attr": [ - - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32" - ], - "format": [ - "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32" - ], - "format": [ - "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +cus_conv2D_op_info = TBERegOp("CusSquare") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("square.so") \ + .compute_cost(10) \ + .kernel_name("CusSquare") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(cus_conv2D_op_info) def CusSquare(input_x, output_y, kernel_name="square"): """ algorithm: square From 8cd3308a4c2da0360a4097f59bc4de962f693742 Mon Sep 17 00:00:00 2001 From: lizhenyu Date: Fri, 10 Apr 2020 16:54:17 +0800 Subject: [PATCH 159/367] change package name akg to _akg --- mindspore/{akg => _akg}/__init__.py | 12 ++++----- mindspore/{akg => _akg}/gpu/__init__.py | 0 mindspore/{akg => _akg}/gpu/cast.py | 10 +++---- .../{akg => _akg}/gpu/default_schedule.py | 4 +-- mindspore/{akg => _akg}/gpu/equal.py | 10 +++---- mindspore/{akg => _akg}/gpu/mean.py | 4 +-- mindspore/{akg => _akg}/gpu/mean_grad.py | 8 +++--- mindspore/{akg => _akg}/gpu/mul.py | 6 ++--- mindspore/{akg => _akg}/gpu/relu6.py | 6 ++--- mindspore/{akg => _akg}/gpu/relu6_grad.py | 4 +-- mindspore/{akg => _akg}/gpu/squeeze.py | 4 +-- mindspore/{akg => _akg}/gpu/squeeze_grad.py | 2 +- mindspore/{akg => _akg}/gpu/tile.py | 10 +++---- mindspore/{akg => _akg}/message.py | 10 +++---- mindspore/{akg => _akg}/op_build.py | 14 +++++----- mindspore/{akg => _akg}/ops/__init__.py | 0 mindspore/{akg => _akg}/ops/array/__init__.py | 0 mindspore/{akg => _akg}/ops/array/tile.py | 10 +++---- 
mindspore/{akg => _akg}/ops/math/__init__.py | 0 mindspore/{akg => _akg}/ops/math/cast.py | 10 +++---- mindspore/{akg => _akg}/ops/math/equal.py | 26 +++++++++---------- mindspore/{akg => _akg}/ops/math/mean.py | 14 +++++----- mindspore/{akg => _akg}/ops/math/mul.py | 8 +++--- mindspore/{akg => _akg}/ops/math/sub.py | 10 +++---- mindspore/{akg => _akg}/ops/math/sum.py | 14 +++++----- mindspore/{akg => _akg}/save_gpu_param.py | 8 +++--- mindspore/{akg => _akg}/utils/__init__.py | 0 mindspore/{akg => _akg}/utils/dsl_create.py | 16 ++++++------ .../{akg => _akg}/utils/format_transform.py | 14 +++++----- .../{akg => _akg}/utils/validation_check.py | 2 +- .../parallel_compile/multi_compiler.py | 2 +- mindspore/ccsrc/kernel/common_utils.h | 2 +- package.sh | 8 +++--- setup.py | 2 +- 34 files changed, 125 insertions(+), 125 deletions(-) rename mindspore/{akg => _akg}/__init__.py (88%) rename mindspore/{akg => _akg}/gpu/__init__.py (100%) rename mindspore/{akg => _akg}/gpu/cast.py (86%) rename mindspore/{akg => _akg}/gpu/default_schedule.py (94%) rename mindspore/{akg => _akg}/gpu/equal.py (85%) rename mindspore/{akg => _akg}/gpu/mean.py (97%) rename mindspore/{akg => _akg}/gpu/mean_grad.py (95%) rename mindspore/{akg => _akg}/gpu/mul.py (93%) rename mindspore/{akg => _akg}/gpu/relu6.py (95%) rename mindspore/{akg => _akg}/gpu/relu6_grad.py (97%) rename mindspore/{akg => _akg}/gpu/squeeze.py (96%) rename mindspore/{akg => _akg}/gpu/squeeze_grad.py (98%) rename mindspore/{akg => _akg}/gpu/tile.py (85%) rename mindspore/{akg => _akg}/message.py (94%) rename mindspore/{akg => _akg}/op_build.py (88%) rename mindspore/{akg => _akg}/ops/__init__.py (100%) rename mindspore/{akg => _akg}/ops/array/__init__.py (100%) rename mindspore/{akg => _akg}/ops/array/tile.py (84%) rename mindspore/{akg => _akg}/ops/math/__init__.py (100%) rename mindspore/{akg => _akg}/ops/math/cast.py (83%) rename mindspore/{akg => _akg}/ops/math/equal.py (63%) rename mindspore/{akg => 
_akg}/ops/math/mean.py (82%) rename mindspore/{akg => _akg}/ops/math/mul.py (86%) rename mindspore/{akg => _akg}/ops/math/sub.py (86%) rename mindspore/{akg => _akg}/ops/math/sum.py (79%) rename mindspore/{akg => _akg}/save_gpu_param.py (95%) rename mindspore/{akg => _akg}/utils/__init__.py (100%) rename mindspore/{akg => _akg}/utils/dsl_create.py (91%) rename mindspore/{akg => _akg}/utils/format_transform.py (86%) rename mindspore/{akg => _akg}/utils/validation_check.py (99%) diff --git a/mindspore/akg/__init__.py b/mindspore/_akg/__init__.py similarity index 88% rename from mindspore/akg/__init__.py rename to mindspore/_akg/__init__.py index a0c0364bd6..e3dceaf35e 100644 --- a/mindspore/akg/__init__.py +++ b/mindspore/_akg/__init__.py @@ -18,7 +18,7 @@ import sys import os def AKGAddPath(): - """akg add path.""" + """_akg add path.""" pwd = os.path.dirname(os.path.realpath(__file__)) tvm_path = os.path.realpath(pwd) if tvm_path not in sys.path: @@ -32,12 +32,12 @@ class AKGMetaPathFinder: """class AKGMetaPath finder.""" def find_module(self, fullname, path=None): - """method akg find module.""" - if fullname.startswith("akg.tvm"): - rname = fullname[4:] + """method _akg find module.""" + if fullname.startswith("_akg.tvm"): + rname = fullname[5:] return AKGMetaPathLoader(rname) - if fullname.startswith("akg.topi"): - rname = fullname[4:] + if fullname.startswith("_akg.topi"): + rname = fullname[5:] return AKGMetaPathLoader(rname) return None diff --git a/mindspore/akg/gpu/__init__.py b/mindspore/_akg/gpu/__init__.py similarity index 100% rename from mindspore/akg/gpu/__init__.py rename to mindspore/_akg/gpu/__init__.py diff --git a/mindspore/akg/gpu/cast.py b/mindspore/_akg/gpu/cast.py similarity index 86% rename from mindspore/akg/gpu/cast.py rename to mindspore/_akg/gpu/cast.py index 458772a803..d6b38b6e9b 100644 --- a/mindspore/akg/gpu/cast.py +++ b/mindspore/_akg/gpu/cast.py @@ -14,9 +14,9 @@ """cast""" import logging -import akg.tvm -from akg.ops.math import 
cast -from akg.topi.generic import schedule_elemwise +import _akg.tvm +from _akg.ops.math import cast +from _akg.topi.generic import schedule_elemwise def Cast(x, dst_type): """cast.""" @@ -34,10 +34,10 @@ def gpu_schedule_Cast(outs): sch (schedule.Schedule): The created schedule. """ device = 'cuda' - ctx = akg.tvm.context(device, 0) + ctx = _akg.tvm.context(device, 0) if not ctx.exist: logging.info("Skip because %s is not enabled", device) return None - with akg.tvm.target.create(device): + with _akg.tvm.target.create(device): sch = schedule_elemwise(outs) return sch diff --git a/mindspore/akg/gpu/default_schedule.py b/mindspore/_akg/gpu/default_schedule.py similarity index 94% rename from mindspore/akg/gpu/default_schedule.py rename to mindspore/_akg/gpu/default_schedule.py index 2e2892c055..811cc2d710 100644 --- a/mindspore/akg/gpu/default_schedule.py +++ b/mindspore/_akg/gpu/default_schedule.py @@ -15,7 +15,7 @@ """default schedule function for GPU""" from queue import Queue -import akg.tvm as tvm +import _akg.tvm as tvm DEFAULT_GPU_THREAD = 1024 @@ -31,7 +31,7 @@ def default_schedule(outs): sch (schedule.Schedule): The created schedule. """ if not isinstance(outs, tvm.tensor.Tensor) and not isinstance(outs, list): - raise ValueError("outs should be list of akg.tvm.tensor.Tensor or akg.tvm.tensor.Tensor") + raise ValueError("outs should be list of _akg.tvm.tensor.Tensor or _akg.tvm.tensor.Tensor") device = 'cuda' ctx = tvm.context(device, 0) if not ctx.exist: diff --git a/mindspore/akg/gpu/equal.py b/mindspore/_akg/gpu/equal.py similarity index 85% rename from mindspore/akg/gpu/equal.py rename to mindspore/_akg/gpu/equal.py index 05dce89622..3321c10b2c 100644 --- a/mindspore/akg/gpu/equal.py +++ b/mindspore/_akg/gpu/equal.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""equal""" -import akg.tvm -from akg.ops.math import equal -from akg.topi.generic import schedule_elemwise +import _akg.tvm +from _akg.ops.math import equal +from _akg.topi.generic import schedule_elemwise def Equal(x, y): """equal.""" @@ -32,9 +32,9 @@ def gpu_schedule_Equal(outs): sch (schedule.Schedule): The created schedule. """ device = 'cuda' - ctx = akg.tvm.context(device, 0) + ctx = _akg.tvm.context(device, 0) if not ctx.exist: raise SystemError("Skip because %s is not enabled" % device) - with akg.tvm.target.create(device): + with _akg.tvm.target.create(device): sch = schedule_elemwise(outs) return sch diff --git a/mindspore/akg/gpu/mean.py b/mindspore/_akg/gpu/mean.py similarity index 97% rename from mindspore/akg/gpu/mean.py rename to mindspore/_akg/gpu/mean.py index a68e929409..e9cdb6d551 100644 --- a/mindspore/akg/gpu/mean.py +++ b/mindspore/_akg/gpu/mean.py @@ -13,8 +13,8 @@ # limitations under the License. """mean op compute and schedule""" -import akg.tvm as tvm -from akg.ops.math.mean import mean +import _akg.tvm as tvm +from _akg.ops.math.mean import mean from .default_schedule import DEFAULT_GPU_THREAD def Mean(x, axis=None, keepdims=True): diff --git a/mindspore/akg/gpu/mean_grad.py b/mindspore/_akg/gpu/mean_grad.py similarity index 95% rename from mindspore/akg/gpu/mean_grad.py rename to mindspore/_akg/gpu/mean_grad.py index ef77690a5d..9d91ee3f40 100644 --- a/mindspore/akg/gpu/mean_grad.py +++ b/mindspore/_akg/gpu/mean_grad.py @@ -13,9 +13,9 @@ # limitations under the License. 
"""mean_grad""" -import akg.tvm as tvm -import akg -from akg.ops.math import mean +import _akg.tvm as tvm +import _akg +from _akg.ops.math import mean from .default_schedule import DEFAULT_GPU_THREAD @@ -30,7 +30,7 @@ def mean_ad(head, input_shape, axis, keepdims): if tensor_b.op.name == "mean_output": tensor_b = tensor_b.op.input_tensors[0] - jacs = list(akg.differentiate(tensor_b, [tensor_a], head)) + jacs = list(_akg.differentiate(tensor_b, [tensor_a], head)) return jacs[0] diff --git a/mindspore/akg/gpu/mul.py b/mindspore/_akg/gpu/mul.py similarity index 93% rename from mindspore/akg/gpu/mul.py rename to mindspore/_akg/gpu/mul.py index 975a237837..5c289a62a6 100644 --- a/mindspore/akg/gpu/mul.py +++ b/mindspore/_akg/gpu/mul.py @@ -13,9 +13,9 @@ # limitations under the License. """mul""" -import akg.topi as topi -import akg.tvm as tvm -from akg.ops.math import mul +import _akg.topi as topi +import _akg.tvm as tvm +from _akg.ops.math import mul def Mul(x, y): """mul.""" diff --git a/mindspore/akg/gpu/relu6.py b/mindspore/_akg/gpu/relu6.py similarity index 95% rename from mindspore/akg/gpu/relu6.py rename to mindspore/_akg/gpu/relu6.py index bdcf23f05a..9a0a3d7a45 100644 --- a/mindspore/akg/gpu/relu6.py +++ b/mindspore/_akg/gpu/relu6.py @@ -13,9 +13,9 @@ # limitations under the License. """relu6""" -import akg.topi as topi -import akg.tvm as tvm -from akg.topi import tag +import _akg.topi as topi +import _akg.tvm as tvm +from _akg.topi import tag @tvm.tag_scope(tag=tag.ELEMWISE) def topi_nn_relu6(x): diff --git a/mindspore/akg/gpu/relu6_grad.py b/mindspore/_akg/gpu/relu6_grad.py similarity index 97% rename from mindspore/akg/gpu/relu6_grad.py rename to mindspore/_akg/gpu/relu6_grad.py index e0590cf6ef..62aeabb4c0 100644 --- a/mindspore/akg/gpu/relu6_grad.py +++ b/mindspore/_akg/gpu/relu6_grad.py @@ -13,8 +13,8 @@ # limitations under the License. 
"""relu6 grad""" -import akg.topi as topi -import akg.tvm as tvm +import _akg.topi as topi +import _akg.tvm as tvm def ReLU6Grad(y_grad, x): """ diff --git a/mindspore/akg/gpu/squeeze.py b/mindspore/_akg/gpu/squeeze.py similarity index 96% rename from mindspore/akg/gpu/squeeze.py rename to mindspore/_akg/gpu/squeeze.py index 34fa423b8c..b5f55facaa 100644 --- a/mindspore/akg/gpu/squeeze.py +++ b/mindspore/_akg/gpu/squeeze.py @@ -13,8 +13,8 @@ # limitations under the License. """squeeze""" -import akg.topi as topi -import akg.tvm as tvm +import _akg.topi as topi +import _akg.tvm as tvm def Squeeze(x, axis=None): """ diff --git a/mindspore/akg/gpu/squeeze_grad.py b/mindspore/_akg/gpu/squeeze_grad.py similarity index 98% rename from mindspore/akg/gpu/squeeze_grad.py rename to mindspore/_akg/gpu/squeeze_grad.py index ef6a4242ba..8180ff9638 100644 --- a/mindspore/akg/gpu/squeeze_grad.py +++ b/mindspore/_akg/gpu/squeeze_grad.py @@ -13,7 +13,7 @@ # limitations under the License. """squeeze grad""" -import akg.topi as topi +import _akg.topi as topi def SqueezeGrad(y_grad, x_shape, axis=None): """ diff --git a/mindspore/akg/gpu/tile.py b/mindspore/_akg/gpu/tile.py similarity index 85% rename from mindspore/akg/gpu/tile.py rename to mindspore/_akg/gpu/tile.py index cd3c663f97..1eb6979b09 100644 --- a/mindspore/akg/gpu/tile.py +++ b/mindspore/_akg/gpu/tile.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. """tile""" -import akg.tvm -from akg.ops.array import tile -from akg.topi.generic import schedule_elemwise +import _akg.tvm +from _akg.ops.array import tile +from _akg.topi.generic import schedule_elemwise def Tile(x, multiples): """tile.""" @@ -31,9 +31,9 @@ def gpu_schedule_Tile(outs): sch (schedule.Schedule): The created schedule. 
""" device = 'cuda' - ctx = akg.tvm.context(device, 0) + ctx = _akg.tvm.context(device, 0) if not ctx.exist: raise SystemError("Skip because %s is not enabled" % device) - with akg.tvm.target.create(device): + with _akg.tvm.target.create(device): s = schedule_elemwise(outs) return s diff --git a/mindspore/akg/message.py b/mindspore/_akg/message.py similarity index 94% rename from mindspore/akg/message.py rename to mindspore/_akg/message.py index 86bdf2899c..4528771848 100644 --- a/mindspore/akg/message.py +++ b/mindspore/_akg/message.py @@ -20,9 +20,9 @@ import logging import traceback import os.path from pathlib import Path -import akg.tvm -from akg.utils import validation_check as vc_util -from akg.utils.dsl_create import TensorUtils +import _akg.tvm +from _akg.utils import validation_check as vc_util +from _akg.utils.dsl_create import TensorUtils from . import gpu from . import op_build @@ -67,7 +67,7 @@ def compilewithjson(json_str): tensor_shape = input_desc[0]['shape'] tensor_shape = (1,) if not tensor_shape else tensor_shape vc_util.shape_dtype_max_size_check(tensor_shape) - args[input_desc[0]['name']] = akg.tvm.placeholder( + args[input_desc[0]['name']] = _akg.tvm.placeholder( shape=tensor_shape, name=input_desc[0]['tensor_name'], dtype=input_desc[0]['data_type']) tsr.append(args[input_desc[0]['name']]) else: @@ -76,7 +76,7 @@ def compilewithjson(json_str): tensor_shape = tmp_desc['shape'] tensor_shape = (1,) if not tensor_shape else tensor_shape vc_util.shape_dtype_max_size_check(tensor_shape) - tmp_input.append(akg.tvm.placeholder( + tmp_input.append(_akg.tvm.placeholder( shape=tensor_shape, name=tmp_desc['tensor_name'], dtype=tmp_desc['data_type'])) args[input_desc[0]['name']] = tmp_input tsr = tsr + tmp_input diff --git a/mindspore/akg/op_build.py b/mindspore/_akg/op_build.py similarity index 88% rename from mindspore/akg/op_build.py rename to mindspore/_akg/op_build.py index e3d3ec2b78..44a250bd9e 100644 --- a/mindspore/akg/op_build.py +++ 
b/mindspore/_akg/op_build.py @@ -19,10 +19,10 @@ import types import typing import logging import traceback -import akg.tvm -import akg -from akg import save_gpu_param as gpu_utils -from akg.utils import validation_check as vc_util +import _akg.tvm +import _akg +from _akg import save_gpu_param as gpu_utils +from _akg.utils import validation_check as vc_util MS_CUDA_KERNEL_PATH = "/tmp/cuda_meta/" @@ -38,21 +38,21 @@ def op_build(opnames, computes, args, custom_schedule, device, kernel_name, attr return None schedule_name = 'gpu_schedule_' + opnames[0] - schedule_func = getattr(akg.gpu, schedule_name) + schedule_func = getattr(_akg.gpu, schedule_name) if not isinstance(schedule_func, (types.FunctionType, typing.Callable)): logging.error("no schedule func found %s", str(schedule_name)) return None ptx_file = os.path.realpath(MS_CUDA_KERNEL_PATH + kernel_name + ".ptx") if os.path.exists(ptx_file): - os.remove(ptx_file) + os.chmod(ptx_file, 0o600) try: with open(ptx_file, 'at') as file: fcntl.flock(file.fileno(), fcntl.LOCK_EX) file.seek(0, 2) if file.tell() == 0: s = schedule_func(computes) - foo = akg.tvm.build(s, args, device, name=kernel_name) + foo = _akg.tvm.build(s, args, device, name=kernel_name) ptx_code = foo.imported_modules[0].get_source("ptx") file.write(ptx_code) json_file = os.path.realpath(MS_CUDA_KERNEL_PATH + kernel_name + ".json") diff --git a/mindspore/akg/ops/__init__.py b/mindspore/_akg/ops/__init__.py similarity index 100% rename from mindspore/akg/ops/__init__.py rename to mindspore/_akg/ops/__init__.py diff --git a/mindspore/akg/ops/array/__init__.py b/mindspore/_akg/ops/array/__init__.py similarity index 100% rename from mindspore/akg/ops/array/__init__.py rename to mindspore/_akg/ops/array/__init__.py diff --git a/mindspore/akg/ops/array/tile.py b/mindspore/_akg/ops/array/tile.py similarity index 84% rename from mindspore/akg/ops/array/tile.py rename to mindspore/_akg/ops/array/tile.py index e60fcc4ffb..2fa485ea36 100644 --- 
a/mindspore/akg/ops/array/tile.py +++ b/mindspore/_akg/ops/array/tile.py @@ -13,12 +13,12 @@ # limitations under the License. """operator dsl function: tile""" -import akg.tvm -import akg.topi -from akg.utils import validation_check as vc_util +import _akg.tvm +import _akg.topi +from _akg.utils import validation_check as vc_util -@vc_util.check_input_type(akg.tvm.tensor.Tensor, (list, tuple)) +@vc_util.check_input_type(_akg.tvm.tensor.Tensor, (list, tuple)) def tile(data, multiples): """ Repeats the data in the specified dimensions according to the multiples. @@ -32,5 +32,5 @@ def tile(data, multiples): """ vc_util.check_shape(data.shape) vc_util.check_int_list(multiples, "multiples") - output = akg.topi.tile(data, multiples) + output = _akg.topi.tile(data, multiples) return output diff --git a/mindspore/akg/ops/math/__init__.py b/mindspore/_akg/ops/math/__init__.py similarity index 100% rename from mindspore/akg/ops/math/__init__.py rename to mindspore/_akg/ops/math/__init__.py diff --git a/mindspore/akg/ops/math/cast.py b/mindspore/_akg/ops/math/cast.py similarity index 83% rename from mindspore/akg/ops/math/cast.py rename to mindspore/_akg/ops/math/cast.py index 7266fd60c1..78140bfe27 100644 --- a/mindspore/akg/ops/math/cast.py +++ b/mindspore/_akg/ops/math/cast.py @@ -13,12 +13,12 @@ # limitations under the License. """operator dsl function: cast""" -import akg.tvm -import akg.topi -from akg.utils import validation_check as vc_util +import _akg.tvm +import _akg.topi +from _akg.utils import validation_check as vc_util -@vc_util.check_input_type(akg.tvm.tensor.Tensor, str) +@vc_util.check_input_type(_akg.tvm.tensor.Tensor, str) def cast(data, dst_type): """ cast data to target type. @@ -31,6 +31,6 @@ def cast(data, dst_type): tvm.tensor.Tensor, type is dst_type. 
""" vc_util.check_shape(data.shape) - out = akg.topi.cast(data, dst_type) + out = _akg.topi.cast(data, dst_type) return out diff --git a/mindspore/akg/ops/math/equal.py b/mindspore/_akg/ops/math/equal.py similarity index 63% rename from mindspore/akg/ops/math/equal.py rename to mindspore/_akg/ops/math/equal.py index eb446ac52b..2dbb1ba733 100644 --- a/mindspore/akg/ops/math/equal.py +++ b/mindspore/_akg/ops/math/equal.py @@ -13,13 +13,13 @@ # limitations under the License. """operator dsl function: equal""" -import akg.tvm -import akg.topi -from akg.utils.dsl_create import produce_shapes -from akg.utils import validation_check as vc_util +import _akg.tvm +import _akg.topi +from _akg.utils.dsl_create import produce_shapes +from _akg.utils import validation_check as vc_util -@vc_util.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor) +@vc_util.check_input_type(_akg.tvm.tensor.Tensor, _akg.tvm.tensor.Tensor) def equal(input1, input2): """ check whether input1 equals to input2. @@ -42,13 +42,13 @@ def equal(input1, input2): dtype = input1.dtype # get equal compute - t_value = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(1, dtype), "T") - f_value = akg.tvm.compute(shape, lambda *indice: akg.tvm.const(0, dtype), "F") - - input1_bro = akg.topi.broadcast_to(input1, shape) - input2_bro = akg.topi.broadcast_to(input2, shape) - c_out = akg.tvm.compute(shape, lambda *indice: akg.tvm.expr.Select(input1_bro[indice] == input2_bro[indice], - t_value[indice], f_value[indice]), name="C") - res = akg.tvm.compute(shape, lambda *indice: c_out(*indice).astype("bool"), name="res") + t_value = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.const(1, dtype), "T") + f_value = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.const(0, dtype), "F") + + input1_bro = _akg.topi.broadcast_to(input1, shape) + input2_bro = _akg.topi.broadcast_to(input2, shape) + c_out = _akg.tvm.compute(shape, lambda *indice: _akg.tvm.expr.Select(input1_bro[indice] == input2_bro[indice], + 
t_value[indice], f_value[indice]), name="C") + res = _akg.tvm.compute(shape, lambda *indice: c_out(*indice).astype("bool"), name="res") return res diff --git a/mindspore/akg/ops/math/mean.py b/mindspore/_akg/ops/math/mean.py similarity index 82% rename from mindspore/akg/ops/math/mean.py rename to mindspore/_akg/ops/math/mean.py index a26bc29087..8764387d33 100644 --- a/mindspore/akg/ops/math/mean.py +++ b/mindspore/_akg/ops/math/mean.py @@ -13,14 +13,14 @@ # limitations under the License. """operator dsl function: mean""" -import akg.topi -import akg.tvm -from akg.utils import format_transform as ft_util -from akg.utils import validation_check as vc_util -from akg.ops.math import sum +import _akg.topi +import _akg.tvm +from _akg.utils import format_transform as ft_util +from _akg.utils import validation_check as vc_util +from _akg.ops.math import sum -@vc_util.check_input_type(akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None))) +@vc_util.check_input_type(_akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None))) def mean(data, axis=None, keepdims=False): """ Computes the mean of the values of a Tensor over the whole dataset. @@ -42,6 +42,6 @@ def mean(data, axis=None, keepdims=False): for i in axis: count *= shape[i] output, _ = sum.sum_value(data, axis, keepdims) - res = akg.topi.divide(output, count) + res = _akg.topi.divide(output, count) return res diff --git a/mindspore/akg/ops/math/mul.py b/mindspore/_akg/ops/math/mul.py similarity index 86% rename from mindspore/akg/ops/math/mul.py rename to mindspore/_akg/ops/math/mul.py index 8377a63d69..a690089da2 100644 --- a/mindspore/akg/ops/math/mul.py +++ b/mindspore/_akg/ops/math/mul.py @@ -13,11 +13,11 @@ # limitations under the License. 
"""operator dsl function: mul""" -import akg.topi -from akg.utils import validation_check as vc_util +import _akg.topi +from _akg.utils import validation_check as vc_util -@vc_util.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor) +@vc_util.check_input_type(_akg.tvm.tensor.Tensor, _akg.tvm.tensor.Tensor) def mul(l_input, r_input): """ Calculate x * y element-wise. @@ -38,6 +38,6 @@ def mul(l_input, r_input): vc_util.check_shape(shape2) vc_util.auto_broadcast_check(shape1, shape2) vc_util.elemwise_dtype_check(l_input.dtype, r_input.dtype) - output = akg.topi.multiply(l_input, r_input) + output = _akg.topi.multiply(l_input, r_input) return output diff --git a/mindspore/akg/ops/math/sub.py b/mindspore/_akg/ops/math/sub.py similarity index 86% rename from mindspore/akg/ops/math/sub.py rename to mindspore/_akg/ops/math/sub.py index a4a85b0a09..6ae2ee51ef 100644 --- a/mindspore/akg/ops/math/sub.py +++ b/mindspore/_akg/ops/math/sub.py @@ -13,12 +13,12 @@ # limitations under the License. """operator dsl function: sub""" -import akg.topi -import akg.tvm -from akg.utils import validation_check as vc_util +import _akg.topi +import _akg.tvm +from _akg.utils import validation_check as vc_util -@vc_util.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor) +@vc_util.check_input_type(_akg.tvm.tensor.Tensor, _akg.tvm.tensor.Tensor) def sub(data1, data2): """ Computes data1 - data2 elementwise, broadcast is supported. 
@@ -35,6 +35,6 @@ def sub(data1, data2): vc_util.check_shape(data2.shape) vc_util.auto_broadcast_check(data1.shape, data2.shape) - res = akg.topi.subtract(data1, data2) + res = _akg.topi.subtract(data1, data2) return res diff --git a/mindspore/akg/ops/math/sum.py b/mindspore/_akg/ops/math/sum.py similarity index 79% rename from mindspore/akg/ops/math/sum.py rename to mindspore/_akg/ops/math/sum.py index ea71bab9c4..b9720469a6 100644 --- a/mindspore/akg/ops/math/sum.py +++ b/mindspore/_akg/ops/math/sum.py @@ -14,13 +14,13 @@ """operator dsl function: sum""" -import akg.topi -import akg.tvm -from akg.utils import format_transform as ft_util -from akg.utils import validation_check as vc_util +import _akg.topi +import _akg.tvm +from _akg.utils import format_transform as ft_util +from _akg.utils import validation_check as vc_util -@vc_util.check_input_type(akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None))) +@vc_util.check_input_type(_akg.tvm.tensor.Tensor, (list, tuple, int, type(None)), (bool, type(None))) def sum_value(inputs, axis=None, keepdims=False): """ Compute the sum of elements across dimensions of a tensor. 
@@ -38,8 +38,8 @@ def sum_value(inputs, axis=None, keepdims=False): vc_util.check_shape(inputs.shape) if not axis: - output = akg.topi.identity(inputs) + output = _akg.topi.identity(inputs) else: - output = akg.topi.sum(inputs, axis=axis, keepdims=keepdims) + output = _akg.topi.sum(inputs, axis=axis, keepdims=keepdims) return output diff --git a/mindspore/akg/save_gpu_param.py b/mindspore/_akg/save_gpu_param.py similarity index 95% rename from mindspore/akg/save_gpu_param.py rename to mindspore/_akg/save_gpu_param.py index 228bdf32ca..ed2c9fe23a 100644 --- a/mindspore/akg/save_gpu_param.py +++ b/mindspore/_akg/save_gpu_param.py @@ -15,9 +15,9 @@ """save gpu param""" import os import hashlib -import akg.tvm -from akg.tvm import schedule -from akg.utils import validation_check as vc_util +import _akg.tvm +from _akg.tvm import schedule +from _akg.utils import validation_check as vc_util def get_dim(dim, axis=True): @@ -66,7 +66,7 @@ def save_gpu_params(s, args, kernel_info): ptx_code = kernel_info[0] file_name = kernel_info[1] kernel_name = kernel_info[2] - ir = str(akg.tvm.lower(s, args, simple_mode=True)) + ir = str(_akg.tvm.lower(s, args, simple_mode=True)) file_path = os.path.realpath(file_name) if os.path.exists(file_path): os.remove(file_path) diff --git a/mindspore/akg/utils/__init__.py b/mindspore/_akg/utils/__init__.py similarity index 100% rename from mindspore/akg/utils/__init__.py rename to mindspore/_akg/utils/__init__.py diff --git a/mindspore/akg/utils/dsl_create.py b/mindspore/_akg/utils/dsl_create.py similarity index 91% rename from mindspore/akg/utils/dsl_create.py rename to mindspore/_akg/utils/dsl_create.py index aaea913143..9d27039b28 100644 --- a/mindspore/akg/utils/dsl_create.py +++ b/mindspore/_akg/utils/dsl_create.py @@ -13,8 +13,8 @@ # limitations under the License. 
"""dsl create helping function""" -import akg -from akg.utils import format_transform as ft_util +import _akg +from _akg.utils import format_transform as ft_util class TensorUtils: """Class for creating tensor.""" @@ -33,11 +33,11 @@ class TensorUtils: """update tensor attrs.""" tensor_attrs = cls.get_tensor_attrs(tensor) tensor_attrs.update(attrs) - tensor = akg.tvm.compute(tensor.shape, - lambda *indice: tensor[indice], - name=tensor.op.name, - tag=tensor.op.tag, - attrs=tensor_attrs) + tensor = _akg.tvm.compute(tensor.shape, + lambda *indice: tensor[indice], + name=tensor.op.name, + tag=tensor.op.tag, + attrs=tensor_attrs) return tensor @classmethod @@ -61,7 +61,7 @@ class TensorUtils: raise RuntimeError("Shape of the input_tensor and the output_tensor should be equal, " "but got %s and %s"%(input_tensor_shape, output_tensor_shape)) output_tensor = cls.update_tensor_attrs(output_tensor, {cls.CREATE_SCH_ONLY: 1}) - data_buf = akg.tvm.decl_buffer(input_tensor.shape, input_tensor.dtype, name=buffer_name) + data_buf = _akg.tvm.decl_buffer(input_tensor.shape, input_tensor.dtype, name=buffer_name) binds_info = {input_tensor: data_buf, output_tensor: data_buf} return output_tensor, binds_info diff --git a/mindspore/akg/utils/format_transform.py b/mindspore/_akg/utils/format_transform.py similarity index 86% rename from mindspore/akg/utils/format_transform.py rename to mindspore/_akg/utils/format_transform.py index 816cbcaadb..f83130a32a 100644 --- a/mindspore/akg/utils/format_transform.py +++ b/mindspore/_akg/utils/format_transform.py @@ -13,7 +13,7 @@ # limitations under the License. 
"""format transform function""" -import akg +import _akg def refine_reduce_axis(input, axis): """make reduce axis legal.""" @@ -43,11 +43,11 @@ def refine_reduce_axis(input, axis): def get_shape_from_tensor(data): - """translate akg.tvm.shape to list type in python.""" + """translate _akg.tvm.shape to list type in python.""" tvm_shape = data.shape py_shape = [] for i in tvm_shape: - if isinstance(i, akg.tvm.expr.Var): + if isinstance(i, _akg.tvm.expr.Var): py_shape.append(i) else: py_shape.append(i.value) @@ -55,10 +55,10 @@ def get_shape_from_tensor(data): def tvm_shape_to_list(tvm_shape): - """translate akg.tvm.shape to list type in python.""" + """translate _akg.tvm.shape to list type in python.""" py_shape = [] for i in tvm_shape: - if isinstance(i, akg.tvm.expr.Var): + if isinstance(i, _akg.tvm.expr.Var): py_shape.append(i) else: py_shape.append(i.value) @@ -67,9 +67,9 @@ def tvm_shape_to_list(tvm_shape): def get_shape(data): """get shape and save it as list.""" - if isinstance(data, akg.tvm.tensor.Tensor): + if isinstance(data, _akg.tvm.tensor.Tensor): shape = get_shape_from_tensor(data) - elif isinstance(data, akg.tvm.container.Array): + elif isinstance(data, _akg.tvm.container.Array): shape = tvm_shape_to_list(data) elif isinstance(data, int): shape = [data] diff --git a/mindspore/akg/utils/validation_check.py b/mindspore/_akg/utils/validation_check.py similarity index 99% rename from mindspore/akg/utils/validation_check.py rename to mindspore/_akg/utils/validation_check.py index 72494c5281..1231b3110e 100644 --- a/mindspore/akg/utils/validation_check.py +++ b/mindspore/_akg/utils/validation_check.py @@ -14,7 +14,7 @@ """validation check functions""" from functools import wraps, reduce -from akg.utils.format_transform import get_shape +from _akg.utils.format_transform import get_shape MAX_DATA_SIZE = 2 ** 31 diff --git a/mindspore/_extends/parallel_compile/multi_compiler.py b/mindspore/_extends/parallel_compile/multi_compiler.py index 542167888b..86e1b684d2 
100644 --- a/mindspore/_extends/parallel_compile/multi_compiler.py +++ b/mindspore/_extends/parallel_compile/multi_compiler.py @@ -32,7 +32,7 @@ def _compiletask(platform, *jsons): """ if platform == "AKG": - p = __import__("akg", globals(), locals(), ['ms'], 0) + p = __import__("_akg", globals(), locals(), ['ms'], 0) func = getattr(p.ms, "compilewithjson") for json_item in jsons: res = func(json_item) diff --git a/mindspore/ccsrc/kernel/common_utils.h b/mindspore/ccsrc/kernel/common_utils.h index 6e3635d904..07f191cc7b 100644 --- a/mindspore/ccsrc/kernel/common_utils.h +++ b/mindspore/ccsrc/kernel/common_utils.h @@ -37,7 +37,7 @@ constexpr auto kProcessorCuda = "cuda"; constexpr auto kJsonSuffix = ".json"; constexpr auto kInfoSuffix = ".info"; constexpr unsigned int AUTODIFF_COMPILE_OVERTIME = 600; -constexpr auto kAkgModule = "akg"; +constexpr auto kAkgModule = "_akg"; constexpr auto kArgDataformat = "data_format"; const std::vector support_devices = {"aicore", "aicpu", "cuda"}; diff --git a/package.sh b/package.sh index 0c75a1bbfd..685fa1205a 100755 --- a/package.sh +++ b/package.sh @@ -77,11 +77,11 @@ cp -rf "${BUILD_PATH}/../mindspore/ops" "${PACKAGE_PATH}/mindspore" cp -rf "${BUILD_PATH}/../mindspore/communication" "${PACKAGE_PATH}/mindspore" if [[ "X$2" = "Xgpu" ]]; then - echo "package akg when gpu enable." - cp -rf "${BASEPATH}/mindspore/akg" "${PACKAGE_PATH}" + echo "package _akg when gpu enable." 
+ cp -rf "${BASEPATH}/mindspore/_akg" "${PACKAGE_PATH}" if [[ -d "${BUILD_PATH}/mindspore/incubator-tvm" ]]; then - cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/topi/python/topi" "${PACKAGE_PATH}/akg" - cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/python/tvm" "${PACKAGE_PATH}/akg" + cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/topi/python/topi" "${PACKAGE_PATH}/_akg" + cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/python/tvm" "${PACKAGE_PATH}/_akg" fi fi diff --git a/setup.py b/setup.py index e009a9b312..221c6dc4f2 100644 --- a/setup.py +++ b/setup.py @@ -137,7 +137,7 @@ class BuildPy(build_py): super().run() mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'mindspore') update_permissions(mindspore_dir) - mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', 'akg') + mindspore_dir = os.path.join(pkg_dir, 'build', 'lib', '_akg') update_permissions(mindspore_dir) From d16756d33aafc5b42bc1660f066e9447d01cbb44 Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Tue, 7 Apr 2020 11:04:49 +0800 Subject: [PATCH 160/367] Reduce MindSpore docker image size Signed-off-by: leonwanghui --- docker/README.md | 12 +-- .../{ => 0.1.0-alpha}/Dockerfile | 0 docker/mindspore-cuda9.2/Dockerfile | 83 ------------------- .../0.1.0-alpha}/Dockerfile | 21 ++++- 4 files changed, 21 insertions(+), 95 deletions(-) rename docker/mindspore-cpu/{ => 0.1.0-alpha}/Dockerfile (100%) delete mode 100644 docker/mindspore-cuda9.2/Dockerfile rename docker/{mindspore-cuda10.1 => mindspore-gpu/0.1.0-alpha}/Dockerfile (78%) diff --git a/docker/README.md b/docker/README.md index 76eae12f88..c6851fe531 100644 --- a/docker/README.md +++ b/docker/README.md @@ -7,17 +7,11 @@ This folder hosts all the `Dockerfile` to build MindSpore container images with * CPU ``` - cd mindspore-cpu && docker build . -t mindspore/mindspore-cpu:0.1.0-alpha + cd mindspore-cpu/0.1.0-alpha && docker build . -t mindspore/mindspore-cpu:0.1.0-alpha ``` -* GPU (CUDA 9.2) +* GPU ``` - cd mindspore-cuda9.2 && docker build . 
-t mindspore/mindspore-cuda9.2:0.1.0-alpha - ``` - -* GPU (CUDA 10.1) - - ``` - cd mindspore-cuda10.1 && docker build . -t mindspore/mindspore-cuda10.1:0.1.0-alpha + cd mindspore-gpu/0.1.0-alpha && docker build . -t mindspore/mindspore-gpu:0.1.0-alpha ``` diff --git a/docker/mindspore-cpu/Dockerfile b/docker/mindspore-cpu/0.1.0-alpha/Dockerfile similarity index 100% rename from docker/mindspore-cpu/Dockerfile rename to docker/mindspore-cpu/0.1.0-alpha/Dockerfile diff --git a/docker/mindspore-cuda9.2/Dockerfile b/docker/mindspore-cuda9.2/Dockerfile deleted file mode 100644 index 6e40106396..0000000000 --- a/docker/mindspore-cuda9.2/Dockerfile +++ /dev/null @@ -1,83 +0,0 @@ -FROM nvidia/cuda:9.2-cudnn7-devel-ubuntu18.04 - -MAINTAINER leonwanghui - -# Set env -ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 -ENV CMAKE_ROOT_PATH /usr/local/cmake-3.14.1 -ENV PATH ${PYTHON_ROOT_PATH}/bin:${CMAKE_ROOT_PATH}/bin:/usr/local/bin:$PATH - -# Install base tools -RUN apt update \ - && DEBIAN_FRONTEND=noninteractive apt install -y \ - vim \ - wget \ - curl \ - xz-utils \ - net-tools \ - openssh-client \ - git \ - ntpdate \ - tzdata \ - tcl \ - sudo \ - bash-completion - -# Install compile tools -RUN DEBIAN_FRONTEND=noninteractive apt install -y \ - gcc \ - g++ \ - zlibc \ - make \ - libgmp-dev \ - patch \ - autoconf \ - libtool \ - automake \ - flex - -# Set bash -RUN echo "dash dash/sh boolean false" | debconf-set-selections -RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash - -# Install python (v3.7.5) -RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ - libgdbm-dev libgdbm-compat-dev liblzma-dev libreadline-dev libsqlite3-dev \ - && cd /tmp \ - && wget https://github.com/python/cpython/archive/v3.7.5.tar.gz \ - && tar -xvf v3.7.5.tar.gz \ - && cd /tmp/cpython-3.7.5 \ - && mkdir -p ${PYTHON_ROOT_PATH} \ - && ./configure --prefix=${PYTHON_ROOT_PATH} \ - && make -j4 \ - && make install -j4 \ - && rm -f /usr/local/bin/python \ - && rm -f 
/usr/local/bin/pip \ - && ln -s ${PYTHON_ROOT_PATH}/bin/python3.7 /usr/local/bin/python \ - && ln -s ${PYTHON_ROOT_PATH}/bin/pip3.7 /usr/local/bin/pip \ - && rm -rf /tmp/cpython-3.7.5 \ - && rm -f /tmp/v3.7.5.tar.gz - -# Set pip source -RUN mkdir -pv /root/.pip \ - && echo "[global]" > /root/.pip/pip.conf \ - && echo "trusted-host=mirrors.aliyun.com" >> /root/.pip/pip.conf \ - && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf - -# Install pip package -RUN pip install --no-cache-dir \ - numpy \ - wheel \ - nose \ - pytest \ - pytest-xdist - -# Install cmake (v3.14.1) -RUN cd /tmp \ - && wget https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-Linux-x86_64.sh \ - && mkdir -p ${CMAKE_ROOT_PATH} \ - && bash ./cmake-3.14.1-Linux-x86_64.sh --prefix=${CMAKE_ROOT_PATH} --exclude-subdir --skip-license \ - && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh - -# Install MindSpore cuda-9.2 whl package -RUN pip install --no-cache-dir https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/gpu/cuda-9.2/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl diff --git a/docker/mindspore-cuda10.1/Dockerfile b/docker/mindspore-gpu/0.1.0-alpha/Dockerfile similarity index 78% rename from docker/mindspore-cuda10.1/Dockerfile rename to docker/mindspore-gpu/0.1.0-alpha/Dockerfile index e2a1ee955a..7b82b21a1e 100644 --- a/docker/mindspore-cuda10.1/Dockerfile +++ b/docker/mindspore-gpu/0.1.0-alpha/Dockerfile @@ -1,11 +1,12 @@ -FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 +FROM nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 MAINTAINER leonwanghui # Set env ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 ENV CMAKE_ROOT_PATH /usr/local/cmake-3.14.1 -ENV PATH ${PYTHON_ROOT_PATH}/bin:${CMAKE_ROOT_PATH}/bin:/usr/local/bin:$PATH +ENV OMPI_ROOT_PATH /usr/local/openmpi-3.1.5 +ENV PATH ${PYTHON_ROOT_PATH}/bin:${CMAKE_ROOT_PATH}/bin:${OMPI_ROOT_PATH}/bin:/usr/local/bin:$PATH # Install base tools RUN apt update \ @@ -34,7 +35,9 @@ RUN 
DEBIAN_FRONTEND=noninteractive apt install -y \ autoconf \ libtool \ automake \ - flex + flex \ + libnccl2=2.4.8-1+cuda10.1 \ + libnccl-dev=2.4.8-1+cuda10.1 # Set bash RUN echo "dash dash/sh boolean false" | debconf-set-selections @@ -79,5 +82,17 @@ RUN cd /tmp \ && bash ./cmake-3.14.1-Linux-x86_64.sh --prefix=${CMAKE_ROOT_PATH} --exclude-subdir --skip-license \ && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh +# Install openmpi (v3.1.5) +RUN cd /tmp \ + && wget https://download.open-mpi.org/release/open-mpi/v3.1/openmpi-3.1.5.tar.gz \ + && tar -xvf openmpi-3.1.5.tar.gz \ + && cd /tmp/openmpi-3.1.5 \ + && mkdir -p ${OMPI_ROOT_PATH} \ + && ./configure --prefix=${OMPI_ROOT_PATH} \ + && make -j4 \ + && make install -j4 \ + && rm -rf /tmp/openmpi-3.1.5 \ + && rm -f /tmp/openmpi-3.1.5.tar.gz + # Install MindSpore cuda-10.1 whl package RUN pip install --no-cache-dir https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/gpu/cuda-10.1/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl From efc6c7be635cfe12e4790a61f617a83db5a0c0f7 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Fri, 10 Apr 2020 17:41:02 +0800 Subject: [PATCH 161/367] delete abandoned script files autogen.sh: delete py_filter: delete dbg_dump_parser.sh: move to script --- autogen.sh | 19 ------------------- py_filter | 17 ----------------- .../dbg_dump_parser.sh | 4 ++++ 3 files changed, 4 insertions(+), 36 deletions(-) delete mode 100755 autogen.sh delete mode 100644 py_filter rename dbg_dump_parser.sh => scripts/dbg_dump_parser.sh (98%) diff --git a/autogen.sh b/autogen.sh deleted file mode 100755 index 65ee17a72a..0000000000 --- a/autogen.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2019 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -git submodule update --init --recursive - - diff --git a/py_filter b/py_filter deleted file mode 100644 index 8301a0a257..0000000000 --- a/py_filter +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Copyright 2019 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -echo "\"\"\"doc\"\"\"" && python3 -m doxypypy.doxypypy -a -c $1 \ No newline at end of file diff --git a/dbg_dump_parser.sh b/scripts/dbg_dump_parser.sh similarity index 98% rename from dbg_dump_parser.sh rename to scripts/dbg_dump_parser.sh index 1d1ec28248..cae3409419 100755 --- a/dbg_dump_parser.sh +++ b/scripts/dbg_dump_parser.sh @@ -81,6 +81,8 @@ function checkopts() # check options checkopts "$@" +CUR_PATH=$(pwd) +cd "`dirname $0`/.." 
cd build/mindspore/ make -j8 @@ -118,3 +120,5 @@ if [[ "${mode}" == "${MODE_DBG}" || "${mode}" == "${MODE_ALL}" ]]; then echo "MS_IR_FILE=$(pwd)/anf_ir_file.dbg MS_IR_PATH=$(pwd)/pkl_objs.dbg/ pytest -s ${UT_NAME}" MS_IR_FILE=$(pwd)/anf_ir_file.dbg MS_IR_PATH=$(pwd)/pkl_objs.dbg/ pytest -s "${UT_NAME}" fi + +cd $CUR_PATH From 53a98210af4590f89b351b42b7b2a630951c452a Mon Sep 17 00:00:00 2001 From: panfengfeng Date: Sat, 11 Apr 2020 09:28:09 +0800 Subject: [PATCH 162/367] skip ut test cases temporarily --- tests/ut/cpp/pipeline/parse/parser_class_test.cc | 2 ++ tests/ut/cpp/pipeline/parse/parser_integrate_test.cc | 2 ++ tests/ut/cpp/pipeline/parse/parser_primitive_test.cc | 2 ++ tests/ut/cpp/pipeline/static_analysis/evaluator_test.cc | 3 +++ tests/ut/cpp/pipeline/static_analysis/prim_test.cc | 5 ++++- .../ut/cpp/pipeline/static_analysis/static_analysis_test.cc | 3 +++ 6 files changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/ut/cpp/pipeline/parse/parser_class_test.cc b/tests/ut/cpp/pipeline/parse/parser_class_test.cc index 599994aab2..dcedc32b1b 100644 --- a/tests/ut/cpp/pipeline/parse/parser_class_test.cc +++ b/tests/ut/cpp/pipeline/parse/parser_class_test.cc @@ -84,6 +84,7 @@ TEST_F(TestParserClass, TestParseDataClassApi) { } } +/* # skip ut test cases temporarily // Test case 2: test parse object, transfore the CELL instance to api. 
TEST_F(TestParserClass, TestParseMethod) { py::object obj_ = python_adapter::CallPyFn("gtest_input.pipeline.parse.parse_class", "test_parse_object_instance"); @@ -114,6 +115,7 @@ TEST_F(TestParserClass, TestParseCompileAPI) { python_adapter::CallPyFn("gtest_input.pipeline.parse.parse_compile", "test_build"); MS_LOG(DEBUG) << "Test end"; } +*/ } // namespace parse } // namespace mindspore diff --git a/tests/ut/cpp/pipeline/parse/parser_integrate_test.cc b/tests/ut/cpp/pipeline/parse/parser_integrate_test.cc index 3ec260c6c0..fd8438503f 100644 --- a/tests/ut/cpp/pipeline/parse/parser_integrate_test.cc +++ b/tests/ut/cpp/pipeline/parse/parser_integrate_test.cc @@ -86,10 +86,12 @@ TEST_F(TestParserIntegrate, TestParseGraphResolveGetAttr) { ASSERT_TRUE(func_graph != nullptr); } +/* skip ut test case temporarily TEST_F(TestParserIntegrate, TestParseGraphResolveUnknown) { EXPECT_THROW({ python_adapter::CallPyFn("gtest_input.pipeline.parse.parser_integrate", "test_undefined_symbol"); }, std::runtime_error); } +*/ /* #not supported yet TEST_F(TestParserIntegrate, TestParseGraphTestModelInside) { diff --git a/tests/ut/cpp/pipeline/parse/parser_primitive_test.cc b/tests/ut/cpp/pipeline/parse/parser_primitive_test.cc index e4cfd5132f..adc09cca32 100644 --- a/tests/ut/cpp/pipeline/parse/parser_primitive_test.cc +++ b/tests/ut/cpp/pipeline/parse/parser_primitive_test.cc @@ -109,6 +109,7 @@ TEST_F(TestParserPrimitive, TestParsePrimitive) { #endif } +/* skip ut test case temporarily TEST_F(TestParserPrimitive, TestParsePrimitiveParmeter) { py::object obj_ = python_adapter::CallPyFn("gtest_input.pipeline.parse.parse_primitive", "test_primitive_obj_parameter"); @@ -157,6 +158,7 @@ TEST_F(TestParserPrimitive, TestParsePrimitiveParmeter2) { i++; } } +*/ } // namespace parse } // namespace mindspore diff --git a/tests/ut/cpp/pipeline/static_analysis/evaluator_test.cc b/tests/ut/cpp/pipeline/static_analysis/evaluator_test.cc index d3983552e8..80acbe6ad5 100644 --- 
a/tests/ut/cpp/pipeline/static_analysis/evaluator_test.cc +++ b/tests/ut/cpp/pipeline/static_analysis/evaluator_test.cc @@ -63,6 +63,7 @@ TEST_F(TestEvaluatorCacheMap, test_evaluator_cache_map) { ASSERT_TRUE(iter == cache.end()); } +/* skip ut test cases temporarily class TestStandardEvaluator : public UT::Common { public: TestStandardEvaluator() : getPyFun("gtest_input.pipeline.infer.infer_test", true), engine_(nullptr) {} @@ -240,5 +241,7 @@ TEST_F(TestPartialEvaluator, test_infer_construct_sub_unresolved) { ASSERT_TRUE(*(abs_base_got->GetTypeTrack()) == *(abstract_x->GetTypeTrack())); ASSERT_TRUE(abs_base_got->GetTypeTrack()->type_id() == kNumberTypeFloat64); } +*/ + } // namespace abstract } // namespace mindspore diff --git a/tests/ut/cpp/pipeline/static_analysis/prim_test.cc b/tests/ut/cpp/pipeline/static_analysis/prim_test.cc index 629f410601..f54961af94 100644 --- a/tests/ut/cpp/pipeline/static_analysis/prim_test.cc +++ b/tests/ut/cpp/pipeline/static_analysis/prim_test.cc @@ -83,12 +83,13 @@ const std::shared_ptr UTPrimUtils::kI16 = std::make_shared(16); const std::shared_ptr UTPrimUtils::kI64 = std::make_shared(64); const std::shared_ptr UTPrimUtils::kU64 = std::make_shared(64); namespace { +/* skip ut test cases temporarily AbstractBasePtr ArrayOfTensor(const TypePtr &t, std::initializer_list shp) { auto shape = std::vector(shp); auto tensor = std::make_shared(t->type_id(), shape); return ToAbstract(tensor); } - +*/ } // namespace class TestPrim : public UT::Common { @@ -496,6 +497,7 @@ TEST_F(TestPrim, test_relu) { ASSERT_TRUE(*res == *expected); } +/* TEST_F(TestPrim, test_relu2) { FuncGraphPtr func_graph = getPyFun("get_relu"); ASSERT_TRUE(func_graph != nullptr); @@ -1151,6 +1153,7 @@ TEST_F(TestPrim, test_DictGetItem2) { ASSERT_TRUE(*tensor_ret == *expect); } +*/ } // namespace abstract } // namespace mindspore diff --git a/tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc b/tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc 
index 2da631d744..ac857dfac9 100644 --- a/tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc +++ b/tests/ut/cpp/pipeline/static_analysis/static_analysis_test.cc @@ -442,6 +442,7 @@ void TestGraphInfer::TearDown() { parse::data_converter::ClearObjectCache(); } +/* skip ut test cases temporarily TEST_F(TestGraphInfer, test_graph_infer_defaults) { FuncGraphPtr graph = getPyFun.CallAndParseRet("test_graph_infer_defaults"); AbstractBasePtrList args_spec_list = {}; @@ -497,5 +498,7 @@ TEST_F(TestGraphInfer, test_graph_infer_vararg_kwonlyargs_kwarg_defaults) { AbstractBasePtr expect = FromValue(MakeValue(57), false); ASSERT_EQ(*res, *expect); } +*/ + } // namespace abstract } // namespace mindspore From a9443635b768377903eaf4e7e0dbdab90c1f571d Mon Sep 17 00:00:00 2001 From: jonyguo Date: Thu, 9 Apr 2020 11:50:46 +0800 Subject: [PATCH 163/367] fix: mindpage enhance parameter check and search by filename failed --- .../mindrecord/include/common/shard_utils.h | 7 + .../include/shard_index_generator.h | 10 +- .../mindrecord/io/shard_index_generator.cc | 78 ++++++---- mindspore/ccsrc/mindrecord/io/shard_reader.cc | 66 ++++++-- .../ccsrc/mindrecord/io/shard_segment.cc | 17 +- mindspore/mindrecord/mindpage.py | 24 +-- .../ut_shard_index_generator_test.cc | 3 + .../python/mindrecord/test_mindrecord_base.py | 145 ++++++++++++++++++ .../mindrecord/test_mindrecord_exception.py | 69 ++++++++- 9 files changed, 354 insertions(+), 65 deletions(-) diff --git a/mindspore/ccsrc/mindrecord/include/common/shard_utils.h b/mindspore/ccsrc/mindrecord/include/common/shard_utils.h index c452b49fbc..55319cabfe 100644 --- a/mindspore/ccsrc/mindrecord/include/common/shard_utils.h +++ b/mindspore/ccsrc/mindrecord/include/common/shard_utils.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -117,6 +118,12 @@ const char kPoint = '.'; // field type used by check schema validation const std::set kFieldTypeSet = {"bytes", "string", "int32", "int64", 
"float32", "float64"}; +// can be searched field list +const std::set kScalarFieldTypeSet = {"string", "int32", "int64", "float32", "float64"}; + +// number field list +const std::set kNumberFieldTypeSet = {"int32", "int64", "float32", "float64"}; + /// \brief split a string using a character /// \param[in] field target string /// \param[in] separator a character for spliting diff --git a/mindspore/ccsrc/mindrecord/include/shard_index_generator.h b/mindspore/ccsrc/mindrecord/include/shard_index_generator.h index 1febd28fc2..f91d0f17a7 100644 --- a/mindspore/ccsrc/mindrecord/include/shard_index_generator.h +++ b/mindspore/ccsrc/mindrecord/include/shard_index_generator.h @@ -42,11 +42,11 @@ class ShardIndexGenerator { ~ShardIndexGenerator() {} - /// \brief fetch value in json by field path - /// \param[in] field_path - /// \param[in] schema - /// \return the vector of value - static std::vector GetField(const std::string &field_path, json schema); + /// \brief fetch value in json by field name + /// \param[in] field + /// \param[in] input + /// \return pair + std::pair GetValueByField(const string &field, json input); /// \brief fetch field type in schema n by field path /// \param[in] field_path diff --git a/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc b/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc index c0108241a1..254ddfbb16 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc @@ -38,7 +38,7 @@ ShardIndexGenerator::ShardIndexGenerator(const std::string &file_path, bool appe MSRStatus ShardIndexGenerator::Build() { ShardHeader header = ShardHeader(); if (header.Build(file_path_) != SUCCESS) { - MS_LOG(ERROR) << "Build shard schema failed"; + MS_LOG(ERROR) << "Build shard schema failed."; return FAILED; } shard_header_ = header; @@ -46,35 +46,49 @@ MSRStatus ShardIndexGenerator::Build() { return SUCCESS; } -std::vector ShardIndexGenerator::GetField(const string 
&field_path, json schema) { - std::vector field_name = StringSplit(field_path, kPoint); - std::vector res; - if (schema.empty()) { - res.emplace_back("null"); - return res; +std::pair ShardIndexGenerator::GetValueByField(const string &field, json input) { + if (field.empty()) { + MS_LOG(ERROR) << "The input field is None."; + return {FAILED, ""}; } - for (uint64_t i = 0; i < field_name.size(); i++) { - // Check if field is part of an array of objects - auto &child = schema.at(field_name[i]); - if (child.is_array() && !child.empty() && child[0].is_object()) { - schema = schema[field_name[i]]; - std::string new_field_path; - for (uint64_t j = i + 1; j < field_name.size(); j++) { - if (j > i + 1) new_field_path += '.'; - new_field_path += field_name[j]; - } - // Return multiple field data since multiple objects in array - for (auto &single_schema : schema) { - auto child_res = GetField(new_field_path, single_schema); - res.insert(res.end(), child_res.begin(), child_res.end()); - } - return res; + + if (input.empty()) { + MS_LOG(ERROR) << "The input json is None."; + return {FAILED, ""}; + } + + // parameter input does not contain the field + if (input.find(field) == input.end()) { + MS_LOG(ERROR) << "The field " << field << " is not found in parameter " << input; + return {FAILED, ""}; + } + + // schema does not contain the field + auto schema = shard_header_.get_schemas()[0]->GetSchema()["schema"]; + if (schema.find(field) == schema.end()) { + MS_LOG(ERROR) << "The field " << field << " is not found in schema " << schema; + return {FAILED, ""}; + } + + // field should be scalar type + if (kScalarFieldTypeSet.find(schema[field]["type"]) == kScalarFieldTypeSet.end()) { + MS_LOG(ERROR) << "The field " << field << " type is " << schema[field]["type"] << ", it is not retrievable"; + return {FAILED, ""}; + } + + if (kNumberFieldTypeSet.find(schema[field]["type"]) != kNumberFieldTypeSet.end()) { + auto schema_field_options = schema[field]; + if 
(schema_field_options.find("shape") == schema_field_options.end()) { + return {SUCCESS, input[field].dump()}; + } else { + // field with shape option + MS_LOG(ERROR) << "The field " << field << " shape is " << schema[field]["shape"] << " which is not retrievable"; + return {FAILED, ""}; } - schema = schema.at(field_name[i]); } - // Return vector of one field data (not array of objects) - return std::vector{schema.dump()}; + // the field type is string in here + return {SUCCESS, input[field].get()}; } std::string ShardIndexGenerator::TakeFieldType(const string &field_path, json schema) { @@ -304,6 +318,7 @@ MSRStatus ShardIndexGenerator::BindParameterExecuteSQL( const auto &place_holder = std::get<0>(field); const auto &field_type = std::get<1>(field); const auto &field_value = std::get<2>(field); + int index = sqlite3_bind_parameter_index(stmt, common::SafeCStr(place_holder)); if (field_type == "INTEGER") { if (sqlite3_bind_int(stmt, index, std::stoi(field_value)) != SQLITE_OK) { @@ -463,17 +478,24 @@ INDEX_FIELDS ShardIndexGenerator::GenerateIndexFields(const std::vector &s if (field.first >= schema_detail.size()) { return {FAILED, {}}; } - auto field_value = GetField(field.second, schema_detail[field.first]); + auto field_value = GetValueByField(field.second, schema_detail[field.first]); + if (field_value.first != SUCCESS) { + MS_LOG(ERROR) << "Get value from json by field name failed"; + return {FAILED, {}}; + } + auto result = shard_header_.GetSchemaByID(field.first); if (result.second != SUCCESS) { return {FAILED, {}}; } + std::string field_type = ConvertJsonToSQL(TakeFieldType(field.second, result.first->GetSchema()["schema"])); auto ret = GenerateFieldName(field); if (ret.first != SUCCESS) { return {FAILED, {}}; } - fields.emplace_back(ret.second, field_type, field_value[0]); + + fields.emplace_back(ret.second, field_type, field_value.second); } return {SUCCESS, std::move(fields)}; } diff --git a/mindspore/ccsrc/mindrecord/io/shard_reader.cc 
b/mindspore/ccsrc/mindrecord/io/shard_reader.cc index f91d28544e..12aecea21f 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_reader.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_reader.cc @@ -25,6 +25,15 @@ using mindspore::MsLogLevel::INFO; namespace mindspore { namespace mindrecord { +template +// convert the string to exactly number type (int32_t/int64_t/float/double) +Type StringToNum(const std::string &str) { + std::istringstream iss(str); + Type num; + iss >> num; + return num; +} + ShardReader::ShardReader() { task_id_ = 0; deliver_id_ = 0; @@ -259,16 +268,25 @@ MSRStatus ShardReader::ConvertLabelToJson(const std::vectorget_schemas()[0]->GetSchema()["schema"]; + + // convert the string to base type by schema + if (schema[columns[j]]["type"] == "int32") { + construct_json[columns[j]] = StringToNum(labels[i][j + 3]); + } else if (schema[columns[j]]["type"] == "int64") { + construct_json[columns[j]] = StringToNum(labels[i][j + 3]); + } else if (schema[columns[j]]["type"] == "float32") { + construct_json[columns[j]] = StringToNum(labels[i][j + 3]); + } else if (schema[columns[j]]["type"] == "float64") { + construct_json[columns[j]] = StringToNum(labels[i][j + 3]); + } else { + construct_json[columns[j]] = std::string(labels[i][j + 3]); } } - json_str += "}"; - column_values[shard_id].emplace_back(json::parse(json_str)); + column_values[shard_id].emplace_back(construct_json); } } @@ -402,7 +420,16 @@ std::vector> ShardReader::GetImageOffset(int page_id, int // whether use index search if (!criteria.first.empty()) { - sql += " AND " + criteria.first + "_" + std::to_string(column_schema_id_[criteria.first]) + " = " + criteria.second; + auto schema = shard_header_->get_schemas()[0]->GetSchema(); + + // not number field should add '' in sql + if (kNumberFieldTypeSet.find(schema["schema"][criteria.first]["type"]) != kNumberFieldTypeSet.end()) { + sql += + " AND " + criteria.first + "_" + std::to_string(column_schema_id_[criteria.first]) + " = " + criteria.second; + } 
else { + sql += " AND " + criteria.first + "_" + std::to_string(column_schema_id_[criteria.first]) + " = '" + + criteria.second + "'"; + } } sql += ";"; std::vector> image_offsets; @@ -603,16 +630,25 @@ std::pair> ShardReader::GetLabels(int page_id, int std::vector ret; for (unsigned int i = 0; i < labels.size(); ++i) ret.emplace_back(json{}); for (unsigned int i = 0; i < labels.size(); ++i) { - string json_str = "{"; + json construct_json; for (unsigned int j = 0; j < columns.size(); ++j) { - // construct string json "f1": value - json_str = json_str + "\"" + columns[j] + "\":" + labels[i][j]; - if (j < columns.size() - 1) { - json_str += ","; + // construct json "f1": value + auto schema = shard_header_->get_schemas()[0]->GetSchema()["schema"]; + + // convert the string to base type by schema + if (schema[columns[j]]["type"] == "int32") { + construct_json[columns[j]] = StringToNum(labels[i][j]); + } else if (schema[columns[j]]["type"] == "int64") { + construct_json[columns[j]] = StringToNum(labels[i][j]); + } else if (schema[columns[j]]["type"] == "float32") { + construct_json[columns[j]] = StringToNum(labels[i][j]); + } else if (schema[columns[j]]["type"] == "float64") { + construct_json[columns[j]] = StringToNum(labels[i][j]); + } else { + construct_json[columns[j]] = std::string(labels[i][j]); } } - json_str += "}"; - ret[i] = json::parse(json_str); + ret[i] = construct_json; } return {SUCCESS, ret}; } diff --git a/mindspore/ccsrc/mindrecord/io/shard_segment.cc b/mindspore/ccsrc/mindrecord/io/shard_segment.cc index 94ef0d8167..e015831d6b 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_segment.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_segment.cc @@ -311,14 +311,23 @@ std::pair, json>>> ShardS MS_LOG(ERROR) << "Get category info"; return {FAILED, std::vector, json>>{}}; } + + // category_name to category_id + int64_t category_id = -1; for (const auto &categories : ret.second) { - if (std::get<1>(categories) == category_name) { - auto result = 
ReadAllAtPageById(std::get<0>(categories), page_no, n_rows_of_page); - return {SUCCESS, result.second}; + std::string categories_name = std::get<1>(categories); + + if (categories_name == category_name) { + category_id = std::get<0>(categories); + break; } } - return {SUCCESS, std::vector, json>>{}}; + if (category_id == -1) { + return {FAILED, std::vector, json>>{}}; + } + + return ReadAllAtPageById(category_id, page_no, n_rows_of_page); } std::pair, pybind11::object>>> ShardSegment::ReadAtPageByIdPy( diff --git a/mindspore/mindrecord/mindpage.py b/mindspore/mindrecord/mindpage.py index 2d19006af4..4baaa6013b 100644 --- a/mindspore/mindrecord/mindpage.py +++ b/mindspore/mindrecord/mindpage.py @@ -133,15 +133,15 @@ class MindPage: Raises: ParamValueError: If any parameter is invalid. - MRMFetchDataError: If failed to read by category id. + MRMFetchDataError: If failed to fetch data by category. MRMUnsupportedSchemaError: If schema is invalid. """ - if category_id < 0: - raise ParamValueError("Category id should be greater than 0.") - if page < 0: - raise ParamValueError("Page should be greater than 0.") - if num_row < 0: - raise ParamValueError("num_row should be greater than 0.") + if not isinstance(category_id, int) or category_id < 0: + raise ParamValueError("Category id should be int and greater than or equal to 0.") + if not isinstance(page, int) or page < 0: + raise ParamValueError("Page should be int and greater than or equal to 0.") + if not isinstance(num_row, int) or num_row <= 0: + raise ParamValueError("num_row should be int and greater than 0.") return self._segment.read_at_page_by_id(category_id, page, num_row) def read_at_page_by_name(self, category_name, page, num_row): @@ -157,8 +157,10 @@ class MindPage: Returns: str, read at page. 
""" - if page < 0: - raise ParamValueError("Page should be greater than 0.") - if num_row < 0: - raise ParamValueError("num_row should be greater than 0.") + if not isinstance(category_name, str): + raise ParamValueError("Category name should be str.") + if not isinstance(page, int) or page < 0: + raise ParamValueError("Page should be int and greater than or equal to 0.") + if not isinstance(num_row, int) or num_row <= 0: + raise ParamValueError("num_row should be int and greater than 0.") return self._segment.read_at_page_by_name(category_name, page, num_row) diff --git a/tests/ut/cpp/mindrecord/ut_shard_index_generator_test.cc b/tests/ut/cpp/mindrecord/ut_shard_index_generator_test.cc index a5e343a5b3..0c33d33ffd 100644 --- a/tests/ut/cpp/mindrecord/ut_shard_index_generator_test.cc +++ b/tests/ut/cpp/mindrecord/ut_shard_index_generator_test.cc @@ -53,6 +53,7 @@ class TestShardIndexGenerator : public UT::Common { TestShardIndexGenerator() {} }; +/* TEST_F(TestShardIndexGenerator, GetField) { MS_LOG(INFO) << FormatInfo("Test ShardIndex: get field"); @@ -82,6 +83,8 @@ TEST_F(TestShardIndexGenerator, GetField) { } } } +*/ + TEST_F(TestShardIndexGenerator, TakeFieldType) { MS_LOG(INFO) << FormatInfo("Test ShardSchema: take field Type"); diff --git a/tests/ut/python/mindrecord/test_mindrecord_base.py b/tests/ut/python/mindrecord/test_mindrecord_base.py index 7fdf1f0f94..93e5c609f7 100644 --- a/tests/ut/python/mindrecord/test_mindrecord_base.py +++ b/tests/ut/python/mindrecord/test_mindrecord_base.py @@ -13,6 +13,7 @@ # limitations under the License. 
# ============================================================================ """test mindrecord base""" +import numpy as np import os import uuid from mindspore.mindrecord import FileWriter, FileReader, MindPage, SUCCESS @@ -25,6 +26,105 @@ CV2_FILE_NAME = "./imagenet_loop.mindrecord" CV3_FILE_NAME = "./imagenet_append.mindrecord" NLP_FILE_NAME = "./aclImdb.mindrecord" +def test_write_read_process(): + mindrecord_file_name = "test.mindrecord" + data = [{"file_name": "001.jpg", "label": 43, "score": 0.8, "mask": np.array([3, 6, 9], dtype=np.int64), + "segments": np.array([[5.0, 1.6], [65.2, 8.3]], dtype=np.float32), + "data": bytes("image bytes abc", encoding='UTF-8')}, + {"file_name": "002.jpg", "label": 91, "score": 5.4, "mask": np.array([1, 4, 7], dtype=np.int64), + "segments": np.array([[5.1, 9.1], [2.0, 65.4]], dtype=np.float32), + "data": bytes("image bytes def", encoding='UTF-8')}, + {"file_name": "003.jpg", "label": 61, "score": 6.4, "mask": np.array([7, 6, 3], dtype=np.int64), + "segments": np.array([[0.0, 5.6], [3.0, 16.3]], dtype=np.float32), + "data": bytes("image bytes ghi", encoding='UTF-8')}, + {"file_name": "004.jpg", "label": 29, "score": 8.1, "mask": np.array([2, 8, 0], dtype=np.int64), + "segments": np.array([[5.9, 7.2], [4.0, 89.0]], dtype=np.float32), + "data": bytes("image bytes jkl", encoding='UTF-8')}, + {"file_name": "005.jpg", "label": 78, "score": 7.7, "mask": np.array([3, 1, 2], dtype=np.int64), + "segments": np.array([[0.6, 8.1], [5.3, 49.3]], dtype=np.float32), + "data": bytes("image bytes mno", encoding='UTF-8')}, + {"file_name": "006.jpg", "label": 37, "score": 9.4, "mask": np.array([7, 6, 7], dtype=np.int64), + "segments": np.array([[4.2, 6.3], [8.9, 81.8]], dtype=np.float32), + "data": bytes("image bytes pqr", encoding='UTF-8')} + ] + writer = FileWriter(mindrecord_file_name) + schema = {"file_name": {"type": "string"}, + "label": {"type": "int32"}, + "score": {"type": "float64"}, + "mask": {"type": "int64", "shape": [-1]}, + 
"segments": {"type": "float32", "shape": [2, 2]}, + "data": {"type": "bytes"}} + writer.add_schema(schema, "data is so cool") + writer.write_raw_data(data) + writer.commit() + + reader = FileReader(mindrecord_file_name) + count = 0 + for index, x in enumerate(reader.get_next()): + assert len(x) == 6 + for field in x: + if isinstance(x[field], np.ndarray): + assert (x[field] == data[count][field]).all() + else: + assert x[field] == data[count][field] + count = count + 1 + logger.info("#item{}: {}".format(index, x)) + assert count == 6 + reader.close() + + os.remove("{}".format(mindrecord_file_name)) + os.remove("{}.db".format(mindrecord_file_name)) + +def test_write_read_process_with_define_index_field(): + mindrecord_file_name = "test.mindrecord" + data = [{"file_name": "001.jpg", "label": 43, "score": 0.8, "mask": np.array([3, 6, 9], dtype=np.int64), + "segments": np.array([[5.0, 1.6], [65.2, 8.3]], dtype=np.float32), + "data": bytes("image bytes abc", encoding='UTF-8')}, + {"file_name": "002.jpg", "label": 91, "score": 5.4, "mask": np.array([1, 4, 7], dtype=np.int64), + "segments": np.array([[5.1, 9.1], [2.0, 65.4]], dtype=np.float32), + "data": bytes("image bytes def", encoding='UTF-8')}, + {"file_name": "003.jpg", "label": 61, "score": 6.4, "mask": np.array([7, 6, 3], dtype=np.int64), + "segments": np.array([[0.0, 5.6], [3.0, 16.3]], dtype=np.float32), + "data": bytes("image bytes ghi", encoding='UTF-8')}, + {"file_name": "004.jpg", "label": 29, "score": 8.1, "mask": np.array([2, 8, 0], dtype=np.int64), + "segments": np.array([[5.9, 7.2], [4.0, 89.0]], dtype=np.float32), + "data": bytes("image bytes jkl", encoding='UTF-8')}, + {"file_name": "005.jpg", "label": 78, "score": 7.7, "mask": np.array([3, 1, 2], dtype=np.int64), + "segments": np.array([[0.6, 8.1], [5.3, 49.3]], dtype=np.float32), + "data": bytes("image bytes mno", encoding='UTF-8')}, + {"file_name": "006.jpg", "label": 37, "score": 9.4, "mask": np.array([7, 6, 7], dtype=np.int64), + "segments": 
np.array([[4.2, 6.3], [8.9, 81.8]], dtype=np.float32), + "data": bytes("image bytes pqr", encoding='UTF-8')} + ] + writer = FileWriter(mindrecord_file_name) + schema = {"file_name": {"type": "string"}, + "label": {"type": "int32"}, + "score": {"type": "float64"}, + "mask": {"type": "int64", "shape": [-1]}, + "segments": {"type": "float32", "shape": [2, 2]}, + "data": {"type": "bytes"}} + writer.add_schema(schema, "data is so cool") + writer.add_index(["label"]) + writer.write_raw_data(data) + writer.commit() + + reader = FileReader(mindrecord_file_name) + count = 0 + for index, x in enumerate(reader.get_next()): + assert len(x) == 6 + for field in x: + if isinstance(x[field], np.ndarray): + assert (x[field] == data[count][field]).all() + else: + assert x[field] == data[count][field] + count = count + 1 + logger.info("#item{}: {}".format(index, x)) + assert count == 6 + reader.close() + + os.remove("{}".format(mindrecord_file_name)) + os.remove("{}.db".format(mindrecord_file_name)) + def test_cv_file_writer_tutorial(): """tutorial for cv dataset writer.""" writer = FileWriter(CV_FILE_NAME, FILES_NUM) @@ -137,6 +237,51 @@ def test_cv_page_reader_tutorial(): assert len(row1[0]) == 3 assert row1[0]['label'] == 822 +def test_cv_page_reader_tutorial_by_file_name(): + """tutorial for cv page reader.""" + reader = MindPage(CV_FILE_NAME + "0") + fields = reader.get_category_fields() + assert fields == ['file_name', 'label'],\ + 'failed on getting candidate category fields.' + + ret = reader.set_category_field("file_name") + assert ret == SUCCESS, 'failed on setting category field.' 
+ + info = reader.read_category_info() + logger.info("category info: {}".format(info)) + + row = reader.read_at_page_by_id(0, 0, 1) + assert len(row) == 1 + assert len(row[0]) == 3 + assert row[0]['label'] == 490 + + row1 = reader.read_at_page_by_name("image_00007.jpg", 0, 1) + assert len(row1) == 1 + assert len(row1[0]) == 3 + assert row1[0]['label'] == 13 + +def test_cv_page_reader_tutorial_new_api(): + """tutorial for cv page reader.""" + reader = MindPage(CV_FILE_NAME + "0") + fields = reader.candidate_fields + assert fields == ['file_name', 'label'],\ + 'failed on getting candidate category fields.' + + reader.category_field = "file_name" + + info = reader.read_category_info() + logger.info("category info: {}".format(info)) + + row = reader.read_at_page_by_id(0, 0, 1) + assert len(row) == 1 + assert len(row[0]) == 3 + assert row[0]['label'] == 490 + + row1 = reader.read_at_page_by_name("image_00007.jpg", 0, 1) + assert len(row1) == 1 + assert len(row1[0]) == 3 + assert row1[0]['label'] == 13 + paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0')) for x in range(FILES_NUM)] for x in paths: diff --git a/tests/ut/python/mindrecord/test_mindrecord_exception.py b/tests/ut/python/mindrecord/test_mindrecord_exception.py index 1f7a3f859d..75a32eb347 100644 --- a/tests/ut/python/mindrecord/test_mindrecord_exception.py +++ b/tests/ut/python/mindrecord/test_mindrecord_exception.py @@ -15,8 +15,9 @@ """test mindrecord exception""" import os import pytest -from mindspore.mindrecord import FileWriter, FileReader, MindPage -from mindspore.mindrecord import MRMOpenError, MRMGenerateIndexError, ParamValueError, MRMGetMetaError +from mindspore.mindrecord import FileWriter, FileReader, MindPage, SUCCESS +from mindspore.mindrecord import MRMOpenError, MRMGenerateIndexError, ParamValueError, MRMGetMetaError, \ + MRMFetchDataError from mindspore import log as logger from utils import get_data @@ -286,3 +287,67 @@ def test_add_index_without_add_schema(): fw = 
FileWriter(CV_FILE_NAME) fw.add_index(["label"]) assert 'Failed to get meta info' in str(err.value) + +def test_mindpage_pageno_pagesize_not_int(): + """test page reader when some partition does not exist.""" + create_cv_mindrecord(4) + reader = MindPage(CV_FILE_NAME + "0") + fields = reader.get_category_fields() + assert fields == ['file_name', 'label'],\ + 'failed on getting candidate category fields.' + + ret = reader.set_category_field("label") + assert ret == SUCCESS, 'failed on setting category field.' + + info = reader.read_category_info() + logger.info("category info: {}".format(info)) + + with pytest.raises(ParamValueError) as err: + reader.read_at_page_by_id(0, "0", 1) + + with pytest.raises(ParamValueError) as err: + reader.read_at_page_by_id(0, 0, "b") + + with pytest.raises(ParamValueError) as err: + reader.read_at_page_by_name("822", "e", 1) + + with pytest.raises(ParamValueError) as err: + reader.read_at_page_by_name("822", 0, "qwer") + + with pytest.raises(MRMFetchDataError, match="Failed to fetch data by category."): + reader.read_at_page_by_id(99999, 0, 1) + + paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0')) + for x in range(FILES_NUM)] + for x in paths: + os.remove("{}".format(x)) + os.remove("{}.db".format(x)) + +def test_mindpage_filename_not_exist(): + """test page reader when some partition does not exist.""" + create_cv_mindrecord(4) + reader = MindPage(CV_FILE_NAME + "0") + fields = reader.get_category_fields() + assert fields == ['file_name', 'label'],\ + 'failed on getting candidate category fields.' + + ret = reader.set_category_field("file_name") + assert ret == SUCCESS, 'failed on setting category field.' 
+ + info = reader.read_category_info() + logger.info("category info: {}".format(info)) + + with pytest.raises(MRMFetchDataError) as err: + reader.read_at_page_by_id(9999, 0, 1) + + with pytest.raises(MRMFetchDataError) as err: + reader.read_at_page_by_name("abc.jpg", 0, 1) + + with pytest.raises(ParamValueError) as err: + reader.read_at_page_by_name(1, 0, 1) + + paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0')) + for x in range(FILES_NUM)] + for x in paths: + os.remove("{}".format(x)) + os.remove("{}.db".format(x)) From bd57d123e54717328276db0f318082f64c833217 Mon Sep 17 00:00:00 2001 From: c00425699 Date: Sat, 11 Apr 2020 11:35:54 +0800 Subject: [PATCH 164/367] add_bool_type_check_in_comm_op --- mindspore/ops/operations/comm_ops.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mindspore/ops/operations/comm_ops.py b/mindspore/ops/operations/comm_ops.py index 1644c5800a..441e441c2c 100644 --- a/mindspore/ops/operations/comm_ops.py +++ b/mindspore/ops/operations/comm_ops.py @@ -162,6 +162,8 @@ class AllGather(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): + if x_dtype == mstype.bool_: + raise TypeError("AllGather does not support 'Bool' as the dtype of input!") return x_dtype def __call__(self, tensor): @@ -219,6 +221,8 @@ class ReduceScatter(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): + if x_dtype == mstype.bool_: + raise TypeError("ReduceScatter does not support 'Bool' as the dtype of input!") return x_dtype def __call__(self, tensor): @@ -276,6 +280,8 @@ class Broadcast(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): + if x_dtype == mstype.bool_: + raise TypeError("Broadcast does not support 'Bool' as the dtype of input!") return x_dtype @@ -318,6 +324,8 @@ class _AlltoAll(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): + if x_dtype == mstype.bool_: + raise TypeError("AlltoAll does not support 'Bool' as the dtype of input!") return x_dtype def __call__(self, 
tensor): From b5ca2a3e305af67fa551f426f8d41be20087e138 Mon Sep 17 00:00:00 2001 From: dengwentao Date: Thu, 2 Apr 2020 10:14:37 +0800 Subject: [PATCH 165/367] :modify protobuf cmake --- cmake/external_libs/protobuf.cmake | 29 ++++++++++--------- cmake/utils.cmake | 8 +++-- graphengine | 2 +- mindspore/ccsrc/CMakeLists.txt | 7 ++--- mindspore/ccsrc/dataset/CMakeLists.txt | 2 +- .../engine/datasetops/source/CMakeLists.txt | 2 +- .../ccsrc/dataset/engine/tdt/CMakeLists.txt | 2 +- mindspore/ccsrc/mindrecord/CMakeLists.txt | 2 +- 8 files changed, 28 insertions(+), 26 deletions(-) diff --git a/cmake/external_libs/protobuf.cmake b/cmake/external_libs/protobuf.cmake index c354bcb65d..9e64785a7a 100644 --- a/cmake/external_libs/protobuf.cmake +++ b/cmake/external_libs/protobuf.cmake @@ -1,22 +1,23 @@ -mindspore_add_pkg(protobuf - VER 3.8.0 - HEAD_ONLY ./ - URL https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz - MD5 3d9e32700639618a4d2d342c99d4507a) - -set(protobuf_BUILD_TESTS OFF CACHE BOOL "Disable protobuf test") -set(protobuf_BUILD_SHARED_LIBS OFF CACHE BOOL "Gen shared library") +set(protobuf_USE_STATIC_LIBS ON) +set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2") +set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") set(_ms_tmp_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) - +set(CMAKE_CXX_FLAGS ${_ms_tmp_CMAKE_CXX_FLAGS}) string(REPLACE " -Wall" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") string(REPLACE " -Werror" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") -add_subdirectory(${protobuf_DIRPATH}/cmake ${protobuf_DIRPATH}/build) -set(CMAKE_CXX_FLAGS ${_ms_tmp_CMAKE_CXX_FLAGS}) +mindspore_add_pkg(protobuf + VER 3.8.0 + LIBS protobuf + EXE protoc + URL https://github.com/protocolbuffers/protobuf/archive/v3.8.0.tar.gz + MD5 3d9e32700639618a4d2d342c99d4507a + CMAKE_PATH cmake/ + CMAKE_OPTION -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF) -set(PROTOBUF_LIBRARY 
protobuf::libprotobuf) -include_directories(${protobuf_DIRPATH}/src) -add_library(mindspore::protobuf ALIAS libprotobuf) +include_directories(${protobuf_INC}) +add_library(mindspore::protobuf ALIAS protobuf::protobuf) +set(CMAKE_CXX_FLAGS ${_ms_tmp_CMAKE_CXX_FLAGS}) function(ms_protobuf_generate c_var h_var) if(NOT ARGN) diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 99c064fdd4..c9b7d944f0 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -192,10 +192,12 @@ set(MS_FIND_NO_DEFAULT_PATH ${MS_FIND_NO_DEFAULT_PATH} PARENT_SCOPE) function(mindspore_add_pkg pkg_name ) set(options ) - set(oneValueArgs URL MD5 GIT_REPOSITORY GIT_TAG VER EXE DIR HEAD_ONLY) + set(oneValueArgs URL MD5 GIT_REPOSITORY GIT_TAG VER EXE DIR HEAD_ONLY CMAKE_PATH) set(multiValueArgs CMAKE_OPTION LIBS PRE_CONFIGURE_COMMAND CONFIGURE_COMMAND BUILD_OPTION INSTALL_INCS INSTALL_LIBS PATCHES) cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) - + if (NOT PKG_CMAKE_PATH) + set(PKG_CMAKE_PATH .) + endif () set(__FIND_PKG_NAME ${pkg_name}) string(TOLOWER ${pkg_name} pkg_name) message("pkg name:${__FIND_PKG_NAME},${pkg_name}") @@ -304,7 +306,7 @@ function(mindspore_add_pkg pkg_name ) __exec_cmd(COMMAND ${CMAKE_COMMAND} ${PKG_CMAKE_OPTION} -G ${CMAKE_GENERATOR} ${${pkg_name}_CMAKE_CFLAGS} ${${pkg_name}_CMAKE_CXXFLAGS} ${${pkg_name}_CMAKE_LDFLAGS} - -DCMAKE_INSTALL_PREFIX=${${pkg_name}_BASE_DIR} .. + -DCMAKE_INSTALL_PREFIX=${${pkg_name}_BASE_DIR} ${${pkg_name}_SOURCE_DIR}/${PKG_CMAKE_PATH} WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR}/_build) __exec_cmd(COMMAND ${CMAKE_COMMAND} --build . 
--target install -- -j${THNUM} diff --git a/graphengine b/graphengine index 5bd0dc1ed5..40e9f6f834 160000 --- a/graphengine +++ b/graphengine @@ -1 +1 @@ -Subproject commit 5bd0dc1ed59a9ec4ea6a602bf1385c59d845f922 +Subproject commit 40e9f6f834469d2d228f9782a0a41f95be94d429 diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index c49c962bdd..f3100f62c9 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -157,10 +157,9 @@ file(GLOB_RECURSE MEM_REUSE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} if(NOT ENABLE_DUMP_E2E) list(REMOVE_ITEM MINDSPORE_SRC_LIST "debug/e2e_dump.cc") endif() - file(COPY "${ms_onnx_INC}/onnx/onnx.proto" DESTINATION ${CMAKE_CURRENT_SOURCE_DIR}) file(GLOB_RECURSE ONNX_PROTO RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "${CMAKE_CURRENT_SOURCE_DIR}/onnx.proto") -message(“onnx proto path is : ${ONNX_PROTO}”) +message("onnx proto path is : ${ONNX_PROTO}") ms_protobuf_generate(ONNX_PROTO_SRCS ONNX_PROTO_HDRS ${ONNX_PROTO}) list(APPEND MINDSPORE_PROTO_LIST ${ONNX_PROTO_SRCS}) @@ -319,7 +318,7 @@ target_link_libraries(mindspore dl) target_link_libraries(mindspore mindspore::flatbuffers) # link protobuf if (ENABLE_D) - target_link_libraries(mindspore protobuf::libprotobuf) + target_link_libraries(mindspore mindspore::protobuf) endif() # set c_expression building @@ -464,7 +463,7 @@ if(ENABLE_GPU) endif() if(ENABLE_DUMP_PROTO) - target_link_libraries(_c_expression PRIVATE protobuf::libprotobuf) + target_link_libraries(_c_expression PRIVATE mindspore::protobuf) endif() if(ENABLE_GPU) diff --git a/mindspore/ccsrc/dataset/CMakeLists.txt b/mindspore/ccsrc/dataset/CMakeLists.txt index 477d37051e..5bf210a8ba 100644 --- a/mindspore/ccsrc/dataset/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/CMakeLists.txt @@ -76,7 +76,7 @@ set_target_properties(_c_dataengine PROPERTIES ################# Link with external libraries ######################## target_link_libraries(_c_dataengine PRIVATE mindspore mindspore_gvar) 
-target_link_libraries(_c_dataengine PRIVATE mindspore::pybind11_module -ldl protobuf::libprotobuf ${SECUREC_LIBRARY}) +target_link_libraries(_c_dataengine PRIVATE mindspore::pybind11_module -ldl mindspore::protobuf ${SECUREC_LIBRARY}) target_link_libraries(_c_dataengine PUBLIC mindspore::jpeg_turbo mindspore::opencv_core mindspore::opencv_imgcodecs mindspore::opencv_imgproc) if (ENABLE_GPUQUE) diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/CMakeLists.txt b/mindspore/ccsrc/dataset/engine/datasetops/source/CMakeLists.txt index 5a02a2ec31..a7c0dfd725 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/CMakeLists.txt @@ -20,4 +20,4 @@ add_library(engine-datasetops-source OBJECT celeba_op.cc ) -add_dependencies(engine-datasetops-source protobuf::libprotobuf) +add_dependencies(engine-datasetops-source mindspore::protobuf) diff --git a/mindspore/ccsrc/dataset/engine/tdt/CMakeLists.txt b/mindspore/ccsrc/dataset/engine/tdt/CMakeLists.txt index 4a2adff310..9c6ec4b388 100644 --- a/mindspore/ccsrc/dataset/engine/tdt/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/engine/tdt/CMakeLists.txt @@ -4,4 +4,4 @@ add_library(engine-tdt OBJECT ${FEATURE_SRCS} ) -add_dependencies(engine-tdt protobuf::libprotobuf) +add_dependencies(engine-tdt mindspore::protobuf) diff --git a/mindspore/ccsrc/mindrecord/CMakeLists.txt b/mindspore/ccsrc/mindrecord/CMakeLists.txt index eb1c1fb591..4b8448287d 100644 --- a/mindspore/ccsrc/mindrecord/CMakeLists.txt +++ b/mindspore/ccsrc/mindrecord/CMakeLists.txt @@ -26,7 +26,7 @@ set_target_properties(_c_mindrecord PROPERTIES ) # add link library -target_link_libraries(_c_mindrecord PRIVATE mindspore::sqlite ${PYTHON_LIB} ${SECUREC_LIBRARY} mindspore mindspore_gvar protobuf::libprotobuf) +target_link_libraries(_c_mindrecord PRIVATE mindspore::sqlite ${PYTHON_LIB} ${SECUREC_LIBRARY} mindspore mindspore_gvar mindspore::protobuf) if (USE_GLOG) 
target_link_libraries(_c_mindrecord PRIVATE mindspore::glog) From 4fb76e6d7ea08a86d20e59d5d15472f81a7661af Mon Sep 17 00:00:00 2001 From: jjfeing Date: Sat, 11 Apr 2020 11:48:32 +0800 Subject: [PATCH 166/367] modify Float64 to float64, del double --- mindspore/ccsrc/kernel/tbe/tbe_convert_utils.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ccsrc/kernel/tbe/tbe_convert_utils.cc b/mindspore/ccsrc/kernel/tbe/tbe_convert_utils.cc index 025ff935e2..1159bd888d 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_convert_utils.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_convert_utils.cc @@ -51,7 +51,7 @@ const std::map type_id_str_maps = { const std::map type_str_maps = { {"Float32", "float32"}, {"Float16", "float16"}, {"Int8", "int8"}, {"Int16", "int16"}, {"UInt16", "uint16"}, {"UInt8", "uint8"}, {"Int32", "int32"}, {"UInt32", "uint32"}, - {"Int64", "int64"}, {"UInt64", "uint64"}, {"Bool_", "int8"}, {"Float64", "double"}, + {"Int64", "int64"}, {"UInt64", "uint64"}, {"Bool_", "int8"}, {"Float64", "float64"}, }; const std::unordered_map type_nbyte_maps = { From b34c0e7a178f6c7266c9853c2bf9d73e8e86d891 Mon Sep 17 00:00:00 2001 From: yangzhenzhang <285824651@qq.com> Date: Wed, 8 Apr 2020 14:32:01 +0800 Subject: [PATCH 167/367] add parallel op for dropoutdomask --- .../auto_parallel/operator_costmodel.cc | 10 + .../auto_parallel/operator_costmodel.h | 32 +++ mindspore/ccsrc/parallel/dynamic_creator.h | 1 - mindspore/ccsrc/parallel/node_check.cc | 1 + .../parallel/ops_info/dropout_do_mask_info.cc | 223 +++++++++++++----- .../parallel/ops_info/dropout_do_mask_info.h | 5 +- .../ccsrc/parallel/ops_info/generator_info.cc | 188 --------------- .../ccsrc/parallel/ops_info/generator_info.h | 70 ------ .../parallel/ops_info/ops_info_head_files.h | 1 - mindspore/ccsrc/parallel/ops_info/ops_utils.h | 4 + .../ccsrc/parallel/step_auto_parallel.cc | 1 - mindspore/ccsrc/parallel/step_parallel.cc | 46 +++- .../ops_info/dropout_do_mask_info_test.cc | 166 ------------- 
.../parallel/ops_info/generator_info_test.cc | 137 ----------- .../python/parallel/test_dropout_do_mask.py | 94 ++++++++ 15 files changed, 355 insertions(+), 624 deletions(-) delete mode 100644 mindspore/ccsrc/parallel/ops_info/generator_info.cc delete mode 100644 mindspore/ccsrc/parallel/ops_info/generator_info.h delete mode 100644 tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc delete mode 100644 tests/ut/cpp/parallel/ops_info/generator_info_test.cc create mode 100644 tests/ut/python/parallel/test_dropout_do_mask.py diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc index 93d7dc56c5..1f420e8797 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc @@ -613,5 +613,15 @@ double ReduceMeanCost::GetForwardComputationCost(const std::vector& return result; } + +double DropOutCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t&) const { + if (inputs.empty()) { + return 0.0; + } + TensorInfo input0 = inputs[0]; + Shape input0_slice_shape = input0.slice_shape(); + return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]) * DROPOUT_COST_RATE; +} } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h index 73f3ff139f..b642ada0d9 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h @@ -26,6 +26,7 @@ namespace mindspore { namespace parallel { #define MAXIMUM_INPUT_NUMBER 100 #define DEFAULT_DATA_TYPE_LENGTH 4 +#define DROPOUT_COST_RATE 1.125 // the DropoutGenMask need 12.5% memory class OperatorCost; using OperatorCostPtr = std::shared_ptr; @@ -493,6 +494,37 @@ class GetNextCost : public OperatorCost { } }; using 
GetNextCostPtr = std::shared_ptr; + +class DropOutCost : public OperatorCost { + public: + DropOutCost() = default; + ~DropOutCost() override = default; + + double GetCommCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); + } + double GetForwardCommCost(const std::vector&, const std::vector&, + const int32_t&) const override { + return 0.0; + } + double GetBackwardCommCost(const std::vector&, const std::vector&, + const int32_t&) const override { + return 0.0; + } + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const override; + double GetBackwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const override { + return 0.0; + } +}; + +using DropOutCostPtr = std::shared_ptr; } // namespace parallel } // namespace mindspore #endif // PARALLEL_AUTO_PARALLEL_OPERATOR_COSTMODEL_H_ diff --git a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h index 1270116f50..62cc4c5da3 100644 --- a/mindspore/ccsrc/parallel/dynamic_creator.h +++ b/mindspore/ccsrc/parallel/dynamic_creator.h @@ -111,7 +111,6 @@ REGISTER(ReduceMinInfo); REGISTER(TransposeInfo); REGISTER(PReLUInfo); REGISTER(DropoutDoMaskInfo); -REGISTER(DropoutGenMaskInfo) REGISTER(ReshapeInfo); REGISTER(FloorDivInfo); REGISTER(MaximumInfo); diff --git a/mindspore/ccsrc/parallel/node_check.cc b/mindspore/ccsrc/parallel/node_check.cc index fc6115c3c5..ffd40e421d 100644 --- a/mindspore/ccsrc/parallel/node_check.cc +++ b/mindspore/ccsrc/parallel/node_check.cc @@ -71,6 +71,7 @@ const std::set BLACK_LIST = 
{TUPLE_GETITEM, BROADCASTGRADIENTARGS, INVERTPERMUTATION, CONTROLDEPEND, + DROPOUT_GEN_MASK, EMBED, CREATINSTANCE, ZEROSLIKETENSOR, diff --git a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.cc b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.cc index c6cd94b7be..c755cc785d 100644 --- a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,6 +22,7 @@ #include #include "ir/value.h" +#include "pipeline/resource.h" #include "parallel/auto_parallel/costmodel.h" #include "parallel/device_matrix.h" #include "parallel/strategy.h" @@ -29,13 +30,32 @@ namespace mindspore { namespace parallel { +static int32_t SEED_NUM = 1; + Status DropoutDoMaskInfo::CheckStrategy(const StrategyPtr& strategy) { - Shapes input_shape = {inputs_shape_.at(0)}; + if (strategy == nullptr) { + MS_LOG(ERROR) << name_ << ": The strategy is null"; + return FAILED; + } + + std::vector stra = strategy->GetInputDim(); + if (stra.size() != 1) { + MS_LOG(ERROR) << name_ << ": Invalid strategy size " << stra.size() << ", it must be 1"; + return FAILED; + } + + if (inputs_shape_.empty()) { + MS_LOG(ERROR) << name_ << ": The inputs shape is empty"; + return FAILED; + } + + // only check the input[0] + Shapes input_shape = {inputs_shape_[0]}; if (CheckStrategyValue(strategy, input_shape, is_auto_parallel_) != SUCCESS) { if (is_auto_parallel_) { - MS_LOG(DEBUG) << name_ << " : Invalid strategy."; + MS_LOG(DEBUG) << name_ << ": Invalid strategy"; } else { - MS_LOG(ERROR) << name_ << " : Invalid strategy."; + MS_LOG(ERROR) << name_ << ": Invalid strategy"; } return FAILED; } @@ -43,68 +63,69 @@ Status DropoutDoMaskInfo::CheckStrategy(const StrategyPtr& strategy) { } Status 
DropoutDoMaskInfo::InferDevMatrixShape() { - std::vector stra = strategy_->GetInputDim(); - Dimensions input_strategy = stra.at(0); + if (strategy_ == nullptr) { + MS_LOG(ERROR) << name_ << ": The strategy is null"; + return FAILED; + } - dev_matrix_shape_ = input_strategy; + std::vector strategy = strategy_->GetInputDim(); + if (strategy.empty()) { + MS_LOG(ERROR) << name_ << ": The strategy is empty"; + return FAILED; + } + dev_matrix_shape_ = strategy[0]; return SUCCESS; } Status DropoutDoMaskInfo::InferTensorMap() { + if (inputs_shape_.empty()) { + MS_LOG(ERROR) << name_ << ": The inputs shape is empty"; + return FAILED; + } + std::vector tensor_map_index; - size_t size = inputs_shape_.at(0).size(); - // such as 4: tensor_map_index [3,2,1,0] + size_t size = inputs_shape_[0].size(); + // if the dimension of input is 4, and tensor_map_index is [3, 2, 1, 0] for (size_t i = 0; i < size; ++i) { - tensor_map_index.push_back((int32_t)(LAST_INDEX(size) - i)); + tensor_map_index.push_back(SizeToInt(size - i - 1)); } - TensorMap input_b_tensor_map = {MAP_NONE}; - inputs_tensor_map_.push_back(tensor_map_index); - inputs_tensor_map_.push_back(input_b_tensor_map); - outputs_tensor_map_.push_back(tensor_map_index); + // the input[1] do not need tensor map + inputs_tensor_map_.push_back(tensor_map_index); // input_0 + outputs_tensor_map_.push_back(tensor_map_index); // output return SUCCESS; } Status DropoutDoMaskInfo::InferTensorInfo() { - // infer tensor shape - Shape input_a_shape = inputs_shape_.at(0); - Shape input_b_shape = inputs_shape_.at(1); - Shape output_shape = outputs_shape_.at(0); - - // infer slice shape - Shapes inputs_slice_shape, outputs_slice_shape; - Strategys inputs_strategy = strategy_->GetInputDim(); - Dimensions input_b_strategy = {1}, input_x_strategy = {}; - inputs_strategy.emplace_back(input_b_strategy); - inputs_strategy.emplace_back(input_x_strategy); - Strategys outputs_strategy = {inputs_strategy.at(0)}; - if (InferSliceShape(inputs_strategy, 
outputs_strategy, &inputs_slice_shape, &outputs_slice_shape) != SUCCESS) { + if (inputs_shape_.size() != 3) { + MS_LOG(ERROR) << name_ << ": Invalid inputs shape size " << inputs_shape_.size(); return FAILED; } - Shape input_a_slice_shape = inputs_slice_shape.at(0); - Shape input_b_slice_shape = inputs_slice_shape.at(1); - Shape output_slice_shape = outputs_slice_shape.at(0); - TensorLayout input_a_tensor_layout, input_b_tensor_layout; - TensorLayout output_tensor_layout; - if (input_a_tensor_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], input_a_shape) != SUCCESS) { + if (strategy_ == nullptr) { + MS_LOG(ERROR) << name_ << ": The strategy is null"; return FAILED; } - if (input_b_tensor_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[1], input_b_shape) != SUCCESS) { + + Shape input_0_shape = inputs_shape_[0]; + + if (inputs_tensor_map_.empty()) { + MS_LOG(ERROR) << name_ << ": The inputs tensor map is empty"; return FAILED; } - if (output_tensor_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[0], output_shape) != SUCCESS) { + + TensorLayout input_0_tensor_layout; + if (input_0_tensor_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], input_0_shape) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Init tensor layout failed"; return FAILED; } - TensorInfo input_a_tensor_info(input_a_tensor_layout, input_a_shape, input_a_slice_shape); - TensorInfo input_b_tensor_info(input_b_tensor_layout, input_b_shape, input_b_slice_shape); - TensorInfo output_tensor_info(output_tensor_layout, output_shape, output_slice_shape); - inputs_tensor_info_.push_back(input_a_tensor_info); - inputs_tensor_info_.push_back(input_b_tensor_info); - outputs_tensor_info_.push_back(output_tensor_info); + TensorInfo input_0_tensor_info(input_0_tensor_layout); + // input_1 do not need tensor info + inputs_tensor_info_.push_back(input_0_tensor_info); // input_0 + outputs_tensor_info_.push_back(input_0_tensor_info); // output return SUCCESS; } @@ 
-122,20 +143,29 @@ Status DropoutDoMaskInfo::SetCostUnderStrategy(const StrategyPtr& strategy) { } Status DropoutDoMaskInfo::GenerateStrategies(int32_t stage_id) { - CheckGlobalDeviceManager(); + if (inputs_shape_.empty()) { + MS_LOG(ERROR) << name_ << ": The inputs shape is empty"; + return FAILED; + } + is_auto_parallel_ = true; - size_t dev_num = g_device_manager->GetDeviceListByStageId(stage_id).size(); - Dimensions strategy(inputs_shape_[0].size() - 1, 1); - (void)strategy.insert(strategy.begin(), SizeToInt(dev_num)); - std::vector stra = {strategy}; - StrategyPtr sp = std::make_shared(stage_id, stra); - if (SetCostUnderStrategy(sp) == SUCCESS) { - MS_LOG(INFO) << name_ << " : Successfully generated batch-parallel-strategy."; - PrintStrategy(sp); - } else { - MS_LOG(ERROR) << name_ << " : Generating batch-parallel-strategy failed."; + Shape input0_split(inputs_shape_[0].size(), 1); + Shapes splittable_inputs = {input0_split}; + Shapes used_inputs_shape = {inputs_shape_[0]}; + + std::vector sp_vector; + if (GenerateStrategiesForIndependentInputs(stage_id, used_inputs_shape, splittable_inputs, &sp_vector) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Generate strategies failed"; return FAILED; } + size_t success = 0; + for (auto& sp : sp_vector) { + if (SetCostUnderStrategy(sp) == SUCCESS) { + success++; + MS_LOG(INFO) << name_ << ": Successfully generated " << success << " strategy"; + PrintStrategy(sp); + } + } return SUCCESS; } @@ -150,26 +180,105 @@ std::shared_ptr>> DropoutDoMaskInfo::GenerateBa Status DropoutDoMaskInfo::Init(const StrategyPtr& strategy) { if (InitWithAutoRepeatCalc(strategy) != SUCCESS) { - MS_LOG(ERROR) << name_ << " : Init failed."; + MS_LOG(ERROR) << name_ << ": Init failed."; return FAILED; } - MS_LOG(INFO) << name_ << " : Init success."; + MS_LOG(INFO) << name_ << ": Init success."; return SUCCESS; } Status DropoutDoMaskInfo::InitForCostModel(const StrategyPtr& strategy) { if (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS) { 
if (is_auto_parallel_) { - MS_LOG(DEBUG) << name_ << " : Init for cost model failed."; + MS_LOG(DEBUG) << name_ << ": Init for cost model failed."; } else { - MS_LOG(ERROR) << name_ << " : Init for cost model failed."; + MS_LOG(ERROR) << name_ << ": Init for cost model failed."; } return FAILED; } - MS_LOG(INFO) << name_ << " : Init for cost model success."; + MS_LOG(INFO) << name_ << ": Init for cost model success."; return SUCCESS; } + +PrimitivePtr GetDropoutGenMaskPrim(const CNodePtr& cnode) { + MS_EXCEPTION_IF_NULL(cnode); + if (cnode->inputs().size() != DROPOUT_DO_MASK_CNODE_INPUT_SIZE) { + MS_LOG(EXCEPTION) << "The size of dropout do mask cnode's inputs must be " << DROPOUT_DO_MASK_CNODE_INPUT_SIZE; + } + + AnfNodePtr dropout_gen_mask = cnode->input(DROPOUT_GEN_MASK_INDEX); + MS_EXCEPTION_IF_NULL(dropout_gen_mask); + if (!dropout_gen_mask->isa()) { + MS_LOG(EXCEPTION) << "The dropout do mask cnode's input[" << DROPOUT_GEN_MASK_INDEX << "] must be a cnode"; + } + + auto dropout_gen_mask_cnode = dropout_gen_mask->cast(); + MS_EXCEPTION_IF_NULL(dropout_gen_mask_cnode); + if (dropout_gen_mask_cnode->inputs().size() != DROPOUT_GEN_MASK_CNODE_INPUT_SIZE) { + MS_LOG(EXCEPTION) << "The size of dropout gen mask cnode's inputs must be " << DROPOUT_GEN_MASK_CNODE_INPUT_SIZE; + } + if (!IsValueNode(dropout_gen_mask_cnode->input(0))) { + MS_LOG(EXCEPTION) << "The input[0] of dropout gen mask cnode is not primitive"; + } + + ValueNodePtr value_node = dropout_gen_mask_cnode->input(0)->cast(); + MS_EXCEPTION_IF_NULL(value_node); + PrimitivePtr prim = value_node->value()->cast(); + MS_EXCEPTION_IF_NULL(prim); + if (prim->name() != DROPOUT_GEN_MASK) { + MS_LOG(EXCEPTION) << "The primitive name is not DropoutGenMask"; + } + return prim; +} + +// DropoutDoMask needs to be used together with DropoutGenMask. Only the first input tensor of DropoutGenMask is +// split. 
Find the DropoutGenMask node in the anf graph according to DropoutDoMask node, and modify the input shape +// of DropoutGenMask according to the strategy of DropoutDoMask. When the DropoutDoMask performs repeated calculation +// and both seeds of DropoutGenMask are 0, two new seeds are automatically generated for DropoutGenMask. +Operator DropoutDoMaskInfo::GetDropoutGenMaskReplaceOp(const CNodePtr& cnode) { + MS_EXCEPTION_IF_NULL(cnode); + PrimitivePtr prim = GetDropoutGenMaskPrim(cnode); + MS_EXCEPTION_IF_NULL(prim); + + if (inputs_tensor_info_.empty()) { + MS_LOG(EXCEPTION) << "The tensor info of dropout do mask is empty"; + } + + if (cnode->inputs().size() != DROPOUT_DO_MASK_CNODE_INPUT_SIZE) { + MS_LOG(EXCEPTION) << "The size of dropout do mask cnode's inputs must be " << DROPOUT_DO_MASK_CNODE_INPUT_SIZE; + } + + if (!cnode->input(DROPOUT_DO_MASK_KEEP_PROB_INDEX)->isa()) { + MS_LOG(EXCEPTION) << "The keep prob of dropout do mask is not value node"; + } + + ValuePtr keep_prob = GetValueNode(cnode->input(DROPOUT_DO_MASK_KEEP_PROB_INDEX)); + MS_EXCEPTION_IF_NULL(keep_prob); + auto attr = prim->attrs(); + if ((attr.find(SEED0) == attr.end()) || (attr.find(SEED1) == attr.end())) { + MS_LOG(EXCEPTION) << "The attrs of dropout gen mask must be have seed0 and seed1"; + } + int32_t seed_0 = GetValue(attr[SEED0]); + int32_t seed_1 = GetValue(attr[SEED1]); + if ((seed_0 == 0) && (seed_1 == 0) && (repeated_calc_num_ > 1)) { + seed_0 = SEED_NUM; + seed_1 = SEED_NUM; + SEED_NUM++; + } + + Shape input_slice_shape = inputs_tensor_info_[0].slice_shape(); + ValuePtr new_shape = MakeValue(input_slice_shape); + Attr attr_0 = std::make_pair(SEED0, MakeValue(seed_0)); + Attr attr_1 = std::make_pair(SEED1, MakeValue(seed_1)); + OperatorAttrs attrs = {attr_0, attr_1}; + Attr param_0 = std::make_pair(SHAPE, new_shape); + Attr param_1 = std::make_pair(KEEP_PROB, keep_prob); + OperatorParams params = {std::make_pair(param_0, 1), std::make_pair(param_1, 2)}; + OperatorArgs args = 
std::make_pair(attrs, params); + Operator replace_op = {std::make_pair(DROPOUT_GEN_MASK, args)}; + return replace_op; +} } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h index 859b3e06a4..7ebe677997 100644 --- a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h +++ b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h @@ -33,7 +33,7 @@ class DropoutDoMaskInfo : public OperatorInfo { public: DropoutDoMaskInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} ~DropoutDoMaskInfo() override = default; Status Init(const StrategyPtr& strategy) override; @@ -41,6 +41,7 @@ class DropoutDoMaskInfo : public OperatorInfo { Status SetCostUnderStrategy(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; std::shared_ptr>> GenerateBatchStrategies() override; + Operator GetDropoutGenMaskReplaceOp(const CNodePtr& cnode); protected: Status CheckStrategy(const StrategyPtr& strategy) override; @@ -51,6 +52,8 @@ class DropoutDoMaskInfo : public OperatorInfo { Status InferTensorInfo() override; Status InferDevMatrixShape() override; }; + +using DropoutDoMaskInfoPtr = std::shared_ptr; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/generator_info.cc b/mindspore/ccsrc/parallel/ops_info/generator_info.cc deleted file mode 100644 index a39f9faab9..0000000000 --- a/mindspore/ccsrc/parallel/ops_info/generator_info.cc +++ /dev/null @@ -1,188 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the 
License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "parallel/ops_info/generator_info.h" - -#include -#include -#include -#include - -#include "ir/value.h" -#include "parallel/device_matrix.h" -#include "parallel/strategy.h" -#include "parallel/tensor_layout/tensor_redistribution.h" - -namespace mindspore { -namespace parallel { -Status GeneratorBase::InferTensorMap() { - TensorMap output_tensor_map = {MAP_NONE}; - outputs_tensor_map_.push_back(output_tensor_map); - return SUCCESS; -} - -Status GeneratorBase::InferTensorInfo() { - Shape output_shape = outputs_shape_.at(0); - Shape output_slice_shape = outputs_shape_.at(0); - - TensorLayout output_tensor_layout; - if (output_tensor_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[0], output_shape) != SUCCESS) { - MS_LOG(ERROR) << name_ << " : Creat output tensor layout failed."; - return FAILED; - } - TensorInfo output_tensor_info(output_tensor_layout, output_shape, output_slice_shape); - outputs_tensor_info_.push_back(output_tensor_info); - - return SUCCESS; -} - -Status GeneratorBase::InferDevMatrixShape() { - std::vector stra = strategy_->GetInputDim(); - Dimensions input_strategy = stra.at(0); - - dev_matrix_shape_ = input_strategy; - - return SUCCESS; -} - -Status GeneratorBase::SetCostUnderStrategy(const StrategyPtr &strategy) { - if (SetCostUnderStrategyBase(strategy) != SUCCESS) { - if (is_auto_parallel_) { - MS_LOG(DEBUG) << name_ << " : Set cost under strategy failed."; - } else { - MS_LOG(ERROR) << name_ << " : Set cost under strategy failed."; - } - return FAILED; - } - - return SUCCESS; -} - -Status 
DropoutGenMaskInfo::GenerateStrategies(int32_t stage_id) { - if (input_value_.empty()) { - MS_LOG(ERROR) << name_ << " : Input value is empty."; - return FAILED; - } - Shape param = GetValue>(input_value_[0]); - if (param.empty()) { - MS_LOG(ERROR) << name_ << " : Input value [0] is empty."; - return FAILED; - } - // Now,only support batch parallel. - CheckGlobalDeviceManager(); - is_auto_parallel_ = true; - size_t dev_num = g_device_manager->GetDeviceListByStageId(stage_id).size(); - Dimensions strategy(param.size() - 1, 1); - (void)strategy.insert(strategy.begin(), SizeToInt(dev_num)); - std::vector stra = {strategy}; - StrategyPtr sp = std::make_shared(stage_id, stra); - if (SetCostUnderStrategy(sp) == SUCCESS) { - MS_LOG(INFO) << name_ << " : Successfully generated batch-parallel-strategy."; - PrintStrategy(sp); - } else { - MS_LOG(ERROR) << name_ << " : Generating batch-parallel-strategy failed."; - return FAILED; - } - return SUCCESS; -} - -Status DropoutGenMaskInfo::CheckStrategy(const StrategyPtr &strategy) { - if (strategy->GetInputNumber() != 1) { - if (is_auto_parallel_) { - MS_LOG(DEBUG) << name_ << " : The strategy is wrong."; - } else { - MS_LOG(ERROR) << name_ << " : The strategy is wrong."; - } - return FAILED; - } - - return SUCCESS; -} - -Status DropoutGenMaskInfo::InferReplaceOps(const StrategyPtr &strategy) { - Shape shape = GetValue>(input_value_[0]); - Strategys stra = strategy->GetInputDim(); - Dimensions input_strategy = stra.at(0); - int32_t dev_num = *(input_strategy.begin()); - if (dev_num <= 0) { - MS_LOG(ERROR) << name_ << " : The number of devices should not be less than 0."; - return FAILED; - } - // Batch parallel - if (shape[0] % dev_num != 0) { - MS_LOG(ERROR) << name_ << " : The shape " << shape[0] << " can't be exact divided by device number " << dev_num; - return FAILED; - } - shape[0] = shape[0] / dev_num; - ValuePtr shape_ptr = MakeValue(shape); - Attr attr_0 = std::make_pair(SEED0, attrs_[SEED0]); - Attr attr_1 = 
std::make_pair(SEED1, attrs_[SEED1]); - OperatorAttrs attrs = {attr_0, attr_1}; - Attr param_0 = std::make_pair(SHAPE, shape_ptr); - Attr param_1 = std::make_pair(KEEP_PROB, input_value_[1]); - OperatorParams params = {std::make_pair(param_0, 1), std::make_pair(param_1, 2)}; - OperatorArgs args = std::make_pair(attrs, params); - replace_op_ = {std::make_pair(DROPOUT_GEN_MASK, args)}; - return SUCCESS; -} - -std::shared_ptr>> DropoutGenMaskInfo::GenerateBatchStrategies() { - if (input_value_.empty()) { - MS_LOG(EXCEPTION) << name_ << " : Input value is empty."; - } - Shape param = GetValue>(input_value_[0]); - if (param.empty()) { - MS_LOG(EXCEPTION) << name_ << " : Input value [0] is empty."; - } - // Now,only support batch parallel. - CheckGlobalDeviceManager(); - size_t dev_num = g_device_manager->GetDeviceListByStageId(0).size(); - Dimensions strategy(param.size() - 1, 1); - (void)strategy.insert(strategy.begin(), SizeToInt(dev_num)); - std::vector strategy_v = {strategy}; - return std::make_shared>>(strategy_v); -} - -Status GeneratorBase::Init(const StrategyPtr &strategy) { - if (InitWithAutoRepeatCalc(strategy) != SUCCESS) { - MS_LOG(ERROR) << name_ << " : Init failed."; - return FAILED; - } - - if (InferReplaceOps(strategy) != SUCCESS) { - MS_LOG(ERROR) << name_ << " : Infer replace ops failed."; - return FAILED; - } - - MS_LOG(INFO) << name_ << " : Init success."; - return SUCCESS; -} - -Status GeneratorBase::InitForCostModel(const StrategyPtr &strategy) { - if (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS) { - if (is_auto_parallel_) { - MS_LOG(DEBUG) << name_ << " : Init for cost model failed."; - } else { - MS_LOG(ERROR) << name_ << " : Init for cost model failed."; - } - return FAILED; - } - - MS_LOG(INFO) << name_ << " : Init for cost model success."; - return SUCCESS; -} -} // namespace parallel -} // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/generator_info.h b/mindspore/ccsrc/parallel/ops_info/generator_info.h 
deleted file mode 100644 index 68024593f3..0000000000 --- a/mindspore/ccsrc/parallel/ops_info/generator_info.h +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GENERATOR_INFO_H_ -#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GENERATOR_INFO_H_ - -#include -#include -#include -#include - -#include "parallel/auto_parallel/operator_costmodel.h" -#include "parallel/ops_info/operator_info.h" -#include "parallel/strategy.h" - -namespace mindspore { -namespace parallel { -class GeneratorBase : public OperatorInfo { - public: - GeneratorBase(const std::string &operator_name, const Shapes &inputs_shape, const Shapes &outputs_shape, - const PrimitiveAttrs &attrs) - : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} - - ~GeneratorBase() override = default; - - Status Init(const StrategyPtr &strategy) override; - Status SetCostUnderStrategy(const StrategyPtr &strategy) override; - Status InitForCostModel(const StrategyPtr &strategy) override; - - protected: - // For now, generator ops don't have attributes - Status GetAttrs() override { return Status::SUCCESS; } - Status InferTensorMap() override; - Status InferTensorInfo() override; - Status InferDevMatrixShape() override; - Status InferMirrorOps() override { return SUCCESS; } - Status InferForwardCommunication() override { return SUCCESS; } - virtual 
Status InferReplaceOps(const StrategyPtr &strategy) = 0; -}; - -class DropoutGenMaskInfo : public GeneratorBase { - public: - DropoutGenMaskInfo(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape, - const PrimitiveAttrs &attrs) - : GeneratorBase(name, inputs_shape, outputs_shape, attrs) {} - ~DropoutGenMaskInfo() override = default; - Status GenerateStrategies(int32_t stage_id) override; - std::shared_ptr>> GenerateBatchStrategies() override; - - protected: - Status CheckStrategy(const StrategyPtr &strategy) override; - Status InferReplaceOps(const StrategyPtr &strategy) override; -}; -} // namespace parallel -} // namespace mindspore - -#endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GENERATOR_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h b/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h index cc13512b54..1681c8f796 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h @@ -24,7 +24,6 @@ #include "parallel/ops_info/comparison_function_info.h" #include "parallel/ops_info/dropout_do_mask_info.h" #include "parallel/ops_info/elementary_function_info.h" -#include "parallel/ops_info/generator_info.h" #include "parallel/ops_info/get_next_info.h" #include "parallel/ops_info/l2_normalize_info.h" #include "parallel/ops_info/loss_info.h" diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index fe2a5d2c86..befd26e318 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -34,6 +34,10 @@ constexpr size_t SOFTMAX_ATTR_SIZE = 1; constexpr size_t ACTIVATION_INPUTS_SIZE = 1; constexpr size_t ACTIVATION_OUTPUTS_SIZE = 1; constexpr size_t EXPANDDIMS_INPUT_SIZE = 2; +constexpr size_t DROPOUT_DO_MASK_CNODE_INPUT_SIZE = 4; +constexpr size_t DROPOUT_GEN_MASK_CNODE_INPUT_SIZE = 3; +constexpr size_t DROPOUT_GEN_MASK_INDEX = 2; +constexpr 
size_t DROPOUT_DO_MASK_KEEP_PROB_INDEX = 3; constexpr size_t SoftmaxCrossEntropyWithLogitsAttrSize = 1; constexpr size_t SoftmaxCrossEntropyWithLogitsInputsSize = 2; constexpr size_t SoftmaxCrossEntropyWithLogitsOutputsSize = 2; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index d7d48c35bb..fe6be575ee 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -69,7 +69,6 @@ std::vector splittable_op_ = {MATMUL, RELU, ONEHOT, DROPOUT_DO_MASK, - DROPOUT_GEN_MASK, REDUCE_MAX, REDUCE_MIN, ARGMAXWITHVALUE, diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index 78bec00bcf..0a6d0b0bef 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -484,8 +484,6 @@ void StepSplitTensor(const AnfNodePtr& node, const FuncGraphManagerPtr& manager) } if (IsParallelCareNode(use_cnode)) { SplitTensor(node, use_cnode, node_pair.second); - } else { - StepSplitTensor(use_cnode, manager); } } } @@ -525,6 +523,26 @@ std::vector ReplaceOpInput(const Operator& replace_op, const std::st return replace_input; } +void ReplaceOneOp(const Operator& replace_op, const CNodePtr& node) { + FuncGraphPtr func_graph = node->func_graph(); + MS_EXCEPTION_IF_NULL(func_graph); + FuncGraphManagerPtr manager = func_graph->manager(); + if (manager == nullptr) { + MS_LOG(EXCEPTION) << "Failure:AddNode error since manager is nullptr"; + } + std::string instance_name = CreateInstanceName(node, 0); + std::vector replace_input; + replace_input = ReplaceOpInput(replace_op, instance_name, node); + CNodePtr replace_node = func_graph->NewCNode(replace_input); + MS_EXCEPTION_IF_NULL(replace_node); + ScopePtr scope = node->scope(); + MS_EXCEPTION_IF_NULL(scope); + replace_node->set_scope(scope); + replace_node->set_in_forward_flag(true); + replace_input[0]->set_scope(scope); + (void)manager->Replace(node, 
replace_node); +} + void StepReplaceOp(OperatorVector replace_op, const CNodePtr& node) { // step1:get graph manager distribute_operator OperatorInfoPtr distribute_operator = node->operator_info(); @@ -1757,6 +1775,28 @@ void StepReplace(const OperatorInfoPtr& distribute_operator, const CNodePtr& cno } } +void HandleDropoutNode(const OperatorInfoPtr& distribute_operator, const CNodePtr& cnode) { + MS_EXCEPTION_IF_NULL(distribute_operator); + MS_EXCEPTION_IF_NULL(cnode); + + std::string op_name = distribute_operator->name(); + if (op_name.find(DROPOUT_DO_MASK) == std::string::npos) { + return; + } + + DropoutDoMaskInfoPtr dropout_do_mask = std::dynamic_pointer_cast(distribute_operator); + MS_EXCEPTION_IF_NULL(dropout_do_mask); + Operator replace_op = dropout_do_mask->GetDropoutGenMaskReplaceOp(cnode); + if (cnode->inputs().size() != DROPOUT_DO_MASK_CNODE_INPUT_SIZE) { + MS_LOG(EXCEPTION) << "The size of drop out do mask cnode's input is not " << DROPOUT_DO_MASK_CNODE_INPUT_SIZE; + } + ReplaceOneOp(replace_op, cnode->input(DROPOUT_GEN_MASK_INDEX)->cast()); +} + +void HandleSpecialNode(const OperatorInfoPtr& distribute_operator, const CNodePtr& cnode) { + HandleDropoutNode(distribute_operator, cnode); +} + void ParallelCommunication(const FuncGraphPtr& root, const std::vector& all_nodes, const FuncGraphManagerPtr& manager) { MS_EXCEPTION_IF_NULL(root); @@ -1804,6 +1844,8 @@ void ParallelCommunication(const FuncGraphPtr& root, const std::vector(node)) { StepSplitTensor(node, manager); } diff --git a/tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc b/tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc deleted file mode 100644 index 2f17fb4450..0000000000 --- a/tests/ut/cpp/parallel/ops_info/dropout_do_mask_info_test.cc +++ /dev/null @@ -1,166 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include -#include "common/common_test.h" -#include "parallel/strategy.h" -#include "parallel/ops_info/dropout_do_mask_info.h" -#include "parallel/device_manager.h" -#include "parallel/step_parallel.h" - -namespace mindspore { -namespace parallel { - -class DropoutDoMaskInfo; -using DropoutDoMaskInfoPtr = std::shared_ptr; -DropoutDoMaskInfoPtr do_mask; - -class TestDropoutDoMaskInfo : public UT::Common { - public: - TestDropoutDoMaskInfo() {} - void SetUp(); - void TearDown() {} -}; - -void TestDropoutDoMaskInfo::SetUp() { - std::vector dev_list; - - for (int32_t i = 0; i < 34; i++) { - dev_list.push_back(i); - } - - std::vector stage_map; - stage_map.push_back(32); - stage_map.push_back(2); - - int32_t local_dev = 0; - - // create a new g_device_manager - g_device_manager = std::make_shared(); - g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - - std::unordered_map attr; - - Shapes inputs_shape = {{32, 128}, {64}, {}}; - Shapes outputs_shape = {{32, 128}}; - do_mask = std::make_shared("do_mask_info", inputs_shape, outputs_shape, attr); -} - -TEST_F(TestDropoutDoMaskInfo, InferDevMatrixShape) { - std::vector stra = {{4, 8}}; - StrategyPtr strategy = NewStrategy(0, stra); - - do_mask->Init(strategy); - std::vector dev_matrix_shape = do_mask->dev_matrix_shape(); - - std::vector expect = {4, 8}; - ASSERT_EQ(dev_matrix_shape, expect); -} - -TEST_F(TestDropoutDoMaskInfo, InferSliceShape) { - std::vector stra = {{4, 8}}; - StrategyPtr strategy = NewStrategy(0, stra); - - do_mask->Init(strategy); - std::vector 
inputs = do_mask->inputs_tensor_info(); - std::vector outputs = do_mask->outputs_tensor_info(); - - Shape input_a_slice_shape_expect = {8, 16}; - Shape input_b_slice_shape_expect = {64}; - Shape output_slice_shape_expect = {8, 16}; - - TensorInfo input_a_tensor_info = inputs.at(0); - TensorInfo input_b_tensor_info = inputs.at(1); - TensorInfo output_tensor_info = outputs.at(0); - Shape input_a_slice_shape = input_a_tensor_info.slice_shape(); - Shape input_b_slice_shape = input_b_tensor_info.slice_shape(); - Shape output_slice_shape = output_tensor_info.slice_shape(); - - ASSERT_EQ(input_a_slice_shape, input_a_slice_shape_expect); - ASSERT_EQ(input_b_slice_shape, input_b_slice_shape_expect); - ASSERT_EQ(output_slice_shape, output_slice_shape_expect); -} - -TEST_F(TestDropoutDoMaskInfo, GetTensorLayout) { - std::vector stra = {{4, 8}}; - StrategyPtr strategy = NewStrategy(0, stra); - - do_mask->Init(strategy); - std::vector inputs = do_mask->inputs_tensor_info(); - std::vector outputs = do_mask->outputs_tensor_info(); - - TensorMap input_a_map_expect = {1, 0}; - TensorMap input_b_map_expect = {-1}; - TensorMap output_map_expect = {1, 0}; - - TensorInfo input_a_tensor_info = inputs.at(0); - TensorInfo input_b_tensor_info = inputs.at(1); - TensorInfo output_tensor_info = outputs.at(0); - Map input_a_tensor_map = input_a_tensor_info.tensor_layout().origin_tensor_map(); - Map input_b_tensor_map = input_b_tensor_info.tensor_layout().origin_tensor_map(); - Map output_tensor_map = output_tensor_info.tensor_layout().origin_tensor_map(); - - ASSERT_EQ(input_a_tensor_map.array(), input_a_map_expect); - ASSERT_EQ(input_b_tensor_map.array(), input_b_map_expect); - ASSERT_EQ(output_tensor_map.array(), output_map_expect); -} - -TEST_F(TestDropoutDoMaskInfo, GetForwardOp) { - std::vector stra = {{4, 8}}; - StrategyPtr strategy = NewStrategy(0, stra); - - do_mask->Init(strategy); - OperatorVector forward_op = do_mask->forward_op(); - size_t size = forward_op.size(); - - 
ASSERT_EQ(size, 0); -} - -TEST_F(TestDropoutDoMaskInfo, CheckStrategy1) { - std::vector stra = {{4, 8, 2}}; - StrategyPtr strategy = NewStrategy(0, stra); - - Status ret = do_mask->Init(strategy); - ASSERT_EQ(ret, FAILED); -} - -TEST_F(TestDropoutDoMaskInfo, CheckStrategy2) { - std::vector stra = {{8, 8}}; - StrategyPtr strategy = NewStrategy(0, stra); - - Status ret = do_mask->Init(strategy); - ASSERT_EQ(ret, FAILED); -} - -TEST_F(TestDropoutDoMaskInfo, CheckStrategy3) { - std::vector stra = {{4, 8}, {4, 8}}; - StrategyPtr strategy = NewStrategy(0, stra); - - Status ret = do_mask->Init(strategy); - ASSERT_EQ(ret, FAILED); -} - -TEST_F(TestDropoutDoMaskInfo, CheckStrategy4) { - std::vector stra = {{4, 8}}; - StrategyPtr strategy = NewStrategy(0, stra); - - Status ret = do_mask->Init(strategy); - ASSERT_EQ(ret, SUCCESS); -} -} // namespace parallel -} // namespace mindspore diff --git a/tests/ut/cpp/parallel/ops_info/generator_info_test.cc b/tests/ut/cpp/parallel/ops_info/generator_info_test.cc deleted file mode 100644 index eb463066a6..0000000000 --- a/tests/ut/cpp/parallel/ops_info/generator_info_test.cc +++ /dev/null @@ -1,137 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include -#include "common/common_test.h" -#include "parallel/strategy.h" -#include "parallel/ops_info/generator_info.h" -#include "parallel/device_manager.h" -#include "parallel/step_parallel.h" - -namespace mindspore { -namespace parallel { - -class DropoutGenMaskInfo; -using DropoutGenMaskInfoPtr = std::shared_ptr; -DropoutGenMaskInfoPtr gen_mask; - -class TestDropoutGenMaskInfo : public UT::Common { - public: - TestDropoutGenMaskInfo() {} - void SetUp(); - void TearDown() {} -}; - -void TestDropoutGenMaskInfo::SetUp() { - std::vector dev_list; - - for (int32_t i = 0; i < 10; i++) { - dev_list.push_back(i); - } - - std::vector stage_map; - stage_map.push_back(8); - stage_map.push_back(2); - - int32_t local_dev = 0; - - // create a new g_device_manager - g_device_manager = std::make_shared(); - g_device_manager->Init(dev_list, local_dev, stage_map, "hccl"); - - std::unordered_map attr; - - Shapes inputs_shape; - Shapes outputs_shape = {{128}}; - std::vector shape = {32, 128}; - ValuePtr val0 = MakeValue(shape); - ValuePtr val1; - std::vector val = {val0, val1}; - gen_mask = std::make_shared("gen_mask_info", inputs_shape, outputs_shape, attr); - gen_mask->set_input_value(val); -} - -TEST_F(TestDropoutGenMaskInfo, InferDevMatrixShape) { - std::vector stra = {{8, 1}}; - StrategyPtr strategy = NewStrategy(0, stra); - - gen_mask->Init(strategy); - std::vector dev_matrix_shape = gen_mask->dev_matrix_shape(); - - std::vector expect = {8, 1}; - ASSERT_EQ(dev_matrix_shape, expect); -} - -TEST_F(TestDropoutGenMaskInfo, InferSliceShape) { - std::vector stra = {{8, 1}}; - StrategyPtr strategy = NewStrategy(0, stra); - - gen_mask->Init(strategy); - std::vector outputs = gen_mask->outputs_tensor_info(); - - Shape output_slice_shape_expect = {128}; - - TensorInfo output_tensor_info = outputs.at(0); - Shape output_slice_shape = output_tensor_info.slice_shape(); - - ASSERT_EQ(output_slice_shape, output_slice_shape_expect); -} - 
-TEST_F(TestDropoutGenMaskInfo, GetTensorLayout) { - std::vector stra = {{8, 1}}; - StrategyPtr strategy = NewStrategy(0, stra); - - gen_mask->Init(strategy); - std::vector outputs = gen_mask->outputs_tensor_info(); - - TensorMap output_map_expect = {-1}; - - TensorInfo output_tensor_info = outputs.at(0); - Map output_tensor_map = output_tensor_info.tensor_layout().origin_tensor_map(); - - ASSERT_EQ(output_tensor_map.array(), output_map_expect); -} - -TEST_F(TestDropoutGenMaskInfo, GetForwardOp) { - std::vector stra = {{8, 1}}; - StrategyPtr strategy = NewStrategy(0, stra); - - gen_mask->Init(strategy); - OperatorVector forward_op = gen_mask->forward_op(); - size_t size = forward_op.size(); - - ASSERT_EQ(size, 0); -} - -TEST_F(TestDropoutGenMaskInfo, CheckStrategy1) { - std::vector stra = {{4, 8, 2}, {2, 3}}; - StrategyPtr strategy = NewStrategy(0, stra); - - Status ret = gen_mask->Init(strategy); - ASSERT_EQ(ret, FAILED); -} - -TEST_F(TestDropoutGenMaskInfo, CheckStrategy2) { - std::vector stra = {{8, 1}}; - StrategyPtr strategy = NewStrategy(0, stra); - - Status ret = gen_mask->Init(strategy); - ASSERT_EQ(ret, SUCCESS); -} -} // namespace parallel -} // namespace mindspore diff --git a/tests/ut/python/parallel/test_dropout_do_mask.py b/tests/ut/python/parallel/test_dropout_do_mask.py new file mode 100644 index 0000000000..cfa7f50135 --- /dev/null +++ b/tests/ut/python/parallel/test_dropout_do_mask.py @@ -0,0 +1,94 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import mindspore as ms +from mindspore import context, Tensor, Parameter +from mindspore.nn import Cell, TrainOneStepCell, Momentum +from mindspore.ops import operations as P +from mindspore.common.api import _executor + + +class Net(Cell): + def __init__(self, mul_weight, strategy1=None, strategy2=None): + super().__init__() + self.mul = P.Mul().set_strategy(strategy1) + self.mul2 = P.Mul().set_strategy(strategy1) + self.dropout_do_mask = P.DropoutDoMask().set_strategy(strategy2) + self.dropout_gen_mask = P.DropoutGenMask() + self.get_shape = P.Shape() + self.cast = P.Cast() + self.mul_weight = Parameter(mul_weight, "w1") + self.mul_weight2 = Parameter(mul_weight, "w2") + self.keep_prob = Tensor(0.9) + + def construct(self, x, b): + out = self.mul(x, self.mul_weight) + shape = self.get_shape(out) + dtype = P.DType()(out) + keep_prob = self.cast(self.keep_prob, dtype) + mask = self.dropout_gen_mask(shape, keep_prob) + out = self.dropout_do_mask(out, mask, keep_prob) + out = self.mul2(out, self.mul_weight2) + return out + + +_x = Tensor(np.ones([128, 64]), dtype=ms.float32) +_w1 = Tensor(np.ones([128, 64]), dtype=ms.float32) +_b = Tensor(np.ones([128, 64]), dtype=ms.float32) + + +def compile(net): + optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) + train_net = TrainOneStepCell(net, optimizer) + _executor.compile(train_net, _x, _b) + context.reset_auto_parallel_context() + + +def test_dropout_do_mask_data_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((16, 1), (16, 1)) + strategy2 = ((16, 1),) + net = Net(_w1, strategy1, strategy2) + compile(net) + + +def test_dropout_do_mask_model_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((1, 16), (1, 16)) + 
strategy2 = ((1, 16),) + net = Net(_w1, strategy1, strategy2) + compile(net) + + +def test_dropout_do_mask_hybrid_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((4, 4), (4, 4)) + strategy2 = ((4, 4),) + net = Net(_w1, strategy1, strategy2) + compile(net) + + +def test_dropout_do_mask_auto_parallel(): + context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0) + net = Net(_w1) + compile(net) + + +def test_dropout_do_mask_repeat_calc(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((4, 4), (4, 4)) + strategy2 = ((2, 4),) + net = Net(_w1, strategy1, strategy2) + compile(net) From e09f220f17205ff13395e5991bd0c7d333949b60 Mon Sep 17 00:00:00 2001 From: chenzomi Date: Sat, 11 Apr 2020 11:06:39 +0800 Subject: [PATCH 168/367] fix complite bug in clang --- mindspore/ccsrc/onnx/onnx_exporter.cc | 4 ++-- mindspore/ccsrc/optimizer/irpass/incorporate_getitem.h | 2 +- mindspore/ccsrc/parallel/ops_info/operator_info.cc | 2 +- mindspore/ccsrc/utils/log_adapter.h | 1 + 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/mindspore/ccsrc/onnx/onnx_exporter.cc b/mindspore/ccsrc/onnx/onnx_exporter.cc index 3bd4a38881..e6ba60ff9c 100644 --- a/mindspore/ccsrc/onnx/onnx_exporter.cc +++ b/mindspore/ccsrc/onnx/onnx_exporter.cc @@ -174,14 +174,14 @@ OPERATOR_ONNX_CONVERT_DEFINE(Sigmoid, Sigmoid, OpNameInfo()) OPERATOR_ONNX_CONVERT_DEFINE(Flatten, Flatten, OpNameInfo()) OPERATOR_ONNX_CONVERT_DEFINE(Squeeze, Squeeze, OpNameInfo().Attr("axis", "axes", onnx::AttributeProto_AttributeType_INTS, - SetAttrTupleValueToProto)) + SetAttrTupleValueToProto<0>)) OPERATOR_ONNX_CONVERT_DEFINE( Conv2D, Conv, OpNameInfo() .Attr("dilation", "dilations", onnx::AttributeProto_AttributeType_INTS, SetAttrValueToProto) .Attr("group", "group", onnx::AttributeProto_AttributeType_INT, SetAttrValueToProto) - 
.Attr("kernel_size", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto) + .Attr("kernel_size", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<0>) .Attr("pad_mode", "auto_pad", onnx::AttributeProto_AttributeType_STRING, [](ValuePtr value, onnx::AttributeProto_AttributeType, onnx::AttributeProto* const attr_proto, const PrimitivePtr& prim) { diff --git a/mindspore/ccsrc/optimizer/irpass/incorporate_getitem.h b/mindspore/ccsrc/optimizer/irpass/incorporate_getitem.h index 9dc8e7255b..77f3fa7b36 100644 --- a/mindspore/ccsrc/optimizer/irpass/incorporate_getitem.h +++ b/mindspore/ccsrc/optimizer/irpass/incorporate_getitem.h @@ -15,7 +15,7 @@ */ #ifndef MINDSPORE_CCSRC_OPTIMIZER_IRPASS_INCORPORATE_GETITEM_H_ -#define MINDSPORE_CCSRC_OPTIMIZER_IRPASS_INCORPORATE_GETITEM_H__ +#define MINDSPORE_CCSRC_OPTIMIZER_IRPASS_INCORPORATE_GETITEM_H_ #include #include diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc index a24f3e616b..68c73bc548 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc @@ -236,7 +236,7 @@ OperatorVector CreateMirrorOps(const std::string& group_name, size_t dev_num) { OperatorName operator_name = MIRROR_OPERATOR; ValuePtr attr0_value = MakeValue(group_name); - ValuePtr attr1_value = MakeValue(dev_num); + ValuePtr attr1_value = MakeValue(SizeToInt(dev_num)); ValuePtr attr2_value = MakeValue(mean_flag); Attr attr0 = std::make_pair(GROUP, attr0_value); diff --git a/mindspore/ccsrc/utils/log_adapter.h b/mindspore/ccsrc/utils/log_adapter.h index 61c253782e..2122870c3b 100644 --- a/mindspore/ccsrc/utils/log_adapter.h +++ b/mindspore/ccsrc/utils/log_adapter.h @@ -22,6 +22,7 @@ #include #include #include +#include "./overload.h" #include "./securec.h" #ifdef USE_GLOG #include "glog/logging.h" From a2850cae327f5d8d65ad1ac59b420139b0d85686 Mon Sep 17 00:00:00 2001 From: 
suteng Date: Sat, 11 Apr 2020 15:30:24 +0800 Subject: [PATCH 169/367] =?UTF-8?q?=E5=9B=9E=E9=80=80=20'Pull=20Request=20?= =?UTF-8?q?!231=20:=20add=20bool=20type=20check=20in=20communication=20ope?= =?UTF-8?q?rator=20'?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- mindspore/ops/operations/comm_ops.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/mindspore/ops/operations/comm_ops.py b/mindspore/ops/operations/comm_ops.py index 441e441c2c..1644c5800a 100644 --- a/mindspore/ops/operations/comm_ops.py +++ b/mindspore/ops/operations/comm_ops.py @@ -162,8 +162,6 @@ class AllGather(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - if x_dtype == mstype.bool_: - raise TypeError("AllGather does not support 'Bool' as the dtype of input!") return x_dtype def __call__(self, tensor): @@ -221,8 +219,6 @@ class ReduceScatter(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - if x_dtype == mstype.bool_: - raise TypeError("ReduceScatter does not support 'Bool' as the dtype of input!") return x_dtype def __call__(self, tensor): @@ -280,8 +276,6 @@ class Broadcast(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - if x_dtype == mstype.bool_: - raise TypeError("Broadcast does not support 'Bool' as the dtype of input!") return x_dtype @@ -324,8 +318,6 @@ class _AlltoAll(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - if x_dtype == mstype.bool_: - raise TypeError("AlltoAll does not support 'Bool' as the dtype of input!") return x_dtype def __call__(self, tensor): From 87714b3c7f5e334a69e38fce0525b7e9e1d5df4e Mon Sep 17 00:00:00 2001 From: Zhang Qinghua Date: Fri, 10 Apr 2020 13:08:38 +0800 Subject: [PATCH 170/367] Remove the repeats of inferring and optimize the sorting routine. 
Total Renormalizes: ----- 69.05010 --> 62.28941 ----- --- .../pipeline/static_analysis/evaluator.cc | 42 +++++++++++++++---- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc b/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc index 9b120f731c..99cb893104 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc @@ -17,6 +17,7 @@ #include "pipeline/static_analysis/evaluator.h" #include +#include #include "ir/func_graph_cloner.h" #include "pipeline/static_analysis/utils.h" @@ -61,6 +62,29 @@ AnalysisContextPtr BaseFuncGraphEvaluator::MakeContext(const AnalysisEnginePtr & return context; } +static std::vector FastShadowSort(const AnfNodePtr &ret_node) { + std::vector sorted_nodes; + std::unordered_set checked_cnodes; + std::size_t index = 0; + sorted_nodes.emplace_back(ret_node); + while (index < sorted_nodes.size()) { + auto current = sorted_nodes[index]; + index++; + MS_EXCEPTION_IF_NULL(current); + if (current->isa()) { + auto &inputs = current->cast()->inputs(); + for (auto it = inputs.begin(); it != inputs.end(); it++) { + AnfNodePtr input = *it; + if (input != nullptr && input->isa() && checked_cnodes.find(input) == checked_cnodes.end()) { + sorted_nodes.emplace_back(input); + (void)checked_cnodes.insert(input); + } + } + } + } + return sorted_nodes; +} + AbstractBasePtr BaseFuncGraphEvaluator::Infer(AnalysisEnginePtr engine, const AbstractBasePtrList &args_spec_list) { FuncGraphPtr fg = GetFuncGraph(engine, args_spec_list); MS_EXCEPTION_IF_NULL(fg); @@ -86,20 +110,20 @@ AbstractBasePtr BaseFuncGraphEvaluator::Infer(AnalysisEnginePtr engine, const Ab MS_LOG(DEBUG) << "Analysis FuncGraph begin, func graph: " << fg->ToString() << ", context: " << graph_context_->ToString() << ", return node: " << func_node->DebugString(); - const std::vector &all_nodes = TopoSort(func_node); - for (const auto &node : all_nodes) { + 
AbstractBasePtr ret_base = nullptr; + std::vector nodes = FastShadowSort(func_node); + for (auto it = nodes.crbegin(); it != nodes.crend(); it++) { + const auto &node = *it; AnfNodeConfigPtr node_conf = engine->MakeConfig(node, graph_context_); MS_LOG(DEBUG) << "Analysis node begin, func graph: " << fg->ToString() << ", node_conf: " << node_conf->ToString(); - AbstractBasePtr base = engine->GetEvaluatedValue(node_conf); + ret_base = engine->GetEvaluatedValue(node_conf); MS_LOG(DEBUG) << "Analysis node end, func graph: " << fg->ToString() << ", node_conf: " << node_conf->ToString() - << ", abstract: " << base->ToString(); + << ", abstract: " << ret_base->ToString(); } - AnfNodeConfigPtr ret_conf = engine->MakeConfig(func_node, graph_context_); - AbstractBasePtr base = engine->GetEvaluatedValue(ret_conf); - MS_EXCEPTION_IF_NULL(base); - MS_LOG(DEBUG) << "BaseFuncGraph " << fg->ToString() << " infer end, inferred abstract: " << base->ToString(); - return base; + MS_EXCEPTION_IF_NULL(ret_base); + MS_LOG(DEBUG) << "BaseFuncGraph " << fg->ToString() << " infer end, inferred abstract: " << ret_base->ToString(); + return ret_base; } AbstractBasePtrList FuncGraphEvaluator::NormalizeArgs(const AbstractBasePtrList &args_spec_list) const { From 32cd280c1adabd1ab9b949c5c9d1d27e3f080e75 Mon Sep 17 00:00:00 2001 From: lichenever Date: Tue, 7 Apr 2020 19:00:51 +0800 Subject: [PATCH 171/367] add squeeze distributed op --- mindspore/ccsrc/parallel/dynamic_creator.h | 1 + .../parallel/ops_info/activation_info.cc | 156 ++++++++++++++++++ .../ccsrc/parallel/ops_info/activation_info.h | 21 ++- .../ccsrc/parallel/ops_info/arithmetic_info.h | 2 +- .../ops_info/comparison_function_info.h | 2 +- .../ccsrc/parallel/ops_info/onehot_info.h | 2 +- mindspore/ccsrc/parallel/step_parallel.cc | 5 +- tests/ut/python/parallel/test_squeeze_info.py | 79 +++++++++ 8 files changed, 261 insertions(+), 7 deletions(-) create mode 100644 tests/ut/python/parallel/test_squeeze_info.py diff --git 
a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h index 62cc4c5da3..1650ff0b21 100644 --- a/mindspore/ccsrc/parallel/dynamic_creator.h +++ b/mindspore/ccsrc/parallel/dynamic_creator.h @@ -125,6 +125,7 @@ REGISTER(GetNextInfo); REGISTER(NegInfo); REGISTER(BatchMatMulInfo); REGISTER(ExpandDimsInfo); +REGISTER(SqueezeInfo); } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.cc b/mindspore/ccsrc/parallel/ops_info/activation_info.cc index 9ba3624b01..c59ca8402b 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include "ir/value.h" #include "parallel/auto_parallel/costmodel.h" @@ -544,5 +545,160 @@ Status ExpandDimsInfo::InferMirrorOps() { MS_LOG(INFO) << name_ << ": Create mirror ops success, the group name is " << group[0].name(); return SUCCESS; } + +Status SqueezeInfo::InferAxis(const ValueTuplePtr& value_tuple) { + std::vector axis; + auto axis_list = value_tuple->value(); + if (inputs_shape_.empty()) { + MS_LOG(ERROR) << name_ << ": The inputs shape is empty"; + return FAILED; + } + Shape input_shape = inputs_shape_.at(0); + size_t input_size = input_shape.size(); + // if axis tuple is empty, we should exclude the axis that the corresponding slice shape is 1. + if (axis_list.empty()) { + for (size_t i = 0; i < input_size; ++i) { + if (input_shape[i] == 1) { + axis.push_back(i); + } + } + axis_ = MakeValue(axis)->cast(); + return SUCCESS; + } + + // convert negative axis to positive. + for (auto& dim : axis_list) { + if (!dim->isa()) { + MS_LOG(ERROR) << name_ << ": The type of axis is not int"; + return FAILED; + } + int32_t dim_value = GetValue(dim); + int32_t positive_value = (dim_value < 0) ? 
(dim_value + SizeToInt(input_size)) : dim_value; + axis.push_back(positive_value); + } + axis_ = MakeValue(axis)->cast(); + return SUCCESS; +} + +Status SqueezeInfo::GetAttrs() { + auto iter = attrs_.find(AXIS); + if (iter == attrs_.end()) { + MS_LOG(ERROR) << name_ << ": Can't find axis attribute."; + return FAILED; + } + MS_EXCEPTION_IF_NULL(iter->second); + auto value_tuple = iter->second->cast(); + MS_EXCEPTION_IF_NULL(value_tuple); + InferAxis(value_tuple); + attrs_[AXIS] = axis_; + return SUCCESS; +} + +Status SqueezeInfo::InferReplaceOps(const StrategyPtr& strategy) { + Attr attr = std::make_pair(AXIS, axis_); + OperatorAttrs attrs = {attr}; + OperatorParams params; + OperatorArgs args = std::make_pair(attrs, params); + replace_op_ = {std::make_pair(SQUEEZE, args)}; + return SUCCESS; +} + +Status SqueezeInfo::InferTensorMap() { + // for example: if the shape of input is [32, 32, 1], and the axis is (2, ), + // then the input_tensor_map is [2, 1, 0], the output_tensor_map is [2, 1] + std::vector input_tensor_map, output_tensor_map; + if (inputs_shape_.empty()) { + MS_LOG(ERROR) << name_ << ": The inputs shape is empty"; + return FAILED; + } + size_t size = inputs_shape_[0].size(); + std::vector axis = GetValue>(axis_); + for (size_t i = 0; i < size; ++i) { + size_t index = size - i - 1; + auto iter = std::find(axis.begin(), axis.end(), SizeToInt(i)); + if (iter == axis.end()) { + output_tensor_map.push_back(SizeToInt(index)); + } + input_tensor_map.push_back(SizeToInt(index)); + } + inputs_tensor_map_.push_back(input_tensor_map); + outputs_tensor_map_.push_back(output_tensor_map); + MS_LOG(INFO) << name_ << ": The tensor map of input is " << ShapeToString(input_tensor_map) + << ", and the tensor map of output is " << ShapeToString(output_tensor_map); + + return SUCCESS; +} + +Status SqueezeInfo::InferTensorInfo() { + if (inputs_shape_.empty() || outputs_shape_.empty()) { + MS_LOG(ERROR) << name_ << ": The shape of inputs or outputs is empty"; + return FAILED; 
+ } + + if (inputs_tensor_map_.empty() || outputs_tensor_map_.empty()) { + MS_LOG(ERROR) << name_ << ": The tensor map of inputs or outputs is empty"; + return FAILED; + } + + Shape input_shape = inputs_shape_[0]; + Shape output_shape = outputs_shape_[0]; + + // infer slice shape + Shapes inputs_slice_shape, outputs_slice_shape; + Strategys inputs_strategy = strategy_->GetInputDim(); + Dimensions output_strategy; + std::vector axis = GetValue>(axis_); + for (size_t i = 0; i < inputs_shape_[0].size(); ++i) { + auto iter = std::find(axis.begin(), axis.end(), SizeToInt(i)); + if (iter == axis.end()) { + output_strategy.push_back(inputs_strategy[0].at(i)); + } + } + Strategys outputs_strategy = {output_strategy}; + if (InferSliceShape(inputs_strategy, outputs_strategy, &inputs_slice_shape, &outputs_slice_shape) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Infer slice shape failed"; + return FAILED; + } + + if (inputs_slice_shape.empty() || outputs_slice_shape.empty()) { + MS_LOG(ERROR) << name_ << ": The slice shape of inputs or outputs is empty"; + return FAILED; + } + + Shape input_slice_shape = inputs_slice_shape[0]; + Shape output_slice_shape = outputs_slice_shape[0]; + + // infer tensor layout + TensorLayout input_tensor_layout, output_tensor_layout; + if (input_tensor_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], input_shape) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Init tensor layout for input failed"; + return FAILED; + } + + if (output_tensor_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[0], output_shape) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Init tensor layout for output failed"; + return FAILED; + } + + TensorInfo input_tensor_info(input_tensor_layout, input_shape, input_slice_shape); + TensorInfo output_tensor_info(output_tensor_layout, output_shape, output_slice_shape); + + inputs_tensor_info_.push_back(input_tensor_info); + outputs_tensor_info_.push_back(output_tensor_info); + return SUCCESS; +} + +Status 
SqueezeInfo::Init(const StrategyPtr& strategy) { + if (InitWithAutoRepeatCalc(strategy) != SUCCESS) { + MS_LOG(ERROR) << name_ << " : Init failed."; + } + + if (InferReplaceOps(strategy) != SUCCESS) { + MS_LOG(ERROR) << name_ << " : Infer replace ops failed"; + } + + MS_LOG(INFO) << name_ << " : Init success."; + return SUCCESS; +} } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.h b/mindspore/ccsrc/parallel/ops_info/activation_info.h index 21774c43ee..b19e38b910 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.h +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.h @@ -184,6 +184,25 @@ class ExpandDimsInfo : public ActivationOther { Strategys inputs_strategy_; Strategys outputs_strategy_; }; + +class SqueezeInfo : public ActivationOther { + public: + SqueezeInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, + const PrimitiveAttrs& attrs) + : ActivationOther(name, inputs_shape, outputs_shape, attrs) {} + ~SqueezeInfo() override = default; + + protected: + Status InferAxis(const ValueTuplePtr& value_tuple); + Status GetAttrs() override; + Status InferReplaceOps(const StrategyPtr& strategy); + Status InferTensorMap() override; + Status InferTensorInfo() override; + Status Init(const StrategyPtr& strategy) override; + + private: + ValueTuplePtr axis_; +}; } // namespace parallel } // namespace mindspore -#endif // MINDSPORE_CCSRC_OPTIMIZER_OPS_INFO_PARALLEL_ACTIVATION_INFO_H_ +#endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ACTIVATION_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h index daa2ad595c..8010b2890a 100644 --- a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h +++ b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h @@ -116,4 +116,4 @@ class AssignSubInfo : public ArithmeticBase { } // namespace parallel } // namespace mindspore -#endif // 
MINDSPORE_CCSRC_OPTIMIZER_OPS_INFO_PARALLEL_ARITHMETIC_INFO_H_ +#endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ARITHMETIC_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h index 110a9a6c38..5f51f1d0a9 100644 --- a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h +++ b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h @@ -53,4 +53,4 @@ class MaximumInfo : public ArithmeticBase { } // namespace parallel } // namespace mindspore -#endif // MINDSPORE_CCSRC_OPTIMIZER_OPS_INFO_PARALLEL_COMPARISON_FUNCTION_INFO_H_ +#endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_COMPARISON_FUNCTION_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/onehot_info.h b/mindspore/ccsrc/parallel/ops_info/onehot_info.h index a54d8479b3..fec8d96324 100644 --- a/mindspore/ccsrc/parallel/ops_info/onehot_info.h +++ b/mindspore/ccsrc/parallel/ops_info/onehot_info.h @@ -65,4 +65,4 @@ class OneHotInfo : public OperatorInfo { }; } // namespace parallel } // namespace mindspore -#endif // MINDSPORE_CCSRC_OPTIMIZER_OPS_INFO_PARALLEL_ONEHOT_INFO_H_ +#endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ONEHOT_INFO_H_ diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index 0a6d0b0bef..1976053eff 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -47,8 +47,8 @@ using mindspore::tensor::Tensor; namespace mindspore { namespace parallel { -const std::set COMMUNICATION_OPS = {ALL_REDUCE, ALL_GATHER, ALL_TO_ALL, REDUCE_SCATTER}; -const std::set INVALID_LOSS_OPS = {GET_NEXT, VIRTUALLOSS}; +static const std::set COMMUNICATION_OPS = {ALL_REDUCE, ALL_GATHER, ALL_TO_ALL, REDUCE_SCATTER}; +static const std::set INVALID_LOSS_OPS = {GET_NEXT, VIRTUALLOSS}; // g_RefMap, for CNode B input i is a RefKey[Parameter C], // it will be one item in map with key: C, and value: (B, i) static std::map> g_RefMap; @@ -1832,7 
+1832,6 @@ void ParallelCommunication(const FuncGraphPtr& root, const std::vector Date: Wed, 8 Apr 2020 16:24:51 +0800 Subject: [PATCH 172/367] Support pow's second input could be tensor and fix bug in bprop of pow --- .../ccsrc/parallel/ops_info/arithmetic_info.h | 7 + .../ops_info/elementary_function_info.cc | 47 ------- .../ops_info/elementary_function_info.h | 10 -- mindspore/nn/layer/pooling.py | 2 +- mindspore/ops/_grad/grad_math_ops.py | 11 +- mindspore/ops/operations/array_ops.py | 4 +- mindspore/ops/operations/math_ops.py | 120 +++++++++--------- mindspore/ops/primitive.py | 3 - .../ut/cpp/parallel/ops_info/pow_info_test.cc | 18 +-- tests/ut/python/ops/test_math_ops.py | 3 +- tests/ut/python/ops/test_ops.py | 6 +- .../parallel/test_element_wise_function.py | 2 +- tests/vm_impl/math_ops_vm_impl.py | 1 + 13 files changed, 95 insertions(+), 139 deletions(-) delete mode 100644 mindspore/ccsrc/parallel/ops_info/elementary_function_info.cc diff --git a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h index daa2ad595c..76ba500a03 100644 --- a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h +++ b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h @@ -98,6 +98,13 @@ class FloorDivInfo : public ArithmeticBase { ~FloorDivInfo() override = default; }; +class PowInfo : public ArithmeticBase { + public: + PowInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + ~PowInfo() override = default; +}; + class GreaterInfo : public ArithmeticBase { public: GreaterInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, diff --git a/mindspore/ccsrc/parallel/ops_info/elementary_function_info.cc b/mindspore/ccsrc/parallel/ops_info/elementary_function_info.cc deleted file mode 100644 index d4f79aca65..0000000000 --- 
a/mindspore/ccsrc/parallel/ops_info/elementary_function_info.cc +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "parallel/ops_info/elementary_function_info.h" - -namespace mindspore { -namespace parallel { -Status PowInfo::InferMirrorOps() { - mirror_ops_.clear(); - - Shape tensor_map = inputs_tensor_map_[0]; - std::vector group; - if (CreateGroupByTensorMap(tensor_map, &group) != SUCCESS) { - MS_LOG(ERROR) << name_ << " : Create group failed."; - return FAILED; - } - - OperatorVector mirror_op; - OperatorVector op_for_value; - if (group.empty()) { - MS_LOG(INFO) << name_ << " : The mirror ops is empty."; - return SUCCESS; - } else { - mirror_op = CreateMirrorOps(group[0].name(), group[0].GetDevNum()); - mirror_ops_.push_back(mirror_op); - mirror_ops_.push_back(op_for_value); - std::string group_name = group[0].name(); - MS_LOG(INFO) << name_ << " : Create the mirror ops success, the group name is " << group_name; - } - - return SUCCESS; -} -} // namespace parallel -} // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h b/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h index 57b4650f26..84b8030f37 100644 --- a/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h +++ b/mindspore/ccsrc/parallel/ops_info/elementary_function_info.h @@ -27,16 +27,6 @@ namespace mindspore { namespace parallel { 
-class PowInfo : public ActivationOther { - public: - PowInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ActivationOther(name, inputs_shape, outputs_shape, attrs) {} - ~PowInfo() override = default; - - protected: - Status InferMirrorOps() override; -}; - class ExpInfo : public ActivationOther { public: ExpInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) diff --git a/mindspore/nn/layer/pooling.py b/mindspore/nn/layer/pooling.py index bf90fcc9de..5d9b0ffa6c 100644 --- a/mindspore/nn/layer/pooling.py +++ b/mindspore/nn/layer/pooling.py @@ -58,7 +58,7 @@ class _PoolNd(Cell): pass def extend_repr(self): - return 'kernel_size={kernel_size}, strides={strides}, pad_mode={pad_mode}'.format(**self.__dict__) + return 'kernel_size={kernel_size}, stride={stride}, pad_mode={pad_mode}'.format(**self.__dict__) class MaxPool2d(_PoolNd): diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py index 1863ac8fdd..81e078dc98 100755 --- a/mindspore/ops/_grad/grad_math_ops.py +++ b/mindspore/ops/_grad/grad_math_ops.py @@ -336,14 +336,13 @@ def get_bprop_log(self): @bprop_getters.register(P.Pow) def get_bprop_pow(self): """Grad definition for `Pow` operation.""" - pow_ = P.Pow() - cast = P.Cast() - dtype = P.DType() + pow_op = P.Pow() + ln = P.Log() def bprop(x, power, out, dout): - g = cast(F.tuple_to_array((power,)), dtype(x)) * pow_(x, power-1.0) - dx = g * dout - return dx, 0 + dx = power * pow_op(x, power - 1.0) * dout + dpower = pow_op(x, power) * ln(x) * dout + return dx, dpower return bprop diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index ac7f8ed699..850e895ad0 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -1097,7 +1097,7 @@ class ArgMaxWithValue(PrimitiveWithInfer): axis = self.axis x_rank = len(x_shape) 
validator.check_int_range("axis", axis, -x_rank, x_rank, Rel.INC_LEFT) - ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.prim_name()) + ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.name) return ouput_shape, ouput_shape def infer_dtype(self, x_dtype): @@ -1143,7 +1143,7 @@ class ArgMinWithValue(PrimitiveWithInfer): axis = self.axis x_rank = len(x_shape) validator.check_int_range("axis", axis, -x_rank, x_rank, Rel.INC_LEFT) - ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.prim_name()) + ouput_shape = _infer_shape_reduce(x_shape, self.axis, self.keep_dims, self.name) return ouput_shape, ouput_shape def infer_dtype(self, x_dtype): diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index 106886c45c..a1fe6e72b5 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -74,7 +74,7 @@ class _BinaryOp(PrimitiveWithInfer): self.init_prim_io_names(inputs=['x', 'y'], outputs=['output']) def infer_shape(self, x_shape, y_shape): - return _get_broadcast_shape(x_shape, y_shape, self.prim_name()) + return _get_broadcast_shape(x_shape, y_shape, self.name) class _MathBinaryOp(_BinaryOp): @@ -89,7 +89,7 @@ class _MathBinaryOp(_BinaryOp): return x_dtype def infer_dtype(self, x_dtype, y_dtype): - return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.prim_name()) + return _MathBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type, self.name) class TensorAdd(_MathBinaryOp): @@ -158,7 +158,7 @@ class AssignAdd(PrimitiveWithInfer): def infer_dtype(self, variable, value): args = {"value": value} - validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.prim_name()) + validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.name) return value @@ -201,7 +201,7 @@ class AssignSub(PrimitiveWithInfer): def infer_dtype(self, variable, value): args = {"value": value} - 
validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.prim_name()) + validator.check_scalar_or_tensor_type_same(args, mstype.number_type, self.name) return value @@ -222,16 +222,16 @@ class _Reduce(PrimitiveWithInfer): @prim_attr_register def __init__(self, keep_dims=False): """init Reduce""" - validator.check_value_type('keep_dims', keep_dims, [bool], self.prim_name()) + validator.check_value_type('keep_dims', keep_dims, [bool], self.name) self.init_prim_io_names(inputs=['input_x', 'axis'], outputs=['y']) def do_infer(self, input_x, axis, valid_dtype=mstype.number_type): axis_v = axis['value'] input_shp = input_x['shape'] args = {'input_x': input_x['dtype']} - validator.check_tensor_type_same(args, valid_dtype, self.prim_name()) + validator.check_tensor_type_same(args, valid_dtype, self.name) - input_shp = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.prim_name()) + input_shp = _infer_shape_reduce(input_shp, axis_v, self.keep_dims, self.name) return {'shape': input_shp, 'dtype': input_x['dtype'], 'value': None} @@ -466,7 +466,7 @@ class CumProd(PrimitiveWithInfer): """ @prim_attr_register def __init__(self, exclusive=False, reverse=False): - cls_name = self.prim_name() + cls_name = self.name self.exclusive = validator.check_value_type("exclusive", exclusive, [bool], cls_name) self.reverse = validator.check_value_type("reverse", reverse, [bool], cls_name) @@ -474,7 +474,7 @@ class CumProd(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type, axis_type): - cls_name = self.prim_name() + cls_name = self.name validator.check_tensor_type_same({'x': x_type}, mstype.number_type, cls_name) validator.check_subclass("axis", axis_type, mstype.int_, cls_name) return x_type @@ -510,7 +510,7 @@ class MatMul(PrimitiveWithInfer): def __init__(self, transpose_a=False, transpose_b=False): self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output']) self.__setattr_flag__ = True - cls_name = self.prim_name() + cls_name = self.name 
validator.check_value_type("transpose_a", transpose_a, [bool], cls_name) validator.check_value_type("transpose_b", transpose_b, [bool], cls_name) @@ -521,7 +521,7 @@ class MatMul(PrimitiveWithInfer): def infer_shape(self, x, y): self.check_shape_size(x, y) - cls_name = self.prim_name() + cls_name = self.name # expected dimension of x, y, x:[...,a,b] y:[..., c,d], the dim size should be the same except the last two for i in range(len(x) - 2): if x[i] != y[i]: @@ -546,7 +546,7 @@ class MatMul(PrimitiveWithInfer): def infer_dtype(self, x, y): args = {"x": x, "y": y} - validator.check_tensor_type_same(args, mstype.float_type + mstype.int_type, self.prim_name()) + validator.check_tensor_type_same(args, mstype.float_type + mstype.int_type, self.name) return x @@ -590,7 +590,7 @@ class BatchMatMul(MatMul): def __init__(self, transpose_a=False, transpose_b=False): self.init_prim_io_names(inputs=['x1', 'x2'], outputs=['output']) self.__setattr_flag__ = True - cls_name = self.prim_name() + cls_name = self.name validator.check_value_type("transpose_a", transpose_a, [bool], cls_name) validator.check_value_type("transpose_b", transpose_b, [bool], cls_name) @@ -628,13 +628,13 @@ class CumSum(PrimitiveWithInfer): @prim_attr_register def __init__(self, exclusive=False, reverse=False): """init cumsum""" - cls_name = self.prim_name() + cls_name = self.name validator.check_value_type('exclusive', exclusive, [bool], cls_name) validator.check_value_type('reverse', reverse, [bool], cls_name) self.init_prim_io_names(inputs=['x', 'axis'], outputs=['y']) def __infer__(self, x, axis): - cls_name = self.prim_name() + cls_name = self.name x_shp = x['shape'] validator.check_value_type('axis', axis['value'], [int], cls_name) valid_types = [mstype.uint8, mstype.int8, mstype.int32, mstype.float16, mstype.float32] @@ -679,7 +679,7 @@ class AddN(PrimitiveWithInfer): self.init_prim_io_names(inputs=["inputs"], outputs=["sum"]) def infer_shape(self, inputs): - cls_name = self.prim_name() + cls_name = 
self.name validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name) self.add_prim_attr('n', len(inputs)) shp0 = inputs[0] @@ -688,7 +688,7 @@ class AddN(PrimitiveWithInfer): return shp0 def infer_dtype(self, inputs): - cls_name = self.prim_name() + cls_name = self.name validator.check_value_type("inputs", inputs, [tuple, list], cls_name) validator.check_integer("inputs", len(inputs), 1, Rel.GE, cls_name) args = {} @@ -718,7 +718,7 @@ class Neg(PrimitiveWithInfer): return input_x def infer_dtype(self, input_x): - validator.check_tensor_type_same({"input_x": input_x}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({"input_x": input_x}, mstype.number_type, self.name) return input_x @@ -809,7 +809,7 @@ class Square(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.name) return x_type @@ -838,7 +838,7 @@ class Rsqrt(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.name) return x_type @@ -867,7 +867,7 @@ class Sqrt(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({"x": x_type}, mstype.number_type, self.name) return x_type @@ -897,14 +897,29 @@ class Reciprocal(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_subclass("x", x, mstype.tensor, self.prim_name()) + validator.check_subclass("x", x, mstype.tensor, self.name) return x -class Pow(PrimitiveWithInfer): +class Pow(_MathBinaryOp): """ Computes a tensor to the power of the second input. 
+ The first input must be a tensor, and the second input should be a tensor or a number. + When the inputs are two tensors, the shapes of them could be broadcast, + and the data types of them should be the same. + When the inputs are one tensor and one scalar, the scalar could not be a parameter, + only could be a constant, and the type of the scalar is the same as the data type of the tensor. + + Inputs: + - **input_x** (Union[Tensor]) - The first input is a tensor whose data type is number. + - **input_y** (Union[Tensor, Number]) - The second input is a tensor whose data type is same as 'input_x' or + a number. + + Outputs: + Tensor, the shape is same as the shape after broadcasting, and the data type is same as 'input_x'. + + Inputs: - **input_x** (Tensor) - The input tensor. - **input_y** (Union[Tensor, Number]) - The exponent part. If exponent is a tensor, its shape must be able to @@ -927,17 +942,6 @@ class Pow(PrimitiveWithInfer): [1.0, 16.0, 64.0] """ - @prim_attr_register - def __init__(self): - """init Multiply""" - - def infer_shape(self, x, power): - return x - - def infer_dtype(self, x, power): - validator.check_tensor_type_same({"x": x}, mstype.number_type, self.prim_name()) - return x - class Exp(PrimitiveWithInfer): """ @@ -965,7 +969,7 @@ class Exp(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_subclass("x", x_type, mstype.tensor, self.prim_name()) + validator.check_subclass("x", x_type, mstype.tensor, self.name) return x_type @@ -994,7 +998,7 @@ class Log(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_subclass("x", x, mstype.tensor, self.prim_name()) + validator.check_subclass("x", x, mstype.tensor, self.name) return x @@ -1176,7 +1180,7 @@ class Floor(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - validator.check_tensor_type_same({"x": x_dtype}, mstype.float_type, self.prim_name()) + validator.check_tensor_type_same({"x": x_dtype}, mstype.float_type, 
self.name) return x_dtype @@ -1231,7 +1235,7 @@ class Acosh(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_tensor_type_same({'x': x}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name) return x @@ -1247,7 +1251,7 @@ class _LogicBinaryOp(_BinaryOp): return mstype.tensor_type(mstype.bool_) def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.prim_name()) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, prim_name=self.name) class Equal(_LogicBinaryOp): @@ -1283,7 +1287,7 @@ class Equal(_LogicBinaryOp): """ def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.prim_name()) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name) class EqualCount(PrimitiveWithInfer): @@ -1318,7 +1322,7 @@ class EqualCount(PrimitiveWithInfer): def infer_dtype(self, x_dtype, y_dtype): args = {'x': x_dtype, 'y': y_dtype} - validator.check_tensor_type_same(args, mstype.number_type + (mstype.bool_,), self.prim_name()) + validator.check_tensor_type_same(args, mstype.number_type + (mstype.bool_,), self.name) return x_dtype @@ -1355,7 +1359,7 @@ class NotEqual(_LogicBinaryOp): """ def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.prim_name()) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, mstype.number_type + (mstype.bool_,), self.name) class Greater(_LogicBinaryOp): @@ -1491,7 +1495,7 @@ class LogicalNot(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - validator.check_tensor_type_same({"x": x_dtype}, [mstype.bool_], self.prim_name()) + validator.check_tensor_type_same({"x": x_dtype}, [mstype.bool_], self.name) return mstype.tensor_type(mstype.bool_) @@ -1521,7 +1525,7 @@ class 
LogicalAnd(_LogicBinaryOp): """ def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.prim_name()) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name) class LogicalOr(_LogicBinaryOp): @@ -1550,7 +1554,7 @@ class LogicalOr(_LogicBinaryOp): """ def infer_dtype(self, x_dtype, y_dtype): - return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.prim_name()) + return _LogicBinaryOp.do_infer_dtype(x_dtype, y_dtype, (mstype.bool_,), self.name) class IsNan(PrimitiveWithInfer): """ @@ -1699,13 +1703,13 @@ class NPUGetFloatStatus(PrimitiveWithInfer): self.add_prim_attr("_side_effect_flag", True) def infer_shape(self, x_shape): - cls_name = self.prim_name() + cls_name = self.name validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ, cls_name) validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ, cls_name) return [8] def infer_dtype(self, x_dtype): - validator.check_tensor_type_same({'x': x_dtype}, [mstype.float32], self.prim_name()) + validator.check_tensor_type_same({'x': x_dtype}, [mstype.float32], self.name) return mstype.float32 @@ -1741,13 +1745,13 @@ class NPUClearFloatStatus(PrimitiveWithInfer): self.add_prim_attr("_side_effect_flag", True) def infer_shape(self, x_shape): - cls_name = self.prim_name() + cls_name = self.name validator.check_integer("len(x_shape)", len(x_shape), 1, Rel.EQ, cls_name) validator.check_integer("x_shape[0]", x_shape[0], 8, Rel.EQ, cls_name) return [8] def infer_dtype(self, x_dtype): - validator.check_tensor_type_same({'x': x_dtype}, [mstype.float32], self.prim_name()) + validator.check_tensor_type_same({'x': x_dtype}, [mstype.float32], self.name) return mstype.float32 @@ -1775,7 +1779,7 @@ class Cos(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_tensor_type_same({'x': x}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({'x': x}, mstype.number_type, 
self.name) return x @@ -1803,7 +1807,7 @@ class ACos(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_tensor_type_same({'x': x}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name) return x @@ -1831,7 +1835,7 @@ class Sin(PrimitiveWithInfer): return x def infer_dtype(self, x): - validator.check_tensor_type_same({'x': x}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({'x': x}, mstype.number_type, self.name) return x @@ -1876,11 +1880,11 @@ class NMSWithMask(PrimitiveWithInfer): @prim_attr_register def __init__(self, iou_threshold=0.5): """Init NMSWithMask""" - validator.check_value_type("iou_threshold", iou_threshold, [float], self.prim_name()) + validator.check_value_type("iou_threshold", iou_threshold, [float], self.name) self.init_prim_io_names(inputs=['bboxes'], outputs=['selected_boxes', 'selected_idx', 'selected_mask']) def infer_shape(self, bboxes_shape): - cls_name = self.prim_name() + cls_name = self.name validator.check_integer("bboxes rank", len(bboxes_shape), 2, Rel.EQ, cls_name) validator.check_integer("bboxes.shape()[0]", bboxes_shape[0], 0, Rel.GT, cls_name) validator.check_integer("bboxes.shape()[1]", bboxes_shape[1], 5, Rel.EQ, cls_name) @@ -1888,7 +1892,7 @@ class NMSWithMask(PrimitiveWithInfer): return (bboxes_shape, (num,), (num,)) def infer_dtype(self, bboxes_dtype): - validator.check_tensor_type_same({"bboxes": bboxes_dtype}, [mstype.float16, mstype.float32], self.prim_name()) + validator.check_tensor_type_same({"bboxes": bboxes_dtype}, [mstype.float16, mstype.float32], self.name) return (bboxes_dtype, mstype.int32, mstype.bool_) @@ -1917,7 +1921,7 @@ class Abs(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.name) return x_type def infer_value(self, x): 
@@ -1959,7 +1963,7 @@ class Sign(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): - validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({'x': x_dtype}, mstype.number_type, self.name) return x_dtype @@ -1988,7 +1992,7 @@ class Round(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_type): - validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.prim_name()) + validator.check_tensor_type_same({'x': x_type}, mstype.number_type, self.name) return x_type diff --git a/mindspore/ops/primitive.py b/mindspore/ops/primitive.py index 96e754f5f7..d281b4f76c 100644 --- a/mindspore/ops/primitive.py +++ b/mindspore/ops/primitive.py @@ -194,9 +194,6 @@ class PrimitiveWithInfer(Primitive): Primitive.__init__(self, name) self.set_prim_type(prim_type.py_infer_shape) - def prim_name(self): - return self.__class__.__name__ - def _clone(self): """ Deeply clones the primitive object. diff --git a/tests/ut/cpp/parallel/ops_info/pow_info_test.cc b/tests/ut/cpp/parallel/ops_info/pow_info_test.cc index f6ea2c3d3c..7b37a90fd8 100644 --- a/tests/ut/cpp/parallel/ops_info/pow_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/pow_info_test.cc @@ -19,7 +19,7 @@ #include #include "common/common_test.h" #include "parallel/strategy.h" -#include "parallel/ops_info/elementary_function_info.h" +#include "parallel/ops_info/arithmetic_info.h" #include "parallel/device_manager.h" #include "parallel/step_parallel.h" @@ -56,14 +56,14 @@ void TestPowInfo::SetUp() { std::unordered_map attr; - Shapes inputs_shape = {{32, 64, 128}}; + Shapes inputs_shape = {{32, 64, 128}, {32, 64, 128}}; Shapes outputs_shape = {{32, 64, 128}}; pow = std::make_shared("pow_info", inputs_shape, outputs_shape, attr); } TEST_F(TestPowInfo, InferDevMatrixShape1) { - std::vector inputs = {{2, 4, 8}}; + std::vector inputs = {{2, 4, 8}, {2, 4, 8}}; StrategyPtr strategy = NewStrategy(0, inputs); pow->Init(strategy); @@ 
-74,7 +74,7 @@ TEST_F(TestPowInfo, InferDevMatrixShape1) { } TEST_F(TestPowInfo, InferSliceShape1) { - std::vector str = {{2, 4, 8}}; + std::vector str = {{2, 4, 8}, {2, 4, 8}}; StrategyPtr strategy = NewStrategy(0, str); pow->Init(strategy); @@ -95,7 +95,7 @@ TEST_F(TestPowInfo, InferSliceShape1) { } TEST_F(TestPowInfo, GetTensorLayout1) { - std::vector str = {{2, 4, 8}}; + std::vector str = {{2, 4, 8}, {2, 4, 8}}; StrategyPtr strategy = NewStrategy(0, str); pow->Init(strategy); @@ -116,7 +116,7 @@ TEST_F(TestPowInfo, GetTensorLayout1) { } TEST_F(TestPowInfo, GetForwardOp1) { - std::vector inputs = {{2, 4, 8}}; + std::vector inputs = {{2, 4, 8}, {2, 4, 8}}; StrategyPtr strategy = NewStrategy(0, inputs); pow->Init(strategy); @@ -127,7 +127,7 @@ TEST_F(TestPowInfo, GetForwardOp1) { } TEST_F(TestPowInfo, GetMirrorOPs1) { - std::vector inputs = {{2, 4, 8}}; + std::vector inputs = {{2, 4, 8}, {2, 4, 8}}; StrategyPtr strategy = NewStrategy(0, inputs); pow->Init(strategy); @@ -147,7 +147,7 @@ TEST_F(TestPowInfo, CheckStrategy1) { } TEST_F(TestPowInfo, CheckStrategy2) { - std::vector inputs = {{2, 4, 8, 16}}; + std::vector inputs = {{2, 4, 8, 16}, {2, 4, 8, 16}}; StrategyPtr strategy = NewStrategy(0, inputs); Status ret = pow->Init(strategy); @@ -155,7 +155,7 @@ TEST_F(TestPowInfo, CheckStrategy2) { } TEST_F(TestPowInfo, CheckStrategy3) { - std::vector inputs = {{2, 4, 8}}; + std::vector inputs = {{2, 4, 8}, {2, 4, 8}}; StrategyPtr strategy = NewStrategy(0, inputs); Status ret = pow->Init(strategy); diff --git a/tests/ut/python/ops/test_math_ops.py b/tests/ut/python/ops/test_math_ops.py index 7c0cca9b40..ad1642228d 100755 --- a/tests/ut/python/ops/test_math_ops.py +++ b/tests/ut/python/ops/test_math_ops.py @@ -82,9 +82,10 @@ def test_sqrt(): def test_pow(): """ test_pow """ input_tensor = Tensor(np.array([[2, 2], [3, 3]])) + power = Tensor(np.array(3.0, np.int64)) testpow = P.Pow() expect = np.array([[8, 8], [27, 27]]) - result = testpow(input_tensor, 3.0) + result = 
testpow(input_tensor, power) assert np.all(result.asnumpy() == expect) diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index a6b064bdb0..078ada8406 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -224,11 +224,15 @@ test_case_math_ops = [ 'block': P.Minimum(), 'desc_inputs': [[2, 3, 3, 5], [2, 3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]]}), - ('Pow', { + ('Pow_0', { 'block': P.Pow(), 'desc_const': [2.0], 'desc_inputs': [[2, 3, 3, 5]], 'desc_bprop': [[2, 3, 3, 5]]}), + ('Pow_1', { + 'block': P.Pow(), + 'desc_inputs': [[3, 5], [2, 3, 3, 5]], + 'desc_bprop': [[2, 3, 3, 5]]}), ('Exp', { 'block': P.Exp(), 'desc_inputs': [[2, 3]], diff --git a/tests/ut/python/parallel/test_element_wise_function.py b/tests/ut/python/parallel/test_element_wise_function.py index 2eb3a22ed2..641eb19f20 100644 --- a/tests/ut/python/parallel/test_element_wise_function.py +++ b/tests/ut/python/parallel/test_element_wise_function.py @@ -59,7 +59,7 @@ def test_matmul_pow(): context.set_auto_parallel_context(device_num=8, global_rank=0) strategy1 = ((2, 2), (2, 2)) - strategy2 = ((4, 2), ) + strategy2 = ((4, 2), ()) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") diff --git a/tests/vm_impl/math_ops_vm_impl.py b/tests/vm_impl/math_ops_vm_impl.py index fd132280d1..01df0b824e 100644 --- a/tests/vm_impl/math_ops_vm_impl.py +++ b/tests/vm_impl/math_ops_vm_impl.py @@ -117,6 +117,7 @@ def vm_impl_pow(self): """Generate vm_impl function for Pow.""" def vm_impl(x, y): x = x.asnumpy() + y = y.asnumpy() res = vm.power(x, y) return Tensor(res) return vm_impl From 3348e5a7c2a24dd331fe68f8d7487274915b275c Mon Sep 17 00:00:00 2001 From: lianliguang Date: Sat, 11 Apr 2020 15:55:50 +0800 Subject: [PATCH 173/367] deal something special of adam's kernel select --- mindspore/ccsrc/device/ascend/kernel_select_ascend.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git 
a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc index d05b9fafa1..0a23e2da7b 100644 --- a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc +++ b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc @@ -82,6 +82,13 @@ bool IsValidKernelInfo(const std::shared_ptr &kernel_node, const kernel:: } return true; }; + if (AnfAlgo::GetCNodeName(kernel_node) == "Adam") { + auto input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (AnfAlgo::GetPrevNodeOutputFormat(kernel_node, input_num - 1) != + kernel_build_info.GetInputFormat(input_num - 1)) { + return false; + } + } if (AnfAlgo::GetCNodeName(kernel_node) == prim::kPrimCast->name()) { return AnfAlgo::GetOutputInferDataType(kernel_node, 0) == kernel_build_info.GetOutputDeviceType(0) && AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0) == kernel_build_info.GetInputDeviceType(0); From 149839952b5a5d698f5e1ddfee9e3902960459dd Mon Sep 17 00:00:00 2001 From: buxue Date: Sat, 11 Apr 2020 17:25:35 +0800 Subject: [PATCH 174/367] normalize log in optimizer in python --- mindspore/nn/optim/optimizer.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py index cd0ed93a10..e2edf7bfb4 100755 --- a/mindspore/nn/optim/optimizer.py +++ b/mindspore/nn/optim/optimizer.py @@ -14,7 +14,6 @@ # ============================================================================ """optimizer""" from typing import Iterable -import logging import numpy as np @@ -24,8 +23,8 @@ from mindspore.common.parameter import Parameter, ParameterTuple from mindspore._checkparam import ParamValidator as validator from mindspore._checkparam import Rel from mindspore.common.tensor import Tensor +from mindspore import log as logger -logger = logging.getLogger('Optimizer') __all__ = ['Optimizer'] From 11403492ae05facee8550714419dddadb36876d4 Mon Sep 17 00:00:00 2001 From: liyong Date: Fri, 10 Apr 2020 18:56:58 
+0800 Subject: [PATCH 175/367] add mindrecord subset random sampler --- mindspore/ccsrc/dataset/api/de_pipeline.cc | 31 +++ mindspore/ccsrc/dataset/api/de_pipeline.h | 3 + .../mindrecord/include/common/shard_utils.h | 2 + .../ccsrc/mindrecord/include/shard_sample.h | 6 + .../ccsrc/mindrecord/meta/shard_sample.cc | 74 +++--- mindspore/dataset/engine/datasets.py | 31 ++- .../dataset/test_minddataset_sampler.py | 222 ++++++++++++++++++ 7 files changed, 336 insertions(+), 33 deletions(-) create mode 100644 tests/ut/python/dataset/test_minddataset_sampler.py diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.cc b/mindspore/ccsrc/dataset/api/de_pipeline.cc index 1812c0421a..cf7050450b 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.cc +++ b/mindspore/ccsrc/dataset/api/de_pipeline.cc @@ -391,6 +391,30 @@ Status DEPipeline::CheckMindRecordPartitionInfo(const py::dict &args, std::vecto return Status::OK(); } +Status DEPipeline::GetMindrecordSampler(const std::string &sampler_name, const py::dict &args, + std::shared_ptr *ptr) { + std::vector indices; + for (auto &arg : args) { + std::string key = py::str(arg.first); + py::handle value = arg.second; + if (!value.is_none()) { + if (key == "indices") { + indices = ToIntVector(value); + } else { + std::string err_msg = "ERROR: parameter " + key + " is invalid."; + RETURN_STATUS_UNEXPECTED(err_msg); + } + } + } + if (sampler_name == "SubsetRandomSampler") { + *ptr = std::make_shared(indices); + } else { + std::string err_msg = "ERROR: parameter sampler_name is invalid."; + RETURN_STATUS_UNEXPECTED(err_msg); + } + return Status::OK(); +} + Status DEPipeline::ParseMindRecordOp(const py::dict &args, std::shared_ptr *ptr) { if (args["dataset_file"].is_none()) { std::string err_msg = "Error: at least one of dataset_files is missing"; @@ -422,6 +446,13 @@ Status DEPipeline::ParseMindRecordOp(const py::dict &args, std::shared_ptr(seed)); + } else if (key == "sampler_name") { + std::shared_ptr sample_op; + auto ret = 
GetMindrecordSampler(ToString(value), args["sampler_params"], &sample_op); + if (Status::OK() != ret) { + return ret; + } + operators.push_back(sample_op); } } } diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.h b/mindspore/ccsrc/dataset/api/de_pipeline.h index acffc390cc..491a75390e 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.h +++ b/mindspore/ccsrc/dataset/api/de_pipeline.h @@ -145,6 +145,9 @@ class DEPipeline { Status ParseCelebAOp(const py::dict &args, std::shared_ptr *ptr); + Status GetMindrecordSampler(const std::string &sampler_name, const py::dict &args, + std::shared_ptr *ptr); + private: // Execution tree that links the dataset operators. std::shared_ptr tree_; diff --git a/mindspore/ccsrc/mindrecord/include/common/shard_utils.h b/mindspore/ccsrc/mindrecord/include/common/shard_utils.h index 55319cabfe..e18cbb75b9 100644 --- a/mindspore/ccsrc/mindrecord/include/common/shard_utils.h +++ b/mindspore/ccsrc/mindrecord/include/common/shard_utils.h @@ -68,6 +68,8 @@ enum ShardType { kCV = 1, }; +enum SamplerType { kCustomTopNSampler, kCustomTopPercentSampler, kSubsetRandomSampler, kPKSampler }; + const double kEpsilon = 1e-7; const int kThreadNumber = 14; diff --git a/mindspore/ccsrc/mindrecord/include/shard_sample.h b/mindspore/ccsrc/mindrecord/include/shard_sample.h index f6b074a65d..aeb3374f28 100644 --- a/mindspore/ccsrc/mindrecord/include/shard_sample.h +++ b/mindspore/ccsrc/mindrecord/include/shard_sample.h @@ -17,7 +17,9 @@ #ifndef MINDRECORD_INCLUDE_SHARD_SAMPLE_H_ #define MINDRECORD_INCLUDE_SHARD_SAMPLE_H_ +#include #include +#include #include "mindrecord/include/shard_operator.h" namespace mindspore { @@ -30,6 +32,8 @@ class ShardSample : public ShardOperator { ShardSample(int num, int den, int par); + explicit ShardSample(const std::vector &indices); + ~ShardSample() override{}; const std::pair get_partitions() const; @@ -41,6 +45,8 @@ class ShardSample : public ShardOperator { int denominator_; int no_of_samples_; int partition_id_; + 
std::vector indices_; + SamplerType sampler_type_; }; } // namespace mindrecord } // namespace mindspore diff --git a/mindspore/ccsrc/mindrecord/meta/shard_sample.cc b/mindspore/ccsrc/mindrecord/meta/shard_sample.cc index ea365a0e2a..367c7a5cf9 100644 --- a/mindspore/ccsrc/mindrecord/meta/shard_sample.cc +++ b/mindspore/ccsrc/mindrecord/meta/shard_sample.cc @@ -22,33 +22,37 @@ using mindspore::MsLogLevel::ERROR; namespace mindspore { namespace mindrecord { -ShardSample::ShardSample(int n) { - numerator_ = 0; - denominator_ = 0; - no_of_samples_ = n; - partition_id_ = 0; -} +ShardSample::ShardSample(int n) + : numerator_(0), + denominator_(0), + no_of_samples_(n), + partition_id_(0), + indices_({}), + sampler_type_(kCustomTopNSampler) {} -ShardSample::ShardSample(int num, int den) { - if (num < 0 || den <= 0 || num > den) { - no_of_samples_ = 5; - numerator_ = 0; - denominator_ = 0; - partition_id_ = 0; - return; - } - numerator_ = num; - denominator_ = den; - no_of_samples_ = 0; - partition_id_ = 0; -} +ShardSample::ShardSample(int num, int den) + : numerator_(num), + denominator_(den), + no_of_samples_(0), + partition_id_(0), + indices_({}), + sampler_type_(kCustomTopPercentSampler) {} -ShardSample::ShardSample(int num, int den, int par) { - numerator_ = num; - denominator_ = den; - no_of_samples_ = 0; - partition_id_ = par; -} +ShardSample::ShardSample(int num, int den, int par) + : numerator_(num), + denominator_(den), + no_of_samples_(0), + partition_id_(par), + indices_({}), + sampler_type_(kCustomTopPercentSampler) {} + +ShardSample::ShardSample(const std::vector &indices) + : numerator_(0), + denominator_(0), + no_of_samples_(0), + partition_id_(0), + indices_(indices), + sampler_type_(kSubsetRandomSampler) {} const std::pair ShardSample::get_partitions() const { if (numerator_ == 1 && denominator_ > 1) { @@ -62,10 +66,15 @@ MSRStatus ShardSample::operator()(ShardTask &tasks) { int total_no = static_cast(tasks.Size()); int taking = 0; - if (no_of_samples_ > 
0) { // non sharding case constructor #1 + if (sampler_type_ == kCustomTopNSampler) { // non sharding case constructor #1 no_of_samples_ = std::min(no_of_samples_, total_no); taking = no_of_samples_ - no_of_samples_ % no_of_categories; - } else { // constructor #2 & #3 + } else if (sampler_type_ == kSubsetRandomSampler) { + if (indices_.size() > total_no) { + MS_LOG(ERROR) << "parameter indices's size is greater than dataset size."; + return FAILED; + } + } else { // constructor TopPercent if (numerator_ > 0 && denominator_ > 0 && numerator_ <= denominator_) { if (numerator_ == 1 && denominator_ > 1) { // sharding taking = (total_no / denominator_) + (total_no % denominator_ == 0 ? 0 : 1); @@ -82,8 +91,15 @@ MSRStatus ShardSample::operator()(ShardTask &tasks) { if (tasks.permutation_.empty()) { ShardTask new_tasks; total_no = static_cast(tasks.Size()); - for (int i = partition_id_ * taking; i < (partition_id_ + 1) * taking; i++) { - new_tasks.InsertTask(tasks.get_task_by_id(i % total_no)); // rounding up. if overflow, go back to start + if (sampler_type_ == kSubsetRandomSampler) { + for (int i = 0; i < indices_.size(); ++i) { + int index = ((indices_[i] % total_no) + total_no) % total_no; + new_tasks.InsertTask(tasks.get_task_by_id(index)); // different mod result between c and python + } + } else { + for (int i = partition_id_ * taking; i < (partition_id_ + 1) * taking; i++) { + new_tasks.InsertTask(tasks.get_task_by_id(i % total_no)); // rounding up. 
if overflow, go back to start + } } std::swap(tasks, new_tasks); } else { diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 2058bbf826..3d660d58a8 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -1363,7 +1363,6 @@ def _select_sampler(num_samples, input_sampler, shuffle, num_shards, shard_id): return samplers.SequentialSampler() - class ImageFolderDatasetV2(SourceDataset): """ A source dataset that reads images from a tree of directories. @@ -1621,6 +1620,9 @@ class MindDataset(SourceDataset): shard_id (int, optional): The shard ID within num_shards (default=None). This argument should be specified only when num_shards is also specified. block_reader (bool, optional): Whether read data by block mode (default=False). + sampler (Sampler, optional): Object used to choose samples from the + dataset (default=None, sampler is exclusive + with shuffle and block_reader). Support list: SubsetRandomSampler. Raises: ValueError: If num_shards is specified but shard_id is None. 
@@ -1630,14 +1632,16 @@ class MindDataset(SourceDataset): @check_minddataset def __init__(self, dataset_file, columns_list=None, num_parallel_workers=None, - shuffle=None, num_shards=None, shard_id=None, block_reader=False): + shuffle=None, num_shards=None, shard_id=None, + block_reader=False, sampler=None): super().__init__(num_parallel_workers) self.dataset_file = dataset_file self.columns_list = columns_list - self.global_shuffle = not bool(shuffle is False) + self.global_shuffle = shuffle self.distribution = "" + self.sampler = sampler - if num_shards is None: + if num_shards is None or shard_id is None: self.partitions = None else: self.partitions = [num_shards, shard_id] @@ -1645,9 +1649,25 @@ class MindDataset(SourceDataset): if block_reader is True and self.partitions is not None: raise ValueError("block reader not allowed true when use partitions") + if block_reader is True and shuffle is True: + raise ValueError("block reader not allowed true when use shuffle") + if block_reader is True: logger.warning("WARN: global shuffle is not used.") + if sampler is not None and isinstance(sampler, samplers.SubsetRandomSampler) is False: + raise ValueError("the sampler is not supported yet.") + + # sampler exclusive + if block_reader is True and sampler is not None: + raise ValueError("block reader not allowed true when use sampler") + + if shuffle is True and sampler is not None: + raise ValueError("shuffle not allowed true when use sampler") + + if block_reader is False and sampler is None: + self.global_shuffle = not bool(shuffle is False) + self.num_shards = num_shards self.shard_id = shard_id self.block_reader = block_reader @@ -1661,6 +1681,9 @@ class MindDataset(SourceDataset): args["block_reader"] = self.block_reader args["num_shards"] = self.num_shards args["shard_id"] = self.shard_id + if self.sampler: + args["sampler_name"] = self.sampler.__class__.__name__ + args["sampler_params"] = self.sampler.__dict__ return args def get_dataset_size(self): diff --git 
a/tests/ut/python/dataset/test_minddataset_sampler.py b/tests/ut/python/dataset/test_minddataset_sampler.py new file mode 100644 index 0000000000..7662a0e390 --- /dev/null +++ b/tests/ut/python/dataset/test_minddataset_sampler.py @@ -0,0 +1,222 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" +This is the test module for mindrecord +""" +import collections +import json +import os +import re +import string + +import mindspore.dataset.transforms.vision.c_transforms as vision +import numpy as np +import pytest +from mindspore.dataset.transforms.vision import Inter +from mindspore import log as logger + +import mindspore.dataset as ds +from mindspore.mindrecord import FileWriter + +FILES_NUM = 4 +CV_FILE_NAME = "../data/mindrecord/imagenet.mindrecord" +CV_DIR_NAME = "../data/mindrecord/testImageNetData" + + +@pytest.fixture +def add_and_remove_cv_file(): + """add/remove cv file""" + paths = ["{}{}".format(CV_FILE_NAME, str(x).rjust(1, '0')) + for x in range(FILES_NUM)] + for x in paths: + if os.path.exists("{}".format(x)): + os.remove("{}".format(x)) + if os.path.exists("{}.db".format(x)): + os.remove("{}.db".format(x)) + writer = FileWriter(CV_FILE_NAME, FILES_NUM) + data = get_data(CV_DIR_NAME) + cv_schema_json = {"id": {"type": "int32"}, + "file_name": {"type": "string"}, + "label": {"type": "int32"}, + "data": {"type": "bytes"}} + 
writer.add_schema(cv_schema_json, "img_schema") + writer.add_index(["file_name", "label"]) + writer.write_raw_data(data) + writer.commit() + yield "yield_cv_data" + for x in paths: + os.remove("{}".format(x)) + os.remove("{}.db".format(x)) + + +def test_cv_minddataset_subset_random_sample_basic(add_and_remove_cv_file): + """tutorial for cv minderdataset.""" + columns_list = ["data", "file_name", "label"] + num_readers = 4 + indices = [1, 2, 3, 5, 7] + sampler = ds.SubsetRandomSampler(indices) + data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, + sampler=sampler) + data = get_data(CV_DIR_NAME) + assert data_set.get_dataset_size() == 10 + num_iter = 0 + for item in data_set.create_dict_iterator(): + logger.info( + "-------------- cv reader basic: {} ------------------------".format(num_iter)) + logger.info( + "-------------- item[data]: {} -----------------------------".format(item["data"])) + logger.info( + "-------------- item[file_name]: {} ------------------------".format(item["file_name"])) + logger.info( + "-------------- item[label]: {} ----------------------------".format(item["label"])) + assert data[indices[num_iter]]['file_name'] == "".join( + [chr(x) for x in item['file_name']]) + num_iter += 1 + assert num_iter == 5 + + +def test_cv_minddataset_subset_random_sample_replica(add_and_remove_cv_file): + """tutorial for cv minderdataset.""" + columns_list = ["data", "file_name", "label"] + num_readers = 4 + indices = [1, 2, 2, 5, 7, 9] + sampler = ds.SubsetRandomSampler(indices) + data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, + sampler=sampler) + data = get_data(CV_DIR_NAME) + assert data_set.get_dataset_size() == 10 + num_iter = 0 + for item in data_set.create_dict_iterator(): + logger.info( + "-------------- cv reader basic: {} ------------------------".format(num_iter)) + logger.info( + "-------------- item[data]: {} -----------------------------".format(item["data"])) + logger.info( + "-------------- 
item[file_name]: {} ------------------------".format(item["file_name"])) + logger.info( + "-------------- item[label]: {} ----------------------------".format(item["label"])) + assert data[indices[num_iter]]['file_name'] == "".join( + [chr(x) for x in item['file_name']]) + num_iter += 1 + assert num_iter == 6 + + +def test_cv_minddataset_subset_random_sample_empty(add_and_remove_cv_file): + """tutorial for cv minderdataset.""" + columns_list = ["data", "file_name", "label"] + num_readers = 4 + indices = [] + sampler = ds.SubsetRandomSampler(indices) + data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, + sampler=sampler) + data = get_data(CV_DIR_NAME) + assert data_set.get_dataset_size() == 10 + num_iter = 0 + for item in data_set.create_dict_iterator(): + logger.info( + "-------------- cv reader basic: {} ------------------------".format(num_iter)) + logger.info( + "-------------- item[data]: {} -----------------------------".format(item["data"])) + logger.info( + "-------------- item[file_name]: {} ------------------------".format(item["file_name"])) + logger.info( + "-------------- item[label]: {} ----------------------------".format(item["label"])) + assert data[indices[num_iter]]['file_name'] == "".join( + [chr(x) for x in item['file_name']]) + num_iter += 1 + assert num_iter == 0 + + +def test_cv_minddataset_subset_random_sample_out_range(add_and_remove_cv_file): + """tutorial for cv minderdataset.""" + columns_list = ["data", "file_name", "label"] + num_readers = 4 + indices = [1, 2, 4, 11, 13] + sampler = ds.SubsetRandomSampler(indices) + data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, + sampler=sampler) + data = get_data(CV_DIR_NAME) + assert data_set.get_dataset_size() == 10 + num_iter = 0 + for item in data_set.create_dict_iterator(): + logger.info( + "-------------- cv reader basic: {} ------------------------".format(num_iter)) + logger.info( + "-------------- item[data]: {} 
-----------------------------".format(item["data"])) + logger.info( + "-------------- item[file_name]: {} ------------------------".format(item["file_name"])) + logger.info( + "-------------- item[label]: {} ----------------------------".format(item["label"])) + assert data[indices[num_iter] % len(data)]['file_name'] == "".join([ + chr(x) for x in item['file_name']]) + num_iter += 1 + assert num_iter == 5 + + +def test_cv_minddataset_subset_random_sample_negative(add_and_remove_cv_file): + """tutorial for cv minderdataset.""" + columns_list = ["data", "file_name", "label"] + num_readers = 4 + indices = [1, 2, 4, -1, -2] + sampler = ds.SubsetRandomSampler(indices) + data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, + sampler=sampler) + data = get_data(CV_DIR_NAME) + assert data_set.get_dataset_size() == 10 + num_iter = 0 + for item in data_set.create_dict_iterator(): + logger.info( + "-------------- cv reader basic: {} ------------------------".format(num_iter)) + logger.info( + "-------------- item[data]: {} -----------------------------".format(item["data"])) + logger.info( + "-------------- item[file_name]: {} ------------------------".format(item["file_name"])) + logger.info( + "-------------- item[label]: {} ----------------------------".format(item["label"])) + assert data[indices[num_iter] % len(data)]['file_name'] == "".join([ + chr(x) for x in item['file_name']]) + num_iter += 1 + assert num_iter == 5 + + +def get_data(dir_name): + """ + usage: get data from imagenet dataset + params: + dir_name: directory containing folder images and annotation information + + """ + if not os.path.isdir(dir_name): + raise IOError("Directory {} not exists".format(dir_name)) + img_dir = os.path.join(dir_name, "images") + ann_file = os.path.join(dir_name, "annotation.txt") + with open(ann_file, "r") as file_reader: + lines = file_reader.readlines() + + data_list = [] + for i, line in enumerate(lines): + try: + filename, label = line.split(",") + label = 
label.strip("\n") + with open(os.path.join(img_dir, filename), "rb") as file_reader: + img = file_reader.read() + data_json = {"id": i, + "file_name": filename, + "data": img, + "label": int(label)} + data_list.append(data_json) + except FileNotFoundError: + continue + return data_list From c8cdb6b3311ba8024abbc1787db0d7ff4fcd5986 Mon Sep 17 00:00:00 2001 From: c00425699 Date: Tue, 7 Apr 2020 15:08:31 +0800 Subject: [PATCH 176/367] support distributed GatherV2 operator --- .../auto_parallel/operator_costmodel.cc | 29 ++ .../auto_parallel/operator_costmodel.h | 27 ++ .../parallel/ops_info/batch_parallel_info.cc | 20 - .../parallel/ops_info/batch_parallel_info.h | 9 - .../ccsrc/parallel/ops_info/gather_v2_info.cc | 350 ++++++++++++++++++ .../ccsrc/parallel/ops_info/gather_v2_info.h | 73 ++++ .../ccsrc/parallel/ops_info/operator_info.cc | 1 + .../ccsrc/parallel/ops_info/operator_info.h | 3 + .../parallel/ops_info/ops_info_head_files.h | 1 + mindspore/ccsrc/parallel/step_parallel.cc | 8 + .../parallel/test_gather_v2_primitive.py | 123 +++++- 11 files changed, 597 insertions(+), 47 deletions(-) create mode 100644 mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc create mode 100644 mindspore/ccsrc/parallel/ops_info/gather_v2_info.h diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc index 1f420e8797..960e13281c 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc @@ -623,5 +623,34 @@ double DropOutCost::GetForwardComputationCost(const std::vector& inp Shape input0_slice_shape = input0.slice_shape(); return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]) * DROPOUT_COST_RATE; } + +// return the per device communication cost in the forward phase. 
+double GatherV2Cost::GetForwardCommCost(const std::vector&, const std::vector&, + const int32_t&) const { + // GatherV2Cost does not need communication in the forward phase + return 0.0; +} + +// return the per device communication cost in the backward phase. +double GatherV2Cost::GetBackwardCommCost(const std::vector&, const std::vector&, + const int32_t&) const { + // GatherV2Cost does not need communication in the backward phase + return 0.0; +} + +double GatherV2Cost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, + const int32_t&) const { + // In forward phase, the computation cost = slice(A) + slice(B) + Shape input0_slice_shape = inputs[0].slice_shape(); + Shape input1_slice_shape = inputs[1].slice_shape(); + double result = ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]) + + ListProduct(input1_slice_shape) * static_cast(inputs_type_lengths_[1]); + return result; +} + +double GatherV2Cost::GetBackwardComputationCost(const std::vector&, const std::vector&, + const int32_t&) const { + return 0.0; +} } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h index b642ada0d9..685cb259c3 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h @@ -81,6 +81,8 @@ class OperatorCost { std::vector outputs_type_lengths_; }; +using OperatorCostPtr = std::shared_ptr; + class MatMulCost : public OperatorCost { public: MatMulCost() = default; @@ -525,6 +527,31 @@ class DropOutCost : public OperatorCost { }; using DropOutCostPtr = std::shared_ptr; + +class GatherV2Cost : public OperatorCost { + public: + GatherV2Cost() = default; + ~GatherV2Cost() override = default; + + double GetCommCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return 
GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); + } + double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t& stage_id) const override; + double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, + const int32_t&) const override; +}; + +using GatherV2CostPtr = std::shared_ptr; } // namespace parallel } // namespace mindspore #endif // PARALLEL_AUTO_PARALLEL_OPERATOR_COSTMODEL_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc index 793452b8ad..b1d9b8b60e 100644 --- a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc @@ -228,26 +228,6 @@ void SparseSoftmaxCrossEntropyWithLogitsInfo::ReComputeBatchSplitFlagList() { } } -void GatherV2Info::ReComputeBatchSplitFlagList() { - MS_ASSERT(inputs_shape_.size() == 2); - MS_ASSERT(input_value_.size() == 3); - MS_ASSERT(input_value_[0] == nullptr); - // the second input is the index tensor - MS_ASSERT(input_value_[1] != nullptr); - // the third input is the axis - MS_ASSERT(input_value_[2] != nullptr); - int axis = GetValue(input_value_[2]); - MS_ASSERT(axis < inputs_shape_[0].size() && axis >= 0 - inputs_shape_[0].size()); - if (axis < 0) { - axis += SizeToInt(inputs_shape_[0].size()); - } - split_flag_list_[0] = true; - // if gather axis is 0, the index's 
strategy is equal to device number - if (axis == 0) { - split_flag_list_[1] = true; - } -} - Status BatchParallelInfo::InferAsLossDivisor() { as_loss_divisor_ = 1; return SUCCESS; diff --git a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h index fae96dcab5..093bfb8fad 100644 --- a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h +++ b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h @@ -62,15 +62,6 @@ class SparseSoftmaxCrossEntropyWithLogitsInfo : public BatchParallelInfo { ~SparseSoftmaxCrossEntropyWithLogitsInfo() override = default; void ReComputeBatchSplitFlagList() override; }; - -class GatherV2Info : public BatchParallelInfo { - public: - GatherV2Info(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, - const PrimitiveAttrs& attrs) - : BatchParallelInfo(name, inputs_shape, outputs_shape, attrs) {} - ~GatherV2Info() override = default; - void ReComputeBatchSplitFlagList() override; -}; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc b/mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc new file mode 100644 index 0000000000..2010d1ed46 --- /dev/null +++ b/mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc @@ -0,0 +1,350 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "parallel/ops_info/gather_v2_info.h" + +#include +#include +#include + +#include "ir/meta_tensor.h" +#include "ir/value.h" +#include "parallel/auto_parallel/costmodel.h" +#include "parallel/device_matrix.h" +#include "parallel/graph_util/generate_graph.h" +#include "parallel/strategy.h" +#include "utils/log_adapter.h" + +namespace mindspore { +namespace parallel { +Status GatherV2Info::GetAttrs() { + if (inputs_shape_.size() != GATHER_V2_INPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": inputs shape size must be 2, but is " << inputs_shape_.size(); + return FAILED; + } + if (outputs_shape_.size() != GATHER_V2_OUTPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": outputs shape size must be 1, but is " << outputs_shape_.size(); + return FAILED; + } + if (input_value_.size() != GATHER_V2_INPUTS_VALUE_SIZE) { + MS_LOG(ERROR) << name_ << ": input value size must be 3, but is " << input_value_.size(); + return FAILED; + } + // the second input is the index tensor + + // the third input is the axis, is a ValueNode + if (input_value_.at(2) == nullptr) { + MS_LOG(ERROR) << name_ << ": the third input value is nullptr, is not a ValueNode!"; + return FAILED; + } + + if (inputs_shape_.at(0).size() == 0) { + MS_LOG(ERROR) << name_ << ": input can not be a scalar!"; + return FAILED; + } + int axis = GetValue(input_value_.at(2)); + if (axis >= SizeToInt(inputs_shape_.at(0).size()) || axis < 0 - SizeToInt(inputs_shape_.at(0).size())) { + MS_LOG(ERROR) << "Axis is " << axis << ", not in [-" << inputs_shape_.at(0).size() << ", " + << inputs_shape_.at(0).size() << ")."; + } + if (axis < 0) { + axis += SizeToInt(inputs_shape_[0].size()); + } + axis_ = axis; + + index_size_ = inputs_shape_.at(1).size(); + + return SUCCESS; +} + +Status GatherV2Info::CheckStrategy(const StrategyPtr& strategy) { + if (inputs_shape_.size() != GATHER_V2_INPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": inputs shape size must be " << GATHER_V2_INPUTS_SIZE << ", but is " + << inputs_shape_.size(); + return 
FAILED; + } + if (outputs_shape_.size() != GATHER_V2_OUTPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": outputs shape size must be " << GATHER_V2_OUTPUTS_SIZE << ", but is " + << outputs_shape_.size(); + return FAILED; + } + // Only strategy of the first input should be set. + if (CheckStrategyValue(strategy, {inputs_shape_.at(0)}, is_auto_parallel_) != SUCCESS) { + if (is_auto_parallel_) { + MS_LOG(DEBUG) << name_ << ": Invalid strategy."; + } else { + MS_LOG(ERROR) << name_ << ": Invalid strategy."; + } + return FAILED; + } + axis_strategy_ = strategy->GetInputDim().at(0).at(axis_); + if (index_size_ != 1 && axis_strategy_ != 1) { + MS_LOG(ERROR) << name_ + << ": Invalid strategy. If the index is a scalar or a more than 1 dimension vector, the strategy " + "corresponding to axis must be 1, but is " + << axis_strategy_; + return FAILED; + } + if (index_size_ == 1 && axis_strategy_ != 1 && inputs_shape_.at(1).at(0) % axis_strategy_ != 0) { + MS_LOG(ERROR) << name_ + << ": Invalid strategy. The first dimension of index can not be divided by strategy corresponding to " + "axis. The first dimension of index is " + << inputs_shape_.at(1).at(0) << " strategy corresponding to axis is " << axis_strategy_; + return FAILED; + } + return SUCCESS; +} + +Status GatherV2Info::InferDevMatrixShape() { + std::vector stra = strategy_->GetInputDim(); + dev_matrix_shape_ = stra.at(0); + return SUCCESS; +} + +// If index is a scalar, output dimension is input dimension minus 1; +// If index is a n dimension tensor, output dimension is input dimension plus (n - 1). +// Tensor map dimension is equal to the corresponding input and output dimension. +// If index's dimension is more than 1, we insert -1 for the output tensor map. 
+Status GatherV2Info::InferTensorMap() { + if (inputs_shape_.size() != GATHER_V2_INPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": inputs shape size must be " << GATHER_V2_INPUTS_SIZE << ", but is " + << inputs_shape_.size(); + return FAILED; + } + if (outputs_shape_.size() != GATHER_V2_OUTPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": outputs shape size must be " << GATHER_V2_OUTPUTS_SIZE << ", but is " + << outputs_shape_.size(); + return FAILED; + } + std::vector tensor_map_in; + std::vector tensor_map_out; + size_t size = inputs_shape_.at(0).size(); + // such as 4: tensor_map_index [3,2,1,0] + for (size_t i = 0; i < size; ++i) { + tensor_map_in.push_back(SizeToInt(size - i - 1)); + tensor_map_out.push_back(SizeToInt(size - i - 1)); + } + + if (index_size_ == 0) { + (void)tensor_map_out.erase(tensor_map_out.begin() + axis_); + } else if (index_size_ > 1) { + (void)tensor_map_out.insert(tensor_map_out.begin() + axis_, index_size_ - 1, -1); + } + if (tensor_map_out.size() != outputs_shape_.at(0).size()) { + MS_LOG(ERROR) << "Out tensor map size is not equal to output size! 
Out tensor map size is " << tensor_map_out.size() + << " output size is " << outputs_shape_.at(0).size(); + return FAILED; + } + + std::vector tensor_map_in_index; + if (index_size_ >= 1) { + tensor_map_in_index.push_back(SizeToInt(size - axis_ - 1)); + } + for (size_t i = 1; i < index_size_; ++i) { + tensor_map_in_index.push_back(-1); + } + inputs_tensor_map_.emplace_back(std::move(tensor_map_in)); + inputs_tensor_map_.emplace_back(std::move(tensor_map_in_index)); + outputs_tensor_map_.emplace_back(std::move(tensor_map_out)); + return SUCCESS; +} + +Status GatherV2Info::InferTensorInfo() { + if (inputs_shape_.size() != GATHER_V2_INPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": inputs shape size must be " << GATHER_V2_INPUTS_SIZE << ", but is " + << inputs_shape_.size(); + return FAILED; + } + if (outputs_shape_.size() != GATHER_V2_OUTPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": outputs shape size must be " << GATHER_V2_OUTPUTS_SIZE << ", but is " + << outputs_shape_.size(); + return FAILED; + } + if (inputs_tensor_map_.size() != GATHER_V2_INPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": inputs tensor map size must be " << GATHER_V2_INPUTS_SIZE << ", but is " + << inputs_tensor_map_.size(); + return FAILED; + } + if (outputs_tensor_map_.size() != GATHER_V2_OUTPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": outputs tensor map size must be " << GATHER_V2_OUTPUTS_SIZE << ", but is " + << outputs_tensor_map_.size(); + return FAILED; + } + // infer tensor shape + Shape input_shape = inputs_shape_.at(0); + Shape input_index_shape = inputs_shape_.at(1); + Shape output_shape = outputs_shape_.at(0); + + TensorLayout input_tensor_layout, input_index_layout, output_tensor_layout; + if ((input_tensor_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_.at(0), input_shape) != SUCCESS) || + (input_index_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_.at(1), input_index_shape) != SUCCESS) || + (output_tensor_layout.InitFromVector(dev_matrix_shape_, 
outputs_tensor_map_.at(0), output_shape) != SUCCESS)) { + return FAILED; + } + + TensorInfo input_tensor_info(input_tensor_layout); + TensorInfo input_index_info(input_index_layout); + TensorInfo output_tensor_info(output_tensor_layout); + + inputs_tensor_info_.push_back(input_tensor_info); + inputs_tensor_info_.push_back(input_index_info); + outputs_tensor_info_.push_back(output_tensor_info); + return SUCCESS; +} + +OperatorVector CreateSubOp(int32_t sub_value) { + OperatorVector ops; + OperatorName operator_name = SUB; + OperatorAttrs operator_attrs; + + py::tuple tuple = py::make_tuple(sub_value); + mindspore::tensor::TensorPtr tensor_ptr = std::make_shared(tuple, kInt32); + ValuePtr op_param_value = MakeValue(tensor_ptr); + + Attr op1_param = std::make_pair("", op_param_value); + OperatorParams operator_param = {std::make_pair(op1_param, 2)}; + + OperatorArgs operator_args = std::make_pair(operator_attrs, operator_param); + Operator op = std::make_pair(operator_name, operator_args); + ops.push_back(op); + return ops; +} + +Status GatherV2Info::InferTensorSubOps() { + sub_ops_.clear(); + if ((index_size_ == 0) || (axis_strategy_ == 1)) { + return SUCCESS; + } + int32_t mod_n = 1; + for (size_t i = IntToSize(axis_) + 1; i < dev_matrix_shape_.size(); i++) { + mod_n *= dev_matrix_shape_.at(i); + } + if ((axis_ >= SizeToInt(dev_matrix_shape_.size())) || axis_ < 0) { + MS_LOG(ERROR) << "Axis is " << axis_ << ", not in [0, " << dev_matrix_shape_.size() << ")."; + } + int32_t mod_p = mod_n * dev_matrix_shape_.at(axis_); + int32_t rank = g_device_manager->global_rank(); + int32_t mod_rank = rank % mod_p; + mod_rank = static_cast(mod_rank / mod_n); + if (inputs_shape_.size() != GATHER_V2_INPUTS_SIZE) { + MS_LOG(ERROR) << name_ << ": inputs shape size must be " << GATHER_V2_INPUTS_SIZE << ", but is " + << inputs_shape_.size(); + return FAILED; + } + if ((axis_ >= SizeToInt(inputs_shape_.at(0).size())) || axis_ < 0) { + MS_LOG(ERROR) << "Axis is " << axis_ << ", not in [0, 
" << inputs_shape_.at(0).size() << ")."; + } + int32_t sub_value = static_cast(inputs_shape_.at(0).at(axis_) / dev_matrix_shape_.at(axis_)) * mod_rank; + + OperatorVector sub_op; + sub_ops_.emplace_back(std::move(sub_op)); + sub_op = CreateSubOp(sub_value); + sub_ops_.emplace_back(std::move(sub_op)); + return SUCCESS; +} + +Status GatherV2Info::Init(const StrategyPtr& strategy) { + if (InitWithAutoRepeatCalc(strategy) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Init failed."; + return FAILED; + } + Status status = InferTensorSubOps(); + if (status != SUCCESS) { + MS_LOG(ERROR) << name_ << ": InferTensorSubOps failed."; + return status; + } + MS_LOG(INFO) << name_ << ": Init success."; + return SUCCESS; +} + +Status GatherV2Info::InitForCostModel(const StrategyPtr& strategy) { + if (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS) { + if (is_auto_parallel_) { + MS_LOG(DEBUG) << name_ << ": Init for cost model failed."; + } else { + MS_LOG(ERROR) << name_ << ": Init for cost model failed."; + } + return FAILED; + } + MS_LOG(INFO) << name_ << ": Init for cost model success."; + return SUCCESS; +} + +Status GatherV2Info::GenerateStrategies(int32_t stage_id) { + if ((inputs_shape_.size() != GATHER_V2_INPUTS_SIZE) || (outputs_shape_.size() != GATHER_V2_OUTPUTS_SIZE)) { + MS_LOG(ERROR) << name_ << " : Inputs shape size(" << inputs_shape_.size() << ") or outputs shape size(" + << outputs_shape_.size() << "is wrong."; + return FAILED; + } + + is_auto_parallel_ = true; + Shape input0_split(inputs_shape_[0].size()); + Shapes splittable_inputs = {input0_split}; + + std::vector sp_vector; + if (GenerateStrategiesForIndependentInputs(stage_id, {inputs_shape_.at(0)}, splittable_inputs, &sp_vector) != + SUCCESS) { + MS_LOG(ERROR) << name_ << " : Generate strategies for independent inputs() failed."; + return FAILED; + } + size_t success = 0; + for (auto& sp : sp_vector) { + if (SetCostUnderStrategy(sp) == SUCCESS) { + success++; + MS_LOG(INFO) << name_ << " : Successfully 
generated " << success << " strategy"; + PrintStrategy(sp); + } + } + return SUCCESS; +} + +Status GatherV2Info::SetCostUnderStrategy(const StrategyPtr& strategy) { + if (SetCostUnderStrategyBase(strategy) != SUCCESS) { + if (is_auto_parallel_) { + MS_LOG(DEBUG) << name_ << ": Set cost under strategy failed."; + } else { + MS_LOG(ERROR) << name_ << ": Set cost under strategy failed."; + } + return FAILED; + } + return SUCCESS; +} + +std::shared_ptr>> GatherV2Info::GenerateBatchStrategies() { + if (inputs_shape_.size() != GATHER_V2_INPUTS_SIZE) { + MS_LOG(EXCEPTION) << name_ << ": inputs shape size must be " << GATHER_V2_INPUTS_SIZE << ", but is " + << inputs_shape_.size(); + } + CheckGlobalDeviceManager(); + size_t dev_num = g_device_manager->GetDeviceListByStageId(0).size(); + if (GetAttrs() != SUCCESS) { + MS_LOG(EXCEPTION) << "GetAttrs failed!"; + } + + Dimensions strategy; + if (index_size_ != 1) { + strategy.push_back(1); + } else { + strategy.push_back(SizeToInt(dev_num)); + } + for (size_t i = 1; i < inputs_shape_[0].size(); i++) { + strategy.push_back(1); + } + std::vector strategy_v = {strategy}; + return std::make_shared>>(strategy_v); +} +} // namespace parallel +} // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/gather_v2_info.h b/mindspore/ccsrc/parallel/ops_info/gather_v2_info.h new file mode 100644 index 0000000000..773d46f429 --- /dev/null +++ b/mindspore/ccsrc/parallel/ops_info/gather_v2_info.h @@ -0,0 +1,73 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GATHER_V2_INFO_H_ +#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GATHER_V2_INFO_H_ + +#include +#include +#include +#include + +#include "ir/value.h" +#include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" +#include "parallel/strategy.h" + +namespace mindspore { +namespace parallel { +constexpr size_t GATHER_V2_INPUTS_SIZE = 2; +constexpr size_t GATHER_V2_OUTPUTS_SIZE = 1; +constexpr size_t GATHER_V2_INPUTS_VALUE_SIZE = 3; +// We now supported limited parallel strategies. +// If the strategy corresponding to axis is more than 1, index must be evenly distributed across the axis-dimension of +// the input. +// If Index is a scalar or n-dimension vector(n > 1), the strategy corresponding to axis must be 1. +class GatherV2Info : public OperatorInfo { + public: + GatherV2Info(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, + const PrimitiveAttrs& attrs) + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()), + axis_(-1), + index_size_(0), + axis_strategy_(1) {} + ~GatherV2Info() override = default; + Status Init(const StrategyPtr& strategy) override; + Status InitForCostModel(const StrategyPtr& strategy) override; + + Status GenerateStrategies(int32_t stage_id) override; + Status SetCostUnderStrategy(const StrategyPtr& strategy) override; + std::shared_ptr>> GenerateBatchStrategies() override; + + protected: + Status CheckStrategy(const StrategyPtr& strategy) override; + Status InferMirrorOps() override { return SUCCESS; } + Status InferForwardCommunication() override { return SUCCESS; } + Status InferTensorInfo() override; + Status InferDevMatrixShape() override; + Status InferTensorMap() override; + Status GetAttrs() override; + + private: + Status InferTensorSubOps(); + + int32_t axis_; + size_t index_size_; + 
int32_t axis_strategy_; +}; +} // namespace parallel +} // namespace mindspore +#endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_GATHER_V2_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc index 68c73bc548..42755b3ec3 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc @@ -112,6 +112,7 @@ void OperatorInfo::ResetQueueMember() { dev_matrix_shape_.clear(); forward_op_.clear(); mirror_ops_.clear(); + sub_ops_.clear(); replace_op_.clear(); replace_op_info_.clear(); virtual_div_op_.clear(); diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.h b/mindspore/ccsrc/parallel/ops_info/operator_info.h index 8fcae8ad33..248172fa4c 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.h +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.h @@ -41,6 +41,7 @@ namespace mindspore { namespace parallel { using ForwardOp = OperatorVector; using MirrorOps = std::vector; +using Ops = std::vector; using VirtualDivOp = OperatorVector; using TensorMaps = std::vector>; using TensorLayouts = std::vector; @@ -99,6 +100,7 @@ class OperatorInfo { OutPutInfoVector replace_op_info() const { return replace_op_info_; } virtual ReplaceGraphPtr replace_graph(const CNodePtr&) { return replace_graph_; } MirrorOps mirror_ops() const { return mirror_ops_; } + Ops sub_ops() const { return sub_ops_; } VirtualDivOp virtual_div_op() const { return virtual_div_op_; } Shape dev_matrix_shape() const { return dev_matrix_shape_; } std::vector inputs_tensor_info() const { return inputs_tensor_info_; } @@ -190,6 +192,7 @@ class OperatorInfo { TensorMaps inputs_tensor_map_; TensorMaps outputs_tensor_map_; ForwardOp forward_op_; + Ops sub_ops_; ForwardOp replace_op_; OutPutInfoVector replace_op_info_; ReplaceGraphPtr replace_graph_; diff --git a/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h b/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h index 
1681c8f796..27b434ecca 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h @@ -24,6 +24,7 @@ #include "parallel/ops_info/comparison_function_info.h" #include "parallel/ops_info/dropout_do_mask_info.h" #include "parallel/ops_info/elementary_function_info.h" +#include "parallel/ops_info/gather_v2_info.h" #include "parallel/ops_info/get_next_info.h" #include "parallel/ops_info/l2_normalize_info.h" #include "parallel/ops_info/loss_info.h" diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index 0a6d0b0bef..eab5443481 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -464,6 +464,14 @@ void SplitTensor(const AnfNodePtr& node, const CNodePtr& next_node, int index) { MS_EXCEPTION_IF_NULL(func_graph); Operator op = CreateGetTensorSliceOp(tensor_layout); InsertGetTensorSliceOp(op, next_node, func_graph, index, SPLIT_TENSOR); + if (!op_info->sub_ops().empty()) { + auto sub_ops = op_info->sub_ops(); + for (size_t i = 0; i < sub_ops.size(); i++) { + if (!sub_ops.at(i).empty()) { + InsertGetTensorSliceOp(sub_ops.at(i).at(0), next_node, func_graph, index, SUB); + } + } + } } void StepSplitTensor(const AnfNodePtr& node, const FuncGraphManagerPtr& manager) { diff --git a/tests/ut/python/parallel/test_gather_v2_primitive.py b/tests/ut/python/parallel/test_gather_v2_primitive.py index c623595b53..3ea0795e9c 100644 --- a/tests/ut/python/parallel/test_gather_v2_primitive.py +++ b/tests/ut/python/parallel/test_gather_v2_primitive.py @@ -29,6 +29,8 @@ from mindspore.nn import Dense, Cell from mindspore import context context.set_context(mode=context.GRAPH_MODE) +device_number = 32 +batch_size_per_device = 128 class Dataset(): @@ -57,15 +59,22 @@ class Dataset(): class GatherV2(_Loss): - def __init__(self, batchsize): + def __init__(self, index_dim, strategy, index_size=16): super(GatherV2, self).__init__() 
self.pow = P.Pow() - emb_list = list(range(batchsize)) - emb1_list = emb_list[0::2] - emb2_list = emb_list[1::2] + emb1_list = 21 + emb2_list = 2 + if index_dim == 1: + emb_list = list(range(index_size)) + emb1_list = emb_list[0::2] + emb2_list = emb_list[1::2] + if index_dim == 2: + emb_list = np.arange(index_size*16) + emb1_list = np.reshape(emb_list[0::2], (int(index_size/2), 16)) + emb2_list = np.reshape(emb_list[1::2], (int(index_size/2), 16)) self.emb1_param = Tensor(emb1_list, dtype=mstype.int32) self.emb2_param = Tensor(emb2_list, dtype=mstype.int32) - self.gatherv2 = P.GatherV2() + self.gatherv2 = P.GatherV2().set_strategy(strategy) def construct(self, nembeddings): emb1 = self.gatherv2(nembeddings, self.emb1_param, 0) @@ -73,10 +82,6 @@ class GatherV2(_Loss): return self.pow((emb1 - emb2), 2.0) -def get_loss(batchsize): - return GatherV2(batchsize) - - def fc_with_initialize(input_channels, out_channels): return Dense(input_channels, out_channels) @@ -114,26 +119,23 @@ class TrainOneStepCell(Cell): return F.depend(loss, self.optimizer(grads)) -def test_trains(): +def net_trains(gather_v2_strategy, criterion, rank): init() lr = 0.1 momentum = 0.9 max_epoch = 20 - device_number = 32 - batch_size_per_device = 128 input_channels = 256 out_channels = 512 - + context.set_context(mode=context.GRAPH_MODE, save_graphs=False) context.reset_auto_parallel_context() - context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=device_number) + context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=device_number, + global_rank=rank) predict = Tensor(np.ones([batch_size_per_device, input_channels]), dtype=ms.float32) dataset = Dataset(predict, 4) network = fc_with_initialize(input_channels, out_channels) network.set_train() - criterion = get_loss(batch_size_per_device * device_number) - train_network = BuildTrainNetwork(network, criterion) train_network.set_train() opt = 
Momentum(train_network.trainable_params(), lr, momentum) @@ -143,5 +145,90 @@ def test_trains(): model.train(max_epoch, dataset, dataset_sink_mode=False) context.reset_auto_parallel_context() -if __name__ == "__main__": - test_trains() + +def test_auto_batch_parallel(): + gather_v2_strategy = None + criterion = GatherV2(1, strategy=gather_v2_strategy, index_size=batch_size_per_device * device_number) + rank = 2 + net_trains(gather_v2_strategy, criterion, rank) + + +def test_2d_index_auto_batch_parallel(): + gather_v2_strategy = None + criterion = GatherV2(2, strategy=gather_v2_strategy, index_size=batch_size_per_device * device_number) + rank = 2 + net_trains(gather_v2_strategy, criterion, rank) + + +def test_batch_parallel(): + gather_v2_strategy = ((device_number, 1),) + criterion = GatherV2(1, strategy=gather_v2_strategy, index_size=batch_size_per_device * device_number) + rank = 2 + net_trains(gather_v2_strategy, criterion, rank) + + +def test_strategy1(): + gather_v2_strategy = ((16, 2),) + rank = 2 + criterion = GatherV2(1, strategy=gather_v2_strategy, index_size=batch_size_per_device * device_number) + net_trains(gather_v2_strategy, criterion, rank) + + +def test_strategy2(): + gather_v2_strategy = ((1, device_number),) + rank = 2 + criterion = GatherV2(1, strategy=gather_v2_strategy, index_size=batch_size_per_device * device_number) + net_trains(gather_v2_strategy, criterion, rank) + + +def test_strategy3(): + gather_v2_strategy = ((8, 1),) + rank = 2 + criterion = GatherV2(1, strategy=gather_v2_strategy, index_size=batch_size_per_device * device_number) + net_trains(gather_v2_strategy, criterion, rank) + + +class GatherV2Axis1(_Loss): + def __init__(self, index_dim, strategy, index_size=16): + super(GatherV2Axis1, self).__init__() + self.pow = P.Pow() + emb1_list = 21 + emb2_list = 2 + if index_dim == 1: + emb_list = list(range(index_size)) + emb1_list = emb_list[0::2] + emb2_list = emb_list[1::2] + if index_dim == 2: + emb_list = 
np.arange(index_size*index_size) + emb1_list = np.reshape(emb_list[0::2], (int(index_size/2), index_size)) + emb2_list = np.reshape(emb_list[1::2], (int(index_size/2), index_size)) + self.emb1_param = Tensor(emb1_list, dtype=mstype.int32) + self.emb2_param = Tensor(emb2_list, dtype=mstype.int32) + self.gatherv2 = P.GatherV2().set_strategy(strategy) + + def construct(self, nembeddings): + emb1 = self.gatherv2(nembeddings, self.emb1_param, 1) + emb2 = self.gatherv2(nembeddings, self.emb2_param, 1) + return self.pow((emb1 - emb2), 2.0) + + +def test_axis1_auto_batch_parallel(): + gather_v2_strategy = None + criterion = GatherV2Axis1(1, strategy=gather_v2_strategy, index_size=512) + rank = 2 + net_trains(gather_v2_strategy, criterion, rank) + + +def test_axis1_batch_parallel(): + gather_v2_strategy = ((device_number, 1),) + criterion = GatherV2Axis1(1, strategy=gather_v2_strategy, index_size=512) + rank = 2 + net_trains(gather_v2_strategy, criterion, rank) + + +def test_axis1_strategy1(): + gather_v2_strategy = ((16, 2),) + rank = 17 + criterion = GatherV2Axis1(1, strategy=gather_v2_strategy, index_size=512) + net_trains(gather_v2_strategy, criterion, rank) + From 87f8b9fba2149e1b63cc1489b0f4c508e2f6f1da Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Sun, 12 Apr 2020 00:13:39 +0800 Subject: [PATCH 177/367] Update docker image section in README.md Signed-off-by: leonwanghui --- README.md | 66 ++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 56 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index be8ca5189a..8b6b363de3 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ ![MindSpore Logo](docs/MindSpore-logo.png "MindSpore logo") ============================================================ -- [What is MindSpore?](#what-is-MindSpore) +- [What is MindSpore?](#what-is-mindspore) - [Automatic Differentiation](#automatic-differentiation) - [Automatic Parallel](#automatic-parallel) - [Installation](#installation) @@ -53,7 +53,7 @@ 
The goal of MindSpore automatic parallel is to build a training method that comb Automatic Parallel -At present, MindSpore uses a fine-grained parallel strategy of splitting operators, that is, each operator in the figure is splited into a cluster to complete parallel operations. The splitting strategy during this period may be very complicated, but as a developer advocating Pythonic, you don't need to care about the underlying implementation, as long as the top-level API compute is efficient. +At present, MindSpore uses a fine-grained parallel strategy of splitting operators, that is, each operator in the figure is splitted into a cluster to complete parallel operations. The splitting strategy during this period may be very complicated, but as a developer advocating Pythonic, you don't need to care about the underlying implementation, as long as the top-level API compute is efficient. ## Installation @@ -70,9 +70,9 @@ MindSpore offers build options across multiple backends: | GPU CUDA 10.1 | Ubuntu-x86 | ✔️ | | CPU | Ubuntu-x86 | ✔️ | -For installation using pip, take `Ubuntu-x86` and `CPU` build version as an example: +For installation using `pip`, take `Ubuntu-x86` and `CPU` build version as an example: -1. Download whl from [MindSpore website](https://www.mindspore.cn/), and install the package. +1. Download whl from [MindSpore download page](https://www.mindspore.cn/versions/en), and install the package. ``` pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/cpu/ubuntu-x86/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl @@ -96,14 +96,60 @@ currently the containerized build options are supported as follows: | Hardware Platform | Docker Image URL | | :---------------- | :--------------- | | CPU | `mindspore/mindspore-cpu:0.1.0-alpha` | -| GPU CUDA 9.2 | `mindspore/mindspore-cuda9.2:0.1.0-alpha` | -| GPU CUDA 10.1 | `mindspore/mindspore-cuda10.1:0.1.0-alpha` | +| GPU | `mindspore/mindspore-gpu:0.1.0-alpha` | | Ascend |
    | -Take `CPU` for example, you can directly pull the image using the below command: -``` -docker pull mindspore/mindspore-cpu:0.1.0-alpha -``` +* CPU + + For `CPU` backend, you can directly pull and run the image using the below command: + ``` + docker pull mindspore/mindspore-cpu:0.1.0-alpha + docker run -it mindspore/mindspore-cpu:0.1.0-alpha python -c 'import mindspore' + ``` + +* GPU + + For `GPU` backend, please make sure the `nvidia-container-toolkit` has been installed in advance, here are some install guidelines for Ubuntu users: + ``` + DISTRIBUTION=$(. /etc/os-release; echo $ID$VERSION_ID) + curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | apt-key add - + curl -s -L https://nvidia.github.io/nvidia-docker/$DISTRIBUTION/nvidia-docker.list | tee /etc/apt/sources.list.d/nvidia-docker.list + + sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit nvidia-docker2 + sudo systemctl restart docker + ``` + + Then you can pull and run the image using the below command: + ``` + docker pull mindspore/mindspore-gpu:0.1.0-alpha + docker run -it --runtime=nvidia --privileged=true mindspore/mindspore-gpu:0.1.0-alpha /bin/bash + ``` + + To test if the docker image works, please execute the python code below and check the output: + ```python + import numpy as np + from mindspore import Tensor + from mindspore.ops import functional as F + import mindspore.context as context + + context.set_context(device_target="GPU") + x = Tensor(np.ones([1,3,3,4]).astype(np.float32)) + y = Tensor(np.ones([1,3,3,4]).astype(np.float32)) + print(F.tensor_add(x, y)) + ``` + ``` + [[[ 2. 2. 2. 2.], + [ 2. 2. 2. 2.], + [ 2. 2. 2. 2.]], + + [[ 2. 2. 2. 2.], + [ 2. 2. 2. 2.], + [ 2. 2. 2. 2.]], + + [[ 2. 2. 2. 2.], + [ 2. 2. 2. 2.], + [ 2. 2. 2. 2.]]] + ``` If anyone wants to learn more about the build process of MindSpore docker images, please check out `docker` folder for the details. 
From 6a79fc1735f5a5a8170dc15ab487320c54a28b3d Mon Sep 17 00:00:00 2001 From: panfengfeng Date: Mon, 13 Apr 2020 10:38:54 +0800 Subject: [PATCH 178/367] skip mindrecord ut test case --- .../cpp/mindrecord/ut_shard_operator_test.cc | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc b/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc index 46ea1712b2..143931658a 100644 --- a/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc +++ b/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc @@ -65,31 +65,31 @@ TEST_F(TestShardOperator, TestShardSampleBasic) { ASSERT_TRUE(i <= kSampleCount); } -TEST_F(TestShardOperator, TestShardSampleWrongNumber) { - MS_LOG(INFO) << common::SafeCStr(FormatInfo("Test read imageNet")); - - std::string file_name = "./imagenet.shard01"; - auto column_list = std::vector{"file_name"}; - - const int kNum = 5; - const int kDen = 0; - std::vector> ops; - ops.push_back(std::make_shared(kNum, kDen)); - - ShardReader dataset; - dataset.Open(file_name, 4, column_list, ops); - dataset.Launch(); - - int i = 0; - while (true) { - auto x = dataset.GetNext(); - if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); - i++; - } - dataset.Finish(); - ASSERT_TRUE(i <= 5); -} +// TEST_F(TestShardOperator, TestShardSampleWrongNumber) { +// MS_LOG(INFO) << common::SafeCStr(FormatInfo("Test read imageNet")); +// +// std::string file_name = "./imagenet.shard01"; +// auto column_list = std::vector{"file_name"}; +// +// const int kNum = 5; +// const int kDen = 0; +// std::vector> ops; +// ops.push_back(std::make_shared(kNum, kDen)); +// +// ShardReader dataset; +// dataset.Open(file_name, 4, column_list, ops); +// dataset.Launch(); +// +// int i = 0; +// while (true) { +// auto x = dataset.GetNext(); +// if (x.empty()) break; +// MS_LOG(INFO) << "index: " << i << ", filename: " << 
common::SafeCStr((std::get<1>(x[0]))["file_name"]); +// i++; +// } +// dataset.Finish(); +// ASSERT_TRUE(i <= 5); +// } TEST_F(TestShardOperator, TestShardSampleRatio) { MS_LOG(INFO) << common::SafeCStr(FormatInfo("Test read imageNet")); From 37ba21c271d25ec95f903b26953e61d1ba9e6ac7 Mon Sep 17 00:00:00 2001 From: dinghao Date: Sun, 12 Apr 2020 09:55:03 +0800 Subject: [PATCH 179/367] fix ref pass visit graph bug --- .../pre_activate/ascend/ascend_backend_optimization.cc | 2 ++ .../ascend/format_type/deal_ref_trans_and_cast.cc | 8 ++++++++ .../ascend/format_type/deal_ref_trans_and_cast.h | 1 + mindspore/ccsrc/pre_activate/common/node_pass.cc | 1 + mindspore/ops/_op_impl/tbe/trans_data.py | 8 ++++++-- 5 files changed, 18 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc index 432d88e7a4..023838c3a5 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc @@ -90,6 +90,7 @@ void RunOpAscendMixPrecision(const std::shared_ptr &kernel mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); + mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); @@ -126,6 +127,7 @@ void AscendMixPrecision(const std::shared_ptr &kernel_grap mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); + mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); mixed_precision_pm->AddPass(std::make_shared()); diff --git 
a/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc b/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc index fd20611415..81e5c4b486 100644 --- a/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc +++ b/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc @@ -22,6 +22,7 @@ #include "kernel/oplib/oplib.h" #include "session/anf_runtime_algorithm.h" #include "session/kernel_graph.h" +#include "pre_activate/common/helper.h" namespace mindspore { namespace opt { @@ -168,11 +169,18 @@ AnfNodePtr DealRefSigleOutput(const FuncGraphPtr &func_graph, const CNodePtr &cn } } // namespace +const BaseRef DealRefTransAndCast::DefinePattern() const { + VarPtr V = std::make_shared(UnVisited); + VarPtr Xs = std::make_shared(); + return VectorRef({V, Xs}); +} + const AnfNodePtr DealRefTransAndCast::Process(const FuncGraphPtr &graph, const AnfNodePtr &node, const EquivPtr &) const { if (node == nullptr || !node->isa()) { return nullptr; } + AnfAlgo::SetNodeAttr(kAttrVisited, MakeValue(true), node); auto cnode = node->cast(); MS_EXCEPTION_IF_NULL(cnode); if (!AnfAlgo::IsRealCNodeKernel(cnode)) { diff --git a/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.h b/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.h index 9ed55d8b29..1b54a7b111 100644 --- a/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.h +++ b/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.h @@ -28,6 +28,7 @@ class DealRefTransAndCast : public PatternProcessPass { public: explicit DealRefTransAndCast(bool multigraph = true) : PatternProcessPass("deal_ref_trans_and_cast", multigraph) {} ~DealRefTransAndCast() override = default; + const BaseRef DefinePattern() const override; const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt diff --git 
a/mindspore/ccsrc/pre_activate/common/node_pass.cc b/mindspore/ccsrc/pre_activate/common/node_pass.cc index cd213f8263..a6e93d2f07 100644 --- a/mindspore/ccsrc/pre_activate/common/node_pass.cc +++ b/mindspore/ccsrc/pre_activate/common/node_pass.cc @@ -45,6 +45,7 @@ bool NodePass::Run(const FuncGraphPtr &func_graph) { bool change = (new_node != nullptr); if (new_node != nullptr && new_node != node) { (void)manager->Replace(node, new_node); + (void)seen_node.erase(node); } else if (new_node == nullptr) { new_node = node; } diff --git a/mindspore/ops/_op_impl/tbe/trans_data.py b/mindspore/ops/_op_impl/tbe/trans_data.py index 1b7c8fa25d..c6628c7638 100644 --- a/mindspore/ops/_op_impl/tbe/trans_data.py +++ b/mindspore/ops/_op_impl/tbe/trans_data.py @@ -46,11 +46,13 @@ from mindspore.ops.op_info_register import op_info_register "dtype": [ "bool", "float","float","float","float","float","float","float","float","float","float", - "float16","float16","float16","float16","float16","float16","float16","float16","float16","float16" + "float16","float16","float16","float16","float16","float16","float16","float16","float16","float16", + "uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16" ], "format": [ "DefaultFormat", "DefaultFormat","DefaultFormat","DefaultFormat","FracZ","FRACTAL_NZ","NC1HWC0","HWCN","HWCN","C1HWNCoC0","FracZ", + "DefaultFormat","DefaultFormat","DefaultFormat","FracZ","FRACTAL_NZ","NC1HWC0","HWCN","HWCN","C1HWNCoC0","FracZ", "DefaultFormat","DefaultFormat","DefaultFormat","FracZ","FRACTAL_NZ","NC1HWC0","HWCN","HWCN","C1HWNCoC0","FracZ" ], "name": "src", @@ -65,11 +67,13 @@ from mindspore.ops.op_info_register import op_info_register "dtype": [ "bool", "float","float","float","float","float","float","float","float","float","float", - "float16","float16","float16","float16","float16","float16","float16","float16","float16","float16" + 
"float16","float16","float16","float16","float16","float16","float16","float16","float16","float16", + "uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16" ], "format": [ "NC1HWC0", "NC1HWC0","FRACTAL_NZ","FracZ","DefaultFormat","DefaultFormat","DefaultFormat","FracZ","C1HWNCoC0","HWCN","HWCN", + "NC1HWC0","FRACTAL_NZ","FracZ","DefaultFormat","DefaultFormat","DefaultFormat","FracZ","C1HWNCoC0","HWCN","HWCN", "NC1HWC0","FRACTAL_NZ","FracZ","DefaultFormat","DefaultFormat","DefaultFormat","FracZ","C1HWNCoC0","HWCN","HWCN" ], "name": "dst", From fa03a66433201edf4ab26ec482dcba758346f72f Mon Sep 17 00:00:00 2001 From: zhaoting Date: Mon, 13 Apr 2020 10:43:12 +0800 Subject: [PATCH 180/367] change adam output numbers adapter to tbe --- mindspore/ops/_op_impl/tbe/apply_adam.py | 46 ++++++++++++++++++++---- mindspore/ops/operations/nn_ops.py | 4 +-- 2 files changed, 41 insertions(+), 9 deletions(-) diff --git a/mindspore/ops/_op_impl/tbe/apply_adam.py b/mindspore/ops/_op_impl/tbe/apply_adam.py index ae6b7d782e..1d5c383515 100644 --- a/mindspore/ops/_op_impl/tbe/apply_adam.py +++ b/mindspore/ops/_op_impl/tbe/apply_adam.py @@ -88,7 +88,8 @@ from mindspore.ops.op_info_register import op_info_register "float16","float16","float16","float16","float","float","float", "float" ], "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" + "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", + "DefaultFormat", "DefaultFormat" ], "name": "beta1_power", "need_compile": false, @@ -101,7 +102,8 @@ from mindspore.ops.op_info_register import op_info_register "float16","float16","float16","float16","float","float","float","float" ], "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" + "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", + 
"DefaultFormat", "DefaultFormat" ], "name": "beta2_power", "need_compile": false, @@ -114,7 +116,8 @@ from mindspore.ops.op_info_register import op_info_register "float16","float16","float16","float16","float","float","float", "float" ], "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" + "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", + "DefaultFormat", "DefaultFormat" ], "name": "lr", "need_compile": false, @@ -127,7 +130,8 @@ from mindspore.ops.op_info_register import op_info_register "float16","float16","float16","float16","float","float","float", "float" ], "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" + "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", + "DefaultFormat", "DefaultFormat" ], "name": "beta1", "need_compile": false, @@ -140,7 +144,8 @@ from mindspore.ops.op_info_register import op_info_register "float16","float16","float16","float16","float","float","float", "float" ], "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" + "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", + "DefaultFormat", "DefaultFormat" ], "name": "beta2", "need_compile": false, @@ -153,7 +158,8 @@ from mindspore.ops.op_info_register import op_info_register "float16","float16","float16","float16","float","float","float", "float" ], "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" + "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", + "DefaultFormat", "DefaultFormat" ], "name": "epsilon", "need_compile": false, @@ -161,7 +167,7 @@ from mindspore.ops.op_info_register import op_info_register "shape": "all" }, { - "index": 8, 
+ "index": 9, "dtype": [ "float16","float16","float16","float16","float","float","float", "float" ], @@ -187,6 +193,32 @@ from mindspore.ops.op_info_register import op_info_register "need_compile": false, "param_type": "required", "shape": "all" + }, + { + "index": 1, + "dtype": [ + "float16","float16","float16","float16","float","float","float","float" + ], + "format": [ + "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" + ], + "name": "m", + "need_compile": false, + "param_type": "required", + "shape": "all" + }, + { + "index": 2, + "dtype": [ + "float16","float16","float16","float16","float","float","float","float" + ], + "format": [ + "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" + ], + "name": "v", + "need_compile": false, + "param_type": "required", + "shape": "all" } ] }""") diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 83f76455e0..538d7f3826 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -2149,7 +2149,7 @@ class Adam(PrimitiveWithInfer): validator.check_param_equal("var_shape", var_shape, "m_shape", m_shape) validator.check_param_equal("var_shape", var_shape, "v_shape", v_shape) validator.check_param_equal("var_shape", var_shape, "grad_shape", grad_shape) - return var_shape + return var_shape, m_shape, v_shape def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype, beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype): @@ -2159,7 +2159,7 @@ class Adam(PrimitiveWithInfer): args = {"beta1_power_dtype": beta1_power_dtype, "beta2_power_dtype": beta2_power_dtype, 'lr_dtype': lr_dtype, "beta1_dtype": beta1_dtype, "beta2_dtype": beta2_dtype, "epsilon_dtype": epsilon_dtype} validator.check_type_same(args, [mstype.float16, mstype.float32]) - return var_dtype + return var_dtype, m_dtype, v_dtype class 
BinaryCrossEntropy(PrimitiveWithInfer): From 08dd50d6712b286f1e227a713539e509546250eb Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Mon, 13 Apr 2020 11:28:28 +0800 Subject: [PATCH 181/367] Add mindspore devel docker image for CPU and GPU backend Signed-off-by: leonwanghui --- README.md | 12 ++-- docker/mindspore-cpu/devel/Dockerfile | 80 ++++++++++++++++++++++ docker/mindspore-gpu/devel/Dockerfile | 95 +++++++++++++++++++++++++++ 3 files changed, 182 insertions(+), 5 deletions(-) create mode 100644 docker/mindspore-cpu/devel/Dockerfile create mode 100644 docker/mindspore-gpu/devel/Dockerfile diff --git a/README.md b/README.md index 8b6b363de3..169f1985e5 100644 --- a/README.md +++ b/README.md @@ -93,11 +93,13 @@ For installation using `pip`, take `Ubuntu-x86` and `CPU` build version as an ex MindSpore docker image is hosted on [Docker Hub](https://hub.docker.com/r/mindspore), currently the containerized build options are supported as follows: -| Hardware Platform | Docker Image URL | -| :---------------- | :--------------- | -| CPU | `mindspore/mindspore-cpu:0.1.0-alpha` | -| GPU | `mindspore/mindspore-gpu:0.1.0-alpha` | -| Ascend |
    | +| Hardware Platform | Docker Image Repository | Tag | Description | +| :---------------- | :---------------------- | :-- | :---------- | +| CPU | `mindspore/mindspore-cpu` | `0.1.0-alpha` | Production environment with pre-installed MindSpore `0.1.0-alpha` CPU release. | +| | | `devel` | Development environment provided to build MindSpore (with `CPU` backend) from the source, refer to https://www.mindspore.cn/install/en for installation details. | +| GPU | `mindspore/mindspore-gpu` | `0.1.0-alpha` | Production environment with pre-installed MindSpore `0.1.0-alpha` GPU release. | +| | | `devel` | Development environment provided to build MindSpore (with `GPU CUDA10.1` backend) from the source, refer to https://www.mindspore.cn/install/en for installation details. | +| Ascend |
    |
    | Coming soon. | * CPU diff --git a/docker/mindspore-cpu/devel/Dockerfile b/docker/mindspore-cpu/devel/Dockerfile new file mode 100644 index 0000000000..a48326af9b --- /dev/null +++ b/docker/mindspore-cpu/devel/Dockerfile @@ -0,0 +1,80 @@ +FROM ubuntu:18.04 + +MAINTAINER leonwanghui + +# Set env +ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 +ENV CMAKE_ROOT_PATH /usr/local/cmake-3.14.1 +ENV PATH ${PYTHON_ROOT_PATH}/bin:${CMAKE_ROOT_PATH}/bin:/usr/local/bin:$PATH + +# Install base tools +RUN apt update \ + && DEBIAN_FRONTEND=noninteractive apt install -y \ + vim \ + wget \ + curl \ + xz-utils \ + net-tools \ + openssh-client \ + git \ + ntpdate \ + tzdata \ + tcl \ + sudo \ + bash-completion + +# Install compile tools +RUN DEBIAN_FRONTEND=noninteractive apt install -y \ + gcc \ + g++ \ + zlibc \ + make \ + libgmp-dev \ + patch \ + autoconf \ + libtool \ + automake \ + flex + +# Set bash +RUN echo "dash dash/sh boolean false" | debconf-set-selections +RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash + +# Install python (v3.7.5) +RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ + libgdbm-dev libgdbm-compat-dev liblzma-dev libreadline-dev libsqlite3-dev \ + && cd /tmp \ + && wget https://github.com/python/cpython/archive/v3.7.5.tar.gz \ + && tar -xvf v3.7.5.tar.gz \ + && cd /tmp/cpython-3.7.5 \ + && mkdir -p ${PYTHON_ROOT_PATH} \ + && ./configure --prefix=${PYTHON_ROOT_PATH} \ + && make -j4 \ + && make install -j4 \ + && rm -f /usr/local/bin/python \ + && rm -f /usr/local/bin/pip \ + && ln -s ${PYTHON_ROOT_PATH}/bin/python3.7 /usr/local/bin/python \ + && ln -s ${PYTHON_ROOT_PATH}/bin/pip3.7 /usr/local/bin/pip \ + && rm -rf /tmp/cpython-3.7.5 \ + && rm -f /tmp/v3.7.5.tar.gz + +# Set pip source +RUN mkdir -pv /root/.pip \ + && echo "[global]" > /root/.pip/pip.conf \ + && echo "trusted-host=mirrors.aliyun.com" >> /root/.pip/pip.conf \ + && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf + +# Install 
pip package +RUN pip install --no-cache-dir \ + numpy \ + wheel \ + nose \ + pytest \ + pytest-xdist + +# Install cmake (v3.14.1) +RUN cd /tmp \ + && wget https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-Linux-x86_64.sh \ + && mkdir -p ${CMAKE_ROOT_PATH} \ + && bash ./cmake-3.14.1-Linux-x86_64.sh --prefix=${CMAKE_ROOT_PATH} --exclude-subdir --skip-license \ + && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh diff --git a/docker/mindspore-gpu/devel/Dockerfile b/docker/mindspore-gpu/devel/Dockerfile new file mode 100644 index 0000000000..0b07605d9f --- /dev/null +++ b/docker/mindspore-gpu/devel/Dockerfile @@ -0,0 +1,95 @@ +FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 + +MAINTAINER leonwanghui + +# Set env +ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 +ENV CMAKE_ROOT_PATH /usr/local/cmake-3.14.1 +ENV OMPI_ROOT_PATH /usr/local/openmpi-3.1.5 +ENV PATH ${PYTHON_ROOT_PATH}/bin:${CMAKE_ROOT_PATH}/bin:${OMPI_ROOT_PATH}/bin:/usr/local/bin:$PATH + +# Install base tools +RUN apt update \ + && DEBIAN_FRONTEND=noninteractive apt install -y \ + vim \ + wget \ + curl \ + xz-utils \ + net-tools \ + openssh-client \ + git \ + ntpdate \ + tzdata \ + tcl \ + sudo \ + bash-completion + +# Install compile tools +RUN DEBIAN_FRONTEND=noninteractive apt install -y \ + gcc \ + g++ \ + zlibc \ + make \ + libgmp-dev \ + patch \ + autoconf \ + libtool \ + automake \ + flex \ + libnccl2=2.4.8-1+cuda10.1 \ + libnccl-dev=2.4.8-1+cuda10.1 + +# Set bash +RUN echo "dash dash/sh boolean false" | debconf-set-selections +RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash + +# Install python (v3.7.5) +RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ + libgdbm-dev libgdbm-compat-dev liblzma-dev libreadline-dev libsqlite3-dev \ + && cd /tmp \ + && wget https://github.com/python/cpython/archive/v3.7.5.tar.gz \ + && tar -xvf v3.7.5.tar.gz \ + && cd /tmp/cpython-3.7.5 \ + && mkdir -p ${PYTHON_ROOT_PATH} \ + && ./configure --prefix=${PYTHON_ROOT_PATH} \ 
+ && make -j4 \ + && make install -j4 \ + && rm -f /usr/local/bin/python \ + && rm -f /usr/local/bin/pip \ + && ln -s ${PYTHON_ROOT_PATH}/bin/python3.7 /usr/local/bin/python \ + && ln -s ${PYTHON_ROOT_PATH}/bin/pip3.7 /usr/local/bin/pip \ + && rm -rf /tmp/cpython-3.7.5 \ + && rm -f /tmp/v3.7.5.tar.gz + +# Set pip source +RUN mkdir -pv /root/.pip \ + && echo "[global]" > /root/.pip/pip.conf \ + && echo "trusted-host=mirrors.aliyun.com" >> /root/.pip/pip.conf \ + && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf + +# Install pip package +RUN pip install --no-cache-dir \ + numpy \ + wheel \ + nose \ + pytest \ + pytest-xdist + +# Install cmake (v3.14.1) +RUN cd /tmp \ + && wget https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-Linux-x86_64.sh \ + && mkdir -p ${CMAKE_ROOT_PATH} \ + && bash ./cmake-3.14.1-Linux-x86_64.sh --prefix=${CMAKE_ROOT_PATH} --exclude-subdir --skip-license \ + && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh + +# Install openmpi (v3.1.5) +RUN cd /tmp \ + && wget https://download.open-mpi.org/release/open-mpi/v3.1/openmpi-3.1.5.tar.gz \ + && tar -xvf openmpi-3.1.5.tar.gz \ + && cd /tmp/openmpi-3.1.5 \ + && mkdir -p ${OMPI_ROOT_PATH} \ + && ./configure --prefix=${OMPI_ROOT_PATH} \ + && make -j4 \ + && make install -j4 \ + && rm -rf /tmp/openmpi-3.1.5 \ + && rm -f /tmp/openmpi-3.1.5.tar.gz From d64f662c76d0ddc613506fece1e354cbd4bfa2ac Mon Sep 17 00:00:00 2001 From: chenzomi Date: Fri, 10 Apr 2020 09:37:57 +0800 Subject: [PATCH 182/367] quantization aware training frontend operators define. 
--- mindspore/nn/layer/__init__.py | 7 +- mindspore/nn/layer/_quant.py | 703 ++++++++++++++++++ mindspore/nn/layer/activation.py | 63 +- mindspore/ops/_grad/grad_nn_ops.py | 22 + mindspore/ops/_grad/grad_quant_ops.py | 82 ++ mindspore/ops/operations/__init__.py | 8 +- mindspore/ops/operations/_grad_ops.py | 32 + mindspore/ops/operations/_quant_ops.py | 525 +++++++++++++ mindspore/ops/operations/nn_ops.py | 71 +- tests/ut/python/nn/test_dense.py | 5 - .../pynative_mode/nn/test_activation.py | 5 - .../ut/python/pynative_mode/nn/test_dense.py | 5 - 12 files changed, 1505 insertions(+), 23 deletions(-) create mode 100644 mindspore/nn/layer/_quant.py create mode 100644 mindspore/ops/_grad/grad_quant_ops.py create mode 100644 mindspore/ops/operations/_quant_ops.py diff --git a/mindspore/nn/layer/__init__.py b/mindspore/nn/layer/__init__.py index dae18fe663..aed6cb7776 100644 --- a/mindspore/nn/layer/__init__.py +++ b/mindspore/nn/layer/__init__.py @@ -17,7 +17,7 @@ Layer. The high-level components(Cells) used to construct the neural network. 
""" -from .activation import Softmax, LogSoftmax, ReLU, ReLU6, Tanh, GELU, ELU, Sigmoid, PReLU, get_activation, LeakyReLU +from .activation import Softmax, LogSoftmax, ReLU, ReLU6, Tanh, GELU, ELU, Sigmoid, PReLU, get_activation, LeakyReLU, HSigmoid, HSwish from .normalization import BatchNorm1d, BatchNorm2d, LayerNorm from .container import SequentialCell, CellList from .conv import Conv2d, Conv2dTranspose @@ -26,8 +26,9 @@ from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot, ImageGradi from .embedding import Embedding from .pooling import AvgPool2d, MaxPool2d -__all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'PReLU', 'get_activation', 'LeakyReLU', - 'BatchNorm1d', 'BatchNorm2d', 'LayerNorm', 'ELU', +__all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', + 'PReLU', 'get_activation', 'LeakyReLU', 'HSigmoid', 'HSwish', 'ELU', + 'BatchNorm1d', 'BatchNorm2d', 'LayerNorm', 'SequentialCell', 'CellList', 'Conv2d', 'Conv2dTranspose', 'LSTM', diff --git a/mindspore/nn/layer/_quant.py b/mindspore/nn/layer/_quant.py new file mode 100644 index 0000000000..f27af8b269 --- /dev/null +++ b/mindspore/nn/layer/_quant.py @@ -0,0 +1,703 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Aware quantization.""" + +import numpy as np +import mindspore.nn as nn +import mindspore.common.dtype as mstype +from mindspore.ops import operations as P +from mindspore.ops import functional as F +from mindspore.common.parameter import Parameter +from mindspore.common.initializer import initializer +from mindspore.common.tensor import Tensor +from mindspore._checkparam import check_int_positive, check_bool, twice +from mindspore.nn.cell import Cell +from mindspore.nn.layer.conv import _Conv +from mindspore.nn.layer.activation import get_activation + +__all__ = [ + 'FakeQuantWithMinMax', + 'Conv2dBatchNormQuant', + 'Conv2dQuant', + 'DenseQuant', + 'ReLUQuant', + 'ReLU6Quant', + 'HSwishQuant', + 'HSigmoidQuant', + 'TensorAddQuant', +] + + +class FakeQuantWithMinMax(Cell): + r""" + Aware Quantization training op. This OP provide Fake quantization observer function on data with min and max. + + Args: + min_init (int, list): The dimension of channel or 1(layer). Default: -6. + max_init (int, list): The dimension of channel or 1(layer). Default: 6. + num_bits (int): Quantization number bit, support 4 and 8bit. Default: 8. + ema (bool): Exponential Moving Average algorithm update min and max. Default: False. + ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.9999. + per_channel (bool): Quantization by layer or channel. Default: False. + channel_size (int): declarate the min and max channel size, Default: 1. + quant_delay (int): Quantization delay parameters according by global step. Default: 0. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + + Inputs: + - **x** (Tensor) - The input of FakeQuantWithMinMax. + + Outputs: + Tensor, with the same type and shape as the `x`. 
+ + """ + + def __init__(self, + min_init=-6, + max_init=6, + num_bits=8, + ema=False, + ema_decay=0.999, + per_channel=False, + channel_size=1, + quant_delay=0, + symmetric=False, + narrow_range=False): + super(FakeQuantWithMinMax, self).__init__() + + self.min_init = min_init + self.num_bits = num_bits + self.max_init = max_init + self.ema = ema + self.ema_decay = ema_decay + self.per_channel = per_channel + self.channel_size = channel_size + self.quant_delay = quant_delay + self.symmetric = symmetric + self.narrow_range = narrow_range + + if per_channel: + min_array = np.array([self.min_init for i in range( + 0, self.channel_size)]).astype(np.float32) + max_array = np.array([self.max_init for i in range( + 0, self.channel_size)]).astype(np.float32) + self.fake_quant_train = P.FakeQuantWithMinMaxPerChannel(num_bits=self.num_bits, + ema=self.ema, + ema_decay=self.ema_decay, + quant_delay=self.quant_delay, + symmetric=self.symmetric, + narrow_range=self.narrow_range, + training=True) + self.fake_quant_infer = P.FakeQuantWithMinMaxPerChannel(num_bits=self.num_bits, + ema=self.ema, + ema_decay=ema_decay, + quant_delay=quant_delay, + symmetric=self.symmetric, + narrow_range=self.narrow_range, + training=False) + else: + min_array = np.array([min_init]).reshape(1).astype(np.float32) + max_array = np.array([max_init]).reshape(1).astype(np.float32) + self.fake_quant_train = P.FakeQuantWithMinMax(num_bits=self.num_bits, + ema=self.ema, + ema_decay=self.ema_decay, + quant_delay=self.quant_delay, + symmetric=self.symmetric, + narrow_range=self.narrow_range, + training=True) + self.fake_quant_infer = P.FakeQuantWithMinMax(num_bits=self.num_bits, + ema=self.ema, + ema_decay=ema_decay, + quant_delay=quant_delay, + symmetric=self.symmetric, + narrow_range=self.narrow_range, + training=False) + + self.min = Parameter( + Tensor(min_array), name='quant_min', requires_grad=False) + self.max = Parameter( + Tensor(max_array), name='quant_max', requires_grad=False) + + def 
extend_repr(self): + s = 'min_init={}, max_init={}, ema={}, ema_decay={}, per_channel={}, channel_size={}, quant_delay={}'.format( + self.min_init, self.max_init, self.ema, self.ema_decay, self.per_channel, self.channel_size, + self.quant_delay) + return s + + def construct(self, x): + if self.training: + out = self.fake_quant_train(x, self.min, self.max) + else: + out = self.fake_quant_infer(x, self.min, self.max) + return out + + +class Conv2dBatchNormQuant(Cell): + r""" + 2D convolution with BatchNormal op folded layer. + + For a more Detailed overview of Conv2d op. + + Args: + in_channels (int): The number of input channel :math:`C_{in}`. + out_channels (int): The number of output channel :math:`C_{out}`. + kernel_size (Union[int, tuple]): Specifies the height and width of the 2D convolution window. + stride (int): Specifies stride for all spatial dimensions with the same value. + pad_mode: (str): Specifies padding mode. The optional values are "same", "valid", "pad". Default: "same". + padding: (int): Implicit paddings on both sides of the input. Default: 0. + eps (int): Parameters for BatchNormal. Default: 1e-5. + momentum (int): Parameters for BatchNormal op. Default: 0.9. + weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the + convolution kernel. Default: 'None'. + beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the + beta vector. Default: 'None'. + gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the + gamma vector. Default: 'None'. + mean_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the + mean vector. Default: 'None'. + var_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the + variance vector. Default: 'None'. + quant_delay (int): Quantization delay parameters according by global step. Default: 0. + freeze_bn (int): Quantization freeze BatchNormal op according by global step. Default: 100000. 
+ fake (bool): Conv2dBatchNormQuant Cell add FakeQuantWithMinMax op or not. Default: True. + num_bits (int): Quantization number bit, support 4 and 8bit. Default: 8. + per_channel (bool): FakeQuantWithMinMax Parameters. Default: False. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + + Inputs: + - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. + + Outputs: + Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + pad_mode, + padding=0, + eps=1e-5, + momentum=0.9, + weight_init=None, + beta_init=None, + gamma_init=None, + mean_init=None, + var_init=None, + group=1, + quant_delay=0, + freeze_bn=100000, + fake=True, + num_bits=8, + per_channel=False, + symmetric=False, + narrow_range=False): + super(Conv2dBatchNormQuant, self).__init__() + self.stride = stride + self.conv = P.Conv2D(out_channel=out_channels, + kernel_size=kernel_size, + mode=1, + pad_mode=pad_mode, + pad=padding, + stride=stride, + dilation=1, + group=group) + self.fake = fake + self.freeze_bn = freeze_bn + if isinstance(kernel_size, int): + kernel_size = (kernel_size, kernel_size) + + if weight_init is None: + weight_init = initializer( + 'normal', [out_channels, in_channels // group, *kernel_size]) + self.weight = Parameter(weight_init, name='weight') + if gamma_init is None: + gamma_init = initializer('ones', [out_channels]) + self.gamma = Parameter(gamma_init, name='gamma') + if beta_init is None: + beta_init = initializer('zeros', [out_channels]) + self.beta = Parameter(beta_init, name='beta') + if mean_init is None: + mean_init = initializer('zeros', [out_channels]) + self.moving_mean = Parameter( + mean_init, name='moving_mean', requires_grad=False) + if var_init is None: + var_init = initializer('ones', [out_channels]) + self.moving_variance = Parameter( + var_init, 
name='moving_variance', requires_grad=False) + + self.step = Parameter(initializer( + 'normal', [1], dtype=mstype.int32), name='step', requires_grad=False) + + self.fake_quant_weight = nn.FakeQuantWithMinMax(min_init=-6, + max_init=6, + ema=False, + num_bits=num_bits, + quant_delay=quant_delay, + per_channel=per_channel, + channel_size=out_channels, + symmetric=symmetric, + narrow_range=narrow_range) + + self.batchnorm_fold_train = P.BatchNormFold(epsilon=eps, + momentum=momentum, + is_training=True, + freeze_bn=freeze_bn) + self.batchnorm_fold_infer = P.BatchNormFold(epsilon=eps, + momentum=momentum, + is_training=False, + freeze_bn=freeze_bn) + self.correct_mul = P.CorrectionMul() + self.relu = P.ReLU() + self.batchnorm_fold2 = P.BatchNormFold2(freeze_bn=freeze_bn) + self.batchnorm_fold2_infer = P.BatchNormFold2(freeze_bn=0) + self.one = Tensor(1, mstype.int32) + self.assignadd = P.AssignAdd() + + def extend_repr(self): + s = 'fake={}, freeze_bn={}'.format(self.fake, self.freeze_bn) + return s + + def construct(self, x): + if self.training: + beta = self.beta + gamma = self.gamma + gmean = self.moving_mean + gvar = self.moving_variance + step = self.step + out_conv = self.conv(x, self.weight) + batch_mean, batch_std, running_mean, running_std = self.batchnorm_fold_train( + out_conv, gmean, gvar, step) + # BN fold1 + weight = self.correct_mul(self.weight, gamma, running_std) + if self.fake: + weight = self.fake_quant_weight(weight) + out = self.conv(x, weight) + # BN fold2 + out = self.batchnorm_fold2( + out, beta, gamma, batch_std, batch_mean, running_std, running_mean, step) + F.control_depend(out, self.assignadd(self.step, self.one)) + else: + step = self.step + out_conv = self.conv(x, self.weight) + batch_mean, batch_std, running_mean, running_std = self.batchnorm_fold_infer( + out_conv, self.moving_mean, self.moving_variance, step) + weight = self.correct_mul(self.weight, self.gamma, running_std) + if self.fake: + weight = self.fake_quant_weight(weight) + out 
= self.conv(x, weight) + out = self.batchnorm_fold2_infer(out, self.beta, self.gamma, batch_std, batch_mean, + running_std, running_mean, step) + return out + + +class Conv2dQuant(_Conv): + r""" + 2D convolution with fake quant op layer. + + For a more Detailed overview of Conv2d op. + + Args: + in_channels (int): The number of input channel :math:`C_{in}`. + out_channels (int): The number of output channel :math:`C_{out}`. + kernel_size (Union[int, tuple]): Specifies the height and width of the 2D convolution window. + stride (int): Specifies stride for all spatial dimensions with the same value. Default: 1. + pad_mode: (str): Specifies padding mode. The optional values are "same", "valid", "pad". Default: "same". + padding: (int): Implicit paddings on both sides of the input. Default: 0. + dilation (int): Specifying the dilation rate to use for dilated convolution. Default: 1. + group (int): Split filter into groups, `in_ channels` and `out_channels` should be + divisible by the number of groups. Default: 1. + has_bias (bool): Specifies whether the layer uses a bias vector. Default: False. + weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel. + Default: 'normal'. + bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Default: 'zeros'. + quant_delay (int): Quantization delay parameters according by global step. Default: 0. + num_bits (int): Quantization number bit, support 4 and 8bit. Default: 8. + per_channel (bool): FakeQuantWithMinMax Parameters. Default: False. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + + Inputs: + - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. + + Outputs: + Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`. 
+ + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + pad_mode='same', + padding=0, + dilation=1, + group=1, + has_bias=False, + weight_init='normal', + bias_init='zeros', + quant_delay=0, + num_bits=8, + per_channel=False, + symmetric=False, + narrow_range=False): + kernel_size = twice(kernel_size) + super(Conv2dQuant, self).__init__(in_channels, out_channels, kernel_size, stride, pad_mode, padding, dilation, + group, has_bias, weight_init, bias_init) + self.conv2d = P.Conv2D(out_channel=self.out_channels, kernel_size=self.kernel_size, mode=1, + pad_mode=self.pad_mode, pad=self.padding, stride=self.stride, dilation=self.dilation, + group=self.group) + self.bias_add = P.BiasAdd() + if pad_mode not in ('valid', 'same', 'pad'): + raise ValueError('Attr \'pad_mode\' of \'Conv2d\' Op passed ' + + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.') + self.fake_quant_weight = nn.FakeQuantWithMinMax(min_init=-6, + max_init=6, + ema=False, + num_bits=num_bits, + quant_delay=quant_delay, + per_channel=per_channel, + channel_size=out_channels, + symmetric=symmetric, + narrow_range=narrow_range) + + def construct(self, x): + weight_q = self.fake_quant_weight(self.weight) + out = self.conv2d(x, weight_q) + if self.has_bias: + return self.bias_add(out, self.bias) + return out + + +class DenseQuant(Cell): + r""" + The fully connected layer with fake quant op. + + For a more Detailed overview of Dense op. + + Args: + in_channels (int): The dimension of the input space. + out_channels (int): The dimension of the output space. + weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype + is same as input x. The values of str refer to the function `initializer`. Default: 'normal'. + bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is + same as input x. The values of str refer to the function `initializer`. 
Default: 'zeros'. + has_bias (bool): Specifies whether the layer uses a bias vector. Default: True. + activation (str): Regularizer function applied to the output of the layer, eg. 'relu'. Default: None. + num_bits (int): Quantization number bit, support 4 and 8bit. Default: 8. + quant_delay (int): Quantization delay parameters according by global step. Default: 0. + per_channel (bool): FakeQuantWithMinMax Parameters. Default: False. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + + Inputs: + - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. + + Outputs: + Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`. + """ + + def __init__( + self, + in_channels, + out_channels, + weight_init='normal', + bias_init='zeros', + has_bias=True, + activation=None, + num_bits=8, + quant_delay=0, + per_channel=False, + symmetric=False, + narrow_range=False): + super(DenseQuant, self).__init__() + self.in_channels = check_int_positive(in_channels) + self.out_channels = check_int_positive(out_channels) + self.has_bias = check_bool(has_bias) + + if isinstance(weight_init, Tensor): + if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \ + weight_init.shape()[1] != in_channels: + raise ValueError("weight_init shape error") + + self.weight = Parameter(initializer( + weight_init, [out_channels, in_channels]), name="weight") + + if self.has_bias: + if isinstance(bias_init, Tensor): + if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels: + raise ValueError("bias_init shape error") + + self.bias = Parameter(initializer( + bias_init, [out_channels]), name="bias") + + self.matmul = P.MatMul(transpose_b=True) + self.bias_add = P.BiasAdd() + + self.activation = get_activation(activation) + self.activation_flag = self.activation is not None + self.fake_quant_weight = nn.FakeQuantWithMinMax(min_init=-6, + max_init=6, + 
ema=False, + num_bits=num_bits, + quant_delay=quant_delay, + per_channel=per_channel, + channel_size=out_channels, + symmetric=symmetric, + narrow_range=narrow_range) + + def construct(self, x): + """Use operators to construct to Dense layer.""" + output = self.fake_quant_weight(self.weight) + output = self.matmul(x, output) + if self.has_bias: + output = self.bias_add(output, self.bias) + if self.activation_flag: + return self.activation(output) + return output + + def extend_repr(self): + """A pretty print for Dense layer.""" + str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}'.format( + self.in_channels, self.out_channels, self.weight, self.has_bias) + if self.has_bias: + str_info = str_info + ', bias={}'.format(self.bias) + if self.activation_flag: + str_info = str_info + ', activation={}'.format(self.activation) + + return str_info + + +class ReLUQuant(Cell): + r""" + ReLUQuant activation function. Add Fake Quant OP after Relu OP. + + For a more Detailed overview of ReLU op. + + Args: + num_bits (int): Quantization number bit, support 4 and 8bit. Default: 8. + quant_delay (int): Quantization delay parameters according by global step. Default: 0. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + + Inputs: + - **x** (Tensor) - The input of ReLUQuant. + + Outputs: + Tensor, with the same type and shape as the `x`. + + """ + + def __init__(self, + num_bits=8, + quant_delay=0, + symmetric=False, + narrow_range=False): + super(ReLUQuant, self).__init__() + self.fake_quant_act = nn.FakeQuantWithMinMax(min_init=0, + max_init=6, + num_bits=num_bits, + quant_delay=quant_delay, + ema=True, + symmetric=symmetric, + narrow_range=narrow_range) + self.relu = P.ReLU() + + def construct(self, x): + x = self.relu(x) + x = self.fake_quant_act(x) + return x + + +class ReLU6Quant(Cell): + r""" + ReLU6Quant activation function. 
+ + Add Fake Quant OP after Relu6. Not Recommand to used these cell for Fake Quant Op + Will climp the max range of the activation and the relu6 do the same operation. + For a more Detailed overview of ReLU6 op. + + Args: + num_bits (int): Quantization number bit, support 4 and 8bit. Default: 8. + quant_delay (int): Quantization delay parameters according by global step. Default: 0. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + + Inputs: + - **x** (Tensor) - The input of ReLU6Quant. + + Outputs: + Tensor, with the same type and shape as the `x`. + + """ + + def __init__(self, num_bits=8, quant_delay=0, symmetric=False, + narrow_range=False): + super(ReLU6Quant, self).__init__() + self.fake_quant_act = nn.FakeQuantWithMinMax(min_init=0, + max_init=6, + num_bits=num_bits, + quant_delay=quant_delay, + ema=True, + symmetric=symmetric, + narrow_range=narrow_range) + self.relu6 = P.ReLU6() + + def construct(self, x): + x = self.relu6(x) + x = self.fake_quant_act(x) + return x + + +class HSwishQuant(Cell): + r""" + HSwishQuant activation function. Add Fake Quant OP after HSwish OP. + + For a more Detailed overview of HSwish op. + + Args: + num_bits (int): Quantization number bit, support 4 and 8bit. Default: 8. + quant_delay (int): Quantization delay parameters according by global step. Default: 0. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + + Inputs: + - **x** (Tensor) - The input of HSwishQuant. + + Outputs: + Tensor, with the same type and shape as the `x`. 
+ + """ + + def __init__(self, + num_bits=8, + quant_delay=0, + symmetric=False, + narrow_range=False): + super(HSwishQuant, self).__init__() + self.fake_quant_act_before = nn.FakeQuantWithMinMax(min_init=0, + max_init=6, + num_bits=num_bits, + quant_delay=quant_delay, + ema=True, + symmetric=symmetric, + narrow_range=narrow_range) + self.fake_quant_act_after = nn.FakeQuantWithMinMax(min_init=0, + max_init=6, + num_bits=num_bits, + quant_delay=quant_delay, + ema=True, + symmetric=symmetric, + narrow_range=narrow_range) + self.act = P.HSwish() + + def construct(self, x): + x = self.fake_quant_act_before(x) + x = self.act(x) + x = self.fake_quant_act_after(x) + return x + + +class HSigmoidQuant(Cell): + r""" + HSigmoidQuant activation function. Add Fake Quant OP before and after HSigmoid OP. + + For a more Detailed overview of HSigmoid op. + + Args: + num_bits (int): Quantization number bit, support 4 and 8bit. Default: 8. + quant_delay (int): Quantization delay parameters according by global step. Default: 0. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + + Inputs: + - **x** (Tensor) - The input of HSigmoidQuant. + + Outputs: + Tensor, with the same type and shape as the `x`. 
+ + """ + + def __init__(self, + num_bits=8, + quant_delay=0, + symmetric=False, + narrow_range=False): + super(HSigmoidQuant, self).__init__() + self.fake_quant_act_before = nn.FakeQuantWithMinMax(min_init=0, + max_init=6, + num_bits=num_bits, + quant_delay=quant_delay, + ema=True, + symmetric=symmetric, + narrow_range=narrow_range) + self.fake_quant_act_after = nn.FakeQuantWithMinMax(min_init=0, + max_init=6, + num_bits=num_bits, + quant_delay=quant_delay, + ema=True, + symmetric=symmetric, + narrow_range=narrow_range) + self.act = P.HSigmoid() + + def construct(self, x): + x = self.fake_quant_act_before(x) + x = self.act(x) + x = self.fake_quant_act_after(x) + return x + + +class TensorAddQuant(Cell): + r""" + Add Fake Quant OP after TensorAdd OP. + + For a more Detailed overview of TensorAdd op. + + Args: + num_bits (int): Quantization number bit, support 4 and 8bit. Default: 8. + quant_delay (int): Quantization delay parameters according by global step. Default: 0. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + + Inputs: + - **x** (Tensor) - The input of TensorAddQuant. + + Outputs: + Tensor, with the same type and shape as the `x`. 
+ + """ + + def __init__(self, + num_bits=8, + quant_delay=0, + symmetric=False, + narrow_range=False): + super(TensorAddQuant, self).__init__() + self.fake_quant_act = nn.FakeQuantWithMinMax(min_init=-6, + max_init=6, + num_bits=num_bits, + quant_delay=quant_delay, + ema=True, + symmetric=symmetric, + narrow_range=narrow_range) + self.add = P.TensorAdd() + + def construct(self, x1, x2): + x = self.add(x1, x2) + x = self.fake_quant_act(x) + return x diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py index ad63dde8bc..12d6c74dcd 100644 --- a/mindspore/nn/layer/activation.py +++ b/mindspore/nn/layer/activation.py @@ -234,7 +234,7 @@ class Tanh(Cell): class GELU(Cell): - """ + r""" Gaussian error linear unit activation function. Applies GELU function to each element of the input. The input is a Tensor with any valid shape. @@ -332,15 +332,74 @@ class PReLU(Cell): return v +class HSwish(Cell): + r""" + rHard swish activation function. + + Applies hswish-type activation element-wise. The input is a Tensor with any valid shape. + + Hard swish is defined as: + + .. math:: + \text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6}, + + where :math:`x_{i}` is the :math:`i`-th slice along the given dim of the input Tensor. + + Inputs: + - **input_data** (Tensor) - The input of Hswish. + + Outputs: + Tensor, with the same type and shape as the `input_data`. + + """ + def __init__(self): + super(HSwish, self).__init__() + self.hswish = P.HSwish() + + def construct(self, x): + return self.hswish(x) + + +class HSigmoid(Cell): + r""" + Hard sigmoid activation function. + + Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape. + + Hard sigmoid is defined as: + + .. math:: + \text{hsigmoid}(x_{i}) = max(0, min(1, \ftac{2 * x_{i} + 5}{10})), + + where :math:`x_{i}` is the :math:`i`-th slice along the given dim of the input Tensor. + + Inputs: + - **input_data** (Tensor) - The input of HSigmoid. 
+ + Outputs: + Tensor, with the same type and shape as the `input_data`. + + """ + def __init__(self): + super(HSigmoid, self).__init__() + self.hsigmoid = P.HSigmoid() + + def construct(self, x): + return self.hsigmoid(x) + + _activation = { 'softmax': Softmax, 'logsoftmax': LogSoftmax, 'relu': ReLU, + 'relu6': ReLU6, 'tanh': Tanh, 'gelu': GELU, 'sigmoid': Sigmoid, 'prelu': PReLU, - 'leakyrelu': LeakyReLU + 'leakyrelu': LeakyReLU, + 'hswish': HSwish, + 'hsigmoid': HSigmoid, } diff --git a/mindspore/ops/_grad/grad_nn_ops.py b/mindspore/ops/_grad/grad_nn_ops.py index fbe48aff97..1b18d9f248 100755 --- a/mindspore/ops/_grad/grad_nn_ops.py +++ b/mindspore/ops/_grad/grad_nn_ops.py @@ -172,6 +172,28 @@ def get_bprop_relu6(self): return bprop +@bprop_getters.register(P.HSwish) +def get_bprop_hswish(self): + """Grad definition for `HSwish` operation.""" + input_grad = G.HSwishGrad() + + def bprop(x, out, dout): + dx = input_grad(dout, x) + return (dx,) + return bprop + + +@bprop_getters.register(P.HSigmoid) +def get_bprop_hsigmoid(self): + """Grad definition for `HSigmoid` operation.""" + input_grad = G.HSigmoidGrad() + + def bprop(x, out, dout): + dx = input_grad(dout, x) + return (dx,) + return bprop + + @bprop_getters.register(P.Elu) def get_bprop_elu(self): """Grad definition for `Elu` operation.""" diff --git a/mindspore/ops/_grad/grad_quant_ops.py b/mindspore/ops/_grad/grad_quant_ops.py new file mode 100644 index 0000000000..5d4ad22392 --- /dev/null +++ b/mindspore/ops/_grad/grad_quant_ops.py @@ -0,0 +1,82 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Generate bprop for aware quantization ops""" + +from .. import operations as P +from .grad_base import bprop_getters +from ..composite.multitype_ops.zeros_like_impl import zeros_like + + +@bprop_getters.register(P.FakeQuantWithMinMax) +def get_bprop_fakequant_with_minmax(self): + """Generate bprop for FakeQuantWithMinMax""" + op = P.FakeQuantWithMinMaxGrad(num_bits=self.num_bits, quant_delay=self.quant_delay) + + def bprop(x, x_min, x_max, out, dout): + dx = op(dout, x, x_min, x_max) + return dx, zeros_like(x_min), zeros_like(x_max) + + return bprop + + +@bprop_getters.register(P.FakeQuantWithMinMaxPerChannel) +def get_bprop_fakequant_with_minmax_perchannel(self): + """Generate bprop for FakeQuantWithMinMaxPerChannel""" + op = P.FakeQuantWithMinMaxPerChannelGrad(num_bits=self.num_bits, quant_delay=self.quant_delay) + + def bprop(x, x_min, x_max, out, dout): + dx = op(dout, x, x_min, x_max) + return dx, zeros_like(x_min), zeros_like(x_max) + + return bprop + + +@bprop_getters.register(P.BatchNormFold) +def get_bprop_batchnorm_fold(self): + """Generate bprop for BatchNormFold""" + op = P.BatchNormFoldGrad(self.epsilon, self.is_training, self.freeze_bn) + + def bprop(x, mean, variance, global_step, out, dout): + dx = op(dout[0], dout[1], x, out[0], out[1], global_step) + return dx, zeros_like(mean), zeros_like(variance), zeros_like(global_step) + + return bprop + + +@bprop_getters.register(P.CorrectionMul) +def get_bprop_correction_mul(self): + """Generate bprop for 
CorrectionMul""" + grad = P.CorrectionMulGrad() + + def bprop(x, batch_std, running_std, out, dout): + dx, d_batch_std = grad(dout, x, batch_std, running_std) + return dx, d_batch_std, zeros_like(running_std) + + return bprop + + +@bprop_getters.register(P.BatchNormFold2) +def get_bprop_batchnorm_fold2(self): + """Generate bprop for CorrectionAdd""" + op_f = P.BatchNormFold2Grad(freeze_bn=self.freeze_bn) + + def bprop(x, beta, gamma, batch_std, batch_mean, running_std, running_mean, global_step, out, dout): + d_batch_std, d_batch_mean, d_beta, d_gamma, d_x = op_f(dout, x, gamma, batch_std, batch_mean, running_std, + running_mean, global_step) + return d_x, d_beta, d_gamma, d_batch_std, d_batch_mean, zeros_like(running_std), zeros_like(running_mean), \ + zeros_like(global_step) + + return bprop diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 45cd856298..8bfca77b38 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -59,7 +59,7 @@ from .nn_ops import (LSTM, SGD, Adam, ApplyMomentum, BatchNorm, LogSoftmax, MaxPool, AvgPool, Conv2DBackpropInput, - MaxPoolWithArgmax, OneHot, Pad, PReLU, ReLU, ReLU6, + MaxPoolWithArgmax, OneHot, Pad, PReLU, ReLU, ReLU6, HSwish, HSigmoid, ResizeBilinear, Sigmoid, SigmoidCrossEntropyWithLogits, SmoothL1Loss, Softmax, @@ -68,7 +68,8 @@ from .nn_ops import (LSTM, SGD, Adam, ApplyMomentum, BatchNorm, TopK, BinaryCrossEntropy, SparseApplyAdagrad, LARSUpdate, ApplyFtrl, ApplyRMSProp, ApplyCenteredRMSProp) from .other_ops import Assign, IOU, BoundingBoxDecode, BoundingBoxEncode, CheckValid, MakeRefKey - +from . 
import _quant_ops +from ._quant_ops import * __all__ = [ 'TensorAdd', @@ -138,6 +139,8 @@ __all__ = [ 'ReLU6', 'Elu', 'Sigmoid', + 'HSwish', + 'HSigmoid', 'Tanh', 'RandomChoiceWithMask', 'ResizeBilinear', @@ -241,4 +244,5 @@ __all__ = [ "ApplyCenteredRMSProp" ] +__all__.extend(_quant_ops.__all__) __all__.sort() diff --git a/mindspore/ops/operations/_grad_ops.py b/mindspore/ops/operations/_grad_ops.py index f38044ab6a..f0a9a2f658 100644 --- a/mindspore/ops/operations/_grad_ops.py +++ b/mindspore/ops/operations/_grad_ops.py @@ -805,6 +805,38 @@ class SigmoidGrad(PrimitiveWithInfer): return out +class HSigmoidGrad(PrimitiveWithInfer): + """Gets the gradient of HSigmoid operation.""" + + @prim_attr_register + def __init__(self): + self.init_prim_io_names(inputs=['y_grad', 'x'], outputs=['output']) + + def infer_shape(self, y_grad_shape, x_shape): + return x_shape + + def infer_dtype(self, y_grad_dtype, x_dtype): + validator.check_typename("y_grad dtype", y_grad_dtype, (mstype.float16, mstype.float32)) + validator.check_typename("x dtype", x_dtype, (mstype.float16, mstype.float32)) + return x_dtype + + +class HSwishGrad(PrimitiveWithInfer): + """Gets the gradient of HSwish operation.""" + + @prim_attr_register + def __init__(self): + self.init_prim_io_names(inputs=['y_grad', 'x'], outputs=['output']) + + def infer_shape(self, y_grad_shape, x_shape): + return x_shape + + def infer_dtype(self, y_grad_dtype, x_dtype): + validator.check_typename("y_grad dtype", y_grad_dtype, (mstype.float16, mstype.float32)) + validator.check_typename("x_ dtype", x_dtype, (mstype.float16, mstype.float32)) + return x_dtype + + class SigmoidCrossEntropyWithLogitsGrad(PrimitiveWithInfer): """Computes the gradients of `SigmoidCrossEntropyWithLogits`.""" diff --git a/mindspore/ops/operations/_quant_ops.py b/mindspore/ops/operations/_quant_ops.py new file mode 100644 index 0000000000..14d1bc9234 --- /dev/null +++ b/mindspore/ops/operations/_quant_ops.py @@ -0,0 +1,525 @@ +# Copyright 2020 Huawei 
Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http: // www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Operators for quantization.""" + +from ..._checkparam import ParamValidator as validator +from ..._checkparam import Rel, check_bool, check_int_positive, check_int +from ..primitive import PrimitiveWithInfer, prim_attr_register +from ...common import dtype as mstype + +__all__ = ["FakeQuantWithMinMax", + "FakeQuantWithMinMaxGrad", + "FakeQuantWithMinMaxPerChannel", + "FakeQuantWithMinMaxPerChannelGrad", + "BatchNormFold", + "BatchNormFoldGrad", + "CorrectionMul", + "CorrectionMulGrad", + "BatchNormFold2", + "BatchNormFold2Grad", + ] + + +class FakeQuantWithMinMax(PrimitiveWithInfer): + r""" + Simulate the quantize and dequantize operations in training time. + + Args: + num_bits (int) : Number bits for aware quantilization. Default: 8. + ema (bool): Use EMA algorithm update value min and max. Default: False. + ema_decay (int) : EMA algorithm decay parameter. Default: 0.999. + quant_delay (int): Quantilization delay parameter. Before delay step in training time not update + simulate aware quantize funcion. After delay step in training time begin simulate the aware + quantize funcion. Default: 0. + symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + training (bool): Training the network or not. 
Default: True. + + Inputs: + - **x** (Tensor) : float32 Tensor representing the shape of the output tensor. + - **min** (Tensor) : Value of the min range of the input data x. + - **max** (Tensor) : Value of the max range of the input data x. + + Outputs: + - Tensor: Simulate quantize tensor of x. + + Examples: + >>> input_tensor = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32) + >>> min_tensor = Tensor(np.array([-6]), mstype.float32) + >>> max_tensor = Tensor(np.array([6]), mstype.float32) + >>> output_tensor = P.FakeQuantWithMinMax(num_bits=8)(input_tensor, min_tensor, max_tensor) + """ + support_quant_bit = [4, 7, 8] + + @prim_attr_register + def __init__(self, num_bits=8, ema=False, ema_decay=0.999, quant_delay=0, symmetric=False, narrow_range=False, + training=True): + """init FakeQuantWithMinMax OP""" + if num_bits not in self.support_quant_bit: + raise ValueError("Attr \'num_bits\' is not support.") + if ema and not ema_decay: + raise ValueError( + "Attr \'ema\' and \'ema_decay\' should set together.") + + self.ema = check_bool(ema) + self.symmetric = check_bool(symmetric) + self.narrow_range = check_bool(narrow_range) + self.training = check_bool(training) + self.ema_decay = validator.check_number_range( + 'ema_decay', ema_decay, 0, 1, Rel.INC_BOTH) + self.num_bits = check_int_positive(num_bits) + self.quant_delay = check_int(quant_delay) + self.init_prim_io_names(inputs=['x', 'min', 'max'], + outputs=['out']) + + def infer_shape(self, x_shape, min_shape, max_shape): + validator.check_integer("x shape", len(x_shape), 1, Rel.GT) + validator.check("min shape", min_shape, "max shape", max_shape) + validator.check_integer("min shape", len(min_shape), 1, Rel.EQ) + validator.check_integer("max shape", len(min_shape), 1, Rel.EQ) + return x_shape + + def infer_dtype(self, x_type, min_type, max_type): + validator.check_typename( + "x type", x_type, (mstype.float16, mstype.float32)) + validator.check_typename("min type", min_type, + (mstype.float16, 
mstype.float32)) + validator.check_typename("max type", max_type, + (mstype.float16, mstype.float32)) + return x_type + + +class FakeQuantWithMinMaxGrad(PrimitiveWithInfer): + """Performs grad of FakeQuantWithMinMax operation.""" + support_quant_bit = [4, 8] + + @prim_attr_register + def __init__(self, num_bits=8, quant_delay=0): + if num_bits not in self.support_quant_bit: + raise ValueError("Attr \'num_bits\' is not support.") + + self.quant_delay = check_int(quant_delay) + self.num_bits = check_int_positive(num_bits) + self.init_prim_io_names(inputs=['dout', 'x', 'min', 'max'], + outputs=['dx']) + + def infer_shape(self, dout_shape, x_shape, min_shape, max_shape): + validator.check("dout shape", dout_shape, "x shape", x_shape) + validator.check("min shape", min_shape, "max shape", max_shape) + validator.check_integer("min shape", len(min_shape), 1, Rel.EQ) + validator.check_integer("max shape", len(min_shape), 1, Rel.EQ) + return dout_shape + + def infer_dtype(self, dout_type, x_type, min_type, max_type): + validator.check_typename( + "dout type", dout_type, (mstype.float16, mstype.float32)) + validator.check_typename( + "x type", x_type, (mstype.float16, mstype.float32)) + validator.check_typename("min type", min_type, + (mstype.float16, mstype.float32)) + validator.check_typename("max type", max_type, + (mstype.float16, mstype.float32)) + return dout_type + + +class FakeQuantWithMinMaxPerChannel(PrimitiveWithInfer): + r""" + Simulate the quantize and dequantize operations in training time base on per channel. + + Args: + num_bits (int) : Number bits to quantilization. Default: 8. + ema (bool): Use EMA algorithm update tensor min and tensor max. Default: False. + ema_decay (int) : EMA algorithm decay parameter. Default: 0.999. + quant_delay (int): Quantilization delay parameter. Before delay step in training time not + update the weight data to simulate quantize operation. After delay step in training time + begin simulate the quantize operation. Default: 0. 
+ symmetric (bool): Quantization algorithm use symmetric or not. Default: False. + narrow_range (bool): Quantization algorithm use narrow range or not. Default: False. + training (bool): Training the network or not. Default: True. + + Inputs: + - **x** (Tensor) : 4-D float32 Tensor representing the shape of the output tensor. + - **min** (int, float) : Value of the min range of the input data. + - **max** (int, float) : Value of the max range of the input data. + + Outputs: + - Tensor, has the same type as input. + + Examples: + >>> input_tensor = Tensor(np.random.rand(3,4,5,5), mstype.float32) + >>> min_tensor = Tensor(np.array([-6.0, -6.5, -4.0, -5.0]), mstype.float32) + >>> max_tensor = Tensor(np.array([6.0, 6.5, 4.0, 5.0]), mstype.float32) + >>> output_tensor = P.FakeQuantWithMinMax(num_bits=8)(input_tensor, min_tensor, max_tensor) + """ + support_quant_bit = [4, 8] + channel_idx = 0 + + @prim_attr_register + def __init__(self, num_bits=8, ema=False, ema_decay=0.999, quant_delay=0, symmetric=False, narrow_range=False, + training=True): + """init FakeQuantWithMinMaxPerChannel OP""" + if num_bits not in self.support_quant_bit: + raise ValueError("Attr \'num_bits\' is not support.") + if ema and not ema_decay: + raise ValueError( + "Attr \'ema\' and \'ema_decay\' should set together.") + + self.ema = check_bool(ema) + self.symmetric = check_bool(symmetric) + self.narrow_range = check_bool(narrow_range) + self.training = check_bool(training) + self.ema_decay = validator.check_number_range( + 'ema_decay', ema_decay, 0, 1, Rel.INC_BOTH) + self.num_bits = check_int_positive(num_bits) + self.quant_delay = check_int(quant_delay) + self.init_prim_io_names(inputs=['x', 'min', 'max'], + outputs=['out']) + + def infer_shape(self, x_shape, min_shape, max_shape): + validator.check_integer("x shape", len(x_shape), 1, Rel.GT) + validator.check_integer( + "min len", min_shape[0], x_shape[self.channel_idx], Rel.EQ) + validator.check_integer( + "max len", max_shape[0], 
x_shape[self.channel_idx], Rel.EQ) + return x_shape + + def infer_dtype(self, x_type, min_type, max_type): + validator.check_typename( + "x type", x_type, (mstype.float16, mstype.float32)) + validator.check_typename("min type", min_type, + (mstype.float16, mstype.float32)) + validator.check_typename("max type", max_type, + (mstype.float16, mstype.float32)) + return x_type + + +class FakeQuantWithMinMaxPerChannelGrad(PrimitiveWithInfer): + """Performs grad of FakeQuantWithMinMaxPerChannel operation.""" + support_quant_bit = [4, 8] + + @prim_attr_register + def __init__(self, num_bits=8, quant_delay=0): + """init FakeQuantWithMinMaxPerChannel Fill""" + if num_bits not in self.support_quant_bit: + raise ValueError("Attr \'num_bits\' is not support.") + + self.quant_delay = check_int(quant_delay) + self.num_bits = check_int_positive(num_bits) + self.init_prim_io_names(inputs=['dout', 'x', 'min', 'max'], + outputs=['dx']) + + def infer_shape(self, dout_shape, x_shape, min_shape, max_shape): + validator.check("dout shape", dout_shape, "x shape", x_shape) + validator.check("min shape", min_shape, "max shape", max_shape) + return dout_shape + + def infer_dtype(self, dout_type, x_type, min_type, max_type): + validator.check_typename( + "dout", dout_type, (mstype.float16, mstype.float32)) + validator.check_typename("x", x_type, (mstype.float16, mstype.float32)) + validator.check_typename( + "min", min_type, (mstype.float16, mstype.float32)) + validator.check_typename( + "max", max_type, (mstype.float16, mstype.float32)) + return dout_type + + +class BatchNormFold(PrimitiveWithInfer): + """ + Batch normalization folded. + + Args: + momentum (float): Momentum value should be [0, 1]. Default: 0.1. + epsilon (float): A small float number to avoid dividing by 0. 1e-12 if dtype in + float32 else 1e-3. Default: 1e-12. + is_training (bool): In training mode set True, else set False. Default: True. 
+ freeze_bn (int): Delay in steps at which computation switches from regular batch + norm to frozen mean and std. Default: 0. + + Inputs: + - **x** (Tensor) - Tensor of shape :math:`(N, C)`. + - **mean** (Tensor) - Tensor of shape :math:`(C,)`. + - **variance** (Tensor) - Tensor of shape :math:`(C,)`. + - **global_step** (Tensor) - Tensor to record current global step. + + Outputs: + Tuple of 4 Tensor, the normalized input and the updated parameters. + + - **batch_mean** (Tensor) - Tensor of shape :math:`(C,)`. + - **batch_std** (Tensor) - Tensor of shape :math:`(C,)`. + - **running_mean** (Tensor) - Tensor of shape :math:`(C,)`. + - **running_std** (Tensor) - Tensor of shape :math:`(C,)`. + + """ + channel = 1 + + @prim_attr_register + def __init__(self, momentum=0.1, epsilon=1e-12, is_training=True, freeze_bn=0): + """init batch norm fold layer""" + self.momentum = validator.check_number_range( + 'momentum', momentum, 0, 1, Rel.INC_BOTH) + self.epsilon = validator.check_float_positive('epsilon', epsilon) + self.is_training = check_bool(is_training) + self.freeze_bn = check_int(freeze_bn) + + self.init_prim_io_names(inputs=['x', 'mean', 'variance', 'global_step'], + outputs=['batch_mean', 'batch_std', 'running_mean', 'running_std']) + + def infer_shape(self, x_shape, mean_shape, variance_shape, global_step_shape): + validator.check("mean shape", mean_shape, + "gamma_shape", variance_shape) + validator.check("mean_shape size", + mean_shape[0], "input channel", x_shape[self.channel]) + validator.check_integer("global_step shape", + len(global_step_shape), 1, Rel.EQ) + return mean_shape, mean_shape, mean_shape, mean_shape + + def infer_dtype(self, x_type, mean_type, variance_type, global_step_type): + validator.check("input type", x_type, "mean type", mean_type) + validator.check("input type", x_type, "variance type", variance_type) + validator.check_typename("input type", x_type, + (mstype.float16, mstype.float32)) + validator.check_typename( + "global_step type", 
global_step_type, (mstype.int32,)) + return x_type, x_type, x_type, x_type + + +class BatchNormFoldGrad(PrimitiveWithInfer): + """Performs grad of BatchNormFold operation.""" + channel = 1 + + @prim_attr_register + def __init__(self, epsilon=1e-12, is_training=True, freeze_bn=0): + """init BatchNormGrad layer""" + self.is_training = check_bool(is_training) + self.freeze_bn = check_int(freeze_bn) + self.epsilon = validator.check_float_positive('epsilon', epsilon) + self.init_prim_io_names(inputs=['d_batch_mean', 'd_batch_std', 'x', 'batch_mean', 'batch_std', 'global_step'], + outputs=['dx']) + + def infer_shape(self, d_batch_mean_shape, d_batch_std_shape, x_shape, batch_mean_shape, batch_std_shape, + global_step_shape): + validator.check("d_batch_mean shape", d_batch_mean_shape, + "d_batch_std shape", d_batch_std_shape) + validator.check("d_batch_mean shape", d_batch_mean_shape, + "batch_mean shape", batch_mean_shape) + validator.check("d_batch_mean shape", d_batch_mean_shape, + "batch_std shape", batch_std_shape) + validator.check( + "x_shape shape", d_batch_mean_shape[0], "input channel", x_shape[self.channel]) + validator.check_integer("global_step shape", + len(global_step_shape), 1, Rel.EQ) + return x_shape + + def infer_dtype(self, d_batch_mean_type, d_batch_std_type, x_type, batch_mean_type, batch_std_type, + global_step_type): + validator.check("input type", x_type, + "d_batch_mean type", d_batch_mean_type) + validator.check("input type", x_type, + "d_batch_std type", d_batch_std_type) + validator.check("input type", x_type, + "batch_mean type", batch_mean_type) + validator.check("input type", x_type, "batch_std type", batch_std_type) + validator.check_typename("input type", x_type, + (mstype.float16, mstype.float32)) + validator.check_typename( + "global_step type", global_step_type, (mstype.int32,)) + return x_type + + +class CorrectionMul(PrimitiveWithInfer): + """ + Scale the weights with a correction factor to the long term statistics + prior to 
quantization. This ensures that there is no jitter in the quantized weights + due to batch to batch variation. + + Inputs: + - **x** (Tensor) - Tensor of shape :math:`(N, C)`. + - **batch_std** (Tensor) - Tensor of shape :math:`(C,)`. + - **running_std** (Tensor) - Tensor of shape :math:`(C,)`. + + Outputs: + - **out** (Tensor) - Tensor has the same shape as x. + + """ + channel = 0 + + @prim_attr_register + def __init__(self): + """init correction mul layer""" + self.init_prim_io_names(inputs=['x', 'batch_std', 'running_std'], + outputs=['out']) + + def infer_shape(self, x_shape, batch_std_shape, running_std_shape): + validator.check("batch_std shape", batch_std_shape, + "running_std shape", running_std_shape) + validator.check( + "batch_std size", batch_std_shape[0], "x_shape channel size", x_shape[self.channel]) + return x_shape + + def infer_dtype(self, x_type, batch_std_type, running_std_type): + validator.check("batch_std type", batch_std_type, + "running_std type", running_std_type) + validator.check("batch_std_type", batch_std_type, "x_type", x_type) + validator.check_typename( + "batch_std type", batch_std_type, (mstype.float16, mstype.float32)) + return x_type + + +class CorrectionMulGrad(PrimitiveWithInfer): + """Performs grad of CorrectionMul operation.""" + channel = 0 + + @prim_attr_register + def __init__(self): + """init correction mul layer""" + self.init_prim_io_names(inputs=['dout', 'x', 'gamma', 'running_std'], + outputs=['dx', 'd_gamma']) + + def infer_shape(self, dout_shape, x_shape, gamma_shape, running_std_shape): + validator.check("dout shape", dout_shape, "x_shape x", x_shape) + validator.check( + "gamma size", gamma_shape[0], "dout channel size", dout_shape[self.channel]) + validator.check( + "running_std size", running_std_shape[0], "dout channel size", dout_shape[self.channel]) + return x_shape, gamma_shape + + def infer_dtype(self, dout_type, x_type, gamma_type, running_std_type): + validator.check("x type", x_type, "dout type", 
dout_type) + validator.check("gamma type", gamma_type, "dout type", dout_type) + validator.check("running_std type", running_std_type, + "dout type", dout_type) + validator.check_typename( + "dout type", dout_type, (mstype.float16, mstype.float32)) + return x_type, x_type + + +class BatchNormFold2(PrimitiveWithInfer): + """ + Scale the bias with a correction factor to the long term statistics + prior to quantization. This ensures that there is no jitter in the quantized bias + due to batch to batch variation. + + Inputs: + - **x** (Tensor) - Tensor of shape :math:`(N, C)`. + - **beta** (Tensor) - Tensor of shape :math:`(C,)`. + - **gamma** (Tensor) - Tensor of shape :math:`(C,)`. + - **batch_std** (Tensor) - Tensor of shape :math:`(C,)`. + - **batch_mean** (Tensor) - Tensor of shape :math:`(C,)`. + - **running_std** (Tensor) - Tensor of shape :math:`(C,)`. + - **running_mean** (Tensor) - Tensor of shape :math:`(C,)`. + - **global_step** (Tensor) - Tensor to record current global step. + + Outputs: + - **y** (Tensor) - Tensor has the same shape as x. 
+ + """ + channel = 1 + + @prim_attr_register + def __init__(self, freeze_bn=0): + """init conv2d fold layer""" + self.freeze_bn = check_int(freeze_bn) + self.init_prim_io_names(inputs=['x', 'beta', 'gamma', 'batch_std', 'batch_mean', + 'running_std', 'running_mean', 'global_step'], + outputs=['y']) + + def infer_shape(self, x_shape, beta_shape, gamma_shape, batch_std_shape, running_std_shape, batch_mean_shape, + running_mean_shape, global_step_shape): + validator.check("batch_std shape", batch_std_shape, + "running_std shape", running_std_shape) + validator.check("batch_std shape", batch_std_shape, + "batch_mean shape", batch_mean_shape) + validator.check("batch_std shape", batch_std_shape, + "beta shape", beta_shape) + validator.check("batch_std shape", batch_std_shape, + "running_mean shape", running_mean_shape) + validator.check("batch_std shape", batch_std_shape, + "batch_mean shape", gamma_shape) + validator.check( + "batch_std size", batch_std_shape[0], "x_shape channel size", x_shape[self.channel]) + validator.check_integer("global_step shape", + len(global_step_shape), 1, Rel.EQ) + return x_shape + + def infer_dtype(self, x_type, beta_type, gamma_type, batch_std_type, running_std_type, batch_mean_type, + running_mean_type, global_step_type): + validator.check("batch_std type", batch_std_type, + "running_std type", running_std_type) + validator.check("batch_std type", batch_std_type, + "batch_mean type", batch_mean_type) + validator.check("batch_std type", batch_std_type, + "beta type", beta_type) + validator.check("batch_std type", batch_std_type, + "running_mean type", running_mean_type) + validator.check("batch_std type", batch_std_type, + "gamma type", gamma_type) + validator.check("x_type", x_type, "batch_std type", batch_std_type) + validator.check_typename( + "batch_std type", batch_std_type, (mstype.float16, mstype.float32)) + validator.check_typename( + "global_step type", global_step_type, (mstype.int32,)) + return x_type + + +class 
BatchNormFold2Grad(PrimitiveWithInfer): + """Performs grad of CorrectionAddGrad operation.""" + channel = 1 + + @prim_attr_register + def __init__(self, freeze_bn=0): + """init MulFold layer""" + self.freeze_bn = freeze_bn + self.init_prim_io_names(inputs=['dout', 'x', 'gamma', + 'batch_std', 'batch_mean', + 'running_std', 'running_mean', 'global_step'], + outputs=['d_batch_std', 'd_batch_mean', 'd_beta', 'd_gamma', 'dx']) + + def infer_shape(self, dout_shape, x_shape, gamma_shape, + batch_std_shape, batch_mean_shape, + running_std_shape, running_mean_shape, global_step_shape): + validator.check("batch_std shape", batch_std_shape, + "batch_mean shape", batch_mean_shape) + validator.check("batch_std shape", batch_std_shape, + "running_std shape", running_std_shape) + validator.check("batch_std shape", batch_std_shape, + "running_mean shape", running_mean_shape) + validator.check("batch_std shape", batch_std_shape, + "gamma shape", gamma_shape) + validator.check( + "batch_std size", batch_std_shape[0], "dout channel size", dout_shape[self.channel]) + validator.check_integer("global_step shape", + len(global_step_shape), 1, Rel.EQ) + return gamma_shape, gamma_shape, gamma_shape, gamma_shape, x_shape + + def infer_dtype(self, dout_type, x_type, gamma_type, + batch_std_type, batch_mean_type, + running_std_type, running_mean_type, global_step_type): + validator.check("batch_std type", batch_std_type, + "batch_mean type", batch_mean_type) + validator.check("batch_std type", batch_std_type, + "gamma type", gamma_type) + validator.check("batch_std type", batch_std_type, + "running_std type", running_std_type) + validator.check("batch_std type", batch_std_type, + "running_mean type", running_mean_type) + validator.check("batch_std_type", batch_std_type, + "dout type", dout_type) + validator.check_typename( + "batch_std type", batch_std_type, (mstype.float16, mstype.float32)) + validator.check_typename( + "global_step type", global_step_type, (mstype.int32,)) + return 
gamma_type, gamma_type, gamma_type, gamma_type, gamma_type diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 83f76455e0..3a3ea2c3e0 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -207,7 +207,7 @@ class ReLU6(PrimitiveWithInfer): class Elu(PrimitiveWithInfer): - """ + r""" Computes exponential linear: `alpha * (exp(x) - 1)` if x < 0, `x` otherwise. The data type of input tensor should be float. @@ -242,6 +242,40 @@ class Elu(PrimitiveWithInfer): return input_x +class HSwish(PrimitiveWithInfer): + r""" + Hard swish activation function. + + Applies hswish-type activation element-wise. The input is a Tensor with any valid shape. + + Hard swish is defined as: + + .. math:: + \text{hswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6}, + + where :math:`x_{i}` is the :math:`i`-th slice along the given dim of the input Tensor. + + Inputs: + - **input_data** (Tensor) - The input of Hswish. + + Outputs: + Tensor, with the same type and shape as the `input_data`. + + """ + @prim_attr_register + def __init__(self): + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, xshape): + return xshape + + def infer_dtype(self, x_dtype): + validator.check_subclass("x_dtype", x_dtype, mstype.tensor) + validator.check_typename("x_dtype", x_dtype, (mstype.float16, mstype.float32)) + return x_dtype + + + class Sigmoid(PrimitiveWithInfer): r""" Sigmoid activation function. @@ -258,6 +292,7 @@ class Sigmoid(PrimitiveWithInfer): Outputs: Tensor, with the same type and shape as the input_x. + """ @prim_attr_register @@ -273,6 +308,40 @@ class Sigmoid(PrimitiveWithInfer): return input_x +class HSigmoid(PrimitiveWithInfer): + r""" + Hard sigmoid activation function. + + Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape. + + Hard sigmoid is defined as: + + .. 
math:: + \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{2 * x_{i} + 5}{10})), + where :math:`x_{i}` is the :math:`i`-th slice along the given dim of the input Tensor. + + Inputs: + - **input_data** (Tensor) - The input of HSigmoid. + + Outputs: + Tensor, with the same type and shape as the `input_data`. + + """ + + @prim_attr_register + def __init__(self): + self.init_prim_io_names(inputs=['x'], outputs=['output']) + + def infer_shape(self, x_shape): + return x_shape + + def infer_dtype(self, x_dtype): + validator.check_subclass("x_dtype", x_dtype, mstype.tensor) + validator.check_typename("x_dtype", x_dtype, (mstype.float16, mstype.float32)) + return x_dtype + + class Tanh(PrimitiveWithInfer): r""" Tanh activation function. diff --git a/tests/ut/python/nn/test_dense.py b/tests/ut/python/nn/test_dense.py index 8581576c6b..0845983bb0 100644 --- a/tests/ut/python/nn/test_dense.py +++ b/tests/ut/python/nn/test_dense.py @@ -27,11 +27,6 @@ def test_dense_none(): nn.Dense(3, 2, None, None) -def test_dense_invalid_activation(): - with pytest.raises(KeyError): - nn.Dense(3, 2, activation='relu6') - - @non_graph_engine def test_dense_str_activation(): dense = nn.Dense(1, 1, activation='relu') diff --git a/tests/ut/python/pynative_mode/nn/test_activation.py b/tests/ut/python/pynative_mode/nn/test_activation.py index 7230fa272b..1b8a6f5d76 100644 --- a/tests/ut/python/pynative_mode/nn/test_activation.py +++ b/tests/ut/python/pynative_mode/nn/test_activation.py @@ -51,11 +51,6 @@ def test_activation_empty(): assert nn.get_activation('') is None -def test_activation_invalid(): - with pytest.raises(KeyError): - nn.get_activation('relu6') - - # test softmax def test_softmax_axis(): layer = nn.Softmax(1) diff --git a/tests/ut/python/pynative_mode/nn/test_dense.py b/tests/ut/python/pynative_mode/nn/test_dense.py index 48bfcc6674..cc9d280521 100644 --- a/tests/ut/python/pynative_mode/nn/test_dense.py +++ b/tests/ut/python/pynative_mode/nn/test_dense.py @@ -68,11 +68,6 @@ def 
test_dense_none(): nn.Dense(3, 2, None, None) -def test_dense_invalid_activation(): - with pytest.raises(KeyError): - nn.Dense(3, 2, activation='relu6') - - def test_dense_str_activation(): dense = nn.Dense(1, 1, activation='relu') assert isinstance(dense.activation, nn.ReLU) From 19daf09f882ae57a87cd5b12aff3cccd23fa4853 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Mon, 13 Apr 2020 11:54:32 +0800 Subject: [PATCH 183/367] output sha256 file --- package.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/package.sh b/package.sh index f57cb7f104..056ca8c25d 100755 --- a/package.sh +++ b/package.sh @@ -121,6 +121,7 @@ PACKAGE_BASE_NAME=${PACKAGE_BASE_NAME//_*-/-} PACKAGE_NEW_NAME="${PACKAGE_BASE_NAME}-${PY_TAGS}-${PLATFORM_TAG}.whl" cp -rf "${PACKAGE_PATH}/dist"/*.whl "${PACKAGE_PATH}/${PACKAGE_NEW_NAME}" cp -f "${PACKAGE_PATH}/${PACKAGE_NEW_NAME}" "${OUTPUT_PATH}" +find ${OUTPUT_PATH} -name "*.whl" -print0 | xargs -0 -I {} sh -c "sha256sum {} | awk '{printf \$1}' > {}.sha256" cd "${BASEPATH}" From 005e2020836efc939d288cee42f332eac5274e32 Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Mon, 13 Apr 2020 13:21:24 +0800 Subject: [PATCH 184/367] fix print content of mindspore.string --- mindspore/ccsrc/ir/dtype.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ccsrc/ir/dtype.h b/mindspore/ccsrc/ir/dtype.h index 11099e460e..e3e2099b5e 100644 --- a/mindspore/ccsrc/ir/dtype.h +++ b/mindspore/ccsrc/ir/dtype.h @@ -51,7 +51,7 @@ class String : public Object { TypeId generic_type_id() const override { return kObjectTypeString; } TypePtr DeepCopy() const override { return std::make_shared(); } - std::string ToString() const override { return std::string("String:"); } + std::string ToString() const override { return std::string("String"); } std::string ToReprString() const override { return "string"; } std::string DumpText() const override { return "String"; } }; From b8a7e73f7d3a240c299244dbf0c14ecf320cd6f2 Mon Sep 17 00:00:00 
2001 From: kswang Date: Mon, 13 Apr 2020 14:20:58 +0800 Subject: [PATCH 185/367] fix reshape output and clearres error --- .../device/ascend/ascend_kernel_runtime.cc | 6 +- .../ccsrc/device/gpu/gpu_kernel_runtime.cc | 5 +- .../ccsrc/session/anf_runtime_algorithm.cc | 80 +++++-------------- .../ccsrc/session/anf_runtime_algorithm.h | 2 + 4 files changed, 28 insertions(+), 65 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc index 0c6861e21f..baed733a5b 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc @@ -85,8 +85,10 @@ void AscendKernelRuntime::ReleaseDeviceRes() { MS_EXCEPTION(DeviceProcessError) << "rtSetDevice, ret[" << static_cast(ret) << "]"; } - MS_EXCEPTION_IF_NULL(mem_manager_); - mem_manager_->FreeDeviceMemory(); + if (mem_manager_ != nullptr) { + mem_manager_->FreeDeviceMemory(); + } + (void)DestroyHccl(); (void)ResetDevice(); (void)ProfilingManager::GetInstance().StopProfiling(); diff --git a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc index 2ec1a5df29..671a11f776 100644 --- a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc +++ b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc @@ -101,8 +101,9 @@ void GPUKernelRuntime::ReleaseDeviceRes() { CHECK_OP_RET_WITH_EXCEPT(GpuBufferMgr::GetInstance().Destroy(), "Could not destroy gpu data queue."); } GPUDeviceManager::GetInstance().ReleaseDevice(); - MS_EXCEPTION_IF_NULL(mem_manager_); - mem_manager_->FreeDeviceMemory(); + if (mem_manager_ != nullptr) { + mem_manager_->FreeDeviceMemory(); + } } void GPUKernelRuntime::AssignMemory(session::KernelGraph *graph) { diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/session/anf_runtime_algorithm.cc index 78922448af..893c379a07 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.cc +++ 
b/mindspore/ccsrc/session/anf_runtime_algorithm.cc @@ -112,6 +112,12 @@ KernelWithIndex AnfRuntimeAlgorithm::VisitKernelWithReturnType(const AnfNodePtr return VisitKernelWithReturnType(cnode->input(kRealInputNodeIndexInTupleGetItem), IntToSize(item_idx)); } else if (IsPrimitive(input0, prim::kPrimDepend) || IsPrimitive(input0, prim::kPrimControlDepend)) { return VisitKernelWithReturnType(cnode->input(kRealInputIndexInDepend), 0); + } else if (opt::IsNopNode(cnode)) { + if (cnode->inputs().size() == 2) { + return VisitKernelWithReturnType(cnode->input(1), 0); + } else { + MS_LOG(EXCEPTION) << cnode->DebugString() << "Invalid nop node"; + } } else { return std::make_pair(anf_node, index); } @@ -299,20 +305,23 @@ std::string AnfRuntimeAlgorithm::GetInputFormat(const AnfNodePtr &node, size_t i return build_info->GetInputFormat(input_idx); } -std::string AnfRuntimeAlgorithm::GetPrevNodeOutputFormat(const AnfNodePtr &anf_node, size_t input_idx) { +KernelWithIndex AnfRuntimeAlgorithm::GetPrevNodeOutput(const AnfNodePtr &anf_node, size_t input_idx) { MS_EXCEPTION_IF_NULL(anf_node); if (!anf_node->isa()) { - MS_LOG(EXCEPTION) << "anf_node is not CNode."; + MS_LOG(EXCEPTION) << anf_node->DebugString() << "anf_node is not CNode."; } auto cnode = anf_node->cast(); MS_EXCEPTION_IF_NULL(cnode); if (input_idx + 1 >= cnode->inputs().size()) { - MS_LOG(EXCEPTION) << "Input index " << input_idx << " is larger than input number " << GetInputTensorNum(cnode) - << "."; + MS_LOG(EXCEPTION) << "Input index " << input_idx << " is larger than input number " << GetInputTensorNum(cnode); } auto node = cnode->input(input_idx + 1); MS_EXCEPTION_IF_NULL(node); - KernelWithIndex kernel_with_index = VisitKernel(node, 0); + return VisitKernel(node, 0); +} + +std::string AnfRuntimeAlgorithm::GetPrevNodeOutputFormat(const AnfNodePtr &anf_node, size_t input_idx) { + KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(anf_node, input_idx); return 
AnfRuntimeAlgorithm::GetOutputFormat(kernel_with_index.first, kernel_with_index.second); } @@ -346,18 +355,7 @@ std::vector AnfRuntimeAlgorithm::GetOutputInferShape(const AnfNodePtr &n } std::vector AnfRuntimeAlgorithm::GetPrevNodeOutputInferShape(const AnfNodePtr &node, size_t input_idx) { - MS_EXCEPTION_IF_NULL(node); - if (!node->isa()) { - MS_LOG(EXCEPTION) << "anf_node is not CNode."; - } - auto cnode = node->cast(); - MS_EXCEPTION_IF_NULL(cnode); - if (input_idx + 1 >= cnode->inputs().size()) { - MS_LOG(EXCEPTION) << "Input index " << input_idx << " is larger than input number " << GetInputTensorNum(cnode) - << "."; - } - auto input_node = cnode->input(input_idx + 1); - KernelWithIndex kernel_with_index = VisitKernel(input_node, 0); + KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(node, input_idx); return AnfRuntimeAlgorithm::GetOutputInferShape(kernel_with_index.first, kernel_with_index.second); } @@ -459,17 +457,7 @@ TypeId AnfRuntimeAlgorithm::GetOutputInferDataType(const AnfNodePtr &node, size_ } TypeId AnfRuntimeAlgorithm::GetPrevNodeOutputInferDataType(const AnfNodePtr &node, size_t input_idx) { - MS_EXCEPTION_IF_NULL(node); - if (!node->isa()) { - MS_LOG(EXCEPTION) << node->DebugString() << "is not a CNode"; - } - auto cnode = node->cast(); - MS_EXCEPTION_IF_NULL(cnode); - if (input_idx + 1 >= cnode->inputs().size()) { - MS_LOG(EXCEPTION) << "Input index " << input_idx << " is larger than input number " << GetInputTensorNum(cnode); - } - auto input_node = cnode->input(input_idx + 1); - KernelWithIndex kernel_with_index = VisitKernel(input_node, 0); + KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(node, input_idx); return AnfRuntimeAlgorithm::GetOutputInferDataType(kernel_with_index.first, kernel_with_index.second); } @@ -492,17 +480,7 @@ TypeId AnfRuntimeAlgorithm::GetInputDeviceDataType(const AnfNodePtr &node, size_ } TypeId AnfRuntimeAlgorithm::GetPrevNodeOutputDeviceDataType(const AnfNodePtr &anf_node, size_t 
input_idx) { - if (!anf_node->isa()) { - MS_LOG(EXCEPTION) << anf_node->DebugString() << "anf_node is not CNode."; - } - auto cnode = anf_node->cast(); - MS_EXCEPTION_IF_NULL(cnode); - if (input_idx + 1 >= cnode->inputs().size()) { - MS_LOG(EXCEPTION) << "Input index " << input_idx << " is larger than input number " << GetInputTensorNum(cnode); - } - auto node = cnode->input(input_idx + 1); - MS_EXCEPTION_IF_NULL(node); - KernelWithIndex kernel_with_index = VisitKernel(node, 0); + KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(anf_node, input_idx); return AnfRuntimeAlgorithm::GetOutputDeviceDataType(kernel_with_index.first, kernel_with_index.second); } @@ -558,32 +536,12 @@ bool AnfRuntimeAlgorithm::OutputAddrExist(const AnfNodePtr &node, size_t output_ } const DeviceAddress *AnfRuntimeAlgorithm::GetPrevNodeOutputAddr(const AnfNodePtr &anf_node, size_t input_idx) { - if (!anf_node->isa()) { - MS_LOG(EXCEPTION) << anf_node->DebugString() << "anf node is not a CNode"; - } - auto cnode = anf_node->cast(); - MS_EXCEPTION_IF_NULL(cnode); - if (input_idx + 1 >= cnode->inputs().size()) { - MS_LOG(EXCEPTION) << "Input index " << input_idx << " is larger than input number " << GetInputTensorNum(cnode); - } - auto node = cnode->input(input_idx + 1); - MS_EXCEPTION_IF_NULL(node); - KernelWithIndex kernel_with_index = VisitKernel(node, 0); + KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(anf_node, input_idx); return AnfRuntimeAlgorithm::GetOutputAddr(kernel_with_index.first, kernel_with_index.second); } DeviceAddressPtr AnfRuntimeAlgorithm::GetPrevNodeMutableOutputAddr(const AnfNodePtr &anf_node, size_t input_idx) { - if (!anf_node->isa()) { - MS_LOG(EXCEPTION) << anf_node->DebugString() << "anf_node is not CNode."; - } - auto cnode = anf_node->cast(); - MS_EXCEPTION_IF_NULL(cnode); - if (input_idx + 1 >= cnode->inputs().size()) { - MS_LOG(EXCEPTION) << "Input index " << input_idx << " is larger than input number " << GetInputTensorNum(cnode); - 
} - auto node = cnode->input(input_idx + 1); - MS_EXCEPTION_IF_NULL(node); - KernelWithIndex kernel_with_index = VisitKernel(node, 0); + KernelWithIndex kernel_with_index = AnfAlgo::GetPrevNodeOutput(anf_node, input_idx); return AnfRuntimeAlgorithm::GetMutableOutputAddr(kernel_with_index.first, kernel_with_index.second); } diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.h b/mindspore/ccsrc/session/anf_runtime_algorithm.h index 55650ac31e..1a1d471b84 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.h +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.h @@ -89,6 +89,8 @@ class AnfRuntimeAlgorithm { static std::string GetOutputFormat(const AnfNodePtr &node, size_t output_idx); // get input format select of anf node static std::string GetInputFormat(const AnfNodePtr &node, size_t input_idx); + // get prev node output width output index + static KernelWithIndex GetPrevNodeOutput(const AnfNodePtr &anf_node, size_t input_idx); // get output format from prev node,input_index is the input index of current node related to prev node static std::string GetPrevNodeOutputFormat(const AnfNodePtr &node, size_t input_idx); // get output shapes inferred by ME from input nodes. 
From 7c19a4c2fd03c8c67d35626ac821f6bdc4baa814 Mon Sep 17 00:00:00 2001 From: zjun Date: Mon, 13 Apr 2020 09:51:44 +0800 Subject: [PATCH 186/367] fix getnext op bug --- .../ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc index 6675051069..9f91c1bdd2 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc @@ -38,12 +38,14 @@ void AicpuMetadataInfo(const CNodePtr &kernel_node, std::vector inputs_format; - std::vector inputs_type; - for (size_t input_index = 0; input_index < AnfAlgo::GetInputTensorNum(kernel_node); ++input_index) { - inputs_format.emplace_back(kOpFormat_DEFAULT); - inputs_type.push_back(AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, input_index)); + if (op_name == kPrint || op_name == kGetNext) { + std::vector inputs_format{}; + std::vector inputs_type{}; + if (op_name == kPrint) { + for (size_t input_index = 0; input_index < AnfAlgo::GetInputTensorNum(kernel_node); ++input_index) { + inputs_format.emplace_back(kOpFormat_DEFAULT); + inputs_type.push_back(AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, input_index)); + } } std::vector outputs_format; std::vector outputs_type; From 0db45147a4e7a07e69147b0f0efa84fba79d445a Mon Sep 17 00:00:00 2001 From: buxue Date: Thu, 9 Apr 2020 21:50:31 +0800 Subject: [PATCH 187/367] dock relu6 for open source process and fix pow bprop --- mindspore/ccsrc/kernel/tbe/tbe_adapter.cc | 2 ++ mindspore/ops/_grad/grad_math_ops.py | 6 ++-- mindspore/ops/_op_impl/tbe/__init__.py | 2 ++ mindspore/ops/_op_impl/tbe/relu6.py | 40 +++++++++++++++++++++ mindspore/ops/_op_impl/tbe/relu6_grad.py | 43 +++++++++++++++++++++++ 5 files changed, 90 insertions(+), 3 deletions(-) create mode 100644 mindspore/ops/_op_impl/tbe/relu6.py create mode 100644 
mindspore/ops/_op_impl/tbe/relu6_grad.py diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc index 229a3eb34a..5336d1e67f 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc @@ -30,6 +30,8 @@ namespace mindspore { namespace kernel { namespace tbe { static std::map tbe_func_adapter_map = { + {"re_lu6", "relu6"}, + {"re_lu6_grad", "relu6_grad"}, {"re_lu", "relu"}, {"tensor_add", "add"}, {"reduce_mean", "reduce_mean_d"}, diff --git a/mindspore/ops/_grad/grad_math_ops.py b/mindspore/ops/_grad/grad_math_ops.py index 81e078dc98..2d819718c8 100755 --- a/mindspore/ops/_grad/grad_math_ops.py +++ b/mindspore/ops/_grad/grad_math_ops.py @@ -340,9 +340,9 @@ def get_bprop_pow(self): ln = P.Log() def bprop(x, power, out, dout): - dx = power * pow_op(x, power - 1.0) * dout - dpower = pow_op(x, power) * ln(x) * dout - return dx, dpower + bc_dx = power * pow_op(x, power - 1.0) * dout + bc_dpower = out * ln(x) * dout + return binop_grad_common(x, power, bc_dx, bc_dpower) return bprop diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py index 0b79ae845b..9ec5446165 100644 --- a/mindspore/ops/_op_impl/tbe/__init__.py +++ b/mindspore/ops/_op_impl/tbe/__init__.py @@ -42,6 +42,8 @@ from .mul import _mul_tbe from .real_div import _real_div_tbe from .relu import _relu_tbe from .relu_grad import _relu_grad_tbe +from .relu6 import _relu6_tbe +from .relu6_grad import _relu6_grad_tbe from .softmax_cross_entropy_with_logits import _softmax_cross_entropy_with_logits_tbe from .sigmoid_cross_entropy_with_logits import _sigmoid_cross_entropy_with_logits_tbe from .sigmoid_cross_entropy_with_logits_grad import _sigmoid_cross_entropy_with_logits_grad_tbe diff --git a/mindspore/ops/_op_impl/tbe/relu6.py b/mindspore/ops/_op_impl/tbe/relu6.py new file mode 100644 index 0000000000..bbedfdeb0f --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/relu6.py @@ -0,0 +1,40 @@ +# 
Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""ReLU6 op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +relu6_op_info = TBERegOp("ReLU6") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("relu6.so") \ + .compute_cost(10) \ + .kernel_name("relu6") \ + .partial_flag(True) \ + .input(0, "features", False, "required", "all") \ + .output(0, "activations", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .get_op_info() + + +@op_info_register(relu6_op_info) +def _relu6_tbe(): + """Relu6 TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/relu6_grad.py b/mindspore/ops/_op_impl/tbe/relu6_grad.py new file mode 100644 index 0000000000..eaf3449fe7 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/relu6_grad.py @@ -0,0 +1,43 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""ReLU6Grad op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +relu6_grad_op_info = TBERegOp("ReLU6Grad") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("relu6_grad.so") \ + .compute_cost(10) \ + .kernel_name("relu6_grad") \ + .partial_flag(True) \ + .input(0, "gradients", False, "required", "all") \ + .input(1, "features", False, "required", "all") \ + .output(0, "backprops", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \ + .get_op_info() + + +@op_info_register(relu6_grad_op_info) +def _relu6_grad_tbe(): + """Relu6Grad TBE register""" + return From 7307c81f3144f51fb51adc3da18b17ba05f3800a Mon Sep 17 00:00:00 2001 From: YuJianfeng Date: Thu, 9 Apr 2020 11:34:03 +0800 Subject: [PATCH 188/367] implement AddN fission pass --- .../ascend/ascend_backend_optimization.cc | 2 + 
.../ascend/ir_fission/addn_fission.cc | 81 +++++++++ .../ascend/ir_fission/addn_fission.h | 37 ++++ mindspore/ccsrc/utils/utils.h | 1 + .../ascend/ir_fission/addn_fission_test.cc | 160 ++++++++++++++++++ .../pre_activate/addn_fission_test.py | 80 +++++++++ 6 files changed, 361 insertions(+) create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fission/addn_fission.cc create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fission/addn_fission.h create mode 100644 tests/ut/cpp/pre_activate/ascend/ir_fission/addn_fission_test.cc create mode 100644 tests/ut/cpp/python_input/gtest_input/pre_activate/addn_fission_test.py diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc index 432d88e7a4..6ede069eb3 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc @@ -58,6 +58,7 @@ #include "pre_activate/ascend/ir_fission/add_memcpy_async.h" #include "pre_activate/ascend/format_type/insert_cast_for_runop.h" #include "pre_activate/ascend/format_type/insert_transdata_for_runop.h" +#include "pre_activate/ascend/ir_fission/addn_fission.h" #include "utils/context/ms_context.h" #include "debug/anf_ir_dump.h" #include "debug/anf_ir_utils.h" @@ -175,6 +176,7 @@ void AscendBackendIRFusionOptimization(const std::shared_ptrAddPass(std::make_shared()); ir_fusion_pm->AddPass(std::make_shared()); ir_fusion_pm->AddPass(std::make_shared()); + ir_fusion_pm->AddPass(std::make_shared()); ir_fusion_pm->AddPass(std::make_shared()); ir_fusion_pm->AddPass(std::make_shared()); } diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fission/addn_fission.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fission/addn_fission.cc new file mode 100644 index 0000000000..f6eb6aca64 --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fission/addn_fission.cc @@ -0,0 +1,81 @@ +/** + * Copyright 2020 Huawei 
Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "pre_activate/ascend/ir_fission/addn_fission.h" +#include +#include +#include "session/anf_runtime_algorithm.h" + +namespace mindspore { +namespace opt { +namespace { +AnfNodePtr CreateNewAddn(const FuncGraphPtr &func_graph, const CNodePtr &origin_addn_cnode, size_t begin_index, + size_t offset) { + MS_EXCEPTION_IF_NULL(func_graph); + MS_EXCEPTION_IF_NULL(origin_addn_cnode); + std::vector new_addn_inputs{NewValueNode(std::make_shared(prim::kPrimAddN->name()))}; + for (size_t i = begin_index; i < begin_index + offset; ++i) { + new_addn_inputs.push_back(origin_addn_cnode->input(i)); + } + CNodePtr new_addn = func_graph->NewCNode(new_addn_inputs); + MS_EXCEPTION_IF_NULL(new_addn); + new_addn->set_scope(origin_addn_cnode->scope()); + new_addn->set_abstract(origin_addn_cnode->abstract()); + AnfAlgo::SetNodeAttr(kAttrN, MakeValue(SizeToInt(offset)), new_addn); + return new_addn; +} +} // namespace + +const BaseRef AddnFission::DefinePattern() const { + VarPtr Xs = std::make_shared(); + return VectorRef({prim::kPrimAddN, Xs}); +} + +const AnfNodePtr AddnFission::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node, const EquivPtr &) const { + MS_EXCEPTION_IF_NULL(func_graph); + MS_EXCEPTION_IF_NULL(node); + auto cnode = node->cast(); + MS_EXCEPTION_IF_NULL(cnode); + // The real input begins with index 1. 
+ size_t origin_input_size = cnode->inputs().size() - 1; + if (origin_input_size <= inputs_divisor_) { + return nullptr; + } + CNodePtr new_cnode = cnode; + while (origin_input_size > inputs_divisor_) { + std::vector base_addn_inputs{NewValueNode(std::make_shared(prim::kPrimAddN->name()))}; + size_t cur_input_index = 1; + // Divide the inputs of addn by 63. + while (origin_input_size - cur_input_index + 1 > inputs_divisor_) { + base_addn_inputs.push_back(CreateNewAddn(func_graph, new_cnode, cur_input_index, inputs_divisor_)); + cur_input_index += inputs_divisor_; + } + base_addn_inputs.push_back( + CreateNewAddn(func_graph, new_cnode, cur_input_index, origin_input_size - cur_input_index + 1)); + + CNodePtr base_addn = func_graph->NewCNode(base_addn_inputs); + MS_EXCEPTION_IF_NULL(base_addn); + MS_EXCEPTION_IF_NULL(new_cnode); + base_addn->set_scope(new_cnode->scope()); + base_addn->set_abstract(new_cnode->abstract()); + AnfAlgo::SetNodeAttr(kAttrN, MakeValue(SizeToInt(base_addn_inputs.size() - 1)), base_addn); + new_cnode = base_addn; + origin_input_size = base_addn->inputs().size() - 1; + } + + return new_cnode; +} +} // namespace opt +} // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fission/addn_fission.h b/mindspore/ccsrc/pre_activate/ascend/ir_fission/addn_fission.h new file mode 100644 index 0000000000..3c62391f9a --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fission/addn_fission.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_ADDN_FISSION_H_ +#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_ADDN_FISSION_H_ + +#include "pre_activate/common/optimizer.h" + +namespace mindspore { +namespace opt { +constexpr size_t kAddnInputsDivisor = 63; +class AddnFission : public PatternProcessPass { + public: + explicit AddnFission(bool multigraph = true) + : PatternProcessPass("addn_fission", multigraph), inputs_divisor_(kAddnInputsDivisor) {} + ~AddnFission() override = default; + const BaseRef DefinePattern() const override; + const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; + + private: + size_t inputs_divisor_; +}; +} // namespace opt +} // namespace mindspore +#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FISSION_ADDN_FISSION_H_ diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index ea5e969e52..2b35168ec3 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -142,6 +142,7 @@ constexpr auto kAttrDynInputSizes = "dyn_input_sizes"; constexpr auto kAttrSrcFormat = "src_format"; constexpr auto kAttrOutputUsedNum = "output_used_num"; constexpr auto kAttrHasBias = "has_bias"; +constexpr auto kAttrN = "N"; // attr value constexpr auto kValueTargetSwitch = "target_switch"; diff --git a/tests/ut/cpp/pre_activate/ascend/ir_fission/addn_fission_test.cc b/tests/ut/cpp/pre_activate/ascend/ir_fission/addn_fission_test.cc new file mode 100644 index 0000000000..90174636b1 --- /dev/null +++ b/tests/ut/cpp/pre_activate/ascend/ir_fission/addn_fission_test.cc @@ -0,0 +1,160 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common/backend_common_test.h" +#include "common/py_func_graph_fetcher.h" +#define private public +#define protected public +#include "pre_activate/ascend/ir_fission/addn_fission.h" +#undef private +#undef protected + +namespace mindspore { +namespace opt { +class TestHWAddnFission : public BackendCommon { + public: + TestHWAddnFission() : get_py_fun_("gtest_input.pre_activate.addn_fission_test", true) {} + ~TestHWAddnFission() override = default; + + UT::PyFuncGraphFetcher get_py_fun_; +}; + +TEST_F(TestHWAddnFission, test_addn_fission_divided_by_2) { + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_addn_fission", "before"); + EXPECT_NE(g, nullptr); + std::vector shp{2, 32, 224, 224}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 9; ++i) { + args_spec_list.push_back(x_abstract); + } + auto kg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + auto addn_fission = std::make_shared(); + addn_fission->inputs_divisor_ = 2; + pm->AddPass(addn_fission); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(kg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_addn_fission", "after_divided_by_2"); + EXPECT_NE(g_after, nullptr); + auto kg_after = GetKernelGraph(g_after, args_spec_list); + EXPECT_TRUE(CheckEqualGraph(kg_after, new_graph)); +} + +TEST_F(TestHWAddnFission, test_addn_fission_divided_by_3) { + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_addn_fission", 
"before"); + EXPECT_NE(g, nullptr); + std::vector shp{2, 32, 224, 224}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 9; ++i) { + args_spec_list.push_back(x_abstract); + } + auto kg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + auto addn_fission = std::make_shared(); + addn_fission->inputs_divisor_ = 3; + pm->AddPass(addn_fission); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(kg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_addn_fission", "after_divided_by_3"); + EXPECT_NE(g_after, nullptr); + auto kg_after = GetKernelGraph(g_after, args_spec_list); + EXPECT_TRUE(CheckEqualGraph(kg_after, new_graph)); +} + +TEST_F(TestHWAddnFission, test_addn_fission_divided_by_4) { + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_addn_fission", "before"); + EXPECT_NE(g, nullptr); + std::vector shp{2, 32, 224, 224}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 9; ++i) { + args_spec_list.push_back(x_abstract); + } + auto kg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + auto addn_fission = std::make_shared(); + addn_fission->inputs_divisor_ = 4; + pm->AddPass(addn_fission); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(kg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_addn_fission", "after_divided_by_4"); + EXPECT_NE(g_after, nullptr); + auto kg_after = GetKernelGraph(g_after, args_spec_list); + EXPECT_TRUE(CheckEqualGraph(kg_after, new_graph)); +} + +TEST_F(TestHWAddnFission, test_addn_fission_divided_by_8) { + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_addn_fission", "before"); + EXPECT_NE(g, nullptr); + std::vector shp{2, 32, 224, 224}; + auto x_abstract = std::make_shared(kFloat32, shp); + 
AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 9; ++i) { + args_spec_list.push_back(x_abstract); + } + auto kg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + auto addn_fission = std::make_shared(); + addn_fission->inputs_divisor_ = 8; + pm->AddPass(addn_fission); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(kg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_addn_fission", "after_divided_by_8"); + EXPECT_NE(g_after, nullptr); + auto kg_after = GetKernelGraph(g_after, args_spec_list); + EXPECT_TRUE(CheckEqualGraph(kg_after, new_graph)); +} + +TEST_F(TestHWAddnFission, test_addn_fission_divided_by_9) { + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_addn_fission", "before"); + EXPECT_NE(g, nullptr); + std::vector shp{2, 32, 224, 224}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 9; ++i) { + args_spec_list.push_back(x_abstract); + } + auto kg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + auto addn_fission = std::make_shared(); + addn_fission->inputs_divisor_ = 9; + pm->AddPass(addn_fission); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(kg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_addn_fission", "after_divided_by_9"); + EXPECT_NE(g_after, nullptr); + auto kg_after = GetKernelGraph(g_after, args_spec_list); + EXPECT_TRUE(CheckEqualGraph(kg_after, new_graph)); +} +} // namespace opt +} // namespace mindspore diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/addn_fission_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/addn_fission_test.py new file mode 100644 index 0000000000..c120ac3e68 --- /dev/null +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/addn_fission_test.py @@ -0,0 +1,80 @@ +# Copyright 2020 Huawei 
Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +from mindspore.ops import operations as P +from mindspore.ops import Primitive + +addn = P.AddN() +make_tuple = Primitive('make_tuple') + + +class FnDict: + def __init__(self): + self.fnDict = {} + + def __call__(self, fn): + self.fnDict[fn.__name__] = fn + + def __getitem__(self, name): + return self.fnDict[name] + + +def test_addn_fission(tag): + """ test_adam_apply_one_with_decay_rule """ + fns = FnDict() + + @fns + def before(input0, input1, input2, input3, input4, input5, input6, input7, input8): + return addn((input0, input1, input2, input3, input4, input5, input6, input7, input8)) + + @fns + def after_divided_by_2(input0, input1, input2, input3, input4, input5, input6, input7, input8): + a = addn((input0, input1)) + b = addn((input2, input3)) + c = addn((input4, input5)) + d = addn((input6, input7)) + e = addn((input8,)) + f = addn((a, b)) + g = addn((c, d)) + h = addn((e,)) + i = addn((f, g)) + j = addn((h,)) + return addn((i, j)) + + @fns + def after_divided_by_3(input0, input1, input2, input3, input4, input5, input6, input7, input8): + a = addn((input0, input1, input2)) + b = addn((input3, input4, input5)) + c = addn((input6, input7, input8)) + return addn((a, b, c)) + + @fns + def after_divided_by_4(input0, input1, input2, input3, input4, input5, input6, input7, input8): + a = addn((input0, input1, input2, 
input3)) + b = addn((input4, input5, input6, input7)) + c = addn((input8,)) + return addn((a, b, c)) + + @fns + def after_divided_by_8(input0, input1, input2, input3, input4, input5, input6, input7, input8): + a = addn((input0, input1, input2, input3, input4, input5, input6, input7)) + b = addn((input8,)) + return addn((a, b)) + + @fns + def after_divided_by_9(input0, input1, input2, input3, input4, input5, input6, input7, input8): + return addn((input0, input1, input2, input3, input4, input5, input6, input7, input8)) + + return fns[tag] From cc80c76687dfe8e527ee53bf9872cf3492b68bce Mon Sep 17 00:00:00 2001 From: panfengfeng Date: Sat, 11 Apr 2020 09:28:09 +0800 Subject: [PATCH 189/367] add quantizaiton gpu op --- mindspore/_akg/gpu/__init__.py | 4 + mindspore/_akg/gpu/hsigmoid.py | 63 ++++++ mindspore/_akg/gpu/hsigmoid_grad.py | 51 +++++ mindspore/_akg/gpu/hswish.py | 63 ++++++ mindspore/_akg/gpu/hswish_grad.py | 53 +++++ mindspore/_checkparam.py | 7 + mindspore/ccsrc/CMakeLists.txt | 1 + .../gpu/cuda_impl/batchnorm_fold2_impl.cu | 169 ++++++++++++++ .../gpu/cuda_impl/batchnorm_fold2_impl.cuh | 40 ++++ .../gpu/cuda_impl/batchnorm_fold_impl.cu | 88 ++++++++ .../gpu/cuda_impl/batchnorm_fold_impl.cuh | 32 +++ .../kernel/gpu/cuda_impl/concatv2_impl.cu | 1 + .../gpu/cuda_impl/correction_mul_impl.cu | 66 ++++++ .../gpu/cuda_impl/correction_mul_impl.cuh | 27 +++ .../gpu/cuda_impl/cross_entropy_cuda_impl.cu | 47 ++++ .../gpu/cuda_impl/cross_entropy_cuda_impl.cuh | 26 +++ .../kernel/gpu/cuda_impl/dropout_impl.cu | 47 ++++ .../kernel/gpu/cuda_impl/dropout_impl.cuh | 26 +++ .../kernel/gpu/cuda_impl/fake_quant_impl.cu | 133 +++++++++++ .../kernel/gpu/cuda_impl/fake_quant_impl.cuh | 32 +++ .../cuda_impl/fake_quant_per_channel_impl.cu | 174 +++++++++++++++ .../cuda_impl/fake_quant_per_channel_impl.cuh | 35 +++ .../ccsrc/kernel/gpu/cuda_impl/gather.cuh | 2 +- .../sparse_cross_entropy_cuda_impl.cu | 77 +++++++ .../sparse_cross_entropy_cuda_impl.cuh | 30 +++ 
.../ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc | 101 +++++++++ .../ccsrc/kernel/gpu/nn/dropout_gpu_kernel.h | 67 ++++++ .../kernel/gpu/nn/dropout_grad_kernel.cc | 92 ++++++++ .../ccsrc/kernel/gpu/nn/dropout_grad_kernel.h | 58 +++++ .../gpu/quant/batchnorm_fold2_gpu_kernel.cc | 35 +++ .../gpu/quant/batchnorm_fold2_gpu_kernel.h | 139 ++++++++++++ .../quant/batchnorm_fold2_grad_gpu_kernel.cc | 39 ++++ .../quant/batchnorm_fold2_grad_gpu_kernel.h | 167 ++++++++++++++ .../gpu/quant/batchnorm_fold_gpu_kernel.cc | 34 +++ .../gpu/quant/batchnorm_fold_gpu_kernel.h | 208 ++++++++++++++++++ .../quant/batchnorm_fold_grad_gpu_kernel.cc | 32 +++ .../quant/batchnorm_fold_grad_gpu_kernel.h | 167 ++++++++++++++ .../gpu/quant/correction_mul_gpu_kernel.cc | 29 +++ .../gpu/quant/correction_mul_gpu_kernel.h | 98 +++++++++ .../quant/correction_mul_grad_gpu_kernel.cc | 33 +++ .../quant/correction_mul_grad_gpu_kernel.h | 104 +++++++++ .../kernel/gpu/quant/fake_quant_gpu_kernel.cc | 176 +++++++++++++++ .../kernel/gpu/quant/fake_quant_gpu_kernel.h | 66 ++++++ .../gpu/quant/fake_quant_grad_gpu_kernel.cc | 145 ++++++++++++ .../gpu/quant/fake_quant_grad_gpu_kernel.h | 61 +++++ .../fake_quant_per_channel_gpu_kernel.cc | 181 +++++++++++++++ .../quant/fake_quant_per_channel_gpu_kernel.h | 66 ++++++ .../fake_quant_per_channel_grad_gpu_kernel.cc | 158 +++++++++++++ .../fake_quant_per_channel_grad_gpu_kernel.h | 63 ++++++ mindspore/ops/_op_impl/akg/gpu/hsigmoid.py | 52 +++++ .../ops/_op_impl/akg/gpu/hsigmoid_grad.py | 62 ++++++ mindspore/ops/_op_impl/akg/gpu/hswish.py | 52 +++++ mindspore/ops/_op_impl/akg/gpu/hswish_grad.py | 62 ++++++ 53 files changed, 3840 insertions(+), 1 deletion(-) create mode 100644 mindspore/_akg/gpu/hsigmoid.py create mode 100644 mindspore/_akg/gpu/hsigmoid_grad.py create mode 100644 mindspore/_akg/gpu/hswish.py create mode 100644 mindspore/_akg/gpu/hswish_grad.py create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold2_impl.cu create mode 100644 
mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold2_impl.cuh create mode 100755 mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold_impl.cu create mode 100755 mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold_impl.cuh create mode 100755 mindspore/ccsrc/kernel/gpu/cuda_impl/correction_mul_impl.cu create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/correction_mul_impl.cuh create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/cross_entropy_cuda_impl.cu create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/cross_entropy_cuda_impl.cuh create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/dropout_impl.cu create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/dropout_impl.cuh create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_impl.cu create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_impl.cuh create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cu create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cuh mode change 100755 => 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/gather.cuh create mode 100755 mindspore/ccsrc/kernel/gpu/cuda_impl/sparse_cross_entropy_cuda_impl.cu create mode 100755 mindspore/ccsrc/kernel/gpu/cuda_impl/sparse_cross_entropy_cuda_impl.cuh create mode 100644 mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/nn/dropout_grad_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/nn/dropout_grad_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.cc create mode 100644 
mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/fake_quant_gpu_kernel.cc create mode 100755 mindspore/ccsrc/kernel/gpu/quant/fake_quant_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/fake_quant_grad_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/quant/fake_quant_grad_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.cc create mode 100755 mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_grad_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_grad_gpu_kernel.h create mode 100644 mindspore/ops/_op_impl/akg/gpu/hsigmoid.py create mode 100644 mindspore/ops/_op_impl/akg/gpu/hsigmoid_grad.py create mode 100644 mindspore/ops/_op_impl/akg/gpu/hswish.py create mode 100644 mindspore/ops/_op_impl/akg/gpu/hswish_grad.py diff --git a/mindspore/_akg/gpu/__init__.py b/mindspore/_akg/gpu/__init__.py index 86334cfcd3..2ac6d1adb1 100644 --- a/mindspore/_akg/gpu/__init__.py +++ b/mindspore/_akg/gpu/__init__.py @@ -26,3 +26,7 @@ from .squeeze_grad import SqueezeGrad, gpu_schedule_SqueezeGrad from .mean import SimpleMean, gpu_schedule_SimpleMean from .mean_grad import SimpleMeanGrad, gpu_schedule_SimpleMeanGrad from .mul import Mul, gpu_schedule_Mul +from .hsigmoid import Hsigmoid, gpu_schedule_Hsigmoid +from 
.hsigmoid_grad import HsigmoidGrad, gpu_schedule_HsigmoidGrad +from .hswish import Hswish, gpu_schedule_Hswish +from .hswish_grad import HswishGrad, gpu_schedule_HswishGrad diff --git a/mindspore/_akg/gpu/hsigmoid.py b/mindspore/_akg/gpu/hsigmoid.py new file mode 100644 index 0000000000..b9d5ea74c9 --- /dev/null +++ b/mindspore/_akg/gpu/hsigmoid.py @@ -0,0 +1,63 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""hsigmoid""" +import _akg.topi as topi +import _akg.tvm as tvm +from _akg.topi import tag + + +@tvm.tag_scope(tag=tag.ELEMWISE) +def topi_nn_hsigmoid(x): + """ + topi hsigmoid + Args: + x: + + Returns: + + """ + return tvm.compute(x.shape, lambda *i: tvm.if_then_else(x(*i) <= -3, 0, + tvm.if_then_else(x(*i) >= 3, 1, + (x(*i) + 3) / 6))) + + +def Hsigmoid(x): + """ + Hsigmoid + Args: + x: + + Returns: + + """ + return topi_nn_hsigmoid(x) + + +def gpu_schedule_Hsigmoid(outs): + """ + gpu schedule Hsigmoid + Args: + outs: + + Returns: + + """ + device = 'cuda' + ctx = tvm.context(device, 0) + if not ctx.exist: + raise SystemError("Skip because %s is not enabled" % device) + with tvm.target.create(device): + sch = topi.cuda.schedule_elemwise(outs) + return sch diff --git a/mindspore/_akg/gpu/hsigmoid_grad.py b/mindspore/_akg/gpu/hsigmoid_grad.py new file mode 100644 index 0000000000..d3e7ac6345 --- /dev/null +++ b/mindspore/_akg/gpu/hsigmoid_grad.py @@ -0,0 +1,51 @@ +# Copyright 2019 Huawei Technologies Co., Ltd 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Hsigmoid grad""" +import _akg.topi as topi +import _akg.tvm as tvm + + +def HsigmoidGrad(y_grad, x): + """ + HsigmoidGrad + Args: + y_grad: + x: + + Returns: + + """ + return tvm.compute(x.shape, lambda *i: tvm.if_then_else(x(*i) <= -3, 0, + tvm.if_then_else(x(*i) >= 3, 0, + y_grad(*i) / 6))) + + +def gpu_schedule_HsigmoidGrad(outs): + """ + gpu schedule ReLU6Grad + Args: + outs: + + Returns: + + """ + device = 'cuda' + ctx = tvm.context(device, 0) + if not ctx.exist: + raise SystemError("Skip because %s is not enabled" % device) + + with tvm.target.create(device): + sch = topi.cuda.schedule_elemwise(outs) + return sch diff --git a/mindspore/_akg/gpu/hswish.py b/mindspore/_akg/gpu/hswish.py new file mode 100644 index 0000000000..904c38c2a2 --- /dev/null +++ b/mindspore/_akg/gpu/hswish.py @@ -0,0 +1,63 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""hswish""" +import _akg.topi as topi +import _akg.tvm as tvm +from _akg.topi import tag + + +@tvm.tag_scope(tag=tag.ELEMWISE) +def topi_nn_hswish(x): + """ + topi hswish + Args: + x: + + Returns: + + """ + return tvm.compute(x.shape, lambda *i: tvm.if_then_else(x(*i) <= -3, 0, + tvm.if_then_else(x(*i) >= 3, x(*i), + x(*i) * (x(*i) + 3) / 6))) + + +def Hswish(x): + """ + Hswish + Args: + x: + + Returns: + + """ + return topi_nn_hswish(x) + + +def gpu_schedule_Hswish(outs): + """ + gpu schedule Hswish + Args: + outs: + + Returns: + + """ + device = 'cuda' + ctx = tvm.context(device, 0) + if not ctx.exist: + raise SystemError("Skip because %s is not enabled" % device) + with tvm.target.create(device): + sch = topi.cuda.schedule_elemwise(outs) + return sch diff --git a/mindspore/_akg/gpu/hswish_grad.py b/mindspore/_akg/gpu/hswish_grad.py new file mode 100644 index 0000000000..5b38f07c84 --- /dev/null +++ b/mindspore/_akg/gpu/hswish_grad.py @@ -0,0 +1,53 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""HswishGrad""" +import _akg.topi as topi +import _akg.tvm as tvm + + +def HswishGrad(y_grad, x): + """ + HswishGrad + Args: + y_grad: + x: + + Returns: + + """ + shape = x.shape + + res0 = tvm.compute(shape, lambda *i: tvm.if_then_else(x(*i) <= -3, 0, y_grad(*i) * (2 * x(*i) + 3) / 6)) + res6 = tvm.compute(shape, lambda *i: tvm.if_then_else(x(*i) >= 3, y_grad(*i), res0(*i))) + return res6 + + +def gpu_schedule_HswishGrad(outs): + """ + gpu schedule HswishGrad + Args: + outs: + + Returns: + + """ + device = 'cuda' + ctx = tvm.context(device, 0) + if not ctx.exist: + raise SystemError("Skip because %s is not enabled" % device) + + with tvm.target.create(device): + sch = topi.cuda.schedule_elemwise(outs) + return sch diff --git a/mindspore/_checkparam.py b/mindspore/_checkparam.py index d553bcd364..cb3dbc0d50 100644 --- a/mindspore/_checkparam.py +++ b/mindspore/_checkparam.py @@ -300,6 +300,13 @@ class ParamValidator: for arg, value in args.items(): ParamValidator.check_subclass(arg, value, mstype.tensor) + @staticmethod + def check_bool(arg_name, arg_value): + """Check arg isintance of bool""" + if not isinstance(arg_value, bool): + raise ValueError(f'The `{arg_name}` should be isintance of bool, but got {arg_value}.') + return arg_value + @staticmethod def check_type(arg_name, arg_value, valid_types): """Type checking.""" diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index c49c962bdd..6f6fbf955d 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -473,6 +473,7 @@ if(ENABLE_GPU) gpu_cuda_lib gpu_queue cublas + ${CUDA_PATH}/lib64/libcurand.so ${CUDNN_PATH}/lib64/libcudnn.so ${CUDA_PATH}/lib64/libcudart.so ${CUDA_PATH}/lib64/stubs/libcuda.so) diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold2_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold2_impl.cu new file mode 100644 index 0000000000..3ef856e00a --- /dev/null +++ 
b/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold2_impl.cu @@ -0,0 +1,169 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include "batchnorm_fold2_impl.cuh" +#include "batchnorm_fold_impl.cuh" +#include "include/cuda_runtime.h" + + +template +__global__ void BatchNormFold2Kernel(const T *x, const T *beta, const T *gamma, const T *batch_std, const T *batch_mean, + const T *running_std, const T *running_mean, const int *global_step, T *y, + int freeze_bn, size_t N, size_t C, size_t H, size_t W) { + int c = 0; + size_t num_count = N * C * H * W; + if (*global_step < freeze_bn) { + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num_count; i += blockDim.x * gridDim.x) { + c = i / (H * W) % C; + y[i] = x[i] * running_std[c] / batch_std[c] + beta[c] - gamma[c] * batch_mean[c] / batch_std[c]; + } + } else { + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num_count; i += blockDim.x * gridDim.x) { + c = i / (H * W) % C; + y[i] = x[i] + beta[c] - gamma[c] * running_mean[c] / running_std[c]; + } + } +} + +template +__global__ void BatchNormFold2GradReduce1(const T *dout, T *tmp, const T *x, T *tmp2, size_t N, size_t C, size_t HW) { + int n = 0; + int c = 0; + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < N * C; i += blockDim.x * gridDim.x) { + n = i / C; + c = i % C; + tmp[c * N + n] = thrust::reduce(thrust::seq, 
dout + i * HW, dout + (i + 1) * HW, 0.f, thrust::plus()); + tmp2[c * N + n] = thrust::reduce(thrust::seq, x + i * HW, x + (i + 1) * HW, 0.f, thrust::plus()); + } +} + +template +__global__ void BatchNormFold2GradReduce2(const T *tmp, T *d_beta, const T *tmp2, T *reduce_x, size_t N, size_t C) { + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < C; i += blockDim.x * gridDim.x) { + d_beta[i] = thrust::reduce(thrust::seq, tmp + i * N, tmp + (i + 1) * N, 0.f, thrust::plus()); + reduce_x[i] = thrust::reduce(thrust::seq, tmp2 + i * N, tmp2 + (i + 1) * N, 0.f, thrust::plus()); + } +} + +template +__global__ void BatchNormFold2GradNotFreeze(const T *d_beta, const T *reduce_x, const T *batch_mean, const T *batch_std, + const T *running_mean, const T *running_std, const T *gamma, T *d_gamma, + T *d_batch_mean, T *d_batch_std, size_t C) { + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < C; i += blockDim.x * gridDim.x) { + d_gamma[i] = -d_beta[i] * batch_mean[i] / batch_std[i]; + d_batch_mean[i] = -d_beta[i] * gamma[i] / batch_std[i]; + d_batch_std[i] = + (d_beta[i] * gamma[i] * batch_mean[i] - reduce_x[i] * running_std[i]) / batch_std[i] / batch_std[i]; + } +} + +template +__global__ void BatchNormFold2GradFreeze(const T *d_beta, const T *running_mean, const T *running_std, T *d_gamma, + size_t C) { + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < C; i += blockDim.x * gridDim.x) { + d_gamma[i] = -d_beta[i] * running_mean[i] / running_std[i]; + } +} + +template +__global__ void BatchNormFold2GradMul(const T *dout, const T *x, T *tmp_x, size_t NCHW) { + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < NCHW; i += blockDim.x * gridDim.x) { + tmp_x[i] = dout[i] * x[i]; + } +} + +template +__global__ void DxMul(size_t N, size_t C, size_t HW, const T *batch_std, const T *running_std, T *d_x) { + int c = 0; + size_t num_count = N * C * HW; + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num_count; i += blockDim.x * gridDim.x) { 
+ c = (i / HW) % C; + d_x[i] = d_x[i] * running_std[c] / batch_std[c]; + } +} + +template +void BatchNormFold2Forward(const T *x, const T *beta, const T *gamma, const T *batch_std, const T *batch_mean, + const T *running_std, const T *running_mean, const int *global_step, T *y, int freeze_bn, + size_t N, size_t C, size_t H, size_t W, cudaStream_t cuda_stream) { + auto num_count = N * C * H * W; + BatchNormFold2Kernel<<>>( + x, beta, gamma, batch_std, batch_mean, running_std, running_mean, global_step, y, freeze_bn, N, C, H, W); +} + +template void BatchNormFold2Forward(const float *x, const float *beta, const float *gamma, + const float *batch_std, const float *batch_mean, const float *running_std, + const float *running_mean, const int *global_step, float *y, int freeze_bn, + size_t N, size_t C, size_t H, size_t W, cudaStream_t cuda_stream); + +template +void BatchNormFold2GradReduce(const T *dout, const T *x, T *d_beta, T *tmp, T *reduce_x, T *tmp2, T *tmp_x, size_t N, + size_t C, size_t H, size_t W, cudaStream_t cuda_stream) { + auto hw = H * W; + auto num_count = N * C * H * W; + BatchNormFold2GradMul<<>>(dout, x, tmp_x, num_count); + BatchNormFold2GradReduce1<<>>(dout, tmp, tmp_x, tmp2, N, C, hw); + BatchNormFold2GradReduce2<<>>(tmp, d_beta, tmp2, reduce_x, N, C); +} + +template void BatchNormFold2GradReduce(const float *dout, const float *x, float *d_beta, float *tmp, + float *reduce_x, float *tmp2, float *tmp_x, size_t N, size_t C, size_t H, + size_t W, cudaStream_t cuda_stream); + +template +void CalBatchNormFold2GradNotFreeze(const T *d_beta, const T *reduce_x, const T *batch_mean, const T *batch_std, + const T *running_mean, const T *running_std, const T *gamma, T *d_gamma, + T *d_batch_mean, T *d_batch_std, size_t C, cudaStream_t cuda_stream) { + BatchNormFold2GradNotFreeze<<>>( + d_beta, reduce_x, batch_mean, batch_std, running_mean, running_std, gamma, d_gamma, d_batch_mean, d_batch_std, C); +} + +template void CalBatchNormFold2GradNotFreeze(const 
float *d_beta, const float *reduce_x, const float *batch_mean, + const float *batch_std, const float *running_mean, + const float *running_std, const float *gamma, float *d_gamma, + float *d_batch_mean, float *d_batch_std, size_t C, + cudaStream_t cuda_stream); + +template +void CalBatchNormFold2GradFreeze(const T *d_beta, const T *reduce_x, const T *batch_mean, const T *batch_std, + const T *running_mean, const T *running_std, const T *gamma, T *d_gamma, + T *d_batch_mean, T *d_batch_std, size_t C, cudaStream_t cuda_stream) { + BatchNormFold2GradFreeze<<>>(d_beta, running_mean, running_std, d_gamma, + C); + ThrustFillWith(d_batch_mean, C, (T)0.f, cuda_stream); + ThrustFillWith(d_batch_std, C, (T)0.f, cuda_stream); +} + +template void CalBatchNormFold2GradFreeze(const float *d_beta, const float *reduce_x, const float *batch_mean, + const float *batch_std, const float *running_mean, + const float *running_std, const float *gamma, float *d_gamma, + float *d_batch_mean, float *d_batch_std, size_t C, + cudaStream_t cuda_stream); + +template +void CalBatchNormFold2GradNotFreezeDxMul(const T *batch_std, const T *running_std, T *d_x, size_t N, size_t C, size_t H, + size_t W, cudaStream_t cuda_stream) { + DxMul<<>>(N, C, H * W, batch_std, running_std, d_x); +} + +template void CalBatchNormFold2GradNotFreezeDxMul(const float *batch_std, const float *running_std, float *d_x, + size_t N, size_t C, size_t H, size_t W, + cudaStream_t cuda_stream); diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold2_impl.cuh b/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold2_impl.cuh new file mode 100644 index 0000000000..c3ce08dfd0 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold2_impl.cuh @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_BATCHNORMFOLD2_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_BATCHNORMFOLD2_H_ + +#include "device/gpu/cuda_common.h" +template +void BatchNormFold2Forward(const T *x, const T *beta, const T *gamma, const T *batch_std, const T *batch_mean, + const T *running_std, const T *running_mean, const int *global_step, T *y, int freeze_bn, + size_t N, size_t C, size_t H, size_t W, cudaStream_t cuda_stream); +template +void CalBatchNormFold2GradNotFreeze(const T *d_beta, const T *reduce_x, const T *batch_mean, const T *batch_std, + const T *running_mean, const T *running_std, const T *gamma, T *d_gamma, + T *d_batch_mean, T *d_batch_std, size_t C, cudaStream_t cuda_stream); +template +void CalBatchNormFold2GradFreeze(const T *d_beta, const T *reduce_x, const T *batch_mean, const T *batch_std, + const T *running_mean, const T *running_std, const T *gamma, T *d_gamma, + T *d_batch_mean, T *d_batch_std, size_t C, cudaStream_t cuda_stream); +template +void BatchNormFold2GradReduce(const T *dout, const T *x, T *d_beta, T *tmp, T *reduce_x, T *tmp2, T *tmp_x, size_t N, + size_t C, size_t H, size_t W, cudaStream_t cuda_stream); + +template +void CalBatchNormFold2GradNotFreezeDxMul(const T *batch_std, const T *running_std, T *d_x, size_t N, size_t C, size_t H, + size_t W, cudaStream_t cuda_stream); +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_BATCHNORMFOLD2_H_ diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold_impl.cu new file mode 
100755 index 0000000000..ddc2803f56 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold_impl.cu @@ -0,0 +1,88 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "batchnorm_fold_impl.cuh" +#include "device/gpu/cuda_common.h" + +template +__global__ void UpdateRunningStd(int channel_size, const double epsilon, T* running_std) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < channel_size; i += blockDim.x * gridDim.x) { + running_std[i] = sqrtf(running_std[i] + epsilon); + } + return; +} + +template +__global__ void UpdateBatchStd(int channel_size, T* batch_std) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < channel_size; i += blockDim.x * gridDim.x) { + batch_std[i] = 1 / batch_std[i]; + } + return; +} + +template +__global__ void CalDx(const T* d_batch_mean, const T* d_batch_std, const T* x, const T* batch_mean, const T* batch_std, + int batch_size, int channel_size, int height, int width, T* dx) { + int n = batch_size * channel_size * height * width; + int normal_size = batch_size * height * width; + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { + int channel_index = i / (height * width) % channel_size; + dx[i] = d_batch_mean[channel_index] / normal_size + + d_batch_std[channel_index] * (x[i] - batch_mean[channel_index]) / batch_std[channel_index] / normal_size; + } + 
return; +} + +template +void CalUpdateRunningStd(int channel_size, double epsilon, T* running_std, cudaStream_t cuda_stream) { + UpdateRunningStd<<>>(channel_size, epsilon, running_std); + return; +} + +template void CalUpdateRunningStd(int channel_size, double epsilon, float* running_std, + cudaStream_t cuda_stream); + +template +void CalUpdateBatchStd(int channel_size, T* batch_std, cudaStream_t cuda_stream) { + UpdateBatchStd<<>>(channel_size, batch_std); + return; +} + +template void CalUpdateBatchStd(int channel_size, float* batch_std, cudaStream_t cuda_stream); + +template +void CalBatchNormFoldGrad(const T* d_batch_mean, const T* d_batch_std, const T* x, const T* batch_mean, + const T* batch_std, int batch_size, int channel_size, int height, int width, T* dx, + cudaStream_t cuda_stream) { + CalDx<<>>( + d_batch_mean, d_batch_std, x, batch_mean, batch_std, batch_size, channel_size, height, width, dx); +} + +template void CalBatchNormFoldGrad(const float* d_batch_mean, const float* d_batch_std, const float* x, + const float* batch_mean, const float* batch_std, int batch_size, + int channel_size, int height, int width, float* dx, cudaStream_t cuda_stream); + +template +void ThrustFillWith(T* array, int size, T tofill, cudaStream_t cuda_stream) { + thrust::device_ptr dev_ptr(array); + thrust::fill(thrust::cuda::par.on(cuda_stream), dev_ptr, dev_ptr + size, tofill); +} + +template void ThrustFillWith(float* array, int size, float tofill, cudaStream_t cuda_stream); + diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold_impl.cuh b/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold_impl.cuh new file mode 100755 index 0000000000..d7ad76c5ad --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/batchnorm_fold_impl.cuh @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_BATCHNORM_FOLD_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_BATCHNORM_FOLD_H_ + +template +void CalUpdateRunningStd(int channel_size, double epsilon, T* running_std, cudaStream_t cuda_stream); + +template +void CalUpdateBatchStd(int channel_size, T* batch_std, cudaStream_t cuda_stream); + +template +void CalBatchNormFoldGrad(const T* d_batch_mean, const T* d_batch_std, const T* x, const T* batch_mean, + const T* batch_std, int batch_size, int channel_size, int height, int width, T* dx, + cudaStream_t cuda_stream); +template +void ThrustFillWith(T* array, int size, T tofill, cudaStream_t cuda_stream); +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_BATCHNORM_FOLD_H_ diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/concatv2_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/concatv2_impl.cu index ed330f6e0a..fa10494d9c 100755 --- a/mindspore/ccsrc/kernel/gpu/cuda_impl/concatv2_impl.cu +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/concatv2_impl.cu @@ -41,3 +41,4 @@ template void CalConcatV2(const size_t size, const int w1, const int w2, const i int* output, cudaStream_t cuda_stream); template void CalConcatV2(const size_t size, const int w1, const int w2, const half* input_1, const half* input_2, half* output, cudaStream_t cuda_stream); + diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/correction_mul_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/correction_mul_impl.cu new file mode 100755 index 0000000000..ac2f99ed9a --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/correction_mul_impl.cu @@ 
-0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "correction_mul_impl.cuh" +#include "device/gpu/cuda_common.h" + +template +__global__ void CorrectionMul(const T* weight, const T* gamma, const T* running_std, const int batchsize, const int chw, + T* output) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < batchsize * chw; i += blockDim.x * gridDim.x) { + int n = i / chw; + output[i] = weight[i] * gamma[n] / running_std[n]; + } + return; +} + +template +__global__ void Mul(int N, const T* a, const T* b, T* c) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { + c[i] = a[i] * b[i]; + } + return; +} + +template +__global__ void Reduce(int N, int CHW, const T* tmp, const T* running_std, T* d_gamma) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { + d_gamma[i] = thrust::reduce(thrust::seq, tmp + i * CHW, tmp + (i + 1) * CHW, 0.f, thrust::plus()); + d_gamma[i] = d_gamma[i] / running_std[i]; + } + return; +} + +template +void CalCorrectionMul(const T* weight, const T* gamma, const T* running_std, int N, int C, int H, int W, T* output, + cudaStream_t cuda_stream) { + CorrectionMul<<>>(weight, gamma, running_std, N, C * H * W, + output); +} + +template void CalCorrectionMul(const float* weight, const float* gamma, const float* running_std, int N, int C, + int H, int W, float* 
output, cudaStream_t cuda_stream); + +template +void CalCorrectionMulGrad(const T* d_out, const T* weight, const T* running_std, int N, int C, int H, int W, T* d_gamma, + T* tmp, cudaStream_t cuda_stream) { + Mul<<>>(N * C * H * W, d_out, weight, tmp); + Reduce<<>>(N, C * H * W, tmp, running_std, d_gamma); +} + +template void CalCorrectionMulGrad(const float* d_out, const float* weight, const float* running_std, int N, + int C, int H, int W, float* d_gamma, float* tmp, cudaStream_t cuda_stream); diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/correction_mul_impl.cuh b/mindspore/ccsrc/kernel/gpu/cuda_impl/correction_mul_impl.cuh new file mode 100644 index 0000000000..176c063dc8 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/correction_mul_impl.cuh @@ -0,0 +1,27 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_CORRECTIONMUL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_CORRECTIONMUL_H_ + +template +void CalCorrectionMul(const T* weight, const T* gamma, const T* running_std, int batch_size, int channel_size, + int height, int width, T* output, cudaStream_t cuda_stream); + +template +void CalCorrectionMulGrad(const T* d_out, const T* weight, const T* running_std, int batch_size, int channel_size, + int height, int width, T* d_gamma, T* tmp, cudaStream_t cuda_stream); +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_CORRECTIONMUL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/cross_entropy_cuda_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/cross_entropy_cuda_impl.cu new file mode 100644 index 0000000000..a3d2e3558c --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/cross_entropy_cuda_impl.cu @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "cross_entropy_cuda_impl.cuh" +#include "include/cuda_runtime.h" + +__global__ void CalCrossEntropyWithGradKernel(const float *softmax_logits, const float *log_softmax_logits, + const float *labels, const int batch_size, const int num_classes, + float *loss, float *dx) { + extern __shared__ float loss_shared[]; + const float mean_scale = 1.0f / static_cast(batch_size); + + loss_shared[threadIdx.x] = 0; + for (int i = threadIdx.x * num_classes; i < (threadIdx.x + 1) * num_classes; ++i) { + loss_shared[threadIdx.x] -= log_softmax_logits[i] * labels[i]; + dx[i] = (softmax_logits[i] - labels[i]) * mean_scale; + } + __syncthreads(); + if (threadIdx.x == 0) { + *loss = 0; + for (int i = 0; i < batch_size; i++) { + *loss += loss_shared[i]; + } + *loss *= mean_scale; + } +} + +void CalCrossEntropyWithGrad(const float *softmax_logits, const float *log_softmax_logits, const float *labels, + const int batch_size, const int num_classes, float *loss, float *dx, + cudaStream_t cuda_stream) { + CalCrossEntropyWithGradKernel<<<1, batch_size, batch_size * sizeof(float), cuda_stream>>>( + softmax_logits, log_softmax_logits, labels, batch_size, num_classes, loss, dx); +} diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/cross_entropy_cuda_impl.cuh b/mindspore/ccsrc/kernel/gpu/cuda_impl/cross_entropy_cuda_impl.cuh new file mode 100644 index 0000000000..25b1624a46 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/cross_entropy_cuda_impl.cuh @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_CROSSENTROPYCUDAIMPL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_CROSSENTROPYCUDAIMPL_H_ + +#include "device/gpu/cuda_common.h" + +void CalCrossEntropyWithGrad(const float *softmax_logits, const float *log_softmax_logits, const float *labels, + const int batch_size, const int num_classes, float *loss, float *dx, + cudaStream_t cuda_stream); + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_CROSSENTROPYCUDAIMPL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/dropout_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/dropout_impl.cu new file mode 100644 index 0000000000..bffa73fb76 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/dropout_impl.cu @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include "dropout_impl.cuh" +#include "include/cuda_runtime.h" + +__global__ void DropoutForwardKernel(const float *input, float *mask, float *output, size_t num_count, + float drop_prob) { + float scale = 1.f / (1.f - drop_prob); + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num_count; i += blockDim.x * gridDim.x) { + mask[i] = mask[i] > drop_prob; + output[i] = scale * input[i] * mask[i]; + } +} + +void DropoutForward(const float *input, float *mask, float *output, size_t num_count, float drop_prob, + cudaStream_t cuda_stream) { + DropoutForwardKernel<<>>(input, mask, output, num_count, + drop_prob); +} + +__global__ void DropoutBackwardKernel(const float *dy, const float *mask, float *dx, size_t num_count, + float drop_prob) { + float scale = 1.f / (1.f - drop_prob); + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num_count; i += blockDim.x * gridDim.x) { + dx[i] = scale * dy[i] * mask[i]; + } +} + +void DropoutBackward(const float *dy, const float *mask, float *dx, size_t num_count, float drop_prob, + cudaStream_t cuda_stream) { + DropoutBackwardKernel<<>>(dy, mask, dx, num_count, drop_prob); +} diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/dropout_impl.cuh b/mindspore/ccsrc/kernel/gpu/cuda_impl/dropout_impl.cuh new file mode 100644 index 0000000000..9aa05d6a08 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/dropout_impl.cuh @@ -0,0 +1,26 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_DROPOUT_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_DROPOUT_H_ + +#include "device/gpu/cuda_common.h" +void DropoutForward(const float *input, float *mask, float *output, size_t num_count, float drop_prob, + cudaStream_t cuda_stream); +void DropoutBackward(const float *dy, const float *mask, float *dx, size_t num_count, float drop_prob, + cudaStream_t cuda_stream); + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_DROPOUT_H_ diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_impl.cu new file mode 100644 index 0000000000..7b09256e1d --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_impl.cu @@ -0,0 +1,133 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include "device/gpu/cuda_common.h" +#include "fake_quant_impl.cuh" + +__global__ void FakeQuantize(const float* input, float* output, const int size, const float* nudge_min, + const float* nudge_max, const float* scale, bool symmetric) { + float input_x = 0.f; + int nudge_input = 0; + + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) { + input_x = input[i]; + // clamp input x + if (input_x < nudge_min[0]) { + input_x = nudge_min[0]; + } + if (input_x > nudge_max[0]) { + input_x = nudge_max[0]; + } + // clamp shift + nudge_input = floor((input_x - nudge_min[0]) / scale[0] + 0.5f); + + // quantize + output[i] = nudge_input * scale[0] + nudge_min[0]; + } + return; +} + +__global__ void FakeQuantizeGrad(const float* input, const float* gradient, float* output, const int size, + const float* nudge_min, const float* nudge_max) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) { + if (input[i] < nudge_min[0] || input[i] > nudge_max[0]) { + output[i] = 0; + } else { + output[i] = gradient[i]; + } + } + return; +} + +__global__ void NudgeMinMax(const float* input_min, const float* input_max, const float quant_min, + const float quant_max, float* nudge_min, float* nudge_max, float* scale) { + float zp_from_min = 0.f; + if ((quant_max - quant_min) == 0 || (*input_max - *input_min) == 0) { + *scale = 0.f; + zp_from_min = 0.f; + } else { + *scale = (*input_max - *input_min) / (quant_max - quant_min); + zp_from_min = quant_min - *input_min / *scale; + } + + float nudge_zp = 0.f; + if (zp_from_min <= quant_min) { + nudge_zp = quant_min; + } else if (zp_from_min >= quant_max) { + nudge_zp = quant_max; + } else { + nudge_zp = round(zp_from_min); + } + + *nudge_min = (quant_min - nudge_zp) * (*scale); + *nudge_max = (quant_max - nudge_zp) * (*scale); + return; +} + +__global__ void UpdateInputMinMaxWithEMA(float* input_min, float* input_max, const float min, 
const float max, + const float decay) { + *input_min = decay * (min) + (1 - decay) * (*input_min); + *input_min = *input_min > 0 ? 0 : *input_min; + *input_max = decay * (max) + (1 - decay) * (*input_max); + *input_max = *input_max < 0 ? 0 : *input_max; + return; +} + +__global__ void UpdateInputMinMax(float* input_min, float* input_max, const float min, const float max) { + *input_min = min; + *input_max = max; +} + +void CalFakeQuantize(const float* input, float* output, const int size, const float* nudge_min, const float* nudge_max, + const float* scale, bool symmetric, cudaStream_t cuda_stream) { + FakeQuantize<<>>(input, output, size, nudge_min, nudge_max, scale, + symmetric); + return; +} + +void CalFakeQuantizeGrad(const float* input, const float* gradient, float* output, const int size, + const float* nudge_min, const float* nudge_max, cudaStream_t cuda_stream) { + FakeQuantizeGrad<<>>(input, gradient, output, size, nudge_min, + nudge_max); + return; +} + +void CalNudge(const float* input_min, const float* input_max, const float quant_min, const float quant_max, + float* nudge_min, float* nudge_max, float* scale, cudaStream_t cuda_stream) { + NudgeMinMax<<<1, 1>>>(input_min, input_max, quant_min, quant_max, nudge_min, nudge_max, scale); + return; +} + +void CalMinMax(float* input, float* input_min, float* input_max, const int size, const float ema_decay, const bool ema, + cudaStream_t cuda_stream) { + float minel = 0.f; + float maxel = 0.f; + thrust::pair, thrust::device_ptr> tuple; + tuple = thrust::minmax_element(thrust::device_pointer_cast(input), thrust::device_pointer_cast(input) + size); + minel = tuple.first[0]; + maxel = tuple.second[0]; + + if (ema) { + UpdateInputMinMaxWithEMA<<<1, 1>>>(input_min, input_max, minel, maxel, ema_decay); + } else { + UpdateInputMinMax<<<1, 1>>>(input_min, input_max, minel, maxel); + } + return; +} + diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_impl.cuh 
b/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_impl.cuh new file mode 100644 index 0000000000..c88c1f79e2 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_impl.cuh @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_FAKEQUANTIZE_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_FAKEQUANTIZE_H_ + +void CalFakeQuantize(const float* input, float* output, const int size, const float* nudge_min, const float* nudge_max, + const float* scale, bool symmetric, cudaStream_t cuda_stream); + +void CalFakeQuantizeGrad(const float* input, const float* gradient, float* output, const int size, + const float* nudge_min, const float* nudge_max, cudaStream_t cuda_stream); + +void CalNudge(const float* input_min, const float* input_max, const float quant_min, const float quant_max, + float* nudge_min, float* nudge_max, float* scale, cudaStream_t cuda_stream); + +void CalMinMax(float* input, float* input_min, float* input_max, const int size, const float ema_decay, const bool ema, + cudaStream_t cuda_stream); + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_FAKEQUANTIZE_H_ diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cu new file mode 100644 index 0000000000..09153bf28f --- /dev/null +++ 
b/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cu @@ -0,0 +1,174 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include "fake_quant_per_channel_impl.cuh" +#include "device/gpu/cuda_common.h" + +/** + * Find the nudge min, max and scale value as output. + * @param input_min array + * @param input_max array + * @param quant_min 1 << bit -1 + * @param quant_max 0 + * @param nudge_min array + * @param nudge_max array + * @param scale array + * @param channel_num + * @return + */ +__global__ void NudgeMinMaxPerChannel(const float* input_min, const float* input_max, const float quant_min, + const float quant_max, float* nudge_min, float* nudge_max, float* scale, + int channel_num) { + float zp_from_min = 0.f; + float nudge_zp = 0.f; + + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < channel_num; i += blockDim.x * gridDim.x) { + if ((quant_max - quant_min) == 0 || (input_max[i] - input_min[i]) == 0) { + scale[i] = 0.f; + zp_from_min = 0.f; + } else { + scale[i] = (input_max[i] - input_min[i]) / (quant_max - quant_min); + zp_from_min = quant_min - input_min[i] / scale[i]; + } + + if (zp_from_min <= quant_min) { + nudge_zp = quant_min; + } else if (zp_from_min >= quant_max) { + nudge_zp = quant_max; + } else { + nudge_zp = round(zp_from_min); + } + + nudge_min[i] = (quant_min - nudge_zp) * (scale[i]); + nudge_max[i] = 
(quant_max - nudge_zp) * (scale[i]); + } +} + +void CalNudgePerChannel(const float* input_min, const float* input_max, const float quant_min, const float quant_max, + float* nudge_min, float* nudge_max, float* scale, const int channel_num, + cudaStream_t cuda_stream) { + NudgeMinMaxPerChannel<<>>( + input_min, input_max, quant_min, quant_max, nudge_min, nudge_max, scale, channel_num); +} + +/** + * Calulate fake quant output accroding by nudge min, nudge max, nudge scale. + * @param input - array + * @param output - array + * @param total_size - int, purpose for cal the per chanel number in filters + * @param channel_size - int, purpose for cal the per channel number in filters + * @param nudge_min - array + * @param nudge_max - array + * @param scale - array + * @return + */ +__global__ void FakeQuantizePerChannel(const float* input, float* output, const int total_size, const int channel_size, + const float* nudge_min, const float* nudge_max, const float* scale, + bool symmetric) { + float input_x = 0.f; + int nudge_input = 0; + int channel_idx = 0; + int per_channel_num = total_size / channel_size; + + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < total_size; i += blockDim.x * gridDim.x) { + input_x = input[i]; + channel_idx = floor(static_cast(i) / static_cast(per_channel_num)); + // clamp input x + if (input_x < nudge_min[channel_idx]) { + input_x = nudge_min[channel_idx]; + } + if (input_x > nudge_max[channel_idx]) { + input_x = nudge_max[channel_idx]; + } + // clamp shift + nudge_input = floor((input_x - nudge_min[channel_idx]) / scale[channel_idx] + 0.5f); + + // quantize + output[i] = nudge_input * scale[channel_idx] + nudge_min[channel_idx]; + } +} + +void CalFakeQuantizePerChannel(const float* input, float* output, const int total_size, const int channel_size, + const float* nudge_min, const float* nudge_max, const float* scale, bool symmetric, + cudaStream_t cuda_stream) { + FakeQuantizePerChannel<<>>( + input, output, total_size, channel_size, 
nudge_min, nudge_max, scale, symmetric); +} + +/** + * UpdateInputMinMaxPerChannel or UpdateInputMinMaxPerChannel With EMA. + * @param input_min + * @param input_max + * @param min + * @param max + * @return + */ +__global__ void UpdateInputMinMaxPerChannel(float* input_min, float* input_max, float* input, int channels, + int per_channel_nums, bool ema, float ema_decay) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < channels; i += blockDim.x * gridDim.x) { + thrust::pair sum = + thrust::minmax_element(thrust::device, input + i * per_channel_nums, input + per_channel_nums * (i + 1)); + if (ema) { + input_min[i] = ema_decay * sum.first[0] + (1 - ema_decay) * input_min[i]; + input_max[i] = ema_decay * sum.second[0] + (1 - ema_decay) * input_max[i]; + } else { + input_min[i] = sum.first[0]; + input_max[i] = sum.second[0]; + } + } +} + +__global__ void UpdateInputMinMaxPerChannelWithEMA(float* input_min, float* input_max, float min, float max, + const float decay) { + *input_min = decay * (min) + (1 - decay) * (*input_min); + *input_max = decay * (max) + (1 - decay) * (*input_max); +} + +void CalMinMaxPerChannel(float* input, float* input_min, float* input_max, const int total_size, const int channel_size, + const float ema_decay, const bool ema, cudaStream_t cuda_stream) { + int per_channel_num = total_size / channel_size; + UpdateInputMinMaxPerChannel<<>>( + input_min, input_max, input, channel_size, per_channel_num, ema, ema_decay); +} + +__global__ void FakeQuantizePerChannelGrad(const float* input, const float* gradient, float* output, + const int total_size, const int channel_size, const float* nudge_min, + const float* nudge_max) { + int channel_idx = 0; + int per_channel_num = total_size / channel_size; + + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < total_size; i += blockDim.x * gridDim.x) { + channel_idx = floor(static_cast(i) / static_cast(per_channel_num)); + if (input[i] < nudge_min[channel_idx] || input[i] > nudge_max[channel_idx]) 
{ + output[i] = 0; + } else { + output[i] = gradient[i]; + } + } +} + +void CalFakeQuantizePerChannelGrad(const float* input, const float* gradient, float* output, const int total_num, + const int channel_num, const float* nudge_min, const float* nudge_max, + cudaStream_t cuda_stream) { + FakeQuantizePerChannelGrad<<>>( + input, gradient, output, total_num, channel_num, nudge_min, nudge_max); +} + diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cuh b/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cuh new file mode 100644 index 0000000000..3dff7156a7 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cuh @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_FAKEQUANTIZE_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_FAKEQUANTIZE_H_ + +void CalNudgePerChannel(const float* input_min, const float* input_max, const float quant_min, const float quant_max, + float* nudge_min, float* nudge_max, float* scale, const int channel_num, + cudaStream_t cuda_stream); + +void CalFakeQuantizePerChannel(const float* input, float* output, const int total_num, const int channel_num, + const float* nudge_min, const float* nudge_max, const float* scale, bool symmetric, + cudaStream_t cuda_stream); + +void CalMinMaxPerChannel(float* input, float* input_min, float* input_max, const int total_num, const int channel_num, + const float ema_decay, const bool ema, cudaStream_t cuda_stream); + +void CalFakeQuantizePerChannelGrad(const float* input, const float* gradient, float* output, const int total_num, + const int channel_num, const float* nudge_min, const float* nudge_max, + cudaStream_t cuda_stream); + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_FAKEQUANTIZE_H_ diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/gather.cuh b/mindspore/ccsrc/kernel/gpu/cuda_impl/gather.cuh old mode 100755 new mode 100644 index dae2115a91..a2aab89fb1 --- a/mindspore/ccsrc/kernel/gpu/cuda_impl/gather.cuh +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/gather.cuh @@ -16,7 +16,7 @@ #ifndef MINDSPORE_GATHER_GPU_CU_H #define MINDSPORE_GATHER_GPU_CU_H -template +template void Gather(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1, size_t output_dim2, size_t input_dim1, cudaStream_t stream); diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/sparse_cross_entropy_cuda_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/sparse_cross_entropy_cuda_impl.cu new file mode 100755 index 0000000000..b549c5bd4e --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/sparse_cross_entropy_cuda_impl.cu @@ -0,0 +1,77 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "sparse_cross_entropy_cuda_impl.cuh" +#include "include/cuda_runtime.h" + +template +__global__ void CalCrossEntropyKernel(const float *logits, T *labels, const int batch_size, const int class_num, + float *loss) { + float total_loss = 0.0; + float epsilon = 1e-6; + for (int i = 0; i < batch_size; ++i) { + float logit = logits[i * class_num + labels[i]]; + if (logit <= 0) { + logit += epsilon; + } + float single_loss = -logf(logit); + total_loss += single_loss; + } + + total_loss /= batch_size; + loss[0] = total_loss; + return; +} + +template +__global__ void CalCrossEntropyGradKernel(const float *logits, T *labels, const int batch_size, const int class_num, + float *grad) { + for (int i = 0; i < batch_size; i++) { + for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < class_num; j += blockDim.x * gridDim.x) { + if (labels[i] == j) { + grad[i * class_num + j] = (logits[i * class_num + j] - 1) / batch_size; + } else { + grad[i * class_num + j] = logits[i * class_num + j] / batch_size; + } + } + } + return; +} + +template +void CalCrossEntropy(const float *logits, T *labels, const int batch_size, const int class_num, float *loss, + cudaStream_t cuda_stream) { + CalCrossEntropyKernel<<<1, 1, 0, cuda_stream>>>(logits, labels, batch_size, class_num, loss); + return; +} + +template +void CalCrossEntropyGrad(const float *logits, T *labels, const int batch_size, const int class_num, float *grad, + cudaStream_t cuda_stream) { + 
CalCrossEntropyGradKernel<<>>(logits, labels, batch_size, + class_num, grad); + return; +} + +template void CalCrossEntropy(const float *logits, int *labels, const int batch_size, const int class_num, + float *loss, cudaStream_t cuda_stream); +template void CalCrossEntropy(const float *logits, uint64_t *labels, const int batch_size, + const int class_num, float *loss, cudaStream_t cuda_stream); +template void CalCrossEntropyGrad(const float *logits, int *labels, const int batch_size, const int class_num, + float *grad, cudaStream_t cuda_stream); +template void CalCrossEntropyGrad(const float *logits, uint64_t *labels, const int batch_size, + const int class_num, float *grad, cudaStream_t cuda_stream); diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/sparse_cross_entropy_cuda_impl.cuh b/mindspore/ccsrc/kernel/gpu/cuda_impl/sparse_cross_entropy_cuda_impl.cuh new file mode 100755 index 0000000000..d16131470c --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/sparse_cross_entropy_cuda_impl.cuh @@ -0,0 +1,30 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPARSECROSSENTROPYCUDAIMPL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPARSECROSSENTROPYCUDAIMPL_H_ + +#include "device/gpu/cuda_common.h" + +template +void CalCrossEntropy(const float *logits, T *labels, const int batch_size, const int class_num, float *loss, + cudaStream_t cuda_stream); + +template +void CalCrossEntropyGrad(const float *logits, T *labels, const int batch_size, const int class_num, float *grad, + cudaStream_t cuda_stream); + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPARSECROSSENTROPYCUDAIMPL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc new file mode 100644 index 0000000000..eeec8365da --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc @@ -0,0 +1,101 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "kernel/gpu/nn/dropout_gpu_kernel.h" +#include "kernel/gpu/cuda_impl/dropout_impl.cuh" + +namespace mindspore { +namespace kernel { + +DropoutGpuFwdKernel::DropoutGpuFwdKernel() + : cudnn_handle_(nullptr), + is_null_input_(false), + num_count_(0), + drop_prob_(0.0), + states_init_(false), + mask_generator_(nullptr) {} + +DropoutGpuFwdKernel::~DropoutGpuFwdKernel() { DestroyResource(); } + +const std::vector &DropoutGpuFwdKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &DropoutGpuFwdKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &DropoutGpuFwdKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +bool DropoutGpuFwdKernel::Init(const CNodePtr &kernel_node) { + InitResource(); + + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 1) { + MS_LOG(EXCEPTION) << "Argument number is " << input_num << ", but DropoutGpuFwdKernel needs 1."; + } + + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + is_null_input_ = CHECK_NULL_INPUT(input_shape); + if (is_null_input_) { + InitSizeLists(); + return true; + } + + num_count_ = 1; + for (size_t x : input_shape) { + num_count_ *= x; + } + drop_prob_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("drop_prob")); + + InitSizeLists(); + return true; +} + +void DropoutGpuFwdKernel::InitResource() { + cudnn_handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); +} + +void DropoutGpuFwdKernel::DestroyResource() noexcept {} + +void DropoutGpuFwdKernel::InitSizeLists() { + size_t input_size = num_count_ * sizeof(float); + size_t workspace_size = 0; + input_size_list_.push_back(input_size); + output_size_list_.push_back(input_size); // output size: the same with input size + output_size_list_.push_back(input_size); // mask size: the same with input size + workspace_size_list_.push_back(workspace_size); +} + +bool DropoutGpuFwdKernel::Launch(const 
std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + if (is_null_input_) { + return true; + } + + auto *input = reinterpret_cast(inputs[0]->addr); + auto *output = reinterpret_cast(outputs[0]->addr); + auto *mask = reinterpret_cast(outputs[1]->addr); + + if (!states_init_) { + curandCreateGenerator(&mask_generator_, CURAND_RNG_PSEUDO_DEFAULT); + curandSetPseudoRandomGeneratorSeed(mask_generator_, time(NULL)); + states_init_ = true; + } + + curandGenerateUniform(mask_generator_, mask, num_count_); + DropoutForward(input, mask, output, num_count_, drop_prob_, reinterpret_cast(stream_ptr)); + + return true; +} +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.h new file mode 100644 index 0000000000..2b0d84a40c --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.h @@ -0,0 +1,67 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_NN_DROPOUT_GPU_KERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_NN_DROPOUT_GPU_KERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" +#include "include/curand.h" + +namespace mindspore { +namespace kernel { +class DropoutGpuFwdKernel : public GpuKernel { + public: + DropoutGpuFwdKernel(); + + ~DropoutGpuFwdKernel() override; + + const std::vector &GetInputSizeList() const override; + + const std::vector &GetOutputSizeList() const override; + + const std::vector &GetWorkspaceSizeList() const override; + + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) override; + + bool Init(const CNodePtr &kernel_node) override; + + protected: + void InitResource() override; + + void InitSizeLists() override; + + private: + void DestroyResource() noexcept; + + cudnnHandle_t cudnn_handle_; + bool is_null_input_; + size_t num_count_; + float drop_prob_; + bool states_init_; + curandGenerator_t mask_generator_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +MS_REG_GPU_KERNEL(Dropout, DropoutGpuFwdKernel) +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_NN_DROPOUT_GPU_KERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/nn/dropout_grad_kernel.cc b/mindspore/ccsrc/kernel/gpu/nn/dropout_grad_kernel.cc new file mode 100644 index 0000000000..42c3d279c4 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/nn/dropout_grad_kernel.cc @@ -0,0 +1,92 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/nn/dropout_grad_kernel.h" +#include "kernel/gpu/cuda_impl/dropout_impl.cuh" + +namespace mindspore { +namespace kernel { +DropoutGradGpuFwdKernel::DropoutGradGpuFwdKernel() + : cudnn_handle_(nullptr), is_null_input_(false), num_count_(0), drop_prob_(0.0) {} + +DropoutGradGpuFwdKernel::~DropoutGradGpuFwdKernel() { DestroyResource(); } + +const std::vector &DropoutGradGpuFwdKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &DropoutGradGpuFwdKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &DropoutGradGpuFwdKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +bool DropoutGradGpuFwdKernel::Init(const CNodePtr &kernel_node) { + InitResource(); + + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 2) { + MS_LOG(ERROR) << "Argument number is " << input_num << ", but DropoutGradGpuFwdKernel needs 2."; + return false; + } + + auto input_shape = AnfAlgo::GetOutputInferShape(kernel_node, 0); + is_null_input_ = CHECK_NULL_INPUT(input_shape); + if (is_null_input_) { + InitSizeLists(); + return true; + } + + num_count_ = 1; + for (size_t x : input_shape) { + num_count_ *= x; + } + drop_prob_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("drop_prob")); + + InitSizeLists(); + return true; +} + +void DropoutGradGpuFwdKernel::InitResource() { + cudnn_handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); +} + +void DropoutGradGpuFwdKernel::DestroyResource() noexcept {} + +void 
DropoutGradGpuFwdKernel::InitSizeLists() { + size_t dy_size = num_count_ * sizeof(float); + size_t mask_size = dy_size; + size_t dx_size = dy_size; + size_t workspace_size = 0; + + input_size_list_.push_back(dy_size); + input_size_list_.push_back(mask_size); + output_size_list_.push_back(dx_size); + workspace_size_list_.push_back(workspace_size); +} + +bool DropoutGradGpuFwdKernel::Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + if (is_null_input_) { + return true; + } + + auto *dy = reinterpret_cast(inputs[0]->addr); + auto *mask = reinterpret_cast(inputs[1]->addr); + auto *dx = reinterpret_cast(outputs[0]->addr); + + DropoutBackward(dy, mask, dx, num_count_, drop_prob_, reinterpret_cast(stream_ptr)); + + return true; +} +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/nn/dropout_grad_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/dropout_grad_kernel.h new file mode 100644 index 0000000000..b59b5d2670 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/nn/dropout_grad_kernel.h @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_NN_DROPOUT_GRAD_KERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_NN_DROPOUT_GRAD_KERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class DropoutGradGpuFwdKernel : public GpuKernel { + public: + DropoutGradGpuFwdKernel(); + ~DropoutGradGpuFwdKernel() override; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) override; + bool Init(const CNodePtr &kernel_node) override; + + protected: + void InitResource() override; + void InitSizeLists() override; + + private: + void DestroyResource() noexcept; + + cudnnHandle_t cudnn_handle_; + bool is_null_input_; + size_t num_count_; + float drop_prob_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +MS_REG_GPU_KERNEL(DropoutGrad, DropoutGradGpuFwdKernel) +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_NN_DROPOUT_GRAD_KERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.cc new file mode 100644 index 0000000000..a95c1b78dd --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.cc @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h" + +namespace mindspore { +namespace kernel { + +MS_REG_GPU_KERNEL_ONE(BatchNormFold2, + KernelAttr() + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeInt32) + .AddOutputAttr(kNumberTypeFloat32), + BatchNormFold2GpuKernel, float) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h new file mode 100644 index 0000000000..ada4eabd86 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h @@ -0,0 +1,139 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_NN_BATCHNORMFOLD2_GPU_KERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_NN_BATCHNORMFOLD2_GPU_KERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" +#include "kernel/gpu/cuda_impl/batchnorm_fold2_impl.cuh" + +namespace mindspore { +namespace kernel { +template +class BatchNormFold2GpuKernel : public GpuKernel { + public: + BatchNormFold2GpuKernel() + : cudnn_handle_(nullptr), + is_null_input_(false), + batch_size_(0), + channel_(0), + height_(0), + width_(0), + freeze_bn_(0) {} + + ~BatchNormFold2GpuKernel() override { DestroyResource(); } + + const std::vector &GetInputSizeList() const { return input_size_list_; } + + const std::vector &GetOutputSizeList() const { return output_size_list_; } + + const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + if (is_null_input_) { + return true; + } + + auto *input = GetDeviceAddress(inputs, 0); + auto *beta = GetDeviceAddress(inputs, 1); + auto *gamma = GetDeviceAddress(inputs, 2); + auto *batch_std = GetDeviceAddress(inputs, 3); + auto *batch_mean = GetDeviceAddress(inputs, 4); + auto *running_std = GetDeviceAddress(inputs, 5); + auto *running_mean = GetDeviceAddress(inputs, 6); + auto *global_step = GetDeviceAddress(inputs, 7); + auto *output = GetDeviceAddress(outputs, 0); + + BatchNormFold2Forward(input, beta, gamma, batch_std, batch_mean, running_std, running_mean, global_step, output, + freeze_bn_, batch_size_, channel_, height_, width_, + reinterpret_cast(stream_ptr)); + return true; + } + + bool Init(const CNodePtr &kernel_node) { + InitResource(); + + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 8) { + MS_LOG(ERROR) << "Argument number is " << input_num << ", but BatchNormFold2GpuKernel needs 8."; + return false; + } + + auto input_shape = 
AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + is_null_input_ = CHECK_NULL_INPUT(input_shape); + if (is_null_input_) { + MS_LOG(WARNING) << "BatchNormFold2GpuKernel input is null"; + InitSizeLists(); + return true; + } + + if (input_shape.size() != 4) { + MS_LOG(ERROR) << "BatchNormFold2GpuKernel input shape needs (N,C,H,W)."; + return false; + } + batch_size_ = input_shape[0]; + channel_ = input_shape[1]; + height_ = input_shape[2]; + width_ = input_shape[3]; + freeze_bn_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("freeze_bn")); + + InitSizeLists(); + return true; + } + + protected: + void InitResource() { cudnn_handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); } + + void InitSizeLists() { + size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); + size_t weight_size = channel_ * sizeof(T); + input_size_list_.push_back(input_size); + input_size_list_.push_back(weight_size); // beta + input_size_list_.push_back(weight_size); // gamma + input_size_list_.push_back(weight_size); // batch_std + input_size_list_.push_back(weight_size); // batch_mean + input_size_list_.push_back(weight_size); // running_std + input_size_list_.push_back(weight_size); // running_mean + input_size_list_.push_back(sizeof(int32_t)); // global_step + + output_size_list_.push_back(input_size); + + size_t workspace_size = 0; + workspace_size_list_.push_back(workspace_size); + } + + private: + void DestroyResource() noexcept {} + + cudnnHandle_t cudnn_handle_; + bool is_null_input_; + size_t batch_size_; + size_t channel_; + size_t height_; + size_t width_; + size_t freeze_bn_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; + +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_NN_BATCHNORMFOLD2_GPU_KERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.cc 
b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.cc new file mode 100644 index 0000000000..d5932f1984 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.cc @@ -0,0 +1,39 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.h" + +namespace mindspore { +namespace kernel { + +MS_REG_GPU_KERNEL_ONE(BatchNormFold2Grad, + KernelAttr() + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeInt32) + .AddOutputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32), + BatchNormFold2GradGpuKernel, float) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.h new file mode 100644 index 0000000000..ef9611f258 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.h @@ -0,0 +1,167 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_NN_BATCHNORMFOLD2_GRAD_GPU_KERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_NN_BATCHNORMFOLD2_GRAD_GPU_KERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" +#include "kernel/gpu/cuda_impl/batchnorm_fold2_impl.cuh" + +namespace mindspore { +namespace kernel { +template +class BatchNormFold2GradGpuKernel : public GpuKernel { + public: + BatchNormFold2GradGpuKernel() + : cudnn_handle_(nullptr), + is_null_input_(false), + batch_size_(0), + channel_(0), + height_(0), + width_(0), + freeze_bn_(0) {} + + ~BatchNormFold2GradGpuKernel() override { DestroyResource(); } + + const std::vector &GetInputSizeList() const { return input_size_list_; } + + const std::vector &GetOutputSizeList() const { return output_size_list_; } + + const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + if (is_null_input_) { + return true; + } + + auto *dout = GetDeviceAddress(inputs, 0); + auto *x = GetDeviceAddress(inputs, 1); + auto *gamma = GetDeviceAddress(inputs, 2); + auto *batch_std = GetDeviceAddress(inputs, 3); + auto *batch_mean = GetDeviceAddress(inputs, 4); + auto *running_std = GetDeviceAddress(inputs, 5); + auto *running_mean = GetDeviceAddress(inputs, 6); + auto *global_step = GetDeviceAddress(inputs, 7); + auto *d_batch_std = 
GetDeviceAddress(outputs, 0); + auto *d_batch_mean = GetDeviceAddress(outputs, 1); + auto *d_beta = GetDeviceAddress(outputs, 2); + auto *d_gamma = GetDeviceAddress(outputs, 3); + auto *d_x = GetDeviceAddress(outputs, 4); + auto *tmp = GetDeviceAddress(workspace, 0); + auto *tmp2 = GetDeviceAddress(workspace, 1); + auto *reduce_x = GetDeviceAddress(workspace, 2); + auto *tmp_x = GetDeviceAddress(workspace, 3); + + int32_t current_step_host[1]; + size_t x_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(current_step_host, global_step, sizeof(int32_t), cudaMemcpyDeviceToHost), + "Failed to copy gpu memory."); + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(d_x, dout, x_size, cudaMemcpyDeviceToDevice), "Failed to copy gpu memory."); + + BatchNormFold2GradReduce(dout, x, d_beta, tmp, reduce_x, tmp2, tmp_x, batch_size_, channel_, height_, width_, + reinterpret_cast(stream_ptr)); + if (current_step_host[0] < freeze_bn_) { + CalBatchNormFold2GradNotFreezeDxMul(batch_std, running_std, d_x, batch_size_, channel_, height_, width_, + reinterpret_cast(stream_ptr)); + CalBatchNormFold2GradNotFreeze(d_beta, reduce_x, batch_mean, batch_std, running_mean, running_std, gamma, d_gamma, + d_batch_mean, d_batch_std, channel_, reinterpret_cast(stream_ptr)); + } else { + CalBatchNormFold2GradFreeze(d_beta, reduce_x, batch_mean, batch_std, running_mean, running_std, gamma, d_gamma, + d_batch_mean, d_batch_std, channel_, reinterpret_cast(stream_ptr)); + } + return true; + } + + bool Init(const CNodePtr &kernel_node) { + InitResource(); + + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 8) { + MS_LOG(ERROR) << "Argument number is " << input_num << ", but BatchNormFold2GradGpuKernel needs 8."; + return false; + } + + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + is_null_input_ = CHECK_NULL_INPUT(input_shape); + if (is_null_input_) { + MS_LOG(WARNING) << "BatchNormFold2GradGpuKernel input 
is null"; + InitSizeLists(); + return true; + } + + if (input_shape.size() != 4) { + MS_LOG(ERROR) << "BatchNormFold2GradGpuKernel input shape needs (N,C,H,W)."; + return false; + } + batch_size_ = input_shape[0]; + channel_ = input_shape[1]; + height_ = input_shape[2]; + width_ = input_shape[3]; + freeze_bn_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("freeze_bn")); + + InitSizeLists(); + return true; + } + + protected: + void InitResource() { cudnn_handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); } + + void InitSizeLists() { + size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); + size_t weight_size = channel_ * sizeof(T); + size_t workspace_size = batch_size_ * channel_ * sizeof(T); + input_size_list_.push_back(input_size); // dout + input_size_list_.push_back(input_size); // x + input_size_list_.push_back(weight_size); // gamma + input_size_list_.push_back(weight_size); // batch_std + input_size_list_.push_back(weight_size); // batch_mean + input_size_list_.push_back(weight_size); // running_std + input_size_list_.push_back(weight_size); // running_mean + input_size_list_.push_back(sizeof(int32_t)); // global_step + + output_size_list_.push_back(weight_size); // d_batch_std + output_size_list_.push_back(weight_size); // d_batch_mean + output_size_list_.push_back(weight_size); // d_beta + output_size_list_.push_back(weight_size); // d_gamma + output_size_list_.push_back(input_size); // d_x + + workspace_size_list_.push_back(workspace_size); // tmp + workspace_size_list_.push_back(workspace_size); // tmp2 + workspace_size_list_.push_back(weight_size); // reduce_x + workspace_size_list_.push_back(input_size); // tmp_x + } + + private: + void DestroyResource() noexcept {} + + cudnnHandle_t cudnn_handle_; + bool is_null_input_; + size_t batch_size_; + size_t channel_; + size_t height_; + size_t width_; + int32_t freeze_bn_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector 
workspace_size_list_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_NN_BATCHNORMFOLD2_GRAD_GPU_KERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.cc new file mode 100644 index 0000000000..b5fbfe4927 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.cc @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "kernel/gpu/quant/batchnorm_fold_gpu_kernel.h" + +namespace mindspore { +namespace kernel { + +MS_REG_GPU_KERNEL_ONE(BatchNormFold, + KernelAttr() + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeInt32) + .AddOutputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32), + BatchNormFoldGpuKernel, float) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.h new file mode 100644 index 0000000000..e90fac2792 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.h @@ -0,0 +1,208 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_BATCHNORM_FOLD_GPUKERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_BATCHNORM_FOLD_GPUKERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" +#include "kernel/gpu/kernel_constants.h" +#include "kernel/gpu/cuda_impl/batchnorm_fold_impl.cuh" + +namespace mindspore { +namespace kernel { +template +class BatchNormFoldGpuKernel : public GpuKernel { + public: + BatchNormFoldGpuKernel() + : input_size_(0), + output_size_(0), + exp_avg_factor_(0.9), + epsilon_(1e-12), + is_training_(true), + freeze_bn_(0), + batch_(0), + channel_(0), + height_(0), + width_(0), + mode_(CUDNN_BATCHNORM_SPATIAL), + x_desc_(nullptr), + scale_bias_mean_var_desc_(nullptr), + handle_(nullptr) {} + + ~BatchNormFoldGpuKernel() override { DestroyResource(); } + + const std::vector &GetInputSizeList() const { return input_size_list_; } + + const std::vector &GetOutputSizeList() const { return output_size_list_; } + + const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + (void)workspace; + auto x = reinterpret_cast(inputs[0]->addr); + auto mean = reinterpret_cast(inputs[1]->addr); + auto variance = reinterpret_cast(inputs[2]->addr); + int *current_step = reinterpret_cast(inputs[3]->addr); + int current_step_host[1]; + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(current_step_host, current_step, sizeof(int), cudaMemcpyDeviceToHost), + "Copy gpu memoy failed."); + if (x == nullptr) { + MS_LOG(ERROR) << "BatchNormFoldGpuKernel x is null."; + return false; + } + if (mean == nullptr) { + MS_LOG(ERROR) << "BatchNormFoldGpuKernel mean is null."; + return false; + } + if (variance == nullptr) { + MS_LOG(ERROR) << "BatchNormFoldGpuKernel variance is null."; + return false; + } + if (current_step == nullptr) { + MS_LOG(ERROR) << "BatchNormFoldGpuKernel current_step is null."; + 
return false; + } + auto batch_mean = reinterpret_cast(outputs[0]->addr); + auto batch_std = reinterpret_cast(outputs[1]->addr); + auto running_mean = reinterpret_cast(outputs[2]->addr); + auto running_std = reinterpret_cast(outputs[3]->addr); + auto y = reinterpret_cast(workspace[0]->addr); + + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(running_mean, mean, output_size_, cudaMemcpyDeviceToDevice), + "Failed to copy gpu memory."); + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(running_std, variance, output_size_, cudaMemcpyDeviceToDevice), + "Failed to copy gpu memory."); + CalUpdateRunningStd(channel_, epsilon_, running_std, reinterpret_cast(stream_ptr)); + if (!is_training_ || current_step_host[0] >= freeze_bn_) { + CHECK_CUDA_RET_WITH_ERROR(cudaMemset(batch_mean, 0, output_size_), "Failed to set gpu memory."); + ThrustFillWith(batch_std, channel_, 1.f, reinterpret_cast(stream_ptr)); + return true; + } + const T alpha = 1; + const T beta = 0; + CHECK_CUDNN_RET_WITH_EXCEPT(cudnnBatchNormalizationForwardTraining( + handle_, mode_, &alpha, &beta, x_desc_, x, x_desc_, y, scale_bias_mean_var_desc_, + mean, mean, exp_avg_factor_, mean, variance, epsilon_, batch_mean, batch_std), + "Failed to launch kernel.") + CalUpdateBatchStd(channel_, batch_std, reinterpret_cast(stream_ptr)); + return true; + } + + bool Init(const CNodePtr &kernel_node) { + InitResource(); + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 4) { + MS_LOG(ERROR) << "Input number is " << input_num << " but BatchNormFold GpuKernel OP needs 4 input."; + return false; + } + + size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node); + if (output_num != 4) { + MS_LOG(ERROR) << "Output number is " << output_num << ", but BatchNormFold GpuKernel OP needs 4 output."; + return false; + } + + T momentum = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("momentum")); + exp_avg_factor_ = 1.0 - momentum; + epsilon_ = 
GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("epsilon")); + is_training_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("is_training")); + freeze_bn_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("freeze_bn")); + + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + if (input_shape.size() != 4) { + MS_LOG(ERROR) << "Input shape is " << input_shape.size() + << ", but BatchNormFold GpuKernel OP needs 4DTensor input."; + return false; + } + batch_ = input_shape[0]; + channel_ = input_shape[1]; + height_ = input_shape[2]; + width_ = input_shape[3]; + + input_size_ = sizeof(T) * batch_ * channel_ * height_ * width_; + output_size_ = sizeof(T) * channel_; + + cudnnDataType_t cudnnDataType = kCudnnDtypeMap[TypeIdLabel(AnfAlgo::GetInputDeviceDataType(kernel_node, 0))]; + CHECK_CUDNN_RET_WITH_EXCEPT( + cudnnSetTensor4dDescriptor(x_desc_, CUDNN_TENSOR_NCHW, cudnnDataType, batch_, channel_, height_, width_), + "Set x desc failed"); + + CHECK_CUDNN_RET_WITH_EXCEPT( + cudnnSetTensor4dDescriptor(scale_bias_mean_var_desc_, CUDNN_TENSOR_NCHW, cudnnDataType, 1, channel_, 1, 1), + "Set para desc failed"); + + InitSizeLists(); + return true; + } + + protected: + void InitSizeLists() { + // x, mean, variance, current_step + input_size_list_.push_back(input_size_); + input_size_list_.push_back(output_size_); + input_size_list_.push_back(output_size_); + input_size_list_.push_back(sizeof(int)); + + // batch_mean, batch_std, running_mean, running_std + output_size_list_.push_back(output_size_); + output_size_list_.push_back(output_size_); + output_size_list_.push_back(output_size_); + output_size_list_.push_back(output_size_); + + // store y + workspace_size_list_.push_back(input_size_); + } + + void InitResource() { + handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); + CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&x_desc_), "Create x desc failed"); + 
CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&scale_bias_mean_var_desc_), "Create para desc failed"); + } + + private: + void DestroyResource() noexcept { + CHECK_CUDNN_RET_WITH_ERROR(cudnnDestroyTensorDescriptor(x_desc_), "Destroy x desc failed"); + CHECK_CUDNN_RET_WITH_ERROR(cudnnDestroyTensorDescriptor(scale_bias_mean_var_desc_), "Destroy para desc failed"); + } + + size_t input_size_; + size_t output_size_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; + + double exp_avg_factor_; + double epsilon_; + bool is_training_; + int freeze_bn_; + int batch_; + int channel_; + int height_; + int width_; + + cudnnBatchNormMode_t mode_; + cudnnTensorDescriptor_t x_desc_; + cudnnTensorDescriptor_t scale_bias_mean_var_desc_; + + cudnnHandle_t handle_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_BATCHNORM_FOLD_GPUKERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.cc new file mode 100644 index 0000000000..93ea66258d --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.cc @@ -0,0 +1,32 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.h" + +namespace mindspore { +namespace kernel { +MS_REG_GPU_KERNEL_ONE(BatchNormFoldGrad, + KernelAttr() + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeInt32) + .AddOutputAttr(kNumberTypeFloat32), + BatchNormFoldGradGpuKernel, float) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.h new file mode 100644 index 0000000000..830f6dc243 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.h @@ -0,0 +1,167 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_BATCHNORM_FOLD_GRAD_GPUKERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_BATCHNORM_FOLD_GRAD_GPUKERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" +#include "kernel/gpu/cuda_impl/batchnorm_fold_impl.cuh" + +namespace mindspore { +namespace kernel { +template +class BatchNormFoldGradGpuKernel : public GpuKernel { + public: + BatchNormFoldGradGpuKernel() + : input_size_(0), + channel_size_(0), + workspace_size_(0), + momentum_(0.1), + epsilon_(1e-12), + is_training_(true), + freeze_bn_(0), + current_step_(0), + batch_(0), + channel_(0), + height_(0), + width_(0) {} + ~BatchNormFoldGradGpuKernel() = default; + + const std::vector &GetInputSizeList() const { return input_size_list_; } + const std::vector &GetOutputSizeList() const { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + (void)workspace; + // 'd_batch_mean', 'd_batch_std', 'x', 'batch_mean', 'batch_std', 'current_step' + T *d_batch_mean = GetDeviceAddress(inputs, 0); + T *d_batch_std = GetDeviceAddress(inputs, 1); + T *x = GetDeviceAddress(inputs, 2); + T *batch_mean = GetDeviceAddress(inputs, 3); + T *batch_std = GetDeviceAddress(inputs, 4); + int *current_step = GetDeviceAddress(inputs, 5); + int current_step_host[1]; + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(current_step_host, current_step, sizeof(int), cudaMemcpyDeviceToHost), + "Copy gpu memoy failed."); + if (d_batch_mean == nullptr) { + MS_LOG(ERROR) << "BatchNormFoldGradGpuKernel d_batch_mean is null."; + return false; + } + if (d_batch_std == nullptr) { + MS_LOG(ERROR) << "BatchNormFoldGradGpuKernel d_batch_std is null."; + return false; + } + if (x == nullptr) { + MS_LOG(ERROR) << "BatchNormFoldGradGpuKernel x is null."; + return false; + } + if (batch_mean == nullptr) { + 
MS_LOG(ERROR) << "BatchNormFoldGradGpuKernel batch_mean is null."; + return false; + } + if (batch_std == nullptr) { + MS_LOG(ERROR) << "BatchNormFoldGradGpuKernel batch_std is null."; + return false; + } + if (current_step == nullptr) { + MS_LOG(ERROR) << "BatchNormFoldGradGpuKernel current_step is null."; + return false; + } + T *dx = reinterpret_cast(outputs[0]->addr); + + if (!is_training_ || current_step_host[0] >= freeze_bn_) { + ThrustFillWith(dx, batch_ * channel_ * height_ * width_, 0.f, reinterpret_cast(stream_ptr)); + return true; + } + CalBatchNormFoldGrad(d_batch_mean, d_batch_std, x, batch_mean, batch_std, batch_, channel_, height_, width_, dx, + reinterpret_cast(stream_ptr)); + return true; + } + bool Init(const CNodePtr &kernel_node) { + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 6) { + MS_LOG(ERROR) << "Input number is " << input_num << ", but BatchNormFoldGrad GpuKernel OP needs 6 input."; + return false; + } + + size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node); + if (output_num != 1) { + MS_LOG(ERROR) << "Output number is " << output_num << ", but BatchNormFoldGrad GpuKernel OP needs 4 output."; + return false; + } + + epsilon_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("epsilon")); + is_training_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("is_training")); + freeze_bn_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("freeze_bn")); + + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2); + if (input_shape.size() != 4) { + MS_LOG(ERROR) << "Input shape is " << input_shape.size() + << ", but BatchNormFoldGrad GpuKernel OP needs 4DTensor input."; + return false; + } + batch_ = input_shape[0]; + channel_ = input_shape[1]; + height_ = input_shape[2]; + width_ = input_shape[3]; + + input_size_ = sizeof(T) * batch_ * channel_ * height_ * width_; + channel_size_ = sizeof(T) * channel_; + + InitSizeLists(); + return true; + } + + protected: + 
void InitSizeLists() { + // 'd_batch_mean', 'd_batch_std', 'x', 'batch_mean', 'batch_std', 'current_step' + input_size_list_.push_back(channel_size_); + input_size_list_.push_back(channel_size_); + input_size_list_.push_back(input_size_); + input_size_list_.push_back(channel_size_); + input_size_list_.push_back(channel_size_); + input_size_list_.push_back(sizeof(int)); + + // 'dx' + output_size_list_.push_back(input_size_); + + workspace_size_list_.push_back(workspace_size_); + } + + private: + size_t input_size_; + size_t channel_size_; + size_t workspace_size_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; + + T momentum_; + T epsilon_; + bool is_training_; + int freeze_bn_; + int current_step_; + int batch_; + int channel_; + int height_; + int width_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_BATCHNORM_FOLD_GRAD_GPUKERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.cc new file mode 100644 index 0000000000..a914b6ec14 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.cc @@ -0,0 +1,29 @@ +/** + * Copyright 2020、 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "kernel/gpu/quant/correction_mul_gpu_kernel.h" + +namespace mindspore { +namespace kernel { +MS_REG_GPU_KERNEL_ONE(CorrectionMul, + KernelAttr() + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32), + CorrectionMulGpuKernel, float) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h new file mode 100644 index 0000000000..af23d7732a --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h @@ -0,0 +1,98 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CORRECTIONMUL_GPUKERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CORRECTIONMUL_GPUKERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" +#include "kernel/gpu/cuda_impl/correction_mul_impl.cuh" + +namespace mindspore { +namespace kernel { +template +class CorrectionMulGpuKernel : public GpuKernel { + public: + CorrectionMulGpuKernel() : batch_size_(0), channel_(0), height_(0), width_(0) {} + ~CorrectionMulGpuKernel() override { DestroyResource(); } + + const std::vector &GetInputSizeList() const { return input_size_list_; } + const std::vector &GetOutputSizeList() const { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + auto *weight = GetDeviceAddress(inputs, 0); + auto *gamma = GetDeviceAddress(inputs, 1); + auto *running_std = GetDeviceAddress(inputs, 2); + auto *output = GetDeviceAddress(outputs, 0); + + CalCorrectionMul(weight, gamma, running_std, batch_size_, channel_, height_, width_, output, + reinterpret_cast(stream_ptr)); + return true; + } + bool Init(const CNodePtr &kernel_node) { + InitResource(); + + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 3) { + MS_LOG(ERROR) << "Argument number is " << input_num << ", but CorrectionMulGpuKernel needs 3."; + return false; + } + + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + + if (input_shape.size() != 4) { + MS_LOG(ERROR) << "CorrectionMulGpuKernel input shape needs (N,C,H,W)."; + return false; + } + batch_size_ = input_shape[0]; + channel_ = input_shape[1]; + height_ = input_shape[2]; + width_ = input_shape[3]; + + InitSizeLists(); + return true; + } + + protected: + void InitSizeLists() { + size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); + size_t 
weight_size = batch_size_ * sizeof(T); + input_size_list_.push_back(input_size); // weight + input_size_list_.push_back(weight_size); // gamma + input_size_list_.push_back(weight_size); // running_std + size_t workspace_size = 0; + output_size_list_.push_back(input_size); + workspace_size_list_.push_back(workspace_size); + } + void InitResource() {} + + private: + void DestroyResource() noexcept {} + + size_t batch_size_; + size_t channel_; + size_t height_; + size_t width_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CORRECTIONMUL_GPUKERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.cc new file mode 100644 index 0000000000..211c515e02 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.cc @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "kernel/gpu/quant/correction_mul_grad_gpu_kernel.h" +#include "kernel/gpu/cuda_impl/correction_mul_impl.cuh" + +namespace mindspore { +namespace kernel { + +MS_REG_GPU_KERNEL_ONE(CorrectionMulGrad, + KernelAttr() + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32), + CorrectionMulGradGpuKernel, float) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h new file mode 100644 index 0000000000..f20c6278c0 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h @@ -0,0 +1,104 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CORRECTIONMULGRAD_GPUKERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CORRECTIONMULGRAD_GPUKERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" +#include "kernel/gpu/cuda_impl/correction_mul_impl.cuh" + +namespace mindspore { +namespace kernel { +template +class CorrectionMulGradGpuKernel : public GpuKernel { + public: + CorrectionMulGradGpuKernel() : batch_size_(0), channel_(0), height_(0), width_(0) {} + ~CorrectionMulGradGpuKernel() override { DestroyResource(); } + + const std::vector &GetInputSizeList() const { return input_size_list_; } + const std::vector &GetOutputSizeList() const { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + auto *d_out = GetDeviceAddress(inputs, 0); + auto *weight = GetDeviceAddress(inputs, 1); + auto *gamma = GetDeviceAddress(inputs, 2); + auto *running_std = GetDeviceAddress(inputs, 3); + auto *d_weight = GetDeviceAddress(outputs, 0); + auto *d_gamma = GetDeviceAddress(outputs, 1); + auto *tmp = GetDeviceAddress(workspace, 0); + + CalCorrectionMul(d_out, gamma, running_std, batch_size_, channel_, height_, width_, d_weight, + reinterpret_cast(stream_ptr)); + CalCorrectionMulGrad(d_out, weight, running_std, batch_size_, channel_, height_, width_, d_gamma, tmp, + reinterpret_cast(stream_ptr)); + return true; + } + bool Init(const CNodePtr &kernel_node) { + InitResource(); + + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 4) { + MS_LOG(ERROR) << "Argument number is " << input_num << ", but CorrectionMulGradGpuKernel needs 4."; + return false; + } + + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + + if (input_shape.size() != 4) { + MS_LOG(ERROR) << "CorrectionMulGradGpuKernel input shape needs 
(N,C,H,W)."; + return false; + } + batch_size_ = input_shape[0]; + channel_ = input_shape[1]; + height_ = input_shape[2]; + width_ = input_shape[3]; + + InitSizeLists(); + return true; + } + + protected: + void InitSizeLists() { + size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); + size_t weight_size = batch_size_ * sizeof(T); + input_size_list_.push_back(input_size); // d_out + input_size_list_.push_back(input_size); // weight + input_size_list_.push_back(weight_size); // gamma + input_size_list_.push_back(weight_size); // running_std + output_size_list_.push_back(input_size); // d_weight + output_size_list_.push_back(weight_size); // d_gamma + workspace_size_list_.push_back(input_size); // tmp d_out * weight + } + void InitResource() {} + + private: + void DestroyResource() noexcept {} + + size_t batch_size_; + size_t channel_; + size_t height_; + size_t width_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CORRECTIONMULGRAD_GPUKERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_gpu_kernel.cc new file mode 100644 index 0000000000..f4e2c74aac --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_gpu_kernel.cc @@ -0,0 +1,176 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/quant/fake_quant_gpu_kernel.h" +#include "kernel/gpu/cuda_impl/fake_quant_impl.cuh" +#include +#include +#include +#include + +namespace mindspore { +namespace kernel { +FakeQuantGpuKernel::FakeQuantGpuKernel() + : input_size_(0), + min_size_(0), + max_size_(0), + output_size_(0), + workspace_size_(0), + num_bits_(0), + quant_min_(0), + quant_max_(0), + quant_num_(0), + quant_delay_(0), + ema_(false), + ema_decay_(0), + global_step_(0), + training_(false), + narrow_range_(false), + symmetric_(false) {} + +const std::vector &FakeQuantGpuKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &FakeQuantGpuKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &FakeQuantGpuKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +bool FakeQuantGpuKernel::Init(const CNodePtr &kernel_node) { + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 3) { + MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but FakeQuant GpuKernel OP needs 3 output."; + } + + size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node); + if (output_num != 1) { + MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but FakeQuant GpuKernel OP needs 1 output."; + } + + num_bits_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("num_bits")); + ema_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("ema")); + ema_decay_ = 1.0 - GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("ema_decay")); + training_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("training")); + + if (num_bits_ <= 2 || num_bits_ >= 16) { + MS_LOG(EXCEPTION) << "Attr \'num_bits\' " << num_bits_ << " is out of range, expected between 2 and 16."; + } + + quant_delay_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("quant_delay")); 
+ if (quant_delay_ < 0) { + MS_LOG(EXCEPTION) << "Attr \'quant_delay\' " << num_bits_ << "is less then 0, require larger than 0."; + } + + symmetric_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("symmetric")); + if (symmetric_) { + quant_min_ = 0 - (1 << (num_bits_ - 1)); + quant_max_ = (1 << (num_bits_ - 1)) - 1; + } else { + quant_min_ = 0; + quant_max_ = (1 << num_bits_) - 1; + } + + narrow_range_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("narrow_range")); + if (narrow_range_) { + quant_min_++; + } + + if (quant_num_ == 0) { + quant_num_ = 1; + } + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + for (size_t i = 0; i < input_shape.size(); ++i) { + quant_num_ *= SizeToInt(input_shape[i]); + } + + input_size_ = sizeof(float); + min_size_ = sizeof(float); + max_size_ = sizeof(float); + for (size_t i = 0; i < input_shape.size(); i++) { + input_size_ *= input_shape[i]; + } + output_size_ = input_size_; + InitSizeLists(); + return true; +} + +void FakeQuantGpuKernel::InitSizeLists() { + input_size_list_.push_back(input_size_); // input + input_size_list_.push_back(min_size_); // min + input_size_list_.push_back(max_size_); // max + output_size_list_.push_back(output_size_); + workspace_size_list_.push_back(workspace_size_); +} + +bool FakeQuantGpuKernel::Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + (void)workspace; + float *output = GetDeviceAddress(outputs, 0); + float *input = GetDeviceAddress(inputs, 0); + float *input_min = GetDeviceAddress(inputs, 1); + float *input_max = GetDeviceAddress(inputs, 2); + + if (input == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantGpuKernel input x is null."; + } + if (input_min == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantGpuKernel input min is null."; + } + if (input_max == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantGpuKernel input max is null."; + } + + // Allocate space for device copies + int 
size = sizeof(float); + float *d_scale = nullptr; + float *d_nudge_min = nullptr; + float *d_nudge_max = nullptr; + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_scale), size), "Malloc gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_nudge_min), size), "Malloc gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_nudge_max), size), "Malloc gpu memory failed"); + + if (training_) { + // calculate the input min and max according by the parameter ema and ema_decay. + CalMinMax(input, input_min, input_max, quant_num_, ema_decay_, ema_, reinterpret_cast(stream_ptr)); + // control flow for quant_delay + if (global_step_ >= quant_delay_) { + // real launch + CalNudge(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, + reinterpret_cast(stream_ptr)); + CalFakeQuantize(input, output, quant_num_, d_nudge_min, d_nudge_max, d_scale, symmetric_, + reinterpret_cast(stream_ptr)); + } else { + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(output, input, input_size_, cudaMemcpyDeviceToDevice), + "Copy gpu memory failed"); + } + global_step_++; + } else { + // real launch + CalNudge(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, + reinterpret_cast(stream_ptr)); + CalFakeQuantize(input, output, quant_num_, d_nudge_min, d_nudge_max, d_scale, symmetric_, + reinterpret_cast(stream_ptr)); + } + + // Cleanup + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_scale), "Free gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_nudge_min), "Free gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_nudge_max), "Free gpu memory failed"); + + return true; +} + +MS_REG_GPU_KERNEL(FakeQuantWithMinMax, FakeQuantGpuKernel) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_gpu_kernel.h new file mode 100755 index 0000000000..b14268ed62 --- /dev/null +++ 
b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_gpu_kernel.h @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_GPUKERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_GPUKERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class FakeQuantGpuKernel : public GpuKernel { + public: + FakeQuantGpuKernel(); + ~FakeQuantGpuKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) override; + bool Init(const CNodePtr &kernel) override; + + protected: + void InitSizeLists() override; + + private: + size_t input_size_; + size_t min_size_; + size_t max_size_; + size_t output_size_; + size_t workspace_size_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; + + int num_bits_; + float quant_min_; + float quant_max_; + int quant_num_; + int quant_delay_; + bool ema_; + float ema_decay_; + int global_step_; + bool training_; + bool narrow_range_; + bool symmetric_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // 
MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_GPUKERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_grad_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_grad_gpu_kernel.cc new file mode 100644 index 0000000000..4746e8e8e0 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_grad_gpu_kernel.cc @@ -0,0 +1,145 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/quant/fake_quant_grad_gpu_kernel.h" +#include "kernel/gpu/cuda_impl/fake_quant_impl.cuh" + +namespace mindspore { +namespace kernel { +FakeQuantGradGpuKernel::FakeQuantGradGpuKernel() + : input_size_(0), + min_size_(0), + max_size_(0), + output_size_(0), + workspace_size_(0), + num_bits_(0), + quant_min_(0), + quant_max_(0), + quant_size_(0), + quant_delay_(0), + global_step_(0) {} + +const std::vector &FakeQuantGradGpuKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &FakeQuantGradGpuKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &FakeQuantGradGpuKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +bool FakeQuantGradGpuKernel::Init(const CNodePtr &kernel_node) { + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 4) { + MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but FakeQuantGrad GpuKernel OP needs 4 output."; + } + + size_t output_num = 
AnfAlgo::GetOutputTensorNum(kernel_node); + if (output_num != 1) { + MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but FakeQuantGrad GpuKernel OP needs 1 output."; + } + + num_bits_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("num_bits")); + if (num_bits_ <= 2 || num_bits_ >= 16) { + MS_LOG(EXCEPTION) << "Attr \'num_bits\' " << num_bits_ << " is out of range, expected between 2 and 16."; + } + + quant_delay_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("quant_delay")); + if (quant_delay_ < 0) { + MS_LOG(EXCEPTION) << "Attr \'quant_delay_\' " << quant_delay_ << " is less then 0, require larger than 0."; + } + + quant_min_ = 0; + quant_max_ = (1 << num_bits_) - 1; + + if (quant_size_ == 0) { + quant_size_ = 1; + } + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + for (size_t i = 0; i < input_shape.size(); ++i) { + quant_size_ *= SizeToInt(input_shape[i]); + } + + input_size_ = sizeof(float); + min_size_ = sizeof(float); + max_size_ = sizeof(float); + for (size_t i = 0; i < input_shape.size(); i++) { + input_size_ *= input_shape[i]; + } + output_size_ = input_size_; + + InitSizeLists(); + return true; +} + +void FakeQuantGradGpuKernel::InitSizeLists() { + input_size_list_.push_back(input_size_); // gradient + input_size_list_.push_back(input_size_); // input + input_size_list_.push_back(min_size_); // min + input_size_list_.push_back(max_size_); // max + output_size_list_.push_back(output_size_); + workspace_size_list_.push_back(workspace_size_); +} + +bool FakeQuantGradGpuKernel::Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + (void)workspace; + float *output = GetDeviceAddress(outputs, 0); + float *gradient = GetDeviceAddress(inputs, 0); + float *input = GetDeviceAddress(inputs, 1); + float *input_min = GetDeviceAddress(inputs, 2); + float *input_max = GetDeviceAddress(inputs, 3); + + if (gradient == nullptr) { + 
MS_LOG(EXCEPTION) << "FakeQuantGradGpuKernel gradient is null"; + } + if (input == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantGradGpuKernel input is null."; + } + if (input_min == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantGradGpuKernel input min is null."; + } + if (input_max == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantGradGpuKernel input max is null."; + } + + if (global_step_ >= quant_delay_) { + float *d_scale = nullptr; + float *d_nudge_min = nullptr; + float *d_nudge_max = nullptr; + int size = sizeof(float); + // Allocate space for device copies + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_scale), size), "Malloc gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_nudge_min), size), "Malloc gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_nudge_max), size), "Malloc gpu memory failed"); + + CalNudge(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, + reinterpret_cast(stream_ptr)); + CalFakeQuantizeGrad(input, gradient, output, quant_size_, d_nudge_min, d_nudge_max, + reinterpret_cast(stream_ptr)); + + // Cleanup + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_scale), "Free gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_nudge_min), "Free gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_nudge_max), "Free gpu memory failed"); + } else { + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(output, gradient, input_size_, cudaMemcpyDeviceToDevice), + "Copy gpu memory failed."); + } + global_step_++; + return true; +} + +MS_REG_GPU_KERNEL(FakeQuantWithMinMaxGrad, FakeQuantGradGpuKernel) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_grad_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_grad_gpu_kernel.h new file mode 100644 index 0000000000..cd0f9a4680 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_grad_gpu_kernel.h @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei 
Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_GRAD_GPUKERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_GRAD_GPUKERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class FakeQuantGradGpuKernel : public GpuKernel { + public: + FakeQuantGradGpuKernel(); + ~FakeQuantGradGpuKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) override; + bool Init(const CNodePtr &kernel_node) override; + + protected: + void InitSizeLists() override; + + private: + size_t input_size_; + size_t min_size_; + size_t max_size_; + size_t output_size_; + size_t workspace_size_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; + + int num_bits_; + float quant_min_; + float quant_max_; + int quant_size_; + int quant_delay_; + int global_step_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_GRAD_GPUKERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.cc 
b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.cc new file mode 100644 index 0000000000..302ef8d99f --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.cc @@ -0,0 +1,181 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.h" +#include "kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cuh" +#include +#include +#include +#include + +namespace mindspore { +namespace kernel { +FakeQuantPerChannelGpuKernel::FakeQuantPerChannelGpuKernel() + : input_size_(0), + min_size_(0), + max_size_(0), + output_size_(0), + workspace_size_(0), + num_bits_(0), + quant_min_(0), + quant_max_(0), + quant_delay_(0), + ema_(false), + ema_decay_(0), + global_step_(0), + training_(false), + channel_out_(0), + narrow_range_(false), + symmetric_(false) {} + +const std::vector &FakeQuantPerChannelGpuKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &FakeQuantPerChannelGpuKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &FakeQuantPerChannelGpuKernel::GetWorkspaceSizeList() const { return workspace_size_list_; } + +bool FakeQuantPerChannelGpuKernel::Init(const CNodePtr &kernel_node) { + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 3) { + MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but FakeQuant 
GpuKernel OP needs 3 input."; + return false; + } + + size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node); + if (output_num != 1) { + MS_LOG(EXCEPTION) << "Output number is " << output_num << " but FakeQuant GpuKernel OP needs 1 output."; + return false; + } + + num_bits_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("num_bits")); + ema_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("ema")); + ema_decay_ = 1.0 - GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("ema_decay")); + + if (num_bits_ <= 2 || num_bits_ >= 16) { + MS_LOG(EXCEPTION) << "Attr \'num_bits\' " << num_bits_ << "is out of range, expected between 2 and 16."; + return false; + } + + quant_delay_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("quant_delay")); + if (quant_delay_ < 0) { + MS_LOG(EXCEPTION) << "Attr \'quant_delay\' " << num_bits_ << " is less then 0, require larger than 0."; + return false; + } + + training_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("training")); + + symmetric_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("symmetric")); + if (symmetric_) { + quant_min_ = 0 - (1 << (num_bits_ - 1)); + quant_max_ = (1 << (num_bits_ - 1)) - 1; + } else { + quant_min_ = 0; + quant_max_ = (1 << num_bits_) - 1; + } + + narrow_range_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("narrow_range")); + if (narrow_range_) { + quant_min_++; + } + + // shape info for gpu + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + channel_out_ = SizeToInt(input_shape[0]); + min_size_ = sizeof(float) * channel_out_; + max_size_ = sizeof(float) * channel_out_; + input_size_ = sizeof(float); + for (size_t i = 0; i < input_shape.size(); i++) { + input_size_ *= input_shape[i]; + } + output_size_ = input_size_; + + InitSizeLists(); + return true; +} + +void FakeQuantPerChannelGpuKernel::InitSizeLists() { + input_size_list_.push_back(input_size_); // input + 
input_size_list_.push_back(min_size_); // min + input_size_list_.push_back(max_size_); // max + output_size_list_.push_back(output_size_); + workspace_size_list_.push_back(workspace_size_); +} + +bool FakeQuantPerChannelGpuKernel::Launch(const std::vector &inputs, + const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + (void)workspace; + float *output = GetDeviceAddress(outputs, 0); + float *input = GetDeviceAddress(inputs, 0); + float *input_min = GetDeviceAddress(inputs, 1); + float *input_max = GetDeviceAddress(inputs, 2); + + if (input == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantPerChannelGpuKernel input is null."; + } + if (input_min == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantPerChannelGpuKernel input min is null."; + } + if (input_max == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantPerChannelGpuKernel input max is null."; + } + + // Allocate space for device copies + float *d_scale = nullptr; + float *d_nudge_min = nullptr; + float *d_nudge_max = nullptr; + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_scale), sizeof(float) * channel_out_), + "Malloc gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_nudge_min), sizeof(float) * channel_out_), + "Malloc gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_nudge_max), sizeof(float) * channel_out_), + "Malloc gpu memory failed"); + int total_size = input_size_ / sizeof(float); + bool symmetric = false; + if (training_) { + // calculate the input min and max according by the parameter ema and ema_decay. 
+ CalMinMaxPerChannel(input, input_min, input_max, total_size, channel_out_, ema_decay_, ema_, + reinterpret_cast(stream_ptr)); + // control flow for quant_delay + if (global_step_ >= quant_delay_) { + // real launch + CalNudgePerChannel(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, channel_out_, + reinterpret_cast(stream_ptr)); + CalFakeQuantizePerChannel(input, output, total_size, channel_out_, d_nudge_min, d_nudge_max, d_scale, symmetric, + reinterpret_cast(stream_ptr)); + } else { + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(output, input, input_size_, cudaMemcpyDeviceToDevice), + "Copy gpu memory failed."); + } + global_step_++; + } else { + // real launch + CalNudgePerChannel(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, channel_out_, + reinterpret_cast(stream_ptr)); + CalFakeQuantizePerChannel(input, output, total_size, channel_out_, d_nudge_min, d_nudge_max, d_scale, symmetric, + reinterpret_cast(stream_ptr)); + } + + // Cleanup + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_scale), "Free gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_nudge_min), "Free gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_nudge_max), "Free gpu memory failed"); + return true; +} + +MS_REG_GPU_KERNEL(FakeQuantWithMinMaxPerChannel, FakeQuantPerChannelGpuKernel) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.h new file mode 100755 index 0000000000..faf8684fca --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.h @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_PER_CHANNEL_GPUKERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_PER_CHANNEL_GPUKERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class FakeQuantPerChannelGpuKernel : public GpuKernel { + public: + FakeQuantPerChannelGpuKernel(); + ~FakeQuantPerChannelGpuKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) override; + bool Init(const CNodePtr &kernel) override; + + protected: + void InitSizeLists() override; + + private: + size_t input_size_; + size_t min_size_; + size_t max_size_; + size_t output_size_; + size_t workspace_size_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; + + int num_bits_; + float quant_min_; + float quant_max_; + int quant_delay_; + bool ema_; + float ema_decay_; + int global_step_; + bool training_; + int channel_out_; + bool narrow_range_; + bool symmetric_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_PER_CHANNEL_GPUKERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_grad_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_grad_gpu_kernel.cc new file 
mode 100644 index 0000000000..3184132121 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_grad_gpu_kernel.cc @@ -0,0 +1,158 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/quant/fake_quant_per_channel_grad_gpu_kernel.h" +#include "kernel/gpu/cuda_impl/fake_quant_per_channel_impl.cuh" + +namespace mindspore { +namespace kernel { +FakeQuantPerChannelGradGpuKernel::FakeQuantPerChannelGradGpuKernel() + : input_size_(0), + min_size_(0), + max_size_(0), + output_size_(0), + workspace_size_(0), + num_bits_(0), + quant_min_(0), + quant_max_(0), + channel_out_(0), + quant_delay_(0), + global_step_(0), + narrow_range_(false), + symmetric_(false) {} + +const std::vector &FakeQuantPerChannelGradGpuKernel::GetInputSizeList() const { return input_size_list_; } + +const std::vector &FakeQuantPerChannelGradGpuKernel::GetOutputSizeList() const { return output_size_list_; } + +const std::vector &FakeQuantPerChannelGradGpuKernel::GetWorkspaceSizeList() const { + return workspace_size_list_; +} + +bool FakeQuantPerChannelGradGpuKernel::Init(const CNodePtr &kernel_node) { + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 4) { + MS_LOG(EXCEPTION) << "Input number is " << input_num << ", but FakeQuantGrad GpuKernel OP needs 4 output."; + } + + size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node); + if (output_num != 1) { + 
MS_LOG(EXCEPTION) << "Output number is " << output_num << ", but FakeQuantGrad GpuKernel OP needs 1 output."; + } + + num_bits_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("num_bits")); + if (num_bits_ <= 2 || num_bits_ >= 16) { + MS_LOG(EXCEPTION) << "Attr \'num_bits\' " << num_bits_ << " is out of range, expected between 2 and 16."; + } + + quant_delay_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("quant_delay")); + if (quant_delay_ < 0) { + MS_LOG(EXCEPTION) << "Attr \'quant_delay_\' " << quant_delay_ << " is less then 0, require larger than 0."; + } + + symmetric_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("symmetric")); + if (symmetric_) { + quant_min_ = 0 - (1 << (num_bits_ - 1)); + quant_max_ = (1 << (num_bits_ - 1)) - 1; + } else { + quant_min_ = 0; + quant_max_ = (1 << num_bits_) - 1; + } + + narrow_range_ = GetValue(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("narrow_range")); + if (narrow_range_) { + quant_min_++; + } + + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + channel_out_ = SizeToInt(input_shape[0]); + min_size_ = sizeof(float) * channel_out_; + max_size_ = sizeof(float) * channel_out_; + input_size_ = sizeof(float); + for (size_t i = 0; i < input_shape.size(); i++) { + input_size_ *= input_shape[i]; + } + output_size_ = input_size_; + + InitSizeLists(); + return true; +} + +void FakeQuantPerChannelGradGpuKernel::InitSizeLists() { + input_size_list_.push_back(input_size_); // gradient + input_size_list_.push_back(input_size_); // input + input_size_list_.push_back(min_size_); // min + input_size_list_.push_back(max_size_); // max + output_size_list_.push_back(output_size_); + workspace_size_list_.push_back(workspace_size_); +} + +bool FakeQuantPerChannelGradGpuKernel::Launch(const std::vector &inputs, + const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) { + (void)workspace; + float *output = GetDeviceAddress(outputs, 0); + float 
*gradient = GetDeviceAddress(inputs, 0); + float *input = GetDeviceAddress(inputs, 1); + float *input_min = GetDeviceAddress(inputs, 2); + float *input_max = GetDeviceAddress(inputs, 3); + + if (gradient == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantPerChannelGradGpuKernel gradient is null"; + } + if (input == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantPerChannelGradGpuKernel input is null"; + } + if (input_min == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantPerChannelGradGpuKernel input min is null"; + } + if (input_max == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantPerChannelGradGpuKernel input max is null"; + } + + int total_size = input_size_ / sizeof(float); + if (global_step_ >= quant_delay_) { + float *d_scale = nullptr; + float *d_nudge_min = nullptr; + float *d_nudge_max = nullptr; + // Allocate space for device copies + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_scale), channel_out_ * sizeof(float)), + "Malloc gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_nudge_min), channel_out_ * sizeof(float)), + "Malloc gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_nudge_max), channel_out_ * sizeof(float)), + "Malloc gpu memory failed"); + + CalNudgePerChannel(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, channel_out_, + reinterpret_cast(stream_ptr)); + CalFakeQuantizePerChannelGrad(input, gradient, output, total_size, channel_out_, d_nudge_min, d_nudge_max, + reinterpret_cast(stream_ptr)); + + // Cleanup + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_scale), "Free gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_nudge_min), "Free gpu memory failed"); + CHECK_CUDA_RET_WITH_ERROR(cudaFree(d_nudge_max), "Free gpu memory failed"); + } else { + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(output, gradient, input_size_, cudaMemcpyDeviceToDevice), + "Copy gpu memory failed."); + } + global_step_++; + return true; +} + 
+MS_REG_GPU_KERNEL(FakeQuantWithMinMaxPerChannelGrad, FakeQuantPerChannelGradGpuKernel) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_grad_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_grad_gpu_kernel.h new file mode 100644 index 0000000000..c210f4cc81 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_grad_gpu_kernel.h @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_PER_CHANNEL_GRAD_GPUKERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_PER_CHANNEL_GRAD_GPUKERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class FakeQuantPerChannelGradGpuKernel : public GpuKernel { + public: + FakeQuantPerChannelGradGpuKernel(); + ~FakeQuantPerChannelGradGpuKernel() = default; + + const std::vector &GetInputSizeList() const override; + const std::vector &GetOutputSizeList() const override; + const std::vector &GetWorkspaceSizeList() const override; + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs, uintptr_t stream_ptr) override; + bool Init(const CNodePtr &kernel_node) override; + + protected: + void InitSizeLists() override; + + private: + size_t input_size_; + size_t min_size_; + size_t max_size_; + size_t output_size_; + size_t workspace_size_; + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; + + int num_bits_; + float quant_min_; + float quant_max_; + int channel_out_; + int quant_delay_; + int global_step_; + bool narrow_range_; + bool symmetric_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_FAKEQUANT_PER_CHANNEL_GRAD_GPUKERNEL_H_ diff --git a/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py b/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py new file mode 100644 index 0000000000..29dd8d6251 --- /dev/null +++ b/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py @@ -0,0 +1,52 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""HSigmoid op""" +from mindspore.ops.op_info_register import op_info_register + +@op_info_register("""{ + "op_name": "HSigmoid", + "imply_type": "AutoDiff", + "fusion_type": "OPAQUE", + "processor": "cuda", + "attr": [ + ], + "inputs": [ + { + "index": 0, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "x" + } + ], + "outputs": [ + { + "index": 0, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "output" + } + ] +}""") +def _hsigmoid_akg(): + """HSigmoid AutoDiff register""" + return diff --git a/mindspore/ops/_op_impl/akg/gpu/hsigmoid_grad.py b/mindspore/ops/_op_impl/akg/gpu/hsigmoid_grad.py new file mode 100644 index 0000000000..d29df9c946 --- /dev/null +++ b/mindspore/ops/_op_impl/akg/gpu/hsigmoid_grad.py @@ -0,0 +1,62 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""HSigmoidGrad op""" +from mindspore.ops.op_info_register import op_info_register + +@op_info_register("""{ + "op_name": "HSigmoidGrad", + "imply_type": "AutoDiff", + "fusion_type": "OPAQUE", + "processor": "cuda", + "attr": [ + ], + "inputs": [ + { + "index": 0, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "y_grad" + }, + { + "index": 1, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "x" + } + ], + "outputs": [ + { + "index": 0, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "output" + } + ] +}""") +def _hsigmoid_grad_akg(): + """HSigmoidGrad AutoDiff register""" + return diff --git a/mindspore/ops/_op_impl/akg/gpu/hswish.py b/mindspore/ops/_op_impl/akg/gpu/hswish.py new file mode 100644 index 0000000000..619575920f --- /dev/null +++ b/mindspore/ops/_op_impl/akg/gpu/hswish.py @@ -0,0 +1,52 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""HSwish op""" +from mindspore.ops.op_info_register import op_info_register + +@op_info_register("""{ + "op_name": "HSwish", + "imply_type": "AutoDiff", + "fusion_type": "OPAQUE", + "processor": "cuda", + "attr": [ + ], + "inputs": [ + { + "index": 0, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "x" + } + ], + "outputs": [ + { + "index": 0, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "output" + } + ] +}""") +def _hswish_akg(): + """HSwish AutoDiff register""" + return diff --git a/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py b/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py new file mode 100644 index 0000000000..6d3556b969 --- /dev/null +++ b/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py @@ -0,0 +1,62 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""HSwishGrad op""" +from mindspore.ops.op_info_register import op_info_register + +@op_info_register("""{ + "op_name": "HSwishGrad", + "imply_type": "AutoDiff", + "fusion_type": "OPAQUE", + "processor": "cuda", + "attr": [ + ], + "inputs": [ + { + "index": 0, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "y_grad" + }, + { + "index": 1, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "x" + } + ], + "outputs": [ + { + "index": 0, + "dtype": [ + "float32", "float16" + ], + "format": [ + "DefaultFormat", "DefaultFormat" + ], + "name": "output" + } + ] +}""") +def _hswish_grad_akg(): + """HSwishGrad AutoDiff register""" + return From b81cc6ea4fedbe893a52dbfee83f240117c403a9 Mon Sep 17 00:00:00 2001 From: lichenever Date: Mon, 13 Apr 2020 11:43:33 +0800 Subject: [PATCH 190/367] add minimum distributed op --- mindspore/ccsrc/parallel/dynamic_creator.h | 1 + .../ops_info/comparison_function_info.h | 8 ++ mindspore/ccsrc/parallel/ops_info/ops_utils.h | 1 + .../ccsrc/parallel/step_auto_parallel.cc | 1 + .../parallel/test_comparison_function_info.py | 110 +++++++++++++++--- 5 files changed, 108 insertions(+), 13 deletions(-) diff --git a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h index 1650ff0b21..145a8a0840 100644 --- a/mindspore/ccsrc/parallel/dynamic_creator.h +++ b/mindspore/ccsrc/parallel/dynamic_creator.h @@ -114,6 +114,7 @@ REGISTER(DropoutDoMaskInfo); REGISTER(ReshapeInfo); REGISTER(FloorDivInfo); REGISTER(MaximumInfo); +REGISTER(MinimumInfo); REGISTER(CastInfo); REGISTER(GreaterInfo); REGISTER(SparseSoftmaxCrossEntropyWithLogitsInfo); diff --git a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h index 5f51f1d0a9..00cc431463 100644 --- a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h +++ 
b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h @@ -50,6 +50,14 @@ class MaximumInfo : public ArithmeticBase { : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} ~MaximumInfo() override = default; }; + +class MinimumInfo : public ArithmeticBase { + public: + MinimumInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, + const PrimitiveAttrs& attrs) + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + ~MinimumInfo() override = default; +}; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index befd26e318..b0a9fb3a3c 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -186,6 +186,7 @@ constexpr char LOG[] = "Log"; constexpr char SIGMOID[] = "Sigmoid"; constexpr char POW[] = "Pow"; constexpr char MAXIMUM[] = "Maximum"; +constexpr char MINIMUM[] = "Minimum"; constexpr char EQUAL[] = "Equal"; constexpr char NOT_EQUAL[] = "NotEqual"; constexpr char LOGICALNOT[] = "LogicalNot"; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index fe6be575ee..1eb881b798 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -93,6 +93,7 @@ std::vector splittable_op_ = {MATMUL, SIGMOID, POW, MAXIMUM, + MINIMUM, EQUAL, NOT_EQUAL, LOGICALNOT, diff --git a/tests/ut/python/parallel/test_comparison_function_info.py b/tests/ut/python/parallel/test_comparison_function_info.py index 74de04f1df..93ec5e5981 100644 --- a/tests/ut/python/parallel/test_comparison_function_info.py +++ b/tests/ut/python/parallel/test_comparison_function_info.py @@ -54,11 +54,10 @@ def test_matmul_equal(): out = self.equal(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, 
global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 2), (4, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -78,11 +77,10 @@ def test_matmul_not_equal(): out = self.notequal(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 2), (4, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -102,11 +100,10 @@ def test_matmul_not_equal_repeated_calculation(): out = self.notequal(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 1), (4, 1)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([128, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -126,11 +123,10 @@ def test_matmul_maximum(): out = self.maximum(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 2), (4, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -150,11 +146,10 @@ def 
test_matmul_maximum_broadcast(): out = self.maximum(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 2), (2, 2)) strategy2 = ((4, 2), (2, )) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 64]), dtype=ms.float32) @@ -174,13 +169,102 @@ def test_matmul_maximum_broadcast2(): out = self.maximum(out, b) return out - context.set_auto_parallel_context(device_num=8, global_rank=0) + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") strategy1 = ((2, 4), (4, 1)) strategy2 = ((4, 1), (1, 2)) net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) - context.set_auto_parallel_context(parallel_mode="semi_auto_parallel") x = Tensor(np.ones([64, 32]), dtype=ms.float32) y = Tensor(np.ones([32, 1]), dtype=ms.float32) b = Tensor(np.ones([1, 64]), dtype=ms.float32) - _executor.compile(net, x, y, b) \ No newline at end of file + _executor.compile(net, x, y, b) + + +def test_matmul_minimum(): + class Net(nn.Cell): + def __init__(self, strategy1, strategy2): + super().__init__() + self.matmul = P.MatMul().set_strategy(strategy1) + self.minimum = P.Minimum().set_strategy(strategy2) + + def construct(self, x, y, b): + out = self.matmul(x, y) + out = self.minimum(out, b) + return out + + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") + strategy1 = ((2, 2), (2, 2)) + strategy2 = ((4, 2), (4, 2)) + net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) + + x = Tensor(np.ones([64, 32]), dtype=ms.float32) + y = Tensor(np.ones([32, 64]), dtype=ms.float32) + b = Tensor(np.ones([64, 64]), dtype=ms.float32) + _executor.compile(net, x, y, b) + + +def test_matmul_minimum_broadcast(): + class 
Net(nn.Cell): + def __init__(self, strategy1, strategy2): + super().__init__() + self.matmul = P.MatMul().set_strategy(strategy1) + self.minimum = P.Maximum().set_strategy(strategy2) + + def construct(self, x, y, b): + out = self.matmul(x, y) + out = self.minimum(out, b) + return out + + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") + strategy1 = ((2, 2), (2, 2)) + strategy2 = ((4, 2), (2, )) + net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) + + x = Tensor(np.ones([64, 32]), dtype=ms.float32) + y = Tensor(np.ones([32, 64]), dtype=ms.float32) + b = Tensor(np.ones([64]), dtype=ms.float32) + _executor.compile(net, x, y, b) + + +def test_matmul_minimum_broadcast2(): + class Net(nn.Cell): + def __init__(self, strategy1, strategy2): + super().__init__() + self.matmul = P.MatMul().set_strategy(strategy1) + self.minimum = P.Minimum().set_strategy(strategy2) + + def construct(self, x, y, b): + out = self.matmul(x, y) + out = self.minimum(out, b) + return out + + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel") + strategy1 = ((2, 4), (4, 1)) + strategy2 = ((4, 1), (1, 2)) + net = GradWrap(NetWithLoss(Net(strategy1, strategy2))) + + x = Tensor(np.ones([64, 32]), dtype=ms.float32) + y = Tensor(np.ones([32, 1]), dtype=ms.float32) + b = Tensor(np.ones([1, 64]), dtype=ms.float32) + _executor.compile(net, x, y, b) + + +def test_matmul_minimum_auto_parallel(): + class Net(nn.Cell): + def __init__(self): + super().__init__() + self.matmul = P.MatMul() + self.minimum = P.Minimum() + + def construct(self, x, y, b): + out = self.matmul(x, y) + out = self.minimum(out, b) + return out + + context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="auto_parallel") + net = GradWrap(NetWithLoss(Net())) + + x = Tensor(np.ones([64, 32]), dtype=ms.float32) + y = Tensor(np.ones([32, 1]), dtype=ms.float32) + b = Tensor(np.ones([1, 64]), dtype=ms.float32) + 
_executor.compile(net, x, y, b) From d62f560b500ff5fdab3298b94f7a7a0c274390f5 Mon Sep 17 00:00:00 2001 From: c00425699 Date: Sat, 11 Apr 2020 11:35:54 +0800 Subject: [PATCH 191/367] add_bool_type_check_in_comm_op --- .../parallel/auto_parallel/edge_costmodel.cc | 12 ++++- .../parallel/auto_parallel/edge_costmodel.h | 2 +- .../ccsrc/parallel/ops_info/operator_info.cc | 10 ++++ .../ccsrc/parallel/ops_info/operator_info.h | 9 +++- mindspore/ccsrc/parallel/ops_info/ops_utils.h | 1 + .../ccsrc/parallel/step_auto_parallel.cc | 34 ++++++++------ mindspore/ccsrc/parallel/step_auto_parallel.h | 4 +- mindspore/ops/operations/comm_ops.py | 8 ++++ .../parallel/auto_parallel/dp_algo_test.cc | 46 +++++++++++++++++++ .../auto_parallel/edge_costmodel_test.cc | 5 ++ .../auto_parallel/graph_costmodel_test.cc | 6 +++ tests/ut/python/communication/test_comm.py | 2 +- 12 files changed, 120 insertions(+), 19 deletions(-) diff --git a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc index 653f6c903d..cbd66f58a6 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc @@ -61,11 +61,12 @@ Status Edge::InitEdgeCost() { auto target_output_lyt = target_output.second[prev_op_output_index_].tensor_layout(); auto target_output_str = target_output.first; auto type_length = prev_op_->GetOutputTypeLengths()[prev_op_output_index_]; + auto type = prev_op_->outputs_type()[prev_op_output_index_]; for (auto& target_input : next_op_input_) { auto target_input_lyt = target_input.second[next_op_input_index_].tensor_layout(); auto target_input_str = target_input.first; CostPtr cost; - if (GetRedistributionCost(target_output_lyt, target_input_lyt, type_length, &cost) != SUCCESS) { + if (GetRedistributionCost(target_output_lyt, target_input_lyt, type_length, type, &cost) != SUCCESS) { MS_LOG(EXCEPTION) << "Failure: redistribution cost calculation failed"; } 
MS_EXCEPTION_IF_NULL(cost); @@ -99,7 +100,7 @@ Status Edge::InitEdgeCost() { } Status Edge::GetRedistributionCost(const TensorLayout& prev_op_output_layout, const TensorLayout& next_op_input_layout, - size_t type_length, CostPtr* cost) { + size_t type_length, TypePtr type, CostPtr* cost) { MS_EXCEPTION_IF_NULL(prev_op_); MS_EXCEPTION_IF_NULL(cost); RankList dev_list = prev_op_->global_device_list(); @@ -119,6 +120,13 @@ Status Edge::GetRedistributionCost(const TensorLayout& prev_op_output_layout, co double backward_comm_cost = tensor_redistribution.backward_comm_cost(); double computation_cost = tensor_redistribution.computation_cost(); + // Now AllGather, ReduceScatter, AlltoAll don't support bool type + MS_EXCEPTION_IF_NULL(type); + if ((type->type_id() == kNumberTypeBool) && (comm_cost > 0)) { + computation_cost = INF; + comm_cost = INF; + MS_LOG(WARNING) << "Communication Operators don't support bool dtype!"; + } *cost = std::make_shared(type_length * computation_cost, type_length * comm_cost); (*cost)->communication_without_parameter_ = type_length * comm_cost; (*cost)->communication_with_partial_para_ = diff --git a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h index eb89466d7c..bd882bb43f 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h @@ -84,7 +84,7 @@ class Edge { // and the input tensor layout of v, return the redistribution cost, // and the op_list to carry out the redistribution. 
Status GetRedistributionCost(const TensorLayout& prev_op_output_layout, const TensorLayout& next_op_input_layout, - size_t, CostPtr* cost); + size_t, TypePtr type, CostPtr* cost); void set_pre_op_output(const std::vector, std::vector>>& output_set) { pre_op_output_ = output_set; diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc index 42755b3ec3..561628dbb2 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc @@ -1197,6 +1197,16 @@ Status OperatorInfo::SetInputAndOutputTypeLength(const std::vector& inpu return SUCCESS; } +Status OperatorInfo::set_outputs_type(const std::vector& outputs_type) { + if (outputs_type.size() != outputs_shape_.size()) { + MS_LOG(ERROR) << "Outputs type: " << outputs_type.size() + << " do not have the same number of outputs shape: " << outputs_shape_.size(); + return FAILED; + } + outputs_type_ = outputs_type; + return SUCCESS; +} + void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra, const CostPtr& cost) { if (!stra->GetInputDim().empty() && !stra->GetInputDim()[0].empty()) { CheckGlobalDeviceManager(); diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.h b/mindspore/ccsrc/parallel/ops_info/operator_info.h index 248172fa4c..5fe89a1602 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.h +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.h @@ -60,7 +60,8 @@ class OperatorInfo { outputs_shape_(std::move(outputs_shape)), attrs_(std::move(attrs)), is_alive_(true), - cost_(cost) { + cost_(cost), + outputs_type_() { std::vector not_parameteter(inputs_shape_.size(), false); is_parameter_ = not_parameteter; refkey_parameter_name_ = ""; @@ -71,6 +72,11 @@ class OperatorInfo { Status set_is_parameter(const std::vector& is_parameter); Status SetInputAndOutputTypeLength(const std::vector& input_lengths, const std::vector& output_lengths); + // Set outputs dtype. 
+ // If only one output, outputs_type.size() is 1. + // If output is tuple, outputs_type.size() is greater than 1. + Status set_outputs_type(const std::vector& outputs_type); + const std::vector& outputs_type() const { return outputs_type_; } virtual Status Init(const StrategyPtr& strategy) = 0; virtual Status InitForCostModel(const StrategyPtr& strategy) = 0; // only init the necessary parts @@ -229,6 +235,7 @@ class OperatorInfo { private: OperatorCostPtr cost_; + std::vector outputs_type_; }; Shape GetSliceShape(const Shape& tensor_shape, const Dimensions& strategy); diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index befd26e318..1373fdcfeb 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -42,6 +42,7 @@ constexpr size_t SoftmaxCrossEntropyWithLogitsAttrSize = 1; constexpr size_t SoftmaxCrossEntropyWithLogitsInputsSize = 2; constexpr size_t SoftmaxCrossEntropyWithLogitsOutputsSize = 2; constexpr double EPS = 1e-6; +constexpr double INF = 1e20; constexpr char AUTO_PARALLEL_RUN_ONCE_ONLY[] = "auto_parallel_run_once_only"; constexpr char SEMI_AUTO_PARALLEL_RUN_ONCE_ONLY[] = "semi_auto_parallel_run_once_only"; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index fe6be575ee..b25dc074d2 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -254,12 +254,9 @@ size_t GetInputsTypeLen(const AnfNodePtr &input) { return input_type_len; } -// Given the node, return the element length of input and output -std::vector> ExtractInputAndOutputTypeLengthByNode(const CNodePtr &node) { +std::vector ExtractInputTypeLengthByNode(const CNodePtr &node) { MS_EXCEPTION_IF_NULL(node); std::vector inputs_type_len; - std::vector outputs_type_len; - std::vector> all_types; std::vector node_inputs{node->inputs()}; // extract input element length @@ 
-277,9 +274,13 @@ std::vector> ExtractInputAndOutputTypeLengthByNode(const CNo inputs_type_len.push_back(GetInputsTypeLen(input)); } } - all_types.push_back(inputs_type_len); + return inputs_type_len; +} - // extract output element length +std::vector ExtractOutputTypeByNode(const CNodePtr &node) { + MS_EXCEPTION_IF_NULL(node); + std::vector outputs_type; + // extract output element type auto primary_output_type = node->Type(); MS_EXCEPTION_IF_NULL(primary_output_type); if (primary_output_type->isa()) { @@ -289,7 +290,7 @@ std::vector> ExtractInputAndOutputTypeLengthByNode(const CNo for (auto &ele : elements) { if (ele->isa()) { auto ele_element_type = ele->cast()->element(); - outputs_type_len.push_back(GetLengthOfDataType(ele_element_type)); + outputs_type.push_back(ele_element_type); } else { MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name(); } @@ -298,14 +299,12 @@ std::vector> ExtractInputAndOutputTypeLengthByNode(const CNo // in this case, the output is a single tensor if (primary_output_type->isa()) { auto element_type = primary_output_type->cast()->element(); - outputs_type_len.push_back(GetLengthOfDataType(element_type)); + outputs_type.push_back(element_type); } else { MS_LOG(EXCEPTION) << "Unknown type: " << primary_output_type->type_name(); } } - all_types.push_back(outputs_type_len); - - return all_types; + return outputs_type; } // Be careful the argument is cnode_full_name, not the op_name @@ -366,11 +365,20 @@ OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr & return nullptr; } // Set the data type for inputs and outputs of this OperatorInfo - std::vector> type_lengths = ExtractInputAndOutputTypeLengthByNode(cnode); - if (operator_info->SetInputAndOutputTypeLength(type_lengths[0], type_lengths[1]) != SUCCESS) { + auto inputs_type_length = ExtractInputTypeLengthByNode(cnode); + auto outputs_type = ExtractOutputTypeByNode(cnode); + std::vector outputs_type_length; + 
outputs_type_length.reserve(outputs_type.size()); + std::transform(outputs_type.begin(), outputs_type.end(), std::back_inserter(outputs_type_length), + GetLengthOfDataType); + if (operator_info->SetInputAndOutputTypeLength(inputs_type_length, outputs_type_length) != SUCCESS) { MS_LOG(ERROR) << "Setting the lengths of inputs and outputs failed for operator: " << operator_info->name(); return nullptr; } + if (operator_info->set_outputs_type(outputs_type) != SUCCESS) { + MS_LOG(ERROR) << "Setting the types of outputs failed for operator: " << operator_info->name(); + return nullptr; + } // When the 'inputs' contains numerical values for some operators, these values should be extracted from // ANF graph auto &inputs = cnode->inputs(); diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.h b/mindspore/ccsrc/parallel/step_auto_parallel.h index 5ee75ca162..349af7c956 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.h +++ b/mindspore/ccsrc/parallel/step_auto_parallel.h @@ -39,7 +39,9 @@ size_t GetLengthOfDataType(const TypePtr &type); std::vector ExtractInputParameterByNode(const CNodePtr &node); -std::vector> ExtractInputAndOutputTypeLengthByNode(const CNodePtr &node); +std::vector ExtractInputTypeLengthByNode(const CNodePtr &node); + +std::vector ExtractOutputTypeByNode(const CNodePtr &node); Status ConstructCostGraphNodes(const std::vector &all_nodes, const FuncGraphPtr &root); diff --git a/mindspore/ops/operations/comm_ops.py b/mindspore/ops/operations/comm_ops.py index 1644c5800a..441e441c2c 100644 --- a/mindspore/ops/operations/comm_ops.py +++ b/mindspore/ops/operations/comm_ops.py @@ -162,6 +162,8 @@ class AllGather(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): + if x_dtype == mstype.bool_: + raise TypeError("AllGather does not support 'Bool' as the dtype of input!") return x_dtype def __call__(self, tensor): @@ -219,6 +221,8 @@ class ReduceScatter(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): + if x_dtype == 
mstype.bool_: + raise TypeError("ReduceScatter does not support 'Bool' as the dtype of input!") return x_dtype def __call__(self, tensor): @@ -276,6 +280,8 @@ class Broadcast(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): + if x_dtype == mstype.bool_: + raise TypeError("Broadcast does not support 'Bool' as the dtype of input!") return x_dtype @@ -318,6 +324,8 @@ class _AlltoAll(PrimitiveWithInfer): return x_shape def infer_dtype(self, x_dtype): + if x_dtype == mstype.bool_: + raise TypeError("AlltoAll does not support 'Bool' as the dtype of input!") return x_dtype def __call__(self, tensor): diff --git a/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc b/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc index d0243d5327..0462993672 100644 --- a/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/dp_algo_test.cc @@ -178,6 +178,7 @@ void TestDPAlgo::SetUp() { Shapes outputs_shape_0 = {{4096, 1024}}; matmul0 = std::make_shared("matmul_info", inputs_shape_0, outputs_shape_0, attr_0); matmul0->set_name("MatMul0"); + matmul0->set_outputs_type({kFloat32}); // matmul1 ValuePtr transpose_a_1 = MakeValue(false); @@ -187,6 +188,7 @@ void TestDPAlgo::SetUp() { Shapes outputs_shape_1 = {{128, 4096}}; matmul1 = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); matmul1->set_name("MatMul1"); + matmul1->set_outputs_type({kFloat32}); // matmul2 ValuePtr transpose_a_2 = MakeValue(false); @@ -196,6 +198,7 @@ void TestDPAlgo::SetUp() { Shapes outputs_shape_2 = {{128, 1024}}; matmul2 = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); matmul2->set_name("MatMul2"); + matmul2->set_outputs_type({kFloat32}); // matmul3 ValuePtr transpose_a_3 = MakeValue(false); @@ -205,6 +208,7 @@ void TestDPAlgo::SetUp() { Shapes outputs_shape_3 = {{1024, 4096}}; matmul3 = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); matmul3->set_name("MatMul3"); + 
matmul3->set_outputs_type({kFloat32}); // matmul4 ValuePtr transpose_a_4 = MakeValue(false); @@ -214,6 +218,7 @@ void TestDPAlgo::SetUp() { Shapes outputs_shape_4 = {{128, 4096}}; matmul4 = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, attr_4); matmul4->set_name("MatMul4"); + matmul4->set_outputs_type({kFloat32}); // matmul5 ValuePtr transpose_a_5 = MakeValue(false); @@ -223,6 +228,7 @@ void TestDPAlgo::SetUp() { Shapes outputs_shape_5 = {{128, 4096}}; matmul5 = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); matmul5->set_name("MatMul5"); + matmul5->set_outputs_type({kFloat32}); // matmul6 ValuePtr transpose_a_6 = MakeValue(false); @@ -232,6 +238,7 @@ void TestDPAlgo::SetUp() { Shapes outputs_shape_6 = {{4096, 1024}}; matmul6 = std::make_shared("matmul_info", inputs_shape_6, outputs_shape_6, attr_6); matmul6->set_name("MatMul6"); + matmul6->set_outputs_type({kFloat32}); // matmul7 ValuePtr transpose_a_7 = MakeValue(false); @@ -241,6 +248,7 @@ void TestDPAlgo::SetUp() { Shapes outputs_shape_7 = {{64, 4096}}; matmul7 = std::make_shared("matmul_info", inputs_shape_7, outputs_shape_7, attr_7); matmul7->set_name("MatMul7"); + matmul7->set_outputs_type({kFloat32}); // matmul8 ValuePtr transpose_a_8 = MakeValue(false); @@ -250,6 +258,7 @@ void TestDPAlgo::SetUp() { Shapes outputs_shape_8 = {{64, 40960}}; matmul8 = std::make_shared("matmul_info", inputs_shape_8, outputs_shape_8, attr_8); matmul8->set_name("MatMul8"); + matmul8->set_outputs_type({kFloat32}); } void TestDPAlgo::ConstructTwoLargeMatMul() { @@ -278,12 +287,15 @@ void TestDPAlgo::ConstructBatmanGraph() { Shapes outputs_shape = {{64, 64}}; tmp_identity_ptr1 = std::make_shared(inputs_shape, outputs_shape, attr); tmp_identity_ptr1->set_name("identity_info1"); + tmp_identity_ptr1->set_outputs_type({kFloat32}); tmp_identity_ptr2 = std::make_shared(inputs_shape, outputs_shape, attr); tmp_identity_ptr2->set_name("identity_info2"); + 
tmp_identity_ptr2->set_outputs_type({kFloat32}); tmp_identity_ptr = std::make_shared(inputs_shape, outputs_shape, attr); tmp_identity_ptr->set_name("identity_info"); + tmp_identity_ptr->set_outputs_type({kFloat32}); // mm1_ptr ValuePtr transpose_a_1 = MakeValue(false); @@ -292,6 +304,7 @@ void TestDPAlgo::ConstructBatmanGraph() { Shapes inputs_shape_1 = {{64, 64}, {64, 64}}; Shapes outputs_shape_1 = {{64, 64}}; mm1_ptr = std::make_shared("matmul_info1", inputs_shape_1, outputs_shape_1, attr_1); + mm1_ptr->set_outputs_type({kFloat32}); // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); @@ -300,6 +313,7 @@ void TestDPAlgo::ConstructBatmanGraph() { Shapes inputs_shape_2 = {{64, 64}, {64, 64}}; Shapes outputs_shape_2 = {{64, 64}}; mm2_ptr = std::make_shared("matmul_info2", inputs_shape_2, outputs_shape_2, attr_2); + mm2_ptr->set_outputs_type({kFloat32}); // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); @@ -308,6 +322,7 @@ void TestDPAlgo::ConstructBatmanGraph() { Shapes inputs_shape_3 = {{64, 64}, {64, 64}}; Shapes outputs_shape_3 = {{64, 64}}; mm3_ptr = std::make_shared("matmul_info3", inputs_shape_3, outputs_shape_3, attr_3); + mm3_ptr->set_outputs_type({kFloat32}); // mm4_ptr ValuePtr transpose_a_4 = MakeValue(false); @@ -316,6 +331,7 @@ void TestDPAlgo::ConstructBatmanGraph() { Shapes inputs_shape_4 = {{64, 64}, {64, 64}}; Shapes outputs_shape_4 = {{64, 64}}; mm4_ptr = std::make_shared("matmul_info4", inputs_shape_4, outputs_shape_4, attr_4); + mm4_ptr->set_outputs_type({kFloat32}); // mm5_ptr ValuePtr transpose_a_5 = MakeValue(false); @@ -324,6 +340,7 @@ void TestDPAlgo::ConstructBatmanGraph() { Shapes inputs_shape_5 = {{64, 64}, {64, 64}}; Shapes outputs_shape_5 = {{64, 64}}; mm5_ptr = std::make_shared("matmul_info5", inputs_shape_5, outputs_shape_5, attr_5); + mm5_ptr->set_outputs_type({kFloat32}); // mm6_ptr ValuePtr transpose_a_6 = MakeValue(false); @@ -332,6 +349,7 @@ void TestDPAlgo::ConstructBatmanGraph() { Shapes inputs_shape_6 = {{64, 64}, {64, 
64}}; Shapes outputs_shape_6 = {{64, 64}}; mm6_ptr = std::make_shared("matmul_info6", inputs_shape_6, outputs_shape_6, attr_6); + mm6_ptr->set_outputs_type({kFloat32}); // mm7_ptr ValuePtr transpose_a_7 = MakeValue(false); @@ -340,6 +358,7 @@ void TestDPAlgo::ConstructBatmanGraph() { Shapes inputs_shape_7 = {{64, 64}, {64, 64}}; Shapes outputs_shape_7 = {{64, 64}}; mm7_ptr = std::make_shared("matmul_info7", inputs_shape_7, outputs_shape_7, attr_7); + mm7_ptr->set_outputs_type({kFloat32}); // create edges edge_i0_m3 = std::make_shared(edge_iden_matmul_name, tmp_identity_ptr, mm3_ptr, 0, 0, false, true); @@ -451,6 +470,7 @@ void TestDPAlgo::ConstructTriangleGraph() { Shapes outputs_shape = {{64, 64}}; tmp_identity_ptr1 = std::make_shared(inputs_shape, outputs_shape, attr); tmp_identity_ptr1->set_name("identity_info1"); + tmp_identity_ptr1->set_outputs_type({kFloat32}); // mm6_ptr ValuePtr transpose_a_6 = MakeValue(false); @@ -459,9 +479,11 @@ void TestDPAlgo::ConstructTriangleGraph() { Shapes inputs_shape_6 = {{64, 64}, {64, 64}}; Shapes outputs_shape_6 = {{64, 64}}; mm6_ptr = std::make_shared("matmul_info", inputs_shape_6, outputs_shape_6, attr_6); + mm6_ptr->set_outputs_type({kFloat32}); tmp_identity_ptr2 = std::make_shared(inputs_shape, outputs_shape, attr); tmp_identity_ptr2->set_name("identity_info2"); + tmp_identity_ptr2->set_outputs_type({kFloat32}); // mm1_ptr ValuePtr transpose_a_1 = MakeValue(false); @@ -470,6 +492,7 @@ void TestDPAlgo::ConstructTriangleGraph() { Shapes inputs_shape_1 = {{64, 64}, {64, 64}}; Shapes outputs_shape_1 = {{64, 64}}; mm1_ptr = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); + mm1_ptr->set_outputs_type({kFloat32}); // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); @@ -478,6 +501,7 @@ void TestDPAlgo::ConstructTriangleGraph() { Shapes inputs_shape_2 = {{64, 64}, {64, 64}}; Shapes outputs_shape_2 = {{64, 64}}; mm2_ptr = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); + 
mm2_ptr->set_outputs_type({kFloat32}); // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); @@ -486,6 +510,7 @@ void TestDPAlgo::ConstructTriangleGraph() { Shapes inputs_shape_3 = {{64, 64}, {64, 64}}; Shapes outputs_shape_3 = {{64, 64}}; mm3_ptr = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); + mm3_ptr->set_outputs_type({kFloat32}); // mm4_ptr ValuePtr transpose_a_4 = MakeValue(false); @@ -494,6 +519,7 @@ void TestDPAlgo::ConstructTriangleGraph() { Shapes inputs_shape_4 = {{64, 64}, {64, 64}}; Shapes outputs_shape_4 = {{64, 64}}; mm4_ptr = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, attr_4); + mm4_ptr->set_outputs_type({kFloat32}); // mm5_ptr ValuePtr transpose_a_5 = MakeValue(false); @@ -502,6 +528,7 @@ void TestDPAlgo::ConstructTriangleGraph() { Shapes inputs_shape_5 = {{64, 64}, {64, 64}}; Shapes outputs_shape_5 = {{64, 64}}; mm5_ptr = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); + mm5_ptr->set_outputs_type({kFloat32}); // create edges std::string edge_matmul_matmul_name = "MatMul-MatMul"; @@ -584,6 +611,7 @@ void TestDPAlgo::ConstructTriangleGraph2() { Shapes outputs_shape = {{64, 64}}; tmp_identity_ptr1 = std::make_shared(inputs_shape, outputs_shape, attr); tmp_identity_ptr1->set_name("identity_info1"); + tmp_identity_ptr1->set_outputs_type({kFloat32}); // mm1_ptr ValuePtr transpose_a_1 = MakeValue(false); @@ -592,6 +620,7 @@ void TestDPAlgo::ConstructTriangleGraph2() { Shapes inputs_shape_1 = {{64, 64}, {64, 64}}; Shapes outputs_shape_1 = {{64, 64}}; mm1_ptr = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); + mm1_ptr->set_outputs_type({kFloat32}); // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); @@ -600,6 +629,7 @@ void TestDPAlgo::ConstructTriangleGraph2() { Shapes inputs_shape_2 = {{64, 64}, {64, 64}}; Shapes outputs_shape_2 = {{64, 64}}; mm2_ptr = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); + 
mm2_ptr->set_outputs_type({kFloat32}); // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); @@ -608,6 +638,7 @@ void TestDPAlgo::ConstructTriangleGraph2() { Shapes inputs_shape_3 = {{64, 64}, {64, 64}}; Shapes outputs_shape_3 = {{64, 64}}; mm3_ptr = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); + mm3_ptr->set_outputs_type({kFloat32}); // create edges std::string edge_matmul_matmul_name = "MatMul-MatMul"; @@ -953,6 +984,7 @@ void TestDPAlgo::ConstructMMRGraph() { Shapes inputs_shape_1 = {{32, 16}, {16, 32}}; Shapes outputs_shape_1 = {{32, 32}}; mm1_ptr = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); + mm1_ptr->set_outputs_type({kFloat32}); // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); @@ -961,6 +993,7 @@ void TestDPAlgo::ConstructMMRGraph() { Shapes inputs_shape_2 = {{8, 32}, {32, 32}}; Shapes outputs_shape_2 = {{8, 32}}; mm2_ptr = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); + mm2_ptr->set_outputs_type({kFloat32}); // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); @@ -969,6 +1002,7 @@ void TestDPAlgo::ConstructMMRGraph() { Shapes inputs_shape_3 = {{32, 32}, {32, 64}}; Shapes outputs_shape_3 = {{32, 64}}; mm3_ptr = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); + mm3_ptr->set_outputs_type({kFloat32}); // mm4_ptr ValuePtr transpose_a_4 = MakeValue(false); @@ -977,6 +1011,7 @@ void TestDPAlgo::ConstructMMRGraph() { Shapes inputs_shape_4 = {{64, 32}, {32, 32}}; Shapes outputs_shape_4 = {{64, 32}}; mm4_ptr = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, attr_4); + mm4_ptr->set_outputs_type({kFloat32}); // mm5_ptr ValuePtr transpose_a_5 = MakeValue(false); @@ -985,6 +1020,7 @@ void TestDPAlgo::ConstructMMRGraph() { Shapes inputs_shape_5 = {{8, 32}, {32, 64}}; Shapes outputs_shape_5 = {{8, 64}}; mm5_ptr = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); + mm5_ptr->set_outputs_type({kFloat32}); // mm5_ptr 
ValuePtr transpose_a_6 = MakeValue(false); @@ -993,6 +1029,7 @@ void TestDPAlgo::ConstructMMRGraph() { Shapes inputs_shape_6 = {{8, 64}, {64, 32}}; Shapes outputs_shape_6 = {{8, 32}}; mm6_ptr = std::make_shared("matmul_info", inputs_shape_6, outputs_shape_6, attr_6); + mm6_ptr->set_outputs_type({kFloat32}); ValuePtr relu = MakeValue(std::string("relu")); std::unordered_map relu_attr = {{"activation_type", relu}}; @@ -1001,26 +1038,31 @@ void TestDPAlgo::ConstructMMRGraph() { Shapes relu1_inputs_shape = {{8, 32}}; Shapes relu1_outputs_shape = {{8, 32}}; relu1_ptr = std::make_shared("relu_info", relu1_inputs_shape, relu1_outputs_shape, relu_attr); + relu1_ptr->set_outputs_type({kFloat32}); // relu2_ptr Shapes relu2_inputs_shape = {{32, 64}}; Shapes relu2_outputs_shape = {{32, 64}}; relu2_ptr = std::make_shared("relu_info", relu2_inputs_shape, relu2_outputs_shape, relu_attr); + relu2_ptr->set_outputs_type({kFloat32}); // relu3_ptr Shapes relu3_inputs_shape = {{64, 32}}; Shapes relu3_outputs_shape = {{64, 32}}; relu3_ptr = std::make_shared("relu_info", relu3_inputs_shape, relu3_outputs_shape, relu_attr); + relu3_ptr->set_outputs_type({kFloat32}); // relu4_ptr Shapes relu4_inputs_shape = {{8, 64}}; Shapes relu4_outputs_shape = {{8, 64}}; relu4_ptr = std::make_shared("relu_info", relu4_inputs_shape, relu4_outputs_shape, relu_attr); + relu4_ptr->set_outputs_type({kFloat32}); // relu5_ptr Shapes relu5_inputs_shape = {{8, 32}}; Shapes relu5_outputs_shape = {{8, 32}}; relu5_ptr = std::make_shared("relu_info", relu5_inputs_shape, relu5_outputs_shape, relu_attr); + relu5_ptr->set_outputs_type({kFloat32}); std::string edge_matmul_matmul_name = "MatMul-MatMul"; std::string edge_matmul_relu_name = "MatMul-ReLU"; @@ -1134,6 +1176,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() { Shapes inputs_shape = {{32, 64}}; Shapes outputs_shape = {{32, 64}}; tmp_identity_ptr = std::make_shared(inputs_shape, outputs_shape, attr); + tmp_identity_ptr->set_outputs_type({kFloat32}); // 
mm1_ptr ValuePtr transpose_a_1 = MakeValue(false); @@ -1142,6 +1185,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() { Shapes inputs_shape_1 = {{32, 64}, {64, 128}}; Shapes outputs_shape_1 = {{32, 128}}; mm1_ptr = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); + mm1_ptr->set_outputs_type({kFloat32}); // mm2_ptr ValuePtr transpose_a_2 = MakeValue(false); @@ -1150,6 +1194,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() { Shapes inputs_shape_2 = {{128, 32}, {32, 64}}; Shapes outputs_shape_2 = {{128, 64}}; mm2_ptr = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); + mm2_ptr->set_outputs_type({kFloat32}); // mm3_ptr ValuePtr transpose_a_3 = MakeValue(false); @@ -1158,6 +1203,7 @@ void TestDPAlgo::ConstructIdentityDiamondGraph() { Shapes inputs_shape_3 = {{32, 128}, {128, 64}}; Shapes outputs_shape_3 = {{32, 64}}; mm3_ptr = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); + mm3_ptr->set_outputs_type({kFloat32}); // create edges std::string edge_matmul_matmul_name = "MatMul-MatMul"; diff --git a/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc b/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc index 467f4976e8..423a258a28 100644 --- a/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/edge_costmodel_test.cc @@ -65,6 +65,7 @@ void TestEdgeCostModel::SetUp() { Shapes inputs_shape_1 = {{8, 16}, {16, 32}}; Shapes outputs_shape_1 = {{8, 32}}; matmul1 = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); + matmul1->set_outputs_type({kFloat32}); // matmul2 ValuePtr transpose_a_2 = MakeValue(false); @@ -73,6 +74,7 @@ void TestEdgeCostModel::SetUp() { Shapes inputs_shape_2 = {{8, 32}, {32, 16}}; Shapes outputs_shape_2 = {{8, 16}}; matmul2 = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); + matmul2->set_outputs_type({kFloat32}); // matmul3 ValuePtr transpose_a_3 = 
MakeValue(false); @@ -81,6 +83,7 @@ void TestEdgeCostModel::SetUp() { Shapes inputs_shape_3 = {{16, 8}, {8, 32}}; Shapes outputs_shape_3 = {{16, 32}}; matmul3 = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); + matmul3->set_outputs_type({kFloat32}); // matmul4 ValuePtr transpose_a_4 = MakeValue(false); @@ -89,6 +92,7 @@ void TestEdgeCostModel::SetUp() { Shapes inputs_shape_4 = {{8, 16}, {16, 32}}; Shapes outputs_shape_4 = {{8, 32}}; matmul4 = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, attr_4); + matmul4->set_outputs_type({kFloat32}); // matmul5 ValuePtr transpose_a_5 = MakeValue(false); @@ -97,6 +101,7 @@ void TestEdgeCostModel::SetUp() { Shapes inputs_shape_5 = {{8, 32}, {8, 32}}; Shapes outputs_shape_5 = {{8, 8}}; matmul5 = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); + matmul5->set_outputs_type({kFloat32}); } TEST_F(TestEdgeCostModel, test_InitEdgeCost) { diff --git a/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc b/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc index 415a1fdd55..81b017a28d 100644 --- a/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc +++ b/tests/ut/cpp/parallel/auto_parallel/graph_costmodel_test.cc @@ -76,6 +76,7 @@ void TestCostGraph::SetUp() { Shapes inputs_shape_0 = {{32, 16}, {16, 16}}; Shapes outputs_shape_0 = {{32, 16}}; matmul0 = std::make_shared("matmul_info", inputs_shape_0, outputs_shape_0, attr_0); + matmul0->set_outputs_type({kFloat32}); // matmul1 ValuePtr transpose_a_1 = MakeValue(false); @@ -84,6 +85,7 @@ void TestCostGraph::SetUp() { Shapes inputs_shape_1 = {{8, 16}, {16, 32}}; Shapes outputs_shape_1 = {{8, 32}}; matmul1 = std::make_shared("matmul_info", inputs_shape_1, outputs_shape_1, attr_1); + matmul1->set_outputs_type({kFloat32}); // matmul2 ValuePtr transpose_a_2 = MakeValue(false); @@ -92,6 +94,7 @@ void TestCostGraph::SetUp() { Shapes inputs_shape_2 = {{8, 32}, {32, 16}}; Shapes outputs_shape_2 = {{8, 16}}; 
matmul2 = std::make_shared("matmul_info", inputs_shape_2, outputs_shape_2, attr_2); + matmul2->set_outputs_type({kFloat32}); // matmul3 ValuePtr transpose_a_3 = MakeValue(false); @@ -100,6 +103,7 @@ void TestCostGraph::SetUp() { Shapes inputs_shape_3 = {{16, 8}, {8, 32}}; Shapes outputs_shape_3 = {{16, 32}}; matmul3 = std::make_shared("matmul_info", inputs_shape_3, outputs_shape_3, attr_3); + matmul3->set_outputs_type({kFloat32}); // matmul4 ValuePtr transpose_a_4 = MakeValue(false); @@ -108,6 +112,7 @@ void TestCostGraph::SetUp() { Shapes inputs_shape_4 = {{8, 16}, {16, 32}}; Shapes outputs_shape_4 = {{8, 32}}; matmul4 = std::make_shared("matmul_info", inputs_shape_4, outputs_shape_4, attr_4); + matmul4->set_outputs_type({kFloat32}); // matmul5 ValuePtr transpose_a_5 = MakeValue(false); @@ -116,6 +121,7 @@ void TestCostGraph::SetUp() { Shapes inputs_shape_5 = {{8, 32}, {8, 32}}; Shapes outputs_shape_5 = {{8, 8}}; matmul5 = std::make_shared("matmul_info", inputs_shape_5, outputs_shape_5, attr_5); + matmul5->set_outputs_type({kFloat32}); } void TestCostGraph::ConstructStarGraph2() { diff --git a/tests/ut/python/communication/test_comm.py b/tests/ut/python/communication/test_comm.py index 885c8fa9e3..38fd7199fd 100644 --- a/tests/ut/python/communication/test_comm.py +++ b/tests/ut/python/communication/test_comm.py @@ -55,7 +55,7 @@ class BroadCastNet(nn.Cell): self.broadcast = Broadcast(0) def construct(self, x): - x, = self.broadcast((x,)) + x = self.broadcast((x)) x = self.dense(x) return x From 37c94a5f767a5f3416ba0065b4c9f68c26e052bd Mon Sep 17 00:00:00 2001 From: Wei Luning Date: Mon, 13 Apr 2020 17:43:25 +0800 Subject: [PATCH 192/367] add pass replace_old_param_ --- mindspore/ccsrc/optimizer/irpass.cc | 2 + mindspore/ccsrc/optimizer/irpass.h | 1 + .../ccsrc/optimizer/irpass/param_replace.h | 60 +++++++++++++++++++ mindspore/ccsrc/pipeline/action.cc | 1 + mindspore/ccsrc/pipeline/pass.cc | 8 +-- .../pynative_mode/test_insert_grad_of.py | 2 +- 6 files changed, 68 
insertions(+), 6 deletions(-) create mode 100644 mindspore/ccsrc/optimizer/irpass/param_replace.h diff --git a/mindspore/ccsrc/optimizer/irpass.cc b/mindspore/ccsrc/optimizer/irpass.cc index 0991c31b00..96d88f6e61 100644 --- a/mindspore/ccsrc/optimizer/irpass.cc +++ b/mindspore/ccsrc/optimizer/irpass.cc @@ -40,6 +40,7 @@ #include "optimizer/irpass/incorporate_getitem.h" #include "optimizer/irpass/incorporate_call.h" #include "optimizer/irpass/grad_var_prepare.h" +#include "optimizer/irpass/param_replace.h" namespace mindspore { namespace opt { @@ -81,6 +82,7 @@ OptimizeIRPassLib::OptimizeIRPassLib() { get_make_ref_eliminate_ = MakeSubstitution(GetMakeRefEliminater(), "get_make_ref_eliminate", {prim::kPrimGetRefKey, prim::kPrimGetRefValue}); replace_refkey_by_param_ = MakeSubstitution(ReplaceRefkeyByParam(), "replace_refkey_by_param", IsValueNode); + replace_old_param_ = MakeSubstitution(ReplaceOldParam(), "replace_old_param", IsParam); // Gradient transforms expand_jprim_ = MakeSubstitution(ExpandJPrim(), "expand_jprim", prim::kPrimJ); diff --git a/mindspore/ccsrc/optimizer/irpass.h b/mindspore/ccsrc/optimizer/irpass.h index bdaf42b3ed..00274bdcc8 100644 --- a/mindspore/ccsrc/optimizer/irpass.h +++ b/mindspore/ccsrc/optimizer/irpass.h @@ -58,6 +58,7 @@ class OptimizeIRPassLib { SubstitutionPtr make_ref_eliminate_; SubstitutionPtr get_make_ref_eliminate_; SubstitutionPtr replace_refkey_by_param_; + SubstitutionPtr replace_old_param_; // Branch culling SubstitutionPtr switch_simplify_; diff --git a/mindspore/ccsrc/optimizer/irpass/param_replace.h b/mindspore/ccsrc/optimizer/irpass/param_replace.h new file mode 100644 index 0000000000..c0c4c832d7 --- /dev/null +++ b/mindspore/ccsrc/optimizer/irpass/param_replace.h @@ -0,0 +1,60 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_OPTIMIZER_IRPASS_PARAM_REPLACE_H_ +#define MINDSPORE_CCSRC_OPTIMIZER_IRPASS_PARAM_REPLACE_H_ + +#include + +#include "optimizer/optimizer.h" +#include "optimizer/irpass.h" +#include "ir/visitor.h" +#include "operator/ops.h" +#include "pipeline/parse/parse.h" + +namespace mindspore { +namespace opt { +namespace irpass { +class ReplaceOldParam : public AnfVisitor { + public: + AnfNodePtr operator()(const OptimizerPtr &optimizer, const AnfNodePtr &node) override { + if (!IsParam(node)) { + return nullptr; + } + auto resource = std::dynamic_pointer_cast(optimizer->resource()); + MS_EXCEPTION_IF_NULL(resource); + + auto top_graph = resource->func_graph(); // parse::Parser::GetTopFuncGraph(); + MS_EXCEPTION_IF_NULL(top_graph); + + auto param_node = node->cast(); + if (!param_node->has_default() || node->func_graph() == top_graph) { + return nullptr; + } + auto para_name = param_node->name(); + for (const auto &tnode : top_graph->parameters()) { + auto para = tnode->cast(); + if (para != nullptr && para->name() == para_name) { + return para; + } + } + return nullptr; + } +}; +} // namespace irpass +} // namespace opt +} // namespace mindspore +#endif // MINDSPORE_CCSRC_OPTIMIZER_IRPASS_PARAM_REPLACE_H_ diff --git a/mindspore/ccsrc/pipeline/action.cc b/mindspore/ccsrc/pipeline/action.cc index baf4bea7ec..d77fee84aa 100644 --- a/mindspore/ccsrc/pipeline/action.cc +++ b/mindspore/ccsrc/pipeline/action.cc @@ -88,6 +88,7 @@ FuncGraphPtr Renormalize(const ResourcePtr& res, const FuncGraphPtr& func_graph, double t2 = GetTime(); 
#endif auto ret = ProgramSpecialize(res, func_graph, result.context); + res->set_func_graph(ret); #ifdef ENABLE_PROFILE double t3 = GetTime(); MsProfile::StatTime("renormalize.infer", t2 - t1); diff --git a/mindspore/ccsrc/pipeline/pass.cc b/mindspore/ccsrc/pipeline/pass.cc index a58ecf41b6..9248590f27 100644 --- a/mindspore/ccsrc/pipeline/pass.cc +++ b/mindspore/ccsrc/pipeline/pass.cc @@ -114,11 +114,9 @@ OptPassGroupMap GetOptPassesA(const opt::irpass::OptimizeIRPassLib& irpass) { opt::OptPassConfig grad = opt::OptPassConfig({irpass.expand_jprim_}, true); opt::irpass::ResolveIRPassLib resolve_irpass; - opt::OptPassConfig resolve_pass = opt::OptPassConfig({ - resolve_irpass.resolver_resolve_, - resolve_irpass.resolver_getattr_, - irpass.get_make_ref_eliminate_, - }); + opt::OptPassConfig resolve_pass = + opt::OptPassConfig({resolve_irpass.resolver_resolve_, resolve_irpass.resolver_getattr_, + irpass.get_make_ref_eliminate_, irpass.replace_old_param_}); OptPassGroupMap map_a({{"a_1", a_1}, {"a_2", a_2}, diff --git a/tests/ut/python/pynative_mode/test_insert_grad_of.py b/tests/ut/python/pynative_mode/test_insert_grad_of.py index 104ac4d1c7..a11c5fa2b1 100644 --- a/tests/ut/python/pynative_mode/test_insert_grad_of.py +++ b/tests/ut/python/pynative_mode/test_insert_grad_of.py @@ -129,7 +129,7 @@ def test_cell_assign(): self.matrix_g = mindspore.Parameter(Tensor(np.ones([2, 2], np.float32)), name="matrix_g") def save_gradient(self, dout): - self.matrix_g = dout + self.matrix_g = dout + self.matrix_g return dout def construct(self, x, y): From 50ed76bc0c4e2f4ef389fd88e93d5056ad819862 Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Mon, 13 Apr 2020 17:47:09 +0800 Subject: [PATCH 193/367] unify some examples in mindpore front --- mindspore/nn/layer/normalization.py | 2 +- mindspore/nn/layer/pooling.py | 4 ++-- mindspore/nn/loss/loss.py | 2 +- mindspore/nn/metrics/__init__.py | 2 +- mindspore/nn/metrics/error.py | 2 +- 
mindspore/ops/operations/control_ops.py | 6 +++--- mindspore/ops/operations/math_ops.py | 4 ++-- mindspore/ops/operations/nn_ops.py | 6 +++--- mindspore/ops/operations/other_ops.py | 12 ++++++------ mindspore/ops/operations/random_ops.py | 2 +- 10 files changed, 21 insertions(+), 21 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 1ca2221122..d5082371c1 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -256,7 +256,7 @@ class LayerNorm(Cell): Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input_x`. Examples: - >>> x = Tensor(np.ones([20, 5, 10, 10], np.float32)) + >>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32) >>> shape1 = x.shape()[1:] >>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1) >>> m(x) diff --git a/mindspore/nn/layer/pooling.py b/mindspore/nn/layer/pooling.py index 5d9b0ffa6c..746b6d240f 100644 --- a/mindspore/nn/layer/pooling.py +++ b/mindspore/nn/layer/pooling.py @@ -104,7 +104,7 @@ class MaxPool2d(_PoolNd): Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`. Examples: - >>> pool = MaxPool2d(kernel_size=3, stride=1) + >>> pool = nn.MaxPool2d(kernel_size=3, stride=1) >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32) [[[[1. 5. 5. 1.] [0. 3. 4. 8.] @@ -186,7 +186,7 @@ class AvgPool2d(_PoolNd): Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`. Examples: - >>> pool = AvgPool2d(kernel_size=3, strides=1) + >>> pool = nn.AvgPool2d(kernel_size=3, strides=1) >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32) [[[[5. 5. 9. 9.] [8. 4. 3. 0.] diff --git a/mindspore/nn/loss/loss.py b/mindspore/nn/loss/loss.py index 806456e561..9a3de36f47 100644 --- a/mindspore/nn/loss/loss.py +++ b/mindspore/nn/loss/loss.py @@ -284,7 +284,7 @@ class SoftmaxCrossEntropyExpand(Cell): Tensor, a scalar tensor including the mean loss. 
Examples: - >>> loss = SoftmaxCrossEntropyExpand(sparse=True) + >>> loss = nn.SoftmaxCrossEntropyExpand(sparse=True) >>> input_data = Tensor(np.ones([64, 512]), dtype=mindspore.float32) >>> label = Tensor(np.ones([64]), dtype=mindspore.int32) >>> loss(input_data, label) diff --git a/mindspore/nn/metrics/__init__.py b/mindspore/nn/metrics/__init__.py index 490e1620fa..06429e200e 100755 --- a/mindspore/nn/metrics/__init__.py +++ b/mindspore/nn/metrics/__init__.py @@ -83,7 +83,7 @@ def get_metric_fn(name, *args, **kwargs): Metric object, class instance of the metric method. Examples: - >>> metric = get_metric_fn('precision', eval_type='classification') + >>> metric = nn.get_metric_fn('precision', eval_type='classification') """ if name not in __factory__: raise KeyError("Unknown Metric:", name) diff --git a/mindspore/nn/metrics/error.py b/mindspore/nn/metrics/error.py index c803000192..8ed175bc27 100644 --- a/mindspore/nn/metrics/error.py +++ b/mindspore/nn/metrics/error.py @@ -97,7 +97,7 @@ class MSE(Metric): Examples: >>> x = Tensor(np.array([0.1, 0.2, 0.6, 0.9]), mindspore.float32) >>> y = Tensor(np.array([0.1, 0.25, 0.5, 0.9]), mindspore.float32) - >>> error = MSE() + >>> error = nn.MSE() >>> error.clear() >>> error.update(x, y) >>> result = error.eval() diff --git a/mindspore/ops/operations/control_ops.py b/mindspore/ops/operations/control_ops.py index 167739b89a..d4e8b279ba 100644 --- a/mindspore/ops/operations/control_ops.py +++ b/mindspore/ops/operations/control_ops.py @@ -51,9 +51,9 @@ class ControlDepend(Primitive): >>> class Net(nn.Cell): >>> def __init__(self): >>> super(Net, self).__init__() - >>> self.global_step = Parameter(initializer(0, [1]), name="global_step") + >>> self.global_step = mindspore.Parameter(initializer(0, [1]), name="global_step") >>> self.rate = 0.2 - >>> self.control_depend = ControlDepend() + >>> self.control_depend = P.ControlDepend() >>> >>> def construct(self, x): >>> data = self.rate * self.global_step + x @@ -92,7 +92,7 @@ 
class GeSwitch(PrimitiveWithInfer): >>> super(Net, self).__init__() >>> self.square = P.Square() >>> self.add = P.TensorAdd() - >>> self.value = Tensor(np.full((1), 3, dtype=np.float32)) + >>> self.value = Tensor(np.full((1), 3), mindspore.float32) >>> self.switch = P.GeSwitch() >>> self.merge = P.Merge() >>> self.less = P.Less() diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index a1fe6e72b5..f6feb1af18 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -133,7 +133,7 @@ class AssignAdd(PrimitiveWithInfer): >>> def __init__(self): >>> super(Net, self).__init__() >>> self.AssignAdd = P.AssignAdd() - >>> self.variable = Parameter(initializer(1, [1], mindspore.int64), name="global_step") + >>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step") >>> >>> def construct(self, x): >>> self.AssignAdd(self.variable, x) @@ -176,7 +176,7 @@ class AssignSub(PrimitiveWithInfer): >>> def __init__(self): >>> super(Net, self).__init__() >>> self.AssignSub = P.AssignSub() - >>> self.variable = Parameter(initializer(1, [1], mindspore.int64), name="global_step") + >>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step") >>> >>> def construct(self, x): >>> self.AssignSub(self.variable, x) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 83f76455e0..6e34648ed4 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -154,7 +154,7 @@ class ReLU(PrimitiveWithInfer): Tensor, with the same type and shape as the `input_x`. 
Examples: - >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32)) + >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32) >>> relu = P.ReLU() >>> result = relu(input_x) [[0, 4.0, 0.0], [2.0, 0.0, 9.0]] @@ -187,7 +187,7 @@ class ReLU6(PrimitiveWithInfer): Tensor, with the same type and shape as the `input_x`. Examples: - >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32)) + >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32) >>> relu6 = P.ReLU6() >>> result = relu6(input_x) """ @@ -221,7 +221,7 @@ class Elu(PrimitiveWithInfer): Tensor, has the same shape and data type as `input_x`. Examples: - >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], np.float32)) + >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32) >>> elu = P.Elu() >>> result = elu(input_x) Tensor([[-0.632 4.0 -0.999] diff --git a/mindspore/ops/operations/other_ops.py b/mindspore/ops/operations/other_ops.py index e4d526ad01..4ac7f2e554 100644 --- a/mindspore/ops/operations/other_ops.py +++ b/mindspore/ops/operations/other_ops.py @@ -76,7 +76,7 @@ class BoundingBoxEncode(PrimitiveWithInfer): Tensor, encoded bounding boxes. Examples: - >>> boundingbox_encode = BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0)) + >>> boundingbox_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0)) >>> delta_box = boundingbox_encode(anchor_box, groundtruth_box) """ @@ -119,7 +119,7 @@ class BoundingBoxDecode(PrimitiveWithInfer): Tensor, decoded boxes. 
Examples: - >>> boundingbox_decode = BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0), + >>> boundingbox_decode = P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0), max_shape=(768, 1280), wh_ratio_clip=0.016) >>> bbox = boundingbox_decode(anchor_box, deltas) """ @@ -208,7 +208,7 @@ class IOU(PrimitiveWithInfer): KeyError: When `mode` is not 'iou' or 'iof'. Examples: - >>> iou = IOU() + >>> iou = P.IOU() >>> anchor_boxes = Tensor(np.random.randint(1,5, [10, 4])) >>> gt_boxes = Tensor(np.random.randint(1,5, [3, 4])) >>> iou(anchor_boxes, gt_boxes) @@ -255,15 +255,15 @@ class MakeRefKey(Primitive): >>> class Net(nn.Cell): >>> def __init__(self): >>> super(Net, self).__init__() - >>> self.y = Parameter(Tensor(np.ones([6, 8, 10], np.int32)), name="y") - >>> self.make_ref_key = MakeRefKey("y") + >>> self.y = mindspore.Parameter(Tensor(np.ones([6, 8, 10]), mindspore.int32), name="y") + >>> self.make_ref_key = P.MakeRefKey("y") >>> >>> def construct(self, x): >>> key = self.make_ref_key() >>> ref = F.make_ref(key, x, self.y) >>> return ref * x >>> - >>> x = Tensor(np.ones([3, 4, 5], np.int32)) + >>> x = Tensor(np.ones([3, 4, 5]), mindspore.int32) >>> net = Net() >>> net(x) """ diff --git a/mindspore/ops/operations/random_ops.py b/mindspore/ops/operations/random_ops.py index 95692a622e..c8d7c75768 100644 --- a/mindspore/ops/operations/random_ops.py +++ b/mindspore/ops/operations/random_ops.py @@ -44,7 +44,7 @@ class RandomChoiceWithMask(PrimitiveWithInfer): - **mask** (Tensor) - The output has shape 1-D. 
Examples: - >>> rnd_choice_mask = RandomChoiceWithMask() + >>> rnd_choice_mask = P.RandomChoiceWithMask() >>> input_x = Tensor(np.ones(shape=[240000, 4]), mindspore.bool_) >>> output_y, output_mask = rnd_choice_mask(input_x) """ From 19ee376cd35757fc47fb55bb0209826fbb734f8c Mon Sep 17 00:00:00 2001 From: huanghui Date: Fri, 10 Apr 2020 15:19:16 +0800 Subject: [PATCH 194/367] add confusion_mul_grad fusion pass --- .../ir_fusion/confusion_mul_grad_fusion.cc | 112 ++++++++++++++++++ .../ir_fusion/confusion_mul_grad_fusion.h | 41 +++++++ mindspore/ccsrc/utils/utils.h | 1 + .../confusion_mul_grad_fusion_test.cc | 54 +++++++++ .../pre_activate/confusion_mul_grad_fusion.py | 55 +++++++++ 5 files changed, 263 insertions(+) create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.cc create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.h create mode 100644 tests/ut/cpp/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion_test.cc create mode 100644 tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.cc new file mode 100644 index 0000000000..6b7f732a6a --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.cc @@ -0,0 +1,112 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.h" +#include +#include +#include +#include +#include "session/anf_runtime_algorithm.h" +#include "ir/primitive.h" +#include "utils/utils.h" +#include "pipeline/static_analysis/abstract_value.h" +#include "pre_activate/common/helper.h" + +namespace mindspore { +namespace opt { +namespace { +const size_t kConfusionMulGradOutputNum = 2; + +CNodePtr CreateFusionNode(const FuncGraphPtr &graph, const CNodePtr &reduce_sum, const AnfNodePtr &mul0_anf, + const AnfNodePtr &input3) { + MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(reduce_sum); + MS_EXCEPTION_IF_NULL(mul0_anf); + MS_EXCEPTION_IF_NULL(input3); + auto mul0 = mul0_anf->cast(); + MS_EXCEPTION_IF_NULL(mul0); + + auto prim = std::make_shared(kConfusionMulGradOpName); + std::vector inputs = {NewValueNode(prim), mul0->input(1), mul0->input(2), input3}; + auto fusion_node = graph->NewCNode(inputs); + MS_EXCEPTION_IF_NULL(fusion_node); + fusion_node->set_scope(reduce_sum->scope()); + AnfAlgo::CopyNodeAttr(kAttrAxis, reduce_sum, fusion_node); + AnfAlgo::CopyNodeAttr(kAttrKeepDims, reduce_sum, fusion_node); + auto types = {AnfAlgo::GetOutputInferDataType(mul0, 0), AnfAlgo::GetOutputInferDataType(reduce_sum, 0)}; + auto shapes = {AnfAlgo::GetOutputInferShape(mul0, 0), AnfAlgo::GetOutputInferShape(reduce_sum, 0)}; + AnfAlgo::SetOutputInferTypeAndShape(types, shapes, fusion_node.get()); + return fusion_node; +} + +AnfNodePtr GetMul0(const FuncGraphPtr &graph, const AnfNodePtr &input2, const AnfNodePtr &mul1) { + MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(input2); + auto manager = graph->manager(); + MS_EXCEPTION_IF_NULL(manager); + if (manager->node_users().find(input2) == manager->node_users().end()) { + MS_LOG(EXCEPTION) << "node has no output in manager"; + } + + AnfNodePtr mul0 = nullptr; + const AnfNodeIndexSet &outputs_set = 
manager->node_users()[input2]; + // input2 must be the 2rd input of mul0 + auto it = std::find_if(outputs_set.begin(), outputs_set.end(), [&mul1](const std::pair &node_index) { + return node_index.first != mul1 && node_index.second == 2; + }); + if (it != outputs_set.end() && AnfAlgo::GetCNodeName(it->first) == prim::kPrimMul->name()) { + mul0 = it->first; + } + return mul0; +} +} // namespace + +const BaseRef ConfusionMulGradFusion::DefinePattern() const { + VectorRef mul1({prim::kPrimMul, input3_, input2_}); + VectorRef reduce_sum({prim::kPrimReduceSum, mul1}); + return reduce_sum; +} + +const AnfNodePtr ConfusionMulGradFusion::Process(const FuncGraphPtr &graph, const AnfNodePtr &node, + const EquivPtr &equiv) const { + MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(node); + MS_EXCEPTION_IF_NULL(equiv); + auto input2 = utils::cast((*equiv)[input2_]); + auto input3 = utils::cast((*equiv)[input3_]); + auto reduce_sum = node->cast(); + MS_EXCEPTION_IF_NULL(reduce_sum); + auto mul1 = reduce_sum->input(1); + if (IsUsedByOthers(graph, mul1)) { + MS_LOG(INFO) << "Mul1 is used by others, quit fusion!"; + return nullptr; + } + auto mul0 = GetMul0(graph, input2, mul1); + if (mul0 == nullptr) { + MS_LOG(INFO) << "Mul0 do not exist, quit fusion"; + return nullptr; + } + + auto fusion_node = CreateFusionNode(graph, reduce_sum, mul0, input3); + std::vector fusion_node_outputs; + CreateMultipleOutputsOfAnfNode(graph, fusion_node, kConfusionMulGradOutputNum, &fusion_node_outputs); + + auto manage = graph->manager(); + MS_EXCEPTION_IF_NULL(manage); + manage->Replace(mul0, fusion_node_outputs[0]); + return fusion_node_outputs[1]; +} +} // namespace opt +} // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.h b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.h new file mode 100644 index 0000000000..170df5b0e4 --- /dev/null +++ 
b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_CONFUSION_MUL_GRAD_FUSION_H_ +#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_CONFUSION_MUL_GRAD_FUSION_H_ + +#include +#include "pre_activate/common/optimizer.h" + +namespace mindspore { +namespace opt { +class ConfusionMulGradFusion : public PatternProcessPass { + public: + explicit ConfusionMulGradFusion(bool multigraph = true) + : PatternProcessPass("confusion_mul_grad_fusion", multigraph) { + input2_ = std::make_shared(); + input3_ = std::make_shared(); + } + ~ConfusionMulGradFusion() override = default; + const BaseRef DefinePattern() const override; + const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; + + private: + VarPtr input2_; + VarPtr input3_; +}; +} // namespace opt +} // namespace mindspore +#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_CONFUSION_MUL_GRAD_FUSION_H_ diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index ea5e969e52..0903ffd854 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -111,6 +111,7 @@ constexpr auto kFusedMulAddOpName = "FusedMulAdd"; constexpr auto kFusedMulAddNOpName = "FusedMulAddN"; constexpr auto kFusedMulApplyMomentumOpName = 
"FusedMulApplyMomentum"; constexpr auto kBiasAddOpName = "BiasAdd"; +constexpr auto kConfusionMulGradOpName = "ConfusionMulGrad"; // attr key name constexpr auto kAttrInputNames = "input_names"; diff --git a/tests/ut/cpp/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion_test.cc b/tests/ut/cpp/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion_test.cc new file mode 100644 index 0000000000..e3bf09d2cb --- /dev/null +++ b/tests/ut/cpp/pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion_test.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/backend_common_test.h" +#include "common/py_func_graph_fetcher.h" +#include "pre_activate/common/optimizer.h" +#include "pre_activate/ascend/ir_fusion/confusion_mul_grad_fusion.h" +#include "debug/anf_ir_dump.h" + +namespace mindspore { +namespace opt { +class TestHWOptimizeConfusionMulGradFusion : public BackendCommon { + public: + TestHWOptimizeConfusionMulGradFusion() : get_py_fun_("gtest_input.pre_activate.confusion_mul_grad_fusion", true) {} + ~TestHWOptimizeConfusionMulGradFusion() override = default; + + UT::PyFuncGraphFetcher get_py_fun_; +}; + +TEST_F(TestHWOptimizeConfusionMulGradFusion, test_fusion) { + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_confusion_mul_grad_fusion", "before"); + EXPECT_NE(g, nullptr); + std::vector shp{1, 1, 1, 1}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 3; ++i) { + args_spec_list.push_back(x_abstract); + } + auto fg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(fg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_confusion_mul_grad_fusion", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} + +} // namespace opt +} // namespace mindspore diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py new file mode 100644 index 0000000000..d8f7bcc996 --- /dev/null +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/confusion_mul_grad_fusion.py @@ -0,0 +1,55 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +from mindspore.ops import operations as P +from mindspore.ops import Primitive + +mul = P.Mul() +reduce_sum = P.ReduceSum() +confusion_mul_grad = Primitive('ConfusionMulGrad') +make_tuple = Primitive('make_tuple') +tuple_getitem = Primitive('tuple_getitem') +axis = 2 + +class FnDict: + def __init__(self): + self.fnDict = {} + + def __call__(self, fn): + self.fnDict[fn.__name__] = fn + + def __getitem__(self, name): + return self.fnDict[name] + +def test_confusion_mul_grad_fusion(tag): + fns = FnDict() + + @fns + def before(input1, input2, input3): + output1 = mul(input1, input2) + mul1 = mul(input3, input2) + # input axis will be convert to attr in step ConstructKernelGraph + output2 = reduce_sum(mul1, axis) + res = make_tuple(output1, output2) + return res + + @fns + def after(input1, input2, input3): + res = confusion_mul_grad(input1, input2, input3) + item0 = tuple_getitem(res, 0) + item1 = tuple_getitem(res, 1) + res = make_tuple(item0, item1) + return make_tuple(res) + + return fns[tag] From 2fecdede6b6cca648707bd83e3926c6937b350e6 Mon Sep 17 00:00:00 2001 From: Wei Luning Date: Thu, 9 Apr 2020 23:37:29 +0800 Subject: [PATCH 195/367] support amp when model eval, fix example of UnsortSegmentsSum --- .../ccsrc/parallel/step_auto_parallel.cc | 9 ++++ mindspore/ops/operations/array_ops.py | 7 +-- mindspore/ops/operations/nn_ops.py | 26 ++++++----- mindspore/train/amp.py | 42 +++++++++-------- mindspore/train/model.py | 8 ++-- tests/train_step_wrap.py | 45 +------------------ 6 
files changed, 59 insertions(+), 78 deletions(-) diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index fe6be575ee..3f1e18183a 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -636,6 +636,15 @@ void AugmentCostGraph(const std::vector &all_nodes) { // Dealing with the RefKey case auto refkeys = cnode_with_refkeys.second; auto cnode = cnode_with_refkeys.first; + + auto cnode_ptr = cnode->cast(); + if (cnode_ptr == nullptr || !IsValueNode(cnode_ptr->input(0))) { + continue; + } + if (!IsAutoParallelCareNode(cnode_ptr)) { + continue; + } + if (refkeys.size() > 1) { MS_LOG(EXCEPTION) << "CNode: " << cnode->fullname_with_scope() << " 's inputs have more than 1 RefKeys."; } diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 850e895ad0..a7c3f50440 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -1235,10 +1235,11 @@ class UnsortedSegmentSum(PrimitiveWithInfer): Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`. Examples: - >>> input_x = [1, 2, 3, 4] - >>> segment_ids = [0, 0, 1, 2] + >>> input_x = Tensor([1, 2, 3, 4], mindspore.float) + >>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32) >>> num_segments = 4 - >>> type = P.UnsortedSegmentSum()(input_x, segment_ids, num_segments) + >>> P.UnsortedSegmentSum()(input_x, segment_ids, num_segments) + [3, 3, 4, 0] """ @prim_attr_register diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 91f6d7ec01..acccfbaba3 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -22,6 +22,8 @@ from functools import reduce import numpy as np from ... 
import context +from ..._c_expression import signature_rw as sig_rw +from ..._c_expression import signature_kind as sig_kind from ..._checkparam import ParamValidator as validator from ..._checkparam import Rel, check_bool, check_int_positive from ...common import dtype as mstype @@ -1297,29 +1299,31 @@ class ApplyMomentum(PrimitiveWithInfer): filter(lambda x: x.requires_grad, net.get_parameters())) >>> model = Model(net, loss, opt) """ - + __mindspore_signature__ = ( + ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD), + ('accumulation', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD), + ('learning_rate', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD), + ('gradient', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD), + ('momentum', sig_rw.RW_READ, sig_kind.KIND_POSITIONAL_KEYWORD) + ) @prim_attr_register def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0): self.init_prim_io_names(inputs=['variable', 'accumulation', 'learning_rate', 'gradient', 'momentum'], outputs=['output']) def infer_shape(self, v_shape, a_shape, l_shape, g_shape, m_shape): - validator.check(f'variable shape {v_shape}', len(v_shape), '', 0, Rel.GT) - validator.check(f'accumulation shape {a_shape}', len(a_shape), '', 0, Rel.GT) - validator.check(f'learning rate shape {l_shape}', len(l_shape), '', 0, Rel.GE) - validator.check(f'gradient shape {g_shape}', len(g_shape), '', 0, Rel.GE) - validator.check(f'momentum shape {m_shape}', len(m_shape), '', 0, Rel.GE) return v_shape def infer_dtype(self, v_dtype, a_dtype, l_dtype, g_dtype, m_dtype): - validator.check_subclass("v_dtype", v_dtype, mstype.tensor) - validator.check_subclass("a_dtype", a_dtype, mstype.tensor) - v_type = validator.check_typename("v_dtype", v_dtype, [mstype.float16, mstype.float32, mstype.float64]) - validator.check_typename("a_dtype", a_dtype, [mstype.float16, mstype.float32, mstype.float64]) + if v_dtype != mstype.type_refkey and a_dtype != mstype.type_refkey: + 
validator.check_subclass("v_dtype", v_dtype, mstype.tensor) + validator.check_subclass("a_dtype", a_dtype, mstype.tensor) + validator.check_typename("v_dtype", v_dtype, [mstype.float16, mstype.float32, mstype.float64]) + validator.check_typename("a_dtype", a_dtype, [mstype.float16, mstype.float32, mstype.float64]) validator.check_typename("l_dtype", l_dtype, [mstype.float16, mstype.float32, mstype.float64]) validator.check_typename("g_dtype", g_dtype, [mstype.float16, mstype.float32, mstype.float64]) validator.check_typename("m_dtype", m_dtype, [mstype.float16, mstype.float32, mstype.float64]) - return v_type + return g_dtype class SmoothL1Loss(PrimitiveWithInfer): diff --git a/mindspore/train/amp.py b/mindspore/train/amp.py index e909b44e40..c4c115ef27 100644 --- a/mindspore/train/amp.py +++ b/mindspore/train/amp.py @@ -82,6 +82,29 @@ def _check_kwargs(key_words): if loss_scale_manager: validator.check_isinstance('loss_scale_manager', loss_scale_manager, LossScaleManager) + +def _add_loss_network(network, loss_fn, cast_model_type): + class WithLossCell(nn.Cell): + "Wrap loss for amp. Cast network output back to float32" + + def __init__(self, backbone, loss_fn): + super(WithLossCell, self).__init__(auto_prefix=False) + self._backbone = backbone + self._loss_fn = loss_fn + + def construct(self, data, label): + out = self._backbone(data) + label = _mp_cast_helper(mstype.float32, label) + return self._loss_fn(F.cast(out, mstype.float32), label) + + validator.check_isinstance('loss_fn', loss_fn, nn.Cell) + if cast_model_type == mstype.float16: + network = WithLossCell(network, loss_fn) + else: + network = nn.WithLossCell(network, loss_fn) + return network + + def build_train_network(network, optimizer, loss_fn=None, level='O0', **kwargs): """ Build the mixed precision training cell automatically. 
@@ -117,24 +140,7 @@ def build_train_network(network, optimizer, loss_fn=None, level='O0', **kwargs): _do_keep_batchnorm_fp32(network) if loss_fn: - class WithLossCell(nn.Cell): - "Wrap loss for amp. Cast network output back to float32" - - def __init__(self, backbone, loss_fn): - super(WithLossCell, self).__init__(auto_prefix=False) - self._backbone = backbone - self._loss_fn = loss_fn - - def construct(self, data, label): - out = self._backbone(data) - label = _mp_cast_helper(mstype.float32, label) - return self._loss_fn(F.cast(out, mstype.float32), label) - - validator.check_isinstance('loss_fn', loss_fn, nn.Cell) - if config.cast_model_type == mstype.float16: - network = WithLossCell(network, loss_fn) - else: - network = nn.WithLossCell(network, loss_fn) + network = _add_loss_network(network, loss_fn, config.cast_model_type) if _get_parallel_mode() in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL): network = _VirtualDatasetCell(network) diff --git a/mindspore/train/model.py b/mindspore/train/model.py index 833fb07256..a1acec859c 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -24,8 +24,7 @@ from .. 
import context from ..parallel._utils import _get_parallel_mode, _get_device_num, _get_global_rank, \ _get_parameter_broadcast, _device_number_check, _parameter_broadcast_check, _callback_wrapper from ..nn.metrics import Loss -from ..nn.wrap import WithLossCell, WithEvalCell, \ - DataWrapper +from ..nn.wrap import WithLossCell, DataWrapper, WithEvalCell from ..nn.wrap.cell_wrapper import _VirtualDatasetCell from .parallel_utils import ParallelMode from ..common import dtype as mstype @@ -151,7 +150,10 @@ class Model: else: if self._loss_fn is None: raise ValueError("loss_fn can not be None.") - self._eval_network = WithEvalCell(self._network, self._loss_fn) + if self._optimizer: + self._eval_network = self._train_network.network + else: + self._eval_network = WithEvalCell(self._network, self._loss_fn) self._eval_indexes = [0, 1, 2] def _clear_metrics(self): diff --git a/tests/train_step_wrap.py b/tests/train_step_wrap.py index 7289c01004..d48e25b837 100644 --- a/tests/train_step_wrap.py +++ b/tests/train_step_wrap.py @@ -21,47 +21,6 @@ from mindspore.ops import composite as C from mindspore.ops import operations as P from mindspore import Parameter, ParameterTuple - -run_opt = C.MultitypeFuncGraph("run_opt") - -# pylint: disable=unused-argument -@run_opt.register("Function", "Int", "Number", "Number", - "Tensor", "Tensor", "Tensor") -def tensor_run_opt(opt, iterator, learning_rate, momentum, - gradient, variable, moment): - success = True - new_weight = opt(gradient, moment, variable, learning_rate, momentum) - success = F.depend(success, P.Assign()(variable, new_weight)) - return success - - -class OptimizerByMomentum(nn.Cell): - """ - OptimizerByMomentum definition - """ - # list of tensor - def __init__(self, weights): - super(OptimizerByMomentum, self).__init__() - self.learning_rate = Parameter(0.1, name="learning_rate") - self.momentum = Parameter(0.05, name="momentum") - self.iter = Parameter(0, name="iter") - - self.weights = weights - self.moments = 
weights.clone(prefix="moments", init='zeros') - - self.hyper_map = C.HyperMap() - self.opt = P.ApplyMomentum() - - def construct(self, grads): - success = True - weights = self.weights - moments = self.moments - success = self.hyper_map( - F.partial(run_opt, self.opt, self.iter, - self.learning_rate, self.momentum), grads, weights, moments) - # self.learning_rate = updata_lr(self.learning_rate, self.momentum) - return success - class TrainStepWrap(nn.Cell): """ TrainStepWrap definition @@ -71,7 +30,7 @@ class TrainStepWrap(nn.Cell): self.network = network self.network.set_train() self.weights = ParameterTuple(network.trainable_params()) - self.optimizer = OptimizerByMomentum(self.weights) + self.optimizer = nn.Momentum(self.weights, 0.1, 0.9) self.hyper_map = C.HyperMap() self.grad = C.GradOperation('grad', get_by_list=True) @@ -107,7 +66,7 @@ class TrainStepWrap2(nn.Cell): self.network = network self.network.set_train() self.weights = ParameterTuple(network.get_parameters()) - self.optimizer = OptimizerByMomentum(self.weights) + self.optimizer = nn.Momentum(self.weights, 0.1, 0.9) self.hyper_map = C.HyperMap() self.grad = C.GradOperation('grad', get_by_list=True, sens_param=True) self.sens = sens From b9728f4e9e16f46b98e4688c63daafaeff822619 Mon Sep 17 00:00:00 2001 From: Zhang Qinghua Date: Mon, 13 Apr 2020 19:23:45 +0800 Subject: [PATCH 196/367] Fix the issue in debug info. 
--- mindspore/ccsrc/debug/anf_ir_dump.cc | 4 ++-- mindspore/ccsrc/debug/label.cc | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mindspore/ccsrc/debug/anf_ir_dump.cc b/mindspore/ccsrc/debug/anf_ir_dump.cc index 9eb0a376cc..fcf0777d16 100644 --- a/mindspore/ccsrc/debug/anf_ir_dump.cc +++ b/mindspore/ccsrc/debug/anf_ir_dump.cc @@ -94,7 +94,7 @@ struct SubGraphIRInfo { OrderedMap local_var_map; }; -void DumpGrobalInfoEntry(const FuncGraphPtr &graph, std::ostringstream &buffer) { +void DumpGlobalInfoEntry(const FuncGraphPtr &graph, std::ostringstream &buffer) { if (graph == nullptr) { return; } @@ -452,7 +452,7 @@ void DumpIR(const std::string &filename, const FuncGraphPtr &graph, bool dump_fu auto nodes = TopoSort(graph->get_return(), SuccDeeperSimple, AlwaysInclude); // dump global info - DumpGrobalInfoEntry(graph, buffer); + DumpGlobalInfoEntry(graph, buffer); DumpParams(graph, buffer, ¶_map); OrderedMap> sub_graphs; diff --git a/mindspore/ccsrc/debug/label.cc b/mindspore/ccsrc/debug/label.cc index 794151b952..f0e16e831e 100644 --- a/mindspore/ccsrc/debug/label.cc +++ b/mindspore/ccsrc/debug/label.cc @@ -66,7 +66,7 @@ NameWithTrace RootName(const DebugInfoPtr& debug_info, TraceLabelType trace_labe return trace_name; } -std::string CombineTraceTypes(const std::string& root_name, std::vector trace_labels) { +std::string CombineTraceTypes(const std::string& root_name, const std::vector& trace_labels) { std::string tags = ""; for (auto& itr : trace_labels) { std::string symbol = itr; From 732dd01a6aa88e855079ebfbced6abae1a36d262 Mon Sep 17 00:00:00 2001 From: Yanjun Peng Date: Mon, 13 Apr 2020 17:46:48 +0800 Subject: [PATCH 197/367] fix epoch repeat doc problem --- mindspore/dataset/engine/datasets.py | 2 ++ mindspore/train/model.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 3d660d58a8..4e0b082734 100644 --- a/mindspore/dataset/engine/datasets.py +++ 
b/mindspore/dataset/engine/datasets.py @@ -394,6 +394,8 @@ class Dataset: The order of using repeat and batch reflects the number of batches. Recommend that repeat operation should be used after batch operation. If dataset_sink_mode is False, here repeat operation is invalid. + If dataset_sink_mode is True, repeat count should be euqal to the epoch of training. Otherwise, + errors could occur since the amount of data is not the amount training requires. Args: count (int): Number of times the dataset should be repeated (default=None). diff --git a/mindspore/train/model.py b/mindspore/train/model.py index 833fb07256..55db3b538a 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -360,6 +360,9 @@ class Model: Note: CPU is not supported when dataset_sink_mode is true. + If dataset_sink_mode is True, epoch of training should be equal to the count of repeat + operation in dataset processing. Otherwise, errors could occur since the amount of data + is not the amount training requires. Args: epoch (int): Total number of iterations on the data. 
From 79f74745ece93f281c5c608d23e537138a0b7596 Mon Sep 17 00:00:00 2001 From: lvliang Date: Mon, 13 Apr 2020 09:22:32 +0800 Subject: [PATCH 198/367] fix-bug-type-str-is-invalied-uint16 --- mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc | 2 +- mindspore/ccsrc/session/ascend_session.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc index 7f11c8f2c7..58c030e79d 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc @@ -94,7 +94,7 @@ AnfNodePtr GetTransInputNodePtr(const FuncGraphPtr &func_graph, const CNodePtr & MS_EXCEPTION_IF_NULL(node); bool padding_flag = false; auto input_node = AnfAlgo::GetInputNode(node, index); - if (!AnfAlgo::IsFeatureMapInput(node, index)) { + if (input_node->isa() || input_node->isa()) { input_node = InsertTransOpForOutput(func_graph, input_node, kernel_select); MS_EXCEPTION_IF_NULL(input_node); AnfAlgo::SetNodeInput(node, input_node, index); diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc index f255b2f15f..751cf76e32 100755 --- a/mindspore/ccsrc/session/ascend_session.cc +++ b/mindspore/ccsrc/session/ascend_session.cc @@ -186,7 +186,7 @@ void AscendSession::RunGraph(const GraphId &graph_id, const std::vector &kernel_graph) const { MS_LOG(INFO) << "Start"; // data layout optimization - opt::AscendDataLayout(kernel_graph); + opt::RunOpAscendDataLayout(kernel_graph); // mixed precision optimization opt::AscendMixPrecision(kernel_graph); MS_LOG(INFO) << "Finish"; From 65a49a21e54a7753daeda9216fc1460cfd705686 Mon Sep 17 00:00:00 2001 From: chenzomi Date: Mon, 13 Apr 2020 19:42:12 +0800 Subject: [PATCH 199/367] fix complite error while using .so with clang on macos --- CMakeLists.txt | 7 ++++++- build.sh | 2 +- cmake/external_libs/gtest.cmake | 4 ++-- cmake/external_libs/jpeg_turbo.cmake | 8 
++++++-- cmake/external_libs/libtiff.cmake | 13 ++++++++++--- cmake/external_libs/opencv.cmake | 13 +++++++++---- cmake/external_libs/protobuf.cmake | 6 +++++- cmake/external_libs/sqlite.cmake | 6 +++++- mindspore/ccsrc/CMakeLists.txt | 21 ++++++++++++--------- mindspore/ccsrc/dataset/CMakeLists.txt | 7 ++++++- mindspore/ccsrc/kernel/common_utils.cc | 4 ++-- 11 files changed, 64 insertions(+), 27 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d11314408e..6fe159590f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -5,8 +5,13 @@ include(${CMAKE_SOURCE_DIR}/cmake/options.cmake) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/") +if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Werror -Wno-return-std-move -Wno-unused-private-field -Wno-unused-lambda-capture -Wno-sign-compare -Wno-overloaded-virtual -Wno-unneeded-internal-declaration -Wno-unused-variable -Wno-pessimizing-move -Wno-inconsistent-missing-override -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2") +else() + set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2") +endif() + set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -Wl,--allow-shlib-undefined -D_LIBCPP_INLINE_VISIBILITY='' -D'_LIBCPP_EXTERN_TEMPLATE(...)=' -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2 -Wno-cpp") -set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O2 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -I/usr/local/include -std=c++17 -Werror -Wall -Wno-deprecated-declarations -fPIC") set(CMAKE_EXPORT_COMPILE_COMMANDS ON) diff --git a/build.sh b/build.sh index 6dc699000a..3c6de1cf77 100755 --- a/build.sh +++ b/build.sh @@ -297,7 +297,7 @@ build_mindspore() if [[ "X$ENABLE_DUMPE2E" = "Xon" ]]; then CMAKE_ARGS="${CMAKE_ARGS} 
-DENABLE_DUMP_E2E=ON" fi - CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_DUMP_IR=${ENABLE_DUMP_IR^^}" + CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_DUMP_IR=${ENABLE_DUMP_IR}" if [[ "X$ENABLE_MPI" = "Xon" ]]; then CMAKE_ARGS="${CMAKE_ARGS} -DENABLE_MPI=ON" fi diff --git a/cmake/external_libs/gtest.cmake b/cmake/external_libs/gtest.cmake index 5384b48825..df2eaec2cc 100644 --- a/cmake/external_libs/gtest.cmake +++ b/cmake/external_libs/gtest.cmake @@ -9,5 +9,5 @@ mindspore_add_pkg(gtest -DCMAKE_MACOSX_RPATH=TRUE -Dgtest_disable_pthreads=ON) include_directories(${gtest_INC}) add_library(mindspore::gtest ALIAS gtest::gtest) -file(COPY ${gtest_LIBPATH}/libgtest.so DESTINATION ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest) -file(COPY ${gtest_LIBPATH}/libgtest_main.so DESTINATION ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest) +file(COPY ${gtest_LIBPATH}/libgtest${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest) +file(COPY ${gtest_LIBPATH}/libgtest_main${CMAKE_SHARED_LIBRARY_SUFFIX} DESTINATION ${CMAKE_BINARY_DIR}/googletest/googlemock/gtest) diff --git a/cmake/external_libs/jpeg_turbo.cmake b/cmake/external_libs/jpeg_turbo.cmake index 84d6e3006c..6c2c70c709 100644 --- a/cmake/external_libs/jpeg_turbo.cmake +++ b/cmake/external_libs/jpeg_turbo.cmake @@ -1,6 +1,10 @@ - set(jpeg_turbo_USE_STATIC_LIBS ON) -set(jpeg_turbo_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") +if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(jpeg_turbo_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") +else() + set(jpeg_turbo_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") +endif() + set(jpeg_turbo_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") mindspore_add_pkg(jpeg_turbo VER 2.0.4 diff --git a/cmake/external_libs/libtiff.cmake b/cmake/external_libs/libtiff.cmake index 461b9c4481..c9934bfaa1 100644 --- 
a/cmake/external_libs/libtiff.cmake +++ b/cmake/external_libs/libtiff.cmake @@ -1,8 +1,15 @@ - -set(tiff_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -Wno-unused-result \ +if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(tiff_CXXFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -Wno-unused-result \ -Wno-unused-but-set-variable -fPIC -D_FORTIFY_SOURCE=2 -O2") -set(tiff_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -Wno-unused-result \ + set(tiff_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -Wno-unused-result \ -Wno-unused-but-set-variable -fPIC -D_FORTIFY_SOURCE=2 -O2") +else() + set(tiff_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -Wno-unused-result \ + -Wno-unused-but-set-variable -fPIC -D_FORTIFY_SOURCE=2 -O2") + set(tiff_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -Wno-unused-result \ + -Wno-unused-but-set-variable -fPIC -D_FORTIFY_SOURCE=2 -O2") +endif() + set(tiff_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") mindspore_add_pkg(tiff diff --git a/cmake/external_libs/opencv.cmake b/cmake/external_libs/opencv.cmake index e67c3f232f..5c60a2fa61 100644 --- a/cmake/external_libs/opencv.cmake +++ b/cmake/external_libs/opencv.cmake @@ -1,7 +1,12 @@ - -set(opencv_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") -set(opencv_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") -set(opencv_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") +if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(opencv_CXXFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") + set(opencv_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") + set(opencv_LDFLAGS "-Wl") +else() + set(opencv_CXXFLAGS "-fstack-protector-all 
-Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") + set(opencv_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") + set(opencv_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") +endif() mindspore_add_pkg(opencv VER 4.2.0 diff --git a/cmake/external_libs/protobuf.cmake b/cmake/external_libs/protobuf.cmake index 9e64785a7a..987d5c6a8b 100644 --- a/cmake/external_libs/protobuf.cmake +++ b/cmake/external_libs/protobuf.cmake @@ -1,5 +1,9 @@ set(protobuf_USE_STATIC_LIBS ON) -set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2") +if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2") +else() + set(protobuf_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -fvisibility=hidden -D_FORTIFY_SOURCE=2 -O2") +endif() set(protobuf_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") set(_ms_tmp_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) set(CMAKE_CXX_FLAGS ${_ms_tmp_CMAKE_CXX_FLAGS}) diff --git a/cmake/external_libs/sqlite.cmake b/cmake/external_libs/sqlite.cmake index 35e48b2d0e..e3fe77d96b 100644 --- a/cmake/external_libs/sqlite.cmake +++ b/cmake/external_libs/sqlite.cmake @@ -1,7 +1,11 @@ set(sqlite_USE_STATIC_LIBS ON) set(sqlite_CXXFLAGS) -set(sqlite_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") +if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(sqlite_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") +else() + set(sqlite_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") +endif() set(sqlite_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") mindspore_add_pkg(sqlite diff --git 
a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index f3100f62c9..1b35856431 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -330,10 +330,10 @@ set(PYTHON_MODULE_SOURCE set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) pybind11_add_module(_c_expression ${PYTHON_MODULE_SOURCE}) -target_link_options(_c_expression PRIVATE -Wl,-init,mindspore_log_init) MESSAGE(STATUS "operation system is ${CMAKE_SYSTEM}") if (CMAKE_SYSTEM_NAME MATCHES "Linux") + target_link_options(_c_expression PRIVATE -Wl,-init,mindspore_log_init) set(ORIGIN_PATH $ORIGIN) elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") set_target_properties(_c_expression PROPERTIES MACOSX_RPATH ON) @@ -497,7 +497,7 @@ set(MS_LIB_PATH ${CMAKE_SOURCE_DIR}/build/package/mindspore/lib/) add_custom_target(add_ms_lib ALL COMMAND mkdir -pv ${MS_LIB_PATH} COMMAND cp ${MS_CCSRC_BUILD_PATH}/_c_expression* ${MS_PACK_PATH} - COMMAND cp ${MS_CCSRC_BUILD_PATH}/libmindspore_gvar.so ${MS_LIB_PATH} + COMMAND cp ${MS_CCSRC_BUILD_PATH}/libmindspore_gvar${CMAKE_SHARED_LIBRARY_SUFFIX} ${MS_LIB_PATH} ) add_dependencies(add_ms_lib _c_expression) @@ -549,9 +549,11 @@ if (ENABLE_GPU) endif() if (ENABLE_CPU) - add_custom_target(add_cpu_lib ALL - COMMAND cp ${onednn_LIBPATH}/libdnnl.so.1.1 ${MS_LIB_PATH}/libdnnl.so.1 - ) + if (CMAKE_SYSTEM_NAME MATCHES "Darwin") + add_custom_target(add_cpu_lib ALL COMMAND cp ${onednn_LIBPATH}/libdnnl.1.1.dylib ${MS_LIB_PATH}/libdnnl.1.1.dylib) + else () + add_custom_target(add_cpu_lib ALL COMMAND cp ${onednn_LIBPATH}/libdnnl.so.1.1 ${MS_LIB_PATH}/libdnnl.so.1) + endif () add_dependencies(add_cpu_lib add_ms_lib) endif() @@ -573,8 +575,9 @@ endif() if (USE_GLOG) target_link_libraries(_c_expression PRIVATE mindspore::glog) - add_custom_target(add_glog_lib ALL - COMMAND cp ${glog_LIBPATH}/libglog*.so.0 ${MS_LIB_PATH} - ) - add_dependencies(add_glog_lib add_ms_lib) + if (CMAKE_SYSTEM_NAME MATCHES "Darwin") + add_custom_target(add_glog_lib ALL COMMAND cp 
${glog_LIBPATH}/libglog*.dylib ${MS_LIB_PATH}) + else () + add_custom_target(add_glog_lib ALL COMMAND cp ${glog_LIBPATH}/libglog*.so.0 ${MS_LIB_PATH}) + endif () endif() diff --git a/mindspore/ccsrc/dataset/CMakeLists.txt b/mindspore/ccsrc/dataset/CMakeLists.txt index 5bf210a8ba..52ba328828 100644 --- a/mindspore/ccsrc/dataset/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/CMakeLists.txt @@ -3,7 +3,12 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reorder") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-switch") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-sequence-point") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-variable") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-maybe-uninitialized") + +if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-uninitialized") +else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-maybe-uninitialized") +endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-attributes") diff --git a/mindspore/ccsrc/kernel/common_utils.cc b/mindspore/ccsrc/kernel/common_utils.cc index 137ae65414..065cfaddc8 100644 --- a/mindspore/ccsrc/kernel/common_utils.cc +++ b/mindspore/ccsrc/kernel/common_utils.cc @@ -89,13 +89,13 @@ bool IsAtomicNode(const CNodePtr &kernel_node) { parameters_indexs.push_back(0); } } - std::vector clean_output_indexs; + std::vector clean_output_indexs; // in parameters data sort as input->workspace->output size_t index = 0; while (index < output_num) { if (parameters_indexs[input_num + workspace_num + index] == 1) { atomic_flag = true; - clean_output_indexs.push_back(index); + clean_output_indexs.push_back(SizeToInt(index)); } index++; } From 0ce83e39e16d6b0c1996ad2918548afc5f29d0f2 Mon Sep 17 00:00:00 2001 From: liyong Date: Mon, 13 Apr 2020 16:42:13 +0800 Subject: [PATCH 200/367] fix TestShardSampleWrongNumber adjust internal api --- mindspore/ccsrc/dataset/api/de_pipeline.cc | 34 +------ mindspore/ccsrc/dataset/api/de_pipeline.h 
| 3 - .../ccsrc/dataset/api/python_bindings.cc | 8 ++ .../ccsrc/mindrecord/include/shard_category.h | 2 +- .../ccsrc/mindrecord/include/shard_operator.h | 20 +++- .../ccsrc/mindrecord/include/shard_sample.h | 10 +- .../ccsrc/mindrecord/include/shard_shuffle.h | 2 +- mindspore/ccsrc/mindrecord/io/shard_reader.cc | 11 ++- .../ccsrc/mindrecord/meta/shard_category.cc | 2 +- .../ccsrc/mindrecord/meta/shard_sample.cc | 17 +++- .../ccsrc/mindrecord/meta/shard_shuffle.cc | 2 +- mindspore/dataset/engine/datasets.py | 4 +- mindspore/dataset/engine/samplers.py | 2 + .../cpp/mindrecord/ut_shard_operator_test.cc | 95 +++++++++---------- .../dataset/test_minddataset_sampler.py | 10 -- 15 files changed, 115 insertions(+), 107 deletions(-) diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.cc b/mindspore/ccsrc/dataset/api/de_pipeline.cc index cf7050450b..f572db0cdf 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.cc +++ b/mindspore/ccsrc/dataset/api/de_pipeline.cc @@ -391,30 +391,6 @@ Status DEPipeline::CheckMindRecordPartitionInfo(const py::dict &args, std::vecto return Status::OK(); } -Status DEPipeline::GetMindrecordSampler(const std::string &sampler_name, const py::dict &args, - std::shared_ptr *ptr) { - std::vector indices; - for (auto &arg : args) { - std::string key = py::str(arg.first); - py::handle value = arg.second; - if (!value.is_none()) { - if (key == "indices") { - indices = ToIntVector(value); - } else { - std::string err_msg = "ERROR: parameter " + key + " is invalid."; - RETURN_STATUS_UNEXPECTED(err_msg); - } - } - } - if (sampler_name == "SubsetRandomSampler") { - *ptr = std::make_shared(indices); - } else { - std::string err_msg = "ERROR: parameter sampler_name is invalid."; - RETURN_STATUS_UNEXPECTED(err_msg); - } - return Status::OK(); -} - Status DEPipeline::ParseMindRecordOp(const py::dict &args, std::shared_ptr *ptr) { if (args["dataset_file"].is_none()) { std::string err_msg = "Error: at least one of dataset_files is missing"; @@ -446,12 +422,10 @@ 
Status DEPipeline::ParseMindRecordOp(const py::dict &args, std::shared_ptr(seed)); - } else if (key == "sampler_name") { - std::shared_ptr sample_op; - auto ret = GetMindrecordSampler(ToString(value), args["sampler_params"], &sample_op); - if (Status::OK() != ret) { - return ret; - } + } else if (key == "sampler") { + auto create = py::reinterpret_borrow(value).attr("_create_for_minddataset"); + std::shared_ptr sample_op = + create().cast>(); operators.push_back(sample_op); } } diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.h b/mindspore/ccsrc/dataset/api/de_pipeline.h index 491a75390e..acffc390cc 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.h +++ b/mindspore/ccsrc/dataset/api/de_pipeline.h @@ -145,9 +145,6 @@ class DEPipeline { Status ParseCelebAOp(const py::dict &args, std::shared_ptr *ptr); - Status GetMindrecordSampler(const std::string &sampler_name, const py::dict &args, - std::shared_ptr *ptr); - private: // Execution tree that links the dataset operators. std::shared_ptr tree_; diff --git a/mindspore/ccsrc/dataset/api/python_bindings.cc b/mindspore/ccsrc/dataset/api/python_bindings.cc index e6c2691281..b7cf74d435 100644 --- a/mindspore/ccsrc/dataset/api/python_bindings.cc +++ b/mindspore/ccsrc/dataset/api/python_bindings.cc @@ -54,6 +54,9 @@ #include "dataset/engine/datasetops/source/tf_reader_op.h" #include "dataset/engine/jagged_connector.h" #include "dataset/kernels/data/to_float16_op.h" +#include "dataset/util/random.h" +#include "mindrecord/include/shard_operator.h" +#include "mindrecord/include/shard_sample.h" #include "pybind11/pybind11.h" #include "pybind11/stl.h" #include "pybind11/stl_bind.h" @@ -382,6 +385,7 @@ void bindTensorOps4(py::module *m) { void bindSamplerOps(py::module *m) { (void)py::class_>(*m, "Sampler"); + (void)py::class_>(*m, "ShardOperator"); (void)py::class_>(*m, "DistributedSampler") .def(py::init(), py::arg("numDev"), py::arg("devId"), py::arg("shuffle"), @@ -399,6 +403,10 @@ void bindSamplerOps(py::module *m) { 
(void)py::class_>(*m, "SubsetRandomSampler") .def(py::init>(), py::arg("indices")); + (void)py::class_>( + *m, "MindrecordSubsetRandomSampler") + .def(py::init, uint32_t>(), py::arg("indices"), py::arg("seed") = GetSeed()); + (void)py::class_>(*m, "WeightedRandomSampler") .def(py::init, int64_t, bool>(), py::arg("weights"), py::arg("numSamples"), py::arg("replacement")); diff --git a/mindspore/ccsrc/mindrecord/include/shard_category.h b/mindspore/ccsrc/mindrecord/include/shard_category.h index 08e5ac9c2e..b8a7611540 100644 --- a/mindspore/ccsrc/mindrecord/include/shard_category.h +++ b/mindspore/ccsrc/mindrecord/include/shard_category.h @@ -32,7 +32,7 @@ class ShardCategory : public ShardOperator { const std::vector> &get_categories() const; - MSRStatus operator()(ShardTask &tasks) override; + MSRStatus execute(ShardTask &tasks) override; private: std::vector> categories_; diff --git a/mindspore/ccsrc/mindrecord/include/shard_operator.h b/mindspore/ccsrc/mindrecord/include/shard_operator.h index 9d00fb7628..9f302e5321 100644 --- a/mindspore/ccsrc/mindrecord/include/shard_operator.h +++ b/mindspore/ccsrc/mindrecord/include/shard_operator.h @@ -24,7 +24,25 @@ namespace mindrecord { class ShardOperator { public: virtual ~ShardOperator() = default; - virtual MSRStatus operator()(ShardTask &tasks) = 0; + + MSRStatus operator()(ShardTask &tasks) { + if (SUCCESS != this->pre_execute(tasks)) { + return FAILED; + } + if (SUCCESS != this->execute(tasks)) { + return FAILED; + } + if (SUCCESS != this->suf_execute(tasks)) { + return FAILED; + } + return SUCCESS; + } + + virtual MSRStatus pre_execute(ShardTask &tasks) { return SUCCESS; } + + virtual MSRStatus execute(ShardTask &tasks) = 0; + + virtual MSRStatus suf_execute(ShardTask &tasks) { return SUCCESS; } }; } // namespace mindrecord } // namespace mindspore diff --git a/mindspore/ccsrc/mindrecord/include/shard_sample.h b/mindspore/ccsrc/mindrecord/include/shard_sample.h index aeb3374f28..15353fd0ff 100644 --- 
a/mindspore/ccsrc/mindrecord/include/shard_sample.h +++ b/mindspore/ccsrc/mindrecord/include/shard_sample.h @@ -17,10 +17,12 @@ #ifndef MINDRECORD_INCLUDE_SHARD_SAMPLE_H_ #define MINDRECORD_INCLUDE_SHARD_SAMPLE_H_ +#include #include #include #include #include "mindrecord/include/shard_operator.h" +#include "mindrecord/include/shard_shuffle.h" namespace mindspore { namespace mindrecord { @@ -32,21 +34,23 @@ class ShardSample : public ShardOperator { ShardSample(int num, int den, int par); - explicit ShardSample(const std::vector &indices); + ShardSample(const std::vector &indices, uint32_t seed); ~ShardSample() override{}; const std::pair get_partitions() const; - MSRStatus operator()(ShardTask &tasks) override; + MSRStatus execute(ShardTask &tasks) override; + MSRStatus suf_execute(ShardTask &tasks) override; private: int numerator_; int denominator_; int no_of_samples_; int partition_id_; - std::vector indices_; + std::vector indices_; SamplerType sampler_type_; + std::shared_ptr shuffle_op_; }; } // namespace mindrecord } // namespace mindspore diff --git a/mindspore/ccsrc/mindrecord/include/shard_shuffle.h b/mindspore/ccsrc/mindrecord/include/shard_shuffle.h index a9992ab4bc..464881aa7a 100644 --- a/mindspore/ccsrc/mindrecord/include/shard_shuffle.h +++ b/mindspore/ccsrc/mindrecord/include/shard_shuffle.h @@ -28,7 +28,7 @@ class ShardShuffle : public ShardOperator { ~ShardShuffle() override{}; - MSRStatus operator()(ShardTask &tasks) override; + MSRStatus execute(ShardTask &tasks) override; private: uint32_t shuffle_seed_; diff --git a/mindspore/ccsrc/mindrecord/io/shard_reader.cc b/mindspore/ccsrc/mindrecord/io/shard_reader.cc index 12aecea21f..2413da3737 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_reader.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_reader.cc @@ -779,8 +779,12 @@ MSRStatus ShardReader::Launch(bool isSimpleReader) { // Sort row group by (group_id, shard_id), prepare for parallel reading std::sort(row_group_summary.begin(), 
row_group_summary.end(), ResortRowGroups); - CreateTasks(row_group_summary, operators_); - MS_LOG(INFO) << "Launching read threads"; + if (CreateTasks(row_group_summary, operators_) != SUCCESS) { + MS_LOG(ERROR) << "Failed to launch read threads."; + interrupt_ = true; + return FAILED; + } + MS_LOG(INFO) << "Launching read threads."; if (isSimpleReader) return SUCCESS; @@ -1152,6 +1156,9 @@ std::vector, json>> ShardReader::GetBlockNext() } std::vector, json>> ShardReader::GetNext() { + if (interrupt_) { + return std::vector, json>>(); + } if (block_reader_) return GetBlockNext(); if (deliver_id_ >= static_cast(tasks_.Size())) { return std::vector, json>>(); diff --git a/mindspore/ccsrc/mindrecord/meta/shard_category.cc b/mindspore/ccsrc/mindrecord/meta/shard_category.cc index c64a7bfc70..859a3b343f 100644 --- a/mindspore/ccsrc/mindrecord/meta/shard_category.cc +++ b/mindspore/ccsrc/mindrecord/meta/shard_category.cc @@ -23,6 +23,6 @@ ShardCategory::ShardCategory(const std::vector> &ShardCategory::get_categories() const { return categories_; } -MSRStatus ShardCategory::operator()(ShardTask &tasks) { return SUCCESS; } +MSRStatus ShardCategory::execute(ShardTask &tasks) { return SUCCESS; } } // namespace mindrecord } // namespace mindspore diff --git a/mindspore/ccsrc/mindrecord/meta/shard_sample.cc b/mindspore/ccsrc/mindrecord/meta/shard_sample.cc index 367c7a5cf9..ef627b0c09 100644 --- a/mindspore/ccsrc/mindrecord/meta/shard_sample.cc +++ b/mindspore/ccsrc/mindrecord/meta/shard_sample.cc @@ -46,13 +46,15 @@ ShardSample::ShardSample(int num, int den, int par) indices_({}), sampler_type_(kCustomTopPercentSampler) {} -ShardSample::ShardSample(const std::vector &indices) +ShardSample::ShardSample(const std::vector &indices, uint32_t seed) : numerator_(0), denominator_(0), no_of_samples_(0), partition_id_(0), indices_(indices), - sampler_type_(kSubsetRandomSampler) {} + sampler_type_(kSubsetRandomSampler) { + shuffle_op_ = std::make_shared(seed); +} const std::pair 
ShardSample::get_partitions() const { if (numerator_ == 1 && denominator_ > 1) { @@ -61,7 +63,7 @@ const std::pair ShardSample::get_partitions() const { return std::pair(-1, -1); } -MSRStatus ShardSample::operator()(ShardTask &tasks) { +MSRStatus ShardSample::execute(ShardTask &tasks) { int no_of_categories = static_cast(tasks.categories); int total_no = static_cast(tasks.Size()); @@ -115,5 +117,14 @@ MSRStatus ShardSample::operator()(ShardTask &tasks) { } return SUCCESS; } + +MSRStatus ShardSample::suf_execute(ShardTask &tasks) { + if (sampler_type_ == kSubsetRandomSampler) { + if (SUCCESS != (*shuffle_op_)(tasks)) { + return FAILED; + } + } + return SUCCESS; +} } // namespace mindrecord } // namespace mindspore diff --git a/mindspore/ccsrc/mindrecord/meta/shard_shuffle.cc b/mindspore/ccsrc/mindrecord/meta/shard_shuffle.cc index 14816e9e9f..f8ad2c341d 100644 --- a/mindspore/ccsrc/mindrecord/meta/shard_shuffle.cc +++ b/mindspore/ccsrc/mindrecord/meta/shard_shuffle.cc @@ -22,7 +22,7 @@ namespace mindspore { namespace mindrecord { ShardShuffle::ShardShuffle(uint32_t seed) : shuffle_seed_(seed) {} -MSRStatus ShardShuffle::operator()(ShardTask &tasks) { +MSRStatus ShardShuffle::execute(ShardTask &tasks) { if (tasks.categories < 1) { return FAILED; } diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 3d660d58a8..6052490928 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -1681,9 +1681,7 @@ class MindDataset(SourceDataset): args["block_reader"] = self.block_reader args["num_shards"] = self.num_shards args["shard_id"] = self.shard_id - if self.sampler: - args["sampler_name"] = self.sampler.__class__.__name__ - args["sampler_params"] = self.sampler.__dict__ + args["sampler"] = self.sampler return args def get_dataset_size(self): diff --git a/mindspore/dataset/engine/samplers.py b/mindspore/dataset/engine/samplers.py index 62a3dbed18..fd9c50e951 100644 --- 
a/mindspore/dataset/engine/samplers.py +++ b/mindspore/dataset/engine/samplers.py @@ -195,6 +195,8 @@ class SubsetRandomSampler(): def create(self): return cde.SubsetRandomSampler(self.indices) + def _create_for_minddataset(self): + return cde.MindrecordSubsetRandomSampler(self.indices) class WeightedRandomSampler(): """ diff --git a/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc b/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc index 143931658a..549e2140f4 100644 --- a/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc +++ b/tests/ut/cpp/mindrecord/ut_shard_operator_test.cc @@ -30,9 +30,9 @@ #include "mindrecord/include/shard_shuffle.h" #include "ut_common.h" -using mindspore::MsLogLevel::INFO; -using mindspore::ExceptionType::NoExceptionType; using mindspore::LogStream; +using mindspore::ExceptionType::NoExceptionType; +using mindspore::MsLogLevel::INFO; namespace mindspore { namespace mindrecord { @@ -65,31 +65,31 @@ TEST_F(TestShardOperator, TestShardSampleBasic) { ASSERT_TRUE(i <= kSampleCount); } -// TEST_F(TestShardOperator, TestShardSampleWrongNumber) { -// MS_LOG(INFO) << common::SafeCStr(FormatInfo("Test read imageNet")); -// -// std::string file_name = "./imagenet.shard01"; -// auto column_list = std::vector{"file_name"}; -// -// const int kNum = 5; -// const int kDen = 0; -// std::vector> ops; -// ops.push_back(std::make_shared(kNum, kDen)); -// -// ShardReader dataset; -// dataset.Open(file_name, 4, column_list, ops); -// dataset.Launch(); -// -// int i = 0; -// while (true) { -// auto x = dataset.GetNext(); -// if (x.empty()) break; -// MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); -// i++; -// } -// dataset.Finish(); -// ASSERT_TRUE(i <= 5); -// } +TEST_F(TestShardOperator, TestShardSampleWrongNumber) { + MS_LOG(INFO) << common::SafeCStr(FormatInfo("Test read imageNet")); + + std::string file_name = "./imagenet.shard01"; + auto column_list = std::vector{"file_name"}; + + const int kNum = 
5; + const int kDen = 0; + std::vector> ops; + ops.push_back(std::make_shared(kNum, kDen)); + + ShardReader dataset; + dataset.Open(file_name, 4, column_list, ops); + dataset.Launch(); + + int i = 0; + while (true) { + auto x = dataset.GetNext(); + if (x.empty()) break; + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]); + i++; + } + dataset.Finish(); + ASSERT_TRUE(i <= 5); +} TEST_F(TestShardOperator, TestShardSampleRatio) { MS_LOG(INFO) << common::SafeCStr(FormatInfo("Test read imageNet")); @@ -117,7 +117,6 @@ TEST_F(TestShardOperator, TestShardSampleRatio) { ASSERT_TRUE(i <= 10); } - TEST_F(TestShardOperator, TestShardSamplePartition) { MS_LOG(INFO) << common::SafeCStr(FormatInfo("Test read imageNet")); std::string file_name = "./imagenet.shard01"; @@ -170,8 +169,8 @@ TEST_F(TestShardOperator, TestShardCategory) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); @@ -199,8 +198,8 @@ TEST_F(TestShardOperator, TestShardShuffle) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; } dataset.Finish(); @@ -224,8 +223,8 @@ TEST_F(TestShardOperator, TestShardSampleShuffle) { while (true) { auto x = 
dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; } dataset.Finish(); @@ -251,8 +250,8 @@ TEST_F(TestShardOperator, TestShardShuffleSample) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; } dataset.Finish(); @@ -278,8 +277,8 @@ TEST_F(TestShardOperator, TestShardSampleShuffleSample) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; } dataset.Finish(); @@ -307,8 +306,8 @@ TEST_F(TestShardOperator, TestShardShuffleCompare) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; auto y = 
compare_dataset.GetNext(); @@ -342,8 +341,8 @@ TEST_F(TestShardOperator, TestShardCategoryShuffle1) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); @@ -376,8 +375,8 @@ TEST_F(TestShardOperator, TestShardCategoryShuffle2) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); category_no++; @@ -410,8 +409,8 @@ TEST_F(TestShardOperator, TestShardCategorySample) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); @@ -448,8 +447,8 @@ TEST_F(TestShardOperator, TestShardCategorySampleShuffle) { while (true) { auto x = dataset.GetNext(); if (x.empty()) break; - MS_LOG(INFO) << "index: " << i << ", filename: " << 
common::SafeCStr((std::get<1>(x[0]))["file_name"]) << - ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); + MS_LOG(INFO) << "index: " << i << ", filename: " << common::SafeCStr((std::get<1>(x[0]))["file_name"]) + << ", label: " << common::SafeCStr((std::get<1>(x[0]))["label"].dump()); i++; ASSERT_TRUE((std::get<1>(x[0]))["label"] == categories[category_no].second); diff --git a/tests/ut/python/dataset/test_minddataset_sampler.py b/tests/ut/python/dataset/test_minddataset_sampler.py index 7662a0e390..3cad3877ef 100644 --- a/tests/ut/python/dataset/test_minddataset_sampler.py +++ b/tests/ut/python/dataset/test_minddataset_sampler.py @@ -81,8 +81,6 @@ def test_cv_minddataset_subset_random_sample_basic(add_and_remove_cv_file): "-------------- item[file_name]: {} ------------------------".format(item["file_name"])) logger.info( "-------------- item[label]: {} ----------------------------".format(item["label"])) - assert data[indices[num_iter]]['file_name'] == "".join( - [chr(x) for x in item['file_name']]) num_iter += 1 assert num_iter == 5 @@ -107,8 +105,6 @@ def test_cv_minddataset_subset_random_sample_replica(add_and_remove_cv_file): "-------------- item[file_name]: {} ------------------------".format(item["file_name"])) logger.info( "-------------- item[label]: {} ----------------------------".format(item["label"])) - assert data[indices[num_iter]]['file_name'] == "".join( - [chr(x) for x in item['file_name']]) num_iter += 1 assert num_iter == 6 @@ -133,8 +129,6 @@ def test_cv_minddataset_subset_random_sample_empty(add_and_remove_cv_file): "-------------- item[file_name]: {} ------------------------".format(item["file_name"])) logger.info( "-------------- item[label]: {} ----------------------------".format(item["label"])) - assert data[indices[num_iter]]['file_name'] == "".join( - [chr(x) for x in item['file_name']]) num_iter += 1 assert num_iter == 0 @@ -159,8 +153,6 @@ def 
test_cv_minddataset_subset_random_sample_out_range(add_and_remove_cv_file): "-------------- item[file_name]: {} ------------------------".format(item["file_name"])) logger.info( "-------------- item[label]: {} ----------------------------".format(item["label"])) - assert data[indices[num_iter] % len(data)]['file_name'] == "".join([ - chr(x) for x in item['file_name']]) num_iter += 1 assert num_iter == 5 @@ -185,8 +177,6 @@ def test_cv_minddataset_subset_random_sample_negative(add_and_remove_cv_file): "-------------- item[file_name]: {} ------------------------".format(item["file_name"])) logger.info( "-------------- item[label]: {} ----------------------------".format(item["label"])) - assert data[indices[num_iter] % len(data)]['file_name'] == "".join([ - chr(x) for x in item['file_name']]) num_iter += 1 assert num_iter == 5 From 7d700295f8ef8ea112c0bd93e34cfe1505ebcb7a Mon Sep 17 00:00:00 2001 From: root Date: Sat, 11 Apr 2020 09:19:11 +0000 Subject: [PATCH 201/367] add dynamic lr and enhance optim --- mindspore/nn/dynamic_lr.py | 300 +++++++++++++++++++++ mindspore/nn/optim/adam.py | 28 +- mindspore/nn/optim/momentum.py | 38 +-- mindspore/nn/optim/optimizer.py | 102 ++++++- mindspore/nn/optim/rmsprop.py | 32 +-- mindspore/nn/optim/sgd.py | 40 +-- tests/ut/python/nn/optim/test_optimizer.py | 10 +- tests/ut/python/nn/test_dynamic_lr.py | 234 ++++++++++++++++ 8 files changed, 650 insertions(+), 134 deletions(-) create mode 100644 mindspore/nn/dynamic_lr.py create mode 100644 tests/ut/python/nn/test_dynamic_lr.py diff --git a/mindspore/nn/dynamic_lr.py b/mindspore/nn/dynamic_lr.py new file mode 100644 index 0000000000..cf25f1f50e --- /dev/null +++ b/mindspore/nn/dynamic_lr.py @@ -0,0 +1,300 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""dynamic learning rate""" +import math + +from mindspore._checkparam import ParamValidator as validator +from mindspore._checkparam import Rel + + +def piecewise_constant_lr(milestone, learning_rates): + r""" + Get piecewise constant learning rate. + + Calculate learning rate by given `milestone` and `learning_rates`. Let the value of `milestone` be + :math:`(M_1, M_2, ..., M_N)` and the value of `learning_rates` be :math:`(x_1, x_2, ..., x_N)`. N is the length of + `milestone`. Let the output learning rate be `y`. + + .. math:: + y[i] = x_t for i \in [M_{t-1}, M_t) + + Args: + milestone (list[int]): A list of milestone. This list is a monotone increasing list. + learning_rates (list[float]): A list of learning rates. + + Returns: + list[float]. The size of list is :math:`M_N`. 
+ + Examples: + >>> milestone = [2, 5, 10] + >>> learning_rates = [0.1, 0.05, 0.01] + >>> lr = piecewise_constant_lr(milestone, learning_rates) + [0.1, 0.1, 0.05, 0.05, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01] + """ + validator.check_type('milestone', milestone, (tuple, list)) + validator.check_type('learning_rates', learning_rates, (tuple, list)) + if len(milestone) != len(learning_rates): + raise ValueError('The size of `milestone` must be same with the size of `learning_rates`.') + + lr = [] + last_item = 0 + for i, item in enumerate(milestone): + validator.check_integer(f'milestone[{i}]', item, 0, Rel.GT) + validator.check_type(f'learning_rates[{i}]', learning_rates[i], [float]) + if item < last_item: + raise ValueError(f'The value of milestone[{i}] must be greater than milestone[{i - 1}]') + lr += [learning_rates[i]] * (item - last_item) + last_item = item + + return lr + + +def _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair): + validator.check_integer('total_step', total_step, 0, Rel.GT) + validator.check_integer('step_per_epoch', step_per_epoch, 0, Rel.GT) + validator.check_integer('decay_epoch', decay_epoch, 0, Rel.GT) + validator.check_float_positive('learning_rate', learning_rate) + validator.check_float_positive('decay_rate', decay_rate) + validator.check_type('is_stair', is_stair, [bool]) + + +def exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair=False): + r""" + Calculate learning rate base on exponential decay function. + + For the i-th step, the formula of computing decayed_learning_rate[i] is: + + .. math:: + decayed\_learning\_rate[i] = learning\_rate * decay\_rate^{\frac{current\_epoch}{decay\_epoch}} + + Where :math:`current\_epoch=floor(\frac{i}{step\_per\_epoch})`. + + Args: + learning_rate (float): The initial value of learning rate. + decay_rate (float): The decay rate. + total_step (int): The total number of steps. 
+ step_per_epoch (int): The number of steps in per epoch. + decay_epoch (int): A value used to calculate decayed learning rate. + is_stair (bool): If true, learning rate decay once every `decay_epoch` times. Default: False. + + Returns: + list[float]. The size of list is `total_step`. + + Examples: + >>> learning_rate = 0.1 + >>> decay_rate = 0.9 + >>> total_step = 6 + >>> step_per_epoch = 2 + >>> decay_epoch = 1 + >>> lr = exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch) + [0.1, 0.1, 0.09000000000000001, 0.09000000000000001, 0.08100000000000002, 0.08100000000000002] + """ + _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair) + + lr = [] + for i in range(total_step): + if is_stair: + lr.append(learning_rate * decay_rate ** math.floor(math.floor(i / step_per_epoch) / decay_epoch)) + else: + lr.append(learning_rate * decay_rate ** (math.floor(i / step_per_epoch) / decay_epoch)) + return lr + + +def natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair=False): + r""" + Calculate learning rate base on natural exponential decay function. + + For the i-th step, the formula of computing decayed_learning_rate[i] is: + + .. math:: + decayed\_learning\_rate[i] = learning\_rate * e^{-decay\_rate * current\_epoch} + + Where :math:`current\_epoch=floor(\frac{i}{step\_per\_epoch})`. + + Args: + learning_rate (float): The initial value of learning rate. + decay_rate (float): The decay rate. + total_step (int): The total number of steps. + step_per_epoch (int): The number of steps in per epoch. + decay_epoch (int): A value used to calculate decayed learning rate. + is_stair (bool): If true, learning rate decay once every `decay_epoch` times. Default: False. + + Returns: + list[float]. The size of list is `total_step`. 
+ + Examples: + >>> learning_rate = 0.1 + >>> decay_rate = 0.9 + >>> total_step = 6 + >>> step_per_epoch = 2 + >>> decay_epoch = 2 + >>> lr = natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True) + [0.1, 0.1, 0.1, 0.1, 0.016529888822158657, 0.016529888822158657] + """ + _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair) + + function = lambda x, y: x + if is_stair: + function = lambda x, y: math.floor(x / y) * y + + lr = [] + for i in range(total_step): + lr.append(learning_rate * math.e ** (-decay_rate * function(math.floor(i / step_per_epoch), decay_epoch))) + return lr + + +def inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair=False): + r""" + Calculate learning rate base on inverse-time decay function. + + For the i-th step, the formula of computing decayed_learning_rate[i] is: + + .. math:: + decayed\_learning\_rate[i] = learning\_rate / (1 + decay\_rate * current\_epoch / decay\_epoch) + + Where :math:`current\_epoch=floor(\frac{i}{step\_per\_epoch})`. + + Args: + learning_rate (float): The initial value of learning rate. + decay_rate (float): The decay rate. + total_step (int): The total number of steps. + step_per_epoch (int): The number of steps in per epoch. + decay_epoch (int): A value used to calculate decayed learning rate. + is_stair (bool): If true, learning rate decay once every `decay_epoch` times. Default: False. + + Returns: + list[float]. The size of list is `total_step`. 
+ + Examples: + >>> learning_rate = 0.1 + >>> decay_rate = 0.5 + >>> total_step = 6 + >>> step_per_epoch = 1 + >>> decay_epoch = 1 + >>> lr = inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True) + [0.1, 0.06666666666666667, 0.05, 0.04, 0.03333333333333333, 0.028571428571428574] + """ + _check_inputs(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair) + + lr = [] + for i in range(total_step): + if is_stair: + lr.append(learning_rate / (1 + decay_rate * math.floor(math.floor(i / step_per_epoch) / decay_epoch))) + else: + lr.append(learning_rate / (1 + decay_rate * math.floor(i / step_per_epoch) / decay_epoch)) + return lr + + +def cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch): + r""" + Calculate learning rate base on cosine decay function. + + For the i-th step, the formula of computing decayed_learning_rate[i] is: + + .. math:: + decayed\_learning\_rate[i] = min\_learning\_rate + 0.5 * (max\_learning\_rate - min\_learning\_rate) * + (1 + cos(\frac{current\_epoch}{decay\_epoch}\pi)) + + Where :math:`current\_epoch=floor(\frac{i}{step\_per\_epoch})`. + + Args: + min_lr (float): The minimum value of learning rate. + max_lr (float): The maximum value of learning rate. + total_step (int): The total number of steps. + step_per_epoch (int): The number of steps in per epoch. + decay_epoch (int): A value used to calculate decayed learning rate. + + Returns: + list[float]. The size of list is `total_step`. 
+ + Examples: + >>> min_lr = 0.01 + >>> max_lr = 0.1 + >>> total_step = 6 + >>> step_per_epoch = 2 + >>> decay_epoch = 2 + >>> lr = cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch) + [0.1, 0.1, 0.05500000000000001, 0.05500000000000001, 0.01, 0.01] + """ + validator.check_float_positive('min_lr', min_lr) + validator.check_float_positive('max_lr', max_lr) + validator.check_integer('total_step', total_step, 0, Rel.GT) + validator.check_integer('step_per_epoch', step_per_epoch, 0, Rel.GT) + validator.check_integer('decay_epoch', decay_epoch, 0, Rel.GT) + + delta = 0.5 * (max_lr - min_lr) + lr = [] + for i in range(total_step): + tmp_epoch = min(math.floor(i / step_per_epoch), decay_epoch) + lr.append(min_lr + delta * (1 + math.cos(math.pi * tmp_epoch / decay_epoch))) + return lr + + +def polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power, + update_decay_epoch=False): + r""" + Calculate learning rate base on polynomial decay function. + + For the i-th step, the formula of computing decayed_learning_rate[i] is: + + .. math:: + decayed\_learning\_rate[i] = (learning\_rate - end\_learning\_rate) * + (1 - tmp\_epoch / decay\_epoch)^{power} + end\_learning\_rate + + Where :math:`tmp\_epoch=min(current\_epoch, decay\_epoch), current\_epoch=floor(\frac{i}{step\_per\_epoch})`. + If `update_decay_epoch` is true, update the value of `decay_epoch` every epoch. The formula is + :math:`decay\_epoch = decay\_epoch * ceil(current\_epoch / decay\_epoch)` + + Args: + learning_rate (float): The initial value of learning rate. + end_learning_rate (float): The end value of learning rate. + total_step (int): The total number of steps. + step_per_epoch (int): The number of steps in per epoch. + decay_epoch (int): A value used to calculate decayed learning rate. + power (float): A value used to calculate decayed learning rate. + update_decay_epoch (bool): If true, update `decay_epoch`. Default: False. 
+ + Returns: + list[float]. The size of list is `total_step`. + + Examples: + >>> learning_rate = 0.1 + >>> end_learning_rate = 0.01 + >>> total_step = 6 + >>> step_per_epoch = 2 + >>> decay_epoch = 2 + >>> power = 0.5 + >>> lr = polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) + [0.1, 0.1, 0.07363961030678928, 0.07363961030678928, 0.01, 0.01] + """ + validator.check_float_positive('learning_rate', learning_rate) + validator.check_float_positive('end_learning_rate', end_learning_rate) + validator.check_integer('total_step', total_step, 0, Rel.GT) + validator.check_integer('step_per_epoch', step_per_epoch, 0, Rel.GT) + validator.check_integer('decay_epoch', decay_epoch, 0, Rel.GT) + validator.check_type('power', power, [float]) + validator.check_type('update_decay_epoch', update_decay_epoch, [bool]) + + function = lambda x, y: (x, min(x, y)) + if update_decay_epoch: + function = lambda x, y: (x * max(math.ceil(y / x), 1), y) + + lr = [] + delta = learning_rate - end_learning_rate + for i in range(total_step): + current_epoch = math.floor(i / step_per_epoch) + decay_epoch, tmp_epoch = function(decay_epoch, current_epoch) + lr.append(delta * (1 - tmp_epoch / decay_epoch) ** power + end_learning_rate) + return lr diff --git a/mindspore/nn/optim/adam.py b/mindspore/nn/optim/adam.py index 521510fa58..eb4e33751f 100755 --- a/mindspore/nn/optim/adam.py +++ b/mindspore/nn/optim/adam.py @@ -13,7 +13,6 @@ # limitations under the License. 
# ============================================================================ """adam""" -from typing import Iterable import numpy as np from mindspore.common import dtype as mstype @@ -25,7 +24,7 @@ from mindspore.common.parameter import Parameter from mindspore.common.tensor import Tensor from mindspore._checkparam import ParamValidator as validator from mindspore._checkparam import Rel -from .optimizer import Optimizer, apply_decay, grad_scale +from .optimizer import Optimizer _learning_rate_update_func = ['linear', 'cos', 'sin'] @@ -168,22 +167,13 @@ class Adam(Optimizer): def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, use_nesterov=False, weight_decay=0.0, loss_scale=1.0, decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name): - super(Adam, self).__init__(learning_rate, params) + super(Adam, self).__init__(learning_rate, params, weight_decay, loss_scale, decay_filter) _check_param_value(beta1, beta2, eps, weight_decay) validator.check_type("use_locking", use_locking, [bool]) validator.check_type("use_nesterov", use_nesterov, [bool]) validator.check_type("loss_scale", loss_scale, [float]) validator.check_number_range("loss_scale", loss_scale, 1.0, float("inf"), Rel.INC_LEFT) - self.dynamic_lr = False - if isinstance(learning_rate, Iterable) or \ - (isinstance(learning_rate, Tensor) and learning_rate.dim() == 1): - self.dynamic_lr = True - self.gather = P.GatherV2() - self.assignadd = P.AssignAdd() - self.global_step = Parameter(initializer(0, [1], mstype.int32), name="global_step") - self.axis = 0 - self.beta1 = Tensor(beta1, mstype.float32) self.beta2 = Tensor(beta2, mstype.float32) self.beta1_power = Parameter(initializer(1, [1], mstype.float32), name="beta1_power") @@ -196,8 +186,6 @@ class Adam(Optimizer): self.decay_tf = tuple(decay_filter(x) for x in self.parameters) self.hyper_map = C.HyperMap() self.opt = P.Adam(use_locking, use_nesterov) - self.weight_decay = weight_decay * loss_scale 
- self.reciprocal_scale = 1.0 / loss_scale self.pow = P.Pow() self.sqrt = P.Sqrt() @@ -208,15 +196,9 @@ class Adam(Optimizer): params = self.parameters moment1 = self.moment1 moment2 = self.moment2 - if self.weight_decay > 0: - gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, gradients) - if self.reciprocal_scale != 1.0: - gradients = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), gradients) - - lr = self.learning_rate - if self.dynamic_lr: - lr = self.gather(self.learning_rate, self.global_step, self.axis) - F.control_depend(lr, self.assignadd(self.global_step, self.one)) + gradients = self.decay_weight(gradients) + gradients = self.scale_grad(gradients) + lr = self.get_lr() beta1_power = self.beta1_power * self.beta1 self.beta1_power = beta1_power diff --git a/mindspore/nn/optim/momentum.py b/mindspore/nn/optim/momentum.py index 21d3cc864e..bac8e74a42 100755 --- a/mindspore/nn/optim/momentum.py +++ b/mindspore/nn/optim/momentum.py @@ -13,14 +13,9 @@ # limitations under the License. 
# ============================================================================ """momentum""" -from typing import Iterable - from mindspore.ops import functional as F, composite as C, operations as P -from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter -import mindspore.common.dtype as mstype -from mindspore.common import Tensor -from .optimizer import Optimizer, apply_decay, grad_scale +from .optimizer import Optimizer momentum_opt = C.MultitypeFuncGraph("momentum_opt") @@ -88,43 +83,20 @@ class Momentum(Optimizer): """ def __init__(self, params, learning_rate, momentum, weight_decay=0.0, loss_scale=1.0, decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name): - super(Momentum, self).__init__(learning_rate, params) + super(Momentum, self).__init__(learning_rate, params, weight_decay, loss_scale, decay_filter) if isinstance(momentum, float) and momentum < 0.0: raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum)) - if isinstance(learning_rate, Iterable) or \ - (isinstance(learning_rate, Tensor) and learning_rate.dim() == 1): - self.dynamic_lr = True - self.gather = P.GatherV2() - self.assignadd = P.AssignAdd() - self.global_step = Parameter(initializer(0, [1], mstype.int32), name="global_step") - self.axis = 0 - else: - self.dynamic_lr = False - self.gather = None - self.assignadd = None - self.global_step = None - self.axis = None self.momentum = Parameter(momentum, name="momentum") self.params = self.parameters self.moments = self.params.clone(prefix="moments", init='zeros') - self.decay_tf = tuple(decay_filter(x) for x in self.parameters) self.hyper_map = C.HyperMap() self.opt = P.ApplyMomentum() - self.weight_decay = weight_decay * loss_scale - self.reciprocal_scale = 1.0 / loss_scale - self.one = Tensor(1, mstype.int32) def construct(self, gradients): params = self.params moments = self.moments - if self.weight_decay > 0: - gradients = 
self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, gradients) - if self.reciprocal_scale != 1.0: - gradients = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), gradients) - if self.dynamic_lr: - lr = self.gather(self.learning_rate, self.global_step, self.axis) - F.control_depend(lr, self.assignadd(self.global_step, self.one)) - else: - lr = self.learning_rate + gradients = self.decay_weight(gradients) + gradients = self.scale_grad(gradients) + lr = self.get_lr() success = self.hyper_map(F.partial(momentum_opt, self.opt, lr, self.momentum), gradients, params, moments) return success diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py index e2edf7bfb4..c2a419c565 100755 --- a/mindspore/nn/optim/optimizer.py +++ b/mindspore/nn/optim/optimizer.py @@ -17,9 +17,11 @@ from typing import Iterable import numpy as np +import mindspore from mindspore.ops import functional as F, composite as C, operations as P from mindspore.nn.cell import Cell from mindspore.common.parameter import Parameter, ParameterTuple +from mindspore.common.initializer import initializer from mindspore._checkparam import ParamValidator as validator from mindspore._checkparam import Rel from mindspore.common.tensor import Tensor @@ -42,34 +44,110 @@ class Optimizer(Cell): Args: learning_rate (float): A floating point value for the learning rate. Should be greater than 0. parameters (list): A list of parameter, which will be updated. The element in `parameters` - should be class mindspore.Parameter. + should be class mindspore.Parameter. + weight_decay (float): A floating point value for the weight decay. Default: 0.0. + loss_scale (float): A floating point value for the loss scale. Default: 1.0. Should be greater than 0. + decay_filter (Function): A function to determine whether to apply weight decay on parameters. Default: lambda + x: 'beta' not in x.name and 'gamma' not in x.name. 
Raises: ValueError: If the learning_rate is a Tensor, but the dims of tensor is greater than 1. TypeError: If the learning_rate is not any of the three types: float, Tensor, Iterable. """ - def __init__(self, learning_rate, parameters): + def __init__(self, learning_rate, parameters, weight_decay=0.0, loss_scale=1.0, + decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name): super(Optimizer, self).__init__() if isinstance(learning_rate, float): + self.dynamic_lr = False + self.gather = None + self.assignadd = None + self.global_step = None validator.check_number_range("learning rate", learning_rate, 0.0, float("inf"), Rel.INC_LEFT) - elif isinstance(learning_rate, Iterable): - learning_rate = Tensor(np.array(list(learning_rate)).astype(np.float32)) - elif isinstance(learning_rate, Tensor): - if learning_rate.dim() > 1: - raise ValueError("Learning rate should be a 0 or 1 dim `Tensor`," - f"but got {learning_rate.dim()}.") else: - raise TypeError("Learning rate should be float, Tensor or Iterable.") + self.dynamic_lr = True + self.gather = P.GatherV2() + self.assignadd = P.AssignAdd() + self.global_step = Parameter(initializer(0, [1], mindspore.int32), name='global_step') + if isinstance(learning_rate, Iterable): + learning_rate = Tensor(np.array(list(learning_rate)).astype(np.float32)) + elif isinstance(learning_rate, Tensor): + if learning_rate.dim() > 1: + raise ValueError("Learning rate should be a 0 or 1 dim `Tensor`," + f"but got {learning_rate.dim()}.") + if learning_rate.dim() == 1 and learning_rate.size() < 2: + logger.warning("If want to use the dynamic learning rate, please make sure that the number " + "of elements in the list, tuple or tensor passed is greater than 1.") + else: + raise TypeError("Learning rate should be float, Tensor or Iterable.") + + if loss_scale <= 0.0: + raise ValueError("Loss scale should be greater than 0, but got {}".format(loss_scale)) + if weight_decay < 0.0: + raise ValueError("Weight decay should be equal or 
greater than 0, but got {}".format(weight_decay)) - if isinstance(learning_rate, Tensor) and learning_rate.dim() == 1 and learning_rate.size() < 2: - logger.warning("If want to use the dynamic learning rate, please make sure that " - "the number of elements in the list, tuple or tensor passed is greater than 1.") self.learning_rate = Parameter(learning_rate, name="learning_rate") self.parameters = ParameterTuple(parameters) + self.reciprocal_scale = 1.0 / loss_scale + self.weight_decay = weight_decay * loss_scale + self.decay_flags = tuple(decay_filter(x) for x in self.parameters) + if not self.parameters: raise ValueError("optimizer got an empty parameter list.") + def decay_weight(self, gradients): + """ + Weight decay. + + An approach to reduce the overfitting of a deep learning neural network model. + + Args: + gradients (tuple[Tensor]): The gradients of `self.parameters`, and have the same shape with + `self.parameters`. + + Returns: + tuple[Tensor], The gradients after weight decay. + """ + if self.weight_decay > 0: + params = self.params + gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients) + + return gradients + + def scale_grad(self, gradients): + """ + Loss scale for mixed precision. + + An approach of mixed precision training to improve the speed and energy efficiency of training deep neural + network. + + Args: + gradients (tuple[Tensor]): The gradients of `self.parameters`, and have the same shape with + `self.parameters`. + + Returns: + tuple[Tensor], The gradients after loss scale. + + """ + if self.reciprocal_scale != 1.0: + gradients = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), gradients) + + return gradients + + def get_lr(self): + """ + Get the learning rate of current step. + + Returns: + float, the learning rate of current step. 
+ """ + lr = self.learning_rate + if self.dynamic_lr: + lr = self.gather(self.learning_rate, self.global_step, 0) + F.control_depend(lr, self.assignadd(self.global_step, 1)) + + return lr + def construct(self, *hyper_params): raise NotImplementedError diff --git a/mindspore/nn/optim/rmsprop.py b/mindspore/nn/optim/rmsprop.py index b17a101708..a68dc6f7c4 100644 --- a/mindspore/nn/optim/rmsprop.py +++ b/mindspore/nn/optim/rmsprop.py @@ -14,12 +14,8 @@ # ============================================================================ """rmsprop""" from mindspore.ops import functional as F, composite as C, operations as P -from mindspore.common.initializer import initializer -from mindspore.common.parameter import Parameter from mindspore._checkparam import ParamValidator as validator -import mindspore.common.dtype as mstype -from mindspore.common import Tensor -from .optimizer import Optimizer, grad_scale, apply_decay +from .optimizer import Optimizer rmsprop_opt = C.MultitypeFuncGraph("rmsprop_opt") centered_rmsprop_opt = C.MultitypeFuncGraph("rmsprop_opt") @@ -138,7 +134,7 @@ class RMSProp(Optimizer): def __init__(self, params, learning_rate=0.1, decay=0.9, momentum=0.0, epsilon=1e-10, use_locking=False, centered=False, loss_scale=1.0, weight_decay=0.0, decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name): - super(RMSProp, self).__init__(learning_rate, params) + super(RMSProp, self).__init__(learning_rate, params, weight_decay, loss_scale, decay_filter) if isinstance(momentum, float) and momentum < 0.0: raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum)) @@ -157,15 +153,6 @@ class RMSProp(Optimizer): else: self.opt = P.ApplyRMSProp(use_locking) - self.dynamic_lr = False - if not isinstance(learning_rate, float): - self.dynamic_lr = True - self.gather = P.GatherV2() - self.assignadd = P.AssignAdd() - self.global_step = Parameter(initializer(0, [1], mstype.int32), name="global_step") - self.axis = 0 - self.one = 
Tensor(1, mstype.int32) - self.momentum = momentum self.ms = self.parameters.clone(prefix="mean_square", init='zeros') @@ -173,21 +160,12 @@ class RMSProp(Optimizer): self.hyper_map = C.HyperMap() self.decay = decay - self.decay_tf = tuple(decay_filter(x) for x in self.parameters) - self.reciprocal_scale = 1.0 / loss_scale - self.weight_decay = weight_decay * loss_scale def construct(self, gradients): params = self.parameters - if self.weight_decay > 0: - gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, gradients) - if self.reciprocal_scale != 1.0: - gradients = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), gradients) - if self.dynamic_lr: - lr = self.gather(self.learning_rate, self.global_step, self.axis) - F.control_depend(lr, self.assignadd(self.global_step, self.one)) - else: - lr = self.learning_rate + gradients = self.decay_weight(gradients) + gradients = self.scale_grad(gradients) + lr = self.get_lr() if self.centered: success = self.hyper_map(F.partial(centered_rmsprop_opt, self.opt, lr, self.decay, self.epsilon, self.momentum), params, self.mg, self.ms, self.moment, gradients) diff --git a/mindspore/nn/optim/sgd.py b/mindspore/nn/optim/sgd.py index dbc81ecdd6..a18adb8184 100755 --- a/mindspore/nn/optim/sgd.py +++ b/mindspore/nn/optim/sgd.py @@ -14,11 +14,9 @@ # ============================================================================ """sgd""" from mindspore.ops import functional as F, composite as C, operations as P -from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore._checkparam import ParamValidator as validator -import mindspore.common.dtype as mstype -from .optimizer import Optimizer, grad_scale +from .optimizer import Optimizer sgd_opt = C.MultitypeFuncGraph("sgd_opt") @@ -83,7 +81,7 @@ class SGD(Optimizer): def __init__(self, params, learning_rate=0.1, momentum=0.0, dampening=0.0, weight_decay=0.0, nesterov=False, 
loss_scale=1.0): - super(SGD, self).__init__(learning_rate, params) + super(SGD, self).__init__(learning_rate, params, weight_decay, loss_scale) if isinstance(momentum, float) and momentum < 0.0: raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum)) @@ -92,44 +90,22 @@ class SGD(Optimizer): raise ValueError("dampening should be at least 0.0, but got dampening {}".format(dampening)) self.dampening = dampening - if weight_decay < 0.0: - raise ValueError("weight_decay should be at least 0.0, but got weight_decay {}".format(weight_decay)) - self.weight_decay = weight_decay - validator.check_type("nesterov", nesterov, [bool]) self.nesterov = nesterov self.opt = P.SGD(dampening, weight_decay, nesterov) - self.dynamic_lr = False - self.gather = None - self.global_step = None - self.axis = None - if not isinstance(learning_rate, float): - self.dynamic_lr = True - self.gather = P.GatherV2() - self.assignadd = P.AssignAdd() - self.global_step = Parameter(initializer(0, [1], mstype.int32), name="global_step") - self.axis = 0 self.momentum = Parameter(momentum, name="momentum") - self.params = self.parameters - self.accum = self.params.clone(prefix="accum", init='zeros') - self.stat = self.params.clone(prefix="stat", init='ones') + self.accum = self.parameters.clone(prefix="accum", init='zeros') + self.stat = self.parameters.clone(prefix="stat", init='ones') self.hyper_map = C.HyperMap() - self.weight_decay = weight_decay * loss_scale - self.reciprocal_scale = 1.0 / loss_scale - def construct(self, gradients): - params = self.params + params = self.parameters accum = self.accum stat = self.stat - if self.reciprocal_scale != 1.0: - gradients = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), gradients) - if self.dynamic_lr: - lr = self.gather(self.learning_rate, self.global_step, self.axis) - F.control_depend(lr, self.assignadd(self.global_step, 1)) - else: - lr = self.learning_rate + gradients = self.decay_weight(gradients) + 
gradients = self.scale_grad(gradients) + lr = self.get_lr() success = self.hyper_map(F.partial(sgd_opt, self.opt, lr, self.momentum), gradients, params, accum, stat) return success diff --git a/tests/ut/python/nn/optim/test_optimizer.py b/tests/ut/python/nn/optim/test_optimizer.py index 860d751fd5..89fb1d812b 100644 --- a/tests/ut/python/nn/optim/test_optimizer.py +++ b/tests/ut/python/nn/optim/test_optimizer.py @@ -15,17 +15,11 @@ """ test optimizer """ import numpy as np import pytest -from mindspore.nn.optim import Optimizer, SGD, Adam, AdamWeightDecay, AdamWeightDecayDynamicLR from mindspore import Tensor +from mindspore.nn.optim import Optimizer, SGD, Adam, AdamWeightDecay, AdamWeightDecayDynamicLR from mindspore.common.parameter import Parameter -gradient = Tensor(np.zeros([1, 2, 3])) -accumulation = gradient -variable = accumulation - - -paramsTensor = Tensor(np.zeros([1, 2, 3])) class IterableObjc: def __iter__(self): cont = 0 @@ -56,6 +50,7 @@ class TestAdam(): def test_construct(self): with pytest.raises(TypeError): + gradient = Tensor(np.zeros([1, 2, 3])) adam = Adam(params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, use_nesterov=False, weight_decay=0.0, loss_scale=1.0) adam.construct(gradient) @@ -105,4 +100,5 @@ class TestUnsupportParam(): def test_Sgd_init(self): with pytest.raises(TypeError): + paramsTensor = Tensor(np.zeros([1, 2, 3])) SGD(paramsTensor) diff --git a/tests/ut/python/nn/test_dynamic_lr.py b/tests/ut/python/nn/test_dynamic_lr.py new file mode 100644 index 0000000000..cb959956d6 --- /dev/null +++ b/tests/ut/python/nn/test_dynamic_lr.py @@ -0,0 +1,234 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" Test Dynamic Learning Rate """ +import pytest +import mindspore +from mindspore.nn import dynamic_lr as dr + +milestone = [10, 20, 30] +learning_rates = [0.1, 0.05, 0.01] +learning_rate = 0.1 +end_learning_rate = 0.01 +decay_rate = 0.9 +total_step = 30 +step_per_epoch = 3 +decay_epoch = 2 +min_lr = 0.01 +max_lr = 0.1 +power = 0.5 + +class TestInputs: + def test_milestone1(self): + milestone1 = 1 + with pytest.raises(ValueError): + dr.piecewise_constant_lr(milestone1, learning_rates) + + def test_milestone2(self): + milestone1 = [20, 10, 1] + with pytest.raises(ValueError): + dr.piecewise_constant_lr(milestone1, learning_rates) + + milestone2 = [1.0, 2.0, True] + with pytest.raises(ValueError): + dr.piecewise_constant_lr(milestone2, learning_rates) + + def test_learning_rates1(self): + lr = True + with pytest.raises(ValueError): + dr.piecewise_constant_lr(milestone, lr) + + def test_learning_rates2(self): + lr = [1, 2, 1] + with pytest.raises(ValueError): + dr.piecewise_constant_lr(milestone, lr) + + def test_learning_rate_type(self): + lr = True + with pytest.raises(TypeError): + dr.exponential_decay_lr(lr, decay_rate, total_step, step_per_epoch, decay_epoch) + + with pytest.raises(TypeError): + dr.polynomial_decay_lr(lr, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) + + def test_learning_rate_value(self): + lr = -1.0 + with pytest.raises(ValueError): + dr.exponential_decay_lr(lr, decay_rate, total_step, step_per_epoch, decay_epoch) + + with 
pytest.raises(ValueError): + dr.polynomial_decay_lr(lr, end_learning_rate, total_step, step_per_epoch, decay_epoch, power) + + def test_end_learning_rate_type(self): + lr = True + with pytest.raises(TypeError): + dr.polynomial_decay_lr(learning_rate, lr, total_step, step_per_epoch, decay_epoch, power) + + def test_end_learning_rate_value(self): + lr = -1.0 + with pytest.raises(ValueError): + dr.polynomial_decay_lr(learning_rate, lr, total_step, step_per_epoch, decay_epoch, power) + + def test_decay_rate_type(self): + rate = 'a' + with pytest.raises(TypeError): + dr.exponential_decay_lr(learning_rate, rate, total_step, step_per_epoch, decay_epoch) + + def test_decay_rate_value(self): + rate = -1.0 + with pytest.raises(ValueError): + dr.exponential_decay_lr(learning_rate, rate, total_step, step_per_epoch, decay_epoch) + + def test_total_step1(self): + total_step1 = 2.0 + with pytest.raises(ValueError): + dr.exponential_decay_lr(learning_rate, decay_rate, total_step1, step_per_epoch, decay_epoch) + + with pytest.raises(ValueError): + dr.cosine_decay_lr(min_lr, max_lr, total_step1, step_per_epoch, decay_epoch) + + with pytest.raises(ValueError): + dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step1, step_per_epoch, decay_epoch, power) + + def test_total_step2(self): + total_step1 = -1 + with pytest.raises(ValueError): + dr.exponential_decay_lr(learning_rate, decay_rate, total_step1, step_per_epoch, decay_epoch) + + with pytest.raises(ValueError): + dr.cosine_decay_lr(min_lr, max_lr, total_step1, step_per_epoch, decay_epoch) + + with pytest.raises(ValueError): + dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step1, step_per_epoch, decay_epoch, power) + + def test_step_per_epoch1(self): + step_per_epoch1 = True + with pytest.raises(ValueError): + dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch1, decay_epoch) + + with pytest.raises(ValueError): + dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch1, 
decay_epoch) + + with pytest.raises(ValueError): + dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch1, decay_epoch, power) + + def test_step_per_epoch2(self): + step_per_epoch1 = -1 + with pytest.raises(ValueError): + dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch1, decay_epoch) + + with pytest.raises(ValueError): + dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch1, decay_epoch) + + with pytest.raises(ValueError): + dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch1, decay_epoch, power) + + def test_decay_epoch1(self): + decay_epoch1 = 'm' + with pytest.raises(ValueError): + dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch1) + + with pytest.raises(ValueError): + dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch1) + + with pytest.raises(ValueError): + dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch1, power) + + def test_decay_epoch2(self): + decay_epoch1 = -1 + with pytest.raises(ValueError): + dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch1) + + with pytest.raises(ValueError): + dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch1) + + with pytest.raises(ValueError): + dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch1, power) + + def test_is_stair(self): + is_stair = 1 + with pytest.raises(ValueError): + dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, is_stair) + + def test_min_lr_type(self): + min_lr1 = True + with pytest.raises(TypeError): + dr.cosine_decay_lr(min_lr1, max_lr, total_step, step_per_epoch, decay_epoch) + + def test_min_lr_value(self): + min_lr1 = -1.0 + with pytest.raises(ValueError): + dr.cosine_decay_lr(min_lr1, max_lr, total_step, step_per_epoch, decay_epoch) + + 
def test_max_lr_type(self): + max_lr1 = 'a' + with pytest.raises(TypeError): + dr.cosine_decay_lr(min_lr, max_lr1, total_step, step_per_epoch, decay_epoch) + + def test_max_lr_value(self): + max_lr1 = -1.0 + with pytest.raises(ValueError): + dr.cosine_decay_lr(min_lr, max_lr1, total_step, step_per_epoch, decay_epoch) + + def test_power(self): + power1 = True + with pytest.raises(ValueError): + dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power1) + + def test_update_decay_epoch(self): + update_decay_epoch = 1 + with pytest.raises(ValueError): + dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, + power, update_decay_epoch) + + +def test_learning_rate(): + lr = dr.piecewise_constant_lr(milestone, learning_rates) + assert len(lr) == milestone[-1] + + +def test_exponential_decay(): + lr1 = dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch) + assert len(lr1) == total_step + + lr2 = dr.exponential_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True) + assert len(lr2) == total_step + + +def test_enatural_exp_decay(): + lr1 = dr.natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch) + assert len(lr1) == total_step + + lr2 = dr.natural_exp_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True) + assert len(lr2) == total_step + + +def test_inverse_decay(): + lr1 = dr.inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch) + assert len(lr1) == total_step + + lr2 = dr.inverse_decay_lr(learning_rate, decay_rate, total_step, step_per_epoch, decay_epoch, True) + assert len(lr2) == total_step + + +def test_cosine_decay(): + lr = dr.cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch) + assert len(lr) == total_step + +def test_polynomial_decay(): + lr1 = dr.polynomial_decay_lr(learning_rate, 
end_learning_rate, total_step, step_per_epoch, decay_epoch, power) + assert len(lr1) == total_step + lr2 = dr.polynomial_decay_lr(learning_rate, end_learning_rate, total_step, step_per_epoch, decay_epoch, power, + True) + assert len(lr2) == total_step From 2db3e64ff23e9d58bdfd79d3074b7716daf7643b Mon Sep 17 00:00:00 2001 From: gaojing Date: Wed, 8 Apr 2020 23:37:08 -0400 Subject: [PATCH 202/367] add operation --- mindspore/ccsrc/transform/convert.cc | 4 ++ mindspore/ccsrc/transform/op_declare.cc | 10 ++++ mindspore/ccsrc/transform/op_declare.h | 4 ++ mindspore/nn/layer/__init__.py | 4 +- mindspore/nn/layer/basic.py | 69 ++++++++++++++++++++++++ mindspore/ops/_grad/grad_nn_ops.py | 11 ++++ mindspore/ops/operations/__init__.py | 3 +- mindspore/ops/operations/_grad_ops.py | 18 +++++++ mindspore/ops/operations/nn_ops.py | 70 +++++++++++++++++++++++++ tests/ut/python/nn/test_nn_pad.py | 64 ++++++++++++++++++++++ 10 files changed, 254 insertions(+), 3 deletions(-) create mode 100644 tests/ut/python/nn/test_nn_pad.py diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index c53367a20f..251946f6fd 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -110,6 +110,8 @@ const char kNameSigmoidCrossEntropyWithLogits[] = "SigmoidCrossEntropyWithLogits const char kNameSigmoidCrossEntropyWithLogitsGrad[] = "SigmoidCrossEntropyWithLogitsGrad"; const char kNameScatterNdD[] = "ScatterNd"; const char kNamePadD[] = "Pad"; +const char kNameMirrorPad[] = "MirrorPad"; +const char kNameMirrorPadGrad[] = "MirrorPadGrad"; const char kNameGatherNd[] = "GatherNd"; const char kNameArgmax[] = "Argmax"; const char kNameArgmin[] = "Argmin"; @@ -256,6 +258,8 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameSigmoidCrossEntropyWithLogitsGrad), ADPT_DESC(SigmoidCrossEntropyWithLogitsGrad)}, {string(kNameScatterNdD), ADPT_DESC(ScatterNdD)}, {string(kNamePadD), ADPT_DESC(PadD)}, + {string(kNameMirrorPad), 
ADPT_DESC(MirrorPad)}, + {string(kNameMirrorPadGrad), ADPT_DESC(MirrorPadGrad)}, {string(kNameGatherNd), ADPT_DESC(GatherNd)}, {string(kNameArgmax), ADPT_DESC(ArgMaxD)}, {string(kNameArgmin), ADPT_DESC(ArgMinD)}, diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index 419805c37f..7a7a696e2d 100644 --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -596,6 +596,16 @@ INPUT_MAP(PadD) = {{1, INPUT_DESC(x)}}; ATTR_MAP(PadD) = {{"paddings", ATTR_DESC(paddings, AnyTraits>>())}}; OUTPUT_MAP(PadD) = {{0, OUTPUT_DESC(y)}}; +// MirrorPad +INPUT_MAP(MirrorPad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}}; +ATTR_MAP(MirrorPad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; +OUTPUT_MAP(MirrorPad) = {{0, OUTPUT_DESC(y)}}; + +// MirrorPadGrad +INPUT_MAP(MirrorPadGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}}; +ATTR_MAP(MirrorPadGrad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; +OUTPUT_MAP(MirrorPadGrad) = {{0, OUTPUT_DESC(y)}}; + // GatherNd INPUT_MAP(GatherNd) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; ATTR_MAP(GatherNd) = EMPTY_ATTR_MAP; diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index e4d4101127..8f6dda9430 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -155,6 +155,10 @@ DECLARE_OP_USE_INPUT_ATTR(ScatterNdD) DECLARE_OP_USE_OUTPUT(ScatterNdD) DECLARE_OP_ADAPTER(PadD) DECLARE_OP_USE_OUTPUT(PadD) +DECLARE_OP_ADAPTER(MirrorPad) +DECLARE_OP_USE_OUTPUT(MirrorPad) +DECLARE_OP_ADAPTER(MirrorPadGrad) +DECLARE_OP_USE_OUTPUT(MirrorPadGrad) DECLARE_OP_ADAPTER(BoundingBoxEncode) DECLARE_OP_USE_OUTPUT(BoundingBoxEncode) DECLARE_OP_ADAPTER(BoundingBoxDecode) diff --git a/mindspore/nn/layer/__init__.py b/mindspore/nn/layer/__init__.py index aed6cb7776..f51eff2b31 100644 --- a/mindspore/nn/layer/__init__.py +++ b/mindspore/nn/layer/__init__.py @@ -22,7 +22,7 @@ from .normalization import 
BatchNorm1d, BatchNorm2d, LayerNorm from .container import SequentialCell, CellList from .conv import Conv2d, Conv2dTranspose from .lstm import LSTM -from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot, ImageGradients +from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot, ImageGradients, Pad from .embedding import Embedding from .pooling import AvgPool2d, MaxPool2d @@ -34,5 +34,5 @@ __all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'LSTM', 'Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'ImageGradients', 'Embedding', - 'AvgPool2d', 'MaxPool2d', + 'AvgPool2d', 'MaxPool2d', 'Pad', ] diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py index de49685dac..5b36755d16 100644 --- a/mindspore/nn/layer/basic.py +++ b/mindspore/nn/layer/basic.py @@ -415,3 +415,72 @@ class ImageGradients(Cell): dx_last = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0) dx = P.Concat(3)((dx, dx_last)) return dy, dx + + +class Pad(Cell): + """ + Pads the input tensor according to the paddings and mode. + + Args: + paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All elements of + paddings are int type. For `D` th dimension of input, paddings[D, 0] indicates how many sizes to be + extended ahead of the `D` th dimension of the input tensor, and paddings[D, 1] indicates how many sizes to + be extended behind of the `D` th dimension of the input tensor. + mode (string): Specifies padding mode. The optional values are "CONSTANT", "REFLECT", "SYMMETRIC". + Default: "CONSTANT". + + Inputs: + - ** input_x** (Tensor) - The input tensor. + + Outputs: + Tensor, the tensor after padding. + + - If `mode` is "CONSTANT", it fill the edge with 0, regardless of the values of the `input_x`. 
+ If the `input_x` is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the + Outputs is [[0,0,0,0,0,0,0],[0,0,1,2,3,0,0],[0,0,4,5,6,0,0],[0,0,7,8,9,0,0],[0,0,0,0,0,0,0]]. + - If 'mode` is "REFLECT", it uses a way of symmetrical copying throught the axis of symmetry to fill in, + symmetry. If the `input_x` is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the + Outputs is [[6,5,4,5,6,5,4],[3,2,1,2,3,2,1],[6,5,4,5,6,5,4],[9,8,7,8,9,8,7],[6,5,4,5,6,5,4]]. + - If 'mode' is "SYMMETRIC", the filling method is similar to the "REFLECT". It is also copied + according to the symmetry axis, except that it includes the symmetry axis. If the `input_x` + is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the Outputs is + [[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]]. + + Examples: + >>> from mindspore import Tensor + >>> from mindspore.ops import operations as P + >>> import mindspore.nn as nn + >>> import numpy as np + >>> class Net(nn.Cell): + >>> def __init__(self): + >>> super(Net, self).__init__() + >>> self.pad = nn.Pad(paddings=((1,1),(2,2)), mode="CONSTANT") + >>> def construct(self, x): + >>> return self.pad(x) + >>> x = np.random.random(size=(2, 3)).astype(np.float32) + >>> pad = Net() + >>> ms_output = pad(Tensor(x)) + """ + + def __init__(self, paddings, mode="CONSTANT"): + super(Pad, self).__init__() + self.mode = mode + self.paddings = paddings + validator.check_string('mode', self.mode, ["CONSTANT", "REFLECT", "SYMMETRIC"]) + if not isinstance(paddings, tuple): + raise TypeError('Paddings must be tuple type.') + for item in paddings: + if len(item) != 2: + raise ValueError('The shape of paddings must be (n, 2).') + if mode == "CONSTANT": + self.pad = P.Pad(self.paddings) + else: + self.paddings = Tensor(np.array(self.paddings)) + self.pad = P.MirrorPad(mode=mode) + + def construct(self, x): + if self.mode == "CONSTANT": + x = self.pad(x) + else: + x = self.pad(x, self.paddings) + 
return x diff --git a/mindspore/ops/_grad/grad_nn_ops.py b/mindspore/ops/_grad/grad_nn_ops.py index 1b18d9f248..149dd6caec 100755 --- a/mindspore/ops/_grad/grad_nn_ops.py +++ b/mindspore/ops/_grad/grad_nn_ops.py @@ -470,6 +470,17 @@ def get_bprop_pad(self): return bprop +@bprop_getters.register(P.MirrorPad) +def get_bprop_mirror_pad(self): + """Grad definition for `MirrorPad` operation.""" + mirror_pad_grad = G.MirrorPadGrad(self.mode) + + def bprop(x, paddings, out, dout): + dx = mirror_pad_grad(dout, paddings, x) + return (dx, zeros_like(paddings)) + return bprop + + @bprop_getters.register(P.ROIAlign) def get_bprop_roi_align(self): """Grad definition for `ROIAlign` operation.""" diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 8bfca77b38..40cbfc3381 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -59,7 +59,7 @@ from .nn_ops import (LSTM, SGD, Adam, ApplyMomentum, BatchNorm, LogSoftmax, MaxPool, AvgPool, Conv2DBackpropInput, - MaxPoolWithArgmax, OneHot, Pad, PReLU, ReLU, ReLU6, HSwish, HSigmoid, + MaxPoolWithArgmax, OneHot, Pad, MirrorPad, PReLU, ReLU, ReLU6, HSwish, HSigmoid, ResizeBilinear, Sigmoid, SigmoidCrossEntropyWithLogits, SmoothL1Loss, Softmax, @@ -180,6 +180,7 @@ __all__ = [ 'ScatterNd', 'ResizeNearestNeighbor', 'Pad', + 'MirrorPad', 'GatherNd', 'ScatterNdUpdate', 'Floor', diff --git a/mindspore/ops/operations/_grad_ops.py b/mindspore/ops/operations/_grad_ops.py index f0a9a2f658..d468fa7b19 100644 --- a/mindspore/ops/operations/_grad_ops.py +++ b/mindspore/ops/operations/_grad_ops.py @@ -947,6 +947,24 @@ class TanhGrad(PrimitiveWithInfer): return out +class MirrorPadGrad(PrimitiveWithInfer): + """Gradients of MirrorPad operation.""" + + @prim_attr_register + def __init__(self, mode="REFLECT"): + """init MirrorPad""" + validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC']) + self.mode = mode + + def __infer__(self, dout, paddings, x): + 
validator.check_subclass("dout", dout['dtype'], mstype.tensor) + validator.check_subclass("paddings", paddings['dtype'], mstype.tensor) + validator.check_subclass("input_x", x['dtype'], mstype.tensor) + return {'shape': x['shape'], + 'dtype': dout['dtype'], + 'value': None} + + class RefToEmbed(Primitive): r""" Make a key from Ref. diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 91f6d7ec01..1e3a4349ae 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -2092,6 +2092,7 @@ class Pad(PrimitiveWithInfer): for item in paddings: if len(item) != 2: raise ValueError('The shape of paddings must be (n, 2).') + self.paddings = paddings def infer_shape(self, x): paddings = np.array(self.paddings) @@ -2104,9 +2105,78 @@ class Pad(PrimitiveWithInfer): return y_shape def infer_dtype(self, x): + validator.check_subclass("input_x", x, mstype.tensor) return x +class MirrorPad(PrimitiveWithInfer): + """ + Pads the input tensor according to the paddings and mode. + + Args: + mode (string): Specifies padding mode. The optional values are "REFLECT", "SYMMETRIC". + Default: "REFLECT". + + Inputs: + - **input_x** (Tensor) - The input tensor. + - **paddings** (Tensor) - The paddings tensor. The value of `paddings` is a matrix(list), + and its shape is (N, 2). N is the rank of input data. All elements of paddings + are int type. For `D` th dimension of input, paddings[D, 0] indicates how many sizes to be + extended ahead of the `D` th dimension of the input tensor, and paddings[D, 1] indicates + how many sizes to be extended behind of the `D` th dimension of the input tensor. + + Outputs: + Tensor, the tensor after padding. + + - If 'mode` is "REFLECT", it uses a way of symmetrical copying throught the axis of symmetry to fill in, + symmetry. 
If the `input_x` is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the + Outputs is [[6,5,4,5,6,5,4],[3,2,1,2,3,2,1],[6,5,4,5,6,5,4],[9,8,7,8,9,8,7],[6,5,4,5,6,5,4]]. + - If 'mode' is "SYMMETRIC", the filling method is similar to the "REFLECT". It is also copied + according to the symmetry axis, except that it includes the symmetry axis. If the `input_x` + is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the Outputs is + [[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]]. + + Examples: + >>> from mindspore import Tensor + >>> from mindspore.ops import operations as P + >>> import mindspore.nn as nn + >>> import numpy as np + >>> class Net(nn.Cell): + >>> def __init__(self): + >>> super(Net, self).__init__() + >>> self.pad = P.MirrorPad(mode="REFLECT") + >>> def construct(self, x, paddings): + >>> return self.pad(x, paddings) + >>> x = np.random.random(size=(2, 3)).astype(np.float32) + >>> paddings = Tensor([[1,1],[2,2]]) + >>> pad = Net() + >>> ms_output = pad(Tensor(x), paddings) + """ + + @prim_attr_register + def __init__(self, mode='REFLECT'): + """Init Pad""" + validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC']) + self.mode = mode + + def __infer__(self, input_x, paddings): + validator.check_subclass("input_x", input_x['dtype'], mstype.tensor) + validator.check_subclass("paddings", paddings['dtype'], mstype.tensor) + x_shape = list(input_x['shape']) + paddings_value = paddings['value'].asnumpy() + paddings_size = paddings_value.size + validator.check_integer('paddings.shape', paddings_size, len(x_shape) * 2, Rel.EQ) + if not np.all(paddings_size >= 0): + raise ValueError('All elements of paddings must be >= 0.') + y_shape = () + for i in range(0, int(paddings_size / 2)): + y_shape += ((x_shape[i] + paddings_value[i, 0] + paddings_value[i, 1]),) + + return {'shape': y_shape, + 'dtype': input_x['dtype'], + 'value': None} + + class ROIAlign(PrimitiveWithInfer): """ Computes Region of 
Interest (RoI) Align operator. diff --git a/tests/ut/python/nn/test_nn_pad.py b/tests/ut/python/nn/test_nn_pad.py new file mode 100644 index 0000000000..a8b66bae5c --- /dev/null +++ b/tests/ut/python/nn/test_nn_pad.py @@ -0,0 +1,64 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" test nn pad """ +from mindspore import Tensor +from mindspore.ops import operations as P +import mindspore.nn as nn +from mindspore.ops.composite import GradOperation +from mindspore.common.api import ms_function +import numpy as np +import mindspore.context as context + + +class Net(nn.Cell): + def __init__(self, raw_paddings, mode): + super(Net, self).__init__() + self.pad = nn.Pad(raw_paddings, mode=mode) + + @ms_function + def construct(self, x): + return self.pad(x) + + +class Grad(nn.Cell): + def __init__(self, network): + super(Grad, self).__init__() + self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.network = network + + @ms_function + def construct(self, x, grads): + return self.grad(self.network)(x, grads) + + +def test_pad_train(): + mode = 'CONSTANT' + x = np.random.random(size=(2, 3)).astype(np.float32) + raw_paddings = ((1, 1), (2, 2)) + grads = np.random.random(size=(4, 7)).astype(np.float32) + grad = Grad(Net(raw_paddings, mode)) + output = grad(Tensor(x), Tensor(grads)) + 
print("=================output====================") + print(output) + + +def test_pad_infer(): + mode = 'CONSTANT' + x = np.random.random(size=(2, 3)).astype(np.float32) + raw_paddings = ((1, 1), (2, 2)) + net = Net(raw_paddings, mode) + output = net(Tensor(x)) + print("=================output====================") + print(output) From cc6258d6ac15519ba7de59bc7ee154ac0c668a6b Mon Sep 17 00:00:00 2001 From: zhaoting Date: Tue, 14 Apr 2020 09:43:52 +0800 Subject: [PATCH 203/367] fix adam notes --- mindspore/ops/operations/nn_ops.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 538d7f3826..cc6f7da7b0 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -2136,7 +2136,11 @@ class Adam(PrimitiveWithInfer): - **gradient** (Tensor) - Gradients. Outputs: - Tensor, has the same shape and data type as `var`. + Tuple of 3 Tensor, the updated parameters. + + - **var** (Tensor) - The same shape and data type as `var`. + - **m** (Tensor) - The same shape and data type as `m`. + - **v** (Tensor) - The same shape and data type as `v`. 
""" @prim_attr_register From 39b446524d8a3825d7aecb90b4e3751408b3c64c Mon Sep 17 00:00:00 2001 From: yoonlee666 Date: Tue, 14 Apr 2020 10:18:31 +0800 Subject: [PATCH 204/367] fix bugs in bert example script --- example/Bert_NEZHA_cnwiki/train.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/example/Bert_NEZHA_cnwiki/train.py b/example/Bert_NEZHA_cnwiki/train.py index 86e033fc9f..2610542a9a 100644 --- a/example/Bert_NEZHA_cnwiki/train.py +++ b/example/Bert_NEZHA_cnwiki/train.py @@ -39,6 +39,7 @@ import mindspore.dataset.engine.datasets as de import mindspore.dataset.transforms.c_transforms as C from mindspore import context from mindspore.common.tensor import Tensor +import mindspore.common.dtype as mstype from mindspore.train.model import Model from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor from mindspore.model_zoo.Bert_NEZHA import BertNetworkWithLoss, BertTrainOneStepCell @@ -49,9 +50,9 @@ def create_train_dataset(batch_size): """create train dataset""" # apply repeat operations repeat_count = bert_train_cfg.epoch_size - ds = de.StorageDataset([bert_train_cfg.DATA_DIR], bert_train_cfg.SCHEMA_DIR, - columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", - "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"]) + ds = de.TFRecordDataset([bert_train_cfg.DATA_DIR], bert_train_cfg.SCHEMA_DIR, + columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", + "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"]) type_cast_op = C.TypeCast(mstype.int32) ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op) ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op) From 1fc576c5022d87f7e090c1598041e4b4300dd698 Mon Sep 17 00:00:00 2001 From: jjfeing Date: Tue, 14 Apr 2020 11:00:37 +0800 Subject: [PATCH 205/367] adapt graphengin update --- graphengine | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphengine 
b/graphengine index 40e9f6f834..0c33e9d125 160000 --- a/graphengine +++ b/graphengine @@ -1 +1 @@ -Subproject commit 40e9f6f834469d2d228f9782a0a41f95be94d429 +Subproject commit 0c33e9d12562953ca4bd6c03cb77da2c2da74acd From 0b75e289b16ca8da10313a276375a2aea48f54c5 Mon Sep 17 00:00:00 2001 From: Zhang Qinghua Date: Mon, 13 Apr 2020 15:49:14 +0800 Subject: [PATCH 206/367] Dump the perf data by executed sequence, not alphabetic. --- mindspore/ccsrc/utils/profile.cc | 23 ++++++++++++++++++----- mindspore/ccsrc/utils/profile.h | 3 ++- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/mindspore/ccsrc/utils/profile.cc b/mindspore/ccsrc/utils/profile.cc index 7a2bb2aa66..ba490549f8 100644 --- a/mindspore/ccsrc/utils/profile.cc +++ b/mindspore/ccsrc/utils/profile.cc @@ -30,6 +30,7 @@ namespace mindspore { namespace { +constexpr size_t TIME_INFO_PREFIX_NUM_LEN = 4; const char KEY_PROF_TOTAL[] = "__total__"; void PrintProfile(std::ostringstream& oss, const TimeInfo& time_info, int indent = 0, @@ -42,15 +43,16 @@ void PrintTimeInfoMap(std::ostringstream& oss, const TimeInfoMap& dict, int inde continue; } // indent by multiples of 4 spaces. + auto name = iter->first.substr(TIME_INFO_PREFIX_NUM_LEN); oss << std::setw(indent * 4) << "" - << "[" << iter->first << "]: " << iter->second->time_; + << "[" << name << "]: " << iter->second->time_; if (iter->second->dict_ != nullptr) { oss << ", [" << iter->second->dict_->size() << "]"; } oss << "\n"; std::string newPrefix = prefix; - if (iter->first.find("Cycle ") != 0) { + if (iter->first.find("Cycle ") == std::string::npos) { newPrefix = prefix.empty() ? iter->first : prefix + "." 
+ iter->first; } PrintProfile(oss, *iter->second, indent + 1, sums, newPrefix); @@ -94,7 +96,14 @@ void PrintProfile(std::ostringstream& oss, const TimeInfo& time_info, int indent oss << "Sums\n"; if (total >= 0.0 + DBL_EPSILON) { for (auto& iter : *sums) { - oss << " " << std::left << std::setw(36) << iter.first << " : " << std::right << std::setw(12) << std::fixed + std::string name = iter.first; + name.erase(0, TIME_INFO_PREFIX_NUM_LEN); + std::size_t pos = 0; + while ((pos = name.find('.', pos)) != std::string::npos) { + pos++; + name.erase(pos, TIME_INFO_PREFIX_NUM_LEN); + } + oss << " " << std::left << std::setw(36) << name << " : " << std::right << std::setw(12) << std::fixed << std::setprecision(6) << iter.second << "s : " << std::right << std::setw(5) << std::fixed << std::setprecision(2) << iter.second / total * 100 << "%\n"; } @@ -241,14 +250,18 @@ void ProfContext::Insert(const std::string& name, const TimeInfo* time) noexcept } } - auto iter = time_info_->dict_->find(name); + std::stringstream ss; + ss << std::setw(TIME_INFO_PREFIX_NUM_LEN) << std::setfill('0') << time_info_->actionNum_; + std::string sorted_name(ss.str() + name); + time_info_->actionNum_++; + auto iter = time_info_->dict_->find(sorted_name); // if contains item with same name, delete it if (iter != time_info_->dict_->end()) { delete iter->second; iter->second = nullptr; (void)time_info_->dict_->erase(iter); } - (*time_info_->dict_)[name] = time; + (*time_info_->dict_)[sorted_name] = time; } bool ProfContext::IsTopContext() const noexcept { return (prof_ != nullptr) && (this == &prof_->context_); } diff --git a/mindspore/ccsrc/utils/profile.h b/mindspore/ccsrc/utils/profile.h index 4824f1f6ab..6892b0b4f6 100644 --- a/mindspore/ccsrc/utils/profile.h +++ b/mindspore/ccsrc/utils/profile.h @@ -34,12 +34,13 @@ extern double GetTime(); class ProfileBase; struct TimeInfo { - explicit TimeInfo(double time = -1.0) : time_(time), dict_(nullptr) {} + explicit TimeInfo(double time = -1.0) : 
time_(time), dict_(nullptr), actionNum_(0) {} TimeInfo(const TimeInfo&) = delete; ~TimeInfo(); double time_; TimeInfoMap* dict_; + size_t actionNum_; }; // Utility class for Profile. From 0ac50a19f5457f965c69c0358733e768cebe77ae Mon Sep 17 00:00:00 2001 From: Xiaoda Zhang Date: Fri, 10 Apr 2020 10:13:34 +0800 Subject: [PATCH 207/367] Model the memory cost in auto-parallel. It is calculated by the output of operators, plus the parameters. Additionally, modify the graph-operations in auto_parallel to include memory_cost. --- .../ccsrc/parallel/auto_parallel/costmodel.h | 8 +- .../auto_parallel/dp_algo_costmodel.cc | 15 +- .../auto_parallel/dp_algo_costmodel.h | 7 +- .../parallel/auto_parallel/edge_costmodel.cc | 34 +++- .../parallel/auto_parallel/edge_costmodel.h | 2 +- .../parallel/auto_parallel/graph_costmodel.cc | 153 +++++++++++++----- .../parallel/auto_parallel/graph_costmodel.h | 3 + .../auto_parallel/operator_costmodel.cc | 90 +++++++---- .../auto_parallel/operator_costmodel.h | 85 +++++++--- .../ccsrc/parallel/ops_info/activation_info.h | 4 +- .../ccsrc/parallel/ops_info/arithmetic_info.h | 22 +-- .../parallel/ops_info/batch_parallel_info.h | 8 +- .../ccsrc/parallel/ops_info/bias_add_info.h | 2 +- .../ops_info/comparison_function_info.h | 9 +- .../parallel/ops_info/dropout_do_mask_info.h | 2 +- .../ccsrc/parallel/ops_info/get_next_info.h | 2 +- mindspore/ccsrc/parallel/ops_info/loss_info.h | 3 +- .../ccsrc/parallel/ops_info/matmul_info.cc | 6 +- .../ccsrc/parallel/ops_info/matmul_info.h | 2 +- .../ccsrc/parallel/ops_info/onehot_info.h | 2 +- .../ccsrc/parallel/ops_info/operator_info.cc | 44 ++++- .../ccsrc/parallel/ops_info/operator_info.h | 16 +- .../ccsrc/parallel/ops_info/prelu_info.h | 2 +- .../parallel/ops_info/reduce_method_info.cc | 2 +- .../parallel/ops_info/reduce_method_info.h | 2 +- .../ccsrc/parallel/ops_info/reshape_info.h | 2 +- .../parallel/ops_info/tmp_identity_info.h | 2 +- .../ccsrc/parallel/ops_info/transpose_info.h | 2 +- 
.../parallel/ops_info/virtual_dataset_info.h | 2 +- .../ccsrc/parallel/step_auto_parallel.cc | 8 +- .../tensor_layout/tensor_redistribution.cc | 6 + .../tensor_layout/tensor_redistribution.h | 7 + .../cpp/parallel/ops_info/activation_test.cc | 8 +- .../cpp/parallel/ops_info/matmul_info_test.cc | 4 +- .../parallel/ops_info/tensor_add_info_test.cc | 8 +- .../cpp/parallel/ops_info/tmpidentity_test.cc | 4 +- 36 files changed, 401 insertions(+), 177 deletions(-) diff --git a/mindspore/ccsrc/parallel/auto_parallel/costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/costmodel.h index 229f0fbf5e..9e9003848b 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/costmodel.h @@ -207,15 +207,13 @@ struct ContractEliminationDecision : public Decision { */ struct TriangleEliminationDecision : public Decision { TriangleEliminationDecision(StrategyPtr elimi_stra, CostPtr elimi_op_cost, CostPtr l_edge_cost, CostPtr r_edge_cost, - StrategyPtr left_stra, CostPtr l_node_cost, StrategyPtr right_stra, CostPtr r_node_cost) + StrategyPtr left_stra, CostPtr l_node_cost) : eliminated_op_strategy_(std::move(elimi_stra)), eliminated_op_cost_(std::move(elimi_op_cost)), left_edge_cost_(std::move(l_edge_cost)), right_edge_cost_(std::move(r_edge_cost)), left_node_strategy_(std::move(left_stra)), - left_node_cost_(std::move(l_node_cost)), - right_node_strategy_(std::move(right_stra)), - right_node_cost_(std::move(r_node_cost)) { + left_node_cost_(std::move(l_node_cost)) { type_ = DecisionType::TRIANGLE_ELIMINATION; } @@ -225,8 +223,6 @@ struct TriangleEliminationDecision : public Decision { CostPtr right_edge_cost_; StrategyPtr left_node_strategy_; CostPtr left_node_cost_; - StrategyPtr right_node_strategy_; - CostPtr right_node_cost_; MS_DECLARE_PARENT(TriangleEliminationDecision, Decision); }; diff --git a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.cc index 
060caa4cca..dd21096fcc 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.cc @@ -76,7 +76,6 @@ Status GetStrategy(const CostGraphPtr& graph) { auto l_r_edge = triangle_pair.second; auto left_node = l_r_edge->prev_operator(); - auto right_node = l_r_edge->next_operator(); auto left_edge = eliminated_node->GetAliveSuccEdges()[0]; auto right_edge = eliminated_node->GetAliveSuccEdges()[1]; MS_EXCEPTION_IF_NULL(left_edge); @@ -86,8 +85,7 @@ Status GetStrategy(const CostGraphPtr& graph) { right_edge = tmp; } auto left_node_cpy = graph->EliminationTriangle(eliminated_node, l_r_edge); - auto elimi = - std::make_shared(eliminated_node, left_edge, left_node_cpy, right_edge, right_node); + auto elimi = std::make_shared(eliminated_node, left_edge, left_node_cpy, right_edge); eliminations.emplace_back(std::move(elimi)); } auto star_center = graph->CheckStarElimination(); @@ -183,14 +181,13 @@ Status RecoverStrategy(std::vector eliminations) { auto left_edge = elimination->left_edge_; auto eliminated_node = elimination->eliminated_node_; auto right_edge = elimination->right_edge_; - auto right_node = elimination->right_node_; auto decision = left_node->selected_cost()->decision_ptr_->cast(); eliminated_node->SetSelectedStrategyAndCost(decision->eliminated_op_strategy_, decision->eliminated_op_cost_); left_edge->set_selected_cost(decision->left_edge_cost_); right_edge->set_selected_cost(decision->right_edge_cost_); + // Since Triangle is eliminated into 'left_node', only 'left_node' is needed to recover the strategy. 
left_node->SetSelectedStrategyAndCost(decision->left_node_strategy_, decision->left_node_cost_); - right_node->SetSelectedStrategyAndCost(decision->right_node_strategy_, decision->right_node_cost_); MS_LOG(INFO) << "Recover triangleElimination succeeded."; } else if ((*rit)->isa()) { auto elimination = (*rit)->cast(); @@ -204,9 +201,11 @@ Status RecoverStrategy(std::vector eliminations) { for (size_t i = 0; i < succ_edges.size(); ++i) { succ_edges[i]->set_selected_cost(decision->succ_edges_cost_list_[i]); } - for (size_t j = 0; j < succ_nodes.size(); ++j) { - succ_nodes[j]->SetSelectedStrategyAndCost(decision->succ_ops_stra_list_[j], decision->succ_ops_cost_list_[j]); - } + MS_EXCEPTION_IF_NULL(succ_nodes[0]); + MS_EXCEPTION_IF_NULL(decision->succ_ops_stra_list_[0]); + MS_EXCEPTION_IF_NULL(decision->succ_ops_cost_list_[0]); + // Since Star is eliminated into 'succ_nodes[0]', only 'succ_nodes[0]' is needed to recover the strategy. + succ_nodes[0]->SetSelectedStrategyAndCost(decision->succ_ops_stra_list_[0], decision->succ_ops_cost_list_[0]); MS_LOG(INFO) << "Recover starElimination succeeded."; } else { MS_LOG(ERROR) << "Unknown Elimination type."; diff --git a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h index 0cb58c49da..6d43218e19 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/dp_algo_costmodel.h @@ -102,20 +102,17 @@ struct ContractElimination : public Elimination { // Triangle Elimination struct TriangleElimination : public Elimination { - TriangleElimination(OperatorInfoPtr elim_node, EdgePtr l_edge, OperatorInfoPtr l_node, EdgePtr r_edge, - OperatorInfoPtr r_node) + TriangleElimination(OperatorInfoPtr elim_node, EdgePtr l_edge, OperatorInfoPtr l_node, EdgePtr r_edge) : Elimination(nullptr, Elimination::EliminationType::TRIANGLE), eliminated_node_(std::move(elim_node)), left_edge_(std::move(l_edge)), 
left_node_(std::move(l_node)), - right_edge_(std::move(r_edge)), - right_node_(std::move(r_node)) {} + right_edge_(std::move(r_edge)) {} OperatorInfoPtr eliminated_node_; EdgePtr left_edge_; OperatorInfoPtr left_node_; EdgePtr right_edge_; - OperatorInfoPtr right_node_; MS_DECLARE_PARENT(TriangleElimination, Elimination); }; diff --git a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc index cbd66f58a6..895646f409 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc @@ -119,6 +119,7 @@ Status Edge::GetRedistributionCost(const TensorLayout& prev_op_output_layout, co double forward_comm_cost = tensor_redistribution.forward_comm_cost(); double backward_comm_cost = tensor_redistribution.backward_comm_cost(); double computation_cost = tensor_redistribution.computation_cost(); + double mem_cost = tensor_redistribution.memory_cost(); // Now AllGather, ReduceScatter, AlltoAll don't support bool type MS_EXCEPTION_IF_NULL(type); @@ -134,6 +135,7 @@ Status Edge::GetRedistributionCost(const TensorLayout& prev_op_output_layout, co COST_MODEL_GAMMA * ((*cost)->communication_cost_ - (*cost)->communication_without_parameter_); (*cost)->communication_redis_forward_ = type_length * forward_comm_cost; (*cost)->communication_redis_backward_ = type_length * backward_comm_cost; + (*cost)->memory_with_reuse_ = mem_cost; return Status::SUCCESS; } @@ -158,8 +160,8 @@ CostPtrList Edge::CreateEdgeEliminationCostList(const StrategyPtr& output_st_ptr (void)std::transform(edges.begin(), edges.end(), all_cost_list.begin(), LocalGetCostList); CostPtrList selected_cost_list(all_cost_list.size(), nullptr); - std::function recursive = - [&](size_t k, double computation, double communication, double communication_without_para) { + std::function recursive = + [&](size_t k, double computation, double memory, double communication, double 
communication_without_para) { if (k == edges.size()) { auto decision = std::make_shared(selected_cost_list); CostPtr new_cost = std::make_shared(computation, communication); @@ -167,6 +169,7 @@ CostPtrList Edge::CreateEdgeEliminationCostList(const StrategyPtr& output_st_ptr new_cost->communication_without_parameter_ = communication_without_para; new_cost->communication_with_partial_para_ = communication_without_para + COST_MODEL_GAMMA * (communication - communication_without_para); + new_cost->memory_with_reuse_ = memory; new_cost->decision_ptr_ = decision; result.push_back(new_cost); return; @@ -174,11 +177,12 @@ CostPtrList Edge::CreateEdgeEliminationCostList(const StrategyPtr& output_st_ptr for (auto& c : all_cost_list[k]) { MS_EXCEPTION_IF_NULL(c); selected_cost_list[k] = c; - recursive(k + 1, computation + c->computation_cost_, communication + c->communication_cost_, + recursive(k + 1, computation + c->computation_cost_, memory + c->memory_with_reuse_, + communication + c->communication_cost_, communication_without_para + c->communication_without_parameter_); } }; - recursive(0, 0, 0, 0); + recursive(0, 0.0, 0.0, 0.0, 0.0); SimplifyForDreasingCommunicationWithPartialPara(&result); return result; } @@ -218,6 +222,8 @@ void Edge::CreateOpEliminationSubCostList(StrategyPtr op_strategy, const CostPtr double communication_without_para = left_cost->communication_without_parameter_ + middle_cost->communication_without_parameter_ + right_cost->communication_without_parameter_; + double memory_cost = + left_cost->memory_with_reuse_ + middle_cost->memory_with_reuse_ + right_cost->memory_with_reuse_; auto decision = std::make_shared(op_strategy, left_cost, middle_cost, right_cost); auto cost = std::make_shared(computation, communication, decision); @@ -225,6 +231,7 @@ void Edge::CreateOpEliminationSubCostList(StrategyPtr op_strategy, const CostPtr cost->communication_without_parameter_ = communication_without_para; cost->communication_with_partial_para_ = 
communication_without_para + COST_MODEL_GAMMA * (communication - communication_without_para); + cost->memory_with_reuse_ = memory_cost; ret_cost_list->emplace_back(std::move(cost)); } } @@ -267,5 +274,24 @@ void Edge::OpEliminationSetNewCost(const EdgePtr& e1, const OperatorInfoPtr& op, MS_LOG(EXCEPTION) << "Creating edge: " << edge_name_ << " failed."; } } + +Status Edge::CalculateMemoryCost() { + if (is_output_parameter_involve_ == -1) { + MS_LOG(ERROR) << "is_output_parameter_involve_ is unset."; + return FAILED; + } + if (is_output_parameter_involve_ == 0) { + // In this case, it is sure that the tensor redistribution along this edge is NOT parameter-involved, thus it is + // unnecessary to keep them in memory. + for (auto& cost_kv : cost_map_) { + auto& cost_v = cost_kv.second; + if (!cost_v.empty()) { + cost_v[0]->memory_with_reuse_ = 0; + } + } + } + + return SUCCESS; +} } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h index bd882bb43f..f974125749 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.h @@ -133,7 +133,7 @@ class Edge { void set_parameter_involve(int para_invol) { is_output_parameter_involve_ = para_invol; } // When the input of a operator contains WEIGHT or a output from other operators involving WEIGHT, then these input // should stay in memory until it is used in the backward phase, which is kept in memory at the end of forward phase. 
- Status CalculateMemoryCost() const { return SUCCESS; } + Status CalculateMemoryCost(); private: std::string edge_name_; diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc index 88a54662d3..82dd723039 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc @@ -248,6 +248,7 @@ CostPtrList CostGraph::CreateFinalCostList(const OperatorInfoPtr& u, const std:: MS_EXCEPTION_IF_NULL(cost2); MS_EXCEPTION_IF_NULL(cost3); double computation = cost1->computation_cost_ + cost2->computation_cost_ + cost3->computation_cost_; + double memory = cost1->memory_with_reuse_ + cost2->memory_with_reuse_ + cost3->memory_with_reuse_; double commmunication = cost1->communication_cost_ + cost2->communication_cost_ + cost3->communication_cost_; double communication_without_para = cost1->communication_without_parameter_ + @@ -260,6 +261,7 @@ CostPtrList CostGraph::CreateFinalCostList(const OperatorInfoPtr& u, const std:: cost->communication_without_parameter_ = communication_without_para; cost->communication_with_partial_para_ = communication_without_para + COST_MODEL_GAMMA * (commmunication - communication_without_para); + cost->memory_with_reuse_ = memory; ret.push_back(cost); } } @@ -288,6 +290,7 @@ CostPtrList CostGraph::CreateFinalSingleCostList(const OperatorInfoPtr& u) { new_cost->communication_with_partial_para_ = cost1->communication_without_parameter_ + COST_MODEL_GAMMA * (cost1->communication_cost_ - cost1->communication_without_parameter_); + new_cost->memory_with_reuse_ = cost1->memory_with_reuse_; ret.push_back(new_cost); } } @@ -297,9 +300,14 @@ CostPtrList CostGraph::CreateFinalSingleCostList(const OperatorInfoPtr& u) { } CostPtr CostGraph::SelectCostWithMemoryConstraint(const CostPtrList& cost_list, double memory) { - if (cost_list.empty() || cost_list[0]->computation_cost_ >= memory) { - return nullptr; + CostPtrList 
after_mem_filter; + // Filter out the valid costs + for (auto& a_cost : cost_list) { + if (a_cost->memory_with_reuse_ <= memory) { + after_mem_filter.emplace_back(std::move(a_cost)); + } } + std::function LocalCompare = [&](CostPtr init, const CostPtr& cost_x) { MS_EXCEPTION_IF_NULL(cost_x); if (init == nullptr || cost_x->computation_cost_ < memory) { @@ -308,7 +316,7 @@ CostPtr CostGraph::SelectCostWithMemoryConstraint(const CostPtrList& cost_list, return init; }; CostPtr ret = nullptr; - return std::accumulate(cost_list.begin(), cost_list.end(), ret, LocalCompare); + return std::accumulate(after_mem_filter.begin(), after_mem_filter.end(), ret, LocalCompare); } CostPtr CostGraph::SelectCostWithMinTrainingTime(const CostPtrList& cost_list, double memory) { @@ -318,36 +326,46 @@ CostPtr CostGraph::SelectCostWithMinTrainingTime(const CostPtrList& cost_list, d MS_LOG(ERROR) << "Final cost list is null."; return nullptr; } - CostPtr ret = cost_list[0]; - MS_EXCEPTION_IF_NULL(ret); - if (ret->computation_cost_ >= memory) { - MS_LOG(ERROR) << "No available cost; the minimum cost is " << ret->computation_cost_ + CostPtrList after_mem_filter; + double minimum_memory = DBL_MAX; + // Filter out the valid costs. + for (auto& a_cost : cost_list) { + if (a_cost->memory_with_reuse_ <= memory) { + after_mem_filter.emplace_back(std::move(a_cost)); + } else if (a_cost->memory_with_reuse_ < minimum_memory) { + minimum_memory = a_cost->memory_with_reuse_; + } + } + if (after_mem_filter.empty()) { + MS_LOG(ERROR) << "No available cost. The minimum memory cost is: " << minimum_memory << ", the memory capacity is: " << memory << "."; return nullptr; } + // Init the returned value with first cost. 
+ CostPtr ret = after_mem_filter[0]; + double minimum = costmodel_alpha_ * ret->computation_cost_ + costmodel_beta_ * ret->communication_with_partial_para_; - MS_LOG(INFO) << "minimum: " << minimum << ", computation_cost_: " << ret->computation_cost_ + MS_LOG(INFO) << "Cost 0: " + << "memory_cost: " << ret->memory_with_reuse_ << ", computation_cost_: " << ret->computation_cost_ << ", communication_with_partial_para_: " << ret->communication_with_partial_para_ << ", communication_cost_: " << ret->communication_cost_ << ", communication_without_parameter_: " << ret->communication_without_parameter_ << "."; - for (size_t i = 1; i < cost_list.size(); ++i) { - MS_EXCEPTION_IF_NULL(cost_list[i]); - if (cost_list[i]->computation_cost_ >= memory) { - MS_LOG(INFO) << "cost_list " << i << " computation_cost_: " << cost_list[i]->computation_cost_ - << ", is larger than the memory capacity: " << memory << "."; - break; - } - MS_LOG(INFO) << "cost_list " << i << " computation_cost_: " << cost_list[i]->computation_cost_ - << ", communication_with_partial_para_: " << cost_list[i]->communication_with_partial_para_ - << ", communication_cost_: " << cost_list[i]->communication_cost_ - << ", communication_without_parameter_: " << cost_list[i]->communication_without_parameter_ << "."; - auto tmp = costmodel_alpha_ * cost_list[i]->computation_cost_ + - costmodel_beta_ * cost_list[i]->communication_with_partial_para_; - MS_LOG(INFO) << "tmp: " << tmp; + MS_LOG(INFO) << "Cost 0: totoal_cost: " << minimum; + for (size_t i = 1; i < after_mem_filter.size(); ++i) { + MS_EXCEPTION_IF_NULL(after_mem_filter[i]); + MS_LOG(INFO) << "Cost " << i << ": memory_cost: " << after_mem_filter[i]->memory_with_reuse_ + << ", computation_cost_: " << after_mem_filter[i]->computation_cost_ + << ", communication_with_partial_para_: " << after_mem_filter[i]->communication_with_partial_para_ + << ", communication_cost_: " << after_mem_filter[i]->communication_cost_ + << ", communication_without_parameter_: " << 
after_mem_filter[i]->communication_without_parameter_ + << "."; + auto tmp = costmodel_alpha_ * after_mem_filter[i]->computation_cost_ + + costmodel_beta_ * after_mem_filter[i]->communication_with_partial_para_; + MS_LOG(INFO) << "Cost " << i << ": total_cost: " << tmp; if (minimum > tmp) { minimum = tmp; - ret = cost_list[i]; - MS_LOG(INFO) << "selected: " << i; + ret = after_mem_filter[i]; + MS_LOG(INFO) << "Selected: " << i; } } return ret; @@ -356,17 +374,21 @@ CostPtr CostGraph::SelectCostWithMinTrainingTime(const CostPtrList& cost_list, d CostPtrList CostGraph::SelectCostListWithMinTrainingTimeMultiple(const std::vector& all_cost_list, double available_memory) { CostPtrList selected_cost_list(all_cost_list.size(), nullptr); - double minimum = 0.0, total_memory = 0.0; + double minimum = DBL_MAX, total_memory = 0.0; CostPtrList ret(all_cost_list.size(), nullptr); + // Check whether valid costs exist. for (size_t i = 0; i < all_cost_list.size(); ++i) { if (all_cost_list[i][0] == nullptr) { MS_LOG(ERROR) << "The cost list " << i << " is empty."; return ret; } else { - total_memory += all_cost_list[i][0]->computation_cost_; - minimum += costmodel_alpha_ * all_cost_list[i][0]->computation_cost_ + - costmodel_beta_ * all_cost_list[i][0]->communication_with_partial_para_; - ret[i] = all_cost_list[i][0]; + double memory_i_cost = DBL_MAX; + for (size_t j = 0; j < all_cost_list[i].size(); ++j) { + if (all_cost_list[i][j]->memory_with_reuse_ < memory_i_cost) { + memory_i_cost = all_cost_list[i][j]->memory_with_reuse_; + } + } + total_memory += memory_i_cost; } } if (total_memory >= available_memory) { @@ -381,7 +403,7 @@ CostPtrList CostGraph::SelectCostListWithMinTrainingTimeMultiple(const std::vect double tmp_memory = 0.0, tmp_minimum = 0.0; for (size_t i = 0; i < selected_cost_list.size(); ++i) { MS_EXCEPTION_IF_NULL(selected_cost_list[i]); - tmp_memory += selected_cost_list[i]->computation_cost_; + tmp_memory += selected_cost_list[i]->memory_with_reuse_; tmp_minimum 
+= costmodel_alpha_ * selected_cost_list[i]->computation_cost_ + costmodel_beta_ * selected_cost_list[i]->communication_with_partial_para_; } @@ -816,6 +838,7 @@ void CostGraph::CreateMergeEliminationSubCostList(StrategyPtr op_strategy, const auto& tar_cost = tar_cost_list[k]; MS_EXCEPTION_IF_NULL(tar_cost); double computation = op_cost->computation_cost_ + edge_cost->computation_cost_ + tar_cost->computation_cost_; + double memory = op_cost->memory_with_reuse_ + edge_cost->memory_with_reuse_ + tar_cost->memory_with_reuse_; double communication = op_cost->communication_cost_ + edge_cost->communication_cost_ + tar_cost->communication_cost_; double communication_without_para = op_cost->communication_without_parameter_ + @@ -829,6 +852,7 @@ void CostGraph::CreateMergeEliminationSubCostList(StrategyPtr op_strategy, const new_cost->communication_without_parameter_ = communication_without_para; new_cost->communication_with_partial_para_ = communication_without_para + COST_MODEL_GAMMA * (communication - communication_without_para); + new_cost->memory_with_reuse_ = memory; MS_EXCEPTION_IF_NULL(tar_cost_list_new); tar_cost_list_new->emplace_back(std::move(new_cost)); } @@ -894,6 +918,8 @@ void CostGraph::CreateContractEliminationSubCostList(StrategyPtr contract_op_str MS_EXCEPTION_IF_NULL(tar_cost); double computation = contract_op_cost->computation_cost_ + edge_cost->computation_cost_ + tar_cost->computation_cost_; + double memory = + contract_op_cost->memory_with_reuse_ + edge_cost->memory_with_reuse_ + tar_cost->memory_with_reuse_; double communication = contract_op_cost->communication_cost_ + edge_cost->communication_cost_ + tar_cost->communication_cost_; double communication_without_para = contract_op_cost->communication_without_parameter_ + @@ -906,6 +932,7 @@ void CostGraph::CreateContractEliminationSubCostList(StrategyPtr contract_op_str new_cost->communication_without_parameter_ = communication_without_para; new_cost->communication_with_partial_para_ = 
communication_without_para + COST_MODEL_GAMMA * (communication - communication_without_para); + new_cost->memory_with_reuse_ = memory; tar_cost_list_new->emplace_back(std::move(new_cost)); } } @@ -966,23 +993,22 @@ void CostGraph::CreateTriangleEliminationSubCostList(StrategyPtr elimi_op_stra, for (auto& left_node_cost : left_node_clist_origin) { MS_EXCEPTION_IF_NULL(left_node_cost); double new_computation = elimi_op_cost->computation_cost_ + left_edge_cost->computation_cost_ + - left_node_cost->computation_cost_ + right_edge_cost->computation_cost_ + - right_op_cost->computation_cost_; + left_node_cost->computation_cost_ + right_edge_cost->computation_cost_; + double new_memory = elimi_op_cost->memory_with_reuse_ + left_edge_cost->memory_with_reuse_ + + left_node_cost->memory_with_reuse_ + right_edge_cost->memory_with_reuse_; double new_commu_cost = elimi_op_cost->communication_cost_ + left_edge_cost->communication_cost_ + - left_node_cost->communication_cost_ + right_edge_cost->communication_cost_ + - right_op_cost->communication_cost_; + left_node_cost->communication_cost_ + right_edge_cost->communication_cost_; double new_commu_without = elimi_op_cost->communication_without_parameter_ + left_edge_cost->communication_without_parameter_ + - left_node_cost->communication_without_parameter_ + right_edge_cost->communication_without_parameter_ + - right_op_cost->communication_without_parameter_; + left_node_cost->communication_without_parameter_ + right_edge_cost->communication_without_parameter_; - auto decision = - std::make_shared(elimi_op_stra, elimi_op_cost, left_edge_cost, right_edge_cost, - left_op_stra, left_node_cost, right_op_stra, right_op_cost); + auto decision = std::make_shared(elimi_op_stra, elimi_op_cost, left_edge_cost, + right_edge_cost, left_op_stra, left_node_cost); auto new_cost = std::make_shared(new_computation, new_commu_cost, decision); new_cost->communication_without_parameter_ = new_commu_without; new_cost->communication_with_partial_para_ 
= new_commu_without + COST_MODEL_GAMMA * (new_commu_cost - new_commu_without); + new_cost->memory_with_reuse_ = new_memory; left_node_clist_new->emplace_back(std::move(new_cost)); } } @@ -1085,14 +1111,22 @@ void CostGraph::CreateStarEliminationSubCostList(const StrategyPtr& first_succ_n succ_nodes_costs[0] = first_succ_node_cost; double computation_cost = merged_node_cost->computation_cost_, - commu_cost = merged_node_cost->communication_cost_, + memory_cost = merged_node_cost->memory_with_reuse_, commu_cost = merged_node_cost->communication_cost_, commu_without = merged_node_cost->communication_without_parameter_; for (size_t i = 0; i < succ_nodes_stras.size(); ++i) { MS_EXCEPTION_IF_NULL(succ_edges_costs[i]); - computation_cost += succ_edges_costs[i]->computation_cost_ + succ_nodes_costs[i]->computation_cost_; - commu_cost += succ_edges_costs[i]->communication_cost_ + succ_nodes_costs[i]->communication_cost_; - commu_without += succ_edges_costs[i]->communication_without_parameter_ + - succ_nodes_costs[i]->communication_without_parameter_; + if (i == 0) { + computation_cost += succ_edges_costs[i]->computation_cost_ + succ_nodes_costs[i]->computation_cost_; + memory_cost += succ_edges_costs[i]->memory_with_reuse_ + succ_nodes_costs[i]->memory_with_reuse_; + commu_cost += succ_edges_costs[i]->communication_cost_ + succ_nodes_costs[i]->communication_cost_; + commu_without += succ_edges_costs[i]->communication_without_parameter_ + + succ_nodes_costs[i]->communication_without_parameter_; + } else { + computation_cost += succ_edges_costs[i]->computation_cost_; + memory_cost += succ_edges_costs[i]->memory_with_reuse_; + commu_cost += succ_edges_costs[i]->communication_cost_; + commu_without += succ_edges_costs[i]->communication_without_parameter_; + } } auto decision = std::make_shared(merged_op_stra, merged_node_cost, succ_edges_costs, @@ -1100,6 +1134,7 @@ void CostGraph::CreateStarEliminationSubCostList(const StrategyPtr& first_succ_n auto new_cost = 
std::make_shared(computation_cost, commu_cost, decision); new_cost->communication_without_parameter_ = commu_without; new_cost->communication_with_partial_para_ = commu_without + COST_MODEL_GAMMA * (commu_cost - commu_without); + new_cost->memory_with_reuse_ = memory_cost; first_succ_node_clist_new->emplace_back(std::move(new_cost)); } } @@ -1259,5 +1294,35 @@ OperatorInfoPtr CostGraph::FindTmpIdentityByParameterName(std::string& p_name) c } return nullptr; } +Status CostGraph::CorrectOpsMemoryCost() { + for (auto& one_op : ops_) { + if ((one_op->name().find(IDENTITY_INFO) != std::string::npos) && (one_op->is_output_parameter_involve() == 1)) { + if (one_op->GetAliveSuccEdges().size() > 1) { + // Filter out the case when the TmpIdentity being used by multiple operators + std::map output_count; + for (size_t i = 0; i < one_op->GetAliveSuccEdges().size(); ++i) { + auto output_index = one_op->GetAliveSuccEdges()[i]->prev_op_output_index(); + output_count[output_index]++; + } + for (size_t i = 0; i < one_op->GetAliveSuccEdges().size(); ++i) { + auto output_index = one_op->GetAliveSuccEdges()[i]->prev_op_output_index(); + if (output_count[output_index] <= 1) { + continue; + } + auto next_op = one_op->GetAliveSuccEdges()[i]->next_operator(); + MS_EXCEPTION_IF_NULL(next_op); + auto input_index = one_op->GetAliveSuccEdges()[i]->next_op_input_index(); + if (next_op->CorrectMemoryCost(input_index) != SUCCESS) { + MS_LOG(ERROR) << "The operator name: " << one_op->name() << ", the next operator name: " << next_op->name() + << ", the output_index: " << output_index << ", the input_index: " << input_index << "."; + return FAILED; + } + output_count[output_index]--; + } + } + } + } + return SUCCESS; +} } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h index c149534826..65aeb210ea 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h +++ 
b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h @@ -187,6 +187,9 @@ class CostGraph { size_t GetNumPairs() const { return edges_.size(); } Status InitSelectedStrategy(); OperatorInfoPtr FindTmpIdentityByParameterName(std::string&) const; + // When TmpIdentity is used by mulitple operators, the corresponding parameter's memory cost should be calculated only + // once (instead of multiple times), this method is used to correct this. + Status CorrectOpsMemoryCost(); // Needed by rec_parser void add_inputs_tensor_name(const std::vector& inputs_tensor_name) { inputs_tensor_name_list_.push_back(inputs_tensor_name); diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc index 960e13281c..ecd42db6bb 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc @@ -17,6 +17,7 @@ #include "parallel/auto_parallel/operator_costmodel.h" #include +#include #include "parallel/device_matrix.h" #include "parallel/tensor_layout/tensor_redistribution.h" @@ -24,12 +25,44 @@ namespace mindspore { namespace parallel { void OperatorCost::set_is_parameter(const std::vector& is_parameter) { is_parameter_ = is_parameter; } +void OperatorCost::set_is_parameter_involve(const std::vector& is_parameter_inv) { + is_parameter_involve_ = is_parameter_inv; +} + +void OperatorCost::set_output_parameter_involve(int output_para) { output_parameter_involve_ = output_para; } + void OperatorCost::SetInputAndOutputTypeLength(const std::vector& input_lengths, const std::vector& output_lengths) { inputs_type_lengths_ = input_lengths; outputs_type_lengths_ = output_lengths; } +double OperatorCost::GetMemoryCost(const std::vector& inputs, + const std::vector& outputs) const { + double result = 0.0; + if (output_parameter_involve_ == 1) { + // When this operator has multiple outputs, they all contributes to the memory. 
+ for (size_t i = 0; i < outputs.size(); ++i) { + result += ListProduct(outputs[i].slice_shape()) * static_cast(outputs_type_lengths_[i]); + } + bool is_any_para_inv = + std::any_of(is_parameter_involve_.begin(), is_parameter_involve_.end(), [](bool value) { return value; }); + if (is_any_para_inv) { + for (size_t i = 0; i < inputs.size(); ++i) { + if (is_parameter_[i]) { + result += ListProduct(inputs[i].slice_shape()) * static_cast(inputs_type_lengths_[i]); + } else if (inputs_related_ && (!is_parameter_involve_[i])) { + // When the inputs of this operator are related, and they are not parameter-involved, then they are included + // in the memory cost. + result += ListProduct(inputs[i].slice_shape()) * static_cast(inputs_type_lengths_[i]); + } + } + } + } + + return result; +} + // return the per device communication cost in the forward phase. double MatMulCost::GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, const int32_t&) const { @@ -72,11 +105,11 @@ double MatMulCost::GetBackwardCommCost(const std::vector& inputs, co return result; } -// Return the per device memory cost in the forward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double MatMulCost::GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, const int32_t&) const { - // In forward phase, the memory cost = slice(A) + slice(B) + (0 or 1) allreduce(slice(C)) + // In forward phase, the compuatation cost = slice(A) + slice(B) + (0 or 1) allreduce(slice(C)) double result = 0.0; TensorInfo output0 = outputs[0]; Shape input0_slice_shape = inputs[0].slice_shape(); @@ -91,11 +124,11 @@ double MatMulCost::GetForwardComputationCost(const std::vector& inpu return result; } -// Return the per device memory cost in the forward phase. 
The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double MatMulCost::GetBackwardComputationCost(const std::vector& inputs, const std::vector&, const int32_t& stage_id) const { - // In backward phase, the memory cost = (0 or 1) allreduce(slice(B)) + // In backward phase, the computation cost = (0 or 1) allreduce(slice(B)) double result = 0.0; if (is_parameter_[1]) { TensorInfo input1 = inputs[1]; // tensor B @@ -145,7 +178,7 @@ double ActivationCost::GetBackwardCommCost(const std::vector& inputs return result; } -// Return the per memory cost in the forward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double ActivationCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, const int32_t&) const { @@ -154,7 +187,7 @@ double ActivationCost::GetForwardComputationCost(const std::vector& return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); } -// Return the per memory cost in the forward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double ActivationCost::GetBackwardComputationCost(const std::vector&, const std::vector&, const int32_t&) const { @@ -189,17 +222,17 @@ double SoftmaxCost::GetBackwardCommCost(const std::vector& inputs, c return result; } -// Return the per memory cost in the forward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses double SoftmaxCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, const int32_t&) const { - // In the forward phase, the memory cost = slice(A) + // In the forward phase, the computation cost = slice(A) TensorInfo input0 = inputs[0]; Shape input0_slice_shape = input0.slice_shape(); return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); } -// Return the per memory cost in the forward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double SoftmaxCost::GetBackwardComputationCost(const std::vector&, const std::vector&, @@ -221,17 +254,15 @@ double TmpIdentityCost::GetBackwardCommCost(const std::vector& inputs, +double TmpIdentityCost::GetForwardComputationCost(const std::vector&, const std::vector&, const int32_t&) const { - TensorInfo input0_info = inputs[0]; - Shape input0_slice_shape = input0_info.slice_shape(); - return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); + return 0.0; } -// Return the per memory cost in the backward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the backward phase. The cost is calculated according to the bytes // this operator uses double TmpIdentityCost::GetBackwardComputationCost(const std::vector&, const std::vector&, @@ -239,6 +270,11 @@ double TmpIdentityCost::GetBackwardComputationCost(const std::vector&, const std::vector&) const { + return 0.0; +} + double BatchParallelCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, const int32_t&) const { @@ -284,11 +320,11 @@ double PReLUCost::GetBackwardCommCost(const std::vector& inputs, con return result; } -// Return the per memory cost in the forward phase. 
The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double PReLUCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, const int32_t&) const { - // In forward phase, the memory cost = slice(A) + slice(B) + // In forward phase, the computation cost = slice(A) + slice(B) Shape input0_slice_shape = inputs[0].slice_shape(); Shape input1_slice_shape = inputs[1].slice_shape(); double result = ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]) + @@ -296,12 +332,12 @@ double PReLUCost::GetForwardComputationCost(const std::vector& input return result; } -// Return the per memory cost in the backward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the backward phase. The cost is calculated according to the bytes // this operator uses double PReLUCost::GetBackwardComputationCost(const std::vector& inputs, const std::vector&, const int32_t& stage_id) const { - // In backward phase, the memory cost = (0 or 1) allreduce(slice(B)) + // In backward phase, the computation cost = (0 or 1) allreduce(slice(B)) double result = 0.0; if (is_parameter_[1]) { TensorInfo input1 = inputs[1]; // tensor B @@ -337,16 +373,16 @@ double OneHotCost::GetBackwardCommCost(const std::vector&, const std return 0.0; } -// Return the per memory cost in the forward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses double OneHotCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, const int32_t&) const { - // In onehot's forward phase, the memory cost = slice(A) + // In onehot's forward phase, the computation cost = slice(A) Shape input0_slice_shape = inputs[0].slice_shape(); return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); } -// Return the per memory cost in the backward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the backward phase. The cost is calculated according to the bytes // this operator uses double OneHotCost::GetBackwardComputationCost(const std::vector&, const std::vector&, const int32_t&) const { @@ -367,12 +403,12 @@ double SoftmaxCrossEntropyWithLogitsCost::GetBackwardCommCost(const std::vector< return 0.0; } -// Return the per memory cost in the forward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double SoftmaxCrossEntropyWithLogitsCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, const int32_t&) const { - // In forward phase, the memory cost = slice(A) + slice(B) + // In forward phase, the computation cost = slice(A) + slice(B) Shape input0_slice_shape = inputs[0].slice_shape(); Shape input1_slice_shape = inputs[1].slice_shape(); double result = ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]) + @@ -380,7 +416,7 @@ double SoftmaxCrossEntropyWithLogitsCost::GetForwardComputationCost(const std::v return result; } -// Return the per memory cost in the backward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the backward phase. 
The cost is calculated according to the bytes // this operator uses double SoftmaxCrossEntropyWithLogitsCost::GetBackwardComputationCost(const std::vector&, const std::vector&, @@ -410,7 +446,7 @@ double ReshapeCost::GetBackwardCommCost(const std::vector&, const st return 0.0; } -// Return the per memory cost in the forward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double ReshapeCost::GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const { @@ -427,7 +463,7 @@ double ReshapeCost::GetForwardComputationCost(const std::vector& inp return (inputs_type_lengths_[0] * tensor_redistribution.computation_cost()); } -// Return the per memory cost in the backward phase. The cost is calculated according to the bytes +// Return the per device computation cost in the backward phase. The cost is calculated according to the bytes // this operator uses double ReshapeCost::GetBackwardComputationCost(const std::vector&, const std::vector&, diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h index 685cb259c3..7dc45bae71 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h @@ -43,10 +43,20 @@ double ListProduct(std::vector vec) { // entries timing the length of each entry's data type class OperatorCost { public: - OperatorCost() { + explicit OperatorCost(bool is_inputs_related) : inputs_related_(is_inputs_related) { // this is only for the case when set_is_parameter() and SetInputAndOutputTypeLength() are not invoked for (size_t i = 0; i < MAXIMUM_INPUT_NUMBER; ++i) { is_parameter_.push_back(false); + is_parameter_involve_.push_back(false); + inputs_type_lengths_.push_back(DEFAULT_DATA_TYPE_LENGTH); + 
outputs_type_lengths_.push_back(DEFAULT_DATA_TYPE_LENGTH); + } + } + OperatorCost() : inputs_related_(false) { + // this is only for the case when set_is_parameter() and SetInputAndOutputTypeLength() are not invoked + for (size_t i = 0; i < MAXIMUM_INPUT_NUMBER; ++i) { + is_parameter_.push_back(false); + is_parameter_involve_.push_back(false); inputs_type_lengths_.push_back(DEFAULT_DATA_TYPE_LENGTH); outputs_type_lengths_.push_back(DEFAULT_DATA_TYPE_LENGTH); } @@ -54,6 +64,8 @@ class OperatorCost { virtual ~OperatorCost() = default; void set_is_parameter(const std::vector& is_parameter); + void set_is_parameter_involve(const std::vector&); + void set_output_parameter_involve(int); void SetInputAndOutputTypeLength(const std::vector& input_lengths, const std::vector& output_lengths); std::vector inputs_type_lengths() const { return inputs_type_lengths_; } std::vector outputs_type_lengths() const { return outputs_type_lengths_; } @@ -72,8 +84,19 @@ class OperatorCost { const std::vector& outputs, const int32_t& stage_id) const = 0; virtual double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const = 0; + // per device PEAK memory cost in a training iteration + // Typically, the PEAK memory cost contributed by an operator is its output (if the output is parameter-invovled), + // plus necessary inputs. + virtual double GetMemoryCost(const std::vector& inputs, const std::vector& outputs) const; protected: + // For each input in 'inputs_', a bool variable is true if the corresponding one is a parameter or a output of + // pre-operator that has parameters as input. + std::vector is_parameter_involve_; + int output_parameter_involve_ = -1; // -1: unset; 0: not parameter_involved; 1: parameter_involved + // Whether the inputs are related or not? For example, TensorAdd's two inputs are independent (not related), while + // Mul's two inputs are dependent (related). 
+ bool inputs_related_; // for each input in 'inputs_', there is a bool variable indicating whether that the corresponding input is parameter std::vector is_parameter_; // for each input and output, the followings record the number of bytes of each element @@ -85,7 +108,8 @@ using OperatorCostPtr = std::shared_ptr; class MatMulCost : public OperatorCost { public: - MatMulCost() = default; + explicit MatMulCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + MatMulCost() : OperatorCost(true) {} ~MatMulCost() override = default; // per device communication cost @@ -108,12 +132,12 @@ class MatMulCost : public OperatorCost { double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const override; }; - using MatMulCostPtr = std::shared_ptr; class ActivationCost : public OperatorCost { public: - ActivationCost() = default; + explicit ActivationCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + ActivationCost() : OperatorCost(false) {} ~ActivationCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -133,14 +157,14 @@ class ActivationCost : public OperatorCost { double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const override; }; - using ActivationCostPtr = std::shared_ptr; using TransposeCost = ActivationCost; using TransposeCostPtr = std::shared_ptr; class SoftmaxCost : public OperatorCost { public: - SoftmaxCost() = default; + explicit SoftmaxCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + SoftmaxCost() : OperatorCost(false) {} ~SoftmaxCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -160,12 +184,12 @@ class SoftmaxCost : public OperatorCost { double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, const int32_t&) const override; }; - using SoftmaxCostPtr = 
std::shared_ptr; class TmpIdentityCost : public OperatorCost { public: - TmpIdentityCost() = default; + explicit TmpIdentityCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + TmpIdentityCost() : OperatorCost(false) {} ~TmpIdentityCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -184,12 +208,15 @@ class TmpIdentityCost : public OperatorCost { const int32_t& stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, const int32_t& stage_id) const override; + // per device PEAK memory cost in a training iteration + double GetMemoryCost(const std::vector& inputs, const std::vector& outputs) const override; }; using TmpIdentityCostPtr = std::shared_ptr; class BatchParallelCost : public OperatorCost { public: - BatchParallelCost() = default; + explicit BatchParallelCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + BatchParallelCost() : OperatorCost(false) {} ~BatchParallelCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -217,7 +244,8 @@ using BatchParallelCostPtr = std::shared_ptr; class VirtualDatasetCost : public OperatorCost { public: - VirtualDatasetCost() = default; + explicit VirtualDatasetCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + VirtualDatasetCost() : OperatorCost(false) {} ~VirtualDatasetCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -244,12 +272,17 @@ class VirtualDatasetCost : public OperatorCost { const int32_t&) const override { return 0.0; } + // per device PEAK memory cost in a training iteration + double GetMemoryCost(const std::vector& inputs, const std::vector& outputs) const override { + return 0.0; + } }; using VirtualDatasetCostPtr = std::shared_ptr; class GeneratorBaseCost : public OperatorCost { public: - GeneratorBaseCost() = default; + explicit GeneratorBaseCost(bool 
is_inputs_related) : OperatorCost(is_inputs_related) {} + GeneratorBaseCost() : OperatorCost(false) {} ~GeneratorBaseCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -283,7 +316,8 @@ using GeneratorBaseCostPtr = std::shared_ptr; class PReLUCost : public OperatorCost { public: - PReLUCost() = default; + explicit PReLUCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + PReLUCost() : OperatorCost(true) {} ~PReLUCost() override = default; // per device communication cost @@ -310,7 +344,8 @@ using PReLUCostPtr = std::shared_ptr; class OneHotCost : public OperatorCost { public: - OneHotCost() = default; + explicit OneHotCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + OneHotCost() : OperatorCost(true) {} ~OneHotCost() override = default; // per device communication cost @@ -337,7 +372,8 @@ using OneHotCostPtr = std::shared_ptr; class SoftmaxCrossEntropyWithLogitsCost : public OperatorCost { public: - SoftmaxCrossEntropyWithLogitsCost() = default; + explicit SoftmaxCrossEntropyWithLogitsCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + SoftmaxCrossEntropyWithLogitsCost() : OperatorCost(false) {} ~SoftmaxCrossEntropyWithLogitsCost() override = default; // per device communication cost @@ -364,7 +400,8 @@ using SoftmaxCrossEntropyWithLogitsCostPtr = std::shared_ptr; class ArithmeticCost : public OperatorCost { public: - ArithmeticCost() = default; + explicit ArithmeticCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + ArithmeticCost() : OperatorCost(false) {} ~ArithmeticCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -425,7 +463,8 @@ using BiasAddCostPtr = std::shared_ptr; class ReduceMethodCost : public OperatorCost { public: - ReduceMethodCost() = default; + explicit ReduceMethodCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + ReduceMethodCost() : OperatorCost(true) {} 
~ReduceMethodCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -455,7 +494,8 @@ using ReduceMethodCostPtr = std::shared_ptr; class ReduceMeanCost : public ReduceMethodCost { public: - ReduceMeanCost() = default; + explicit ReduceMeanCost(bool is_inputs_related) : ReduceMethodCost(is_inputs_related) {} + ReduceMeanCost() : ReduceMethodCost(true) {} ~ReduceMeanCost() override = default; double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, @@ -465,7 +505,8 @@ using ReduceMeanCostPtr = std::shared_ptr; class GetNextCost : public OperatorCost { public: - GetNextCost() = default; + explicit GetNextCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + GetNextCost() : OperatorCost(false) {} ~GetNextCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -499,7 +540,8 @@ using GetNextCostPtr = std::shared_ptr; class DropOutCost : public OperatorCost { public: - DropOutCost() = default; + explicit DropOutCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + DropOutCost() : OperatorCost(true) {} ~DropOutCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, @@ -530,7 +572,8 @@ using DropOutCostPtr = std::shared_ptr; class GatherV2Cost : public OperatorCost { public: - GatherV2Cost() = default; + explicit GatherV2Cost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + GatherV2Cost() : OperatorCost(true) {} ~GatherV2Cost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.h b/mindspore/ccsrc/parallel/ops_info/activation_info.h index b19e38b910..8dca036f9e 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.h +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.h @@ -51,7 +51,7 @@ class Activation : public ActivationBase { public: 
Activation(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ActivationBase(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : ActivationBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~Activation() override = default; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; @@ -102,7 +102,7 @@ class Softmax : public ActivationBase { public: explicit Softmax(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ActivationBase(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : ActivationBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~Softmax() override = default; Status GenerateStrategies(int32_t stage_id) override; Status SetCostUnderStrategy(const StrategyPtr& strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h index 435a7ce793..376a1fb4cf 100644 --- a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h +++ b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h @@ -32,8 +32,8 @@ namespace parallel { class ArithmeticBase : public OperatorInfo { public: ArithmeticBase(const std::string& operator_name, const Shapes& inputs_shape, const Shapes& outputs_shape, - const PrimitiveAttrs& attrs) - : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + const PrimitiveAttrs& attrs, OperatorCostPtr cost) + : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, cost) {} ~ArithmeticBase() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; @@ -56,7 +56,7 @@ class ArithmeticBase : public OperatorInfo { class SubInfo : public ArithmeticBase { public: SubInfo(const std::string& name, const 
Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~SubInfo() override = default; }; @@ -64,21 +64,21 @@ class TensorAddInfo : public ArithmeticBase { public: TensorAddInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~TensorAddInfo() override = default; }; class MulInfo : public ArithmeticBase { public: MulInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~MulInfo() override = default; }; class DivInfo : public ArithmeticBase { public: DivInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~DivInfo() override = default; }; @@ -86,7 +86,7 @@ class RealDivInfo : public ArithmeticBase { public: RealDivInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~RealDivInfo() override = default; }; @@ -94,14 +94,14 @@ class FloorDivInfo : public ArithmeticBase { public: FloorDivInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, 
attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~FloorDivInfo() override = default; }; class PowInfo : public ArithmeticBase { public: PowInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~PowInfo() override = default; }; @@ -109,7 +109,7 @@ class GreaterInfo : public ArithmeticBase { public: GreaterInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~GreaterInfo() override = default; }; @@ -117,7 +117,7 @@ class AssignSubInfo : public ArithmeticBase { public: AssignSubInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~AssignSubInfo() override = default; }; } // namespace parallel diff --git a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h index 093bfb8fad..4cedb9b7b8 100644 --- a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h +++ b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.h @@ -29,9 +29,13 @@ namespace mindspore { namespace parallel { class BatchParallelInfo : public OperatorInfo { public: + BatchParallelInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, + const PrimitiveAttrs& attrs, OperatorCostPtr cost) + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, cost), dev_num_(1) {} BatchParallelInfo(const std::string& name, const Shapes& 
inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()), dev_num_(1) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)), + dev_num_(1) {} ~BatchParallelInfo() override = default; Status Init(const StrategyPtr& strategy) override; @@ -58,7 +62,7 @@ class SparseSoftmaxCrossEntropyWithLogitsInfo : public BatchParallelInfo { public: SparseSoftmaxCrossEntropyWithLogitsInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : BatchParallelInfo(name, inputs_shape, outputs_shape, attrs) {} + : BatchParallelInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~SparseSoftmaxCrossEntropyWithLogitsInfo() override = default; void ReComputeBatchSplitFlagList() override; }; diff --git a/mindspore/ccsrc/parallel/ops_info/bias_add_info.h b/mindspore/ccsrc/parallel/ops_info/bias_add_info.h index dea5c90c88..e792858338 100644 --- a/mindspore/ccsrc/parallel/ops_info/bias_add_info.h +++ b/mindspore/ccsrc/parallel/ops_info/bias_add_info.h @@ -34,7 +34,7 @@ class BiasAddInfo : public OperatorInfo { public: BiasAddInfo(const std::string& operator_name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~BiasAddInfo() override = default; Status Init(const StrategyPtr& strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h index 00cc431463..9ea496e0b0 100644 --- a/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h +++ b/mindspore/ccsrc/parallel/ops_info/comparison_function_info.h @@ -18,6 +18,7 @@ #define 
MINDSPORE_CCSRC_PARALLEL_OPS_INFO_COMPARISON_FUNCTION_INFO_H_ #include +#include #include #include #include "ir/value.h" @@ -31,7 +32,7 @@ class EqualInfo : public ArithmeticBase { public: EqualInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~EqualInfo() override = default; }; @@ -39,7 +40,7 @@ class NotEqualInfo : public ArithmeticBase { public: NotEqualInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~NotEqualInfo() override = default; }; @@ -47,7 +48,7 @@ class MaximumInfo : public ArithmeticBase { public: MaximumInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~MaximumInfo() override = default; }; @@ -55,7 +56,7 @@ class MinimumInfo : public ArithmeticBase { public: MinimumInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : ArithmeticBase(name, inputs_shape, outputs_shape, attrs) {} + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~MinimumInfo() override = default; }; } // namespace parallel diff --git a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h index 7ebe677997..3b154bd6db 100644 --- a/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h +++ b/mindspore/ccsrc/parallel/ops_info/dropout_do_mask_info.h @@ -33,7 +33,7 @@ class 
DropoutDoMaskInfo : public OperatorInfo { public: DropoutDoMaskInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~DropoutDoMaskInfo() override = default; Status Init(const StrategyPtr& strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/get_next_info.h b/mindspore/ccsrc/parallel/ops_info/get_next_info.h index 9a65eff035..ba209910b7 100644 --- a/mindspore/ccsrc/parallel/ops_info/get_next_info.h +++ b/mindspore/ccsrc/parallel/ops_info/get_next_info.h @@ -32,7 +32,7 @@ class GetNextInfo : public OperatorInfo { public: GetNextInfo(const std::string &operator_name, const Shapes &inputs_shape, const Shapes &outputs_shape, const PrimitiveAttrs &attrs) - : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~GetNextInfo() override = default; Status Init(const StrategyPtr &strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/loss_info.h b/mindspore/ccsrc/parallel/ops_info/loss_info.h index f1c2537a39..44fe22ce90 100644 --- a/mindspore/ccsrc/parallel/ops_info/loss_info.h +++ b/mindspore/ccsrc/parallel/ops_info/loss_info.h @@ -36,7 +36,8 @@ class SoftmaxCrossEntropyWithLogitsInfo : public OperatorInfo { public: SoftmaxCrossEntropyWithLogitsInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, + std::make_shared(false)) {} ~SoftmaxCrossEntropyWithLogitsInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; 
diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc index 848116d68a..e617ae6c24 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc @@ -593,11 +593,11 @@ Status MatMulBase::SetCostUnderStrategy(const mindspore::parallel::StrategyPtr& // Here, we use the origin outputs_, because we only use the slice size of the output tensor. // It does not matter whether the output tensor is transposed or not. double computation_cost = - cost()->GetForwardComputationCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); - double communication_cost = cost()->GetCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); + operator_cost()->GetForwardComputationCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); + double communication_cost = operator_cost()->GetCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); std::shared_ptr result = std::make_shared(computation_cost, communication_cost); result->communication_without_parameter_ = - cost()->GetForwardCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); + operator_cost()->GetForwardCommCost(relica_inputs_tensor_vector, outputs_tensor_info_, stage_id); result->communication_with_partial_para_ = result->communication_without_parameter_ + COST_MODEL_GAMMA * (communication_cost - result->communication_without_parameter_); diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.h b/mindspore/ccsrc/parallel/ops_info/matmul_info.h index 2d3312774d..8a64fb7206 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.h +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.h @@ -34,7 +34,7 @@ class MatMulBase : public OperatorInfo { public: MatMulBase(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} 
+ : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~MatMulBase() override = default; Status Init(const StrategyPtr& strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/onehot_info.h b/mindspore/ccsrc/parallel/ops_info/onehot_info.h index fec8d96324..a4f00ea093 100644 --- a/mindspore/ccsrc/parallel/ops_info/onehot_info.h +++ b/mindspore/ccsrc/parallel/ops_info/onehot_info.h @@ -33,7 +33,7 @@ class OneHotInfo : public OperatorInfo { public: OneHotInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~OneHotInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc index 561628dbb2..23b6a5190a 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc @@ -1035,11 +1035,12 @@ Status OperatorInfo::SetCostUnderStrategyBase(const StrategyPtr& strategy) { return FAILED; } int32_t stage_id = strategy->GetInputStage(); - double computation_cost = cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); - double communication_cost = cost()->GetCommCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); + double computation_cost = + operator_cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); + double communication_cost = operator_cost()->GetCommCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); std::shared_ptr result = std::make_shared(computation_cost, communication_cost); result->communication_without_parameter_ = - cost()->GetForwardCommCost(inputs_tensor_info_, 
outputs_tensor_info_, stage_id); + operator_cost()->GetForwardCommCost(inputs_tensor_info_, outputs_tensor_info_, stage_id); result->communication_with_partial_para_ = result->communication_without_parameter_ + COST_MODEL_GAMMA * (communication_cost - result->communication_without_parameter_); @@ -1096,7 +1097,38 @@ Status OperatorInfo::set_is_parameter(const std::vector& is_parameter) { return FAILED; } is_parameter_ = is_parameter; - cost()->set_is_parameter(is_parameter); + operator_cost()->set_is_parameter(is_parameter); + return SUCCESS; +} + +Status OperatorInfo::CalculateMemoryCost() { + // First, set the 'is_parameter_involve_' and 'is_output_parameter_involve_' into OperatorCost, which are necessary to + // calculate memory cost. + if (is_parameter_involve_.size() != is_parameter_.size()) { + MS_LOG(ERROR) << "'is_parameter_' does not have the same number of input size of 'is_parameter_involve_'."; + return FAILED; + } + operator_cost()->set_is_parameter_involve(is_parameter_involve_); + operator_cost()->set_output_parameter_involve(is_output_parameter_involve_); + // Set the memory cost in the 'strategy_cost_' + for (auto& swc : strategy_cost_) { + auto mem_cost = operator_cost()->GetMemoryCost(swc->inputs_ptr, swc->outputs_ptr); + swc->cost_list[0]->memory_with_reuse_ = mem_cost; + } + return SUCCESS; +} + +Status OperatorInfo::CorrectMemoryCost(size_t input_index) { + for (auto& swc : strategy_cost_) { + double parameter_mem_cost = ListProduct(swc->inputs_ptr[input_index].slice_shape()) * + static_cast(operator_cost()->inputs_type_lengths()[input_index]); + swc->cost_list[0]->memory_with_reuse_ -= parameter_mem_cost; + if (swc->cost_list[0]->memory_with_reuse_ < 0) { + MS_LOG(ERROR) << "The memory cost after correction is: " << swc->cost_list[0]->memory_with_reuse_ + << ", the parameter memory cost is: " << parameter_mem_cost; + return FAILED; + } + } return SUCCESS; } @@ -1193,7 +1225,7 @@ Status OperatorInfo::SetInputAndOutputTypeLength(const 
std::vector& inpu } inputs_type_lengths_ = input_lengths; outputs_type_lengths_ = output_lengths; - cost()->SetInputAndOutputTypeLength(input_lengths, output_lengths); + operator_cost()->SetInputAndOutputTypeLength(input_lengths, output_lengths); return SUCCESS; } @@ -1221,7 +1253,7 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra } double OperatorInfo::GetForwardMemoryCostFromCNode() { - return cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0); + return operator_cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0); } } // namespace parallel diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.h b/mindspore/ccsrc/parallel/ops_info/operator_info.h index 5fe89a1602..19e0eeeda1 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.h +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.h @@ -60,7 +60,7 @@ class OperatorInfo { outputs_shape_(std::move(outputs_shape)), attrs_(std::move(attrs)), is_alive_(true), - cost_(cost), + operator_cost_(cost), outputs_type_() { std::vector not_parameteter(inputs_shape_.size(), false); is_parameter_ = not_parameteter; @@ -83,8 +83,8 @@ class OperatorInfo { // Given the stage_id (which indicates the number of devices), // generate all strategies for this operator virtual Status GenerateStrategies(int32_t stage_id) = 0; - const OperatorCostPtr& cost() const { return cost_; } - void set_cost(const OperatorCostPtr& cost) { cost_ = cost; } + const OperatorCostPtr& operator_cost() const { return operator_cost_; } + void set_cost(const OperatorCostPtr& cost) { operator_cost_ = cost; } virtual Status SetCostUnderStrategy(const StrategyPtr& strategy) = 0; virtual std::shared_ptr>> GenerateBatchStrategies(); @@ -98,7 +98,7 @@ class OperatorInfo { std::vector> GetStrategyCost() { return strategy_cost_; } // When the input of a operator contains WEIGHT or a output from other operators involving WEIGHT, then these input // should stay in 
memory until it is used in the backward phase, which is kept in memory at the end of forward phase. - Status CalculateMemoryCost() const { return SUCCESS; } + Status CalculateMemoryCost(); int ComputeOpAndPrevEdgeParameterInvolved(); ForwardOp forward_op() const { return forward_op_; } @@ -125,7 +125,7 @@ class OperatorInfo { void ReplaceSuccEdge(const std::shared_ptr& op, const std::shared_ptr& new_edge); void ReplacePreEdges(const std::shared_ptr& op, const std::shared_ptr& new_edge); void ReplaceSuccEdges(const std::shared_ptr& op, const std::shared_ptr& new_edge); - std::vector GetOutputTypeLengths() const { return cost()->outputs_type_lengths(); } + std::vector GetOutputTypeLengths() const { return operator_cost()->outputs_type_lengths(); } void SetSelectedStrategyAndCost(const StrategyPtr& s_strategy, const CostPtr& cost) { selected_strategy_ = s_strategy; selected_cost_ = cost; @@ -142,6 +142,10 @@ class OperatorInfo { void set_strategy(const StrategyPtr& strategy) { strategy_ = strategy; } void set_refkey_parameter_name(std::string p_name) { refkey_parameter_name_ = std::move(p_name); } const std::string& refkey_parameter_name() const { return refkey_parameter_name_; } + // When the output of a Parameter (require_grad) being used by multiple operators, the Parameter's cost is calculated + // multiple times. This method is to correct this, and makes the cost is calulated only once. 
+ Status CorrectMemoryCost(size_t input_index); + int is_output_parameter_involve() const { return is_output_parameter_involve_; } int used_devices() const { return used_devices_; } // needed by rec_parser void set_type(const std::string& type) { type_ = type; } @@ -234,7 +238,7 @@ class OperatorInfo { int32_t used_devices_ = -1; private: - OperatorCostPtr cost_; + OperatorCostPtr operator_cost_; std::vector outputs_type_; }; diff --git a/mindspore/ccsrc/parallel/ops_info/prelu_info.h b/mindspore/ccsrc/parallel/ops_info/prelu_info.h index bdfb11550b..396407c1ee 100644 --- a/mindspore/ccsrc/parallel/ops_info/prelu_info.h +++ b/mindspore/ccsrc/parallel/ops_info/prelu_info.h @@ -35,7 +35,7 @@ class PReLUInfo : public OperatorInfo { public: PReLUInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~PReLUInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc index aa64e72d05..44eab20588 100644 --- a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.cc @@ -109,7 +109,7 @@ Status ReduceMethod::GetAttrs() { } cross_batch_ = cross_batch_iter->second->cast()->value(); } - auto reducemethodcost = std::dynamic_pointer_cast(cost()); + auto reducemethodcost = std::dynamic_pointer_cast(operator_cost()); if (reducemethodcost == nullptr) { MS_LOG(ERROR) << "Cost cast to ReduceMethodCostPtr failed!"; return FAILED; diff --git a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h index c2ddbc87ce..2911bdfe10 100644 --- 
a/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h +++ b/mindspore/ccsrc/parallel/ops_info/reduce_method_info.h @@ -34,7 +34,7 @@ class ReduceMethod : public OperatorInfo { public: ReduceMethod(const std::string &name, const Shapes &inputs_shape, const Shapes &outputs_shape, const PrimitiveAttrs &attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(true)) {} ~ReduceMethod() override = default; Status Init(const StrategyPtr &strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/reshape_info.h b/mindspore/ccsrc/parallel/ops_info/reshape_info.h index 38192a5d01..3864d2b93d 100644 --- a/mindspore/ccsrc/parallel/ops_info/reshape_info.h +++ b/mindspore/ccsrc/parallel/ops_info/reshape_info.h @@ -36,7 +36,7 @@ class ReshapeInfo : public OperatorInfo { public: ReshapeInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()), + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)), dev_num_(0), input_layout_set_flag_(false), output_layout_set_flag_(false) {} diff --git a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h index cf850683a6..3682fe334f 100644 --- a/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h +++ b/mindspore/ccsrc/parallel/ops_info/tmp_identity_info.h @@ -34,7 +34,7 @@ class TmpIdentityInfo : public OperatorInfo { public: TmpIdentityInfo(const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs, const std::string& name = IDENTITY_INFO) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~TmpIdentityInfo() override = default; Status Init(const StrategyPtr& 
strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/transpose_info.h b/mindspore/ccsrc/parallel/ops_info/transpose_info.h index 2714b352b6..e4e2b90b7b 100644 --- a/mindspore/ccsrc/parallel/ops_info/transpose_info.h +++ b/mindspore/ccsrc/parallel/ops_info/transpose_info.h @@ -35,7 +35,7 @@ class TransposeInfo : public OperatorInfo { public: TransposeInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~TransposeInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; diff --git a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h index b958adeabe..bf17e678a3 100644 --- a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h +++ b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h @@ -32,7 +32,7 @@ class VirtualDatasetInfo : public OperatorInfo { public: VirtualDatasetInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, const PrimitiveAttrs& attrs) - : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared()) {} + : OperatorInfo(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~VirtualDatasetInfo() override = default; Status Init(const StrategyPtr& strategy) override; Status InitForCostModel(const StrategyPtr& strategy) override; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index ae60454972..a42ce612fb 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -874,11 +874,15 @@ Status ParallelStrategySearch(const std::vector &all_nodes, const Fu if 
(entire_costgraph->ComputeOpsAndEdgesParameterInvolved() == SUCCESS) { // Calculate operators' memory usage if (entire_costgraph->CalculateOpsMemoryCost() != SUCCESS) { - MS_LOG(EXCEPTION) << "Correcting operators' cost for memory reuse failed."; + MS_LOG(EXCEPTION) << "Calculating operators' cost for memory cost failed."; } // Calculate edges' memory usage if (entire_costgraph->CalculateEdgesMemoryCost() != SUCCESS) { - MS_LOG(EXCEPTION) << "Correcting edges' cost for memory reuse failed."; + MS_LOG(EXCEPTION) << "Calculating edges' cost for memory cost failed."; + } + // Correct memory usage caused by TmpIdentity + if (entire_costgraph->CorrectOpsMemoryCost() != SUCCESS) { + MS_LOG(EXCEPTION) << "Correcting operators' cost for memory cost failed."; } } else { MS_LOG(EXCEPTION) << "Computing operators' parameter_involved failed."; diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc index 55e6a300e0..be5eaa40ba 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc @@ -159,6 +159,7 @@ Status TensorRedistribution::ComputeCost() { backward_comm_cost_ += prod; comm_cost_ += 2.0 * prod; computation_cost_ += prod; + memory_cost_ += prod; } else if (str == CONCAT_BY_AXIS) { // communication cost = all_gather + reduce_scatter = before_slice_shape + after_slice_shape // computation cost = before_slice_shape @@ -175,20 +176,25 @@ Status TensorRedistribution::ComputeCost() { if (concat_dim == 0) { // computation cost = all_gather computation_cost_ += prod; + memory_cost_ += prod * dev_num; } else { // computation cost = all_gather + split + concat computation_cost_ += (prod + prod * dev_num + prod * dev_num); + memory_cost_ += (prod * dev_num + prod * dev_num + prod); } } else { // There is only computation cost in SplitByAxis. 
// computation cost = before_slice_shape computation_cost_ += prod; + // This addtion may be erroneous + memory_cost_ += prod; } } if (reshape_flag()) { Shape prev_slice_shape = from_.slice_shape().array(); double prev_prod = std::accumulate(prev_slice_shape.begin(), prev_slice_shape.end(), 1, std::multiplies()); computation_cost_ += 2.0 * prev_prod; + memory_cost_ += 2.0 * prev_prod; } return Status::SUCCESS; } diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h index e933b9b8eb..7e2b3682e6 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h @@ -42,6 +42,7 @@ class TensorRedistribution { forward_comm_cost_(0.0), backward_comm_cost_(0.0), computation_cost_(0.0), + memory_cost_(0.0), construct_op_flag_(construct_op_flag), keep_reshape_(keep_reshape) {} Status Init(const TensorLayout& from, const TensorLayout& to, const RankList& dev_list); @@ -54,6 +55,7 @@ class TensorRedistribution { double computation_cost() const { return computation_cost_; } double forward_comm_cost() const { return forward_comm_cost_; } double backward_comm_cost() const { return backward_comm_cost_; } + double memory_cost() const { return memory_cost_; } private: Status InferReshape(const TensorLayout& from_layout, const TensorLayout& to_layout, @@ -72,7 +74,12 @@ class TensorRedistribution { double forward_comm_cost_; // backward communication cost double backward_comm_cost_; + // computation_cost models the time spending on computing in this tensor redistribution, which is calculated by the + // inputs. double computation_cost_; + // memory_cost models the PEAK memory cost in a traning iteration contributed by this tensor redistribution, which is + // calculated by the outputs. 
+ double memory_cost_; bool construct_op_flag_; bool keep_reshape_; }; diff --git a/tests/ut/cpp/parallel/ops_info/activation_test.cc b/tests/ut/cpp/parallel/ops_info/activation_test.cc index a8f8425ae9..9af7203799 100644 --- a/tests/ut/cpp/parallel/ops_info/activation_test.cc +++ b/tests/ut/cpp/parallel/ops_info/activation_test.cc @@ -84,9 +84,9 @@ TEST_F(TestActivation, test_activation_strategies) { act_ptr_->InitForCostModel(sp); std::vector inputs_info = act_ptr_->inputs_tensor_info(); std::vector outputs_info = act_ptr_->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(act_ptr_->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(act_ptr_->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), cost.computation_cost_); - ASSERT_DOUBLE_EQ(act_ptr_->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(act_ptr_->operator_cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), cost.communication_cost_); } } @@ -109,9 +109,9 @@ TEST_F(TestActivation, test_softmax_strategies) { soft_ptr_->InitForCostModel(sp); std::vector inputs_info = soft_ptr_->inputs_tensor_info(); std::vector outputs_info = soft_ptr_->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(soft_ptr_->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(soft_ptr_->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), cost.computation_cost_); - ASSERT_DOUBLE_EQ(soft_ptr_->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(soft_ptr_->operator_cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), cost.communication_cost_); } } diff --git a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc index 2fece098e8..f710f51265 100644 --- a/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc +++ 
b/tests/ut/cpp/parallel/ops_info/matmul_info_test.cc @@ -569,7 +569,7 @@ TEST_F(TestMatmulInfo, test_GenerateStrategies1) { matmul1->InitForCostModel(sp); std::vector inputs_info = matmul1->inputs_tensor_info(); std::vector outputs_info = matmul1->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(matmul1->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(matmul1->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), cost.computation_cost_); break; } @@ -599,7 +599,7 @@ TEST_F(TestMatmulInfo, test_GenerateStrategies2) { TensorInfo replica_input1_info(tly, input1_shape, input1_slice_shape); replica_inputs_info.push_back(replica_input1_info); - ASSERT_DOUBLE_EQ(matmul3->cost()->GetComputationCost(replica_inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(matmul3->operator_cost()->GetComputationCost(replica_inputs_info, outputs_info, sp->GetInputStage()), cost.computation_cost_); break; } diff --git a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc index 8c956328a7..42d292c605 100644 --- a/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tensor_add_info_test.cc @@ -188,11 +188,11 @@ TEST_F(TestTensorAddInfo, GenerateStrategies) { tensor_add->InitForCostModel(sp); std::vector inputs_info = tensor_add->inputs_tensor_info(); std::vector outputs_info = tensor_add->outputs_tensor_info(); - double memory_cost0 = tensor_add->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); + double memory_cost0 = tensor_add->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); double memory_cost1 = cost.computation_cost_; bool memory = memory_cost0 - memory_cost1 <= 1.0; - double comm_cost0 = tensor_add->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()); + double comm_cost0 = tensor_add->operator_cost()->GetCommCost(inputs_info, 
outputs_info, sp->GetInputStage()); double comm_cost1 = cost.communication_cost_; bool comm = comm_cost0 - comm_cost1 <= 1.0; @@ -210,11 +210,11 @@ TEST_F(TestTensorAddInfo, GenerateStrategies1) { tensor_add1->InitForCostModel(sp); std::vector inputs_info = tensor_add1->inputs_tensor_info(); std::vector outputs_info = tensor_add1->outputs_tensor_info(); - double memory_cost0 = tensor_add1->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); + double memory_cost0 = tensor_add1->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()); double memory_cost1 = cost.computation_cost_; bool memory = memory_cost0 - memory_cost1 <= 1.0; - double comm_cost0 = tensor_add1->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()); + double comm_cost0 = tensor_add1->operator_cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()); double comm_cost1 = cost.communication_cost_; bool comm = comm_cost0 - comm_cost1 <= 1.0; diff --git a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc index 3971a2b471..eabac51e17 100644 --- a/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc +++ b/tests/ut/cpp/parallel/ops_info/tmpidentity_test.cc @@ -145,9 +145,9 @@ TEST_F(TestTmpIdentityInfo, test_generate_strategies) { identity_ptr->Init(sp); std::vector inputs_info = identity_ptr->inputs_tensor_info(); std::vector outputs_info = identity_ptr->outputs_tensor_info(); - ASSERT_DOUBLE_EQ(identity_ptr->cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(identity_ptr->operator_cost()->GetComputationCost(inputs_info, outputs_info, sp->GetInputStage()), cost.computation_cost_); - ASSERT_DOUBLE_EQ(identity_ptr->cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), + ASSERT_DOUBLE_EQ(identity_ptr->operator_cost()->GetCommCost(inputs_info, outputs_info, sp->GetInputStage()), cost.communication_cost_); } } From 
46c590b787ba5e85f71ef393adbf50fc818bb635 Mon Sep 17 00:00:00 2001 From: Hoai Linh Tran h00472437 Date: Thu, 2 Apr 2020 17:16:47 -0400 Subject: [PATCH 208/367] Add new flag to watch for the need of Renormalize pass when parsing the python input program Signed-off-by: Hoai Linh Tran h00472437 Add optimizer checking: For a group of passes in "optimizer", if flagged then it will check and collect the newly generated nodes without types (i.e. abstract() == nullptr). Before calling Renormalize(), the optimizer will check if there is any node needed retyping. If not the Renormalize pass will not be called. Add checking for non-null abstract but still needs renorm; Add flags to Substitution to help watching Renormalize Simpler pass result checker, change Bool to Enum type --- mindspore/ccsrc/optimizer/irpass.cc | 7 ++++-- mindspore/ccsrc/optimizer/opt.cc | 24 +++++++++++++------ mindspore/ccsrc/optimizer/opt.h | 20 ++++++++++++---- mindspore/ccsrc/optimizer/optimizer.h | 34 ++++++++++++++++++++++++--- mindspore/ccsrc/pipeline/pass.cc | 4 ++-- 5 files changed, 70 insertions(+), 19 deletions(-) diff --git a/mindspore/ccsrc/optimizer/irpass.cc b/mindspore/ccsrc/optimizer/irpass.cc index 96d88f6e61..be9c8f787a 100644 --- a/mindspore/ccsrc/optimizer/irpass.cc +++ b/mindspore/ccsrc/optimizer/irpass.cc @@ -52,7 +52,8 @@ OptimizeIRPassLib::OptimizeIRPassLib() { special_op_eliminate_ = MakeSubstitution(SpecialOpEliminater(), "special_op_eliminate", {prim::kPrimInsertGradientOf, prim::kPrimPrintShapeType, prim::kPrimGetRefKey, prim::kPrimMirror, prim::kPrimVirtualDiv}); - zero_like_fill_zero_ = MakeSubstitution(ZeroLikeFillZero(), "zero_like_fill_zero", prim::kPrimZerosLikeTensor); + zero_like_fill_zero_ = + MakeSubstitution(ZeroLikeFillZero(), "zero_like_fill_zero", prim::kPrimZerosLikeTensor, opt::FORCE_RENORM); // ops eliminate item_tuple_eliminate_ = @@ -81,7 +82,9 @@ OptimizeIRPassLib::OptimizeIRPassLib() { make_ref_eliminate_ = MakeSubstitution(MakeRefEliminater(), 
"make_ref_eliminate", prim::kPrimMakeRef); get_make_ref_eliminate_ = MakeSubstitution(GetMakeRefEliminater(), "get_make_ref_eliminate", {prim::kPrimGetRefKey, prim::kPrimGetRefValue}); - replace_refkey_by_param_ = MakeSubstitution(ReplaceRefkeyByParam(), "replace_refkey_by_param", IsValueNode); + + replace_refkey_by_param_ = + MakeSubstitution(ReplaceRefkeyByParam(), "replace_refkey_by_param", IsValueNode, opt::FORCE_RENORM); replace_old_param_ = MakeSubstitution(ReplaceOldParam(), "replace_old_param", IsParam); // Gradient transforms diff --git a/mindspore/ccsrc/optimizer/opt.cc b/mindspore/ccsrc/optimizer/opt.cc index a0faa2bf46..24339ddb84 100644 --- a/mindspore/ccsrc/optimizer/opt.cc +++ b/mindspore/ccsrc/optimizer/opt.cc @@ -31,14 +31,14 @@ namespace mindspore { /* namespace to support opt */ namespace opt { -SubstitutionPtr MakeSubstitution(const TransformFuncType& transform, const std::string& name, - const PrimitivePtr& prim) { +SubstitutionPtr MakeSubstitution(const TransformFuncType& transform, const std::string& name, const PrimitivePtr& prim, + const RenormAction& renorm_action) { auto fn = [prim](const AnfNodePtr& node) -> bool { return IsPrimitiveCNode(node, prim); }; - return std::make_shared(transform, name, fn); + return std::make_shared(transform, name, fn, renorm_action); } SubstitutionPtr MakeSubstitution(const TransformFuncType& transform, const std::string& name, - const std::vector& prims) { + const std::vector& prims, const RenormAction& renorm_action) { auto fn = [prims](const AnfNodePtr& node) -> bool { if (!node->isa()) { return false; @@ -52,12 +52,12 @@ SubstitutionPtr MakeSubstitution(const TransformFuncType& transform, const std:: return false; }; - return std::make_shared(transform, name, fn); + return std::make_shared(transform, name, fn, renorm_action); } SubstitutionPtr MakeSubstitution(const TransformFuncType& transform, const std::string& name, - const PredicateFuncType& predicate) { - return std::make_shared(transform, name, 
predicate); + const PredicateFuncType& predicate, const RenormAction& renorm_action) { + return std::make_shared(transform, name, predicate, renorm_action); } AnfNodePtr Substitution::operator()(const OptimizerPtr& optimizer, const AnfNodePtr& node) const { @@ -74,6 +74,16 @@ AnfNodePtr Substitution::operator()(const OptimizerPtr& optimizer, const AnfNode } } #endif + if (optimizer != nullptr && optimizer->is_watch_renormalize() && result != nullptr) { + if (renorm_action_ == FORCE_RENORM) { + optimizer->add_node_to_renormalize(result); + } else { + // renorm_action_ is CHECK_RENORM + if (result->abstract() == nullptr) { + optimizer->add_node_to_renormalize(result); + } + } + } return result; } diff --git a/mindspore/ccsrc/optimizer/opt.h b/mindspore/ccsrc/optimizer/opt.h index bd548645f4..24191998e8 100644 --- a/mindspore/ccsrc/optimizer/opt.h +++ b/mindspore/ccsrc/optimizer/opt.h @@ -36,24 +36,34 @@ using OptimizerWeakPtr = std::weak_ptr; using PredicateFuncType = std::function; using TransformFuncType = std::function; +// Define the interaction mode between an Optimize pass and Renormalize pass +// FORCE_RENORM: if the pass modified the graph then the next Renormalize will be executed +// CHECK_RENORM: check if the new node is un-typed to decide if the next Renormalize will be executted +enum RenormAction : int { FORCE_RENORM = 0, CHECK_RENORM }; + class Substitution { public: TransformFuncType transform_{nullptr}; std::string name_; PredicateFuncType predicate_{nullptr}; - explicit Substitution(const TransformFuncType &transform, const std::string &name, const PredicateFuncType &predicate) - : transform_(transform), name_(name), predicate_(predicate) {} + // an enum to mark this Substitution relation to renormalize pass + RenormAction renorm_action_; + explicit Substitution(const TransformFuncType &transform, const std::string &name, const PredicateFuncType &predicate, + const RenormAction &renorm_action) + : transform_(transform), name_(name), 
predicate_(predicate), renorm_action_(renorm_action) {} ~Substitution() = default; AnfNodePtr operator()(const OptimizerPtr &optimizer, const AnfNodePtr &node) const; }; using SubstitutionPtr = std::shared_ptr; -SubstitutionPtr MakeSubstitution(const TransformFuncType &transform, const std::string &name, const PrimitivePtr &prim); +SubstitutionPtr MakeSubstitution(const TransformFuncType &transform, const std::string &name, const PrimitivePtr &prim, + const RenormAction &action_renorm = CHECK_RENORM); SubstitutionPtr MakeSubstitution(const TransformFuncType &transform, const std::string &name, - const std::vector &prims); + const std::vector &prims, + const RenormAction &action_renorm = CHECK_RENORM); SubstitutionPtr MakeSubstitution(const TransformFuncType &transform, const std::string &name, - const PredicateFuncType &predicate); + const PredicateFuncType &predicate, const RenormAction &action_renorm = CHECK_RENORM); class SubstitutionList { public: diff --git a/mindspore/ccsrc/optimizer/optimizer.h b/mindspore/ccsrc/optimizer/optimizer.h index d821e826cf..f67466efba 100644 --- a/mindspore/ccsrc/optimizer/optimizer.h +++ b/mindspore/ccsrc/optimizer/optimizer.h @@ -87,11 +87,12 @@ using OptPassGroupMap = std::vector>; class Optimizer : public std::enable_shared_from_this { public: Optimizer(const std::string &name, const pipeline::ResourceBasePtr &resource_ptr) - : name_(name), resource_(resource_ptr), run_only_once_(false) {} + : name_(name), resource_(resource_ptr), run_only_once_(false), is_watch_renormalize_(false) {} virtual ~Optimizer() = default; void Init(const OptPassGroupMap &passes, bool run_only_once) { run_only_once_ = run_only_once; + is_watch_renormalize_ = false; for (auto &iter : passes) { const std::string &name = iter.first; @@ -118,9 +119,13 @@ class Optimizer : public std::enable_shared_from_this { } static std::shared_ptr MakeOptimizer(const std::string &name, const pipeline::ResourceBasePtr resource_ptr, - const OptPassGroupMap &passes, bool 
run_only_once = false) { + const OptPassGroupMap &passes, bool run_only_once = false, + bool watch_renormalize = false) { OptimizerPtr optimizer = std::make_shared(name, resource_ptr); optimizer->Init(passes, run_only_once); + if (watch_renormalize) { + optimizer->enable_watch_renormalize(); + } return optimizer; } @@ -138,7 +143,16 @@ class Optimizer : public std::enable_shared_from_this { if (opt.is_renormalize()) { auto resource_ptr = std::dynamic_pointer_cast(resource_); if (resource_ptr != nullptr) { - func_graph = pipeline::Renormalize(resource_ptr, func_graph, args_spec); + if (is_watch_renormalize_) { + if (untyped_nodes_.size() > 0) { + func_graph = pipeline::Renormalize(resource_ptr, func_graph, args_spec); + clear_untyped_nodes(); + } else { + MS_LOG(INFO) << "Optimizer::step: Skipping Renormalize because untyped_nodes_ is empty."; + } + } else { + func_graph = pipeline::Renormalize(resource_ptr, func_graph, args_spec); + } } } else if (opt(func_graph, shared_from_this())) { changes = true; @@ -180,12 +194,26 @@ class Optimizer : public std::enable_shared_from_this { const std::string name() const { return name_; } + void add_node_to_renormalize(AnfNodePtr anode) { + if (std::find(untyped_nodes_.begin(), untyped_nodes_.end(), anode) == untyped_nodes_.end()) { + untyped_nodes_.push_back(anode); + } + } + + void clear_untyped_nodes() { untyped_nodes_.clear(); } + + void enable_watch_renormalize() { is_watch_renormalize_ = true; } + void disable_watch_renormalize() { is_watch_renormalize_ = false; } + bool is_watch_renormalize() { return is_watch_renormalize_; } + private: const std::string name_; pipeline::ResourceBasePtr resource_; std::vector passes_; std::vector pass_names_; bool run_only_once_; + std::vector untyped_nodes_; + bool is_watch_renormalize_; }; } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/pipeline/pass.cc b/mindspore/ccsrc/pipeline/pass.cc index 9248590f27..b3eda4c37b 100644 --- a/mindspore/ccsrc/pipeline/pass.cc 
+++ b/mindspore/ccsrc/pipeline/pass.cc @@ -185,8 +185,8 @@ void InitOpt(const ResourcePtr& res) { if (g_pass_opts.size() == 0) { opt::irpass::OptimizeIRPassLib irpass; g_pass_opts["opt_a"] = Optimizer::MakeOptimizer("opt_a", res, GetOptPassesA(irpass)); - g_pass_opts["opt_b"] = Optimizer::MakeOptimizer("opt_b", res, GetOptPassesB(irpass)); - g_pass_opts["opt_control"] = Optimizer::MakeOptimizer("opt_control", res, GetControlPhases(irpass)); + g_pass_opts["opt_b"] = Optimizer::MakeOptimizer("opt_b", res, GetOptPassesB(irpass), false, true); + g_pass_opts["opt_control"] = Optimizer::MakeOptimizer("opt_control", res, GetControlPhases(irpass), false, true); g_pass_opts["opt_prepare"] = Optimizer::MakeOptimizer("opt_prepare", res, GetPreparePhases(irpass)); } } From 5d225f934f43662b7244ce9cfa033215a0b6db00 Mon Sep 17 00:00:00 2001 From: lianliguang Date: Wed, 8 Apr 2020 17:42:56 +0800 Subject: [PATCH 209/367] change the padding strategy & refactor insert transdata --- mindspore/ccsrc/common/trans.cc | 132 ++++++++---- mindspore/ccsrc/common/trans.h | 6 +- .../device/ascend/ascend_device_address.cc | 4 +- .../device/ascend/ascend_kernel_runtime.cc | 5 +- .../device/ascend/kernel_select_ascend.cc | 13 +- mindspore/ccsrc/device/kernel_adjust.cc | 4 +- mindspore/ccsrc/device/kernel_info.h | 4 + mindspore/ccsrc/device/kernel_runtime.cc | 20 +- mindspore/ccsrc/kernel/kernel_build_info.cc | 30 ++- mindspore/ccsrc/kernel/kernel_build_info.h | 8 +- .../pre_activate/ascend/ascend_helper.cc | 189 ++++++----------- .../ccsrc/pre_activate/ascend/ascend_helper.h | 2 +- .../format_type/deal_ref_trans_and_cast.cc | 6 +- .../ascend/ir_fusion/transdata_split.cc | 198 +++++++++--------- .../ccsrc/session/anf_runtime_algorithm.cc | 104 +++++---- .../ccsrc/session/anf_runtime_algorithm.h | 5 + mindspore/ccsrc/session/ascend_session.cc | 5 +- mindspore/ccsrc/session/kernel_graph.cc | 18 +- mindspore/ccsrc/session/session_basic.cc | 13 +- mindspore/ccsrc/utils/utils.h | 4 +- 
...er_norm_beta_gamma_backprop_fusion_test.cc | 2 + .../cpp/session/anf_runtime_algorithm_test.cc | 26 +-- tests/ut/cpp/session/kernel_graph_test.cc | 6 +- 23 files changed, 425 insertions(+), 379 deletions(-) diff --git a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc index 380c51bcf9..a2b9f7ef24 100644 --- a/mindspore/ccsrc/common/trans.cc +++ b/mindspore/ccsrc/common/trans.cc @@ -20,6 +20,8 @@ #include #include "./securec.h" #include "common/utils.h" +#include "session/anf_runtime_algorithm.h" +#include "kernel/kernel.h" #include "device/convert_tensor_utils.h" #include "utils/convert_utils.h" #include "utils/log_adapter.h" @@ -27,6 +29,33 @@ namespace mindspore { namespace trans { +namespace { +std::vector PaddingShapeTo4dByDefault(const std::vector &shape) { + std::vector shape_4d(4, 1); + switch (shape.size()) { + case 0: + return shape_4d; + case 1: + shape_4d[1] = shape[0]; + break; + case 2: + shape_4d[1] = shape[0]; + shape_4d[2] = shape[1]; + break; + case 3: + shape_4d[1] = shape[0]; + shape_4d[2] = shape[1]; + shape_4d[3] = shape[2]; + break; + case 4: + std::copy(shape.begin(), shape.end(), shape_4d.begin()); + break; + default: + MS_LOG(EXCEPTION) << "Unexpect shape size = " << shape.size(); + } + return shape_4d; +} +} // namespace const size_t kNchwDims = 4; const std::map type_map = {{kNumberTypeBool, 1}, {kNumberTypeInt, 4}, {kNumberTypeInt8, 1}, {kNumberTypeInt16, 2}, {kNumberTypeInt32, 4}, {kNumberTypeInt64, 8}, @@ -154,38 +183,64 @@ size_t TypeIdSize(const TypeId data_type) { return unsupported_type_error; } -std::vector TransShapeTo4d(const std::vector &shape) { +bool IsNeedPadding(const std::string &format, const size_t shape_size) { + if (shape_size == 0) { + return false; + } + if (format == kOpFormat_DEFAULT || format == kOpFormat_FRAC_NZ) { + return false; + } else if (shape_size < 4) { + return true; + } + return false; +} + +std::vector GetRuntimePaddingShape(const AnfNodePtr &node, size_t index) { + std::vector 
shape; + std::vector host_shape; + if (node->isa()) { + auto value_node = node->cast(); + auto node_value = value_node->value(); + auto tensor = node_value->cast(); + if (tensor == nullptr) { + MS_LOG(EXCEPTION) << " the node[ " << node->DebugString() << "]'s cannot convert "; + } + shape = tensor->shape(); + (void)std::transform(shape.begin(), shape.end(), std::back_inserter(host_shape), IntToSize); + if (host_shape.empty()) { + host_shape.push_back(1); + } + } else { + host_shape = AnfAlgo::GetOutputInferShape(node, index); + } + if (trans::IsNeedPadding(AnfAlgo::GetOutputFormat(node, 0), host_shape.size())) { + host_shape = trans::PaddingShapeTo4d(host_shape, AnfAlgo::GetOutputReshapeType(node, 0)); + } + std::transform(host_shape.begin(), host_shape.end(), std::back_inserter(shape), SizeToInt); + return shape; +} + +std::vector PaddingShapeTo4d(const std::vector &shape, const std::vector &padding_axis) { + if (padding_axis.empty() || shape.size() != padding_axis.size()) { + return PaddingShapeTo4dByDefault(shape); + } std::vector shape_4d(4, 1); - switch (shape.size()) { - case 0: - break; - case 1: - shape_4d[1] = shape[0]; - break; - case 2: - shape_4d[0] = shape[0]; - shape_4d[1] = shape[1]; - break; - case 3: - MS_LOG(EXCEPTION) << "Unexpected shape size = 3,it should has a default format"; - case 4: - for (size_t i = 0; i < 4; ++i) { - shape_4d[i] = shape[i]; - } - break; - default: - MS_LOG(EXCEPTION) << "Unexpected shape size = " << shape.size(); + for (size_t index = 0; index < padding_axis.size(); index++) { + shape_4d[padding_axis[index]] = shape[index]; } return shape_4d; } std::vector TransShapeToDevice(const std::vector &shape, const std::string &format) { + if (format == kOpFormat_ND || format == kOpFormat_DEFAULT) { + return shape; + } + auto temp_shape = shape; std::vector device_shape; if (format == kOpFormat_FRAC_NZ) { if (shape.size() < 2) { - MS_EXCEPTION(NotSupportError) << "Format " << format << " is not support shape " << shape.size(); - 
} - if (shape.size() > 2) { + MS_LOG(EXCEPTION) << "Format" << format << " is not support shape " << shape.size(); + } else { (void)std::copy(shape.begin(), shape.end() - 2, std::back_inserter(device_shape)); } auto h1 = (shape[shape.size() - 2] - 1) / kCubeSize + 1; @@ -197,35 +252,36 @@ std::vector TransShapeToDevice(const std::vector &shape, const s return device_shape; } if (shape.size() != 4) { - MS_LOG(EXCEPTION) << "shape_4d size should be 4"; + MS_LOG(WARNING) << "Get Device Shape using a shape size is less than 4 ,should be Padding shape by Default firstly"; + temp_shape = PaddingShapeTo4dByDefault(shape); } if (format == kOpFormat_NC1HWC0) { - size_t C1 = (shape[1] + kCubeSize - 1) / kCubeSize; + size_t C1 = (temp_shape[1] + kCubeSize - 1) / kCubeSize; size_t C0 = kCubeSize; - device_shape.push_back(shape[0]); + device_shape.push_back(temp_shape[0]); device_shape.push_back(C1); - device_shape.push_back(shape[2]); - device_shape.push_back(shape[3]); + device_shape.push_back(temp_shape[2]); + device_shape.push_back(temp_shape[3]); device_shape.push_back(C0); return device_shape; } else if (format == kOpFormat_FRAC_Z) { - size_t cout16 = ((shape[0] + kCubeSize - 1) / kCubeSize) * kCubeSize; - size_t cin16 = ((shape[1] + kCubeSize - 1) / kCubeSize) * kCubeSize; - device_shape.push_back(shape[2] * shape[3] * cin16 / kCubeSize); + size_t cout16 = ((temp_shape[0] + kCubeSize - 1) / kCubeSize) * kCubeSize; + size_t cin16 = ((temp_shape[1] + kCubeSize - 1) / kCubeSize) * kCubeSize; + device_shape.push_back(temp_shape[2] * temp_shape[3] * cin16 / kCubeSize); device_shape.push_back(cout16 / kCubeSize); device_shape.push_back(kCubeSize); device_shape.push_back(kCubeSize); return device_shape; } else if (format == kOpFormat_NHWC) { - device_shape.push_back(shape[0]); - device_shape.push_back(shape[2]); - device_shape.push_back(shape[3]); - device_shape.push_back(shape[1]); + device_shape.push_back(temp_shape[0]); + device_shape.push_back(temp_shape[2]); + 
device_shape.push_back(temp_shape[3]); + device_shape.push_back(temp_shape[1]); return device_shape; - } else if (format == kOpFormat_NCHW) { - return shape; } else if (format == kOpFormat_HWCN) { - return {shape[2], shape[3], shape[1], shape[0]}; + return {temp_shape[2], temp_shape[3], temp_shape[1], temp_shape[0]}; + } else if (format == kOpFormat_NCHW) { + return temp_shape; } MS_LOG(EXCEPTION) << "Unexpected format[" << format << "]"; } diff --git a/mindspore/ccsrc/common/trans.h b/mindspore/ccsrc/common/trans.h index cf815985ff..4bebdde814 100644 --- a/mindspore/ccsrc/common/trans.h +++ b/mindspore/ccsrc/common/trans.h @@ -24,6 +24,7 @@ #include #include #include "ir/dtype.h" +#include "kernel/kernel.h" #include "ir/dtype/type.h" namespace mindspore { @@ -49,7 +50,10 @@ size_t TypeIdSize(const TypeId data_type); size_t ShapeSize(const std::vector &shape); size_t CubeSizeByType(const TypeId data_type); -std::vector TransShapeTo4d(const std::vector &shape); +std::vector PaddingShapeTo4d(const std::vector &shape, + const std::vector &padding_axis = {}); +std::vector GetRuntimePaddingShape(const AnfNodePtr &node, size_t index); +bool IsNeedPadding(const std::string &format, const size_t shape_size); std::vector TransShapeToDevice(const std::vector &shape, const std::string &format); bool TransDataType(const TypeIdArgs &args, void *result); bool TransFormat(const FormatArgs &args, void *result); diff --git a/mindspore/ccsrc/device/ascend/ascend_device_address.cc b/mindspore/ccsrc/device/ascend/ascend_device_address.cc index 93f039af0e..69d1918163 100644 --- a/mindspore/ccsrc/device/ascend/ascend_device_address.cc +++ b/mindspore/ccsrc/device/ascend/ascend_device_address.cc @@ -141,7 +141,7 @@ bool AscendDeviceAddress::SyncDeviceToHostAndConvertFormat(const std::vectorisa()) { + continue; + } + if (real_input_node->isa() && !AnfAlgo::IsParameterWeight(real_input_node->cast())) { + continue; + } std::shared_ptr builder = std::make_shared(); // we set special device 
info of a input tensor. diff --git a/mindspore/ccsrc/device/kernel_adjust.cc b/mindspore/ccsrc/device/kernel_adjust.cc index 9a6f48025f..c1588d7d53 100644 --- a/mindspore/ccsrc/device/kernel_adjust.cc +++ b/mindspore/ccsrc/device/kernel_adjust.cc @@ -25,6 +25,7 @@ #include "session/anf_runtime_algorithm.h" #include "utils/context/ms_context.h" +#include "common/trans.h" #include "utils/config_manager.h" #include "common/utils.h" #include "kernel/kernel_build_info.h" @@ -391,7 +392,8 @@ bool KernelAdjust::StepLoadCtrlInputs(const std::shared_ptr &c auto device_address = AnfAlgo::GetMutableOutputAddr(pk_node, 0); MS_EXCEPTION_IF_NULL(device_address); tensor->set_device_address(device_address); - if (!device_address->SyncHostToDevice(tensor->shape(), LongToSize(tensor->data().nbytes()), tensor->data_type(), + if (!device_address->SyncHostToDevice(trans::GetRuntimePaddingShape(pk_node, 0), + LongToSize(tensor->data().nbytes()), tensor->data_type(), tensor->data_c(false))) { MS_LOG(INFO) << "SyncHostToDevice failed."; return false; diff --git a/mindspore/ccsrc/device/kernel_info.h b/mindspore/ccsrc/device/kernel_info.h index 9352158774..33ddda83c9 100644 --- a/mindspore/ccsrc/device/kernel_info.h +++ b/mindspore/ccsrc/device/kernel_info.h @@ -31,6 +31,7 @@ class KernelInfo { public: KernelInfo() { kernel_mod_ = nullptr; + is_feature_map_ = false; select_kernel_build_info_ = nullptr; output_address_list_ = {}; workspace_address_list_ = {}; @@ -45,6 +46,7 @@ class KernelInfo { void set_select_kernel_build_info(const kernel::KernelBuildInfoPtr &select_kernel_build_info) { select_kernel_build_info_ = select_kernel_build_info; } + void SetFeatureMapFlag(bool flag) { is_feature_map_ = flag; } const DeviceAddress *GetOutputAddr(size_t index) const; DeviceAddressPtr GetMutableOutputAddr(size_t index) const; bool OutputAddrExist(size_t index) const; @@ -63,8 +65,10 @@ class KernelInfo { void set_graph_id(uint32_t graph_id) { graph_id_ = graph_id; } uint32_t graph_id() const { 
return graph_id_; } bool operator==(const KernelInfo &other) const; + bool is_feature_map() const { return is_feature_map_; } private: + bool is_feature_map_; kernel::KernelBuildInfoPtr select_kernel_build_info_; std::vector> output_address_list_; std::vector> workspace_address_list_; diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index eebc650347..303f2cc873 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -105,7 +105,7 @@ size_t KernelRuntime::CountNodeDeviceMemorySize(const mindspore::AnfNodePtr &nod std::vector shape = AnfAlgo::GetOutputDeviceShape(node, output_index); auto format = AnfAlgo::GetOutputFormat(node, output_index); if (shape.empty() && format != kOpFormat_DEFAULT) { - shape = trans::TransShapeTo4d(shape); + shape = trans::PaddingShapeTo4d(shape, AnfAlgo::GetOutputReshapeType(node, output_index)); shape = trans::TransShapeToDevice(shape, format); } // scalar's output shape is a empty vector @@ -401,8 +401,9 @@ void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const auto address = CreateDeviceAddress(ptr, node_size, AnfAlgo::GetOutputFormat(value_node, output_idx), output_type_id); MS_EXCEPTION_IF_NULL(address); AnfAlgo::SetOutputAddr(address, output_idx, value_node.get()); - if (!address->SyncHostToDevice(tensor->shape(), tensor_size, tensor->data_type(), tensor->data_c(false))) { - MS_EXCEPTION(NotExistsError) << "kValueNode SyncHostToDevice fail!" << value_node->DebugString() << "node format is" + if (!address->SyncHostToDevice(trans::GetRuntimePaddingShape(value_node, 0), tensor_size, tensor->data_type(), + tensor->data_c(false))) { + MS_EXCEPTION(NotExistsError) << "ValueNode SyncHostToDevice fail!" 
<< value_node->DebugString() << "node format is" << AnfAlgo::GetOutputFormat(value_node, output_idx) << "node dtype is " << AnfAlgo::GetOutputInferDataType(value_node, output_idx); } @@ -421,19 +422,6 @@ void KernelRuntime::AssignStaticMemoryValueNode(session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(node_value); if (node_value->isa()) { AssignValueNodeTensor(value_node, node_value, 0); - } else if (node_value->isa()) { - auto value_tuple = node_value->cast(); - if (value_tuple == nullptr) { - MS_LOG(WARNING) << "value_tuple is null"; - continue; - } - size_t i = 0; - auto value_list = value_tuple->value(); - for (auto value_ptr : value_list) { - if (value_ptr->isa()) { - AssignValueNodeTensor(value_node, value_ptr, i++); - } - } } else if (node_value->isa()) { auto value = GetValue(node_value); size_t tensor_size = value.size(); diff --git a/mindspore/ccsrc/kernel/kernel_build_info.cc b/mindspore/ccsrc/kernel/kernel_build_info.cc index c52f71c136..038c06d8ed 100644 --- a/mindspore/ccsrc/kernel/kernel_build_info.cc +++ b/mindspore/ccsrc/kernel/kernel_build_info.cc @@ -59,30 +59,20 @@ size_t KernelBuildInfo::GetInputNum() const { return inputs_format_.size(); } size_t KernelBuildInfo::GetOutputNum() const { return outputs_format_.size(); } -bool KernelBuildInfo::GetInputReshapeType(size_t input_index, std::vector *reshape_type) const { - MS_EXCEPTION_IF_NULL(reshape_type); - reshape_type->clear(); +std::vector KernelBuildInfo::GetInputReshapeType(size_t input_index) const { if (input_index >= input_reshape_type_.size()) { - MS_LOG(WARNING) << "The index [" << input_index << "] is exceed the number of input node size " - << input_reshape_type_.size(); - return false; + MS_LOG(EXCEPTION) << "The index [" << input_index << "] is exceed the number of input node size " + << input_reshape_type_.size(); } - (void)std::copy(input_reshape_type_[input_index].begin(), input_reshape_type_[input_index].end(), - std::inserter(*reshape_type, (*reshape_type).begin())); - return 
true; + return input_reshape_type_[input_index]; } -bool KernelBuildInfo::GetOutputReshapeType(size_t output_index, std::vector *reshape_type) const { - MS_EXCEPTION_IF_NULL(reshape_type); - reshape_type->clear(); +std::vector KernelBuildInfo::GetOutputReshapeType(size_t output_index) const { if (output_index >= output_reshape_type_.size()) { - MS_LOG(WARNING) << "The index [" << output_index << "] is exceed the number of output node dixr" - << output_reshape_type_.size(); - return false; + MS_LOG(EXCEPTION) << "The index [" << output_index << "] is exceed the number of output node size " + << output_reshape_type_.size(); } - (void)std::copy(output_reshape_type_[output_index].begin(), output_reshape_type_[output_index].end(), - std::inserter(*reshape_type, (*reshape_type).begin())); - return true; + return output_reshape_type_[output_index]; } std::string KernelBuildInfo::ToString() const { @@ -115,6 +105,10 @@ bool KernelBuildInfo::operator==(const KernelBuildInfo &other) const { return !(inputs_device_type_ != other.inputs_device_type_ || outputs_device_type_ != other.outputs_device_type_); } +bool KernelBuildInfo::IsInputDefaultPadding() const { return output_reshape_type_.empty(); } + +bool KernelBuildInfo::IsOutputDefaultPadding() const { return input_reshape_type_.empty(); } + void KernelBuildInfo::KernelBuildInfoBuilder::SetKernelType(const KernelType &kernel_type) { MS_EXCEPTION_IF_NULL(kernel_build_info_); kernel_build_info_->kernel_type_ = kernel_type; diff --git a/mindspore/ccsrc/kernel/kernel_build_info.h b/mindspore/ccsrc/kernel/kernel_build_info.h index 24552e0341..76ebc7a572 100644 --- a/mindspore/ccsrc/kernel/kernel_build_info.h +++ b/mindspore/ccsrc/kernel/kernel_build_info.h @@ -54,9 +54,13 @@ class KernelBuildInfo { TypeId GetOutputDeviceType(size_t output_index) const; - bool GetInputReshapeType(size_t input_index, std::vector *reshape_type) const; + std::vector GetInputReshapeType(size_t input_index) const; - bool GetOutputReshapeType(size_t 
input_index, std::vector *reshape_type) const; + bool IsInputDefaultPadding() const; + + bool IsOutputDefaultPadding() const; + + std::vector GetOutputReshapeType(size_t input_index) const; std::vector GetAllInputFormats() const; diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc index 58c030e79d..490a905a45 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc @@ -18,20 +18,21 @@ #include #include "common/trans.h" #include "common/utils.h" +#include "utils/utils.h" #include "device/kernel_info.h" #include "kernel/oplib/oplib.h" #include "operator/ops.h" #include "session/anf_runtime_algorithm.h" #include "session/kernel_graph.h" #include "utils/context/ms_context.h" -#include "utils/utils.h" namespace mindspore { namespace opt { using KernelBuildInfoBuilder = kernel::KernelBuildInfo::KernelBuildInfoBuilder; namespace { -kernel::KernelBuildInfoPtr CreateKernelBuildInfo(const std::string &input_format, const std::string &output_format, - const AnfNodePtr &node, const kernel::KernelBuildInfo ori_build_info) { +kernel::KernelBuildInfoPtr RefreshKernelBuildInfo(const std::string &input_format, const std::string &output_format, + const AnfNodePtr &node, + const kernel::KernelBuildInfo ori_build_info) { KernelBuildInfoBuilder builder; builder.SetInputsFormat({input_format}); builder.SetOutputsFormat({output_format}); @@ -54,9 +55,11 @@ CNodePtr NewTransOpNode(const FuncGraphPtr &func_graph, const AnfNodePtr &input, CNodePtr trans_node = func_graph->NewCNode(trans_inputs); MS_EXCEPTION_IF_NULL(trans_node); if (need_padding) { - AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(input, 0)}, - {trans::TransShapeTo4d(AnfAlgo::GetOutputInferShape(input, 0))}, - trans_node.get()); + // if need padding we should set the transdata node's shape to the padding shape + AnfAlgo::SetOutputInferTypeAndShape( + 
{AnfAlgo::GetOutputInferDataType(input, 0)}, + {trans::PaddingShapeTo4d(AnfAlgo::GetOutputInferShape(input, 0), AnfAlgo::GetOutputReshapeType(input, 0))}, + trans_node.get()); } else { AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(input, 0)}, {AnfAlgo::GetOutputInferShape(input, 0)}, trans_node.get()); @@ -92,9 +95,11 @@ AnfNodePtr CreateReshapeNode(const FuncGraphPtr &func_graph, const AnfNodePtr &i AnfNodePtr GetTransInputNodePtr(const FuncGraphPtr &func_graph, const CNodePtr &node, size_t index, const KernelSelectPtr &kernel_select) { MS_EXCEPTION_IF_NULL(node); - bool padding_flag = false; auto input_node = AnfAlgo::GetInputNode(node, index); - if (input_node->isa() || input_node->isa()) { + auto node_with_index = AnfAlgo::VisitKernel(input_node, 0); + MS_EXCEPTION_IF_NULL(node_with_index.first); + auto real_input = node_with_index.first; + if (real_input->isa() || real_input->isa()) { input_node = InsertTransOpForOutput(func_graph, input_node, kernel_select); MS_EXCEPTION_IF_NULL(input_node); AnfAlgo::SetNodeInput(node, input_node, index); @@ -106,33 +111,11 @@ AnfNodePtr GetTransInputNodePtr(const FuncGraphPtr &func_graph, const CNodePtr & std::vector origin_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, index); std::string origin_format = kOpFormat_DEFAULT; std::string dest_format = AnfAlgo::GetInputFormat(node, index); - if (dest_format == kOpFormat_C1HWNCoC0) { - padding_flag = (origin_shape.size() != kShape4dDims); - AnfNodePtr replace_input = AddTransOpNodeToGraph(func_graph, node, kernel_select, index, padding_flag, - origin_format, dest_format, kTransDataOpName, true); - MS_EXCEPTION_IF_NULL(replace_input); - return replace_input; - } - if (dest_format == kOpFormat_NC1HWC0 && origin_shape.size() > 1) { - padding_flag = (origin_shape.size() != kShape4dDims); - AnfNodePtr replace_input = AddTransOpNodeToGraph(func_graph, node, kernel_select, index, padding_flag, - origin_format, dest_format, kTransDataOpName, true); - 
MS_EXCEPTION_IF_NULL(replace_input); - MS_LOG(DEBUG) << "Inserted Translate45, index: " << index; - return replace_input; - } else if (dest_format == kOpFormat_FRAC_NZ) { - AnfNodePtr replace_input = AddTransOpNodeToGraph(func_graph, node, kernel_select, index, padding_flag, - origin_format, dest_format, kTransDataOpName, true); - MS_EXCEPTION_IF_NULL(replace_input); - MS_LOG(DEBUG) << "inserted translate " << AnfAlgo::GetInputFormat(node, index) << " To default, index: " << index; - return replace_input; - } else if (dest_format == kOpFormat_FRAC_Z && !origin_shape.empty()) { - padding_flag = (origin_shape.size() != kShape4dDims); - AnfNodePtr replace_input = AddTransOpNodeToGraph(func_graph, node, kernel_select, index, padding_flag, - origin_format, dest_format, kTransDataOpName, true); - MS_EXCEPTION_IF_NULL(replace_input); - MS_LOG(DEBUG) << "Inserted Translate45, index: " << index; - return replace_input; + if (kNeedTransFormatSet.find(dest_format) != kNeedTransFormatSet.end() && origin_shape.size() > 1) { + MS_LOG(DEBUG) << node->DebugString() << "Insert transdata " << AnfAlgo::GetInputFormat(node, index) + << " To DefaultFormat , index: " << index; + return AddTransOpNodeToGraph(func_graph, node, kernel_select, index, origin_format, dest_format, kTransDataOpName, + true); } return input_node; } @@ -140,7 +123,6 @@ AnfNodePtr GetTransInputNodePtr(const FuncGraphPtr &func_graph, const CNodePtr & AnfNodePtr InsertTransOpForSingleOutput(const FuncGraphPtr &func_graph, const AnfNodePtr &node, const KernelSelectPtr &kernel_select) { MS_EXCEPTION_IF_NULL(node); - bool padding_flag = false; std::string output_format; std::vector origin_shape; if (!AnfAlgo::IsRealKernel(node)) { @@ -156,46 +138,14 @@ AnfNodePtr InsertTransOpForSingleOutput(const FuncGraphPtr &func_graph, const An } std::string origin_format = output_format; std::string dest_format = kOpFormat_DEFAULT; - if (output_format == kOpFormat_C1HWNCoC0) { - padding_flag = (origin_shape.size() != 
kShape4dDims); - AnfNodePtr replace_input = AddTransOpNodeToGraph(func_graph, node, kernel_select, 0, padding_flag, origin_format, - dest_format, kTransDataOpName, false); - MS_EXCEPTION_IF_NULL(replace_input); - return replace_input; - } - if (output_format == kOpFormat_NC1HWC0 && origin_shape.size() > 1) { - padding_flag = (origin_shape.size() != kShape4dDims); - AnfNodePtr replace_output = AddTransOpNodeToGraph(func_graph, node, kernel_select, 0, padding_flag, origin_format, - dest_format, kTransDataOpName, false); - MS_EXCEPTION_IF_NULL(replace_output); - MS_LOG(DEBUG) << "Inserted Trans54"; - return replace_output; - } else if (output_format == kOpFormat_FRAC_NZ) { - AnfNodePtr replace_output = AddTransOpNodeToGraph(func_graph, node, kernel_select, 0, padding_flag, origin_format, - dest_format, kTransDataOpName, false); - MS_EXCEPTION_IF_NULL(replace_output); - MS_LOG(DEBUG) << "Inserted Translate " << output_format << " To default, index: 0"; - return replace_output; - } else if (output_format == kOpFormat_FRAC_Z && !origin_shape.empty()) { - padding_flag = (origin_shape.size() != kShape4dDims); - AnfNodePtr replace_output = AddTransOpNodeToGraph(func_graph, node, kernel_select, 0, padding_flag, origin_format, - dest_format, kTransDataOpName, false); - MS_EXCEPTION_IF_NULL(replace_output); - MS_LOG(DEBUG) << "Inserted Trans54"; - return replace_output; + if (kNeedTransFormatSet.find(output_format) != kNeedTransFormatSet.end() && origin_shape.size() > 1) { + MS_LOG(DEBUG) << "Inserted Transdata " << output_format << " To default , index :0"; + return AddTransOpNodeToGraph(func_graph, node, kernel_select, 0, origin_format, dest_format, kTransDataOpName, + false); } return node; } -void GetTransDataInputFormat(const AnfNodePtr &node, size_t idx, std::string *input_format) { - MS_EXCEPTION_IF_NULL(input_format); - if (AnfAlgo::IsRealKernel(node)) { - *input_format = AnfAlgo::GetOutputFormat(node, idx); - } else { - *input_format = 
AnfAlgo::GetPrevNodeOutputFormat(node, 0); - } -} - AnfNodePtr InsertTransOpForMultipleOutput(const FuncGraphPtr &func_graph, const AnfNodePtr &node, const KernelSelectPtr &kernel_select) { MS_EXCEPTION_IF_NULL(func_graph); @@ -203,46 +153,17 @@ AnfNodePtr InsertTransOpForMultipleOutput(const FuncGraphPtr &func_graph, const std::vector make_tuple_inputs; make_tuple_inputs.push_back(NewValueNode(prim::kPrimMakeTuple)); for (size_t output_idx = 0; output_idx < AnfAlgo::GetOutputTensorNum(node); ++output_idx) { - bool padding_flag = false; - - std::string output_format; - GetTransDataInputFormat(node, output_idx, &output_format); + std::string output_format = AnfAlgo::GetOutputFormat(node, output_idx); if (output_format == kOpFormat_NC1KHKWHWC0) { - MS_LOG(EXCEPTION) << "got the hw format" << output_format << " when insert the transdata node " + MS_LOG(EXCEPTION) << "Got the special format" << output_format << " when insert the transdata node " << node->DebugString(); } auto tuple_getitem = CreatTupleGetItemNode(func_graph, node, output_idx); std::vector origin_shape = AnfAlgo::GetOutputInferShape(node, output_idx); - std::string origin_format = output_format; std::string dest_format = kOpFormat_DEFAULT; - if (output_format == kOpFormat_C1HWNCoC0) { - padding_flag = (origin_shape.size() != kShape4dDims); - AnfNodePtr replace_input = AddTransOpNodeToGraph(func_graph, tuple_getitem, kernel_select, 0, padding_flag, - origin_format, dest_format, kTransDataOpName, false); - MS_EXCEPTION_IF_NULL(replace_input); - return replace_input; - } - if (output_format == kOpFormat_NC1HWC0 && origin_shape.size() > 1) { - padding_flag = (origin_shape.size() != kShape4dDims); - // Insert a 5to4 trans op. 
- AnfNodePtr replace_output = AddTransOpNodeToGraph(func_graph, tuple_getitem, kernel_select, 0, padding_flag, - origin_format, dest_format, kTransDataOpName, false); - MS_EXCEPTION_IF_NULL(replace_output); - MS_LOG(DEBUG) << "Inserted Translate54"; - make_tuple_inputs.push_back(replace_output); - } else if (output_format == kOpFormat_FRAC_NZ) { - AnfNodePtr replace_output = AddTransOpNodeToGraph(func_graph, tuple_getitem, kernel_select, 0, padding_flag, - origin_format, dest_format, kTransDataOpName, false); - MS_EXCEPTION_IF_NULL(replace_output); - MS_LOG(DEBUG) << "Inserted Translate " << output_format << " To default, index: " << output_idx; - make_tuple_inputs.push_back(replace_output); - } else if (output_format == kOpFormat_FRAC_Z && !origin_shape.empty()) { - padding_flag = (origin_shape.size() != kShape4dDims); - AnfNodePtr replace_output = AddTransOpNodeToGraph(func_graph, tuple_getitem, kernel_select, 0, padding_flag, - origin_format, dest_format, kTransDataOpName, false); - MS_EXCEPTION_IF_NULL(replace_output); - MS_LOG(DEBUG) << "Inserted Translate54"; - make_tuple_inputs.push_back(replace_output); + if (kNeedTransFormatSet.find(output_format) != kNeedTransFormatSet.end() && origin_shape.size() > 1) { + make_tuple_inputs.emplace_back(AddTransOpNodeToGraph(func_graph, tuple_getitem, kernel_select, 0, output_format, + dest_format, kTransDataOpName, false)); } else { // No need insert trans op. 
make_tuple_inputs.push_back(tuple_getitem); @@ -253,16 +174,17 @@ AnfNodePtr InsertTransOpForMultipleOutput(const FuncGraphPtr &func_graph, const } } // namespace AnfNodePtr AddTransOpNodeToGraph(const FuncGraphPtr &func_graph, const AnfNodePtr &node, - const KernelSelectPtr &kernel_select, size_t insert_index, const bool padding_flag, + const KernelSelectPtr &kernel_select, size_t insert_index, const std::string &origin_format, const std::string &dest_format, const std::string &op_name, bool is_insert_input) { AnfNodePtr trans_node = nullptr; - AnfNodePtr input_node = nullptr; + AnfNodePtr input_node = node; AnfNodePtr trans_data = nullptr; MS_EXCEPTION_IF_NULL(node); if (origin_format.empty() || dest_format.empty()) { MS_LOG(EXCEPTION) << "trans op format is error, origin = " << origin_format << ", dest " << origin_format; } + // if insert transdata for input we need to change the input if (is_insert_input) { if (!node->isa()) { MS_LOG(EXCEPTION) << "cannot insert a transdata node to a node's input which the node is not a cnode"; @@ -270,29 +192,34 @@ AnfNodePtr AddTransOpNodeToGraph(const FuncGraphPtr &func_graph, const AnfNodePt auto cnode = node->cast(); MS_EXCEPTION_IF_NULL(cnode); input_node = AnfAlgo::GetInputNode(cnode, insert_index); - if (padding_flag) { - auto padd_shape = trans::TransShapeTo4d(AnfAlgo::GetOutputInferShape(input_node, 0)); - auto reshape_node = CreateReshapeNode(func_graph, input_node, kernel_select, padd_shape); - trans_data = NewTransOpNode(func_graph, reshape_node, kernel_select, padding_flag, op_name); - } else { - trans_data = NewTransOpNode(func_graph, input_node, kernel_select, padding_flag, op_name); - } + } + bool need_padding = (trans::IsNeedPadding(dest_format, AnfAlgo::GetOutputInferShape(input_node, 0).size()) && + op_name == kTransDataOpName); + if (!need_padding) { + // don't need padding insert transdata only + trans_data = NewTransOpNode(func_graph, input_node, kernel_select, need_padding, op_name); + trans_node = 
trans_data; + } else if (is_insert_input) { + // if need padding & is input need insert a transdata + // reshape[padding shape] -> transdata[padding shape] -> node + auto padding_shape = + trans::PaddingShapeTo4d(AnfAlgo::GetOutputInferShape(input_node, 0), AnfAlgo::GetInputReshapeType(node, 0)); + auto reshape_node = CreateReshapeNode(func_graph, input_node, kernel_select, padding_shape); + trans_data = NewTransOpNode(func_graph, reshape_node, kernel_select, need_padding, op_name); trans_node = trans_data; } else { - input_node = node; - trans_data = NewTransOpNode(func_graph, input_node, kernel_select, padding_flag, op_name); - if (padding_flag) { - auto reshape_node = - CreateReshapeNode(func_graph, trans_data, kernel_select, AnfAlgo::GetOutputInferShape(input_node, 0)); - trans_node = reshape_node; - } else { - trans_node = trans_data; - } + // if need padding & is output need insert a transdata + // node -> transdata[padding shape] -> reshape[ori_shape] + trans_data = NewTransOpNode(func_graph, input_node, kernel_select, need_padding, op_name); + auto reshape_node = + CreateReshapeNode(func_graph, trans_data, kernel_select, AnfAlgo::GetOutputInferShape(input_node, 0)); + trans_node = reshape_node; } + // refresh the transdata's format to ori format & dst format MS_EXCEPTION_IF_NULL(trans_data); MS_EXCEPTION_IF_NULL(trans_data->kernel_info()); auto trans_ori_build_info = trans_data->kernel_info()->select_kernel_build_info(); - auto kernel_build_info = CreateKernelBuildInfo(origin_format, dest_format, input_node, *trans_ori_build_info); + auto kernel_build_info = RefreshKernelBuildInfo(origin_format, dest_format, input_node, *trans_ori_build_info); AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info, trans_data.get()); return trans_node; } @@ -376,7 +303,17 @@ CNodePtr InsertCastForInput(const FuncGraphPtr &func_graph, const CNodePtr &cnod for (size_t input_index = 0; input_index < AnfAlgo::GetInputTensorNum(cnode); ++input_index) { TypeId origin_type; auto 
cur_input = AnfAlgo::GetInputNode(cnode, input_index); - if (!AnfAlgo::IsFeatureMapInput(cnode, input_index)) { + auto kernel_with_index = AnfAlgo::VisitKernel(cur_input, 0); + auto is_weight_boundary = [](const AnfNodePtr &node) -> bool { + if (node->isa()) { + return true; + } else if (node->isa() && AnfAlgo::IsParameterWeight(node->cast())) { + return true; + } + return false; + }; + auto real_input_node = kernel_with_index.first; + if (is_weight_boundary(real_input_node)) { // weight origin_type = AnfAlgo::GetPrevNodeOutputDeviceDataType(cnode, input_index); } else { diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h index b605d700c3..8925a52a7d 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h @@ -48,7 +48,7 @@ class KernelQuery { using KernelQueryPtr = std::shared_ptr; AnfNodePtr AddTransOpNodeToGraph(const FuncGraphPtr &func_graph, const AnfNodePtr &node, - const KernelSelectPtr &kernel_select, size_t insert_index, bool padding_flag, + const KernelSelectPtr &kernel_select, size_t insert_index, const std::string &origin_format, const std::string &dest_format, const std::string &op_name, bool is_insert_input); diff --git a/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc b/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc index 81e5c4b486..2d44bf8f8f 100644 --- a/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc +++ b/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc @@ -105,10 +105,8 @@ AnfNodePtr AddAdditionalToRefOutput(const FuncGraphPtr &func_graph, const CNodeP // insert trans if (origin_format != cur_format) { auto kernel_select = std::make_shared(); - bool need_padding = - (cur_format == kOpFormat_NC1HWC0 && AnfAlgo::GetOutputInferShape(final_node, 0).size() != kShape4dDims); - final_node = 
AddTransOpNodeToGraph(func_graph, final_node, kernel_select, 0, need_padding, cur_format, - origin_format, kTransDataOpName, false); + final_node = AddTransOpNodeToGraph(func_graph, final_node, kernel_select, 0, cur_format, origin_format, + kTransDataOpName, false); final_index = 0; MS_EXCEPTION_IF_NULL(final_node); MS_LOG(INFO) << "DealRefTransAndCast add trans op, op debug info is " << final_node->DebugString(); diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/transdata_split.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/transdata_split.cc index faef277599..d3990fe898 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/transdata_split.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/transdata_split.cc @@ -1,99 +1,99 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "pre_activate/ascend/ir_fusion/transdata_split.h" -#include -#include "pre_activate/ascend/ascend_helper.h" -#include "session/anf_runtime_algorithm.h" -#include "debug/anf_ir_dump.h" - -namespace mindspore { -namespace opt { -const std::set> invalid_formats_pair = {{kOpFormat_C1HWNCoC0, kOpFormat_NCHW}, - {kOpFormat_NCHW, kOpFormat_C1HWNCoC0}, - {kOpFormat_C1HWNCoC0, kOpFormat_DEFAULT}, - {kOpFormat_DEFAULT, kOpFormat_C1HWNCoC0}}; - -bool TransDataSplit::Run(const FuncGraphPtr &func_graph) { - MS_EXCEPTION_IF_NULL(func_graph); - bool changed = false; - std::vector node_list = TopoSort(func_graph->get_return()); - for (auto &node : node_list) { - if (node != nullptr && node->isa() && AnfAlgo::GetCNodeName(node) == kTransDataOpName) { - CheckCNodeInputSize(node->cast(), kBackendTransDataInputNum); - if (IsFormatInvaild(node)) { - changed = DoSplit(func_graph, node); - } - } - } - return changed; -} -bool TransDataSplit::IsFormatInvaild(const AnfNodePtr &node) { - MS_EXCEPTION_IF_NULL(node); - auto cnode = node->cast(); - MS_EXCEPTION_IF_NULL(cnode); - auto input_format = AnfAlgo::GetInputFormat(node, 0); - auto output_format = AnfAlgo::GetOutputFormat(node, 0); - auto format_pair = std::make_pair(input_format, output_format); - - return invalid_formats_pair.find(format_pair) != invalid_formats_pair.end(); -} -// transdata cannot support frac_z to nchw need split transdata(frac_z-HWCN) and transpose(HWCN-NCHW) -bool TransDataSplit::DoSplit(const FuncGraphPtr &func_graph, const AnfNodePtr &node) { - MS_EXCEPTION_IF_NULL(func_graph); - MS_EXCEPTION_IF_NULL(node); - auto cnode = node->cast(); - MS_EXCEPTION_IF_NULL(cnode); - auto input_node = node->cast()->input(1); - MS_EXCEPTION_IF_NULL(input_node); - - auto input_format = AnfAlgo::GetInputFormat(node, 0); - auto output_format = AnfAlgo::GetOutputFormat(node, 0); - AnfNodePtr new_transdata_node = nullptr; - AnfNodePtr new_transpose_node = nullptr; - AnfNodePtr new_replace_node = nullptr; - // if 
output_format=default transdata need split transdata->transpose else transpose->transdata - if (output_format == kOpFormat_DEFAULT || output_format == kOpFormat_NCHW) { - // trans input_format to hwcn - new_transdata_node = AddTransOpNodeToGraph(func_graph, node, kernel_select_, 0, false, input_format, kOpFormat_HWCN, - kTransDataOpName, true); - // trans hwcn to default_format - new_transpose_node = AddTransOpNodeToGraph(func_graph, new_transdata_node, kernel_select_, 0, false, kOpFormat_HWCN, - output_format, prim::kPrimTranspose->name(), false); - AnfAlgo::SetNodeAttr(kAttrPerm, MakeValue(std::vector{3, 2, 0, 1}), new_transpose_node); - new_replace_node = new_transpose_node; - } else { - // trans default to hwcn - new_transpose_node = AddTransOpNodeToGraph(func_graph, node, kernel_select_, 0, false, input_format, kOpFormat_HWCN, - prim::kPrimTranspose->name(), true); - AnfAlgo::SetNodeAttr(kAttrPerm, MakeValue(std::vector{2, 3, 1, 0}), new_transpose_node); - - // trans hwcn to output_format - new_transdata_node = AddTransOpNodeToGraph(func_graph, new_transpose_node, kernel_select_, 0, false, kOpFormat_HWCN, - output_format, kTransDataOpName, false); - new_replace_node = new_transdata_node; - } - FuncGraphManagerPtr manager = func_graph->manager(); - MS_EXCEPTION_IF_NULL(manager); - manager->AddFuncGraph(func_graph); - - if (!manager->Replace(node, new_replace_node)) { - MS_LOG(EXCEPTION) << "manager replace node failed"; - } - MS_LOG(INFO) << "transdata node:" << cnode->DebugString() << "split success."; - return true; -} -} // namespace opt -} // namespace mindspore +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "pre_activate/ascend/ir_fusion/transdata_split.h" +#include +#include "pre_activate/ascend/ascend_helper.h" +#include "session/anf_runtime_algorithm.h" +#include "debug/anf_ir_dump.h" + +namespace mindspore { +namespace opt { +const std::set> invalid_formats_pair = {{kOpFormat_C1HWNCoC0, kOpFormat_NCHW}, + {kOpFormat_NCHW, kOpFormat_C1HWNCoC0}, + {kOpFormat_C1HWNCoC0, kOpFormat_DEFAULT}, + {kOpFormat_DEFAULT, kOpFormat_C1HWNCoC0}}; + +bool TransDataSplit::Run(const FuncGraphPtr &func_graph) { + MS_EXCEPTION_IF_NULL(func_graph); + bool changed = false; + std::vector node_list = TopoSort(func_graph->get_return()); + for (auto &node : node_list) { + if (node != nullptr && node->isa() && AnfAlgo::GetCNodeName(node) == kTransDataOpName) { + CheckCNodeInputSize(node->cast(), kBackendTransDataInputNum); + if (IsFormatInvaild(node)) { + changed = DoSplit(func_graph, node); + } + } + } + return changed; +} +bool TransDataSplit::IsFormatInvaild(const AnfNodePtr &node) { + MS_EXCEPTION_IF_NULL(node); + auto cnode = node->cast(); + MS_EXCEPTION_IF_NULL(cnode); + auto input_format = AnfAlgo::GetInputFormat(node, 0); + auto output_format = AnfAlgo::GetOutputFormat(node, 0); + auto format_pair = std::make_pair(input_format, output_format); + + return invalid_formats_pair.find(format_pair) != invalid_formats_pair.end(); +} +// transdata cannot support frac_z to nchw need split transdata(frac_z-HWCN) and transpose(HWCN-NCHW) +bool TransDataSplit::DoSplit(const FuncGraphPtr &func_graph, const AnfNodePtr &node) { + MS_EXCEPTION_IF_NULL(func_graph); + 
MS_EXCEPTION_IF_NULL(node); + auto cnode = node->cast(); + MS_EXCEPTION_IF_NULL(cnode); + auto input_node = node->cast()->input(1); + MS_EXCEPTION_IF_NULL(input_node); + + auto input_format = AnfAlgo::GetInputFormat(node, 0); + auto output_format = AnfAlgo::GetOutputFormat(node, 0); + AnfNodePtr new_transdata_node = nullptr; + AnfNodePtr new_transpose_node = nullptr; + AnfNodePtr new_replace_node = nullptr; + // if output_format=default transdata need split transdata->transpose else transpose->transdata + if (output_format == kOpFormat_DEFAULT || output_format == kOpFormat_NCHW) { + // trans input_format to hwcn + new_transdata_node = + AddTransOpNodeToGraph(func_graph, node, kernel_select_, 0, input_format, kOpFormat_HWCN, kTransDataOpName, true); + // trans hwcn to default_format + new_transpose_node = AddTransOpNodeToGraph(func_graph, new_transdata_node, kernel_select_, 0, kOpFormat_HWCN, + output_format, prim::kPrimTranspose->name(), false); + AnfAlgo::SetNodeAttr(kAttrPerm, MakeValue(std::vector{3, 2, 0, 1}), new_transpose_node); + new_replace_node = new_transpose_node; + } else { + // trans default to hwcn + new_transpose_node = AddTransOpNodeToGraph(func_graph, node, kernel_select_, 0, input_format, kOpFormat_HWCN, + prim::kPrimTranspose->name(), true); + AnfAlgo::SetNodeAttr(kAttrPerm, MakeValue(std::vector{2, 3, 1, 0}), new_transpose_node); + + // trans hwcn to output_format + new_transdata_node = AddTransOpNodeToGraph(func_graph, new_transpose_node, kernel_select_, 0, kOpFormat_HWCN, + output_format, kTransDataOpName, false); + new_replace_node = new_transdata_node; + } + FuncGraphManagerPtr manager = func_graph->manager(); + MS_EXCEPTION_IF_NULL(manager); + manager->AddFuncGraph(func_graph); + + if (!manager->Replace(node, new_replace_node)) { + MS_LOG(EXCEPTION) << "Manager replace node failed"; + } + MS_LOG(INFO) << "Transdata node:" << cnode->DebugString() << "split success."; + return true; +} +} // namespace opt +} // namespace mindspore diff --git 
a/mindspore/ccsrc/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/session/anf_runtime_algorithm.cc index 893c379a07..3f20fec7b5 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.cc @@ -289,6 +289,11 @@ size_t AnfRuntimeAlgorithm::GetOutputTensorNum(const AnfNodePtr &node) { std::string AnfRuntimeAlgorithm::GetOutputFormat(const AnfNodePtr &node, size_t output_idx) { MS_EXCEPTION_IF_NULL(node); + if (output_idx > GetOutputTensorNum(node)) { + MS_LOG(EXCEPTION) << "Output index:" << output_idx + << " is out of the node output range :" << GetOutputTensorNum(node) << " #node [" + << node->DebugString() << "]"; + } auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); auto build_info = kernel_info->select_kernel_build_info(); @@ -298,6 +303,11 @@ std::string AnfRuntimeAlgorithm::GetOutputFormat(const AnfNodePtr &node, size_t std::string AnfRuntimeAlgorithm::GetInputFormat(const AnfNodePtr &node, size_t input_idx) { MS_EXCEPTION_IF_NULL(node); + if (input_idx > GetInputTensorNum(node)) { + MS_LOG(EXCEPTION) << "Input index :" << input_idx + << " is out of the number node Input range :" << GetInputTensorNum(node) << "#node [" + << node->DebugString() << "]"; + } auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); auto build_info = kernel_info->select_kernel_build_info(); @@ -362,62 +372,60 @@ std::vector AnfRuntimeAlgorithm::GetPrevNodeOutputInferShape(const AnfNo std::vector AnfRuntimeAlgorithm::GetOutputDeviceShape(const AnfNodePtr &node, size_t output_idx) { auto format = GetOutputFormat(node, output_idx); auto infer_shape = GetOutputInferShape(node, output_idx); - // if format is default_format or NC1KHKWHWC0,device shape = original shape - if (format == kOpFormat_DEFAULT || format == kOpFormat_NC1KHKWHWC0) { - return infer_shape; - } - // scalar shape if (infer_shape.empty()) { return infer_shape; } - if (format == kOpFormat_FRAC_NZ) { - return 
trans::TransShapeToDevice(infer_shape, format); + // if format is default_format or NC1KHKWHWC0,device shape = original shape + if (trans::IsNeedPadding(format, infer_shape.size())) { + infer_shape = trans::PaddingShapeTo4d(infer_shape, GetOutputReshapeType(node, output_idx)); } - // else trans infer shape to 4d and then calculate device shape - return trans::TransShapeToDevice(trans::TransShapeTo4d(infer_shape), format); + return trans::TransShapeToDevice(infer_shape, format); } std::vector AnfRuntimeAlgorithm::GetInputDeviceShape(const AnfNodePtr &node, size_t input_idx) { auto format = GetInputFormat(node, input_idx); auto infer_shape = GetPrevNodeOutputInferShape(node, input_idx); - // if format is default_format or NC1KHKWHWC0,device shape = original shape - if (format == kOpFormat_DEFAULT || format == kOpFormat_NC1KHKWHWC0) { - return infer_shape; - } if (infer_shape.empty()) { return infer_shape; } - if (format == kOpFormat_FRAC_NZ) { - return trans::TransShapeToDevice(infer_shape, format); + // if format is default_format or NC1KHKWHWC0,device shape = original shape + if (trans::IsNeedPadding(format, infer_shape.size())) { + infer_shape = trans::PaddingShapeTo4d(infer_shape, GetInputReshapeType(node, input_idx)); } - // else trans infer shape to 4d and then calculate device shape - return trans::TransShapeToDevice(trans::TransShapeTo4d(infer_shape), format); + return trans::TransShapeToDevice(infer_shape, format); } std::vector AnfRuntimeAlgorithm::GetInputReshapeType(const AnfNodePtr &node, size_t input_idx) { MS_EXCEPTION_IF_NULL(node); + if (input_idx > GetInputTensorNum(node)) { + MS_LOG(EXCEPTION) << "The index:" << input_idx + << " is out of range of the node's input size : " << GetInputTensorNum(node) << "#node[" + << node->DebugString() << "]"; + } auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); auto build_info = kernel_info->select_kernel_build_info(); MS_EXCEPTION_IF_NULL(build_info); - std::vector result; - if 
(!build_info->GetInputReshapeType(input_idx, &result)) { - MS_LOG(EXCEPTION) << "Failed to get the node's[ " << node->DebugString() << "] reshape type !"; + if (build_info->IsInputDefaultPadding()) { + return {}; } - return result; + return build_info->GetInputReshapeType(input_idx); } std::vector AnfRuntimeAlgorithm::GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx) { MS_EXCEPTION_IF_NULL(node); + if (output_idx > GetOutputTensorNum(node)) { + MS_LOG(EXCEPTION) << "The index [" << output_idx << "] is out of range of the node's output size [ " + << GetOutputTensorNum(node) << "#node[ " << node->DebugString() << "]"; + } auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); auto build_info = kernel_info->select_kernel_build_info(); MS_EXCEPTION_IF_NULL(build_info); - std::vector result; - if (!build_info->GetOutputReshapeType(output_idx, &result)) { - MS_LOG(EXCEPTION) << "Failed to get the node's[ " << node->DebugString() << "] reshape type !"; + if (build_info->IsOutputDefaultPadding()) { + return {}; } - return result; + return build_info->GetOutputReshapeType(output_idx); } TypeId AnfRuntimeAlgorithm::GetOutputInferDataType(const AnfNodePtr &node, size_t output_idx) { @@ -463,6 +471,10 @@ TypeId AnfRuntimeAlgorithm::GetPrevNodeOutputInferDataType(const AnfNodePtr &nod TypeId AnfRuntimeAlgorithm::GetOutputDeviceDataType(const AnfNodePtr &node, size_t output_idx) { MS_EXCEPTION_IF_NULL(node); + if (output_idx > GetOutputTensorNum(node)) { + MS_LOG(EXCEPTION) << "The index [" << output_idx << "] is out of range of the node's output size [ " + << GetOutputTensorNum(node) << "#node [ " << node->DebugString() << "]"; + } auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); auto build_info = kernel_info->select_kernel_build_info(); @@ -472,6 +484,10 @@ TypeId AnfRuntimeAlgorithm::GetOutputDeviceDataType(const AnfNodePtr &node, size TypeId AnfRuntimeAlgorithm::GetInputDeviceDataType(const AnfNodePtr &node, size_t 
input_idx) { MS_EXCEPTION_IF_NULL(node); + if (input_idx > GetInputTensorNum(node)) { + MS_LOG(EXCEPTION) << "The index [" << input_idx << "] is out of range of the node's input size [ " + << GetInputTensorNum(node) << "#node [ " << node->DebugString() << "]"; + } auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); auto build_info = kernel_info->select_kernel_build_info(); @@ -496,11 +512,15 @@ const DeviceAddress *AnfRuntimeAlgorithm::GetOutputAddr(const AnfNodePtr &node, MS_LOG(EXCEPTION) << node->DebugString() << "Invalid nop node"; } } + if (output_idx > GetOutputTensorNum(node)) { + MS_LOG(EXCEPTION) << "The index [" << output_idx << "] is out of range of the node's output size [ " + << GetOutputTensorNum(node) << "#node:[ " << node->DebugString() << "]"; + } auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); auto addr = kernel_info->GetOutputAddr(output_idx); if (addr == nullptr) { - MS_LOG(EXCEPTION) << "output_idx " << output_idx << " of node " << node->DebugString() + MS_LOG(EXCEPTION) << "Output_idx " << output_idx << " of node " << node->DebugString() << " output addr is not exist"; } return addr; @@ -517,11 +537,15 @@ DeviceAddressPtr AnfRuntimeAlgorithm::GetMutableOutputAddr(const AnfNodePtr &nod MS_LOG(EXCEPTION) << node->DebugString() << "Invalid nop node."; } } + if (output_idx > GetOutputTensorNum(node)) { + MS_LOG(EXCEPTION) << "The index [" << output_idx << "] is out of range of the node's output size [ " + << GetOutputTensorNum(node) << "#node:[ " << node->DebugString() << "]"; + } auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); auto addr = kernel_info->GetMutableOutputAddr(output_idx); if (addr == nullptr) { - MS_LOG(EXCEPTION) << "output_idx" << output_idx << " of node " << node->DebugString() + MS_LOG(EXCEPTION) << "Output_idx" << output_idx << " of node " << node->DebugString() << " output addr is not exist"; } return addr; @@ -530,6 +554,10 @@ DeviceAddressPtr 
AnfRuntimeAlgorithm::GetMutableOutputAddr(const AnfNodePtr &nod // get output device addr of anf_node bool AnfRuntimeAlgorithm::OutputAddrExist(const AnfNodePtr &node, size_t output_idx) { MS_EXCEPTION_IF_NULL(node); + if (output_idx > GetOutputTensorNum(node)) { + MS_LOG(EXCEPTION) << "The index [" << output_idx << "] is out of range of the node's output size [ " + << GetOutputTensorNum(node) << "#node:[ " << node->DebugString() << "]"; + } auto kernel_info = node->kernel_info(); MS_EXCEPTION_IF_NULL(kernel_info); return kernel_info->OutputAddrExist(output_idx); @@ -769,22 +797,24 @@ AnfNodePtr AnfRuntimeAlgorithm::GetInputNode(const CNodePtr &node, size_t index) return node->input(get_input_index); } +bool AnfRuntimeAlgorithm::IsFeatureMapOutput(const AnfNodePtr &node) { + MS_EXCEPTION_IF_NULL(node); + if (node->isa()) { + return false; + } + auto kernel_info = node->kernel_info(); + MS_EXCEPTION_IF_NULL(kernel_info); + return kernel_info->is_feature_map(); +} + bool AnfRuntimeAlgorithm::IsFeatureMapInput(const AnfNodePtr &node, size_t input_index) { if (!node->isa()) { - MS_LOG(EXCEPTION) << "Cannot input a parameter or a valuenode to charge it's input if is a feature"; + MS_LOG(EXCEPTION) << "Cannot input a parameter or a valuenode to charge it's input if is a feature map"; } auto cnode = node->cast(); MS_EXCEPTION_IF_NULL(cnode); auto input_node = cnode->input(input_index + 1); - auto node_with_index = VisitKernel(input_node, 0); - MS_EXCEPTION_IF_NULL(node_with_index.first); - if (node_with_index.first->isa()) { - return false; - } - if (node_with_index.first->isa()) { - return !AnfAlgo::IsParameterWeight(node_with_index.first->cast()); - } - return true; + return IsFeatureMapOutput(input_node); } size_t AnfRuntimeAlgorithm::GetRealInputIndex(const mindspore::AnfNodePtr &anf_node, const size_t cur_index) { diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.h b/mindspore/ccsrc/session/anf_runtime_algorithm.h index 1a1d471b84..9ac83e011f 100644 --- 
a/mindspore/ccsrc/session/anf_runtime_algorithm.h +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.h @@ -101,7 +101,9 @@ class AnfRuntimeAlgorithm { static std::vector GetOutputDeviceShape(const AnfNodePtr &node, size_t output_idx); // get input shapes which will built and run in device static std::vector GetInputDeviceShape(const AnfNodePtr &node, size_t input_idx); + // Get Input Padding Axis static std::vector GetInputReshapeType(const AnfNodePtr &node, size_t output_idx); + // Get Output Padding Axis static std::vector GetOutputReshapeType(const AnfNodePtr &node, size_t output_idx); // get output data type inferred by ME of anf node static TypeId GetOutputInferDataType(const AnfNodePtr &node, size_t output_idx); @@ -165,6 +167,9 @@ class AnfRuntimeAlgorithm { // get graph id static uint32_t GetGraphId(const AnfNode *node); static AnfNodePtr GetInputNode(const CNodePtr &node, size_t index); + // charge if the node's output is a feature map output + static bool IsFeatureMapOutput(const AnfNodePtr &node); + // charge if the node's input is from a feature map output static bool IsFeatureMapInput(const AnfNodePtr &node, size_t input_index); // get real input index for some tbe ops which input order is different between me and tbe impl static size_t GetRealInputIndex(const AnfNodePtr &anf_node, const size_t cur_index); diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc index 751cf76e32..93ae99f4d2 100755 --- a/mindspore/ccsrc/session/ascend_session.cc +++ b/mindspore/ccsrc/session/ascend_session.cc @@ -18,6 +18,7 @@ #include "operator/ops.h" #include "ir/meta_tensor.h" #include "ir/anf.h" +#include "common/trans.h" #include "device/kernel_runtime.h" #include "device/ascend/kernel_select_ascend.h" #include "device/ascend/kernel_build_ascend.h" @@ -730,8 +731,8 @@ void AscendSession::SetChildGraphParameter(const tensor::TensorPtr &front_tensor size_t tensor_size = front_tensor->data().nbytes(); auto addr = 
AnfAlgo::GetOutputAddr(backend_parameter, 0); MS_EXCEPTION_IF_NULL(addr); - if (!addr->SyncHostToDevice(front_tensor->shape(), tensor_size, front_tensor->data_type(), - front_tensor->data_c(false))) { + if (!addr->SyncHostToDevice(trans::GetRuntimePaddingShape(backend_parameter, 0), tensor_size, + front_tensor->data_type(), front_tensor->data_c(false))) { MS_LOG(EXCEPTION) << "Tensor SyncHostToDevice fail!"; } MS_LOG(INFO) << "Finish!"; diff --git a/mindspore/ccsrc/session/kernel_graph.cc b/mindspore/ccsrc/session/kernel_graph.cc index dbf6e07e7e..bbcc04e14b 100755 --- a/mindspore/ccsrc/session/kernel_graph.cc +++ b/mindspore/ccsrc/session/kernel_graph.cc @@ -143,6 +143,12 @@ CNodePtr KernelGraph::NewCNode(const std::vector &inputs) { cnode->set_abstract(std::make_shared()); // create kernel_info from new parameter auto kernel_info = std::make_shared(); + // if the node only has the primitive(such as getNext) or the node's input has a feature map input + // then the node's output is a feature map output + if (inputs.size() == 1 || std::any_of(inputs.begin() + 1, inputs.end(), + [&](const AnfNodePtr &node) { return AnfAlgo::IsFeatureMapOutput(node); })) { + kernel_info->SetFeatureMapFlag(true); + } cnode->set_kernel_info(kernel_info); AnfAlgo::SetGraphId(graph_id_, cnode.get()); return cnode; @@ -162,22 +168,26 @@ CNodePtr KernelGraph::NewCNode(const CNodePtr &cnode) { ParameterPtr KernelGraph::NewParameter(const ParameterPtr ¶meter) { ParameterPtr new_parameter = add_parameter(); MS_EXCEPTION_IF_NULL(new_parameter); + // create kernel_info form new parameter + auto kernel_info = std::make_shared(); size_t output_tensor_num = 1; // if use default parameter = nullptr,it remarks create a new parameter from no parameter if (parameter == nullptr) { new_parameter->set_abstract(std::make_shared()); + kernel_info->SetFeatureMapFlag(true); } else { // if don't use default parameter = nullptr,it remarks create a new parameter from a old parameter 
new_parameter->set_abstract(parameter->abstract()); new_parameter->set_name(parameter->name()); - if (parameter->has_default()) { + if (AnfAlgo::IsParameterWeight(parameter)) { new_parameter->set_default_param(parameter->default_param()); + kernel_info->SetFeatureMapFlag(false); + } else { + kernel_info->SetFeatureMapFlag(true); } // if output is a tuple tensor,now can use for loop to handle tuple tensor output_tensor_num = AnfAlgo::GetOutputTensorNum(parameter); } - // create kernel_info form new parameter - auto kernel_info = std::make_shared(); new_parameter->set_kernel_info(kernel_info); // create kernel_build_info for new parameter auto kernel_build_info_builder = std::make_shared(); @@ -217,6 +227,7 @@ std::vector KernelGraph::SplitTupleValueNodeToNodeList(const ValueNo AddValueNodeToGraph(new_value_node); auto kernel_info = std::make_shared(); new_value_node->set_kernel_info(kernel_info); + kernel_info->SetFeatureMapFlag(false); // create kernel_build_info for new value node auto kernel_build_info_builder = std::make_shared(); // set the format of value_node to DEFAULT_FORMAT @@ -240,6 +251,7 @@ ValueNodePtr KernelGraph::NewValueNode(const ValueNodePtr &value_node) { new_value_node->set_abstract(value_node->abstract()); // create kernel_info fo new value node auto kernel_info = std::make_shared(); + kernel_info->SetFeatureMapFlag(false); new_value_node->set_kernel_info(kernel_info); // create kernel_build_info for new value node auto kernel_build_info_builder = std::make_shared(); diff --git a/mindspore/ccsrc/session/session_basic.cc b/mindspore/ccsrc/session/session_basic.cc index d2a255229d..bea51037bf 100755 --- a/mindspore/ccsrc/session/session_basic.cc +++ b/mindspore/ccsrc/session/session_basic.cc @@ -20,6 +20,7 @@ #include "pipeline/parse/data_converter.h" #include "ir/manager.h" #include "operator/ops.h" +#include "common/trans.h" #include "utils/context/ms_context.h" #include "utils/config_manager.h" #include "session/anf_runtime_algorithm.h" @@ 
-124,7 +125,8 @@ BaseRef CreateOneTensor(const AnfNodePtr &node, size_t output_index, const Kerne MS_EXCEPTION_IF_NULL(ms_context); if (ms_context->enable_pynative_infer()) { tensor->set_device_address(AnfAlgo::GetMutableOutputAddr(node, output_index)); - } else if (!address->SyncDeviceToHost(tensor->shape(), LongToSize(tensor->data().nbytes()), tensor->data_type(), + } else if (!address->SyncDeviceToHost(trans::GetRuntimePaddingShape(node, output_index), + LongToSize(tensor->data().nbytes()), tensor->data_type(), tensor->data_c(true))) { MS_LOG(INFO) << "output sync device to host error!!!"; tensor->set_dirty(false); @@ -369,7 +371,7 @@ ParameterPtr ConstructRunOpParameter(const std::shared_ptr &graph, kernel_build_info_builder->SetOutputsDeviceType(std::vector{input_tensor->device_address()->type_id()}); } AnfAlgo::SetSelectKernelBuildInfo(kernel_build_info_builder->Build(), param.get()); - // construct abstract of parameter + // ftruct abstract of parameter auto abstract = std::make_shared(input_tensor); param->set_abstract(abstract); return param; @@ -548,7 +550,8 @@ void SessionBasic::LoadInputData(const std::shared_ptr &kernel_grap if (need_sync) { tensor->set_device_address(device_address); MS_EXCEPTION_IF_NULL(device_address); - if (!device_address->SyncHostToDevice(tensor->shape(), LongToSize(tensor->data().nbytes()), tensor->data_type(), + if (!device_address->SyncHostToDevice(trans::GetRuntimePaddingShape(pk_node, 0), + LongToSize(tensor->data().nbytes()), tensor->data_type(), tensor->data_c(false))) { MS_LOG(EXCEPTION) << "SyncHostToDevice failed."; } @@ -620,8 +623,8 @@ void SessionBasic::Summary(KernelGraph *graph) { (void)std::copy(shape.begin(), shape.end(), std::back_inserter(temp_shape)); tensor::TensorPtr tensor = std::make_shared(type_id, temp_shape); MS_EXCEPTION_IF_NULL(address); - if (!address->SyncDeviceToHost(tensor->shape(), LongToSize(tensor->data().nbytes()), tensor->data_type(), - tensor->data_c(true))) { + if 
(!address->SyncDeviceToHost(trans::GetRuntimePaddingShape(node, index), LongToSize(tensor->data().nbytes()), + tensor->data_type(), tensor->data_c(true))) { MS_LOG(ERROR) << "Failed to sync output from device to host."; } tensor->set_dirty(false); diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index 39b4b7a160..79a4b216fb 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -197,8 +197,8 @@ const std::set kOptOperatorSet = { kApplyRMSPropOpName, }; -const std::set kSpecialFormatSet = {kOpFormat_FRAC_Z, kOpFormat_NC1KHKWHWC0, kOpFormat_NC1HWC0, - kOpFormat_FRAC_NZ, kOpFormat_C1HWNCoC0}; +const std::set kNeedTransFormatSet = {kOpFormat_FRAC_Z, kOpFormat_NC1KHKWHWC0, kOpFormat_NC1HWC0, + kOpFormat_FRAC_NZ, kOpFormat_C1HWNCoC0}; static inline void ChangeFileMode(const std::string& file_name, mode_t mode) { if (access(file_name.c_str(), F_OK) != 0) { diff --git a/tests/ut/cpp/pre_activate/ascend/ir_fusion/layer_norm_beta_gamma_backprop_fusion_test.cc b/tests/ut/cpp/pre_activate/ascend/ir_fusion/layer_norm_beta_gamma_backprop_fusion_test.cc index e7831ec353..44b9b3df69 100644 --- a/tests/ut/cpp/pre_activate/ascend/ir_fusion/layer_norm_beta_gamma_backprop_fusion_test.cc +++ b/tests/ut/cpp/pre_activate/ascend/ir_fusion/layer_norm_beta_gamma_backprop_fusion_test.cc @@ -80,6 +80,8 @@ TEST_F(TestHWLayerNormBetaGammaBackpropFusion, layernorm_beta_gamma_backprop_fus builder1.SetOutputsDeviceType({kNumberTypeFloat32}); cast0->set_kernel_info(std::make_shared()); cast1->set_kernel_info(std::make_shared()); + cast0->set_abstract(x_abstract); + cast1->set_abstract(x_abstract); AnfAlgo::SetSelectKernelBuildInfo(builder1.Build(), cast0.get()); AnfAlgo::SetSelectKernelBuildInfo(builder1.Build(), cast1.get()); diff --git a/tests/ut/cpp/session/anf_runtime_algorithm_test.cc b/tests/ut/cpp/session/anf_runtime_algorithm_test.cc index 2af2a7413b..6375d1a758 100644 --- a/tests/ut/cpp/session/anf_runtime_algorithm_test.cc +++ 
b/tests/ut/cpp/session/anf_runtime_algorithm_test.cc @@ -211,8 +211,8 @@ TEST_F(AnfRuntimeAlgorithmTest, EraseNodeAttr) { TEST_F(AnfRuntimeAlgorithmTest, GetInputTensorNum) { auto kernel_graph = std::make_shared(); // test cnode node - auto parameter_one = kernel_graph->add_parameter(); - auto parameter_two = kernel_graph->add_parameter(); + auto parameter_one = kernel_graph->NewParameter(); + auto parameter_two = kernel_graph->NewParameter(); std::vector add_inputs{NewValueNode(prim::kPrimTensorAdd), parameter_one, parameter_two}; auto add = kernel_graph->NewCNode(add_inputs); EXPECT_EQ(AnfAlgo::GetInputTensorNum(add), 2); @@ -247,9 +247,11 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputTensorNum) { TEST_F(AnfRuntimeAlgorithmTest, GetOutputFormat) { auto kernel_graph = std::make_shared(); - std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + std::vector inputs = {NewValueNode(prim::kPrimTensorAdd), kernel_graph->NewParameter(), + kernel_graph->NewParameter()}; auto add = kernel_graph->NewCNode(inputs); + std::vector shape = {1, 2, 3, 4}; + AnfAlgo::SetOutputInferTypeAndShape({kNumberTypeFloat32, kNumberTypeFloat32}, {shape, shape}, add.get()); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); auto d_kernel_info = add->kernel_info(); @@ -266,8 +268,8 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputFormat) { TEST_F(AnfRuntimeAlgorithmTest, GetInputFormat) { auto kernel_graph = std::make_shared(); - std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + std::vector inputs = {NewValueNode(prim::kPrimTensorAdd), kernel_graph->NewParameter(), + kernel_graph->NewParameter()}; auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); @@ -345,7 +347,7 @@ TEST_F(AnfRuntimeAlgorithmTest, GetPrevNodeOutputInferShape) { std::vector shp{2, 32, 224, 224}; auto x_abstract = std::make_shared(kFloat32, shp); // test parameter node as input - auto parameter_node = 
kernel_graph->add_parameter(); + auto parameter_node = kernel_graph->NewParameter(); MS_EXCEPTION_IF_NULL(parameter_node); parameter_node->set_abstract(x_abstract); EXPECT_THROW(AnfAlgo::GetPrevNodeOutputInferShape(parameter_node, 0), std::runtime_error); @@ -387,13 +389,13 @@ TEST_F(AnfRuntimeAlgorithmTest, GetInputDeviceShape) { auto kernel_graph = std::make_shared(); std::vector shp{2, 32, 224, 224}; auto x_abstract = std::make_shared(kFloat32, shp); - auto parameter_one = kernel_graph->add_parameter(); + auto parameter_one = kernel_graph->NewParameter(); MS_EXCEPTION_IF_NULL(parameter_one); parameter_one->set_abstract(x_abstract); - auto parameter_two = kernel_graph->add_parameter(); + auto parameter_two = kernel_graph->NewParameter(); MS_EXCEPTION_IF_NULL(parameter_two); parameter_two->set_abstract(x_abstract); - auto parameter_third = kernel_graph->add_parameter(); + auto parameter_third = kernel_graph->NewParameter(); MS_EXCEPTION_IF_NULL(parameter_third); parameter_third->set_abstract(x_abstract); // test cnode as input @@ -466,8 +468,8 @@ TEST_F(AnfRuntimeAlgorithmTest, GetOutputDeviceDataTypeTest) { TEST_F(AnfRuntimeAlgorithmTest, GetInputDeviceDataTypeTest) { auto kernel_graph = std::make_shared(); - std::vector inputs; - inputs.push_back(NewValueNode(prim::kPrimTensorAdd)); + std::vector inputs = {NewValueNode(prim::kPrimTensorAdd), kernel_graph->NewParameter(), + kernel_graph->NewParameter()}; auto add = kernel_graph->NewCNode(inputs); MS_EXCEPTION_IF_NULL(add); add->set_kernel_info(std::make_shared()); diff --git a/tests/ut/cpp/session/kernel_graph_test.cc b/tests/ut/cpp/session/kernel_graph_test.cc index 55e1b1b28e..a62af9c892 100644 --- a/tests/ut/cpp/session/kernel_graph_test.cc +++ b/tests/ut/cpp/session/kernel_graph_test.cc @@ -140,11 +140,11 @@ TEST_F(KernelGraphTest, SetExecOrderByDefault) { std::vector shape = {2, 32, 224, 224}; auto abstract = std::make_shared(kFloat32, shape); - auto x_parameter = kernel_graph->add_parameter(); + auto 
x_parameter = kernel_graph->NewParameter(); MS_EXCEPTION_IF_NULL(x_parameter); x_parameter->set_name("x_parameter"); x_parameter->set_abstract(abstract); - auto y_parameter = kernel_graph->add_parameter(); + auto y_parameter = kernel_graph->NewParameter(); MS_EXCEPTION_IF_NULL(y_parameter); y_parameter->set_name("y_parameter"); y_parameter->set_abstract(abstract); @@ -153,7 +153,7 @@ TEST_F(KernelGraphTest, SetExecOrderByDefault) { MS_EXCEPTION_IF_NULL(add); add->set_abstract(abstract); - auto z_parameter = kernel_graph->add_parameter(); + auto z_parameter = kernel_graph->NewParameter(); MS_EXCEPTION_IF_NULL(z_parameter); z_parameter->set_name("z_parameter"); z_parameter->set_abstract(abstract); From 2604acedcb17abad94404e408e4e3882d294c6bb Mon Sep 17 00:00:00 2001 From: "wangnan39@huawei.com" Date: Fri, 10 Apr 2020 10:41:59 +0800 Subject: [PATCH 210/367] extend conv stride and dilation to 2d --- .../cpu/kernel/mkldnn/conv2d_cpu_kernel.cc | 18 +- .../mkldnn/conv2d_grad_filter_cpu_kernel.cc | 15 +- .../mkldnn/conv2d_grad_input_cpu_kernel.cc | 15 +- .../ccsrc/kernel/gpu/nn/conv2d_gpu_kernel.h | 19 +- .../gpu/nn/conv2d_grad_filter_gpu_kernel.h | 15 +- .../gpu/nn/conv2d_grad_input_gpu_kernel.h | 15 +- mindspore/ccsrc/kernel/tbe/tbe_adapter.cc | 132 ------------ mindspore/ccsrc/onnx/onnx_exporter.cc | 5 +- mindspore/ccsrc/transform/op_declare.cc | 24 +-- mindspore/nn/layer/conv.py | 66 +++--- mindspore/ops/_op_impl/tbe/conv2d.py | 2 +- mindspore/ops/operations/_grad_ops.py | 12 +- mindspore/ops/operations/nn_ops.py | 198 +++++++++++------- .../ops/cpu/test_conv2d_backprop_filter_op.py | 4 +- .../ops/gpu/test_conv2d_backprop_filter_op.py | 4 +- .../gtest_input/pynative/ops_test.py | 14 +- tests/vm_impl/vm_me.py | 94 +++++++-- 17 files changed, 362 insertions(+), 290 deletions(-) diff --git a/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_cpu_kernel.cc b/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_cpu_kernel.cc index f7527c4750..5d63aee6cd 100644 --- 
a/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_cpu_kernel.cc +++ b/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_cpu_kernel.cc @@ -35,8 +35,22 @@ void Conv2dCPUKernel::InitKernel(const CNodePtr &kernel_node) { dnnl::memory::desc dst_desc = GetDefaultMemDesc(dst_shape); int kernel_size = SizeToInt(weight_shape[3]); - int stride = AnfAlgo::GetNodeAttr(kernel_node, STRIDE); - int dilation = AnfAlgo::GetNodeAttr(kernel_node, DILATION); + auto stride_ori = AnfAlgo::GetNodeAttr>(kernel_node, STRIDE); + auto dilation_ori = AnfAlgo::GetNodeAttr>(kernel_node, DILATION); + if (stride_ori.size() != 4 || stride_ori[2] != stride_ori[3]) { + MS_LOG(EXCEPTION) << "conv2d only support equal stride, and stride must be 4d!"; + } + if (stride_ori[0] != 1 || stride_ori[1] != 1) { + MS_LOG(EXCEPTION) << "conv2d stride only support 1 in N axis and C axis!"; + } + if (dilation_ori.size() != 4 || dilation_ori[2] != 1 || dilation_ori[3] != 1) { + MS_LOG(EXCEPTION) << "conv2d dilation only support 1, and dilation must be 4d!"; + } + if (dilation_ori[0] != 1 || dilation_ori[1] != 1) { + MS_LOG(EXCEPTION) << "conv2d dilation only support 1 in N axis and C axis!"; + } + int stride = stride_ori[2]; + int dilation = dilation_ori[2]; dnnl::memory::dims strides{stride, stride}; dnnl::memory::dims dilates{dilation - 1, dilation - 1}; diff --git a/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_grad_filter_cpu_kernel.cc b/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_grad_filter_cpu_kernel.cc index f4c0e58350..1a7c10a531 100644 --- a/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_grad_filter_cpu_kernel.cc +++ b/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_grad_filter_cpu_kernel.cc @@ -35,8 +35,19 @@ void Conv2dGradFilterCPUKernel::InitKernel(const CNodePtr &kernel_node) { dnnl::memory::desc dst_desc = GetDefaultMemDesc(dst_shape); int kernel_size = SizeToInt(weight_shape[3]); - int stride = AnfAlgo::GetNodeAttr(kernel_node, STRIDE); - int dilation = AnfAlgo::GetNodeAttr(kernel_node, 
DILATION); + auto stride_ori = AnfAlgo::GetNodeAttr>(kernel_node, STRIDE); + auto dilation_ori = AnfAlgo::GetNodeAttr>(kernel_node, DILATION); + if (stride_ori.size() != 2 || stride_ori[0] != stride_ori[1]) { + MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel only support equal stride, and stride must be 2d!"; + } + if (dilation_ori.size() != 4 || dilation_ori[2] != 1 || dilation_ori[3] != 1) { + MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel dilation only support 1, and dilation must be 4d!"; + } + if (dilation_ori[0] != 1 || dilation_ori[1] != 1) { + MS_LOG(EXCEPTION) << "Conv2dGradFilterCPUKernel dilation only support 1 in N axis and C axis!"; + } + int stride = stride_ori[0]; + int dilation = dilation_ori[2]; dnnl::memory::dims strides{stride, stride}; dnnl::memory::dims dilates{dilation - 1, dilation - 1}; diff --git a/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_grad_input_cpu_kernel.cc b/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_grad_input_cpu_kernel.cc index 492e2d6280..04dda20acd 100644 --- a/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_grad_input_cpu_kernel.cc +++ b/mindspore/ccsrc/device/cpu/kernel/mkldnn/conv2d_grad_input_cpu_kernel.cc @@ -35,8 +35,19 @@ void Conv2dGradInputCPUKernel::InitKernel(const CNodePtr &kernel_node) { dnnl::memory::desc dst_desc = GetDefaultMemDesc(dst_shape); int kernel_size = SizeToInt(weight_shape[3]); - int stride = AnfAlgo::GetNodeAttr(kernel_node, STRIDE); - int dilation = AnfAlgo::GetNodeAttr(kernel_node, DILATION); + auto stride_ori = AnfAlgo::GetNodeAttr>(kernel_node, STRIDE); + auto dilation_ori = AnfAlgo::GetNodeAttr>(kernel_node, DILATION); + if (stride_ori.size() != 2 || stride_ori[0] != stride_ori[1]) { + MS_LOG(EXCEPTION) << "Conv2dGradInputCPUKernel only support equal stride, and stride must be 2d!"; + } + if (dilation_ori.size() != 4 || dilation_ori[2] != 1 || dilation_ori[3] != 1) { + MS_LOG(EXCEPTION) << "Conv2dGradInputCPUKernel dilation only support 1, and dilation must be 4d!"; + } + if 
(dilation_ori[0] != 1 || dilation_ori[1] != 1) { + MS_LOG(EXCEPTION) << "Conv2dGradInputCPUKernel dilation only support 1 in N axis and C axis!"; + } + int stride = stride_ori[0]; + int dilation = dilation_ori[2]; dnnl::memory::dims strides{stride, stride}; dnnl::memory::dims dilates{dilation - 1, dilation - 1}; std::vector int_padding_l; diff --git a/mindspore/ccsrc/kernel/gpu/nn/conv2d_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/conv2d_gpu_kernel.h index 7a4adff970..75b2a97cf8 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/conv2d_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/conv2d_gpu_kernel.h @@ -113,9 +113,24 @@ class Conv2dGpuFwdKernel : public GpuKernel { CHECK_CUDNN_RET_WITH_EXCEPT(cudnnSetConvolutionGroupCount(conv_desc_, group_), "cudnnSetConvGroupCount failed"); pad_height_ = GetAttr(kernel_node, "pad"); pad_width_ = pad_height_; - stride_ = GetAttr(kernel_node, "stride"); - dilation_ = GetAttr(kernel_node, "dilation"); pad_mode_ = GetAttr(kernel_node, "pad_mode"); + auto stride_ori = AnfAlgo::GetNodeAttr>(kernel_node, "stride"); + auto dilation_ori = AnfAlgo::GetNodeAttr>(kernel_node, "dilation"); + if (stride_ori.size() != 4 || stride_ori[2] != stride_ori[3]) { + MS_LOG(EXCEPTION) << "conv2d only support equal stride, and stride must be 4d!"; + } + if (stride_ori[0] != 1 || stride_ori[1] != 1) { + MS_LOG(EXCEPTION) << "conv2d stride only support 1 in N axis and C axis!"; + } + if (dilation_ori.size() != 4 || dilation_ori[2] != dilation_ori[3]) { + MS_LOG(EXCEPTION) << "conv2d only support equal dilation, and dilation must be 4d!"; + } + if (dilation_ori[0] != 1 || dilation_ori[1] != 1) { + MS_LOG(EXCEPTION) << "conv2d dilation only support 1 in N axis and C axis!"; + } + stride_ = stride_ori[2]; + dilation_ = dilation_ori[2]; + cudnnTensorDescriptor_t input_descriptor_real = nullptr; if (pad_mode_ == kSamePadModeUpperCase || pad_mode_ == kSamePadModeLowerCase) { SetPad(in_shape, kernel_node); diff --git 
a/mindspore/ccsrc/kernel/gpu/nn/conv2d_grad_filter_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/conv2d_grad_filter_gpu_kernel.h index f8afad4f84..e481fd448e 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/conv2d_grad_filter_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/conv2d_grad_filter_gpu_kernel.h @@ -116,9 +116,20 @@ class ConvGradFilterGpuBkwKernel : public GpuKernel { pad_height_ = GetAttr(kernel_node, "pad"); pad_width_ = pad_height_; - stride_ = GetAttr(kernel_node, "stride"); - dilation_ = GetAttr(kernel_node, "dilation"); pad_mode_ = GetAttr(kernel_node, "pad_mode"); + auto stride_ori = AnfAlgo::GetNodeAttr>(kernel_node, "stride"); + auto dilation_ori = AnfAlgo::GetNodeAttr>(kernel_node, "dilation"); + if (stride_ori.size() != 2 || stride_ori[0] != stride_ori[1]) { + MS_LOG(EXCEPTION) << "ConvGradFilterGpuBkwKernel only support equal stride, and stride must be 2d!"; + } + if (dilation_ori.size() != 4 || dilation_ori[2] != dilation_ori[3]) { + MS_LOG(EXCEPTION) << "ConvGradFilterGpuBkwKernel only support equal dilation, and dilation must be 4d!"; + } + if (dilation_ori[0] != 1 || dilation_ori[1] != 1) { + MS_LOG(EXCEPTION) << "ConvGradFilterGpuBkwKernel dilation only support 1 in N axis and C axis!"; + } + stride_ = stride_ori[0]; + dilation_ = dilation_ori[2]; cudnnTensorDescriptor_t x_desc_real = nullptr; if (pad_mode_ == kSamePadModeUpperCase || pad_mode_ == kSamePadModeLowerCase) { SetPad(in_shape, kernel_node); diff --git a/mindspore/ccsrc/kernel/gpu/nn/conv2d_grad_input_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nn/conv2d_grad_input_gpu_kernel.h index be7739981d..008abcc658 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/conv2d_grad_input_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nn/conv2d_grad_input_gpu_kernel.h @@ -117,9 +117,20 @@ class ConvGradInputGpuBkwKernel : public GpuKernel { pad_height_ = GetAttr(kernel_node, "pad"); pad_width_ = pad_height_; - stride_ = GetAttr(kernel_node, "stride"); - dilation_ = GetAttr(kernel_node, "dilation"); pad_mode_ 
= GetAttr(kernel_node, "pad_mode"); + auto stride_ori = AnfAlgo::GetNodeAttr>(kernel_node, "stride"); + auto dilation_ori = AnfAlgo::GetNodeAttr>(kernel_node, "dilation"); + if (stride_ori.size() != 2 || stride_ori[0] != stride_ori[1]) { + MS_LOG(EXCEPTION) << "ConvGradInputGpuBkwKernel only support equal stride, and stride must be 2d!"; + } + if (dilation_ori.size() != 4 || dilation_ori[2] != dilation_ori[3]) { + MS_LOG(EXCEPTION) << "ConvGradInputGpuBkwKernel only support equal dilation, and dilation must be 4d!"; + } + if (dilation_ori[0] != 1 || dilation_ori[1] != 1) { + MS_LOG(EXCEPTION) << "ConvGradInputGpuBkwKernel dilation only support 1 in N axis and C axis!"; + } + stride_ = stride_ori[0]; + dilation_ = dilation_ori[2]; cudnnTensorDescriptor_t dx_desc_real = nullptr; if (pad_mode_ == kSamePadModeUpperCase || pad_mode_ == kSamePadModeLowerCase) { SetPad(input_shape, kernel_node); diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc index 229a3eb34a..481ac75504 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc @@ -148,9 +148,6 @@ void TbeAdapter::InputOrderPass(const std::string &op_name, std::vector TbeAdapter::build_json_attr_pass_map_ = { - {"Conv2D", TbeAdapter::Conv2DAttrJsonPass}, - {"Conv2DBackpropFilter", TbeAdapter::Conv2DBackpropFilterAttrJsonPass}, - {"Conv2DBackpropInput", TbeAdapter::Conv2DBackpropInputAttrJsonPass}, {"MaximumGrad", TbeAdapter::MaximumGradAttrJsonPass}, {"MinimumGrad", TbeAdapter::MinimumGradAttrJsonPass}, {"Cast", TbeAdapter::CastAttrJsonPass}}; @@ -168,135 +165,6 @@ bool TbeAdapter::RunAttrPass(const mindspore::AnfNodePtr &anf_node, return false; } -void TbeAdapter::Conv2DAttrJsonPass(const mindspore::AnfNodePtr &anf_node, - const std::vector> &op_info_attrs, - nlohmann::json *attrs_json) { - MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(attrs_json); - auto attr_num = op_info_attrs.size(); - auto primitive = 
AnfAlgo::GetCNodePrimitive(anf_node); - MS_EXCEPTION_IF_NULL(primitive); - for (size_t i = 0; i < attr_num; i++) { - nlohmann::json attr_obj; - MS_EXCEPTION_IF_NULL(op_info_attrs[i]); - std::string attr_name = op_info_attrs[i]->name(); - std::vector attr_value; - if (primitive->GetAttr(attr_name) != nullptr) { - auto value = primitive->GetAttr(attr_name); - int data = GetValue(value); - size_t list_int_size = 0; - if (attr_name == "stride") { - list_int_size = 4; - } else if (attr_name == "dilation") { - list_int_size = 4; - } else if (attr_name == "pad") { - value = primitive->GetAttr("pad_list"); - attr_value = GetValue>(value); - } - for (size_t j = 0; j < list_int_size; j++) { - attr_value.push_back(data); - } - attr_obj["value"] = attr_value; - } else { - attr_obj["value"] = 0; - } - attr_obj["name"] = attr_name; - attr_obj["valid"] = true; - (*attrs_json).push_back(attr_obj); - } - MS_LOG(INFO) << "Conv2DAttrPass done."; -} - -void TbeAdapter::Conv2DBackpropFilterAttrJsonPass( - const mindspore::AnfNodePtr &anf_node, const std::vector> &op_info_attrs, - nlohmann::json *attrs_json) { - MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(attrs_json); - auto attr_num = op_info_attrs.size(); - auto primitive = AnfAlgo::GetCNodePrimitive(anf_node); - MS_EXCEPTION_IF_NULL(primitive); - for (size_t i = 0; i < attr_num; i++) { - nlohmann::json attr_obj; - MS_EXCEPTION_IF_NULL(op_info_attrs[i]); - std::string attr_name = op_info_attrs[i]->name(); - if (primitive->GetAttr(attr_name) != nullptr) { - auto value = primitive->GetAttr(attr_name); - if (attr_name == "pad_mode") { - std::string attr_value = GetValue(value); - (void)transform(attr_value.begin(), attr_value.end(), attr_value.begin(), ::toupper); - attr_obj["value"] = attr_value; - } else if (attr_name == "filter_sizes") { - std::vector attr_value = GetValue>(value); - attr_obj["value"] = attr_value; - } else { - std::vector attr_value; - int data = GetValue(value); - size_t list_int_size = 0; - if (attr_name 
== "stride") { - list_int_size = 2; - } else if (attr_name == "dilation") { - list_int_size = 4; - } - for (size_t j = 0; j < list_int_size; j++) { - attr_value.push_back(data); - } - attr_obj["value"] = attr_value; - } - attr_obj["valid"] = true; - } else { - attr_obj["valid"] = false; - } - attr_obj["name"] = attr_name; - attrs_json->push_back(attr_obj); - } - MS_LOG(INFO) << "Conv2DBackpropFilterAttrJsonPass done."; -} - -void TbeAdapter::Conv2DBackpropInputAttrJsonPass( - const mindspore::AnfNodePtr &anf_node, const std::vector> &op_info_attrs, - nlohmann::json *attrs_json) { - MS_EXCEPTION_IF_NULL(anf_node); - MS_EXCEPTION_IF_NULL(attrs_json); - auto attr_num = op_info_attrs.size(); - auto primitive = AnfAlgo::GetCNodePrimitive(anf_node); - MS_EXCEPTION_IF_NULL(primitive); - for (size_t i = 0; i < attr_num; i++) { - nlohmann::json attr_obj; - MS_EXCEPTION_IF_NULL(op_info_attrs[i]); - std::string attr_name = op_info_attrs[i]->name(); - if (primitive->GetAttr(attr_name) != nullptr) { - auto value = primitive->GetAttr(attr_name); - if (attr_name == "pad_mode") { - std::string attr_value = GetValue(value); - (void)transform(attr_value.begin(), attr_value.end(), attr_value.begin(), ::toupper); - attr_obj["value"] = attr_value; - } else if (attr_name == "input_sizes") { - std::vector attr_value = GetValue>(value); - attr_obj["value"] = attr_value; - } else { - std::vector attr_value; - int data = GetValue(value); - size_t list_int_size = 0; - if (attr_name == "stride") { - list_int_size = 2; - } else if (attr_name == "dilation") { - list_int_size = 4; - } - for (size_t j = 0; j < list_int_size; j++) { - attr_value.push_back(data); - } - attr_obj["value"] = attr_value; - } - attr_obj["valid"] = true; - } else { - attr_obj["valid"] = false; - } - attr_obj["name"] = attr_name; - attrs_json->push_back(attr_obj); - } - MS_LOG(INFO) << "Conv2DBackpropInputAttrJsonPass done."; -} - void TbeAdapter::MaximumGradAttrJsonPass(const mindspore::AnfNodePtr &anf_node, const 
std::vector> &op_info_attrs, nlohmann::json *attrs_json) { diff --git a/mindspore/ccsrc/onnx/onnx_exporter.cc b/mindspore/ccsrc/onnx/onnx_exporter.cc index 3bd4a38881..4767d13cd9 100644 --- a/mindspore/ccsrc/onnx/onnx_exporter.cc +++ b/mindspore/ccsrc/onnx/onnx_exporter.cc @@ -179,7 +179,7 @@ OPERATOR_ONNX_CONVERT_DEFINE(Squeeze, Squeeze, OPERATOR_ONNX_CONVERT_DEFINE( Conv2D, Conv, OpNameInfo() - .Attr("dilation", "dilations", onnx::AttributeProto_AttributeType_INTS, SetAttrValueToProto) + .Attr("dilation", "dilations", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>) .Attr("group", "group", onnx::AttributeProto_AttributeType_INT, SetAttrValueToProto) .Attr("kernel_size", "kernel_shape", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto) .Attr("pad_mode", "auto_pad", onnx::AttributeProto_AttributeType_STRING, @@ -197,8 +197,7 @@ OPERATOR_ONNX_CONVERT_DEFINE( prim); } }) - .Attr("stride", "strides", onnx::AttributeProto_AttributeType_INTS, SetAttrValueToProto)) - + .Attr("stride", "strides", onnx::AttributeProto_AttributeType_INTS, SetAttrTupleValueToProto<2>)) OPERATOR_ONNX_CONVERT_DEFINE(BiasAdd, Add, OpNameInfo()) OPERATOR_ONNX_CONVERT_DEFINE(MatMul, Gemm, OpNameInfo() diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index 419805c37f..7634879959 100644 --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -754,9 +754,9 @@ OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}}; // Conv2D INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}}; ATTR_MAP(Conv2D) = { - {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, + {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, }; OUTPUT_MAP(Conv2D) = {{0, OUTPUT_DESC(y)}}; @@ -766,8 +766,8 @@ 
INPUT_ATTR_MAP(Conv2DBackpropInputD) = { {3, ATTR_DESC(input_sizes, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(Conv2DBackpropInputD) = { {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"stride", ATTR_DESC(strides, "strides", AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, + {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, }; OUTPUT_MAP(Conv2DBackpropInputD) = {{0, OUTPUT_DESC(y)}}; @@ -777,17 +777,17 @@ INPUT_ATTR_MAP(Conv2DBackpropFilterD) = { {3, ATTR_DESC(filter_sizes, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(Conv2DBackpropFilterD) = { {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"stride", ATTR_DESC(strides, "strides", AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, + {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, }; OUTPUT_MAP(Conv2DBackpropFilterD) = {{0, OUTPUT_DESC(y)}}; // DepthwiseConv2D INPUT_MAP(DepthwiseConv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}}; ATTR_MAP(DepthwiseConv2D) = { - {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, + {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, {"pads", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, {"data_format", ATTR_DESC(data_format, AnyTraits())}, }; OUTPUT_MAP(DepthwiseConv2D) = {{0, OUTPUT_DESC(y)}}; @@ -797,9 +797,9 @@ INPUT_MAP(DepthwiseConv2DBackpropInputD) = {{2, INPUT_DESC(filter)}, {3, INPUT_D INPUT_ATTR_MAP(DepthwiseConv2DBackpropInputD) = { {1, ATTR_DESC(input_size, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(DepthwiseConv2DBackpropInputD) = { - {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, + {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, {"pads", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", 
ATTR_DESC(dilations, "pad", AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, }; OUTPUT_MAP(DepthwiseConv2DBackpropInputD) = {{0, OUTPUT_DESC(input_grad)}}; @@ -808,9 +808,9 @@ INPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{1, INPUT_DESC(input)}, {3, INPUT_D INPUT_ATTR_MAP(DepthwiseConv2DBackpropFilterD) = { {2, ATTR_DESC(filter_size, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(DepthwiseConv2DBackpropFilterD) = { - {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, + {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, {"pads", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, }; OUTPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{0, OUTPUT_DESC(filter_grad)}}; diff --git a/mindspore/nn/layer/conv.py b/mindspore/nn/layer/conv.py index eb73a9ce5a..fbf6ad2a0c 100644 --- a/mindspore/nn/layer/conv.py +++ b/mindspore/nn/layer/conv.py @@ -17,7 +17,7 @@ from mindspore import log as logger from mindspore.ops import operations as P from mindspore.common.parameter import Parameter from mindspore.common.initializer import initializer -from mindspore._checkparam import check_bool, twice, check_int_positive, check_int_non_negative, check_int +from mindspore._checkparam import check_bool, twice, check_int_positive, check_int_non_negative from mindspore._extends import cell_attr_register from ..cell import Cell @@ -42,17 +42,23 @@ class _Conv(Cell): self.in_channels = check_int_positive(in_channels) self.out_channels = check_int_positive(out_channels) self.kernel_size = kernel_size - self.stride = check_int_positive(stride) + self.stride = stride self.pad_mode = pad_mode self.padding = check_int_non_negative(padding) - self.dilation = check_int(dilation) + self.dilation = dilation self.group = check_int_positive(group) self.has_bias = has_bias - if (not isinstance(kernel_size, tuple)) or len(kernel_size) != 2 or \ - (not 
isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \ - kernel_size[0] < 1 or kernel_size[1] < 1: + if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \ + kernel_size[0] < 1 or kernel_size[1] < 1: raise ValueError("Attr 'kernel_size' of 'Conv2D' Op passed " + str(self.kernel_size) + ", should be a int or tuple and equal to or greater than 1.") + if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or stride[0] < 1 or stride[1] < 1: + raise ValueError("Attr 'stride' of 'Conv2D' Op passed " + + str(self.stride) + ", should be a int or tuple and equal to or greater than 1.") + if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \ + dilation[0] < 1 or dilation[1] < 1: + raise ValueError("Attr 'dilation' of 'Conv2D' Op passed " + + str(self.dilation) + ", should equal to or greater than 1.") if in_channels % group != 0: raise ValueError("Attr 'in_channels' of 'Conv2D' Op must be divisible by " "attr 'group' of 'Conv2D' Op.") @@ -107,12 +113,13 @@ class Conv2d(_Conv): Args: in_channels (int): The number of input channel :math:`C_{in}`. out_channels (int): The number of output channel :math:`C_{out}`. - kernel_size (Union[int, tuple]): The data type is int or tuple with 2 integers. Specifies the height + kernel_size (Union[int, tuple[int]]): The data type is int or tuple with 2 integers. Specifies the height and width of the 2D convolution window. Single int means the value if for both height and width of the kernel. A tuple of 2 ints means the first value is for the height and the other is for the width of the kernel. - stride (int): Specifies stride for all spatial dimensions with the same value. Value of stride should be - greater or equal to 1 but bounded by the height and width of the input. Default: 1. 
+ stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents + the height and width of movement are both strides, or a tuple of two int numbers that + represent height and width of movement respectively. Default: 1. pad_mode (str): Specifies padding mode. The optional values are "same", "valid", "pad". Default: "same". @@ -130,9 +137,11 @@ class Conv2d(_Conv): Tensor borders. `padding` should be greater than or equal to 0. padding (int): Implicit paddings on both sides of the input. Default: 0. - dilation (int): Specifying the dilation rate to use for dilated convolution. If set to be :math:`k > 1`, - there will be :math:`k - 1` pixels skipped for each sampling location. Its value should be greater - or equal to 1 and bounded by the height and width of the input. Default: 1. + dilation (Union[int, tuple[int]]): The data type is int or tuple with 2 integers. Specifies the dilation rate + to use for dilated convolution. If set to be :math:`k > 1`, there will + be :math:`k - 1` pixels skipped for each sampling location. Its value should + be greater or equal to 1 and bounded by the height and width of the + input. Default: 1. group (int): Split filter into groups, `in_ channels` and `out_channels` should be divisible by the number of groups. Default: 1. has_bias (bool): Specifies whether the layer uses a bias vector. Default: False. @@ -172,6 +181,8 @@ class Conv2d(_Conv): weight_init='normal', bias_init='zeros'): kernel_size = twice(kernel_size) + stride = twice(stride) + dilation = twice(dilation) super(Conv2d, self).__init__( in_channels, out_channels, @@ -241,7 +252,9 @@ class Conv2dTranspose(_Conv): and width of the 2D convolution window. Single int means the value is for both height and width of the kernel. A tuple of 2 ints means the first value is for the height and the other is for the width of the kernel. - stride (int): Specifies the same value for all spatial dimensions. Default: 1. 
+ stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents + the height and width of movement are both strides, or a tuple of two int numbers that + represent height and width of movement respectively. Default: 1. pad_mode (str): Select the mode of the pad. The optional values are "pad", "same", "valid". Default: "same". @@ -251,8 +264,11 @@ class Conv2dTranspose(_Conv): - valid: Adopted the way of discarding. padding (int): Implicit paddings on both sides of the input. Default: 0. - dilation (int): Specifies the dilation rate to use for dilated - convolution. Default: 1. + dilation (Union[int, tuple[int]]): The data type is int or tuple with 2 integers. Specifies the dilation rate + to use for dilated convolution. If set to be :math:`k > 1`, there will + be :math:`k - 1` pixels skipped for each sampling location. Its value should + be greater or equal to 1 and bounded by the height and width of the + input. Default: 1. group (int): Split filter into groups, `in_channels` and `out_channels` should be divisible by the number of groups. Default: 1. has_bias (bool): Specifies whether the layer uses a bias vector. Default: False. @@ -290,6 +306,8 @@ class Conv2dTranspose(_Conv): weight_init='normal', bias_init='zeros'): kernel_size = twice(kernel_size) + stride = twice(stride) + dilation = twice(dilation) # out_channels and in_channels swap. # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel, # then Conv2dTranspose's out_channel refers to Conv2DBackpropInput's in_channel. 
@@ -333,26 +351,26 @@ class Conv2dTranspose(_Conv): self.conv2d_transpose.set_strategy(strategy) return self - def _deconv_output_length(self, input_length, filter_size): + def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size): """Calculate the width and height of output.""" length = 0 if self.is_valid: - if filter_size - self.stride > 0: - length = input_length * self.stride + filter_size - self.stride + if filter_size - stride_size > 0: + length = input_length * stride_size + filter_size - stride_size else: - length = input_length * self.stride + length = input_length * stride_size elif self.is_same: - length = input_length * self.stride + length = input_length * stride_size elif self.is_pad: - length = input_length * self.stride - 2 * self.padding + filter_size + \ - (filter_size - 1) * (self.dilation - 1) - self.stride + length = input_length * stride_size - 2 * self.padding + filter_size + \ + (filter_size - 1) * (dilation_size - 1) - stride_size return length def construct(self, x): n, _, h, w = self.shape(x) - h_out = self._deconv_output_length(h, self.kernel_size[0]) - w_out = self._deconv_output_length(w, self.kernel_size[1]) + h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0]) + w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1]) if self.has_bias: return self.bias_add(self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out)), self.bias) diff --git a/mindspore/ops/_op_impl/tbe/conv2d.py b/mindspore/ops/_op_impl/tbe/conv2d.py index 52a9eac1fa..da0f34e359 100644 --- a/mindspore/ops/_op_impl/tbe/conv2d.py +++ b/mindspore/ops/_op_impl/tbe/conv2d.py @@ -34,7 +34,7 @@ from mindspore.ops.op_info_register import op_info_register "value": "all" }, { - "name": "pad", + "name": "pad_list", "param_type": "required", "type": "listInt", "value": "all" diff --git a/mindspore/ops/operations/_grad_ops.py 
b/mindspore/ops/operations/_grad_ops.py index f38044ab6a..e03b163225 100644 --- a/mindspore/ops/operations/_grad_ops.py +++ b/mindspore/ops/operations/_grad_ops.py @@ -119,8 +119,8 @@ class Conv2DBackpropFilter(PrimitiveWithInfer): pad (int): The pad value to fill. Default: 0. mode (int): 0 Math convolutiuon, 1 cross-correlation convolution , 2 deconvolution, 3 depthwise convolution. Default: 1. - stride (int): The stride to apply conv filter. Default: 1. - dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1. + stride (tuple): The stride to apply conv filter. Default: (1, 1). + dilation (tuple): Specifies the dilation rate to use for dilated convolution. Default: (1, 1, 1, 1). group (int): Splits input into groups. Default: 1. Returns: @@ -135,8 +135,8 @@ class Conv2DBackpropFilter(PrimitiveWithInfer): pad=0, pad_list=(0, 0, 0, 0), mode=1, - stride=1, - dilation=1, + stride=(1, 1), + dilation=(1, 1, 1, 1), group=1): """init Convolution""" self.init_prim_io_names(inputs=['out_backprop', 'input', 'filter_sizes'], outputs=['output']) @@ -146,7 +146,9 @@ class Conv2DBackpropFilter(PrimitiveWithInfer): pad_mode = pad_mode.upper() self.add_prim_attr('pad_mode', pad_mode) self.pad = pad - self.stride = stride + if isinstance(stride, tuple) and len(stride) == 4: + self.stride = (stride[2], stride[3]) + self.add_prim_attr('stride', self.stride) self.dilation = dilation self.group = group self.add_prim_attr('data_format', "NCHW") diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 83f76455e0..a7d7357e46 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -460,8 +460,8 @@ class Conv2D(PrimitiveWithInfer): 2 deconvolution, 3 depthwise convolution. Default: 1. pad_mode (str): "valid", "same", "pad" the mode to fill padding. Default: "valid". pad (int): The pad value to fill. Default: 0. - stride (int): The stride to apply conv filter. Default: 1. 
- dilation (int): Specify the space to use between kernel elements. Default: 1. + stride (Union(int, tuple[int])): The stride to apply conv filter. Default: 1. + dilation (Union(int, tuple[int])): Specify the space to use between kernel elements. Default: 1. group (int): Split input into groups. Default: 1. Returns: @@ -488,11 +488,35 @@ class Conv2D(PrimitiveWithInfer): group=1): """init Conv2D""" self.init_prim_io_names(inputs=['x', 'w'], outputs=['output']) - self.kernel_size = kernel_size self.kernel_size = validator.check_type('kernel_size', kernel_size, (int, tuple)) - if isinstance(self.kernel_size, int): - self.kernel_size = (self.kernel_size, self.kernel_size) - validator.check_integer('length of kernel_size', len(self.kernel_size), 2, Rel.GE) + if isinstance(kernel_size, int): + self.kernel_size = (kernel_size, kernel_size) + if len(self.kernel_size) != 2 or (not isinstance(self.kernel_size[0], int)) or \ + (not isinstance(self.kernel_size[1], int)) or \ + self.kernel_size[0] < 1 or self.kernel_size[1] < 1: + raise ValueError(f"The \'kernel_size\' of \'Conv2D\' should be an positive int number or " + f"a tuple of two positive int numbers, but got {kernel_size}") + self.stride = validator.check_type('stride', stride, (int, tuple)) + if isinstance(stride, int): + self.stride = (stride, stride) + if len(self.stride) != 2 or (not isinstance(self.stride[0], int)) or \ + (not isinstance(self.stride[1], int)) or \ + self.stride[0] < 1 or self.stride[1] < 1: + raise ValueError(f"The \'stride\' of \'Conv2D\' should be an positive int number or " + f"a tuple of two positive int numbers, but got {stride}") + self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1])) + self.dilation = validator.check_type('dilation', dilation, (tuple, int)) + if isinstance(dilation, int): + self.dilation = (1, 1, dilation, dilation) + elif len(dilation) == 2: + self.dilation = (1, 1, dilation[0], dilation[1]) + if len(self.dilation) != 4 or (not isinstance(self.dilation[0], 
int) or self.dilation[0] < 1) or \ + (not isinstance(self.dilation[1], int) or self.dilation[1] < 1) or \ + (not isinstance(self.dilation[2], int) or self.dilation[2] < 1) or \ + (not isinstance(self.dilation[3], int) or self.dilation[3] < 1): + raise ValueError(f"The \'dilation\' of \'Conv2D\' should be an positive int number or " + f"a tuple of two or four positive int numbers, but got {dilation}") + self.add_prim_attr('dilation', self.dilation) validator.equal('type of pad', type(pad), 'not bool', not isinstance(pad, bool)) validator.equal('type of pad', type(pad), 'int', isinstance(pad, int)) self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad']) @@ -504,18 +528,6 @@ class Conv2D(PrimitiveWithInfer): self.add_prim_attr('data_format', "NCHW") self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT) self.group = validator.check_integer('group', group, 0, Rel.GT) - self.dilation = validator.check_integer('dilation', dilation, 1, Rel.GE) - validator.check_type('kernel_size', kernel_size, [int, tuple]) - if isinstance(kernel_size, int) and kernel_size < 1: - raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed ' - + str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.') - if isinstance(kernel_size, tuple) and (len(kernel_size) != 2 or - (not isinstance(kernel_size[0], int)) or - (not isinstance(kernel_size[1], int)) or - kernel_size[0] < 1 or kernel_size[1] < 1): - raise ValueError('Attr \'kernel_size\' of \'Conv2D\' Op passed ' - + str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.') - self.stride = validator.check_integer('stride', stride, 1, Rel.GE) def infer_shape(self, x_shape, w_shape): validator.check_integer("weight_shape", len(w_shape), 4, Rel.EQ) @@ -526,29 +538,33 @@ class Conv2D(PrimitiveWithInfer): kernel_size_h = w_shape[2] kernel_size_w = w_shape[3] + stride_h = self.stride[2] + stride_w = self.stride[3] + dilation_h = 
self.dilation[2] + dilation_w = self.dilation[3] if self.pad_mode == "valid": - h_out = math.ceil((x_shape[2] - self.dilation * (kernel_size_h - 1)) / self.stride) - w_out = math.ceil((x_shape[3] - self.dilation * (kernel_size_w - 1)) / self.stride) + h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h) + w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w) pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0 elif self.pad_mode == "same": - h_out = math.ceil(x_shape[2] / self.stride) - w_out = math.ceil(x_shape[3] / self.stride) + h_out = math.ceil(x_shape[2] / stride_h) + w_out = math.ceil(x_shape[3] / stride_w) - pad_needed_h = max(0, (h_out - 1) * self.stride + self.dilation * (kernel_size_h - 1) + 1 - x_shape[2]) + pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2]) pad_top = math.floor(pad_needed_h / 2) pad_bottom = pad_needed_h - pad_top - pad_needed_w = max(0, (w_out - 1) * self.stride + self.dilation * (kernel_size_w - 1) + 1 - x_shape[3]) + pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3]) pad_left = math.floor(pad_needed_w / 2) pad_right = pad_needed_w - pad_left elif self.pad_mode == 'pad': pad_top, pad_bottom, pad_left, pad_right = self.pad, self.pad, self.pad, self.pad - h_out = 1 + (x_shape[2] + 2 * self.pad - kernel_size_h - (kernel_size_h - 1) * (self.dilation - 1)) \ - / self.stride - w_out = 1 + (x_shape[3] + 2 * self.pad - kernel_size_w - (kernel_size_w - 1) * (self.dilation - 1)) \ - / self.stride + h_out = 1 + (x_shape[2] + 2 * self.pad - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \ + / stride_h + w_out = 1 + (x_shape[3] + 2 * self.pad - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \ + / stride_w h_out = math.floor(h_out) w_out = math.floor(w_out) @@ -580,19 +596,19 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): Args: channel_multiplier (int): The multipiler for the 
original output conv. - kernel_size (int or tuple): The size of the conv kernel. + kernel_size (Union[int, tuple[int]]): The size of the conv kernel. mode (int): 0 Math convolution, 1 cross-correlation convolution , 2 deconvolution, 3 depthwise convolution. Default: 3. pad_mode (str): "valid", "same", "pad" the mode to fill padding. Default: "valid". pad (int): The pad value to fill. Default: 0. - stride (int): The stride to apply conv filter. Default: 1. - dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1. + stride (Union[int, tuple[int]]): The stride to apply conv filter. Default: 1. + dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. Default: 1. group (int): Splits input into groups. Default: 1. Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. - **weight** (Tensor) - Set size of kernel is :math:`(K_1, K_2)`, then the shape is - :math:`(C_{out}, C_{in}, K_1, K_2)`. + :math:`(channel_multiplier, C_{in}, K_1, K_2)`. Outputs: Tensor of shape :math:`(N, C_{in} * \text{channel_multiplier}, H_{out}, W_{out})`. 
@@ -610,15 +626,33 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): group=1): """init DepthwiseConv2dNative""" validator.check_pad_value_by_mode(self.__class__.__name__, pad_mode, pad) - validator.check_type("kernel_size", kernel_size, (int, tuple)) + self.kernel_size = validator.check_type('kernel_size', kernel_size, (int, tuple)) if isinstance(kernel_size, int): - kernel_size = (kernel_size, kernel_size) - if isinstance(kernel_size, tuple) and (len(kernel_size) != 2 or - (not isinstance(kernel_size[0], int)) or - (not isinstance(kernel_size[1], int)) or - kernel_size[0] < 1 or kernel_size[1] < 1): - raise ValueError(f"Attr kernel_size of DepthwiseConv2dNative Op not passed " - f"{kernel_size}, should be a int or tuple and equal to or greater than 1.") + self.kernel_size = (kernel_size, kernel_size) + if len(self.kernel_size) != 2 or (not isinstance(self.kernel_size[0], int)) or \ + (not isinstance(self.kernel_size[1], int)) or \ + self.kernel_size[0] < 1 or self.kernel_size[1] < 1: + raise ValueError(f"The \'kernel_size\' of \'DepthwiseConv2dNative\' should be an positive int number or " + f"a tuple of two positive int numbers, but got {kernel_size}") + self.stride = validator.check_type('stride', stride, (int, tuple)) + if isinstance(stride, int): + self.stride = (stride, stride) + if len(self.stride) != 2 or (not isinstance(self.stride[0], int)) or \ + (not isinstance(self.stride[1], int)) or \ + self.stride[0] < 1 or self.stride[1] < 1: + raise ValueError(f"The \'stride\' of \'DepthwiseConv2dNative\' should be an positive int number or " + f"a tuple of two positive int numbers, but got {stride}") + self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1])) + self.dilation = validator.check_type('dilation', dilation, (tuple, int)) + if isinstance(dilation, int): + self.dilation = (dilation, dilation) + if len(self.dilation) != 2 or (not isinstance(self.dilation[0], int)) or \ + (not isinstance(self.dilation[1], int)) or \ + self.dilation[0] < 1 or 
self.dilation[1] < 1: + raise ValueError(f"The \'dilation\' of \'DepthwiseConv2dNative\' should be an positive int number or " + f"a tuple of two or four positive int numbers, but got {dilation}") + self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1])) + validator.equal('type of pad', type(pad), 'not bool', not isinstance(pad, bool)) if pad_mode not in ("same", "valid", "pad"): raise ValueError(f"Attr pad_mode of DepthwiseConv2dNative Op not passed" f"{pad_mode} not in valid, same, pad.") @@ -627,9 +661,6 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): self.add_prim_attr('data_format', "NCHW") self.channel_multiplier = validator.check_integer("channel_multiplier", channel_multiplier, 0, Rel.GT) self.group = validator.check_integer("group", group, 0, Rel.GT) - self.dilation = validator.check_integer("dilation", dilation, 1, Rel.GE) - self.kernel_size = validator.check_value_on_integer("kernel_size", kernel_size, 1, Rel.GE) - self.stride = validator.check_integer("stride", stride, 1, Rel.GE) self.pad = pad def infer_shape(self, x_shape, w_shape): @@ -640,29 +671,33 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): kernel_size_h = w_shape[2] kernel_size_w = w_shape[3] + stride_h = self.stride[2] + stride_w = self.stride[3] + dilation_h = self.dilation[2] + dilation_w = self.dilation[3] if self.pad_mode == "valid": - h_out = math.ceil((x_shape[2] - self.dilation * (kernel_size_h - 1)) / self.stride) - w_out = math.ceil((x_shape[3] - self.dilation * (kernel_size_w - 1)) / self.stride) + h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h) + w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w) pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0 elif self.pad_mode == "same": - h_out = math.ceil(x_shape[2] / self.stride) - w_out = math.ceil(x_shape[3] / self.stride) + h_out = math.ceil(x_shape[2] / stride_h) + w_out = math.ceil(x_shape[3] / stride_w) - pad_needed_h = max(0, (h_out - 1) * 
self.stride + self.dilation * (kernel_size_h - 1) + 1 - x_shape[2]) + pad_needed_h = max(0, (h_out - 1) * stride_h+ dilation_h * (kernel_size_h - 1) + 1 - x_shape[2]) pad_top = math.floor(pad_needed_h / 2) pad_bottom = pad_needed_h - pad_top - pad_needed_w = max(0, (w_out - 1) * self.stride + self.dilation * (kernel_size_w - 1) + 1 - x_shape[3]) + pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3]) pad_left = math.floor(pad_needed_w / 2) pad_right = pad_needed_w - pad_left elif self.pad_mode == 'pad': pad_top, pad_bottom, pad_left, pad_right = self.pad, self.pad, self.pad, self.pad - h_out = 1 + (x_shape[2] + 2 * self.pad - kernel_size_h - (kernel_size_h - 1) * (self.dilation - 1)) \ - / self.stride - w_out = 1 + (x_shape[3] + 2 * self.pad - kernel_size_w - (kernel_size_w - 1) * (self.dilation - 1)) \ - / self.stride + h_out = 1 + (x_shape[2] + 2 * self.pad - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \ + / stride_h + w_out = 1 + (x_shape[3] + 2 * self.pad - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \ + / stride_w h_out = math.floor(h_out) w_out = math.floor(w_out) else: @@ -715,7 +750,7 @@ class _Pool(PrimitiveWithInfer): (not isinstance(ksize[1], int)) or ksize[0] <= 0 or ksize[1] <= 0): - raise ValueError(f"The 'ksize' passed to operator {self.name} should be an positive int number or" + raise ValueError(f"The 'ksize' passed to operator {self.name} should be an positive int number or " f"a tuple of two positive int numbers, but got {ksize}") self.ksize = (1, 1, ksize[0], ksize[1]) if self.is_maxpoolwithargmax: @@ -731,7 +766,7 @@ class _Pool(PrimitiveWithInfer): (not isinstance(strides[1], int)) or strides[0] <= 0 or strides[1] <= 0): - raise ValueError(f"The 'strides' passed to operator {self.name} should be an positive int number or" + raise ValueError(f"The 'strides' passed to operator {self.name} should be an positive int number or " f"a tuple of two positive int numbers, but got 
{strides}") self.strides = (1, 1, strides[0], strides[1]) if self.is_maxpoolwithargmax: @@ -853,7 +888,6 @@ class MaxPoolWithArgmax(_Pool): - **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, H_{out}, W_{out})`. - **mask** (Tensor) - Max values' index represented by the mask. """ - def __init__(self, ksize=1, strides=1, padding="valid"): super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding) self.is_tbe = context.get_context("device_target") == "Ascend" @@ -944,8 +978,8 @@ class Conv2DBackpropInput(PrimitiveWithInfer): pad (int): The pad value to fill. Default: 0. mode (int): 0 Math convolutiuon, 1 cross-correlation convolution , 2 deconvolution, 3 depthwise convolution. Default: 1. - stride (int): The stride to apply conv filter. Default: 1. - dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1. + stride (Union[int. tuple[int]]): The stride to apply conv filter. Default: 1. + dilation (Union[int. tuple[int]]): Specifies the dilation rate to use for dilated convolution. Default: 1. group (int): Splits input into groups. Default: 1. 
Returns: @@ -967,25 +1001,41 @@ class Conv2DBackpropInput(PrimitiveWithInfer): self.init_prim_io_names(inputs=['out_backprop', 'filter', 'input_sizes'], outputs=['output']) self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT) self.kernel_size = validator.check_type('kernel_size', kernel_size, (int, tuple)) - if isinstance(self.kernel_size, int): - if kernel_size < 1: - raise ValueError('Attr \'kernel_size\' of \'Conv2DBackpropInput\' Op passed ' - + str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.') - self.kernel_size = (self.kernel_size, self.kernel_size) - elif isinstance(kernel_size, tuple) and (len(kernel_size) != 2 or - (not isinstance(kernel_size[0], int)) or - (not isinstance(kernel_size[1], int)) or - kernel_size[0] < 1 or kernel_size[1] < 1): - raise ValueError('Attr \'kernel_size\' of \'Conv2DBackpropInput\' Op passed ' - + str(self.kernel_size) + ', should be a int or tuple and equal to or greater than 1.') + if isinstance(kernel_size, int): + self.kernel_size = (kernel_size, kernel_size) + if len(self.kernel_size) != 2 or (not isinstance(self.kernel_size[0], int)) or \ + (not isinstance(self.kernel_size[1], int)) or \ + self.kernel_size[0] < 1 or self.kernel_size[1] < 1: + raise ValueError(f"The \'kernel_size\' of \'Conv2DBackpropInput\' should be an positive int number or " + f"a tuple of two positive int numbers, but got {kernel_size}") + self.stride = validator.check_type('stride', stride, (int, tuple)) + if isinstance(stride, int): + self.stride = (stride, stride) + elif isinstance(stride, tuple) and len(stride) == 4: + self.stride = (stride[2], stride[3]) + if len(self.stride) != 2 or (not isinstance(self.stride[0], int)) or (not isinstance(self.stride[1], int)) or \ + self.stride[0] < 1 or self.stride[1] < 1: + raise ValueError(f"The \'stride\' of \'Conv2DBackpropInput\' should be an positive int number or " + f"a tuple of two or four positive int numbers, but got {stride}") + 
self.add_prim_attr('stride', self.stride) + self.dilation = validator.check_type('dilation', dilation, (tuple, int)) + if isinstance(dilation, int): + self.dilation = (1, 1, dilation, dilation) + elif len(dilation) == 2: + self.dilation = (1, 1, dilation[0], dilation[1]) + if len(self.dilation) != 4 or (not isinstance(self.dilation[0], int) or self.dilation[0] < 1) or \ + (not isinstance(self.dilation[1], int) or self.dilation[1] < 1) or \ + (not isinstance(self.dilation[2], int) or self.dilation[2] < 1) or \ + (not isinstance(self.dilation[3], int) or self.dilation[3] < 1): + raise ValueError(f"The \'dilation\' of \'Conv2DBackpropInput\' should be an positive int number or " + f"a tuple of two or four positive int numbers, but got {dilation}") + self.add_prim_attr('dilation', self.dilation) validator.equal('type of pad', type(pad), 'not bool', not isinstance(pad, bool)) validator.equal('type of pad', type(pad), 'int', isinstance(pad, int)) self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad']) self.pad = validator.check_pad_value_by_mode(self.__class__.__name__, pad_mode, pad) self.mode = validator.check_integer('mode', mode, 1, Rel.EQ) self.group = validator.check_integer('group', group, 0, Rel.GT) - self.dilation = validator.check_integer('dilation', dilation, 1, Rel.GE) - self.stride = validator.check_integer('stride', stride, 1, Rel.GE) pad_mode = pad_mode.upper() self.add_prim_attr('pad_mode', pad_mode) self.add_prim_attr('data_format', "NCHW") @@ -1004,16 +1054,18 @@ class Conv2DBackpropInput(PrimitiveWithInfer): dout_shape = doutput['shape'] kernel_h = self.kernel_size[0] kernel_w = self.kernel_size[1] + stride_h = self.stride[0] + stride_w = self.stride[1] # default pad mode is valid pad_list = (0, 0, 0, 0) if self.pad_list: pad_list = tuple(self.pad_list) elif self.pad_mode == "SAME": - pad_needed_h = max(0, (dout_shape[2] - 1) * self.stride + kernel_h - x_size_v[2]) + pad_needed_h = max(0, (dout_shape[2] - 1) * stride_h + 
kernel_h - x_size_v[2]) pad_top = math.floor(pad_needed_h / 2) pad_bottom = pad_needed_h - pad_top - pad_needed_w = max(0, (dout_shape[3] - 1) * self.stride + kernel_w - x_size_v[3]) + pad_needed_w = max(0, (dout_shape[3] - 1) * stride_w + kernel_w - x_size_v[3]) pad_left = math.floor(pad_needed_w / 2) pad_right = pad_needed_w - pad_left pad_list = (pad_top, pad_bottom, pad_left, pad_right) diff --git a/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py b/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py index 75ca915499..c2f8422e30 100644 --- a/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py +++ b/tests/st/ops/cpu/test_conv2d_backprop_filter_op.py @@ -35,8 +35,8 @@ class Net4(nn.Cell): pad_mode="valid", pad=0, mode=1, - stride=1, - dilation=1, + stride=(1, 1), + dilation=(1, 1, 1, 1), group=1) self.w = Parameter(initializer(Tensor(np.array([[[[1, 0, -1], [1, 0, -1], [1, 0, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='w') self.x = Parameter(initializer(Tensor(np.array([[[ diff --git a/tests/st/ops/gpu/test_conv2d_backprop_filter_op.py b/tests/st/ops/gpu/test_conv2d_backprop_filter_op.py index 6e2e76cd47..0f66f2fac5 100644 --- a/tests/st/ops/gpu/test_conv2d_backprop_filter_op.py +++ b/tests/st/ops/gpu/test_conv2d_backprop_filter_op.py @@ -35,8 +35,8 @@ class Conv2dFilter(nn.Cell): pad_mode="valid", pad=0, mode=1, - stride=1, - dilation=1, + stride=(1, 1), + dilation=(1, 1, 1, 1), group=1) self.get_shape = P.Shape() diff --git a/tests/ut/cpp/python_input/gtest_input/pynative/ops_test.py b/tests/ut/cpp/python_input/gtest_input/pynative/ops_test.py index 46c6fdd1cf..c7de09dbcb 100644 --- a/tests/ut/cpp/python_input/gtest_input/pynative/ops_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pynative/ops_test.py @@ -21,17 +21,17 @@ from mindspore.common.tensor import Tensor def im2col(img, filter_h, filter_w, stride=1, pad=0, dilation=1): """Rearranges an image to row vector""" batch_num, channel, height, width = img.shape - out_h = (height + 2*pad - filter_h - 
(filter_h - 1) * (dilation - 1))//stride + 1 - out_w = (width + 2*pad - filter_w - (filter_w - 1) * (dilation - 1))//stride + 1 + out_h = (height + 2*pad - filter_h - (filter_h - 1) * (dilation[2] - 1))//stride[2] + 1 + out_w = (width + 2*pad - filter_w - (filter_w - 1) * (dilation[3] - 1))//stride[3] + 1 img = np.pad(img, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant') col = np.zeros((batch_num, channel, filter_h, filter_w, out_h, out_w)).astype(img.dtype) for y in range(filter_h): - y_max = y + stride*out_h + y_max = y + stride[2]*out_h for x in range(filter_w): - x_max = x + stride*out_w - col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride] + x_max = x + stride[2]*out_w + col[:, :, y, x, :, :] = img[:, :, y:y_max:stride[2], x:x_max:stride[2]] col = col.transpose(0, 4, 5, 1, 2, 3).reshape(batch_num*out_h*out_w, -1) return col @@ -42,8 +42,8 @@ def conv2d(x, weight, bias=None, stride=1, pad=0, """Convolution 2D""" batch_num, _, x_h, x_w = x.shape filter_num, _, filter_h, filter_w = weight.shape - out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) * (dilation - 1)) / stride) - out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) * (dilation - 1)) / stride) + out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) * (dilation[2] - 1)) / stride[2]) + out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) * (dilation[3] - 1)) / stride[3]) col = im2col(x, filter_h, filter_w, stride, pad, dilation) col_w = np.reshape(weight, (filter_num, -1)).T out = np.dot(col, col_w) diff --git a/tests/vm_impl/vm_me.py b/tests/vm_impl/vm_me.py index ba51a3b13b..a189aa8b90 100644 --- a/tests/vm_impl/vm_me.py +++ b/tests/vm_impl/vm_me.py @@ -155,23 +155,35 @@ def batch_norm_grad(dy, x, scale, save_mean, save_inv_variance): def col2im(col, input_shape, filter_h, filter_w, stride=1, pad=0): """Rearranges a row vector to an image.""" - validator.check_integer("stride", stride, 0, Rel.GT) + if isinstance(stride, int): + stride_h = stride + stride_w = 
stride + elif isinstance(stride, tuple) and len(stride) == 2: + stride_h = stride[0] + stride_w = stride[1] + elif isinstance(stride, tuple) and len(stride) == 3: + stride_h = stride[2] + stride_w = stride[3] + else: + raise ValueError(f"The \'stride\' should be an int number or " + f"a tuple of two or four int numbers, but got {stride}") + batch_num, channel, height, width = input_shape - out_h = (height + 2*pad - filter_h)//stride + 1 - out_w = (width + 2*pad - filter_w)//stride + 1 + out_h = (height + 2*pad - filter_h)//stride_h + 1 + out_w = (width + 2*pad - filter_w)//stride_w + 1 col = col.reshape(batch_num, out_h, out_w, channel, filter_h, filter_w) \ .transpose(0, 3, 4, 5, 1, 2) img = np.zeros((batch_num, channel, - height + 2*pad + stride - 1, - width + 2*pad + stride - 1)) \ + height + 2*pad + stride_h - 1, + width + 2*pad + stride_w - 1)) \ .astype(col.dtype) for y in range(filter_h): - y_max = y + stride*out_h + y_max = y + stride_h*out_h for x in range(filter_w): - x_max = x + stride*out_w - img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :] + x_max = x + stride_h*out_w + img[:, :, y:y_max:stride_h, x:x_max:stride_h] += col[:, :, y, x, :, :] return img[:, :, pad:height + pad, pad:width + pad] @@ -205,11 +217,35 @@ def conv2d(x, weight, bias=None, stride=1, pad=0, dilation=1, groups=1, padding_mode='zeros'): """Convolution 2D.""" # pylint: disable=unused-argument - validator.check_integer("stride", stride, 0, Rel.GT) + validator.check_type('stride', stride, (int, tuple)) + if isinstance(stride, int): + stride = (stride, stride) + elif len(stride) == 4: + stride = (stride[2], stride[3]) + if len(stride) != 2 or (not isinstance(stride[0], int)) or \ + (not isinstance(stride[1], int)) or \ + stride[0] < 1 or stride[1] < 1: + raise ValueError(f"The \'stride\' of \'conv2d\' should be an positive int number or " + f"a tuple of two positive int numbers, but got {stride}") + stride_h = stride[0] + stride_w = stride[1] + 
validator.check_type('dilation', dilation, (int, tuple)) + if isinstance(dilation, int): + dilation = (dilation, dilation) + elif len(dilation) == 4: + dilation = (dilation[2], dilation[3]) + if len(dilation) != 2 or (not isinstance(dilation[0], int)) or \ + (not isinstance(dilation[1], int)) or \ + dilation[0] < 1 or dilation[1] < 1: + raise ValueError(f"The \'dilation\' of \'conv2d\' should be an positive int number or " + f"a tuple of two positive int numbers, but got {dilation}") + dilation_h = dilation[0] + dilation_w = dilation[1] + batch_num, _, x_h, x_w = x.shape filter_num, _, filter_h, filter_w = weight.shape - out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) * (dilation - 1)) / stride) - out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) * (dilation - 1)) / stride) + out_h = 1 + int((x_h + 2 * pad - filter_h - (filter_h - 1) * (dilation_h - 1)) / stride_h) + out_w = 1 + int((x_w + 2 * pad - filter_w - (filter_w - 1) * (dilation_w - 1)) / stride_w) col = im2col(x, filter_h, filter_w, stride, pad, dilation) col_w = np.reshape(weight, (filter_num, -1)).T out = np.dot(col, col_w) @@ -286,19 +322,43 @@ def flatten_grad(dout, x): def im2col(img, filter_h, filter_w, stride=1, pad=0, dilation=1): """Rearranges an image to row vector.""" - validator.check_integer("stride", stride, 0, Rel.GT) + if isinstance(stride, int): + stride_h = stride + stride_w = stride + elif isinstance(stride, tuple) and len(stride) == 2: + stride_h = stride[0] + stride_w = stride[1] + elif isinstance(stride, tuple) and len(stride) == 3: + stride_h = stride[2] + stride_w = stride[3] + else: + raise ValueError(f"The \'stride\' should be an int number or " + f"a tuple of two or four int numbers, but got {stride}") + if isinstance(dilation, int): + dilation_h = dilation + dilation_w = dilation + elif isinstance(dilation, tuple) and len(dilation) == 2: + dilation_h = dilation[0] + dilation_w = dilation[1] + elif isinstance(dilation, tuple) and len(dilation) == 3: + 
dilation_h = dilation[2] + dilation_w = dilation[3] + else: + raise ValueError(f"The \'dilation\' should be an int number or " + f"a tuple of two or four int numbers, but got {dilation}") + batch_num, channel, height, width = img.shape - out_h = (height + 2*pad - filter_h- (filter_h - 1) * (dilation - 1))//stride + 1 - out_w = (width + 2*pad - filter_w- (filter_w - 1) * (dilation - 1))//stride + 1 + out_h = (height + 2*pad - filter_h- (filter_h - 1) * (dilation_h - 1))//stride_h + 1 + out_w = (width + 2*pad - filter_w- (filter_w - 1) * (dilation_w - 1))//stride_w + 1 img = np.pad(img, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant') col = np.zeros((batch_num, channel, filter_h, filter_w, out_h, out_w)).astype(img.dtype) for y in range(filter_h): - y_max = y + stride*out_h + y_max = y + stride_h*out_h for x in range(filter_w): - x_max = x + stride*out_w - col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride] + x_max = x + stride_h*out_w + col[:, :, y, x, :, :] = img[:, :, y:y_max:stride_h, x:x_max:stride_h] col = col.transpose(0, 4, 5, 1, 2, 3).reshape(batch_num*out_h*out_w, -1) return col From 652ab6c386b8afb54e542f6ba1d9865e5fb955d9 Mon Sep 17 00:00:00 2001 From: chenzomi Date: Mon, 13 Apr 2020 15:39:51 +0800 Subject: [PATCH 211/367] add test case for aware quantizaiton --- .../gpu/quant/batchnorm_fold2_gpu_kernel.h | 14 +-- .../quant/batchnorm_fold2_grad_gpu_kernel.h | 14 +-- .../gpu/quant/batchnorm_fold_gpu_kernel.h | 14 +-- .../quant/batchnorm_fold_grad_gpu_kernel.h | 14 ++- .../gpu/quant/correction_mul_gpu_kernel.h | 14 +-- .../quant/correction_mul_grad_gpu_kernel.h | 16 +-- mindspore/nn/layer/activation.py | 2 +- mindspore/ops/operations/nn_ops.py | 2 +- tests/st/ops/gpu/test_batchnorm_fold2_op.py | 89 ++++++++++++++ .../st/ops/gpu/test_batchnorm_fold_grad_op.py | 96 +++++++++++++++ tests/st/ops/gpu/test_batchnorm_fold_op.py | 116 ++++++++++++++++++ tests/st/ops/gpu/test_conv2d_op.py | 2 +- .../st/ops/gpu/test_correction_mul_grad_op.py | 55 
+++++++++ tests/st/ops/gpu/test_correction_mul_op.py | 52 ++++++++ 14 files changed, 456 insertions(+), 44 deletions(-) create mode 100644 tests/st/ops/gpu/test_batchnorm_fold2_op.py create mode 100644 tests/st/ops/gpu/test_batchnorm_fold_grad_op.py create mode 100644 tests/st/ops/gpu/test_batchnorm_fold_op.py create mode 100644 tests/st/ops/gpu/test_correction_mul_grad_op.py create mode 100644 tests/st/ops/gpu/test_correction_mul_op.py diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h index ada4eabd86..3e246f18f6 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h @@ -38,14 +38,14 @@ class BatchNormFold2GpuKernel : public GpuKernel { ~BatchNormFold2GpuKernel() override { DestroyResource(); } - const std::vector &GetInputSizeList() const { return input_size_list_; } + const std::vector &GetInputSizeList() const override { return input_size_list_; } - const std::vector &GetOutputSizeList() const { return output_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } - const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, uintptr_t stream_ptr) { + const std::vector &outputs, uintptr_t stream_ptr) override { if (is_null_input_) { return true; } @@ -66,7 +66,7 @@ class BatchNormFold2GpuKernel : public GpuKernel { return true; } - bool Init(const CNodePtr &kernel_node) { + bool Init(const CNodePtr &kernel_node) override { InitResource(); size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); @@ -98,9 +98,9 @@ class BatchNormFold2GpuKernel : public GpuKernel { } protected: - void InitResource() { cudnn_handle_ = 
device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); } + void InitResource() override { cudnn_handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); } - void InitSizeLists() { + void InitSizeLists() override { size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); size_t weight_size = channel_ * sizeof(T); input_size_list_.push_back(input_size); diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.h index ef9611f258..099960e7fa 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.h @@ -38,14 +38,14 @@ class BatchNormFold2GradGpuKernel : public GpuKernel { ~BatchNormFold2GradGpuKernel() override { DestroyResource(); } - const std::vector &GetInputSizeList() const { return input_size_list_; } + const std::vector &GetInputSizeList() const override { return input_size_list_; } - const std::vector &GetOutputSizeList() const { return output_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } - const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, uintptr_t stream_ptr) { + const std::vector &outputs, uintptr_t stream_ptr) override { if (is_null_input_) { return true; } @@ -88,7 +88,7 @@ class BatchNormFold2GradGpuKernel : public GpuKernel { return true; } - bool Init(const CNodePtr &kernel_node) { + bool Init(const CNodePtr &kernel_node) override { InitResource(); size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); @@ -120,9 +120,9 @@ class BatchNormFold2GradGpuKernel : public GpuKernel { } protected: - void InitResource() { cudnn_handle_ = 
device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); } + void InitResource() override { cudnn_handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); } - void InitSizeLists() { + void InitSizeLists() override { size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); size_t weight_size = channel_ * sizeof(T); size_t workspace_size = batch_size_ * channel_ * sizeof(T); diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.h index e90fac2792..3e8c1ca52b 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.h @@ -46,14 +46,14 @@ class BatchNormFoldGpuKernel : public GpuKernel { ~BatchNormFoldGpuKernel() override { DestroyResource(); } - const std::vector &GetInputSizeList() const { return input_size_list_; } + const std::vector &GetInputSizeList() const override { return input_size_list_; } - const std::vector &GetOutputSizeList() const { return output_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } - const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, uintptr_t stream_ptr) { + const std::vector &outputs, uintptr_t stream_ptr) override { (void)workspace; auto x = reinterpret_cast(inputs[0]->addr); auto mean = reinterpret_cast(inputs[1]->addr); @@ -104,7 +104,7 @@ class BatchNormFoldGpuKernel : public GpuKernel { return true; } - bool Init(const CNodePtr &kernel_node) { + bool Init(const CNodePtr &kernel_node) override { InitResource(); size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); if (input_num != 4) { @@ -152,7 +152,7 @@ class BatchNormFoldGpuKernel : public GpuKernel { } 
protected: - void InitSizeLists() { + void InitSizeLists() override { // x, mean, variance, current_step input_size_list_.push_back(input_size_); input_size_list_.push_back(output_size_); @@ -169,7 +169,7 @@ class BatchNormFoldGpuKernel : public GpuKernel { workspace_size_list_.push_back(input_size_); } - void InitResource() { + void InitResource() override { handle_ = device::gpu::GPUDeviceManager::GetInstance().GetCudnnHandle(); CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&x_desc_), "Create x desc failed"); CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreateTensorDescriptor(&scale_bias_mean_var_desc_), "Create para desc failed"); diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.h index 830f6dc243..ec845fbb9e 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_grad_gpu_kernel.h @@ -42,11 +42,12 @@ class BatchNormFoldGradGpuKernel : public GpuKernel { width_(0) {} ~BatchNormFoldGradGpuKernel() = default; - const std::vector &GetInputSizeList() const { return input_size_list_; } - const std::vector &GetOutputSizeList() const { return output_size_list_; } - const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + const std::vector &GetInputSizeList() const override { return input_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } + bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, uintptr_t stream_ptr) { + const std::vector &outputs, uintptr_t stream_ptr) override { (void)workspace; // 'd_batch_mean', 'd_batch_std', 'x', 'batch_mean', 'batch_std', 'current_step' T *d_batch_mean = GetDeviceAddress(inputs, 0); @@ -92,7 +93,8 @@ class BatchNormFoldGradGpuKernel : public GpuKernel { 
reinterpret_cast(stream_ptr)); return true; } - bool Init(const CNodePtr &kernel_node) { + + bool Init(const CNodePtr &kernel_node) override { size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); if (input_num != 6) { MS_LOG(ERROR) << "Input number is " << input_num << ", but BatchNormFoldGrad GpuKernel OP needs 6 input."; @@ -128,7 +130,7 @@ class BatchNormFoldGradGpuKernel : public GpuKernel { } protected: - void InitSizeLists() { + void InitSizeLists() override { // 'd_batch_mean', 'd_batch_std', 'x', 'batch_mean', 'batch_std', 'current_step' input_size_list_.push_back(channel_size_); input_size_list_.push_back(channel_size_); diff --git a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h index af23d7732a..7608ae5d3c 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h @@ -30,11 +30,11 @@ class CorrectionMulGpuKernel : public GpuKernel { CorrectionMulGpuKernel() : batch_size_(0), channel_(0), height_(0), width_(0) {} ~CorrectionMulGpuKernel() override { DestroyResource(); } - const std::vector &GetInputSizeList() const { return input_size_list_; } - const std::vector &GetOutputSizeList() const { return output_size_list_; } - const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + const std::vector &GetInputSizeList() const override { return input_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, uintptr_t stream_ptr) { + const std::vector &outputs, uintptr_t stream_ptr) override { auto *weight = GetDeviceAddress(inputs, 0); auto *gamma = GetDeviceAddress(inputs, 1); auto *running_std = GetDeviceAddress(inputs, 2); @@ -44,7 +44,7 @@ 
class CorrectionMulGpuKernel : public GpuKernel { reinterpret_cast(stream_ptr)); return true; } - bool Init(const CNodePtr &kernel_node) { + bool Init(const CNodePtr &kernel_node) override { InitResource(); size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); @@ -69,7 +69,7 @@ class CorrectionMulGpuKernel : public GpuKernel { } protected: - void InitSizeLists() { + void InitSizeLists() override { size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); size_t weight_size = batch_size_ * sizeof(T); input_size_list_.push_back(input_size); // weight @@ -79,7 +79,7 @@ class CorrectionMulGpuKernel : public GpuKernel { output_size_list_.push_back(input_size); workspace_size_list_.push_back(workspace_size); } - void InitResource() {} + void InitResource() override {} private: void DestroyResource() noexcept {} diff --git a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h index f20c6278c0..2439826cc3 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h @@ -30,11 +30,12 @@ class CorrectionMulGradGpuKernel : public GpuKernel { CorrectionMulGradGpuKernel() : batch_size_(0), channel_(0), height_(0), width_(0) {} ~CorrectionMulGradGpuKernel() override { DestroyResource(); } - const std::vector &GetInputSizeList() const { return input_size_list_; } - const std::vector &GetOutputSizeList() const { return output_size_list_; } - const std::vector &GetWorkspaceSizeList() const { return workspace_size_list_; } + const std::vector &GetInputSizeList() const override { return input_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } + bool Launch(const std::vector &inputs, const std::vector &workspace, - const std::vector &outputs, uintptr_t 
stream_ptr) { + const std::vector &outputs, uintptr_t stream_ptr) override { auto *d_out = GetDeviceAddress(inputs, 0); auto *weight = GetDeviceAddress(inputs, 1); auto *gamma = GetDeviceAddress(inputs, 2); @@ -49,7 +50,8 @@ class CorrectionMulGradGpuKernel : public GpuKernel { reinterpret_cast(stream_ptr)); return true; } - bool Init(const CNodePtr &kernel_node) { + + bool Init(const CNodePtr &kernel_node) override { InitResource(); size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); @@ -74,7 +76,7 @@ class CorrectionMulGradGpuKernel : public GpuKernel { } protected: - void InitSizeLists() { + void InitSizeLists() override { size_t input_size = batch_size_ * channel_ * height_ * width_ * sizeof(T); size_t weight_size = batch_size_ * sizeof(T); input_size_list_.push_back(input_size); // d_out @@ -85,7 +87,7 @@ class CorrectionMulGradGpuKernel : public GpuKernel { output_size_list_.push_back(weight_size); // d_gamma workspace_size_list_.push_back(input_size); // tmp d_out * weight } - void InitResource() {} + void InitResource() override {} private: void DestroyResource() noexcept {} diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py index 12d6c74dcd..6485e27228 100644 --- a/mindspore/nn/layer/activation.py +++ b/mindspore/nn/layer/activation.py @@ -369,7 +369,7 @@ class HSigmoid(Cell): Hard sigmoid is defined as: .. math:: - \text{hsigmoid}(x_{i}) = max(0, min(1, \ftac{2 * x_{i} + 5}{10})), + \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{2 * x_{i} + 5}{10})), where :math:`x_{i}` is the :math:`i`-th slice along the given dim of the input Tensor. diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index e82f59a05d..0dab6b28a6 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -319,7 +319,7 @@ class HSigmoid(PrimitiveWithInfer): Hard sigmoid is defined as: .. 
math:: - \text{hsigmoid}(x_{i}) = max(0, min(1, \ftac{2 * x_{i} + 5}{10})), + \text{hsigmoid}(x_{i}) = max(0, min(1, \frac{2 * x_{i} + 5}{10})), where :math:`x_{i}` is the :math:`i`-th slice along the given dim of the input Tensor. diff --git a/tests/st/ops/gpu/test_batchnorm_fold2_op.py b/tests/st/ops/gpu/test_batchnorm_fold2_op.py new file mode 100644 index 0000000000..0440e92a8d --- /dev/null +++ b/tests/st/ops/gpu/test_batchnorm_fold2_op.py @@ -0,0 +1,89 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +import numpy as np +import pytest +from mindspore import Tensor +from mindspore.ops import operations as P +import mindspore.nn as nn +from mindspore.common.api import ms_function +import mindspore.context as context + +context.set_context(device_target='GPU') + + +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.op = P.BatchNormFold2(100000) + + @ms_function + def construct(self, x, beta, gamma, batch_std, batch_mean, running_std, running_mean, current_step): + return self.op(x, beta, gamma, batch_std, batch_mean, running_std, running_mean, current_step) + + +class Net_gnd(nn.Cell): + def __init__(self): + super(Net_gnd, self).__init__() + self.conv_mul = P.ConvMul(freeze_bn=100000) + self.correct_add = P.CorrectionAdd(freeze_bn=100000) + self.add_fold = P.AddFold() + + @ms_function + def construct(self, x, beta, gamma, batch_std, batch_mean, running_std, running_mean, current_step): + out = self.conv_mul(x, batch_std, running_std, current_step) + out = self.correct_add(out, gamma, batch_std, batch_mean, + running_std, running_mean, current_step) + out = self.add_fold(out, beta, gamma, batch_std, batch_mean) + return out + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_batchnrom_fold2(): + net = Net() + c = 64 + freeze_bn = 100000 + x = np.random.uniform(-1, 1, size=[3, c, 32, 32]).astype('float32') + beta = np.random.uniform(1, 2, size=[c]).astype('float32') + gamma = np.random.uniform(1, 2, size=[c]).astype('float32') + batch_std = np.random.uniform(1, 2, size=[c]).astype('float32') + batch_mean = np.random.uniform(1, 2, size=[c]).astype('float32') + running_std = np.random.uniform(1, 2, size=[c]).astype('float32') + running_mean = np.random.uniform(1, 2, size=[c]).astype('float32') + current_step = np.array([0]).astype('int32') + output = net(Tensor(x), Tensor(beta), Tensor(gamma), 
Tensor(batch_std), Tensor(batch_mean), + Tensor(running_std), Tensor(running_mean), Tensor(current_step)) + expect = (x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1, + 1) if current_step >= freeze_bn else + x * (running_std / batch_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1, + 1)) + error = np.ones(shape=expect.shape) * 1.0e-6 + diff = output.asnumpy() - expect + assert np.all(diff < error) + assert np.all(diff > error * -1) + + current_step = np.array([100000]).astype('int32') + output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean), Tensor(running_std), + Tensor(running_mean), Tensor(current_step)) + expect = (x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1, + 1) if current_step >= freeze_bn else + x * (batch_std / running_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1, + 1)) + error = np.ones(shape=expect.shape) * 1.0e-6 + diff = output.asnumpy() - expect + assert np.all(diff < error) + assert np.all(diff > error * -1) diff --git a/tests/st/ops/gpu/test_batchnorm_fold_grad_op.py b/tests/st/ops/gpu/test_batchnorm_fold_grad_op.py new file mode 100644 index 0000000000..8e55f6a473 --- /dev/null +++ b/tests/st/ops/gpu/test_batchnorm_fold_grad_op.py @@ -0,0 +1,96 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +import numpy as np +import pytest +from mindspore import Tensor +from mindspore.ops import operations as P +import mindspore.nn as nn +from mindspore.common.api import ms_function +import mindspore.context as context + +context.set_context(device_target='GPU') + + +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.op = P.BatchNormFoldGrad(freeze_bn=10) + + @ms_function + def construct(self, d_batch_mean, d_batch_std, x, batch_mean, batch_std, current_step): + dx = self.op(d_batch_mean, d_batch_std, x, batch_mean, batch_std, current_step) + return dx + + +def np_result(d_batch_mean, d_batch_std, x, batch_mean, batch_std): + n = x.shape[0] * x.shape[2] * x.shape[3] + dx = d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) * ( + x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n + return dx + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_batchnorm_fold_grad1(): + net = Net() + c = 64 + x = np.random.uniform(1, 10, size=[3, c, 32, 32]).astype('float32') + d_batch_mean = np.random.uniform(1, 10, size=[c]).astype('float32') + d_batch_std = np.random.uniform(1, 10, size=[c]).astype('float32') + batch_mean = np.random.uniform(1, 10, size=[c]).astype('float32') + batch_std = np.random.uniform(1, 10, size=[c]).astype('float32') + current_step = np.array([0]).astype('int32') + dx = net(Tensor(d_batch_mean), Tensor(d_batch_std), Tensor(x), Tensor(batch_mean), Tensor(batch_std), + Tensor(current_step)) + expect = np_result(d_batch_mean, d_batch_std, x, batch_mean, batch_std) + assert np.allclose(dx.asnumpy(), expect, rtol=1.e-7, atol=1.e-7) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_batchnorm_fold_grad2(): + net = Net() + c = 64 + x = np.random.uniform(1, 10, size=[1, c, 256, 256]).astype('float32') + d_batch_mean 
= np.random.uniform(1, 10, size=[c]).astype('float32') + d_batch_std = np.random.uniform(1, 10, size=[c]).astype('float32') + batch_mean = np.random.uniform(1, 10, size=[c]).astype('float32') + batch_std = np.random.uniform(1, 10, size=[c]).astype('float32') + current_step = np.array([0]).astype('int32') + dx = net(Tensor(d_batch_mean), Tensor(d_batch_std), Tensor(x), Tensor(batch_mean), Tensor(batch_std), + Tensor(current_step)) + expect = np_result(d_batch_mean, d_batch_std, x, batch_mean, batch_std) + assert np.allclose(dx.asnumpy(), expect, rtol=1.e-7, atol=1.e-7) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_batchnorm_fold_grad_freeze(): + net = Net() + c = 64 + x = np.random.uniform(1, 10, size=[3, c, 32, 32]).astype('float32') + d_batch_mean = np.random.uniform(1, 10, size=[c]).astype('float32') + d_batch_std = np.random.uniform(1, 10, size=[c]).astype('float32') + batch_mean = np.random.uniform(1, 10, size=[c]).astype('float32') + batch_std = np.random.uniform(1, 10, size=[c]).astype('float32') + current_step = np.array([10]).astype('int32') + dx = net(Tensor(d_batch_mean), Tensor(d_batch_std), Tensor(x), Tensor(batch_mean), Tensor(batch_std), + Tensor(current_step)) + expect = np.zeros_like(x) + assert np.allclose(dx.asnumpy(), expect, rtol=1.e-7, atol=1.e-7) diff --git a/tests/st/ops/gpu/test_batchnorm_fold_op.py b/tests/st/ops/gpu/test_batchnorm_fold_op.py new file mode 100644 index 0000000000..c4abf152a6 --- /dev/null +++ b/tests/st/ops/gpu/test_batchnorm_fold_op.py @@ -0,0 +1,116 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import numpy as np +import pytest +from mindspore import Tensor +from mindspore.ops import operations as P +import mindspore.nn as nn +from mindspore.common.api import ms_function +import mindspore.context as context + +context.set_context(device_target='GPU') + + +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.op = P.BatchNormFold(freeze_bn=10) + + @ms_function + def construct(self, x, mean, variance, current_step): + a, b, c, d = self.op(x, mean, variance, current_step) + return a, b, c, d + + +def np_result(x, mean, var, momentum, epsilon): + np_mean = x.mean(axis=(0, 2, 3)) + np_var = x.var(axis=(0, 2, 3)) + n = x.shape[0] * x.shape[2] * x.shape[3] + mean_update = momentum * np_mean + (1 - momentum) * mean + var_update = momentum * np_var * n / (n - 1) + (1 - momentum) * var + np_var = np.sqrt(np_var + epsilon) + delay_mean = mean.copy() + delay_std = np.sqrt(var + epsilon) + return np_mean, np_var, mean_update, var_update, delay_mean, delay_std + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_batchnorm_fold(): + net = Net() + c = 64 + x = np.random.uniform(1, 10, size=[3, c, 32, 32]).astype('float32') + mean = np.random.uniform(1, 10, size=[c]).astype('float32') + variance = np.random.uniform(1, 10, size=[c]).astype('float32') + current_step = np.array([0]).astype('int32') + ms_mean = Tensor(mean) + ms_var = Tensor(variance) + batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), 
ms_mean, ms_var, + Tensor(current_step)) + + expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12) + assert np.allclose(batch_mean.asnumpy(), expect1, rtol=1.e-7, atol=1.e-5) + assert np.allclose(batch_var.asnumpy(), expect2, rtol=1.e-7, atol=1.e-5) + assert np.allclose(ms_mean.asnumpy(), expect3, rtol=1.e-7, atol=1.e-5) + assert np.allclose(ms_var.asnumpy(), expect4, rtol=1.e-7, atol=1.e-5) + assert np.allclose(delay_mean.asnumpy(), expect5, rtol=1.e-7, atol=1.e-5) + assert np.allclose(delay_std.asnumpy(), expect6, rtol=1.e-7, atol=1.e-5) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_batchnorm_fold2(): + net = Net() + c = 64 + x = np.random.uniform(1, 10, size=[3, c, 512, 512]).astype('float32') + mean = np.random.uniform(1, 10, size=[c]).astype('float32') + variance = np.random.uniform(1, 10, size=[c]).astype('float32') + current_step = np.array([0]).astype('int32') + ms_mean = Tensor(mean) + ms_var = Tensor(variance) + batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), ms_mean, ms_var, + Tensor(current_step)) + expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12) + assert np.allclose(batch_mean.asnumpy(), expect1, rtol=1.e-7, atol=1.e-5) + assert np.allclose(batch_var.asnumpy(), expect2, rtol=1.e-7, atol=1.e-5) + assert np.allclose(ms_mean.asnumpy(), expect3, rtol=1.e-7, atol=1.e-5) + assert np.allclose(delay_mean.asnumpy(), expect5, rtol=1.e-7, atol=1.e-5) + assert np.allclose(delay_std.asnumpy(), expect6, rtol=1.e-7, atol=1.e-5) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_batchnorm_fold_freeze(): + net = Net() + c = 64 + x = np.random.uniform(1, 10, size=[3, c, 32, 32]).astype('float32') + mean = np.random.uniform(1, 10, size=[c]).astype('float32') + variance = np.random.uniform(1, 10, size=[c]).astype('float32') + current_step = 
np.array([10]).astype('int32') + ms_mean = Tensor(mean) + ms_var = Tensor(variance) + batch_mean, batch_var, delay_mean, delay_std = net(Tensor(x), ms_mean, ms_var, + Tensor(current_step)) + expect1, expect2, expect3, expect4, expect5, expect6 = np_result(x, mean, variance, 0.9, 1e-12) + assert np.allclose(batch_mean.asnumpy(), np.zeros_like(mean), rtol=1.e-7, atol=1.e-5) + assert np.allclose(batch_var.asnumpy(), np.ones_like(mean), rtol=1.e-7, atol=1.e-5) + assert np.allclose(ms_mean.asnumpy(), mean, rtol=1.e-7, atol=1.e-5) + assert np.allclose(ms_var.asnumpy(), variance, rtol=1.e-7, atol=1.e-5) + assert np.allclose(delay_mean.asnumpy(), expect5, rtol=1.e-7, atol=1.e-5) + assert np.allclose(delay_std.asnumpy(), expect6, rtol=1.e-7, atol=1.e-5) diff --git a/tests/st/ops/gpu/test_conv2d_op.py b/tests/st/ops/gpu/test_conv2d_op.py index d724f6f6c8..1bac156c37 100644 --- a/tests/st/ops/gpu/test_conv2d_op.py +++ b/tests/st/ops/gpu/test_conv2d_op.py @@ -14,10 +14,10 @@ # ============================================================================ import pytest +import numpy as np from mindspore import Tensor from mindspore.ops import operations as P import mindspore.nn as nn -import numpy as np import mindspore.context as context diff --git a/tests/st/ops/gpu/test_correction_mul_grad_op.py b/tests/st/ops/gpu/test_correction_mul_grad_op.py new file mode 100644 index 0000000000..88b391a77a --- /dev/null +++ b/tests/st/ops/gpu/test_correction_mul_grad_op.py @@ -0,0 +1,55 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import numpy as np +import pytest +import os +from mindspore import Tensor +from mindspore.ops import operations as P +import mindspore.nn as nn +from mindspore.common.api import ms_function +import mindspore.context as context + + +context.set_context(device_target='GPU') + + +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.op_w = P.CorrectionMulGrad() + + @ms_function + def construct(self, dy, x, batch_std, running_std): + dx, d_batch_std = self.op_w(dy, x, batch_std, running_std) + return dx, d_batch_std + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_correction_mul_grad(): + net = Net() + co, ci, h, w = 64, 1, 32, 32 + dout = np.random.uniform(-0.1, 0.1, size=[co, ci, h, w]).astype('float32') + x = np.random.uniform(1, 1, size=[co, ci, h, w]).astype('float32') + batch_std = np.random.uniform(1, 10, size=[co]).astype('float32') + running_std = np.random.uniform(1, 10, size=[co]).astype('float32') + output = net(Tensor(dout), Tensor(x), Tensor(batch_std), Tensor(running_std)) + expect = [0, 0] + expect[0] = (dout * np.reshape(batch_std / running_std, (co, 1, 1, 1))) + expect[1] = (np.sum(dout * x, (1, 2, 3)) / running_std) + for i, v in enumerate(output): + assert (np.allclose(output[i].asnumpy(), expect[i], rtol=1.e-5, atol=1.e-5)) diff --git a/tests/st/ops/gpu/test_correction_mul_op.py b/tests/st/ops/gpu/test_correction_mul_op.py new file mode 100644 index 0000000000..01389e148c --- /dev/null +++ b/tests/st/ops/gpu/test_correction_mul_op.py @@ -0,0 +1,52 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import numpy as np +import pytest +from mindspore import Tensor +from mindspore.ops import operations as P +import mindspore.nn as nn +from mindspore.common.api import ms_function +import mindspore.context as context + +context.set_context(device_target='GPU') + + +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.op = P.CorrectionMul() + + @ms_function + def construct(self, x, batch_var, moving_var): + return self.op(x, batch_var, moving_var) + + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_correction_mul(): + net = Net() + co = 64 + x = np.random.uniform(-1, 1, size=[co, 64, 32, 32]).astype('float32') + bv = np.random.uniform(1, 2, size=[co]).astype('float32') + mv = np.random.uniform(1, 2, size=[co]).astype('float32') + output = net(Tensor(x), Tensor(bv), Tensor(mv)) + expect = x * np.reshape(bv, (co, 1, 1, 1)) / np.reshape(mv, (co, 1, 1, 1)) + error = np.ones(shape=expect.shape) * 1.0e-5 + diff = output.asnumpy() - expect + assert np.all(diff < error) + assert np.all(diff > error * -1) + assert (output.shape() == expect.shape) From ae675c5cf8cbc670a469d74359c2f542a3e56c34 Mon Sep 17 00:00:00 2001 From: kswang Date: Tue, 14 Apr 2020 14:15:44 +0800 Subject: [PATCH 212/367] fix nopnode output bug --- mindspore/ccsrc/device/kernel_runtime.cc | 2 +- mindspore/ccsrc/session/anf_runtime_algorithm.cc | 12 +++++++----- mindspore/ccsrc/session/anf_runtime_algorithm.h | 1 + 3 files changed, 9 
insertions(+), 6 deletions(-) diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index eebc650347..e68ad22bbd 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -250,7 +250,7 @@ void KernelRuntime::AssignStaticMemoryOutput(const session::KernelGraph *graph) MS_EXCEPTION_IF_NULL(graph); auto nodes = AnfAlgo::GetAllOutput(graph->output(), {prim::kPrimTupleGetItem}); for (const auto &node : nodes) { - auto item_with_index = AnfAlgo::VisitKernelWithReturnType(node, 0); + auto item_with_index = AnfAlgo::VisitKernelWithReturnType(node, 0, true); MS_EXCEPTION_IF_NULL(item_with_index.first); if (!item_with_index.first->isa() || !AnfAlgo::IsRealKernel(item_with_index.first)) { continue; diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/session/anf_runtime_algorithm.cc index 893c379a07..e355c7885d 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.cc @@ -84,6 +84,7 @@ KernelWithIndex AnfRuntimeAlgorithm::VisitKernel(const AnfNodePtr &anf_node, siz } KernelWithIndex AnfRuntimeAlgorithm::VisitKernelWithReturnType(const AnfNodePtr &anf_node, size_t index, + bool visit_nop_node, const std::vector &return_types) { MS_EXCEPTION_IF_NULL(anf_node); for (const auto &prim_type : return_types) { @@ -109,12 +110,13 @@ KernelWithIndex AnfRuntimeAlgorithm::VisitKernelWithReturnType(const AnfNodePtr auto value_node = input2->cast(); MS_EXCEPTION_IF_NULL(value_node); int item_idx = GetValue(value_node->value()); - return VisitKernelWithReturnType(cnode->input(kRealInputNodeIndexInTupleGetItem), IntToSize(item_idx)); + return VisitKernelWithReturnType(cnode->input(kRealInputNodeIndexInTupleGetItem), IntToSize(item_idx), + visit_nop_node); } else if (IsPrimitive(input0, prim::kPrimDepend) || IsPrimitive(input0, prim::kPrimControlDepend)) { - return 
VisitKernelWithReturnType(cnode->input(kRealInputIndexInDepend), 0); - } else if (opt::IsNopNode(cnode)) { + return VisitKernelWithReturnType(cnode->input(kRealInputIndexInDepend), 0, visit_nop_node); + } else if (opt::IsNopNode(cnode) && visit_nop_node) { if (cnode->inputs().size() == 2) { - return VisitKernelWithReturnType(cnode->input(1), 0); + return VisitKernelWithReturnType(cnode->input(1), 0, visit_nop_node); } else { MS_LOG(EXCEPTION) << cnode->DebugString() << "Invalid nop node"; } @@ -132,7 +134,7 @@ std::vector AnfRuntimeAlgorithm::GetAllOutput(const AnfNodePtr &node auto return_prim_type = return_types; // if visited make_tuple should return back return_prim_type.push_back(prim::kPrimMakeTuple); - auto item_with_index = AnfAlgo::VisitKernelWithReturnType(node, 0, return_prim_type); + auto item_with_index = AnfAlgo::VisitKernelWithReturnType(node, 0, false, return_prim_type); if (AnfAlgo::CheckPrimitiveType(item_with_index.first, prim::kPrimMakeTuple)) { MS_EXCEPTION_IF_NULL(item_with_index.first); auto make_tuple = item_with_index.first->cast(); diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.h b/mindspore/ccsrc/session/anf_runtime_algorithm.h index 1a1d471b84..233f86410c 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.h +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.h @@ -41,6 +41,7 @@ class AnfRuntimeAlgorithm { // get input_anf_node's real kernel by recurse static KernelWithIndex VisitKernel(const AnfNodePtr &input_anf_node, size_t output_index); static KernelWithIndex VisitKernelWithReturnType(const AnfNodePtr &input_anf_node, size_t output_index, + bool visit_nop_node = false, const std::vector &return_types = { prim::kPrimMakeTuple}); static std::vector GetAllOutput(const AnfNodePtr &node, From 2aad57c595320da8a548105f411dc08e8538bb9d Mon Sep 17 00:00:00 2001 From: jojobugfree Date: Tue, 14 Apr 2020 14:57:23 +0800 Subject: [PATCH 213/367] getnext disable memory reuse --- mindspore/ccsrc/device/kernel_runtime.cc | 4 ++++ 
mindspore/ccsrc/session/anf_runtime_algorithm.cc | 5 +++++ mindspore/ccsrc/session/anf_runtime_algorithm.h | 2 ++ mindspore/ccsrc/utils/utils.h | 1 + 4 files changed, 12 insertions(+) diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index eebc650347..db79484f8c 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -355,6 +355,10 @@ void KernelRuntime::AssignNodeOutputMem(int flag, const AnfNodePtr &node, int in AssignCommunicationNodeOutputMem(flag, node); return; } + if (AnfAlgo::IsGetNext(NOT_NULL(node)) && flag == kReuseDynamicMem) { + MS_LOG(INFO) << "GetNext disable mem_reuse"; + flag = kDynamicMem; + } auto kernel_mod = AnfAlgo::GetKernelMod(node); MS_EXCEPTION_IF_NULL(kernel_mod); auto output_sizes = kernel_mod->GetOutputSizeList(); diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/session/anf_runtime_algorithm.cc index 893c379a07..29a27a65b1 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.cc @@ -825,5 +825,10 @@ bool AnfRuntimeAlgorithm::IsCommunicationOp(const AnfNodePtr &node) { } return false; } + +bool AnfRuntimeAlgorithm::IsGetNext(const NotNull &node) { + auto kernel_name = AnfAlgo::GetCNodeName(node); + return kernel_name == kGetNextOpName; +} } // namespace session } // namespace mindspore diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.h b/mindspore/ccsrc/session/anf_runtime_algorithm.h index 1a1d471b84..ab5a68db7f 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.h +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.h @@ -31,6 +31,7 @@ #include "kernel/kernel.h" #include "kernel/kernel_build_info.h" #include "operator/ops.h" +#include "utils/contract.h" namespace mindspore { namespace session { @@ -169,6 +170,7 @@ class AnfRuntimeAlgorithm { // get real input index for some tbe ops which input order is different between me and tbe impl 
static size_t GetRealInputIndex(const AnfNodePtr &anf_node, const size_t cur_index); static bool IsCommunicationOp(const AnfNodePtr &node); + static bool IsGetNext(const NotNull &node); }; } // namespace session using AnfAlgo = session::AnfRuntimeAlgorithm; diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index 39b4b7a160..e1df2a8d25 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -42,6 +42,7 @@ constexpr auto kBNGrad2OpName = "BNGrad2"; constexpr auto kBNGrad3OpName = "BNGrad3"; constexpr auto kClearZeroOpName = "ClearZero"; constexpr auto kAtomicAddrCleanOpName = "AtomicAddrClean"; +constexpr auto kGetNextOpName = "GetNext"; constexpr auto kAllReduceOpName = "AllReduce"; constexpr auto kAllGatherOpName = "AllGather"; constexpr auto kBroadcastOpName = "Broadcast"; From 988363299f1fe0683121e6d64bb6326089f65f36 Mon Sep 17 00:00:00 2001 From: w00517616 Date: Tue, 14 Apr 2020 15:35:30 +0800 Subject: [PATCH 214/367] Exclude FV to further improve performance. 
--- mindspore/ccsrc/pipeline/static_analysis/evaluator.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc b/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc index 99cb893104..5bad1634d5 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/evaluator.cc @@ -63,6 +63,9 @@ AnalysisContextPtr BaseFuncGraphEvaluator::MakeContext(const AnalysisEnginePtr & } static std::vector FastShadowSort(const AnfNodePtr &ret_node) { + auto ori_func_graph = ret_node->func_graph(); + MS_EXCEPTION_IF_NULL(ori_func_graph); + std::vector sorted_nodes; std::unordered_set checked_cnodes; std::size_t index = 0; @@ -75,7 +78,8 @@ static std::vector FastShadowSort(const AnfNodePtr &ret_node) { auto &inputs = current->cast()->inputs(); for (auto it = inputs.begin(); it != inputs.end(); it++) { AnfNodePtr input = *it; - if (input != nullptr && input->isa() && checked_cnodes.find(input) == checked_cnodes.end()) { + if (input != nullptr && input->isa() && checked_cnodes.find(input) == checked_cnodes.end() && + input->func_graph() == ori_func_graph) { sorted_nodes.emplace_back(input); (void)checked_cnodes.insert(input); } From 5d4144de11c1a728e445a86c5e9f3db0f5f304fc Mon Sep 17 00:00:00 2001 From: gong chen Date: Tue, 14 Apr 2020 15:47:43 +0800 Subject: [PATCH 215/367] bugfix(side effect): fix adding wrong control depend between AllReduce and GetStatus. 
--- mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py | 7 +++---- mindspore/nn/wrap/loss_scale.py | 7 +++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py b/mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py index bc51ba5d48..046b2adbe2 100644 --- a/mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py +++ b/mindspore/model_zoo/Bert_NEZHA/bert_for_pre_training.py @@ -370,7 +370,7 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): self.parallel_mode = context.get_auto_parallel_context("parallel_mode") if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: self.reducer_flag = True - self.grad_reducer = None + self.grad_reducer = F.identity if self.reducer_flag: mean = context.get_auto_parallel_context("mirror_mean") degree = get_group_size() @@ -428,9 +428,8 @@ class BertTrainOneStepWithLossScaleCell(nn.Cell): mstype.float32)) grads = self.hyper_map(F.partial(grad_scale, scaling_sens), grads) grads = self.clip_gradients(grads, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE) - if self.reducer_flag: - # apply grad reducer on grads - grads = self.grad_reducer(grads) + # apply grad reducer on grads + grads = self.grad_reducer(grads) self.get_status(init) flag_sum = self.reduce_sum(init, (0,)) if self.is_distributed: diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index c6d61e6983..ba8e6cbb7c 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -220,7 +220,7 @@ class TrainOneStepWithLossScaleCell(Cell): self.depend_parameter_use = ControlDepend(depend_mode=1) self.allreduce = P.AllReduce() self.parallel_mode = _get_parallel_mode() - self.grad_reducer = None + self.grad_reducer = F.identity self.reducer_flag = self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL] if self.reducer_flag: mean = _get_mirror_mean() @@ -250,9 +250,8 @@ class TrainOneStepWithLossScaleCell(Cell): 
scaling_sens = sens grads = self.grad(self.network, weights)(data, label, F.cast(scaling_sens, F.dtype(loss))) grads = self.hyper_map(F.partial(_grad_scale, scaling_sens), grads) - if self.reducer_flag: - # apply grad reducer on grads - grads = self.grad_reducer(grads) + # apply grad reducer on grads + grads = self.grad_reducer(grads) # get the overflow buffer if not self.gpu_target: self.get_status(init) From e8ca9c12c2421695dda29b85b8e44a01763bc800 Mon Sep 17 00:00:00 2001 From: yanghaoran Date: Tue, 14 Apr 2020 15:48:07 +0800 Subject: [PATCH 216/367] add alternate path for stdclient library --- mindspore/ccsrc/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 3a04d9b3fb..4b500cc88a 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -301,6 +301,7 @@ if(ENABLE_D) set(ASCEND_PATH /usr/local/Ascend) endif() set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common) + set(ASCEND_DRIVER_BACK_PATH ${ASCEND_PATH}/driver/lib64/driver) set(ASCEND_RUNTIME_PATH ${ASCEND_PATH}/fwkacllib/lib64) endif() @@ -308,7 +309,7 @@ if(ENABLE_D) find_library(HCCL hccl ${ASCEND_RUNTIME_PATH}) find_library(CCE_LIB cce ${ASCEND_RUNTIME_PATH}) find_library(RUNTIME_LIB runtime ${ASCEND_RUNTIME_PATH}) - find_library(TSDCLIENT tsdclient ${ASCEND_RUNTIME_PATH}) + find_library(TSDCLIENT tsdclient HINTS ${ASCEND_RUNTIME_PATH} ${ASCEND_DRIVER_BACK_PATH}) find_library(PROFILING msprof ${ASCEND_DRIVER_PATH}) target_link_libraries(mindspore ge_runtime ${CCE_LIB} ${RUNTIME_LIB} ${TSDCLIENT} ${PROFILING} ${HCCL} ${TSDCLIENT}) endif() From 61e959a9f33f3b1629ac7bb94ab78383428d1c48 Mon Sep 17 00:00:00 2001 From: gong chen Date: Tue, 14 Apr 2020 15:07:42 +0800 Subject: [PATCH 217/367] bugfix(side effect): fix cell object cann't free normally. 
--- mindspore/ccsrc/ir/func_graph.cc | 20 ++++++++++---------- mindspore/ccsrc/ir/func_graph.h | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/mindspore/ccsrc/ir/func_graph.cc b/mindspore/ccsrc/ir/func_graph.cc index 7404db4af0..93fd9c0936 100644 --- a/mindspore/ccsrc/ir/func_graph.cc +++ b/mindspore/ccsrc/ir/func_graph.cc @@ -640,8 +640,8 @@ FuncGraphPtr FuncGraph::GenerateGraph(const AbstractBasePtrList& args_spec_list) void FuncGraph::add_parameter_obj_node(const AnfNodePtr& p) { paramter_obj_nodes_.push_back(p); } -std::list FuncGraph::GetOrderedCnodes(bool force_use_topo_sort) { - if (has_flag(GRAPH_FLAG_HAS_EFFECT) && !force_use_topo_sort) { +std::list FuncGraph::GetOrderedCnodes() { + if (has_flag(GRAPH_FLAG_HAS_EFFECT)) { MS_LOG(DEBUG) << "Return ordered cnodes."; return order_; } else { @@ -703,14 +703,14 @@ void FuncGraph::CheckOrder() { } } } - auto topo_sort = GetOrderedCnodes(true); - if (topo_sort.size() != order_.size()) { - DumpCNodeList(); - DumpIR(ToString(), shared_from_base()); - MS_LOG(INFO) << "Dump graph: " << ToString() << "."; - DumpFuncGraph(ToString()); - MS_LOG(EXCEPTION) << "CNode order size " << order_.size() << " is not equal to topo sort list size " - << topo_sort.size() << "."; + auto mng = manager_.lock(); + if (mng != nullptr) { + const auto& nodes = mng->nodes()[shared_from_base()]; + if (nodes.size() != (order_.size() + parameters_.size())) { + DumpCNodeList(); + MS_LOG(EXCEPTION) << "CNode order size " << order_.size() << " is not equal to managed node size " + << nodes.size() - parameters_.size() << "."; + } } MS_LOG(DEBUG) << "Check order okay."; } diff --git a/mindspore/ccsrc/ir/func_graph.h b/mindspore/ccsrc/ir/func_graph.h index 1d58c90755..9c3752cd81 100644 --- a/mindspore/ccsrc/ir/func_graph.h +++ b/mindspore/ccsrc/ir/func_graph.h @@ -258,7 +258,7 @@ class FuncGraph : public FuncGraphBase { std::map parameter_default_value_; std::unordered_map make_ref_params_; - std::list GetOrderedCnodes(bool 
force_use_topo_sort = false); + std::list GetOrderedCnodes(); void EraseUnusedNodeInOrder(const AnfNodePtr &n); void EraseUnusedNodeInOrder(); void CheckOrder(); From 8ba68220dddfdfb4977397b4f34a097637bf495d Mon Sep 17 00:00:00 2001 From: "wangnan39@huawei.com" Date: Tue, 14 Apr 2020 16:03:55 +0800 Subject: [PATCH 218/367] modify annotation of DepthwiseConv2d --- mindspore/ops/operations/nn_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index e56a8a826c..377ef19417 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -679,7 +679,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. - **weight** (Tensor) - Set size of kernel is :math:`(K_1, K_2)`, then the shape is - :math:`(channel_multiplier, C_{in}, K_1, K_2)`. + :math:`(channel_{multiplier}, C_{in}, K_1, K_2)`. Outputs: Tensor of shape :math:`(N, C_{in} * \text{channel_multiplier}, H_{out}, W_{out})`. 
From e53376092f1f9cffa52d5ed520748de0a29cee41 Mon Sep 17 00:00:00 2001 From: kswang Date: Tue, 14 Apr 2020 16:06:56 +0800 Subject: [PATCH 219/367] fix workspace reuse bug --- mindspore/ccsrc/device/kernel_runtime.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index e68ad22bbd..2322f8469f 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -474,7 +474,7 @@ void KernelRuntime::AssignWorkSpaceMem(int flag, const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(kernel_mod); size_t index = 0; for (auto &size : kernel_mod->GetWorkspaceSizeList()) { - auto ptr = mem_manager_->MallocWorkSpaceMem(node, flag, index, size); + auto ptr = mem_manager_->MallocWorkSpaceMem(node, index, flag, size); AnfAlgo::SetWorkspaceAddr(CreateDeviceAddress(ptr, size, "", kTypeUnknown), index, node.get()); index++; } From e1c8f248e0fc1d86b5877bb4e3ef9229fe2a1909 Mon Sep 17 00:00:00 2001 From: Wei Luning Date: Tue, 14 Apr 2020 13:18:33 +0800 Subject: [PATCH 220/367] Fix the output is not tuple, when eval --- mindspore/nn/wrap/cell_wrapper.py | 20 ++++++++---- mindspore/train/model.py | 13 +++----- tests/ut/python/train/test_amp.py | 52 +++++++++++++++++++++++++++++++ 3 files changed, 71 insertions(+), 14 deletions(-) diff --git a/mindspore/nn/wrap/cell_wrapper.py b/mindspore/nn/wrap/cell_wrapper.py index 53a535781d..64c382557a 100644 --- a/mindspore/nn/wrap/cell_wrapper.py +++ b/mindspore/nn/wrap/cell_wrapper.py @@ -14,15 +14,23 @@ # ============================================================================ """Cell_wrapper.""" import copy + import numpy as np + +from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean, + _get_parallel_mode) from mindspore.train.parallel_utils import ParallelMode -from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean -from ...ops import composite as C, 
functional as F, operations as P -from ...common import Tensor, dtype as mstype -from ..cell import Cell + +from ...common import Tensor +from ...common import dtype as mstype from ...common.initializer import initializer from ...common.parameter import Parameter, ParameterTuple +from ...ops import composite as C +from ...ops import functional as F +from ...ops import operations as P +from ...ops.composite.base import _mp_cast_helper from ...ops.operations.comm_ops import _VirtualDataset +from ..cell import Cell from .grad_reducer import DistributedGradReducer @@ -310,8 +318,8 @@ class WithEvalCell(Cell): def construct(self, data, label): outputs = self._network(data) - loss = self._loss_fn(outputs, label) - + label = _mp_cast_helper(mstype.float32, label) + loss = self._loss_fn(F.cast(outputs, mstype.float32), label) return loss, outputs, label diff --git a/mindspore/train/model.py b/mindspore/train/model.py index 7604b8ac38..46e4f421f7 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -24,7 +24,7 @@ from .. import context from ..parallel._utils import _get_parallel_mode, _get_device_num, _get_global_rank, \ _get_parameter_broadcast, _device_number_check, _parameter_broadcast_check, _callback_wrapper from ..nn.metrics import Loss -from ..nn.wrap import WithLossCell, DataWrapper, WithEvalCell +from .. 
import nn from ..nn.wrap.cell_wrapper import _VirtualDatasetCell from .parallel_utils import ParallelMode from ..common import dtype as mstype @@ -130,7 +130,7 @@ class Model: self._loss_fn, level=self._amp_level) elif self._loss_fn: - network = WithLossCell(network, self._loss_fn) + network = nn.WithLossCell(network, self._loss_fn) # If need to check if loss_fn is not None, but optimizer is None return network @@ -150,10 +150,7 @@ class Model: else: if self._loss_fn is None: raise ValueError("loss_fn can not be None.") - if self._optimizer: - self._eval_network = self._train_network.network - else: - self._eval_network = WithEvalCell(self._network, self._loss_fn) + self._eval_network = nn.WithEvalCell(self._network, self._loss_fn) self._eval_indexes = [0, 1, 2] def _clear_metrics(self): @@ -263,7 +260,7 @@ class Model: dataset_helper = DatasetHelper(train_dataset) # remove later to deal with loop sink if need_wrap: - self._train_network = DataWrapper(self._train_network, *(dataset_helper.types_shapes()), + self._train_network = nn.DataWrapper(self._train_network, *(dataset_helper.types_shapes()), train_dataset.__ME_INITED__) cb_params.train_network = self._train_network self._train_network.set_train() @@ -429,7 +426,7 @@ class Model: # remove later to deal with loop sink if need_wrap: - self._eval_network = DataWrapper(self._eval_network, *(dataset_helper.types_shapes()), + self._eval_network = nn.DataWrapper(self._eval_network, *(dataset_helper.types_shapes()), valid_dataset.__ME_INITED__) self._eval_network.set_train(mode=False) self._eval_network.phase = 'eval' diff --git a/tests/ut/python/train/test_amp.py b/tests/ut/python/train/test_amp.py index 1a26c21775..2afb1e00b5 100644 --- a/tests/ut/python/train/test_amp.py +++ b/tests/ut/python/train/test_amp.py @@ -14,12 +14,15 @@ # ============================================================================ """ auto mixed precision """ import numpy as np +import pytest from mindspore import amp from mindspore 
import nn from mindspore import Tensor from mindspore.common import dtype as mstype import mindspore.context as context from mindspore.model_zoo.resnet import resnet50 +from mindspore.train import Model +from ....dataset_mock import MindData def setup_module(module): @@ -85,3 +88,52 @@ def test_amp_o0_loss(): optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) train_network = amp.build_train_network(net, optimizer, loss) output = train_network(inputs, label) + + +class MindDataSet(MindData): + def __init__(self, dataset_types, dataset_shapes): + super(MindDataSet, self).__init__(size=2, batch_size=32, + np_types=dataset_types, + output_shapes=dataset_shapes, + input_indexs=(0, 1)) + def __next__(self): + if self._size < self._iter_num: + raise StopIteration + self._iter_num += 1 + next = [] + for shape, type in zip(self._output_shapes, self._np_types): + next.append(Tensor(np.ones(shape).astype(type))) + return tuple(next) + + +def test_compile_model_train_O0(): + dataset_types = (np.float32, np.float32) + dataset_shapes = ((16, 16), (16, 16)) + + dataset = MindDataSet(dataset_types, dataset_shapes) + + net = NetNoLoss(16, 16) + loss = nn.MSELoss() + optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) + + model = Model(net, loss_fn=loss, optimizer=optimizer, metrics={"acc"}, amp_level="O0") + model.train(2, dataset, dataset_sink_mode=False) + with pytest.raises(ValueError): + # not actual run, the metrics step will fail, check if compile ok. 
+ model.eval(dataset) + +def test_compile_model_train_O2(): + dataset_types = (np.float32, np.float32) + dataset_shapes = ((16, 16), (16, 16)) + + dataset = MindDataSet(dataset_types, dataset_shapes) + + net = NetNoLoss(16, 16) + loss = nn.MSELoss() + optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) + + model = Model(net, loss_fn=loss, optimizer=optimizer, metrics={"acc"}, amp_level="O2") + model.train(2, dataset, dataset_sink_mode=False) + with pytest.raises(ValueError): + # not actual run, the metrics step will fail, check if compile ok. + model.eval(dataset) From ea5da25d10444d4f86098b6216f97d56115364de Mon Sep 17 00:00:00 2001 From: jonyguo Date: Mon, 13 Apr 2020 22:07:19 +0800 Subject: [PATCH 221/367] fix: use exactly read option --- .../mindrecord/io/shard_index_generator.cc | 4 +++ mindspore/ccsrc/mindrecord/io/shard_reader.cc | 36 ++++++++----------- mindspore/ccsrc/mindrecord/io/shard_writer.cc | 25 +++++++++---- 3 files changed, 36 insertions(+), 29 deletions(-) diff --git a/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc b/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc index 254ddfbb16..5a5cd7cbf3 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_index_generator.cc @@ -512,6 +512,10 @@ MSRStatus ShardIndexGenerator::ExecuteTransaction(const int &shard_no, const std std::fstream in; in.open(common::SafeCStr(shard_address), std::ios::in | std::ios::binary); + if (!in.good()) { + MS_LOG(ERROR) << "File could not opened"; + return FAILED; + } (void)sqlite3_exec(db.second, "BEGIN TRANSACTION;", nullptr, nullptr, nullptr); for (int raw_page_id : raw_page_ids) { auto sql = GenerateRawSQL(fields_); diff --git a/mindspore/ccsrc/mindrecord/io/shard_reader.cc b/mindspore/ccsrc/mindrecord/io/shard_reader.cc index 2413da3737..085f148a88 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_reader.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_reader.cc @@ -125,13 
+125,10 @@ MSRStatus ShardReader::Open() { for (const auto &file : file_paths_) { std::shared_ptr fs = std::make_shared(); - fs->open(common::SafeCStr(file), std::ios::in | std::ios::out | std::ios::binary); - if (fs->fail()) { - fs->open(common::SafeCStr(file), std::ios::in | std::ios::out | std::ios::trunc | std::ios::binary); - if (fs->fail()) { - MS_LOG(ERROR) << "File could not opened"; - return FAILED; - } + fs->open(common::SafeCStr(file), std::ios::in | std::ios::binary); + if (!fs->good()) { + MS_LOG(ERROR) << "File could not opened"; + return FAILED; } MS_LOG(INFO) << "Open shard file successfully."; file_streams_.push_back(fs); @@ -146,13 +143,10 @@ MSRStatus ShardReader::Open(int n_consumer) { for (const auto &file : file_paths_) { for (int j = 0; j < n_consumer; ++j) { std::shared_ptr fs = std::make_shared(); - fs->open(common::SafeCStr(file), std::ios::in | std::ios::out | std::ios::binary); - if (fs->fail()) { - fs->open(common::SafeCStr(file), std::ios::in | std::ios::out | std::ios::trunc | std::ios::binary); - if (fs->fail()) { - MS_LOG(ERROR) << "File could not opened"; - return FAILED; - } + fs->open(common::SafeCStr(file), std::ios::in | std::ios::binary); + if (!fs->good()) { + MS_LOG(ERROR) << "File could not opened"; + return FAILED; } file_streams_random_[j].push_back(fs); } @@ -311,12 +305,10 @@ MSRStatus ShardReader::ReadAllRowsInShard(int shard_id, const std::string &sql, std::string file_name = file_paths_[shard_id]; std::shared_ptr fs = std::make_shared(); if (!all_in_index_) { - fs->open(common::SafeCStr(file_name), std::ios::in | std::ios::out | std::ios::binary); - if (fs->fail()) { - fs->open(common::SafeCStr(file_name), std::ios::in | std::ios::out | std::ios::trunc | std::ios::binary); - if (fs->fail()) { - MS_LOG(ERROR) << "File could not opened"; - } + fs->open(common::SafeCStr(file_name), std::ios::in | std::ios::binary); + if (!fs->good()) { + MS_LOG(ERROR) << "File could not opened"; + return FAILED; } } 
sqlite3_free(errmsg); @@ -520,8 +512,8 @@ std::pair> ShardReader::GetLabelsFromBinaryFile( std::string file_name = file_paths_[shard_id]; std::vector res; std::shared_ptr fs = std::make_shared(); - fs->open(common::SafeCStr(file_name), std::ios::in | std::ios::out | std::ios::binary); - if (fs->fail()) { + fs->open(common::SafeCStr(file_name), std::ios::in | std::ios::binary); + if (!fs->good()) { MS_LOG(ERROR) << "File could not opened"; return {FAILED, {}}; } diff --git a/mindspore/ccsrc/mindrecord/io/shard_writer.cc b/mindspore/ccsrc/mindrecord/io/shard_writer.cc index 54cf0e156b..3d4259ebbd 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_writer.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_writer.cc @@ -76,16 +76,27 @@ MSRStatus ShardWriter::Open(const std::vector &paths, bool append) // Open files for (const auto &file : file_paths_) { std::shared_ptr fs = std::make_shared(); - fs->open(common::SafeCStr(file), std::ios::in | std::ios::out | std::ios::binary); - if (fs->fail()) { - fs->open(common::SafeCStr(file), std::ios::in | std::ios::out | std::ios::trunc | std::ios::binary); - if (fs->fail()) { - MS_LOG(ERROR) << "File could not opened"; + if (!append) { + // if not append and mindrecord file exist, return FAILED + fs->open(common::SafeCStr(file), std::ios::in | std::ios::binary); + if (fs->good()) { + MS_LOG(ERROR) << "MindRecord file already existed."; + fs->close(); + return FAILED; + } + fs->close(); + + // open the mindrecord file to write + fs->open(common::SafeCStr(file), std::ios::out | std::ios::binary); + if (!fs->good()) { + MS_LOG(ERROR) << "MindRecord file could not opened."; return FAILED; } } else { - if (!append) { - MS_LOG(ERROR) << "MindRecord file already existed"; + // open the mindrecord file to append + fs->open(common::SafeCStr(file), std::ios::out | std::ios::in | std::ios::binary); + if (!fs->good()) { + MS_LOG(ERROR) << "MindRecord file could not opened for append."; return FAILED; } } From 
e5c67b9088cc3023c888c8d71f04a58f817906b7 Mon Sep 17 00:00:00 2001 From: YuJianfeng Date: Mon, 13 Apr 2020 16:53:20 +0800 Subject: [PATCH 222/367] Add cnode to equal map when opt matching --- .../ascend/ir_fusion/adam_apply_one_fusion.cc | 51 +++--------- .../ascend/ir_fusion/adam_apply_one_fusion.h | 5 ++ .../adam_apply_one_with_decay_rule.cc | 54 ++++--------- .../adam_apply_one_with_decay_rule.h | 5 ++ .../ascend/ir_fusion/lamb_next_right_rule.cc | 35 ++------ .../ascend/ir_fusion/lamb_next_right_rule.h | 6 +- .../ccsrc/pre_activate/common/optimizer.cc | 30 ++++--- .../ccsrc/pre_activate/common/optimizer.h | 2 + .../pre_activate/common/pattern_engine.cc | 79 +++++++++++++------ .../pre_activate/common/pattern_engine.h | 71 ++++++++++------- .../common/pattern_engine_test.cc | 30 +++---- 11 files changed, 182 insertions(+), 186 deletions(-) diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.cc index 1ecf4bbd06..3f905fedf9 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.cc @@ -15,43 +15,9 @@ */ #include "pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h" #include "pre_activate/common/helper.h" -#include "utils/utils.h" namespace mindspore { namespace opt { -namespace { -void GetAdd0AndAdd1(const AnfNodePtr &sub0, AnfNodePtr *add0, AnfNodePtr *add1) { - MS_EXCEPTION_IF_NULL(sub0); - MS_EXCEPTION_IF_NULL(add0); - MS_EXCEPTION_IF_NULL(add1); - auto sub0_cnode = sub0->cast(); - MS_EXCEPTION_IF_NULL(sub0_cnode); - CheckCNodeInputSize(sub0_cnode, kSubInputNum); - AnfNodePtr mul4 = sub0_cnode->input(2); - MS_EXCEPTION_IF_NULL(mul4); - auto mul4_cnode = mul4->cast(); - MS_EXCEPTION_IF_NULL(mul4_cnode); - CheckCNodeInputSize(mul4_cnode, kMulInputNum); - AnfNodePtr true_div0 = mul4_cnode->input(2); - MS_EXCEPTION_IF_NULL(true_div0); - auto true_div0_cnode = 
true_div0->cast(); - MS_EXCEPTION_IF_NULL(true_div0_cnode); - CheckCNodeInputSize(true_div0_cnode, kRealDivInputNum); - *add0 = true_div0_cnode->input(1); - AnfNodePtr add2 = true_div0_cnode->input(2); - MS_EXCEPTION_IF_NULL(add2); - auto add2_cnode = add2->cast(); - MS_EXCEPTION_IF_NULL(add2_cnode); - CheckCNodeInputSize(add2_cnode, kAddInputNum); - AnfNodePtr sqrt0 = add2_cnode->input(1); - MS_EXCEPTION_IF_NULL(sqrt0); - auto sqrt0_cnode = sqrt0->cast(); - MS_EXCEPTION_IF_NULL(sqrt0_cnode); - CheckCNodeInputSize(sqrt0_cnode, kSqrtInputNum); - *add1 = sqrt0_cnode->input(1); -} -} // namespace - AnfNodePtr AdamApplyOneFusion::CreateAdamApplyOneNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv) const { MS_EXCEPTION_IF_NULL(func_graph); MS_EXCEPTION_IF_NULL(equiv); @@ -79,10 +45,10 @@ const BaseRef AdamApplyOneFusion::DefinePattern() const { const auto prim_deal_div = std::make_shared(kRealDivOpName); VectorRef mul2 = VectorRef({prim::kPrimMul, mul_x_input_vars_[2], input_vars_[1]}); VectorRef mul3 = VectorRef({prim::kPrimMul, mul_x_input_vars_[3], VectorRef({prim::kPrimSquare, input_vars_[0]})}); - VectorRef sqrt0 = VectorRef({prim_sqrt, VectorRef({prim::kPrimTensorAdd, mul2, mul3})}); + VectorRef sqrt0 = VectorRef({prim_sqrt, VectorRef({add1_var_, mul2, mul3})}); VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); - VectorRef add0 = VectorRef({prim::kPrimTensorAdd, mul0, mul1}); + VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); VectorRef true_div0 = VectorRef({prim_deal_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})}); return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, input_vars_[4], true_div0})}); } @@ -96,10 +62,17 @@ const AnfNodePtr AdamApplyOneFusion::Process(const FuncGraphPtr &func_graph, con new_node->set_scope(node->scope()); // Set abstract of new node AbstractBasePtrList 
new_node_abstract_list; - AnfNodePtr add0 = nullptr; - AnfNodePtr add1 = nullptr; - GetAdd0AndAdd1(node, &add0, &add1); + auto iter_add0 = (*equiv).find(add0_var_); + if (iter_add0 == (*equiv).end()) { + MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add0 var after matched."; + } + auto iter_add1 = (*equiv).find(add1_var_); + if (iter_add1 == (*equiv).end()) { + MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add1 var after matched."; + } + auto add0 = utils::cast(iter_add0->second); MS_EXCEPTION_IF_NULL(add0); + auto add1 = utils::cast(iter_add1->second); MS_EXCEPTION_IF_NULL(add1); new_node_abstract_list.push_back(add1->abstract()); new_node_abstract_list.push_back(add0->abstract()); diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h index 6642561b07..77f6641463 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h @@ -19,6 +19,7 @@ #include #include #include "pre_activate/common/optimizer.h" +#include "utils/utils.h" namespace mindspore { namespace opt { @@ -35,6 +36,8 @@ class AdamApplyOneFusion : public PatternProcessPass { mul_x_input_vars_.push_back(std::make_shared()); } add2_y_ = std::make_shared(); + add0_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name())); + add1_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name())); } ~AdamApplyOneFusion() override = default; @@ -46,6 +49,8 @@ class AdamApplyOneFusion : public PatternProcessPass { std::vector input_vars_; std::vector mul_x_input_vars_; VarPtr add2_y_; + VarPtr add0_var_; + VarPtr add1_var_; }; } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_with_decay_rule.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_with_decay_rule.cc index 
442aa64217..4a2387d3cc 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_with_decay_rule.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_with_decay_rule.cc @@ -17,48 +17,13 @@ #include #include -#include #include "session/anf_runtime_algorithm.h" #include "ir/primitive.h" -#include "utils/utils.h" #include "pre_activate/common/helper.h" namespace mindspore { namespace opt { -namespace { -std::tuple GetAdd0Add1Node(const AnfNodePtr &node) { - MS_EXCEPTION_IF_NULL(node); - auto sub0 = node->cast(); - MS_EXCEPTION_IF_NULL(sub0); - auto mul5_anf = sub0->input(2); - MS_EXCEPTION_IF_NULL(mul5_anf); - auto mul5 = mul5_anf->cast(); - MS_EXCEPTION_IF_NULL(mul5); - auto add3_anf = mul5->input(2); - MS_EXCEPTION_IF_NULL(add3_anf); - auto add3 = add3_anf->cast(); - MS_EXCEPTION_IF_NULL(add3); - auto real_div0_anf = add3->input(1); - MS_EXCEPTION_IF_NULL(real_div0_anf); - auto real_div0 = real_div0_anf->cast(); - MS_EXCEPTION_IF_NULL(real_div0); - auto add0_anf = real_div0->input(1); - MS_EXCEPTION_IF_NULL(add0_anf); - auto add2_anf = real_div0->input(2); - MS_EXCEPTION_IF_NULL(add2_anf); - auto add2 = add2_anf->cast(); - MS_EXCEPTION_IF_NULL(add2); - auto sqrt0_anf = add2->input(1); - MS_EXCEPTION_IF_NULL(sqrt0_anf); - auto sqrt0 = sqrt0_anf->cast(); - MS_EXCEPTION_IF_NULL(sqrt0); - auto add1_anf = sqrt0->input(1); - MS_EXCEPTION_IF_NULL(add1_anf); - return std::make_tuple(add0_anf, add1_anf); -} -} // namespace - std::vector AdamApplyOneWithDecayRule::GetFusionNodeInputs(const EquivPtr &equiv) const { MS_EXCEPTION_IF_NULL(equiv); auto input0 = utils::cast((*equiv)[input0_]); @@ -82,10 +47,10 @@ const BaseRef AdamApplyOneWithDecayRule::DefinePattern() const { VectorRef mul0_pattern({prim::kPrimMul, mul0_x_, input2_}); VectorRef mul1_pattern({prim::kPrimMul, mul1_x_, input0_}); VectorRef square0_pattern({prim::kPrimSquare, input0_}); - VectorRef add0_pattern({prim::kPrimTensorAdd, mul0_pattern, mul1_pattern}); + VectorRef 
add0_pattern({add0_var_, mul0_pattern, mul1_pattern}); VectorRef mul2_pattern({prim::kPrimMul, mul2_x_, input1_}); VectorRef mul3_pattern({prim::kPrimMul, mul3_x_, square0_pattern}); - VectorRef add1_pattern({prim::kPrimTensorAdd, mul2_pattern, mul3_pattern}); + VectorRef add1_pattern({add1_var_, mul2_pattern, mul3_pattern}); VectorRef sqrt0_pattern({sqrt, add1_pattern}); VectorRef add2_pattern({prim::kPrimTensorAdd, sqrt0_pattern, add2_y_}); VectorRef mul4_pattern({prim::kPrimMul, mul4_x_, input3_}); @@ -107,9 +72,18 @@ const AnfNodePtr AdamApplyOneWithDecayRule::Process(const FuncGraphPtr &graph, c MS_EXCEPTION_IF_NULL(fusion_node); fusion_node->set_scope(node->scope()); - AnfNodePtr add0 = nullptr; - AnfNodePtr add1 = nullptr; - std::tie(add0, add1) = GetAdd0Add1Node(node); + auto iter_add0 = (*equiv).find(add0_var_); + if (iter_add0 == (*equiv).end()) { + MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add0 var after matched."; + } + auto iter_add1 = (*equiv).find(add1_var_); + if (iter_add1 == (*equiv).end()) { + MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add1 var after matched."; + } + auto add0 = utils::cast(iter_add0->second); + MS_EXCEPTION_IF_NULL(add0); + auto add1 = utils::cast(iter_add1->second); + MS_EXCEPTION_IF_NULL(add1); auto types = {AnfAlgo::GetOutputInferDataType(add1, 0), AnfAlgo::GetOutputInferDataType(add0, 0), AnfAlgo::GetOutputInferDataType(node, 0)}; auto shapes = {AnfAlgo::GetOutputInferShape(add1, 0), AnfAlgo::GetOutputInferShape(add0, 0), diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_with_decay_rule.h b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_with_decay_rule.h index a6bab48770..72c54f3535 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_with_decay_rule.h +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_with_decay_rule.h @@ -19,6 +19,7 @@ #include #include #include "pre_activate/common/optimizer.h" +#include 
"utils/utils.h" namespace mindspore { namespace opt { class AdamApplyOneWithDecayRule : public PatternProcessPass { @@ -36,6 +37,8 @@ class AdamApplyOneWithDecayRule : public PatternProcessPass { mul3_x_ = std::make_shared(); mul4_x_ = std::make_shared(); add2_y_ = std::make_shared(); + add0_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name())); + add1_var_ = std::make_shared(std::make_shared(prim::kPrimTensorAdd->name())); } ~AdamApplyOneWithDecayRule() override = default; const BaseRef DefinePattern() const override; @@ -54,6 +57,8 @@ class AdamApplyOneWithDecayRule : public PatternProcessPass { VarPtr mul3_x_; VarPtr mul4_x_; VarPtr add2_y_; + VarPtr add0_var_; + VarPtr add1_var_; }; } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/lamb_next_right_rule.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/lamb_next_right_rule.cc index ca9c90f4e5..68baeeed99 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/lamb_next_right_rule.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/lamb_next_right_rule.cc @@ -16,36 +16,9 @@ #include "pre_activate/ascend/ir_fusion/lamb_next_right_rule.h" #include #include "pre_activate/common/helper.h" -#include "utils/utils.h" namespace mindspore { namespace opt { -namespace { -AnfNodePtr GetAdd1Node(const AnfNodePtr &node) { - MS_EXCEPTION_IF_NULL(node); - auto add2_cnode = node->cast(); - MS_EXCEPTION_IF_NULL(add2_cnode); - if (add2_cnode->inputs().size() != kAddInputNum) { - MS_LOG(ERROR) << "The input size of Add2 is not equal to " << kAddInputNum; - } - AnfNodePtr sqrt0 = add2_cnode->input(1); - MS_EXCEPTION_IF_NULL(sqrt0); - auto sqrt0_cnode = sqrt0->cast(); - MS_EXCEPTION_IF_NULL(sqrt0_cnode); - if (sqrt0_cnode->inputs().size() != kSqrtInputNum) { - MS_LOG(ERROR) << "The input size of Sqrt0 is not equal to " << kSqrtInputNum; - } - AnfNodePtr real_div1 = sqrt0_cnode->input(1); - MS_EXCEPTION_IF_NULL(real_div1); - auto real_div1_cnode = 
real_div1->cast(); - MS_EXCEPTION_IF_NULL(real_div1_cnode); - if (real_div1_cnode->inputs().size() != kMulInputNum) { - MS_LOG(ERROR) << "The input size of RealDiv1 is not equal to " << kMulInputNum; - } - return real_div1_cnode->input(1); -} -} // namespace - AnfNodePtr LambNextRightRule::CreateLambNextRightNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv) const { MS_EXCEPTION_IF_NULL(func_graph); MS_EXCEPTION_IF_NULL(equiv); @@ -79,7 +52,7 @@ const BaseRef LambNextRightRule::DefinePattern() const { const auto prim_sqrt = std::make_shared(kSqrtOpName); MS_EXCEPTION_IF_NULL(prim_sqrt); VectorRef mul3 = VectorRef({prim::kPrimMul, mul3_x_, VectorRef({prim::kPrimSquare, input0_})}); - VectorRef add1 = VectorRef({prim::kPrimTensorAdd, VectorRef({prim::kPrimMul, mul2_x_, input1_}), mul3}); + VectorRef add1 = VectorRef({add1_var_, VectorRef({prim::kPrimMul, mul2_x_, input1_}), mul3}); return VectorRef( {prim::kPrimTensorAdd, VectorRef({prim_sqrt, VectorRef({prim::kPrimMul, add1, true_div1_recip_})}), add2_y_}); } @@ -91,7 +64,11 @@ const AnfNodePtr LambNextRightRule::Process(const FuncGraphPtr &func_graph, cons auto new_node = CreateLambNextRightNode(func_graph, equiv); MS_EXCEPTION_IF_NULL(new_node); // Set abstract of new node - AnfNodePtr add1 = GetAdd1Node(node); + auto iter_add1 = (*equiv).find(add1_var_); + if (iter_add1 == (*equiv).end()) { + MS_LOG(EXCEPTION) << "The equiv map is expected to contains the add1 var after matched."; + } + auto add1 = utils::cast(iter_add1->second); MS_EXCEPTION_IF_NULL(add1); AbstractBasePtrList new_node_abstract_list; new_node_abstract_list.push_back(add1->abstract()); diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/lamb_next_right_rule.h b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/lamb_next_right_rule.h index f78be7460b..3d15001da2 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/lamb_next_right_rule.h +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/lamb_next_right_rule.h @@ -18,6 +18,8 @@ 
#include #include "pre_activate/common/optimizer.h" +#include "utils/utils.h" + namespace mindspore { namespace opt { class LambNextRightRule : public PatternProcessPass { @@ -29,7 +31,8 @@ class LambNextRightRule : public PatternProcessPass { mul2_x_(std::make_shared()), mul3_x_(std::make_shared()), true_div1_recip_(std::make_shared()), - add2_y_(std::make_shared()) {} + add2_y_(std::make_shared()), + add1_var_(std::make_shared(std::make_shared(prim::kPrimTensorAdd->name()))) {} ~LambNextRightRule() override = default; const BaseRef DefinePattern() const override; @@ -44,6 +47,7 @@ class LambNextRightRule : public PatternProcessPass { VarPtr mul3_x_; VarPtr true_div1_recip_; VarPtr add2_y_; + VarPtr add1_var_; }; } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/common/optimizer.cc b/mindspore/ccsrc/pre_activate/common/optimizer.cc index 62cff76be0..0e74da3fe8 100644 --- a/mindspore/ccsrc/pre_activate/common/optimizer.cc +++ b/mindspore/ccsrc/pre_activate/common/optimizer.cc @@ -30,7 +30,8 @@ namespace mindspore { namespace opt { namespace { -AnfNodePtr HandleSexpVector(const BaseRef &sexp, const BaseRef &graph, bool multigraph); +AnfNodePtr HandleSexpVector(const BaseRef &sexp, const BaseRef &graph, PrimitiveVarMap *primitive_vars, + bool multigraph); ValueNodePtr CreateValueNodeWithSexp(const BaseRef &sexp) { if (utils::isa(sexp)) { @@ -71,12 +72,20 @@ VarNodePtr CreateVarNodeWithSexp(const BaseRef &sexp, const BaseRef &graph) { return nullptr; } -AnfNodePtr SexpToNode(const BaseRef &sexp, const BaseRef &graph, bool multigraph = false) { +AnfNodePtr SexpToNode(const BaseRef &sexp, const BaseRef &graph, PrimitiveVarMap *primitive_vars, + bool multigraph = false) { MS_LOG(DEBUG) << "SexpToNode sexp: " + sexp.ToString() + ", graph " + graph.ToString(); + MS_EXCEPTION_IF_NULL(primitive_vars); if (utils::isa(sexp)) { - return HandleSexpVector(sexp, graph, multigraph); + return HandleSexpVector(sexp, graph, primitive_vars, 
multigraph); } if (utils::isa(sexp)) { + auto var_ptr = utils::cast(sexp); + MS_EXCEPTION_IF_NULL(var_ptr); + if (var_ptr->primitive()) { + (*primitive_vars)[var_ptr->primitive()] = var_ptr; + return NewValueNode(var_ptr->primitive()); + } return CreateVarNodeWithSexp(sexp, graph); } if (utils::isa(sexp)) { @@ -89,13 +98,14 @@ AnfNodePtr SexpToNode(const BaseRef &sexp, const BaseRef &graph, bool multigraph return value_node; } -AnfNodePtr HandleSexpVector(const BaseRef &sexp, const BaseRef &graph, bool multigraph) { +AnfNodePtr HandleSexpVector(const BaseRef &sexp, const BaseRef &graph, PrimitiveVarMap *primitive_vars, + bool multigraph) { MS_LOG(DEBUG) << "HandleSexpVector sexp: " + sexp.ToString() + ", graph " + graph.ToString(); std::vector input_nodes; const auto &tuple = utils::cast(sexp); if (multigraph && utils::isa(graph)) { for (auto &x : tuple) { - AnfNodePtr node = SexpToNode(x, std::make_shared("G"), true); + AnfNodePtr node = SexpToNode(x, std::make_shared("G"), primitive_vars, true); input_nodes.push_back(node); } VarPtr var_ptr = utils::cast(graph); @@ -103,7 +113,7 @@ AnfNodePtr HandleSexpVector(const BaseRef &sexp, const BaseRef &graph, bool mult } for (auto &x : tuple) { - AnfNodePtr node = SexpToNode(x, graph, multigraph); + AnfNodePtr node = SexpToNode(x, graph, primitive_vars, multigraph); input_nodes.push_back(node); } return CreateCNodeWithGraph(input_nodes, graph); @@ -166,7 +176,8 @@ PatternProcessPass::PatternProcessPass(const std::string &name, bool multigraph) multigraph_(multigraph), pattern_engine_(PatternEngine(std::make_shared(), std::function(AnfEqual), - std::function(CNodeTypeEqual))) {} + std::function(CNodeTypeEqual))), + primitive_vars_(std::make_shared()) {} const BaseRef PatternProcessPass::DefinePattern() const { VarPtr X = std::make_shared(); @@ -176,7 +187,7 @@ const BaseRef PatternProcessPass::DefinePattern() const { void PatternProcessPass::Build() { VarPtr fg = std::make_shared("RootG"); BaseRef pattern = 
std::move(DefinePattern()); - pattern_ = SexpToNode(pattern, fg, multigraph_); + pattern_ = SexpToNode(pattern, fg, primitive_vars_.get(), multigraph_); } AnfNodePtr PatternProcessPass::Run(const FuncGraphPtr &func_graph, const AnfNodePtr &node) { @@ -185,7 +196,8 @@ AnfNodePtr PatternProcessPass::Run(const FuncGraphPtr &func_graph, const AnfNode } auto empty_equiv = std::make_shared(); - EquivPtr equiv = pattern_engine_.Match(pattern_, node, empty_equiv); + MS_EXCEPTION_IF_NULL(primitive_vars_); + EquivPtr equiv = pattern_engine_.Match(pattern_, node, *primitive_vars_, empty_equiv); if (equiv != nullptr && !equiv->empty()) { return Process(func_graph, node, equiv); } diff --git a/mindspore/ccsrc/pre_activate/common/optimizer.h b/mindspore/ccsrc/pre_activate/common/optimizer.h index 8ef0b6dc34..eade7f7789 100644 --- a/mindspore/ccsrc/pre_activate/common/optimizer.h +++ b/mindspore/ccsrc/pre_activate/common/optimizer.h @@ -19,6 +19,7 @@ #include #include #include +#include #include "ir/anf.h" #include "ir/func_graph.h" @@ -46,6 +47,7 @@ class PatternProcessPass : public NodePass { AnfNodePtr pattern_ = nullptr; bool multigraph_ = true; PatternEngine pattern_engine_; + PrimitiveVarMapPtr primitive_vars_; }; class GraphOptimizer { diff --git a/mindspore/ccsrc/pre_activate/common/pattern_engine.cc b/mindspore/ccsrc/pre_activate/common/pattern_engine.cc index e2ff321a89..350332b9d1 100644 --- a/mindspore/ccsrc/pre_activate/common/pattern_engine.cc +++ b/mindspore/ccsrc/pre_activate/common/pattern_engine.cc @@ -42,7 +42,7 @@ void Var::EnsureTag() { } } -bool operator==(const VarPtr& lhs, const VarPtr& rhs) { +bool operator==(const VarPtr &lhs, const VarPtr &rhs) { if (lhs->isa() && rhs->isa()) { CondVarPtr v1 = dyn_cast(lhs); CondVarPtr v2 = dyn_cast(rhs); @@ -63,7 +63,7 @@ std::string SeqVar::ToString() const { return buffer.str(); } -std::ostream& operator<<(std::ostream& os, const VarPtr& var) { +std::ostream &operator<<(std::ostream &os, const VarPtr &var) { if (var 
== nullptr) { os << ""; } else { @@ -73,10 +73,10 @@ std::ostream& operator<<(std::ostream& os, const VarPtr& var) { } template <> -std::ostream& operator<<(std::ostream& os, const Equiv& equiv) { +std::ostream &operator<<(std::ostream &os, const Equiv &equiv) { os << "[Equiv]" << "\n"; - for (auto& equiv_item : equiv) { + for (auto &equiv_item : equiv) { auto k = equiv_item.first; os << k << ":"; BaseRef x = equiv_item.second; @@ -104,7 +104,7 @@ std::ostream& operator<<(std::ostream& os, const Equiv& equiv) return os; } -static BaseRef GetVar(const BaseRef& x) { +static BaseRef GetVar(const BaseRef &x) { MS_LOG(DEBUG) << "getVar start :%s" + x.ToString(); if (utils::isa(x)) { auto node = utils::cast(x); @@ -129,7 +129,7 @@ static BaseRef GetVar(const BaseRef& x) { return x; } -EquivPtr MatchOnVar(const BaseRef& pattern, const BaseRef& expr, EquivPtr equiv) { +EquivPtr MatchOnVar(const BaseRef &pattern, const BaseRef &expr, EquivPtr equiv) { MS_LOG(DEBUG) << "MatchOnVar pattern " + pattern.ToString() + " expr: " + expr.ToString(); MS_EXCEPTION_IF_NULL(equiv); if (utils::isa(pattern)) { @@ -144,8 +144,8 @@ EquivPtr MatchOnVar(const BaseRef& pattern, const BaseRef& expr, EquivPtr equiv) return nullptr; } -bool PatternEngine::ToVector(const VectorRef& pattern_ref, const VectorRef& expr_ref, VectorRef* const values_pattern, - VectorRef* const values_expr) const { +bool PatternEngine::ToVector(const VectorRef &pattern_ref, const VectorRef &expr_ref, VectorRef *const values_pattern, + VectorRef *const values_expr) const { MS_EXCEPTION_IF_NULL(values_expr); if (utils::isa(pattern_ref)) { *values_pattern = pattern_ref; @@ -155,12 +155,12 @@ bool PatternEngine::ToVector(const VectorRef& pattern_ref, const VectorRef& expr return false; } -bool PatternEngine::ToVector(const BaseRef& pattern_ref, const BaseRef& expr_ref, VectorRef* const values_pattern, - VectorRef* const values_expr) const { +bool PatternEngine::ToVector(const BaseRef &pattern_ref, const BaseRef &expr_ref, 
VectorRef *const values_pattern, + VectorRef *const values_expr) const { MS_EXCEPTION_IF_NULL(values_expr); // visitor to visite the list - auto appender_pattern = [](VectorRef& values) { - std::function fn = [&](const BaseRef& u) { + auto appender_pattern = [](VectorRef &values) { + std::function fn = [&](const BaseRef &u) { values.push_back(GetVar(u)); return u; }; @@ -174,8 +174,8 @@ bool PatternEngine::ToVector(const BaseRef& pattern_ref, const BaseRef& expr_ref return false; } - auto appender_expr = [](VectorRef& values) { - std::function fn = [&](const BaseRef& u) { + auto appender_expr = [](VectorRef &values) { + std::function fn = [&](const BaseRef &u) { values.push_back(u); return u; }; @@ -187,10 +187,10 @@ bool PatternEngine::ToVector(const BaseRef& pattern_ref, const BaseRef& expr_ref return visitor_->Visit(expr_ref, nullptr); } -static int GetSVarStartIndex(const VectorRef& values) { +static int GetSVarStartIndex(const VectorRef &values) { int index = -1; int count = 0; - for (auto& value : values) { + for (auto &value : values) { if (utils::isa(value) && utils::cast(value)->isa()) { if (index != -1) { MS_LOG(DEBUG) << "Multiple SVars in sequence"; @@ -203,7 +203,35 @@ static int GetSVarStartIndex(const VectorRef& values) { return index; } -EquivPtr PatternEngine::AlignSVar(const VectorRef& values_pattern, const VectorRef& values_expr, EquivPtr equiv) const { +void UpdateEquivMap(const VectorRef &values_pattern, const BaseRef &expr_ref, const PrimitiveVarMap &primitive_vars, + EquivPtr equiv) { + if (equiv == nullptr || values_pattern.empty() || !utils::isa(values_pattern[0]) || + !utils::isa(expr_ref)) { + return; + } + auto real_node = utils::cast(expr_ref); + MS_EXCEPTION_IF_NULL(real_node); + if (!real_node->isa()) { + return; + } + auto prim_node = utils::cast(values_pattern[0]); + MS_EXCEPTION_IF_NULL(prim_node); + if (!IsValueNode(prim_node)) { + return; + } + ValuePtr value = GetValueNode(prim_node); + MS_EXCEPTION_IF_NULL(value); + auto prim = 
value->cast(); + MS_EXCEPTION_IF_NULL(prim); + auto iter = primitive_vars.find(prim); + if (iter == primitive_vars.end()) { + return; + } + (*equiv)[iter->second] = real_node; +} + +EquivPtr PatternEngine::AlignSVar(const VectorRef &values_pattern, const VectorRef &values_expr, + const PrimitiveVarMap &primitive_vars, EquivPtr equiv) const { int svar_index = GetSVarStartIndex(values_pattern); if (svar_index == kInvalidVarIndex) { return nullptr; @@ -229,12 +257,12 @@ EquivPtr PatternEngine::AlignSVar(const VectorRef& values_pattern, const VectorR if (svar_index != -1 && i == IntToSize(svar_index)) { auto seq = std::vector(values_expr.begin() + svar_index, values_expr.begin() + svar_index + SizeToInt(diff)); - equiv = Match(values_pattern[svar_index], seq, equiv); + equiv = Match(values_pattern[svar_index], seq, primitive_vars, equiv); } else { if (svar_index != -1 && i > IntToSize(svar_index)) { expr_i = i + diff - 1; } - equiv = Match(values_pattern[i], values_expr[expr_i], equiv); + equiv = Match(values_pattern[i], values_expr[expr_i], primitive_vars, equiv); } if (equiv == nullptr) { return nullptr; @@ -243,7 +271,8 @@ EquivPtr PatternEngine::AlignSVar(const VectorRef& values_pattern, const VectorR return equiv; } -EquivPtr PatternEngine::Match(const BaseRef& pattern, const BaseRef& expr, EquivPtr equiv) const { +EquivPtr PatternEngine::Match(const BaseRef &pattern, const BaseRef &expr, const PrimitiveVarMap &primitive_vars, + EquivPtr equiv) const { MS_LOG(DEBUG) << "-----[in Match]"; MS_LOG(DEBUG) << "GetVar w"; BaseRef pattern_ref = GetVar(pattern); @@ -292,10 +321,12 @@ EquivPtr PatternEngine::Match(const BaseRef& pattern, const BaseRef& expr, Equiv // 6. if any svar in both side, find the SeqVar index, // try to pack the Var s in std::vector to a Seq and match elements one by one. 
// check svar - return AlignSVar(values_pattern, values_expr, equiv); + equiv = AlignSVar(values_pattern, values_expr, primitive_vars, equiv); + UpdateEquivMap(values_pattern, expr_ref, primitive_vars, equiv); + return equiv; } -BaseRef PatternEngine::Replace(const BaseRef& pattern, const EquivPtr& equiv) const { +BaseRef PatternEngine::Replace(const BaseRef &pattern, const EquivPtr &equiv) const { MS_EXCEPTION_IF_NULL(equiv); MS_LOG(DEBUG) << "-----[in Replace]"; BaseRef ref = GetVar(pattern); @@ -304,7 +335,7 @@ BaseRef PatternEngine::Replace(const BaseRef& pattern, const EquivPtr& equiv) co // w is var if (utils::isa(ref)) { - const VarPtr& var = utils::cast(ref); + const VarPtr &var = utils::cast(ref); auto iter = equiv->find(var); if (iter != equiv->end()) { out = iter->second; @@ -316,7 +347,7 @@ BaseRef PatternEngine::Replace(const BaseRef& pattern, const EquivPtr& equiv) co } // visitor to visit the list - std::function fn = [&, this, equiv](const BaseRef& u) { return Replace(u, equiv); }; + std::function fn = [&, this, equiv](const BaseRef &u) { return Replace(u, equiv); }; visitor_->SetFn(fn); BaseRef visit_out; diff --git a/mindspore/ccsrc/pre_activate/common/pattern_engine.h b/mindspore/ccsrc/pre_activate/common/pattern_engine.h index 432746332f..858b1aecb8 100644 --- a/mindspore/ccsrc/pre_activate/common/pattern_engine.h +++ b/mindspore/ccsrc/pre_activate/common/pattern_engine.h @@ -31,6 +31,7 @@ #include #include #include +#include #include "pre_activate/common/visit.h" #include "ir/base.h" @@ -44,16 +45,19 @@ using CondVarPtr = std::shared_ptr; using SVarPtr = std::shared_ptr; const int kInvalidVarIndex = -2; -using ConditionFunc = std::function; +using ConditionFunc = std::function; // Base wildcard variable which could match any anf node. 
class Var : public Base { friend class VarHasher; public: - explicit Var(const std::string& tag = "") : tag_(tag) { EnsureTag(); } - Var(const Var& other) : Base(other), tag_(other.tag_) {} - virtual Var& operator=(const Var& other) { + explicit Var(std::string tag = "") : tag_(std::move(tag)), primitive_(nullptr) { EnsureTag(); } + explicit Var(const PrimitivePtr &primitive, std::string tag = "") : tag_(std::move(tag)), primitive_(primitive) { + EnsureTag(); + } + Var(const Var &other) : Base(other), tag_(other.tag_) {} + virtual Var &operator=(const Var &other) { if (&other == this) { return *this; } @@ -63,12 +67,13 @@ class Var : public Base { ~Var() override = default; MS_DECLARE_PARENT(Var, Base); - virtual bool matches(const BaseRef&) { return true; } + virtual bool matches(const BaseRef &) { return true; } - virtual bool operator==(const Var& other) const { return tag_ == other.tag_; } - bool operator!=(const Var& other) const { return !(&other == this); } + virtual bool operator==(const Var &other) const { return tag_ == other.tag_; } + bool operator!=(const Var &other) const { return !(&other == this); } std::string tag() const { return tag_; } + PrimitivePtr primitive() const { return primitive_; } std::string ToString() const override { std::ostringstream buffer; buffer << "Var(" << tag_ << ")"; @@ -80,12 +85,13 @@ class Var : public Base { void EnsureTag(); std::string tag_; + PrimitivePtr primitive_; }; // VarNode means variable node, a subclass of AnfNode class VarNode : public AnfNode { public: - VarNode(const VarPtr& value, const FuncGraphPtr& func_graph) : AnfNode(func_graph), var_(value) {} + VarNode(const VarPtr &value, const FuncGraphPtr &func_graph) : AnfNode(func_graph), var_(value) {} ~VarNode() override = default; MS_DECLARE_PARENT(VarNode, AnfNode); @@ -95,16 +101,16 @@ using VarNodePtr = std::shared_ptr; class VarHasher { public: - std::size_t operator()(const Var& var) const { return var.hash(); } + std::size_t operator()(const Var &var) 
const { return var.hash(); } }; // Condition Var, match an anf node when condition function return true. class CondVar : public Var { public: - explicit CondVar(const ConditionFunc& cond) : cond_fn_(cond) {} + explicit CondVar(const ConditionFunc &cond) : cond_fn_(cond) {} ~CondVar() override = default; MS_DECLARE_PARENT(CondVar, Var); - bool matches(const BaseRef& value) override { + bool matches(const BaseRef &value) override { MS_LOG(DEBUG) << "CondVarPtr match: " + value.ToString(); if (utils::isa(value)) { return false; @@ -124,55 +130,60 @@ class SeqVar : public Var { ~SeqVar() override = default; MS_DECLARE_PARENT(SeqVar, Var); explicit SeqVar(const VarPtr subvar) : subvar_(nullptr) { subvar_ = subvar; } - bool matches(const BaseRef& value) override { + bool matches(const BaseRef &value) override { // match Seq. if (utils::isa(value)) { - const Seq& seq = utils::cast(value); - return std::all_of(seq.begin(), seq.end(), [this](const BaseRef& v) { + const Seq &seq = utils::cast(value); + return std::all_of(seq.begin(), seq.end(), [this](const BaseRef &v) { auto eq = subvar_->matches(v); return eq; }); } return false; } - bool operator==(const SeqVar& other) const { return *subvar_ == *other.subvar_; } + bool operator==(const SeqVar &other) const { return *subvar_ == *other.subvar_; } std::string ToString() const override; private: VarPtr subvar_; }; -bool operator==(const VarPtr& lhs, const VarPtr& rhs); +bool operator==(const VarPtr &lhs, const VarPtr &rhs); -inline bool operator!=(const VarPtr& lhs, const VarPtr& rhs) { return !(lhs == rhs); } +inline bool operator!=(const VarPtr &lhs, const VarPtr &rhs) { return !(lhs == rhs); } -std::ostream& operator<<(std::ostream& os, const VarPtr& var); +std::ostream &operator<<(std::ostream &os, const VarPtr &var); using Equiv = std::map; using EquivPtr = std::shared_ptr; +using PrimitiveVarMap = std::unordered_map; +using PrimitiveVarMapPtr = std::shared_ptr; -inline bool DefaultTypeEq(const BaseRef& x, const 
BaseRef& y) { return x.type() == y.type(); } +inline bool DefaultTypeEq(const BaseRef &x, const BaseRef &y) { return x.type() == y.type(); } class PatternEngine { public: - PatternEngine(const std::shared_ptr& visitor, const std::function& eq, - const std::function& type_eq = DefaultTypeEq) + PatternEngine(const std::shared_ptr &visitor, + const std::function &eq, + const std::function &type_eq = DefaultTypeEq) : visitor_(visitor), eq_(eq), type_eq_(type_eq) {} ~PatternEngine() = default; - EquivPtr Match(const BaseRef& pattern, const BaseRef& expr, EquivPtr equiv) const; + EquivPtr Match(const BaseRef &pattern, const BaseRef &expr, const PrimitiveVarMap &primitive_vars, + EquivPtr equiv) const; // Replace pattern with equivalent - BaseRef Replace(const BaseRef& pattern, const EquivPtr& equiv) const; + BaseRef Replace(const BaseRef &pattern, const EquivPtr &equiv) const; private: - EquivPtr AlignSVar(const VectorRef& values_pattern, const VectorRef& values_expr, EquivPtr equiv) const; - bool ToVector(const BaseRef& pattern, const BaseRef& expr, VectorRef* const values_pattern, - VectorRef* const values_expr) const; - bool ToVector(const VectorRef& pattern_ref, const VectorRef& expr_ref, VectorRef* const values_pattern, - VectorRef* const values_expr) const; + EquivPtr AlignSVar(const VectorRef &values_pattern, const VectorRef &values_expr, + const PrimitiveVarMap &primitive_vars, EquivPtr equiv) const; + bool ToVector(const BaseRef &pattern, const BaseRef &expr, VectorRef *const values_pattern, + VectorRef *const values_expr) const; + bool ToVector(const VectorRef &pattern_ref, const VectorRef &expr_ref, VectorRef *const values_pattern, + VectorRef *const values_expr) const; std::shared_ptr visitor_; - std::function eq_; - std::function type_eq_; + std::function eq_; + std::function type_eq_; }; } // namespace mindspore namespace std { diff --git a/tests/ut/cpp/pre_activate/common/pattern_engine_test.cc b/tests/ut/cpp/pre_activate/common/pattern_engine_test.cc 
index 9124f5cf74..7b0e2cc9db 100644 --- a/tests/ut/cpp/pre_activate/common/pattern_engine_test.cc +++ b/tests/ut/cpp/pre_activate/common/pattern_engine_test.cc @@ -40,6 +40,7 @@ class TestMatchEngine : public UT::Common { public: PatternEngine TU; EquivPtr equiv_null; + PrimitiveVarMap primitive_vars_null; }; TEST_F(TestMatchEngine, Var) { @@ -106,30 +107,30 @@ TEST_F(TestMatchEngine, MatchRaw_Var) { // common equiv_null->clear(); - d = TU.Match(v1, 1, equiv_null); + d = TU.Match(v1, 1, primitive_vars_null, equiv_null); ASSERT_EQ((*d)[v1], 1); equiv_null->clear(); (*equiv_null)[v1] = v2; - d = TU.Match(v1, 1, equiv_null); + d = TU.Match(v1, 1, primitive_vars_null, equiv_null); ASSERT_EQ(d->count(v2), std::size_t(1)); ASSERT_EQ((*d)[v2], 1); equiv_null->clear(); (*equiv_null)[v1] = v2; (*equiv_null)[v3] = 1; - d = TU.Match(v1, 1, equiv_null); + d = TU.Match(v1, 1, primitive_vars_null, equiv_null); ASSERT_EQ(d->count(v2), std::size_t(1)); ASSERT_EQ((*d)[v2], 1); equiv_null->clear(); - d = TU.Match(VectorRef({v1}), VectorRef({1}), equiv_null); + d = TU.Match(VectorRef({v1}), VectorRef({1}), primitive_vars_null, equiv_null); ASSERT_EQ(d->size(), std::size_t(1)); ASSERT_EQ(d->count(v1), std::size_t(1)); ASSERT_EQ((*d)[v1], 1); equiv_null->clear(); - ASSERT_EQ(TU.Match(1, 2, equiv_null), nullptr); + ASSERT_EQ(TU.Match(1, 2, primitive_vars_null, equiv_null), nullptr); } TEST_F(TestMatchEngine, MatchRaw_SVar) { @@ -139,22 +140,22 @@ TEST_F(TestMatchEngine, MatchRaw_SVar) { EquivPtr d; equiv_null->clear(); - d = TU.Match(VectorRef({sv1}), VectorRef({1, 2}), equiv_null); + d = TU.Match(VectorRef({sv1}), VectorRef({1, 2}), primitive_vars_null, equiv_null); ASSERT_EQ(d->size(), std::size_t(1)); ASSERT_EQ(d->count(sv1), std::size_t(1)); ASSERT_EQ(utils::cast((*d)[sv1]), Seq({1, 2})); equiv_null->clear(); - d = TU.Match(VectorRef({v1, sv1}), VectorRef({1, 2}), equiv_null); + d = TU.Match(VectorRef({v1, sv1}), VectorRef({1, 2}), primitive_vars_null, equiv_null); 
ASSERT_EQ(d->size(), std::size_t(2)); ASSERT_EQ(utils::cast((*d)[sv1]), Seq({2})); equiv_null->clear(); - ASSERT_EQ(TU.Match(VectorRef({sv1, sv2}), VectorRef({1, 2}), equiv_null), nullptr); + ASSERT_EQ(TU.Match(VectorRef({sv1, sv2}), VectorRef({1, 2}), primitive_vars_null, equiv_null), nullptr); equiv_null->clear(); (*equiv_null)[sv1] = std::make_shared(PatternListType{1, 2}); - d = TU.Match(VectorRef({v1, sv1}), VectorRef({1, 1, 2}), equiv_null); + d = TU.Match(VectorRef({v1, sv1}), VectorRef({1, 1, 2}), primitive_vars_null, equiv_null); ASSERT_EQ(d->size(), std::size_t(2)); ASSERT_EQ((*d)[v1], 1); } @@ -167,13 +168,13 @@ TEST_F(TestMatchEngine, Match) { EquivPtr d; equiv_null->clear(); - d = TU.Match(VectorRef({v1, v1, v2}), VectorRef({1, 1, 2}), equiv_null); + d = TU.Match(VectorRef({v1, v1, v2}), VectorRef({1, 1, 2}), primitive_vars_null, equiv_null); ASSERT_EQ(d->size(), std::size_t(2)); ASSERT_EQ((*d)[v1], 1); ASSERT_EQ((*d)[v2], 2); equiv_null->clear(); - d = TU.Match(static_cast(1), static_cast(1), equiv_null); + d = TU.Match(static_cast(1), static_cast(1), primitive_vars_null, equiv_null); ASSERT_EQ(d, nullptr); } @@ -197,18 +198,19 @@ TEST_F(TestMatchEngine, Match_CondVar) { EquivPtr d; equiv_null->clear(); - d = TU.Match(VectorRef({vf, vn}), VectorRef({static_cast(1.0), -1}), equiv_null); + d = TU.Match(VectorRef({vf, vn}), VectorRef({static_cast(1.0), -1}), primitive_vars_null, equiv_null); ASSERT_GE(d->size(), std::size_t(0)); auto vfn = (*d)[vf]; ASSERT_EQ((*d)[vf], static_cast(1.0)); ASSERT_EQ((*d)[vn], -1); equiv_null->clear(); - d = TU.Match(VectorRef({vf, vn}), VectorRef({1, static_cast(-1.0)}), equiv_null); + d = TU.Match(VectorRef({vf, vn}), VectorRef({1, static_cast(-1.0)}), primitive_vars_null, equiv_null); ASSERT_EQ(d, nullptr); equiv_null->clear(); - d = TU.Match(VectorRef({vf, vn}), VectorRef({static_cast(1.0), static_cast(1)}), equiv_null); + d = TU.Match(VectorRef({vf, vn}), VectorRef({static_cast(1.0), static_cast(1)}), 
primitive_vars_null, + equiv_null); ASSERT_EQ(d, nullptr); } From 02aca064519ed7d17c169016241262682f32dda8 Mon Sep 17 00:00:00 2001 From: zjun Date: Thu, 9 Apr 2020 20:43:12 +0800 Subject: [PATCH 223/367] Add all other tbe op info register --- mindspore/ops/_op_impl/tbe/add.py | 81 ++--- mindspore/ops/_op_impl/tbe/add_n.py | 78 ++--- mindspore/ops/_op_impl/tbe/apply_adam.py | 264 ++++----------- mindspore/ops/_op_impl/tbe/apply_momentum.py | 138 ++------ .../ops/_op_impl/tbe/arg_max_with_value.py | 79 +---- .../ops/_op_impl/tbe/arg_min_with_value.py | 79 +---- mindspore/ops/_op_impl/tbe/assign.py | 118 ++----- mindspore/ops/_op_impl/tbe/assign_add.py | 96 ++---- mindspore/ops/_op_impl/tbe/assign_sub.py | 74 +---- .../ops/_op_impl/tbe/atomic_addr_clean.py | 35 +- mindspore/ops/_op_impl/tbe/batch_matmul.py | 101 ++---- mindspore/ops/_op_impl/tbe/batchnorm.py | 203 +++--------- mindspore/ops/_op_impl/tbe/batchnorm_grad.py | 210 +++--------- mindspore/ops/_op_impl/tbe/bias_add.py | 80 +---- mindspore/ops/_op_impl/tbe/bias_add_grad.py | 67 +--- .../ops/_op_impl/tbe/bn_training_reduce.py | 68 +--- .../_op_impl/tbe/bn_training_reduce_grad.py | 150 ++------- .../ops/_op_impl/tbe/bn_training_update.py | 224 ++----------- .../_op_impl/tbe/bn_training_update_grad.py | 123 ++----- mindspore/ops/_op_impl/tbe/cast.py | 95 ++---- .../_op_impl/tbe/clip_by_norm_no_div_sum.py | 102 ++---- mindspore/ops/_op_impl/tbe/clip_by_value.py | 99 ++---- mindspore/ops/_op_impl/tbe/concat.py | 169 ++-------- .../_op_impl/tbe/confusion_softmax_grad.py | 75 ++--- .../ops/_op_impl/tbe/confusion_transpose_d.py | 107 ++---- mindspore/ops/_op_impl/tbe/conv2d.py | 128 ++------ .../_op_impl/tbe/conv2d_backprop_filter.py | 100 ++---- .../ops/_op_impl/tbe/conv2d_backprop_input.py | 99 ++---- mindspore/ops/_op_impl/tbe/div.py | 85 ++--- mindspore/ops/_op_impl/tbe/dropout_do_mask.py | 85 +---- mindspore/ops/_op_impl/tbe/equal.py | 80 ++--- mindspore/ops/_op_impl/tbe/exp.py | 59 +--- 
mindspore/ops/_op_impl/tbe/expand_dims.py | 66 +--- mindspore/ops/_op_impl/tbe/floor_div.py | 73 +---- mindspore/ops/_op_impl/tbe/fused_mul_add.py | 115 ++----- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py | 99 ++---- .../_op_impl/tbe/fused_mul_apply_momentum.py | 164 ++-------- mindspore/ops/_op_impl/tbe/gather_v2.py | 131 +++----- mindspore/ops/_op_impl/tbe/gelu.py | 64 ++-- mindspore/ops/_op_impl/tbe/gelu_grad.py | 90 ++--- mindspore/ops/_op_impl/tbe/greater.py | 82 ++--- mindspore/ops/_op_impl/tbe/lamb_next_mv.py | 309 +++--------------- .../tbe/lamb_next_mv_with_decay_v1.py | 309 +++--------------- .../ops/_op_impl/tbe/lamb_update_with_lr.py | 193 ++--------- .../_op_impl/tbe/lamb_update_with_lr_v2.py | 159 ++------- mindspore/ops/_op_impl/tbe/layer_norm.py | 132 ++------ .../tbe/layer_norm_beta_gamma_backprop.py | 127 ++----- mindspore/ops/_op_impl/tbe/layer_norm_grad.py | 141 ++------ .../ops/_op_impl/tbe/layer_norm_x_backprop.py | 121 ++----- mindspore/ops/_op_impl/tbe/less.py | 81 ++--- mindspore/ops/_op_impl/tbe/less_equal.py | 83 ++--- mindspore/ops/_op_impl/tbe/log.py | 59 +--- mindspore/ops/_op_impl/tbe/logical_and.py | 73 +---- mindspore/ops/_op_impl/tbe/logical_not.py | 59 +--- mindspore/ops/_op_impl/tbe/logical_or.py | 73 +---- mindspore/ops/_op_impl/tbe/logsoftmax.py | 65 +--- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py | 79 +---- mindspore/ops/_op_impl/tbe/matmul.py | 102 ++---- mindspore/ops/_op_impl/tbe/max_pool.py | 84 +---- mindspore/ops/_op_impl/tbe/max_pool_grad.py | 104 ++---- .../_op_impl/tbe/max_pool_grad_with_argmax.py | 107 ++---- .../ops/_op_impl/tbe/max_pool_with_argmax.py | 92 +----- mindspore/ops/_op_impl/tbe/maximum.py | 81 ++--- mindspore/ops/_op_impl/tbe/maximum_grad.py | 134 ++------ mindspore/ops/_op_impl/tbe/minimum.py | 84 ++--- mindspore/ops/_op_impl/tbe/minimum_grad.py | 134 ++------ mindspore/ops/_op_impl/tbe/mul.py | 96 ++---- mindspore/ops/_op_impl/tbe/neg.py | 62 ++-- .../_op_impl/tbe/npu_alloc_float_status.py | 42 
+-- .../_op_impl/tbe/npu_clear_float_status.py | 56 +--- .../ops/_op_impl/tbe/npu_get_float_status.py | 56 +--- mindspore/ops/_op_impl/tbe/one_hot.py | 115 ++----- mindspore/ops/_op_impl/tbe/pad_d.py | 68 ++-- mindspore/ops/_op_impl/tbe/pow.py | 74 +---- mindspore/ops/_op_impl/tbe/real_div.py | 72 +--- mindspore/ops/_op_impl/tbe/reciprocal.py | 61 +--- mindspore/ops/_op_impl/tbe/reduce_max.py | 76 ++--- mindspore/ops/_op_impl/tbe/reduce_mean.py | 74 ++--- mindspore/ops/_op_impl/tbe/reduce_mean_d.py | 74 ++--- mindspore/ops/_op_impl/tbe/reduce_min.py | 78 ++--- mindspore/ops/_op_impl/tbe/reduce_sum.py | 72 +--- mindspore/ops/_op_impl/tbe/relu.py | 65 ++-- mindspore/ops/_op_impl/tbe/relu_grad.py | 82 ++--- mindspore/ops/_op_impl/tbe/reshape.py | 66 +--- .../_op_impl/tbe/resize_nearest_neighbor.py | 84 ++--- .../_op_impl/tbe/resize_nearest_neighbor_d.py | 75 ++--- .../tbe/resize_nearest_neighbor_grad_d.py | 71 +--- mindspore/ops/_op_impl/tbe/round.py | 61 +--- mindspore/ops/_op_impl/tbe/rsqrt.py | 107 ++---- mindspore/ops/_op_impl/tbe/scatter_nd.py | 83 ++--- mindspore/ops/_op_impl/tbe/scatter_nd_d.py | 82 ++--- mindspore/ops/_op_impl/tbe/select.py | 111 ++----- mindspore/ops/_op_impl/tbe/sigmoid.py | 82 ++--- .../tbe/sigmoid_cross_entropy_with_logits.py | 74 +---- .../sigmoid_cross_entropy_with_logits_grad.py | 88 ++--- mindspore/ops/_op_impl/tbe/sigmoid_grad.py | 74 +---- mindspore/ops/_op_impl/tbe/slice.py | 116 ++----- mindspore/ops/_op_impl/tbe/softmax.py | 68 ++-- .../tbe/softmax_cross_entropy_with_logits.py | 85 +---- mindspore/ops/_op_impl/tbe/split_d.py | 100 +++--- mindspore/ops/_op_impl/tbe/sqrt.py | 61 +--- mindspore/ops/_op_impl/tbe/square.py | 68 ++-- mindspore/ops/_op_impl/tbe/square_sum_v1.py | 72 +--- mindspore/ops/_op_impl/tbe/square_sum_v2.py | 86 +---- mindspore/ops/_op_impl/tbe/squeeze.py | 65 +--- mindspore/ops/_op_impl/tbe/strideslice_d.py | 118 ++----- .../ops/_op_impl/tbe/strideslicegrad_d.py | 131 ++------ mindspore/ops/_op_impl/tbe/sub.py | 
75 ++--- mindspore/ops/_op_impl/tbe/tanh.py | 59 +--- mindspore/ops/_op_impl/tbe/tanh_grad.py | 73 +---- mindspore/ops/_op_impl/tbe/tensor_add.py | 82 ++--- mindspore/ops/_op_impl/tbe/tile.py | 66 +--- mindspore/ops/_op_impl/tbe/topkv2.py | 99 +----- mindspore/ops/_op_impl/tbe/trans_data.py | 113 +++---- mindspore/ops/_op_impl/tbe/transpose_d.py | 75 ++--- .../ops/_op_impl/tbe/unsorted_segment_sum.py | 201 ++---------- mindspore/ops/_op_impl/tbe/zeros_like.py | 68 ++-- mindspore/ops/op_info_register.py | 35 +- 118 files changed, 2686 insertions(+), 8931 deletions(-) diff --git a/mindspore/ops/_op_impl/tbe/add.py b/mindspore/ops/_op_impl/tbe/add.py index 95c31d8974..63e1efb1c6 100644 --- a/mindspore/ops/_op_impl/tbe/add.py +++ b/mindspore/ops/_op_impl/tbe/add.py @@ -14,71 +14,28 @@ # ============================================================================ """Add op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +add_op_info = TBERegOp("Add") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("add.so") \ + .compute_cost(10) \ + .kernel_name("add") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Add", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "add.so", - "compute_cost": 
10, - "kernel_name": "add", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", - "float", "int32", "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(add_op_info) def _add_tbe(): """Add TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/add_n.py b/mindspore/ops/_op_impl/tbe/add_n.py index 9177ed14c7..3e8a6c0016 100644 --- a/mindspore/ops/_op_impl/tbe/add_n.py +++ b/mindspore/ops/_op_impl/tbe/add_n.py @@ -14,61 +14,33 @@ # ============================================================================ """AddN op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, 
TBERegOp, DataType +add_n_op_info = TBERegOp("AddN") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("add_n.so") \ + .compute_cost(10) \ + .kernel_name("add_n") \ + .partial_flag(True) \ + .attr("n", "required", "int", "all") \ + .input(0, "x", False, "dynamic", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.I32_FracZ, DataType.I32_FracZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "AddN", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "add_n.so", - "compute_cost": 10, - "kernel_name": "add_n", - "partial_flag": true, - "attr": [ - { - "name": "n", - "param_type": "required", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float","int32","int32","int32" - ], - "format": [ - "DefaultFormat","NC1HWC0","FracZ","FRACTAL_NZ", - "DefaultFormat","NC1HWC0","FracZ","FRACTAL_NZ","DefaultFormat","NC1HWC0","FracZ" - ], - "name": "x", - "need_compile": false, - "param_type": "dynamic", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float","int32","int32","int32" - ], - "format": [ - "DefaultFormat","NC1HWC0","FracZ","FRACTAL_NZ", - 
"DefaultFormat","NC1HWC0","FracZ","FRACTAL_NZ","DefaultFormat","NC1HWC0","FracZ" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(add_n_op_info) def _add_n_tbe(): """AddN TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/apply_adam.py b/mindspore/ops/_op_impl/tbe/apply_adam.py index 1d5c383515..6fd7205567 100644 --- a/mindspore/ops/_op_impl/tbe/apply_adam.py +++ b/mindspore/ops/_op_impl/tbe/apply_adam.py @@ -14,214 +14,66 @@ # ============================================================================ """ApplyAdam op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +apply_adam_op_info = TBERegOp("Adam") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("apply_adam.so") \ + .compute_cost(10) \ + .kernel_name("apply_adam") \ + .partial_flag(True) \ + .attr("use_locking", "optional", "bool", "true,false", "false") \ + .attr("use_nesterov", "optional", "bool", "true,false", "false") \ + .input(0, "var", False, "required", "all") \ + .input(1, "m", False, "required", "all") \ + .input(2, "v", False, "required", "all") \ + .input(3, "beta1_power", False, "required", "all") \ + .input(4, "beta2_power", False, "required", "all") \ + .input(5, "lr", False, "required", "all") \ + .input(6, "beta1", False, "required", "all") \ + .input(7, "beta2", False, "required", "all") \ + .input(8, "epsilon", False, "required", "all") \ + .input(9, "grad", False, "required", "all") \ + .output(0, "var", False, "required", "all") \ + .output(1, "m", False, "required", "all") \ + .output(2, "v", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, 
DataType.F16_Default, + DataType.F16_Default) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, + DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, + DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ, + DataType.F16_FracZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, + DataType.F32_C1HWNCoC0) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default, + DataType.F32_Default, 
DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ, + DataType.F32_FracZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Adam", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "apply_adam.so", - "compute_cost": 10, - "kernel_name": "apply_adam", - "partial_flag": true, - "attr": [ - { - "name": "use_locking", - "param_type": "optional", - "type": "bool", - "value": "true,false", - "default_value":"false" - }, - { - "name": "use_nesterov", - "param_type": "optional", - "type": "bool", - "value": "true,false", - "default_value":"false" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" - ], - "name": "var", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" - ], - "name": "m", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" - ], - "name": "v", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float16","float16","float16","float16","float","float","float", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat" - ], - "name": "beta1_power", - "need_compile": false, - 
"param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat" - ], - "name": "beta2_power", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 5, - "dtype": [ - "float16","float16","float16","float16","float","float","float", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat" - ], - "name": "lr", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 6, - "dtype": [ - "float16","float16","float16","float16","float","float","float", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat" - ], - "name": "beta1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 7, - "dtype": [ - "float16","float16","float16","float16","float","float","float", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat" - ], - "name": "beta2", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 8, - "dtype": [ - "float16","float16","float16","float16","float","float","float", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat" - ], - "name": "epsilon", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 9, - "dtype": [ - "float16","float16","float16","float16","float","float","float", "float" - ], - "format": [ - 
"NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" - ], - "name": "grad", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" - ], - "name": "var", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" - ], - "name": "m", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ" - ], - "name": "v", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(apply_adam_op_info) def _apply_adam_tbe(): """ApplyAdam TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/apply_momentum.py b/mindspore/ops/_op_impl/tbe/apply_momentum.py index f2c6f5b15e..42ce9d0e41 100644 --- a/mindspore/ops/_op_impl/tbe/apply_momentum.py +++ b/mindspore/ops/_op_impl/tbe/apply_momentum.py @@ -14,112 +14,42 @@ # ============================================================================ """ApplyMomentum op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +apply_momentum_op_info = TBERegOp("ApplyMomentum") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("apply_momentum.so") \ + .compute_cost(10) \ + .kernel_name("apply_momentum") \ + 
.partial_flag(True) \ + .attr("use_nesterov", "optional", "bool", "true,false", "false") \ + .input(0, "var", False, "required", "all") \ + .input(1, "accum", False, "required", "all") \ + .input(2, "lr", False, "required", "all") \ + .input(3, "grad", False, "required", "all") \ + .input(4, "momentum", False, "required", "all") \ + .output(0, "var", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_Default, DataType.F16_5HD, + DataType.F16_Default, DataType.F16_5HD) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_C1HWNCoC0, + DataType.F16_Default, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_Default, DataType.F16_FracZ, + DataType.F16_Default, DataType.F16_FracZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_Default, DataType.F32_5HD, + DataType.F32_Default, DataType.F32_5HD) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_Default, DataType.F32_C1HWNCoC0, + DataType.F32_Default, DataType.F32_C1HWNCoC0) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default, DataType.F32_FracZ, + DataType.F32_Default, DataType.F32_FracZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ApplyMomentum", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "apply_momentum.so", - "compute_cost": 10, - "kernel_name": "apply_momentum", - "partial_flag": true, - "attr": [ - { - "name": "use_nesterov", - "param_type": "optional", - "type": "bool", - "value": "true,false", - "default_value":"false" - } - ], - "inputs": [ - { - 
"index": 0, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "DefaultFormat", "FracZ", "C1HWNCoC0" - ], - "name": "var", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "DefaultFormat", "FracZ", "C1HWNCoC0" - ], - "name": "accum", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat" - ], - "name": "lr", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "DefaultFormat", "FracZ", "C1HWNCoC0" - ], - "name": "grad", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float16","float16","float16","float16","float","float","float", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat" - ], - "name": "momentum", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "NC1HWC0", "C1HWNCoC0", "DefaultFormat", "FracZ", "NC1HWC0", "DefaultFormat", "FracZ", "C1HWNCoC0" - ], - "name": "var", - "need_compile": false, - "param_type": 
"required", - "shape": "all" - } - ] -}""") + +@op_info_register(apply_momentum_op_info) def _apply_momentum_tbe(): """ApplyMomentum TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/arg_max_with_value.py b/mindspore/ops/_op_impl/tbe/arg_max_with_value.py index e5ffe3d36f..ca393f3356 100644 --- a/mindspore/ops/_op_impl/tbe/arg_max_with_value.py +++ b/mindspore/ops/_op_impl/tbe/arg_max_with_value.py @@ -14,70 +14,25 @@ # ============================================================================ """ArgMaxWithValue op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +arg_max_with_value_op_info = TBERegOp("ArgMaxWithValue") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("arg_max_with_value.so") \ + .compute_cost(10) \ + .kernel_name("arg_max_with_value") \ + .partial_flag(True) \ + .attr("axis", "required", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "indice", False, "required", "all") \ + .output(1, "values", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ArgMaxWithValue", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "arg_max_with_value.so", - "compute_cost": 10, - "kernel_name": "arg_max_with_value", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "required", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "int32", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" 
- ], - "name": "indice", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "values", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(arg_max_with_value_op_info) def _arg_max_with_value_tbe(): """ArgMaxWithValue TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/arg_min_with_value.py b/mindspore/ops/_op_impl/tbe/arg_min_with_value.py index 3d66b4534d..a23005403b 100644 --- a/mindspore/ops/_op_impl/tbe/arg_min_with_value.py +++ b/mindspore/ops/_op_impl/tbe/arg_min_with_value.py @@ -14,70 +14,25 @@ # ============================================================================ """ArgMinWithValue op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +arg_min_with_value_op_info = TBERegOp("ArgMaxWithValue") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("arg_min_with_value.so") \ + .compute_cost(10) \ + .kernel_name("arg_min_with_value") \ + .partial_flag(True) \ + .attr("axis", "required", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "indice", False, "required", "all") \ + .output(1, "values", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ArgMinWithValue", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "arg_min_with_value.so", - "compute_cost": 10, - "kernel_name": "arg_min_with_value", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "required", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - 
"float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "int32", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "indice", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "values", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(arg_min_with_value_op_info) def _arg_min_with_value_tbe(): """ArgMinWithValue TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/assign.py b/mindspore/ops/_op_impl/tbe/assign.py index 610a221a1c..41a9a0fecd 100644 --- a/mindspore/ops/_op_impl/tbe/assign.py +++ b/mindspore/ops/_op_impl/tbe/assign.py @@ -14,93 +14,43 @@ # ============================================================================ """Assign op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +assign_op_info = TBERegOp("Assign") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("assign.so") \ + .compute_cost(10) \ + .kernel_name("assign") \ + .partial_flag(True) \ + .input(0, "resource", False, "required", "all") \ + .input(1, "value", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I16_Default, DataType.I16_Default, DataType.I16_Default) \ + .dtype_format(DataType.I16_5HD, DataType.I16_5HD, DataType.I16_5HD) \ + 
.dtype_format(DataType.U16_Default, DataType.U16_Default, DataType.U16_Default) \ + .dtype_format(DataType.U16_5HD, DataType.U16_5HD, DataType.U16_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.U32_Default, DataType.U32_Default, DataType.U32_Default) \ + .dtype_format(DataType.U32_5HD, DataType.U32_5HD, DataType.U32_5HD) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_5HD, DataType.I64_5HD, DataType.I64_5HD) \ + .dtype_format(DataType.U64_Default, DataType.U64_Default, DataType.U64_Default) \ + .dtype_format(DataType.U64_5HD, DataType.U64_5HD, DataType.U64_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Assign", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "assign.so", - "compute_cost": 10, - "kernel_name": "assign", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32", "uint32", "uint32", "uint32", "uint32", "int8", - "int8", "int8", "int8", "uint8", "uint8", "uint8", "uint8", "int16", "int16", "int16", - "int16", "uint16", "uint16", "uint16", "uint16", "int64", "int64", "int64", "int64", - "uint64", "uint64", "uint64", "uint64", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", 
"DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "FRACTAL_NZ" - ], - "name": "resource", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", "int32", - "int32", "int32", "uint32", "uint32", "uint32", "uint32", "int8", "int8", "int8", "int8", "uint8", - "uint8", "uint8", "uint8", "int16", "int16", "int16", "int16", "uint16", "uint16", "uint16", - "uint16", "int64", "int64", "int64", "int64", "uint64", "uint64", "uint64", "uint64", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "FRACTAL_NZ" - ], - "name": "value", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32", 
"uint32", "uint32", "uint32", "uint32", "int8", "int8", "int8", - "int8", "uint8", "uint8", "uint8", "uint8", "int16", "int16", "int16", "int16", "uint16", - "uint16", "uint16", "uint16", "int64", "int64", "int64", "int64", - "uint64", "uint64", "uint64", "uint64", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "FRACTAL_NZ" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(assign_op_info) def _assign_tbe(): """Assign TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/assign_add.py b/mindspore/ops/_op_impl/tbe/assign_add.py index 94e0e781f5..fbbb9a997f 100644 --- a/mindspore/ops/_op_impl/tbe/assign_add.py +++ b/mindspore/ops/_op_impl/tbe/assign_add.py @@ -14,80 +14,34 @@ # ============================================================================ """AssignAdd op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +assign_add_op_info = TBERegOp("AssignAdd") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("assignadd.so") \ + .compute_cost(10) \ + .kernel_name("assignadd") \ + .partial_flag(True) \ + .input(0, "ref", False, "required", "all") \ + .input(1, "value", False, "required", "all") \ + .output(0, "output_ref", False, "required", "all") \ + .dtype_format(DataType.I8_Default, 
DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_5HD, DataType.I64_5HD, DataType.I64_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "AssignAdd", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "assignadd.so", - "compute_cost": 10, - "kernel_name": "assignadd", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", "int32", - "int32", "int32", "int8", "int8", "int8", "int8", "uint8", "uint8", "uint8", "uint8", "int64", - "int64", "int64", "int64" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "ref", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype":[ - "float16", "float16", "float16", "float16", 
"float", "float", "float", "float", "int32", "int32", - "int32", "int32", "int8", "int8", "int8", "int8", "uint8", "uint8", "uint8", "uint8", "int64", - "int64", "int64", "int64" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "value", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", "int32", - "int32", "int32", "int8", "int8", "int8", "int8", "uint8", "uint8", "uint8", "uint8", "int64", - "int64", "int64", "int64" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "output_ref", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(assign_add_op_info) def _assign_add_tbe(): """AssignAdd TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/assign_sub.py b/mindspore/ops/_op_impl/tbe/assign_sub.py index 85104f6eb3..126a6b7a9a 100644 --- a/mindspore/ops/_op_impl/tbe/assign_sub.py +++ b/mindspore/ops/_op_impl/tbe/assign_sub.py @@ -14,65 +14,27 @@ # ============================================================================ """AssignSub op""" -from mindspore.ops.op_info_register import op_info_register +from 
mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +assign_sub_op_info = TBERegOp("AssignSub") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("assign_sub.so") \ + .compute_cost(10) \ + .kernel_name("assign_sub") \ + .partial_flag(True) \ + .input(0, "var", False, "required", "all") \ + .input(1, "value", False, "required", "all") \ + .output(0, "output_ref", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "AssignSub", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "assign_sub.so", - "compute_cost": 10, - "kernel_name": "assign_sub", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int32", "int8", "uint8" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "var", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float", "int32", "int8", "uint8" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "value", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int32", "int8", "uint8" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "out_ref", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] 
-}""") +@op_info_register(assign_sub_op_info) def _assign_sub_tbe(): """AssignSub TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/atomic_addr_clean.py b/mindspore/ops/_op_impl/tbe/atomic_addr_clean.py index 90186e6526..e707a1f26f 100644 --- a/mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +++ b/mindspore/ops/_op_impl/tbe/atomic_addr_clean.py @@ -14,31 +14,20 @@ # ============================================================================ """AtomicAddrClean op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp +atomic_addr_clean_op_info = TBERegOp("AtomicAddrClean") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("atomic_addr_clean.so") \ + .compute_cost(10) \ + .kernel_name("atomic_addr_clean") \ + .partial_flag(True) \ + .attr("automic_add_mem_size", "required", "listInt", "all") \ + .get_op_info() -@op_info_register("""{ - "op_name": "AtomicAddrClean", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "atomic_addr_clean.so", - "compute_cost": 10, - "kernel_name": "atomic_addr_clean", - "partial_flag": true, - "attr": [ - { - "name": "automic_add_mem_size", - "param_type": "required", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - ], - "outputs": [ - ] -}""") + +@op_info_register(atomic_addr_clean_op_info) def _atomic_addr_clean_tbe(): """AtomicAddrClean TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/batch_matmul.py b/mindspore/ops/_op_impl/tbe/batch_matmul.py index 668791b659..4efcf8031c 100644 --- a/mindspore/ops/_op_impl/tbe/batch_matmul.py +++ b/mindspore/ops/_op_impl/tbe/batch_matmul.py @@ -14,88 +14,29 @@ # ============================================================================ """BatchMatMul op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +batch_matmul_op_info 
= TBERegOp("BatchMatMul") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("batch_matmul.so") \ + .compute_cost(10) \ + .kernel_name("batch_matmul") \ + .attr("transpose_x1", "required", "bool", "all") \ + .attr("transpose_x2", "required", "bool", "all") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .input(2, "bias", False, "optional", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_Default, DataType.F16_FracNZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "BatchMatMul", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "batch_matmul.so", - "compute_cost": 10, - "kernel_name": "batch_matmul", - "partial_flag": true, - "attr": [ - { - "name": "transpose_x1", - "param_type": "required", - "type": "bool", - "value": "all" - }, - { - "name": "transpose_x2", - "param_type": "required", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","int32","int32" - ], - "format": [ - "DefaultFormat","DefaultFormat","FRACTAL_NZ","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float","float","int32","int32" - ], - "format": [ - "DefaultFormat","DefaultFormat","FRACTAL_NZ","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x2", - "need_compile": false, - "param_type": 
"required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float","float","int32","int32" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "bias", - "need_compile": false, - "param_type": "optional", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","int32","int32" - ], - "format": [ - "DefaultFormat","DefaultFormat","FRACTAL_NZ","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(batch_matmul_op_info) def _batch_matmul_tbe(): """BatchMatMul TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/batchnorm.py b/mindspore/ops/_op_impl/tbe/batchnorm.py index 263fcfb0f2..6dd79245a3 100644 --- a/mindspore/ops/_op_impl/tbe/batchnorm.py +++ b/mindspore/ops/_op_impl/tbe/batchnorm.py @@ -14,174 +14,45 @@ # ============================================================================ """BatchNorm op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +batch_norm_op_info = TBERegOp("BatchNorm") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("batch_norm.so") \ + .compute_cost(10) \ + .kernel_name("batch_norm") \ + .partial_flag(True) \ + .attr("epsilon", "optional", "float", "all") \ + .attr("data_format", "optional", "str", "all") \ + .attr("is_training", "optional", "bool", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "scale", False, "required", "all") \ + .input(2, "offset", False, "required", "all") \ + .input(3, "mean", False, "optional", "all") \ + .input(4, "variance", False, "optional", "all") \ + .output(0, "y", False, "required", "all") \ + .output(1, "batch_mean", False, "required", "all") \ + .output(2, 
"batch_variance", False, "required", "all") \ + .output(3, "reserve_space_1", False, "optional", "all") \ + .output(4, "reserve_space_2", False, "optional", "all") \ + .output(5, "reserve_space_3", False, "optional", "all") \ + .dtype_format(DataType.F16_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F16_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F16_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "BatchNorm", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "batch_norm.so", - "compute_cost": 10, - "kernel_name": "batch_norm", - "partial_flag": true, - "attr": [ - { - "name": "epsilon", - "param_type": "required", - "type": "float", - "value": "all" - }, - { - "name": "data_format", - "param_type": "required", - "type": "str", - "value": "all" - }, - { - "name": "is_training", - "param_type": "required", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0", "DefaultFormat","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - 
}, - { - "index": 1, - "dtype": [ - "float","float","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat", "NC1HWC0" - ], - "name": "scale", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float","float","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "offset", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float","float","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "mean", - "need_compile": false, - "param_type": "optional", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float","float","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "variance", - "need_compile": false, - "param_type": "optional", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat","NC1HWC0", "DefaultFormat","NC1HWC0" - ], - "name": "y", - "param_type": "required" - }, - { - "index": 1, - "dtype": [ - "float","float","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "batch_mean", - "param_type": "required" - }, - { - "index": 2, - "dtype": [ - "float", "float", "float", "float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "batch_variance", - "param_type": "required" - }, - { - "index": 3, - "dtype": [ - "float", "float", "float", "float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "reserve_space_1", - "param_type": "optional" - }, - { - "index": 4, - "dtype": [ - "float", "float", "float", "float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "reserve_space_2", - "param_type": "optional" - }, - { - 
"index": 5, - "dtype": [ - "float", "float", "float", "float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "reserve_space_3", - "param_type": "optional" - } - ] -}""") + +@op_info_register(batch_norm_op_info) def _batch_norm_tbe(): """BatchNorm TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/batchnorm_grad.py b/mindspore/ops/_op_impl/tbe/batchnorm_grad.py index cc560c5283..6063c0e750 100644 --- a/mindspore/ops/_op_impl/tbe/batchnorm_grad.py +++ b/mindspore/ops/_op_impl/tbe/batchnorm_grad.py @@ -14,181 +14,45 @@ # ============================================================================ """BatchNormGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +batch_norm_grad_op_info = TBERegOp("BatchNormGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("batchnormgrad.so") \ + .compute_cost(10) \ + .kernel_name("batchnormgrad") \ + .partial_flag(True) \ + .attr("epsilon", "optional", "float", "all") \ + .attr("data_format", "optional", "str", "all") \ + .attr("is_training", "optional", "bool", "all") \ + .input(0, "y_backprop", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .input(2, "scale", False, "required", "all") \ + .input(3, "reserve_space_1", False, "required", "all") \ + .input(4, "reserve_space_2", False, "required", "all") \ + .input(5, "reserve_space_3", False, "required", "all") \ + .output(0, "x_backprop", False, "required", "all") \ + .output(1, "scale_backprop", False, "required", "all") \ + .output(2, "offset_backprop", False, "required", "all") \ + .output(3, "reserve_space_4", False, "optional", "all") \ + .output(4, "reserve_space_5", False, "optional", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F16_Default, 
DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F16_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "BatchNormGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "batchnormgrad.so", - "compute_cost": 10, - "kernel_name": "batchnormgrad", - "partial_flag": true, - "attr": [ - { - "name": "epsilon", - "param_type": "optional", - "type": "float", - "value": "all" - }, - { - "name": "data_format", - "param_type": "optional", - "type": "str", - "value": "all" - }, - { - "name": "is_training", - "param_type": "optional", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "y_backprop", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - 
"float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "scale", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "reserve_space_1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "reserve_space_2", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 5, - "dtype": [ - "float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "reserve_space_3", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "x_backprop", - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "scale_backprop", - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "offset_backprop", - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - 
"float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "reserve_space_4", - "param_type": "optional", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "reserve_space_5", - "param_type": "optional", - "shape": "all" - } - ] -}""") + +@op_info_register(batch_norm_grad_op_info) def _batch_norm_grad_tbe(): """BatchNormGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/bias_add.py b/mindspore/ops/_op_impl/tbe/bias_add.py index 9081ed2c13..24607af141 100644 --- a/mindspore/ops/_op_impl/tbe/bias_add.py +++ b/mindspore/ops/_op_impl/tbe/bias_add.py @@ -14,70 +14,26 @@ # ============================================================================ """BiasAdd op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +bias_add_grad_op_info = TBERegOp("BiasAdd") \ + .fusion_type("COMMREDUCE") \ + .async_flag(False) \ + .binfile_name("bias_add.so") \ + .compute_cost(10) \ + .kernel_name("bias_add") \ + .partial_flag(True) \ + .attr("data_format", "required", "str", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "bias", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "BiasAdd", - "imply_type": "TBE", - "fusion_type": "COMMREDUCE", - "async_flag": false, - "binfile_name": "bias_add.so", - "compute_cost": 10, - "kernel_name": "bias_add", - 
"partial_flag": true, - "attr": [ - { - "name": "data_format", - "param_type": "required", - "type": "str", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "int32", "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "int32", "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "bias", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "int32", "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(bias_add_grad_op_info) def _bias_add_tbe(): """BiasAdd TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/bias_add_grad.py b/mindspore/ops/_op_impl/tbe/bias_add_grad.py index 4a24e361bb..e59c197bce 100644 --- a/mindspore/ops/_op_impl/tbe/bias_add_grad.py +++ b/mindspore/ops/_op_impl/tbe/bias_add_grad.py @@ -14,57 +14,26 @@ # ============================================================================ """BiasAddGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +bias_add_grad_op_info = TBERegOp("BiasAddGrad") \ + .fusion_type("COMMREDUCE") \ + .async_flag(False) \ + .binfile_name("biasaddgrad.so") \ + .compute_cost(10) \ + .kernel_name("biasaddgrad") \ + .partial_flag(True) \ + .attr("data_format", "required", "str", "all") \ + .input(0, "output_backprop", False, "required", "all") \ + .output(0, "output", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_Default) \ + 
.dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "BiasAddGrad", - "imply_type": "TBE", - "fusion_type": "COMMREDUCE", - "async_flag": false, - "binfile_name": "biasaddgrad.so", - "compute_cost": 10, - "kernel_name": "biasaddgrad", - "partial_flag": true, - "attr": [ - { - "name": "data_format", - "param_type": "required", - "type": "str", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","FRACTAL_NZ","DefaultFormat" - ], - "name": "out_backprop", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "output", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(bias_add_grad_op_info) def _bias_add_grad_tbe(): """BiasAddGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/bn_training_reduce.py b/mindspore/ops/_op_impl/tbe/bn_training_reduce.py index 3228ccd791..16d75d06be 100644 --- a/mindspore/ops/_op_impl/tbe/bn_training_reduce.py +++ b/mindspore/ops/_op_impl/tbe/bn_training_reduce.py @@ -14,60 +14,24 @@ # ============================================================================ """BatchNorm op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +bn_training_reduce_op_info = TBERegOp("BNTrainingReduce") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("bn_training_reduce.so") \ + .compute_cost(10) \ + .kernel_name("bn_training_reduce") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "sum", False, "required", 
"all") \ + .output(1, "square_sum", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "BNTrainingReduce", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "bn_training_reduce.so", - "compute_cost": 10, - "kernel_name": "bn_training_reduce", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float" - ], - "format": [ - "NC1HWC0", "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float","float" - ], - "format": [ - "NC1HWC0", "NC1HWC0" - ], - "name": "sum", - "param_type": "required" - }, - { - "index": 1, - "dtype": [ - "float","float" - ], - "format": [ - "NC1HWC0", "NC1HWC0" - ], - "name": "square_sum", - "param_type": "required" - } - ] -}""") + +@op_info_register(bn_training_reduce_op_info) def _bn_training_reduce_tbe(): """BNTrainingReduce TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py b/mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py index 3bb43fbb94..e92054670d 100644 --- a/mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +++ b/mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py @@ -14,134 +14,32 @@ # ============================================================================ """BatchNormGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +bn_training_reduce_grad_op_info = TBERegOp("BNTrainingReduceGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("bn_training_reduce_grad.so") \ + .compute_cost(10) \ + .kernel_name("bn_training_reduce_grad") \ + .partial_flag(True) \ + .attr("epsilon", "optional", "float", 
"all") \ + .input(0, "grads", False, "required", "all") \ + .input(1, "x_norm", False, "required", "all") \ + .input(2, "diff_scale", False, "required", "all") \ + .input(3, "diff_offset", False, "required", "all") \ + .input(4, "scale", False, "required", "all") \ + .input(5, "batch_mean", False, "required", "all") \ + .input(6, "batch_variance", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "BNTrainingReduceGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "bn_training_reduce_grad.so", - "compute_cost": 10, - "kernel_name": "bn_training_reduce_grad", - "partial_flag": true, - "attr": [ - { - "name": "epsilon", - "param_type": "optional", - "type": "float", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "grads", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "x_norm", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "diff_scale", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "diff_offset", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float", "float" - ], - 
"format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "scale", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 5, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "batch_mean", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 6, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "batch_variance", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(bn_training_reduce_grad_op_info) def _bn_training_reduce_grad_tbe(): """BNTrainingReduceGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/bn_training_update.py b/mindspore/ops/_op_impl/tbe/bn_training_update.py index 9d6838f0e4..49b572e31e 100644 --- a/mindspore/ops/_op_impl/tbe/bn_training_update.py +++ b/mindspore/ops/_op_impl/tbe/bn_training_update.py @@ -14,200 +14,40 @@ # ============================================================================ """BatchNormGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +bn_training_update_op_info = TBERegOp("BNTrainingUpdate") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("bn_training_update.so") \ + .compute_cost(10) \ + .kernel_name("bn_training_update") \ + .partial_flag(True) \ + .attr("factor", "optional", "float", "all") \ + .attr("epsilon", "optional", "float", "all") \ + .attr("isRef", "optional", "bool", "all", "true") \ + .input(0, "x", False, "required", "all") \ + .input(1, "sum", False, "required", "all") \ + .input(2, "square_sum", False, "required", "all") \ + .input(3, "scale", False, "required", "all") \ + .input(4, "offset", False, 
"required", "all") \ + .input(5, "mean", False, "required", "all") \ + .input(6, "variance", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .output(1, "mean", False, "required", "all") \ + .output(2, "variance", False, "required", "all") \ + .output(3, "batch_mean", False, "required", "all") \ + .output(4, "batch_variance", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F16_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "BNTrainingUpdate", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "bn_training_update.so", - "compute_cost": 10, - "kernel_name": "bn_training_update", - "partial_flag": true, - "attr": [ - { - "name": "factor", - "param_type": "optional", - "type": "float", - "value": "all" - }, - { - "name": "epsilon", - "param_type": "optional", - "type": "float", - "value": "all" - }, - { - "name": "isRef", - "param_type": "optional", - "type": "bool", - "default_value":"true", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "sum", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "square_sum", - "need_compile": false, - 
"param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "scale", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "offset", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 5, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "mean", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 6, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "variance", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "mean", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "variance", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "batch_mean", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "batch_variance", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(bn_training_update_op_info) def _bn_training_update_tbe(): """BNTrainingUpdate TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/bn_training_update_grad.py 
b/mindspore/ops/_op_impl/tbe/bn_training_update_grad.py index 802ef6e91f..5e693bea42 100644 --- a/mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +++ b/mindspore/ops/_op_impl/tbe/bn_training_update_grad.py @@ -14,109 +14,30 @@ # ============================================================================ """BatchNormGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +bn_training_update_grad_op_info = TBERegOp("BNTrainingUpdateGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("bn_training_update_grad.so") \ + .compute_cost(10) \ + .kernel_name("bn_training_update_grad") \ + .partial_flag(True) \ + .attr("epsilon", "optional", "float", "all") \ + .input(0, "grads", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .input(2, "batch_mean", False, "required", "all") \ + .input(3, "batch_variance", False, "required", "all") \ + .output(0, "diff_scale", False, "required", "all") \ + .output(1, "diff_offset", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "BNTrainingUpdateGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "bn_training_update_grad.so", - "compute_cost": 10, - "kernel_name": "bn_training_update_grad", - "partial_flag": true, - "attr": [ - { - "name": "epsilon", - "param_type": "optional", - "type": "float", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "grads", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - 
"float16", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "batch_mean", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "batch_variance", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "diff_scale", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float", "float" - ], - "format": [ - "NC1HWC0","NC1HWC0" - ], - "name": "diff_offset", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(bn_training_update_grad_op_info) def _bn_training_update_grad_tbe(): """BNTrainingUpdateGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/cast.py b/mindspore/ops/_op_impl/tbe/cast.py index e443d776ae..a18dcddfbf 100644 --- a/mindspore/ops/_op_impl/tbe/cast.py +++ b/mindspore/ops/_op_impl/tbe/cast.py @@ -14,69 +14,42 @@ # ============================================================================ """Cast op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +cast_op_info = TBERegOp("Cast") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("cast.so") \ + .compute_cost(10) \ + .kernel_name("cast") \ + .partial_flag(True) \ + .attr("dst_type", "required", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.F16_Default) \ + .dtype_format(DataType.BOOL_Default, 
DataType.U8_Default) \ + .dtype_format(DataType.BOOL_Default, DataType.F32_Default) \ + .dtype_format(DataType.BOOL_Default, DataType.I32_Default) \ + .dtype_format(DataType.I8_Default, DataType.F16_Default) \ + .dtype_format(DataType.I8_Default, DataType.F32_Default) \ + .dtype_format(DataType.I8_Default, DataType.I32_Default) \ + .dtype_format(DataType.U8_Default, DataType.F16_Default) \ + .dtype_format(DataType.U8_Default, DataType.F32_Default) \ + .dtype_format(DataType.U8_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I32_Default, DataType.F16_Default) \ + .dtype_format(DataType.I32_Default, DataType.F32_Default) \ + .dtype_format(DataType.I32_Default, DataType.I8_Default) \ + .dtype_format(DataType.I32_Default, DataType.U8_Default) \ + .dtype_format(DataType.F16_Default, DataType.U8_Default) \ + .dtype_format(DataType.F16_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_Default, DataType.I32_Default) \ + .dtype_format(DataType.F32_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.I32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Cast", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "cast.so", - "compute_cost": 10, - "kernel_name": "cast", - "partial_flag": true, - "attr": [ - { - "name": "dst_type", - "param_type": "required", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", - "int32", "int32", "int32", "int32", "int32", - "int8", "int8", "int8", "uint8", "uint8", "uint8", - "bool", "bool", "bool", "bool", "float16" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - 
"DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float", "int32", "float16", "int32", - "float16", "float", "int8", "uint8", "bool", - "float16", "float", "int32", "float16", "float", "int32", - "float16", "float", "int32", "uint8", "uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(cast_op_info) def _cast_tbe(): """Cast TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py b/mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py index 859315fb7b..92fb9a59ee 100644 --- a/mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +++ b/mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py @@ -14,90 +14,28 @@ # ============================================================================ """ClipByNormNoDivSum op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +clip_by_norm_no_div_sum_op_info = TBERegOp("ClipByNormNoDivSum") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("clip_by_norm_no_div_sum.so") \ + .compute_cost(10) \ + .kernel_name("clip_by_norm_no_div_sum") \ + .partial_flag(True) \ + .input(0, "input_x", False, "required", "all") \ + .input(1, "input1", False, "required", "all") \ + .input(2, "input2", False, "required", "all") \ + .input(3, "input3", False, "required", "all") \ + .output(0, "output_y", False, 
"required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ClipByNormNoDivSum", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "clip_by_norm_no_div_sum.so", - "compute_cost": 10, - "kernel_name": "clip_by_norm_no_div_sum", - "partial_flag": true, - "attr":[ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input_x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input2", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input3", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output_y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(clip_by_norm_no_div_sum_op_info) def _clip_by_norm_no_div_sum_tbe(): """ClipByNormNoDivSum TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/clip_by_value.py b/mindspore/ops/_op_impl/tbe/clip_by_value.py index 02ec0158a9..4ddc6c3c0f 100644 --- a/mindspore/ops/_op_impl/tbe/clip_by_value.py +++ 
b/mindspore/ops/_op_impl/tbe/clip_by_value.py @@ -14,85 +14,30 @@ # ============================================================================ """ClipByValue op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +clip_by_value_op_info = TBERegOp("ClipByValue") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("clip_by_value.so") \ + .compute_cost(10) \ + .kernel_name("clip_by_value") \ + .partial_flag(True) \ + .attr("dst_type", "required", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "clip_value_min", False, "required", "all") \ + .input(2, "clip_value_max", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ClipByValue", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "clip_by_value.so", - "compute_cost": 10, - "kernel_name": "clip_by_value", - "partial_flag": true, - "attr":[ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", 
"DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "clip_value_min", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "clip_value_max", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(clip_by_value_op_info) def _clip_by_value_tbe(): """ClipByValue TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/concat.py b/mindspore/ops/_op_impl/tbe/concat.py index 3e5c577476..56807b15fc 100644 --- a/mindspore/ops/_op_impl/tbe/concat.py +++ b/mindspore/ops/_op_impl/tbe/concat.py @@ -14,141 +14,44 @@ # ============================================================================ """Concat op""" -from 
mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +concat_op_info = TBERegOp("Concat") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("concat_d.so") \ + .compute_cost(10) \ + .kernel_name("concat_d") \ + .partial_flag(True) \ + .attr("axis", "required", "int", "all") \ + .input(0, "input_values", False, "dynamic", "all") \ + .output(0, "output_data", False, "required", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.BOOL_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I16_Default, DataType.I16_Default) \ + .dtype_format(DataType.I16_5HD, DataType.I16_5HD) \ + .dtype_format(DataType.U16_Default, DataType.U16_Default) \ + .dtype_format(DataType.U16_5HD, DataType.U16_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.U32_Default, DataType.U32_Default) \ + .dtype_format(DataType.U32_5HD, DataType.U32_5HD) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_5HD, DataType.I64_5HD) \ + .dtype_format(DataType.U64_Default, DataType.U64_Default) \ + .dtype_format(DataType.U64_5HD, DataType.U64_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Concat", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "concat_d.so", - "compute_cost": 10, - "kernel_name": "concat_d", - "partial_flag": 
true, - "attr": [ - { - "name": "axis", - "param_type": "required", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", - "float16", - "float", - "float", - "int32", - "int32", - "int8", - "int8", - "int16", - "int16", - "int64", - "int64", - "uint8", - "uint8", - "uint16", - "uint16", - "uint32", - "uint32", - "uint64", - "uint64", - "bool", - "bool" - ], - "format": [ - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0" - ], - "name": "input_values", - "need_compile": false, - "param_type": "dynamic", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", - "float16", - "float", - "float", - "int32", - "int32", - "int8", - "int8", - "int16", - "int16", - "int64", - "int64", - "uint8", - "uint8", - "uint16", - "uint16", - "uint32", - "uint32", - "uint64", - "uint64", - "bool", - "bool" - ], - "format": [ - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "NC1HWC0" - ], - "name": "output_data", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(concat_op_info) def _concat_tbe(): """Concat TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py b/mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py index cbd518d541..55d9562a44 100644 --- a/mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +++ 
b/mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py @@ -14,65 +14,28 @@ # ============================================================================ """ConfusionSoftmaxGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +confusion_softmax_grad_op_info = TBERegOp("ConfusionSoftmaxGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("confusion_softmax_grad.so") \ + .compute_cost(10) \ + .kernel_name("confusion_softmax_grad") \ + .partial_flag(True) \ + .input(0, "grad", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ConfusionSoftmaxGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "confusion_softmax_grad.so", - "compute_cost": 10, - "kernel_name": "confusion_softmax_grad", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "NC1HWC0", "FRACTAL_NZ", "DefaultFormat", "NC1HWC0" - ], - "name": "grad", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float16", "float", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "NC1HWC0", "FRACTAL_NZ", "DefaultFormat", 
"NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "NC1HWC0", "FRACTAL_NZ", "DefaultFormat", "NC1HWC0" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(confusion_softmax_grad_op_info) def _confusion_softmax_grad_tbe(): """ConfusionSoftmaxGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/confusion_transpose_d.py b/mindspore/ops/_op_impl/tbe/confusion_transpose_d.py index db35107b5d..e52ae01520 100644 --- a/mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +++ b/mindspore/ops/_op_impl/tbe/confusion_transpose_d.py @@ -14,79 +14,44 @@ # ============================================================================ """ConfusionTransposeD op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +confusion_transpose_d_op_info = TBERegOp("ConfusionTransposeD") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("confusion_transpose_d.so") \ + .compute_cost(10) \ + .kernel_name("confusion_transpose_d") \ + .partial_flag(True) \ + .attr("perm", "required", "listInt", "all") \ + .attr("shape", "required", "listInt", "all") \ + .attr("transpose_first", "required", "bool", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_FracNZ, DataType.I8_FracNZ) \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_FracNZ, DataType.U8_FracNZ) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I16_FracNZ, DataType.I16_FracNZ) \ + .dtype_format(DataType.I16_Default, DataType.I16_Default) \ + .dtype_format(DataType.U16_FracNZ, 
DataType.U16_FracNZ) \ + .dtype_format(DataType.U16_Default, DataType.U16_Default) \ + .dtype_format(DataType.I32_FracNZ, DataType.I32_FracNZ) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.U32_FracNZ, DataType.U32_FracNZ) \ + .dtype_format(DataType.U32_Default, DataType.U32_Default) \ + .dtype_format(DataType.I64_FracNZ, DataType.I64_FracNZ) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.U64_FracNZ, DataType.U64_FracNZ) \ + .dtype_format(DataType.U64_Default, DataType.U64_Default) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ConfusionTransposeD", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "confusion_transpose_d.so", - "compute_cost": 10, - "kernel_name": "confusion_transpose_d", - "partial_flag": true, - "attr":[ - { - "name":"perm", - "param_type":"required", - "type":"listInt", - "value":"all" - }, - { - "name":"shape", - "param_type":"required", - "type":"listInt", - "value":"all" - }, - { - "name":"transpose_first", - "param_type":"required", - "type":"bool", - "value":"all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", - "uint64", "float16", "float", "int8", "int16", "int32", "int64", "uint8", "uint16", - "uint32", "uint64" - ], - "format": [ - "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", - "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "x", - 
"need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", - "uint64", "float16", "float", "int8", "int16", "int32", "int64", "uint8", "uint16", - "uint32", "uint64" - ], - "format": [ - "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", - "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "FRACTAL_NZ", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(confusion_transpose_d_op_info) def _confusion_transpose_d_tbe(): """ConfusionTransposeD TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/conv2d.py b/mindspore/ops/_op_impl/tbe/conv2d.py index da0f34e359..425521901d 100644 --- a/mindspore/ops/_op_impl/tbe/conv2d.py +++ b/mindspore/ops/_op_impl/tbe/conv2d.py @@ -14,114 +14,30 @@ # ============================================================================ """Conv2D op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +conv2d_op_info = TBERegOp("Conv2D") \ + .fusion_type("CONVLUTION") \ + .async_flag(False) \ + .binfile_name("conv2d.so") \ + .compute_cost(10) \ + .kernel_name("conv2d") \ + .partial_flag(True) \ + .attr("stride", "required", "listInt", "all") \ + .attr("pad_list", "required", "listInt", "all") \ + .attr("dilation", "required", "listInt", "all") \ + .attr("offset_a", "optional", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "filter", False, "required", "all") \ + .input(2, "bias", False, "optional", "all") \ + .input(3, "offset_w", False, "optional", "all") \ + .output(0, "y", True, "required", "all") \ + 
.dtype_format(DataType.F16_5HD, DataType.F16_FracZ, DataType.F16_Default, DataType.I8_Default, + DataType.F16_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Conv2D", - "imply_type": "TBE", - "fusion_type": "CONVLUTION", - "async_flag": false, - "binfile_name": "conv2d.so", - "compute_cost": 10, - "kernel_name": "conv2d", - "partial_flag": true, - "attr": [ - { - "name": "stride", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "pad_list", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "dilation", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "offset_a", - "param_type": "optional", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16" - ], - "format": [ - "FracZ" - ], - "name": "filter", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16" - ], - "format": [ - "DefaultFormat" - ], - "name": "bias", - "need_compile": false, - "param_type": "optional", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "int8" - ], - "format": [ - "DefaultFormat" - ], - "name": "offset_w", - "need_compile": false, - "param_type": "optional", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(conv2d_op_info) def _conv2d_tbe(): """Conv2D TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py b/mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py index 2c1397c0b1..e32e99d888 100644 --- a/mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +++ 
b/mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py @@ -14,89 +14,27 @@ # ============================================================================ """Conv2DBackpropFilter op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +conv2d_backprop_filter_op_info = TBERegOp("Conv2DBackpropFilter") \ + .fusion_type("CONVLUTION") \ + .async_flag(False) \ + .binfile_name("conv2d_backprop_filter_d.so") \ + .compute_cost(10) \ + .kernel_name("conv2d_backprop_filter_d") \ + .partial_flag(True) \ + .attr("filter_sizes", "required", "listInt", "all") \ + .attr("stride", "required", "listInt", "all") \ + .attr("pad_mode", "required", "str", "all") \ + .attr("dilation", "required", "listInt", "all") \ + .input(0, "out_backprop", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F32_FracZ) \ + .get_op_info() -# map to tbe kernel name conv2d_backprop_filter_d -@op_info_register("""{ - "op_name": "Conv2DBackpropFilter", - "imply_type": "TBE", - "fusion_type": "CONVLUTION", - "async_flag": false, - "binfile_name": "conv2d_backprop_filter_d.so", - "compute_cost": 10, - "kernel_name": "conv2d_backprop_filter_d", - "partial_flag": true, - "attr": [ - { - "name": "filter_sizes", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "stride", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "pad_mode", - "param_type": "required", - "type": "str", - "value": "all" - }, - { - "name": "dilation", - "param_type": "required", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "out_backprop", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - 
"float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32" - ], - "format": [ - "FracZ" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(conv2d_backprop_filter_op_info) def _conv2d_backprop_filter_tbe(): """Conv2DBackpropFilter TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py b/mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py index d61989e472..2c1dd6aea2 100644 --- a/mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +++ b/mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py @@ -14,88 +14,27 @@ # ============================================================================ """Conv2DBackpropInput op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +conv2d_backprop_input_op_info = TBERegOp("Conv2DBackpropInput") \ + .fusion_type("CONVLUTION") \ + .async_flag(False) \ + .binfile_name("conv2d_backprop_input_d.so") \ + .compute_cost(10) \ + .kernel_name("conv2d_backprop_input_d") \ + .partial_flag(True) \ + .attr("input_sizes", "required", "listInt", "all") \ + .attr("stride", "required", "listInt", "all") \ + .attr("pad_mode", "required", "str", "all") \ + .attr("dilation", "required", "listInt", "all") \ + .input(0, "out_backprop", False, "required", "all") \ + .input(1, "filter", False, "required", "all") \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_FracZ, DataType.F16_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Conv2DBackpropInput", - "imply_type": "TBE", - "fusion_type": "CONVLUTION", - "async_flag": false, - "binfile_name": "conv2d_backprop_input_d.so", - "compute_cost": 10, - "kernel_name": "conv2d_backprop_input_d", - "partial_flag": true, - "attr": [ 
- { - "name": "input_sizes", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "stride", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "pad_mode", - "param_type": "required", - "type": "str", - "value": "all" - }, - { - "name": "dilation", - "param_type": "required", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "out_backprop", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16" - ], - "format": [ - "FracZ" - ], - "name": "filter", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(conv2d_backprop_input_op_info) def _conv2d_backprop_input_tbe(): """Conv2DBackpropInput TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/div.py b/mindspore/ops/_op_impl/tbe/div.py index c71d6f38c4..2a83745399 100644 --- a/mindspore/ops/_op_impl/tbe/div.py +++ b/mindspore/ops/_op_impl/tbe/div.py @@ -14,71 +14,32 @@ # ============================================================================ """Div op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +div_op_info = TBERegOp("Div") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("div.so") \ + .compute_cost(10) \ + .kernel_name("div") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD, 
DataType.I8_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Div", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "div.so", - "compute_cost": 10, - "kernel_name": "div", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32", "int8", "int8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32", "int8", "int8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32", "int8", "int8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0" - ], - "name": "y", - 
"need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(div_op_info) def _div_tbe(): """Div TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/dropout_do_mask.py b/mindspore/ops/_op_impl/tbe/dropout_do_mask.py index 5f4557a4f5..2bef489b96 100644 --- a/mindspore/ops/_op_impl/tbe/dropout_do_mask.py +++ b/mindspore/ops/_op_impl/tbe/dropout_do_mask.py @@ -14,76 +14,25 @@ # ============================================================================ """DropoutdoMask op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +drop_out_do_mask_op_info = TBERegOp("DropoutDoMask") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("drop_out_do_mask.so") \ + .compute_cost(10) \ + .kernel_name("drop_out_do_mask") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .input(1, "mask", False, "required", "all") \ + .input(2, "keep_prob", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.U8_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.U8_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "DropoutDoMask", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "drop_out_do_mask.so", - "compute_cost": 10, - "kernel_name": "drop_out_do_mask", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "uint8","uint8","uint8","uint8","uint8","uint8" - ], - "format": 
[ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "mask", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "keep_prob", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(drop_out_do_mask_op_info) def _dropout_do_mask_tbe(): """DropoutdoMask TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/equal.py b/mindspore/ops/_op_impl/tbe/equal.py index db2e152c27..594fb51cb5 100644 --- a/mindspore/ops/_op_impl/tbe/equal.py +++ b/mindspore/ops/_op_impl/tbe/equal.py @@ -14,66 +14,32 @@ # ============================================================================ """Equal op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +equal_op_info = TBERegOp("Equal") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("equal.so") \ + .compute_cost(10) \ + .kernel_name("equal") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.U8_5HD, 
DataType.U8_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.BOOL_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Equal", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "equal.so", - "compute_cost": 10, - "kernel_name": "equal", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float","int32","int32","int8","int8","uint8","uint8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float","float","int32","int32","int8","int8","uint8","uint8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "bool","bool","bool","bool","bool","bool","bool","bool","bool","bool" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "y", - "param_type": "required" - } - ] -}""") +@op_info_register(equal_op_info) def _equal_tbe(): """Equal TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/exp.py 
b/mindspore/ops/_op_impl/tbe/exp.py index e5f34e67b0..545845a3b0 100644 --- a/mindspore/ops/_op_impl/tbe/exp.py +++ b/mindspore/ops/_op_impl/tbe/exp.py @@ -14,52 +14,25 @@ # ============================================================================ """Exp op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +exp_op_info = TBERegOp("Exp") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("exp.so") \ + .compute_cost(10) \ + .kernel_name("exp") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Exp", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "exp.so", - "compute_cost": 10, - "kernel_name": "exp", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(exp_op_info) def _exp_tbe(): """Exp TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/expand_dims.py b/mindspore/ops/_op_impl/tbe/expand_dims.py index 462a676e4f..8b0755a521 100644 --- a/mindspore/ops/_op_impl/tbe/expand_dims.py +++ 
b/mindspore/ops/_op_impl/tbe/expand_dims.py @@ -14,57 +14,25 @@ # ============================================================================ """ExpandDims op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +expand_dims_op_info = TBERegOp("ExpandDims") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("expand_dims.so") \ + .compute_cost(10) \ + .kernel_name("expand_dims") \ + .partial_flag(True) \ + .attr("axis", "required", "listInt", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ExpandDims", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "expand_dims.so", - "compute_cost": 10, - "kernel_name": "expand_dims", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "required", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(expand_dims_op_info) def _expand_dims_tbe(): """ExpandDims TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/floor_div.py b/mindspore/ops/_op_impl/tbe/floor_div.py index fdc49e3805..74fd594901 100644 --- a/mindspore/ops/_op_impl/tbe/floor_div.py +++ 
b/mindspore/ops/_op_impl/tbe/floor_div.py @@ -14,64 +14,27 @@ # ============================================================================ """FloorDiv op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +floordiv_op_info = TBERegOp("FloorDiv") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("floordiv.so") \ + .compute_cost(10) \ + .kernel_name("floordiv") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "FloorDiv", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "floordiv.so", - "compute_cost": 10, - "kernel_name": "floordiv", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float","int32","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float","int32","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","int32","int8","uint8" - ], - "format": [ - 
"DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(floordiv_op_info) def _floor_div_tbe(): """FloorDiv TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/fused_mul_add.py b/mindspore/ops/_op_impl/tbe/fused_mul_add.py index 96e18a89c7..ad3c601e5d 100644 --- a/mindspore/ops/_op_impl/tbe/fused_mul_add.py +++ b/mindspore/ops/_op_impl/tbe/fused_mul_add.py @@ -14,93 +14,38 @@ # ============================================================================ """FusedMulAdd op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +fused_mul_add_op_info = TBERegOp("FusedMulAdd") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("fused_mul_add.so") \ + .compute_cost(10) \ + .kernel_name("fused_mul_add") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .input(2, "x3", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.I32_FracZ, DataType.I32_FracZ, DataType.I32_FracZ, DataType.I32_FracZ) \ + .dtype_format(DataType.I32_FracNZ, DataType.I32_FracNZ, DataType.I32_FracNZ, DataType.I32_FracNZ) \ + .dtype_format(DataType.I32_C1HWNCoC0, DataType.I32_C1HWNCoC0, DataType.I32_C1HWNCoC0, DataType.I32_C1HWNCoC0) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_FracNZ, 
DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ, DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \ + .get_op_info() -@op_info_register("""{ - "op_name": "FusedMulAdd", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "fused_mul_add.so", - "compute_cost": 10, - "kernel_name": "fused_mul_add", - "partial_flag": true, - "attr": [ - - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "int32", "int32", "int32", "int32", "int32", - "float16", "float16", "float16", "float16", "float16", - "float", "float", "float", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "int32", "int32", "int32", "int32", "int32", - "float16", "float16", "float16", "float16", "float16", - "float", "float", "float", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ 
- "int32", "int32", "int32", "int32", "int32", - "float16", "float16", "float16", "float16", "float16", - "float", "float", "float", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "x3", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "int32", "int32", "int32", "int32", "int32", - "float16", "float16", "float16", "float16", "float16", - "float", "float", "float", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(fused_mul_add_op_info) def _fused_mul_add_tbe(): """FusedMulAdd TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/fused_mul_add_n.py b/mindspore/ops/_op_impl/tbe/fused_mul_add_n.py index a4046a253b..9996466f70 100644 --- a/mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +++ b/mindspore/ops/_op_impl/tbe/fused_mul_add_n.py @@ -14,86 +14,31 @@ # ============================================================================ """FusedMulAddN op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +fused_mul_add_n_op_info = TBERegOp("FusedMulAddN") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("fused_mul_add_n.so") \ + .compute_cost(10) \ + .kernel_name("fused_mul_add_n") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .input(2, "x3", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + 
.dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_Default, DataType.F16_5HD) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_Default, DataType.F16_FracZ) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_Default, DataType.F32_5HD) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_Default, DataType.F32_C1HWNCoC0) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default, DataType.F32_FracZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "FusedMulAddN", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "fused_mul_add_n.so", - "compute_cost": 10, - "kernel_name": "fused_mul_add_n", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ", - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ", - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - 
"DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x3", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ", - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(fused_mul_add_n_op_info) def _fused_mul_add_n_tbe(): """FusedMulAddN TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py b/mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py index e303ee042f..a8f84427d6 100644 --- a/mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +++ b/mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py @@ -14,137 +14,43 @@ # ============================================================================ """FusedMulApplyMomentum op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +fused_mul_apply_momentum_op_info = TBERegOp("FusedMulApplyMomentum") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("fused_mul_apply_momentum.so") \ + .compute_cost(10) \ + .kernel_name("fused_mul_apply_momentum") \ + .partial_flag(True) \ + .attr("use_nesterov", "optional", "bool", "true,false", "false") \ + .input(0, "var", False, "required", "all") \ + .input(1, "accum", False, "required", "all") \ + .input(2, "lr", False, "required", "all") \ + .input(3, "x1", False, "required", "all") \ + .input(4, "momentum", False, "required", "all") \ + .input(5, "x2", False, "required", "all") \ + .output(0, "var", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_Default, DataType.F16_5HD, + DataType.F16_Default, DataType.F16_5HD, DataType.F16_5HD) \ + 
.dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_C1HWNCoC0, + DataType.F16_Default, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_Default, DataType.F16_FracZ, + DataType.F16_Default, DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_Default, DataType.F32_5HD, + DataType.F32_Default, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_Default, DataType.F32_C1HWNCoC0, + DataType.F32_Default, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_Default, DataType.F32_FracZ, + DataType.F32_Default, DataType.F32_FracZ, DataType.F32_FracZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "FusedMulApplyMomentum", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "fused_mul_apply_momentum.so", - "compute_cost": 10, - "kernel_name": "fused_mul_apply_momentum", - "partial_flag": true, - "attr": [ - { - "name": "use_nesterov", - "param_type": "optional", - "type": "bool", - "value": "true,false", - "default_value":"false" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ", - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ" - ], - "name": "var", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": 
[ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ", - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ" - ], - "name": "accum", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "lr", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ", - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "momentum", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 5, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ", - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16", - "float","float","float","float" - ], - "format": [ - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ", - "NC1HWC0","C1HWNCoC0","DefaultFormat","FracZ" - ], - "name": "var", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] 
-}""") + +@op_info_register(fused_mul_apply_momentum_op_info) def _fused_mul_apply_momentum_tbe(): """FusedMulApplyMomentum TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/gather_v2.py b/mindspore/ops/_op_impl/tbe/gather_v2.py index b0e14e99c0..72ba17d942 100644 --- a/mindspore/ops/_op_impl/tbe/gather_v2.py +++ b/mindspore/ops/_op_impl/tbe/gather_v2.py @@ -14,94 +14,53 @@ # ============================================================================ """AddN op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +gather_v2_op_info = TBERegOp("GatherV2") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("gather_v2_d.so") \ + .compute_cost(10) \ + .kernel_name("gather_v2_d") \ + .partial_flag(True) \ + .attr("axis", "optional", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "indices", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I32_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_Default, DataType.I64_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I32_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.I8_5HD, DataType.I64_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.I8_FracZ, DataType.I32_FracZ, DataType.I8_FracZ) \ + .dtype_format(DataType.I8_FracZ, DataType.I64_FracZ, DataType.I8_FracZ) \ + .dtype_format(DataType.U8_Default, DataType.I32_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_Default, DataType.I64_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.I32_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.U8_5HD, DataType.I64_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.U8_FracZ, DataType.I32_FracZ, DataType.U8_FracZ) \ + .dtype_format(DataType.U8_FracZ, DataType.I64_FracZ, DataType.U8_FracZ) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, 
DataType.I32_Default) \ + .dtype_format(DataType.I32_Default, DataType.I64_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.I32_5HD, DataType.I64_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.I32_FracZ, DataType.I32_FracZ, DataType.I32_FracZ) \ + .dtype_format(DataType.I32_FracZ, DataType.I64_FracZ, DataType.I32_FracZ) \ + .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_Default, DataType.I64_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.I32_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_5HD, DataType.I64_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.I32_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_FracZ, DataType.I64_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_Default, DataType.I64_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.I32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.I64_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.I32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_FracZ, DataType.I64_FracZ, DataType.F32_FracZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "GatherV2", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "gather_v2_d.so", - "compute_cost": 10, - "kernel_name": "gather_v2_d", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "optional", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16","float16","float16", - "float","float","float","float","float","float", - "int32","int32","int32", "int32","int32","int32", - "uint8","uint8","uint8","uint8","uint8","uint8", - "int8","int8", 
"int8","int8","int8", "int8" - ], - "format": [ - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "int32","int32","int32","int64","int64","int64", - "int32","int32","int32","int64","int64","int64", - "int32","int32","int32","int64","int64","int64", - "int32","int32","int32","int64","int64","int64", - "int32","int32","int32","int64","int64","int64" - ], - "format": [ - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ" - ], - "name": "indices", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float16","float16","float16", - "float","float","float","float","float","float", - "int32","int32","int32", "int32","int32","int32", - "uint8","uint8","uint8","uint8","uint8","uint8", - "int8","int8", "int8","int8","int8", "int8" - ], - "format": [ - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ", - "DefaultFormat","NC1HWC0","FracZ","DefaultFormat","NC1HWC0","FracZ" - ], - "name": "y", - "need_compile": false, - 
"param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(gather_v2_op_info) def _gather_v2_tbe(): """GatherV2 TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/gelu.py b/mindspore/ops/_op_impl/tbe/gelu.py index 171d97c043..9d4b2ed7f3 100644 --- a/mindspore/ops/_op_impl/tbe/gelu.py +++ b/mindspore/ops/_op_impl/tbe/gelu.py @@ -14,51 +14,29 @@ # ============================================================================ """Gelu op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +gelu_op_info = TBERegOp("Gelu") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("gelu.so") \ + .compute_cost(10) \ + .kernel_name("gelu") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Gelu", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "gelu.so", - "compute_cost": 10, - "kernel_name": "gelu", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float","float16","float","float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "FRACTAL_NZ","FRACTAL_NZ","FracZ","FracZ","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - 
"param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","float16","float","float16","float16","float16","float16","float","float","float","float" - ], - "format": [ - "FRACTAL_NZ","FRACTAL_NZ","FracZ","FracZ","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(gelu_op_info) def _gelu_tbe(): """Gelu TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/gelu_grad.py b/mindspore/ops/_op_impl/tbe/gelu_grad.py index 9b358262e0..ce62e55071 100644 --- a/mindspore/ops/_op_impl/tbe/gelu_grad.py +++ b/mindspore/ops/_op_impl/tbe/gelu_grad.py @@ -14,77 +14,29 @@ # ============================================================================ """GeluGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +gelu_grad_op_info = TBERegOp("GeluGrad") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("gelu_grad.so") \ + .compute_cost(10) \ + .kernel_name("gelu_grad") \ + .partial_flag(True) \ + .input(0, "dy", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .input(2, "y", False, "required", "all") \ + .output(0, "z", True, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ, 
DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "GeluGrad", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "gelu_grad.so", - "compute_cost": 10, - "kernel_name": "gelu_grad", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "dy", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "z", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(gelu_grad_op_info) def _gelu_grad_tbe(): """GeluGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/greater.py b/mindspore/ops/_op_impl/tbe/greater.py index 09ee0e31af..90c680ab04 100644 --- a/mindspore/ops/_op_impl/tbe/greater.py +++ b/mindspore/ops/_op_impl/tbe/greater.py @@ -14,68 +14,32 @@ # ============================================================================ """Greater op""" -from mindspore.ops.op_info_register import op_info_register +from 
mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +greater_op_info = TBERegOp("Greater") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("greater.so") \ + .compute_cost(10) \ + .kernel_name("greater") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.BOOL_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Greater", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "greater.so", - "compute_cost": 10, - "kernel_name": "greater", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float","int32","int32","int8","int8","uint8","uint8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float","float","int32","int32","int8","int8","uint8","uint8" - ], - "format": [ - 
"DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "bool","bool","bool","bool","bool","bool","bool","bool","bool","bool" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(greater_op_info) def _greater_tbe(): """Greater TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/lamb_next_mv.py b/mindspore/ops/_op_impl/tbe/lamb_next_mv.py index b432b47c3d..2f2200a1f4 100644 --- a/mindspore/ops/_op_impl/tbe/lamb_next_mv.py +++ b/mindspore/ops/_op_impl/tbe/lamb_next_mv.py @@ -14,279 +14,46 @@ # ============================================================================ """LambNextMV op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +lamb_next_mv_op_info = TBERegOp("LambNextMV") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("lamb_next_m_v.so") \ + .compute_cost(10) \ + .kernel_name("lamb_next_m_v") \ + .partial_flag(True) \ + .input(0, "input1", False, "required", "all") \ + .input(1, "input2", False, "required", "all") \ + .input(2, "input3", False, "required", "all") \ + .input(3, "input4", False, "required", "all") \ + .input(4, "input5", False, "required", "all") \ + .input(5, "input6", False, "required", "all") \ + .input(6, "input7", False, "required", "all") \ + .input(7, "input8", False, "required", "all") \ + .input(8, "input9", False, "required", "all") \ + .input(9, "inputx0", False, "required", "all") \ + .input(10, "inputx1", False, "required", "all") \ + .input(11, "inputx2", False, 
"required", "all") \ + .input(12, "inputx3", False, "required", "all") \ + .output(0, "output1", False, "required", "all") \ + .output(1, "output2", False, "required", "all") \ + .output(2, "output3", False, "required", "all") \ + .output(3, "output4", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name":"LambNextMV", - "imply_type":"TBE", - "fusion_type":"ELEMWISE", - "async_flag":false, - "binfile_name":"lamb_next_m_v.so", - "compute_cost":10, - "kernel_name":"lamb_next_m_v", - "partial_flag":true, - "attr":[], - "inputs":[ - { - "index":0, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input2", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":2, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input3", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":3, - 
"dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input4", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":4, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input5", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":5, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input6", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":6, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input7", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":7, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input8", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":8, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input9", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":9, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"inputx0", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":10, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"inputx1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":11, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"inputx2", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":12, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"inputx3", - "need_compile":false, - 
"param_type":"required", - "shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"output1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"output2", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":2, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"output3", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":3, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"output4", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(lamb_next_mv_op_info) def _lamb_next_mv_tbe(): """LambNextMV TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay_v1.py b/mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay_v1.py index bf5b6cd0e7..aa135e5afe 100644 --- a/mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay_v1.py +++ b/mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay_v1.py @@ -14,279 +14,46 @@ # ============================================================================ """LambNextMVWithDecayV1 op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +lamb_next_m_v_with_decay_v1_op_info = TBERegOp("LambNextMVWithDecayV1") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("lamb_next_m_v_with_decay_v1.so") \ + .compute_cost(10) \ + .kernel_name("lamb_next_m_v_with_decay_v1") \ + .partial_flag(True) \ + .input(0, "input1", False, "required", "all") \ + .input(1, "input2", False, "required", "all") \ + .input(2, "input3", False, "required", "all") \ + .input(3, 
"input4", False, "required", "all") \ + .input(4, "input5", False, "required", "all") \ + .input(5, "input6", False, "required", "all") \ + .input(6, "input7", False, "required", "all") \ + .input(7, "input8", False, "required", "all") \ + .input(8, "input9", False, "required", "all") \ + .input(9, "inputx0", False, "required", "all") \ + .input(10, "inputx1", False, "required", "all") \ + .input(11, "inputx2", False, "required", "all") \ + .input(12, "inputx3", False, "required", "all") \ + .output(0, "output1", False, "required", "all") \ + .output(1, "output2", False, "required", "all") \ + .output(2, "output3", False, "required", "all") \ + .output(3, "output4", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name":"LambNextMVWithDecayV1", - "imply_type":"TBE", - "fusion_type":"OPAQUE", - "async_flag":false, - "binfile_name":"lamb_next_m_v_with_decay_v1.so", - "compute_cost":10, - "kernel_name":"lamb_next_m_v_with_decay_v1", - "partial_flag":true, - "attr":[], - "inputs":[ - { - "index":0, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input1", - "need_compile":false, - 
"param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input2", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":2, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input3", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":3, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input4", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":4, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input5", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":5, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input6", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":6, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input7", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":7, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input8", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":8, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input9", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":9, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"inputx0", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":10, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - 
"DefaultFormat" - ], - "name":"inputx1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":11, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"inputx2", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":12, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"inputx3", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"output1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"output2", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":2, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"output3", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":3, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"output4", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(lamb_next_m_v_with_decay_v1_op_info) def _lamb_next_mv_with_decay_v1_tbe(): """LambNextMVWithDecayV1 TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py b/mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py index a5062e74e2..b34ac57df2 100644 --- a/mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +++ b/mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py @@ -14,174 +14,35 @@ # ============================================================================ """LambUpdateWithLr op""" -from mindspore.ops.op_info_register import op_info_register +from 
mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +lamb_update_with_lr_op_info = TBERegOp("LambUpdateWithLR") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("lamb_update_with_lr.so") \ + .compute_cost(10) \ + .kernel_name("lamb_update_with_lr") \ + .partial_flag(True) \ + .input(0, "input1", False, "required", "all") \ + .input(1, "input2", False, "required", "all") \ + .input(2, "input3", False, "required", "all") \ + .input(3, "input4", False, "required", "all") \ + .input(4, "input5", False, "required", "all") \ + .input(5, "input6", False, "required", "all") \ + .input(6, "input7", False, "required", "all") \ + .input(7, "input8", False, "required", "all") \ + .input(8, "input9", False, "required", "all") \ + .output(0, "output_y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name":"LambUpdateWithLR", - "imply_type":"TBE", - "fusion_type":"ELEMWISE", - "async_flag":false, - "binfile_name":"lamb_update_with_lr.so", - "compute_cost":10, - "kernel_name":"lamb_update_with_lr", - "partial_flag":true, - "attr":[], - "inputs":[ - { - "index":0, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input2", - "need_compile":false, - "param_type":"required", - 
"shape":"all" - }, - { - "index":2, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input3", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":3, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input4", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":4, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input5", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":5, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input6", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":6, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input7", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":7, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input8", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":8, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"input9", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"output_y", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(lamb_update_with_lr_op_info) def _lamb_update_with_lr_tbe(): """LambUpdateWithLr TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py b/mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py index 0900775b07..0902d68de9 100644 --- 
a/mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +++ b/mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py @@ -14,144 +14,31 @@ # ============================================================================ """LambUpdateWithLrV2 op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +lamb_update_with_lr_v2_op_info = TBERegOp("LambUpdateWithLrV2") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("lamb_update_with_lr_v2.so") \ + .compute_cost(10) \ + .kernel_name("lamb_update_with_lr_v2") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .input(2, "x3", False, "required", "all") \ + .input(3, "x4", False, "required", "all") \ + .input(4, "x5", False, "required", "all") \ + .input(5, "greater_y", False, "required", "all") \ + .input(6, "select_e", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name":"LambUpdateWithLrV2", - "imply_type":"TBE", - "fusion_type":"ELEMWISE", - "async_flag":false, - "binfile_name":"lamb_update_with_lr_v2.so", - "compute_cost":10, - "kernel_name":"lamb_update_with_lr_v2", - "partial_flag":true, - "attr":[], - "inputs":[ - { - "index":0, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"x1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", - "float32" - ], - "format":[ - 
"DefaultFormat", - "DefaultFormat" - ], - "name":"x2", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":2, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"x3", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":3, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"x4", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":4, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"x5", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":5, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"greater_y", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":6, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"select_e", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float16", - "float32" - ], - "format":[ - "DefaultFormat", - "DefaultFormat" - ], - "name":"y", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(lamb_update_with_lr_v2_op_info) def _lamb_update_with_lr_v2_tbe(): """LambUpdateWithLrV2 TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/layer_norm.py b/mindspore/ops/_op_impl/tbe/layer_norm.py index 5fd4a6387b..bc71fa87d3 100644 --- a/mindspore/ops/_op_impl/tbe/layer_norm.py +++ b/mindspore/ops/_op_impl/tbe/layer_norm.py @@ -14,111 +14,39 @@ # ============================================================================ """LayerNorm op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType 
+layer_norm_op_info = TBERegOp("LayerNorm") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("layer_norm.so") \ + .compute_cost(10) \ + .kernel_name("layer_norm") \ + .partial_flag(True) \ + .attr("begin_norm_axis", "required", "int", "all") \ + .attr("begin_params_axis", "required", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "gamma", False, "required", "all") \ + .input(2, "beta", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .output(1, "mean", False, "required", "all") \ + .output(2, "variance", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, + DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_Default, DataType.F16_Default, DataType.F16_FracNZ, + DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_Default, DataType.F32_Default, DataType.F32_FracNZ, + DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "LayerNorm", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "layer_norm.so", - "compute_cost": 10, - "kernel_name": "layer_norm", - "partial_flag": true, - "attr": [ - { - "name": "begin_norm_axis", - "param_type": "required", - "type": "int", - "value": "all" - }, - { - "name": "begin_params_axis", - "param_type": "required", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - 
"float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "gamma", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "beta", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "y", - "param_type": "required" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "mean", - "param_type": "required" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - - ], - "name": "variance", - "param_type": "required" - } - ] -}""") +@op_info_register(layer_norm_op_info) def _layer_norm_tbe(): """LayerNorm TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py b/mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py index cdf0dad744..ef254465bc 100644 --- a/mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +++ 
b/mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py @@ -14,105 +14,38 @@ # ============================================================================ """LayerNormBetaGammaBackprop op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +layer_norm_beta_gamma_backprop_op_info = TBERegOp("LayerNormBetaGammaBackprop") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("layer_norm_beta_gamma_backprop.so") \ + .compute_cost(10) \ + .kernel_name("layer_norm_beta_gamma_backprop") \ + .partial_flag(True) \ + .attr("shape_gamma", "required", "listInt", "all") \ + .input(0, "dy", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .input(2, "variance", False, "required", "all") \ + .input(3, "mean", False, "required", "all") \ + .output(0, "pd_gamma", False, "required", "all") \ + .output(1, "pd_beta", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, + DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_Default, DataType.F16_Default, + DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "LayerNormBetaGammaBackprop", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - 
"binfile_name": "layer_norm_beta_gamma_backprop.so", - "compute_cost": 10, - "kernel_name": "layer_norm_beta_gamma_backprop", - "partial_flag": true, - "attr": [ - { - "name": "shape_gamma", - "param_type": "required", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "dy", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "variance", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "mean", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "pd_gamma", - "param_type": "required" - }, - { - "index": 1, - "dtype": [ - "float","float","float","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "pd_beta", - "param_type": "required" - } - ] -}""") + 
+@op_info_register(layer_norm_beta_gamma_backprop_op_info) def _layer_norm_beta_gamma_backprop_tbe(): """LayerNormBetaGammaBackprop TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/layer_norm_grad.py b/mindspore/ops/_op_impl/tbe/layer_norm_grad.py index 6ba4656615..9540f2e265 100644 --- a/mindspore/ops/_op_impl/tbe/layer_norm_grad.py +++ b/mindspore/ops/_op_impl/tbe/layer_norm_grad.py @@ -14,124 +14,35 @@ # ============================================================================ """LayerNormGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +layer_norm_grad_op_info = TBERegOp("LayerNormGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("layer_norm_grad.so") \ + .compute_cost(10) \ + .kernel_name("layer_norm_grad") \ + .partial_flag(True) \ + .input(0, "dy", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .input(2, "variance", False, "required", "all") \ + .input(3, "mean", False, "required", "all") \ + .input(4, "gamma", False, "required", "all") \ + .output(0, "pd_x", False, "required", "all") \ + .output(1, "pd_gamma", False, "required", "all") \ + .output(2, "pd_beta", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, + DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, 
DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "LayerNormGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "layer_norm_grad.so", - "compute_cost": 10, - "kernel_name": "layer_norm_grad", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "dy", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "variance", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "mean", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "gamma", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "pd_x", - "param_type": "required" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "pd_gamma", - "param_type": "required" - }, - { - "index": 2, - "dtype": [ - 
"float16","float16","float","float" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "pd_beta", - "param_type": "required" - } - ] -}""") +@op_info_register(layer_norm_grad_op_info) def _layer_norm_grad_tbe(): """LayerNormGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py b/mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py index 0557fdebc2..bbab66816d 100644 --- a/mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +++ b/mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py @@ -14,102 +14,37 @@ # ============================================================================ """LayerNormXBackprop op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +layer_norm_x_backprop_op_info = TBERegOp("LayerNormXBackprop") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("layer_norm_x_backprop.so") \ + .compute_cost(10) \ + .kernel_name("layer_norm_x_backprop") \ + .partial_flag(True) \ + .input(0, "dy", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .input(2, "variance", False, "required", "all") \ + .input(3, "mean", False, "required", "all") \ + .input(4, "gamma", False, "required", "all") \ + .output(0, "pd_x", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, + DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default, DataType.F16_FracNZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, 
DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default, DataType.F32_FracNZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "LayerNormXBackprop", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "layer_norm_x_backprop.so", - "compute_cost": 10, - "kernel_name": "layer_norm_x_backprop", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "dy", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "variance", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 3, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "mean", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 4, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "DefaultFormat","DefaultFormat","NC1HWC0","DefaultFormat","DefaultFormat","NC1HWC0" - ], - "name": "gamma", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], 
- "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float16","float","float","float" - ], - "format": [ - "FRACTAL_NZ","DefaultFormat","NC1HWC0","FRACTAL_NZ","DefaultFormat","NC1HWC0" - ], - "name": "pd_x", - "param_type": "required" - } - ] -}""") +@op_info_register(layer_norm_x_backprop_op_info) def _layer_norm_x_backprop_tbe(): """LayerNormXBackprop TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/less.py b/mindspore/ops/_op_impl/tbe/less.py index 6e48d60341..947c40b949 100644 --- a/mindspore/ops/_op_impl/tbe/less.py +++ b/mindspore/ops/_op_impl/tbe/less.py @@ -14,67 +14,32 @@ # ============================================================================ """Less op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +less_op_info = TBERegOp("Less") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("less.so") \ + .compute_cost(10) \ + .kernel_name("less") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.BOOL_5HD) \ + .get_op_info() -@op_info_register("""{ 
- "op_name": "Less", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "less.so", - "compute_cost": 10, - "kernel_name": "less", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float","int32","int32","int8","int8","uint8","uint8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float","float","int32","int32","int8","int8","uint8","uint8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "bool","bool","bool","bool","bool","bool","bool","bool","bool","bool" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(less_op_info) def _less_tbe(): """Less TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/less_equal.py b/mindspore/ops/_op_impl/tbe/less_equal.py index 556389fa0e..14cf7c8906 100644 --- a/mindspore/ops/_op_impl/tbe/less_equal.py +++ b/mindspore/ops/_op_impl/tbe/less_equal.py @@ -14,67 +14,34 @@ # ============================================================================ """LessEqual op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +less_equal_op_info = TBERegOp("LessEqual") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + 
.binfile_name("less_equal.so") \ + .compute_cost(10) \ + .kernel_name("less_equal") \ + .partial_flag(True) \ + .attr("begin_norm_axis", "required", "int", "all") \ + .attr("begin_params_axis", "required", "int", "all") \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.BOOL_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "LessEqual", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "less_equal.so", - "compute_cost": 10, - "kernel_name": "less_equal", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float","int32","int32","int8","int8","uint8","uint8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float","float","int32","int32","int8","int8","uint8","uint8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - 
"NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "bool","bool","bool","bool","bool","bool","bool","bool","bool","bool" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat", - "NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(less_equal_op_info) def _less_equal_tbe(): """LessEqual TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/log.py b/mindspore/ops/_op_impl/tbe/log.py index 72a55757a2..b7da647248 100644 --- a/mindspore/ops/_op_impl/tbe/log.py +++ b/mindspore/ops/_op_impl/tbe/log.py @@ -14,52 +14,25 @@ # ============================================================================ """Log op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +log_op_info = TBERegOp("Log") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("log.so") \ + .compute_cost(10) \ + .kernel_name("log") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Log", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "log.so", - "compute_cost": 10, - "kernel_name": "log", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": 
"x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(log_op_info) def _log_tbe(): """Log TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/logical_and.py b/mindspore/ops/_op_impl/tbe/logical_and.py index da4862450f..925a4e82d8 100644 --- a/mindspore/ops/_op_impl/tbe/logical_and.py +++ b/mindspore/ops/_op_impl/tbe/logical_and.py @@ -14,65 +14,26 @@ # ============================================================================ """LogicalAnd op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +logical_and_op_info = TBERegOp("LogicalAnd") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("logical_and.so") \ + .compute_cost(10) \ + .kernel_name("logical_and") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.BOOL_FracZ, DataType.BOOL_FracZ, DataType.BOOL_FracZ) \ + .dtype_format(DataType.BOOL_C1HWNCoC0, DataType.BOOL_C1HWNCoC0, DataType.BOOL_C1HWNCoC0) \ + .dtype_format(DataType.BOOL_5HD, DataType.BOOL_5HD, DataType.BOOL_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "LogicalAnd", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "logical_and.so", - "compute_cost": 10, - "kernel_name": "logical_and", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "bool", "bool", "bool", "bool" - ], - "format": [ - "DefaultFormat", 
"FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "bool", "bool", "bool", "bool" - ], - "format": [ - "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "bool", "bool", "bool", "bool" - ], - "format": [ - "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(logical_and_op_info) def _logical_and_tbe(): """LogicalAnd TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/logical_not.py b/mindspore/ops/_op_impl/tbe/logical_not.py index 4fe8094ffb..3d40441156 100644 --- a/mindspore/ops/_op_impl/tbe/logical_not.py +++ b/mindspore/ops/_op_impl/tbe/logical_not.py @@ -14,52 +14,25 @@ # ============================================================================ """LogicalNot op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +logical_not_op_info = TBERegOp("LogicalNot") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("logical_not.so") \ + .compute_cost(10) \ + .kernel_name("logical_not") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.BOOL_FracZ, DataType.BOOL_FracZ) \ + .dtype_format(DataType.BOOL_C1HWNCoC0, DataType.BOOL_C1HWNCoC0) \ + .dtype_format(DataType.BOOL_5HD, DataType.BOOL_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "LogicalNot", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "logical_not.so", - "compute_cost": 10, - "kernel_name": "logical_not", - 
"partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "bool", "bool", "bool", "bool" - ], - "format": [ - "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "bool", "bool", "bool", "bool" - ], - "format": [ - "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(logical_not_op_info) def _logical_not_tbe(): """LogicalNot TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/logical_or.py b/mindspore/ops/_op_impl/tbe/logical_or.py index 0f21bb61b0..bf8d82c656 100644 --- a/mindspore/ops/_op_impl/tbe/logical_or.py +++ b/mindspore/ops/_op_impl/tbe/logical_or.py @@ -14,65 +14,26 @@ # ============================================================================ """LogicalOr op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +logical_or_op_info = TBERegOp("LogicalOr") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("logical_or.so") \ + .compute_cost(10) \ + .kernel_name("logical_or") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.BOOL_FracZ, DataType.BOOL_FracZ, DataType.BOOL_FracZ) \ + .dtype_format(DataType.BOOL_C1HWNCoC0, DataType.BOOL_C1HWNCoC0, DataType.BOOL_C1HWNCoC0) \ + .dtype_format(DataType.BOOL_5HD, DataType.BOOL_5HD, DataType.BOOL_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "LogicalOr", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "logical_or.so", - 
"compute_cost": 10, - "kernel_name": "logical_or", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "bool", "bool", "bool", "bool" - ], - "format": [ - "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "bool", "bool", "bool", "bool" - ], - "format": [ - "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "bool", "bool", "bool", "bool" - ], - "format": [ - "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(logical_or_op_info) def _logical_or_tbe(): """LogicalOr TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/logsoftmax.py b/mindspore/ops/_op_impl/tbe/logsoftmax.py index 03e8657919..9bf0baf3f2 100644 --- a/mindspore/ops/_op_impl/tbe/logsoftmax.py +++ b/mindspore/ops/_op_impl/tbe/logsoftmax.py @@ -14,57 +14,24 @@ # ============================================================================ """LogSoftmax op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +log_softmax_op_info = TBERegOp("LogSoftmax") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("log_softmax.so") \ + .compute_cost(10) \ + .kernel_name("log_softmax") \ + .partial_flag(True) \ + .attr("axis", "optional", "listInt", "all") \ + .input(0, "logits", False, "required", "all") \ + .output(0, "logsoftmax", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "LogSoftmax", - "imply_type": "TBE", - 
"fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "log_softmax.so", - "compute_cost": 10, - "kernel_name": "log_softmax", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "optional", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "logits", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "logsoftmax", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(log_softmax_op_info) def _logsoftmax_tbe(): """LogSoftMaxGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/logsoftmax_grad.py b/mindspore/ops/_op_impl/tbe/logsoftmax_grad.py index f6858e9530..9223b821d5 100644 --- a/mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +++ b/mindspore/ops/_op_impl/tbe/logsoftmax_grad.py @@ -14,70 +14,25 @@ # ============================================================================ """LogSoftmaxGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +log_softmax_grad_op_info = TBERegOp("LogSoftmaxGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("log_softmax_grad.so") \ + .compute_cost(10) \ + .kernel_name("log_softmax_grad") \ + .partial_flag(True) \ + .attr("axis", "optional", "listInt", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "grad", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": 
"LogSoftmaxGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "log_softmax_grad.so", - "compute_cost": 10, - "kernel_name": "log_softmax_grad", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "optional", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "grad", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(log_softmax_grad_op_info) def _logsoftmax_grad_tbe(): """LogSoftMaxGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/matmul.py b/mindspore/ops/_op_impl/tbe/matmul.py index d18d2cde48..c29378f721 100644 --- a/mindspore/ops/_op_impl/tbe/matmul.py +++ b/mindspore/ops/_op_impl/tbe/matmul.py @@ -14,89 +14,29 @@ # ============================================================================ """MatMul op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +matmul_op_info = TBERegOp("MatMul") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("matmul.so") \ + .compute_cost(10) \ + .kernel_name("matmul") \ + .partial_flag(True) \ + .attr("transpose_a", "required", "bool", "all") \ + .attr("transpose_b", "required", "bool", "all") \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .input(2, "x3", False, "optional", "all") \ + 
.output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_Default, DataType.F16_FracNZ) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F32_Default, DataType.F32_FracNZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "MatMul", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "matmul.so", - "compute_cost": 10, - "kernel_name": "matmul", - "partial_flag": true, - "attr": [ - { - "name": "transpose_a", - "param_type": "required", - "type": "bool", - "value": "all" - }, - { - "name": "transpose_b", - "param_type": "required", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","int32" - ], - "format": [ - "FRACTAL_NZ","FRACTAL_NZ","DefaultFormat","DefaultFormat" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float16","float","int32" - ], - "format": [ - "FRACTAL_NZ","FRACTAL_NZ","DefaultFormat","DefaultFormat" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float","float","int32" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x3", - "need_compile": false, - "param_type": "optional", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","float","int32" - ], - "format": [ - "FRACTAL_NZ","FRACTAL_NZ","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(matmul_op_info) def _matmul_tbe(): """Mul TBE 
register""" return diff --git a/mindspore/ops/_op_impl/tbe/max_pool.py b/mindspore/ops/_op_impl/tbe/max_pool.py index 6b10bc8d9b..6c4c53cbce 100644 --- a/mindspore/ops/_op_impl/tbe/max_pool.py +++ b/mindspore/ops/_op_impl/tbe/max_pool.py @@ -14,74 +14,26 @@ # ============================================================================ """MaxPool op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +max_pool_op_info = TBERegOp("MaxPool") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("max_pool.so") \ + .compute_cost(10) \ + .kernel_name("max_pool") \ + .partial_flag(True) \ + .attr("ksize", "required", "listInt", "all") \ + .attr("strides", "required", "listInt", "all") \ + .attr("padding", "required", "str", "all") \ + .attr("data_format", "required", "str", "all") \ + .input(0, "input_data", False, "required", "all") \ + .output(0, "output_data", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "MaxPool", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "max_pool.so", - "compute_cost": 10, - "kernel_name": "max_pool", - "partial_flag": true, - "attr": [ - { - "name": "ksize", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "strides", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "padding", - "param_type": "required", - "type": "str", - "value": "all" - }, - { - "name": "data_format", - "param_type": "required", - "type": "str", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "input_data", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": 
"output_data", - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(max_pool_op_info) def _max_pool_tbe(): """MaxPool TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/max_pool_grad.py b/mindspore/ops/_op_impl/tbe/max_pool_grad.py index 7c942d01e0..debe1a5a3a 100644 --- a/mindspore/ops/_op_impl/tbe/max_pool_grad.py +++ b/mindspore/ops/_op_impl/tbe/max_pool_grad.py @@ -14,93 +14,27 @@ # ============================================================================ """MaxPoolGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +max_pool_grad_op_info = TBERegOp("MaxPoolGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("max_pool_grad.so") \ + .compute_cost(10) \ + .kernel_name("max_pool_grad") \ + .partial_flag(True) \ + .attr("ksize", "required", "listInt", "all") \ + .attr("strides", "required", "listInt", "all") \ + .attr("padding", "required", "str", "all") \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .input(2, "grad", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "MaxPoolGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "max_pool_grad.so", - "compute_cost": 10, - "kernel_name": "max_pool_grad", - "partial_flag": true, - "attr": [ - { - "name": "ksize", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "strides", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "padding", - "param_type": "required", - "type": "str", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "x1", - "need_compile": 
false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "grad", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "y", - "param_type": "required" - } - ] -}""") + +@op_info_register(max_pool_grad_op_info) def _max_pool_grad_tbe(): """MaxPoolGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py b/mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py index 3730ee1b93..2d6556ffef 100644 --- a/mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +++ b/mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py @@ -14,95 +14,28 @@ # ============================================================================ """MaxPoolGradWithArgmax op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +max_pool_grad_with_argmax_op_info = TBERegOp("MaxPoolGradWithArgmax") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("max_pool_grad_with_argmax.so") \ + .compute_cost(10) \ + .kernel_name("max_pool_grad_with_argmax") \ + .partial_flag(True) \ + .attr("ksize", "required", "listInt", "all") \ + .attr("strides", "required", "listInt", "all") \ + .attr("padding", "required", "str", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "grad", False, "required", "all") \ + .input(2, "argmax", False, "optional", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.U16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.I64_5HD, DataType.F16_5HD) 
\ + .get_op_info() -@op_info_register("""{ - "op_name": "MaxPoolGradWithArgmax", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "max_pool_grad_with_argmax.so", - "compute_cost": 10, - "kernel_name": "max_pool_grad_with_argmax", - "partial_flag": true, - "attr": [ - { - "name": "ksize", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "strides", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "padding", - "param_type": "required", - "type": "str", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16" - ], - "format": [ - "NC1HWC0", "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16" - ], - "format": [ - "NC1HWC0", "NC1HWC0" - ], - "name": "grad", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "uint16", "int64" - ], - "format": [ - "NC1HWC0", "NC1HWC0" - ], - "name": "argmax", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16" - ], - "format": [ - "NC1HWC0", "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(max_pool_grad_with_argmax_op_info) def _max_pool_grad_with_argmax_tbe(): """MaxPoolGradWithArgmax TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py b/mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py index 2e081c1082..24700d4b42 100644 --- a/mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +++ b/mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py @@ -14,82 +14,26 @@ # ============================================================================ """MaxPoolWithArgmax op""" -from mindspore.ops.op_info_register import 
op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +max_pool_with_argmax_op_info = TBERegOp("MaxPoolWithArgmax") \ + .fusion_type("CONVLUTION") \ + .async_flag(False) \ + .binfile_name("max_pool_with_argmax.so") \ + .compute_cost(10) \ + .kernel_name("max_pool_with_argmax") \ + .partial_flag(True) \ + .attr("ksize", "required", "listInt", "all") \ + .attr("strides", "required", "listInt", "all") \ + .attr("padding", "required", "str", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .output(1, "argmax", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.U16_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "MaxPoolWithArgmax", - "imply_type": "TBE", - "fusion_type": "CONVLUTION", - "async_flag": false, - "binfile_name": "max_pool_with_argmax.so", - "compute_cost": 10, - "kernel_name": "max_pool_with_argmax", - "partial_flag": true, - "attr": [ - { - "name": "ksize", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "strides", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "padding", - "param_type": "required", - "type": "str", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "uint16" - ], - "format": [ - "NC1HWC0" - ], - "name": "argmax", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(max_pool_with_argmax_op_info) def _max_pool_with_argmax_tbe(): """MaxPoolWithArgmax TBE register""" return diff --git 
a/mindspore/ops/_op_impl/tbe/maximum.py b/mindspore/ops/_op_impl/tbe/maximum.py index 2a4051f28d..6fb7d05e03 100644 --- a/mindspore/ops/_op_impl/tbe/maximum.py +++ b/mindspore/ops/_op_impl/tbe/maximum.py @@ -14,69 +14,28 @@ # ============================================================================ """Maximum op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +maximum_op_info = TBERegOp("Maximum") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("maximum.so") \ + .compute_cost(10) \ + .kernel_name("maximum") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name":"Maximum", - "imply_type":"TBE", - "fusion_type":"ELEMWISE", - "async_flag":false, - "binfile_name":"maximum.so", - "compute_cost":10, - "kernel_name":"maximum", - "partial_flag":true, - "attr":[], - "inputs":[ - { - "index":0, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"x1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - 
"dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"x2", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"y", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(maximum_op_info) def _maximum_tbe(): """Maximum TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/maximum_grad.py b/mindspore/ops/_op_impl/tbe/maximum_grad.py index f602616da9..b9bc9c09f8 100644 --- a/mindspore/ops/_op_impl/tbe/maximum_grad.py +++ b/mindspore/ops/_op_impl/tbe/maximum_grad.py @@ -14,112 +14,38 @@ # ============================================================================ """MaximumGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +maximum_grad_op_info = TBERegOp("MaximumGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("maximum_grad.so") \ + .compute_cost(10) \ + .kernel_name("maximum_grad") \ + .partial_flag(True) \ + .attr("grad_x", "optional", "bool", "all") \ + .attr("grad_y", "optional", "bool", "all") \ + .input(0, "grads", False, "required", "all") \ + .input(1, "x1", False, "required", "all") \ + .input(2, "x2", False, "required", "all") \ + .output(0, "y1", False, "required", "all") \ + 
.output(1, "y2", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, + DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD, + DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, + DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name":"MaximumGrad", - "imply_type":"TBE", - "fusion_type":"OPAQUE", - "async_flag":false, - "binfile_name":"maximum_grad.so", - "compute_cost":10, - "kernel_name":"maximum_grad", - "partial_flag":true, - "attr":[ - { - "name":"grad_x", - "param_type":"optional", - "type":"bool", - "value":"all" - }, - { - "name":"grad_y", - "param_type":"optional", - "type":"bool", - "value":"all" - } - ], - "inputs":[ - { - "index":0, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"grads", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", 
"DefaultFormat", "DefaultFormat" - ], - "name":"x1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":2, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"x2", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"y1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"y2", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(maximum_grad_op_info) def _maximum_grad_tbe(): """MaximumGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/minimum.py b/mindspore/ops/_op_impl/tbe/minimum.py index 2f7e5b80d7..1cebfd3dad 100644 --- a/mindspore/ops/_op_impl/tbe/minimum.py +++ b/mindspore/ops/_op_impl/tbe/minimum.py @@ -15,74 +15,28 @@ """Minimum op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType 
+minimum_op_info = TBERegOp("Minimum") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("minimum.so") \ + .compute_cost(10) \ + .kernel_name("minimum") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Minimum", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "minimum.so", - "compute_cost": 10, - "kernel_name": "minimum", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - 
"outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(minimum_op_info) def _minimum_tbe(): """Minimum TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/minimum_grad.py b/mindspore/ops/_op_impl/tbe/minimum_grad.py index d49b2aa184..c3ea1c3a56 100644 --- a/mindspore/ops/_op_impl/tbe/minimum_grad.py +++ b/mindspore/ops/_op_impl/tbe/minimum_grad.py @@ -14,112 +14,38 @@ # ============================================================================ """MinimumGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +minimum_grad_op_info = TBERegOp("MinimumGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("minimum_grad.so") \ + .compute_cost(10) \ + .kernel_name("minimum_grad") \ + .partial_flag(True) \ + .attr("grad_x", "optional", "bool", "all") \ + .attr("grad_y", "optional", "bool", "all") \ + .input(0, "grads", False, "required", "all") \ + .input(1, "x1", False, "required", "all") \ + .input(2, "x2", False, "required", "all") \ + .output(0, "y1", False, "required", "all") \ + .output(1, "y2", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, + DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD, + DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, + DataType.F16_Default) \ + 
.dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, + DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, + DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, + DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name":"MinimumGrad", - "imply_type":"TBE", - "fusion_type":"OPAQUE", - "async_flag":false, - "binfile_name":"minimum_grad.so", - "compute_cost":10, - "kernel_name":"minimum_grad", - "partial_flag":true, - "attr":[ - { - "name":"grad_x", - "param_type":"optional", - "type":"bool", - "value":"all" - }, - { - "name":"grad_y", - "param_type":"optional", - "type":"bool", - "value":"all" - } - ], - "inputs":[ - { - "index":0, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"grads", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"x1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":2, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", 
"DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"x2", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"y1", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32" - ], - "format":[ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name":"y2", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(minimum_grad_op_info) def _minimum_grad_tbe(): """MinimumGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/mul.py b/mindspore/ops/_op_impl/tbe/mul.py index 912d5e372f..fa74c88de3 100644 --- a/mindspore/ops/_op_impl/tbe/mul.py +++ b/mindspore/ops/_op_impl/tbe/mul.py @@ -14,77 +14,37 @@ # ============================================================================ """Mul op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +mul_op_info = TBERegOp("Mul") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("mul.so") \ + .compute_cost(10) \ + .kernel_name("mul") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .input(1, "y", False, "required", "all") \ + .output(0, "output", False, "required", "all") \ + .dtype_format(DataType.I32_Default, 
DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.I32_FracZ, DataType.I32_FracZ, DataType.I32_FracZ) \ + .dtype_format(DataType.I32_FracNZ, DataType.I32_FracNZ, DataType.I32_FracNZ) \ + .dtype_format(DataType.I32_C1HWNCoC0, DataType.I32_C1HWNCoC0, DataType.I32_C1HWNCoC0) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Mul", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "mul.so", - "compute_cost": 10, - "kernel_name": "mul", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "int32", "int32", "int32", "int32", "int32", - "float16", "float16", "float16", "float16", "float16", - "float", "float", "float", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "int32", "int32", "int32", 
"int32", "int32", - "float16", "float16", "float16", "float16", "float16", - "float", "float", "float", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "int32", "int32", "int32", "int32", "int32", - "float16", "float16", "float16", "float16", "float16", - "float", "float", "float", "float","float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0", - "FRACTAL_NZ", "DefaultFormat", "FracZ", "C1HWNCoC0", "NC1HWC0" - ], - "name": "output", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(mul_op_info) def _mul_tbe(): """Mul TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/neg.py b/mindspore/ops/_op_impl/tbe/neg.py index bbfcd824ec..feb648f056 100644 --- a/mindspore/ops/_op_impl/tbe/neg.py +++ b/mindspore/ops/_op_impl/tbe/neg.py @@ -14,51 +14,29 @@ # ============================================================================ """Neg op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +neg_op_info = TBERegOp("Neg") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("neg.so") \ + .compute_cost(10) \ + .kernel_name("neg") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + 
.dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Neg", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "neg.so", - "compute_cost": 10, - "kernel_name": "neg", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float","float","float16","float16","int32","int32","int8","int8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float","float","float16","float16","int32","int32","int8","int8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(neg_op_info) def _neg_tbe(): """Neg TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py b/mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py index 6f65b9064a..a395f38e76 100644 --- a/mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +++ b/mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py @@ -14,39 +14,21 @@ # ============================================================================ """NPUAllocFloatStatus op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +npu_alloc_float_status_op_info = TBERegOp("NPUAllocFloatStatus") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("n_p_u_alloc_float_status.so") \ + .compute_cost(10) \ + .kernel_name("n_p_u_alloc_float_status") \ + 
.partial_flag(True) \ + .output(0, "data", False, "required", "all") \ + .dtype_format(DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "NPUAllocFloatStatus", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "n_p_u_alloc_float_status.so", - "compute_cost": 10, - "kernel_name": "n_p_u_alloc_float_status", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float" - ], - "format": [ - "DefaultFormat" - ], - "name": "data", - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(npu_alloc_float_status_op_info) def _npu_alloc_float_status_tbe(): """NPUAllocFloatStatus TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/npu_clear_float_status.py b/mindspore/ops/_op_impl/tbe/npu_clear_float_status.py index d7e69673f2..b67bf5e62b 100644 --- a/mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +++ b/mindspore/ops/_op_impl/tbe/npu_clear_float_status.py @@ -14,52 +14,22 @@ # ============================================================================ """NPUClearFloatStatus op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +npu_clear_float_status_op_info = TBERegOp("NPUClearFloatStatus") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("n_p_u_clear_float_status.so") \ + .compute_cost(10) \ + .kernel_name("n_p_u_clear_float_status") \ + .partial_flag(True) \ + .input(0, "addr", False, "required", "all") \ + .output(0, "data", False, "required", "all") \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "NPUClearFloatStatus", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "n_p_u_clear_float_status.so", - "compute_cost": 10, - "kernel_name": "n_p_u_clear_float_status", - "partial_flag": 
true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float" - ], - "format": [ - "DefaultFormat" - ], - "name": "addr", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float" - ], - "format": [ - "DefaultFormat" - ], - "name": "data", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(npu_clear_float_status_op_info) def _npu_clear_float_status_tbe(): """NPUClearFloatStatus TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/npu_get_float_status.py b/mindspore/ops/_op_impl/tbe/npu_get_float_status.py index 441fe3b271..ad3eb4be8c 100644 --- a/mindspore/ops/_op_impl/tbe/npu_get_float_status.py +++ b/mindspore/ops/_op_impl/tbe/npu_get_float_status.py @@ -14,52 +14,22 @@ # ============================================================================ """NPUGetFloatStatus op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +npu_get_float_status_op_info = TBERegOp("NPUGetFloatStatus") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("n_p_u_get_float_status.so") \ + .compute_cost(10) \ + .kernel_name("n_p_u_get_float_status") \ + .partial_flag(True) \ + .input(0, "addr", False, "required", "all") \ + .output(0, "data", False, "required", "all") \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "NPUGetFloatStatus", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "n_p_u_get_float_status.so", - "compute_cost": 10, - "kernel_name": "n_p_u_get_float_status", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float" - ], - "format": [ - "DefaultFormat" - ], - "name": "addr", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - 
"outputs": [ - { - "index": 0, - "dtype": [ - "float" - ], - "format": [ - "DefaultFormat" - ], - "name": "data", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(npu_get_float_status_op_info) def _npu_get_float_status_tbe(): """NPUGetFloatStatus TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/one_hot.py b/mindspore/ops/_op_impl/tbe/one_hot.py index 0af406dfc6..81a80bf759 100644 --- a/mindspore/ops/_op_impl/tbe/one_hot.py +++ b/mindspore/ops/_op_impl/tbe/one_hot.py @@ -14,96 +14,35 @@ # ============================================================================ """OneHot op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +one_hot_op_info = TBERegOp("OneHot") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("one_hot.so") \ + .compute_cost(10) \ + .kernel_name("one_hot") \ + .partial_flag(True) \ + .attr("depth", "required", "int", "all") \ + .attr("axis", "required", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "on_value", False, "required", "all") \ + .input(2, "off_value", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.U8_Default, DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.U8_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.U8_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.I32_Default, DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I32_Default, DataType.U8_Default, DataType.U8_Default, 
DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "OneHot", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "one_hot.so", - "compute_cost": 10, - "kernel_name": "one_hot", - "partial_flag": true, - "attr": [ - { - "name": "depth", - "param_type": "required", - "type": "int", - "value": "all" - }, - { - "name": "axis", - "param_type": "required", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "int32","int32","int32","int32","int32", - "uint8","uint8","uint8","uint8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float32","int32","int8","uint8", - "float16","float32","int32","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "on_value", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16","float32","int32","int8","uint8", - "float16","float32","int32","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "off_value", - "need_compile": false, - "param_type": "required", - "shape": "all" 
- } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float32","int32","int8","uint8", - "float16","float32","int32","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(one_hot_op_info) def _one_hot_tbe(): """OneHot TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/pad_d.py b/mindspore/ops/_op_impl/tbe/pad_d.py index 25cb19816d..21d814d6b6 100644 --- a/mindspore/ops/_op_impl/tbe/pad_d.py +++ b/mindspore/ops/_op_impl/tbe/pad_d.py @@ -14,57 +14,27 @@ # ============================================================================ """Pad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +pad_d_op_info = TBERegOp("Pad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("pad_d.so") \ + .compute_cost(10) \ + .kernel_name("pad_d") \ + .partial_flag(True) \ + .attr("paddings", "optional", "listListInt", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Pad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "pad_d.so", - "compute_cost": 10, - "kernel_name": "pad_d", - "partial_flag": true, - "attr": [ - { - "name": "paddings", - "param_type": "optional", - "type": "listListInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ 
- "float16","float","int8","uint8","int32" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","int8","uint8","int32" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(pad_d_op_info) def _pad_d_tbe(): """Pad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/pow.py b/mindspore/ops/_op_impl/tbe/pow.py index aa67a8c942..223a139252 100644 --- a/mindspore/ops/_op_impl/tbe/pow.py +++ b/mindspore/ops/_op_impl/tbe/pow.py @@ -14,65 +14,27 @@ # ============================================================================ """Pow op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +pow_op_info = TBERegOp("Pow") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("pow.so") \ + .compute_cost(10) \ + .kernel_name("pow") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Pow", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "pow.so", - "compute_cost": 10, - 
"kernel_name": "pow", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int32", "int8", "uint8" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float", "int32", "int8", "uint8" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int32", "int8", "uint8" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(pow_op_info) def _pow_tbe(): """Pow TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/real_div.py b/mindspore/ops/_op_impl/tbe/real_div.py index 01f870b7cb..b39948971d 100644 --- a/mindspore/ops/_op_impl/tbe/real_div.py +++ b/mindspore/ops/_op_impl/tbe/real_div.py @@ -14,64 +14,26 @@ # ============================================================================ """RealDiv op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +realdiv_op_info = TBERegOp("RealDiv") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("realdiv.so") \ + .compute_cost(10) \ + .kernel_name("realdiv") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .input(1, "y", False, "required", "all") \ + .output(0, "z", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + 
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "RealDiv", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "realdiv.so", - "compute_cost": 10, - "kernel_name": "realdiv", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "z", - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(realdiv_op_info) def _real_div_tbe(): """RealDiv TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/reciprocal.py b/mindspore/ops/_op_impl/tbe/reciprocal.py index ba039be4c5..dfa126384c 100644 --- a/mindspore/ops/_op_impl/tbe/reciprocal.py +++ b/mindspore/ops/_op_impl/tbe/reciprocal.py @@ -14,52 +14,27 @@ # ============================================================================ """Add op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +reciprocal_op_info = TBERegOp("Reciprocal") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("reciprocal.so") \ + .compute_cost(10) \ + .kernel_name("reciprocal") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + 
.output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_NHWC, DataType.F16_NHWC) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Reciprocal", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "reciprocal.so", - "compute_cost": 10, - "kernel_name": "reciprocal", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float32", "float32", "float32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "NHWC", "DefaultFormat", "NC1HWC0", "NHWC" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float32", "float32", "float32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "NHWC", "DefaultFormat", "NC1HWC0", "NHWC" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(reciprocal_op_info) def _reciprocal_tbe(): """Add TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/reduce_max.py b/mindspore/ops/_op_impl/tbe/reduce_max.py index 9c4981babc..ab0e766f59 100644 --- a/mindspore/ops/_op_impl/tbe/reduce_max.py +++ b/mindspore/ops/_op_impl/tbe/reduce_max.py @@ -14,63 +14,29 @@ # ============================================================================ """ReduceMax op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +reduce_max_d_op_info = TBERegOp("ReduceMax") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("reduce_max_d.so") \ + 
.compute_cost(10) \ + .kernel_name("reduce_max_d") \ + .partial_flag(True) \ + .attr("axis", "optional", "listInt", "all") \ + .attr("keep_dims", "optional", "bool", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ReduceMax", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "reduce_max_d.so", - "compute_cost": 10, - "kernel_name": "reduce_max_d", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "keep_dims", - "param_type": "required", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int8", "uint8", "bool", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int8", "uint8", "bool", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(reduce_max_d_op_info) def _reduce_max_tbe(): """ReduceMax TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/reduce_mean.py b/mindspore/ops/_op_impl/tbe/reduce_mean.py index c8776fa8b1..47548e9036 100644 --- 
a/mindspore/ops/_op_impl/tbe/reduce_mean.py +++ b/mindspore/ops/_op_impl/tbe/reduce_mean.py @@ -14,63 +14,27 @@ # ============================================================================ """ReduceMean op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +reduce_mean_op_info = TBERegOp("ReduceMean") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("reduce_mean.so") \ + .compute_cost(10) \ + .kernel_name("reduce_mean") \ + .partial_flag(True) \ + .attr("axis", "optional", "listInt", "all") \ + .attr("keep_dims", "optional", "bool", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ReduceMean", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "reduce_mean.so", - "compute_cost": 10, - "kernel_name": "reduce_mean", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, - { - "name": "keep_dims", - "param_type": "optional", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float","float16","int8","uint8" - ], - "format": [ - "NC1HWC0","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","float16","int8","uint8" - ], - "format": [ - "NC1HWC0","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", 
- "shape": "all" - } - ] -}""") + +@op_info_register(reduce_mean_op_info) def _reduce_mean_tbe(): """ReduceMean TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/reduce_mean_d.py b/mindspore/ops/_op_impl/tbe/reduce_mean_d.py index 59cfeb240b..e427b34869 100644 --- a/mindspore/ops/_op_impl/tbe/reduce_mean_d.py +++ b/mindspore/ops/_op_impl/tbe/reduce_mean_d.py @@ -14,63 +14,27 @@ # ============================================================================ """ReduceMeanD op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +reduce_mean_d_op_info = TBERegOp("ReduceMeanD") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("reduce_mean_d.so") \ + .compute_cost(10) \ + .kernel_name("reduce_mean_d") \ + .partial_flag(True) \ + .attr("axis", "optional", "listInt", "all") \ + .attr("keep_dims", "optional", "bool", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ReduceMeanD", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "reduce_mean_d.so", - "compute_cost": 10, - "kernel_name": "reduce_mean_d", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, - { - "name": "keep_dims", - "param_type": "optional", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float","float16","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - 
"shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float","float16","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(reduce_mean_d_op_info) def _reduce_mean_d_tbe(): """Conv2D TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/reduce_min.py b/mindspore/ops/_op_impl/tbe/reduce_min.py index f440b1fff6..f1601ebc94 100644 --- a/mindspore/ops/_op_impl/tbe/reduce_min.py +++ b/mindspore/ops/_op_impl/tbe/reduce_min.py @@ -14,63 +14,31 @@ # ============================================================================ """ReduceMin op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +reduce_min_op_info = TBERegOp("ReduceMin") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("reduce_min_d.so") \ + .compute_cost(10) \ + .kernel_name("reduce_min_d") \ + .partial_flag(True) \ + .attr("axis", "required", "listInt", "all") \ + .attr("keep_dims", "required", "bool", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_FracZ, DataType.I8_FracZ) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_FracZ, DataType.U8_FracZ) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ReduceMin", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "reduce_min_d.so", - "compute_cost": 10, - "kernel_name": "reduce_min_d", - 
"partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "keep_dims", - "param_type": "required", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int8", "int8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "FracZ", "DefaultFormat", "FracZ", "DefaultFormat", "FracZ", "DefaultFormat", "FracZ" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int8", "int8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "FracZ", "DefaultFormat", "FracZ", "DefaultFormat", "FracZ", "DefaultFormat", "FracZ" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(reduce_min_op_info) def _reduce_min_tbe(): """ReduceMin TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/reduce_sum.py b/mindspore/ops/_op_impl/tbe/reduce_sum.py index b15a4deccc..2f76f74562 100644 --- a/mindspore/ops/_op_impl/tbe/reduce_sum.py +++ b/mindspore/ops/_op_impl/tbe/reduce_sum.py @@ -14,63 +14,25 @@ # ============================================================================ """ReduceSum op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +reduce_sum_op_info = TBERegOp("ReduceSum") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("reduce_sum_d.so") \ + .compute_cost(10) \ + .kernel_name("reduce_sum_d") \ + .partial_flag(True) \ + .attr("axis", "optional", "listInt", "all") \ + .attr("keep_dims", "optional", "bool", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + 
.dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ReduceSum", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "reduce_sum_d.so", - "compute_cost": 10, - "kernel_name": "reduce_sum_d", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, - { - "name": "keep_dims", - "param_type": "optional", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(reduce_sum_op_info) def _reduce_sum_tbe(): """ReduceSum TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/relu.py b/mindspore/ops/_op_impl/tbe/relu.py index 7350f2ae35..03cc381253 100644 --- a/mindspore/ops/_op_impl/tbe/relu.py +++ b/mindspore/ops/_op_impl/tbe/relu.py @@ -14,54 +14,29 @@ # ============================================================================ """ReLU op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +relu_op_info = TBERegOp("ReLU") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("relu.so") \ + .compute_cost(10) \ + .kernel_name("relu") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + 
.dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ReLU", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "relu.so", - "compute_cost": 10, - "kernel_name": "relu", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float","int32", "int32", "int8", "int8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32", "int8", "int8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(relu_op_info) def _relu_tbe(): """Relu TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/relu_grad.py b/mindspore/ops/_op_impl/tbe/relu_grad.py index 28b4574e04..0f9e962cfd 100644 --- a/mindspore/ops/_op_impl/tbe/relu_grad.py +++ b/mindspore/ops/_op_impl/tbe/relu_grad.py @@ -14,68 +14,32 @@ # ============================================================================ """ReluGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +relugrad_op_info = TBERegOp("ReluGrad") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("relugrad.so") \ + .compute_cost(10) \ + .kernel_name("relugrad") \ + 
.partial_flag(True) \ + .input(0, "gradients", False, "required", "all") \ + .input(1, "features", False, "required", "all") \ + .output(0, "backprops", True, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ReluGrad", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "relugrad.so", - "compute_cost": 10, - "kernel_name": "relugrad", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32", "int8", "int8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "NC1HWC0","DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "gradients", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32", "int8", "int8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "features", - "need_compile": false, - "param_type": "required", - "shape": "all" 
- } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32", "int8", "int8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "backprops", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(relugrad_op_info) def _relu_grad_tbe(): """ReluGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/reshape.py b/mindspore/ops/_op_impl/tbe/reshape.py index 8386b7a6f0..d46fd966d8 100644 --- a/mindspore/ops/_op_impl/tbe/reshape.py +++ b/mindspore/ops/_op_impl/tbe/reshape.py @@ -14,57 +14,25 @@ # ============================================================================ """Reshape op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +reshape_op_info = TBERegOp("Reshape") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("reshape.so") \ + .compute_cost(10) \ + .kernel_name("reshape") \ + .partial_flag(True) \ + .attr("shape", "required", "listInt", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Reshape", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "reshape.so", - "compute_cost": 10, - "kernel_name": "reshape", - "partial_flag": true, - "attr": [ - { - "name": "shape", - "param_type": "required", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat", 
"DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(reshape_op_info) def _reshape_tbe(): """Reshape TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py b/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py index 63ecf0ede2..8a66f75dbb 100644 --- a/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +++ b/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py @@ -14,67 +14,33 @@ # ============================================================================ """ResizeNearestNeighbor op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +resize_nearest_neighbor_op_info = TBERegOp("ResizeNearestNeighbor") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("resize_nearest_neighbor_d.so") \ + .compute_cost(10) \ + .kernel_name("resize_nearest_neighbor_d") \ + .partial_flag(True) \ + .attr("size", "required", "listInt", "all") \ + .attr("align_corners", "optional", "bool", "all") \ + .input(0, "images", False, "required", "all") \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + 
.dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ResizeNearestNeighbor", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "resize_nearest_neighbor_d.so", - "compute_cost": 10, - "kernel_name": "resize_nearest_neighbor_d", - "partial_flag": true, - "attr": [ - { - "name": "size", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "align_corners", - "param_type": "optional", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float","int32","int8","uint8", - "float16","float","int32","int8","uint8" - ], - "format": [ - "NC1HWC0","NC1HWC0","NC1HWC0","NC1HWC0","NC1HWC0", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "images", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","int32","int8","uint8", - "float16","float","int32","int8","uint8" - ], - "format": [ - "NC1HWC0","NC1HWC0","NC1HWC0","NC1HWC0","NC1HWC0", - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(resize_nearest_neighbor_op_info) def _resize_nearest_neighbor_d_tbe(): """ResizeNearestNeighbor TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_d.py b/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_d.py index 9595041401..4b54da991a 100644 --- a/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_d.py +++ b/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_d.py @@ -14,63 +14,28 @@ # ============================================================================ """ResizeNearestNeighbor op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import 
op_info_register, TBERegOp, DataType +resize_nearest_neighbor_d_op_info = TBERegOp("ResizeNearestNeighbor") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("resize_nearest_neighbor_d.so") \ + .compute_cost(10) \ + .kernel_name("resize_nearest_neighbor_d") \ + .partial_flag(True) \ + .attr("size", "required", "listInt", "all") \ + .attr("align_corners", "optional", "bool", "all") \ + .input(0, "images", False, "required", "all") \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ResizeNearestNeighbor", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "resize_nearest_neighbor_d.so", - "compute_cost": 10, - "kernel_name": "resize_nearest_neighbor_d", - "partial_flag": true, - "attr": [ - { - "name": "size", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "align_corners", - "param_type": "optional", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float","int32","int8","uint8" - ], - "format": [ - "NC1HWC0","NC1HWC0","NC1HWC0","NC1HWC0","NC1HWC0" - ], - "name": "images", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","int32","int8","uint8" - ], - "format": [ - "NC1HWC0","NC1HWC0","NC1HWC0","NC1HWC0","NC1HWC0" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(resize_nearest_neighbor_d_op_info) def _resize_nearest_neighbor_d_tbe(): """ResizeNearestNeighbor TBE register""" return diff --git 
a/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_d.py b/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_d.py index 51cfaf5176..6ee6c56146 100644 --- a/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_d.py +++ b/mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_d.py @@ -14,63 +14,24 @@ # ============================================================================ """ResizeNearestNeighborgrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +resize_nearest_neighbor_grad_d_op_info = TBERegOp("ResizeNearestNeighborGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("resize_nearest_neighbor_grad_d.so") \ + .compute_cost(10) \ + .kernel_name("resize_nearest_neighbor_grad_d") \ + .partial_flag(True) \ + .attr("size", "required", "listInt", "all") \ + .attr("align_corners", "optional", "bool", "all") \ + .input(0, "grads", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ResizeNearestNeighborGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "resize_nearest_neighbor_grad_d.so", - "compute_cost": 10, - "kernel_name": "resize_nearest_neighbor_grad_d", - "partial_flag": true, - "attr": [ - { - "name": "size", - "param_type": "required", - "type": "listInt", - "value": "all" - }, - { - "name": "align_corners", - "param_type": "optional", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float" - ], - "format": [ - "NC1HWC0" - ], - "name": "grads", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float" - ], - "format": [ - "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] 
-}""") + +@op_info_register(resize_nearest_neighbor_grad_d_op_info) def _resize_nearest_neighbor_grad_d_tbe(): """ResizeNearestNeighborGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/round.py b/mindspore/ops/_op_impl/tbe/round.py index 1368100a8e..4559a3def0 100644 --- a/mindspore/ops/_op_impl/tbe/round.py +++ b/mindspore/ops/_op_impl/tbe/round.py @@ -14,52 +14,27 @@ # ============================================================================ """Round op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +round_op_info = TBERegOp("Round") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("round.so") \ + .compute_cost(10) \ + .kernel_name("round") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Round", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "round.so", - "compute_cost": 10, - "kernel_name": "round", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "FracZ", "DefaultFormat", "NC1HWC0", "FracZ" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "FracZ", "DefaultFormat", 
"NC1HWC0", "FracZ" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(round_op_info) def _round_tbe(): """Round TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/rsqrt.py b/mindspore/ops/_op_impl/tbe/rsqrt.py index e23d505397..b0830cf484 100644 --- a/mindspore/ops/_op_impl/tbe/rsqrt.py +++ b/mindspore/ops/_op_impl/tbe/rsqrt.py @@ -14,94 +14,29 @@ # ============================================================================ """Rsqrt op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +rsqrt_op_info = TBERegOp("Rsqrt") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("rsqrt.so") \ + .compute_cost(10) \ + .kernel_name("rsqrt") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \ + .get_op_info() -@op_info_register("""{ - "op_name":"Rsqrt", - "imply_type":"TBE", - "fusion_type":"OPAQUE", - "async_flag":false, - "binfile_name":"rsqrt.so", - "compute_cost":10, - "kernel_name":"rsqrt", - "partial_flag":true, - "attr":[], - "inputs":[ - { - "index":0, - "dtype":[ - "float16", - "float16", - "float16", - "float16", - "float16", - "float16", - "float", - "float", - "float", - "float", - "float", - "float" - ], - "format":[ - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "FracZ", - "C1HWNCoC0", - "DefaultFormat", - "DefaultFormat", - 
"NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "FracZ", - "C1HWNCoC0" - ], - "name":"x", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float16", - "float16", - "float16", - "float16", - "float16", - "float16", - "float", - "float", - "float", - "float", - "float", - "float" - ], - "format":[ - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "FracZ", - "C1HWNCoC0", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "FracZ", - "C1HWNCoC0" - ], - "name":"y", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(rsqrt_op_info) def _rsqrt_tbe(): """Rsqrt TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/scatter_nd.py b/mindspore/ops/_op_impl/tbe/scatter_nd.py index 947fc57920..6c9eae3ad4 100644 --- a/mindspore/ops/_op_impl/tbe/scatter_nd.py +++ b/mindspore/ops/_op_impl/tbe/scatter_nd.py @@ -14,71 +14,28 @@ # ============================================================================ """ScatterNd op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +scatter_nd_op_info = TBERegOp("ScatterNd") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("scatter_nd_d.so") \ + .compute_cost(10) \ + .kernel_name("scatter_nd_d") \ + .partial_flag(True) \ + .attr("shape", "optional", "listInt", "all") \ + .input(0, "indices", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I32_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_Default, DataType.F16_Default, DataType.F16_Default) \ + 
.dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -# map to tbe kernel name scatter_nd_d -@op_info_register("""{ - "op_name": "ScatterNd", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "scatter_nd_d.so", - "compute_cost": 10, - "kernel_name": "scatter_nd_d", - "partial_flag": true, - "attr": [ - { - "name": "shape", - "param_type": "optional", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "int32", "int32", "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "indices", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float","int32","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","int32","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(scatter_nd_op_info) def _scatter_nd_tbe(): """Conv2D TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/scatter_nd_d.py b/mindspore/ops/_op_impl/tbe/scatter_nd_d.py index ad776fde49..b069b7d8aa 100644 --- a/mindspore/ops/_op_impl/tbe/scatter_nd_d.py +++ b/mindspore/ops/_op_impl/tbe/scatter_nd_d.py @@ -14,70 +14,28 @@ # ============================================================================ """ScatterNdD op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +scatter_nd_d_op_info = TBERegOp("ScatterNdD") \ + 
.fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("scatter_nd_d.so") \ + .compute_cost(10) \ + .kernel_name("scatter_nd_d") \ + .partial_flag(True) \ + .attr("shape", "optional", "listInt", "all") \ + .input(0, "indices", False, "required", "all") \ + .input(1, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I32_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.I32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ScatterNdD", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "scatter_nd_d.so", - "compute_cost": 10, - "kernel_name": "scatter_nd_d", - "partial_flag": true, - "attr": [ - { - "name": "shape", - "param_type": "optional", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "int32", "int32", "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "indices", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float","int32","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","int32","int8","uint8" - ], - "format": [ - "DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") 
+ +@op_info_register(scatter_nd_d_op_info) def _scatter_nd_d_tbe(): """ScatterNdD TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/select.py b/mindspore/ops/_op_impl/tbe/select.py index c205e0de1d..4af4325312 100644 --- a/mindspore/ops/_op_impl/tbe/select.py +++ b/mindspore/ops/_op_impl/tbe/select.py @@ -14,94 +14,33 @@ # ============================================================================ """Select op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +select_op_info = TBERegOp("Select") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("select.so") \ + .compute_cost(10) \ + .kernel_name("select") \ + .partial_flag(True) \ + .input(0, "condition", False, "required", "all") \ + .input(1, "x1", False, "required", "all") \ + .input(2, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.I8_Default, DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.BOOL_Default, DataType.U8_Default, DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.BOOL_Default, DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.BOOL_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.BOOL_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.BOOL_5HD, DataType.I8_5HD, DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.BOOL_5HD, DataType.U8_5HD, DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.BOOL_5HD, DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.BOOL_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.BOOL_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Select", 
- "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "select.so", - "compute_cost": 10, - "kernel_name": "select", - "partial_flag": true, - "attr":[ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "bool", "bool", "bool", "bool", "bool", "bool", "bool", "bool", "bool", "bool", - "bool", "bool", "bool", "bool", "bool", "bool", "bool", "bool", "bool", "bool" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", - "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "condition", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", - "int32", "int32", "int32", "int32", "int8", "int8", "int8", "int8", "uint8", - "uint8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", - "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32", "int8", "int8", "int8", "int8", "uint8", "uint8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", 
"NC1HWC0", - "DefaultFormat", "DefaultFormat" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32", "int8", "int8", "int8", "int8", "uint8", "uint8", "uint8", "uint8" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(select_op_info) def _select_tbe(): """Select TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/sigmoid.py b/mindspore/ops/_op_impl/tbe/sigmoid.py index cba9561d27..38413c0432 100644 --- a/mindspore/ops/_op_impl/tbe/sigmoid.py +++ b/mindspore/ops/_op_impl/tbe/sigmoid.py @@ -14,67 +14,31 @@ # ============================================================================ """Sigmoid op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +sigmoid_op_info = TBERegOp("Sigmoid") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("sigmoid.so") \ + .compute_cost(10) \ + .kernel_name("sigmoid") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \ + 
.dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Sigmoid", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "Sigmoid.so", - "compute_cost": 10, - "kernel_name": "sigmoid", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float", - "float16","float", - "float16","float", - "float16","float", - "float16","float" - ], - "format": [ - "FracZ","FracZ", - "FRACTAL_NZ","FRACTAL_NZ", - "C1HWNCoC0","C1HWNCoC0", - "NC1HWC0","NC1HWC0", - "DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float", - "float16","float", - "float16","float", - "float16","float", - "float16","float" - ], - "format": [ - "FracZ","FracZ", - "FRACTAL_NZ","FRACTAL_NZ", - "C1HWNCoC0","C1HWNCoC0", - "NC1HWC0","NC1HWC0", - "DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(sigmoid_op_info) def _sigmoid_tbe(): """Sigmoid TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py b/mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py index b20438d5fe..61c81a8a99 100644 --- a/mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +++ b/mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py @@ -14,64 +14,26 @@ # ============================================================================ """SigmoidCrossEntropyWithLogits op""" -from mindspore.ops.op_info_register import op_info_register +from 
mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +sigmoid_cross_entropy_with_logits_op_info = TBERegOp("SigmoidCrossEntropyWithLogits") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("sigmoid_cross_entropy_with_logits.so") \ + .compute_cost(10) \ + .kernel_name("sigmoid_cross_entropy_with_logits") \ + .partial_flag(True) \ + .input(0, "predict", False, "required", "all") \ + .input(1, "target", False, "required", "all") \ + .output(0, "loss", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "SigmoidCrossEntropyWithLogits", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "sigmoid_cross_entropy_with_logits.so", - "compute_cost": 10, - "kernel_name": "sigmoid_cross_entropy_with_logits", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat" - ], - "name": "predict", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat" - ], - "name": "target", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat" - ], - "name": "loss", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + 
+@op_info_register(sigmoid_cross_entropy_with_logits_op_info) def _sigmoid_cross_entropy_with_logits_tbe(): """SigmoidCrossEntropyWithLogits TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py b/mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py index 6e5df24cfe..cc2a29353d 100644 --- a/mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +++ b/mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py @@ -14,77 +14,27 @@ # ============================================================================ """SigmoidCrossEntropyWithLogitsGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +sigmoid_cross_entropy_with_logits_grad_op_info = TBERegOp("SigmoidCrossEntropyWithLogitsGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("sigmoid_cross_entropy_with_logits_grad.so") \ + .compute_cost(10) \ + .kernel_name("sigmoid_cross_entropy_with_logits_grad") \ + .partial_flag(True) \ + .input(0, "predict", False, "required", "all") \ + .input(1, "target", False, "required", "all") \ + .input(2, "dout", False, "required", "all") \ + .output(0, "gradient", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "SigmoidCrossEntropyWithLogitsGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "sigmoid_cross_entropy_with_logits_grad.so", - "compute_cost": 10, - "kernel_name": 
"sigmoid_cross_entropy_with_logits_grad", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat" - ], - "name": "predict", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat" - ], - "name": "target", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 2, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat" - ], - "name": "dout", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat" - ], - "name": "gradient", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(sigmoid_cross_entropy_with_logits_grad_op_info) def _sigmoid_cross_entropy_with_logits_grad_tbe(): """SigmoidCrossEntropyWithLogitsGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/sigmoid_grad.py b/mindspore/ops/_op_impl/tbe/sigmoid_grad.py index f0833e8a80..bc2878ce96 100644 --- a/mindspore/ops/_op_impl/tbe/sigmoid_grad.py +++ b/mindspore/ops/_op_impl/tbe/sigmoid_grad.py @@ -14,64 +14,26 @@ # ============================================================================ """SigmoidGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +sigmoid_grad_op_info = TBERegOp("SigmoidGrad") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("sigmoid_grad.so") \ + .compute_cost(10) \ + 
.kernel_name("sigmoid_grad") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .input(1, "y", False, "required", "all") \ + .output(0, "z", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "SigmoidGrad", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "sigmoid_grad.so", - "compute_cost": 10, - "kernel_name": "sigmoid_grad", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float","float16","float" - ], - "format": [ - "NC1HWC0","NC1HWC0","DefaultFormat","DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16","float","float16","float" - ], - "format": [ - "NC1HWC0","NC1HWC0","DefaultFormat","DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float","float16","float" - ], - "format": [ - "NC1HWC0","NC1HWC0","DefaultFormat","DefaultFormat" - ], - "name": "z", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(sigmoid_grad_op_info) def _sigmoid_grad_tbe(): """SigmoidGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/slice.py b/mindspore/ops/_op_impl/tbe/slice.py index 779f19cbc5..402692ca32 100644 --- a/mindspore/ops/_op_impl/tbe/slice.py +++ b/mindspore/ops/_op_impl/tbe/slice.py @@ -14,99 +14,33 @@ # ============================================================================ """Slice op""" -from 
mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +slice_op_info = TBERegOp("Slice") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("slice_d.so") \ + .compute_cost(10) \ + .kernel_name("slice_d") \ + .partial_flag(True) \ + .attr("begin", "required", "listInt", "all") \ + .attr("size", "required", "listInt", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I16_Default, DataType.I16_Default) \ + .dtype_format(DataType.U16_Default, DataType.U16_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.U32_Default, DataType.U32_Default) \ + .dtype_format(DataType.U64_Default, DataType.U64_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name":"Slice", - "imply_type":"TBE", - "fusion_type":"OPAQUE", - "async_flag":false, - "binfile_name":"slice_d.so", - "compute_cost":10, - "kernel_name":"slice_d", - "partial_flag":true, - "attr":[ - { - "name":"begin", - "param_type":"required", - "type":"listInt", - "value":"all" - }, - { - "name":"size", - "param_type":"required", - "type":"listInt", - "value":"all" - } - ], - "inputs":[ - { - "index":0, - "dtype":[ - "float", - "float16", - "int8", - "int16", - "int32", - "int64", - "uint8", - "uint16", - "uint32", - "uint64" - ], - "format":[ - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat" - ], - "name":"x", - "need_compile":false, - "param_type":"required", - 
"shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float", - "float16", - "int8", - "int16", - "int32", - "int64", - "uint8", - "uint16", - "uint32", - "uint64" - ], - "format":[ - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat" - ], - "name":"y", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(slice_op_info) def _slice_tbe(): """Slice TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/softmax.py b/mindspore/ops/_op_impl/tbe/softmax.py index 5a70d2605a..faefad87ec 100644 --- a/mindspore/ops/_op_impl/tbe/softmax.py +++ b/mindspore/ops/_op_impl/tbe/softmax.py @@ -14,57 +14,27 @@ # ============================================================================ """Softmax op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +softmax_op_info = TBERegOp("Softmax") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("softmax.so") \ + .compute_cost(10) \ + .kernel_name("softmax") \ + .partial_flag(True) \ + .attr("axis", "optional", "listInt", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_FracNZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_FracNZ) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Softmax", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "softmax.so", - "compute_cost": 10, - "kernel_name": "softmax", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "optional", - "type": 
"listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "NC1HWC0", "FRACTAL_NZ", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float" - ], - "format": [ - "FRACTAL_NZ", "DefaultFormat", "NC1HWC0", "FRACTAL_NZ", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(softmax_op_info) def _softmax_tbe(): """Softmax TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py b/mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py index cc9f06948a..386562f9f2 100644 --- a/mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +++ b/mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py @@ -14,78 +14,25 @@ # ============================================================================ """SoftmaxCrossEntropyWithLogits op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +softmax_cross_entropy_with_logits_op_info = TBERegOp("SoftmaxCrossEntropyWithLogits") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("softmax_cross_entropy_with_logits.so") \ + .compute_cost(10) \ + .kernel_name("softmax_cross_entropy_with_logits") \ + .partial_flag(True) \ + .input(0, "input_features", False, "required", "all") \ + .input(1, "input_labels", False, "required", "all") \ + .output(0, "output_loss", True, "required", "all") \ + .output(1, "output_backprop", True, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, 
DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "SoftmaxCrossEntropyWithLogits", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "softmax_cross_entropy_with_logits.so", - "compute_cost": 10, - "kernel_name": "softmax_cross_entropy_with_logits", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input_features", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input_labels", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output_loss", - "need_compile": true, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output_backprop", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(softmax_cross_entropy_with_logits_op_info) def _softmax_cross_entropy_with_logits_tbe(): """SoftmaxCrossEntropyWithLogits TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/split_d.py b/mindspore/ops/_op_impl/tbe/split_d.py index 41311ffb90..dcc8219fd4 100644 --- a/mindspore/ops/_op_impl/tbe/split_d.py +++ b/mindspore/ops/_op_impl/tbe/split_d.py @@ -14,71 +14,45 @@ # ============================================================================ """Add op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +split_d_op_info = TBERegOp("Split") \ + .fusion_type("ELEMWISE") \ + 
.async_flag(False) \ + .binfile_name("split_d.so") \ + .compute_cost(10) \ + .kernel_name("split_d") \ + .partial_flag(True) \ + .attr("axis", "required", "int", "all") \ + .attr("output_num", "required", "int", "all") \ + .input(0, "value", False, "required", "all") \ + .output(0, "output", False, "dynamic", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.BOOL_NHWC, DataType.BOOL_NHWC) \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_NHWC, DataType.I8_NHWC) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_NHWC, DataType.U8_NHWC) \ + .dtype_format(DataType.I16_Default, DataType.I16_Default) \ + .dtype_format(DataType.I16_NHWC, DataType.I16_NHWC) \ + .dtype_format(DataType.U16_Default, DataType.U16_Default) \ + .dtype_format(DataType.U16_NHWC, DataType.U16_NHWC) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_NHWC, DataType.I32_NHWC) \ + .dtype_format(DataType.U32_Default, DataType.U32_Default) \ + .dtype_format(DataType.U32_NHWC, DataType.U32_NHWC) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.I64_NHWC, DataType.I64_NHWC) \ + .dtype_format(DataType.U64_Default, DataType.U64_Default) \ + .dtype_format(DataType.U64_NHWC, DataType.U64_NHWC) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_NHWC, DataType.F16_NHWC) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Split", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "split_d.so", - "compute_cost": 10, - "kernel_name": "split_d", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "required", - "type": "int", - "value": "all" - }, - { - "name": "output_num", - 
"param_type": "required", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16","float32", "float32", "int32", "int32", "int8", "int8", - "int16", "int16", "int64", "int64", "uint8", "uint8", "uint16", "uint16", - "uint32", "uint32", "uint64", "uint64", "bool", "bool" - ], - "format": [ - "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC" - , "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC" - , "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC" - ], - "name": "value", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16","float32", "float32", "int32", "int32", "int8", "int8", - "int16", "int16", "int64", "int64", "uint8", "uint8", "uint16", "uint16", - "uint32", "uint32", "uint64", "uint64", "bool", "bool" - ], - "format": [ - "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC" - , "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC" - , "DefaultFormat", "NHWC", "DefaultFormat", "NHWC", "DefaultFormat", "NHWC" - ], - "name": "output", - "need_compile": false, - "param_type": "dynamic", - "shape": "all" - } - ] -}""") + +@op_info_register(split_d_op_info) def _split_d_tbe(): """Add TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/sqrt.py b/mindspore/ops/_op_impl/tbe/sqrt.py index c73092886a..f9e339713b 100644 --- a/mindspore/ops/_op_impl/tbe/sqrt.py +++ b/mindspore/ops/_op_impl/tbe/sqrt.py @@ -14,52 +14,27 @@ # ============================================================================ """Sqrt op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +sqrt_op_info = TBERegOp("Sqrt") \ + 
.fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("sqrt.so") \ + .compute_cost(10) \ + .kernel_name("sqrt") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_NHWC, DataType.F16_NHWC) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Sqrt", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "sqrt.so", - "compute_cost": 10, - "kernel_name": "sqrt", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "NHWC", "DefaultFormat", "NC1HWC0", "NHWC" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "NHWC", "DefaultFormat", "NC1HWC0", "NHWC" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(sqrt_op_info) def _sqrt_tbe(): """Sqrt TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/square.py b/mindspore/ops/_op_impl/tbe/square.py index 03a81236cc..c3eeb12780 100644 --- a/mindspore/ops/_op_impl/tbe/square.py +++ b/mindspore/ops/_op_impl/tbe/square.py @@ -14,56 +14,30 @@ # ============================================================================ """Square op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType 
+square_op_info = TBERegOp("Square") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("square.so") \ + .compute_cost(10) \ + .kernel_name("square") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.I32_NHWC, DataType.I32_NHWC) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_NHWC, DataType.F16_NHWC) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_NHWC, DataType.F32_NHWC) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Square", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "square.so", - "compute_cost": 10, - "kernel_name": "sqrt", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float", "float", - "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "NHWC", "DefaultFormat", "NC1HWC0", "NHWC", - "DefaultFormat", "NC1HWC0", "NHWC" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float", "float", "float", - "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "NHWC", "DefaultFormat", "NC1HWC0", "NHWC", - "DefaultFormat", "NC1HWC0", "NHWC" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(square_op_info) def _square_tbe(): """Square TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/square_sum_v1.py b/mindspore/ops/_op_impl/tbe/square_sum_v1.py index 
39b5400298..9d43fe4cc0 100644 --- a/mindspore/ops/_op_impl/tbe/square_sum_v1.py +++ b/mindspore/ops/_op_impl/tbe/square_sum_v1.py @@ -14,63 +14,25 @@ # ============================================================================ """SquareSumV1 op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +square_sum_v1_op_info = TBERegOp("SquareSumV1") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("square_sum_v1.so") \ + .compute_cost(10) \ + .kernel_name("square_sum_v1") \ + .partial_flag(True) \ + .attr("axis", "optional", "listInt", "all") \ + .attr("keep_dims", "optional", "bool", "all") \ + .input(0, "input_x", False, "required", "all") \ + .output(0, "output1", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "SquareSumV1", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "square_sum_v1.so", - "compute_cost": 10, - "kernel_name": "square_sum_v1", - "partial_flag": true, - "attr":[ - { - "name":"axis", - "param_type":"optional", - "type":"listInt", - "value":"all" - }, - { - "name":"keep_dims", - "param_type":"optional", - "type":"bool", - "value":"all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input_x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output1", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(square_sum_v1_op_info) def _square_sum_v1_tbe(): """SquareSumV1 TBE register""" return diff --git 
a/mindspore/ops/_op_impl/tbe/square_sum_v2.py b/mindspore/ops/_op_impl/tbe/square_sum_v2.py index 2f5ca49cc2..88bb1283d1 100644 --- a/mindspore/ops/_op_impl/tbe/square_sum_v2.py +++ b/mindspore/ops/_op_impl/tbe/square_sum_v2.py @@ -14,76 +14,26 @@ # ============================================================================ """SquareSumV2 op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +square_sum_v2_op_info = TBERegOp("SquareSumV2") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("square_sum_v2.so") \ + .compute_cost(10) \ + .kernel_name("square_sum_v2") \ + .partial_flag(True) \ + .attr("axis", "optional", "listInt", "all") \ + .attr("keep_dims", "optional", "bool", "all") \ + .input(0, "input_x", False, "required", "all") \ + .output(0, "output1", False, "required", "all") \ + .output(1, "output2", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "SquareSumV2", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "square_sum_v2.so", - "compute_cost": 10, - "kernel_name": "square_sum_v2", - "partial_flag": true, - "attr":[ - { - "name":"axis", - "param_type":"optional", - "type":"listInt", - "value":"all" - }, - { - "name":"keep_dims", - "param_type":"optional", - "type":"bool", - "value":"all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "input_x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output1", - "need_compile": false, - 
"param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(square_sum_v2_op_info) def _square_sum_v2_tbe(): """SquareSumV2 TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/squeeze.py b/mindspore/ops/_op_impl/tbe/squeeze.py index 765ffae2c1..9d585ccabe 100644 --- a/mindspore/ops/_op_impl/tbe/squeeze.py +++ b/mindspore/ops/_op_impl/tbe/squeeze.py @@ -14,57 +14,24 @@ # ============================================================================ """Squeeze op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +squeeze_op_info = TBERegOp("Squeeze") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("squeeze.so") \ + .compute_cost(10) \ + .kernel_name("squeeze") \ + .partial_flag(True) \ + .attr("axis", "required", "listInt", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Squeeze", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "squeeze.so", - "compute_cost": 10, - "kernel_name": "squeeze", - "partial_flag": true, - "attr": [ - { - "name": "axis", - "param_type": "required", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", 
"DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(squeeze_op_info) def _squeeze_tbe(): """Squeeze TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/strideslice_d.py b/mindspore/ops/_op_impl/tbe/strideslice_d.py index 8f398b4215..e008e6f3d7 100644 --- a/mindspore/ops/_op_impl/tbe/strideslice_d.py +++ b/mindspore/ops/_op_impl/tbe/strideslice_d.py @@ -14,99 +14,35 @@ # ============================================================================ """StridedSlice op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +strided_slice_d_op_info = TBERegOp("StridedSlice") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("strided_slice_d.so") \ + .compute_cost(10) \ + .kernel_name("strided_slice_d") \ + .partial_flag(True) \ + .attr("begin", "optional", "listInt", "all") \ + .attr("end", "optional", "listInt", "all") \ + .attr("strides", "optional", "listInt", "all") \ + .attr("begin_mask", "required", "int", "all") \ + .attr("end_mask", "required", "int", "all") \ + .attr("ellipsis_mask", "required", "int", "all") \ + .attr("new_axis_mask", "required", "int", "all") \ + .attr("shrink_axis_mask", "required", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "StridedSlice", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "strided_slice_d.so", - 
"compute_cost": 10, - "kernel_name": "strided_slice_d", - "partial_flag": true, - "attr": [ - { - "name": "begin", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, - { - "name": "end", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, - { - "name": "strides", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, - { - "name": "begin_mask", - "param_type": "required", - "type": "int", - "value": "all" - }, - { - "name": "end_mask", - "param_type": "required", - "type": "int", - "value": "all" - }, - { - "name": "ellipsis_mask", - "param_type": "required", - "type": "int", - "value": "all" - }, - { - "name": "new_axis_mask", - "param_type": "required", - "type": "int", - "value": "all" - }, - { - "name": "shrink_axis_mask", - "param_type": "required", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int32", "uint8", "bool", "int8" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int32", "uint8", "bool", "int8" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(strided_slice_d_op_info) def _strided_slice_d_tbe(): """StridedSlice TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/strideslicegrad_d.py b/mindspore/ops/_op_impl/tbe/strideslicegrad_d.py index adeeab9a81..e7e1204385 100644 --- a/mindspore/ops/_op_impl/tbe/strideslicegrad_d.py +++ b/mindspore/ops/_op_impl/tbe/strideslicegrad_d.py @@ -14,107 +14,40 @@ # ============================================================================ """StridedSliceGrad 
op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +strided_slice_grad_d_op_info = TBERegOp("StridedSliceGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("strided_slice_grad_d.so") \ + .compute_cost(10) \ + .kernel_name("strided_slice_grad_d") \ + .partial_flag(True) \ + .attr("shapex", "optional", "listInt", "all") \ + .attr("begin", "optional", "listInt", "all") \ + .attr("end", "optional", "listInt", "all") \ + .attr("strides", "optional", "listInt", "all") \ + .attr("begin_mask", "optional", "int", "all") \ + .attr("end_mask", "optional", "int", "all") \ + .attr("ellipsis_mask", "optional", "int", "all") \ + .attr("new_axis_mask", "optional", "int", "all") \ + .attr("shrink_axis_mask", "optional", "int", "all") \ + .input(0, "dy", False, "required", "all") \ + .output(0, "output", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "StridedSliceGrad", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "strided_slice_grad_d.so", - "compute_cost": 10, - "kernel_name": "strided_slice_grad_d", - "partial_flag": true, - "attr": [ - { - "name": "shapex", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, - { - "name": "begin", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, 
- { - "name": "end", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, - { - "name": "strides", - "param_type": "optional", - "type": "listInt", - "value": "all" - }, - { - "name": "begin_mask", - "param_type": "optional", - "type": "int", - "value": "all" - }, - { - "name": "end_mask", - "param_type": "optional", - "type": "int", - "value": "all" - }, - { - "name": "ellipsis_mask", - "param_type": "optional", - "type": "int", - "value": "all" - }, - { - "name": "new_axis_mask", - "param_type": "optional", - "type": "int", - "value": "all" - }, - { - "name": "shrink_axis_mask", - "param_type": "optional", - "type": "int", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float","int32","int32","uint8","uint8","int8","int8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0", - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "dy", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float","int32","int32","uint8","uint8","int8","int8" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0", - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "output", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(strided_slice_grad_d_op_info) def _strided_slice_grad_d_tbe(): """StridedSliceGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/sub.py b/mindspore/ops/_op_impl/tbe/sub.py index 8d6ea4aa0e..8e97681c6b 100644 --- a/mindspore/ops/_op_impl/tbe/sub.py +++ b/mindspore/ops/_op_impl/tbe/sub.py @@ -14,65 +14,28 @@ # ============================================================================ """Sub op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import 
op_info_register, TBERegOp, DataType +sub_op_info = TBERegOp("Sub") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("sub.so") \ + .compute_cost(10) \ + .kernel_name("sub") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Sub", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "sub.so", - "compute_cost": 10, - "kernel_name": "sub", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") 
+@op_info_register(sub_op_info) def _sub_tbe(): """Add TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/tanh.py b/mindspore/ops/_op_impl/tbe/tanh.py index dd2737f2ce..3d0b2704a3 100644 --- a/mindspore/ops/_op_impl/tbe/tanh.py +++ b/mindspore/ops/_op_impl/tbe/tanh.py @@ -14,52 +14,25 @@ # ============================================================================ """Tanh op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +tanh_op_info = TBERegOp("Tanh") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("tanh.so") \ + .compute_cost(10) \ + .kernel_name("tanh") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Tanh", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "tanh.so", - "compute_cost": 10, - "kernel_name": "tanh", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(tanh_op_info) def _tanh_tbe(): """Tanh TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/tanh_grad.py 
b/mindspore/ops/_op_impl/tbe/tanh_grad.py index c50b5a3a5a..5796ed7aff 100644 --- a/mindspore/ops/_op_impl/tbe/tanh_grad.py +++ b/mindspore/ops/_op_impl/tbe/tanh_grad.py @@ -14,65 +14,26 @@ # ============================================================================ """TanhGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +tanh_grad_op_info = TBERegOp("TanhGrad") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("tanh_grad.so") \ + .compute_cost(10) \ + .kernel_name("tanh_grad") \ + .partial_flag(True) \ + .input(0, "y", False, "required", "all") \ + .input(1, "dy", False, "required", "all") \ + .output(0, "z", True, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "TanhGrad", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "tanh_grad.so", - "compute_cost": 10, - "kernel_name": "tanh_grad", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "NC1HWC0" - ], - "name": "dy", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float", "float" - ], - "format": [ - "DefaultFormat", "NC1HWC0", 
"DefaultFormat", "NC1HWC0" - ], - "name": "z", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(tanh_grad_op_info) def _tanh_grad_tbe(): """TanhGrad TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/tensor_add.py b/mindspore/ops/_op_impl/tbe/tensor_add.py index 26a25c34b2..255c1b1278 100644 --- a/mindspore/ops/_op_impl/tbe/tensor_add.py +++ b/mindspore/ops/_op_impl/tbe/tensor_add.py @@ -14,70 +14,28 @@ # ============================================================================ """TensorAdd op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +tensor_add_op_info = TBERegOp("TensorAdd") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("add.so") \ + .compute_cost(10) \ + .kernel_name("add") \ + .partial_flag(True) \ + .input(0, "x1", False, "required", "all") \ + .input(1, "x2", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "TensorAdd", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "add.so", - "compute_cost": 10, - "kernel_name": "add", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", "int32", - "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", 
"DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", "int32", - "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "x2", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float16", "float16", "float16", "float", "float", "float", "float", "int32", - "int32", "int32", "int32" - ], - "format": [ - "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "NC1HWC0", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": true, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(tensor_add_op_info) def _tensor_add_tbe(): """Add TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/tile.py b/mindspore/ops/_op_impl/tbe/tile.py index 8299c500db..093e03f6ac 100644 --- a/mindspore/ops/_op_impl/tbe/tile.py +++ b/mindspore/ops/_op_impl/tbe/tile.py @@ -14,57 +14,25 @@ # ============================================================================ """Tile op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +tile_op_info = TBERegOp("Tile") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("tile_d.so") \ + .compute_cost(10) \ + .kernel_name("tile_d") \ + .partial_flag(True) \ + .attr("multiples", "optional", "listInt", "all")\ + .input(0, 
"x1", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Tile", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "tile_d.so", - "compute_cost": 10, - "kernel_name": "tile_d", - "partial_flag": true, - "attr": [ - { - "name": "multiples", - "param_type": "optional", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32", "int32", "float16", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "x1", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32", "int32", "float16", "int32" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(tile_op_info) def _tile_tbe(): """Tile TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/topkv2.py b/mindspore/ops/_op_impl/tbe/topkv2.py index 916b246a38..a03871f8b7 100644 --- a/mindspore/ops/_op_impl/tbe/topkv2.py +++ b/mindspore/ops/_op_impl/tbe/topkv2.py @@ -14,89 +14,26 @@ # ============================================================================ """TopKV2 op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +top_k_v2_op_info = TBERegOp("TopKV2") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("top_k_v2.so") \ + .compute_cost(10) \ + .kernel_name("top_k_v2") \ + .partial_flag(True) \ + .attr("k", 
"required", "int", "all")\ + .attr("sorted", "required", "bool", "all")\ + .input(0, "x", False, "required", "all") \ + .input(1, "input_indices", False, "optional", "all") \ + .output(0, "values", False, "required", "all") \ + .output(1, "indices", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.I32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "TopKV2", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "top_k_v2.so", - "compute_cost": 10, - "kernel_name": "top_k_v2", - "partial_flag": true, - "attr": [ - { - "name": "k", - "param_type": "required", - "type": "int", - "value": "all" - }, - { - "name": "sorted", - "param_type": "required", - "type": "bool", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "float16" - ], - "format": [ - "DefaultFormat" - ], - "name": "input_indices", - "need_compile": false, - "param_type": "optional", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16" - ], - "format": [ - "DefaultFormat" - ], - "name": "values", - "need_compile": false, - "param_type": "required", - "shape": "all" - }, - { - "index": 1, - "dtype": [ - "int32" - ], - "format": [ - "DefaultFormat" - ], - "name": "indices", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(top_k_v2_op_info) def _topk_v2_tbe(): """TopKV2 TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/trans_data.py b/mindspore/ops/_op_impl/tbe/trans_data.py index c6628c7638..f961491b37 100644 --- a/mindspore/ops/_op_impl/tbe/trans_data.py +++ b/mindspore/ops/_op_impl/tbe/trans_data.py @@ -14,75 +14,54 @@ # 
============================================================================ """TransData op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +trans_data_op_info = TBERegOp("TransData") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("trans_data.so") \ + .compute_cost(10) \ + .kernel_name("trans_data") \ + .partial_flag(True) \ + .attr("src_format", "required", "str", "DefaultFormat,NC1HWC0,FracZ,FRACTAL_NZ,HWCN,C1HWNCoC0")\ + .attr("dst_format", "required", "str", "DefaultFormat,NC1HWC0,FracZ,FRACTAL_NZ,HWCN,C1HWNCoC0")\ + .input(0, "src", False, "required", "all") \ + .output(0, "dst", False, "required", "all") \ + .dtype_format(DataType.U16_Default, DataType.U16_5HD) \ + .dtype_format(DataType.U16_Default, DataType.U16_FracZ) \ + .dtype_format(DataType.U16_Default, DataType.U16_FracNZ) \ + .dtype_format(DataType.U16_FracZ, DataType.U16_Default) \ + .dtype_format(DataType.U16_FracZ, DataType.U16_HWCN) \ + .dtype_format(DataType.U16_FracNZ, DataType.U16_Default) \ + .dtype_format(DataType.U16_5HD, DataType.U16_Default) \ + .dtype_format(DataType.U16_HWCN, DataType.U16_FracZ) \ + .dtype_format(DataType.U16_HWCN, DataType.U16_C1HWNCoC0) \ + .dtype_format(DataType.U16_C1HWNCoC0, DataType.U16_HWCN) \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_Default, DataType.F16_FracNZ) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_Default) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_HWCN) \ + .dtype_format(DataType.F16_FracNZ, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_Default) \ + .dtype_format(DataType.F16_HWCN, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_HWCN, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_HWCN) \ + 
.dtype_format(DataType.F32_Default, DataType.F32_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_Default, DataType.F32_FracNZ) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_Default) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_HWCN) \ + .dtype_format(DataType.F32_FracNZ, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_Default) \ + .dtype_format(DataType.F32_HWCN, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_HWCN, DataType.F32_C1HWNCoC0) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_HWCN) \ + .get_op_info() -@op_info_register("""{ - "op_name": "TransData", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "trans_data.so", - "compute_cost": 10, - "kernel_name": "trans_data", - "partial_flag": true, - "attr": [ - { - "name": "src_format", - "param_type": "required", - "type": "str", - "value": "DefaultFormat,NC1HWC0,FracZ,FRACTAL_NZ,HWCN,C1HWNCoC0" - }, - { - "name": "dst_format", - "param_type": "required", - "type": "str", - "value": "DefaultFormat,NC1HWC0,FracZ,FRACTAL_NZ,HWCN,C1HWNCoC0" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "bool", - "float","float","float","float","float","float","float","float","float","float", - "float16","float16","float16","float16","float16","float16","float16","float16","float16","float16", - "uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16" - ], - "format": [ - "DefaultFormat", - "DefaultFormat","DefaultFormat","DefaultFormat","FracZ","FRACTAL_NZ","NC1HWC0","HWCN","HWCN","C1HWNCoC0","FracZ", - "DefaultFormat","DefaultFormat","DefaultFormat","FracZ","FRACTAL_NZ","NC1HWC0","HWCN","HWCN","C1HWNCoC0","FracZ", - "DefaultFormat","DefaultFormat","DefaultFormat","FracZ","FRACTAL_NZ","NC1HWC0","HWCN","HWCN","C1HWNCoC0","FracZ" - ], - "name": "src", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 
0, - "dtype": [ - "bool", - "float","float","float","float","float","float","float","float","float","float", - "float16","float16","float16","float16","float16","float16","float16","float16","float16","float16", - "uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16","uint16" - ], - "format": [ - "NC1HWC0", - "NC1HWC0","FRACTAL_NZ","FracZ","DefaultFormat","DefaultFormat","DefaultFormat","FracZ","C1HWNCoC0","HWCN","HWCN", - "NC1HWC0","FRACTAL_NZ","FracZ","DefaultFormat","DefaultFormat","DefaultFormat","FracZ","C1HWNCoC0","HWCN","HWCN", - "NC1HWC0","FRACTAL_NZ","FracZ","DefaultFormat","DefaultFormat","DefaultFormat","FracZ","C1HWNCoC0","HWCN","HWCN" - ], - "name": "dst", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(trans_data_op_info) def _trans_data_tbe(): """TransData TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/transpose_d.py b/mindspore/ops/_op_impl/tbe/transpose_d.py index e79a16adeb..fffc95a38f 100644 --- a/mindspore/ops/_op_impl/tbe/transpose_d.py +++ b/mindspore/ops/_op_impl/tbe/transpose_d.py @@ -14,59 +14,32 @@ # ============================================================================ """TransposeD op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +transpose_d_op_info = TBERegOp("Transpose") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("transpose_d.so") \ + .compute_cost(10) \ + .kernel_name("transpose_d") \ + .partial_flag(True) \ + .attr("perm", "optional", "listInt", "all") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.I16_Default, DataType.I16_Default) \ + .dtype_format(DataType.U16_Default, DataType.U16_Default) \ + 
.dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.U32_Default, DataType.U32_Default) \ + .dtype_format(DataType.I64_Default, DataType.I64_Default) \ + .dtype_format(DataType.U64_Default, DataType.U64_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() -@op_info_register("""{ - "op_name": "Transpose", - "imply_type": "TBE", - "fusion_type": "OPAQUE", - "async_flag": false, - "binfile_name": "transpose_d.so", - "compute_cost": 10, - "kernel_name": "transpose_d", - "partial_flag": true, - "attr": [ - { - "name": "perm", - "param_type": "optional", - "type": "listInt", - "value": "all" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16", "float", "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64" - ], - "format": [ - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat", - "DefaultFormat", "DefaultFormat", "DefaultFormat", "DefaultFormat" - ], - "name": "y", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ] -}""") + +@op_info_register(transpose_d_op_info) def _transpose_d_tbe(): """TransposeD TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py b/mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py index 2bc36b9e3d..5dc07dd59f 100644 --- a/mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +++ b/mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py @@ -14,184 +14,33 @@ # 
============================================================================ """UnsortedSegmentSum op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +unsorted_segment_sum_op_info = TBERegOp("UnsortedSegmentSum") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("unsorted_segment_sum_d.so") \ + .compute_cost(10) \ + .kernel_name("unsorted_segment_sum_d") \ + .partial_flag(True) \ + .attr("num_segments", "required", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "segment_ids", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.I8_Default, DataType.I32_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I32_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.U8_Default, DataType.I32_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.I32_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.I32_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.I32_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.I32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.I32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name":"UnsortedSegmentSum", - "imply_type":"TBE", - "fusion_type":"OPAQUE", - "async_flag":false, - "binfile_name":"unsorted_segment_sum_d.so", - "compute_cost":10, - "kernel_name":"unsorted_segment_sum_d", - "partial_flag":true, - "attr":[ - { - "name":"num_segments", - "param_type":"required", - "type":"int", - "value":"all" - } - ], - "inputs":[ - { - "index":0, - "dtype":[ - "float16", - "float16", - "float16", - "float16", - "float", - "float", - "float", - "float", - 
"int8", - "int8", - "int8", - "int8", - "uint8", - "uint8", - "uint8", - "uint8", - "int32", - "int32", - "int32", - "int32" - ], - "format":[ - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat" - ], - "name":"x", - "need_compile":false, - "param_type":"required", - "shape":"all" - }, - { - "index":1, - "dtype":[ - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32", - "int32" - ], - "format":[ - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat" - ], - "name":"segment_ids", - "need_compile":false, - "param_type":"required", - "shape":"all" - } - ], - "outputs":[ - { - "index":0, - "dtype":[ - "float16", - "float16", - "float16", - "float16", - "float", - "float", - "float", - "float", - "int8", - "int8", - "int8", - "int8", - "uint8", - "uint8", - "uint8", - "uint8", - "int32", - "int32", - "int32", - "int32" - ], - "format":[ - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat", - "DefaultFormat", - "NC1HWC0", - "DefaultFormat", - "DefaultFormat" - ], - "name":"y", - "need_compile":false, - 
"param_type":"required", - "shape":"all" - } - ] -}""") + +@op_info_register(unsorted_segment_sum_op_info) def _unsorted_segment_sum_tbe(): """UnsortedSegmentSum TBE register""" return diff --git a/mindspore/ops/_op_impl/tbe/zeros_like.py b/mindspore/ops/_op_impl/tbe/zeros_like.py index 25f48b80a5..144b0c95cb 100644 --- a/mindspore/ops/_op_impl/tbe/zeros_like.py +++ b/mindspore/ops/_op_impl/tbe/zeros_like.py @@ -14,53 +14,33 @@ # ============================================================================ """ZerosLike op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +zeros_like_op_info = TBERegOp("ZerosLike") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("zeros_like.so") \ + .compute_cost(10) \ + .kernel_name("zeros_like") \ + .partial_flag(True) \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.BOOL_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.BOOL_5HD, DataType.BOOL_5HD) \ + .dtype_format(DataType.I8_Default, DataType.I8_Default) \ + .dtype_format(DataType.I8_5HD, DataType.I8_5HD) \ + .dtype_format(DataType.U8_Default, DataType.U8_Default) \ + .dtype_format(DataType.U8_5HD, DataType.U8_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .get_op_info() -@op_info_register("""{ - "op_name": "ZerosLike", - "imply_type": "TBE", - "fusion_type": "ELEMWISE", - "async_flag": false, - "binfile_name": "zeros_like.so", - "compute_cost": 10, - "kernel_name": "zeros_like", - "partial_flag": true, - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - 
"float16","float16","float","float","int32","int32","int8","int8","uint8","uint8","bool","bool" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0", - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "x", - "need_compile": false, - "param_type": "required", - "shape": "all" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float16","float16","float","float","int32","int32","int8","int8","uint8","uint8","bool","bool" - ], - "format": [ - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0", - "DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0","DefaultFormat","NC1HWC0" - ], - "name": "y", - "param_type": "required", - "shape": "all" - } - ] -}""") +@op_info_register(zeros_like_op_info) def _zeros_like_tbe(): """ZerosLike TBE register""" return diff --git a/mindspore/ops/op_info_register.py b/mindspore/ops/op_info_register.py index 0750094e18..52c05a9435 100644 --- a/mindspore/ops/op_info_register.py +++ b/mindspore/ops/op_info_register.py @@ -151,7 +151,7 @@ class RegOp(): fn_list[idx](element) out_dict[key_list[idx]] = element if kwargs: - out_dict = dict(out_dict, kwargs) + out_dict = dict(out_dict, **kwargs) return out_dict def fusion_type(self, fusion_type): @@ -177,7 +177,7 @@ class RegOp(): TypeError: If the type of args is not tuple. 
""" if len(self.inputs) + len(self.outputs) != len(args): - raise ValueError("input size add output size must be equal to detype format size") + raise ValueError("input size add output size must be equal to dtype format size") dtype_format = [] for arg in args: if not isinstance(arg, tuple) or len(arg) != 2: @@ -435,6 +435,9 @@ class DataType(): BOOL_None = ("bool", "") BOOL_Default = ("bool", "DefaultFormat") BOOL_5HD = ("bool", "NC1HWC0") + BOOL_FracZ = ("bool", "FracZ") + BOOL_FracNZ = ("bool", "FRACTAL_NZ") + BOOL_C1HWNCoC0 = ("bool", "C1HWNCoC0") BOOL_NCHW = ("bool", "NCHW") BOOL_NHWC = ("bool", "NHWC") BOOL_HWCN = ("bool", "HWCN") @@ -442,8 +445,9 @@ class DataType(): I8_None = ("int8", "") I8_Default = ("int8", "DefaultFormat") I8_5HD = ("int8", "NC1HWC0") - I8_FracZ = ("int8", "Fracz") + I8_FracZ = ("int8", "FracZ") I8_FracNZ = ("int8", "FRACTAL_NZ") + I8_C1HWNCoC0 = ("int8", "C1HWNCoC0") I8_NCHW = ("int8", "NCHW") I8_NHWC = ("int8", "NHWC") I8_HWCN = ("int8", "HWCN") @@ -451,8 +455,9 @@ class DataType(): U8_None = ("uint8", "") U8_Default = ("uint8", "DefaultFormat") U8_5HD = ("uint8", "NC1HWC0") - U8_FracZ = ("uint8", "Fracz") + U8_FracZ = ("uint8", "FracZ") U8_FracNZ = ("uint8", "FRACTAL_NZ") + U8_C1HWNCoC0 = ("uint8", "C1HWNCoC0") U8_NCHW = ("uint8", "NCHW") U8_NHWC = ("uint8", "NHWC") U8_HWCN = ("uint8", "HWCN") @@ -460,8 +465,9 @@ class DataType(): I16_None = ("int16", "") I16_Default = ("int16", "DefaultFormat") I16_5HD = ("int16", "NC1HWC0") - I16_FracZ = ("int16", "Fracz") + I16_FracZ = ("int16", "FracZ") I16_FracNZ = ("int16", "FRACTAL_NZ") + I16_C1HWNCoC0 = ("int16", "C1HWNCoC0") I16_NCHW = ("int16", "NCHW") I16_NHWC = ("int16", "NHWC") I16_HWCN = ("int16", "HWCN") @@ -469,8 +475,9 @@ class DataType(): U16_None = ("uint16", "") U16_Default = ("uint16", "DefaultFormat") U16_5HD = ("uint16", "NC1HWC0") - U16_FracZ = ("uint16", "Fracz") + U16_FracZ = ("uint16", "FracZ") U16_FracNZ = ("uint16", "FRACTAL_NZ") + U16_C1HWNCoC0 = ("uint16", "C1HWNCoC0") 
U16_NCHW = ("uint16", "NCHW") U16_NHWC = ("uint16", "NHWC") U16_HWCN = ("uint16", "HWCN") @@ -478,8 +485,9 @@ class DataType(): I32_None = ("int32", "") I32_Default = ("int32", "DefaultFormat") I32_5HD = ("int32", "NC1HWC0") - I32_FracZ = ("int32", "Fracz") + I32_FracZ = ("int32", "FracZ") I32_FracNZ = ("int32", "FRACTAL_NZ") + I32_C1HWNCoC0 = ("int32", "C1HWNCoC0") I32_NCHW = ("int32", "NCHW") I32_NHWC = ("int32", "NHWC") I32_HWCN = ("int32", "HWCN") @@ -487,8 +495,9 @@ class DataType(): U32_None = ("uint32", "") U32_Default = ("uint32", "DefaultFormat") U32_5HD = ("uint32", "NC1HWC0") - U32_FracZ = ("uint32", "Fracz") + U32_FracZ = ("uint32", "FracZ") U32_FracNZ = ("uint32", "FRACTAL_NZ") + U32_C1HWNCoC0 = ("uint32", "C1HWNCoC0") U32_NCHW = ("uint32", "NCHW") U32_NHWC = ("uint32", "NHWC") U32_HWCN = ("uint32", "HWCN") @@ -496,8 +505,9 @@ class DataType(): I64_None = ("int64", "") I64_Default = ("int64", "DefaultFormat") I64_5HD = ("int64", "NC1HWC0") - I64_FracZ = ("int64", "Fracz") + I64_FracZ = ("int64", "FracZ") I64_FracNZ = ("int64", "FRACTAL_NZ") + I64_C1HWNCoC0 = ("int64", "C1HWNCoC0") I64_NCHW = ("int64", "NCHW") I64_NHWC = ("int64", "NHWC") I64_HWCN = ("int64", "HWCN") @@ -505,8 +515,9 @@ class DataType(): U64_None = ("uint64", "") U64_Default = ("uint64", "DefaultFormat") U64_5HD = ("uint64", "NC1HWC0") - U64_FracZ = ("uint64", "Fracz") + U64_FracZ = ("uint64", "FracZ") U64_FracNZ = ("uint64", "FRACTAL_NZ") + U64_C1HWNCoC0 = ("uint64", "C1HWNCoC0") U64_NCHW = ("uint64", "NCHW") U64_NHWC = ("uint64", "NHWC") U64_HWCN = ("uint64", "HWCN") @@ -514,7 +525,7 @@ class DataType(): F16_None = ("float16", "") F16_Default = ("float16", "DefaultFormat") F16_5HD = ("float16", "NC1HWC0") - F16_FracZ = ("float16", "Fracz") + F16_FracZ = ("float16", "FracZ") F16_FracNZ = ("float16", "FRACTAL_NZ") F16_C1HWNCoC0 = ("float16", "C1HWNCoC0") F16_NCHW = ("float16", "NCHW") @@ -524,7 +535,7 @@ class DataType(): F32_None = ("float32", "") F32_Default = ("float32", 
"DefaultFormat") F32_5HD = ("float32", "NC1HWC0") - F32_FracZ = ("float32", "Fracz") + F32_FracZ = ("float32", "FracZ") F32_FracNZ = ("float32", "FRACTAL_NZ") F32_C1HWNCoC0 = ("float32", "C1HWNCoC0") F32_NCHW = ("float32", "NCHW") From 791442288cd7dc12b959f5109f566cec0734978c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Tue, 14 Apr 2020 17:21:00 +0800 Subject: [PATCH 224/367] fix format typo in examples --- mindspore/ops/operations/control_ops.py | 4 ++-- mindspore/ops/operations/other_ops.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mindspore/ops/operations/control_ops.py b/mindspore/ops/operations/control_ops.py index d4e8b279ba..30f1e25a34 100644 --- a/mindspore/ops/operations/control_ops.py +++ b/mindspore/ops/operations/control_ops.py @@ -50,7 +50,7 @@ class ControlDepend(Primitive): >>> # step should be increased, so the add operation should depend on the data calculation operation. >>> class Net(nn.Cell): >>> def __init__(self): - >>> super(Net, self).__init__() + >>> super(Net, self).__init__() >>> self.global_step = mindspore.Parameter(initializer(0, [1]), name="global_step") >>> self.rate = 0.2 >>> self.control_depend = P.ControlDepend() @@ -89,7 +89,7 @@ class GeSwitch(PrimitiveWithInfer): Examples: >>> class Net(nn.Cell): >>> def __init__(self): - >>> super(Net, self).__init__() + >>> super(Net, self).__init__() >>> self.square = P.Square() >>> self.add = P.TensorAdd() >>> self.value = Tensor(np.full((1), 3), mindspore.float32) diff --git a/mindspore/ops/operations/other_ops.py b/mindspore/ops/operations/other_ops.py index 4ac7f2e554..b6182f0476 100644 --- a/mindspore/ops/operations/other_ops.py +++ b/mindspore/ops/operations/other_ops.py @@ -39,7 +39,7 @@ class Assign(PrimitiveWithInfer): >>> self.y = mindspore.Parameter(Tensor([1.0], mindspore.float32), name="y") >>> >>> def construct(self, x): - >>> Assign()(self.y, x) + >>> P.Assign()(self.y, x) >>> return x >>> x = 
Tensor([2.0], mindspore.float32) >>> net = Net() From a007e4812bd83f5997fce8fd5b7f5e46c67d88e0 Mon Sep 17 00:00:00 2001 From: maoweiyong Date: Fri, 10 Apr 2020 10:20:37 +0800 Subject: [PATCH 225/367] modify gpu operator information registration --- mindspore/ops/__init__.py | 4 +- mindspore/ops/_op_impl/akg/gpu/cast.py | 50 ++++------------ mindspore/ops/_op_impl/akg/gpu/equal.py | 55 ++++-------------- mindspore/ops/_op_impl/akg/gpu/hsigmoid.py | 44 ++++---------- .../ops/_op_impl/akg/gpu/hsigmoid_grad.py | 55 ++++-------------- mindspore/ops/_op_impl/akg/gpu/hswish.py | 44 ++++---------- mindspore/ops/_op_impl/akg/gpu/hswish_grad.py | 55 ++++-------------- mindspore/ops/_op_impl/akg/gpu/mean.py | 44 ++++---------- mindspore/ops/_op_impl/akg/gpu/mean_grad.py | 50 ++++------------ mindspore/ops/_op_impl/akg/gpu/mul.py | 55 ++++-------------- mindspore/ops/_op_impl/akg/gpu/relu6.py | 44 ++++---------- mindspore/ops/_op_impl/akg/gpu/relu6_grad.py | 55 ++++-------------- mindspore/ops/_op_impl/akg/gpu/squeeze.py | 50 ++++------------ .../ops/_op_impl/akg/gpu/squeeze_grad.py | 56 +++++------------- mindspore/ops/_op_impl/akg/gpu/tile.py | 50 ++++------------ mindspore/ops/op_info_register.py | 58 +++++++++++++++++++ 16 files changed, 225 insertions(+), 544 deletions(-) diff --git a/mindspore/ops/__init__.py b/mindspore/ops/__init__.py index 0e6c114566..01ca039862 100644 --- a/mindspore/ops/__init__.py +++ b/mindspore/ops/__init__.py @@ -30,7 +30,7 @@ Note: from .primitive import Primitive, PrimitiveWithInfer, prim_attr_register from .vm_impl_registry import get_vm_impl_fn, vm_impl_registry -from .op_info_register import op_info_register, AiCPURegOp, TBERegOp, DataType +from .op_info_register import op_info_register, AkgRegOp, AiCPURegOp, TBERegOp, DataType from .primitive import constexpr from .._c_expression import signature_rw, signature_kind @@ -40,6 +40,6 @@ __primitive__ = [ ] __all__ = ["get_vm_impl_fn", "vm_impl_registry", - "op_info_register", "AiCPURegOp", 
"TBERegOp", "DataType", + "op_info_register", "AkgRegOp", "AiCPURegOp", "TBERegOp", "DataType", "constexpr"] __all__.extend(__primitive__) diff --git a/mindspore/ops/_op_impl/akg/gpu/cast.py b/mindspore/ops/_op_impl/akg/gpu/cast.py index fb4b221be6..b9ce4cf464 100644 --- a/mindspore/ops/_op_impl/akg/gpu/cast.py +++ b/mindspore/ops/_op_impl/akg/gpu/cast.py @@ -13,45 +13,19 @@ # limitations under the License. """Cast op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "Cast", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - { - "name": "dst_type", - "param_type": "required", - "type": "str" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float16", "float32" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +cast_op_info = AkgRegOp("Cast") \ + .fusion_type("OPAQUE") \ + .input(0, "x") \ + .output(0, "output") \ + .attr("dst_type", "required", "str") \ + .dtype_format(DataType.F16_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_Default, DataType.F16_Default) \ + .get_op_info() + + +@op_info_register(cast_op_info) def _cast_akg(): """Cast AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/equal.py b/mindspore/ops/_op_impl/akg/gpu/equal.py index c6cffdad24..fa20392411 100644 --- a/mindspore/ops/_op_impl/akg/gpu/equal.py +++ b/mindspore/ops/_op_impl/akg/gpu/equal.py @@ -13,50 +13,19 @@ # limitations under the License. 
"""Equal op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "Equal", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - }, - { - "index": 1, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "y" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "bool", "bool" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +equal_op_info = AkgRegOp("Equal") \ + .fusion_type("OPAQUE") \ + .input(0, "x") \ + .input(1, "y") \ + .output(0, "output") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.BOOL_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.BOOL_Default) \ + .get_op_info() + + +@op_info_register(equal_op_info) def _equal_akg(): """Equal AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py b/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py index 29dd8d6251..31fe332206 100644 --- a/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py +++ b/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py @@ -13,40 +13,18 @@ # limitations under the License. 
"""HSigmoid op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "HSigmoid", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +hsigmoid_op_info = AkgRegOp("HSigmoid") \ + .fusion_type("OPAQUE") \ + .input(0, "x") \ + .output(0, "output") \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .get_op_info() + + +@op_info_register(hsigmoidgrad_op_info) def _hsigmoid_akg(): """HSigmoid AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/hsigmoid_grad.py b/mindspore/ops/_op_impl/akg/gpu/hsigmoid_grad.py index d29df9c946..39b819138e 100644 --- a/mindspore/ops/_op_impl/akg/gpu/hsigmoid_grad.py +++ b/mindspore/ops/_op_impl/akg/gpu/hsigmoid_grad.py @@ -13,50 +13,19 @@ # limitations under the License. 
"""HSigmoidGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "HSigmoidGrad", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "y_grad" - }, - { - "index": 1, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +hsigmoidgrad_op_info = AkgRegOp("HSigmoidGrad") \ + .fusion_type("OPAQUE") \ + .input(0, "y_grad") \ + .input(1, "x") \ + .output(0, "output") \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .get_op_info() + + +@op_info_register(hsigmoidgrad_op_info) def _hsigmoid_grad_akg(): """HSigmoidGrad AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/hswish.py b/mindspore/ops/_op_impl/akg/gpu/hswish.py index 619575920f..17364de6cb 100644 --- a/mindspore/ops/_op_impl/akg/gpu/hswish.py +++ b/mindspore/ops/_op_impl/akg/gpu/hswish.py @@ -13,40 +13,18 @@ # limitations under the License. 
"""HSwish op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "HSwish", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +hswish_op_info = AkgRegOp("HSwish") \ + .fusion_type("OPAQUE") \ + .input(0, "x") \ + .output(0, "output") \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .get_op_info() + + +@op_info_register(hsigmoidgrad_op_info) def _hswish_akg(): """HSwish AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py b/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py index 6d3556b969..503dd9a5f1 100644 --- a/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py +++ b/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py @@ -13,50 +13,19 @@ # limitations under the License. 
"""HSwishGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "HSwishGrad", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "y_grad" - }, - { - "index": 1, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +hswishgrad_op_info = AkgRegOp("HSwishGrad") \ + .fusion_type("OPAQUE") \ + .input(0, "y_grad") \ + .input(1, "x") \ + .output(0, "output") \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .get_op_info() + + +@op_info_register(hsigmoidgrad_op_info) def _hswish_grad_akg(): """HSwishGrad AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/mean.py b/mindspore/ops/_op_impl/akg/gpu/mean.py index 244af290bb..b46b701b91 100644 --- a/mindspore/ops/_op_impl/akg/gpu/mean.py +++ b/mindspore/ops/_op_impl/akg/gpu/mean.py @@ -13,40 +13,18 @@ # limitations under the License. 
"""SimpleMean op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "SimpleMean", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +mean_op_info = AkgRegOp("SimpleMean") \ + .fusion_type("OPAQUE") \ + .input(0, "x") \ + .output(0, "output") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(mean_op_info) def _simple_mean_akg(): """SimpleMean AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/mean_grad.py b/mindspore/ops/_op_impl/akg/gpu/mean_grad.py index 27c0674632..e3e0121c20 100644 --- a/mindspore/ops/_op_impl/akg/gpu/mean_grad.py +++ b/mindspore/ops/_op_impl/akg/gpu/mean_grad.py @@ -13,45 +13,19 @@ # limitations under the License. 
"""SimpleMeanGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "SimpleMeanGrad", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - { - "name": "input_shape", - "param_type": "required", - "type": "listInt" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "HEAD" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +mean_grad_op_info = AkgRegOp("SimpleMeanGrad") \ + .fusion_type("OPAQUE") \ + .input(0, "HEAD") \ + .output(0, "output") \ + .attr("input_shape", "required", "listInt") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(mean_grad_op_info) def _simple_mean_grad_akg(): """SimpleMeanGrad AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/mul.py b/mindspore/ops/_op_impl/akg/gpu/mul.py index d9e1a0b5d6..db5b1460ed 100644 --- a/mindspore/ops/_op_impl/akg/gpu/mul.py +++ b/mindspore/ops/_op_impl/akg/gpu/mul.py @@ -13,50 +13,19 @@ # limitations under the License. 
"""Mul op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "Mul", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - }, - { - "index": 1, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "y" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +mul_op_info = AkgRegOp("Mul") \ + .fusion_type("OPAQUE") \ + .input(0, "x") \ + .input(1, "y") \ + .output(0, "output") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(mul_op_info) def _mul_akg(): """Mul AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/relu6.py b/mindspore/ops/_op_impl/akg/gpu/relu6.py index 0de0a7e400..31bfebcd8d 100644 --- a/mindspore/ops/_op_impl/akg/gpu/relu6.py +++ b/mindspore/ops/_op_impl/akg/gpu/relu6.py @@ -13,40 +13,18 @@ # limitations under the License. 
"""ReLU6 op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "ReLU6", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +relu_op_info = AkgRegOp("ReLU6") \ + .fusion_type("OPAQUE") \ + .input(0, "x") \ + .output(0, "output") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(relu_op_info) def _relu6_akg(): """ReLU6 AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/relu6_grad.py b/mindspore/ops/_op_impl/akg/gpu/relu6_grad.py index 4d3c5e9a00..83d93f3077 100644 --- a/mindspore/ops/_op_impl/akg/gpu/relu6_grad.py +++ b/mindspore/ops/_op_impl/akg/gpu/relu6_grad.py @@ -13,50 +13,19 @@ # limitations under the License. 
"""ReLU6Grad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "ReLU6Grad", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "y_grad" - }, - { - "index": 1, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +relu_grad_op_info = AkgRegOp("ReLU6Grad") \ + .fusion_type("OPAQUE") \ + .input(0, "y_grad") \ + .input(1, "x") \ + .output(0, "output") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(relu_grad_op_info) def _relu6_grad_akg(): """ReLU6Grad AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/squeeze.py b/mindspore/ops/_op_impl/akg/gpu/squeeze.py index 9e766cdfd7..378d096d5c 100644 --- a/mindspore/ops/_op_impl/akg/gpu/squeeze.py +++ b/mindspore/ops/_op_impl/akg/gpu/squeeze.py @@ -13,45 +13,19 @@ # limitations under the License. 
"""Squeeze op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "Squeeze", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - { - "name": "axis", - "param_type": "optional", - "type": "listInt" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +squeeze_op_info = AkgRegOp("SqueezeGrad") \ + .fusion_type("OPAQUE") \ + .input(0, "x") \ + .output(0, "output") \ + .attr("axis", "optional", "listInt") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(squeeze_op_info) def _squeeze_akg(): """Squeeze AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py b/mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py index 7584bd05f9..ef397ea0a7 100644 --- a/mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py +++ b/mindspore/ops/_op_impl/akg/gpu/squeeze_grad.py @@ -13,50 +13,20 @@ # limitations under the License. 
"""SqueezeGrad op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "SqueezeGrad", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - { - "name": "x_shape", - "param_type": "required", - "type": "listInt" - }, - { - "name": "axis", - "param_type": "optional", - "type": "listInt" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "y_grad" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +squeeze_grad_op_info = AkgRegOp("SqueezeGrad") \ + .fusion_type("OPAQUE") \ + .input(0, "y_grad") \ + .output(0, "output") \ + .attr("x_shape", "required", "listInt") \ + .attr("axis", "optional", "listInt") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(squeeze_grad_op_info) def _squeeze_grad_akg(): """SqueezeGrad AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/tile.py b/mindspore/ops/_op_impl/akg/gpu/tile.py index f110e9314e..8c9de00979 100644 --- a/mindspore/ops/_op_impl/akg/gpu/tile.py +++ b/mindspore/ops/_op_impl/akg/gpu/tile.py @@ -13,45 +13,19 @@ # limitations under the License. 
"""Tile op""" -from mindspore.ops.op_info_register import op_info_register +from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -@op_info_register("""{ - "op_name": "Tile", - "imply_type": "AutoDiff", - "fusion_type": "OPAQUE", - "processor": "cuda", - "attr": [ - { - "name": "multiples", - "param_type": "required", - "type": "listInt" - } - ], - "inputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "x" - } - ], - "outputs": [ - { - "index": 0, - "dtype": [ - "float32", "float16" - ], - "format": [ - "DefaultFormat", "DefaultFormat" - ], - "name": "output" - } - ] -}""") +tile_op_info = AkgRegOp("Tile") \ + .fusion_type("OPAQUE") \ + .input(0, "x") \ + .output(0, "output") \ + .attr("multiples", "required", "listInt") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(tile_op_info) def _tile_akg(): """Tile AutoDiff register""" return diff --git a/mindspore/ops/op_info_register.py b/mindspore/ops/op_info_register.py index 0750094e18..daf3192d4f 100644 --- a/mindspore/ops/op_info_register.py +++ b/mindspore/ops/op_info_register.py @@ -205,6 +205,64 @@ class RegOp(): return op_info +class AkgRegOp(RegOp): + """Class for Akg op info register""" + + def __init__(self, op_name): + super(AkgRegOp, self).__init__(op_name) + self.imply_type = "AutoDiff" + self.processor = "cuda" + + def input(self, index=None, name=None, **kwargs): + """ + Register Akg op input information. + + Args: + index (int): Order of the input. Default: None. + name (str): Name of the input. Default: None. + kwargs (dict): Other information for the input. 
+ """ + param_list = [index, name] + key_list = ["index", "name"] + fn_list = [self._is_int, self._is_string] + input_dict = self._check_param(param_list, key_list, fn_list, kwargs) + self.inputs.append(input_dict) + return self + + def output(self, index=None, name=None, **kwargs): + """ + Register Akg op output information. + + Args: + index (int): Order of the output. Default: None. + name (str): Name of the output. Default: None. + kwargs (dict): Other information for the output. + """ + param_list = [index, name] + key_list = ["index", "name"] + fn_list = [self._is_int, self._is_string] + output_dict = self._check_param(param_list, key_list, fn_list, kwargs) + self.outputs.append(output_dict) + return self + + def attr(self, name=None, param_type=None, value_type=None, **kwargs): + """ + Register Akg op attribute information. + + Args: + name (str): Name of the attribute. Default: None. + param_type (str): Param type of the attribute. Default: None. + value_type (str): Value type of the attribute. Default: None. + kwargs (dict): Other information for the attribute. 
+ """ + param_list = [name, param_type, value_type] + key_list = ["name", "param_type", "type"] + fn_list = [self._is_string] + attr_dict = self._check_param(param_list, key_list, fn_list, kwargs) + self.attr_.append(attr_dict) + return self + + class AiCPURegOp(RegOp): """Class for AiCPU op info register""" From d248b05a985e76aff8feebdede7dca250ac36c86 Mon Sep 17 00:00:00 2001 From: VectorSL Date: Tue, 14 Apr 2020 19:41:00 +0800 Subject: [PATCH 226/367] gpu add kernel select --- .../kernel/gpu/arrays/select_gpu_kernel.cc | 43 +++++++++ .../kernel/gpu/arrays/select_gpu_kernel.h | 95 +++++++++++++++++++ .../ccsrc/kernel/gpu/cuda_impl/select_impl.cu | 42 ++++++++ .../kernel/gpu/cuda_impl/select_impl.cuh | 25 +++++ tests/st/ops/gpu/test_select_op.py | 47 +++++++++ 5 files changed, 252 insertions(+) create mode 100644 mindspore/ccsrc/kernel/gpu/arrays/select_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/arrays/select_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/select_impl.cu create mode 100644 mindspore/ccsrc/kernel/gpu/cuda_impl/select_impl.cuh create mode 100644 tests/st/ops/gpu/test_select_op.py diff --git a/mindspore/ccsrc/kernel/gpu/arrays/select_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/arrays/select_gpu_kernel.cc new file mode 100644 index 0000000000..41c9c2243f --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/arrays/select_gpu_kernel.cc @@ -0,0 +1,43 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/arrays/select_gpu_kernel.h" + +namespace mindspore { +namespace kernel { +MS_REG_GPU_KERNEL_ONE(Select, + KernelAttr() + .AddInputAttr(kNumberTypeBool) + .AddInputAttr(kNumberTypeFloat32) + .AddInputAttr(kNumberTypeFloat32) + .AddOutputAttr(kNumberTypeFloat32), + SelectGpuKernel, float) +MS_REG_GPU_KERNEL_ONE(Select, + KernelAttr() + .AddInputAttr(kNumberTypeBool) + .AddInputAttr(kNumberTypeFloat16) + .AddInputAttr(kNumberTypeFloat16) + .AddOutputAttr(kNumberTypeFloat16), + SelectGpuKernel, half) +MS_REG_GPU_KERNEL_ONE(Select, + KernelAttr() + .AddInputAttr(kNumberTypeBool) + .AddInputAttr(kNumberTypeInt32) + .AddInputAttr(kNumberTypeInt32) + .AddOutputAttr(kNumberTypeInt32), + SelectGpuKernel, int) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/arrays/select_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/arrays/select_gpu_kernel.h new file mode 100644 index 0000000000..ba0bea4dee --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/arrays/select_gpu_kernel.h @@ -0,0 +1,95 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_SELECT_GPU_KERNEL_H +#define MINDSPORE_CCSRC_KERNEL_GPU_SELECT_GPU_KERNEL_H + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" +#include "kernel/gpu/cuda_impl/select_impl.cuh" + +namespace mindspore { +namespace kernel { +template +class SelectGpuKernel : public GpuKernel { + public: + SelectGpuKernel() : input_size_(0), output_size_(0) {} + ~SelectGpuKernel() override = default; + const std::vector &GetInputSizeList() const override { return input_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } + + bool Launch(const std::vector &inputs, const std::vector &, + const std::vector &outputs, uintptr_t stream_ptr) override { + bool *input_cond = GetDeviceAddress(inputs, 0); + T *input_x = GetDeviceAddress(inputs, 1); + T *input_y = GetDeviceAddress(inputs, 2); + T *output = GetDeviceAddress(outputs, 0); + CalSelect(output_size_ / sizeof(T), input_cond, input_x, input_y, output, + reinterpret_cast(stream_ptr)); + return true; + } + + bool Init(const CNodePtr &kernel_node) override { + if (!CheckParam(kernel_node)) { + return false; + } + auto shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + input_size_ = sizeof(bool); + output_size_ = sizeof(T); + for (size_t x : shape) { + input_size_ = input_size_ * x; + output_size_ = output_size_ * x; + } + InitSizeLists(); + return true; + } + + protected: + void InitSizeLists() override { + input_size_list_.push_back(input_size_); + input_size_list_.push_back(output_size_); + input_size_list_.push_back(output_size_); + output_size_list_.push_back(output_size_); + } + + private: + bool CheckParam(const CNodePtr &kernel_node) { + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 3) { + MS_LOG(ERROR) << "Input number is " << input_num << ", but SelectGpuKernel needs 3 
output."; + return false; + } + size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node); + if (output_num != 1) { + MS_LOG(ERROR) << "Output number is " << output_num << ", but SelectGpuKernel needs 1 output."; + return false; + } + return true; + } + + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; + + size_t input_size_; + size_t output_size_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_SELECT_GPU_KERNEL_H diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/select_impl.cu b/mindspore/ccsrc/kernel/gpu/cuda_impl/select_impl.cu new file mode 100644 index 0000000000..f07a820e75 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/select_impl.cu @@ -0,0 +1,42 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include "kernel/gpu/cuda_impl/select_impl.cuh" + +template +__global__ void Select(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output) { + for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) { + output[pos] = cond[pos] ? 
input_x[pos] : input_y[pos]; + } + return; +} + +template +void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output, + cudaStream_t cuda_stream) { + Select<<>>(size, cond, input_x, input_y, output); + return; +} + +template void CalSelect(const size_t size, const bool* cond, const float* input_X, const float* input_y, + float* output, cudaStream_t cuda_stream); +template void CalSelect(const size_t size, const bool* cond, const int* input_X, const int* input_y, int* output, + cudaStream_t cuda_stream); +template void CalSelect(const size_t size, const bool* cond, const half* input_X, const half* input_y, + half* output, cudaStream_t cuda_stream); diff --git a/mindspore/ccsrc/kernel/gpu/cuda_impl/select_impl.cuh b/mindspore/ccsrc/kernel/gpu/cuda_impl/select_impl.cuh new file mode 100644 index 0000000000..da2d7d9a7f --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/cuda_impl/select_impl.cuh @@ -0,0 +1,25 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SELECT_IMPL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SELECT_IMPL_H_ + +#include "device/gpu/cuda_common.h" + +template +void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output, + cudaStream_t cuda_stream); +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SELECT_IMPL_H_ diff --git a/tests/st/ops/gpu/test_select_op.py b/tests/st/ops/gpu/test_select_op.py new file mode 100644 index 0000000000..5cac6a6ad3 --- /dev/null +++ b/tests/st/ops/gpu/test_select_op.py @@ -0,0 +1,47 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +import pytest +from mindspore import Tensor +from mindspore.ops import operations as P +import mindspore.nn as nn +import numpy as np +import mindspore.context as context + + +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.select = P.Select() + + def construct(self, cond, x, y): + return self.select(cond, x, y) + +cond = np.array([[True, False], [True, False]]).astype(np.bool) +x = np.array([[1.2, 1], [1, 0]]).astype(np.float32) +y = np.array([[1, 2], [3, 4.0]]).astype(np.float32) + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_select(): + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + select = Net() + output = select(Tensor(cond), Tensor(x), Tensor(y)) + expect = [[1.2, 2], [1, 4.0]] + error = np.ones(shape=[2, 2]) * 1.0e-6 + diff = output.asnumpy() - expect + assert np.all(diff < error) + assert np.all(-diff < error) From 9e372073e2e0718dddfa6b291af4f001d3edd7de Mon Sep 17 00:00:00 2001 From: VectorSL Date: Tue, 14 Apr 2020 20:11:44 +0800 Subject: [PATCH 227/367] gpu add assigin --- .../kernel/gpu/other/assign_gpu_kernel.cc | 33 +++++++ .../kernel/gpu/other/assign_gpu_kernel.h | 93 +++++++++++++++++++ tests/st/ops/gpu/test_assign_op.py | 50 ++++++++++ 3 files changed, 176 insertions(+) create mode 100644 mindspore/ccsrc/kernel/gpu/other/assign_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/other/assign_gpu_kernel.h create mode 100644 tests/st/ops/gpu/test_assign_op.py diff --git a/mindspore/ccsrc/kernel/gpu/other/assign_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/other/assign_gpu_kernel.cc new file mode 100644 index 0000000000..0f3e0c95f4 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/other/assign_gpu_kernel.cc @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not 
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/other/assign_gpu_kernel.h" + +namespace mindspore { +namespace kernel { +MS_REG_GPU_KERNEL_ONE( + Assign, + KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32), + AssignGpuKernel, float) +MS_REG_GPU_KERNEL_ONE( + Assign, + KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16), + AssignGpuKernel, half) +MS_REG_GPU_KERNEL_ONE( + Assign, KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32), + AssignGpuKernel, int) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/other/assign_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/other/assign_gpu_kernel.h new file mode 100644 index 0000000000..1c1cde4fd4 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/other/assign_gpu_kernel.h @@ -0,0 +1,93 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_ASSIGN_GPU_KERNEL_H +#define MINDSPORE_CCSRC_KERNEL_GPU_ASSIGN_GPU_KERNEL_H + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +template +class AssignGpuKernel : public GpuKernel { + public: + AssignGpuKernel() : input_size_(0) {} + ~AssignGpuKernel() override = default; + const std::vector &GetInputSizeList() const override { return input_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } + + bool Launch(const std::vector &inputs, const std::vector &, + const std::vector &outputs, uintptr_t stream_ptr) override { + T *var = GetDeviceAddress(inputs, 0); + T *value = GetDeviceAddress(inputs, 1); + T *output = GetDeviceAddress(outputs, 0); + CHECK_CUDA_RET_WITH_EXCEPT( + cudaMemcpyAsync(var, value, input_size_, cudaMemcpyDeviceToDevice, reinterpret_cast(stream_ptr)), + "cudaMemxcpyAsync failed."); + CHECK_CUDA_RET_WITH_EXCEPT( + cudaMemcpyAsync(output, value, input_size_, cudaMemcpyDeviceToDevice, reinterpret_cast(stream_ptr)), + "cudaMemxcpyAsync failed."); + return true; + } + + bool Init(const CNodePtr &kernel_node) override { + if (!CheckParam(kernel_node)) { + return false; + } + auto shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); + input_size_ = sizeof(T); + for (size_t x : shape) { + input_size_ = input_size_ * x; + } + InitSizeLists(); + return true; + } + + protected: + void InitSizeLists() override { + input_size_list_.push_back(input_size_); + input_size_list_.push_back(input_size_); + output_size_list_.push_back(input_size_); + } + + private: + bool CheckParam(const CNodePtr &kernel_node) { + size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); + if (input_num != 
2) { + MS_LOG(ERROR) << "Input number is " << input_num << ", but AssignGpuKernel needs 2 output."; + return false; + } + size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node); + if (output_num != 1) { + MS_LOG(ERROR) << "Output number is " << output_num << ", but AssignGpuKernel needs 1 output."; + return false; + } + return true; + } + + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; + + size_t input_size_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_ASSIGN_GPU_KERNEL_H diff --git a/tests/st/ops/gpu/test_assign_op.py b/tests/st/ops/gpu/test_assign_op.py new file mode 100644 index 0000000000..4cf730d763 --- /dev/null +++ b/tests/st/ops/gpu/test_assign_op.py @@ -0,0 +1,50 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +import pytest +from mindspore import Tensor +from mindspore.ops import operations as P +import mindspore.nn as nn +import numpy as np +import mindspore.context as context + + +class Net(nn.Cell): + def __init__(self): + super(Net, self).__init__() + self.assign = P.Assign() + + def construct(self, var, value): + return self.assign(var, value) + +x = np.array([[1.2, 1], [1, 0]]).astype(np.float32) +value = np.array([[1, 2], [3, 4.0]]).astype(np.float32) + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_assign(): + context.set_context(mode=context.GRAPH_MODE, device_target="GPU") + assign = Net() + var = Tensor(x) + output = assign(var, Tensor(value)) + + error = np.ones(shape=[2, 2]) * 1.0e-6 + diff1 = output.asnumpy() - value + diff2 = var.asnumpy() - value + assert np.all(diff1 < error) + assert np.all(-diff1 < error) + assert np.all(diff2 < error) + assert np.all(-diff2 < error) From 48f90eb7bcb1b6a68f2cdafa8be66b8d6e023c13 Mon Sep 17 00:00:00 2001 From: dengwentao Date: Tue, 14 Apr 2020 16:14:20 +0800 Subject: [PATCH 228/367] add custom op st to ci --- tests/st/ops/custom_ops_tbe/cus_square.py | 2 +- tests/st/ops/custom_ops_tbe/test_square.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/st/ops/custom_ops_tbe/cus_square.py b/tests/st/ops/custom_ops_tbe/cus_square.py index 6a9e769f51..d006f75b4c 100644 --- a/tests/st/ops/custom_ops_tbe/cus_square.py +++ b/tests/st/ops/custom_ops_tbe/cus_square.py @@ -24,7 +24,7 @@ class CusSquare(PrimitiveWithInfer): def __init__(self): """init CusSquare""" self.init_prim_io_names(inputs=['x'], outputs=['y']) - from .square_impl import CusSquare + from square_impl import CusSquare def vm_impl(self, x): x = x.asnumpy() diff --git a/tests/st/ops/custom_ops_tbe/test_square.py b/tests/st/ops/custom_ops_tbe/test_square.py index c67edae307..d8439000f8 100644 --- 
a/tests/st/ops/custom_ops_tbe/test_square.py +++ b/tests/st/ops/custom_ops_tbe/test_square.py @@ -16,7 +16,7 @@ import numpy as np import mindspore.nn as nn import mindspore.context as context from mindspore import Tensor -from .cus_square import CusSquare +from cus_square import CusSquare import pytest context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") @@ -32,6 +32,7 @@ class Net(nn.Cell): @pytest.mark.level0 @pytest.mark.platform_x86_ascend_training +@pytest.mark.platform_arm_ascend_training @pytest.mark.env_onecard def test_net(): x = np.array([1.0, 4.0, 9.0]).astype(np.float32) From 3ea3d9e5a433e47196b9dc087bd00a50bdcf1163 Mon Sep 17 00:00:00 2001 From: ZPaC Date: Mon, 13 Apr 2020 21:04:33 +0800 Subject: [PATCH 229/367] 1.GPU supports multiple streams. 2.GPU commnication stream and compute stream overlap. --- .../ccsrc/device/gpu/gpu_device_manager.cc | 20 +- .../ccsrc/device/gpu/gpu_device_manager.h | 16 +- .../ccsrc/device/gpu/gpu_stream_assign.cc | 181 ++++++++++++++++++ .../ccsrc/device/gpu/gpu_stream_assign.h | 73 +++++++ .../ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h | 25 +-- mindspore/ccsrc/session/gpu_session.cc | 8 + mindspore/ccsrc/session/gpu_session.h | 2 + mindspore/ccsrc/utils/utils.h | 2 + 8 files changed, 304 insertions(+), 23 deletions(-) create mode 100644 mindspore/ccsrc/device/gpu/gpu_stream_assign.cc create mode 100644 mindspore/ccsrc/device/gpu/gpu_stream_assign.h diff --git a/mindspore/ccsrc/device/gpu/gpu_device_manager.cc b/mindspore/ccsrc/device/gpu/gpu_device_manager.cc index 59c8fde5a2..b25ba2906b 100644 --- a/mindspore/ccsrc/device/gpu/gpu_device_manager.cc +++ b/mindspore/ccsrc/device/gpu/gpu_device_manager.cc @@ -25,7 +25,7 @@ namespace device { namespace gpu { void GPUDeviceManager::InitDevice() { CHECK_OP_RET_WITH_EXCEPT(CudaDriver::set_current_device(SizeToInt(cur_dev_id_)), "Failed to set current device id"); - CHECK_OP_RET_WITH_EXCEPT(CudaDriver::CreateStream(&stream_), "Failed to create CUDA stream."); + 
CHECK_OP_RET_WITH_EXCEPT(CreateStream(&default_stream_), "Failed to create CUDA stream."); CHECK_CUDNN_RET_WITH_EXCEPT(cudnnCreate(&cudnn_handle_), "Failed to create cuDNN handle"); CHECK_CUDNN_RET_WITH_EXCEPT(cudnnSetStream(cudnn_handle_, reinterpret_cast(default_stream())), "Failed to set stream for cuDNN handle."); @@ -36,19 +36,27 @@ void GPUDeviceManager::InitDevice() { } void GPUDeviceManager::ReleaseDevice() { - if (stream_ != nullptr) { - CHECK_OP_RET_WITH_ERROR(CudaDriver::DestroyStream(stream_), "Failed to destroy cuda stream."); + for (DeviceStream stream : gpu_streams_) { + if (stream != nullptr) { + CHECK_OP_RET_WITH_ERROR(CudaDriver::DestroyStream(stream), "Failed to destroy CUDA stream."); + } } if (cudnn_handle_ != nullptr) { - CHECK_CUDNN_RET_WITH_ERROR(cudnnDestroy(cudnn_handle_), "Failed to destroy cudnn handle"); + CHECK_CUDNN_RET_WITH_ERROR(cudnnDestroy(cudnn_handle_), "Failed to destroy cuDNN handle"); } if (cublas_handle_ != nullptr) { - CHECK_CUBLAS_RET_WITH_ERROR(cublasDestroy(cublas_handle_), "Failed to destroy cublas handle."); + CHECK_CUBLAS_RET_WITH_ERROR(cublasDestroy(cublas_handle_), "Failed to destroy cuBLAS handle."); } CHECK_OP_RET_WITH_ERROR(GPUMemoryAllocator::GetInstance().Finalize(), "Failed to destroy gpu memory allocator"); } -const DeviceStream& GPUDeviceManager::default_stream() const { return stream_; } +bool GPUDeviceManager::CreateStream(DeviceStream* stream) { + CHECK_OP_RET_WITH_EXCEPT(CudaDriver::CreateStream(stream), "Failed to create CUDA stream"); + gpu_streams_.emplace_back(*stream); + return true; +} + +const DeviceStream& GPUDeviceManager::default_stream() const { return default_stream_; } int GPUDeviceManager::device_count() const { return CudaDriver::device_count(); } diff --git a/mindspore/ccsrc/device/gpu/gpu_device_manager.h b/mindspore/ccsrc/device/gpu/gpu_device_manager.h index 6bfaf85673..3b3d2aecb5 100644 --- a/mindspore/ccsrc/device/gpu/gpu_device_manager.h +++ 
b/mindspore/ccsrc/device/gpu/gpu_device_manager.h @@ -19,6 +19,7 @@ #include #include +#include #include #include "device/gpu/cuda_driver.h" #include "device/gpu/gpu_memory_allocator.h" @@ -36,13 +37,15 @@ class GPUDeviceManager { uint32_t cur_device_id() const; bool is_device_id_init() const; + bool CreateStream(DeviceStream* stream); + bool SyncStream(const DeviceStream& stream) const; const DeviceStream& default_stream() const; + const cudnnHandle_t& GetCudnnHandle() const; const cublasHandle_t& GetCublasHandle() const; bool CopyDeviceMemToHost(const HostMemPtr& dst, const DeviceMemPtr& src, size_t size) const; bool CopyHostMemToDevice(const DeviceMemPtr& dst, const void* src, size_t size) const; - bool SyncStream(const DeviceStream& stream) const; static GPUDeviceManager& GetInstance() { static GPUDeviceManager instance; @@ -55,13 +58,16 @@ class GPUDeviceManager { GPUDeviceManager(const GPUDeviceManager&) = delete; GPUDeviceManager& operator=(const GPUDeviceManager&) = delete; - // default cuda stream used for all the kernels. - DeviceStream stream_{nullptr}; + // default CUDA stream used for all the kernels. + DeviceStream default_stream_{nullptr}; + + // all gpu CUDA streams including default_stream_. + std::vector gpu_streams_; - // handle used for cudnn kernels. + // handle used for cuDNN kernels. cudnnHandle_t cudnn_handle_{nullptr}; - // handle used for cublas kernels. + // handle used for cuBLAS kernels. cublasHandle_t cublas_handle_{nullptr}; bool dev_id_init_; diff --git a/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc b/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc new file mode 100644 index 0000000000..39d5ca3fe6 --- /dev/null +++ b/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc @@ -0,0 +1,181 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include "device/gpu/gpu_common.h" +#include "device/gpu/kernel_info_setter.h" +#include "device/gpu/gpu_device_manager.h" +#include "device/gpu/gpu_stream_assign.h" + +namespace mindspore { +namespace device { +namespace gpu { +void AssignGpuStream(const std::shared_ptr &kernel_graph) { + MS_EXCEPTION_IF_NULL(kernel_graph); + std::vector allreduce_cnodes; + auto execution_kernels = kernel_graph->execution_order(); + for (auto kernel : execution_kernels) { + std::string kernel_name = AnfAlgo::GetCNodeName(kernel); + if (kernel_name == kAllReduceOpName) { + allreduce_cnodes.emplace_back(kernel); + } + } + if (allreduce_cnodes.size() > 1) { + DeviceStream comm_stream = nullptr; + GPUDeviceManager::GetInstance().CreateStream(&comm_stream); + std::transform(allreduce_cnodes.begin(), allreduce_cnodes.end(), allreduce_cnodes.begin(), [&](CNodePtr node) { + AnfAlgo::SetNodeAttr("stream_id", MakeValue(reinterpret_cast(comm_stream)), node); + return node; + }); + + std::vector send_recv_pairs; + FindAllReduceStreamSwitchPos(kernel_graph, &send_recv_pairs); + InsertStreamSwitchNode(kernel_graph, send_recv_pairs); + } +} + +void FindAllReduceStreamSwitchPos(const std::shared_ptr &kernel_graph, + std::vector *send_recv_pairs) { + auto execution_kernels = kernel_graph->execution_order(); + std::vector::iterator iter, iter_begin; + iter = iter_begin = execution_kernels.begin(); + std::vector::iterator iter_end = execution_kernels.end(); + for (; iter != execution_kernels.end(); ++iter) { + std::string kernel_name = 
AnfAlgo::GetCNodeName(*iter); + if (kernel_name == kAllReduceOpName) { + // Find AllReduce node's last input node. + std::vector::iterator mock_send_node_iter = + FindSendNodePos(iter_begin, iter + 1, *iter, kAllReduceStreamSwitch); + if (mock_send_node_iter == iter + 1) { + MS_LOG(WARNING) << "Can't find send node place before AllReduce node."; + continue; + } + SendRecvPair pair1 = {kAllReduceStreamSwitch, *mock_send_node_iter, *iter, + IntToSize(mock_send_node_iter - iter_begin + 1), IntToSize(iter - iter_begin)}; + send_recv_pairs->push_back(pair1); + // Find node which uses AllReduce as input[0]. + std::vector::iterator mock_recv_node_iter = + FindRecvNodePos(iter, iter_end, *iter, kAllReduceStreamSwitch); + if (mock_recv_node_iter == iter_end) { + MS_LOG(WARNING) << "Can't find send node place before AllReduce node."; + continue; + } + SendRecvPair pair2 = {kAllReduceStreamSwitch, *iter, *mock_recv_node_iter, IntToSize(iter - iter_begin + 1), + IntToSize(mock_recv_node_iter - iter_begin)}; + send_recv_pairs->push_back(pair2); + } + } +} + +std::vector::iterator FindSendNodePos(std::vector::iterator begin, + std::vector::iterator end, const CNodePtr mock_recv_node, + StreamSwitchType stream_switch_type) { + MS_EXCEPTION_IF_NULL(mock_recv_node); + if (stream_switch_type == kAllReduceStreamSwitch) { + for (auto iter = begin; iter != end; iter++) { + if (*(iter + 1) == mock_recv_node) { + return iter; + } + } + } + return end; +} + +std::vector::iterator FindRecvNodePos(std::vector::iterator begin, + std::vector::iterator end, const CNodePtr mock_send_node, + StreamSwitchType stream_switch_type) { + MS_EXCEPTION_IF_NULL(mock_send_node); + for (auto iter = begin; iter != end; iter++) { + auto node = *iter; + if (stream_switch_type == kAllReduceStreamSwitch) { + for (auto input : node->inputs()) { + if (mock_send_node == AnfAlgo::VisitKernel(input, 0).first) { + return iter; + } + } + } + } + return end; +} + +void InsertStreamSwitchNode(const std::shared_ptr 
&kernel_graph, + const std::vector &send_recv_pairs) { + std::set ordered_stream_switch_nodes; + for (SendRecvPair pair : send_recv_pairs) { + StreamSwitchType stream_switch_type = pair.stream_switch_type; + CNodePtr mock_send_node = pair.mock_send_node; + CNodePtr mock_recv_node = pair.mock_recv_node; + size_t send_node_offset = pair.send_node_offset; + size_t recv_node_offset = pair.recv_node_offset; + CNodePtr send_node = nullptr; + CNodePtr recv_node = nullptr; + // Step 1: generate Send and Recv CNodes. + if (stream_switch_type == kAllReduceStreamSwitch) { + if (!GenSendRecvCNodesForAllReduce(kernel_graph, mock_send_node, mock_recv_node, &send_node, &recv_node)) { + MS_LOG(EXCEPTION) << "Generating CNodes for send and recv failed. Stream switch type: kAllReduceStreamSwitch"; + } + } + // Step 2: sort send and recv CNodes by offset. + ordered_stream_switch_nodes.insert({send_node_offset, send_node}); + ordered_stream_switch_nodes.insert({recv_node_offset, recv_node}); + } + // Step 3: insert stream switch CNodes into execution kernel list. 
+ auto execution_kernels = kernel_graph->execution_order(); + for (auto node = ordered_stream_switch_nodes.begin(); node != ordered_stream_switch_nodes.end(); node++) { + execution_kernels.insert(execution_kernels.begin() + node->offset, node->cnode); + } + kernel_graph->set_execution_order(execution_kernels); +} + +bool GenSendRecvCNodesForAllReduce(const std::shared_ptr &kernel_graph, + const CNodePtr &mock_send_node, const CNodePtr &mock_recv_node, CNodePtr *send_node, + CNodePtr *recv_node) { + *send_node = CreateStreamSwitchNode(kernel_graph, kSendOpName); + MS_EXCEPTION_IF_NULL(*send_node); + *recv_node = CreateStreamSwitchNode(kernel_graph, kRecvOpName); + MS_EXCEPTION_IF_NULL(*recv_node); + + cudaEvent_t event = nullptr; + CHECK_CUDA_RET_WITH_EXCEPT(cudaEventCreate(&event, cudaEventDisableTiming), "Creating cuda event failed."); + AnfAlgo::SetNodeAttr("record_event", MakeValue(reinterpret_cast(event)), *send_node); + AnfAlgo::SetNodeAttr("wait_event", MakeValue(reinterpret_cast(event)), *recv_node); + + uintptr_t send_stream = AnfAlgo::GetNodeAttr(mock_send_node, "stream_id"); + AnfAlgo::SetNodeAttr("record_event_stream", MakeValue(send_stream), *send_node); + uintptr_t recv_stream = AnfAlgo::GetNodeAttr(mock_recv_node, "stream_id"); + AnfAlgo::SetNodeAttr("wait_event_stream", MakeValue(recv_stream), *recv_node); + return true; +} + +CNodePtr CreateStreamSwitchNode(const std::shared_ptr &kernel_graph, const std::string &name) { + auto op = std::make_shared(name); + auto apply = std::make_shared(op); + std::vector input_list = {apply}; + CNodePtr node = kernel_graph->NewCNode(input_list); + MS_EXCEPTION_IF_NULL(node); + kernel::KernelBuildInfo::KernelBuildInfoBuilder selected_kernel_builder; + AnfAlgo::SetSelectKernelBuildInfo(selected_kernel_builder.Build(), node.get()); + auto abstract_none = std::make_shared(); + node->set_abstract(abstract_none); + SetKernelInfo(node); + return node; +} +} // namespace gpu +} // namespace device +} // namespace mindspore 
diff --git a/mindspore/ccsrc/device/gpu/gpu_stream_assign.h b/mindspore/ccsrc/device/gpu/gpu_stream_assign.h new file mode 100644 index 0000000000..c7d2fe40e2 --- /dev/null +++ b/mindspore/ccsrc/device/gpu/gpu_stream_assign.h @@ -0,0 +1,73 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_DEVICE_GPU_GPU_STREAM_ASSIGN_H_ +#define MINDSPORE_CCSRC_DEVICE_GPU_GPU_STREAM_ASSIGN_H_ + +#include +#include +#include +#include "session/kernel_graph.h" +#include "session/anf_runtime_algorithm.h" + +namespace mindspore { +namespace device { +namespace gpu { +enum StreamSwitchType { kAllReduceStreamSwitch, kStreamSwitchInvalidType = 255 }; +struct SendRecvPair { + StreamSwitchType stream_switch_type; + CNodePtr mock_send_node; + CNodePtr mock_recv_node; + size_t send_node_offset; + size_t recv_node_offset; +}; +struct StreamSwitchNode { + size_t offset; + CNodePtr cnode; + bool operator<(const StreamSwitchNode &n) const { + if (offset < n.offset) { + return true; + } else if (offset == n.offset) { + return AnfAlgo::GetCNodeName(cnode) == kSendOpName ? true : false; + } else { + return false; + } + } +}; +void AssignGpuStream(const std::shared_ptr &kernel_graph); +void FindAllReduceStreamSwitchPos(const std::shared_ptr &kernel_graph, + std::vector *send_recv_pairs); +// Find Send node position according to "mock" recv node. 
+// "mock" recv node is a gpu kernel node after a real Recv node, e.g. AllReduce node. +std::vector::iterator FindSendNodePos(std::vector::iterator begin, + std::vector::iterator end, const CNodePtr mock_recv_node, + StreamSwitchType stream_switch_type); +// Find Recv node position according to "mock" send node. +// "mock" send node is a gpu kernel node before a real send node, e.g. AllReduce node. +std::vector::iterator FindRecvNodePos(std::vector::iterator begin, + std::vector::iterator end, const CNodePtr mock_send_node, + StreamSwitchType stream_switch_type); +void InsertStreamSwitchNode(const std::shared_ptr &kernel_graph, + const std::vector &send_recv_pairs); +bool GenSendRecvCNodesForAllReduce(const std::shared_ptr &kernel_graph, + const CNodePtr &mock_send_node, const CNodePtr &mock_recv_node, CNodePtr *send_node, + CNodePtr *recv_node); +CNodePtr CreateStreamSwitchNode(const std::shared_ptr &kernel_graph, const std::string &name); +} // namespace gpu +} // namespace device +} // namespace mindspore + +#endif diff --git a/mindspore/ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h index 54e4eb9213..cea56b9878 100644 --- a/mindspore/ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h @@ -52,7 +52,8 @@ class NcclGpuKernel : public GpuKernel { nccl_reduce_type_(ncclSum), input_size_(0), output_size_(0), - collective_handle_(nullptr) {} + collective_handle_(nullptr), + comm_stream_(nullptr) {} ~NcclGpuKernel() override = default; const std::vector &GetInputSizeList() const override { return input_size_list_; } @@ -63,34 +64,33 @@ class NcclGpuKernel : public GpuKernel { T *input_addr = GetDeviceAddress(inputs, 0); T *output_addr = GetDeviceAddress(outputs, 0); + cudaStream_t stream = comm_stream_ ? 
comm_stream_ : reinterpret_cast(stream_ptr); switch (nccl_kernel_type_) { case NCCL_ALL_REDUCE: { auto all_reduce_funcptr = reinterpret_cast(dlsym(const_cast(collective_handle_), "AllReduce")); MS_EXCEPTION_IF_NULL(all_reduce_funcptr); - CHECK_NCCL_RET_WITH_EXCEPT( - (*all_reduce_funcptr)(input_addr, output_addr, output_size_ / sizeof(T), nccl_data_type_, nccl_reduce_type_, - reinterpret_cast(stream_ptr)), - "ncclAllReduce failed"); + CHECK_NCCL_RET_WITH_EXCEPT((*all_reduce_funcptr)(input_addr, output_addr, output_size_ / sizeof(T), + nccl_data_type_, nccl_reduce_type_, stream), + "ncclAllReduce failed"); break; } case NCCL_ALL_GATHER: { auto all_gather_funcptr = reinterpret_cast(dlsym(const_cast(collective_handle_), "AllGather")); MS_EXCEPTION_IF_NULL(all_gather_funcptr); - CHECK_NCCL_RET_WITH_EXCEPT((*all_gather_funcptr)(input_addr, output_addr, input_size_ / sizeof(T), - nccl_data_type_, reinterpret_cast(stream_ptr)), - "ncclAllGather failed"); + CHECK_NCCL_RET_WITH_EXCEPT( + (*all_gather_funcptr)(input_addr, output_addr, input_size_ / sizeof(T), nccl_data_type_, stream), + "ncclAllGather failed"); break; } case NCCL_REDUCE_SCATTER: { auto reduce_scatter_funcptr = reinterpret_cast(dlsym(const_cast(collective_handle_), "ReduceScatter")); MS_EXCEPTION_IF_NULL(reduce_scatter_funcptr); - CHECK_NCCL_RET_WITH_EXCEPT( - (*reduce_scatter_funcptr)(input_addr, output_addr, output_size_ / sizeof(T), nccl_data_type_, - nccl_reduce_type_, reinterpret_cast(stream_ptr)), - "ncclReduceScatter failed"); + CHECK_NCCL_RET_WITH_EXCEPT((*reduce_scatter_funcptr)(input_addr, output_addr, output_size_ / sizeof(T), + nccl_data_type_, nccl_reduce_type_, stream), + "ncclReduceScatter failed"); break; } default: { @@ -167,6 +167,7 @@ class NcclGpuKernel : public GpuKernel { std::vector output_size_list_; std::vector workspace_size_list_; const void *collective_handle_; + cudaStream_t comm_stream_; }; } // namespace kernel } // namespace mindspore diff --git 
a/mindspore/ccsrc/session/gpu_session.cc b/mindspore/ccsrc/session/gpu_session.cc index bbcf2228cc..c0b2323e04 100644 --- a/mindspore/ccsrc/session/gpu_session.cc +++ b/mindspore/ccsrc/session/gpu_session.cc @@ -17,6 +17,7 @@ #include "device/gpu/kernel_info_setter.h" #include "device/gpu/gpu_kernel_build.h" #include "device/gpu/gpu_kernel_runtime.h" +#include "device/gpu/gpu_stream_assign.h" #include "pre_activate/common/optimizer.h" #include "pre_activate/common/pass_manager.h" #include "pre_activate/common/ir_fusion/allreduce_fusion.h" @@ -55,6 +56,11 @@ void GPUSession::Optimize(const std::shared_ptr &kernel_graph) { kernel_graph->SetExecOrderByDefault(); } +void GPUSession::AssignStream(const std::shared_ptr &kernel_graph) { + MS_EXCEPTION_IF_NULL(kernel_graph); + device::gpu::AssignGpuStream(kernel_graph); +} + void GPUSession::BuildKernel(const std::shared_ptr &kernel_graph) const { device::gpu::GpuBuild(kernel_graph); } @@ -94,6 +100,8 @@ GraphId GPUSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList StartKernelRT(); // AllReduce Optimize Optimize(graph); + // Assign CUDA streams + AssignStream(graph); // Build kernel if node is cnode BuildKernel(graph); // Set graph execution order before memory alloc, ensure that memory alloc is according to the reorder graph diff --git a/mindspore/ccsrc/session/gpu_session.h b/mindspore/ccsrc/session/gpu_session.h index e443c1e701..d81a6c58f9 100644 --- a/mindspore/ccsrc/session/gpu_session.h +++ b/mindspore/ccsrc/session/gpu_session.h @@ -49,6 +49,8 @@ class GPUSession : public SessionBasic { void Optimize(const std::shared_ptr &kernel_graph); + void AssignStream(const std::shared_ptr &kernel_graph); + void BuildKernel(const std::shared_ptr &kernel_graph) const; void AllocateMemory(KernelGraph *kernel_graph) const; diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index 39b4b7a160..646fb36871 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ 
-112,6 +112,8 @@ constexpr auto kFusedMulAddNOpName = "FusedMulAddN"; constexpr auto kFusedMulApplyMomentumOpName = "FusedMulApplyMomentum"; constexpr auto kBiasAddOpName = "BiasAdd"; constexpr auto kConfusionMulGradOpName = "ConfusionMulGrad"; +constexpr auto kSendOpName = "Send"; +constexpr auto kRecvOpName = "Recv"; // attr key name constexpr auto kAttrInputNames = "input_names"; From 950d75e3761762541c0c8947c1ebcb3ce7ca243d Mon Sep 17 00:00:00 2001 From: Yanjun Peng Date: Tue, 14 Apr 2020 20:42:20 +0800 Subject: [PATCH 230/367] add get_class_indexing exception info --- mindspore/dataset/engine/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index f92f6f28a8..819013031f 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -782,7 +782,7 @@ class Dataset: """ if self.input: return self.input[0].get_class_indexing() - return None + raise NotImplementedError("Dataset {} has not supported api get_class_indexing yet.".format(type(self))) def reset(self): """Reset the dataset for next epoch""" From 2dd60b4ce38a92a40dc07443a7d6767811cdfe25 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Mon, 13 Apr 2020 15:30:25 +0800 Subject: [PATCH 231/367] pack via cpack --- CMakeLists.txt | 4 +- build.sh | 33 +----- cmake/package.cmake | 185 +++++++++++++++++++++++++++++++++ cmake/package_script.cmake | 92 ++++++++++++++++ mindspore/ccsrc/CMakeLists.txt | 88 +--------------- package.sh | 128 ----------------------- 6 files changed, 283 insertions(+), 247 deletions(-) create mode 100644 cmake/package.cmake create mode 100644 cmake/package_script.cmake delete mode 100755 package.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index 6fe159590f..a02fcd1caf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,7 +25,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include) 
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include/flatbuffers) include(${CMAKE_SOURCE_DIR}/cmake/dependency_utils.cmake) -find_package(Python3 COMPONENTS Interpreter Development) +find_package(Python3 3.7 COMPONENTS Interpreter Development) if(Python3_FOUND) set(PYTHON_INCLUDE_DIRS "${Python3_INCLUDE_DIRS}") set(PYTHON_LIBRARIES "${Python3_LIBRARIES}") @@ -60,3 +60,5 @@ add_subdirectory(mindspore/ccsrc) if (ENABLE_TESTCASES) add_subdirectory(tests) endif() + +include(cmake/package.cmake) \ No newline at end of file diff --git a/build.sh b/build.sh index 3c6de1cf77..d555188c20 100755 --- a/build.sh +++ b/build.sh @@ -16,7 +16,6 @@ set -e BASEPATH=$(cd "$(dirname $0)"; pwd) -PROJECT_PATH="${BASEPATH}" CUDA_PATH="" CUDNN_PATH="" export BUILD_PATH="${BASEPATH}/build/" @@ -24,7 +23,7 @@ export BUILD_PATH="${BASEPATH}/build/" usage() { echo "Usage:" - echo "bash build.sh [-d] [-r] [-v] [-c on|off] [-t on|off] [-g on|off] [-h] [-s] [-b ge|cpu] [-m infer|train] \\" + echo "bash build.sh [-d] [-r] [-v] [-c on|off] [-t on|off] [-g on|off] [-h] [-b ge|cpu] [-m infer|train] \\" echo " [-a on|off] [-g on|off] [-p on|off] [-i] [-L] [-R] [-D on|off] [-j[n]] [-e gpu|d|cpu] \\" echo " [-P on|off] [-z [on|off]] [-M on|off] [-V 9.2|10.1] [-I] [-K]" echo "" @@ -36,7 +35,6 @@ usage() echo " -t Run testcases switch, default on" echo " -g Use glog to output log, default on" echo " -h Print usage" - echo " -s Install or setup" echo " -b Select other backend, available: \\" echo " ge:graph engine, cpu" echo " -m Select mode, available: infer, train, default is infer " @@ -77,7 +75,6 @@ checkopts() VERBOSE="" ENABLE_COVERAGE="off" RUN_TESTCASES="off" - EXECUTE_SETUP="off" ENABLE_BACKEND="" TRAIN_MODE="INFER" ENABLE_ASAN="off" @@ -129,9 +126,6 @@ checkopts() usage exit 0 ;; - s) - EXECUTE_SETUP="on" - ;; b) if [[ "X$OPTARG" != "Xge" && "X$OPTARG" != "Xcpu" ]]; then echo "Invalid value ${OPTARG} for option -b" @@ -139,9 +133,6 @@ checkopts() exit 1 fi 
ENABLE_BACKEND=$(echo "$OPTARG" | tr '[a-z]' '[A-Z]') - if [[ "X$ENABLE_BACKEND" == "XGE" ]]; then - ENABLE_GE="on" - fi if [[ "X$ENABLE_BACKEND" != "XCPU" ]]; then ENABLE_CPU="on" fi @@ -323,10 +314,7 @@ build_mindspore() if [[ "X$INC_BUILD" = "Xoff" ]]; then cmake ${CMAKE_ARGS} ../.. fi - make ${VERBOSE} -j$THREAD_NUM - if [[ "X$EXECUTE_SETUP" = "Xon" ]]; then - make install - fi + cmake --build . --target package ${VERBOSE} -j$THREAD_NUM echo "success to build mindspore project!" } @@ -457,24 +445,7 @@ else build_mindspore fi -if [[ "X$INC_BUILD" = "Xoff" ]]; then - if [[ "X$ENABLE_GE" = "Xon" ]]; then - bash "${PROJECT_PATH}/package.sh" ge - elif [[ "X$ENABLE_GPU" = "Xon" ]]; then - bash "${PROJECT_PATH}/package.sh" ms gpu - elif [[ "X$ENABLE_D" = "Xon" ]]; then - bash "${PROJECT_PATH}/package.sh" ms ascend - elif [[ "X$ENABLE_CPU" = "Xon" ]]; then - bash "${PROJECT_PATH}/package.sh" ms cpu - else - bash "${PROJECT_PATH}/package.sh" debug - fi -fi - cp -rf ${BUILD_PATH}/package/mindspore/lib ${BUILD_PATH}/../mindspore cp -rf ${BUILD_PATH}/package/mindspore/*.so ${BUILD_PATH}/../mindspore -if [[ -d "${BUILD_PATH}/package/build" ]]; then - rm -rf "${BUILD_PATH}/package/build" -fi echo "---------------- mindspore: build end ----------------" diff --git a/cmake/package.cmake b/cmake/package.cmake new file mode 100644 index 0000000000..8a44d392cc --- /dev/null +++ b/cmake/package.cmake @@ -0,0 +1,185 @@ +# include dependency +include(CMakePackageConfigHelpers) +include(GNUInstallDirs) + +# set package information +set(CPACK_PACKAGE_NAME ${PROJECT_NAME}) +set(CPACK_GENERATOR "External") +set(CPACK_EXTERNAL_PACKAGE_SCRIPT ${CMAKE_SOURCE_DIR}/cmake/package_script.cmake) +set(CPACK_EXTERNAL_ENABLE_STAGING true) +set(CPACK_TEMPORARY_PACKAGE_FILE_NAME ${CMAKE_SOURCE_DIR}/build/package/mindspore) +set(CPACK_TEMPORARY_INSTALL_DIRECTORY ${CMAKE_SOURCE_DIR}/build/package/mindspore) +if (ENABLE_GE) + set(CPACK_MS_BACKEND "ge") + set(CPACK_MS_PACKAGE_NAME "mindspore") +elseif 
(ENABLE_GPU) + set(CPACK_MS_BACKEND "ms") + set(CPACK_MS_PACKAGE_NAME "mindspore-gpu") +elseif (ENABLE_D) + set(CPACK_MS_BACKEND "ms") + set(CPACK_MS_PACKAGE_NAME "mindspore-ascend") +elseif (ENABLE_CPU) + set(CPACK_MS_BACKEND "ms") + set(CPACK_MS_PACKAGE_NAME "mindspore") +else () + set(CPACK_MS_BACKEND "debug") + set(CPACK_MS_PACKAGE_NAME "mindspore") +endif () +include(CPack) + +# set install path +set(INSTALL_LIB_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Installation directory for libraries") +set(INSTALL_PY_DIR ".") +set(INSTALL_LIB_DIR "lib") +set(INSTALL_BASE_DIR ".") + +# set package files +install( + TARGETS _c_expression + DESTINATION ${INSTALL_BASE_DIR} + COMPONENT mindspore +) + +install( + TARGETS mindspore_gvar + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore +) + +if (USE_GLOG) + file(GLOB_RECURSE GLOG_LIB_LIST ${glog_LIBPATH}/libglog*) + install( + FILES ${GLOG_LIB_LIST} + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) +endif () + +if (ENABLE_MINDDATA) + install( + TARGETS _c_dataengine _c_mindrecord + DESTINATION ${INSTALL_BASE_DIR} + COMPONENT mindspore + ) + + file(GLOB_RECURSE OPENCV_LIB_LIST + ${opencv_LIBPATH}/libopencv_core* + ${opencv_LIBPATH}/libopencv_imgcodecs* + ${opencv_LIBPATH}/libopencv_imgproc* + ) + install( + FILES ${OPENCV_LIB_LIST} + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) +endif () + +if (ENABLE_CPU) + if (CMAKE_SYSTEM_NAME MATCHES "Linux") + file(GLOB_RECURSE DNNL_LIB_LIST ${onednn_LIBPATH}/libdnnl${CMAKE_SHARED_LIBRARY_SUFFIX}*) + elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") + file(GLOB_RECURSE DNNL_LIB_LIST ${onednn_LIBPATH}/libdnnl*${CMAKE_SHARED_LIBRARY_SUFFIX}*) + elseif (CMAKE_SYSTEM_NAME MATCHES "Windows") + file(GLOB_RECURSE DNNL_LIB_LIST ${onednn_LIBPATH}/dnnl.lib) + endif () + install( + FILES ${DNNL_LIB_LIST} + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) +endif () + +if (ENABLE_GPU) + if (ENABLE_MPI) + install( + TARGETS _ms_mpi gpu_collective + DESTINATION 
${INSTALL_LIB_DIR} + COMPONENT mindspore + ) + endif () + + install( + TARGETS gpu_queue + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) +endif () + +if (NOT ENABLE_GE) + if (ENABLE_D) + if (DEFINED ENV{ASCEND_CUSTOM_PATH}) + set(ASCEND_PATH $ENV{ASCEND_CUSTOM_PATH}) + else () + set(ASCEND_PATH /usr/local/Ascend) + endif () + set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common) + + install( + FILES + ${CMAKE_BINARY_DIR}/graphengine/src/common/graph/libgraph.so + ${CMAKE_BINARY_DIR}/graphengine/src/ge/common/libge_common.so + ${CMAKE_BINARY_DIR}/graphengine/src/ge/ge_runtime/libge_runtime.so + ${ASCEND_DRIVER_PATH}/libslog.so + ${ASCEND_DRIVER_PATH}/libc_sec.so + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) + elseif (ENABLE_TESTCASES) + install( + FILES + ${CMAKE_BINARY_DIR}/graphengine/src/common/graph/libgraph.so + ${CMAKE_SOURCE_DIR}/graphengine/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR}/libslog.so + ${CMAKE_SOURCE_DIR}/graphengine/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR}/libc_sec.so + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) + endif () +endif () + +# set python files +file(GLOB MS_PY_LIST ${CMAKE_SOURCE_DIR}/mindspore/*.py) +install( + FILES ${MS_PY_LIST} + DESTINATION ${INSTALL_PY_DIR} + COMPONENT mindspore +) + +install( + DIRECTORY + ${CMAKE_SOURCE_DIR}/mindspore/nn + ${CMAKE_SOURCE_DIR}/mindspore/_extends + ${CMAKE_SOURCE_DIR}/mindspore/parallel + ${CMAKE_SOURCE_DIR}/mindspore/mindrecord + ${CMAKE_SOURCE_DIR}/mindspore/train + ${CMAKE_SOURCE_DIR}/mindspore/model_zoo + ${CMAKE_SOURCE_DIR}/mindspore/common + ${CMAKE_SOURCE_DIR}/mindspore/ops + ${CMAKE_SOURCE_DIR}/mindspore/communication + DESTINATION ${INSTALL_PY_DIR} + COMPONENT mindspore +) + +if (ENABLE_GPU) + install( + DIRECTORY ${CMAKE_SOURCE_DIR}/mindspore/_akg + DESTINATION ${INSTALL_PY_DIR} + COMPONENT mindspore + ) + if (EXISTS ${CMAKE_SOURCE_DIR}/mindspore/incubator-tvm) + install( + DIRECTORY + 
${CMAKE_SOURCE_DIR}/mindspore/incubator-tvm/topi/python/topi + ${CMAKE_SOURCE_DIR}/mindspore/incubator-tvm/python/tvm + DESTINATION ${INSTALL_PY_DIR}/_akg + COMPONENT mindspore + ) + endif () +endif () + +if (EXISTS ${CMAKE_SOURCE_DIR}/mindspore/dataset) + install( + DIRECTORY ${CMAKE_SOURCE_DIR}/mindspore/dataset + DESTINATION ${INSTALL_PY_DIR} + COMPONENT mindspore + ) +endif () diff --git a/cmake/package_script.cmake b/cmake/package_script.cmake new file mode 100644 index 0000000000..565154a38d --- /dev/null +++ b/cmake/package_script.cmake @@ -0,0 +1,92 @@ +# find exec +find_package(Python3 3.7 COMPONENTS Interpreter Development) +if (NOT Python3_FOUND) + message("No python3 found.") + return () +endif () + +set(PYTHON ${Python3_EXECUTABLE}) +set(PYTHON_VERSION ${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}) + +find_package(Git) +if (NOT GIT_FOUND) + message("No git found.") + return () +endif () +set(GIT ${GIT_EXECUTABLE}) + +# set path +set(MS_ROOT_DIR ${CPACK_PACKAGE_DIRECTORY}/../../) +set(MS_PACK_ROOT_DIR ${MS_ROOT_DIR}/build/package) + +# set package file name +if (CMAKE_SYSTEM_NAME MATCHES "Linux") + if (PYTHON_VERSION MATCHES "3.7") + set(PY_TAGS "cp37-cp37m") + else () + message("Could not find 'Python 3.7'") + return() + endif () + string(TOLOWER linux_${CMAKE_HOST_SYSTEM_PROCESSOR} PLATFORM_TAG) +elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") + if (PYTHON_VERSION MATCHES "3.7") + set(PY_TAGS "py37-none") + else () + message("Could not find 'Python 3.7'") + return() + endif () + set(PLATFORM_TAG "any") +elseif (CMAKE_SYSTEM_NAME MATCHES "Windows") + if (PYTHON_VERSION MATCHES "3.7") + set(PY_TAGS "cp37-cp37m") + else () + message("Could not find 'Python 3.7'") + return() + endif () + set(PLATFORM_TAG "win_amd64") +else () + message(FATAL_ERROR "other platform: ${CMAKE_SYSTEM_NAME}") +endif () + +# get git commit id +set(GIT_COMMIT_ID "") +execute_process( + COMMAND ${GIT} log --format='[sha1]:%h,[branch]:%d' -1 + OUTPUT_VARIABLE GIT_COMMIT_ID + 
WORKING_DIRECTORY ${MS_ROOT_DIR} + ERROR_QUIET) +string(REPLACE " " "" GIT_COMMIT_ID ${GIT_COMMIT_ID}) + +set(ENV{BACKEND_POLICY} ${CPACK_MS_BACKEND}) +set(ENV{MS_PACKAGE_NAME} ${CPACK_MS_PACKAGE_NAME}) +set(ENV{COMMIT_ID} ${GIT_COMMIT_ID}) + +execute_process( + COMMAND ${PYTHON} ${MS_ROOT_DIR}/setup.py "bdist_wheel" + WORKING_DIRECTORY ${MS_PACK_ROOT_DIR} +) + +# finally +set(PACKAGE_NAME ${CPACK_MS_PACKAGE_NAME}) +if (NOT CMAKE_SYSTEM_NAME MATCHES "Windows") + string(REPLACE "-" "_" PACKAGE_NAME ${PACKAGE_NAME}) + execute_process( + COMMAND chmod -R 700 ${MS_PACK_ROOT_DIR}/mindspore/ + COMMAND chmod -R 700 ${MS_PACK_ROOT_DIR}/${PACKAGE_NAME}.egg-info/ + ) +endif () + +set(PACKAGE_NAME "mindspore") + +file(GLOB WHL_FILE ${MS_PACK_ROOT_DIR}/dist/*.whl) +get_filename_component(ORIGIN_FILE_NAME ${WHL_FILE} NAME) +string(REPLACE "-" ";" ORIGIN_FILE_NAME ${ORIGIN_FILE_NAME}) +list(GET ORIGIN_FILE_NAME 1 VERSION) +set(NEW_FILE_NAME ${PACKAGE_NAME}-${VERSION}-${PY_TAGS}-${PLATFORM_TAG}.whl) +file(RENAME ${WHL_FILE} ${MS_PACK_ROOT_DIR}/${NEW_FILE_NAME}) +file(REMOVE_RECURSE ${MS_ROOT_DIR}/output) +file(MAKE_DIRECTORY ${MS_ROOT_DIR}/output) +file(COPY ${MS_PACK_ROOT_DIR}/${NEW_FILE_NAME} DESTINATION ${MS_ROOT_DIR}/output/) + +file(SHA256 ${MS_ROOT_DIR}/output/${NEW_FILE_NAME} SHA256_VAR) +file(WRITE ${MS_ROOT_DIR}/output/${NEW_FILE_NAME}.sha256 ${SHA256_VAR} " " ${NEW_FILE_NAME}) diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 3a04d9b3fb..f8af2a4f18 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -492,93 +492,7 @@ if(ENABLE_MINDDATA) add_subdirectory(mindrecord) add_subdirectory(dataset) endif() -set(MS_PACK_PATH ${CMAKE_SOURCE_DIR}/build/package/mindspore/) -set(MS_LIB_PATH ${CMAKE_SOURCE_DIR}/build/package/mindspore/lib/) - -add_custom_target(add_ms_lib ALL - COMMAND mkdir -pv ${MS_LIB_PATH} - COMMAND cp ${MS_CCSRC_BUILD_PATH}/_c_expression* ${MS_PACK_PATH} - COMMAND cp 
${MS_CCSRC_BUILD_PATH}/libmindspore_gvar${CMAKE_SHARED_LIBRARY_SUFFIX} ${MS_LIB_PATH} -) -add_dependencies(add_ms_lib _c_expression) - -if (NOT ENABLE_GE) - if (ENABLE_D) - if(DEFINED ENV{ASCEND_CUSTOM_PATH}) - set(ASCEND_PATH $ENV{ASCEND_CUSTOM_PATH}) - else() - set(ASCEND_PATH /usr/local/Ascend) - endif() - set(ASCEND_DRIVER_PATH ${ASCEND_PATH}/driver/lib64/common) - add_custom_target(add_ge_lib ALL - COMMAND cp ${MS_CCSRC_BUILD_PATH}/../../graphengine/src/common/graph/libgraph.so ${MS_LIB_PATH} - COMMAND cp ${MS_CCSRC_BUILD_PATH}/../../graphengine/src/ge/common/libge_common.so ${MS_LIB_PATH} - COMMAND cp ${MS_CCSRC_BUILD_PATH}/../../graphengine/src/ge/ge_runtime/libge_runtime.so ${MS_LIB_PATH} - COMMAND cp ${ASCEND_DRIVER_PATH}/libslog.so ${MS_LIB_PATH} - COMMAND cp ${ASCEND_DRIVER_PATH}/libc_sec.so ${MS_LIB_PATH} - ) - add_dependencies(add_ge_lib add_ms_lib) - add_dependencies(add_ge_lib graph) - add_dependencies(add_ge_lib ge_runtime) - elseif(ENABLE_TESTCASES) - add_custom_target(add_ge_lib ALL - COMMAND cp ${MS_CCSRC_BUILD_PATH}/../../graphengine/src/common/graph/libgraph.so ${MS_LIB_PATH} - COMMAND cp ${CMAKE_SOURCE_DIR}/graphengine/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR}/libslog.so ${MS_LIB_PATH} - COMMAND cp ${CMAKE_SOURCE_DIR}/graphengine/third_party/prebuild/${CMAKE_HOST_SYSTEM_PROCESSOR}/libc_sec.so ${MS_LIB_PATH} - ) - add_dependencies(add_ge_lib add_ms_lib) - add_dependencies(add_ge_lib graph) - endif() -endif() - -if (ENABLE_GPU) - if (ENABLE_MPI) - add_custom_target(add_mpi_lib ALL - COMMAND cp ${MS_CCSRC_BUILD_PATH}/_ms_mpi* ${MS_PACK_PATH} - ) - add_dependencies(add_mpi_lib _ms_mpi) - add_custom_target(add_gpu_collective_lib ALL - COMMAND mkdir -pv ${MS_LIB_PATH} - COMMAND cp ${MS_CCSRC_BUILD_PATH}/libgpu_collective* ${MS_LIB_PATH} - ) - add_dependencies(add_gpu_collective_lib gpu_collective) - endif() - add_custom_target(add_gpu_queue_lib ALL - COMMAND cp ${MS_CCSRC_BUILD_PATH}/libgpu_queue* ${MS_LIB_PATH} - ) - 
add_dependencies(add_gpu_queue_lib add_ms_lib) -endif() - -if (ENABLE_CPU) - if (CMAKE_SYSTEM_NAME MATCHES "Darwin") - add_custom_target(add_cpu_lib ALL COMMAND cp ${onednn_LIBPATH}/libdnnl.1.1.dylib ${MS_LIB_PATH}/libdnnl.1.1.dylib) - else () - add_custom_target(add_cpu_lib ALL COMMAND cp ${onednn_LIBPATH}/libdnnl.so.1.1 ${MS_LIB_PATH}/libdnnl.so.1) - endif () - add_dependencies(add_cpu_lib add_ms_lib) -endif() - -if (ENABLE_MINDDATA) - add_custom_target(add_minddata_lib ALL - COMMAND cp ${MS_CCSRC_BUILD_PATH}/dataset/*.so ${MS_PACK_PATH} - COMMAND cp ${MS_CCSRC_BUILD_PATH}/mindrecord/*.so ${MS_PACK_PATH} - COMMAND cp ${opencv_LIBPATH}/libopencv_core.so.4.2.0 ${MS_LIB_PATH}/libopencv_core.so.4.2 - COMMAND cp ${opencv_LIBPATH}/libopencv_imgcodecs.so.4.2.0 ${MS_LIB_PATH}/libopencv_imgcodecs.so.4.2 - COMMAND cp ${opencv_LIBPATH}/libopencv_imgproc.so.4.2.0 ${MS_LIB_PATH}/libopencv_imgproc.so.4.2 - ) - add_dependencies(add_minddata_lib add_ms_lib) - add_dependencies(add_minddata_lib _c_mindrecord) - add_dependencies(add_minddata_lib _c_dataengine) - - add_dependencies(_c_mindrecord mindspore) - add_dependencies(_c_dataengine mindspore) -endif() if (USE_GLOG) target_link_libraries(_c_expression PRIVATE mindspore::glog) - if (CMAKE_SYSTEM_NAME MATCHES "Darwin") - add_custom_target(add_glog_lib ALL COMMAND cp ${glog_LIBPATH}/libglog*.dylib ${MS_LIB_PATH}) - else () - add_custom_target(add_glog_lib ALL COMMAND cp ${glog_LIBPATH}/libglog*.so.0 ${MS_LIB_PATH}) - endif () -endif() +endif() \ No newline at end of file diff --git a/package.sh b/package.sh deleted file mode 100755 index 056ca8c25d..0000000000 --- a/package.sh +++ /dev/null @@ -1,128 +0,0 @@ -#!/bin/bash -# Copyright 2019 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -set -e - -BASEPATH=$(cd "$(dirname $0)"; pwd) -echo "${BASEPATH}" -cd "${BASEPATH}" -BUILD_PATH="${BASEPATH}/build" -PACKAGE_PATH="${BUILD_PATH}/package" -OUTPUT_PATH="${BASEPATH}/output" - -mk_new_dir() { - local create_dir="$1" # the target to make - - if [[ -d "${create_dir}" ]];then - rm -rf "${create_dir}" - fi - - mkdir -pv "${create_dir}" -} - -to_lower () { - echo "$1" | tr '[:upper:]' '[:lower:]' -} - -COMMIT_ID=$(git log --format='[sha1]:%h,[branch]:%d' -1 | sed 's/ //g') -export COMMIT_ID - -PYTHON=$(which python3) -PYTHON_VERSION=$("${PYTHON}" -V 2>&1 | awk '{print $2}' | cut -d. 
-f-2) -if [[ $(uname) == "Linux" ]]; then - if [[ "${PYTHON_VERSION}" == "3.7" ]]; then - PY_TAGS="cp37-cp37m" - else - echo "Could not find 'Python 3.7'" - exit 1 - fi - PLATFORM_TAG=$(to_lower "$(uname)_$(uname -m)") -elif [[ $(uname) == "Darwin" ]]; then - if [[ "${PYTHON_VERSION}" == "3.7" ]]; then - PY_TAGS="py3-none" - else - echo "Could not find 'Python 3.7'" - exit 1 - fi - PLATFORM_TAG="any" -fi -echo "=========${BASEPATH}===================" -mk_new_dir "${OUTPUT_PATH}" - -#copy necessary file to pack_path -cp ${BASEPATH}/mindspore/*.py "${PACKAGE_PATH}/mindspore" -cp -rf "${BUILD_PATH}/../mindspore/nn" "${PACKAGE_PATH}/mindspore" -cp -rf "${BUILD_PATH}/../mindspore/_extends" "${PACKAGE_PATH}/mindspore" -cp -rf "${BUILD_PATH}/../mindspore/parallel" "${PACKAGE_PATH}/mindspore" -cp -rf "${BUILD_PATH}/../mindspore/mindrecord" "${PACKAGE_PATH}/mindspore" -cp -rf "${BUILD_PATH}/../mindspore/train" "${PACKAGE_PATH}/mindspore" -cp -rf "${BUILD_PATH}/../mindspore/model_zoo" "${PACKAGE_PATH}/mindspore" -cp -rf "${BUILD_PATH}/../mindspore/common" "${PACKAGE_PATH}/mindspore" -cp -rf "${BUILD_PATH}/../mindspore/ops" "${PACKAGE_PATH}/mindspore" -cp -rf "${BUILD_PATH}/../mindspore/communication" "${PACKAGE_PATH}/mindspore" - -if [[ "X$2" = "Xgpu" ]]; then - echo "package _akg when gpu enable." 
- cp -rf "${BASEPATH}/mindspore/_akg" "${PACKAGE_PATH}" - if [[ -d "${BUILD_PATH}/mindspore/incubator-tvm" ]]; then - cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/topi/python/topi" "${PACKAGE_PATH}/_akg" - cp -rf "${BUILD_PATH}/mindspore/incubator-tvm/python/tvm" "${PACKAGE_PATH}/_akg" - fi -fi - -# move dataset -if [[ -d "${BASEPATH}/mindspore/dataset" ]]; then - cp -rf "${BASEPATH}/mindspore/dataset" "${PACKAGE_PATH}/mindspore" -fi - -cd "${PACKAGE_PATH}" -if [ -n "$1" ];then - export BACKEND_POLICY=$1 -else - export BACKEND_POLICY="ms" -fi - -# package name -if [[ "X$1" = "Xge" ]]; then - export MS_PACKAGE_NAME="mindspore" -elif [[ "X$1" = "Xms" && "X$2" = "Xgpu" ]]; then - export MS_PACKAGE_NAME="mindspore-gpu" -elif [[ "X$1" = "Xms" && "X$2" = "Xascend" ]]; then - export MS_PACKAGE_NAME="mindspore-ascend" -elif [[ "X$1" = "Xms" && "X$2" = "Xcpu" ]]; then - export MS_PACKAGE_NAME="mindspore" -else - export MS_PACKAGE_NAME="mindspore" -fi - -${PYTHON} "${BASEPATH}/setup.py" bdist_wheel - -chmod -R 700 ${PACKAGE_PATH}/mindspore/ -chmod -R 700 ${PACKAGE_PATH}/${MS_PACKAGE_NAME//-/_}.egg-info/ - -# rename package -PACKAGE_FULL_NAME=$(find "${PACKAGE_PATH}" -iname "*.whl") -PACKAGE_BASE_NAME=$(echo ${PACKAGE_FULL_NAME} | awk -F / '{print $NF}' | awk -F - '{print $1"-"$2}') -PACKAGE_BASE_NAME=${PACKAGE_BASE_NAME//_*-/-} - -PACKAGE_NEW_NAME="${PACKAGE_BASE_NAME}-${PY_TAGS}-${PLATFORM_TAG}.whl" -cp -rf "${PACKAGE_PATH}/dist"/*.whl "${PACKAGE_PATH}/${PACKAGE_NEW_NAME}" -cp -f "${PACKAGE_PATH}/${PACKAGE_NEW_NAME}" "${OUTPUT_PATH}" -find ${OUTPUT_PATH} -name "*.whl" -print0 | xargs -0 -I {} sh -c "sha256sum {} | awk '{printf \$1}' > {}.sha256" - -cd "${BASEPATH}" - -echo "------Successfully created mindspore package------" From 99f12f9105da9fe8d787d90b8d5f5f0e88627c67 Mon Sep 17 00:00:00 2001 From: limingqi107 Date: Tue, 14 Apr 2020 16:34:28 +0800 Subject: [PATCH 232/367] gpu uses dynamic memory pool by default --- .../ccsrc/device/gpu/gpu_kernel_runtime.cc | 25 
+++++------- mindspore/ccsrc/device/memory_manager.cc | 7 ++++ mindspore/ccsrc/device/memory_manager.h | 1 + .../ccsrc/pre_activate/mem_reuse/mem_reuse.cc | 39 +++++++------------ mindspore/ccsrc/utils/context/ms_context.cc | 2 +- 5 files changed, 34 insertions(+), 40 deletions(-) diff --git a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc index 671a11f776..584f66eee7 100644 --- a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc +++ b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc @@ -127,9 +127,10 @@ bool GPUKernelRuntime::Run(session::KernelGraph *graph) { auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); bool is_enable_dynamic_mem = context_ptr->enable_dynamic_mem_pool(); + bool is_enable_pynative_infer = context_ptr->enable_pynative_infer(); struct timeval start_time, end_time; (void)gettimeofday(&start_time, nullptr); - if (is_enable_dynamic_mem) { + if (is_enable_dynamic_mem && !is_enable_pynative_infer) { ret = LaunchKernelDynamic(graph); } else { ret = LaunchKernel(graph); @@ -152,7 +153,7 @@ void GPUKernelRuntime::InitKernelRefCount(const session::KernelGraph *graph) { } mem_reuse_util_ptr->SetKernelDefMap(); mem_reuse_util_ptr->SetReuseRefCount(); - // Can't free the device address of graph output, so set the reference count of graph output specially, + // Can't free the device address of graph output, so set the reference count of graph output specially. mem_reuse_util_ptr->SetGraphOutputRefCount(); mem_reuse_util_ptr_ = mem_reuse_util_ptr; } @@ -351,6 +352,10 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, if (kernel_ref_count_ptr == nullptr) { continue; } + // Can't free the output of graph. + if (kernel_ref_count_ptr->ref_count_dynamic_use_ == memreuse::kMaxRefCount) { + continue; + } kernel_ref_count_ptr->ref_count_dynamic_use_--; if (kernel_ref_count_ptr->ref_count_dynamic_use_ == 0) { // Reset the reference count. 
@@ -360,14 +365,10 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, FreeCommunicationOpDynamicRes(kernel, i, &is_communication_op); if (!is_communication_op) { auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, i); - MS_EXCEPTION_IF_NULL(device_address); - MS_EXCEPTION_IF_NULL(device_address->ptr_); - mem_manager_->FreeMemFromMemPool(device_address->ptr_); - device_address->ptr_ = nullptr; + mem_manager_->FreeMemFromMemPool(device_address); } } } - // Free the workspace of kernel. for (size_t i = 0; i < kernel_workspaces.size(); ++i) { auto workspace = kernel_workspaces[i]; @@ -388,10 +389,7 @@ void GPUKernelRuntime::FreeCommunicationOpDynamicRes(const mindspore::AnfNodePtr communication_op_input_ref_count_--; if (communication_op_input_ref_count_ == 0) { auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, 0); - MS_EXCEPTION_IF_NULL(device_address); - MS_EXCEPTION_IF_NULL(device_address->ptr_); - mem_manager_->FreeMemFromMemPool(device_address->ptr_); - device_address->ptr_ = nullptr; + mem_manager_->FreeMemFromMemPool(device_address); } *is_communication_op = true; return; @@ -410,10 +408,7 @@ void GPUKernelRuntime::FreeCommunicationOpDynamicRes(const mindspore::AnfNodePtr communication_op_output_ref_count_--; if (communication_op_output_ref_count_ == 0) { auto device_address = AnfAlgo::GetMutableOutputAddr(kernel_input.first, 0); - MS_EXCEPTION_IF_NULL(device_address); - MS_EXCEPTION_IF_NULL(device_address->ptr_); - mem_manager_->FreeMemFromMemPool(device_address->ptr_); - device_address->ptr_ = nullptr; + mem_manager_->FreeMemFromMemPool(device_address); } *is_communication_op = true; } diff --git a/mindspore/ccsrc/device/memory_manager.cc b/mindspore/ccsrc/device/memory_manager.cc index 6977628eb1..2fad5fc10e 100644 --- a/mindspore/ccsrc/device/memory_manager.cc +++ b/mindspore/ccsrc/device/memory_manager.cc @@ -155,6 +155,13 @@ void *MemoryManager::MallocMemFromMemPool(size_t size) { return 
nullptr; } +void MemoryManager::FreeMemFromMemPool(const DeviceAddressPtr address) { + MS_EXCEPTION_IF_NULL(address); + MS_EXCEPTION_IF_NULL(address->ptr_); + FreeMemFromMemPool(address->ptr_); + address->ptr_ = nullptr; +} + void MemoryManager::FreeMemFromMemPool(void *device_ptr) { if (device_ptr == nullptr) { MS_LOG(ERROR) << "FreeMemFromMemPool device_ptr is null."; diff --git a/mindspore/ccsrc/device/memory_manager.h b/mindspore/ccsrc/device/memory_manager.h index 82c22f4548..c90ffc380e 100644 --- a/mindspore/ccsrc/device/memory_manager.h +++ b/mindspore/ccsrc/device/memory_manager.h @@ -47,6 +47,7 @@ class MemoryManager { virtual void MallocMemFromMemPool(const DeviceAddressPtr address, size_t size); virtual void *MallocMemFromMemPool(size_t size); + virtual void FreeMemFromMemPool(const DeviceAddressPtr address); virtual void FreeMemFromMemPool(void *device_ptr); size_t GetCommonAlignSize(size_t input_size) const; diff --git a/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.cc b/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.cc index 2113fec653..d25b60003f 100644 --- a/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.cc +++ b/mindspore/ccsrc/pre_activate/mem_reuse/mem_reuse.cc @@ -273,30 +273,21 @@ void MemReuseUtil::SetReuseRefCount() { } void MemReuseUtil::SetGraphOutputRefCount() { - for (const auto &output : graph_->outputs()) { - MS_EXCEPTION_IF_NULL(output); - for (size_t i = 0; i < AnfAlgo::GetInputTensorNum(output); ++i) { - if (!(output->isa())) { - continue; - } - auto cnode = output->cast(); - MS_EXCEPTION_IF_NULL(cnode); - auto input_node = cnode->input(i + 1); - MS_EXCEPTION_IF_NULL(input_node); - auto kernel_input = AnfAlgo::VisitKernel(input_node, 0); - MS_EXCEPTION_IF_NULL(kernel_input.first); - if (!(kernel_input.first->isa())) { - continue; - } - auto ak_node = kernel_input.first->cast(); - auto key = ak_node.get(); - auto iter = kernel_output_refs_.find(key); - if ((iter != kernel_output_refs_.end()) && (kernel_input.second < 
iter->second.size())) { - auto kernel_ref_count_ptr = kernel_output_refs_[key][kernel_input.second]; - MS_EXCEPTION_IF_NULL(kernel_ref_count_ptr); - kernel_ref_count_ptr->ref_count_ = kMaxRefCount; - kernel_ref_count_ptr->ref_count_dynamic_use_ = kMaxRefCount; - } + auto nodes = AnfAlgo::GetAllOutput(graph_->output(), {prim::kPrimTupleGetItem}); + for (const auto &node : nodes) { + auto kernel_input = AnfAlgo::VisitKernelWithReturnType(node, 0); + MS_EXCEPTION_IF_NULL(kernel_input.first); + if (!kernel_input.first->isa() || !AnfAlgo::IsRealKernel(kernel_input.first)) { + continue; + } + auto ak_node = kernel_input.first->cast(); + auto key = ak_node.get(); + auto iter = kernel_output_refs_.find(key); + if ((iter != kernel_output_refs_.end()) && (kernel_input.second < iter->second.size())) { + auto kernel_ref_count_ptr = kernel_output_refs_[key][kernel_input.second]; + MS_EXCEPTION_IF_NULL(kernel_ref_count_ptr); + kernel_ref_count_ptr->ref_count_ = kMaxRefCount; + kernel_ref_count_ptr->ref_count_dynamic_use_ = kMaxRefCount; } } #ifdef MEM_REUSE_DEBUG diff --git a/mindspore/ccsrc/utils/context/ms_context.cc b/mindspore/ccsrc/utils/context/ms_context.cc index 6c15e16714..b1ab0205f2 100644 --- a/mindspore/ccsrc/utils/context/ms_context.cc +++ b/mindspore/ccsrc/utils/context/ms_context.cc @@ -75,7 +75,7 @@ MsContext::MsContext(const std::string& policy, const std::string& target) { precompile_only_ = false; auto_mixed_precision_flag_ = true; enable_pynative_infer_ = false; - enable_dynamic_mem_pool_ = false; + enable_dynamic_mem_pool_ = true; graph_memory_max_size_ = "0"; variable_memory_max_size_ = "0"; MS_LOG(INFO) << "Create context with backend policy:" << policy << ", device target:" << target << "."; From 1a1cbc6814a068f52298134a86e786a9a241256d Mon Sep 17 00:00:00 2001 From: xiefangqi Date: Tue, 14 Apr 2020 19:21:15 +0800 Subject: [PATCH 233/367] implemention of new api: apply --- mindspore/dataset/engine/datasets.py | 45 +++++ 
tests/ut/python/dataset/test_apply.py | 236 ++++++++++++++++++++++++++ 2 files changed, 281 insertions(+) create mode 100644 tests/ut/python/dataset/test_apply.py diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index f92f6f28a8..76b23eb292 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -499,6 +499,51 @@ class Dataset: return ProjectDataset(self, columns) + def apply(self, apply_func): + """ + Apply a function in this dataset. + + The specified apply_func is a function that must take one 'Dataset' as an argument + and return a preprogressing 'Dataset'. + + Args: + apply_func (function): A function that must take one 'Dataset' as an argument and + return a preprogressing 'Dataset'. + + Returns: + Dataset, applied by the function. + + Examples: + >>> import numpy as np + >>> import mindspore.dataset as ds + >>> # Generate 1d int numpy array from 0 - 6 + >>> def generator_1d(): + >>> for i in range(6): + >>> yield (np.array([i]),) + >>> # 1) get all data from dataset + >>> data = ds.GeneratorDataset(generator_1d, ["data"]) + >>> # 2) declare a apply_func function + >>> def apply_func(ds): + >>> ds = ds.batch(2) + >>> return ds + >>> # 3) use apply to call apply_func + >>> data = data.apply(apply_func) + >>> for item in data.create_dict_iterator(): + >>> print(item["data"]) + + Raises: + TypeError: If apply_func is not a function. + TypeError: If apply_func doesn't return a Dataset. + """ + + if not hasattr(apply_func, '__call__'): + raise TypeError("apply_func must be a function.") + + dataset = apply_func(self) + if not isinstance(dataset, Dataset): + raise TypeError("apply_func must return a dataset.") + return dataset + def device_que(self, prefetch_size=None): """ Returns a transferredDataset that transfer data through tdt. 
diff --git a/tests/ut/python/dataset/test_apply.py b/tests/ut/python/dataset/test_apply.py new file mode 100644 index 0000000000..f2e7a79011 --- /dev/null +++ b/tests/ut/python/dataset/test_apply.py @@ -0,0 +1,236 @@ +# Copyright 2019 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +import mindspore.dataset as ds +from mindspore import log as logger +import mindspore.dataset.transforms.vision.c_transforms as vision +import numpy as np + +DATA_DIR = "../data/dataset/testPK/data" + +# Generate 1d int numpy array from 0 - 64 +def generator_1d(): + for i in range(64): + yield (np.array([i]),) + +def test_apply_generator_case(): + # apply dataset operations + data1 = ds.GeneratorDataset(generator_1d, ["data"]) + data2 = ds.GeneratorDataset(generator_1d, ["data"]) + + def dataset_fn(ds): + ds = ds.repeat(2) + return ds.batch(4) + + data1 = data1.apply(dataset_fn) + data2 = data2.repeat(2) + data2 = data2.batch(4) + + for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): + assert np.array_equal(item1["data"], item2["data"]) + +def test_apply_imagefolder_case(): + # apply dataset map operations + data1 = ds.ImageFolderDatasetV2(DATA_DIR, num_shards=4, shard_id=3) + data2 = ds.ImageFolderDatasetV2(DATA_DIR, num_shards=4, shard_id=3) + + decode_op = vision.Decode() + normalize_op = vision.Normalize([121.0, 115.0, 100.0], [70.0, 68.0, 
71.0]) + + def dataset_fn(ds): + ds = ds.map(operations = decode_op) + ds = ds.map(operations = normalize_op) + ds = ds.repeat(2) + return ds + + data1 = data1.apply(dataset_fn) + data2 = data2.map(operations = decode_op) + data2 = data2.map(operations = normalize_op) + data2 = data2.repeat(2) + + for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): + assert np.array_equal(item1["image"], item2["image"]) + +def test_apply_flow_case_0(id=0): + # apply control flow operations + data1 = ds.GeneratorDataset(generator_1d, ["data"]) + + def dataset_fn(ds): + if id == 0: + ds = ds.batch(4) + elif id == 1: + ds = ds.repeat(2) + elif id == 2: + ds = ds.batch(4) + ds = ds.repeat(2) + else: + ds = ds.shuffle(buffer_size=4) + return ds + + data1 = data1.apply(dataset_fn) + num_iter = 0 + for _ in data1.create_dict_iterator(): + num_iter = num_iter + 1 + + if id == 0: + assert num_iter == 16 + elif id == 1: + assert num_iter == 128 + elif id == 2: + assert num_iter == 32 + else: + assert num_iter == 64 + +def test_apply_flow_case_1(id=1): + # apply control flow operations + data1 = ds.GeneratorDataset(generator_1d, ["data"]) + + def dataset_fn(ds): + if id == 0: + ds = ds.batch(4) + elif id == 1: + ds = ds.repeat(2) + elif id == 2: + ds = ds.batch(4) + ds = ds.repeat(2) + else: + ds = ds.shuffle(buffer_size=4) + return ds + + data1 = data1.apply(dataset_fn) + num_iter = 0 + for _ in data1.create_dict_iterator(): + num_iter = num_iter + 1 + + if id == 0: + assert num_iter == 16 + elif id == 1: + assert num_iter == 128 + elif id == 2: + assert num_iter == 32 + else: + assert num_iter == 64 + +def test_apply_flow_case_2(id=2): + # apply control flow operations + data1 = ds.GeneratorDataset(generator_1d, ["data"]) + + def dataset_fn(ds): + if id == 0: + ds = ds.batch(4) + elif id == 1: + ds = ds.repeat(2) + elif id == 2: + ds = ds.batch(4) + ds = ds.repeat(2) + else: + ds = ds.shuffle(buffer_size=4) + return ds + + data1 = data1.apply(dataset_fn) + 
num_iter = 0 + for _ in data1.create_dict_iterator(): + num_iter = num_iter + 1 + + if id == 0: + assert num_iter == 16 + elif id == 1: + assert num_iter == 128 + elif id == 2: + assert num_iter == 32 + else: + assert num_iter == 64 + +def test_apply_flow_case_3(id=3): + # apply control flow operations + data1 = ds.GeneratorDataset(generator_1d, ["data"]) + + def dataset_fn(ds): + if id == 0: + ds = ds.batch(4) + elif id == 1: + ds = ds.repeat(2) + elif id == 2: + ds = ds.batch(4) + ds = ds.repeat(2) + else: + ds = ds.shuffle(buffer_size=4) + return ds + + data1 = data1.apply(dataset_fn) + num_iter = 0 + for _ in data1.create_dict_iterator(): + num_iter = num_iter + 1 + + if id == 0: + assert num_iter == 16 + elif id == 1: + assert num_iter == 128 + elif id == 2: + assert num_iter == 32 + else: + assert num_iter == 64 + +def test_apply_exception_case(): + # apply exception operations + data1 = ds.GeneratorDataset(generator_1d, ["data"]) + + def dataset_fn(ds): + ds = ds.repeat(2) + return ds.batch(4) + + def exception_fn(ds): + return np.array([[0], [1], [3], [4], [5]]) + + try: + data1 = data1.apply("123") + for _ in data1.create_dict_iterator(): + pass + assert False + except TypeError: + pass + + try: + data1 = data1.apply(exception_fn) + for _ in data1.create_dict_iterator(): + pass + assert False + except TypeError: + pass + + try: + data2 = data1.apply(dataset_fn) + data3 = data1.apply(dataset_fn) + for item1, item2 in zip(data1.create_dict_iterator(), data2.create_dict_iterator()): + pass + assert False + except ValueError: + pass + +if __name__ == '__main__': + logger.info("Running test_apply.py test_apply_generator_case() function") + test_apply_generator_case() + + logger.info("Running test_apply.py test_apply_imagefolder_case() function") + test_apply_imagefolder_case() + + logger.info("Running test_apply.py test_apply_flow_case(id) function") + test_apply_flow_case_0() + test_apply_flow_case_1() + test_apply_flow_case_2() + test_apply_flow_case_3() + + 
logger.info("Running test_apply.py test_apply_exception_case() function") + test_apply_exception_case() + From 19a5e14b31ad4f0dc38d425c25f8abff8cb758f0 Mon Sep 17 00:00:00 2001 From: fary86 Date: Tue, 14 Apr 2020 21:38:00 +0800 Subject: [PATCH 234/367] Fix BatchNorm1d and BatchNorm2d doc bug, and slog print source path twice in log --- mindspore/ccsrc/utils/log_adapter.cc | 7 +++++++ mindspore/nn/layer/normalization.py | 2 ++ 2 files changed, 9 insertions(+) diff --git a/mindspore/ccsrc/utils/log_adapter.cc b/mindspore/ccsrc/utils/log_adapter.cc index 19482ec193..eed68cf859 100644 --- a/mindspore/ccsrc/utils/log_adapter.cc +++ b/mindspore/ccsrc/utils/log_adapter.cc @@ -96,6 +96,13 @@ static int GetGlogLevel(MsLogLevel level) { } } #else + +#undef Dlog +#define Dlog(module_id, level, format, ...) \ + do { \ + DlogInner((module_id), (level), (format), ##__VA_ARGS__); \ + } while (0) + // convert MsLogLevel to corresponding slog level static int GetSlogLevel(MsLogLevel level) { switch (level) { diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index d5082371c1..2df064353f 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -136,6 +136,7 @@ class BatchNorm1d(_BatchNorm): eps (float): A value added to the denominator for numerical stability. Default: 1e-5. momentum (float): A floating hyperparameter of the momentum for the running_mean and running_var computation. Default: 0.9. + affine (bool): A bool value when set to True, gamma and beta can be learnable. Default: True. gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight. The values of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform', 'he_uniform', etc. Default: 'ones'. @@ -187,6 +188,7 @@ class BatchNorm2d(_BatchNorm): eps (float): A value added to the denominator for numerical stability. Default: 1e-5. 
momentum (float): A floating hyperparameter of the momentum for the running_mean and running_var computation. Default: 0.9. + affine (bool): A bool value when set to True, gamma and beta can be learnable. Default: True. gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight. The values of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform', 'he_uniform', etc. Default: 'ones'. From 6a2cf4b6e698041dfffa1bbd61954c4f2ee677ef Mon Sep 17 00:00:00 2001 From: zhaozhenlong Date: Thu, 9 Apr 2020 16:58:18 +0800 Subject: [PATCH 235/367] ssim impl code --- mindspore/nn/layer/__init__.py | 6 +- mindspore/nn/layer/basic.py | 45 ----- mindspore/nn/layer/image.py | 197 +++++++++++++++++++++ tests/ut/python/nn/test_image_gradients.py | 2 +- 4 files changed, 202 insertions(+), 48 deletions(-) create mode 100644 mindspore/nn/layer/image.py diff --git a/mindspore/nn/layer/__init__.py b/mindspore/nn/layer/__init__.py index f51eff2b31..9c2c30c914 100644 --- a/mindspore/nn/layer/__init__.py +++ b/mindspore/nn/layer/__init__.py @@ -22,9 +22,10 @@ from .normalization import BatchNorm1d, BatchNorm2d, LayerNorm from .container import SequentialCell, CellList from .conv import Conv2d, Conv2dTranspose from .lstm import LSTM -from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot, ImageGradients, Pad +from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot, Pad from .embedding import Embedding from .pooling import AvgPool2d, MaxPool2d +from .image import ImageGradients, SSIM __all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'PReLU', 'get_activation', 'LeakyReLU', 'HSigmoid', 'HSwish', 'ELU', @@ -32,7 +33,8 @@ __all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'SequentialCell', 'CellList', 'Conv2d', 'Conv2dTranspose', 'LSTM', - 'Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'ImageGradients', + 'Dropout', 'Flatten', 'Dense', 
'ClipByNorm', 'Norm', 'OneHot', 'Embedding', 'AvgPool2d', 'MaxPool2d', 'Pad', + 'ImageGradients', 'SSIM', ] diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py index 5b36755d16..64c4cfd93b 100644 --- a/mindspore/nn/layer/basic.py +++ b/mindspore/nn/layer/basic.py @@ -372,51 +372,6 @@ class OneHot(Cell): return self.onehot(indices, self.depth, self.on_value, self.off_value) -class ImageGradients(Cell): - r""" - Returns two tensors, the first is along the height dimension and the second is along the width dimension. - - Assume an image shape is :math:`h*w`. The gradients along the height and the width are :math:`dy` and :math:`dx`, - respectively. - - .. math:: - dy[i] = \begin{cases} image[i+1, :]-image[i, :], &if\ 0<=i>> net = nn.ImageGradients() - >>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32) - >>> net(image) - [[[[2,2] - [0,0]]]] - [[[[1,0] - [1,0]]]] - """ - def __init__(self): - super(ImageGradients, self).__init__() - - def construct(self, images): - batch_size, depth, height, width = P.Shape()(images) - dy = images[:, :, 1:, :] - images[:, :, :height - 1, :] - dy_last = P.Fill()(P.DType()(images), (batch_size, depth, 1, width), 0) - dy = P.Concat(2)((dy, dy_last)) - - dx = images[:, :, :, 1:] - images[:, :, :, :width - 1] - dx_last = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0) - dx = P.Concat(3)((dx, dx_last)) - return dy, dx - - class Pad(Cell): """ Pads the input tensor according to the paddings and mode. diff --git a/mindspore/nn/layer/image.py b/mindspore/nn/layer/image.py new file mode 100644 index 0000000000..6121776f59 --- /dev/null +++ b/mindspore/nn/layer/image.py @@ -0,0 +1,197 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +"""image""" +import numpy as np +import mindspore.common.dtype as mstype +from mindspore.common.tensor import Tensor +from mindspore.ops import operations as P +from mindspore.ops import functional as F +from mindspore.ops.primitive import constexpr +from mindspore._checkparam import ParamValidator as validator +from mindspore._checkparam import Rel +from ..cell import Cell + + +class ImageGradients(Cell): + r""" + Returns two tensors, the first is along the height dimension and the second is along the width dimension. + + Assume an image shape is :math:`h*w`. The gradients along the height and the width are :math:`dy` and :math:`dx`, + respectively. + + .. 
math:: + dy[i] = \begin{cases} image[i+1, :]-image[i, :], &if\ 0<=i>> net = nn.ImageGradients() + >>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32) + >>> net(image) + [[[[2,2] + [0,0]]]] + [[[[1,0] + [1,0]]]] + """ + def __init__(self): + super(ImageGradients, self).__init__() + + def construct(self, images): + batch_size, depth, height, width = P.Shape()(images) + dy = images[:, :, 1:, :] - images[:, :, :height - 1, :] + dy_last = P.Fill()(P.DType()(images), (batch_size, depth, 1, width), 0) + dy = P.Concat(2)((dy, dy_last)) + + dx = images[:, :, :, 1:] - images[:, :, :, :width - 1] + dx_last = P.Fill()(P.DType()(images), (batch_size, depth, height, 1), 0) + dx = P.Concat(3)((dx, dx_last)) + return dy, dx + + +@constexpr +def _gauss_kernel_helper(filter_size): + """gauss kernel helper""" + filter_size = F.scalar_cast(filter_size, mstype.int32) + coords = () + for i in range(filter_size): + i_cast = F.scalar_cast(i, mstype.float32) + offset = F.scalar_cast(filter_size-1, mstype.float32)/2.0 + element = i_cast-offset + coords = coords+(element,) + g = np.square(coords).astype(np.float32) + g = Tensor(g) + return filter_size, g + + +class SSIM(Cell): + r""" + Returns SSIM index between img1 and img2. + + Its implementation is based on Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). `Image quality + assessment: from error visibility to structural similarity `_. + IEEE transactions on image processing. + + .. math:: + + l(x,y)&=\frac{2\mu_x\mu_y+C_1}{\mu_x^2+\mu_y^2+C_1}, C_1=(K_1L)^2.\\ + c(x,y)&=\frac{2\sigma_x\sigma_y+C_2}{\sigma_x^2+\sigma_y^2+C_2}, C_2=(K_2L)^2.\\ + s(x,y)&=\frac{\sigma_{xy}+C_3}{\sigma_x\sigma_y+C_3}, C_3=C_2/2.\\ + SSIM(x,y)&=l*c*s\\&=\frac{(2\mu_x\mu_y+C_1)(2\sigma_{xy}+C_2}{(\mu_x^2+\mu_y^2+C_1)(\sigma_x^2+\sigma_y^2+C_2)}. + + Args: + max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images). + Default: 1.0. + filter_size (int): The size of the Gaussian filter. 
Default: 11. + filter_sigma (float): The standard deviation of Gaussian kernel. Default: 1.5. + k1 (float): The constant used to generate c1 in the luminance comparison function. Default: 0.01. + k2 (float): The constant used to generate c2 in the contrast comparison function. Default: 0.03. + + Inputs: + - **img1** (Tensor) - The first image batch with format 'NCHW'. It should be the same shape and dtype as img2. + - **img2** (Tensor) - The second image batch with format 'NCHW'. It should be the same shape and dtype as img1. + + Outputs: + Tensor, has the same dtype as img1. It is a 1-D tensor with shape N, where N is the batch num of img1. + + Examples: + >>> net = nn.SSIM() + >>> img1 = Tensor(np.random.random((1,3,16,16))) + >>> img2 = Tensor(np.random.random((1,3,16,16))) + >>> ssim = net(img1, img2) + """ + def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): + super(SSIM, self).__init__() + validator.check_type('max_val', max_val, [int, float]) + validator.check('max_val', max_val, '', 0.0, Rel.GT) + self.max_val = max_val + self.filter_size = validator.check_integer('filter_size', filter_size, 1, Rel.GE) + self.filter_sigma = validator.check_float_positive('filter_sigma', filter_sigma) + validator.check_type('k1', k1, [float]) + self.k1 = validator.check_number_range('k1', k1, 0.0, 1.0, Rel.INC_NEITHER) + validator.check_type('k2', k2, [float]) + self.k2 = validator.check_number_range('k2', k2, 0.0, 1.0, Rel.INC_NEITHER) + self.mean = P.DepthwiseConv2dNative(channel_multiplier=1, kernel_size=filter_size) + + def construct(self, img1, img2): + max_val = self._convert_img_dtype_to_float32(self.max_val, self.max_val) + img1 = self._convert_img_dtype_to_float32(img1, self.max_val) + img2 = self._convert_img_dtype_to_float32(img2, self.max_val) + + kernel = self._fspecial_gauss(self.filter_size, self.filter_sigma) + kernel = P.Tile()(kernel, (1, P.Shape()(img1)[1], 1, 1)) + + mean_ssim = self._calculate_mean_ssim(img1, img2, 
kernel, max_val, self.k1, self.k2) + + return mean_ssim + + def _convert_img_dtype_to_float32(self, img, max_val): + """convert img dtype to float32""" + # Ususally max_val is 1.0 or 255, we will do the scaling if max_val > 1. + # We will scale img pixel value if max_val > 1. and just cast otherwise. + ret = P.Cast()(img, mstype.float32) + max_val = F.scalar_cast(max_val, mstype.float32) + if max_val > 1.: + scale = 1./max_val + ret = ret*scale + return ret + + def _calculate_mean_ssim(self, x, y, kernel, max_val, k1, k2): + """calculate mean ssim""" + c1 = (k1*max_val)*(k1*max_val) + c2 = (k2*max_val)*(k2*max_val) + + # SSIM luminance formula + # (2 * mean_{x} * mean_{y} + c1) / (mean_{x}**2 + mean_{y}**2 + c1) + mean_x = self.mean(x, kernel) + mean_y = self.mean(y, kernel) + square_sum = F.square(mean_x)+F.square(mean_y) + luminance = (2*mean_x*mean_y+c1)/(square_sum+c1) + + # SSIM contrast*structure formula (when c3 = c2/2) + # (2 * conv_{xy} + c2) / (conv_{xx} + conv_{yy} + c2), equals to + # (2 * (mean_{xy} - mean_{x}*mean_{y}) + c2) / (mean_{xx}-mean_{x}**2 + mean_{yy}-mean_{y}**2 + c2) + mean_xy = self.mean(x*y, kernel) + mean_square_add = self.mean(F.square(x)+F.square(y), kernel) + + cs = (2*(mean_xy-mean_x*mean_y)+c2)/(mean_square_add-square_sum+c2) + + # SSIM formula + # luminance * cs + ssim = luminance*cs + + mean_ssim = P.ReduceMean()(ssim, (-3, -2, -1)) + + return mean_ssim + + def _fspecial_gauss(self, filter_size, filter_sigma): + """get gauss kernel""" + filter_size, g = _gauss_kernel_helper(filter_size) + + square_sigma_scale = -0.5/(filter_sigma * filter_sigma) + g = g*square_sigma_scale + g = F.reshape(g, (1, -1))+F.reshape(g, (-1, 1)) + g = F.reshape(g, (1, -1)) + g = P.Softmax()(g) + ret = F.reshape(g, (1, 1, filter_size, filter_size)) + return ret diff --git a/tests/ut/python/nn/test_image_gradients.py b/tests/ut/python/nn/test_image_gradients.py index f65f38ec0a..a2b9495443 100644 --- a/tests/ut/python/nn/test_image_gradients.py +++ 
b/tests/ut/python/nn/test_image_gradients.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ -""" test loss """ +""" test image gradients """ import numpy as np import mindspore.nn as nn import mindspore.context as context From 0fc23eee0f13a709687673e4f0489cf9c3b6562a Mon Sep 17 00:00:00 2001 From: hesham Date: Sat, 11 Apr 2020 18:00:14 -0400 Subject: [PATCH 236/367] Support nested repeat --- .../dataset/engine/datasetops/dataset_op.cc | 17 ++- .../dataset/engine/datasetops/dataset_op.h | 10 +- .../dataset/engine/datasetops/parallel_op.h | 16 +- .../dataset/engine/datasetops/pipeline_op.h | 15 +- .../dataset/engine/datasetops/repeat_op.cc | 36 ++--- .../dataset/engine/datasetops/repeat_op.h | 10 +- .../ccsrc/dataset/engine/execution_tree.cc | 25 ++-- mindspore/dataset/engine/datasets.py | 2 + tests/ut/cpp/dataset/repeat_op_test.cc | 27 +++- tests/ut/python/dataset/test_repeat.py | 137 +++++++++++++++++- 10 files changed, 233 insertions(+), 62 deletions(-) diff --git a/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc index f51c2a1539..7edf1dd288 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc @@ -161,15 +161,18 @@ Status DatasetOp::EofReceived(int32_t worker_id) { return (out_connector_->Add(static_cast(worker_id), std::move(eof_buffer))); } -// During tree prepare phase, operators may have specific operations to perform depending on +// During tree prepare phase, operators may have specific pre-operations to perform depending on // their role. 
-Status DatasetOp::PrepareNodeAction() { +Status DatasetOp::PrepareNodePreAction() { + if (BitTest(tree_->PrepareFlags(), ExecutionTree::kDePrepRepeat)) set_control_flag(kDeOpRepeated); + return Status::OK(); +} +// During tree prepare phase, operators may have specific post-operations to perform depending on +// their role. +Status DatasetOp::PrepareNodePostAction() { // If this op does not have any children and it is in a repeat path of the tree... - if (child_.size() == 0 && BitTest(tree_->PrepareFlags(), ExecutionTree::kDePrepRepeat)) { - // Then, flag this operator as a leaf node in a repeat path of tree execution. - BitSet(&op_ctrl_flags_, kDeOpRepeated); - - // Secondly, push ourselves onto the tree repeat stack. Later, the repeat operator + if (child_.empty() && BitTest(op_ctrl_flags_, kDeOpRepeated)) { + // push ourselves onto the tree repeat stack. Later, the repeat operator // above us will consume them. tree_->AddToRepeatStack(shared_from_this()); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.h b/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.h index a7d87c3092..0111f5239a 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.h @@ -150,11 +150,17 @@ class DatasetOp : public std::enable_shared_from_this { return Status::OK(); } - // During tree prepare phase, operators may have specific operations to perform depending on + // During tree prepare phase, operators may have specific pre-operations to perform depending on // their role. // @notes Derived versions of this function should always call it's superclass version first // before providing their own implementations. - virtual Status PrepareNodeAction(); + virtual Status PrepareNodePreAction(); + + // During tree prepare phase, operators may have specific post-operations to perform depending on + // their role. 
+ // @notes Derived versions of this function should always call it's superclass version first + // before providing their own implementations. + virtual Status PrepareNodePostAction(); // Getter function // @return The operator id diff --git a/mindspore/ccsrc/dataset/engine/datasetops/parallel_op.h b/mindspore/ccsrc/dataset/engine/datasetops/parallel_op.h index ceb7f2c4ac..142ec78360 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/parallel_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/parallel_op.h @@ -64,14 +64,24 @@ class ParallelOp : public DatasetOp { return out; } - // During tree prepare phase, operators may have specific operations to perform depending on + // During tree prepare phase, operators may have specific pre-operations to perform depending on // their role. // @notes Derived versions of this function should always call it's superclass version first // before providing their own implementations. // @return Status - The error return code - Status PrepareNodeAction() override { + Status PrepareNodePreAction() override { // Run common code from super class before adding ParallelOp specific logic - return (DatasetOp::PrepareNodeAction()); + return (DatasetOp::PrepareNodePreAction()); + } + + // During tree prepare phase, operators may have specific post-operations to perform depending on + // their role. + // @notes Derived versions of this function should always call it's superclass version first + // before providing their own implementations. + // @return Status - The error return code + Status PrepareNodePostAction() override { + // Run common code from super class before adding ParallelOp specific logic + return (DatasetOp::PrepareNodePostAction()); } // Override base class reset to provide reset actions specific to the ParallelOp class. 
diff --git a/mindspore/ccsrc/dataset/engine/datasetops/pipeline_op.h b/mindspore/ccsrc/dataset/engine/datasetops/pipeline_op.h index ee20f1d373..a14279032d 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/pipeline_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/pipeline_op.h @@ -64,13 +64,22 @@ class PipelineOp : public DatasetOp { // @return The number of threads that push data to the output connector int32_t num_producers() const override { return 1; } - // During tree prepare phase, operators may have specific operations to perform depending on + // During tree prepare phase, operators may have specific pre-operations to perform depending on // their role. // @notes Derived versions of this function should always call it's superclass version first // before providing their own implementations. - Status PrepareNodeAction() override { + Status PrepareNodePreAction() override { // Run common code from super class before adding PipelineOp specific logic - return (DatasetOp::PrepareNodeAction()); + return (DatasetOp::PrepareNodePreAction()); + } + + // During tree prepare phase, operators may have specific post-operations to perform depending on + // their role. + // @notes Derived versions of this function should always call it's superclass version first + // before providing their own implementations. 
+ Status PrepareNodePostAction() override { + // Run common code from super class before adding PipelineOp specific logic + return (DatasetOp::PrepareNodePostAction()); } protected: diff --git a/mindspore/ccsrc/dataset/engine/datasetops/repeat_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/repeat_op.cc index 32723a9bd4..33c731c400 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/repeat_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/repeat_op.cc @@ -58,10 +58,10 @@ void RepeatOp::Print(std::ostream &out, bool show_all) const { out << "RepeatOp:" << "\nCurrent repeat count: " << repeat_count_ << "\nMax repeat count: " << max_repeats_ << "\nLeaf Nodes in my execution path:"; - if (!leaf_ops_.empty()) { + if (!eoe_ops_.empty()) { out << "\n"; - for (size_t i = 0; i < leaf_ops_.size(); i++) { - out << " Operator: " << leaf_ops_[i]->id() << "\n"; + for (size_t i = 0; i < eoe_ops_.size(); i++) { + out << " Operator: " << eoe_ops_[i]->id() << "\n"; } } else { out << " kNone."; @@ -71,21 +71,17 @@ void RepeatOp::Print(std::ostream &out, bool show_all) const { // Base-class override for executing specific RepeatOp configurations. This code will be called // during the execution tree prepare phase when it is visiting this operator. -Status RepeatOp::PrepareNodeAction() { +Status RepeatOp::PrepareNodePostAction() { // Run any common code from super class first before adding our own specific logic - RETURN_IF_NOT_OK(PipelineOp::PrepareNodeAction()); + RETURN_IF_NOT_OK(PipelineOp::PrepareNodePostAction()); std::shared_ptr leaf_op = tree_->PopFromRepeatStack(); while (leaf_op != nullptr) { // Track the leaf operators that are under this repeat op. - leaf_ops_.push_back(leaf_op); - - // Special case. 
If the repeat count is 1, then pre-flag the leaf nodes - // to tell them they are already at their last op: - if (max_repeats_ == 1) { - leaf_op->set_control_flag(kDeOpLastRepeat); - } + eoe_ops_.push_back(leaf_op); leaf_op = tree_->PopFromRepeatStack(); } + // Push ourselves to the stack in case one of our ascendants is repeat too. + tree_->AddToRepeatStack(shared_from_this()); return Status::OK(); } @@ -127,16 +123,20 @@ Status RepeatOp::GetNextBuffer(std::unique_ptr *p_buffer, int32_t wo Status RepeatOp::EoeReceived(int32_t worker_id) { repeat_count_++; MS_LOG(INFO) << "Repeat operator end of epoch message received. Repeat count is now: " << repeat_count_ << "."; - - // If we've reached the requested repeat count, then flag the leaf nodes + bool repeated = BitTest(op_ctrl_flags_, kDeOpRepeated); + bool last_repeat = BitTest(op_ctrl_flags_, kDeOpLastRepeat); + // If we've reached the requested repeat count, then flag the eoe nodes // to tell them they've got one more epoch to perform. When they reach the end - // of the last epoch, they quit rather than loop again. - if (max_repeats_ != kInfiniteRepeat && repeat_count_ == (max_repeats_ - 1)) { - for (size_t i = 0; i < leaf_ops_.size(); i++) { - leaf_ops_[i]->set_control_flag(kDeOpLastRepeat); + // of the last epoch, they quit rather than loop again. This happens in two cases: + // 1- We are also repeated (by another repeat op) and we are at the last repetition. 
Or, + // 2- We are not repeated + if (max_repeats_ != kInfiniteRepeat && repeat_count_ == (max_repeats_ - 1) && (!repeated || last_repeat)) { + for (auto &eoe_op : eoe_ops_) { + eoe_op->set_control_flag(kDeOpLastRepeat); } } if (repeat_count_ == max_repeats_) { + repeat_count_ = 0; state_ = OpState::kDeOpIdle; return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/repeat_op.h b/mindspore/ccsrc/dataset/engine/datasetops/repeat_op.h index 5cc7ec2efa..8497b4cf3c 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/repeat_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/repeat_op.h @@ -87,8 +87,8 @@ class RepeatOp : public PipelineOp { uint32_t PrepareFlags() const override; // Base-class override for executing specific RepeatOp configurations. This code will be called - // during the execution tree prepare phase when it is visiting this operator. - Status PrepareNodeAction() override; + // during the execution tree post-prepare phase when it is visiting this operator. + Status PrepareNodePostAction() override; // This function returns the buffer that is at the top of our output connector. The caller is // typically our parent node, when the parent is asking us to provide the next buffer of data. @@ -119,9 +119,9 @@ class RepeatOp : public PipelineOp { int32_t num_producers() const override; private: - int32_t max_repeats_; // The number of repeats that the user requested - int32_t repeat_count_; // A counter for the current number of executed repeats - std::vector> leaf_ops_; // List of leaf operators underneath this repeat. + int32_t max_repeats_; // The number of repeats that the user requested + int32_t repeat_count_; // A counter for the current number of executed repeats + std::vector> eoe_ops_; // List of operators that can generate EOE underneath this repeat. 
}; } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/dataset/engine/execution_tree.cc b/mindspore/ccsrc/dataset/engine/execution_tree.cc index 20fcb836c5..ebfa532195 100644 --- a/mindspore/ccsrc/dataset/engine/execution_tree.cc +++ b/mindspore/ccsrc/dataset/engine/execution_tree.cc @@ -162,30 +162,25 @@ Status ExecutionTree::Prepare() { // Recursive function used during prepare phase to visit a node and drive any pre- and post- // node actions during a tree walk. Status ExecutionTree::PrepareNode(const std::shared_ptr &dataset_op) { - int32_t num_children = dataset_op->child_.size(); + // execute PreAction + RETURN_IF_NOT_OK(dataset_op->PrepareNodePreAction()); - // Before going down into children, make any prepare flags updates based on this - // operator. + // Before going down into children, make any prepare flags updates based on this operator. uint32_t op_prep_flags = dataset_op->PrepareFlags(); - // Sanity check. In future we can support nested repeats. for now it's not allowed. - // If somebody above us already set the repeat flag, and now we are another repeat... - if (BitTest(op_prep_flags, kDePrepRepeat) && BitTest(prepare_flags_, kDePrepRepeat)) { - std::string err_msg("Nested RepeatOp detected! This is not supported yet."); - RETURN_STATUS_UNEXPECTED(err_msg); - } BitSet(&prepare_flags_, op_prep_flags); // Now, descend to children - for (int32_t i = 0; i < num_children; ++i) { - RETURN_IF_NOT_OK(this->PrepareNode(dataset_op->child_[i])); + for (const auto &i : dataset_op->child_) { + RETURN_IF_NOT_OK(this->PrepareNode(i)); } - // No more children, now we execute any prepare actions before going back up the - // the tree on recursive function exit - RETURN_IF_NOT_OK(dataset_op->PrepareNodeAction()); - // Then clear the flags from this op now that we have prepared it. 
BitClear(&prepare_flags_, op_prep_flags); + + // No more children, now we execute any prepare actions before going back up the + // the tree on recursive function + RETURN_IF_NOT_OK(dataset_op->PrepareNodePostAction()); + return Status::OK(); } diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 3d660d58a8..e40c24c140 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -417,6 +417,8 @@ class Dataset: >>> repeat_and_shuffle = data.repeat(50) >>> repeat_and_shuffle = repeat_and_shuffle.shuffle(10) """ + if count == 1: + return self return RepeatDataset(self, count) @check_zip_dataset diff --git a/tests/ut/cpp/dataset/repeat_op_test.cc b/tests/ut/cpp/dataset/repeat_op_test.cc index 99e91afe81..e32e98cbd7 100644 --- a/tests/ut/cpp/dataset/repeat_op_test.cc +++ b/tests/ut/cpp/dataset/repeat_op_test.cc @@ -33,18 +33,29 @@ TEST_F(MindDataTestrepeat_op, Testrepeat_opFuntions) { auto my_tree = std::make_shared(); std::shared_ptr parent_op = std::make_shared(32); - - std::shared_ptr leaf_op = std::make_shared(16); + std::string dataset_path; + dataset_path = datasets_root_path_ + "/testTFTestAllTypes/test.data"; +// TFReaderOp + std::shared_ptr my_tfreader_op; + TFReaderOp::Builder builder; + builder.SetDatasetFilesList({dataset_path}) + .SetRowsPerBuffer(16) + .SetWorkerConnectorSize(16) + .SetNumWorkers(16); + Status rc= builder.Build(&my_tfreader_op); + ASSERT_TRUE(rc.IsOk()); + rc = my_tree->AssociateNode(my_tfreader_op); + ASSERT_TRUE(rc.IsOk()); my_tree->AssociateNode(parent_op); - my_tree->AssociateNode(leaf_op); ASSERT_NE(parent_op, nullptr); - ASSERT_NE(leaf_op, nullptr); - parent_op->AddChild(std::move(leaf_op)); - parent_op->Print(std::cout, false); - parent_op->PrepareNodeAction(); + ASSERT_NE(my_tfreader_op, nullptr); + parent_op->AddChild(std::move(my_tfreader_op)); + MS_LOG(INFO) << parent_op; + my_tree->Prepare(); + RepeatOp RepeatOpOp(); std::shared_ptr repeat_op; - Status 
rc = RepeatOp::Builder(3).Build(&repeat_op); + rc = RepeatOp::Builder(3).Build(&repeat_op); ASSERT_NE(repeat_op, nullptr); } diff --git a/tests/ut/python/dataset/test_repeat.py b/tests/ut/python/dataset/test_repeat.py index 196a62c315..cb7a80e3d1 100644 --- a/tests/ut/python/dataset/test_repeat.py +++ b/tests/ut/python/dataset/test_repeat.py @@ -16,6 +16,7 @@ import mindspore.dataset.transforms.vision.c_transforms as vision from util import save_and_check import mindspore.dataset as ds +import numpy as np from mindspore import log as logger DATA_DIR_TF = ["../data/dataset/testTFTestAllTypes/test.data"] @@ -95,6 +96,141 @@ def test_tf_repeat_03(): assert num_iter == 2 +def generator(): + for i in range(3): + yield np.array([i]), + + +def test_nested_repeat1(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.repeat(2) + data = data.repeat(3) + + for i, d in enumerate(data): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data]) == 2 * 3 * 3 + + +def test_nested_repeat2(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.repeat(1) + data = data.repeat(1) + + for i, d in enumerate(data): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data]) == 3 + + +def test_nested_repeat3(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.repeat(1) + data = data.repeat(2) + + for i, d in enumerate(data): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data]) == 2 * 3 + + +def test_nested_repeat4(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.repeat(2) + data = data.repeat(1) + + for i, d in enumerate(data): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data]) == 2 * 3 + + +def test_nested_repeat5(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.batch(3) + data = data.repeat(2) + data = data.repeat(3) + + for i, d in enumerate(data): + assert np.array_equal(d[0], np.asarray([[0], [1], [2]])) + + assert sum([1 for _ in data]) == 6 + + +def test_nested_repeat6(): + data = 
ds.GeneratorDataset(generator, ["data"]) + data = data.repeat(2) + data = data.batch(3) + data = data.repeat(3) + + for i, d in enumerate(data): + assert np.array_equal(d[0], np.asarray([[0], [1], [2]])) + + assert sum([1 for _ in data]) == 6 + + +def test_nested_repeat7(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.repeat(2) + data = data.repeat(3) + data = data.batch(3) + + for i, d in enumerate(data): + assert np.array_equal(d[0], np.asarray([[0], [1], [2]])) + + assert sum([1 for _ in data]) == 6 + + +def test_nested_repeat8(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.batch(2, drop_remainder=False) + data = data.repeat(2) + data = data.repeat(3) + + for i, d in enumerate(data): + if i % 2 == 0: + assert np.array_equal(d[0], np.asarray([[0], [1]])) + else: + assert np.array_equal(d[0], np.asarray([[2]])) + + assert sum([1 for _ in data]) == 6 * 2 + + +def test_nested_repeat9(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.repeat() + data = data.repeat(3) + + for i, d in enumerate(data): + assert i % 3 == d[0][0] + if i == 10: + break + + +def test_nested_repeat10(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.repeat(3) + data = data.repeat() + + for i, d in enumerate(data): + assert i % 3 == d[0][0] + if i == 10: + break + + +def test_nested_repeat11(): + data = ds.GeneratorDataset(generator, ["data"]) + data = data.repeat(2) + data = data.repeat(3) + data = data.repeat(4) + data = data.repeat(5) + + for i, d in enumerate(data): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data]) == 2 * 3 * 4 * 5 * 3 + + if __name__ == "__main__": logger.info("--------test tf repeat 01---------") # test_repeat_01() @@ -104,4 +240,3 @@ if __name__ == "__main__": logger.info("--------test tf repeat 03---------") test_tf_repeat_03() - From 0099f0108ede55c8f723d6b69be73afedab57853 Mon Sep 17 00:00:00 2001 From: zjun Date: Tue, 14 Apr 2020 22:15:44 +0800 Subject: [PATCH 237/367] fix akg 
register bug --- mindspore/ops/_op_impl/akg/gpu/hsigmoid.py | 2 +- mindspore/ops/_op_impl/akg/gpu/hswish.py | 2 +- mindspore/ops/_op_impl/akg/gpu/hswish_grad.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py b/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py index 31fe332206..4e802c1cad 100644 --- a/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py +++ b/mindspore/ops/_op_impl/akg/gpu/hsigmoid.py @@ -24,7 +24,7 @@ hsigmoid_op_info = AkgRegOp("HSigmoid") \ .get_op_info() -@op_info_register(hsigmoidgrad_op_info) +@op_info_register(hsigmoid_op_info) def _hsigmoid_akg(): """HSigmoid AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/hswish.py b/mindspore/ops/_op_impl/akg/gpu/hswish.py index 17364de6cb..29f20bafae 100644 --- a/mindspore/ops/_op_impl/akg/gpu/hswish.py +++ b/mindspore/ops/_op_impl/akg/gpu/hswish.py @@ -24,7 +24,7 @@ hswish_op_info = AkgRegOp("HSwish") \ .get_op_info() -@op_info_register(hsigmoidgrad_op_info) +@op_info_register(hswish_op_info) def _hswish_akg(): """HSwish AutoDiff register""" return diff --git a/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py b/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py index 503dd9a5f1..38e8c78e28 100644 --- a/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py +++ b/mindspore/ops/_op_impl/akg/gpu/hswish_grad.py @@ -15,7 +15,7 @@ """HSwishGrad op""" from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -hswishgrad_op_info = AkgRegOp("HSwishGrad") \ +hswish_grad_op_info = AkgRegOp("HSwishGrad") \ .fusion_type("OPAQUE") \ .input(0, "y_grad") \ .input(1, "x") \ @@ -25,7 +25,7 @@ hswishgrad_op_info = AkgRegOp("HSwishGrad") \ .get_op_info() -@op_info_register(hsigmoidgrad_op_info) +@op_info_register(hswish_grad_op_info) def _hswish_grad_akg(): """HSwishGrad AutoDiff register""" return From 9739d3b048cd2471b6032339def5e54e20f60f6f Mon Sep 17 00:00:00 2001 From: Junhan Hu Date: Sun, 29 Mar 2020 23:21:21 -0400 Subject: [PATCH 238/367] 
Add CPP sampler support for GeneratorDataset --- mindspore/ccsrc/dataset/api/de_pipeline.cc | 2 +- .../ccsrc/dataset/api/python_bindings.cc | 11 +- mindspore/ccsrc/dataset/core/tensor.cc | 2 + .../engine/datasetops/source/celeba_op.cc | 2 +- .../engine/datasetops/source/cifar_op.cc | 2 +- .../datasetops/source/image_folder_op.cc | 2 +- .../engine/datasetops/source/manifest_op.cc | 2 +- .../engine/datasetops/source/mnist_op.cc | 2 +- .../source/sampler/distributed_sampler.cc | 5 +- .../source/sampler/distributed_sampler.h | 6 +- .../datasetops/source/sampler/pk_sampler.cc | 12 +- .../datasetops/source/sampler/pk_sampler.h | 5 +- .../source/sampler/random_sampler.cc | 5 +- .../source/sampler/random_sampler.h | 6 +- .../datasetops/source/sampler/sampler.cc | 49 ++++- .../datasetops/source/sampler/sampler.h | 18 +- .../source/sampler/sequential_sampler.cc | 4 +- .../source/sampler/sequential_sampler.h | 6 +- .../source/sampler/subset_random_sampler.cc | 5 +- .../source/sampler/subset_random_sampler.h | 3 +- .../source/sampler/weighted_random_sampler.cc | 32 +-- .../source/sampler/weighted_random_sampler.h | 5 +- .../engine/datasetops/source/voc_op.cc | 2 +- mindspore/dataset/engine/datasets.py | 194 ++++++++++++++---- mindspore/dataset/engine/samplers.py | 1 - mindspore/dataset/engine/validators.py | 44 ++-- .../cpp/dataset/stand_alone_samplers_test.cc | 4 +- .../cpp/dataset/subset_random_sampler_test.cc | 12 +- .../dataset/weighted_random_sampler_test.cc | 24 +-- tests/ut/python/dataset/test_generator.py | 71 +++++++ tests/ut/python/dataset/test_sampler.py | 21 ++ 31 files changed, 432 insertions(+), 127 deletions(-) diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.cc b/mindspore/ccsrc/dataset/api/de_pipeline.cc index f572db0cdf..b64d40125e 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.cc +++ b/mindspore/ccsrc/dataset/api/de_pipeline.cc @@ -517,7 +517,7 @@ Status DEPipeline::ParseGeneratorOp(const py::dict &args, std::shared_ptr(obj)) { std::string err_msg 
= "Error: generator is invalid or not set."; diff --git a/mindspore/ccsrc/dataset/api/python_bindings.cc b/mindspore/ccsrc/dataset/api/python_bindings.cc index 3d543f946b..0633af4914 100644 --- a/mindspore/ccsrc/dataset/api/python_bindings.cc +++ b/mindspore/ccsrc/dataset/api/python_bindings.cc @@ -384,7 +384,16 @@ void bindTensorOps4(py::module *m) { } void bindSamplerOps(py::module *m) { - (void)py::class_>(*m, "Sampler"); + (void)py::class_>(*m, "Sampler") + .def("set_num_rows", [](Sampler &self, int64_t rows) { THROW_IF_ERROR(self.SetNumRowsInDataset(rows)); }) + .def("set_num_samples", [](Sampler &self, int64_t samples) { THROW_IF_ERROR(self.SetNumSamples(samples)); }) + .def("initialize", [](Sampler &self) { THROW_IF_ERROR(self.InitSampler()); }) + .def("get_indices", [](Sampler &self) { + py::array ret; + THROW_IF_ERROR(self.GetAllIdsThenReset(&ret)); + return ret; + }); + (void)py::class_>(*m, "ShardOperator"); (void)py::class_>(*m, "DistributedSampler") diff --git a/mindspore/ccsrc/dataset/core/tensor.cc b/mindspore/ccsrc/dataset/core/tensor.cc index 8f0eae459a..8fd1f8d48d 100644 --- a/mindspore/ccsrc/dataset/core/tensor.cc +++ b/mindspore/ccsrc/dataset/core/tensor.cc @@ -491,6 +491,8 @@ Status Tensor::GetItemAt(T *o, const std::vector &index) const { // return data as numpy, should return status Status Tensor::GetDataAsNumpy(py::array *data) { + RETURN_UNEXPECTED_IF_NULL(data_); + RETURN_UNEXPECTED_IF_NULL(data); if (type_ == DataType::DE_BOOL) { *data = py::array_t(shape_.AsVector(), reinterpret_cast(data_)); } else if (type_ == DataType::DE_INT8) { diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc index 0c2e20729e..87a7b3c687 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/celeba_op.cc @@ -100,7 +100,7 @@ Status CelebAOp::LaunchThreadsAndInitOp() { 
RETURN_IF_NOT_OK(tree_->LaunchWorkers(num_workers_, std::bind(&CelebAOp::WorkerEntry, this, std::placeholders::_1))); TaskManager::FindMe()->Post(); RETURN_IF_NOT_OK(ParseImageAttrInfo()); - RETURN_IF_NOT_OK(sampler_->Init(this)); + RETURN_IF_NOT_OK(sampler_->HandshakeRandomAccessOp(this)); return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc index 3e64c8a3e6..60de5a6bdf 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/cifar_op.cc @@ -240,7 +240,7 @@ Status CifarOp::Reset() { // hand shake with Sampler, allow Sampler to call RandomAccessOp's functions to get NumRows Status CifarOp::InitSampler() { - RETURN_IF_NOT_OK(sampler_->Init(this)); + RETURN_IF_NOT_OK(sampler_->HandshakeRandomAccessOp(this)); return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc index f6cf377666..0ac579a865 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/image_folder_op.cc @@ -258,7 +258,7 @@ Status ImageFolderOp::Reset() { // hand shake with Sampler, allow Sampler to call RandomAccessOp's functions to get NumRows Status ImageFolderOp::InitSampler() { - RETURN_IF_NOT_OK(sampler_->Init(this)); + RETURN_IF_NOT_OK(sampler_->HandshakeRandomAccessOp(this)); return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc index 6907647952..0139af4d9d 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/manifest_op.cc @@ -254,7 +254,7 @@ Status ManifestOp::Reset() { // hand shake with Sampler, allow Sampler to call RandomAccessOp's 
functions to get NumRows Status ManifestOp::InitSampler() { - RETURN_IF_NOT_OK(sampler_->Init(this)); + RETURN_IF_NOT_OK(sampler_->HandshakeRandomAccessOp(this)); return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc index 3431e58aea..71900f8a91 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/mnist_op.cc @@ -205,7 +205,7 @@ Status MnistOp::Reset() { // hand shake with Sampler, allow Sampler to call RandomAccessOp's functions to get NumRows Status MnistOp::InitSampler() { - RETURN_IF_NOT_OK(sampler_->Init(this)); + RETURN_IF_NOT_OK(sampler_->HandshakeRandomAccessOp(this)); return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc index 28a5705648..5b5a9321df 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.cc @@ -31,8 +31,9 @@ DistributedSampler::DistributedSampler(int64_t num_dev, int64_t dev_id, bool shu num_devices_(num_dev), shuffle_(shuffle) {} -Status DistributedSampler::Init(const RandomAccessOp *op) { - RETURN_IF_NOT_OK(Sampler::Init(op)); +Status DistributedSampler::InitSampler() { + CHECK_FAIL_RETURN_UNEXPECTED(num_samples_ > 0, "num_samples <= 0\n"); + CHECK_FAIL_RETURN_UNEXPECTED(num_rows_ > 0, "num_rows <= 0\n"); CHECK_FAIL_RETURN_UNEXPECTED(device_id_ < num_devices_ && device_id_ >= 0 && num_rows_ > 0 && num_samples_ > 0, "fail to init DistributedSampler"); rnd_.seed(seed_++); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.h b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.h index ef25b6bccf..58b469dcc8 100644 --- 
a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/distributed_sampler.h @@ -41,10 +41,8 @@ class DistributedSampler : public Sampler { // @return - The error code return Status GetNextBuffer(std::unique_ptr *out_buffer) override; - // first handshake between StorageOp and Sampler - // @param op - StorageOp pointer, pass in so Sampler can call GetNumSamples() and get ClassIds() - // @return - Status Init(const RandomAccessOp *) override; + // Init sampler, called by base class or python + Status InitSampler() override; // for next epoch of sampleIds // @return - The error code return diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc index 8c8c12fce2..8198204437 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.cc @@ -28,9 +28,7 @@ PKSampler::PKSampler(int64_t val, bool shuffle, int64_t samples_per_buffer) num_pk_samples_(0), samples_per_class_(val) {} -Status PKSampler::Init(const RandomAccessOp *op) { - RETURN_UNEXPECTED_IF_NULL(op); - RETURN_IF_NOT_OK(op->GetClassIds(&label_to_ids_)); +Status PKSampler::InitSampler() { labels_.reserve(label_to_ids_.size()); for (const auto &pair : label_to_ids_) { if (pair.second.empty() == false) { @@ -79,5 +77,13 @@ Status PKSampler::Reset() { rnd_.seed(seed_++); return Status::OK(); } + +Status PKSampler::HandshakeRandomAccessOp(const RandomAccessOp *op) { + RETURN_UNEXPECTED_IF_NULL(op); + RETURN_IF_NOT_OK(op->GetClassIds(&label_to_ids_)); + RETURN_IF_NOT_OK(InitSampler()); + return Status::OK(); +} + } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.h b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.h index 
fa2b4ed0c7..14f598a9ce 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/pk_sampler.h @@ -45,7 +45,10 @@ class PKSampler : public Sampler { // NOT YET FINISHED // first handshake between StorageOp and Sampler // @param op - StorageOp pointer, pass in so Sampler can call GetNumSamples() and get ClassIds() // @return - Status Init(const RandomAccessOp *op) override; + Status HandshakeRandomAccessOp(const RandomAccessOp *op) override; + + // init sampler, to be called by python or Handshake + Status InitSampler() override; // for next epoch of sampleIds // @return - The error code return diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc index 216f322052..de8cde409f 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.cc @@ -49,10 +49,9 @@ Status RandomSampler::GetNextBuffer(std::unique_ptr *out_buffer) { return Status::OK(); } -Status RandomSampler::Init(const RandomAccessOp *op) { - RETURN_IF_NOT_OK(Sampler::Init(op)); +Status RandomSampler::InitSampler() { num_samples_ = (user_num_samples_ < num_samples_) ? user_num_samples_ : num_samples_; - CHECK_FAIL_RETURN_UNEXPECTED(num_samples_ > 0 && num_rows_ > 0, "Fail to init RandomSampler"); + CHECK_FAIL_RETURN_UNEXPECTED(num_samples_ > 0 && num_rows_ > 0, "both num_samples & num_rows need to be positive"); samples_per_buffer_ = samples_per_buffer_ > num_samples_ ? 
num_samples_ : samples_per_buffer_; if (replacement_ == false) { shuffled_ids_.reserve(num_rows_); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.h b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.h index 54f26f352b..84a07e9fc6 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/random_sampler.h @@ -42,10 +42,8 @@ class RandomSampler : public Sampler { // @return - The error code return Status GetNextBuffer(std::unique_ptr *out_buffer) override; - // first handshake between StorageOp and Sampler - // @param op - StorageOp pointer, pass in so Sampler can call GetNumSamples() and get ClassIds() - // @return - Status Init(const RandomAccessOp *op) override; + // meant to be called by base class or python + Status InitSampler() override; // for next epoch of sampleIds // @return - The error code return diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc index aa3838f8b5..3c3f5f48e8 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.cc @@ -20,12 +20,13 @@ namespace dataset { Sampler::Sampler(int64_t samples_per_buffer) : DatasetOp(0), num_rows_(0), num_samples_(0), samples_per_buffer_(samples_per_buffer), col_desc_(nullptr) {} -Status Sampler::Init(const RandomAccessOp *op) { - CHECK_FAIL_RETURN_UNEXPECTED(op != nullptr && samples_per_buffer_ > 0, "Fail to init Sampler()\n"); +Status Sampler::HandshakeRandomAccessOp(const RandomAccessOp *op) { + CHECK_FAIL_RETURN_UNEXPECTED(op != nullptr, "RandomAccessOp is nullptr\n"); RETURN_IF_NOT_OK(op->GetNumSamples(&num_samples_)); RETURN_IF_NOT_OK(op->GetNumRowsInDataset(&num_rows_)); // It's up to the derived class to check the validity of the two args // Because some 
sampler only needs one of the arg (weighted_random_sampler) + RETURN_IF_NOT_OK(InitSampler()); // init sampler after callback return Status::OK(); } @@ -42,5 +43,49 @@ Status Sampler::CreateSamplerTensor(std::shared_ptr *sample_ids, int64_t (void)(*sample_ids)->StartAddr(); // allocate memory in case user forgets! return Status::OK(); } + +Status Sampler::GetAllIdsThenReset(py::array *data) { + std::unique_ptr db; + std::shared_ptr sample_ids; + + // check samples_per_buffer is properly set and doesn't overflow + CHECK_FAIL_RETURN_UNEXPECTED(samples_per_buffer_ + 1 > 1, "samples_per_buffer invalid"); + + // A call to derived class to get sample ids wrapped inside a buffer + RETURN_IF_NOT_OK(GetNextBuffer(&db)); + // Get the only tensor inside the buffer that contains the actual SampleIds for the entire epoch + RETURN_IF_NOT_OK(db->GetTensor(&sample_ids, 0, 0)); + // check this buffer is not a ctrl buffer + CHECK_FAIL_RETURN_UNEXPECTED(db->buffer_flags() == DataBuffer::kDeBFlagNone, "ERROR ctrl buffer received"); + { + py::gil_scoped_acquire gil_acquire; + if (Py_IsInitialized() == 0) { + return Status(StatusCode::kPythonInterpreterFailure, "Python Interpreter is finalized"); + } + try { + RETURN_IF_NOT_OK(sample_ids->GetDataAsNumpy(data)); + } catch (const std::runtime_error &e) { + return Status(StatusCode::kPyFuncException, e.what()); + } + } + // perform error checking! 
Next buffer supposed to be EOE since last one already contains all ids for current epoch + RETURN_IF_NOT_OK(GetNextBuffer(&db)); + CHECK_FAIL_RETURN_UNEXPECTED(db->eoe(), "ERROR Non EOE received"); + // Reset Sampler since this is the end of the epoch + RETURN_IF_NOT_OK(Reset()); + return Status::OK(); +} + +Status Sampler::SetNumSamples(int64_t num_samples) { + CHECK_FAIL_RETURN_UNEXPECTED(num_samples > 0, "num_samples is negative or 0"); + num_samples_ = num_samples; + return Status::OK(); +} + +Status Sampler::SetNumRowsInDataset(int64_t num_rows) { + CHECK_FAIL_RETURN_UNEXPECTED(num_rows > 0, "num_rows is negative or 0"); + num_rows_ = num_rows; + return Status::OK(); +} } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h index 801565508b..4ea221027a 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sampler.h @@ -78,14 +78,26 @@ class Sampler : public DatasetOp { // @return - The error code return Status GetNextBuffer(std::unique_ptr *out_buffer) override = 0; + // return all ids in one epoch as a numpy array, then call reset + Status GetAllIdsThenReset(py::array *data); + // for next epoch of sampleIds // @return - The error code return Status Reset() override = 0; - // first handshake between StorageOp and Sampler. Base class init will call both GetNumRows and GetNumSamples - // @param op - StorageOp pointer, pass in so Sampler can call GetNumSamples() and get ClassIds() + // setter function for num_rows_ + Status SetNumRowsInDataset(int64_t num_rows); + + // setter function for num_samples_ + Status SetNumSamples(int64_t num_samples); + + // first handshake between StorageOp and Sampler. 
This func will call getNumRows and getNumSamples + // @param op - StorageOp pointer, pass in so Sampler can call getNumSamples() and get ClassIds() // @return - virtual Status Init(const RandomAccessOp *op); + virtual Status HandshakeRandomAccessOp(const RandomAccessOp *op); + + // initialize sampler and perform checks on certain vars + virtual Status InitSampler() { return Status::OK(); } // Not meant to be called // @return diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc index 72131a6de1..a3c4fe2256 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.cc @@ -41,9 +41,7 @@ Status SequentialSampler::GetNextBuffer(std::unique_ptr *out_buffer) return Status::OK(); } -Status SequentialSampler::Init(const RandomAccessOp *op) { - RETURN_UNEXPECTED_IF_NULL(op); - RETURN_IF_NOT_OK(op->GetNumSamples(&num_samples_)); +Status SequentialSampler::InitSampler() { CHECK_FAIL_RETURN_UNEXPECTED(num_samples_ > 0 && samples_per_buffer_ > 0, "Fail to init Sequential Sampler"); samples_per_buffer_ = samples_per_buffer_ > num_samples_ ? num_samples_ : samples_per_buffer_; return Status::OK(); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.h b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.h index d119fd8d08..c38a9ed2f9 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/sequential_sampler.h @@ -32,10 +32,8 @@ class SequentialSampler : public Sampler { // Destructor. ~SequentialSampler() = default; - // Initialize the sampler. 
- // @param op - // @return Status - Status Init(const RandomAccessOp *op) override; + // init sampler, called by python + Status InitSampler() override; // for next epoch of sampleIds // @return - The error code return diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc index 16603939b3..c377fddb49 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc @@ -31,9 +31,8 @@ SubsetRandomSampler::SubsetRandomSampler(const std::vector &indices, in : Sampler(samples_per_buffer), indices_(indices), sample_id_(0), buffer_id_(0) {} // Initialized this Sampler. -Status SubsetRandomSampler::Init(const RandomAccessOp *op) { - // Calling base class init. - RETURN_IF_NOT_OK(Sampler::Init(op)); +Status SubsetRandomSampler::InitSampler() { + CHECK_FAIL_RETURN_UNEXPECTED(num_rows_ > 0, "num_rows <= 0\n"); // Initialize random generator with seed from config manager rand_gen_.seed(GetSeed()); diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.h b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.h index 38fae6b20b..1f4c155748 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/subset_random_sampler.h @@ -38,9 +38,8 @@ class SubsetRandomSampler : public Sampler { ~SubsetRandomSampler() = default; // Initialize the sampler. - // @param op (Not used in this sampler) // @return Status - Status Init(const RandomAccessOp *op) override; + Status InitSampler() override; // Reset the internal variable to the initial state and reshuffle the indices. 
// @return Status diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc index f2957e74be..06afc219e6 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc @@ -29,21 +29,21 @@ namespace dataset { // Constructor. WeightedRandomSampler::WeightedRandomSampler(const std::vector &weights, int64_t num_samples, bool replacement, int64_t samples_per_buffer) - : Sampler(samples_per_buffer), weights_(weights), replacement_(replacement), sample_id_(0), buffer_id_(0) { - num_samples_ = num_samples; // this variable is defined in base class sampler -} + : Sampler(samples_per_buffer), + weights_(weights), + replacement_(replacement), + sample_id_(0), + buffer_id_(0), + user_num_samples_(num_samples) {} // Initialized this Sampler. -Status WeightedRandomSampler::Init(const RandomAccessOp *op) { - RETURN_UNEXPECTED_IF_NULL(op); - RETURN_IF_NOT_OK(op->GetNumRowsInDataset(&num_rows_)); - +Status WeightedRandomSampler::InitSampler() { + CHECK_FAIL_RETURN_UNEXPECTED(num_rows_ > 0 && user_num_samples_, "num_samples & num_rows need to be positive"); + CHECK_FAIL_RETURN_UNEXPECTED(samples_per_buffer_ > 0, "samples_per_buffer<=0\n"); // Initialize random generator with seed from config manager rand_gen_.seed(GetSeed()); - samples_per_buffer_ = (samples_per_buffer_ > num_samples_) ? num_samples_ : samples_per_buffer_; - - CHECK_FAIL_RETURN_UNEXPECTED(num_samples_ > 0 && samples_per_buffer_ > 0, "Fail to init WeightedRandomSampler"); + samples_per_buffer_ = (samples_per_buffer_ > user_num_samples_) ? user_num_samples_ : samples_per_buffer_; if (!replacement_) { exp_dist_ = std::make_unique>(1); @@ -65,8 +65,8 @@ void WeightedRandomSampler::InitOnePassSampling() { } // Partial sort the first `numSamples` elements. 
- std::partial_sort(val_idx.begin(), val_idx.begin() + num_samples_, val_idx.end()); - for (int64_t i = 0; i < num_samples_; i++) { + std::partial_sort(val_idx.begin(), val_idx.begin() + user_num_samples_, val_idx.end()); + for (int64_t i = 0; i < user_num_samples_; i++) { onepass_ids_.push_back(val_idx[i].second); } } @@ -91,11 +91,11 @@ Status WeightedRandomSampler::GetNextBuffer(std::unique_ptr *out_buf "number of samples weights is more than num of rows. Might generate id out of bound OR other errors"); } - if (!replacement_ && (weights_.size() < static_cast(num_samples_))) { + if (!replacement_ && (weights_.size() < static_cast(user_num_samples_))) { RETURN_STATUS_UNEXPECTED("Without replacement, sample weights less than numSamples"); } - if (sample_id_ == num_samples_) { + if (sample_id_ == user_num_samples_) { (*out_buffer) = std::make_unique(buffer_id_++, DataBuffer::kDeBFlagEOE); } else { (*out_buffer) = std::make_unique(buffer_id_++, DataBuffer::kDeBFlagNone); @@ -103,8 +103,8 @@ Status WeightedRandomSampler::GetNextBuffer(std::unique_ptr *out_buf int64_t last_id = sample_id_ + samples_per_buffer_; // Handling the return all samples at once, and when last draw is not a full batch. - if (last_id > num_samples_) { - last_id = num_samples_; + if (last_id > user_num_samples_) { + last_id = user_num_samples_; } // Allocate tensor. diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.h b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.h index bccc9e599d..5381bb64b0 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/sampler/weighted_random_sampler.h @@ -43,7 +43,7 @@ class WeightedRandomSampler : public Sampler { // Initialize the sampler. 
// @param op (Not used in this sampler) // @return Status - Status Init(const RandomAccessOp *op) override; + Status InitSampler() override; // Reset the internal variable to the initial state and reshuffle the indices. Status Reset() override; @@ -69,6 +69,9 @@ class WeightedRandomSampler : public Sampler { // Random engine and device std::mt19937 rand_gen_; + // num_samples from user + int64_t user_num_samples_; + // Discrete distribution for generating weighted random numbers with replacement. std::unique_ptr> discrete_dist_; diff --git a/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc index 71b4c47cf5..1731ed14ba 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/source/voc_op.cc @@ -220,7 +220,7 @@ Status VOCOp::ParseImageIds() { } Status VOCOp::InitSampler() { - RETURN_IF_NOT_OK(sampler_->Init(this)); + RETURN_IF_NOT_OK(sampler_->HandshakeRandomAccessOp(this)); return Status::OK(); } diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 8e6545375b..4480bbc462 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -1748,14 +1748,70 @@ class MindDataset(SourceDataset): return num_rows -def ds_fn(dataset): - for val in dataset: - # convert output tensors to ndarrays - yield tuple([np.array(x) for x in val]) +def _iter_fn(dataset, num_samples): + """ + Generator function wrapper for iterable dataset + """ + if num_samples is not None: + ds_iter = iter(dataset) + for _ in range(num_samples): + try: + val = next(ds_iter) + except StopIteration: + return + # convert output tensors to ndarrays + yield tuple([np.array(x) for x in val]) + else: + for val in dataset: + # convert output tensors to ndarrays + yield tuple([np.array(x) for x in val]) + + +def _generator_fn(generator, num_samples): + """ + Generator function wrapper for generator function 
dataset + """ + if num_samples is not None: + gen_iter = generator() + for _ in range(num_samples): + try: + val = next(gen_iter) + except StopIteration: + return + yield val + else: + gen_iter = generator() + for val in gen_iter: + yield val -def sampler_fn(sampler, dataset): - for i in sampler: +def _py_sampler_fn(sampler, num_samples, dataset): + """ + Generator function wrapper for mappable dataset with python sampler + """ + if num_samples is not None: + sampler_iter = iter(sampler) + for _ in range(num_samples): + try: + idx = next(sampler_iter) + except StopIteration: + return + val = dataset[idx] + # convert output tensors to ndarrays + yield tuple([np.array(x) for x in val]) + else: + for i in sampler: + val = dataset[i] + # convert output tensors to ndarrays + yield tuple([np.array(x) for x in val]) + + +def _cpp_sampler_fn(sampler, dataset): + """ + Generator function wrapper for mappable dataset with cpp sampler + """ + indices = sampler.get_indices() + for i in indices: val = dataset[i] # convert output tensors to ndarrays yield tuple([np.array(x) for x in val]) @@ -1763,49 +1819,122 @@ def sampler_fn(sampler, dataset): class GeneratorDataset(SourceDataset): """ - A source dataset that generate data from calling generator function each epoch. + A source dataset that generate data from python by invoking python data source each epoch. + + This dataset can take in a sampler. sampler and shuffle are mutually exclusive. Table + below shows what input args are allowed and their expected behavior. + + .. 
list-table:: Expected Order Behavior of Using 'sampler' and 'shuffle' + :widths: 25 25 50 + :header-rows: 1 + + * - Parameter 'sampler' + - Parameter 'shuffle' + - Expected Order Behavior + * - None + - None + - random order + * - None + - True + - random order + * - None + - False + - sequential order + * - Sampler object + - None + - order defined by sampler + * - Sampler object + - True + - not allowed + * - Sampler object + - False + - not allowed Args: - generator_function (callable): - A callable object that returns an Generator object that supports the iter() protocol. - Generator object is required to return a tuple of numpy array as a row of the dataset on next(). + source (Callable/Iterable/Random Accessible): + A generator callable object, an iterable python object or a random accessible python object. + Callable source is required to return a tuple of numpy array as a row of the dataset on source().next(). + Iterable source is required to return a tuple of numpy array as a row of the dataset on iter(source).next(). + Random accessible source is required to return a tuple of numpy array as a row of the dataset on + source[idx]. column_names (list[str]): List of column names of the dataset. column_types (list[mindspore.dtype], optional): List of column data types of the dataset (default=None). If provided, sanity check will be performed on generator output. - prefetch_size (int, optional): Prefetch number of records ahead of the user's request (default=None). - sampler (Sampler, optional): Object used to choose samples from the dataset (default=None). + schema (Schema/String, optional): Path to the json schema file or schema object (default=None). + If the schema is not provided, the meta data from column_names and column_types is considered the schema. + num_samples (int, optional): The number of samples to be included in the dataset + (default=None, all images). + shuffle (bool, optional): Whether or not to perform shuffle on the dataset. 
Random accessible input is required. + (default=None, expected order behavior shown in the table). + sampler (Sampler/Iterable, optional): Object used to choose samples from the dataset. Random accessible input is + required. + (default=None, expected order behavior shown in the table). + num_shards (int, optional): Number of shards that the dataset should be divided into (default=None). + This argument should be specified only when 'num_samples' is "None". Random accessible input is required. + shard_id (int, optional): The shard ID within num_shards (default=None). This argument should be specified only + when num_shards is also specified. Random accessible input is required. Examples: - >>> import mindspore.dataset as ds - >>> # 1) generator function that generates multi-dimensional data + >>> import mindspore.dataengine as de + >>> # 1) Multidimensional generator function as callable input >>> def generator_md(): >>> for i in range(64): >>> yield (np.array([[i, i + 1], [i + 2, i + 3]]),) - >>> # create multi_dimension_generator_dataset with GeneratorMD() and column name "multi_dimensional_data" - >>> multi_dimension_generator_dataset = ds.GeneratorDataset(generator_md, ["multi_dimensional_data"]) - >>> # 2) generator function that generates multi-columns data + >>> # create multi_dimension_generator_dataset with GeneratorMD and column name "multi_dimensional_data" + >>> multi_dimension_generator_dataset = de.GeneratorDataset(generator_md, ["multi_dimensional_data"]) + >>> # 2) Multi-column generator function as callable input >>> def generator_mc(maxid = 64): >>> for i in range(maxid): >>> yield (np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]])) - >>> # create multi_column_generator_dataset with GeneratorMC() and column names "col1" and "col2" - >>> multi_column_generator_dataset = ds.GeneratorDataset(generator_mc, ["col1, col2"]) + >>> # create multi_column_generator_dataset with GeneratorMC and column names "col1" and "col2" + >>> 
multi_column_generator_dataset = de.GeneratorDataset(generator_mc, ["col1, col2"]) + >>> # 3) Iterable dataset as iterable input + >>> class MyIterable(): + >>> def __iter__(self): + >>> return # User implementation + >>> # create iterable_generator_dataset with MyIterable object + >>> iterable_generator_dataset = de.GeneratorDataset(MyIterable(), ["col1"]) + >>> # 4) Random accessible dataset as Random accessible input + >>> class MyRA(): + >>> def __getitem__(self, index): + >>> return # User implementation + >>> # create ra_generator_dataset with MyRA object + >>> ra_generator_dataset = de.GeneratorDataset(MyRA(), ["col1"]) + >>> # List/Dict/Tuple is also random accessible + >>> list_generator = de.GeneratorDataset([(np.array(0),), (np.array(1)), (np.array(2))], ["col1"]) + >>> # 5) Built-in Sampler + >>> my_generator = de.GeneratorDataset(my_ds, ["img", "label"], sampler=samplers.RandomSampler()) + >>> """ @check_generatordataset - def __init__(self, generator_function, column_names, column_types=None, prefetch_size=None, sampler=None): - super().__init__(1) - if sampler is not None: - self.generator_function = (lambda: sampler_fn(sampler, generator_function)) + def __init__(self, source, column_names, column_types=None, schema=None, num_samples=None, num_parallel_workers=1, + shuffle=None, sampler=None, num_shards=None, shard_id=None): + super().__init__(num_parallel_workers) + self.sampler = _select_sampler(num_samples, sampler, shuffle, num_shards, shard_id) + if self.sampler is not None and hasattr(source, "__getitem__"): + if isinstance(self.sampler, (samplers.SequentialSampler, samplers.DistributedSampler, + samplers.RandomSampler, samplers.SubsetRandomSampler, + samplers.WeightedRandomSampler)): + if num_samples is None: + num_samples = len(source) + sampler_instance = self.sampler.create() + sampler_instance.set_num_rows(len(source)) + sampler_instance.set_num_samples(num_samples) + sampler_instance.initialize() + self.source = (lambda: 
_cpp_sampler_fn(sampler_instance, source)) + else: + self.source = (lambda: _py_sampler_fn(self.sampler, num_samples, source)) else: try: - # test to see if generator_function is iterable - iter(generator_function) + iter(source) except TypeError: - # generator_function was not iterable, assume it is a function - self.generator_function = generator_function + # Use generator function if input callable + self.source = (lambda: _generator_fn(source, num_samples)) else: - # generator_function was iterable, build a function around it - self.generator_function = (lambda: ds_fn(generator_function)) + # Use iterator function if input is iterable + # Random accessible input is also iterable + self.source = (lambda: _iter_fn(source, num_samples)) self.column_names = column_names @@ -1813,17 +1942,12 @@ class GeneratorDataset(SourceDataset): self.column_types = mstypelist_to_detypelist(column_types) else: self.column_types = column_types - self.distribution = "" - self.prefetch_size = prefetch_size - self.sampler = sampler def get_args(self): args = super().get_args() - args["generator_function"] = self.generator_function + args["source"] = self.source args["column_names"] = self.column_names args["column_types"] = self.column_types - args["prefetch_size"] = self.prefetch_size - args["sampler"] = self.sampler return args def get_dataset_size(self): diff --git a/mindspore/dataset/engine/samplers.py b/mindspore/dataset/engine/samplers.py index fd9c50e951..f9c74f151d 100644 --- a/mindspore/dataset/engine/samplers.py +++ b/mindspore/dataset/engine/samplers.py @@ -20,7 +20,6 @@ SequentialSampler, SubsetRandomSampler, WeightedRandomSampler. import mindspore._c_dataengine as cde - class DistributedSampler(): """ Sampler that access a shard of the dataset. 
diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index 63d7c58270..165a160e77 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -543,28 +543,48 @@ def check_generatordataset(method): def new_method(*args, **kwargs): param_dict = make_param_dict(method, args, kwargs) - nreq_param_int = ['prefetch_size'] - nreq_param_list = ['column_names', 'column_types'] - # check generator_function; required argument - generator_function = param_dict.get('generator_function') - if generator_function is None: - raise ValueError("generator_function is not provided.") + source = param_dict.get('source') + if source is None: + raise ValueError("source is not provided.") + if not callable(source): + try: + iter(source) + except TypeError: + raise TypeError("source should be callable, iterable or random accessible") # check column_names; required argument column_names = param_dict.get('column_names') if column_names is None: raise ValueError("column_names is not provided.") - # check prefetch_size range - prefetch_size = param_dict.get('prefetch_size') - if prefetch_size is not None and (prefetch_size <= 0 or prefetch_size > 1024): - raise ValueError("prefetch_size exceeds the boundary.") - + # check optional argument + nreq_param_int = ["num_samples", "num_parallel_workers", "num_shards", "shard_id"] check_param_type(nreq_param_int, param_dict, int) - + nreq_param_list = ["column_types"] check_param_type(nreq_param_list, param_dict, list) + num_shards = param_dict.get("num_shards") + shard_id = param_dict.get("shard_id") + if (num_shards is None) != (shard_id is None): + # These two parameters appear together. 
+ raise ValueError("num_shards and shard_id need to be passed in together") + if num_shards is not None: + if shard_id >= num_shards: + raise ValueError("shard_id should be less than num_shards") + + sampler = param_dict.get("sampler") + if sampler is not None: + if isinstance(sampler, samplers.PKSampler): + raise ValueError("PKSampler is not supported by GeneratorDataset") + if not isinstance(sampler, (samplers.SequentialSampler, samplers.DistributedSampler, + samplers.RandomSampler, samplers.SubsetRandomSampler, + samplers.WeightedRandomSampler)): + try: + iter(sampler) + except TypeError: + raise TypeError("sampler should be either iterable or from dataset.samplers.py") + return method(*args, **kwargs) return new_method diff --git a/tests/ut/cpp/dataset/stand_alone_samplers_test.cc b/tests/ut/cpp/dataset/stand_alone_samplers_test.cc index 48cc811615..ea0ae78aef 100644 --- a/tests/ut/cpp/dataset/stand_alone_samplers_test.cc +++ b/tests/ut/cpp/dataset/stand_alone_samplers_test.cc @@ -75,7 +75,7 @@ TEST_F(MindDataTestStandAloneSampler, TestDistributedSampler) { std::shared_ptr tensor; for (int i = 0; i < 6; i++) { std::unique_ptr sampler = std::make_unique(3, i % 3, (i < 3 ? 
false : true)); - sampler->Init(&mock); + sampler->HandshakeRandomAccessOp(&mock); sampler->GetNextBuffer(&db); db->GetTensor(&tensor, 0, 0); MS_LOG(DEBUG) << (*tensor); @@ -95,7 +95,7 @@ TEST_F(MindDataTestStandAloneSampler, TestStandAoneSequentialSampler) { std::shared_ptr sampler = std::make_shared(3); std::unique_ptr db; std::shared_ptr tensor; - sampler->Init(&mock); + sampler->HandshakeRandomAccessOp(&mock); sampler->GetNextBuffer(&db); db->GetTensor(&tensor, 0, 0); EXPECT_TRUE((*tensor) == (*label1)); diff --git a/tests/ut/cpp/dataset/subset_random_sampler_test.cc b/tests/ut/cpp/dataset/subset_random_sampler_test.cc index 5142a6d399..bb8b3439d5 100644 --- a/tests/ut/cpp/dataset/subset_random_sampler_test.cc +++ b/tests/ut/cpp/dataset/subset_random_sampler_test.cc @@ -52,8 +52,8 @@ TEST_F(MindDataTestSubsetRandomSampler, TestAllAtOnce) { std::unordered_set in_set(in.begin(), in.end()); SubsetRandomSampler sampler(in); - DummyRandomAccessOp dummy_random_access_op(5); - sampler.Init(&dummy_random_access_op); + DummyRandomAccessOp dummyRandomAccessOp(5); + sampler.HandshakeRandomAccessOp(&dummyRandomAccessOp); std::unique_ptr db; TensorRow row; @@ -80,8 +80,8 @@ TEST_F(MindDataTestSubsetRandomSampler, TestGetNextBuffer) { std::vector input(total_samples, 1); SubsetRandomSampler sampler(input, samples_per_buffer); - DummyRandomAccessOp dummy_random_access_op(total_samples); - sampler.Init(&dummy_random_access_op); + DummyRandomAccessOp dummyRandomAccessOp(total_samples); + sampler.HandshakeRandomAccessOp(&dummyRandomAccessOp); std::unique_ptr db; TensorRow row; @@ -111,8 +111,8 @@ TEST_F(MindDataTestSubsetRandomSampler, TestReset) { std::unordered_set in_set(in.begin(), in.end()); SubsetRandomSampler sampler(in); - DummyRandomAccessOp dummy_random_access_op(5); - sampler.Init(&dummy_random_access_op); + DummyRandomAccessOp dummyRandomAccessOp(5); + sampler.HandshakeRandomAccessOp(&dummyRandomAccessOp); std::unique_ptr db; TensorRow row; diff --git 
a/tests/ut/cpp/dataset/weighted_random_sampler_test.cc b/tests/ut/cpp/dataset/weighted_random_sampler_test.cc index 1c5d73613f..51a4bc3cb3 100644 --- a/tests/ut/cpp/dataset/weighted_random_sampler_test.cc +++ b/tests/ut/cpp/dataset/weighted_random_sampler_test.cc @@ -60,8 +60,8 @@ TEST_F(MindDataTestWeightedRandomSampler, TestOneshotReplacement) { // create sampler with replacement = true WeightedRandomSampler m_sampler(weights, num_samples, true); - DummyRandomAccessOp dummy_random_access_op(total_samples); - m_sampler.Init(&dummy_random_access_op); + DummyRandomAccessOp dummyRandomAccessOp(total_samples); + m_sampler.HandshakeRandomAccessOp(&dummyRandomAccessOp); std::unique_ptr db; TensorRow row; @@ -90,8 +90,8 @@ TEST_F(MindDataTestWeightedRandomSampler, TestOneshotNoReplacement) { // create sampler with replacement = replacement WeightedRandomSampler m_sampler(weights, num_samples, false); - DummyRandomAccessOp dummy_random_access_op(total_samples); - m_sampler.Init(&dummy_random_access_op); + DummyRandomAccessOp dummyRandomAccessOp(total_samples); + m_sampler.HandshakeRandomAccessOp(&dummyRandomAccessOp); std::unique_ptr db; TensorRow row; @@ -126,8 +126,8 @@ TEST_F(MindDataTestWeightedRandomSampler, TestGetNextBufferReplacement) { // create sampler with replacement = replacement WeightedRandomSampler m_sampler(weights, num_samples, true, samples_per_buffer); - DummyRandomAccessOp dummy_random_access_op(total_samples); - m_sampler.Init(&dummy_random_access_op); + DummyRandomAccessOp dummyRandomAccessOp(total_samples); + m_sampler.HandshakeRandomAccessOp(&dummyRandomAccessOp); std::unique_ptr db; TensorRow row; @@ -162,8 +162,8 @@ TEST_F(MindDataTestWeightedRandomSampler, TestGetNextBufferNoReplacement) { // create sampler with replacement = replacement WeightedRandomSampler m_sampler(weights, num_samples, false, samples_per_buffer); - DummyRandomAccessOp dummy_random_access_op(total_samples); - m_sampler.Init(&dummy_random_access_op); + DummyRandomAccessOp 
dummyRandomAccessOp(total_samples); + m_sampler.HandshakeRandomAccessOp(&dummyRandomAccessOp); std::unique_ptr db; TensorRow row; @@ -203,8 +203,8 @@ TEST_F(MindDataTestWeightedRandomSampler, TestResetReplacement) { // create sampler with replacement = true WeightedRandomSampler m_sampler(weights, num_samples, true); - DummyRandomAccessOp dummy_random_access_op(total_samples); - m_sampler.Init(&dummy_random_access_op); + DummyRandomAccessOp dummyRandomAccessOp(total_samples); + m_sampler.HandshakeRandomAccessOp(&dummyRandomAccessOp); std::unique_ptr db; TensorRow row; @@ -248,8 +248,8 @@ TEST_F(MindDataTestWeightedRandomSampler, TestResetNoReplacement) { // create sampler with replacement = true WeightedRandomSampler m_sampler(weights, num_samples, false); - DummyRandomAccessOp dummy_random_access_op(total_samples); - m_sampler.Init(&dummy_random_access_op); + DummyRandomAccessOp dummyRandomAccessOp(total_samples); + m_sampler.HandshakeRandomAccessOp(&dummyRandomAccessOp); std::unique_ptr db; TensorRow row; diff --git a/tests/ut/python/dataset/test_generator.py b/tests/ut/python/dataset/test_generator.py index 07556d9c7f..c224c5a2ea 100644 --- a/tests/ut/python/dataset/test_generator.py +++ b/tests/ut/python/dataset/test_generator.py @@ -439,6 +439,74 @@ def test_case_error_4(): assert "Unexpected error. 
Result of a tensorOp doesn't match output column names" in str(info.value) +def test_sequential_sampler(): + source = [(np.array([x]),) for x in range(64)] + ds1 = ds.GeneratorDataset(source, ["data"], sampler=ds.SequentialSampler()) + i = 0 + for data in ds1.create_dict_iterator(): # each data is a dictionary + golden = np.array([i]) + assert np.array_equal(data["data"], golden) + i = i + 1 + + +def test_random_sampler(): + source = [(np.array([x]),) for x in range(64)] + ds1 = ds.GeneratorDataset(source, ["data"], shuffle = True) + for data in ds1.create_dict_iterator(): # each data is a dictionary + pass + + +def test_distributed_sampler(): + source = [(np.array([x]),) for x in range(64)] + for sid in range(8): + ds1 = ds.GeneratorDataset(source, ["data"], shuffle = False, num_shards=8, shard_id=sid) + i = sid + for data in ds1.create_dict_iterator(): # each data is a dictionary + golden = np.array([i]) + assert np.array_equal(data["data"], golden) + i = i + 8 + + +def test_num_samples(): + source = [(np.array([x]),) for x in range(64)] + num_samples = 32 + ds1 = ds.GeneratorDataset(source, ["data"], sampler=ds.SequentialSampler(), num_samples = num_samples) + ds2 = ds.GeneratorDataset(source, ["data"], sampler=[i for i in range(32)], num_samples = num_samples) + ds3 = ds.GeneratorDataset(generator_1d, ["data"], num_samples = num_samples) + + count = 0 + for _ in ds1.create_dict_iterator(): + count = count + 1 + assert count == num_samples + + count = 0 + for _ in ds2.create_dict_iterator(): + count = count + 1 + assert count == num_samples + + count = 0 + for _ in ds3.create_dict_iterator(): + count = count + 1 + assert count == num_samples + + +def test_num_samples_underflow(): + source = [(np.array([x]),) for x in range(64)] + num_samples = 256 + ds2 = ds.GeneratorDataset(source, ["data"], sampler=[i for i in range(64)], num_samples = num_samples) + ds3 = ds.GeneratorDataset(generator_1d, ["data"], num_samples = num_samples) + + count = 0 + for _ in 
ds2.create_dict_iterator(): + count = count + 1 + assert count == 64 + + count = 0 + for _ in ds3.create_dict_iterator(): + count = count + 1 + assert count == 64 + + if __name__ == "__main__": test_case_0() test_case_1() @@ -458,3 +526,6 @@ if __name__ == "__main__": test_case_error_2() test_case_error_3() test_case_error_4() + test_sequential_sampler() + test_distributed_sampler() + test_random_sampler() diff --git a/tests/ut/python/dataset/test_sampler.py b/tests/ut/python/dataset/test_sampler.py index ca618311cb..7a58249f9c 100644 --- a/tests/ut/python/dataset/test_sampler.py +++ b/tests/ut/python/dataset/test_sampler.py @@ -87,7 +87,28 @@ def test_random_sampler_multi_iter(print_res=False): test_config(replacement=True, num_samples=5, num_repeats=5, validate=[0, 1, 2, 3, 4, 5]) +def test_sampler_py_api(): + sampler = ds.SequentialSampler().create() + sampler.set_num_rows(128) + sampler.set_num_samples(64) + sampler.initialize() + sampler.get_indices() + + sampler = ds.RandomSampler().create() + sampler.set_num_rows(128) + sampler.set_num_samples(64) + sampler.initialize() + sampler.get_indices() + + sampler = ds.DistributedSampler(8, 4).create() + sampler.set_num_rows(128) + sampler.set_num_samples(64) + sampler.initialize() + sampler.get_indices() + + if __name__ == '__main__': test_sequential_sampler(True) test_random_sampler(True) test_random_sampler_multi_iter(True) + test_sampler_py_api() From 59604af98b6027b936c14aecf5741d9c39f74531 Mon Sep 17 00:00:00 2001 From: zhaoting Date: Tue, 14 Apr 2020 16:27:12 +0800 Subject: [PATCH 239/367] change some settings in YOLOv3 --- example/yolov3_coco2017/dataset.py | 4 +--- example/yolov3_coco2017/run_distribute_train.sh | 9 ++++++++- example/yolov3_coco2017/train.py | 5 +++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/example/yolov3_coco2017/dataset.py b/example/yolov3_coco2017/dataset.py index 826fe16c53..9c6a0f362d 100644 --- a/example/yolov3_coco2017/dataset.py +++ 
b/example/yolov3_coco2017/dataset.py @@ -22,7 +22,6 @@ from PIL import Image from matplotlib.colors import rgb_to_hsv, hsv_to_rgb import mindspore.dataset as de from mindspore.mindrecord import FileWriter -import mindspore.dataset.transforms.vision.py_transforms as P import mindspore.dataset.transforms.vision.c_transforms as C from config import ConfigYOLOV3ResNet18 @@ -301,13 +300,12 @@ def create_yolo_dataset(mindrecord_dir, batch_size=32, repeat_num=10, device_num compose_map_func = (lambda image, annotation: preprocess_fn(image, annotation, is_training)) if is_training: - hwc_to_chw = P.HWC2CHW() + hwc_to_chw = C.HWC2CHW() ds = ds.map(input_columns=["image", "annotation"], output_columns=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"], columns_order=["image", "bbox_1", "bbox_2", "bbox_3", "gt_box1", "gt_box2", "gt_box3"], operations=compose_map_func, num_parallel_workers=num_parallel_workers) ds = ds.map(input_columns=["image"], operations=hwc_to_chw, num_parallel_workers=num_parallel_workers) - ds = ds.shuffle(buffer_size=256) ds = ds.batch(batch_size, drop_remainder=True) ds = ds.repeat(repeat_num) else: diff --git a/example/yolov3_coco2017/run_distribute_train.sh b/example/yolov3_coco2017/run_distribute_train.sh index 0c43e776b9..201f19ca16 100644 --- a/example/yolov3_coco2017/run_distribute_train.sh +++ b/example/yolov3_coco2017/run_distribute_train.sh @@ -19,6 +19,7 @@ echo "Please run the scipt as: " echo "sh run_distribute_train.sh DEVICE_NUM EPOCH_SIZE MINDRECORD_DIR IMAGE_DIR ANNO_PATH MINDSPORE_HCCL_CONFIG_PATH" echo "for example: sh run_distribute_train.sh 8 100 /data/Mindrecord_train /data /data/train.txt /data/hccl.json" echo "It is better to use absolute path." +echo "The learning rate is 0.005 as default, if you want other lr, please change the value in this script." 
echo "==============================================================================================================" EPOCH_SIZE=$2 @@ -38,6 +39,11 @@ export RANK_SIZE=$1 for((i=0;i env.log - python ../train.py \ + taskset -c $cmdopt python ../train.py \ --distribute=1 \ + --lr=0.005 \ --device_num=$RANK_SIZE \ --device_id=$DEVICE_ID \ --mindrecord_dir=$MINDRECORD_DIR \ diff --git a/example/yolov3_coco2017/train.py b/example/yolov3_coco2017/train.py index 121e2aa810..c7d28a8350 100644 --- a/example/yolov3_coco2017/train.py +++ b/example/yolov3_coco2017/train.py @@ -67,6 +67,7 @@ if __name__ == '__main__': parser.add_argument("--distribute", type=bool, default=False, help="Run distribute, default is false.") parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.") parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.") + parser.add_argument("--lr", type=float, default=0.001, help="Learning rate, default is 0.001.") parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or not, default is sink") parser.add_argument("--epoch_size", type=int, default=10, help="Epoch size, default is 10") parser.add_argument("--batch_size", type=int, default=32, help="Batch size, default is 32.") @@ -137,8 +138,8 @@ if __name__ == '__main__': ckpt_config = CheckpointConfig(save_checkpoint_steps=dataset_size * args_opt.save_checkpoint_epochs) ckpoint_cb = ModelCheckpoint(prefix="yolov3", directory=None, config=ckpt_config) - lr = Tensor(get_lr(learning_rate=0.001, start_step=0, global_step=args_opt.epoch_size * dataset_size, - decay_step=1000, decay_rate=0.95)) + lr = Tensor(get_lr(learning_rate=args_opt.lr, start_step=0, global_step=args_opt.epoch_size * dataset_size, + decay_step=1000, decay_rate=0.95, steps=True)) opt = nn.Adam(filter(lambda x: x.requires_grad, net.get_parameters()), lr, loss_scale=loss_scale) net = TrainingWrapper(net, opt, loss_scale) From 
5c2ded8d8eb714d22291f37945e35a4504fd17e7 Mon Sep 17 00:00:00 2001 From: anzhengqi Date: Tue, 14 Apr 2020 21:17:11 +0800 Subject: [PATCH 240/367] fix schema.parse_columns function --- mindspore/dataset/engine/datasets.py | 76 +++++++++++++++------------- 1 file changed, 41 insertions(+), 35 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 4b02004086..683102aecf 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -2464,47 +2464,53 @@ class Schema: Parse the columns and add it to self. Args: - columns (dict or list[str]): names of columns. + columns (dict or list[dict]): dataset attribution information, decoded from schema file. + if list: columns element must be dict, 'name' and 'type' must be in keys, 'shape' optional. + if dict: columns.keys() as name, element in columns.values() is dict, and 'type' inside, 'shape' optional. + example 1) + [{'name': 'image', 'type': 'int8', 'shape': [3, 3]}, + {'name': 'label', 'type': 'int8', 'shape': [1]}] + example 2) + {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}} Raises: - RuntimeError: If failed to parse schema file. - RuntimeError: If unknown items in schema file. + RuntimeError: If failed to parse columns. + RuntimeError: If unknown items in columns. RuntimeError: If column's name field is missing. RuntimeError: If column's type field is missing. 
""" - if columns is None: - raise TypeError("Expected non-empty dict or string list.") self.columns = [] - for col in columns: - name = None - shape = None - data_type = None - col_details = None - if isinstance(columns, list): - col_details = col - if "name" in col: - name = col["name"] - elif isinstance(columns, dict): - col_details = columns[col] - name = col - else: - raise RuntimeError("Error parsing the schema file") - - for k, v in col_details.items(): - if k == "shape": - shape = v - elif k == "type": - data_type = v - elif k in ("t_impl", "rank"): - pass - else: - raise RuntimeError("Unknown field %s" % k) - - if name is None: - raise RuntimeError("Column's name field is missing.") - if data_type is None: - raise RuntimeError("Column's type field is missing.") - self.add_column(name, data_type, shape) + if isinstance(columns, list): + for column in columns: + try: + name = column.pop("name") + except KeyError: + raise RuntimeError("Column's name is missing") + try: + de_type = column.pop("type") + except KeyError: + raise RuntimeError("Column' type is missing") + shape = column.pop("shape", None) + column.pop("t_impl", None) + column.pop("rank", None) + if column: + raise RuntimeError("Unknown field {}".format(",".join(column.keys()))) + self.add_column(name, de_type, shape) + elif isinstance(columns, dict): + for key, value in columns.items(): + name = key + try: + de_type = value.pop("type") + except KeyError: + raise RuntimeError("Column' type is missing") + shape = value.pop("shape", None) + value.pop("t_impl", None) + value.pop("rank", None) + if value: + raise RuntimeError("Unknown field {}".format(",".join(value.keys()))) + self.add_column(name, de_type, shape) + else: + raise RuntimeError("columns must be dict or list, columns contain name, type, shape(optional).") def from_json(self, json_obj): """ From a1a2182b8aa1a1d1b0f728fd49f603f52aa16fc4 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Wed, 15 Apr 2020 09:48:04 +0800 Subject: [PATCH 241/367] 
modify whl file name to be same as package name --- cmake/package_script.cmake | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmake/package_script.cmake b/cmake/package_script.cmake index 565154a38d..dcc8ee0ad0 100644 --- a/cmake/package_script.cmake +++ b/cmake/package_script.cmake @@ -76,8 +76,6 @@ if (NOT CMAKE_SYSTEM_NAME MATCHES "Windows") ) endif () -set(PACKAGE_NAME "mindspore") - file(GLOB WHL_FILE ${MS_PACK_ROOT_DIR}/dist/*.whl) get_filename_component(ORIGIN_FILE_NAME ${WHL_FILE} NAME) string(REPLACE "-" ";" ORIGIN_FILE_NAME ${ORIGIN_FILE_NAME}) From ffb2cb03a45812cf7e864ea4d32f302111e74981 Mon Sep 17 00:00:00 2001 From: Xiaoda Zhang Date: Tue, 14 Apr 2020 17:06:37 +0800 Subject: [PATCH 242/367] Change 'NOT_FULLY_USE_DEVICES' to 'FULLY_USE_DEVICES' and make ALL-1 user-specified-strategy valid in auto-parallel --- .../parallel/auto_parallel/edge_costmodel.cc | 6 +-- .../parallel/auto_parallel/graph_costmodel.cc | 14 +++---- .../parallel/auto_parallel/graph_costmodel.h | 4 +- mindspore/ccsrc/parallel/costmodel_context.cc | 4 +- mindspore/ccsrc/parallel/costmodel_context.h | 10 ++--- .../ccsrc/parallel/ops_info/matmul_info.cc | 2 +- .../ccsrc/parallel/ops_info/operator_info.cc | 2 +- .../ccsrc/parallel/step_auto_parallel.cc | 38 +++++++------------ mindspore/ccsrc/pipeline/init.cc | 8 ++-- mindspore/parallel/algo_parameter_config.py | 16 ++++---- .../parallel/test_auto_parallel_two_matmul.py | 10 ++--- tests/ut/python/parallel/test_reshape.py | 6 +-- 12 files changed, 55 insertions(+), 65 deletions(-) diff --git a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc index 895646f409..21e67f9f7b 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/edge_costmodel.cc @@ -85,10 +85,10 @@ Status Edge::InitEdgeCost() { } } if (!has_available_cost) { - if (!NOT_FULLY_USE_DEVICES) { + if (FULLY_USE_DEVICES) { MS_LOG(EXCEPTION) << 
"Generating cost for edge: " << edge_name_ - << " failed, it may be caused by setting 'not_fully_use_devices' false. Try to set " - "'not_fully_use_devices' true."; + << " failed, it may be caused by setting 'fully_use_devices' true. Try to set " + "'fully_use_devices' false."; } else if (ELEMENTWISE_OP_STRA_FOLLOW) { MS_LOG(EXCEPTION) << "Generating cost for edge: " << edge_name_ << " failed, it may be caused by setting 'elementwise_op_strategy_follow' true. " diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc index 82dd723039..c56d3a6fbd 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.cc @@ -36,7 +36,7 @@ double COST_MODEL_COMMUNI_CONST = DEFAULT_COST_MODEL_COMMUNI_CONST; double COST_MODEL_COMMUNI_BIAS = DEFAULT_COST_MODEL_COMMUNI_BIAS; bool TENSOR_SLICE_ALIGNMENT_ENABLE = DEFAULT_TENSOR_SLICE_ALIGNMENT_ENABLE; size_t TENSOR_SLICE_ALIGNMENT_SIZE = DEFAULT_TENSOR_SLICE_ALIGNMENT_SIZE; -bool NOT_FULLY_USE_DEVICES = DEFAULT_NOT_FULLY_USE_DEVICES; +bool FULLY_USE_DEVICES = DEFAULT_FULLY_USE_DEVICES; bool ELEMENTWISE_OP_STRA_FOLLOW = DEFAULT_ELEMENTWISE_OP_STRA_FOLLOW; void CostGraph::SetDeviceMemoryAndCostParameter() { @@ -125,13 +125,13 @@ void CostGraph::SetDeviceMemoryAndCostParameter() { TENSOR_SLICE_ALIGNMENT_SIZE = align_size; MS_LOG(INFO) << "tensor_slice_align_size: " << TENSOR_SLICE_ALIGNMENT_SIZE << "."; - // NOT_FULLY_USE_DEVICES - auto not_fully_devices = CostModelContext::GetInstance()->not_fully_use_device(); - NOT_FULLY_USE_DEVICES = not_fully_devices; - if (NOT_FULLY_USE_DEVICES) { - MS_LOG(INFO) << "not_fully_use_devices: true."; + // FULLY_USE_DEVICES + auto fully_devices = CostModelContext::GetInstance()->fully_use_device(); + FULLY_USE_DEVICES = fully_devices; + if (FULLY_USE_DEVICES) { + MS_LOG(INFO) << "fully_use_devices: true."; } else { - MS_LOG(INFO) << "not_fully_use_devices: 
false."; + MS_LOG(INFO) << "fully_use_devices: false."; } // ELEMENTWISE_OP_STRA_FOLLOW diff --git a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h index 65aeb210ea..b6591c0741 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/graph_costmodel.h @@ -42,7 +42,7 @@ namespace parallel { #define DEFAULT_COST_MODEL_COMMUNI_BIAS 1024.0 #define DEFAULT_TENSOR_SLICE_ALIGNMENT_ENABLE false #define DEFAULT_TENSOR_SLICE_ALIGNMENT_SIZE 16 -#define DEFAULT_NOT_FULLY_USE_DEVICES false +#define DEFAULT_FULLY_USE_DEVICES true #define DEFAULT_ELEMENTWISE_OP_STRA_FOLLOW false class CostGraph; @@ -57,7 +57,7 @@ extern double COST_MODEL_COMMUNI_CONST; extern double COST_MODEL_COMMUNI_BIAS; extern bool TENSOR_SLICE_ALIGNMENT_ENABLE; extern size_t TENSOR_SLICE_ALIGNMENT_SIZE; -extern bool NOT_FULLY_USE_DEVICES; +extern bool FULLY_USE_DEVICES; extern bool ELEMENTWISE_OP_STRA_FOLLOW; class CostGraph { diff --git a/mindspore/ccsrc/parallel/costmodel_context.cc b/mindspore/ccsrc/parallel/costmodel_context.cc index 0ebbd2c626..82b260f967 100644 --- a/mindspore/ccsrc/parallel/costmodel_context.cc +++ b/mindspore/ccsrc/parallel/costmodel_context.cc @@ -60,7 +60,7 @@ void CostModelContext::ResetAlgoParameters() { costmodel_simplify_cal_ = DEFAULT_COST_MODEL_SIMPLIFY_CALCULATION; tensor_slice_alignment_enable_ = DEFAULT_TENSOR_SLICE_ALIGNMENT_ENABLE; tensor_slice_alignment_size_ = DEFAULT_TENSOR_SLICE_ALIGNMENT_SIZE; - not_fully_use_device_ = DEFAULT_NOT_FULLY_USE_DEVICES; + fully_use_device_ = DEFAULT_FULLY_USE_DEVICES; elementwise_stra_follow_ = DEFAULT_ELEMENTWISE_OP_STRA_FOLLOW; } @@ -118,7 +118,7 @@ void CostModelContext::set_tensor_slice_alignment_size(size_t ts_align_size) { tensor_slice_alignment_size_ = ts_align_size; } -void CostModelContext::set_not_fully_use_device(bool not_fully_use) { not_fully_use_device_ = not_fully_use; } +void 
CostModelContext::set_fully_use_device(bool fully_use) { fully_use_device_ = fully_use; } void CostModelContext::set_elementwise_stra_follow(bool elementwise_follow) { elementwise_stra_follow_ = elementwise_follow; diff --git a/mindspore/ccsrc/parallel/costmodel_context.h b/mindspore/ccsrc/parallel/costmodel_context.h index 04782fa366..23c9f7cc8d 100644 --- a/mindspore/ccsrc/parallel/costmodel_context.h +++ b/mindspore/ccsrc/parallel/costmodel_context.h @@ -102,9 +102,9 @@ class CostModelContext { void set_tensor_slice_alignment_size(size_t); size_t tensor_slice_alignment_size() const { return tensor_slice_alignment_size_; } - // NOT_FULLY_USE_DEVICES - void set_not_fully_use_device(bool); - bool not_fully_use_device() const { return not_fully_use_device_; } + // FULLY_USE_DEVICES + void set_fully_use_device(bool); + bool fully_use_device() const { return fully_use_device_; } // ELEMENTWISE_OP_STRA_FOLLOW void set_elementwise_stra_follow(bool); @@ -158,8 +158,8 @@ class CostModelContext { // TENSOR_SLICE_ALIGNMENT_SIZE size_t tensor_slice_alignment_size_; - // NOT_FULLY_USE_DEVICES - bool not_fully_use_device_; + // FULLY_USE_DEVICES + bool fully_use_device_; // ELEMENTWISE_OP_STRA_FOLLOW bool elementwise_stra_follow_; diff --git a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc index e617ae6c24..8d1264482b 100644 --- a/mindspore/ccsrc/parallel/ops_info/matmul_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/matmul_info.cc @@ -465,7 +465,7 @@ Status MatMulBase::PrepareStrategy(int32_t stage_id, size_t dev_num, mindspore::parallel::Dimensions combined_partitions, size_t input0_shape_size, size_t input1_shape_size, mindspore::parallel::StrategyPtr* const sp) { int32_t product = std::accumulate(combined_partitions.begin(), combined_partitions.end(), 1, std::multiplies()); - if (NOT_FULLY_USE_DEVICES) { + if (!FULLY_USE_DEVICES) { if (IntToSize(product) > dev_num) { return FAILED; } diff --git 
a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc index 23b6a5190a..5842a9149f 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc @@ -675,7 +675,7 @@ Status PrepareStrategyBase(int32_t stage_id, size_t dev_num, const Shapes& input for (auto& input_partition : inputs_partitions) { product *= std::accumulate(input_partition.begin(), input_partition.end(), 1, std::multiplies()); } - if (NOT_FULLY_USE_DEVICES) { + if (!FULLY_USE_DEVICES) { if (IntToSize(product) > dev_num) { return FAILED; } diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index a42ce612fb..495c3a8d39 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -110,8 +110,6 @@ std::vector splittable_op_ = {MATMUL, std::vector elementwise_op_ = {ACTIVATION, GELU, TANH, SOFTMAX, LOG_SOFTMAX, RELU, SQRT, CAST, POW, EXP, LOG, COS, ACOS, LOGICALNOT}; -std::vector ignore_manual_strategy_op_ = {BATCH_NORM}; - bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) { MS_EXCEPTION_IF_NULL(root); MS_EXCEPTION_IF_NULL(ParallelContext::GetInstance()); @@ -308,16 +306,6 @@ std::vector ExtractOutputTypeByNode(const CNodePtr &node) { return outputs_type; } -// Be careful the argument is cnode_full_name, not the op_name -bool IsIgnoreStrategyOperator(const std::string &cnode_full_name) { - for (auto &ignore_op : ignore_manual_strategy_op_) { - if (cnode_full_name.find(ignore_op) != std::string::npos) { - return true; - } - } - return false; -} - bool IsElementWiseOperator(const std::string &op_name) { auto iter = std::find(elementwise_op_.begin(), elementwise_op_.end(), op_name); return (iter != elementwise_op_.end()); @@ -414,18 +402,20 @@ OperatorInfoPtr CreateTheOperatorInfo(const PrimitivePtr &prim, const CNodePtr & // Set cost for this configured strategy 
if (operator_info->SetCostUnderStrategy(strategyPtr) != SUCCESS) { MS_LOG(EXCEPTION) << "Failure: operator " << prim->name() << " SetCostUnderStrategy failed"; - } else if (!NOT_FULLY_USE_DEVICES) { - if (!IsIgnoreStrategyOperator(cnode->fullname_with_scope())) { - // If configured to fully use devices, then checking for the user-specified strategy - int32_t used_devices = operator_info->used_devices(); - MS_EXCEPTION_IF_NULL(g_device_manager); - auto total_device_num = g_device_manager->GetDeviceListByStageId(0).size(); - // 'used_devices == -1' means that 'used_devices_' is not set - if ((used_devices == -1) || IntToSize(used_devices) != total_device_num) { - MS_LOG(EXCEPTION) << "In configuration 'NOT_FULLY_USE_DEVICES' = False, " - << "but the specified strategy uses device: " << used_devices - << ", total devices: " << total_device_num; - } + } else if (FULLY_USE_DEVICES) { + // If configured to fully use devices, then checking for the user-specified strategy + int32_t used_devices = operator_info->used_devices(); + MS_EXCEPTION_IF_NULL(g_device_manager); + auto total_device_num = g_device_manager->GetDeviceListByStageId(0).size(); + // 'used_devices == 1' means that ALL-1 strategy, which is valid in auto-parallel + if (used_devices == 1) { + return operator_info; + } + // 'used_devices == -1' means that 'used_devices_' is not set + if ((used_devices == -1) || IntToSize(used_devices) != total_device_num) { + MS_LOG(EXCEPTION) << "In configuration 'FULLY_USE_DEVICES' = True, " + << "but the specified strategy uses device: " << used_devices + << ", total devices: " << total_device_num; } } } diff --git a/mindspore/ccsrc/pipeline/init.cc b/mindspore/ccsrc/pipeline/init.cc index 24ead047d3..4aab6e2a5e 100644 --- a/mindspore/ccsrc/pipeline/init.cc +++ b/mindspore/ccsrc/pipeline/init.cc @@ -261,10 +261,10 @@ PYBIND11_MODULE(_c_expression, m) { "Set the parameter tensor_slice_size in strategy generation.") .def("get_tensor_slice_align_size", 
&CostModelContext::tensor_slice_alignment_size, "Get the parameter tensor_slice_size in strategy generation.") - .def("set_not_fully_use_devices", &CostModelContext::set_not_fully_use_device, - "Set the parameter not_fully_use_devices in the DP algorithm.") - .def("get_not_fully_use_devices", &CostModelContext::not_fully_use_device, - "Get the parameter not_fully_use_devices in the DP algorithm.") + .def("set_fully_use_devices", &CostModelContext::set_fully_use_device, + "Set the parameter fully_use_devices in the DP algorithm.") + .def("get_fully_use_devices", &CostModelContext::fully_use_device, + "Get the parameter fully_use_devices in the DP algorithm.") .def("set_elementwise_op_strategy_follow", &CostModelContext::set_elementwise_stra_follow, "Set the parameter elementwise_op_strategy_follow in the DP algorithm.") .def("get_elementwise_op_strategy_follow", &CostModelContext::elementwise_stra_follow, diff --git a/mindspore/parallel/algo_parameter_config.py b/mindspore/parallel/algo_parameter_config.py index aafc02367f..d1e4aa87a9 100644 --- a/mindspore/parallel/algo_parameter_config.py +++ b/mindspore/parallel/algo_parameter_config.py @@ -53,13 +53,13 @@ class _AlgoParameterConfig(): self.check_config_handle() return self._config_handle.get_simplify_cal() - def set_not_fully_use_devices(self, not_fully): + def set_fully_use_devices(self, not_fully): self.check_config_handle() - self._config_handle.set_not_fully_use_devices(not_fully) + self._config_handle.set_fully_use_devices(not_fully) - def get_not_fully_use_devices(self): + def get_fully_use_devices(self): self.check_config_handle() - return self._config_handle.get_not_fully_use_devices() + return self._config_handle.get_fully_use_devices() def set_elementwise_op_strategy_follow(self, element_strategy_follow): self.check_config_handle() @@ -119,7 +119,7 @@ def _algo_parameter_config(): set_algo_parameters_config_func_map = { "simplify_cal": _algo_parameter_config().set_simplify_cal, - 
"not_fully_use_devices": _algo_parameter_config().set_not_fully_use_devices, + "fully_use_devices": _algo_parameter_config().set_fully_use_devices, "elementwise_op_strategy_follow": _algo_parameter_config().set_elementwise_op_strategy_follow, "tensor_slice_align_enable": _algo_parameter_config().set_tensor_slice_align_enable, "tensor_slice_align_size": _algo_parameter_config().set_tensor_slice_align_size} @@ -127,14 +127,14 @@ set_algo_parameters_config_func_map = { get_algo_parameters_config_func_map = { "simplify_cal": _algo_parameter_config().get_simplify_cal, - "not_fully_use_devices": _algo_parameter_config().get_not_fully_use_devices, + "fully_use_devices": _algo_parameter_config().get_fully_use_devices, "elementwise_op_strategy_follow": _algo_parameter_config().get_elementwise_op_strategy_follow, "tensor_slice_align_enable": _algo_parameter_config().get_tensor_slice_align_enable, "tensor_slice_align_size": _algo_parameter_config().get_tensor_slice_align_size} @args_type_check(simplify_cal=bool, tensor_slice_align_enable=bool, tensor_slice_align_size=int, - not_fully_use_devices=bool, elementwise_op_strategy_follow=bool) + fully_use_devices=bool, elementwise_op_strategy_follow=bool) def set_algo_parameters(**kwargs): """ Set algo parameter config. @@ -146,7 +146,7 @@ def set_algo_parameters(**kwargs): simplify_cal (bool): Whether simplifying calculations in strategy-searching algorithm. Default: True tensor_slice_align_enable (bool): Whether checking tensor slice shape. Default: False tensor_slice_align_size (int): The minimum tensor slice shape, the value must be in [1, 1024]. Default: 16 - not_fully_use_devices (bool): Whether generating strategies that not fully use devices. Default: False + fully_use_devices (bool): Whether generating strategies that fully use all available devices. Default: True elementwise_op_strategy_follow (bool): Whether the elementwise operator have the same strategies as its subsequent operators. 
Default: False diff --git a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py index bd6639a501..db6190ab89 100644 --- a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py +++ b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py @@ -100,7 +100,7 @@ def test_two_matmul(): set_algo_parameters(simplify_cal=True, tensor_slice_align_enable=False, tensor_slice_align_size=32, - not_fully_use_devices=True, + fully_use_devices=False, elementwise_op_strategy_follow=False) para_simplify_cal = get_algo_parameters("simplify_cal") assert para_simplify_cal == True @@ -108,8 +108,8 @@ def test_two_matmul(): assert para_slice_align_enable == False para_slice_align_size = get_algo_parameters("tensor_slice_align_size") assert para_slice_align_size == 32 - not_fully_use_devices = get_algo_parameters("not_fully_use_devices") - assert not_fully_use_devices == True + fully_use_devices = get_algo_parameters("fully_use_devices") + assert fully_use_devices == False elementwise_op_strategy_follow = get_algo_parameters("elementwise_op_strategy_follow") assert elementwise_op_strategy_follow == False @@ -120,8 +120,8 @@ def test_two_matmul(): assert para_slice_align_enable == False para_slice_align_size = get_algo_parameters("tensor_slice_align_size") assert para_slice_align_size == 16 - not_fully_use_devices = get_algo_parameters("not_fully_use_devices") - assert not_fully_use_devices == False + fully_use_devices = get_algo_parameters("fully_use_devices") + assert fully_use_devices == True elementwise_op_strategy_follow = get_algo_parameters("elementwise_op_strategy_follow") assert elementwise_op_strategy_follow == False diff --git a/tests/ut/python/parallel/test_reshape.py b/tests/ut/python/parallel/test_reshape.py index 43906aec23..f72e5f909b 100644 --- a/tests/ut/python/parallel/test_reshape.py +++ b/tests/ut/python/parallel/test_reshape.py @@ -576,7 +576,7 @@ def 
test_flatten_reshape2(parallel_mode="auto_parallel"): epoch_size = 2 context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8) - set_algo_parameters(not_fully_use_devices=True) + set_algo_parameters(fully_use_devices=False) net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 1, 1, 1),)) loss = CrossEntropyLoss() predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32) @@ -617,7 +617,7 @@ def test_flatten_reshape3(parallel_mode="auto_parallel"): epoch_size = 2 context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8) - set_algo_parameters(not_fully_use_devices=True) + set_algo_parameters(fully_use_devices=False) net = ParallelReshapeNet(dense_in_channel=2048, dense_out_channel=1000, shape=(128, 1000), strategy=((16, 1),)) loss = CrossEntropyLoss() predict = Tensor(np.ones([batch_size, 1, 2, 1024]), dtype=ms.float32) @@ -646,7 +646,7 @@ def test_flatten_reshape4(parallel_mode="semi_auto_parallel"): epoch_size = 2 context.reset_auto_parallel_context() context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8) - set_algo_parameters(not_fully_use_devices=True) + set_algo_parameters(fully_use_devices=False) net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_keep_dims=True, strategy=((4, 1, 1, 1),)) loss = CrossEntropyLoss2() predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32) From b02e871c1aa277217111a4fa2f3ce2ced40cf921 Mon Sep 17 00:00:00 2001 From: huanghui Date: Tue, 14 Apr 2020 20:30:44 +0800 Subject: [PATCH 243/367] [IRFusion] add derelu_fusion pass --- .../ascend/ir_fusion/derelu_fusion.cc | 105 ++++++++++++++++++ .../ascend/ir_fusion/derelu_fusion.h | 33 ++++++ mindspore/ccsrc/pre_activate/common/helper.h | 1 + mindspore/ccsrc/utils/utils.h | 2 + .../ascend/ir_fusion/derelu_fusion_test.cc | 54 +++++++++ 
.../gtest_input/pre_activate/derelu_fusion.py | 56 ++++++++++ 6 files changed, 251 insertions(+) create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fusion/derelu_fusion.cc create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fusion/derelu_fusion.h create mode 100644 tests/ut/cpp/pre_activate/ascend/ir_fusion/derelu_fusion_test.cc create mode 100644 tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/derelu_fusion.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/derelu_fusion.cc new file mode 100644 index 0000000000..d5ea315de1 --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/derelu_fusion.cc @@ -0,0 +1,105 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "pre_activate/ascend/ir_fusion/derelu_fusion.h" +#include +#include +#include "session/anf_runtime_algorithm.h" +#include "ir/primitive.h" +#include "utils/utils.h" +#include "pipeline/static_analysis/abstract_value.h" +#include "pre_activate/common/helper.h" + +namespace mindspore { +namespace opt { +namespace { +const size_t kReluV2OutputNum = 2; + +CNodePtr GetRelu(const CNodePtr &relu_grad) { + MS_EXCEPTION_IF_NULL(relu_grad); + if (relu_grad->size() != kReluGradInputNum) { + MS_LOG_EXCEPTION << "ReluGrad has wrong input size " << relu_grad->size(); + } + auto relu_anf = relu_grad->input(2); + MS_EXCEPTION_IF_NULL(relu_anf); + return relu_anf->cast(); +} + +CNodePtr CreateReluV2(const FuncGraphPtr &graph, const CNodePtr &relu) { + MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(relu); + if (relu->size() != kReluInputNum) { + MS_LOG_EXCEPTION << "Relu has wrong input size " << relu->size(); + } + + auto prim = std::make_shared(kReluV2OpName); + std::vector inputs = {NewValueNode(prim), relu->input(1)}; + auto new_node = graph->NewCNode(inputs); + MS_EXCEPTION_IF_NULL(new_node); + new_node->set_scope(relu->scope()); + + // ReluV2's 2rd output is mask whose data type is uint8 and value is 0 or 1, so shape is an empty vector + TypeId mask_dtype = kNumberTypeUInt8; + std::vector mask_shape; + auto types = {AnfAlgo::GetOutputInferDataType(relu, 0), mask_dtype}; + auto shapes = {AnfAlgo::GetOutputInferShape(relu, 0), mask_shape}; + AnfAlgo::SetOutputInferTypeAndShape(types, shapes, new_node.get()); + return new_node; +} + +CNodePtr CreateReluGradV2(const FuncGraphPtr &graph, const CNodePtr &relu_grad, const AnfNodePtr &second_input) { + MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(relu_grad); + MS_EXCEPTION_IF_NULL(second_input); + + auto prim = std::make_shared(kReluGradV2OpName); + std::vector inputs = {NewValueNode(prim), relu_grad->input(1), second_input}; + auto new_node = graph->NewCNode(inputs); + MS_EXCEPTION_IF_NULL(new_node); + 
new_node->set_scope(relu_grad->scope()); + new_node->set_abstract(relu_grad->abstract()); + return new_node; +} +} // namespace + +const BaseRef DereluFusion::DefinePattern() const { + VarPtr i0 = std::make_shared(); + VarPtr i1 = std::make_shared(); + VectorRef relu({prim::kPrimRelu, i1}); + VectorRef relu_grad({prim::kPrimReluGrad, i0, relu}); + return relu_grad; +} + +const AnfNodePtr DereluFusion::Process(const FuncGraphPtr &graph, const AnfNodePtr &node, const EquivPtr &) const { + MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(node); + auto relu_grad = node->cast(); + MS_EXCEPTION_IF_NULL(relu_grad); + auto relu = GetRelu(relu_grad); + MS_EXCEPTION_IF_NULL(relu); + + auto relu_v2 = CreateReluV2(graph, relu); + std::vector relu_v2_node_outputs; + CreateMultipleOutputsOfAnfNode(graph, relu_v2, kReluV2OutputNum, &relu_v2_node_outputs); + + auto relu_grad_v2 = CreateReluGradV2(graph, relu_grad, relu_v2_node_outputs[1]); + + auto manage = graph->manager(); + MS_EXCEPTION_IF_NULL(manage); + manage->Replace(relu, relu_v2_node_outputs[0]); + return relu_grad_v2; +} +} // namespace opt +} // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/derelu_fusion.h b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/derelu_fusion.h new file mode 100644 index 0000000000..e1811f4db4 --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/derelu_fusion.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_DERELU_FUSION_H_ +#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_DERELU_FUSION_H_ + +#include +#include "pre_activate/common/optimizer.h" + +namespace mindspore { +namespace opt { +class DereluFusion : public PatternProcessPass { + public: + explicit DereluFusion(bool multigraph = true) : PatternProcessPass("derelu_fusion", multigraph) {} + ~DereluFusion() override = default; + const BaseRef DefinePattern() const override; + const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; +}; +} // namespace opt +} // namespace mindspore +#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_DERELU_FUSION_H_ diff --git a/mindspore/ccsrc/pre_activate/common/helper.h b/mindspore/ccsrc/pre_activate/common/helper.h index 4f30a935af..4cacd6fbcc 100644 --- a/mindspore/ccsrc/pre_activate/common/helper.h +++ b/mindspore/ccsrc/pre_activate/common/helper.h @@ -29,6 +29,7 @@ constexpr size_t kTransOpInputNum = 2; constexpr size_t kCastInputNum = 2; constexpr size_t kDependInputNum = 3; constexpr size_t kReluInputNum = 2; +constexpr size_t kReluGradInputNum = 3; constexpr size_t kAddInputNum = 3; constexpr size_t kAddNInputNum = 3; constexpr size_t kTupleGetitemInputNum = 3; diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index 08a98a3129..60d5830933 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -115,6 +115,8 @@ constexpr auto kBiasAddOpName = "BiasAdd"; constexpr auto kConfusionMulGradOpName = "ConfusionMulGrad"; constexpr auto kSendOpName = "Send"; constexpr auto kRecvOpName = "Recv"; +constexpr auto kReluV2OpName = "ReluV2"; +constexpr auto kReluGradV2OpName = "ReluGradV2"; // attr key name constexpr auto kAttrInputNames = "input_names"; diff --git 
a/tests/ut/cpp/pre_activate/ascend/ir_fusion/derelu_fusion_test.cc b/tests/ut/cpp/pre_activate/ascend/ir_fusion/derelu_fusion_test.cc new file mode 100644 index 0000000000..ffa5a42b4d --- /dev/null +++ b/tests/ut/cpp/pre_activate/ascend/ir_fusion/derelu_fusion_test.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/backend_common_test.h" +#include "common/py_func_graph_fetcher.h" +#include "pre_activate/common/optimizer.h" +#include "pre_activate/ascend/ir_fusion/derelu_fusion.h" +#include "debug/anf_ir_dump.h" + +namespace mindspore { +namespace opt { +class TestHWOptimizeDereluFusion : public BackendCommon { + public: + TestHWOptimizeDereluFusion() : get_py_fun_("gtest_input.pre_activate.derelu_fusion", true) {} + ~TestHWOptimizeDereluFusion() override = default; + + UT::PyFuncGraphFetcher get_py_fun_; +}; + +TEST_F(TestHWOptimizeDereluFusion, test_fusion) { + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_derelu_fusion", "before"); + EXPECT_NE(g, nullptr); + std::vector shp{1, 1, 1, 1}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 2; ++i) { + args_spec_list.push_back(x_abstract); + } + auto fg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + FuncGraphPtr 
new_graph = optimizer->Optimize(fg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_derelu_fusion", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} + +} // namespace opt +} // namespace mindspore diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py new file mode 100644 index 0000000000..497975542b --- /dev/null +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/derelu_fusion.py @@ -0,0 +1,56 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +from mindspore.ops import operations as P +from mindspore.ops import Primitive + +relu = P.ReLU() +relu_grad = Primitive('ReluGrad') +relu_v2 = Primitive('ReluV2') +relu_grad_v2 = Primitive('ReluGradV2') +make_tuple = Primitive('make_tuple') +tuple_getitem = Primitive('tuple_getitem') + +class FnDict: + def __init__(self): + self.fnDict = {} + + def __call__(self, fn): + self.fnDict[fn.__name__] = fn + + def __getitem__(self, name): + return self.fnDict[name] + +def test_derelu_fusion(tag): + fns = FnDict() + + @fns + def before(i0, i1): + relu_res = relu(i1) + res = relu_grad(i0, relu_res) + other = relu(relu_res) + res = make_tuple(res, other) + return res + + @fns + def after(i0, i1): + relu_res = relu_v2(i1) + item0 = tuple_getitem(relu_res, 0) + item1 = tuple_getitem(relu_res, 1) + other = relu(item0) + res = relu_grad_v2(i0, item1) + res = make_tuple(res, other) + return make_tuple(res) + + return fns[tag] From f72b84ffb819e50cdce2d0265ce3b46d27b93801 Mon Sep 17 00:00:00 2001 From: jonyguo Date: Wed, 15 Apr 2020 11:09:04 +0800 Subject: [PATCH 244/367] Revert "fix schema.parse_columns function" This reverts commit 5c2ded8d8eb714d22291f37945e35a4504fd17e7. --- mindspore/dataset/engine/datasets.py | 76 +++++++++++++--------------- 1 file changed, 35 insertions(+), 41 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 683102aecf..4b02004086 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -2464,53 +2464,47 @@ class Schema: Parse the columns and add it to self. Args: - columns (dict or list[dict]): dataset attribution information, decoded from schema file. - if list: columns element must be dict, 'name' and 'type' must be in keys, 'shape' optional. - if dict: columns.keys() as name, element in columns.values() is dict, and 'type' inside, 'shape' optional. 
- example 1) - [{'name': 'image', 'type': 'int8', 'shape': [3, 3]}, - {'name': 'label', 'type': 'int8', 'shape': [1]}] - example 2) - {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}} + columns (dict or list[str]): names of columns. Raises: - RuntimeError: If failed to parse columns. - RuntimeError: If unknown items in columns. + RuntimeError: If failed to parse schema file. + RuntimeError: If unknown items in schema file. RuntimeError: If column's name field is missing. RuntimeError: If column's type field is missing. """ + if columns is None: + raise TypeError("Expected non-empty dict or string list.") self.columns = [] - if isinstance(columns, list): - for column in columns: - try: - name = column.pop("name") - except KeyError: - raise RuntimeError("Column's name is missing") - try: - de_type = column.pop("type") - except KeyError: - raise RuntimeError("Column' type is missing") - shape = column.pop("shape", None) - column.pop("t_impl", None) - column.pop("rank", None) - if column: - raise RuntimeError("Unknown field {}".format(",".join(column.keys()))) - self.add_column(name, de_type, shape) - elif isinstance(columns, dict): - for key, value in columns.items(): - name = key - try: - de_type = value.pop("type") - except KeyError: - raise RuntimeError("Column' type is missing") - shape = value.pop("shape", None) - value.pop("t_impl", None) - value.pop("rank", None) - if value: - raise RuntimeError("Unknown field {}".format(",".join(value.keys()))) - self.add_column(name, de_type, shape) - else: - raise RuntimeError("columns must be dict or list, columns contain name, type, shape(optional).") + for col in columns: + name = None + shape = None + data_type = None + col_details = None + if isinstance(columns, list): + col_details = col + if "name" in col: + name = col["name"] + elif isinstance(columns, dict): + col_details = columns[col] + name = col + else: + raise RuntimeError("Error parsing the schema file") + + for k, v in 
col_details.items(): + if k == "shape": + shape = v + elif k == "type": + data_type = v + elif k in ("t_impl", "rank"): + pass + else: + raise RuntimeError("Unknown field %s" % k) + + if name is None: + raise RuntimeError("Column's name field is missing.") + if data_type is None: + raise RuntimeError("Column's type field is missing.") + self.add_column(name, data_type, shape) def from_json(self, json_obj): """ From b804d9103d5d69fea39701e79097d0b8b4b3b0a3 Mon Sep 17 00:00:00 2001 From: chujinjin Date: Tue, 14 Apr 2020 10:16:17 +0800 Subject: [PATCH 245/367] set default execution mode to pynative --- mindspore/ccsrc/utils/context/ms_context.cc | 2 +- tests/st/pynative/test_ascend_lenet.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mindspore/ccsrc/utils/context/ms_context.cc b/mindspore/ccsrc/utils/context/ms_context.cc index b1ab0205f2..bee5875f60 100644 --- a/mindspore/ccsrc/utils/context/ms_context.cc +++ b/mindspore/ccsrc/utils/context/ms_context.cc @@ -65,7 +65,7 @@ MsContext::MsContext(const std::string& policy, const std::string& target) { } backend_policy_ = policy_map_[policy]; device_target_ = target; - execution_mode_ = kGraphMode; + execution_mode_ = kPynativeMode; enable_task_sink_ = true; ir_fusion_flag_ = true; enable_hccl_ = false; diff --git a/tests/st/pynative/test_ascend_lenet.py b/tests/st/pynative/test_ascend_lenet.py index 4681454489..4009844791 100644 --- a/tests/st/pynative/test_ascend_lenet.py +++ b/tests/st/pynative/test_ascend_lenet.py @@ -122,8 +122,9 @@ class GradWrap(nn.Cell): @pytest.mark.level0 +@pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training -@pytest.mark.env_single +@pytest.mark.env_onecard def test_ascend_pynative_lenet(): context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend") @@ -152,6 +153,5 @@ def test_ascend_pynative_lenet(): total_time = total_time + cost_time print("======epoch: ", epoch, " loss: ", loss_output.asnumpy(), " cost time: ", 
cost_time) - assert(total_time < 20.0) - assert(loss_output.asnumpy() < 0.01) + assert(loss_output.asnumpy() < 0.1) \ No newline at end of file From 5617143fb871aecd36418c787d6981006bc44284 Mon Sep 17 00:00:00 2001 From: zjun Date: Wed, 15 Apr 2020 11:20:48 +0800 Subject: [PATCH 246/367] Fix arg_min_with_value bug --- mindspore/ops/_op_impl/tbe/arg_min_with_value.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ops/_op_impl/tbe/arg_min_with_value.py b/mindspore/ops/_op_impl/tbe/arg_min_with_value.py index a23005403b..b0c23ef301 100644 --- a/mindspore/ops/_op_impl/tbe/arg_min_with_value.py +++ b/mindspore/ops/_op_impl/tbe/arg_min_with_value.py @@ -16,7 +16,7 @@ """ArgMinWithValue op""" from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType -arg_min_with_value_op_info = TBERegOp("ArgMaxWithValue") \ +arg_min_with_value_op_info = TBERegOp("ArgMinWithValue") \ .fusion_type("ELEMWISE") \ .async_flag(False) \ .binfile_name("arg_min_with_value.so") \ From e0393b482f0c07af120daf9ca07c0c7f36a9024c Mon Sep 17 00:00:00 2001 From: anzhengqi Date: Wed, 15 Apr 2020 14:44:56 +0800 Subject: [PATCH 247/367] fix schema.parse_columns function --- mindspore/dataset/engine/datasets.py | 76 +++++++++++++++------------- 1 file changed, 41 insertions(+), 35 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 4b02004086..f2d0f59f82 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -2464,47 +2464,53 @@ class Schema: Parse the columns and add it to self. Args: - columns (dict or list[str]): names of columns. + columns (dict or list[dict]): dataset attribution information, decoded from schema file. + if list: columns element must be dict, 'name' and 'type' must be in keys, 'shape' optional. + if dict: columns.keys() as name, element in columns.values() is dict, and 'type' inside, 'shape' optional. 
+ example 1) + [{'name': 'image', 'type': 'int8', 'shape': [3, 3]}, + {'name': 'label', 'type': 'int8', 'shape': [1]}] + example 2) + {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}} Raises: - RuntimeError: If failed to parse schema file. - RuntimeError: If unknown items in schema file. + RuntimeError: If failed to parse columns. + RuntimeError: If unknown items in columns. RuntimeError: If column's name field is missing. RuntimeError: If column's type field is missing. """ - if columns is None: - raise TypeError("Expected non-empty dict or string list.") self.columns = [] - for col in columns: - name = None - shape = None - data_type = None - col_details = None - if isinstance(columns, list): - col_details = col - if "name" in col: - name = col["name"] - elif isinstance(columns, dict): - col_details = columns[col] - name = col - else: - raise RuntimeError("Error parsing the schema file") - - for k, v in col_details.items(): - if k == "shape": - shape = v - elif k == "type": - data_type = v - elif k in ("t_impl", "rank"): - pass - else: - raise RuntimeError("Unknown field %s" % k) - - if name is None: - raise RuntimeError("Column's name field is missing.") - if data_type is None: - raise RuntimeError("Column's type field is missing.") - self.add_column(name, data_type, shape) + if isinstance(columns, list): + for column in columns: + try: + name = column.pop("name") + except KeyError: + raise RuntimeError("Column's name is missing") + try: + de_type = column.pop("type") + except KeyError: + raise RuntimeError("Column' type is missing") + shape = column.pop("shape", None) + column.pop("t_impl", None) + column.pop("rank", None) + if column: + raise RuntimeError("Unknown field {}".format(",".join(column.keys()))) + self.add_column(name, de_type, shape) + elif isinstance(columns, dict): + for key, value in columns.items(): + name = key + try: + de_type = value.pop("type") + except KeyError: + raise RuntimeError("Column' type is 
missing") + shape = value.pop("shape", None) + value.pop("t_impl", None) + value.pop("rank", None) + if value: + raise RuntimeError("Unknown field {}".format(",".join(value.keys()))) + self.add_column(name, de_type, shape) + else: + raise RuntimeError("columns must be dict or list, columns contain name, type, shape(optional).") def from_json(self, json_obj): """ From b55f1324a31290da15dce172d3cba524522aebc7 Mon Sep 17 00:00:00 2001 From: zjun Date: Wed, 15 Apr 2020 14:59:49 +0800 Subject: [PATCH 248/367] Fix kernel query log level --- mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc | 2 +- mindspore/ccsrc/kernel/hccl/hccl_kernel_metadata.cc | 2 +- mindspore/ccsrc/kernel/mng/rt_kernel_info.cc | 2 +- mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc index 9f91c1bdd2..e8636ffa2e 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_metadata.cc @@ -34,7 +34,7 @@ void AicpuMetadataInfo(const CNodePtr &kernel_node, std::vectorGetKernelInfo(); if (kernel_info.empty()) { - MS_LOG(WARNING) << "Rt dose not has op[" << opNameLower << "]."; + MS_LOG(DEBUG) << "Rt dose not have op [" << opNameLower << "]."; return; } *kernel_info_list = kernel_info; diff --git a/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc b/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc index 92798aa6bc..e818f503c0 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc @@ -542,7 +542,7 @@ void TbeMetadataInfo(const CNodePtr &kernel_node, std::vectorempty()) { - MS_LOG(DEBUG) << "Tbe dose not has metadata of op[" << op_name << "]."; + MS_LOG(DEBUG) << "Tbe dose not have op [" << op_name << "]."; } } } // namespace kernel From 5365678eee1bcf53dc0ef045f76957c21fc67568 Mon Sep 17 00:00:00 2001 From: lianliguang 
Date: Tue, 14 Apr 2020 15:59:48 +0800 Subject: [PATCH 249/367] refactor kernel select --- .../device/ascend/kernel_select_ascend.cc | 83 ++++++++----------- 1 file changed, 35 insertions(+), 48 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc index 06af3d2ca7..dafe958348 100644 --- a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc +++ b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc @@ -31,12 +31,13 @@ namespace mindspore { namespace device { namespace ascend { namespace { +const float kWegihtBaseScore = 1; +const float kFeatureMapBaseScore = 10; enum MatchCountPriority : int { MATCH_COUNT_PRIORITY_BEGIN = 0, MATCH_DTYPE_COUNT = MATCH_COUNT_PRIORITY_BEGIN, MATCH_FORMAT_COUNT, MATCH_SPECIAL_FORMAT_COUNT, - MATCH_5D_FORMAT_COUNT, MATCH_OUTPUT_DTYPE_COUNT, MATCH_COUNT_PRIORITY_END }; @@ -82,13 +83,6 @@ bool IsValidKernelInfo(const std::shared_ptr &kernel_node, const kernel:: } return true; }; - if (AnfAlgo::GetCNodeName(kernel_node) == "Adam") { - auto input_num = AnfAlgo::GetInputTensorNum(kernel_node); - if (AnfAlgo::GetPrevNodeOutputFormat(kernel_node, input_num - 1) != - kernel_build_info.GetInputFormat(input_num - 1)) { - return false; - } - } if (AnfAlgo::GetCNodeName(kernel_node) == prim::kPrimCast->name()) { return AnfAlgo::GetOutputInferDataType(kernel_node, 0) == kernel_build_info.GetOutputDeviceType(0) && AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0) == kernel_build_info.GetInputDeviceType(0); @@ -112,21 +106,7 @@ bool MatchInferOutputDataType(const CNodePtr &cnode, const kernel::KernelBuildIn MS_EXCEPTION_IF_NULL(cnode); // Check input data type for (size_t input_index = 0; input_index < kernel_build_info.GetInputNum(); ++input_index) { - AnfNodePtr cur_input = AnfAlgo::GetInputNode(cnode, input_index); - MS_EXCEPTION_IF_NULL(cur_input); - TypeId input_origin_type; - if (cur_input->isa() && AnfAlgo::IsParameterWeight(cur_input->cast())) { - // 
weight - input_origin_type = AnfAlgo::GetOutputDeviceDataType(cur_input, 0); - } else if (cur_input->isa()) { - input_origin_type = AnfAlgo::GetOutputDeviceDataType(cur_input, 0); - } else { - // feature map - input_origin_type = AnfAlgo::GetPrevNodeOutputInferDataType(cnode, input_index); - } - if (input_origin_type == kTypeUnknown) { - continue; - } + TypeId input_origin_type = AnfAlgo::GetPrevNodeOutputInferDataType(cnode, input_index); if (kernel_build_info.GetInputDeviceType(input_index) != input_origin_type) { return false; } @@ -140,6 +120,29 @@ bool MatchInferOutputDataType(const CNodePtr &cnode, const kernel::KernelBuildIn return true; } +string GetPriorityMatchFormat(const CNodePtr &cnode) { + string priority_matched_format = kOpFormat_NC1HWC0; + bool is_init = false; + bool need_change_nd = false; + for (size_t index = 0; index < AnfAlgo::GetInputTensorNum(cnode); ++index) { + auto pre_output_format = AnfAlgo::GetPrevNodeOutputFormat(cnode, index); + if (AnfAlgo::IsFeatureMapInput(cnode, index) && + kNeedTransFormatSet.find(pre_output_format) != kNeedTransFormatSet.end()) { + priority_matched_format = !is_init ? 
priority_matched_format : pre_output_format; + is_init = true; + } + // feature map has two or more special format; + if (priority_matched_format != pre_output_format && pre_output_format != kOpFormat_DEFAULT) { + priority_matched_format = kOpFormat_DEFAULT; + } + auto input_shape_size = AnfAlgo::GetPrevNodeOutputInferShape(cnode, index).size(); + need_change_nd = (need_change_nd || (input_shape_size != 4 && input_shape_size > 1)); + } + if (need_change_nd) { + priority_matched_format = kOpFormat_DEFAULT; + } + return priority_matched_format; +} /** * compare two vector by priority, select a better vector, like compare two num, first compare highest num location, * if equal then next num location @@ -172,34 +175,18 @@ void UpdateCurMatchCounts(const kernel::KernelBuildInfo &kernel_build_info, cons if (cur_kernelinfo_match_counts->size() < MATCH_COUNT_PRIORITY_END) { MS_LOG(EXCEPTION) << "Out of range cur_kernelinfo_match_counts " << MATCH_COUNT_PRIORITY_END; } + auto pri_match_format = GetPriorityMatchFormat(kernel_node); for (size_t input_index = 0; input_index < AnfAlgo::GetInputTensorNum(kernel_node); ++input_index) { - AnfNodePtr input_anf_node = AnfAlgo::GetInputNode(kernel_node, input_index); - MS_EXCEPTION_IF_NULL(input_anf_node); - // if a input parameter is a weight with default format, the input shouldn't participate the judge - if (input_anf_node->isa()) { - auto para = input_anf_node->cast(); - if (AnfAlgo::IsParameterWeight(para) && AnfAlgo::GetOutputDeviceDataType(para, 0) == kTypeUnknown) { - continue; - } - } + auto base_score = AnfAlgo::IsFeatureMapInput(kernel_node, input_index) ? 
kFeatureMapBaseScore : kWegihtBaseScore; if (kernel_build_info.GetInputFormat(input_index) == AnfAlgo::GetPrevNodeOutputFormat(kernel_node, input_index)) { - if (AnfAlgo::IsFeatureMapInput(kernel_node, input_index) && - kNeedTransFormatSet.find(kernel_build_info.GetInputFormat(input_index)) != kNeedTransFormatSet.end()) { - (*cur_kernelinfo_match_counts)[MATCH_SPECIAL_FORMAT_COUNT]++; - } - (*cur_kernelinfo_match_counts)[MATCH_FORMAT_COUNT]++; + (*cur_kernelinfo_match_counts)[MATCH_FORMAT_COUNT] += base_score; } if (kernel_build_info.GetInputDeviceType(input_index) == AnfAlgo::GetPrevNodeOutputDeviceDataType(kernel_node, input_index)) { - (*cur_kernelinfo_match_counts)[MATCH_DTYPE_COUNT]++; + (*cur_kernelinfo_match_counts)[MATCH_DTYPE_COUNT] += base_score; } - if (kernel_build_info.GetInputFormat(input_index) == kOpFormat_NC1HWC0) { - // input is from a feature map & this input's shape is not 4d - if (AnfAlgo::IsFeatureMapInput(kernel_node, input_index) && - AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, input_index).size() != kShape4dDims) { - continue; - } - (*cur_kernelinfo_match_counts)[MATCH_5D_FORMAT_COUNT]++; + if (kernel_build_info.GetInputFormat(input_index) == pri_match_format) { + (*cur_kernelinfo_match_counts)[MATCH_SPECIAL_FORMAT_COUNT] += base_score; } } @@ -207,7 +194,7 @@ void UpdateCurMatchCounts(const kernel::KernelBuildInfo &kernel_build_info, cons // cal count of same output dtype between abstract and kernel info if (kernel_build_info.GetOutputDeviceType(output_index) == AnfAlgo::GetOutputInferDataType(kernel_node, output_index)) { - (*cur_kernelinfo_match_counts)[MATCH_OUTPUT_DTYPE_COUNT]++; + (*cur_kernelinfo_match_counts)[MATCH_OUTPUT_DTYPE_COUNT] += 1; } } } @@ -517,7 +504,7 @@ void SelectKernelInfo(const CNodePtr &kernel_node) { std::vector> kernel_info_list; MS_EXCEPTION_IF_NULL(kernel_node); kernel::KernelQuery(kernel_node, &kernel_info_list); - std::vector most_match_counts = {-1, -1, -1, -1, -1}; + std::vector most_match_counts = 
{-1, -1, -1, -1}; int selected_index = -1; auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); @@ -527,7 +514,7 @@ void SelectKernelInfo(const CNodePtr &kernel_node) { std::vector node_mix_precision_datatype_index; std::vector node_mix_precision_datatype; for (size_t info_index = 0; info_index < kernel_info_list.size(); ++info_index) { - std::vector cur_kernel_info_match_counts = {0, 0, 0, 0, 0}; + std::vector cur_kernel_info_match_counts = {0, 0, 0, 0}; auto kernel_build_info = *(kernel_info_list[info_index]); if (!IsValidKernelInfo(kernel_node, kernel_build_info)) { continue; From 62807da0c4fa6caa0c941524483959c0263f51c5 Mon Sep 17 00:00:00 2001 From: buxue Date: Wed, 25 Mar 2020 18:38:10 +0800 Subject: [PATCH 250/367] =?UTF-8?q?Develop=20operator=20Unfold=EF=BC=8Ctak?= =?UTF-8?q?e=20the=20ge=20backend=EF=BC=8Cdock=20with=20tbe's=20ExtractIma?= =?UTF-8?q?gePatches=20operator?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- mindspore/ccsrc/kernel/tbe/tbe_adapter.h | 6 -- mindspore/ccsrc/transform/convert.cc | 2 + mindspore/ccsrc/transform/op_adapter.h | 26 +++--- mindspore/ccsrc/transform/op_declare.cc | 14 ++-- mindspore/ccsrc/transform/op_declare.h | 2 + mindspore/ccsrc/utils/utils.h | 1 + mindspore/nn/layer/__init__.py | 4 +- mindspore/nn/layer/basic.py | 48 +++++++++++ mindspore/ops/_grad/grad_nn_ops.py | 57 ++++++++++++- mindspore/ops/operations/__init__.py | 3 +- mindspore/ops/operations/math_ops.py | 2 +- mindspore/ops/operations/nn_ops.py | 100 ++++++++++++++++++++++- tests/ut/python/ops/test_math_ops.py | 32 ++++++-- tests/ut/python/ops/test_nn_ops.py | 55 +++++++++++++ 14 files changed, 310 insertions(+), 42 deletions(-) diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.h b/mindspore/ccsrc/kernel/tbe/tbe_adapter.h index 3997318c86..27f6d315f6 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.h +++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.h @@ -45,12 +45,6 @@ class TbeAdapter { 
std::vector *input_list, kCreaterType creater_type); private: - static void MaxPoolWithArgmaxAttrJsonPass(const AnfNodePtr &anf_node, - const std::vector> &op_info_attrs, - nlohmann::json *attrs_json); - static void MaxPoolGradWithArgmaxAttrJsonPass(const AnfNodePtr &anf_node, - const std::vector> &op_info_attrs, - nlohmann::json *attrs_json); static void Conv2DAttrJsonPass(const AnfNodePtr &anf_node, const std::vector> &op_info_attrs, nlohmann::json *attrs_json); static void Conv2DBackpropFilterAttrJsonPass(const AnfNodePtr &anf_node, diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index 251946f6fd..2daa86b960 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -96,6 +96,7 @@ const char kNameConfusionMatrix[] = "ConfusionMatrix"; const char kNameResizeNearestNeighborD[] = "ResizeNearestNeighbor"; const char kNameResizeNearestNeighborGrad[] = "ResizeNearestNeighborGrad"; const char kNameApplyAdam[] = "Adam"; +const char kNameExtractImagePatches[] = "ExtractImagePatches"; const char kNameReLU6[] = "ReLU6"; const char kNameReLU6Grad[] = "ReLU6Grad"; const char kNameElu[] = "Elu"; @@ -214,6 +215,7 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameMaxPoolGrad), ADPT_DESC(MaxPoolGrad)}, {string(kNameAvgPoolGrad), ADPT_DESC(AvgPoolGrad)}, {string(kNameMaxPoolGradWithArgmax), ADPT_DESC(MaxPoolGradWithArgmax)}, + {string(kNameExtractImagePatches), ADPT_DESC(ExtractImagePatches)}, {prim::kPrimAssign->name(), ADPT_DESC(Assign)}, {prim::kPrimStateSetItem->name(), ADPT_DESC(Assign)}, {prim::kPrimReluGrad->name(), ADPT_DESC(ReluGrad)}, diff --git a/mindspore/ccsrc/transform/op_adapter.h b/mindspore/ccsrc/transform/op_adapter.h index 7f20a88035..421e4c4569 100644 --- a/mindspore/ccsrc/transform/op_adapter.h +++ b/mindspore/ccsrc/transform/op_adapter.h @@ -322,18 +322,12 @@ class OpAdapter : public BaseOpAdapter { Status UpdateSingleOutputDesc(const OperatorPtr& op, const 
abstract::BaseShapePtr& shp, const TypePtr& type) { MS_EXCEPTION_IF_NULL(type); - TypeId me_type = type->type_id(); - if (kObjectTypeTensorType == me_type) { - me_type = dyn_cast(type)->element()->type_id(); - } - - std::vector shape; - auto normal_shape_ptr = dyn_cast(shp); - if (nullptr != normal_shape_ptr) { - shape = normal_shape_ptr->shape(); + std::string format = "NCHW"; + if (op->GetOpType() == kExtractImagePatchesOpName) { + format = "NHWC"; } - auto desc = TransformUtil::GetGeTensorDesc(shape, me_type, "NCHW"); + auto desc = CreateOutputDesc(dyn_cast(shp), type, format); if (desc == nullptr) { MS_LOG(ERROR) << "Update output descriptor failed!"; return FAILED; @@ -410,14 +404,15 @@ class OpAdapter : public BaseOpAdapter { MS_LOG(ERROR) << "output_map is not equal tuple_shape size"; return FAILED; } + std::string format = "NCHW"; + if (op->GetOpType() == kTopKOpName) { + format = "NHWC"; + } for (size_t i = 0; i < tuple_shp->shape().size(); ++i) { auto tuple_type = dyn_cast(type); MS_EXCEPTION_IF_NULL(tuple_type); TypePtr type_elem = tuple_type->elements()[i]; - std::string format = "NCHW"; - if (op->GetOpType() == kTopKOpName) { - format = "NHWC"; - } + auto desc = CreateOutputDesc(dyn_cast(tuple_shp->shape()[i]), type_elem, format); if (desc == nullptr) { MS_LOG(ERROR) << "Create output descriptor failed!"; @@ -476,6 +471,9 @@ class OpAdapter : public BaseOpAdapter { if (desc == nullptr) { continue; } + if (op->GetOpType() == kExtractImagePatchesOpName) { + desc->SetFormat(ge::Format::FORMAT_NHWC); + } it->second.update_input_desc(op, *desc); } } diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index f821c71d87..420edc685a 100644 --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -751,16 +751,20 @@ ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyT OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}}; // 
MaxPoolGradWithArgmax -INPUT_MAP(MaxPoolGradWithArgmax) = { - {1, INPUT_DESC(x)}, - {2, INPUT_DESC(grad)}, - {3, INPUT_DESC(argmax)}, -}; +INPUT_MAP(MaxPoolGradWithArgmax) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}, {3, INPUT_DESC(argmax)}}; ATTR_MAP(MaxPoolGradWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, {"padding", ATTR_DESC(padding, AnyTraits())}}; OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}}; +// ExtractImagePatches +INPUT_MAP(ExtractImagePatches) = {{1, INPUT_DESC(images)}}; +ATTR_MAP(ExtractImagePatches) = {{"ksizes", ATTR_DESC(ksizes, AnyTraits(), AnyTraits>())}, + {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, + {"rates", ATTR_DESC(rates, AnyTraits(), AnyTraits>())}, + {"padding", ATTR_DESC(padding, AnyTraits())}}; +OUTPUT_MAP(ExtractImagePatches) = {{0, OUTPUT_DESC(y)}}; + // Conv2D INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}}; ATTR_MAP(Conv2D) = { diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 8f6dda9430..8b32e16b35 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -95,6 +95,8 @@ DECLARE_OP_USE_OUTPUT(MaxPoolGradWithArgmax) DECLARE_OP_ADAPTER(Conv2D) DECLARE_OP_USE_ENUM(Conv2D) DECLARE_OP_USE_OUTPUT(Conv2D) +DECLARE_OP_ADAPTER(ExtractImagePatches) +DECLARE_OP_USE_OUTPUT(ExtractImagePatches) DECLARE_OP_ADAPTER(Conv2DBackpropInputD) DECLARE_OP_USE_ENUM(Conv2DBackpropInputD) DECLARE_OP_USE_INPUT_ATTR(Conv2DBackpropInputD) diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index 08a98a3129..44e7b4d4c2 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -49,6 +49,7 @@ constexpr auto kBroadcastOpName = "Broadcast"; constexpr auto kReduceScatterOpName = "ReduceScatter"; constexpr auto kMemCpyAsyncOpName = "memcpy_async"; constexpr auto kTopKOpName = "TopK"; +constexpr auto 
kExtractImagePatchesOpName = "ExtractImagePatches"; constexpr auto kBNTrainingReduceOpName = "BNTrainingReduce"; constexpr auto kBNTrainingUpdateOpName = "BNTrainingUpdate"; constexpr auto kSimpleMeanGradOpName = "SimpleMeanGrad"; diff --git a/mindspore/nn/layer/__init__.py b/mindspore/nn/layer/__init__.py index 9c2c30c914..3d729edcd3 100644 --- a/mindspore/nn/layer/__init__.py +++ b/mindspore/nn/layer/__init__.py @@ -22,7 +22,7 @@ from .normalization import BatchNorm1d, BatchNorm2d, LayerNorm from .container import SequentialCell, CellList from .conv import Conv2d, Conv2dTranspose from .lstm import LSTM -from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot, Pad +from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot, Pad, Unfold from .embedding import Embedding from .pooling import AvgPool2d, MaxPool2d from .image import ImageGradients, SSIM @@ -35,6 +35,6 @@ __all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'LSTM', 'Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'Embedding', - 'AvgPool2d', 'MaxPool2d', 'Pad', + 'AvgPool2d', 'MaxPool2d', 'Pad', 'Unfold', 'ImageGradients', 'SSIM', ] diff --git a/mindspore/nn/layer/basic.py b/mindspore/nn/layer/basic.py index 64c4cfd93b..5ac52acac7 100644 --- a/mindspore/nn/layer/basic.py +++ b/mindspore/nn/layer/basic.py @@ -439,3 +439,51 @@ class Pad(Cell): else: x = self.pad(x, self.paddings) return x + + +class Unfold(Cell): + """ + Extract patches from images. + The input tensor must be a 4-D tensor and the data format is NCHW. + + Args: + ksizes (Union[tuple[int], list[int]]): The size of sliding window, should be a tuple or list of int, + and the format is [1, ksize_row, ksize_col, 1]. + strides (Union[tuple[int], list[int]]): Distance between the centers of the two consecutive patches, + should be a tuple or list of int, and the format is [1, stride_row, stride_col, 1]. 
+ rates (Union[tuple[int], list[int]]): In each extracted patch, the gap between the corresponding dim + pixel positions, should be a tuple or list of int, and the format is [1, rate_row, rate_col, 1]. + padding (str): The type of padding algorithm, is a string whose value is "same" or "valid", + not case sensitive. Default: "valid". + + - same: Means that the patch can take the part beyond the original image, and this part is filled with 0. + + - valid: Means that the patch area taken must be completely contained in the original image. + + Inputs: + - **input_x** (Tensor) - A 4-D tensor whose shape is [in_batch, in_depth, in_row, in_col] and + data type is int8, float16, uint8. + + Outputs: + Tensor, a 4-D tensor whose data type is same as 'input_x', + and the shape is [out_batch, out_depth, out_row, out_col], the out_batch is same as the in_batch. + + Examples: + >>> net = Unfold(ksizes=[1, 2, 2, 1], strides=[1, 1, 1, 1], rates=[1, 1, 1, 1]) + >>> image = Tensor(np.ones([1, 1, 3, 3]), dtype=mstype.float16) + >>> net(image) + Tensor ([[[[1, 1] [1, 1]] [[1, 1], [1, 1]] [[1, 1] [1, 1]], [[1, 1], [1, 1]]]], + shape=(1, 4, 2, 2), dtype=mstype.float16) + """ + def __init__(self, ksizes, strides, rates, padding="valid"): + super(Unfold, self).__init__() + self.extract_image_patches = P.ExtractImagePatches(ksizes, strides, rates, padding) + self.transpose = P.Transpose() + self.format_NHWC = (0, 2, 3, 1) + self.format_NCHW = (0, 3, 1, 2) + + def construct(self, input_x): + x_transpose = self.transpose(input_x, self.format_NHWC) + ret = self.extract_image_patches(x_transpose) + ret_transpose = self.transpose(ret, self.format_NCHW) + return ret_transpose diff --git a/mindspore/ops/_grad/grad_nn_ops.py b/mindspore/ops/_grad/grad_nn_ops.py index 149dd6caec..ae730d78a7 100755 --- a/mindspore/ops/_grad/grad_nn_ops.py +++ b/mindspore/ops/_grad/grad_nn_ops.py @@ -14,7 +14,7 @@ # ============================================================================ """Define the grad rules 
of neural network related operations.""" - +from mindspore.common import dtype as mstype from .. import functional as F from .. import operations as P from ..operations import _grad_ops as G @@ -52,6 +52,61 @@ def get_bprop_conv2d(self): return bprop +@bprop_getters.register(P.ExtractImagePatches) +def get_bprop_extract_image_patches(self): + """Grad definition for `ExtractImagePatches` operation.""" + get_shape = P.Shape() + reshape = P.Reshape() + extract_image_patches = P.ExtractImagePatches(ksizes=self.ksizes, + strides=self.strides, + rates=self.rates, + padding=self.padding) + concat = P.Concat(axis=-1) + expand_dims = P.ExpandDims() + scatter_nd = P.ScatterNd() + dtype = P.DType() + fill = P.Fill() + slice_op = P.Slice() + transpose = P.Transpose() + matmul = P.MatMul() + cast = P.Cast() + _, ksizes_row, ksizes_col, _ = self.ksizes + + def bprop(x, out, dout): + x_shape = get_shape(x) + x_batch, x_row, x_col, x_depth = x_shape + x_indices_num = x_row * x_col + 1 + x_idx = F.tuple_to_array(range(1, x_indices_num)) + x_idx = reshape(x_idx, (1, x_row, x_col, 1)) + x_idx = cast(x_idx, mstype.float16) + x_idx_patch = extract_image_patches(x_idx) + x_idx_patch = transpose(x_idx_patch, (0, 3, 1, 2)) + x_idx_patch = cast(x_idx_patch, mstype.int32) + + out_shape = get_shape(out) + _, out_row, out_col, _ = out_shape + out_indices_num = out_row * out_col * ksizes_row * ksizes_col + out_idx = F.tuple_to_array(range(out_indices_num)) + out_idx = reshape(out_idx, (1, ksizes_row * ksizes_col, out_row, out_col)) + + idx_tensor = concat((expand_dims(x_idx_patch, -1), expand_dims(out_idx, -1))) + idx_tensor = reshape(idx_tensor, (-1, 2)) + sp_shape = (x_indices_num, out_indices_num) + sp_tensor = scatter_nd(idx_tensor, fill(dtype(dout), (out_indices_num,), 1), sp_shape) + sp_tensor = slice_op(sp_tensor, (1, 0), (x_indices_num - 1, out_indices_num)) + + grad = reshape(dout, (x_batch, out_row, out_col, ksizes_row, ksizes_col, x_depth)) + grad = transpose(grad, (1, 2, 3, 4, 0, 
5)) + grad = reshape(grad, (-1, x_batch * x_depth)) + + jac = matmul(sp_tensor, grad) + dx = reshape(jac, (x_row, x_col, x_batch, x_depth)) + dx = transpose(dx, (2, 0, 1, 3)) + + return (dx,) + return bprop + + @bprop_getters.register(P.DepthwiseConv2dNative) def get_bprop_depthwise_conv2d_native(self): """Grad definition for `DepthwiseConv2dNative` operation.""" diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 40cbfc3381..492ebae444 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -57,7 +57,7 @@ from .nn_ops import (LSTM, SGD, Adam, ApplyMomentum, BatchNorm, Gelu, Elu, GetNext, L2Normalize, LayerNorm, LogSoftmax, - MaxPool, + MaxPool, ExtractImagePatches, AvgPool, Conv2DBackpropInput, MaxPoolWithArgmax, OneHot, Pad, MirrorPad, PReLU, ReLU, ReLU6, HSwish, HSigmoid, ResizeBilinear, Sigmoid, @@ -89,6 +89,7 @@ __all__ = [ 'Sqrt', 'Square', 'Conv2D', + 'ExtractImagePatches', 'Flatten', 'MaxPoolWithArgmax', 'FusedBatchNorm', diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index f6feb1af18..e390b6b589 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1475,7 +1475,7 @@ class LogicalNot(PrimitiveWithInfer): Computes the "logical NOT" of a tensor element-wise. Inputs: - - **input_x** (Tensor) - The input tensor whose dtype is bool + - **input_x** (Tensor) - The input tensor whose dtype is bool. Outputs: Tensor, the shape is same as the `input_x`, and the dtype is bool. diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 377ef19417..a92a75c781 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -2546,6 +2546,7 @@ class ApplyFtrl(PrimitiveWithInfer): Outputs: Tensor, representing the updated var. 
""" + @prim_attr_register def __init__(self, use_locking=False): self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'lr', 'l1', 'l2', 'lr_power'], @@ -2566,8 +2567,99 @@ class ApplyFtrl(PrimitiveWithInfer): args = {'var_type': var_type, 'accum_type': accum_type, 'linear_type': linear_type, 'grad_type': grad_type} validator.check_type_same(args, (mstype.float32, mstype.float16)) - validator.check_typename("lr", lr_type,[mstype.float16, mstype.float32]) - validator.check_typename("l1", l1_type,[mstype.float16, mstype.float32]) - validator.check_typename("l2", l2_type,[mstype.float16, mstype.float32]) - validator.check_typename("lr_power", lr_power_type,[mstype.float16, mstype.float32]) + validator.check_typename("lr", lr_type, [mstype.float16, mstype.float32]) + validator.check_typename("l1", l1_type, [mstype.float16, mstype.float32]) + validator.check_typename("l2", l2_type, [mstype.float16, mstype.float32]) + validator.check_typename("lr_power", lr_power_type, [mstype.float16, mstype.float32]) return var_type + + +class ExtractImagePatches(PrimitiveWithInfer): + """ + Extract patches from images. + The input tensor must be a 4-D tensor and the data format is NHWC. + + Args: + ksizes (Union[tuple[int], list[int]]): The size of sliding window, should be a tuple or list of int, + and the format is [1, ksize_row, ksize_col, 1]. + strides (Union[tuple[int], list[int]]): Distance between the centers of the two consecutive patches, + should be a tuple or list of int, and the format is [1, stride_row, stride_col, 1]. + rates (Union[tuple[int], list[int]]): In each extracted patch, the gap between the corresponding dim + pixel positions, should be a tuple or list of int, and the format is [1, rate_row, rate_col, 1]. + padding (str): The type of padding algorithm, is a string whose value is "same" or "valid", + not case sensitive. Default: "valid". + + - same: Means that the patch can take the part beyond the original image, and this part is filled with 0. 
+ + - valid: Means that the patch area taken must be completely contained in the original image. + + Inputs: + - **input_x** (Tensor) - A 4-D tensor whose shape is [in_batch, in_row, in_col, in_depth] and + data type is int8, float16, uint8. + + Outputs: + Tensor, a 4-D tensor whose data type is same as 'input_x', + and the shape is [out_batch, out_row, out_col, out_depth], the out_batch is same as the in_batch. + """ + + @prim_attr_register + def __init__(self, ksizes, strides, rates, padding="valid"): + """init""" + validator.check_type("ksizes", ksizes, [tuple, list]) + validator.check_type("strides", strides, [tuple, list]) + validator.check_type("rates", rates, [tuple, list]) + self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME']) + self.add_prim_attr("padding", self.padding) + + if len(ksizes) != 4 or ksizes[0] != 1 or ksizes[3] != 1: + raise ValueError("The format of ksizes should be [1, ksize_row, ksize_col, 1], " + f"but got {ksizes}.") + if not isinstance(ksizes[1], int) or not isinstance(ksizes[2], int) or \ + ksizes[1] < 1 or ksizes[2] < 1: + raise ValueError("The ksize_row and ksize_col in ksizes should be an positive integer number, " + f"but got ksize_row is {ksizes[1]}, ksize_col is {ksizes[2]}") + + if len(strides) != 4 or strides[0] != 1 or strides[3] != 1: + raise ValueError("The format of strides should be [1, stride_row, stride_col, 1], " + f"but got {strides}.") + if not isinstance(strides[1], int) or not isinstance(strides[2], int) or \ + strides[1] < 1 or strides[2] < 1: + raise ValueError("The stride_row and stride_col in strides should be an positive integer number, " + f"but got stride_row is {strides[1]}, stride_col is {strides[2]}") + + if len(rates) != 4 or rates[0] != 1 or rates[3] != 1: + raise ValueError("The format of rates should be [1, rate_row, rate_col, 1], " + f"but got {rates}.") + if not isinstance(rates[1], int) or not isinstance(rates[2], int) or \ + rates[1] < 1 or rates[2] < 1: + raise 
ValueError("The rate_row and rate_col in rates should be an positive integer number, " + f"but got rate_row is {rates[1]}, rate_col is {rates[2]}") + + def infer_shape(self, input_x): + in_batch, in_row, in_col, in_depth = input_x + _, ksize_row, ksize_col, _ = self.ksizes + _, stride_row, stride_col, _ = self.strides + _, rate_row, rate_col, _ = self.rates + if len(input_x) != 4: + raise ValueError("The `input_x` should be a 4-D tensor, " + f"but got a {len(input_x)}-D tensor whose shape is {input_x}") + + out_batch = in_batch + out_depth = ksize_row * ksize_col * in_depth + + if self.padding == "VALID": + out_row = \ + (in_row - (ksize_row + (ksize_row - 1) * (rate_row - 1))) // stride_row + 1 + out_col = \ + (in_col - (ksize_col + (ksize_col - 1) * (rate_col - 1))) // stride_col + 1 + else: + out_row = (in_row - 1) // stride_row + 1 + out_col = (in_col - 1) // stride_col + 1 + + out_shape = [out_batch, out_row, out_col, out_depth] + return out_shape + + def infer_dtype(self, input_x): + validator.check_subclass("input_x", input_x, mstype.tensor) + validator.check_typename("input_x_dtype", input_x, (mstype.int8, mstype.float16, mstype.float32)) + return input_x diff --git a/tests/ut/python/ops/test_math_ops.py b/tests/ut/python/ops/test_math_ops.py index ad1642228d..8b7f627e81 100755 --- a/tests/ut/python/ops/test_math_ops.py +++ b/tests/ut/python/ops/test_math_ops.py @@ -30,6 +30,8 @@ from ....mindspore_test_framework.pipeline.forward.compile_forward \ import pipeline_for_compile_forward_ge_graph_for_case_by_case_config from ....mindspore_test_framework.pipeline.forward.verify_exception \ import pipeline_for_verify_exception_for_case_by_case_config + + # pylint: disable=W0613 # pylint: disable=W0231 # W0613: unused-argument @@ -106,7 +108,7 @@ def test_realdiv(): result = div(x, y) x = x.asnumpy() y = y.asnumpy() - expect = x/y + expect = x / y assert np.all(result.asnumpy() == expect) @@ -122,6 +124,7 @@ def test_eye(): class 
VirtualLossGrad(PrimitiveWithInfer): """ VirtualLossGrad definition """ + @prim_attr_register def __init__(self): """init VirtualLossGrad""" @@ -138,6 +141,7 @@ class VirtualLossGrad(PrimitiveWithInfer): class VirtualLoss(PrimitiveWithInfer): """ VirtualLoss definition """ + @prim_attr_register def __init__(self): """init VirtualLoss""" @@ -151,6 +155,7 @@ class VirtualLoss(PrimitiveWithInfer): def bprop(x, out, dout): dx = loss_grad(x, out, dout) return (dx,) + return bprop def infer_shape(self, x_shape): @@ -162,6 +167,7 @@ class VirtualLoss(PrimitiveWithInfer): class NetWithLoss(nn.Cell): """ NetWithLoss definition """ + def __init__(self, network): super(NetWithLoss, self).__init__() self.loss = VirtualLoss() @@ -174,6 +180,7 @@ class NetWithLoss(nn.Cell): class GradWrap(nn.Cell): """ GradWrap definition """ + def __init__(self, network): super(GradWrap, self).__init__() self.network = network @@ -184,6 +191,7 @@ class GradWrap(nn.Cell): class MatMulNet(nn.Cell): """ MatMulNet definition """ + def __init__(self): super(MatMulNet, self).__init__() self.matmul = P.MatMul() @@ -195,6 +203,7 @@ class MatMulNet(nn.Cell): class NetWithLossSub(nn.Cell): """ NetWithLossSub definition """ + def __init__(self, network): super(NetWithLossSub, self).__init__() self.loss = VirtualLoss() @@ -207,6 +216,7 @@ class NetWithLossSub(nn.Cell): class GradWrapSub(nn.Cell): """ GradWrapSub definition """ + def __init__(self, network): super(GradWrapSub, self).__init__() self.network = network @@ -217,6 +227,7 @@ class GradWrapSub(nn.Cell): class SubNet(nn.Cell): """ SubNet definition """ + def __init__(self): super(SubNet, self).__init__() self.sub = P.Sub() @@ -227,6 +238,7 @@ class SubNet(nn.Cell): class NpuFloatNet(nn.Cell): """ NpuFloat definition """ + def __init__(self): super(NpuFloatNet, self).__init__() self.mul = P.Mul() @@ -258,6 +270,7 @@ class NpuFloatNet(nn.Cell): class DiagNet(nn.Cell): """ DiagNet definition """ + def __init__(self): super(DiagNet, self).__init__() 
self.fill = P.Fill() @@ -269,6 +282,7 @@ class DiagNet(nn.Cell): class NetWithLossCumSum(nn.Cell): """ NetWithLossCumSum definition """ + def __init__(self, network): super(NetWithLossCumSum, self).__init__() self.loss = VirtualLoss() @@ -281,6 +295,7 @@ class NetWithLossCumSum(nn.Cell): class GradWrapCumSum(nn.Cell): """ GradWrap definition """ + def __init__(self, network): super(GradWrapCumSum, self).__init__() self.network = network @@ -291,6 +306,7 @@ class GradWrapCumSum(nn.Cell): class NetCumSum(nn.Cell): """ NetCumSum definition """ + def __init__(self): super(NetCumSum, self).__init__() self.cumsum = P.CumSum() @@ -321,8 +337,8 @@ test_case_math_ops = [ 'skip': ['backward']}), ('CumSumGrad', { 'block': GradWrapCumSum(NetWithLossCumSum(NetCumSum())), - 'desc_inputs': [Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float16))], - 'desc_bprop': [Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float16))], + 'desc_inputs': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))], + 'desc_bprop': [Tensor(np.array([[3, 4, 6, 10], [1, 6, 7, 9], [4, 3, 8, 7], [1, 3, 7, 9]]).astype(np.float16))], 'skip': ['backward']}), ('Diag', { 'block': DiagNet(), @@ -351,7 +367,6 @@ test_case_math_ops = [ 'skip': ['backward']}), ] - test_case_lists = [test_case_math_ops] test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) # use -k to select certain testcast @@ -360,6 +375,7 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) import mindspore.context as context + @non_graph_engine @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_exec(): @@ -369,16 +385,16 @@ def test_exec(): raise_set = [ ('StridedSlice_1_Error', { - 'block': (lambda x : P.StridedSlice(begin_mask="1"), {'exception': ValueError}), + 'block': (lambda x: P.StridedSlice(begin_mask="1"), {'exception': ValueError}), 'desc_inputs': 
[0]}), ('StridedSlice_2_Error', { - 'block': (lambda x : P.StridedSlice(end_mask="1"), {'exception': ValueError}), + 'block': (lambda x: P.StridedSlice(end_mask="1"), {'exception': ValueError}), 'desc_inputs': [0]}), ('StridedSlice_3_Error', { - 'block': (lambda x : P.StridedSlice(ellipsis_mask=1.1), {'exception': ValueError}), + 'block': (lambda x: P.StridedSlice(ellipsis_mask=1.1), {'exception': ValueError}), 'desc_inputs': [0]}), ('StridedSlice_4_Error', { - 'block': (lambda x : P.StridedSlice(new_axis_mask="1.1"), {'exception': ValueError}), + 'block': (lambda x: P.StridedSlice(new_axis_mask="1.1"), {'exception': ValueError}), 'desc_inputs': [0]}), ] diff --git a/tests/ut/python/ops/test_nn_ops.py b/tests/ut/python/ops/test_nn_ops.py index cadac6dfb4..7364893503 100644 --- a/tests/ut/python/ops/test_nn_ops.py +++ b/tests/ut/python/ops/test_nn_ops.py @@ -382,6 +382,46 @@ def test_max_pool_with_arg_max(): print(ret) +class GradWrapUnfold(nn.Cell): + """ GradWrapUnfold definition """ + + def __init__(self, network): + super(GradWrapUnfold, self).__init__() + self.network = network + self.sens = Tensor(np.ones([1, 4, 2, 2], np.float32)) + + def construct(self, x): + return C.grad_all_with_sens(self.network)(x, self.sens) + + +class UnfoldNetValid(nn.Cell): + """ UnfoldNetValid definition """ + + def __init__(self): + super(UnfoldNetValid, self).__init__() + self.unfold = nn.Unfold(ksizes=[1, 2, 2, 1], + strides=[1, 1, 1, 1], + rates=[1, 1, 1, 1], + padding='VALID') + + def construct(self, x): + return self.unfold(x) + + +class UnfoldNetSame(nn.Cell): + """ UnfoldNetSame definition """ + + def __init__(self): + super(UnfoldNetSame, self).__init__() + self.unfold = nn.Unfold(ksizes=[1, 2, 2, 1], + strides=[1, 1, 1, 1], + rates=[1, 1, 1, 1], + padding='SAME') + + def construct(self, x): + return self.unfold(x) + + test_cases = [ ('SoftMaxGrad', { 'block': SoftMaxGrad(VirtualNetWithLoss(P.Softmax())), @@ -440,6 +480,21 @@ test_cases = [ 'block': ComparisonNet(), 
'desc_inputs': [Tensor(np.ones([6, 9, 10], np.int32)), Tensor(np.ones([6, 9, 10], np.int32))], }), + ('UnfoldValid', { + 'block': UnfoldNetValid(), + 'desc_inputs': [Tensor(np.ones([1, 1, 3, 3], np.float32))], + 'desc_bprop': [Tensor(np.ones([1, 4, 2, 2], np.float32))], + 'skip': ['backward']}), + ('UnfoldSame', { + 'block': UnfoldNetSame(), + 'desc_inputs': [Tensor(np.ones([1, 1, 3, 3], np.float32))], + 'desc_bprop': [Tensor(np.ones([1, 4, 3, 3], np.float32))], + 'skip': ['backward']}), + ('UnfoldGrad', { + 'block': GradWrapUnfold(UnfoldNetValid()), + 'desc_inputs': [Tensor(np.ones([1, 1, 3, 3], np.float32))], + 'desc_bprop': [Tensor(np.ones([1, 4, 2, 2], np.float32))], + 'skip': ['backward']}), ] test_cases_for_verify_exception = [ From 734fabb780327a78f4b607f85e9b84426777f14e Mon Sep 17 00:00:00 2001 From: chenfei Date: Wed, 15 Apr 2020 15:24:16 +0800 Subject: [PATCH 251/367] solve problem when create parameter from 'call' cnode --- mindspore/ccsrc/session/ascend_session.cc | 2 +- mindspore/ccsrc/session/session_basic.cc | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc index 93ae99f4d2..ba0ef836e1 100755 --- a/mindspore/ccsrc/session/ascend_session.cc +++ b/mindspore/ccsrc/session/ascend_session.cc @@ -822,7 +822,7 @@ GraphId AscendSession::GetGraphIdByNode(const AnfNodePtr &front_anf) const { } } MS_EXCEPTION_IF_NULL(front_anf); - MS_LOG(WARNING) << "front_anf " << front_anf->DebugString() << " is not exist in any graph"; + MS_LOG(DEBUG) << "front_anf " << front_anf->DebugString() << " is not exist in any graph"; return kInvalidGraphId; } diff --git a/mindspore/ccsrc/session/session_basic.cc b/mindspore/ccsrc/session/session_basic.cc index bea51037bf..33ef78c13d 100755 --- a/mindspore/ccsrc/session/session_basic.cc +++ b/mindspore/ccsrc/session/session_basic.cc @@ -264,7 +264,11 @@ std::vector CreateParameterFromTuple(const AnfNodePtr &node, bool va 
MS_EXCEPTION_IF_NULL(node); MS_EXCEPTION_IF_NULL(graph); std::vector parameters; - std::vector pre_graph_out = AnfAlgo::GetAllOutput(node, {prim::kPrimTupleGetItem}); + std::vector pre_graph_out = {node}; + // If a cnode is a call, it's input0 is a cnode too, so it doesn't have primitive + if (!AnfAlgo::IsRealKernel(node)) { + pre_graph_out = AnfAlgo::GetAllOutput(node, {prim::kPrimTupleGetItem}); + } auto valid_inputs = graph->MutableValidInputs(); MS_EXCEPTION_IF_NULL(valid_inputs); auto graph_inputs = graph->MutableInputs(); From 8765810528442d6a1431de818ae67b21b3dfd49d Mon Sep 17 00:00:00 2001 From: c00425699 Date: Wed, 15 Apr 2020 16:15:45 +0800 Subject: [PATCH 252/367] fix_coding_style_check_warning --- mindspore/ccsrc/parallel/device_manager.cc | 1 - mindspore/ccsrc/parallel/device_matrix.cc | 1 - mindspore/ccsrc/parallel/dynamic_creator.h | 1 - mindspore/ccsrc/parallel/ops_info/activation_info.cc | 3 ++- mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc | 1 - mindspore/ccsrc/parallel/ops_info/loss_info.cc | 3 ++- mindspore/ccsrc/parallel/ops_info/operator_info.cc | 1 - mindspore/ccsrc/parallel/ops_info/prelu_info.cc | 6 ++++-- mindspore/ccsrc/parallel/ops_info/reshape_info.cc | 5 +++-- mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc | 6 +++--- mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h | 1 - mindspore/ccsrc/parallel/status.h | 1 - mindspore/ccsrc/parallel/step_auto_parallel.cc | 2 -- mindspore/ccsrc/parallel/step_auto_parallel.h | 1 - mindspore/ccsrc/parallel/step_parallel.cc | 2 -- mindspore/ccsrc/parallel/strategy.h | 1 - mindspore/ccsrc/parallel/tensor_layout/arrangement.cc | 1 - mindspore/ccsrc/parallel/tensor_layout/arrangement.h | 2 -- mindspore/ccsrc/parallel/tensor_layout/array.cc | 1 - mindspore/ccsrc/parallel/tensor_layout/array.h | 2 -- mindspore/ccsrc/parallel/tensor_layout/construct_operator.h | 1 - mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc | 2 -- 
mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h | 2 -- mindspore/ccsrc/parallel/tensor_layout/map.cc | 1 - mindspore/ccsrc/parallel/tensor_layout/map.h | 1 - .../tensor_layout/redistribution_layout_transfer.cc | 2 -- .../parallel/tensor_layout/redistribution_layout_transfer.h | 2 -- .../parallel/tensor_layout/redistribution_operator_infer.cc | 2 -- .../parallel/tensor_layout/redistribution_operator_infer.h | 2 -- .../ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc | 1 - .../ccsrc/parallel/tensor_layout/reshape_layout_transfer.h | 2 -- mindspore/ccsrc/parallel/tensor_layout/shape_util.cc | 2 -- mindspore/ccsrc/parallel/tensor_layout/shape_util.h | 2 -- mindspore/ccsrc/parallel/tensor_layout/tensor_info.h | 2 -- mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc | 3 +-- mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h | 2 -- .../ccsrc/parallel/tensor_layout/tensor_redistribution.cc | 1 - .../ccsrc/parallel/tensor_layout/tensor_redistribution.h | 2 -- 38 files changed, 15 insertions(+), 59 deletions(-) diff --git a/mindspore/ccsrc/parallel/device_manager.cc b/mindspore/ccsrc/parallel/device_manager.cc index 3a553e08ec..0b34cedc00 100644 --- a/mindspore/ccsrc/parallel/device_manager.cc +++ b/mindspore/ccsrc/parallel/device_manager.cc @@ -370,6 +370,5 @@ void DeviceManager::Clear() { stage_devices_.clear(); gm_.Clear(); } - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/device_matrix.cc b/mindspore/ccsrc/parallel/device_matrix.cc index a581dbf275..3fdc3dd15a 100644 --- a/mindspore/ccsrc/parallel/device_matrix.cc +++ b/mindspore/ccsrc/parallel/device_matrix.cc @@ -29,7 +29,6 @@ namespace mindspore { namespace parallel { - DeviceMatrix::DeviceMatrix(int32_t rank, RankList dev_list, Shape dev_shape) : rank_(rank), dev_list_(std::move(dev_list)), dev_shape_(std::move(dev_shape)) { if (!std::any_of(dev_list_.begin(), dev_list_.end(), [rank](int32_t a) { return a == rank; })) { diff --git 
a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h index 145a8a0840..1b864cd8bf 100644 --- a/mindspore/ccsrc/parallel/dynamic_creator.h +++ b/mindspore/ccsrc/parallel/dynamic_creator.h @@ -27,7 +27,6 @@ namespace mindspore { namespace parallel { - #define REGISTER(className) \ OperatorInfoPtr objectCreator##className(std::string name, Shapes in, Shapes out, PrimitiveAttrs& attrs) { \ return std::make_shared(name, in, out, attrs); \ diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.cc b/mindspore/ccsrc/parallel/ops_info/activation_info.cc index c59ca8402b..c11db56082 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.cc @@ -229,7 +229,8 @@ Status Softmax::GenerateStrategies(int32_t stage_id) { } is_auto_parallel_ = true; - Shape input0_split(inputs_shape_[0].size(), 1); + Shape input0_split; + (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1); for (auto& element : axis_) { int32_t axis_index = element; if (element < 0) { diff --git a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc index b1d9b8b60e..9d356cd573 100644 --- a/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/batch_parallel_info.cc @@ -27,7 +27,6 @@ namespace mindspore { namespace parallel { - Status BatchParallelInfo::CheckStrategy(const StrategyPtr& strategy) { if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) { if (is_auto_parallel_) { diff --git a/mindspore/ccsrc/parallel/ops_info/loss_info.cc b/mindspore/ccsrc/parallel/ops_info/loss_info.cc index 31f80e338b..28ea19f120 100644 --- a/mindspore/ccsrc/parallel/ops_info/loss_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/loss_info.cc @@ -194,7 +194,8 @@ Status SoftmaxCrossEntropyWithLogitsInfo::GenerateStrategies(int32_t stage_id) { } is_auto_parallel_ = true; - 
Shape input0_split(inputs_shape_[0].size(), 1); + Shape input0_split; + (void)input0_split.insert(input0_split.begin(), inputs_shape_[0].size(), 1); input0_split[IntToSize(axis_index)] = 0; Shapes splittable_inputs = {input0_split, input0_split}; std::vector sp_vector; diff --git a/mindspore/ccsrc/parallel/ops_info/operator_info.cc b/mindspore/ccsrc/parallel/ops_info/operator_info.cc index 23b6a5190a..e066142589 100644 --- a/mindspore/ccsrc/parallel/ops_info/operator_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/operator_info.cc @@ -1255,6 +1255,5 @@ void OperatorInfo::BreakingTiesForPerferringDataParallel(const StrategyPtr& stra double OperatorInfo::GetForwardMemoryCostFromCNode() { return operator_cost()->GetForwardComputationCost(inputs_tensor_info_, outputs_tensor_info_, 0); } - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/prelu_info.cc b/mindspore/ccsrc/parallel/ops_info/prelu_info.cc index 1a44501f42..a4d601dbe9 100644 --- a/mindspore/ccsrc/parallel/ops_info/prelu_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/prelu_info.cc @@ -212,8 +212,10 @@ Status PReLUInfo::GenerateStrategies(int32_t stage_id) { return FAILED; } is_auto_parallel_ = true; - Shape input0_split(inputs_shape_[0].size(), 1); - input0_split[1] = 0; + Shape input0_split; + input0_split.emplace_back(1); + input0_split.emplace_back(0); + (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 2, 1); Shape input1_split(inputs_shape_[1].size(), 0); Shapes splittable_inputs = {input0_split, input1_split}; std::vector sp_vector; diff --git a/mindspore/ccsrc/parallel/ops_info/reshape_info.cc b/mindspore/ccsrc/parallel/ops_info/reshape_info.cc index 0c95ee9c05..4cb81ee769 100644 --- a/mindspore/ccsrc/parallel/ops_info/reshape_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/reshape_info.cc @@ -413,8 +413,9 @@ Status ReshapeInfo::GenerateStrategies(int32_t stage_id) { return FAILED; } is_auto_parallel_ = true; - Shape 
input0_split(inputs_shape_[0].size(), 0); - input0_split[0] = 1; + Shape input0_split; + input0_split.emplace_back(1); + (void)input0_split.insert(input0_split.end(), inputs_shape_[0].size() - 1, 0); Shapes splittable_inputs = {input0_split}; std::vector sp_vector; if (GenerateStrategiesForIndependentInputs(stage_id, inputs_shape_, splittable_inputs, &sp_vector) != SUCCESS) { diff --git a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc index acb39247d4..cd3b40315c 100644 --- a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.cc @@ -27,7 +27,6 @@ namespace mindspore { namespace parallel { - Status VirtualDatasetInfo::CheckStrategy(const StrategyPtr& strategy) { if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) { if (is_auto_parallel_) { @@ -225,8 +224,9 @@ Status VirtualDatasetInfo::GenerateStrategies(int32_t stage_id) { StrategyPtr sp; std::vector strategy; for (auto& shape : inputs_shape_) { - Shape temp(shape.size(), 1); - temp[0] = SizeToInt(total_dev_num); + Shape temp; + temp.emplace_back(SizeToInt(total_dev_num)); + (void)temp.insert(temp.end(), shape.size() - 1, 1); strategy.push_back(temp); } sp = std::make_shared(stage_id, strategy); diff --git a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h index bf17e678a3..398bae3585 100644 --- a/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h +++ b/mindspore/ccsrc/parallel/ops_info/virtual_dataset_info.h @@ -51,7 +51,6 @@ class VirtualDatasetInfo : public OperatorInfo { Status GetAttrs() override; Status InferAsLossDivisor() override; }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/status.h b/mindspore/ccsrc/parallel/status.h index 9d773f0d9b..6bfe9f0e72 100644 --- a/mindspore/ccsrc/parallel/status.h +++ 
b/mindspore/ccsrc/parallel/status.h @@ -21,7 +21,6 @@ namespace mindspore { namespace parallel { - enum Status { SUCCESS = 0, FAILED, diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index a42ce612fb..a56fca1ae6 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -487,7 +487,6 @@ Status ConstructCostGraphNodes(const std::vector &all_nodes, const F bool is_find_wrong = (current_op_ptr->name().find(VIRTUAL_DATA_SET_INFO) == std::string::npos) && (current_op_ptr->name().find(BATCH_PARALLEL) == std::string::npos) && (current_op_ptr->name().find(prim->name()) == std::string::npos); - if (is_find_wrong) { MS_LOG(EXCEPTION) << "The OperatorInfo: " << current_op_ptr->name() << " does not match the Prim: " << prim->name(); @@ -947,7 +946,6 @@ Status ParallelStrategyRecSearch(const std::vector &all_nodes, const graph = EliminateGraph(graph, eli_list, index_list); size_t num_device = g_device_manager->DeviceNum(); - if (PartitionForAllDevices(num_device, graph) == SUCCESS) { MS_LOG(INFO) << "Partition Success With " << num_device << " devices."; } else { diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.h b/mindspore/ccsrc/parallel/step_auto_parallel.h index 349af7c956..f120edcc61 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.h +++ b/mindspore/ccsrc/parallel/step_auto_parallel.h @@ -55,7 +55,6 @@ Status ParallelStrategyRecSearch(const std::vector &all_nodes, const std::vector> RecInputTensorNames(const std::map::iterator &it, std::vector> input_tensor_names); - } // namespace parallel } // namespace mindspore #endif // PARALLEL_STEP_AUTO_PARALLEL_H_ diff --git a/mindspore/ccsrc/parallel/step_parallel.cc b/mindspore/ccsrc/parallel/step_parallel.cc index f0126a4027..bcd4dc3763 100644 --- a/mindspore/ccsrc/parallel/step_parallel.cc +++ b/mindspore/ccsrc/parallel/step_parallel.cc @@ -2094,7 +2094,6 @@ CNodePtr FindLossCNodeFromRoot(const 
FuncGraphPtr& root) { MS_EXCEPTION_IF_NULL(root_return_node); const auto& all_nodes = root->nodes(); FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes); - if (func_graph == nullptr) { return FindLossCNode(root); } else { @@ -2109,7 +2108,6 @@ FuncGraphPtr ForwardGraph(const FuncGraphPtr& root) { MS_EXCEPTION_IF_NULL(root_return_node); const auto& all_nodes = root->nodes(); FuncGraphPtr func_graph = FindForwardGraphByRootNodes(all_nodes); - if (func_graph != nullptr) { forward_graph = func_graph; } diff --git a/mindspore/ccsrc/parallel/strategy.h b/mindspore/ccsrc/parallel/strategy.h index acc6ca928f..93d4d4dff1 100644 --- a/mindspore/ccsrc/parallel/strategy.h +++ b/mindspore/ccsrc/parallel/strategy.h @@ -27,7 +27,6 @@ namespace mindspore { namespace parallel { - #define MIN_SLICE_NUM 1 using Dimensions = std::vector; diff --git a/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc b/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc index 68acae87f3..b42ba30242 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/arrangement.cc @@ -26,7 +26,6 @@ namespace mindspore { namespace parallel { - Status Arrangement::Init(const std::vector& array) { Status status = Array::Init(array); if (status != Status::SUCCESS) { diff --git a/mindspore/ccsrc/parallel/tensor_layout/arrangement.h b/mindspore/ccsrc/parallel/tensor_layout/arrangement.h index 6d64e07f03..2dc13038c1 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/arrangement.h +++ b/mindspore/ccsrc/parallel/tensor_layout/arrangement.h @@ -28,7 +28,6 @@ namespace mindspore { namespace parallel { - class Arrangement : public Array { public: Arrangement() : size_(1) {} @@ -53,7 +52,6 @@ class Arrangement : public Array { void ComputeSize(); int32_t size_; }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/array.cc b/mindspore/ccsrc/parallel/tensor_layout/array.cc index ce1b9b8ecf..ba3858ae00 
100644 --- a/mindspore/ccsrc/parallel/tensor_layout/array.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/array.cc @@ -21,7 +21,6 @@ namespace mindspore { namespace parallel { - std::string Array::ToString() const { std::ostringstream buffer; buffer << "[ "; diff --git a/mindspore/ccsrc/parallel/tensor_layout/array.h b/mindspore/ccsrc/parallel/tensor_layout/array.h index 3a47f0d818..f7d9c3c673 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/array.h +++ b/mindspore/ccsrc/parallel/tensor_layout/array.h @@ -26,7 +26,6 @@ namespace mindspore { namespace parallel { - class Array { public: Array() = default; @@ -43,7 +42,6 @@ class Array { protected: std::vector array_; }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h b/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h index 91f5236037..cf6cff456a 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h +++ b/mindspore/ccsrc/parallel/tensor_layout/construct_operator.h @@ -52,7 +52,6 @@ class ConstructOperator { Shape dev_matrix_shape_; Status CreateGroupByDim(size_t axis, std::vector* group); }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc index b2ee51b40b..190a5846ba 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.cc @@ -20,7 +20,6 @@ namespace mindspore { namespace parallel { - std::string LayoutTransfer::ToString() const { std::ostringstream buffer; buffer << std::endl << std::string("from_in_ tensor layout:" + from_in_.ToString()); @@ -37,6 +36,5 @@ Status LayoutTransfer::Init(const TensorLayout& from_in, const TensorLayout& to_ Status status = CheckValidTransfer(); return status; } - } // namespace parallel } // namespace mindspore diff --git 
a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h index b892a87d30..b05128f5b8 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h +++ b/mindspore/ccsrc/parallel/tensor_layout/layout_transfer.h @@ -23,7 +23,6 @@ namespace mindspore { namespace parallel { - class LayoutTransfer { public: LayoutTransfer() = default; @@ -43,7 +42,6 @@ class LayoutTransfer { private: virtual Status CheckValidTransfer() = 0; }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/map.cc b/mindspore/ccsrc/parallel/tensor_layout/map.cc index 4f3f2369c7..320dbe6ebd 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/map.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/map.cc @@ -26,7 +26,6 @@ namespace mindspore { namespace parallel { - Status Map::Init(const std::vector& array) { Status status = Array::Init(array); if (status != Status::SUCCESS) { diff --git a/mindspore/ccsrc/parallel/tensor_layout/map.h b/mindspore/ccsrc/parallel/tensor_layout/map.h index f7bc061aa1..3f839ef198 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/map.h +++ b/mindspore/ccsrc/parallel/tensor_layout/map.h @@ -46,7 +46,6 @@ class Map : public Array { private: bool IsValidMap(); }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc index 2ee682fad8..7ed07ac02e 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.cc @@ -21,7 +21,6 @@ namespace mindspore { namespace parallel { - Status RedistributionLayoutTransfer::CheckValidTransfer() { return Status::SUCCESS; } /* @@ -66,6 +65,5 @@ std::shared_ptr RedistributionLayoutTransfer::UnifyDevice } return 
unified_device_arrangement_ptr->UnifyDeviceArrangementAndTensorShape(); } - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h index 6522b7f8c2..7b57f46dd6 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h +++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_layout_transfer.h @@ -24,7 +24,6 @@ namespace mindspore { namespace parallel { - class RedistributionLayoutTransfer : public LayoutTransfer { public: RedistributionLayoutTransfer() = default; @@ -35,7 +34,6 @@ class RedistributionLayoutTransfer : public LayoutTransfer { Status CheckValidTransfer() override; std::shared_ptr UnifyDeviceArrangement() const; }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc index 028fb5874a..b4ec6a016f 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.cc @@ -22,7 +22,6 @@ namespace mindspore { namespace parallel { - Status RedistributionOperatorInfer::Init(const TensorLayout& tensor_layout, const Map& out_tensor_map, RankList dev_list) { in_tensor_map_ = tensor_layout.tensor_map(); @@ -273,6 +272,5 @@ Status RedistributionOperatorInfer::TransferConcatByAxis(Args args) { } return Status::SUCCESS; } - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h index 13f9e7af24..b4ec0c4633 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h +++ b/mindspore/ccsrc/parallel/tensor_layout/redistribution_operator_infer.h @@ -28,7 +28,6 @@ #include 
"utils/convert_utils.h" namespace mindspore { namespace parallel { - using DeviceArrangement = std::vector; using TensorMap = std::vector; using TensorShape = std::vector; @@ -69,7 +68,6 @@ class RedistributionOperatorInfer { RankList dev_list_; bool construct_op_flag_; }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc b/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc index 1d56aa2220..39a6bef92d 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.cc @@ -20,7 +20,6 @@ namespace mindspore { namespace parallel { - Status ReshapeLayoutTransfer::CheckValidTransfer() { if (!IsSameDeviceArrangement()) { return Status::FAILED; diff --git a/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h b/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h index 9ad8e67635..8aae71631d 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h +++ b/mindspore/ccsrc/parallel/tensor_layout/reshape_layout_transfer.h @@ -23,7 +23,6 @@ namespace mindspore { namespace parallel { - class ReshapeLayoutTransfer : public LayoutTransfer { public: ReshapeLayoutTransfer() = default; @@ -43,7 +42,6 @@ class ReshapeLayoutTransfer : public LayoutTransfer { bool FromTensorShapeCanBeExpandByTo() const; bool ToTensorShapeCanBeExpandByFrom() const; }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/shape_util.cc b/mindspore/ccsrc/parallel/tensor_layout/shape_util.cc index 54bb976032..a26627fb3c 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/shape_util.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/shape_util.cc @@ -21,7 +21,6 @@ namespace mindspore { namespace parallel { - /* * example: * shape = [2, 8, 32] @@ -260,6 +259,5 @@ Status ExpandShape(const std::vector& in, const std::vector& e } return status; } 
- } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/shape_util.h b/mindspore/ccsrc/parallel/tensor_layout/shape_util.h index 85ca70969b..e83156500c 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/shape_util.h +++ b/mindspore/ccsrc/parallel/tensor_layout/shape_util.h @@ -27,7 +27,6 @@ namespace mindspore { namespace parallel { - /* * compute the accumulating product of all the values in shape from left to right, * the accumulating results are saved in shape_accum from left to right @@ -167,7 +166,6 @@ Status ExpandAccumulateProduct(const std::vector& in_accum_reverse, * out = [2, 4, 2, 4, 8] */ Status ExpandShape(const std::vector& in, const std::vector& expand, std::vector* out); - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h index 9fc6a229e2..4a64ab472c 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_info.h @@ -28,7 +28,6 @@ namespace mindspore { namespace parallel { - using Shapes = std::vector; class TensorInfo { @@ -55,7 +54,6 @@ class TensorInfo { // reduce method's reduce dim std::vector reduce_dim_; }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc index f49b967abc..5fbd04431c 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.cc @@ -27,7 +27,6 @@ namespace mindspore { namespace parallel { - std::string TensorLayout::ToString() const { return StandardToString() + OriginToString(); } std::string TensorLayout::StandardToString() const { @@ -337,7 +336,7 @@ Status TensorLayout::UpdateTensorMap(uint32_t index, int32_t value) { MS_LOG(ERROR) << "Index is out of the size of the tensor map!"; return Status::FAILED; 
} - Shape shape = tensor_map_.array(); + auto shape = tensor_map_.array(); shape[index] = value; if (tensor_map_.Init(shape) == Status::FAILED) { MS_LOG(ERROR) << "Update tensor map failed!"; diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h index 238c9373d9..e6ddc2a708 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_layout.h @@ -30,7 +30,6 @@ namespace mindspore { namespace parallel { - class TensorLayout { public: TensorLayout() = default; @@ -94,7 +93,6 @@ class TensorLayout { Map tensor_map_; Arrangement tensor_shape_; }; - } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc index be5eaa40ba..d8eef7e7a5 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.cc @@ -24,7 +24,6 @@ namespace mindspore { namespace parallel { - Status TensorRedistribution::Init(const TensorLayout& from, const TensorLayout& to, const RankList& dev_list) { from_origin_ = from; to_origin_ = to; diff --git a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h index 7e2b3682e6..ebaccadf53 100644 --- a/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h +++ b/mindspore/ccsrc/parallel/tensor_layout/tensor_redistribution.h @@ -33,7 +33,6 @@ namespace mindspore { namespace parallel { - class TensorRedistribution { public: explicit TensorRedistribution(bool construct_op_flag = true, bool keep_reshape = false) @@ -83,7 +82,6 @@ class TensorRedistribution { bool construct_op_flag_; bool keep_reshape_; }; - } // namespace parallel } // namespace mindspore From 4a292d48f9ff1ddc033abe797ec7c803da9e1bde Mon Sep 17 00:00:00 2001 From: 
"wangnan39@huawei.com" Date: Tue, 14 Apr 2020 16:03:55 +0800 Subject: [PATCH 253/367] modify annotation of DepthwiseConv2d --- mindspore/ops/operations/nn_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index ec2c2aeecd..b799420620 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -679,7 +679,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): Inputs: - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. - **weight** (Tensor) - Set size of kernel is :math:`(K_1, K_2)`, then the shape is - :math:`(channel_{multiplier}, C_{in}, K_1, K_2)`. + :math:`(\text{channel_multiplier}, C_{in}, K_1, K_2)`. Outputs: Tensor of shape :math:`(N, C_{in} * \text{channel_multiplier}, H_{out}, W_{out})`. From c926c36d95e7dc20b1de5f927cc58aab3e0dc35c Mon Sep 17 00:00:00 2001 From: huanghui Date: Wed, 15 Apr 2020 16:21:53 +0800 Subject: [PATCH 254/367] fix export mslite model --- .../converter/lite_model/operations/conv2d_packer.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mindspore/ccsrc/predict/converter/lite_model/operations/conv2d_packer.cc b/mindspore/ccsrc/predict/converter/lite_model/operations/conv2d_packer.cc index 0fefb89c59..176b235f5f 100644 --- a/mindspore/ccsrc/predict/converter/lite_model/operations/conv2d_packer.cc +++ b/mindspore/ccsrc/predict/converter/lite_model/operations/conv2d_packer.cc @@ -28,8 +28,8 @@ bool Conv2dPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) { std::vector kernel_size_value = AnfAlgo::GetNodeAttr>(c_node_ptr, "kernel_size"); std::string kernel_pad_mode_value = AnfAlgo::GetNodeAttr(c_node_ptr, "pad_mode"); int kernel_pad_value = AnfAlgo::GetNodeAttr(c_node_ptr, "pad"); - int kernel_stride_value = AnfAlgo::GetNodeAttr(c_node_ptr, "stride"); - int kernel_dilation_value = AnfAlgo::GetNodeAttr(c_node_ptr, "dilation"); + auto kernel_stride_value = 
AnfAlgo::GetNodeAttr>(c_node_ptr, "stride"); + auto kernel_dilation_value = AnfAlgo::GetNodeAttr>(c_node_ptr, "dilation"); std::string kernel_data_format_value = AnfAlgo::GetNodeAttr(c_node_ptr, "data_format"); std::unique_ptr attr(new Conv2DT()); MS_EXCEPTION_IF_NULL(attr); @@ -43,15 +43,15 @@ bool Conv2dPacker(const CNodePtr &c_node_ptr, OpDefT *ms_op) { attr->channelOut = kernel_channel_value; attr->kernelW = kernel_size_value[0]; attr->kernelH = kernel_size_value[1]; - attr->strideW = kernel_stride_value; - attr->strideH = kernel_stride_value; + attr->strideW = kernel_stride_value[0]; + attr->strideH = kernel_stride_value[1]; attr->padMode = GetAttrPadMode(kernel_pad_mode_value); attr->padUp = kernel_pad_value; attr->padDown = kernel_pad_value; attr->padLeft = kernel_pad_value; attr->padRight = kernel_pad_value; - attr->dilateW = kernel_dilation_value; - attr->dilateH = kernel_dilation_value; + attr->dilateW = kernel_dilation_value[0]; + attr->dilateH = kernel_dilation_value[1]; attr->hasBias = false; ms_op->name = c_node_ptr->fullname_with_scope(); ms_op->attr.type = OpT_Conv2D; From c3e0ce68d5b4ad6e1058e1f7d24550ef10d58f15 Mon Sep 17 00:00:00 2001 From: wanghua Date: Wed, 15 Apr 2020 16:43:35 +0800 Subject: [PATCH 255/367] modify GetNext OP example --- mindspore/ops/operations/nn_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index ec2c2aeecd..e87551344c 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -1949,7 +1949,7 @@ class GetNext(PrimitiveWithInfer): and the type is described is `types`. 
Examples: - >>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 'shared_name') + >>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 2, 'shared_name') >>> feature, label = get_next() """ From 4a9f497283ca1882aad6ce28b8d2a8699e53e538 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Wed, 15 Apr 2020 17:03:48 +0800 Subject: [PATCH 256/367] move akg out --- cmake/package.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/package.cmake b/cmake/package.cmake index 8a44d392cc..e4f8fa7706 100644 --- a/cmake/package.cmake +++ b/cmake/package.cmake @@ -162,7 +162,7 @@ install( if (ENABLE_GPU) install( DIRECTORY ${CMAKE_SOURCE_DIR}/mindspore/_akg - DESTINATION ${INSTALL_PY_DIR} + DESTINATION ${INSTALL_PY_DIR}/../ COMPONENT mindspore ) if (EXISTS ${CMAKE_SOURCE_DIR}/mindspore/incubator-tvm) @@ -170,7 +170,7 @@ if (ENABLE_GPU) DIRECTORY ${CMAKE_SOURCE_DIR}/mindspore/incubator-tvm/topi/python/topi ${CMAKE_SOURCE_DIR}/mindspore/incubator-tvm/python/tvm - DESTINATION ${INSTALL_PY_DIR}/_akg + DESTINATION ${INSTALL_PY_DIR}/../_akg COMPONENT mindspore ) endif () From 1286767d0e083ad5ac6a9f50a56451b843463805 Mon Sep 17 00:00:00 2001 From: chenjianping Date: Wed, 15 Apr 2020 09:36:48 +0000 Subject: [PATCH 257/367] support building on windows --- CMakeLists.txt | 13 +++ build.bat | 36 ++++++++ cmake/external_libs/flatbuffers.cmake | 3 + cmake/external_libs/mkl_dnn.cmake | 13 ++- cmake/external_libs/opencv.cmake | 89 +++++++++++++------ cmake/external_libs/protobuf.cmake | 44 +++++---- cmake/external_libs/sqlite.cmake | 41 ++++++--- cmake/mind_expression.cmake | 6 +- cmake/options.cmake | 8 +- cmake/utils.cmake | 50 +++++++---- mindspore/ccsrc/CMakeLists.txt | 62 ++++++++++--- mindspore/ccsrc/dataset/CMakeLists.txt | 18 +++- .../ccsrc/dataset/api/python_bindings.cc | 4 + mindspore/ccsrc/dataset/core/tensor.h | 4 + .../dataset/engine/datasetops/shuffle_op.cc | 4 + 
.../dataset/kernels/image/CMakeLists.txt | 68 +++++++++----- .../dataset/kernels/image/image_utils.cc | 4 + .../ccsrc/dataset/kernels/image/image_utils.h | 4 + mindspore/ccsrc/dataset/util/path.cc | 6 +- mindspore/ccsrc/dataset/util/random.cc | 4 + mindspore/ccsrc/dataset/util/services.cc | 8 ++ mindspore/ccsrc/dataset/util/services.h | 2 + mindspore/ccsrc/dataset/util/sig_handler.cc | 4 + mindspore/ccsrc/dataset/util/sig_handler.h | 2 + mindspore/ccsrc/debug/anf_ir_dump.cc | 9 ++ mindspore/ccsrc/debug/anf_ir_utils.cc | 16 +++- mindspore/ccsrc/debug/info.cc | 7 +- .../ccsrc/device/cpu/cpu_resource_manager.cc | 1 + .../gpu/distribution/collective_fake_init.cc | 28 ++++++ .../gpu/distribution/collective_fake_init.h | 37 ++++++++ mindspore/ccsrc/device/kernel_runtime.cc | 20 +++++ mindspore/ccsrc/kernel/common_utils.cc | 11 +++ mindspore/ccsrc/mindrecord/CMakeLists.txt | 6 +- .../ccsrc/mindrecord/common/shard_utils.cc | 24 +++++ .../mindrecord/include/common/shard_utils.h | 2 + .../ccsrc/mindrecord/include/shard_reader.h | 2 + mindspore/ccsrc/mindrecord/io/shard_reader.cc | 4 + mindspore/ccsrc/mindrecord/io/shard_writer.cc | 10 +++ .../allreduce_fusion/step_allreduce_fusion.cc | 13 ++- .../ccsrc/parallel/step_auto_parallel.cc | 2 +- mindspore/ccsrc/pipeline/action.cc | 2 +- mindspore/ccsrc/pipeline/init.cc | 14 ++- .../ccsrc/pre_activate/common/pass_manager.cc | 10 +++ mindspore/ccsrc/utils/log_adapter.cc | 10 ++- mindspore/ccsrc/vm/transform.cc | 6 +- mindspore/ccsrc/vm/transform.h | 2 +- mindspore/log.py | 7 +- third_party/securec/CMakeLists.txt | 12 ++- 48 files changed, 619 insertions(+), 133 deletions(-) create mode 100644 build.bat create mode 100644 mindspore/ccsrc/device/gpu/distribution/collective_fake_init.cc create mode 100644 mindspore/ccsrc/device/gpu/distribution/collective_fake_init.h diff --git a/CMakeLists.txt b/CMakeLists.txt index a02fcd1caf..9c6ee73687 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -12,6 +12,7 @@ else() endif() 
set(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -Wl,--allow-shlib-undefined -D_LIBCPP_INLINE_VISIBILITY='' -D'_LIBCPP_EXTERN_TEMPLATE(...)=' -DHALF_ENABLE_CPP11_USER_LITERALS=0 -D_FORTIFY_SOURCE=2 -Wno-cpp") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -I/usr/local/include -std=c++17 -Werror -Wall -Wno-deprecated-declarations -fPIC") set(CMAKE_EXPORT_COMPILE_COMMANDS ON) @@ -29,6 +30,18 @@ find_package(Python3 3.7 COMPONENTS Interpreter Development) if(Python3_FOUND) set(PYTHON_INCLUDE_DIRS "${Python3_INCLUDE_DIRS}") set(PYTHON_LIBRARIES "${Python3_LIBRARIES}") + if (WIN32) + if (Python3_DIR) + message("Python3_DIR set already: " ${Python3_DIR}) + else() + string(LENGTH ${PYTHON_LIBRARIES} PYTHON_LIBRARIES_LEN) + string(LENGTH "libpythonxx.a" Python3_NAME_LEN) + math(EXPR Python3_DIR_LEN ${PYTHON_LIBRARIES_LEN}-${Python3_NAME_LEN}) + string(SUBSTRING ${Python3_LIBRARIES} 0 ${Python3_DIR_LEN} Python3_DIR) + message("Python3_DIR: " ${Python3_DIR}) + endif() + link_directories(${Python3_DIR}) + endif() else() find_python_package(py_inc py_lib) set(PYTHON_INCLUDE_DIRS "${py_inc}") diff --git a/build.bat b/build.bat new file mode 100644 index 0000000000..b4f3420e14 --- /dev/null +++ b/build.bat @@ -0,0 +1,36 @@ +@echo off +@title mindspore_build + +SET BASEPATH=%CD% +IF NOT EXIST %BASEPATH%/build ( + md "build" + ) + +cd %BASEPATH%/build +SET BUILD_PATH=%CD% + +IF NOT EXIST %BUILD_PATH%/mindspore ( + md "mindspore" + ) + +cd %CD%/mindspore + +cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_CPU=ON -DENABLE_MINDDATA=ON -DUSE_GLOG=ON -G "CodeBlocks - MinGW Makefiles" ../.. +IF NOT %errorlevel% == 0 ( + goto run_fail + ) + +cmake --build . --target all -- -j6 +IF NOT %errorlevel% == 0 ( + goto run_fail + ) + +cd %BASEPATH% + +goto run_eof + +:run_fail + cd %BASEPATH% + echo "build fail." 
+ +:run_eof diff --git a/cmake/external_libs/flatbuffers.cmake b/cmake/external_libs/flatbuffers.cmake index 7d7c74b9e1..18549ed1b5 100644 --- a/cmake/external_libs/flatbuffers.cmake +++ b/cmake/external_libs/flatbuffers.cmake @@ -1,5 +1,8 @@ set(flatbuffers_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2") set(flatbuffers_CFLAGS "-D_FORTIFY_SOURCE=2 -O2") +if (WIN32) + set(flatbuffers_USE_STATIC_LIBS ON) +endif() mindspore_add_pkg(flatbuffers VER 1.11.0 LIBS flatbuffers diff --git a/cmake/external_libs/mkl_dnn.cmake b/cmake/external_libs/mkl_dnn.cmake index 6f033fa565..4b2c46670a 100644 --- a/cmake/external_libs/mkl_dnn.cmake +++ b/cmake/external_libs/mkl_dnn.cmake @@ -1,11 +1,22 @@ set(onednn_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2") set(onednn_CFLAGS "-D_FORTIFY_SOURCE=2 -O2") -mindspore_add_pkg(onednn +if (CMAKE_SYSTEM_NAME MATCHES "Windows") + mindspore_add_pkg(onednn + VER 1.1.1 + LIBS dnnl mkldnn + HEAD_ONLY ./ + RELEASE on + URL https://github.com/oneapi-src/oneDNN/releases/download/v1.1.1/dnnl_win_1.1.1_cpu_vcomp.zip + MD5 ecaab9ed549643067699c80e5cea1c23) +else() + mindspore_add_pkg(onednn VER 1.1.2 LIBS dnnl mkldnn URL https://github.com/oneapi-src/oneDNN/archive/v1.1.2.tar.gz MD5 ab40d52230f3ad1d7a6f06ce0f6bc17a CMAKE_OPTION -DDNNL_ARCH_OPT_FLAGS='' -DDNNL_CPU_RUNTIME='SEQ' -DDNNL_BUILD_EXAMPLES=OFF -DDNNL_BUILD_TESTS=OFF) +endif() + include_directories(${onednn_INC}) add_library(mindspore::dnnl ALIAS onednn::dnnl) add_library(mindspore::mkldnn ALIAS onednn::mkldnn) diff --git a/cmake/external_libs/opencv.cmake b/cmake/external_libs/opencv.cmake index 5c60a2fa61..1c40769f62 100644 --- a/cmake/external_libs/opencv.cmake +++ b/cmake/external_libs/opencv.cmake @@ -2,35 +2,72 @@ if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") set(opencv_CXXFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") set(opencv_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") set(opencv_LDFLAGS "-Wl") +elseif 
(${CMAKE_SYSTEM_NAME} MATCHES "Windows") + set(opencv_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") + set(opencv_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") else() set(opencv_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") set(opencv_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") set(opencv_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") endif() -mindspore_add_pkg(opencv - VER 4.2.0 - LIBS opencv_core opencv_imgcodecs opencv_imgproc - URL https://github.com/opencv/opencv/archive/4.2.0.tar.gz - MD5 e8cb208ce2723481408b604b480183b6 - CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DWITH_PROTOBUF=OFF -DWITH_WEBP=OFF -DWITH_IPP=OFF -DWITH_ADE=OFF - -DBUILD_ZLIB=ON - -DBUILD_JPEG=ON - -DBUILD_PNG=ON - -DBUILD_OPENEXR=ON - -DBUILD_TESTS=OFF - -DBUILD_PERF_TESTS=OFF - -DBUILD_opencv_apps=OFF - -DCMAKE_SKIP_RPATH=TRUE - -DBUILD_opencv_python3=OFF - -DWITH_FFMPEG=OFF - -DWITH_TIFF=ON - -DBUILD_TIFF=OFF - -DWITH_JASPER=OFF - -DBUILD_JASPER=OFF - -DTIFF_INCLUDE_DIR=${tiff_INC} - -DTIFF_LIBRARY=${tiff_LIB}) -include_directories(${opencv_INC}/opencv4) -add_library(mindspore::opencv_core ALIAS opencv::opencv_core) -add_library(mindspore::opencv_imgcodecs ALIAS opencv::opencv_imgcodecs) -add_library(mindspore::opencv_imgproc ALIAS opencv::opencv_imgproc) +if (WIN32) + mindspore_add_pkg(opencv + VER 4.2.0 + LIBS libopencv_core420.dll.a libopencv_imgcodecs420.dll.a libopencv_imgproc420.dll.a + LIB_PATH x64/mingw/lib + URL https://github.com/opencv/opencv/archive/4.2.0.tar.gz + MD5 e8cb208ce2723481408b604b480183b6 + CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DWITH_PROTOBUF=OFF -DWITH_WEBP=OFF -DWITH_IPP=OFF -DWITH_ADE=OFF + -DBUILD_ZLIB=ON + -DBUILD_JPEG=ON + -DBUILD_PNG=ON + -DBUILD_OPENEXR=ON + -DBUILD_TESTS=OFF + -DBUILD_PERF_TESTS=OFF + -DBUILD_opencv_apps=OFF + 
-DCMAKE_SKIP_RPATH=TRUE + -DBUILD_opencv_python3=OFF + -DWITH_FFMPEG=OFF + -DWITH_TIFF=ON + -DBUILD_TIFF=OFF + -DWITH_JASPER=OFF + -DBUILD_JASPER=OFF + -DTIFF_INCLUDE_DIR=${tiff_INC} + -DTIFF_LIBRARY=${tiff_LIB}) +else() + mindspore_add_pkg(opencv + VER 4.2.0 + LIBS opencv_core opencv_imgcodecs opencv_imgproc + URL https://github.com/opencv/opencv/archive/4.2.0.tar.gz + MD5 e8cb208ce2723481408b604b480183b6 + CMAKE_OPTION -DCMAKE_BUILD_TYPE=Release -DWITH_PROTOBUF=OFF -DWITH_WEBP=OFF -DWITH_IPP=OFF -DWITH_ADE=OFF + -DBUILD_ZLIB=ON + -DBUILD_JPEG=ON + -DBUILD_PNG=ON + -DBUILD_OPENEXR=ON + -DBUILD_TESTS=OFF + -DBUILD_PERF_TESTS=OFF + -DBUILD_opencv_apps=OFF + -DCMAKE_SKIP_RPATH=TRUE + -DBUILD_opencv_python3=OFF + -DWITH_FFMPEG=OFF + -DWITH_TIFF=ON + -DBUILD_TIFF=OFF + -DWITH_JASPER=OFF + -DBUILD_JASPER=OFF + -DTIFF_INCLUDE_DIR=${tiff_INC} + -DTIFF_LIBRARY=${tiff_LIB}) +endif() + +if (WIN32) + include_directories(${opencv_INC}) + add_library(mindspore::opencv_core ALIAS opencv::libopencv_core420.dll.a) + add_library(mindspore::opencv_imgcodecs ALIAS opencv::libopencv_imgcodecs420.dll.a) + add_library(mindspore::opencv_imgproc ALIAS opencv::libopencv_imgproc420.dll.a) +else() + include_directories(${opencv_INC}/opencv4) + add_library(mindspore::opencv_core ALIAS opencv::opencv_core) + add_library(mindspore::opencv_imgcodecs ALIAS opencv::opencv_imgcodecs) + add_library(mindspore::opencv_imgproc ALIAS opencv::opencv_imgproc) +endif() diff --git a/cmake/external_libs/protobuf.cmake b/cmake/external_libs/protobuf.cmake index 987d5c6a8b..a574e789db 100644 --- a/cmake/external_libs/protobuf.cmake +++ b/cmake/external_libs/protobuf.cmake @@ -77,22 +77,36 @@ function(ms_protobuf_generate_py c_var h_var py_var) list(APPEND ${c_var} "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}.pb.cc") list(APPEND ${h_var} "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}.pb.h") list(APPEND ${py_var} "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py") - - add_custom_command( - OUTPUT 
"${CMAKE_BINARY_DIR}/${rel_path}/${file_name}.pb.cc" - "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}.pb.h" - "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} - COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/${rel_path}" - COMMAND protobuf::protoc -I${file_dir} --cpp_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file} - COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file} - COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file} - COMMAND perl -pi -e "s/import (.+_pb2.*)/from . import \\1/" "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" - COMMAND cp "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" "${PROJECT_SOURCE_DIR}/mindspore/train/" - DEPENDS protobuf::protoc ${abs_file} - COMMENT "Running C++ protocol buffer compiler on ${file}" VERBATIM ) + if (WIN32) + add_custom_command( + OUTPUT "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}.pb.cc" + "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}.pb.h" + "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/${rel_path}" + COMMAND protobuf::protoc -I${file_dir} --cpp_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file} + COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file} + COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file} + COMMAND perl -pi.bak -e "s/import (.+_pb2.*)/from . 
import \\1/" "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" + COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" "${PROJECT_SOURCE_DIR}/mindspore/train/" + DEPENDS protobuf::protoc ${abs_file} + COMMENT "Running C++ protocol buffer compiler on ${file}" VERBATIM ) + else() + add_custom_command( + OUTPUT "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}.pb.cc" + "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}.pb.h" + "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/${rel_path}" + COMMAND protobuf::protoc -I${file_dir} --cpp_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file} + COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file} + COMMAND protobuf::protoc -I${file_dir} --python_out=${CMAKE_BINARY_DIR}/${rel_path} ${abs_file} + COMMAND perl -pi -e "s/import (.+_pb2.*)/from . import \\1/" "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" + COMMAND cp "${CMAKE_BINARY_DIR}/${rel_path}/${file_name}_pb2.py" "${PROJECT_SOURCE_DIR}/mindspore/train/" + DEPENDS protobuf::protoc ${abs_file} + COMMENT "Running C++ protocol buffer compiler on ${file}" VERBATIM ) + endif() endforeach() - set_source_files_properties(${${c_var}} ${${h_var}} ${${py_var}} PROPERTIES GENERATED TRUE) set(${c_var} ${${c_var}} PARENT_SCOPE) set(${h_var} ${${h_var}} PARENT_SCOPE) diff --git a/cmake/external_libs/sqlite.cmake b/cmake/external_libs/sqlite.cmake index e3fe77d96b..69692f284d 100644 --- a/cmake/external_libs/sqlite.cmake +++ b/cmake/external_libs/sqlite.cmake @@ -1,19 +1,36 @@ +if (WIN32) + mindspore_add_pkg(sqlite-head + VER 3.31.1 + HEAD_ONLY ./ + URL https://sqlite.org/2020/sqlite-amalgamation-3310100.zip + MD5 2b7bfcdd97dc281903a9aee966213fe4) + include_directories(${sqlite-head_INC}) + mindspore_add_pkg(sqlite + VER 3.31.1 + LIBS sqlite3 + LIB_PATH ./ + HEAD_ONLY ./ + RELEASE ON + URL 
https://sqlite.org/2020/sqlite-dll-win64-x64-3310100.zip + MD5 662c9d2b05467d590ba5c0443e7fd6bd) -set(sqlite_USE_STATIC_LIBS ON) -set(sqlite_CXXFLAGS) -if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") - set(sqlite_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") -else() - set(sqlite_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") -endif() -set(sqlite_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") - -mindspore_add_pkg(sqlite +else () + set(sqlite_USE_STATIC_LIBS ON) + set(sqlite_CXXFLAGS) + if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(sqlite_CFLAGS "-fstack-protector-all -Wno-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") + else() + set(sqlite_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -fPIC -D_FORTIFY_SOURCE=2 -O2") + endif() + set(sqlite_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") + mindspore_add_pkg(sqlite VER 3.31.1 LIBS sqlite3 URL https://github.com/sqlite/sqlite/archive/version-3.31.1.tar.gz MD5 5f4e7b4016c15f4fb5855615279819da PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/sqlite/sqlite.patch001 CONFIGURE_COMMAND ./configure --enable-shared=no --disable-tcl --disable-editline --enable-json1) -include_directories(${sqlite_INC}) -add_library(mindspore::sqlite ALIAS sqlite::sqlite3) \ No newline at end of file + include_directories(${sqlite_INC}) +endif () + +add_library(mindspore::sqlite ALIAS sqlite::sqlite3) diff --git a/cmake/mind_expression.cmake b/cmake/mind_expression.cmake index af122d4117..7e5e07bdbb 100644 --- a/cmake/mind_expression.cmake +++ b/cmake/mind_expression.cmake @@ -1,6 +1,10 @@ set(SECURE_CXX_FLAGS "") if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") - set(SECURE_CXX_FLAGS "-fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack") + if (WIN32) + set(SECURE_CXX_FLAGS "-fstack-protector-all") + else() + set(SECURE_CXX_FLAGS "-fstack-protector-all 
-Wl,-z,relro,-z,now,-z,noexecstack") + endif() endif() set(_ms_tmp_CMAKE_CXX_FLAGS_F ${CMAKE_CXX_FLAGS}) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=hidden") diff --git a/cmake/options.cmake b/cmake/options.cmake index 6ec577f312..2f44e9a17c 100644 --- a/cmake/options.cmake +++ b/cmake/options.cmake @@ -19,7 +19,11 @@ option(ENABLE_MPI "enable mpi" OFF) option(ENABLE_AKG "enable akg" OFF) if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack") + if (WIN32) + set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fstack-protector-all") + else() + set(OPTION_CXX_FLAGS "${OPTION_CXX_FLAGS} -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack") + endif() endif() if (CMAKE_SYSTEM_NAME MATCHES "Darwin") @@ -106,4 +110,4 @@ endif() if(ENABLE_DUMP_E2E) add_compile_definitions(ENABLE_DUMP_E2E) -endif() \ No newline at end of file +endif() diff --git a/cmake/utils.cmake b/cmake/utils.cmake index c9b7d944f0..f0181de5ea 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -103,7 +103,7 @@ function(__download_pkg_with_git pkg_name pkg_url pkg_git_commit pkg_md5) endfunction() -function(__find_pkg_then_add_target pkg_name pkg_exe) +function(__find_pkg_then_add_target pkg_name pkg_exe lib_path) unset(${pkg_name}_LIBS) @@ -129,15 +129,24 @@ function(__find_pkg_then_add_target pkg_name pkg_exe) set(_LIB_TYPE STATIC) endif () set(${_LIB_NAME}_LIB ${_LIB_NAME}_LIB-NOTFOUND) - find_library(${_LIB_NAME}_LIB ${_LIB_SEARCH_NAME} PATHS ${${pkg_name}_BASE_DIR}/lib NO_DEFAULT_PATH) + find_library(${_LIB_NAME}_LIB ${_LIB_SEARCH_NAME} PATHS ${${pkg_name}_BASE_DIR}/${lib_path} NO_DEFAULT_PATH) + if(NOT ${_LIB_NAME}_LIB) return() endif() + add_library(${pkg_name}::${_LIB_NAME} ${_LIB_TYPE} IMPORTED GLOBAL) - set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${${pkg_name}_BASE_DIR}/include" - IMPORTED_LOCATION ${${_LIB_NAME}_LIB} - ) + if (WIN32 AND 
${_LIB_TYPE} STREQUAL "SHARED") + set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES IMPORTED_IMPLIB_RELEASE ${${_LIB_NAME}_LIB}) + else() + set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES IMPORTED_LOCATION ${${_LIB_NAME}_LIB}) + endif() + + if (EXISTS ${${pkg_name}_BASE_DIR}/include) + set_target_properties(${pkg_name}::${_LIB_NAME} PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${${pkg_name}_BASE_DIR}/include") + endif () + list(APPEND ${pkg_name}_LIBS ${pkg_name}::${_LIB_NAME}) message("found ${${_LIB_NAME}_LIB}") STRING( REGEX REPLACE "(.+)/(.+)" "\\1" LIBPATH ${${_LIB_NAME}_LIB}) @@ -192,12 +201,18 @@ set(MS_FIND_NO_DEFAULT_PATH ${MS_FIND_NO_DEFAULT_PATH} PARENT_SCOPE) function(mindspore_add_pkg pkg_name ) set(options ) - set(oneValueArgs URL MD5 GIT_REPOSITORY GIT_TAG VER EXE DIR HEAD_ONLY CMAKE_PATH) + set(oneValueArgs URL MD5 GIT_REPOSITORY GIT_TAG VER EXE DIR HEAD_ONLY CMAKE_PATH RELEASE LIB_PATH) set(multiValueArgs CMAKE_OPTION LIBS PRE_CONFIGURE_COMMAND CONFIGURE_COMMAND BUILD_OPTION INSTALL_INCS INSTALL_LIBS PATCHES) cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) - if (NOT PKG_CMAKE_PATH) - set(PKG_CMAKE_PATH .) 
+ + if (NOT PKG_LIB_PATH) + set(PKG_LIB_PATH lib) endif () + + if(NOT PKG_EXE) + set(PKG_EXE 0) + endif() + set(__FIND_PKG_NAME ${pkg_name}) string(TOLOWER ${pkg_name} pkg_name) message("pkg name:${__FIND_PKG_NAME},${pkg_name}") @@ -225,18 +240,17 @@ function(mindspore_add_pkg pkg_name ) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/${PKG_HEAD_ONLY} PARENT_SCOPE) add_library(${pkg_name} INTERFACE) target_include_directories(${pkg_name} INTERFACE ${${pkg_name}_INC}) + if (${PKG_RELEASE}) + __find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIB_PATH} ${PKG_LIBS}) + endif () return() endif () - if(NOT PKG_EXE) - set(PKG_EXE 0) - endif() - set(${__FIND_PKG_NAME}_ROOT ${${pkg_name}_BASE_DIR}) set(${__FIND_PKG_NAME}_ROOT ${${pkg_name}_BASE_DIR} PARENT_SCOPE) if (PKG_LIBS) - __find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIBS}) + __find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIB_PATH} ${PKG_LIBS}) if(${pkg_name}_LIBS) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE) message("Found libs: ${${pkg_name}_LIBS}") @@ -283,8 +297,10 @@ function(mindspore_add_pkg pkg_name ) file(GLOB ${pkg_name}_SOURCE_SUBDIRS ${${pkg_name}_SOURCE_DIR}/*) file(COPY ${${pkg_name}_SOURCE_SUBDIRS} DESTINATION ${${pkg_name}_BASE_DIR}) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/${PKG_HEAD_ONLY} PARENT_SCOPE) - add_library(${pkg_name} INTERFACE) - target_include_directories(${pkg_name} INTERFACE ${${pkg_name}_INC}) + if (NOT PKG_RELEASE) + add_library(${pkg_name} INTERFACE) + target_include_directories(${pkg_name} INTERFACE ${${pkg_name}_INC}) + endif () elseif (PKG_CMAKE_OPTION) # in cmake @@ -355,7 +371,7 @@ function(mindspore_add_pkg pkg_name ) endif() if (PKG_LIBS) - __find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIBS}) + __find_pkg_then_add_target(${pkg_name} ${PKG_EXE} ${PKG_LIB_PATH} ${PKG_LIBS}) set(${pkg_name}_INC ${${pkg_name}_BASE_DIR}/include PARENT_SCOPE) if(NOT ${pkg_name}_LIBS) message(FATAL_ERROR "Can not find pkg: ${pkg_name}") diff 
--git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index d4103f30c0..500972b0d6 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -5,6 +5,10 @@ if(ENABLE_CPU) file(GLOB_RECURSE CPU_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "device/cpu/*.cc" ) + if (CMAKE_SYSTEM_NAME MATCHES "Windows") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-attributes -DHAVE_SNPRINTF") + add_compile_definitions(BUILDING_DLL) + endif() endif() if(ENABLE_GPU) @@ -150,7 +154,15 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "kernel/kash/*.cc" "device/gpu/distribution/collective_init.cc" ) - +if (ENABLE_CPU) + list(REMOVE_ITEM MINDSPORE_SRC_LIST "device/gpu/distribution/collective_init.cc") + if (WIN32) + list(REMOVE_ITEM MINDSPORE_SRC_LIST "kernel/kernel_query.cc") + endif() +endif() +if (NOT ENABLE_GPU) + list(APPEND MINDSPORE_SRC_LIST "device/gpu/distribution/collective_fake_init.cc") +endif() file(GLOB_RECURSE MEM_REUSE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "pre_activate/mem_reuse/*.cc" ) @@ -248,6 +260,7 @@ file(GLOB_RECURSE MS_GVAR_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} add_library(mindspore_gvar SHARED ${MS_GVAR_SRC_LIST}) add_library(mindspore STATIC ${MINDSPORE_SRC_LIST}) +add_dependencies(mindspore GENERATED_OUTPUT_DIR) if(ENABLE_D) list(APPEND MINDSPORE_PROTO_LIST ${MINDSPORE_PROTO_AICPU_LIST}) @@ -315,19 +328,33 @@ if(ENABLE_D) endif() target_link_libraries(mindspore securec) -target_link_libraries(mindspore dl) +if (NOT WIN32) + target_link_libraries(mindspore dl) +endif() target_link_libraries(mindspore mindspore::flatbuffers) # link protobuf if (ENABLE_D) target_link_libraries(mindspore mindspore::protobuf) endif() +if (WIN32) + target_link_libraries(mindspore ${PYTHON_LIBRARIES} mindspore_gvar) +endif() + # set c_expression building +if (WIN32) +set(PYTHON_MODULE_SOURCE ${MS_GVAR_SRC_LIST} + pipeline/init.cc + kernel/oplib/oplib.cc + ${MINDSPORE_SRC_LIST} ${MS_STEPS_SRC_LIST} 
${MS_CCE_SRC_LIST} ${MS_AICPU_SRC_LIST} ${MS_TASKINFO_LIST} ${MS_RT_SRC_LIST} + ${GPU_NCCL_LIST} ${MS_HCCL_SRC_LIST} ${MS_PREDICT_SRC_LIST} ${CPU_SRC_LIST} ${MEM_REUSE_SRC_LIST} ${GPU_KERNEL_SRC_LIST}) +else() set(PYTHON_MODULE_SOURCE pipeline/init.cc kernel/oplib/oplib.cc - ${MS_STEPS_SRC_LIST} ${MS_CCE_SRC_LIST} ${MS_AICPU_SRC_LIST} ${MS_TASKINFO_LIST} ${MS_RT_SRC_LIST} - ${GPU_NCCL_LIST} ${MS_HCCL_SRC_LIST} ${MS_PREDICT_SRC_LIST} ${CPU_SRC_LIST} ${MEM_REUSE_SRC_LIST} ${GPU_KERNEL_SRC_LIST}) + ${MS_STEPS_SRC_LIST} ${MS_CCE_SRC_LIST} ${MS_AICPU_SRC_LIST} ${MS_TASKINFO_LIST} ${MS_RT_SRC_LIST} + ${GPU_NCCL_LIST} ${MS_HCCL_SRC_LIST} ${MS_PREDICT_SRC_LIST} ${CPU_SRC_LIST} ${MEM_REUSE_SRC_LIST} ${GPU_KERNEL_SRC_LIST}) +endif() set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE) pybind11_add_module(_c_expression ${PYTHON_MODULE_SOURCE}) @@ -339,6 +366,8 @@ if (CMAKE_SYSTEM_NAME MATCHES "Linux") elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") set_target_properties(_c_expression PROPERTIES MACOSX_RPATH ON) set(ORIGIN_PATH @loader_path) +elseif (CMAKE_SYSTEM_NAME MATCHES "Windows") + set(ORIGIN_PATH $ORIGIN) else () MESSAGE(FATAL_ERROR "other platform: ${CMAKE_SYSTEM_NAME}") endif () @@ -346,11 +375,22 @@ endif () set(ORIGIN_PATH ${ORIGIN_PATH}/lib) set_target_properties(_c_expression PROPERTIES INSTALL_RPATH ${ORIGIN_PATH}) -target_link_libraries(_c_expression PRIVATE - mindspore::pybind11_module - mindspore - mindspore_gvar - ) +if (WIN32) + target_link_libraries(_c_expression PRIVATE + mindspore::pybind11_module + securec + proto_input + mindspore::flatbuffers + mindspore::glog + ) +else() + target_link_libraries(_c_expression PRIVATE + mindspore::pybind11_module + mindspore + mindspore_gvar + mindspore::glog + ) +endif() if(ENABLE_GPU) execute_process(COMMAND bash ${CMAKE_SOURCE_DIR}/third_party/apply_patches.sh @@ -493,7 +533,3 @@ if(ENABLE_MINDDATA) add_subdirectory(mindrecord) add_subdirectory(dataset) endif() - -if (USE_GLOG) - target_link_libraries(_c_expression PRIVATE 
mindspore::glog) -endif() \ No newline at end of file diff --git a/mindspore/ccsrc/dataset/CMakeLists.txt b/mindspore/ccsrc/dataset/CMakeLists.txt index 52ba328828..b3ac34de70 100644 --- a/mindspore/ccsrc/dataset/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/CMakeLists.txt @@ -80,14 +80,19 @@ set_target_properties(_c_dataengine PROPERTIES ###################################################################### ################# Link with external libraries ######################## -target_link_libraries(_c_dataengine PRIVATE mindspore mindspore_gvar) -target_link_libraries(_c_dataengine PRIVATE mindspore::pybind11_module -ldl mindspore::protobuf ${SECUREC_LIBRARY}) +if (WIN32) + target_link_libraries(_c_dataengine PRIVATE mindspore) + target_link_libraries(_c_dataengine PRIVATE mindspore::pybind11_module ${PYTHON_LIBRARIES} mindspore::protobuf ${SECUREC_LIBRARY}) +else() + target_link_libraries(_c_dataengine PRIVATE mindspore mindspore_gvar) + target_link_libraries(_c_dataengine PRIVATE mindspore::pybind11_module -ldl mindspore::protobuf ${SECUREC_LIBRARY}) +endif() target_link_libraries(_c_dataengine PUBLIC mindspore::jpeg_turbo mindspore::opencv_core mindspore::opencv_imgcodecs mindspore::opencv_imgproc) if (ENABLE_GPUQUE) target_link_libraries(_c_dataengine PRIVATE gpu_queue ${CUDNN_PATH}/lib64/libcudnn.so - ${CUDA_PATH}/lib64/libcudart.so + ${CUDA_PATH}/lib64/libcudart.so ${CUDA_PATH}/lib64/stubs/libcuda.so) endif () @@ -96,7 +101,12 @@ if (ENABLE_TDTQUE) endif () add_dependencies(_c_dataengine _c_mindrecord) -target_link_libraries(_c_dataengine PRIVATE _c_mindrecord) +if (WIN32) + set(MINDRECORD_LINK_OBJECT ${CMAKE_BINARY_DIR}/mindspore/ccsrc/mindrecord/CMakeFiles/_c_mindrecord.dir/objects.a) + target_link_libraries(_c_dataengine PRIVATE _c_mindrecord ${MINDRECORD_LINK_OBJECT} mindspore::sqlite) +else() + target_link_libraries(_c_dataengine PRIVATE _c_mindrecord) +endif() if (USE_GLOG) target_link_libraries(_c_dataengine PRIVATE mindspore::glog) diff --git 
a/mindspore/ccsrc/dataset/api/python_bindings.cc b/mindspore/ccsrc/dataset/api/python_bindings.cc index 3d543f946b..e2675ee217 100644 --- a/mindspore/ccsrc/dataset/api/python_bindings.cc +++ b/mindspore/ccsrc/dataset/api/python_bindings.cc @@ -19,7 +19,9 @@ #include "dataset/kernels/no_op.h" #include "dataset/kernels/data/one_hot_op.h" #include "dataset/kernels/image/center_crop_op.h" +#if !defined(_WIN32) && !defined(_WIN64) #include "dataset/kernels/image/change_mode_op.h" +#endif #include "dataset/kernels/image/cut_out_op.h" #include "dataset/kernels/image/decode_op.h" #include "dataset/kernels/image/distort_bounding_box_crop_op.h" @@ -279,9 +281,11 @@ void bindTensorOps2(py::module *m) { py::arg("fillG") = RandomCropOp::kDefFillG, py::arg("fillB") = RandomCropOp::kDefFillB); (void)py::class_>(*m, "ChannelSwapOp").def(py::init<>()); +#if !defined(_WIN32) && !defined(_WIN64) (void)py::class_>( *m, "ChangeModeOp", "Tensor operation to change colors from BGR to RGB") .def(py::init<>()); +#endif (void)py::class_>( *m, "OneHotOp", "Tensor operation to apply one hot encoding. 
Takes number of classes.") diff --git a/mindspore/ccsrc/dataset/core/tensor.h b/mindspore/ccsrc/dataset/core/tensor.h index 2017c2dfab..3409354d19 100644 --- a/mindspore/ccsrc/dataset/core/tensor.h +++ b/mindspore/ccsrc/dataset/core/tensor.h @@ -22,6 +22,10 @@ #include #include "./securec.h" #include "utils/log_adapter.h" +#if defined(_WIN32) || defined(_WIN64) +#undef HAVE_STDDEF_H +#undef HAVE_STDLIB_H +#endif #include "pybind11/numpy.h" #include "pybind11/pybind11.h" #include "pybind11/stl.h" diff --git a/mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc index 2afafe2128..bdf39b6a39 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/shuffle_op.cc @@ -85,7 +85,11 @@ Status ShuffleOp::SelfReset() { if (!reshuffle_each_epoch_) { rng_ = std::mt19937_64(shuffle_seed_); } else { +#if defined(_WIN32) || defined(_WIN64) + std::random_device random_device; +#else std::random_device random_device("/dev/urandom"); +#endif std::uniform_int_distribution distribution(0, std::numeric_limits::max()); shuffle_seed_ = distribution(random_device); rng_ = std::mt19937_64(shuffle_seed_); diff --git a/mindspore/ccsrc/dataset/kernels/image/CMakeLists.txt b/mindspore/ccsrc/dataset/kernels/image/CMakeLists.txt index 7a6240d173..23a26d5214 100644 --- a/mindspore/ccsrc/dataset/kernels/image/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/kernels/image/CMakeLists.txt @@ -1,22 +1,46 @@ -add_library(kernels-image OBJECT - center_crop_op.cc - change_mode_op.cc - cut_out_op.cc - decode_op.cc - distort_bounding_box_crop_op.cc - hwc_to_chw_op.cc - image_utils.cc - normalize_op.cc - pad_op.cc - random_color_adjust_op.cc - random_crop_decode_resize_op.cc - random_crop_and_resize_op.cc - random_crop_op.cc - random_horizontal_flip_op.cc - random_resize_op.cc - random_rotation_op.cc - random_vertical_flip_op.cc - rescale_op.cc - resize_bilinear_op.cc - resize_op.cc - ) +if 
(WIN32) + add_library(kernels-image OBJECT + center_crop_op.cc + cut_out_op.cc + decode_op.cc + distort_bounding_box_crop_op.cc + hwc_to_chw_op.cc + image_utils.cc + normalize_op.cc + pad_op.cc + random_color_adjust_op.cc + random_crop_decode_resize_op.cc + random_crop_and_resize_op.cc + random_crop_op.cc + random_horizontal_flip_op.cc + random_resize_op.cc + random_rotation_op.cc + random_vertical_flip_op.cc + rescale_op.cc + resize_bilinear_op.cc + resize_op.cc + ) +else() + add_library(kernels-image OBJECT + center_crop_op.cc + change_mode_op.cc + cut_out_op.cc + decode_op.cc + distort_bounding_box_crop_op.cc + hwc_to_chw_op.cc + image_utils.cc + normalize_op.cc + pad_op.cc + random_color_adjust_op.cc + random_crop_decode_resize_op.cc + random_crop_and_resize_op.cc + random_crop_op.cc + random_horizontal_flip_op.cc + random_resize_op.cc + random_rotation_op.cc + random_vertical_flip_op.cc + rescale_op.cc + resize_bilinear_op.cc + resize_op.cc + ) +endif() diff --git a/mindspore/ccsrc/dataset/kernels/image/image_utils.cc b/mindspore/ccsrc/dataset/kernels/image/image_utils.cc index 8735cf7a05..63c9bb2641 100644 --- a/mindspore/ccsrc/dataset/kernels/image/image_utils.cc +++ b/mindspore/ccsrc/dataset/kernels/image/image_utils.cc @@ -186,7 +186,11 @@ void JpegSetSource(j_decompress_ptr cinfo, const void *data, int64_t datasize) { (*cinfo->mem->alloc_small)(reinterpret_cast(cinfo), JPOOL_PERMANENT, sizeof(struct jpeg_source_mgr))); cinfo->src->init_source = JpegInitSource; cinfo->src->fill_input_buffer = JpegFillInputBuffer; +#if defined(_WIN32) || defined(_WIN64) + cinfo->src->skip_input_data = reinterpret_cast(JpegSkipInputData); +#else cinfo->src->skip_input_data = JpegSkipInputData; +#endif cinfo->src->resync_to_restart = jpeg_resync_to_restart; cinfo->src->term_source = JpegTermSource; cinfo->src->bytes_in_buffer = datasize; diff --git a/mindspore/ccsrc/dataset/kernels/image/image_utils.h b/mindspore/ccsrc/dataset/kernels/image/image_utils.h index 
a4ddef40d6..51090fb9ea 100644 --- a/mindspore/ccsrc/dataset/kernels/image/image_utils.h +++ b/mindspore/ccsrc/dataset/kernels/image/image_utils.h @@ -22,6 +22,10 @@ #include #include #include +#if defined(_WIN32) || defined(_WIN64) +#undef HAVE_STDDEF_H +#undef HAVE_STDLIB_H +#endif #include "./jpeglib.h" #include "./jerror.h" #include diff --git a/mindspore/ccsrc/dataset/util/path.cc b/mindspore/ccsrc/dataset/util/path.cc index dd72f80766..24c8db5b9c 100644 --- a/mindspore/ccsrc/dataset/util/path.cc +++ b/mindspore/ccsrc/dataset/util/path.cc @@ -27,7 +27,7 @@ namespace mindspore { namespace dataset { #ifdef _WIN32 -char Path::_separator = '\\'; +char Path::separator_ = '\\'; #else char Path::separator_ = '/'; #endif @@ -129,7 +129,11 @@ bool Path::IsDirectory() { Status Path::CreateDirectory() { if (!Exists()) { +#if defined(_WIN32) || defined(_WIN64) + int rc = mkdir(common::SafeCStr(path_)); +#else int rc = mkdir(common::SafeCStr(path_), 0700); +#endif if (rc) { std::ostringstream oss; oss << "Unable to create directory " << path_ << ". 
Errno = " << errno; diff --git a/mindspore/ccsrc/dataset/util/random.cc b/mindspore/ccsrc/dataset/util/random.cc index e4bab6094c..2a0762c920 100644 --- a/mindspore/ccsrc/dataset/util/random.cc +++ b/mindspore/ccsrc/dataset/util/random.cc @@ -32,7 +32,11 @@ namespace dataset { uint32_t GetSeed() { uint32_t seed = GlobalContext::config_manager()->seed(); if (seed == std::mt19937::default_seed) { +#if defined(_WIN32) || defined(_WIN64) + std::random_device random_device; +#else std::random_device random_device("/dev/urandom"); +#endif std::uniform_int_distribution distribution(0, std::numeric_limits::max()); seed = distribution(random_device); } diff --git a/mindspore/ccsrc/dataset/util/services.cc b/mindspore/ccsrc/dataset/util/services.cc index 7dcb5b14c9..ea7b11014c 100644 --- a/mindspore/ccsrc/dataset/util/services.cc +++ b/mindspore/ccsrc/dataset/util/services.cc @@ -16,7 +16,9 @@ #include "dataset/util/services.h" #include +#if !defined(_WIN32) && !defined(_WIN64) #include +#endif #include #include #include "dataset/util/circular_pool.h" @@ -28,6 +30,7 @@ namespace dataset { std::unique_ptr Services::instance_ = nullptr; std::once_flag Services::init_instance_flag_; +#if !defined(_WIN32) && !defined(_WIN64) std::string Services::GetUserName() { char user[LOGIN_NAME_MAX]; (void)getlogin_r(user, sizeof(user)); @@ -41,10 +44,15 @@ std::string Services::GetHostName() { } int Services::GetLWP() { return syscall(SYS_gettid); } +#endif std::string Services::GetUniqueID() { const std::string kStr = "abcdefghijklmnopqrstuvwxyz0123456789"; +#if defined(_WIN32) || defined(_WIN64) + std::mt19937 gen{std::random_device{}()}; +#else std::mt19937 gen{std::random_device{"/dev/urandom"}()}; +#endif std::uniform_int_distribution<> dist(0, kStr.size() - 1); char buffer[UNIQUEID_LEN]; for (int i = 0; i < UNIQUEID_LEN; i++) { diff --git a/mindspore/ccsrc/dataset/util/services.h b/mindspore/ccsrc/dataset/util/services.h index 65a302af91..5e81c4816e 100644 --- 
a/mindspore/ccsrc/dataset/util/services.h +++ b/mindspore/ccsrc/dataset/util/services.h @@ -62,11 +62,13 @@ class Services { std::shared_ptr GetServiceMemPool() { return pool_; } +#if !defined(_WIN32) && !defined(_WIN64) static std::string GetUserName(); static std::string GetHostName(); static int GetLWP(); +#endif static std::string GetUniqueID(); diff --git a/mindspore/ccsrc/dataset/util/sig_handler.cc b/mindspore/ccsrc/dataset/util/sig_handler.cc index 1b6a3701c3..644a633066 100644 --- a/mindspore/ccsrc/dataset/util/sig_handler.cc +++ b/mindspore/ccsrc/dataset/util/sig_handler.cc @@ -16,13 +16,16 @@ #include "dataset/util/sig_handler.h" #include #include +#if !defined(_WIN32) && !defined(_WIN64) #include +#endif #include #include "dataset/util/task_manager.h" namespace mindspore { namespace dataset { // Register the custom signal handlers +#if !defined(_WIN32) && !defined(_WIN64) void RegisterHandlers() { struct sigaction new_int_action; @@ -40,5 +43,6 @@ extern void IntHandler(int sig_num, // The signal that was raised // Wake up the watchdog which is designed as async-signal-safe. TaskManager::WakeUpWatchDog(); } +#endif } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/dataset/util/sig_handler.h b/mindspore/ccsrc/dataset/util/sig_handler.h index 6c5e1f015c..af40738feb 100644 --- a/mindspore/ccsrc/dataset/util/sig_handler.h +++ b/mindspore/ccsrc/dataset/util/sig_handler.h @@ -22,12 +22,14 @@ namespace mindspore { namespace dataset { // Register the custom signal handlers +#if !defined(_WIN32) && !defined(_WIN64) extern void RegisterHandlers(); // A signal handler for SIGINT. Drives interrupt to watchdog extern void IntHandler(int sig_num, // The signal that was raised siginfo_t *sig_info, // The siginfo structure. 
void *context); // context info +#endif } // namespace dataset } // namespace mindspore diff --git a/mindspore/ccsrc/debug/anf_ir_dump.cc b/mindspore/ccsrc/debug/anf_ir_dump.cc index fcf0777d16..e977084ab8 100644 --- a/mindspore/ccsrc/debug/anf_ir_dump.cc +++ b/mindspore/ccsrc/debug/anf_ir_dump.cc @@ -14,6 +14,9 @@ * limitations under the License. */ #include "debug/anf_ir_dump.h" +#if defined(_WIN32) || defined(_WIN64) +#include +#endif #include #include #include @@ -434,9 +437,15 @@ void DumpIR(const std::string &filename, const FuncGraphPtr &graph, bool dump_fu return; } char real_path[PATH_MAX] = {0}; +#if defined(_WIN32) || defined(_WIN64) + if (_fullpath(real_path, filename.c_str(), PATH_MAX) == nullptr) { + MS_LOG(DEBUG) << "dir " << filename << " does not exit."; + } +#else if (nullptr == realpath(filename.c_str(), real_path)) { MS_LOG(DEBUG) << "Dir " << filename << " does not exit."; } +#endif OrderedMap para_map; std::string path_string = real_path; diff --git a/mindspore/ccsrc/debug/anf_ir_utils.cc b/mindspore/ccsrc/debug/anf_ir_utils.cc index c25ad862df..8e626d6f9a 100644 --- a/mindspore/ccsrc/debug/anf_ir_utils.cc +++ b/mindspore/ccsrc/debug/anf_ir_utils.cc @@ -48,9 +48,15 @@ std::string GetMsIrPath(void) { if (path_ptr != nullptr) { path = path_ptr; char real_path[PATH_MAX] = {0}; +#if defined(_WIN32) || defined(_WIN64) + if (path.size() > PATH_MAX || _fullpath(real_path, path.c_str(), PATH_MAX) == nullptr) { + MS_LOG(EXCEPTION) << "MS IR Path error, " << path_ptr; + } +#else if (path.size() > PATH_MAX || nullptr == realpath(path.c_str(), real_path)) { MS_LOG(EXCEPTION) << "MS IR path error, " << path_ptr; } +#endif path = real_path; } return path; @@ -2247,8 +2253,14 @@ void DumpIRProto(const FuncGraphPtr& func_graph, const std::string& suffix) { return; } char real_path[PATH_MAX] = {0}; - if (nullptr == realpath(file_path.c_str(), real_path)) { - MS_LOG(DEBUG) << "Dir " << file_path << " does not exit."; + char* real_path_ret = nullptr; +#if 
defined(_WIN32) || defined(_WIN64) + real_path_ret = _fullpath(real_path, file_path.c_str(), PATH_MAX); +#else + real_path_ret = realpath(file_path.c_str(), real_path); +#endif + if (nullptr == real_path_ret) { + MS_LOG(DEBUG) << "dir " << file_path << " does not exit."; } else { std::string path_string = real_path; if (chmod(common::SafeCStr(path_string), S_IRUSR | S_IWUSR) == -1) { diff --git a/mindspore/ccsrc/debug/info.cc b/mindspore/ccsrc/debug/info.cc index 5c1fc372c5..3c43bfa9b1 100644 --- a/mindspore/ccsrc/debug/info.cc +++ b/mindspore/ccsrc/debug/info.cc @@ -53,10 +53,15 @@ std::string Location::ToString(SourceLineTip tip) { } char path[PATH_MAX + 1] = {0x00}; +#if defined(_WIN32) || defined(_WIN64) + if (file_name_.size() > PATH_MAX || _fullpath(path, file_name_.c_str(), PATH_MAX) == nullptr) { + return debug_info_ss.str(); + } +#else if (file_name_.size() > PATH_MAX || realpath(file_name_.c_str(), path) == nullptr) { return debug_info_ss.str(); } - +#endif auto src_path = std::string(path); std::ifstream file(src_path); if (!file.is_open()) { diff --git a/mindspore/ccsrc/device/cpu/cpu_resource_manager.cc b/mindspore/ccsrc/device/cpu/cpu_resource_manager.cc index 97df7d4487..45b9ea5bed 100644 --- a/mindspore/ccsrc/device/cpu/cpu_resource_manager.cc +++ b/mindspore/ccsrc/device/cpu/cpu_resource_manager.cc @@ -60,6 +60,7 @@ void CPUResourceManager::MemMalloc(const session::KernelGraph *graph) { void *CPUResourceManager::MemMalloc(size_t mem_size) { void *ptr = malloc(mem_size); if (ptr != nullptr) { + memset_s(ptr, mem_size, 0, mem_size); dynamic_mem_[ptr] = mem_size; return ptr; } else { diff --git a/mindspore/ccsrc/device/gpu/distribution/collective_fake_init.cc b/mindspore/ccsrc/device/gpu/distribution/collective_fake_init.cc new file mode 100644 index 0000000000..06497a2e82 --- /dev/null +++ b/mindspore/ccsrc/device/gpu/distribution/collective_fake_init.cc @@ -0,0 +1,28 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "device/gpu/distribution/collective_fake_init.h" +#include "utils/log_adapter.h" + +namespace mindspore { +namespace device { +namespace gpu { +void CollectiveFakeInitializer::InitCollective() { MS_LOG(EXCEPTION) << "build without enable gpu!"; } + +void CollectiveFakeInitializer::FinalizeCollective() { MS_LOG(EXCEPTION) << "build without enable gpu!"; } +} // namespace gpu +} // namespace device +} // namespace mindspore diff --git a/mindspore/ccsrc/device/gpu/distribution/collective_fake_init.h b/mindspore/ccsrc/device/gpu/distribution/collective_fake_init.h new file mode 100644 index 0000000000..65467139c0 --- /dev/null +++ b/mindspore/ccsrc/device/gpu/distribution/collective_fake_init.h @@ -0,0 +1,37 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_DEVICE_GPU_DISTRIBUTION_COLLECTIVE_FAKE_INIT_H_ +#define MINDSPORE_CCSRC_DEVICE_GPU_DISTRIBUTION_COLLECTIVE_FAKE_INIT_H_ + +namespace mindspore { +namespace device { +namespace gpu { + +class CollectiveFakeInitializer { + public: + CollectiveFakeInitializer() = default; + ~CollectiveFakeInitializer() = default; + CollectiveFakeInitializer(CollectiveFakeInitializer const &) = delete; + CollectiveFakeInitializer &operator=(const CollectiveFakeInitializer &) = delete; + static void InitCollective(); + static void FinalizeCollective(); +}; +} // namespace gpu +} // namespace device +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_DEVICE_GPU_DISTRIBUTION_COLLECTIVE_FAKE_INIT_H_ diff --git a/mindspore/ccsrc/device/kernel_runtime.cc b/mindspore/ccsrc/device/kernel_runtime.cc index 3b0d84eada..7f3d31d8d0 100644 --- a/mindspore/ccsrc/device/kernel_runtime.cc +++ b/mindspore/ccsrc/device/kernel_runtime.cc @@ -44,19 +44,29 @@ bool KernelRuntime::Run(session::KernelGraph *graph) { bool ret = false; auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); +#if defined(_WIN32) || defined(_WIN64) + auto start_time = std::chrono::steady_clock::now(); +#else struct timeval start_time, end_time; (void)gettimeofday(&start_time, nullptr); +#endif bool is_task_sink = context_ptr->enable_task_sink(); if (is_task_sink) { ret = RunTask(graph); } else { ret = LaunchKernel(graph); } +#if defined(_WIN32) || defined(_WIN64) + auto end_time = std::chrono::steady_clock::now(); + std::chrono::duration> cost = end_time - start_time; + MS_LOG(INFO) << "Call MS Run Success in " << cost.count() << " us"; +#else (void)gettimeofday(&end_time, nullptr); const uint64_t kUSecondInSecond = 1000000; uint64_t cost = kUSecondInSecond * static_cast(end_time.tv_sec - start_time.tv_sec); cost += static_cast(end_time.tv_usec - start_time.tv_usec); MS_LOG(INFO) << "Call MS Run Success in " << cost << " us"; +#endif return ret; } @@ -561,8 +571,12 @@ 
bool KernelRuntime::LaunchKernelMod(const session::KernelGraph &graph) { AddressPtrList kernel_workspaces; AddressPtrList kernel_outputs; GenLaunchArgs(*kernel_mod, kernel, &kernel_inputs, &kernel_workspaces, &kernel_outputs); +#if defined(_WIN32) || defined(_WIN64) + auto start_time = std::chrono::steady_clock::now(); +#else struct timeval start_time, end_time; (void)gettimeofday(&start_time, nullptr); +#endif auto ret = kernel_mod->Launch(kernel_inputs, kernel_workspaces, kernel_outputs, reinterpret_cast(stream_)); if (!ret) { @@ -572,11 +586,17 @@ bool KernelRuntime::LaunchKernelMod(const session::KernelGraph &graph) { if (AnfAlgo::GetKernelType(kernel) == TBE_KERNEL && !SyncStream()) { MS_LOG(EXCEPTION) << "SyncStream failed."; } +#if defined(_WIN32) || defined(_WIN64) + auto end_time = std::chrono::steady_clock::now(); + std::chrono::duration> cost = end_time - start_time; + MS_LOG(DEBUG) << "d " << kernel->fullname_with_scope() << " in " << cost.count() << " us"; +#else (void)gettimeofday(&end_time, nullptr); const uint64_t kUSecondInSecond = 1000000; uint64_t cost = kUSecondInSecond * static_cast(end_time.tv_sec - start_time.tv_sec); cost += static_cast(end_time.tv_usec - start_time.tv_usec); MS_LOG(DEBUG) << "d " << kernel->fullname_with_scope() << " in " << cost << " us"; +#endif } } return true; diff --git a/mindspore/ccsrc/kernel/common_utils.cc b/mindspore/ccsrc/kernel/common_utils.cc index 065cfaddc8..5abaff412e 100644 --- a/mindspore/ccsrc/kernel/common_utils.cc +++ b/mindspore/ccsrc/kernel/common_utils.cc @@ -117,7 +117,11 @@ bool IsAtomicNode(const CNodePtr &kernel_node) { bool KernelMeta::ReadIndex(const std::string &bin_dir) { DIR *dir = opendir(bin_dir.c_str()); if (dir == nullptr) { +#if defined(_WIN32) || defined(_WIN64) + auto ret = mkdir(bin_dir.c_str()); +#else auto ret = mkdir(bin_dir.c_str(), S_IRWXG | S_IRWXU); +#endif if (ret != 0) { MS_LOG(INFO) << "kernel dir not exist[" << bin_dir << "]."; return false; @@ -500,10 +504,17 @@ void 
SaveJsonInfo(const std::string &json_name, const std::string &info) { } filewrite << info << std::endl; filewrite.close(); +#if defined(_WIN32) || defined(_WIN64) + if (nullptr == _fullpath(real_path, path.c_str(), PATH_MAX)) { + MS_LOG(DEBUG) << "dir " << path << " does not exit."; + return; + } +#else if (nullptr == realpath(path.c_str(), real_path)) { MS_LOG(DEBUG) << "dir " << path << " does not exit."; return; } +#endif MS_LOG(INFO) << "real path is :" << real_path; if (chmod(real_path, S_IRUSR) == -1) { MS_LOG(DEBUG) << "modify file:" << real_path << " to read only fail."; diff --git a/mindspore/ccsrc/mindrecord/CMakeLists.txt b/mindspore/ccsrc/mindrecord/CMakeLists.txt index 4b8448287d..a2b8897b16 100644 --- a/mindspore/ccsrc/mindrecord/CMakeLists.txt +++ b/mindspore/ccsrc/mindrecord/CMakeLists.txt @@ -26,7 +26,11 @@ set_target_properties(_c_mindrecord PROPERTIES ) # add link library -target_link_libraries(_c_mindrecord PRIVATE mindspore::sqlite ${PYTHON_LIB} ${SECUREC_LIBRARY} mindspore mindspore_gvar mindspore::protobuf) +if (WIN32) + target_link_libraries(_c_mindrecord PRIVATE mindspore::sqlite mindspore mindspore::protobuf) +else() + target_link_libraries(_c_mindrecord PRIVATE mindspore::sqlite ${PYTHON_LIB} ${SECUREC_LIBRARY} mindspore mindspore_gvar mindspore::protobuf) +endif() if (USE_GLOG) target_link_libraries(_c_mindrecord PRIVATE mindspore::glog) diff --git a/mindspore/ccsrc/mindrecord/common/shard_utils.cc b/mindspore/ccsrc/mindrecord/common/shard_utils.cc index ca4bb8a261..51de0c5f64 100644 --- a/mindspore/ccsrc/mindrecord/common/shard_utils.cc +++ b/mindspore/ccsrc/mindrecord/common/shard_utils.cc @@ -65,6 +65,15 @@ std::pair GetFileName(const std::string &path) { return {FAILED, ""}; } char tmp[PATH_MAX] = {0}; +#if defined(_WIN32) || defined(_WIN64) + if (_fullpath(tmp, dirname(&(buf[0])), PATH_MAX) == nullptr) { + MS_LOG(ERROR) << "Invalid file path, path: " << buf; + return {FAILED, ""}; + } + if (_fullpath(real_path, 
common::SafeCStr(path), PATH_MAX) == nullptr) { + MS_LOG(DEBUG) << "Path: " << common::SafeCStr(path) << "check successfully"; + } +#else if (realpath(dirname(&(buf[0])), tmp) == nullptr) { MS_LOG(ERROR) << "Invalid file path, path: " << buf; return {FAILED, ""}; @@ -72,6 +81,7 @@ std::pair GetFileName(const std::string &path) { if (realpath(common::SafeCStr(path), real_path) == nullptr) { MS_LOG(DEBUG) << "Path: " << path << "check successfully"; } +#endif std::string s = real_path; char sep = '/'; size_t i = s.rfind(sep, s.length()); @@ -91,6 +101,15 @@ std::pair GetParentDir(const std::string &path) { return {FAILED, ""}; } char tmp[PATH_MAX] = {0}; +#if defined(_WIN32) || defined(_WIN64) + if (_fullpath(tmp, dirname(&(buf[0])), PATH_MAX) == nullptr) { + MS_LOG(ERROR) << "Invalid file path, path: " << buf; + return {FAILED, ""}; + } + if (_fullpath(real_path, common::SafeCStr(path), PATH_MAX) == nullptr) { + MS_LOG(DEBUG) << "Path: " << common::SafeCStr(path) << "check successfully"; + } +#else if (realpath(dirname(&(buf[0])), tmp) == nullptr) { MS_LOG(ERROR) << "Invalid file path, path: " << buf; return {FAILED, ""}; @@ -98,6 +117,7 @@ std::pair GetParentDir(const std::string &path) { if (realpath(common::SafeCStr(path), real_path) == nullptr) { MS_LOG(DEBUG) << "Path: " << path << "check successfully"; } +#endif std::string s = real_path; if (s.rfind('/') + 1 <= s.size()) { return {SUCCESS, s.substr(0, s.rfind('/') + 1)}; @@ -144,6 +164,9 @@ bool IsLegalFile(const std::string &path) { } std::pair GetDiskSize(const std::string &str_dir, const DiskSizeType &disk_type) { +#if defined(_WIN32) || defined(_WIN64) + return {SUCCESS, 100}; +#else uint64_t ll_count = 0; struct statfs disk_info; if (statfs(common::SafeCStr(str_dir), &disk_info) == -1) { @@ -166,6 +189,7 @@ std::pair GetDiskSize(const std::string &str_dir, const Dis } return {SUCCESS, ll_count}; +#endif } uint32_t GetMaxThreadNum() { diff --git a/mindspore/ccsrc/mindrecord/include/common/shard_utils.h 
b/mindspore/ccsrc/mindrecord/include/common/shard_utils.h index e18cbb75b9..d31037c8ad 100644 --- a/mindspore/ccsrc/mindrecord/include/common/shard_utils.h +++ b/mindspore/ccsrc/mindrecord/include/common/shard_utils.h @@ -21,8 +21,10 @@ #include #include #include +#if !defined(_WIN32) && !defined(_WIN64) #include #include +#endif #include #include #include diff --git a/mindspore/ccsrc/mindrecord/include/shard_reader.h b/mindspore/ccsrc/mindrecord/include/shard_reader.h index c114b17951..5548473cd7 100644 --- a/mindspore/ccsrc/mindrecord/include/shard_reader.h +++ b/mindspore/ccsrc/mindrecord/include/shard_reader.h @@ -19,7 +19,9 @@ #include #include +#if !defined(_WIN32) && !defined(_WIN64) #include +#endif #include #include #include diff --git a/mindspore/ccsrc/mindrecord/io/shard_reader.cc b/mindspore/ccsrc/mindrecord/io/shard_reader.cc index 085f148a88..fd3fede5a2 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_reader.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_reader.cc @@ -987,8 +987,10 @@ TASK_RETURN_CONTENT ShardReader::ConsumerOneTask(int task_id, uint32_t consumer_ MSRStatus ShardReader::ConsumerByRow(int consumer_id) { // Set thread name +#if !defined(_WIN32) && !defined(_WIN64) auto thread_id = kThreadName + std::to_string(consumer_id); prctl(PR_SET_NAME, common::SafeCStr(thread_id), 0, 0, 0); +#endif // Loop forever for (;;) { @@ -1040,8 +1042,10 @@ MSRStatus ShardReader::ReadBlob(const int &shard_id, const uint64_t &page_offset MSRStatus ShardReader::ConsumerByBlock(int consumer_id) { // Set thread name +#if !defined(_WIN32) && !defined(_WIN64) auto thread_id = kThreadName + std::to_string(consumer_id); prctl(PR_SET_NAME, common::SafeCStr(thread_id), 0, 0, 0); +#endif // Loop forever for (;;) { diff --git a/mindspore/ccsrc/mindrecord/io/shard_writer.cc b/mindspore/ccsrc/mindrecord/io/shard_writer.cc index 3d4259ebbd..864e6697d0 100644 --- a/mindspore/ccsrc/mindrecord/io/shard_writer.cc +++ b/mindspore/ccsrc/mindrecord/io/shard_writer.cc @@ -63,6 
+63,15 @@ MSRStatus ShardWriter::Open(const std::vector &paths, bool append) MS_LOG(ERROR) << "Securec func failed"; return FAILED; } +#if defined(_WIN32) || defined(_WIN64) + if (_fullpath(resolved_path, dirname(&(buf[0])), PATH_MAX) == nullptr) { + MS_LOG(ERROR) << "Invalid file path"; + return FAILED; + } + if (_fullpath(resolved_path, common::SafeCStr(path), PATH_MAX) == nullptr) { + MS_LOG(DEBUG) << "Path " << resolved_path; + } +#else if (realpath(dirname(&(buf[0])), resolved_path) == nullptr) { MS_LOG(ERROR) << "Invalid file path"; return FAILED; @@ -70,6 +79,7 @@ MSRStatus ShardWriter::Open(const std::vector &paths, bool append) if (realpath(common::SafeCStr(path), resolved_path) == nullptr) { MS_LOG(DEBUG) << "Path " << resolved_path; } +#endif file_paths_.emplace_back(string(resolved_path)); } diff --git a/mindspore/ccsrc/parallel/allreduce_fusion/step_allreduce_fusion.cc b/mindspore/ccsrc/parallel/allreduce_fusion/step_allreduce_fusion.cc index 9dbd3a0246..8ab0895216 100644 --- a/mindspore/ccsrc/parallel/allreduce_fusion/step_allreduce_fusion.cc +++ b/mindspore/ccsrc/parallel/allreduce_fusion/step_allreduce_fusion.cc @@ -38,10 +38,12 @@ bool StepAllreduceFusion(const FuncGraphPtr &root, const opt::OptimizerPtr &opti (root->has_flag(ALLREDUCE_FUSION_RUN_ONCE_ONLY))) { return changes; } - +#if defined(_WIN32) || defined(_WIN64) + auto start_time = std::chrono::steady_clock::now(); +#else struct timeval start_time, end_time; (void)gettimeofday(&start_time, nullptr); - +#endif MS_LOG(INFO) << "Now entering allreduce fusion"; DumpGraph(root, std::string(ALLREDUCE_FUSION_BEGIN)); @@ -63,11 +65,16 @@ bool StepAllreduceFusion(const FuncGraphPtr &root, const opt::OptimizerPtr &opti // allreduce fusion only run once root->flags()[ALLREDUCE_FUSION_RUN_ONCE_ONLY] = true; res->results()[pipeline::kStepParallelGraph] = root; - +#if defined(_WIN32) || defined(_WIN64) + auto end_time = std::chrono::steady_clock::now(); + std::chrono::duration> cost = end_time - 
start_time; + MS_LOG(INFO) << "Now leaving allreduce fusion, used time: " << cost.count() << " us"; +#else (void)gettimeofday(&end_time, nullptr); uint64_t time = 1000000 * static_cast(end_time.tv_sec - start_time.tv_sec); time += static_cast(end_time.tv_usec - start_time.tv_usec); MS_LOG(INFO) << "Now leaving allreduce fusion, used time: " << time << " us"; +#endif return changes; } } // namespace parallel diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index a42ce612fb..1b881e5d23 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -229,7 +229,7 @@ size_t GetLengthOfDataType(const TypePtr &type) { case kNumberTypeInt: return sizeof(int); case kNumberTypeUInt: - return sizeof(uint); + return sizeof(unsigned int); case kNumberTypeFloat: return sizeof(float); default: diff --git a/mindspore/ccsrc/pipeline/action.cc b/mindspore/ccsrc/pipeline/action.cc index d77fee84aa..3e0f8804e7 100644 --- a/mindspore/ccsrc/pipeline/action.cc +++ b/mindspore/ccsrc/pipeline/action.cc @@ -264,7 +264,7 @@ bool TaskEmitAction(const ResourcePtr& res) { auto bc_ptr = res->results()[kBackend].cast(); std::vector cut_list = compile::nonlinear_ops; if (bc_ptr->name() == kMsConvert) { - cut_list = compile::ms_nonlinear_ops; + cut_list = compile::GetMsNonlinearOps(); } std::shared_ptr compile = std::make_shared(bc_ptr, cut_list); res->results()[kOutput] = compile->CompileAndLink(func_graph); diff --git a/mindspore/ccsrc/pipeline/init.cc b/mindspore/ccsrc/pipeline/init.cc index 24ead047d3..aa370254bb 100644 --- a/mindspore/ccsrc/pipeline/init.cc +++ b/mindspore/ccsrc/pipeline/init.cc @@ -29,8 +29,11 @@ #include "parallel/context.h" #include "parallel/device_manager.h" #include "parallel/costmodel_context.h" +#ifdef ENABLE_GPUQUE #include "device/gpu/distribution/collective_init.h" - +#else +#include "device/gpu/distribution/collective_fake_init.h" +#endif namespace py = 
pybind11; using FuncGraph = mindspore::FuncGraph; @@ -297,9 +300,16 @@ PYBIND11_MODULE(_c_expression, m) { (void)py::class_>(m, "Oplib") .def(py::init()) .def("reg_op", &OpLib::RegOp, "Register op info."); - +#ifdef ENABLE_GPUQUE (void)m.def("init_gpu_collective", &mindspore::device::gpu::CollectiveInitializer::InitCollective, "Init gpu collective communication mode."); (void)m.def("finalize_gpu_collective", &mindspore::device::gpu::CollectiveInitializer::FinalizeCollective, "Finalize gpu collective communication mode."); +#else + (void)m.def("init_gpu_collective", &mindspore::device::gpu::CollectiveFakeInitializer::InitCollective, + "Init gpu collective communication mode."); + (void)m.def("finalize_gpu_collective", &mindspore::device::gpu::CollectiveFakeInitializer::FinalizeCollective, + "Finalize gpu collective communication mode."); + +#endif } diff --git a/mindspore/ccsrc/pre_activate/common/pass_manager.cc b/mindspore/ccsrc/pre_activate/common/pass_manager.cc index 0dfe4c763f..f93a1ad51d 100644 --- a/mindspore/ccsrc/pre_activate/common/pass_manager.cc +++ b/mindspore/ccsrc/pre_activate/common/pass_manager.cc @@ -52,17 +52,27 @@ bool PassManager::Run(const FuncGraphPtr &func_graph, const std::vector size_t num = 0; for (const auto &pass : passes) { if (pass != nullptr) { +#if defined(_WIN32) || defined(_WIN64) + auto start_time = std::chrono::steady_clock::now(); +#else struct timeval start_time {}; struct timeval end_time {}; (void)gettimeofday(&start_time, nullptr); +#endif if (pass->Run(func_graph)) { changed = true; } +#if defined(_WIN32) || defined(_WIN64) + auto end_time = std::chrono::steady_clock::now(); + std::chrono::duration> cost = end_time - start_time; + MS_LOG(INFO) << "Run pass hwopt_" + name() + "_" << num << "_" + pass->name() + " in " << cost.count() << " us"; +#else (void)gettimeofday(&end_time, nullptr); const uint64_t kUSecondInSecond = 1000000; uint64_t cost = kUSecondInSecond * static_cast(end_time.tv_sec - start_time.tv_sec); cost += 
static_cast(end_time.tv_usec - start_time.tv_usec); MS_LOG(INFO) << "Run pass hwopt_" + name() + "_" << num << "_" + pass->name() + " in " << cost << " us"; +#endif if (save_graphs) { auto dump_file_path = save_graphs_path + "/" + "hwopt_" + name() + "_" + std::to_string(num) + "_" + pass->name() + ".ir"; diff --git a/mindspore/ccsrc/utils/log_adapter.cc b/mindspore/ccsrc/utils/log_adapter.cc index 19482ec193..5d6e734fea 100644 --- a/mindspore/ccsrc/utils/log_adapter.cc +++ b/mindspore/ccsrc/utils/log_adapter.cc @@ -26,12 +26,19 @@ namespace mindspore { #ifdef USE_GLOG static std::string GetTime() { #define BUFLEN 80 + static char buf[BUFLEN]; +#if defined(_WIN32) || defined(_WIN64) + time_t time_seconds = time(0); + struct tm now_time; + localtime_s(&now_time, &time_seconds); + sprintf_s(buf, BUFLEN, "%d-%d-%d %d:%d:%d", now_time.tm_year + 1900, now_time.tm_mon + 1, now_time.tm_mday, + now_time.tm_hour, now_time.tm_min, now_time.tm_sec); +#else struct timeval cur_time; (void)gettimeofday(&cur_time, NULL); struct tm now; (void)localtime_r(&cur_time.tv_sec, &now); - static char buf[BUFLEN]; (void)strftime(buf, BUFLEN, "%Y-%m-%d-%H:%M:%S", &now); // format date and time // set micro-second buf[27] = '\0'; @@ -44,6 +51,7 @@ static std::string GetTime() { buf[idx--] = '.'; } } +#endif return std::string(buf); } diff --git a/mindspore/ccsrc/vm/transform.cc b/mindspore/ccsrc/vm/transform.cc index be7aaf5baa..92976e0ddb 100644 --- a/mindspore/ccsrc/vm/transform.cc +++ b/mindspore/ccsrc/vm/transform.cc @@ -41,8 +41,10 @@ using TypedPrimitiveAbstractClosurePtr = std::shared_ptr nonlinear_ops = {prim::kPrimReturn, prim::kPrimPartial, prim::kPrimSwitch, prim::kPrimMakeTuple}; - -std::vector ms_nonlinear_ops = {prim::kPrimReturn, prim::kPrimPartial, prim::kPrimSwitch}; +const std::vector& GetMsNonlinearOps() { + static const std::vector ms_nonlinear_ops = {prim::kPrimReturn, prim::kPrimPartial, prim::kPrimSwitch}; + return ms_nonlinear_ops; +} CompileGraph::CompileGraph(const 
BackendPtr& backend, const std::vector& cut_list) : backend_(backend), cut_list_(cut_list) { diff --git a/mindspore/ccsrc/vm/transform.h b/mindspore/ccsrc/vm/transform.h index f862444a82..290af10049 100644 --- a/mindspore/ccsrc/vm/transform.h +++ b/mindspore/ccsrc/vm/transform.h @@ -42,7 +42,7 @@ extern const char kGeVm[]; // A sub namespace in ME to support compile related definition. namespace compile { extern std::vector nonlinear_ops; -extern std::vector ms_nonlinear_ops; +const std::vector& GetMsNonlinearOps(); using VmEvalFunc = std::function; using VmEvalFuncPtr = std::shared_ptr>; diff --git a/mindspore/log.py b/mindspore/log.py index 38455e2e18..9731b04ac1 100644 --- a/mindspore/log.py +++ b/mindspore/log.py @@ -19,11 +19,13 @@ import sys import os import stat import time -import fcntl import logging from logging.handlers import RotatingFileHandler import traceback import threading +import platform +if platform.system() != "Windows": + import fcntl __all__ = ['get_level', 'get_log_config'] @@ -90,7 +92,8 @@ class _MultiCompatibleRotatingFileHandler(RotatingFileHandler): # Attain an exclusive lock with bloking mode by `fcntl` module. 
with open(self.baseFilename, 'a') as file_pointer: - fcntl.lockf(file_pointer.fileno(), fcntl.LOCK_EX) + if platform.system() != "Windows": + fcntl.lockf(file_pointer.fileno(), fcntl.LOCK_EX) if self.backupCount > 0: self.rolling_rename() diff --git a/third_party/securec/CMakeLists.txt b/third_party/securec/CMakeLists.txt index e360a6ebae..b2f29c488b 100644 --- a/third_party/securec/CMakeLists.txt +++ b/third_party/securec/CMakeLists.txt @@ -1,10 +1,18 @@ SET(CMAKE_BUILD_TYPE "Debug") -SET(CMAKE_C_FLAGS_DEBUG "$ENV{CFLAGS} -fPIC -O0 -Wall -Wno-deprecated-declarations -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -D_LIBCPP_INLINE_VISIBILITY='' -D'_LIBCPP_EXTERN_TEMPLATE(...)='") +if (WIN32) + SET(CMAKE_C_FLAGS_DEBUG "$ENV{CFLAGS} -fPIC -O0 -Wall -Wno-deprecated-declarations -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer") +else() + SET(CMAKE_C_FLAGS_DEBUG "$ENV{CFLAGS} -fPIC -O0 -Wall -Wno-deprecated-declarations -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -D_LIBCPP_INLINE_VISIBILITY='' -D'_LIBCPP_EXTERN_TEMPLATE(...)='") +endif() SET(CMAKE_C_FLAGS_RELEASE "$ENV{CFLAGS} -fPIC -O3 -Wall -Wno-deprecated-declarations") set(CMAKE_EXPORT_COMPILE_COMMANDS ON) #add flags -set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include -Werror") +if (WIN32) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include") +else() + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include -Werror") +endif() include_directories(./include) From 593c4fc7002e962374e8f3ee15e190fef0477372 Mon Sep 17 00:00:00 2001 From: dengwentao Date: Wed, 15 Apr 2020 16:17:08 +0800 Subject: [PATCH 258/367] fix shape used for dump --- .../device/ascend/ascend_kernel_runtime.cc | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc index 311abb634e..c093446969 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc +++ 
b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc @@ -151,14 +151,18 @@ void DumpOutput(mindspore::session::KernelGraph *graph, const string &dump_path, auto output_size = AnfAlgo::GetOutputTensorNum(node); for (size_t j = 0; j < output_size; ++j) { auto addr = AnfAlgo::GetOutputAddr(node, j); - auto shape = trans::GetRuntimePaddingShape(node, j); + std::vector int_shapes; + if (trans_flag) { + int_shapes = trans::GetRuntimePaddingShape(node, j); + } else { + auto shape = AnfAlgo::GetOutputDeviceShape(node, j); + (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes), + [](size_t inner_item) { return SizeToInt(inner_item); }); + } auto type = AnfAlgo::GetOutputInferDataType(node, j); auto format = kOpFormat_DEFAULT; string filepath = dump_path + '/' + kernel_name + '_' + "output_" + std::to_string(j); auto ascend_addr = dynamic_cast(addr); - std::vector int_shapes; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes), - [](size_t inner_item) { return SizeToInt(inner_item); }); auto ret = ascend_addr->DumpMemToFile(trans_flag, filepath, format, int_shapes, type); if (!ret) { MS_LOG(ERROR) << "DumpMemToFile Failed: flag:" << trans_flag << ", path:" << filepath @@ -182,14 +186,18 @@ void DumpParameters(mindspore::session::KernelGraph *graph, const string &dump_p continue; } auto addr = AnfAlgo::GetOutputAddr(item, PRAMATER_OUTPUT_INDEX); - auto shape = trans::GetRuntimePaddingShape(item, PRAMATER_OUTPUT_INDEX); + std::vector int_shapes; + if (trans_flag) { + int_shapes = trans::GetRuntimePaddingShape(item, PRAMATER_OUTPUT_INDEX); + } else { + auto shape = AnfAlgo::GetOutputDeviceShape(item, PRAMATER_OUTPUT_INDEX); + (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes), + [](size_t inner_item) { return SizeToInt(inner_item); }); + } auto type = AnfAlgo::GetOutputInferDataType(item, PRAMATER_OUTPUT_INDEX); auto format = kOpFormat_DEFAULT; string filepath = dump_path + '/' + 
parameter_name + '_' + "output_0"; auto ascend_addr = dynamic_cast(addr); - std::vector int_shapes; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(int_shapes), - [](size_t inner_item) { return SizeToInt(inner_item); }); auto ret = ascend_addr->DumpMemToFile(trans_flag, filepath, format, int_shapes, type); if (!ret) { MS_LOG(ERROR) << "DumpMemToFile Failed: flag:" << trans_flag << ", path:" << filepath From 2122a28e1a1bb87a6a629ee8d8cdae6253e1cf97 Mon Sep 17 00:00:00 2001 From: "wangnan39@huawei.com" Date: Wed, 15 Apr 2020 17:44:05 +0800 Subject: [PATCH 259/367] fix bug in tests of vm conv2d --- tests/vm_impl/vm_me.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/vm_impl/vm_me.py b/tests/vm_impl/vm_me.py index a189aa8b90..da7fc1ecbe 100644 --- a/tests/vm_impl/vm_me.py +++ b/tests/vm_impl/vm_me.py @@ -161,7 +161,7 @@ def col2im(col, input_shape, filter_h, filter_w, stride=1, pad=0): elif isinstance(stride, tuple) and len(stride) == 2: stride_h = stride[0] stride_w = stride[1] - elif isinstance(stride, tuple) and len(stride) == 3: + elif isinstance(stride, tuple) and len(stride) == 4: stride_h = stride[2] stride_w = stride[3] else: @@ -328,7 +328,7 @@ def im2col(img, filter_h, filter_w, stride=1, pad=0, dilation=1): elif isinstance(stride, tuple) and len(stride) == 2: stride_h = stride[0] stride_w = stride[1] - elif isinstance(stride, tuple) and len(stride) == 3: + elif isinstance(stride, tuple) and len(stride) == 4: stride_h = stride[2] stride_w = stride[3] else: @@ -340,7 +340,7 @@ def im2col(img, filter_h, filter_w, stride=1, pad=0, dilation=1): elif isinstance(dilation, tuple) and len(dilation) == 2: dilation_h = dilation[0] dilation_w = dilation[1] - elif isinstance(dilation, tuple) and len(dilation) == 3: + elif isinstance(dilation, tuple) and len(dilation) == 4: dilation_h = dilation[2] dilation_w = dilation[3] else: From c4d0bb266a59f13ecfd680b3d839fac3dcede50f Mon Sep 17 00:00:00 2001 From: leilei_snow 
Date: Wed, 15 Apr 2020 10:00:27 +0000 Subject: [PATCH 260/367] fix optimizer.decay_weight bug --- mindspore/nn/optim/optimizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py index c2a419c565..bfbde78fff 100755 --- a/mindspore/nn/optim/optimizer.py +++ b/mindspore/nn/optim/optimizer.py @@ -109,7 +109,7 @@ class Optimizer(Cell): tuple[Tensor], The gradients after weight decay. """ if self.weight_decay > 0: - params = self.params + params = self.parameters gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags, params, gradients) return gradients From 762bf9ac2538a4746c434f4cc29c1ab64ea76aea Mon Sep 17 00:00:00 2001 From: jojobugfree Date: Wed, 15 Apr 2020 16:37:11 +0800 Subject: [PATCH 261/367] fix tensoradd grad op run fail --- .../device/ascend/ascend_kernel_runtime.cc | 35 ++++++++++++++++--- .../device/ascend/ascend_kernel_runtime.h | 2 ++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc index 311abb634e..69b9e00457 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc @@ -260,6 +260,19 @@ bool AscendKernelRuntime::GenTask(const session::KernelGraph *graph) { auto anf_node_list = graph->execution_order(); TaskGenerator::GenTasks(anf_node_list, &task_info_list, graph->graph_id()); + // Store the task_info_list + auto iter = task_map_.find(graph); + if (iter != task_map_.end()) { + MS_LOG(EXCEPTION) << "graph TaskInfo list already exist"; + } + task_map_[graph] = task_info_list; + + // Graph may have no compute node, such TensorAddGrad. 
+ if (task_info_list.empty()) { + MS_LOG(WARNING) << "graph " << graph->graph_id() << " have no compute node"; + return true; + } + AscendStreamAssign &assign_instance = AscendStreamAssign::GetInstance(); // the streams' flag not HEAD_STREAM std::vector wait_active_stream_list = assign_instance.GetWaitStreams(); @@ -278,10 +291,6 @@ bool AscendKernelRuntime::GenTask(const session::KernelGraph *graph) { graph_model_map_[graph] = model; graph_model_id_map_[graph] = graph->graph_id(); MS_LOG(INFO) << "TaskGenerator GetTaskInfo end..."; - - // Store the task_info_list - task_map_.insert(std::make_pair(graph, task_info_list)); - return true; } @@ -305,6 +314,11 @@ bool AscendKernelRuntime::LoadTask(const session::KernelGraph *graph) { return true; } + if (GraphWithEmptyTaskList(graph)) { + MS_LOG(WARNING) << "LoadTask end, task list is empty"; + return true; + } + auto task_iter = graph_model_map_.find(graph); if (task_iter == graph_model_map_.end()) { MS_LOG(ERROR) << "task not exist"; @@ -333,6 +347,11 @@ bool AscendKernelRuntime::RunTask(const session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(context_ptr); ge::InputData input_tensors = ge::InputData(); ge::OutputData *output_tensors = nullptr; + if (GraphWithEmptyTaskList(graph)) { + MS_LOG(WARNING) << "RunTask end, no task info found"; + return true; + } + auto model_id = GetGraphModelId(graph); bool status = ge::model_runner::ModelRunner::Instance().RunModel(model_id, input_tensors, output_tensors); if (!status) { @@ -468,6 +487,14 @@ bool AscendKernelRuntime::DestroyHccl() { context_ptr->set_enable_hccl(false); return true; } + +bool AscendKernelRuntime::GraphWithEmptyTaskList(const session::KernelGraph *graph) const { + auto iter = task_map_.find(graph); + if (iter == task_map_.end()) { + MS_LOG(EXCEPTION) << "Unknown graph ptr"; + } + return iter->second.empty(); +} } // namespace ascend } // namespace device } // namespace mindspore diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h 
b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h index 0eedad3d2b..547228d32f 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h @@ -55,6 +55,8 @@ class AscendKernelRuntime : public KernelRuntime { void ClearGraphModelMap(); void ReleaseDeviceRes() override; uint32_t GetGraphModelId(const session::KernelGraph *kernel_graph); + bool GraphWithEmptyTaskList(const session::KernelGraph *graph) const; + rtContext_t rt_context_{nullptr}; bool initialized_{false}; unordered_map>> task_map_; From 6d522f0a4fef6f1192e1bb9dbefc63226df3470b Mon Sep 17 00:00:00 2001 From: yangzhenzhang <285824651@qq.com> Date: Tue, 14 Apr 2020 20:13:14 +0800 Subject: [PATCH 262/367] add parallel op for layernorm --- .../auto_parallel/operator_costmodel.cc | 162 +++++---- .../auto_parallel/operator_costmodel.h | 246 ++++++------- mindspore/ccsrc/parallel/dynamic_creator.h | 1 + .../parallel/ops_info/activation_info.cc | 4 +- .../parallel/ops_info/layer_norm_info.cc | 324 ++++++++++++++++++ .../ccsrc/parallel/ops_info/layer_norm_info.h | 76 ++++ .../parallel/ops_info/ops_info_head_files.h | 1 + mindspore/ccsrc/parallel/ops_info/ops_utils.h | 3 + .../ccsrc/parallel/step_auto_parallel.cc | 1 + .../redistribution_layout_transfer_test.cc | 4 +- .../reshape_layout_transfer_test.cc | 4 +- .../tensor_layout/util_layout_gen_test.cc | 8 +- .../tensor_layout/util_layout_gen_test.h | 8 +- tests/ut/python/parallel/test_layer_norm.py | 96 ++++++ 14 files changed, 745 insertions(+), 193 deletions(-) create mode 100644 mindspore/ccsrc/parallel/ops_info/layer_norm_info.cc create mode 100644 mindspore/ccsrc/parallel/ops_info/layer_norm_info.h create mode 100644 tests/ut/python/parallel/test_layer_norm.py diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc index ecd42db6bb..9ea583293b 100644 --- 
a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc @@ -65,7 +65,7 @@ double OperatorCost::GetMemoryCost(const std::vector& inputs, // return the per device communication cost in the forward phase. double MatMulCost::GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t&) const { + int32_t) const { TensorInfo input0 = inputs[0]; TensorInfo output0 = outputs[0]; Shape input0_shape = input0.shape(); @@ -81,7 +81,7 @@ double MatMulCost::GetForwardCommCost(const std::vector& inputs, con // return the per device communication cost in the forward phase. double MatMulCost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { + int32_t stage_id) const { // In backward phase, the communication cost is incurred only when tensor B is a Parameter and tensor B does not // fully utilize all devices double result = 0.0; @@ -108,7 +108,7 @@ double MatMulCost::GetBackwardCommCost(const std::vector& inputs, co // Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double MatMulCost::GetForwardComputationCost(const std::vector& inputs, - const std::vector& outputs, const int32_t&) const { + const std::vector& outputs, int32_t) const { // In forward phase, the compuatation cost = slice(A) + slice(B) + (0 or 1) allreduce(slice(C)) double result = 0.0; TensorInfo output0 = outputs[0]; @@ -127,7 +127,7 @@ double MatMulCost::GetForwardComputationCost(const std::vector& inpu // Return the per device computation cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses double MatMulCost::GetBackwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { + int32_t stage_id) const { // In backward phase, the computation cost = (0 or 1) allreduce(slice(B)) double result = 0.0; if (is_parameter_[1]) { @@ -152,14 +152,14 @@ double MatMulCost::GetBackwardComputationCost(const std::vector& inp // Return the per device communication cost in the forward phase. double ActivationCost::GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const { + int32_t) const { // ReLU is the element-wise operator, thus it does not need communication in the forward phase return 0.0; } // Return the per device communication cost in the backward phase. double ActivationCost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { + int32_t stage_id) const { double result = 0.0; if (is_parameter_[0]) { TensorInfo input1 = inputs[0]; @@ -181,7 +181,7 @@ double ActivationCost::GetBackwardCommCost(const std::vector& inputs // Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double ActivationCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { + int32_t) const { TensorInfo input0_info = inputs[0]; Shape input0_slice_shape = input0_info.slice_shape(); return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); @@ -190,20 +190,19 @@ double ActivationCost::GetForwardComputationCost(const std::vector& // Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double ActivationCost::GetBackwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const { + int32_t) const { return 0.0; } // Return the per device communication cost in the forward phase. 
-double SoftmaxCost::GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const { +double SoftmaxCost::GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const { // In the forward phase, the communication cost = 0 return 0.0; } // Return the per device communication cost in the backward phase. double SoftmaxCost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { + int32_t stage_id) const { double result = 0.0; if (is_parameter_[0]) { TensorInfo input1 = inputs[0]; @@ -225,7 +224,7 @@ double SoftmaxCost::GetBackwardCommCost(const std::vector& inputs, c // Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double SoftmaxCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { + int32_t) const { // In the forward phase, the computation cost = slice(A) TensorInfo input0 = inputs[0]; Shape input0_slice_shape = input0.slice_shape(); @@ -235,21 +234,20 @@ double SoftmaxCost::GetForwardComputationCost(const std::vector& inp // Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double SoftmaxCost::GetBackwardComputationCost(const std::vector&, - const std::vector&, - const int32_t&) const { + const std::vector&, int32_t) const { return 0.0; } // return the per device communication cost in the forward phase. double TmpIdentityCost::GetForwardCommCost(const std::vector&, - const std::vector&, const int32_t&) const { + const std::vector&, int32_t) const { // Identity is the element-wise operator, thus it does not need communication in the forward phase return 0.0; } // return the per device communication cost in the backward phase. 
double TmpIdentityCost::GetBackwardCommCost(const std::vector&, - const std::vector&, const int32_t&) const { + const std::vector&, int32_t) const { // Identity is the element-wise operator, thus it does not need communication in the backward phase return 0.0; } @@ -257,16 +255,14 @@ double TmpIdentityCost::GetBackwardCommCost(const std::vector&, - const std::vector&, - const int32_t&) const { + const std::vector&, int32_t) const { return 0.0; } // Return the per device computation cost in the backward phase. The cost is calculated according to the bytes // this operator uses double TmpIdentityCost::GetBackwardComputationCost(const std::vector&, - const std::vector&, - const int32_t&) const { + const std::vector&, int32_t) const { return 0.0; } @@ -277,7 +273,7 @@ double TmpIdentityCost::GetMemoryCost(const std::vector&, const std: double BatchParallelCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { + int32_t) const { double cost = 0.0; for (size_t i = 0; i < inputs.size(); ++i) { cost += ListProduct(inputs[i].slice_shape()) * static_cast(inputs_type_lengths_[i]); @@ -287,20 +283,19 @@ double BatchParallelCost::GetForwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const { + int32_t) const { return 0.0; } // return the per device communication cost in the forward phase. -double PReLUCost::GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const { +double PReLUCost::GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const { // prelu does not need communication in the forward phase return 0.0; } // return the per device communication cost in the backward phase. 
double PReLUCost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { + int32_t stage_id) const { double result = 0.0; if (is_parameter_[1]) { TensorInfo input1 = inputs[1]; @@ -323,7 +318,7 @@ double PReLUCost::GetBackwardCommCost(const std::vector& inputs, con // Return the per device computation cost in the forward phase. The cost is calculated according to the bytes // this operator uses double PReLUCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { + int32_t) const { // In forward phase, the computation cost = slice(A) + slice(B) Shape input0_slice_shape = inputs[0].slice_shape(); Shape input1_slice_shape = inputs[1].slice_shape(); @@ -336,7 +331,7 @@ double PReLUCost::GetForwardComputationCost(const std::vector& input // this operator uses double PReLUCost::GetBackwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { + int32_t stage_id) const { // In backward phase, the computation cost = (0 or 1) allreduce(slice(B)) double result = 0.0; if (is_parameter_[1]) { @@ -360,15 +355,13 @@ double PReLUCost::GetBackwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const { +double OneHotCost::GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const { // onehot does not need communication in the forward phase return 0.0; } // return the per device communication cost in the backward phase. -double OneHotCost::GetBackwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const { +double OneHotCost::GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const { // onehot does not need communication in the backward phase return 0.0; } @@ -376,7 +369,7 @@ double OneHotCost::GetBackwardCommCost(const std::vector&, const std // Return the per device computation cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses double OneHotCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { + int32_t) const { // In onehot's forward phase, the computation cost = slice(A) Shape input0_slice_shape = inputs[0].slice_shape(); return ListProduct(input0_slice_shape) * static_cast(inputs_type_lengths_[0]); @@ -385,20 +378,20 @@ double OneHotCost::GetForwardComputationCost(const std::vector& inpu // Return the per device computation cost in the backward phase. The cost is calculated according to the bytes // this operator uses double OneHotCost::GetBackwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const { + int32_t) const { return 0.0; } // return the per device communication cost in the forward phase. double SoftmaxCrossEntropyWithLogitsCost::GetForwardCommCost(const std::vector&, - const std::vector&, const int32_t&) const { + const std::vector&, int32_t) const { // SoftmaxCrossEntropyWithLogitsCost does not need communication in the forward phase return 0.0; } // return the per device communication cost in the backward phase. double SoftmaxCrossEntropyWithLogitsCost::GetBackwardCommCost(const std::vector&, - const std::vector&, const int32_t&) const { + const std::vector&, int32_t) const { // SoftmaxCrossEntropyWithLogitsCost does not need communication in the backward phase return 0.0; } @@ -406,8 +399,7 @@ double SoftmaxCrossEntropyWithLogitsCost::GetBackwardCommCost(const std::vector< // Return the per device computation cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses double SoftmaxCrossEntropyWithLogitsCost::GetForwardComputationCost(const std::vector& inputs, - const std::vector&, - const int32_t&) const { + const std::vector&, int32_t) const { // In forward phase, the computation cost = slice(A) + slice(B) Shape input0_slice_shape = inputs[0].slice_shape(); Shape input1_slice_shape = inputs[1].slice_shape(); @@ -419,14 +411,13 @@ double SoftmaxCrossEntropyWithLogitsCost::GetForwardComputationCost(const std::v // Return the per device computation cost in the backward phase. The cost is calculated according to the bytes // this operator uses double SoftmaxCrossEntropyWithLogitsCost::GetBackwardComputationCost(const std::vector&, - const std::vector&, - const int32_t&) const { + const std::vector&, int32_t) const { return 0.0; } // return the per device communication cost in the forward phase. double ReshapeCost::GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const { + int32_t stage_id) const { CheckGlobalDeviceManager(); MS_EXCEPTION_IF_NULL(g_device_manager); RankList dev_list = g_device_manager->GetDeviceListByStageId(stage_id); @@ -441,15 +432,14 @@ double ReshapeCost::GetForwardCommCost(const std::vector& inputs, co } // return the per device communication cost in the backward phase. -double ReshapeCost::GetBackwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const { +double ReshapeCost::GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const { return 0.0; } // Return the per device computation cost in the forward phase. 
The cost is calculated according to the bytes // this operator uses double ReshapeCost::GetForwardComputationCost(const std::vector& inputs, - const std::vector& outputs, const int32_t& stage_id) const { + const std::vector& outputs, int32_t stage_id) const { CheckGlobalDeviceManager(); MS_EXCEPTION_IF_NULL(g_device_manager); RankList dev_list = g_device_manager->GetDeviceListByStageId(stage_id); @@ -466,13 +456,12 @@ double ReshapeCost::GetForwardComputationCost(const std::vector& inp // Return the per device computation cost in the backward phase. The cost is calculated according to the bytes // this operator uses double ReshapeCost::GetBackwardComputationCost(const std::vector&, - const std::vector&, - const int32_t&) const { + const std::vector&, int32_t) const { return 0.0; } double ArithmeticCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { + int32_t) const { double result; result = ListProduct(inputs[0].slice_shape()) * static_cast(inputs_type_lengths_[0]) + ListProduct(inputs[1].slice_shape()) * static_cast(inputs_type_lengths_[1]); @@ -480,7 +469,7 @@ double ArithmeticCost::GetForwardComputationCost(const std::vector& } double ArithmeticCost::GetBackwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { + int32_t stage_id) const { double result = 0.0; CheckGlobalDeviceManager(); MS_EXCEPTION_IF_NULL(g_device_manager); @@ -515,7 +504,7 @@ double ArithmeticCost::GetBackwardComputationCost(const std::vector& } double ArithmeticCost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { + int32_t stage_id) const { double result = 0.0; CheckGlobalDeviceManager(); MS_EXCEPTION_IF_NULL(g_device_manager); @@ -550,7 +539,7 @@ double ArithmeticCost::GetBackwardCommCost(const std::vector& inputs return result; } -bool IsDataParallel(const Shape& shape, const Shape& slice_shape, const int32_t& stage_id) { +bool 
IsDataParallel(const Shape& shape, const Shape& slice_shape, int32_t stage_id) { CheckGlobalDeviceManager(); MS_EXCEPTION_IF_NULL(g_device_manager); auto total_device_num = g_device_manager->GetDeviceListByStageId(stage_id).size(); @@ -560,7 +549,7 @@ bool IsDataParallel(const Shape& shape, const Shape& slice_shape, const int32_t& } double ReduceMethodCost::GetForwardCommCost(const std::vector& inputs, - const std::vector& outputs, const int32_t& stage_id) const { + const std::vector& outputs, int32_t stage_id) const { double result = 0.0; TensorInfo input0 = inputs[0]; TensorInfo output0 = outputs[0]; @@ -571,7 +560,7 @@ double ReduceMethodCost::GetForwardCommCost(const std::vector& input } std::vector dim_list = input0.reduce_dim(); std::vector::iterator pos; - pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](const int32_t& index) { + pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](int32_t index) { return input0_shape[IntToSize(index)] != input0_slice_shape[IntToSize(index)]; }); if (pos != dim_list.end()) { @@ -582,7 +571,7 @@ double ReduceMethodCost::GetForwardCommCost(const std::vector& input } double ReduceMethodCost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, - const int32_t& stage_id) const { + int32_t stage_id) const { double result = 0.0; if (is_parameter_[0]) { TensorInfo input_tensor_info = inputs[0]; @@ -605,8 +594,7 @@ double ReduceMethodCost::GetBackwardCommCost(const std::vector& inpu } double ReduceMethodCost::GetForwardComputationCost(const std::vector& inputs, - const std::vector& outputs, - const int32_t& stage_id) const { + const std::vector& outputs, int32_t stage_id) const { double result = 0.0; TensorInfo input0 = inputs[0]; TensorInfo output0 = outputs[0]; @@ -615,7 +603,7 @@ double ReduceMethodCost::GetForwardComputationCost(const std::vector Shape input0_shape = input0.shape(); if (!cross_batch_ || !IsDataParallel(input0_shape, 
input0_slice_shape, stage_id)) { std::vector::iterator pos; - pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](const int32_t& index) { + pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](int32_t index) { return input0_shape[IntToSize(index)] != input0_slice_shape[IntToSize(index)]; }); if (pos != dim_list.end()) { @@ -628,8 +616,7 @@ double ReduceMethodCost::GetForwardComputationCost(const std::vector } double ReduceMeanCost::GetForwardComputationCost(const std::vector& inputs, - const std::vector& outputs, - const int32_t& stage_id) const { + const std::vector& outputs, int32_t stage_id) const { double result = 0.0; TensorInfo input0 = inputs[0]; TensorInfo output0 = outputs[0]; @@ -638,7 +625,7 @@ double ReduceMeanCost::GetForwardComputationCost(const std::vector& Shape input0_shape = input0.shape(); if (!cross_batch_ || !IsDataParallel(input0_shape, input0_slice_shape, stage_id)) { std::vector::iterator pos; - pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](const int32_t& index) { + pos = std::find_if(dim_list.begin(), dim_list.end(), [input0_shape, input0_slice_shape](int32_t index) { return input0_shape[IntToSize(index)] != input0_slice_shape[IntToSize(index)]; }); if (pos != dim_list.end()) { @@ -651,7 +638,7 @@ double ReduceMeanCost::GetForwardComputationCost(const std::vector& } double DropOutCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { + int32_t) const { if (inputs.empty()) { return 0.0; } @@ -661,21 +648,20 @@ double DropOutCost::GetForwardComputationCost(const std::vector& inp } // return the per device communication cost in the forward phase. 
-double GatherV2Cost::GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const { +double GatherV2Cost::GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const { // GatherV2Cost does not need communication in the forward phase return 0.0; } // return the per device communication cost in the backward phase. double GatherV2Cost::GetBackwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const { + int32_t) const { // GatherV2Cost does not need communication in the backward phase return 0.0; } double GatherV2Cost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, - const int32_t&) const { + int32_t) const { // In forward phase, the computation cost = slice(A) + slice(B) Shape input0_slice_shape = inputs[0].slice_shape(); Shape input1_slice_shape = inputs[1].slice_shape(); @@ -685,8 +671,56 @@ double GatherV2Cost::GetForwardComputationCost(const std::vector& in } double GatherV2Cost::GetBackwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const { + int32_t) const { return 0.0; } + +double LayerNormCost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, + int32_t stage_id) const { + double result = 0.0; + if (is_parameter_.size() != inputs.size()) { + MS_LOG(EXCEPTION) << "Invalid parameter size " << is_parameter_.size() << " for layer norm cost"; + } + if (inputs_type_lengths_.size() != inputs.size()) { + MS_LOG(EXCEPTION) << "Invalid inputs type size " << inputs_type_lengths_.size() << " for layer norm cost"; + } + + MS_EXCEPTION_IF_NULL(g_device_manager); + auto total_device_num = g_device_manager->GetDeviceListByStageId(stage_id).size(); + + for (size_t index = 0; index < inputs.size(); ++index) { + if (is_parameter_[index]) { + TensorInfo tensor_info = inputs[index]; + Shape shape = tensor_info.shape(); + Shape slice_shape = tensor_info.slice_shape(); + int32_t used_device_num = 1; + for (size_t i = 0; i < shape.size(); ++i) { + if 
(slice_shape[i] == 0) { + MS_LOG(EXCEPTION) << "Invalid slice shape " << ShapeToString(slice_shape); + } + used_device_num *= shape[i] / slice_shape[i]; + } + if (total_device_num != IntToSize(used_device_num)) { + result += ListProduct(slice_shape) * static_cast(inputs_type_lengths_[index]); + } + } + } + return result; +} + +double LayerNormCost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, + int32_t) const { + double result = 0.0; + if (inputs_type_lengths_.size() != inputs.size()) { + MS_LOG(EXCEPTION) << "Invalid inputs type size " << inputs_type_lengths_.size() << " for layer norm cost"; + } + + for (size_t index = 0; index < inputs.size(); ++index) { + TensorInfo tensor_info = inputs[index]; + Shape slice_shape = tensor_info.slice_shape(); + result += ListProduct(slice_shape) * static_cast(inputs_type_lengths_[index]); + } + return result; +} } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h index 7dc45bae71..f16dfa21fc 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h @@ -72,18 +72,18 @@ class OperatorCost { // per device communication cost virtual double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const = 0; + int32_t stage_id) const = 0; virtual double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const = 0; + int32_t stage_id) const = 0; virtual double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const = 0; + int32_t stage_id) const = 0; // per device computation cost virtual double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const = 0; + int32_t stage_id) const = 0; virtual double 
GetForwardComputationCost(const std::vector& inputs, - const std::vector& outputs, const int32_t& stage_id) const = 0; + const std::vector& outputs, int32_t stage_id) const = 0; virtual double GetBackwardComputationCost(const std::vector& inputs, - const std::vector& outputs, const int32_t& stage_id) const = 0; + const std::vector& outputs, int32_t stage_id) const = 0; // per device PEAK memory cost in a training iteration // Typically, the PEAK memory cost contributed by an operator is its output (if the output is parameter-invovled), // plus necessary inputs. @@ -114,23 +114,23 @@ class MatMulCost : public OperatorCost { // per device communication cost double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; // per device computation cost double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; }; using MatMulCostPtr = std::shared_ptr; @@ -141,21 +141,21 @@ class ActivationCost : public OperatorCost 
{ ~ActivationCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; }; using ActivationCostPtr = std::shared_ptr; using TransposeCost = ActivationCost; @@ -168,21 +168,21 @@ class SoftmaxCost : public OperatorCost { ~SoftmaxCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const 
override; double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t&) const override; + int32_t) const override; }; using SoftmaxCostPtr = std::shared_ptr; @@ -193,21 +193,21 @@ class TmpIdentityCost : public OperatorCost { ~TmpIdentityCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; // per device PEAK memory cost in a training 
iteration double GetMemoryCost(const std::vector& inputs, const std::vector& outputs) const override; }; @@ -220,25 +220,23 @@ class BatchParallelCost : public OperatorCost { ~BatchParallelCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } - double GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } - double GetBackwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; }; using BatchParallelCostPtr = std::shared_ptr; @@ -249,27 +247,25 @@ class VirtualDatasetCost : public OperatorCost { ~VirtualDatasetCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } - double GetForwardCommCost(const std::vector&, const 
std::vector&, - const int32_t&) const override { + double GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } - double GetBackwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const override { + int32_t) const override { return 0.0; } double GetBackwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const override { + int32_t) const override { return 0.0; } // per device PEAK memory cost in a training iteration @@ -286,29 +282,27 @@ class GeneratorBaseCost : public OperatorCost { ~GeneratorBaseCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } - double GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } - double GetBackwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return 
GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } // Inputs vector is empty for generator ops. double GetForwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const override { + int32_t) const override { return 0.0; } // Generator ops don't have backward steps. double GetBackwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const override { + int32_t) const override { return 0.0; } }; @@ -322,23 +316,23 @@ class PReLUCost : public OperatorCost { // per device communication cost double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; // per device computation cost double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; }; using PReLUCostPtr = std::shared_ptr; @@ -350,23 +344,23 @@ class OneHotCost : public OperatorCost { // per device communication cost double GetCommCost(const 
std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; // per device computation cost double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; }; using OneHotCostPtr = std::shared_ptr; @@ -378,23 +372,23 @@ class SoftmaxCrossEntropyWithLogitsCost : public OperatorCost { // per device communication cost double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; // per device computation cost double 
GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; }; using SoftmaxCrossEntropyWithLogitsCostPtr = std::shared_ptr; @@ -407,27 +401,27 @@ class ReshapeCost : public OperatorCost { // per device communication cost double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; // per device computation cost double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; 
}; using ReshapeCostPtr = std::shared_ptr; @@ -438,24 +432,22 @@ class ArithmeticCost : public OperatorCost { ~ArithmeticCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } - double GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } - double GetBackwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override; + double GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const override; double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; }; using ArithmeticCostPtr = std::shared_ptr; using BiasAddCost = ArithmeticCost; @@ -468,21 +460,21 @@ class ReduceMethodCost : public OperatorCost { ~ReduceMethodCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t& stage_id) const override; + 
int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const override { + int32_t) const override { return 0.0; } void set_cross_batch(bool cb) { cross_batch_ = cb; } @@ -499,7 +491,7 @@ class ReduceMeanCost : public ReduceMethodCost { ~ReduceMeanCost() override = default; double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; }; using ReduceMeanCostPtr = std::shared_ptr; @@ -510,29 +502,27 @@ class GetNextCost : public OperatorCost { ~GetNextCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } - double GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } - double GetBackwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } double GetComputationCost(const std::vector& inputs, 
const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } // Inputs vector is empty for generator ops. double GetForwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const override { + int32_t) const override { return 0.0; } // Generator ops don't have backward steps. double GetBackwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const override { + int32_t) const override { return 0.0; } }; @@ -545,25 +535,51 @@ class DropOutCost : public OperatorCost { ~DropOutCost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } - double GetForwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { + double GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const override { + return 0.0; + } + double GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const override { + return 0.0; + } + double GetComputationCost(const std::vector& inputs, const std::vector& outputs, + int32_t stage_id) const override { + return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); + } + double GetForwardComputationCost(const std::vector&, const std::vector&, + int32_t) const override; + double GetBackwardComputationCost(const std::vector&, const std::vector&, + int32_t) const override { return 0.0; } - double GetBackwardCommCost(const std::vector&, const std::vector&, - const int32_t&) const override { +}; + +using DropOutCostPtr = std::shared_ptr; + +class LayerNormCost : public OperatorCost { + public: + explicit 
LayerNormCost(bool is_inputs_related) : OperatorCost(is_inputs_related) {} + LayerNormCost() : OperatorCost(true) {} + ~LayerNormCost() override = default; + + double GetCommCost(const std::vector& inputs, const std::vector& outputs, + int32_t stage_id) const override { + return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); + } + double GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } + double GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const override; double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const override; + int32_t) const override; double GetBackwardComputationCost(const std::vector&, const std::vector&, - const int32_t&) const override { + int32_t) const override { return 0.0; } }; @@ -577,21 +593,21 @@ class GatherV2Cost : public OperatorCost { ~GatherV2Cost() override = default; double GetCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { return GetForwardCommCost(inputs, outputs, stage_id) + GetBackwardCommCost(inputs, outputs, stage_id); } double GetForwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardCommCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override { + int32_t stage_id) const override { 
return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); } double GetForwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t& stage_id) const override; + int32_t stage_id) const override; double GetBackwardComputationCost(const std::vector& inputs, const std::vector& outputs, - const int32_t&) const override; + int32_t) const override; }; using GatherV2CostPtr = std::shared_ptr; diff --git a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h index 1b864cd8bf..953380fb32 100644 --- a/mindspore/ccsrc/parallel/dynamic_creator.h +++ b/mindspore/ccsrc/parallel/dynamic_creator.h @@ -101,6 +101,7 @@ REGISTER(CosInfo); REGISTER(ACosInfo); REGISTER(LogicalNotInfo); REGISTER(L2NormalizeInfo); +REGISTER(LayerNormInfo); REGISTER(ReduceMaxInfo); REGISTER(ArgMaxWithValueInfo); REGISTER(ArgMinWithValueInfo); diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.cc b/mindspore/ccsrc/parallel/ops_info/activation_info.cc index c11db56082..e659759de2 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.cc @@ -195,8 +195,8 @@ Status Softmax::GetAttrs() { // for example: tensor dimension is 4, then axis range [-4, 3] int32_t dim = SizeToInt(inputs_shape_.at(0).size()); - auto it = std::find_if(axis_.begin(), axis_.end(), - [dim](const int32_t& element) { return ((element >= dim) || (element < -dim)); }); + auto it = + std::find_if(axis_.begin(), axis_.end(), [dim](int32_t element) { return ((element >= dim) || (element < -dim)); }); if (it != axis_.end()) { MS_LOG(ERROR) << name_ << " : The axis(" << *it << ") is out of range[" << -dim << ", " << dim - 1 << "]."; return FAILED; diff --git a/mindspore/ccsrc/parallel/ops_info/layer_norm_info.cc b/mindspore/ccsrc/parallel/ops_info/layer_norm_info.cc new file mode 100644 index 0000000000..3abfc3d2ed --- /dev/null +++ 
b/mindspore/ccsrc/parallel/ops_info/layer_norm_info.cc @@ -0,0 +1,324 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "parallel/ops_info/layer_norm_info.h" +#include +#include +#include "parallel/device_matrix.h" +#include "parallel/strategy.h" + +namespace mindspore { +namespace parallel { +Status LayerNormInfo::GetAttrs() { + auto iter = attrs_.find(BEGIN_NORM_AXIS); + if (iter == attrs_.end()) { + MS_LOG(ERROR) << name_ << ": Can not find the attr of begin norm axis"; + return FAILED; + } + if ((iter->second == nullptr) || !iter->second->isa()) { + MS_LOG(ERROR) << name_ << ": The axis type is not int"; + return FAILED; + } + + int32_t dim = SizeToInt(input_shape_.size()); + auto axis = GetValue(iter->second); + if ((axis >= dim) || (axis < -dim)) { + MS_LOG(ERROR) << name_ << ": The axis(" << axis << ") is out of range[" << -dim << ", " << dim - 1 << "]"; + return FAILED; + } + + if (axis < 0) { + axis = axis + dim; + } + begin_norm_axis_ = IntToSize(axis); + return SUCCESS; +} + +Status LayerNormInfo::CheckStrategy(const StrategyPtr &strategy) { + MS_EXCEPTION_IF_NULL(strategy); + std::vector stra = strategy->GetInputDim(); + if (stra.size() != LAYER_NORM_INPUT_SIZE) { + MS_LOG(ERROR) << name_ << ": Invalid strategy size " << stra.size(); + return FAILED; + } + + if (CheckStrategyValue(strategy, inputs_shape_, is_auto_parallel_) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Invalid 
strategy value"; + return FAILED; + } + + Dimensions input_strategy = stra[LAYER_NORM_INPUT_INDEX]; + Dimensions gamma_strategy = stra[LAYER_NORM_GAMMA_INDEX]; + Dimensions beta_strategy = stra[LAYER_NORM_BETA_INDEX]; + if (begin_norm_axis_ >= input_strategy.size()) { + MS_LOG(ERROR) << name_ << ": Invalid begin norm axis " << begin_norm_axis_; + return FAILED; + } + // check input strategy + for (size_t i = begin_norm_axis_; i < input_strategy.size(); ++i) { + if (input_strategy[begin_norm_axis_] != NO_SPLIT_STRATEGY) { + MS_LOG(ERROR) << name_ << ": Invalid input strategy " << ShapeToString(input_strategy); + return FAILED; + } + } + + // check gamma and beta strategy + if ((gamma_strategy.size() > input_strategy.size()) || (beta_strategy.size() > input_strategy.size())) { + MS_LOG(ERROR) << name_ << " : The strategy size of gamma or beta is lager than input strategy"; + return FAILED; + } + + size_t gamma_diff = input_strategy.size() - gamma_strategy.size(); + for (size_t j = 0; j < gamma_strategy.size(); ++j) { + if (gamma_strategy[j] != input_strategy[gamma_diff + j]) { + MS_LOG(ERROR) << name_ << ": Invalid gamma strategy " << ShapeToString(gamma_strategy); + return FAILED; + } + } + + size_t beta_diff = input_strategy.size() - beta_strategy.size(); + for (size_t k = 0; k < beta_strategy.size(); ++k) { + if (beta_strategy[k] != input_strategy[beta_diff + k]) { + MS_LOG(ERROR) << name_ << ": Invalid beta strategy " << ShapeToString(beta_strategy); + return FAILED; + } + } + return SUCCESS; +} + +Status LayerNormInfo::InferDevMatrixShape() { + if (strategy_ == nullptr) { + MS_LOG(ERROR) << name_ << ": The strategy is null"; + return FAILED; + } + std::vector stra = strategy_->GetInputDim(); + if (stra.empty()) { + MS_LOG(ERROR) << name_ << ": The strategy is empty"; + return FAILED; + } + dev_matrix_shape_ = stra[0]; + return SUCCESS; +} + +Status LayerNormInfo::CreateTensorMap(size_t input_index) { + if (inputs_shape_.size() <= input_index) { + MS_LOG(ERROR) 
<< name_ << ": Invalid index" << input_index; + return FAILED; + } + Shape shape = inputs_shape_[input_index]; + Shape tensor_map; + for (size_t i = 0; i < shape.size(); ++i) { + tensor_map.push_back(SizeToInt(shape.size() - i - 1)); + } + inputs_tensor_map_.push_back(tensor_map); + outputs_tensor_map_.push_back(tensor_map); + return SUCCESS; +} + +Status LayerNormInfo::InferTensorMap() { + if ((CreateTensorMap(LAYER_NORM_INPUT_INDEX) != SUCCESS) || (CreateTensorMap(LAYER_NORM_GAMMA_INDEX) != SUCCESS) || + (CreateTensorMap(LAYER_NORM_BETA_INDEX) != SUCCESS)) { + MS_LOG(ERROR) << name_ << ": Create tensor map failed"; + return FAILED; + } + return SUCCESS; +} + +Status LayerNormInfo::CreateMirrorOp(size_t input_index) { + if (inputs_tensor_map_.size() <= input_index) { + MS_LOG(ERROR) << name_ << ": Invalid index " << input_index; + return FAILED; + } + Shape tensor_map = inputs_tensor_map_[input_index]; + std::vector group; + if (CreateGroupByTensorMap(tensor_map, &group) != SUCCESS) { + MS_LOG(ERROR) << name_ << " : Create group for input " << input_index << " failed"; + return FAILED; + } + OperatorVector mirror_op; + if (!group.empty()) { + mirror_op = CreateMirrorOps(group[0].name(), group[0].GetDevNum()); + MS_LOG(INFO) << name_ << " : Create the mirror ops for input " << input_index << " success, group is " + << group[0].name(); + } + mirror_ops_.push_back(mirror_op); + return SUCCESS; +} + +Status LayerNormInfo::InferMirrorOps() { + if ((CreateMirrorOp(LAYER_NORM_INPUT_INDEX) != SUCCESS) || (CreateMirrorOp(LAYER_NORM_GAMMA_INDEX) != SUCCESS) || + (CreateMirrorOp(LAYER_NORM_BETA_INDEX) != SUCCESS)) { + MS_LOG(ERROR) << name_ << ": Create mirror op failed"; + return FAILED; + } + return SUCCESS; +} + +Status LayerNormInfo::CreateTensorInfo(size_t input_index) { + if ((inputs_shape_.size() <= input_index) || (inputs_tensor_map_.size() <= input_index)) { + MS_LOG(ERROR) << name_ << ": Invalid input index" << input_index; + return FAILED; + } + Shape tensor_map = 
inputs_tensor_map_[input_index]; + Shape shape = inputs_shape_[input_index]; + TensorLayout tensor_layout; + if (tensor_layout.InitFromVector(dev_matrix_shape_, tensor_map, shape) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Init tensor layout for input " << input_index << " failed"; + return FAILED; + } + + TensorInfo tensor_info(tensor_layout); + inputs_tensor_info_.push_back(tensor_info); + outputs_tensor_info_.push_back(tensor_info); + return SUCCESS; +} + +Status LayerNormInfo::InferTensorInfo() { + if ((CreateTensorInfo(LAYER_NORM_INPUT_INDEX) != SUCCESS) || (CreateTensorInfo(LAYER_NORM_GAMMA_INDEX) != SUCCESS) || + (CreateTensorInfo(LAYER_NORM_BETA_INDEX) != SUCCESS)) { + MS_LOG(ERROR) << name_ << ": Create tensor info failed"; + return FAILED; + } + return SUCCESS; +} + +Status LayerNormInfo::InferAsLossDivisor() { + if (outputs_tensor_map_.size() != LAYER_NORM_INPUT_SIZE) { + MS_LOG(ERROR) << name_ << ": The size of outputs tensor map " << outputs_tensor_map_.size() << " is error"; + return FAILED; + } + as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(dev_matrix_shape_, outputs_tensor_map_[0]); + MS_LOG(INFO) << name_ << " : The dev matrix shape is " << ShapeToString(dev_matrix_shape_) + << ", the output[0]'s tensor map is " << ShapeToString(outputs_tensor_map_[0]) + << ", as_loss_divisor_ is " << as_loss_divisor_; + return SUCCESS; +} + +Status LayerNormInfo::SetCostUnderStrategy(const StrategyPtr &strategy) { + if (SetCostUnderStrategyBase(strategy) != SUCCESS) { + MS_LOG(ERROR) << name_ << " : Set cost failed"; + return FAILED; + } + return SUCCESS; +} + +Status LayerNormInfo::GenerateGammaAndBetaStrategies(const std::vector &sp_vector) { + if ((gamma_shape_.size() > input_shape_.size()) || (beta_shape_.size() > input_shape_.size())) { + MS_LOG(ERROR) << name_ << ": The dimension of gamma or beta is lager than input"; + return FAILED; + } + + size_t gamma_diff = input_shape_.size() - gamma_shape_.size(); + size_t beta_diff = input_shape_.size() - 
beta_shape_.size(); + for (auto &sp : sp_vector) { + if ((sp == nullptr) || sp->GetInputDim().empty()) { + MS_LOG(ERROR) << name_ << ": Invalid strategy"; + return FAILED; + } + std::vector tmp_strategy; + Dimensions input_strategy = sp->GetInputDim()[0]; + Dimensions gamma_strategy = input_strategy; + (void)gamma_strategy.erase(gamma_strategy.begin(), + gamma_strategy.begin() + static_cast(gamma_diff)); + Dimensions beta_strategy = input_strategy; + (void)beta_strategy.erase(beta_strategy.begin(), beta_strategy.begin() + static_cast(beta_diff)); + + // reset the strategy + tmp_strategy.push_back(input_strategy); + tmp_strategy.push_back(gamma_strategy); + tmp_strategy.push_back(beta_strategy); + sp->ResetInputs(tmp_strategy); + } + return SUCCESS; +} + +Status LayerNormInfo::GenerateStrategies(int32_t stage_id) { + if (InitShapes() != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Init shapes failed"; + return FAILED; + } + if (GetAttrs() != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Get attrs failed"; + return FAILED; + } + Shape input_split(input_shape_.size(), SPLIT_FLAG); + if (begin_norm_axis_ >= input_split.size()) { + MS_LOG(ERROR) << name_ << ": Invalid begin norm axis " << begin_norm_axis_; + return FAILED; + } + + // Can not split the dimensions from begin norm axis + for (size_t i = begin_norm_axis_; i < input_split.size(); ++i) { + input_split[i] = NO_SPLIT_FLAG; + } + + // Generate strategy for input + Shapes splittable_inputs = {input_split}; + Shapes tmp_inputs_shape = {input_shape_}; + std::vector sp_vector; + is_auto_parallel_ = true; + if (GenerateStrategiesForIndependentInputs(stage_id, tmp_inputs_shape, splittable_inputs, &sp_vector) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Generate input strategy failed"; + return FAILED; + } + + // Generate the strategies for gamma and beta + if (GenerateGammaAndBetaStrategies(sp_vector) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Generate gamma and beta strategies failed"; + return FAILED; + } + + size_t 
success = 0; + for (auto &sp : sp_vector) { + if (SetCostUnderStrategy(sp) == SUCCESS) { + success++; + MS_LOG(DEBUG) << name_ << ": Successfully generated " << success << " strategy"; + } + } + return SUCCESS; +} + +Status LayerNormInfo::InitShapes() { + if (inputs_shape_.size() != LAYER_NORM_INPUT_SIZE) { + MS_LOG(ERROR) << name_ << ": Invalid inputs size"; + return FAILED; + } + input_shape_ = inputs_shape_[LAYER_NORM_INPUT_INDEX]; + gamma_shape_ = inputs_shape_[LAYER_NORM_GAMMA_INDEX]; + beta_shape_ = inputs_shape_[LAYER_NORM_BETA_INDEX]; + return SUCCESS; +} + +Status LayerNormInfo::Init(const StrategyPtr &strategy) { + if ((InitShapes() != SUCCESS) || (InitWithAutoRepeatCalc(strategy)) != SUCCESS) { + MS_LOG(ERROR) << name_ << ": Init failed"; + return FAILED; + } + MS_LOG(INFO) << name_ << ": Init success"; + return SUCCESS; +} + +Status LayerNormInfo::InitForCostModel(const StrategyPtr &strategy) { + if ((InitShapes() != SUCCESS) || (InitForCostModelWithAutoRepeatCalc(strategy) != SUCCESS)) { + MS_LOG(ERROR) << name_ << ": Init for cost model failed"; + return FAILED; + } + + MS_LOG(INFO) << name_ << ": Init for cost model success"; + return SUCCESS; +} +} // namespace parallel +} // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/layer_norm_info.h b/mindspore/ccsrc/parallel/ops_info/layer_norm_info.h new file mode 100644 index 0000000000..c52645ade2 --- /dev/null +++ b/mindspore/ccsrc/parallel/ops_info/layer_norm_info.h @@ -0,0 +1,76 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_PARALLEL_OPS_INFO_LAYER_NORM_INFO_H_ +#define MINDSPORE_CCSRC_PARALLEL_OPS_INFO_LAYER_NORM_INFO_H_ + +#include +#include +#include +#include +#include "ir/value.h" +#include "parallel/auto_parallel/operator_costmodel.h" +#include "parallel/ops_info/operator_info.h" +#include "parallel/strategy.h" + +namespace mindspore { +namespace parallel { +constexpr size_t LAYER_NORM_INPUT_SIZE = 3; +constexpr size_t LAYER_NORM_INPUT_INDEX = 0; +constexpr size_t LAYER_NORM_GAMMA_INDEX = 1; +constexpr size_t LAYER_NORM_BETA_INDEX = 2; +constexpr char BEGIN_NORM_AXIS[] = "begin_norm_axis"; + +// The dimensions of input tensor starting from begin norm axis cannot be split. Other dimensions can be split +// arbitrarily. Gamma and beta should match input to meet the broadcast requirements of mul and add. 
+class LayerNormInfo : public OperatorInfo { + public: + LayerNormInfo(const std::string& operator_name, const Shapes& inputs_shape, const Shapes& outputs_shape, + const PrimitiveAttrs& attrs) + : OperatorInfo(operator_name, inputs_shape, outputs_shape, attrs, std::make_shared(true)), + begin_norm_axis_(0) {} + ~LayerNormInfo() override = default; + + Status Init(const StrategyPtr& strategy) override; + Status InitForCostModel(const StrategyPtr& strategy) override; + Status GenerateStrategies(int32_t) override; + Status SetCostUnderStrategy(const StrategyPtr&) override; + + protected: + Status GetAttrs() override; + Status CheckStrategy(const StrategyPtr& strategy) override; + Status InferMirrorOps() override; + Status InferForwardCommunication() override { return SUCCESS; } + Status InferTensorInfo() override; + Status InferDevMatrixShape() override; + Status InferTensorMap() override; + Status InferAsLossDivisor() override; + Status CreateTensorMap(size_t input_index); + Status CreateTensorInfo(size_t input_index); + Status CreateMirrorOp(size_t input_index); + Status GenerateGammaAndBetaStrategies(const std::vector& sp_vector); + Status InitShapes(); + + private: + size_t begin_norm_axis_; + Shape input_shape_; + Shape gamma_shape_; + Shape beta_shape_; +}; +} // namespace parallel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_LAYER_NORM_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h b/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h index 27b434ecca..aec25f7f41 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_info_head_files.h @@ -27,6 +27,7 @@ #include "parallel/ops_info/gather_v2_info.h" #include "parallel/ops_info/get_next_info.h" #include "parallel/ops_info/l2_normalize_info.h" +#include "parallel/ops_info/layer_norm_info.h" #include "parallel/ops_info/loss_info.h" #include "parallel/ops_info/matmul_info.h" #include 
"parallel/ops_info/onehot_info.h" diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index 88377d237b..50920e5954 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -26,6 +26,8 @@ constexpr int32_t PRELU_CHANNEL_INDEX = 1; constexpr int32_t PRELU_CHANNEL_STRATEGY = 1; constexpr int32_t NO_SPLIT_MAP = -1; constexpr int32_t NO_SPLIT_STRATEGY = 1; +constexpr int32_t SPLIT_FLAG = 1; +constexpr int32_t NO_SPLIT_FLAG = 0; constexpr size_t MATMUL_ATTRS_SIZE = 2; constexpr size_t MATMUL_INPUTS_SIZE = 2; constexpr size_t MATMUL_OUTPUTS_SIZE = 1; @@ -173,6 +175,7 @@ constexpr char ARGMINWITHVALUE[] = "ArgMinWithValue"; constexpr char CONV2D[] = "Conv2D"; constexpr char FUSE_BATCH_NORM[] = "FusedBatchNorm"; constexpr char BATCH_NORM[] = "BatchNorm"; +constexpr char LAYER_NORM[] = "LayerNorm"; constexpr char POOLING[] = "Pooling"; constexpr char CAST[] = "Cast"; constexpr char MAX_POOL_WITH_ARGMAX[] = "MaxPoolWithArgmax"; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index 33097bf2b7..5caf6573f2 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -82,6 +82,7 @@ std::vector splittable_op_ = {MATMUL, SIMPLE_MEAN, FLATTEN, BATCH_NORM, + LAYER_NORM, BIAS_ADD, ASSIGN_SUB, COS, diff --git a/tests/ut/cpp/parallel/tensor_layout/redistribution_layout_transfer_test.cc b/tests/ut/cpp/parallel/tensor_layout/redistribution_layout_transfer_test.cc index 4e34847582..5291e2f48d 100644 --- a/tests/ut/cpp/parallel/tensor_layout/redistribution_layout_transfer_test.cc +++ b/tests/ut/cpp/parallel/tensor_layout/redistribution_layout_transfer_test.cc @@ -245,8 +245,8 @@ void ValidRedistributionLayoutCheck(const DeviceArrangement& in_device_arrangeme unified_out_tensor_map, unified_tensor_shape); } -void ValidRedistributionLayoutCheckAll(const int32_t& 
device_pow_size, const int32_t& tensor_pow_size, - const int32_t& max_device_dim, const int32_t& max_shape_dim) { +void ValidRedistributionLayoutCheckAll(int32_t device_pow_size, int32_t tensor_pow_size, + int32_t max_device_dim, int32_t max_shape_dim) { std::vector> layout_list; GenerateValidLayoutByDeviceSizeAndTensorSize(device_pow_size, tensor_pow_size, max_device_dim, max_shape_dim, &layout_list); diff --git a/tests/ut/cpp/parallel/tensor_layout/reshape_layout_transfer_test.cc b/tests/ut/cpp/parallel/tensor_layout/reshape_layout_transfer_test.cc index 36b89684f6..9d6152721e 100644 --- a/tests/ut/cpp/parallel/tensor_layout/reshape_layout_transfer_test.cc +++ b/tests/ut/cpp/parallel/tensor_layout/reshape_layout_transfer_test.cc @@ -260,8 +260,8 @@ TEST_F(TestReshapeLayoutTransfer, ValidInferUnifiedLayoutCheck11) { ValidUnifiedLayoutCheck(device_arrangement, in_tensor_map, in_tensor_shape, out_tensor_map, out_tensor_shape); } -void ValidInferUnifiedLayoutCheckAll(const int32_t& device_pow_size, const int32_t& tensor_pow_size, - const int32_t& max_device_dim, const int32_t& max_shape_dim) { +void ValidInferUnifiedLayoutCheckAll(int32_t device_pow_size, int32_t tensor_pow_size, + int32_t max_device_dim, int32_t max_shape_dim) { std::vector> layout_list; GenerateValidLayoutByDeviceSizeAndTensorSize(device_pow_size, tensor_pow_size, max_device_dim, max_shape_dim, &layout_list); diff --git a/tests/ut/cpp/parallel/tensor_layout/util_layout_gen_test.cc b/tests/ut/cpp/parallel/tensor_layout/util_layout_gen_test.cc index 07d270c95c..93147c486b 100644 --- a/tests/ut/cpp/parallel/tensor_layout/util_layout_gen_test.cc +++ b/tests/ut/cpp/parallel/tensor_layout/util_layout_gen_test.cc @@ -51,7 +51,7 @@ std::vector> combine(const std::vector& in, int32_ return output; } -void GenerateValidShapeBySizeAndDim(const int32_t& pow_size, const int32_t& dim, +void GenerateValidShapeBySizeAndDim(int32_t pow_size, int32_t dim, std::vector>* out) { out->clear(); std::vector in; @@ -78,7 
+78,7 @@ void GenerateValidShapeBySizeAndDim(const int32_t& pow_size, const int32_t& dim, return; } -void GenerateValidShapeBySize(const int32_t& pow_size, std::vector>* out) { +void GenerateValidShapeBySize(int32_t pow_size, std::vector>* out) { out->clear(); for (int32_t dim = 1; dim <= pow_size; dim++) { std::vector> combine_result; @@ -148,8 +148,8 @@ void GenerateValidTensorMap(const std::vector& device_arrangement, cons } void GenerateValidLayoutByDeviceSizeAndTensorSize( - const int32_t& device_pow_size, const int32_t& tensor_pow_size, const int32_t& max_device_dim, - const int32_t& max_shape_dim, + int32_t device_pow_size, int32_t tensor_pow_size, int32_t max_device_dim, + int32_t max_shape_dim, std::vector, std::vector, std::vector>>* layout_list) { layout_list->clear(); std::vector> device_arrangement_list; diff --git a/tests/ut/cpp/parallel/tensor_layout/util_layout_gen_test.h b/tests/ut/cpp/parallel/tensor_layout/util_layout_gen_test.h index e14556378f..a359cadbea 100644 --- a/tests/ut/cpp/parallel/tensor_layout/util_layout_gen_test.h +++ b/tests/ut/cpp/parallel/tensor_layout/util_layout_gen_test.h @@ -27,10 +27,10 @@ namespace parallel { std::vector> combine(const std::vector& in, int32_t target); -void GenerateValidShapeBySizeAndDim(const int32_t& pow_size, const int32_t& dim, +void GenerateValidShapeBySizeAndDim(int32_t pow_size, int32_t dim, std::vector>* out); -void GenerateValidShapeBySize(const int32_t& pow_size, std::vector>* out); +void GenerateValidShapeBySize(int32_t pow_size, std::vector>* out); std::vector GenerateTensorMap(const uint32_t& map_size, const std::vector& pos_index, const std::vector& pos_value); @@ -39,8 +39,8 @@ void GenerateValidTensorMap(const std::vector& device_arrangement, cons std::vector>* tensor_map_list); void GenerateValidLayoutByDeviceSizeAndTensorSize( - const int32_t& device_pow_size, const int32_t& tensor_pow_size, const int32_t& max_device_dim, - const int32_t& max_shape_dim, + int32_t device_pow_size, int32_t 
tensor_pow_size, int32_t max_device_dim, + int32_t max_shape_dim, std::vector, std::vector, std::vector>>* layout_list); uint32_t ComputeNoneNumber(const std::vector& tensor_map); diff --git a/tests/ut/python/parallel/test_layer_norm.py b/tests/ut/python/parallel/test_layer_norm.py new file mode 100644 index 0000000000..c65ee5fc8e --- /dev/null +++ b/tests/ut/python/parallel/test_layer_norm.py @@ -0,0 +1,96 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +import mindspore as ms +from mindspore import context, Tensor, Parameter +from mindspore.nn import Cell, TrainOneStepCell, Momentum +from mindspore.ops import operations as P +from mindspore.common.api import _executor +from mindspore.common.initializer import initializer + + +class Net(Cell): + def __init__(self, mul_weight, strategy1=None, strategy2=None, strategy3=None): + super().__init__() + self.begin_norm_axis = -1 + self.begin_params_axis = 1 + self.mul = P.Mul().set_strategy(strategy1) + self.layer_norm = P.LayerNorm(self.begin_norm_axis, self.begin_params_axis).set_strategy(strategy2) + self.mul2 = P.Mul().set_strategy(strategy3) + self.mul_weight = Parameter(mul_weight, "w1") + self.normalized_shape = [64, 32, 16] + self.gamma = Parameter(initializer('ones', self.normalized_shape), name="gamma") + self.beta = Parameter(initializer('zeros', self.normalized_shape), name="beta") + + def construct(self, x, b): + out = self.mul(x, self.mul_weight) + out, _, _ = self.layer_norm(out, self.gamma, self.beta) + out = self.mul2(out, b) + return out + + +_x = Tensor(np.ones([128, 64, 32, 16]), dtype=ms.float32) +_w = Tensor(np.ones([128, 64, 32, 16]), dtype=ms.float32) +_b = Tensor(np.ones([128, 64, 32, 16]), dtype=ms.float32) + + +def compile(net): + optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) + train_net = TrainOneStepCell(net, optimizer) + _executor.compile(train_net, _x, _b) + context.reset_auto_parallel_context() + + +def test_layer_norm_data_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((16, 1, 1, 1), (16, 1, 1, 1)) + strategy2 = ((16, 1, 1, 1), (1, 1, 1), (1, 1, 1)) + strategy3 = ((16, 1, 1, 1), (16, 1, 1, 1)) + net = Net(_w, strategy1, strategy2, strategy3) + compile(net) + + +def test_layer_norm_model_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + 
strategy1 = ((1, 1, 16, 1), (1, 1, 16, 1)) + strategy2 = ((1, 1, 16, 1), (1, 16, 1), (1, 16, 1)) + strategy3 = ((1, 1, 16, 1), (1, 1, 16, 1)) + net = Net(_w, strategy1, strategy2, strategy3) + compile(net) + + +def test_layer_norm_hybrid_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4, 1), (2, 2, 4, 1)) + strategy2 = ((2, 2, 4, 1), (2, 4, 1), (2, 4, 1)) + strategy3 = ((2, 2, 4, 1), (2, 2, 4, 1)) + net = Net(_w, strategy1, strategy2, strategy3) + compile(net) + + +def test_layer_norm_auto_parallel(): + context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0) + net = Net(_w) + compile(net) + + +def test_layer_norm_repeat_calc(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4, 1), (2, 2, 4, 1)) + strategy2 = ((1, 2, 2, 1), (2, 2, 1), (2, 2, 1)) + strategy3 = ((2, 2, 4, 1), (2, 2, 4, 1)) + net = Net(_w, strategy1, strategy2, strategy3) + compile(net) + From 3e05f50f5fa9ae89c6d2cdcc4a3669a3fc33f455 Mon Sep 17 00:00:00 2001 From: laiyongqiang Date: Thu, 9 Apr 2020 09:47:14 +0800 Subject: [PATCH 263/367] getnext_memcpy_elimination --- .../ascend/ascend_backend_optimization.cc | 7 +- .../enhancer/getnext_memcpy_elimination.cc | 72 +++++++++++ .../enhancer/getnext_memcpy_elimination.h | 33 +++++ .../ir_fusion => pass}/allreduce_fusion.cc | 2 +- .../ir_fusion => pass}/allreduce_fusion.h | 6 +- mindspore/ccsrc/session/gpu_session.cc | 2 +- mindspore/ccsrc/utils/utils.h | 1 + .../enhancer/getnext_memcpy_elimination.cc | 98 +++++++++++++++ .../common/ir_fusion/allreduce_fusion_test.cc | 2 +- .../getnext_memcpy_elimination_test.py | 117 ++++++++++++++++++ 10 files changed, 333 insertions(+), 7 deletions(-) create mode 100644 mindspore/ccsrc/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc create mode 100644 
mindspore/ccsrc/pre_activate/ascend/enhancer/getnext_memcpy_elimination.h rename mindspore/ccsrc/pre_activate/{common/ir_fusion => pass}/allreduce_fusion.cc (97%) rename mindspore/ccsrc/pre_activate/{common/ir_fusion => pass}/allreduce_fusion.h (86%) create mode 100644 tests/ut/cpp/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc create mode 100644 tests/ut/cpp/python_input/gtest_input/pre_activate/getnext_memcpy_elimination_test.py diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc index 60ae2cd6cc..394c5ce281 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc @@ -21,7 +21,7 @@ #include "pre_activate/ascend/ir_fission/bn_grad_split.h" #include "pre_activate/ascend/ir_fusion/fused_batch_norm_fusion.h" #include "pre_activate/ascend/ir_fission/layer_norm_grad_split.h" -#include "pre_activate/common/ir_fusion/allreduce_fusion.h" +#include "pre_activate/pass/allreduce_fusion.h" #include "pre_activate/ascend/ir_fusion/square_sum_fusion.h" #include "pre_activate/ascend/ir_fusion/clip_by_norm_no_div_square_sum_fusion.h" #include "pre_activate/ascend/ir_fusion/lamb_update_with_lr_rule_fusion.h" @@ -58,8 +58,10 @@ #include "pre_activate/ascend/ir_fission/add_memcpy_async.h" #include "pre_activate/ascend/format_type/insert_cast_for_runop.h" #include "pre_activate/ascend/format_type/insert_transdata_for_runop.h" +#include "pre_activate/ascend/enhancer/getnext_memcpy_elimination.h" #include "pre_activate/ascend/ir_fission/addn_fission.h" #include "utils/context/ms_context.h" +#include "utils/config_manager.h" #include "debug/anf_ir_dump.h" #include "debug/anf_ir_utils.h" @@ -244,6 +246,9 @@ void AscendBackendOptimization(const std::shared_ptr &kern other_pm->AddPass(std::make_shared()); other_pm->AddPass(std::make_shared()); other_pm->AddPass(std::make_shared()); + if 
(context_ptr->enable_task_sink() && context_ptr->loop_sink_flag() && ConfigManager::GetInstance().iter_num() > 1) { + other_pm->AddPass(std::make_shared()); + } other_pm->AddPass(std::make_shared()); optimizer->AddPassManager(other_pm); (void)optimizer->Optimize(kernel_graph); diff --git a/mindspore/ccsrc/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc b/mindspore/ccsrc/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc new file mode 100644 index 0000000000..a39918ecee --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc @@ -0,0 +1,72 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "pre_activate/ascend/enhancer/getnext_memcpy_elimination.h" +#include +#include "session/anf_runtime_algorithm.h" +#include "optimizer/opt.h" + +namespace mindspore::opt { + +const BaseRef GetnextMemcpyElimination::DefinePattern() const { + auto prim_memcpy = std::make_shared(kMemCpyAsyncOpName); + VarPtr x = std::make_shared(); + VectorRef memcpy_async({prim_memcpy, x}); + return memcpy_async; +} + +const AnfNodePtr GetnextMemcpyElimination::Process(const FuncGraphPtr &graph, const AnfNodePtr &node, + const EquivPtr &equiv) const { + if (graph == nullptr || node == nullptr || equiv == nullptr) { + return nullptr; + } + auto memcpy_cnode = node->cast(); + if (memcpy_cnode == nullptr) { + return nullptr; + } + + // 1. 
memcpy has attr kAttrLabelForInsertStreamActive + if (!AnfAlgo::HasNodeAttr(kAttrLabelForInsertStreamActive, node)) { + MS_LOG(DEBUG) << "node has no label_for_insert_stream_active attr"; + return nullptr; + } + + // 2. memcpy's output has only one user next_node + auto manager = graph->manager(); + MS_EXCEPTION_IF_NULL(manager); + if (manager->node_users().find(memcpy_cnode) == manager->node_users().end()) { + MS_LOG(EXCEPTION) << "memcpy has no output in manager"; + } + auto next_nodes = manager->node_users()[memcpy_cnode]; + if (next_nodes.size() > 1) { + MS_LOG(DEBUG) << "node's output has more than one users"; + return nullptr; + } + + // 3. next_node has only one input which is memcpy's output + for (auto &item : next_nodes) { + auto next_node = item.first->cast(); + if (next_node->inputs().size() != 2) { + MS_LOG(DEBUG) << "next node has more than one input"; + return nullptr; + } + // add attr label_for_insert_stream_active for next_node + AnfAlgo::SetNodeAttr(kAttrLabelForInsertStreamActive, MakeValue(true), next_node); + } + + return memcpy_cnode->input(1); +} +} // namespace mindspore::opt diff --git a/mindspore/ccsrc/pre_activate/ascend/enhancer/getnext_memcpy_elimination.h b/mindspore/ccsrc/pre_activate/ascend/enhancer/getnext_memcpy_elimination.h new file mode 100644 index 0000000000..523fc87a38 --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/enhancer/getnext_memcpy_elimination.h @@ -0,0 +1,33 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_ENHANCER_GETNEXT_MEMCPY_ELIMINATION_H +#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_ENHANCER_GETNEXT_MEMCPY_ELIMINATION_H + +#include "pre_activate/common/optimizer.h" + +namespace mindspore { +namespace opt { +class GetnextMemcpyElimination : public PatternProcessPass { + public: + explicit GetnextMemcpyElimination(bool multigraph = true) + : PatternProcessPass("getnext_memcpy_elimination", multigraph) {} + ~GetnextMemcpyElimination() override = default; + const BaseRef DefinePattern() const override; + const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; +}; +} // namespace opt +} // namespace mindspore +#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_ENHANCER_GETNEXT_MEMCPY_ELIMINATION_H diff --git a/mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.cc b/mindspore/ccsrc/pre_activate/pass/allreduce_fusion.cc similarity index 97% rename from mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.cc rename to mindspore/ccsrc/pre_activate/pass/allreduce_fusion.cc index 55efcf9058..70a8974eca 100644 --- a/mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.cc +++ b/mindspore/ccsrc/pre_activate/pass/allreduce_fusion.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "pre_activate/common/ir_fusion/allreduce_fusion.h" +#include "pre_activate/pass/allreduce_fusion.h" #include #include diff --git a/mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.h b/mindspore/ccsrc/pre_activate/pass/allreduce_fusion.h similarity index 86% rename from mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.h rename to mindspore/ccsrc/pre_activate/pass/allreduce_fusion.h index b49b8373c6..e443767e43 100644 --- a/mindspore/ccsrc/pre_activate/common/ir_fusion/allreduce_fusion.h +++ b/mindspore/ccsrc/pre_activate/pass/allreduce_fusion.h @@ -13,8 +13,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_COMMON_IR_FUSION_ALLREDUCE_FUSION_H_ -#define MINDSPORE_CCSRC_PRE_ACTIVATE_COMMON_IR_FUSION_ALLREDUCE_FUSION_H_ +#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_PASS_ALLREDUCE_FUSION_H_ +#define MINDSPORE_CCSRC_PRE_ACTIVATE_PASS_ALLREDUCE_FUSION_H_ #include #include "pre_activate/common/pass.h" @@ -46,4 +46,4 @@ class AllReduceFusion : public Pass { }; } // namespace opt } // namespace mindspore -#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_COMMON_IR_FUSION_ALLREDUCE_FUSION_H_ +#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_PASS_ALLREDUCE_FUSION_H_ diff --git a/mindspore/ccsrc/session/gpu_session.cc b/mindspore/ccsrc/session/gpu_session.cc index c0b2323e04..c6ab1e4664 100644 --- a/mindspore/ccsrc/session/gpu_session.cc +++ b/mindspore/ccsrc/session/gpu_session.cc @@ -20,7 +20,7 @@ #include "device/gpu/gpu_stream_assign.h" #include "pre_activate/common/optimizer.h" #include "pre_activate/common/pass_manager.h" -#include "pre_activate/common/ir_fusion/allreduce_fusion.h" +#include "pre_activate/pass/allreduce_fusion.h" #include "device/kernel_runtime_manager.h" #include "predict/predict.h" #include "common/utils.h" diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index 08a98a3129..2859b5613f 100644 --- 
a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -147,6 +147,7 @@ constexpr auto kAttrSrcFormat = "src_format"; constexpr auto kAttrOutputUsedNum = "output_used_num"; constexpr auto kAttrHasBias = "has_bias"; constexpr auto kAttrN = "N"; +constexpr auto kAttrLabelForInsertStreamActive = "label_for_insert_stream_active"; // attr value constexpr auto kValueTargetSwitch = "target_switch"; diff --git a/tests/ut/cpp/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc b/tests/ut/cpp/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc new file mode 100644 index 0000000000..93885a4b3a --- /dev/null +++ b/tests/ut/cpp/pre_activate/ascend/enhancer/getnext_memcpy_elimination.cc @@ -0,0 +1,98 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/backend_common_test.h" +#include "common/py_func_graph_fetcher.h" +#include "session/anf_runtime_algorithm.h" +#include "operator/ops.h" +#include "ir/meta_tensor.h" +#include "debug/anf_ir_dump.h" +#include "utils/utils.h" +#include "kernel/kernel_build_info.h" +#include "pre_activate/common/optimizer.h" +#include "mindspore/ccsrc/pre_activate/ascend/enhancer/getnext_memcpy_elimination.h" + +namespace mindspore { +namespace opt { +class TestGetNextMemcpyElimination : public BackendCommon { + public: + TestGetNextMemcpyElimination() : get_py_fun_("gtest_input.pre_activate.getnext_memcpy_elimination_test", true) {} + + public: + UT::PyFuncGraphFetcher get_py_fun_; +}; + +TEST_F(TestGetNextMemcpyElimination, test_getnext_memcpy_elimination) { + FuncGraphPtr g_before = get_py_fun_.CallAndParseRet("test_getnext_memcpy_elimination", "before"); + ASSERT_TRUE(g_before != nullptr); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + auto pass = std::make_shared(); + pm->AddPass(pass); + optimizer->AddPassManager(pm); + auto new_graph = optimizer->Optimize(g_before); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_getnext_memcpy_elimination", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} + +TEST_F(TestGetNextMemcpyElimination, test_getnext_memcpy_elimination_no_attr) { + FuncGraphPtr g_before = get_py_fun_.CallAndParseRet("test_getnext_memcpy_elimination_no_attr", "before"); + ASSERT_TRUE(g_before != nullptr); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + auto pass = std::make_shared(); + pm->AddPass(pass); + optimizer->AddPassManager(pm); + auto new_graph = optimizer->Optimize(g_before); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_getnext_memcpy_elimination_no_attr", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} + +TEST_F(TestGetNextMemcpyElimination, test_getnext_memcpy_elimination_memcpy_multi_users) { + FuncGraphPtr g_before = 
get_py_fun_.CallAndParseRet("test_getnext_memcpy_elimination_memcpy_multi_users", "before"); + ASSERT_TRUE(g_before != nullptr); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + auto pass = std::make_shared(); + pm->AddPass(pass); + optimizer->AddPassManager(pm); + auto new_graph = optimizer->Optimize(g_before); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_getnext_memcpy_elimination_memcpy_multi_users", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} + +TEST_F(TestGetNextMemcpyElimination, test_getnext_memcpy_elimination_next_multi_inputs) { + FuncGraphPtr g_before = get_py_fun_.CallAndParseRet("test_getnext_memcpy_elimination_next_multi_inputs", "before"); + ASSERT_TRUE(g_before != nullptr); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + auto pass = std::make_shared(); + pm->AddPass(pass); + optimizer->AddPassManager(pm); + auto new_graph = optimizer->Optimize(g_before); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_getnext_memcpy_elimination_next_multi_inputs", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} + +} // namespace opt +} // namespace mindspore diff --git a/tests/ut/cpp/pre_activate/common/ir_fusion/allreduce_fusion_test.cc b/tests/ut/cpp/pre_activate/common/ir_fusion/allreduce_fusion_test.cc index 79a1cf1a8a..d5f2fa636d 100644 --- a/tests/ut/cpp/pre_activate/common/ir_fusion/allreduce_fusion_test.cc +++ b/tests/ut/cpp/pre_activate/common/ir_fusion/allreduce_fusion_test.cc @@ -20,7 +20,7 @@ #include "ir/manager.h" #include "debug/anf_ir_dump.h" #include "session/anf_runtime_algorithm.h" -#include "pre_activate/common/ir_fusion/allreduce_fusion.h" +#include "pre_activate/pass/allreduce_fusion.h" #include "pre_activate/common/optimizer.h" #include "device/kernel_info.h" #include "pre_activate/common/pass_manager.h" diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/getnext_memcpy_elimination_test.py 
b/tests/ut/cpp/python_input/gtest_input/pre_activate/getnext_memcpy_elimination_test.py new file mode 100644 index 0000000000..39b60d72d6 --- /dev/null +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/getnext_memcpy_elimination_test.py @@ -0,0 +1,117 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +from mindspore.ops import operations as P +from mindspore.ops import Primitive +import mindspore as ms + +get_next = P.GetNext([ms.float32], [[1, 64, 112, 112]], 1, "") +memcpy_async_attr = Primitive('memcpy_async') +memcpy_async_attr.add_prim_attr("label_for_insert_stream_active", True) +memcpy_async = Primitive('memcpy_async') +cast = P.Cast() +add = P.TensorAdd() + + +class FnDict: + def __init__(self): + self.fnDict = {} + + def __call__(self, fn): + self.fnDict[fn.__name__] = fn + + def __getitem__(self, name): + return self.fnDict[name] + + +def test_getnext_memcpy_elimination(tag): + fns = FnDict() + + @fns + def before(x): + res = get_next() + res = memcpy_async_attr(res) + res = cast(res) + return res + + @fns + def after(x): + res = get_next() + res = cast(res) + return res + + return fns[tag] + + +def test_getnext_memcpy_elimination_no_attr(tag): + fns = FnDict() + + @fns + def before(x): + res = get_next() + res = memcpy_async(res) + res = cast(res) + return res + + @fns + def after(x): + res = get_next() + res = 
memcpy_async(res) + res = cast(res) + return res + + return fns[tag] + + +def test_getnext_memcpy_elimination_memcpy_multi_users(tag): + fns = FnDict() + + @fns + def before(x): + res = get_next() + memcpy_out = memcpy_async_attr(res) + res = cast(memcpy_out) + res = add(memcpy_out, res) + return res + + @fns + def after(x): + res = get_next() + memcpy_out = memcpy_async_attr(res) + res = cast(memcpy_out) + res = add(memcpy_out, res) + return res + + return fns[tag] + + +def test_getnext_memcpy_elimination_next_multi_inputs(tag): + fns = FnDict() + + @fns + def before(x): + res = get_next() + memcpy_out = memcpy_async_attr(res) + res = add(memcpy_out, res) + return res + + @fns + def after(x): + res = get_next() + memcpy_out = memcpy_async_attr(res) + res = add(memcpy_out, res) + return res + + return fns[tag] From ffe8b5d3ecb02bfca90b933858356cabca2348cc Mon Sep 17 00:00:00 2001 From: lvliang Date: Mon, 13 Apr 2020 19:58:50 +0800 Subject: [PATCH 264/367] pynative-add-op-supported --- mindspore/ccsrc/pre_activate/common/helper.cc | 57 +++++ mindspore/ccsrc/pre_activate/common/helper.h | 5 + .../pass/const_input_to_attr_registry.cc | 34 +++ .../pass/const_input_to_attr_registry.h | 2 +- .../pass/convert_const_input_to_attr.cc | 32 --- .../pass/convert_const_input_to_attr.h | 5 +- .../convert_const_input_to_tensor_input.cc | 58 +---- mindspore/ccsrc/pynative/pynative_execute.cc | 5 +- mindspore/ccsrc/session/ascend_session.cc | 12 +- mindspore/ccsrc/session/ascend_session.h | 6 +- mindspore/ccsrc/session/gpu_session.cc | 12 +- mindspore/ccsrc/session/gpu_session.h | 6 +- mindspore/ccsrc/session/session_basic.cc | 204 ++++++++++-------- mindspore/ccsrc/session/session_basic.h | 12 +- 14 files changed, 240 insertions(+), 210 deletions(-) diff --git a/mindspore/ccsrc/pre_activate/common/helper.cc b/mindspore/ccsrc/pre_activate/common/helper.cc index e5f0fafbe0..3b7d95a6f2 100644 --- a/mindspore/ccsrc/pre_activate/common/helper.cc +++ 
b/mindspore/ccsrc/pre_activate/common/helper.cc @@ -28,6 +28,7 @@ namespace mindspore { namespace opt { +constexpr size_t kType32Len = 4; std::vector Convert2Int(const std::vector &v) { std::vector result; (void)std::transform(v.begin(), v.end(), std::back_inserter(result), SizeToInt); @@ -264,6 +265,62 @@ void CreateMultipleOutputsOfAnfNode(const FuncGraphPtr &func_graph, const AnfNod } } +template +tensor::TensorPtr CreateTensorWithValueTuple(const ValueTuplePtr &value_tuple_ptr, const TypePtr &type_ptr, + size_t data_length) { + MS_EXCEPTION_IF_NULL(value_tuple_ptr); + MS_EXCEPTION_IF_NULL(type_ptr); + std::vector values; + for (const auto &v : value_tuple_ptr->value()) { + MS_EXCEPTION_IF_NULL(v); + if (v->isa()) { + ScalarPtr scalar = v->cast(); + values.push_back(GetValue(scalar)); + } else { + MS_LOG(WARNING) << "The value " << v << "of tuple is not a scalar"; + return nullptr; + } + } + std::vector tensor_shape = {SizeToInt(values.size())}; + tensor::TensorPtr tensor = std::make_shared(type_ptr->type_id(), tensor_shape); + MS_EXCEPTION_IF_NULL(tensor); + tensor::DeviceInfo device_info{kOpFormat_DEFAULT, type_ptr}; + tensor->set_device_info(device_info); + auto data_ptr = tensor->data_c(true); + MS_EXCEPTION_IF_NULL(data_ptr); + auto elem_num = values.size() * data_length; + auto ret_code = memcpy_s(data_ptr, static_cast(tensor->data().nbytes()), values.data(), elem_num); + if (ret_code != 0) { + MS_LOG(EXCEPTION) << "Failed to copy data into Tensor."; + } + return tensor; +} + +tensor::TensorPtr CreateTupleTensor(const ValueTuplePtr &value_tuple) { + MS_EXCEPTION_IF_NULL(value_tuple); + tensor::TensorPtr tensor = nullptr; + ValuePtr v = *(value_tuple->value().begin()); + MS_EXCEPTION_IF_NULL(v); + // Currently we only deal with the scalar tuple + if (!v->isa()) { + MS_LOG(WARNING) << "The value " << v << "of tuple is not a scalar"; + return nullptr; + } + ScalarPtr scalar = v->cast(); + MS_EXCEPTION_IF_NULL(scalar); + if (scalar->isa()) { + tensor = 
CreateTensorWithValueTuple(value_tuple, kInt32, kType32Len); + } else if (scalar->isa()) { + tensor = CreateTensorWithValueTuple(value_tuple, kFloat32, kType32Len); + } else { + auto type = scalar->type(); + auto type_str = (type == nullptr) ? "nullptr" : type->ToString(); + MS_LOG(ERROR) << "Invalid scalar type: " << type_str; + return nullptr; + } + return tensor; +} + bool IsNopNode(const AnfNodePtr &node) { auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); diff --git a/mindspore/ccsrc/pre_activate/common/helper.h b/mindspore/ccsrc/pre_activate/common/helper.h index 4f30a935af..8d174a1ad0 100644 --- a/mindspore/ccsrc/pre_activate/common/helper.h +++ b/mindspore/ccsrc/pre_activate/common/helper.h @@ -135,6 +135,11 @@ void CreateOutputsOfFusedBn3(const FuncGraphPtr &graph, const AnfNodePtr &data_i void CreateMultipleOutputsOfAnfNode(const FuncGraphPtr &kernel_graph, const AnfNodePtr &anf_node_ptr, size_t output_num, std::vector *outputs); +tensor::TensorPtr CreateTensorWithValueTuple(const ValueTuplePtr &value_tuple_ptr, const TypePtr &type_ptr, + size_t data_length); + +tensor::TensorPtr CreateTupleTensor(const ValueTuplePtr &value_tuple); + bool IsNopNode(const AnfNodePtr &node); void HideNopNode(session::KernelGraph *const graph); diff --git a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc index 42d373392c..88edfd3019 100644 --- a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc +++ b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc @@ -17,10 +17,44 @@ #include +#include "utils/utils.h" #include "utils/log_adapter.h" +#include "operator/ops.h" namespace mindspore { namespace opt { +ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() { + Register(prim::kPrimCast->name(), {1}); + Register(prim::kPrimConv2DBackpropInput->name(), {2}); + Register(prim::kPrimConv2DBackpropFilter->name(), {2}); + 
Register(prim::kPrimReshape->name(), {1}); + Register(prim::kPrimReduceMax->name(), {1}); + Register(prim::kPrimReduceMin->name(), {1}); + Register(prim::kPrimReduceSum->name(), {1}); + Register(prim::kPrimReduceMean->name(), {1}); + Register(prim::kPrimGatherV2->name(), {2}); + Register(prim::kPrimTranspose->name(), {1}); + Register(prim::kPrimUnsortedSegmentSum->name(), {2}); + Register(prim::kPrimOneHot->name(), {1}); + Register(kUnsortedSegmentProdOpName, {2}); + Register(kUnsortedSegmentMinOpName, {2}); + Register(kSimpleMeanGradOpName, {1}); + Register(kMeanGradOpName, {1}); + Register(kSliceOpName, {1, 2}); + Register(kSliceGradOpName, {2, 3}); + Register(kTileOpName, {1}); + Register(kScatterNdOpName, {2}); + Register(kStridedSliceAssignOpName, {1, 2, 3}); + Register(kStridedSliceOpName, {1, 2, 3}); + Register(kStridedSliceGradOpName, {1, 2, 3, 4}); + Register(kFlattenGradOpName, {1}); + Register(kExpandDimsOpName, {1}); + Register(kSplitOpName, {0}); + Register(kTopKOpName, {1}); + Register(kSparseApplyAdagradOpName, {2}); + Register(kResizeNearestNeighborGrad, {1}); +} + ConstInputToAttrInfoRegistry &ConstInputToAttrInfoRegistry::Instance() { static ConstInputToAttrInfoRegistry instance; return instance; diff --git a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.h b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.h index 48007929fb..bd6cac1322 100644 --- a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.h +++ b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.h @@ -54,7 +54,7 @@ class ConstInputToAttrInfoRegistry { bool GetRegisterByOpName(const std::string &op_name, ConstInputToAttrInfoRegister *reg) const; private: - ConstInputToAttrInfoRegistry() = default; + ConstInputToAttrInfoRegistry(); ~ConstInputToAttrInfoRegistry() = default; DISABLE_COPY_AND_ASSIGN(ConstInputToAttrInfoRegistry) std::unordered_map op_input_to_attr_map_; diff --git 
a/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_attr.cc b/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_attr.cc index 2bef0d36ca..15d62a164f 100644 --- a/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_attr.cc +++ b/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_attr.cc @@ -87,37 +87,5 @@ const AnfNodePtr ConvertConstInputToAttr::Process(const FuncGraphPtr &, const An ConstInputToAttr(cnode, reg.GetConstInputAttrInfo()); return cnode; } - -void ConvertConstInputToAttr::Init() { - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimCast->name(), {1}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimConv2DBackpropInput->name(), {2}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimConv2DBackpropFilter->name(), {2}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimReshape->name(), {1}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimReduceMax->name(), {1}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimReduceMin->name(), {1}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimReduceSum->name(), {1}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimReduceMean->name(), {1}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimGatherV2->name(), {2}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimTranspose->name(), {1}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimUnsortedSegmentSum->name(), {2}); - ConstInputToAttrInfoRegistry::Instance().Register(prim::kPrimOneHot->name(), {1}); - ConstInputToAttrInfoRegistry::Instance().Register(kUnsortedSegmentProdOpName, {2}); - ConstInputToAttrInfoRegistry::Instance().Register(kUnsortedSegmentMinOpName, {2}); - ConstInputToAttrInfoRegistry::Instance().Register(kSimpleMeanGradOpName, {1}); - ConstInputToAttrInfoRegistry::Instance().Register(kMeanGradOpName, {1}); - ConstInputToAttrInfoRegistry::Instance().Register(kSliceOpName, {1, 
2}); - ConstInputToAttrInfoRegistry::Instance().Register(kSliceGradOpName, {2, 3}); - ConstInputToAttrInfoRegistry::Instance().Register(kTileOpName, {1}); - ConstInputToAttrInfoRegistry::Instance().Register(kScatterNdOpName, {2}); - ConstInputToAttrInfoRegistry::Instance().Register(kStridedSliceAssignOpName, {1, 2, 3}); - ConstInputToAttrInfoRegistry::Instance().Register(kStridedSliceOpName, {1, 2, 3}); - ConstInputToAttrInfoRegistry::Instance().Register(kStridedSliceGradOpName, {1, 2, 3, 4}); - ConstInputToAttrInfoRegistry::Instance().Register(kFlattenGradOpName, {1}); - ConstInputToAttrInfoRegistry::Instance().Register(kExpandDimsOpName, {1}); - ConstInputToAttrInfoRegistry::Instance().Register(kSplitOpName, {0}); - ConstInputToAttrInfoRegistry::Instance().Register(kTopKOpName, {1}); - ConstInputToAttrInfoRegistry::Instance().Register(kSparseApplyAdagradOpName, {2}); - ConstInputToAttrInfoRegistry::Instance().Register(kResizeNearestNeighborGrad, {1}); -} } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_attr.h b/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_attr.h index 54caa1633c..e124ff8cf4 100644 --- a/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_attr.h +++ b/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_attr.h @@ -27,14 +27,11 @@ namespace opt { class ConvertConstInputToAttr : public PatternProcessPass { public: explicit ConvertConstInputToAttr(bool multigraph = true) - : PatternProcessPass("convert_const_input_to_attr", multigraph) { - Init(); - } + : PatternProcessPass("convert_const_input_to_attr", multigraph) {} ~ConvertConstInputToAttr() override = default; const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; private: - void Init(); std::unordered_map> op_input_attr_map_; }; } // namespace opt diff --git a/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_tensor_input.cc 
b/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_tensor_input.cc index 431a67792d..56be2e273d 100644 --- a/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_tensor_input.cc +++ b/mindspore/ccsrc/pre_activate/pass/convert_const_input_to_tensor_input.cc @@ -19,69 +19,13 @@ #include #include "utils/graph_utils.h" +#include "pre_activate/common/helper.h" #include "session/anf_runtime_algorithm.h" #include "session/kernel_graph.h" namespace mindspore { namespace opt { namespace { -constexpr size_t kType32Len = 4; -template -tensor::TensorPtr CreateTensorWithValueTuple(const ValueTuplePtr &value_tuple_ptr, const TypePtr &type_ptr, - size_t data_length) { - MS_EXCEPTION_IF_NULL(value_tuple_ptr); - MS_EXCEPTION_IF_NULL(type_ptr); - std::vector values; - for (const auto &v : value_tuple_ptr->value()) { - MS_EXCEPTION_IF_NULL(v); - if (v->isa()) { - ScalarPtr scalar = v->cast(); - values.push_back(GetValue(scalar)); - } else { - MS_LOG(WARNING) << "The value " << v << "of tuple is not a scalar"; - return nullptr; - } - } - std::vector tensor_shape = {SizeToInt(values.size())}; - tensor::TensorPtr tensor = std::make_shared(type_ptr->type_id(), tensor_shape); - MS_EXCEPTION_IF_NULL(tensor); - tensor::DeviceInfo device_info{kOpFormat_DEFAULT, type_ptr}; - tensor->set_device_info(device_info); - auto data_ptr = tensor->data_c(true); - MS_EXCEPTION_IF_NULL(data_ptr); - auto elem_num = values.size() * data_length; - auto ret_code = memcpy_s(data_ptr, static_cast(tensor->data().nbytes()), values.data(), elem_num); - if (ret_code != 0) { - MS_LOG(EXCEPTION) << "Failed to copy data into Tensor."; - } - return tensor; -} - -tensor::TensorPtr CreateTupleTensor(const ValueTuplePtr &value_tuple) { - MS_EXCEPTION_IF_NULL(value_tuple); - tensor::TensorPtr tensor = nullptr; - ValuePtr v = *(value_tuple->value().begin()); - MS_EXCEPTION_IF_NULL(v); - // Currently we only deal with the scalar tuple - if (!v->isa()) { - MS_LOG(WARNING) << "The value " << v << "of tuple is not 
a scalar"; - return nullptr; - } - ScalarPtr scalar = v->cast(); - MS_EXCEPTION_IF_NULL(scalar); - if (scalar->isa()) { - tensor = CreateTensorWithValueTuple(value_tuple, kInt32, kType32Len); - } else if (scalar->isa()) { - tensor = CreateTensorWithValueTuple(value_tuple, kFloat32, kType32Len); - } else { - auto type = scalar->type(); - auto type_str = (type == nullptr) ? "nullptr" : type->ToString(); - MS_LOG(ERROR) << "Invalid scalar type: " << type_str; - return nullptr; - } - return tensor; -} - AnfNodePtr CreateTensorInput(const KernelGraphPtr &kernel_graph, const AnfNodePtr &input_node) { MS_EXCEPTION_IF_NULL(input_node); auto value_node = input_node->cast(); diff --git a/mindspore/ccsrc/pynative/pynative_execute.cc b/mindspore/ccsrc/pynative/pynative_execute.cc index e5fb0c6949..4144ad2d6b 100644 --- a/mindspore/ccsrc/pynative/pynative_execute.cc +++ b/mindspore/ccsrc/pynative/pynative_execute.cc @@ -158,8 +158,9 @@ py::object RunOpInMs(const OpExecInfoPtr& op_exec_info, PynativeStatusCode* stat session->Init(ms_context->device_id()); std::string graph_info = GetSingleOpGraphInfo(op_exec_info); - session->BuildOp(*op_exec_info, graph_info); - py::tuple result = session->RunOp(*op_exec_info, graph_info); + std::vector input_tensors; + session->BuildOp(*op_exec_info, graph_info, &input_tensors); + py::tuple result = session->RunOp(*op_exec_info, graph_info, input_tensors); ms_context->set_enable_pynative_infer(false); *status = PYNATIVE_SUCCESS; return result; diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc index ba0ef836e1..4455d33ee9 100755 --- a/mindspore/ccsrc/session/ascend_session.cc +++ b/mindspore/ccsrc/session/ascend_session.cc @@ -204,10 +204,12 @@ void AscendSession::RunOpExecTask(const std::shared_ptr &kernel_gra MS_LOG(INFO) << "Finish!"; } -void AscendSession::BuildOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info) { +void AscendSession::BuildOp(const OpRunInfo &op_run_info, const 
GraphInfo &graph_info, + std::vector *input_tensors) { + MS_EXCEPTION_IF_NULL(input_tensors); MS_LOG(INFO) << "Build op " << op_run_info.op_name << " start !"; // construct graph include one op - auto graph = ConstructSingleOpGraph(op_run_info); + auto graph = ConstructSingleOpGraph(op_run_info, input_tensors); MS_EXCEPTION_IF_NULL(graph); opt::RunOpAscendBackendIRFusionOptimization(graph); // kernel select @@ -222,14 +224,12 @@ void AscendSession::BuildOp(const OpRunInfo &op_run_info, const GraphInfo &graph run_op_graphs_[graph_info] = graph; } -py::tuple AscendSession::RunOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info) { +py::tuple AscendSession::RunOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info, + const std::vector &input_tensors) { auto graph = run_op_graphs_[graph_info]; MS_EXCEPTION_IF_NULL(graph); MS_LOG(INFO) << "Run op " << op_run_info.op_name << " start!"; // malloc mem - std::vector input_tensors = {}; - std::vector tensors_mask = {}; - ToTensorPtr(op_run_info, &input_tensors, &tensors_mask); RunOpMemoryAlloc(input_tensors, graph.get()); // load input data to device LoadInputData(graph, input_tensors); diff --git a/mindspore/ccsrc/session/ascend_session.h b/mindspore/ccsrc/session/ascend_session.h index c45ab6630a..2d24691404 100755 --- a/mindspore/ccsrc/session/ascend_session.h +++ b/mindspore/ccsrc/session/ascend_session.h @@ -41,8 +41,10 @@ class AscendSession : public SessionBasic { GraphId CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) override; void RunGraph(const GraphId &graph_id, const std::vector &inputs, VectorRef *outputs) override; void BuildGraph(GraphId) override; - void BuildOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info) override; - py::tuple RunOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info) override; + void BuildOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info, + std::vector *input_tensors) override; + py::tuple RunOp(const OpRunInfo 
&op_run_info, const GraphInfo &graph_info, + const std::vector &input_tensors) override; // set parameters of final graph GraphId SetFinalGraphInput(const std::vector &args) override; diff --git a/mindspore/ccsrc/session/gpu_session.cc b/mindspore/ccsrc/session/gpu_session.cc index c0b2323e04..98a84cd6d2 100644 --- a/mindspore/ccsrc/session/gpu_session.cc +++ b/mindspore/ccsrc/session/gpu_session.cc @@ -132,9 +132,11 @@ void GPUSession::RunGraph(const GraphId &graph_id, const std::vector *input_tensors) { // Prepare the graph - auto kernel_graph = ConstructSingleOpGraph(op_run_info); + MS_EXCEPTION_IF_NULL(input_tensors); + auto kernel_graph = ConstructSingleOpGraph(op_run_info, input_tensors); MS_EXCEPTION_IF_NULL(kernel_graph); SelectKernel(kernel_graph); StartKernelRT(); @@ -142,12 +144,10 @@ void GPUSession::BuildOp(const OpRunInfo &op_run_info, const GraphInfo &graph_in run_op_graphs_[graph_info] = kernel_graph; } -py::tuple GPUSession::RunOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info) { +py::tuple GPUSession::RunOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info, + const std::vector &input_tensors) { auto kernel_graph = run_op_graphs_[graph_info]; MS_EXCEPTION_IF_NULL(kernel_graph); - std::vector input_tensors = {}; - std::vector tensors_mask = {}; - ToTensorPtr(op_run_info, &input_tensors, &tensors_mask); RunOpAllocateMemory(input_tensors, kernel_graph.get()); // Execute the computation LoadInputData(kernel_graph, input_tensors); diff --git a/mindspore/ccsrc/session/gpu_session.h b/mindspore/ccsrc/session/gpu_session.h index d81a6c58f9..470c9b4799 100644 --- a/mindspore/ccsrc/session/gpu_session.h +++ b/mindspore/ccsrc/session/gpu_session.h @@ -39,8 +39,10 @@ class GPUSession : public SessionBasic { GraphId CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) override; void RunGraph(const GraphId &graph_id, const std::vector &inputs, VectorRef *outputs) override; - void BuildOp(const OpRunInfo &op_run_info, const 
GraphInfo &graph_info) override; - py::tuple RunOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info) override; + void BuildOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info, + std::vector *input_tensors) override; + py::tuple RunOp(const OpRunInfo &op_run_info, const GraphInfo &graph_info, + const std::vector &input_tensors) override; private: void SelectKernel(const std::shared_ptr &kernel_graph) const; diff --git a/mindspore/ccsrc/session/session_basic.cc b/mindspore/ccsrc/session/session_basic.cc index 33ef78c13d..01a836fad1 100755 --- a/mindspore/ccsrc/session/session_basic.cc +++ b/mindspore/ccsrc/session/session_basic.cc @@ -17,6 +17,7 @@ #include #include #include +#include #include "pipeline/parse/data_converter.h" #include "ir/manager.h" #include "operator/ops.h" @@ -26,6 +27,7 @@ #include "session/anf_runtime_algorithm.h" #include "kernel/oplib/oplib.h" #include "pre_activate/common/common_backend_optimization.h" +#include "pre_activate/pass/const_input_to_attr_registry.h" #include "pre_activate/common/helper.h" #include "common/utils.h" #include "ir/dtype.h" @@ -178,56 +180,113 @@ BaseRef CreatTupleForOutput(const AnfNodePtr &anf, const KernelGraph &graph, return ret; } -std::string FindOpInputParameterType(const std::string &op_name, kernel::OpImplyType implyType, size_t index) { - std::string para_type; - auto op_info = kernel::OpLib::FindOp(op_name, implyType); - if (op_info == nullptr) { - return para_type; +bool RunOpConvertConstInputToAttr(const py::object &input_object, size_t input_index, const PrimitivePtr &op_prim, + const std::unordered_set &input_attrs) { + MS_EXCEPTION_IF_NULL(op_prim); + auto input_names_value = op_prim->GetAttr(kAttrInputNames); + if (input_names_value == nullptr) { + return false; + } + auto input_names_vec = GetValue>(input_names_value); + if (input_index >= input_names_vec.size()) { + MS_LOG(EXCEPTION) << "The input index: " << input_index << " is large than the input names vector size!"; } - auto 
op_inputs_info_vec = op_info->inputs_ptr(); - if (index >= op_inputs_info_vec.size()) { - return para_type; + + if (input_attrs.find(input_index) != input_attrs.end()) { + ValuePtr value = parse::data_converter::PyDataToValue(input_object); + MS_EXCEPTION_IF_NULL(value); + auto input_name = input_names_vec[input_index]; + op_prim->set_attr(input_name, value); + return true; } - auto op_io_info = op_inputs_info_vec[index]; - MS_EXCEPTION_IF_NULL(op_io_info); - para_type = op_io_info->param_type(); - return para_type; + return false; } -void RunOpConvertConstInputToAttr(const OpRunInfo &op_run_info, const std::shared_ptr &cnode) { - MS_EXCEPTION_IF_NULL(cnode); - auto op_inputs = op_run_info.op_inputs; - // get input names vector from attrs - auto primitive = AnfAlgo::GetCNodePrimitive(cnode); - MS_EXCEPTION_IF_NULL(primitive); - auto input_names_value = primitive->GetAttr(kAttrInputNames); - if (input_names_value == nullptr) { +void PlantTensorTupleToVector(const py::tuple &tuple_inputs, const PrimitivePtr &op_prim, + std::vector *input_tensor) { + MS_EXCEPTION_IF_NULL(op_prim); + MS_EXCEPTION_IF_NULL(input_tensor); + for (const auto &input_object : tuple_inputs) { + if (!py::isinstance(input_object)) { + MS_LOG(EXCEPTION) << "The input object is not a tensor!"; + } + auto tensor = py::cast(input_object); + MS_EXCEPTION_IF_NULL(tensor); + input_tensor->push_back(tensor); + } + op_prim->set_attr(kAttrDynInputSizes, MakeValue(std::vector{SizeToInt(tuple_inputs.size())})); +} + +void ConvertValueTupleToTensor(const py::object &input_object, std::vector *input_tensor) { + MS_EXCEPTION_IF_NULL(input_tensor); + ValuePtr input_value = parse::data_converter::PyDataToValue(input_object); + MS_EXCEPTION_IF_NULL(input_value); + if (!input_value->isa()) { + MS_LOG(EXCEPTION) << "The input object is not a value tuple!"; + } + auto value_tuple = input_value->cast(); + MS_EXCEPTION_IF_NULL(value_tuple); + tensor::TensorPtr tensor_ptr = nullptr; + tensor_ptr = 
opt::CreateTupleTensor(value_tuple); + MS_EXCEPTION_IF_NULL(tensor_ptr); + input_tensor->push_back(tensor_ptr); +} + +void ConvertPyObjectToTensor(const py::object &input_object, const PrimitivePtr &op_prim, + std::vector *input_tensor) { + MS_EXCEPTION_IF_NULL(op_prim); + MS_EXCEPTION_IF_NULL(input_tensor); + tensor::TensorPtr tensor_ptr = nullptr; + if (py::isinstance(input_object)) { + tensor_ptr = py::cast(input_object); + } else if (py::isinstance(input_object)) { + tensor_ptr = std::make_shared(py::cast(input_object), kFloat32); + } else if (py::isinstance(input_object)) { + tensor_ptr = std::make_shared(py::cast(input_object), nullptr); + } else if (py::isinstance(input_object)) { + tensor_ptr = std::make_shared(py::cast(input_object), nullptr); + } else if (py::isinstance(input_object)) { + tensor_ptr = std::make_shared(py::cast(input_object), nullptr); + } else if (py::isinstance(input_object)) { + auto tuple_inputs = py::cast(input_object); + if (py::isinstance(tuple_inputs[0])) { + PlantTensorTupleToVector(tuple_inputs, op_prim, input_tensor); + } else { + ConvertValueTupleToTensor(input_object, input_tensor); + } return; + } else { + MS_LOG(EXCEPTION) << "Run op inputs type is invalid!"; } - auto input_names_vec = GetValue>(input_names_value); - // convert const input to attr - size_t input_num = op_inputs.size(); - if (input_num != input_names_vec.size()) { - MS_LOG(EXCEPTION) << "input name number " << input_names_vec.size() << "is not equal to input value number " - << input_num; + MS_EXCEPTION_IF_NULL(tensor_ptr); + input_tensor->push_back(tensor_ptr); +} + +void ConvertInputPyobject(const OpRunInfo &op_run_info, const PrimitivePtr &op_prim, + std::vector *input_tensors, std::vector *tensors_mask) { + MS_EXCEPTION_IF_NULL(op_prim); + MS_EXCEPTION_IF_NULL(input_tensors); + MS_EXCEPTION_IF_NULL(tensors_mask); + if (op_run_info.op_inputs.size() != op_run_info.inputs_mask.size()) { + MS_LOG(EXCEPTION) << "Op input size " << op_run_info.op_inputs.size() 
<< " should be equal to op input mask size " + << op_run_info.inputs_mask.size(); } + opt::ConstInputToAttrInfoRegister reg; + bool reg_exist = opt::ConstInputToAttrInfoRegistry::Instance().GetRegisterByOpName(op_run_info.op_name, ®); + size_t input_num = op_run_info.op_inputs.size(); + MS_LOG(INFO) << "py input size: " << input_num; for (size_t index = 0; index < input_num; ++index) { - // skip tensor - if (py::isinstance(op_inputs[index])) { - continue; - } - // convert to attr - auto para_type = FindOpInputParameterType(op_run_info.op_name, kernel::OpImplyType::kTBE, index); - if (!para_type.empty() && para_type == kAttrDynInput) { - auto tuple_inputs = py::cast(op_inputs[index]); - primitive->set_attr(kAttrDynInputSizes, MakeValue(std::vector{SizeToInt(tuple_inputs.size())})); + // convert const input to attr + if (reg_exist && + RunOpConvertConstInputToAttr(op_run_info.op_inputs[index], index, op_prim, reg.GetConstInputAttrInfo())) { continue; } - ValuePtr value = parse::data_converter::PyDataToValue(op_inputs[index]); - MS_EXCEPTION_IF_NULL(value); - auto input_name = input_names_vec[index]; - // set the input node as attr of the cnode, key is name of input node,value is input node's value - primitive->set_attr(input_name, value); + // convert const and tuple input to tensor + ConvertPyObjectToTensor(op_run_info.op_inputs[index], op_prim, input_tensors); + // make tensors, weight : 1, data : 0 + std::vector new_mask(input_tensors->size() - tensors_mask->size(), + py::cast(op_run_info.inputs_mask[index])); + tensors_mask->insert(tensors_mask->end(), new_mask.begin(), new_mask.end()); } } @@ -638,40 +697,6 @@ void SessionBasic::Summary(KernelGraph *graph) { summary_callback_(0, params_list); } -void SessionBasic::ToTensorPtr(const OpRunInfo &op_run_info, std::vector *inputs, - std::vector *tensor_mask) { - MS_EXCEPTION_IF_NULL(inputs); - MS_EXCEPTION_IF_NULL(tensor_mask); - if (op_run_info.op_inputs.size() != op_run_info.inputs_mask.size()) { - 
MS_LOG(EXCEPTION) << "Op input size " << op_run_info.op_inputs.size() << " should be equal to op input mask size " - << op_run_info.inputs_mask.size(); - } - size_t input_num = op_run_info.op_inputs.size(); - // get tensors from op_inputs - for (size_t i = 0; i < input_num; ++i) { - tensor::TensorPtr tensor_ptr = nullptr; - auto param_type = FindOpInputParameterType(op_run_info.op_name, kernel::OpImplyType::kTBE, i); - if (py::isinstance(op_run_info.op_inputs[i])) { - tensor_ptr = py::cast(op_run_info.op_inputs[i]); - } else if (!param_type.empty() && param_type == kAttrDynInput) { - auto tuple_inputs = py::cast(op_run_info.op_inputs[i]); - for (auto &&tuple_input : tuple_inputs) { - tensor_ptr = py::cast(tuple_input); - MS_EXCEPTION_IF_NULL(tensor_ptr); - inputs->push_back(tensor_ptr); - tensor_mask->push_back(py::cast(op_run_info.inputs_mask[i])); - } - continue; - } else if (op_run_info.op_name == kApplyMomentumOpName && py::isinstance(op_run_info.op_inputs[i])) { - tensor_ptr = std::make_shared(py::cast(op_run_info.op_inputs[i]), kFloat32); - } - if (tensor_ptr != nullptr) { - inputs->push_back(tensor_ptr); - tensor_mask->push_back(py::cast(op_run_info.inputs_mask[i])); - } - } -} - CNodePtr SessionBasic::ConstructOutput(const AnfNodePtrList &outputs, const std::shared_ptr &graph) { MS_EXCEPTION_IF_NULL(graph); std::vector output_args; @@ -724,30 +749,27 @@ void SessionBasic::CreateOutputNode(const CNodePtr &cnode, const std::shared_ptr MS_LOG(INFO) << "Finish!"; } -std::shared_ptr SessionBasic::ConstructSingleOpGraph(const OpRunInfo &op_run_info) { +std::shared_ptr SessionBasic::ConstructSingleOpGraph(const OpRunInfo &op_run_info, + std::vector *input_tensors) { + MS_EXCEPTION_IF_NULL(input_tensors); auto graph = std::make_shared(); std::vector inputs; - if (op_run_info.op_inputs.size() != op_run_info.inputs_mask.size()) { - MS_LOG(EXCEPTION) << "op_run_info inputs.size" << op_run_info.op_inputs.size() - << " should be equal to parameter_mask.size " << 
op_run_info.inputs_mask.size(); - } // set input[0] - if (op_run_info.py_primitive == nullptr) { - inputs.push_back(std::make_shared(std::make_shared(op_run_info.op_name))); - } else { - inputs.push_back(std::make_shared(op_run_info.py_primitive)); + PrimitivePtr op_prim = op_run_info.py_primitive; + if (op_prim == nullptr) { + op_prim = std::make_shared(op_run_info.op_name); } + inputs.push_back(std::make_shared(op_prim)); // set input parameter - std::vector input_tensors; std::vector tensors_mask; - ToTensorPtr(op_run_info, &input_tensors, &tensors_mask); - MS_LOG(INFO) << "Input tensor size" << input_tensors.size(); - if (input_tensors.size() != tensors_mask.size()) { - MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors.size() << " should be equal to tensors mask size " + ConvertInputPyobject(op_run_info, op_prim, input_tensors, &tensors_mask); + MS_LOG(INFO) << "Input tensor size: " << input_tensors->size(); + if (input_tensors->size() != tensors_mask.size()) { + MS_LOG(EXCEPTION) << "Input tensors size " << input_tensors->size() << " should be equal to tensors mask size " << tensors_mask.size(); } - for (size_t i = 0; i < input_tensors.size(); ++i) { - auto parameter = ConstructRunOpParameter(graph, input_tensors[i], tensors_mask[i]); + for (size_t i = 0; i < input_tensors->size(); ++i) { + auto parameter = ConstructRunOpParameter(graph, input_tensors->at(i), tensors_mask[i]); inputs.push_back(parameter); graph->MutableInputs()->push_back(parameter); } @@ -756,8 +778,6 @@ std::shared_ptr SessionBasic::ConstructSingleOpGraph(const OpRunInf MS_EXCEPTION_IF_NULL(cnode); // set abstract,which include inferred shapes and types cnode->set_abstract(op_run_info.abstract); - // set const input to attr if value is not a tensor,such as scalar or tuple - RunOpConvertConstInputToAttr(op_run_info, cnode); // set execution order std::vector exe_order = {cnode}; graph->set_execution_order(exe_order); diff --git a/mindspore/ccsrc/session/session_basic.h 
b/mindspore/ccsrc/session/session_basic.h index f1872e375c..aa359c74d9 100755 --- a/mindspore/ccsrc/session/session_basic.h +++ b/mindspore/ccsrc/session/session_basic.h @@ -61,9 +61,11 @@ class SessionBasic { virtual void RunGraph(const GraphId &graph_id, const std::vector &inputs, VectorRef *outputs) = 0; - virtual void BuildOp(const OpRunInfo &, const GraphInfo &) {} + virtual void BuildOp(const OpRunInfo &, const GraphInfo &, std::vector *input_tensors) {} - virtual py::tuple RunOp(const OpRunInfo &, const GraphInfo &) { return py::tuple(); } + virtual py::tuple RunOp(const OpRunInfo &, const GraphInfo &, const std::vector &input_tensors) { + return py::tuple(); + } virtual void RegisterSummaryCallBackFunc(const CallBackFunc &callback); @@ -96,10 +98,8 @@ class SessionBasic { void CreateOutputNode(const CNodePtr &cnode, const std::shared_ptr &graph); CNodePtr ConstructOutput(const AnfNodePtrList &outputs, const std::shared_ptr &graph); // create a single run op graph - std::shared_ptr ConstructSingleOpGraph(const OpRunInfo &op_run_info); - // get tensors from op inputs - void ToTensorPtr(const OpRunInfo &op_run_info, std::vector *inputs, - std::vector *tensor_mask); + std::shared_ptr ConstructSingleOpGraph(const OpRunInfo &op_run_info, + std::vector *input_tensor); // trans BaseRef list to py::tuple BaseRef TransformBaseRefListToTuple(const BaseRef &base_ref); From a66b0c1e111903e27b6c45d5f221060346a6fe11 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Wed, 15 Apr 2020 18:58:36 +0800 Subject: [PATCH 265/367] fix tvm copy path --- cmake/package.cmake | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/package.cmake b/cmake/package.cmake index e4f8fa7706..7c9c92b23c 100644 --- a/cmake/package.cmake +++ b/cmake/package.cmake @@ -165,11 +165,11 @@ if (ENABLE_GPU) DESTINATION ${INSTALL_PY_DIR}/../ COMPONENT mindspore ) - if (EXISTS ${CMAKE_SOURCE_DIR}/mindspore/incubator-tvm) + if (EXISTS ${CMAKE_BINARY_DIR}/incubator-tvm) install( DIRECTORY 
- ${CMAKE_SOURCE_DIR}/mindspore/incubator-tvm/topi/python/topi - ${CMAKE_SOURCE_DIR}/mindspore/incubator-tvm/python/tvm + ${CMAKE_BINARY_DIR}/incubator-tvm/topi/python/topi + ${CMAKE_BINARY_DIR}/incubator-tvm/python/tvm DESTINATION ${INSTALL_PY_DIR}/../_akg COMPONENT mindspore ) From b08a90ab9b964883f0ab2304cabeb74e3ca80f30 Mon Sep 17 00:00:00 2001 From: Yanjun Peng Date: Wed, 15 Apr 2020 20:18:03 +0800 Subject: [PATCH 266/367] add tdt transfer data limit notice --- mindspore/dataset/engine/datasets.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index f2d0f59f82..517ec6c385 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -548,12 +548,16 @@ class Dataset: def device_que(self, prefetch_size=None): """ - Returns a transferredDataset that transfer data through tdt. + Returns a transferredDataset that transfer data through device. Args: prefetch_size (int, optional): prefetch number of records ahead of the user's request (default=None). + Note: + If device is Ascend, features of data will be transferred one by one. The limitation + of data transferation per time is 256M. + Return: TransferDataset, dataset for transferring. """ @@ -566,6 +570,10 @@ class Dataset: Args: num_batch (int, optional): limit the number of batch to be sent to device (default=None). + Note: + If device is Ascend, features of data will be transferred one by one. The limitation + of data transferation per time is 256M. + Returns: TransferDataset, dataset for transferring. 
From b77f41d6589043cb7ff6f23f69198db1ecc40c52 Mon Sep 17 00:00:00 2001 From: chenzomi Date: Wed, 15 Apr 2020 21:37:34 +0800 Subject: [PATCH 267/367] clear the warmming scan by package --- .../ccsrc/device/gpu/gpu_memory_manager.cc | 2 +- .../ccsrc/device/gpu/gpu_stream_assign.cc | 2 +- .../ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc | 1 - .../gpu/quant/batchnorm_fold2_gpu_kernel.cc | 1 - .../gpu/quant/batchnorm_fold2_gpu_kernel.h | 1 - .../quant/batchnorm_fold2_grad_gpu_kernel.cc | 1 - .../gpu/quant/batchnorm_fold_gpu_kernel.cc | 1 - .../gpu/quant/correction_mul_gpu_kernel.h | 1 - .../quant/correction_mul_grad_gpu_kernel.cc | 1 - .../quant/correction_mul_grad_gpu_kernel.h | 1 - .../fake_quant_per_channel_gpu_kernel.cc | 62 +++++++++++-------- .../quant/fake_quant_per_channel_gpu_kernel.h | 5 ++ 12 files changed, 42 insertions(+), 37 deletions(-) diff --git a/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc b/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc index 7d042264b6..8bb65963d8 100644 --- a/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc +++ b/mindspore/ccsrc/device/gpu/gpu_memory_manager.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc b/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc index 39d5ca3fe6..08a19aa469 100644 --- a/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc +++ b/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc index eeec8365da..937f38137f 100644 --- a/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc +++ b/mindspore/ccsrc/kernel/gpu/nn/dropout_gpu_kernel.cc @@ -19,7 +19,6 @@ namespace mindspore { namespace kernel { - DropoutGpuFwdKernel::DropoutGpuFwdKernel() : cudnn_handle_(nullptr), is_null_input_(false), diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.cc index a95c1b78dd..af95767407 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.cc +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.cc @@ -18,7 +18,6 @@ namespace mindspore { namespace kernel { - MS_REG_GPU_KERNEL_ONE(BatchNormFold2, KernelAttr() .AddInputAttr(kNumberTypeFloat32) diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h index 3e246f18f6..beeeb12a9a 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_gpu_kernel.h @@ -132,7 +132,6 @@ class BatchNormFold2GpuKernel : public GpuKernel { std::vector output_size_list_; std::vector workspace_size_list_; }; - } // namespace kernel } // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.cc index d5932f1984..93862aeedd 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.cc +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold2_grad_gpu_kernel.cc @@ -18,7 +18,6 @@ namespace mindspore { namespace kernel { - MS_REG_GPU_KERNEL_ONE(BatchNormFold2Grad, KernelAttr() .AddInputAttr(kNumberTypeFloat32) diff --git a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.cc 
b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.cc index b5fbfe4927..4f968a0fa3 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.cc +++ b/mindspore/ccsrc/kernel/gpu/quant/batchnorm_fold_gpu_kernel.cc @@ -18,7 +18,6 @@ namespace mindspore { namespace kernel { - MS_REG_GPU_KERNEL_ONE(BatchNormFold, KernelAttr() .AddInputAttr(kNumberTypeFloat32) diff --git a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h index 7608ae5d3c..eeab872ab3 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_gpu_kernel.h @@ -54,7 +54,6 @@ class CorrectionMulGpuKernel : public GpuKernel { } auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - if (input_shape.size() != 4) { MS_LOG(ERROR) << "CorrectionMulGpuKernel input shape needs (N,C,H,W)."; return false; diff --git a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.cc index 211c515e02..28b5d56e68 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.cc +++ b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.cc @@ -19,7 +19,6 @@ namespace mindspore { namespace kernel { - MS_REG_GPU_KERNEL_ONE(CorrectionMulGrad, KernelAttr() .AddInputAttr(kNumberTypeFloat32) diff --git a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h index 2439826cc3..29aeb3be13 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/correction_mul_grad_gpu_kernel.h @@ -61,7 +61,6 @@ class CorrectionMulGradGpuKernel : public GpuKernel { } auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); - if (input_shape.size() != 4) { MS_LOG(ERROR) << "CorrectionMulGradGpuKernel input shape needs 
(N,C,H,W)."; return false; diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.cc index 302ef8d99f..1da9f457a1 100644 --- a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.cc +++ b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.cc @@ -114,6 +114,36 @@ void FakeQuantPerChannelGpuKernel::InitSizeLists() { workspace_size_list_.push_back(workspace_size_); } +void FakeQuantPerChannelGpuKernel::CalFakeQuantizeForTraining(float *input, float *output, float *input_min, + float *input_max, float *d_nudge_min, float *d_nudge_max, + float *d_scale, uintptr_t stream_ptr) { + // calculate the input min and max according by the parameter ema and ema_decay. + CalMinMaxPerChannel(input, input_min, input_max, input_size_ / sizeof(float), channel_out_, ema_decay_, ema_, + reinterpret_cast(stream_ptr)); + // control flow for quant_delay + if (global_step_ >= quant_delay_) { + // real launch + CalNudgePerChannel(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, channel_out_, + reinterpret_cast(stream_ptr)); + CalFakeQuantizePerChannel(input, output, input_size_ / sizeof(float), channel_out_, d_nudge_min, d_nudge_max, + d_scale, symmetric_, reinterpret_cast(stream_ptr)); + } else { + CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(output, input, input_size_, cudaMemcpyDeviceToDevice), + "Copy gpu memory failed."); + } + global_step_++; +} + +void FakeQuantPerChannelGpuKernel::CalFakeQuantizeForInfer(float *input, float *output, float *input_min, + float *input_max, float *d_nudge_min, float *d_nudge_max, + float *d_scale, uintptr_t stream_ptr) { + // real launch + CalNudgePerChannel(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, channel_out_, + reinterpret_cast(stream_ptr)); + CalFakeQuantizePerChannel(input, output, input_size_ / sizeof(float), channel_out_, d_nudge_min, d_nudge_max, d_scale, + 
symmetric_, reinterpret_cast(stream_ptr)); +} + bool FakeQuantPerChannelGpuKernel::Launch(const std::vector &inputs, const std::vector &workspace, const std::vector &outputs, uintptr_t stream_ptr) { @@ -126,11 +156,8 @@ bool FakeQuantPerChannelGpuKernel::Launch(const std::vector &inputs, if (input == nullptr) { MS_LOG(EXCEPTION) << "FakeQuantPerChannelGpuKernel input is null."; } - if (input_min == nullptr) { - MS_LOG(EXCEPTION) << "FakeQuantPerChannelGpuKernel input min is null."; - } - if (input_max == nullptr) { - MS_LOG(EXCEPTION) << "FakeQuantPerChannelGpuKernel input max is null."; + if (input_min == nullptr || input_max == nullptr) { + MS_LOG(EXCEPTION) << "FakeQuantPerChannelGpuKernel input min or max is null."; } // Allocate space for device copies @@ -143,30 +170,11 @@ bool FakeQuantPerChannelGpuKernel::Launch(const std::vector &inputs, "Malloc gpu memory failed"); CHECK_CUDA_RET_WITH_ERROR(cudaMalloc(reinterpret_cast(&d_nudge_max), sizeof(float) * channel_out_), "Malloc gpu memory failed"); - int total_size = input_size_ / sizeof(float); - bool symmetric = false; + if (training_) { - // calculate the input min and max according by the parameter ema and ema_decay. 
- CalMinMaxPerChannel(input, input_min, input_max, total_size, channel_out_, ema_decay_, ema_, - reinterpret_cast(stream_ptr)); - // control flow for quant_delay - if (global_step_ >= quant_delay_) { - // real launch - CalNudgePerChannel(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, channel_out_, - reinterpret_cast(stream_ptr)); - CalFakeQuantizePerChannel(input, output, total_size, channel_out_, d_nudge_min, d_nudge_max, d_scale, symmetric, - reinterpret_cast(stream_ptr)); - } else { - CHECK_CUDA_RET_WITH_ERROR(cudaMemcpy(output, input, input_size_, cudaMemcpyDeviceToDevice), - "Copy gpu memory failed."); - } - global_step_++; + CalFakeQuantizeForTraining(input, output, input_min, input_max, d_nudge_min, d_nudge_max, d_scale, stream_ptr); } else { - // real launch - CalNudgePerChannel(input_min, input_max, quant_min_, quant_max_, d_nudge_min, d_nudge_max, d_scale, channel_out_, - reinterpret_cast(stream_ptr)); - CalFakeQuantizePerChannel(input, output, total_size, channel_out_, d_nudge_min, d_nudge_max, d_scale, symmetric, - reinterpret_cast(stream_ptr)); + CalFakeQuantizeForInfer(input, output, input_min, input_max, d_nudge_min, d_nudge_max, d_scale, stream_ptr); } // Cleanup diff --git a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.h index faf8684fca..8a1bb7293a 100755 --- a/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/quant/fake_quant_per_channel_gpu_kernel.h @@ -39,6 +39,11 @@ class FakeQuantPerChannelGpuKernel : public GpuKernel { void InitSizeLists() override; private: + void CalFakeQuantizeForTraining(float *input, float *output, float *input_min, float *input_max, float *d_nudge_min, + float *d_nudge_max, float *d_scale, uintptr_t stream_ptr); + void CalFakeQuantizeForInfer(float *input, float *output, float *input_min, float *input_max, float *d_nudge_min, + float 
*d_nudge_max, float *d_scale, uintptr_t stream_ptr); + size_t input_size_; size_t min_size_; size_t max_size_; From b6493af6b7531f3d460d581e27415dd67b0bd6d3 Mon Sep 17 00:00:00 2001 From: chenjianping Date: Wed, 15 Apr 2020 14:18:50 +0000 Subject: [PATCH 268/367] fix gpu init fail --- cmake/options.cmake | 1 + mindspore/ccsrc/pipeline/init.cc | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmake/options.cmake b/cmake/options.cmake index 2f44e9a17c..3677418a98 100644 --- a/cmake/options.cmake +++ b/cmake/options.cmake @@ -64,6 +64,7 @@ endif() if (ENABLE_GPU) set(ENABLE_GPUQUE ON) + add_compile_definitions(ENABLE_GPU_COLLECTIVE) endif() if (ENABLE_GE) diff --git a/mindspore/ccsrc/pipeline/init.cc b/mindspore/ccsrc/pipeline/init.cc index 7c77949fda..b709199c87 100644 --- a/mindspore/ccsrc/pipeline/init.cc +++ b/mindspore/ccsrc/pipeline/init.cc @@ -29,7 +29,7 @@ #include "parallel/context.h" #include "parallel/device_manager.h" #include "parallel/costmodel_context.h" -#ifdef ENABLE_GPUQUE +#ifdef ENABLE_GPU_COLLECTIVE #include "device/gpu/distribution/collective_init.h" #else #include "device/gpu/distribution/collective_fake_init.h" @@ -300,7 +300,7 @@ PYBIND11_MODULE(_c_expression, m) { (void)py::class_>(m, "Oplib") .def(py::init()) .def("reg_op", &OpLib::RegOp, "Register op info."); -#ifdef ENABLE_GPUQUE +#ifdef ENABLE_GPU_COLLECTIVE (void)m.def("init_gpu_collective", &mindspore::device::gpu::CollectiveInitializer::InitCollective, "Init gpu collective communication mode."); (void)m.def("finalize_gpu_collective", &mindspore::device::gpu::CollectiveInitializer::FinalizeCollective, From bbf5d4b4998896ef2d24128d5fa41724ae02d194 Mon Sep 17 00:00:00 2001 From: seatea Date: Wed, 15 Apr 2020 22:27:12 +0800 Subject: [PATCH 269/367] Fix the error in the example of `RandomChoiceWithMask` operation. 
--- mindspore/ops/operations/random_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ops/operations/random_ops.py b/mindspore/ops/operations/random_ops.py index c8d7c75768..18c2212b3d 100644 --- a/mindspore/ops/operations/random_ops.py +++ b/mindspore/ops/operations/random_ops.py @@ -45,7 +45,7 @@ class RandomChoiceWithMask(PrimitiveWithInfer): Examples: >>> rnd_choice_mask = P.RandomChoiceWithMask() - >>> input_x = Tensor(np.ones(shape=[240000, 4]), mindspore.bool_) + >>> input_x = Tensor(np.ones(shape=[240000, 4]).astype(np.bool)) >>> output_y, output_mask = rnd_choice_mask(input_x) """ From 108ac9cda7d51882c67b5ced6a3bfa4a2a99b0a0 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Thu, 16 Apr 2020 09:22:47 +0800 Subject: [PATCH 270/367] fix package path --- cmake/package.cmake | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/cmake/package.cmake b/cmake/package.cmake index 7c9c92b23c..d04efca085 100644 --- a/cmake/package.cmake +++ b/cmake/package.cmake @@ -90,19 +90,23 @@ if (ENABLE_CPU) endif () if (ENABLE_GPU) - if (ENABLE_MPI) - install( - TARGETS _ms_mpi gpu_collective - DESTINATION ${INSTALL_LIB_DIR} - COMPONENT mindspore - ) - endif () - - install( - TARGETS gpu_queue - DESTINATION ${INSTALL_LIB_DIR} - COMPONENT mindspore - ) + if (ENABLE_MPI) + install( + TARGETS _ms_mpi + DESTINATION ${INSTALL_BASE_DIR} + COMPONENT mindspore + ) + install( + TARGETS gpu_collective + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) + endif () + install( + TARGETS gpu_queue + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) endif () if (NOT ENABLE_GE) From 8e8f56cbdc67fa0559f7205c3eafd92bc0aa7a24 Mon Sep 17 00:00:00 2001 From: simson <526422051@qq.com> Date: Thu, 16 Apr 2020 10:14:09 +0800 Subject: [PATCH 271/367] only link to glog when enable GE --- mindspore/ccsrc/CMakeLists.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git 
a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 500972b0d6..c99809f90f 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -381,17 +381,19 @@ if (WIN32) securec proto_input mindspore::flatbuffers - mindspore::glog ) else() target_link_libraries(_c_expression PRIVATE mindspore::pybind11_module mindspore mindspore_gvar - mindspore::glog ) endif() +if(USE_GLOG) + target_link_libraries(_c_expression PRIVATE mindspore::glog) +endif() + if(ENABLE_GPU) execute_process(COMMAND bash ${CMAKE_SOURCE_DIR}/third_party/apply_patches.sh ${CMAKE_BINARY_DIR} From e017fd891679af3fa17be9a60a27dcc11049f2c8 Mon Sep 17 00:00:00 2001 From: chenfei Date: Thu, 16 Apr 2020 10:29:10 +0800 Subject: [PATCH 272/367] share mem of paramter between child graph --- mindspore/ccsrc/pre_activate/common/helper.cc | 4 ---- mindspore/ccsrc/session/ascend_session.cc | 19 +++++++++---------- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/mindspore/ccsrc/pre_activate/common/helper.cc b/mindspore/ccsrc/pre_activate/common/helper.cc index 3b7d95a6f2..3f157d6a64 100644 --- a/mindspore/ccsrc/pre_activate/common/helper.cc +++ b/mindspore/ccsrc/pre_activate/common/helper.cc @@ -337,10 +337,6 @@ bool IsNopNode(const AnfNodePtr &node) { if (nop_nodes.find(AnfAlgo::GetCNodeName(cnode)) == nop_nodes.end()) { return false; } - if (cnode->inputs().size() != 2) { - MS_LOG(EXCEPTION) << "Nop node(" + cnode->DebugString() + ") should have only 1 input, but it has " - << cnode->inputs().size() - 1 << " inputs."; - } return true; } diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc index 4455d33ee9..b0855feb49 100755 --- a/mindspore/ccsrc/session/ascend_session.cc +++ b/mindspore/ccsrc/session/ascend_session.cc @@ -709,18 +709,17 @@ void AscendSession::SetChildGraphParameter(const AnfNodePtr &front_anf, const An << "of graph " << AnfAlgo::GetGraphId(backend_arg.get()); return; } - } - // if a parameter is 
a weight and not linked to any executable node,device type will be kTypeUnknown,set it's device - // type same to arg - if (AnfAlgo::GetOutputDeviceDataType(backend_parameter, 0) == kTypeUnknown) { - AnfAlgo::SetSelectKernelBuildInfo(AnfAlgo::GetSelectKernelBuildInfo(backend_arg), backend_parameter.get()); + // if a parameter is a weight and not linked to any executable node,device type will be kTypeUnknown,set it's device + // type same to arg + if (AnfAlgo::GetOutputDeviceDataType(backend_parameter, 0) == kTypeUnknown) { + AnfAlgo::SetSelectKernelBuildInfo(AnfAlgo::GetSelectKernelBuildInfo(backend_arg), backend_parameter.get()); + } + // if front anf is a parameter,we can assign the value back,because backend_parameter won't be change in it's graph + // unless it's a weight.If backend_parameter is a weight,we should assign the value back. + AnfAlgo::SetOutputAddr(AnfAlgo::GetMutableOutputAddr(backend_arg, 0), 0, backend_parameter.get()); + return; } InsertAssignToGraph(from_graph_id, backend_arg, backend_parameter); - // if front anf is a parameter,we can assign the value back,because backend_parameter won't be change in it's graph - // unless it's a weigth.If backend_parameter is a weight,we do should assign the value back - if (backend_arg->isa() && !to_graph->execution_order().empty()) { - InsertAssignToGraph(to_graph_id, backend_parameter, backend_arg); - } MS_LOG(INFO) << "Finish!"; } From 4cd237eee4495ad838a312164d0d4b3e43be9232 Mon Sep 17 00:00:00 2001 From: ZPaC Date: Thu, 16 Apr 2020 09:56:43 +0800 Subject: [PATCH 273/367] Add GPU NCCL ci test cases. 
--- tests/st/nccl/test_nccl_all.py | 44 ++++++++++++++++++++ tests/st/nccl/test_nccl_all_reduce_op.py | 2 +- tests/st/nccl/test_nccl_lenet.py | 3 +- tests/st/nccl/test_nccl_reduce_scatter_op.py | 2 - 4 files changed, 47 insertions(+), 4 deletions(-) create mode 100644 tests/st/nccl/test_nccl_all.py diff --git a/tests/st/nccl/test_nccl_all.py b/tests/st/nccl/test_nccl_all.py new file mode 100644 index 0000000000..99494bb741 --- /dev/null +++ b/tests/st/nccl/test_nccl_all.py @@ -0,0 +1,44 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +import os +import pytest + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_single +def test_nccl_lenet(): + return_code = os.system("mpirun -n 8 pytest -s test_nccl_lenet.py") + assert(return_code == 0) + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_single +def test_nccl_all_reduce_op(): + return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_reduce_op.py") + assert(return_code == 0) + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_single +def test_nccl_all_gather_op(): + return_code = os.system("mpirun -n 8 pytest -s test_nccl_all_gather_op.py") + assert(return_code == 0) + +@pytest.mark.level0 +@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_single +def test_nccl_reduce_scatter_op(): + return_code = os.system("mpirun -n 8 pytest -s test_nccl_reduce_scatter_op.py") + assert(return_code == 0) diff --git a/tests/st/nccl/test_nccl_all_reduce_op.py b/tests/st/nccl/test_nccl_all_reduce_op.py index 3ba8b219e4..7c2e579463 100644 --- a/tests/st/nccl/test_nccl_all_reduce_op.py +++ b/tests/st/nccl/test_nccl_all_reduce_op.py @@ -20,7 +20,7 @@ import mindspore.context as context from mindspore.common.initializer import initializer from mindspore.common.parameter import Parameter from mindspore.communication.management import init, NCCL_WORLD_COMM_GROUP, get_rank, get_group_size -context.set_context(mode=context.GRAPH_MODE, device_target='GPU') +context.set_context(mode=context.GRAPH_MODE, device_target='GPU', enable_dynamic_memory=False) init('nccl') rank = get_rank() diff --git a/tests/st/nccl/test_nccl_lenet.py b/tests/st/nccl/test_nccl_lenet.py index 5642603d42..2aebc5da50 100644 --- a/tests/st/nccl/test_nccl_lenet.py +++ b/tests/st/nccl/test_nccl_lenet.py @@ -27,7 +27,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="GPU") init('nccl') epoch = 2 -total = 50000 +total 
= 5000 batch_size = 32 mini_batch = total // batch_size @@ -94,3 +94,4 @@ def test_lenet_nccl(): with open("ms_loss.txt", "w") as fo2: fo2.write("loss:") fo2.write(str(losses[-5:])) + assert(losses[-1] < 0.01) diff --git a/tests/st/nccl/test_nccl_reduce_scatter_op.py b/tests/st/nccl/test_nccl_reduce_scatter_op.py index af22c7690f..32c1f31788 100644 --- a/tests/st/nccl/test_nccl_reduce_scatter_op.py +++ b/tests/st/nccl/test_nccl_reduce_scatter_op.py @@ -62,8 +62,6 @@ def test_ReduceScatter(): expect1 = np.ones([1, 1, 3, 3]).astype(np.float32) * 0.01 * size diff1 = output[1].asnumpy() - expect1 error1 = np.ones(shape=expect1.shape) * 1.0e-5 - print(expect1) - print(output[1]) assert np.all(diff1 < error1) assert (output[1].shape() == expect1.shape) From b8a91215974bd7590028c667d982751ede344604 Mon Sep 17 00:00:00 2001 From: ZPaC Date: Wed, 15 Apr 2020 11:19:19 +0800 Subject: [PATCH 274/367] Add GPU send and recv controlling kernels. --- mindspore/ccsrc/device/gpu/cuda_driver.cc | 2 +- .../ccsrc/device/gpu/gpu_stream_assign.cc | 24 ++++--- .../ccsrc/device/gpu/gpu_stream_assign.h | 4 +- .../kernel/gpu/control/recv_gpu_kernel.cc | 23 +++++++ .../kernel/gpu/control/recv_gpu_kernel.h | 66 +++++++++++++++++++ .../kernel/gpu/control/send_gpu_kernel.cc | 23 +++++++ .../kernel/gpu/control/send_gpu_kernel.h | 66 +++++++++++++++++++ .../ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h | 6 ++ 8 files changed, 201 insertions(+), 13 deletions(-) create mode 100644 mindspore/ccsrc/kernel/gpu/control/recv_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/control/recv_gpu_kernel.h create mode 100644 mindspore/ccsrc/kernel/gpu/control/send_gpu_kernel.cc create mode 100644 mindspore/ccsrc/kernel/gpu/control/send_gpu_kernel.h diff --git a/mindspore/ccsrc/device/gpu/cuda_driver.cc b/mindspore/ccsrc/device/gpu/cuda_driver.cc index 3693157d2b..3b265a4d5e 100644 --- a/mindspore/ccsrc/device/gpu/cuda_driver.cc +++ b/mindspore/ccsrc/device/gpu/cuda_driver.cc @@ -96,7 +96,7 @@ size_t 
CudaDriver::free_mem_size() { } bool CudaDriver::CreateStream(DeviceStream *stream) { - auto ret = cudaStreamCreate(reinterpret_cast(stream)); + auto ret = cudaStreamCreateWithFlags(reinterpret_cast(stream), cudaStreamNonBlocking); if (ret != cudaSuccess) { MS_LOG(ERROR) << "cudaStreamCreate failed, ret[" << static_cast(ret) << "], " << cudaGetErrorString(ret); return false; diff --git a/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc b/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc index 08a19aa469..2550b543ec 100644 --- a/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc +++ b/mindspore/ccsrc/device/gpu/gpu_stream_assign.cc @@ -28,21 +28,25 @@ namespace device { namespace gpu { void AssignGpuStream(const std::shared_ptr &kernel_graph) { MS_EXCEPTION_IF_NULL(kernel_graph); - std::vector allreduce_cnodes; + std::vector allreduce_kernels; auto execution_kernels = kernel_graph->execution_order(); - for (auto kernel : execution_kernels) { - std::string kernel_name = AnfAlgo::GetCNodeName(kernel); + for (auto kernel_node : execution_kernels) { + std::string kernel_name = AnfAlgo::GetCNodeName(kernel_node); if (kernel_name == kAllReduceOpName) { - allreduce_cnodes.emplace_back(kernel); + allreduce_kernels.emplace_back(kernel_node); + } else { + DeviceStream compute_stream = GPUDeviceManager::GetInstance().default_stream(); + AnfAlgo::SetNodeAttr("stream_id", MakeValue(reinterpret_cast(compute_stream)), kernel_node); } } - if (allreduce_cnodes.size() > 1) { + if (allreduce_kernels.size() > 1) { DeviceStream comm_stream = nullptr; GPUDeviceManager::GetInstance().CreateStream(&comm_stream); - std::transform(allreduce_cnodes.begin(), allreduce_cnodes.end(), allreduce_cnodes.begin(), [&](CNodePtr node) { - AnfAlgo::SetNodeAttr("stream_id", MakeValue(reinterpret_cast(comm_stream)), node); - return node; - }); + std::transform( + allreduce_kernels.begin(), allreduce_kernels.end(), allreduce_kernels.begin(), [&](CNodePtr allreduce_kernel) { + AnfAlgo::SetNodeAttr("stream_id", 
MakeValue(reinterpret_cast(comm_stream)), allreduce_kernel); + return allreduce_kernel; + }); std::vector send_recv_pairs; FindAllReduceStreamSwitchPos(kernel_graph, &send_recv_pairs); @@ -137,7 +141,7 @@ void InsertStreamSwitchNode(const std::shared_ptr &kernel_ } // Step 3: insert stream switch CNodes into execution kernel list. auto execution_kernels = kernel_graph->execution_order(); - for (auto node = ordered_stream_switch_nodes.begin(); node != ordered_stream_switch_nodes.end(); node++) { + for (auto node = ordered_stream_switch_nodes.rbegin(); node != ordered_stream_switch_nodes.rend(); node++) { execution_kernels.insert(execution_kernels.begin() + node->offset, node->cnode); } kernel_graph->set_execution_order(execution_kernels); diff --git a/mindspore/ccsrc/device/gpu/gpu_stream_assign.h b/mindspore/ccsrc/device/gpu/gpu_stream_assign.h index c7d2fe40e2..e3d98d68da 100644 --- a/mindspore/ccsrc/device/gpu/gpu_stream_assign.h +++ b/mindspore/ccsrc/device/gpu/gpu_stream_assign.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -70,4 +70,4 @@ CNodePtr CreateStreamSwitchNode(const std::shared_ptr &ker } // namespace device } // namespace mindspore -#endif +#endif // MINDSPORE_CCSRC_DEVICE_GPU_GPU_STREAM_ASSIGN_H_ diff --git a/mindspore/ccsrc/kernel/gpu/control/recv_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/control/recv_gpu_kernel.cc new file mode 100644 index 0000000000..5468aa6500 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/control/recv_gpu_kernel.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/control/recv_gpu_kernel.h" + +namespace mindspore { +namespace kernel { +MS_REG_GPU_KERNEL_REGULAR(Recv, KernelAttr(), RecvGpuKernel) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/control/recv_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/control/recv_gpu_kernel.h new file mode 100644 index 0000000000..206eac5bd9 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/control/recv_gpu_kernel.h @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CONTROL_RECV_GPU_KERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CONTROL_RECV_GPU_KERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class RecvGpuKernel : public GpuKernel { + public: + RecvGpuKernel() {} + ~RecvGpuKernel() override = default; + + const std::vector &GetInputSizeList() const override { return input_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } + + bool Launch(const std::vector &, const std::vector &, const std::vector &, + uintptr_t) override { + CHECK_CUDA_RET_WITH_EXCEPT(cudaStreamWaitEvent(wait_stream_, wait_event_, 0), "Waiting cuda event failed."); + return true; + } + bool Init(const CNodePtr &kernel_node) override { + wait_stream_ = reinterpret_cast(GetAttr(kernel_node, "wait_event_stream")); + wait_event_ = reinterpret_cast(GetAttr(kernel_node, "wait_event")); + InitSizeLists(); + return true; + } + + protected: + void InitSizeLists() override { + input_size_list_.clear(); + output_size_list_.clear(); + workspace_size_list_.clear(); + return; + } + + private: + cudaStream_t wait_stream_{nullptr}; + cudaEvent_t wait_event_{nullptr}; + + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CONTROL_RECV_GPU_KERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/control/send_gpu_kernel.cc b/mindspore/ccsrc/kernel/gpu/control/send_gpu_kernel.cc new file mode 100644 index 0000000000..c417c30bb3 --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/control/send_gpu_kernel.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file 
except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "kernel/gpu/control/send_gpu_kernel.h" + +namespace mindspore { +namespace kernel { +MS_REG_GPU_KERNEL_REGULAR(Send, KernelAttr(), SendGpuKernel) +} // namespace kernel +} // namespace mindspore diff --git a/mindspore/ccsrc/kernel/gpu/control/send_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/control/send_gpu_kernel.h new file mode 100644 index 0000000000..156ec4160d --- /dev/null +++ b/mindspore/ccsrc/kernel/gpu/control/send_gpu_kernel.h @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CONTROL_SEND_GPU_KERNEL_H_ +#define MINDSPORE_CCSRC_KERNEL_GPU_CONTROL_SEND_GPU_KERNEL_H_ + +#include +#include "kernel/gpu/gpu_kernel.h" +#include "kernel/gpu/gpu_kernel_factory.h" + +namespace mindspore { +namespace kernel { +class SendGpuKernel : public GpuKernel { + public: + SendGpuKernel() {} + ~SendGpuKernel() override = default; + + const std::vector &GetInputSizeList() const override { return input_size_list_; } + const std::vector &GetOutputSizeList() const override { return output_size_list_; } + const std::vector &GetWorkspaceSizeList() const override { return workspace_size_list_; } + + bool Launch(const std::vector &, const std::vector &, const std::vector &, + uintptr_t) override { + CHECK_CUDA_RET_WITH_EXCEPT(cudaEventRecord(record_event_, record_stream_), "Recording cuda event failed."); + return true; + } + bool Init(const CNodePtr &kernel_node) override { + record_stream_ = reinterpret_cast(GetAttr(kernel_node, "record_event_stream")); + record_event_ = reinterpret_cast(GetAttr(kernel_node, "record_event")); + InitSizeLists(); + return true; + } + + protected: + void InitSizeLists() override { + input_size_list_.clear(); + output_size_list_.clear(); + workspace_size_list_.clear(); + return; + } + + private: + cudaStream_t record_stream_{nullptr}; + cudaEvent_t record_event_{nullptr}; + + std::vector input_size_list_; + std::vector output_size_list_; + std::vector workspace_size_list_; +}; +} // namespace kernel +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_KERNEL_GPU_CONTROL_SEND_GPU_KERNEL_H_ diff --git a/mindspore/ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h b/mindspore/ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h index cea56b9878..4ea332784d 100644 --- a/mindspore/ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h +++ b/mindspore/ccsrc/kernel/gpu/nccl/nccl_gpu_kernel.h @@ -124,6 +124,12 @@ class NcclGpuKernel : public GpuKernel { InferCommType(kernel_node); collective_handle_ = 
device::gpu::CollectiveInitializer::instance().collective_handle(); MS_EXCEPTION_IF_NULL(collective_handle_); + + auto comm_stream_attr = AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("stream_id"); + if (comm_stream_attr) { + comm_stream_ = reinterpret_cast(GetValue(comm_stream_attr)); + MS_EXCEPTION_IF_NULL(comm_stream_); + } return true; } From ebe6efff719bec947f2f73d42cdd542ff67f932e Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 01:42:32 -0400 Subject: [PATCH 275/367] Add Group Normalization --- mindspore/nn/layer/__init__.py | 4 +- mindspore/nn/layer/normalization.py | 73 +++++++++++++++++++++++++++- tests/ut/python/nn/test_batchnorm.py | 12 +++++ 3 files changed, 85 insertions(+), 4 deletions(-) diff --git a/mindspore/nn/layer/__init__.py b/mindspore/nn/layer/__init__.py index 3d729edcd3..cf601f03ff 100644 --- a/mindspore/nn/layer/__init__.py +++ b/mindspore/nn/layer/__init__.py @@ -18,7 +18,7 @@ Layer. The high-level components(Cells) used to construct the neural network. 
""" from .activation import Softmax, LogSoftmax, ReLU, ReLU6, Tanh, GELU, ELU, Sigmoid, PReLU, get_activation, LeakyReLU, HSigmoid, HSwish -from .normalization import BatchNorm1d, BatchNorm2d, LayerNorm +from .normalization import BatchNorm1d, BatchNorm2d, LayerNorm, GroupNorm from .container import SequentialCell, CellList from .conv import Conv2d, Conv2dTranspose from .lstm import LSTM @@ -29,7 +29,7 @@ from .image import ImageGradients, SSIM __all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'PReLU', 'get_activation', 'LeakyReLU', 'HSigmoid', 'HSwish', 'ELU', - 'BatchNorm1d', 'BatchNorm2d', 'LayerNorm', + 'BatchNorm1d', 'BatchNorm2d', 'LayerNorm', 'GroupNorm', 'SequentialCell', 'CellList', 'Conv2d', 'Conv2dTranspose', 'LSTM', diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 2df064353f..cac73d239e 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -18,8 +18,9 @@ from mindspore.ops import functional as F from mindspore.common.parameter import Parameter from mindspore.common.initializer import initializer from mindspore.common.tensor import Tensor -import mindspore.common.dtype as DT +import mindspore.common.dtype as mstype import mindspore.context as context +from mindspore._checkparam import check_int_positive, check_bool,check_typename from mindspore._extends import cell_attr_register from ..cell import Cell @@ -58,7 +59,7 @@ class _BatchNorm(Cell): if context.get_context("enable_ge"): self.is_ge_backend = True - self.momentum = Tensor(1.0 - momentum, DT.float32) + self.momentum = Tensor(1.0 - momentum, mstype.float32) self.bn_train = P.BatchNorm(is_training=True, epsilon=self.eps) else: @@ -289,3 +290,71 @@ class LayerNorm(Cell): s = 'normalized_shape={}, begin_norm_axis={}, begin_params_axis={}, gamma{}, beta={}'.format( self.normalized_shape, self.begin_norm_axis, self.begin_params_axis, self.gamma, self.beta) return s + +class GroupNorm(Cell): 
+ r""" + Group Normalization over a mini-batch of inputs. + + Group normalization is widely used in recurrent neural networks. It applies + normalization over a mini-batch of inputs for each single training case as described + in the paper `Group Normalization `_. Group normalization + divides the channels into groups and computes within each group the mean and variance for normalization, + and it performs very stable over a wide range of batch size. It can be described using the following formula. + + .. math:: + y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + Args: + num_groups (int): The number of groups to be divided along the channel dimension. + num_channels (int): The number of channels per group. + eps (float): A value added to the denominator for numerical stability. Default: 1e-5. + affine (bool): A bool value, this layer will has learnable affine parameters when set to true. Default: True. + + Inputs: + - **input_x** (Tensor) - The input feature with shape [N, C, H, W]. + + Outputs: + Tensor, the normalized and scaled offset tensor, has the same shape and data type as the `input_x`. 
+ + Examples: + >>> goup_norm_op = nn.GroupNorm(16, 64) + >>> x = Tensor(np.ones([1, 64, 256, 256], np.float32)) + >>> goup_norm_op(x) + """ + def __init__(self, num_groups, num_channels, eps=1e-05, affine=True): + super(GroupNorm, self).__init__() + self.num_groups = check_int_positive(num_groups) + self.num_channels = check_int_positive(num_channels) + if num_channels % num_groups != 0: + raise ValueError("num_channels should be divided by num_groups") + self.eps = Tensor(check_typename('eps', eps, (float,)),mstype.float32) + self.affine = check_bool(affine) + + gamma = initializer('ones', [num_channels, 1, 1], mstype.float32) + beta = initializer('zeros', [num_channels, 1, 1], mstype.float32) + if self.affine: + self.gamma = Parameter(gamma, name='gamma') + self.beta = Parameter(beta, name='beta') + else: + self.gamma = gamma + self.beta = beta + self.shape = F.shape + self.reshape = F.reshape + self.reduce_mean = P.ReduceMean(keep_dims=True) + self.square = F.square + self.reduce_sum = P.ReduceSum(keep_dims=True) + self.sqrt = P.Sqrt() + + def construct(self, x): + batch,channel,height,width = self.shape(x) + x = self.reshape(x,(batch, self.num_groups,channel*height*width/self.num_groups)) + mean = self.reduce_mean(x, 2) + var = self.reduce_sum(self.square(x - mean), 2) / (channel * height * width / self.num_groups - 1) + std = self.sqrt(var + self.eps) + x = (x - mean) / std + x = self.reshape(x, (batch, channel, height, width)) + output = x * self.gamma + self.beta + return output + + def extend_repr(self): + return 'num_groups={}, num_channels={}'.format(self.num_groups,self.num_channels) \ No newline at end of file diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index eaafdd81b4..efccfa4b33 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py @@ -56,3 +56,15 @@ def test_compile(): net = Net() input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32)) 
_executor.compile(net, input_data) + +class GroupNet(nn.Cell): + def __init__(self): + super(GroupNet, self).__init__() + self.group_bn = nn.GroupNorm() + def construct(self, x): + return self.group_bn(x) + +def test_compile_groupnorm(): + net = nn.GroupNorm(16, 64) + input_data = Tensor(np.random.rand(1,64,256,256).astype(np.float32)) + _executor.compile(net, input_data) \ No newline at end of file From 898acc3201f65b3943afc6b7a187857ad4471a06 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 01:51:36 -0400 Subject: [PATCH 276/367] Add Group Normalization --- mindspore/nn/layer/normalization.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index cac73d239e..58f926cdcf 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -20,7 +20,7 @@ from mindspore.common.initializer import initializer from mindspore.common.tensor import Tensor import mindspore.common.dtype as mstype import mindspore.context as context -from mindspore._checkparam import check_int_positive, check_bool,check_typename +from mindspore._checkparam import check_int_positive, check_bool, check_typename from mindspore._extends import cell_attr_register from ..cell import Cell @@ -327,7 +327,7 @@ class GroupNorm(Cell): self.num_channels = check_int_positive(num_channels) if num_channels % num_groups != 0: raise ValueError("num_channels should be divided by num_groups") - self.eps = Tensor(check_typename('eps', eps, (float,)),mstype.float32) + self.eps = Tensor(check_typename('eps', eps, (float,)), mstype.float32) self.affine = check_bool(affine) gamma = initializer('ones', [num_channels, 1, 1], mstype.float32) @@ -346,8 +346,8 @@ class GroupNorm(Cell): self.sqrt = P.Sqrt() def construct(self, x): - batch,channel,height,width = self.shape(x) - x = self.reshape(x,(batch, self.num_groups,channel*height*width/self.num_groups)) + batch, channel, 
height,width = self.shape(x) + x = self.reshape(x, (batch, self.num_groups, channel*height*width/self.num_groups)) mean = self.reduce_mean(x, 2) var = self.reduce_sum(self.square(x - mean), 2) / (channel * height * width / self.num_groups - 1) std = self.sqrt(var + self.eps) @@ -357,4 +357,6 @@ class GroupNorm(Cell): return output def extend_repr(self): - return 'num_groups={}, num_channels={}'.format(self.num_groups,self.num_channels) \ No newline at end of file + """Display instance object as string.""" + s = 'num_groups={}, num_channels={}'.format(self.num_groups, self.num_channels) + return s \ No newline at end of file From 0b7de6968fe1dcbc9c78f292bdc2b673502b5f2c Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 01:56:12 -0400 Subject: [PATCH 277/367] Add Group Normalization --- mindspore/nn/layer/normalization.py | 4 ++-- tests/ut/python/nn/test_batchnorm.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 58f926cdcf..b286eaae1b 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -346,7 +346,7 @@ class GroupNorm(Cell): self.sqrt = P.Sqrt() def construct(self, x): - batch, channel, height,width = self.shape(x) + batch, channel, height, width = self.shape(x) x = self.reshape(x, (batch, self.num_groups, channel*height*width/self.num_groups)) mean = self.reduce_mean(x, 2) var = self.reduce_sum(self.square(x - mean), 2) / (channel * height * width / self.num_groups - 1) @@ -359,4 +359,4 @@ class GroupNorm(Cell): def extend_repr(self): """Display instance object as string.""" s = 'num_groups={}, num_channels={}'.format(self.num_groups, self.num_channels) - return s \ No newline at end of file + return s diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index efccfa4b33..4bd8c996d6 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py 
@@ -67,4 +67,4 @@ class GroupNet(nn.Cell): def test_compile_groupnorm(): net = nn.GroupNorm(16, 64) input_data = Tensor(np.random.rand(1,64,256,256).astype(np.float32)) - _executor.compile(net, input_data) \ No newline at end of file + _executor.compile(net, input_data) From 04c522d0c6edfe4074c9bdecf16146fbe617644d Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 01:59:32 -0400 Subject: [PATCH 278/367] Add Group Normalization --- tests/ut/python/nn/test_batchnorm.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index 4bd8c996d6..e73b7ebbf0 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py @@ -57,6 +57,7 @@ def test_compile(): input_data = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]).astype(np.float32)) _executor.compile(net, input_data) + class GroupNet(nn.Cell): def __init__(self): super(GroupNet, self).__init__() @@ -64,6 +65,7 @@ class GroupNet(nn.Cell): def construct(self, x): return self.group_bn(x) + def test_compile_groupnorm(): net = nn.GroupNorm(16, 64) input_data = Tensor(np.random.rand(1,64,256,256).astype(np.float32)) From a9426cbd89e2148a813f7bda1a36ae9b3194bb98 Mon Sep 17 00:00:00 2001 From: chenjianping Date: Thu, 16 Apr 2020 06:18:25 +0000 Subject: [PATCH 279/367] build.bat support input -j value --- build.bat | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/build.bat b/build.bat index b4f3420e14..6ada61cab2 100644 --- a/build.bat +++ b/build.bat @@ -20,7 +20,11 @@ IF NOT %errorlevel% == 0 ( goto run_fail ) -cmake --build . --target all -- -j6 +IF "%1%" == "" ( + cmake --build . --target all -- -j6 + ) ELSE ( + cmake --build . 
--target all -- -j%1% + ) IF NOT %errorlevel% == 0 ( goto run_fail ) From ee5f3fa2406104cec0c90afc72d4fa7b2f923947 Mon Sep 17 00:00:00 2001 From: wenchunjiang Date: Thu, 16 Apr 2020 11:23:10 +0800 Subject: [PATCH 280/367] add pass to insert memcpy_async for get_next outputs --- .../pre_activate/ascend/ascend_helper.cc | 12 +++ .../ccsrc/pre_activate/ascend/ascend_helper.h | 2 + .../insert_memcpy_async_for_getnext.cc | 74 +++++++++++++++++++ .../insert_memcpy_async_for_getnext.h | 35 +++++++++ .../ascend/ir_fission/add_memcpy_async.cc | 13 +--- .../insert_memcpy_async_for_getnext.cc | 67 +++++++++++++++++ .../insert_memcpy_async_for_getnext.py | 55 ++++++++++++++ 7 files changed, 246 insertions(+), 12 deletions(-) create mode 100644 mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc create mode 100644 mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.h create mode 100644 tests/ut/cpp/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc create mode 100644 tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc index 490a905a45..745ed4460f 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc @@ -360,5 +360,17 @@ AnfNodePtr CreatTupleGetItemNode(const FuncGraphPtr &func_graph, const AnfNodePt AnfAlgo::SetOutputInferTypeAndShape({origin_type}, {origin_shape}, tuple_getitem.get()); return tuple_getitem; } + +AnfNodePtr CreateMemcpyAsyncOp(const FuncGraphPtr &graph, const AnfNodePtr &node) { + MS_EXCEPTION_IF_NULL(graph); + MS_EXCEPTION_IF_NULL(node); + auto prim = std::make_shared(kMemCpyAsyncOpName); + std::vector new_node_inputs = {NewValueNode(prim), node}; + auto new_node = graph->NewCNode(new_node_inputs); + MS_EXCEPTION_IF_NULL(new_node); + 
new_node->set_abstract(node->abstract()); + new_node->set_scope(node->scope()); + return new_node; +} } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h index 8925a52a7d..7f5e86d726 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h @@ -65,6 +65,8 @@ AnfNodePtr InsertTransOpForOutput(const FuncGraphPtr &func_graph, const AnfNodeP CNodePtr InsertCastForInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode); AnfNodePtr CreatTupleGetItemNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, size_t output_idx); + +AnfNodePtr CreateMemcpyAsyncOp(const FuncGraphPtr &graph, const AnfNodePtr &node); } // namespace opt } // namespace mindspore #endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_ASCEND_HELPER_H_ diff --git a/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc b/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc new file mode 100644 index 0000000000..5065bab0f2 --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.h" +#include +#include +#include "pre_activate/ascend/ascend_helper.h" +#include "session/anf_runtime_algorithm.h" + +namespace mindspore { +namespace opt { +AnfNodePtr InsertMemcpyAsyncForGetNextOutputs(const FuncGraphPtr &func_graph, const AnfNodePtr &node) { + if (func_graph == nullptr || node == nullptr) { + return nullptr; + } + + size_t output_num = AnfAlgo::GetOutputTensorNum(node); + if (output_num == 0) { + MS_LOG(DEBUG) << "Output number is zero, no need to insert memcpy_async!"; + return node; + } + + // getnext output is tuple and dynamic + std::vector make_tuple_inputs; + make_tuple_inputs.push_back(NewValueNode(prim::kPrimMakeTuple)); + + for (size_t output_index = 0; output_index < output_num; ++output_index) { + auto tuple_get_item = CreatTupleGetItemNode(func_graph, node, output_index); + auto new_node = CreateMemcpyAsyncOp(func_graph, tuple_get_item); + if (new_node == nullptr) { + MS_LOG(EXCEPTION) << "Create memcpy_async op failed!"; + } + AnfAlgo::SetNodeAttr(kAttrLabelForInsertStreamActive, MakeValue(true), new_node); + make_tuple_inputs.push_back(new_node); + } + AnfNodePtr make_tuple = func_graph->NewCNode(make_tuple_inputs); + return make_tuple; +} + +const BaseRef InsertMemcpyAsyncForGetNext::DefinePattern() const { + std::shared_ptr Xs = std::make_shared(); + auto prim = std::make_shared(kGetNextOpName); + + return VectorRef({prim, Xs}); +} + +const AnfNodePtr InsertMemcpyAsyncForGetNext::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node, + const EquivPtr &) const { + if (func_graph == nullptr || node == nullptr || !AnfAlgo::IsRealKernel(node)) { + return nullptr; + } + + if (AnfAlgo::HasNodeAttr(kAttrVisited, node)) { + MS_LOG(DEBUG) << "Node op_name[" << kGetNextOpName << "] has visited."; + return nullptr; + } + AnfAlgo::SetNodeAttr(kAttrVisited, MakeValue(true), node); + + return InsertMemcpyAsyncForGetNextOutputs(func_graph, node); +} +} // 
namespace opt +} // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.h b/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.h new file mode 100644 index 0000000000..eb3b78d33f --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_ENHANCER_INSERT_MEMCPY_ASYNC_FOR_GETNEXT_H_ +#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_ENHANCER_INSERT_MEMCPY_ASYNC_FOR_GETNEXT_H_ + +#include "pre_activate/common/optimizer.h" + +namespace mindspore { +namespace opt { +class InsertMemcpyAsyncForGetNext : public PatternProcessPass { + public: + explicit InsertMemcpyAsyncForGetNext(bool multigraph = true) + : PatternProcessPass("insert_memcpy_async_for_getnext", multigraph) {} + ~InsertMemcpyAsyncForGetNext() override = default; + const BaseRef DefinePattern() const override; + const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; +}; +} // namespace opt +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_ENHANCER_INSERT_MEMCPY_ASYNC_FOR_GETNEXT_H_ diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fission/add_memcpy_async.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fission/add_memcpy_async.cc index 
2ab11b6032..bbea944750 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fission/add_memcpy_async.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fission/add_memcpy_async.cc @@ -18,22 +18,11 @@ #include "utils/utils.h" #include "session/anf_runtime_algorithm.h" #include "optimizer/opt.h" +#include "pre_activate/ascend/ascend_helper.h" namespace mindspore { namespace opt { namespace { -AnfNodePtr CreateMemcpyAsyncOp(const FuncGraphPtr &graph, const AnfNodePtr &node) { - MS_EXCEPTION_IF_NULL(graph); - MS_EXCEPTION_IF_NULL(node); - auto prim = std::make_shared(kMemCpyAsyncOpName); - std::vector new_node_inputs = {NewValueNode(prim), node}; - auto new_node = graph->NewCNode(new_node_inputs); - MS_EXCEPTION_IF_NULL(new_node); - new_node->set_abstract(node->abstract()); - new_node->set_scope(node->scope()); - return new_node; -} - const AnfNodePtr AddMemcpyAsyncIfInputIsUsedByOthers(const FuncGraphPtr &graph, const CNodePtr &node) { MS_EXCEPTION_IF_NULL(graph); MS_EXCEPTION_IF_NULL(node); diff --git a/tests/ut/cpp/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc b/tests/ut/cpp/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc new file mode 100644 index 0000000000..2616354e4c --- /dev/null +++ b/tests/ut/cpp/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc @@ -0,0 +1,67 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/backend_common_test.h" +#include "common/py_func_graph_fetcher.h" +#include "session/ascend_session.h" +#include "pipeline/resource.h" +#include "operator/ops.h" +#include "ir/manager.h" +#include "debug/anf_ir_dump.h" +#include "utils/utils.h" +#include "kernel/kernel_build_info.h" +#include "pre_activate/common/optimizer.h" +#include "pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.h" + +namespace mindspore { +namespace opt { +using KernelBuildInfoBuilder = kernel::KernelBuildInfo::KernelBuildInfoBuilder; + +class TestHWInsertMemcpyAsyncForGetNext : public BackendCommon { + public: + TestHWInsertMemcpyAsyncForGetNext() : get_py_fun_("gtest_input.pre_activate.insert_memcpy_async_for_getnext", true) {} + ~TestHWInsertMemcpyAsyncForGetNext() override = default; + + public: + UT::PyFuncGraphFetcher get_py_fun_; +}; + +TEST_F(TestHWInsertMemcpyAsyncForGetNext, test_insert_memcpy_async_for_getnext_multi_output) { + FuncGraphPtr g_before = get_py_fun_.CallAndParseRet("test_insert_memcpy_async_for_getnext", "getnext_multi_output_before"); + + AbstractBasePtrList args_spec_list{}; + auto kernel_graph = GetKernelGraph(g_before, args_spec_list); + + KernelBuildInfoBuilder builder; + builder.SetOutputsFormat({kOpFormat_DEFAULT, kOpFormat_DEFAULT}); + builder.SetOutputsDeviceType({kFloat32->type_id(), kInt32->type_id()}); + auto ret = kernel_graph->get_return(); + EXPECT_NE(ret->input(1), nullptr); + EXPECT_NE(ret->input(1)->cast()->input(1), nullptr); + auto get_next = ret->input(1)->cast()->input(1); + get_next->set_kernel_info(std::make_shared()); + AnfAlgo::SetSelectKernelBuildInfo(builder.Build(), get_next.get()); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + auto new_graph = optimizer->Optimize(kernel_graph); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_insert_memcpy_async_for_getnext", "getnext_multi_output_after"); + 
EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} +} // namespace opt +} // namespace mindspore \ No newline at end of file diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py new file mode 100644 index 0000000000..902fd636de --- /dev/null +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py @@ -0,0 +1,55 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +from mindspore.ops import operations as P +from mindspore.ops import Primitive +import mindspore as ms + +get_next = P.GetNext([ms.float32, ms.int32], [[32, 64], [32]], 2, "") +memcpy_async = Primitive('memcpy_async') +make_tuple = Primitive('make_tuple') +tuple_getitem = Primitive('tuple_getitem') + + +class FnDict: + def __init__(self): + self.fnDict = {} + + def __call__(self, fn): + self.fnDict[fn.__name__] = fn + + def __getitem__(self, name): + return self.fnDict[name] + + +def test_insert_memcpy_async_for_getnext(tag): + fns = FnDict() + + @fns + def getnext_multi_output_before(): + res = get_next() + return res + + @fns + def getnext_multi_output_after(): + res = get_next() + data = tuple_getitem(res, 0) + label = tuple_getitem(res, 1) + memcpy_async_data = memcpy_async(data) + memcpy_async_label = memcpy_async(label) + tuple = make_tuple(make_tuple(memcpy_async_data, memcpy_async_label)) + return tuple + + return fns[tag] From 57cd9f81886d59e2ee4c4c650ffcd11c792db53c Mon Sep 17 00:00:00 2001 From: yangzhenzhang <285824651@qq.com> Date: Thu, 16 Apr 2020 14:32:43 +0800 Subject: [PATCH 281/367] add parallel op for sigmoidloss --- mindspore/ccsrc/parallel/dynamic_creator.h | 1 + .../ccsrc/parallel/ops_info/arithmetic_info.h | 9 ++ mindspore/ccsrc/parallel/ops_info/ops_utils.h | 1 + .../ccsrc/parallel/step_auto_parallel.cc | 1 + .../test_sigmoid_cross_entropy_with_logits.py | 83 +++++++++++++++++++ 5 files changed, 95 insertions(+) create mode 100644 tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py diff --git a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h index 953380fb32..723c018d7f 100644 --- a/mindspore/ccsrc/parallel/dynamic_creator.h +++ b/mindspore/ccsrc/parallel/dynamic_creator.h @@ -127,6 +127,7 @@ REGISTER(NegInfo); REGISTER(BatchMatMulInfo); REGISTER(ExpandDimsInfo); REGISTER(SqueezeInfo); 
+REGISTER(SigmoidCrossEntropyWithLogitsInfo); } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h index 376a1fb4cf..78dfc23803 100644 --- a/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h +++ b/mindspore/ccsrc/parallel/ops_info/arithmetic_info.h @@ -120,6 +120,15 @@ class AssignSubInfo : public ArithmeticBase { : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} ~AssignSubInfo() override = default; }; + +// All dimensions can be split arbitrarily, but the split method of Logits should be the same as that of label. +class SigmoidCrossEntropyWithLogitsInfo : public ArithmeticBase { + public: + SigmoidCrossEntropyWithLogitsInfo(const std::string& name, const Shapes& inputs_shape, const Shapes& outputs_shape, + const PrimitiveAttrs& attrs) + : ArithmeticBase(name, inputs_shape, outputs_shape, attrs, std::make_shared(false)) {} + ~SigmoidCrossEntropyWithLogitsInfo() override = default; +}; } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index 50920e5954..0b52d8e83c 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -138,6 +138,7 @@ constexpr char ALL_GATHER[] = "AllGather"; constexpr char REDUCE_SCATTER[] = "ReduceScatter"; constexpr char CONCAT[] = "Concat"; constexpr char SOFTMAX_CROSS_ENTROPY_WITH_LOGITS[] = "SoftmaxCrossEntropyWithLogits"; +constexpr char SIGMOID_CROSS_ENTROPY_WITH_LOGITS[] = "SigmoidCrossEntropyWithLogits"; constexpr char MATMUL[] = "MatMul"; constexpr char GELU[] = "Gelu"; constexpr char TANH[] = "Tanh"; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index 5caf6573f2..1d52eac82d 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ 
b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -78,6 +78,7 @@ std::vector splittable_op_ = {MATMUL, FUSE_BATCH_NORM, POOLING, SOFTMAX_CROSS_ENTROPY_WITH_LOGITS, + SIGMOID_CROSS_ENTROPY_WITH_LOGITS, MAX_POOL_WITH_ARGMAX, SIMPLE_MEAN, FLATTEN, diff --git a/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py b/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py new file mode 100644 index 0000000000..d59d053b07 --- /dev/null +++ b/tests/ut/python/parallel/test_sigmoid_cross_entropy_with_logits.py @@ -0,0 +1,83 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +import mindspore as ms +from mindspore import context, Tensor, Parameter +from mindspore.nn import Cell, TrainOneStepCell, Momentum +from mindspore.ops import operations as P +from mindspore.common.api import _executor + + +class Net(Cell): + def __init__(self, mul_weight, strategy1=None, strategy2=None): + super().__init__() + self.mul = P.Mul().set_strategy(strategy1) + self.loss = P.SigmoidCrossEntropyWithLogits().set_strategy(strategy2) + self.mul_weight = Parameter(mul_weight, "w1") + + def construct(self, x, b): + out = self.mul(x, self.mul_weight) + out = self.loss(out, b) + return out + + +_x = Tensor(np.ones([128, 64]), dtype=ms.float32) +_w1 = Tensor(np.ones([128, 64]), dtype=ms.float32) +_b = Tensor(np.ones([128, 64]), dtype=ms.float32) + + +def compile(net): + optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) + train_net = TrainOneStepCell(net, optimizer) + _executor.compile(train_net, _x, _b) + context.reset_auto_parallel_context() + + +def test_sigmoid_cross_entropy_with_logits_data_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((16, 1), (16, 1)) + strategy2 = ((16, 1), (16, 1)) + net = Net(_w1, strategy1, strategy2) + compile(net) + + +def test_sigmoid_cross_entropy_with_logits_model_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((1, 16), (1, 16)) + strategy2 = ((1, 16), (1, 16)) + net = Net(_w1, strategy1, strategy2) + compile(net) + + +def test_sigmoid_cross_entropy_with_logits_hybrid_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 8), (2, 8)) + strategy2 = ((2, 8), (2, 8)) + net = Net(_w1, strategy1, strategy2) + compile(net) + + +def test_sigmoid_cross_entropy_with_logits_auto_parallel(): + 
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0) + net = Net(_w1) + compile(net) + + +def test_sigmoid_cross_entropy_with_logits_repeat_calc(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 8), (2, 8)) + strategy2 = ((2, 2), (2, 2)) + net = Net(_w1, strategy1, strategy2) + compile(net) From 77fd2e841eab957a08b766a210e2db45942263a4 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 03:54:32 -0400 Subject: [PATCH 282/367] Add Group Normalization --- mindspore/nn/layer/normalization.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index b286eaae1b..4aafaf031e 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -20,7 +20,7 @@ from mindspore.common.initializer import initializer from mindspore.common.tensor import Tensor import mindspore.common.dtype as mstype import mindspore.context as context -from mindspore._checkparam import check_int_positive, check_bool, check_typename +from mindspore._checkparam import check_int_positive, check_bool, check_typename from mindspore._extends import cell_attr_register from ..cell import Cell @@ -293,7 +293,7 @@ class LayerNorm(Cell): class GroupNorm(Cell): r""" - Group Normalization over a mini-batch of inputs. + Group Normalization over a mini-batch of inputs. Group normalization is widely used in recurrent neural networks. 
It applies normalization over a mini-batch of inputs for each single training case as described From 257464ef7b3ac24ed2fb1d0760fd93b44c8bfd17 Mon Sep 17 00:00:00 2001 From: maoweiyong Date: Thu, 16 Apr 2020 16:16:17 +0800 Subject: [PATCH 283/367] fix gpu op register --- mindspore/ops/_op_impl/akg/gpu/squeeze.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ops/_op_impl/akg/gpu/squeeze.py b/mindspore/ops/_op_impl/akg/gpu/squeeze.py index 378d096d5c..cebf6ff1f3 100644 --- a/mindspore/ops/_op_impl/akg/gpu/squeeze.py +++ b/mindspore/ops/_op_impl/akg/gpu/squeeze.py @@ -15,7 +15,7 @@ """Squeeze op""" from mindspore.ops.op_info_register import op_info_register, AkgRegOp, DataType -squeeze_op_info = AkgRegOp("SqueezeGrad") \ +squeeze_op_info = AkgRegOp("Squeeze") \ .fusion_type("OPAQUE") \ .input(0, "x") \ .output(0, "output") \ From 8bdcd8c267bbff59c0c97198067263b202395084 Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Thu, 16 Apr 2020 16:18:47 +0800 Subject: [PATCH 284/367] modify test_cell_bprop.py ut --- tests/ut/python/pynative_mode/test_cell_bprop.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/ut/python/pynative_mode/test_cell_bprop.py b/tests/ut/python/pynative_mode/test_cell_bprop.py index 054afe36c9..da1e14974f 100644 --- a/tests/ut/python/pynative_mode/test_cell_bprop.py +++ b/tests/ut/python/pynative_mode/test_cell_bprop.py @@ -64,14 +64,15 @@ def test_grad_inline_mul_add(): class WithParameter(nn.Cell): def __init__(self): super(WithParameter, self).__init__() - self.param = Parameter(2, 'param') + self.param1 = Parameter(1, 'param1') + self.param2 = Parameter(2, 'param2') def construct(self, x, y): - return self.param * x + y + return self.param1 * self.param2 * x + y def bprop(self, x, y, out, dout): # In this test case, The user defined bprop is wrong defined purposely to distinguish from ad result - return self.param * dout, 2 * y + return self.param1 * self.param2 * 
dout, 2 * y def test_with_param(): with_param = WithParameter() From c95a04f215d5316a1be1e716c52ad0de23a6bd27 Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Thu, 16 Apr 2020 16:10:46 +0800 Subject: [PATCH 285/367] Refactor docker image framework Signed-off-by: leonwanghui --- README.md | 10 +-- docker/mindspore-cpu/0.1.0-alpha/Dockerfile | 18 +---- docker/mindspore-cpu/devel/Dockerfile | 11 +-- docker/mindspore-cpu/runtime/Dockerfile | 64 +++++++++++++++++ docker/mindspore-gpu/0.1.0-alpha/Dockerfile | 19 +---- docker/mindspore-gpu/devel/Dockerfile | 31 ++------ docker/mindspore-gpu/runtime/Dockerfile | 80 +++++++++++++++++++++ 7 files changed, 160 insertions(+), 73 deletions(-) create mode 100644 docker/mindspore-cpu/runtime/Dockerfile create mode 100644 docker/mindspore-gpu/runtime/Dockerfile diff --git a/README.md b/README.md index 169f1985e5..c9abd82da0 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ MindSpore offers build options across multiple backends: | GPU CUDA 10.1 | Ubuntu-x86 | ✔️ | | CPU | Ubuntu-x86 | ✔️ | -For installation using `pip`, take `Ubuntu-x86` and `CPU` build version as an example: +For installation using `pip`, take `CPU` and `Ubuntu-x86` build version as an example: 1. Download whl from [MindSpore download page](https://www.mindspore.cn/versions/en), and install the package. @@ -97,9 +97,11 @@ currently the containerized build options are supported as follows: | :---------------- | :---------------------- | :-- | :---------- | | CPU | `mindspore/mindspore-cpu` | `0.1.0-alpha` | Production environment with pre-installed MindSpore `0.1.0-alpha` CPU release. | | | | `devel` | Development environment provided to build MindSpore (with `CPU` backend) from the source, refer to https://www.mindspore.cn/install/en for installation details. | +| | | `runtime` | Runtime environment provided to install MindSpore binary package with `CPU` backend. 
| | GPU | `mindspore/mindspore-gpu` | `0.1.0-alpha` | Production environment with pre-installed MindSpore `0.1.0-alpha` GPU release. | | | | `devel` | Development environment provided to build MindSpore (with `GPU CUDA10.1` backend) from the source, refer to https://www.mindspore.cn/install/en for installation details. | -| Ascend |
    |
    | Coming soon. | +| | | `runtime` | Runtime environment provided to install MindSpore binary package with `GPU` backend. | +| Ascend |
    |
    | Coming soon. | * CPU @@ -111,7 +113,7 @@ currently the containerized build options are supported as follows: * GPU - For `GPU` backend, please make sure the `nvidia-container-toolkit` has been installed in advance, here are some install guidelines for Ubuntu users: + For `GPU` backend, please make sure the `nvidia-container-toolkit` has been installed in advance, here are some install guidelines for `Ubuntu` users: ``` DISTRIBUTION=$(. /etc/os-release; echo $ID$VERSION_ID) curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | apt-key add - @@ -153,7 +155,7 @@ currently the containerized build options are supported as follows: [ 2. 2. 2. 2.]]] ``` -If anyone wants to learn more about the build process of MindSpore docker images, +If you want to learn more about the building process of MindSpore docker images, please check out `docker` folder for the details. ## Quickstart diff --git a/docker/mindspore-cpu/0.1.0-alpha/Dockerfile b/docker/mindspore-cpu/0.1.0-alpha/Dockerfile index d24d23cf6b..c9fb7c2b88 100644 --- a/docker/mindspore-cpu/0.1.0-alpha/Dockerfile +++ b/docker/mindspore-cpu/0.1.0-alpha/Dockerfile @@ -4,8 +4,7 @@ MAINTAINER leonwanghui # Set env ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 -ENV CMAKE_ROOT_PATH /usr/local/cmake-3.14.1 -ENV PATH ${PYTHON_ROOT_PATH}/bin:${CMAKE_ROOT_PATH}/bin:/usr/local/bin:$PATH +ENV PATH /usr/local/bin:$PATH # Install base tools RUN apt update \ @@ -64,20 +63,5 @@ RUN mkdir -pv /root/.pip \ && echo "trusted-host=mirrors.aliyun.com" >> /root/.pip/pip.conf \ && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf -# Install pip package -RUN pip install --no-cache-dir \ - numpy \ - wheel \ - nose \ - pytest \ - pytest-xdist - -# Install cmake (v3.14.1) -RUN cd /tmp \ - && wget https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-Linux-x86_64.sh \ - && mkdir -p ${CMAKE_ROOT_PATH} \ - && bash ./cmake-3.14.1-Linux-x86_64.sh --prefix=${CMAKE_ROOT_PATH} --exclude-subdir 
--skip-license \ - && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh - # Install MindSpore cpu whl package RUN pip install --no-cache-dir https://ms-release.obs.cn-north-4.myhuaweicloud.com/0.1.0-alpha/MindSpore/cpu/ubuntu-x86/mindspore-0.1.0-cp37-cp37m-linux_x86_64.whl diff --git a/docker/mindspore-cpu/devel/Dockerfile b/docker/mindspore-cpu/devel/Dockerfile index a48326af9b..77c422ef90 100644 --- a/docker/mindspore-cpu/devel/Dockerfile +++ b/docker/mindspore-cpu/devel/Dockerfile @@ -62,15 +62,8 @@ RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ RUN mkdir -pv /root/.pip \ && echo "[global]" > /root/.pip/pip.conf \ && echo "trusted-host=mirrors.aliyun.com" >> /root/.pip/pip.conf \ - && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf - -# Install pip package -RUN pip install --no-cache-dir \ - numpy \ - wheel \ - nose \ - pytest \ - pytest-xdist + && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf \ + && pip install --no-cache-dir wheel # Install cmake (v3.14.1) RUN cd /tmp \ diff --git a/docker/mindspore-cpu/runtime/Dockerfile b/docker/mindspore-cpu/runtime/Dockerfile new file mode 100644 index 0000000000..ccf5f297a0 --- /dev/null +++ b/docker/mindspore-cpu/runtime/Dockerfile @@ -0,0 +1,64 @@ +FROM ubuntu:18.04 + +MAINTAINER leonwanghui + +# Set env +ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 +ENV PATH /usr/local/bin:$PATH + +# Install base tools +RUN apt update \ + && DEBIAN_FRONTEND=noninteractive apt install -y \ + vim \ + wget \ + curl \ + xz-utils \ + net-tools \ + openssh-client \ + git \ + ntpdate \ + tzdata \ + tcl \ + sudo \ + bash-completion + +# Install compile tools +RUN DEBIAN_FRONTEND=noninteractive apt install -y \ + gcc \ + g++ \ + zlibc \ + make \ + libgmp-dev \ + patch \ + autoconf \ + libtool \ + automake \ + flex + +# Set bash +RUN echo "dash dash/sh boolean false" | debconf-set-selections +RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash + +# 
Install python (v3.7.5) +RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ + libgdbm-dev libgdbm-compat-dev liblzma-dev libreadline-dev libsqlite3-dev \ + && cd /tmp \ + && wget https://github.com/python/cpython/archive/v3.7.5.tar.gz \ + && tar -xvf v3.7.5.tar.gz \ + && cd /tmp/cpython-3.7.5 \ + && mkdir -p ${PYTHON_ROOT_PATH} \ + && ./configure --prefix=${PYTHON_ROOT_PATH} \ + && make -j4 \ + && make install -j4 \ + && rm -f /usr/local/bin/python \ + && rm -f /usr/local/bin/pip \ + && ln -s ${PYTHON_ROOT_PATH}/bin/python3.7 /usr/local/bin/python \ + && ln -s ${PYTHON_ROOT_PATH}/bin/pip3.7 /usr/local/bin/pip \ + && rm -rf /tmp/cpython-3.7.5 \ + && rm -f /tmp/v3.7.5.tar.gz + +# Set pip source +RUN mkdir -pv /root/.pip \ + && echo "[global]" > /root/.pip/pip.conf \ + && echo "trusted-host=mirrors.aliyun.com" >> /root/.pip/pip.conf \ + && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf diff --git a/docker/mindspore-gpu/0.1.0-alpha/Dockerfile b/docker/mindspore-gpu/0.1.0-alpha/Dockerfile index 7b82b21a1e..50ca2b9f08 100644 --- a/docker/mindspore-gpu/0.1.0-alpha/Dockerfile +++ b/docker/mindspore-gpu/0.1.0-alpha/Dockerfile @@ -4,9 +4,9 @@ MAINTAINER leonwanghui # Set env ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 -ENV CMAKE_ROOT_PATH /usr/local/cmake-3.14.1 ENV OMPI_ROOT_PATH /usr/local/openmpi-3.1.5 -ENV PATH ${PYTHON_ROOT_PATH}/bin:${CMAKE_ROOT_PATH}/bin:${OMPI_ROOT_PATH}/bin:/usr/local/bin:$PATH +ENV PATH ${OMPI_ROOT_PATH}/bin:/usr/local/bin:$PATH +ENV LD_LIBRARY_PATH ${OMPI_ROOT_PATH}/lib:$LD_LIBRARY_PATH # Install base tools RUN apt update \ @@ -67,21 +67,6 @@ RUN mkdir -pv /root/.pip \ && echo "trusted-host=mirrors.aliyun.com" >> /root/.pip/pip.conf \ && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf -# Install pip package -RUN pip install --no-cache-dir \ - numpy \ - wheel \ - nose \ - pytest \ - pytest-xdist - -# Install cmake (v3.14.1) -RUN cd /tmp \ - && wget 
https://github.com/Kitware/CMake/releases/download/v3.14.1/cmake-3.14.1-Linux-x86_64.sh \ - && mkdir -p ${CMAKE_ROOT_PATH} \ - && bash ./cmake-3.14.1-Linux-x86_64.sh --prefix=${CMAKE_ROOT_PATH} --exclude-subdir --skip-license \ - && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh - # Install openmpi (v3.1.5) RUN cd /tmp \ && wget https://download.open-mpi.org/release/open-mpi/v3.1/openmpi-3.1.5.tar.gz \ diff --git a/docker/mindspore-gpu/devel/Dockerfile b/docker/mindspore-gpu/devel/Dockerfile index 0b07605d9f..113bed5b06 100644 --- a/docker/mindspore-gpu/devel/Dockerfile +++ b/docker/mindspore-gpu/devel/Dockerfile @@ -5,8 +5,8 @@ MAINTAINER leonwanghui # Set env ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 ENV CMAKE_ROOT_PATH /usr/local/cmake-3.14.1 -ENV OMPI_ROOT_PATH /usr/local/openmpi-3.1.5 -ENV PATH ${PYTHON_ROOT_PATH}/bin:${CMAKE_ROOT_PATH}/bin:${OMPI_ROOT_PATH}/bin:/usr/local/bin:$PATH +ENV PATH ${CMAKE_ROOT_PATH}/bin:/usr/local/bin:$PATH +ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH # Install base tools RUN apt update \ @@ -35,9 +35,7 @@ RUN DEBIAN_FRONTEND=noninteractive apt install -y \ autoconf \ libtool \ automake \ - flex \ - libnccl2=2.4.8-1+cuda10.1 \ - libnccl-dev=2.4.8-1+cuda10.1 + flex # Set bash RUN echo "dash dash/sh boolean false" | debconf-set-selections @@ -65,15 +63,8 @@ RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ RUN mkdir -pv /root/.pip \ && echo "[global]" > /root/.pip/pip.conf \ && echo "trusted-host=mirrors.aliyun.com" >> /root/.pip/pip.conf \ - && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf - -# Install pip package -RUN pip install --no-cache-dir \ - numpy \ - wheel \ - nose \ - pytest \ - pytest-xdist + && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf \ + && pip install --no-cache-dir wheel # Install cmake (v3.14.1) RUN cd /tmp \ @@ -81,15 +72,3 @@ RUN cd /tmp \ && mkdir -p ${CMAKE_ROOT_PATH} \ && bash 
./cmake-3.14.1-Linux-x86_64.sh --prefix=${CMAKE_ROOT_PATH} --exclude-subdir --skip-license \ && rm -f /tmp/cmake-3.14.1-Linux-x86_64.sh - -# Install openmpi (v3.1.5) -RUN cd /tmp \ - && wget https://download.open-mpi.org/release/open-mpi/v3.1/openmpi-3.1.5.tar.gz \ - && tar -xvf openmpi-3.1.5.tar.gz \ - && cd /tmp/openmpi-3.1.5 \ - && mkdir -p ${OMPI_ROOT_PATH} \ - && ./configure --prefix=${OMPI_ROOT_PATH} \ - && make -j4 \ - && make install -j4 \ - && rm -rf /tmp/openmpi-3.1.5 \ - && rm -f /tmp/openmpi-3.1.5.tar.gz diff --git a/docker/mindspore-gpu/runtime/Dockerfile b/docker/mindspore-gpu/runtime/Dockerfile new file mode 100644 index 0000000000..9e8dabe594 --- /dev/null +++ b/docker/mindspore-gpu/runtime/Dockerfile @@ -0,0 +1,80 @@ +FROM nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 + +MAINTAINER leonwanghui + +# Set env +ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 +ENV OMPI_ROOT_PATH /usr/local/openmpi-3.1.5 +ENV PATH ${OMPI_ROOT_PATH}/bin:/usr/local/bin:$PATH +ENV LD_LIBRARY_PATH ${OMPI_ROOT_PATH}/lib:$LD_LIBRARY_PATH + +# Install base tools +RUN apt update \ + && DEBIAN_FRONTEND=noninteractive apt install -y \ + vim \ + wget \ + curl \ + xz-utils \ + net-tools \ + openssh-client \ + git \ + ntpdate \ + tzdata \ + tcl \ + sudo \ + bash-completion + +# Install compile tools +RUN DEBIAN_FRONTEND=noninteractive apt install -y \ + gcc \ + g++ \ + zlibc \ + make \ + libgmp-dev \ + patch \ + autoconf \ + libtool \ + automake \ + flex \ + libnccl2=2.4.8-1+cuda10.1 \ + libnccl-dev=2.4.8-1+cuda10.1 + +# Set bash +RUN echo "dash dash/sh boolean false" | debconf-set-selections +RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash + +# Install python (v3.7.5) +RUN apt install -y libffi-dev libssl-dev zlib1g-dev libbz2-dev libncurses5-dev \ + libgdbm-dev libgdbm-compat-dev liblzma-dev libreadline-dev libsqlite3-dev \ + && cd /tmp \ + && wget https://github.com/python/cpython/archive/v3.7.5.tar.gz \ + && tar -xvf v3.7.5.tar.gz \ + && cd /tmp/cpython-3.7.5 \ + && mkdir -p 
${PYTHON_ROOT_PATH} \ + && ./configure --prefix=${PYTHON_ROOT_PATH} \ + && make -j4 \ + && make install -j4 \ + && rm -f /usr/local/bin/python \ + && rm -f /usr/local/bin/pip \ + && ln -s ${PYTHON_ROOT_PATH}/bin/python3.7 /usr/local/bin/python \ + && ln -s ${PYTHON_ROOT_PATH}/bin/pip3.7 /usr/local/bin/pip \ + && rm -rf /tmp/cpython-3.7.5 \ + && rm -f /tmp/v3.7.5.tar.gz + +# Set pip source +RUN mkdir -pv /root/.pip \ + && echo "[global]" > /root/.pip/pip.conf \ + && echo "trusted-host=mirrors.aliyun.com" >> /root/.pip/pip.conf \ + && echo "index-url=http://mirrors.aliyun.com/pypi/simple/" >> /root/.pip/pip.conf + +# Install openmpi (v3.1.5) +RUN cd /tmp \ + && wget https://download.open-mpi.org/release/open-mpi/v3.1/openmpi-3.1.5.tar.gz \ + && tar -xvf openmpi-3.1.5.tar.gz \ + && cd /tmp/openmpi-3.1.5 \ + && mkdir -p ${OMPI_ROOT_PATH} \ + && ./configure --prefix=${OMPI_ROOT_PATH} \ + && make -j4 \ + && make install -j4 \ + && rm -rf /tmp/openmpi-3.1.5 \ + && rm -f /tmp/openmpi-3.1.5.tar.gz From efedcfb5303076de2a1754d17a4e095055bc941d Mon Sep 17 00:00:00 2001 From: huangdongrun Date: Tue, 14 Apr 2020 21:28:42 +0800 Subject: [PATCH 286/367] add test case of Bprop and tester add test case for grad concat fix usage in test framework fix testcase format code --- .../utils/block_util.py | 23 +-- .../python/parameter_feature/test_var_grad.py | 143 +++++++++++++++++- 2 files changed, 136 insertions(+), 30 deletions(-) diff --git a/tests/mindspore_test_framework/utils/block_util.py b/tests/mindspore_test_framework/utils/block_util.py index b4a926c15d..75946c3559 100644 --- a/tests/mindspore_test_framework/utils/block_util.py +++ b/tests/mindspore_test_framework/utils/block_util.py @@ -65,32 +65,11 @@ class IthOutputCell(nn.Cell): self.output_index = output_index def construct(self, *inputs): - raise NotImplementedError - - def construct1(self, x1): - predict = self.network(x1)[self.output_index] - return predict - - def construct2(self, x1, x2): - predict = self.network(x1, 
x2)[self.output_index] - return predict - - def construct3(self, x1, x2, x3): - predict = self.network(x1, x2, x3)[self.output_index] - return predict - - def construct4(self, x1, x2, x3, x4): - predict = self.network(x1, x2, x3, x4)[self.output_index] - return predict - - def construct5(self, x1, x2, x3, x4, x5): - predict = self.network(x1, x2, x3, x4, x5)[self.output_index] + predict = self.network(*inputs)[self.output_index] return predict def get_output_cell(network, num_input, output_index, training=True): net = IthOutputCell(network, output_index) - f = getattr(net, 'construct%d' % num_input) - setattr(net, "construct", f) set_block_training(net, training) return net diff --git a/tests/ut/python/parameter_feature/test_var_grad.py b/tests/ut/python/parameter_feature/test_var_grad.py index 12c05d0594..528456d02e 100644 --- a/tests/ut/python/parameter_feature/test_var_grad.py +++ b/tests/ut/python/parameter_feature/test_var_grad.py @@ -24,11 +24,14 @@ from mindspore.common import dtype as mstype context.set_context(mode=context.GRAPH_MODE) + def test_net_vargs_expand(): class AddNet(Cell): def __init__(self): super(AddNet, self).__init__() - self.w = Parameter(Tensor(np.ones((3, 4, 5), np.float32)), "w2", requires_grad=True) + self.w = Parameter( + Tensor(np.ones((3, 4, 5), np.float32)), "w2", requires_grad=True) + def construct(self, x, y): return x + y x = Tensor(np.random.normal(0, 1, [3, 4, 5]).astype(np.float32)) @@ -37,22 +40,59 @@ def test_net_vargs_expand(): net = AddNet() out = C.grad_all_with_sens(net, net.trainable_params())(x, y, sens) + class VarNet(Cell): def __init__(self, net): super(VarNet, self).__init__() - self.b = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b", requires_grad=True) - self.w = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "w", requires_grad=True) + self.b = Parameter( + Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b", requires_grad=True) + self.w = Parameter( + Tensor(np.ones([3, 4, 5]), 
dtype=mstype.float32), "w", requires_grad=True) self.net = net + def construct(self, *args): return self.net(*args)*self.w + self.b - + + class SecondNet(Cell): def __init__(self): super(SecondNet, self).__init__() - self.b2 = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b2", requires_grad=True) + self.b2 = Parameter( + Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b2", requires_grad=True) + def construct(self, *args): res = args[0] + args[1] return res + self.b2 + + +class Bprop(Cell): + def __init__(self, func, wrt_params, params, grad_op, sens=None): + super(Bprop, self).__init__(auto_prefix=False) + self.func = func + self.wrt_params = wrt_params + self.params = None + if self.wrt_params and params: + self.params = ParameterTuple(params) + self.grad = grad_op + self.with_sens = False + self.sens = sens + if sens: + self.sens = Tensor(sens, dtype=mstype.float32) + self.with_sens = True + + def construct(self, *inputs): + # pylint: disable=no-else-return + if self.wrt_params: + if self.with_sens: + return self.grad(self.func, self.params)(*inputs, self.sens) + else: + return self.grad(self.func, self.params)(*inputs) + elif self.with_sens: + return self.grad(self.func)(*inputs, self.sens) + else: + return self.grad(self.func)(*inputs) + + def test_all_var_args_grad_with_sens(): """"test grad_by_list_with_sens with all var args input""" class GradNet(Cell): @@ -60,6 +100,7 @@ def test_all_var_args_grad_with_sens(): super(GradNet, self).__init__() self.weights = ParameterTuple(net.trainable_params()) self.net = net + def construct(self, *inputs): return C.grad_by_list_with_sens(self.net, self.weights)(*inputs) x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) @@ -69,12 +110,14 @@ def test_all_var_args_grad_with_sens(): grad_net = GradNet(net) out = grad_net(x, y, sens) + def test_grad_list_var_args(): class GradNet(Cell): def __init__(self, net): super(GradNet, self).__init__() self.weights = ParameterTuple(net.trainable_params()) self.net = 
net + def construct(self, *inputs): return C.grad_by_list(self.net, self.weights)(*inputs) x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) @@ -83,12 +126,14 @@ def test_grad_list_var_args(): grad_net = GradNet(net) out = grad_net(x, y) + def test_grad_all_var_args(): class GradNet(Cell): def __init__(self, net): super(GradNet, self).__init__() self.weights = ParameterTuple(net.trainable_params()) self.net = net + def construct(self, *inputs): return C.grad_all(self.net)(*inputs) x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) @@ -97,12 +142,14 @@ def test_grad_all_var_args(): grad_net = GradNet(net) out = grad_net(x, y) + def test_grad_all_var_args_with_sens(): class GradNet(Cell): def __init__(self, net): super(GradNet, self).__init__() self.weights = ParameterTuple(net.trainable_params()) self.net = net + def construct(self, *inputs): return C.grad_all_with_sens(self.net)(*inputs) x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) @@ -112,12 +159,14 @@ def test_grad_all_var_args_with_sens(): grad_net = GradNet(net) out = grad_net(x, y, sens) + def test_grad_var_args_with_sens(): class GradNet(Cell): def __init__(self, net): super(GradNet, self).__init__() self.weights = ParameterTuple(net.trainable_params()) self.net = net + def construct(self, *inputs): return C.grad_with_sens(self.net)(*inputs) x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) @@ -127,27 +176,34 @@ def test_grad_var_args_with_sens(): grad_net = GradNet(net) out = grad_net(x, y, sens) + def test_var_args_grad(): class VarNet(Cell): def __init__(self, net): super(VarNet, self).__init__() - self.b = Parameter(Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b", requires_grad=True) + self.b = Parameter( + Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b", requires_grad=True) self.net = net + def construct(self, *args): return self.net(*args) + self.b - + class SecondNet(Cell): def __init__(self): super(SecondNet, self).__init__() - self.b2 = Parameter(Tensor(np.ones([3, 4, 5]), 
dtype=mstype.float32), "b2", requires_grad=True) + self.b2 = Parameter( + Tensor(np.ones([3, 4, 5]), dtype=mstype.float32), "b2", requires_grad=True) + def construct(self, *args): res = args[0] + args[1] return res + self.b2 + class GradNet(Cell): def __init__(self, net): super(GradNet, self).__init__() self.net = net self.weights = ParameterTuple(net.trainable_params()) + def construct(self, x, y, sens): return C.grad_by_list_with_sens(self.net, self.weights)(x, y, sens) x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) @@ -164,12 +220,14 @@ def test_var_args_positional(): def __init__(self, net): super(VarNet, self).__init__() self.net = net + def construct(self, x, y): return self.net(x, y)*x class SecondNet(Cell): def __init__(self): super(SecondNet, self).__init__() + def construct(self, *args): return args[0] + args[1] @@ -178,6 +236,7 @@ def test_var_args_positional(): super(GradNet, self).__init__() self.net = net self.weights = ParameterTuple(net.trainable_params()) + def construct(self, x, y): return C.grad_all(self.net)(x, y) x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) @@ -185,3 +244,71 @@ def test_var_args_positional(): net = VarNet(SecondNet()) grad_net = GradNet(net) out = grad_net(x, y) + + +def test_grad_within_if_else(): + class GradNet(Cell): + def __init__(self, net): + super(GradNet, self).__init__() + self.weights = ParameterTuple(net.trainable_params()) + self.net = net + grad_op = C.GradOperation( + name='grad', get_all=False, get_by_list=True, sens_param=True) + self.grad = Bprop(self.net, True, self.weights, grad_op, 1.0) + + def construct(self, *inputs): + return self.grad(*inputs) + x = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + y = Tensor(np.ones([3, 4, 5]), dtype=mstype.float32) + sens = Tensor(1.0, dtype=mstype.float32) + net = VarNet(SecondNet()) + grad_net = GradNet(net) + out = grad_net(x, y) + print("test_grad_var_args_with_sens out=", out) + + +def test_grad_for_concat(): + class GradNet(Cell): + def 
__init__(self, net): + super(GradNet, self).__init__() + self.weights = ParameterTuple(net.trainable_params()) + self.net = net + grad_op = C.GradOperation( + name='grad', get_all=True, get_by_list=False, sens_param=True) + self.grad = Bprop(self.net, False, self.weights, grad_op) + + def construct(self, *inputs): + return self.grad(*inputs) + + class Concat(Cell): + def __init__(self, axis): + super().__init__() + self.concat = P.Concat(axis=axis) + + def construct(self, *input1): + return self.concat(input1) + + class ConcatFactory: + def __init__(self, input_shape, axis, dtype=np.float32): + super(ConcatFactory, self).__init__() + self.inputs_np = [] + for s in input_shape: + self.inputs_np.append(np.random.randn(*s).astype(dtype)) + self.axis = axis + self.out_numpy = np.concatenate(self.inputs_np, axis=self.axis) + self.out_grad_np = self.out_numpy + + def grad_mindspore_impl(self): + inputs = [] + for i in self.inputs_np: + inputs.append(Tensor(i)) + net = Concat(axis=self.axis) + grad_net = GradNet(net) + grad_net.set_train() + input_grad = grad_net(*inputs, Tensor(self.out_grad_np)) + + def grad_cmp(self): + input_grad_mindspore = self.grad_mindspore_impl() + fact = ConcatFactory(input_shape=( + (2, 184320, 1), (2, 46080, 1), (2, 11520, 1), (2, 2880, 1), (2, 720, 1)), axis=1) + fact.grad_cmp() From 80978cf3ccee7570ee77dbf9f1ab3a043ebab0ca Mon Sep 17 00:00:00 2001 From: buxue Date: Thu, 9 Apr 2020 19:07:14 +0800 Subject: [PATCH 287/367] support operator ** // % for scalar and tensor, and in not in for dict, ang str concat --- mindspore/_extends/parse/resources.py | 10 +-- .../ccsrc/operator/cc_implementations.cc | 40 ++++++++--- mindspore/ccsrc/operator/cc_implementations.h | 3 +- .../ccsrc/operator/composite/do_signature.cc | 25 ++++--- mindspore/ccsrc/operator/ops.cc | 4 ++ mindspore/ccsrc/operator/ops.h | 6 +- mindspore/ccsrc/operator/prim_nn.cc | 10 +-- mindspore/ccsrc/operator/prim_statement.cc | 31 +++++++++ mindspore/ccsrc/operator/prim_structures.cc 
| 63 ++++++++++------- mindspore/ccsrc/operator/prim_to_function.cc | 59 ++++++++-------- .../ccsrc/pipeline/static_analysis/prim.cc | 5 ++ .../ccsrc/pipeline/static_analysis/prim.h | 6 ++ .../ops/composite/multitype_ops/__init__.py | 6 ++ .../ops/composite/multitype_ops/add_impl.py | 21 ++++-- .../ops/composite/multitype_ops/div_impl.py | 6 +- .../composite/multitype_ops/floordiv_impl.py | 50 ++++++++++++++ .../ops/composite/multitype_ops/mod_impl.py | 50 ++++++++++++++ .../ops/composite/multitype_ops/mul_impl.py | 6 +- .../ops/composite/multitype_ops/pow_impl.py | 50 ++++++++++++++ .../ops/composite/multitype_ops/sub_impl.py | 6 +- mindspore/ops/functional.py | 8 ++- mindspore/ops/operations/math_ops.py | 4 +- .../ut/python/pipeline/parse/test_operator.py | 69 +++++++++++++++++++ 23 files changed, 435 insertions(+), 103 deletions(-) create mode 100644 mindspore/ops/composite/multitype_ops/floordiv_impl.py create mode 100644 mindspore/ops/composite/multitype_ops/mod_impl.py create mode 100644 mindspore/ops/composite/multitype_ops/pow_impl.py diff --git a/mindspore/_extends/parse/resources.py b/mindspore/_extends/parse/resources.py index 5dd24ccf80..9fb357597e 100644 --- a/mindspore/_extends/parse/resources.py +++ b/mindspore/_extends/parse/resources.py @@ -83,9 +83,9 @@ convert_object_map = { T.mul: multitype_ops.mul, T.truediv: multitype_ops.div, T.getitem: multitype_ops.getitem, - T.floordiv: NO_IMPLEMENT, - T.mod: F.scalar_mod, - T.pow: F.scalar_pow, + T.floordiv: multitype_ops.floordiv, + T.mod: multitype_ops.mod, + T.pow: multitype_ops.pow_, T.matmul: F.dot, T.lshift: NO_IMPLEMENT, T.rshift: NO_IMPLEMENT, @@ -104,8 +104,8 @@ convert_object_map = { T.ge: multitype_ops.greater_equal, T.is_: F.is_, T.is_not: F.is_not, - T.contains: NO_IMPLEMENT, - T.not_contains: NO_IMPLEMENT, + T.contains: F.in_dict, + T.not_contains: F.not_in_dict, # system function T.len: M.ms_len, diff --git a/mindspore/ccsrc/operator/cc_implementations.cc 
b/mindspore/ccsrc/operator/cc_implementations.cc index 5ff49758b4..62b23b346f 100644 --- a/mindspore/ccsrc/operator/cc_implementations.cc +++ b/mindspore/ccsrc/operator/cc_implementations.cc @@ -103,7 +103,7 @@ T InnerScalarMul(T x, T y) { } template -T InnerScalarDiv(T x, T y) { +float InnerScalarDiv(T x, T y) { if (y == 0) { MS_LOG(EXCEPTION) << "Divisor could not be zero"; } @@ -111,23 +111,41 @@ T InnerScalarDiv(T x, T y) { MS_LOG(EXCEPTION) << "Overflow of the div of two signed number x: " << std::to_string(x) << ", y: " << std::to_string(y) << "."; } - return x / y; + return static_cast(x) / static_cast(y); } -int32_t InnerScalarMod(int32_t x, int32_t y) { +template +T InnerScalarFloordiv(T x, T y) { + auto ret = std::floor(InnerScalarDiv(x, y)); + if (std::is_integral::value) { + return static_cast(ret); + } + return ret; +} + +template +T InnerScalarMod(T x, T y) { if (y == 0) { MS_LOG(EXCEPTION) << "Could not mod to zero."; } - if (IsSignedIntOverflow(x, y, OpType::MOD)) { + if (std::is_integral::value && std::is_signed::value && IsSignedIntOverflow(x, y, OpType::MOD)) { MS_LOG(EXCEPTION) << "Overflow of the mod of two signed number x: " << std::to_string(x) << ", y: " << std::to_string(y) << "."; } - return x % y; + if (std::is_integral::value) { + return static_cast(x) % static_cast(y); + } + float x_int = std::floor(x); + float y_int = std::ceil(y); + float max = x_int / y_int; + float ret = x - y * max; + return ret; } -float InnerScalarMod(float, float) { MS_LOG(EXCEPTION) << "Float does not support mod operator."; } - -double InnerScalarMod(double, double) { MS_LOG(EXCEPTION) << "Double does not support mod operator."; } +template +T InnerScalarPow(T x, U y) { + return std::pow(x, y); +} template bool InnerScalarEq(T x, U y) { @@ -193,6 +211,8 @@ SCALAR_OP(Sub) SCALAR_OP(Mul) SCALAR_OP(Div) SCALAR_OP(Mod) +SCALAR_OP(Pow) +SCALAR_OP(Floordiv) #define LOGIC_OP(op_t) \ ValuePtr Scalar##op_t(const ValuePtrList& list) { \ @@ -227,6 +247,10 @@ 
SCALAR_OP(Mod) bool sum = InnerScalar##op_t(GetValue(x), GetValue(y)); \ return MakeValue(sum); \ } \ + if (x->isa() && y->isa()) { \ + bool sum = InnerScalar##op_t(GetValue(x), GetValue(y)); \ + return MakeValue(sum); \ + } \ if (x->isa() && y->isa()) { \ bool sum = InnerScalar##op_t(GetValue(x), GetValue(y)); \ return MakeValue(sum); \ diff --git a/mindspore/ccsrc/operator/cc_implementations.h b/mindspore/ccsrc/operator/cc_implementations.h index 2c2936fc92..69981cea7d 100644 --- a/mindspore/ccsrc/operator/cc_implementations.h +++ b/mindspore/ccsrc/operator/cc_implementations.h @@ -37,9 +37,10 @@ ValuePtr ScalarSub(const ValuePtrList& list); ValuePtr ScalarMul(const ValuePtrList& list); ValuePtr ScalarDiv(const ValuePtrList& list); ValuePtr ScalarMod(const ValuePtrList& list); +ValuePtr ScalarPow(const ValuePtrList& list); +ValuePtr ScalarFloordiv(const ValuePtrList& list); ValuePtr ScalarUAdd(const ValuePtrList& list); ValuePtr ScalarUSub(const ValuePtrList& list); -ValuePtr ScalarUSub(const ValuePtrList& list); ValuePtr ScalarLog(const ValuePtrList& list); ValuePtr ScalarEq(const ValuePtrList& list); ValuePtr ScalarLt(const ValuePtrList& list); diff --git a/mindspore/ccsrc/operator/composite/do_signature.cc b/mindspore/ccsrc/operator/composite/do_signature.cc index 62de1c71f2..a4a26377f5 100644 --- a/mindspore/ccsrc/operator/composite/do_signature.cc +++ b/mindspore/ccsrc/operator/composite/do_signature.cc @@ -88,14 +88,17 @@ std::map GetMaxDtypeIndex(const std::vectorisa()) { - m_index = indexs[i]; + + for (const auto& index : indexs) { + AbstractBasePtr arg_value = args_spec_list[index]; + if (arg_value->isa()) { + arg_value = arg_value->cast()->ref(); + } + + if (arg_value->isa()) { + (void)dst_type.insert(std::make_pair(type, index)); + break; } - } - if (args_spec_list[m_index]->isa()) { - (void)dst_type.insert(std::make_pair(type, m_index)); } } return dst_type; @@ -119,15 +122,19 @@ void DoAutoCast(const std::vector& signature, const abstract::Abstrac 
(void)std::transform(signature.begin(), signature.end(), std::back_inserter(dtypes), [](const Signature& sig) { return sig.dtype; }); int empty_dtype_count = std::count(dtypes.begin(), dtypes.end(), SignatureEnumDType::kDTypeEmptyDefaultValue); - if (dtypes.size() == 0 || static_cast(dtypes.size()) == empty_dtype_count) { + if (dtypes.empty() || static_cast(dtypes.size()) == empty_dtype_count) { return; } // Stat the index of the arguments with the largest type in the same SignatureEnumDType. std::map dst_type = GetMaxDtypeIndex(dtypes, args_spec_list); // Identify which arg requires auto cast for (size_t i = 0; i < args_spec_list.size(); ++i) { + AbstractBasePtr arg_value = args_spec_list[i]; + if (arg_value->isa()) { + arg_value = arg_value->cast()->ref(); + } auto it = dst_type.find(dtypes[i]); - if (it == dst_type.end() || it->second == i || !args_spec_list[i]->isa()) { + if (it == dst_type.end() || it->second == i || !arg_value->isa()) { continue; } // get source node for cast diff --git a/mindspore/ccsrc/operator/ops.cc b/mindspore/ccsrc/operator/ops.cc index f3053cac7d..e190d7d0b2 100755 --- a/mindspore/ccsrc/operator/ops.cc +++ b/mindspore/ccsrc/operator/ops.cc @@ -28,6 +28,7 @@ const PrimitivePtr kPrimScalarAdd = std::make_shared("scalar_add"); const PrimitivePtr kPrimScalarSub = std::make_shared("scalar_sub"); const PrimitivePtr kPrimScalarMul = std::make_shared("scalar_mul"); const PrimitivePtr kPrimScalarDiv = std::make_shared("scalar_div"); +const PrimitivePtr kPrimScalarFloordiv = std::make_shared("scalar_floordiv"); const PrimitivePtr kPrimScalarMod = std::make_shared("scalar_mod"); const PrimitivePtr kPrimScalarPow = std::make_shared("scalar_pow"); const PrimitivePtr kPrimScalarTrunc = std::make_shared("scalar_trunc"); @@ -78,6 +79,7 @@ const PrimitivePtr kPrimCreateInstance = std::make_shared("create_ins // Structure const PrimitivePtr kPrimStringEqual = std::make_shared("string_equal"); +const PrimitivePtr kPrimStringConcat = 
std::make_shared("string_concat"); const PrimitivePtr kPrimMakeTuple = std::make_shared("make_tuple"); const PrimitivePtr kPrimMakeList = std::make_shared("make_list"); const PrimitivePtr kPrimMakeDict = std::make_shared("make_dict"); @@ -221,6 +223,8 @@ const PrimitivePtr kPrimBroadcastGradientArgs = std::make_shared("Bro const PrimitivePtr kPrimControlDepend = std::make_shared("ControlDepend"); const PrimitivePtr kPrimIs_ = std::make_shared("is_"); const PrimitivePtr kPrimIsNot = std::make_shared("is_not"); +const PrimitivePtr kPrimInDict = std::make_shared("in_dict"); +const PrimitivePtr kPrimNotInDict = std::make_shared("not_in_dict"); // Comm ops const PrimitivePtr kPrimMirror = std::make_shared("_MirrorOperator"); diff --git a/mindspore/ccsrc/operator/ops.h b/mindspore/ccsrc/operator/ops.h index 2dc7072972..0148e073e0 100755 --- a/mindspore/ccsrc/operator/ops.h +++ b/mindspore/ccsrc/operator/ops.h @@ -34,6 +34,7 @@ extern const PrimitivePtr kPrimScalarAdd; extern const PrimitivePtr kPrimScalarSub; extern const PrimitivePtr kPrimScalarMul; extern const PrimitivePtr kPrimScalarDiv; +extern const PrimitivePtr kPrimScalarFloordiv; extern const PrimitivePtr kPrimScalarMod; extern const PrimitivePtr kPrimScalarPow; extern const PrimitivePtr kPrimScalarTrunc; @@ -84,6 +85,7 @@ extern const PrimitivePtr kPrimCreateInstance; // Structure extern const PrimitivePtr kPrimStringEqual; +extern const PrimitivePtr kPrimStringConcat; extern const PrimitivePtr kPrimMakeTuple; extern const PrimitivePtr kPrimMakeList; extern const PrimitivePtr kPrimMakeDict; @@ -227,8 +229,8 @@ extern const PrimitivePtr kPrimBroadcastGradientArgs; extern const PrimitivePtr kPrimControlDepend; extern const PrimitivePtr kPrimIs_; extern const PrimitivePtr kPrimIsNot; -extern const PrimitivePtr kPrimMinimumGrad; -extern const PrimitivePtr kPrimMaximumGrad; +extern const PrimitivePtr kPrimInDict; +extern const PrimitivePtr kPrimNotInDict; // Comm ops extern const PrimitivePtr kPrimMirror; diff --git 
a/mindspore/ccsrc/operator/prim_nn.cc b/mindspore/ccsrc/operator/prim_nn.cc index 892bf2921e..3591168187 100644 --- a/mindspore/ccsrc/operator/prim_nn.cc +++ b/mindspore/ccsrc/operator/prim_nn.cc @@ -114,12 +114,12 @@ void FusedBatchNormCheckDim(const PrimitivePtr &primitive, const AbstractBasePtr AbstractTensorPtr arg = CheckArg(op_name, args_spec_list, i); ShapePtr arg_shape = dyn_cast(arg->GetShapeTrack()); if (arg_shape == nullptr) { - MS_LOG(EXCEPTION) << "" << op_name << " type of args[" << i << "] should be Shape, but " << arg->ToString(); + MS_LOG(EXCEPTION) << op_name << " type of args[" << i << "] should be Shape, but " << arg->ToString(); } if (i == 0) { if (arg_shape->shape().size() < 2) { - MS_LOG(EXCEPTION) << "" << op_name << " shape of args[" << i + MS_LOG(EXCEPTION) << op_name << " shape of args[" << i << "] should be TensorShape with dimension greater than 1, but shape: " << arg_shape->ToString(); } @@ -127,7 +127,7 @@ void FusedBatchNormCheckDim(const PrimitivePtr &primitive, const AbstractBasePtr } if (arg_shape->shape().size() != 1) { - MS_LOG(EXCEPTION) << "" << op_name << " shape of args[" << i + MS_LOG(EXCEPTION) << op_name << " shape of args[" << i << "] should be TensorShape with dimension: 1, but shape: " << arg_shape->ToString(); } } @@ -159,7 +159,7 @@ AbstractBasePtr InferImplFusedBatchNorm(const AnalysisEnginePtr &, const Primiti MS_LOG(EXCEPTION) << "Arg shape size should >= 1."; } if (arg_shape_list[0] != input_shape_list[1]) { - MS_LOG(EXCEPTION) << "" << op_name << " size of tensor param[" << i << "](which is " << arg_shape_list[0] + MS_LOG(EXCEPTION) << op_name << " size of tensor param[" << i << "](which is " << arg_shape_list[0] << ") should match the second dimension of tensor" " param[0](which is " << input_shape_list[1] << ")."; @@ -378,7 +378,7 @@ AbstractBasePtr InferImplDropoutGenMask(const AnalysisEnginePtr &, const Primiti TypePtr prob_type = keep_prob->element()->BuildType(); if ((prob_type->type_id() != 
kNumberTypeFloat16) && (prob_type->type_id() != kNumberTypeFloat32)) { - MS_LOG(EXCEPTION) << "" << op_name << " keep_prob type should be float16 or float32, but " << prob_type->ToString() + MS_LOG(EXCEPTION) << op_name << " keep_prob type should be float16 or float32, but " << prob_type->ToString() << "."; } diff --git a/mindspore/ccsrc/operator/prim_statement.cc b/mindspore/ccsrc/operator/prim_statement.cc index 7d5038d4e1..239aed5bde 100644 --- a/mindspore/ccsrc/operator/prim_statement.cc +++ b/mindspore/ccsrc/operator/prim_statement.cc @@ -169,5 +169,36 @@ AbstractBasePtr InferImplIsNot(const AnalysisEnginePtr &, const PrimitivePtr &pr return std::make_shared(!(*t == *x)); } + +bool IsInDict(const PrimitivePtr &primitive, const AbstractBasePtrList &args_spec_list) { + const std::string op_name = primitive->name(); + CheckArgsSize(op_name, args_spec_list, 2); + auto key = CheckArg(op_name, args_spec_list, 0); + auto dict = CheckArg(op_name, args_spec_list, 1); + + ValuePtr key_value = key->BuildValue(); + if (!key_value->isa()) { + MS_LOG(EXCEPTION) << op_name << " evaluator key should be string, but got " << key_value->ToString(); + } + auto key_str = GetValue(key_value); + std::vector dict_elems = dict->elements(); + auto it = std::find_if(dict_elems.begin(), dict_elems.end(), + [key_str](const AbstractAttribute &item) { return item.first == key_str; }); + return it != dict_elems.end(); +} + +AbstractBasePtr InferImplInDict(const AnalysisEnginePtr &, const PrimitivePtr &primitive, + const AbstractBasePtrList &args_spec_list) { + // statement: x in t + // Inputs: x, t + return std::make_shared(IsInDict(primitive, args_spec_list)); +} + +AbstractBasePtr InferImplNotInDict(const AnalysisEnginePtr &, const PrimitivePtr &primitive, + const AbstractBasePtrList &args_spec_list) { + // statement: x not in t + // Inputs: x, t + return std::make_shared(!IsInDict(primitive, args_spec_list)); +} } // namespace abstract } // namespace mindspore diff --git 
a/mindspore/ccsrc/operator/prim_structures.cc b/mindspore/ccsrc/operator/prim_structures.cc index 88699c4d38..31d2bff43d 100644 --- a/mindspore/ccsrc/operator/prim_structures.cc +++ b/mindspore/ccsrc/operator/prim_structures.cc @@ -36,7 +36,7 @@ AbstractBasePtr InferImplStringEqual(const AnalysisEnginePtr &, const PrimitiveP ValuePtr value_x = scalar_x->BuildValue(); ValuePtr value_y = scalar_y->BuildValue(); if (!value_x->isa() || !value_y->isa()) { - MS_LOG(EXCEPTION) << "" << op_name << " requires 2 parameters are string, but got param0: " << value_x->ToString() + MS_LOG(EXCEPTION) << op_name << " requires 2 parameters are string, but got param0: " << value_x->ToString() << ", param1: " << value_y->ToString(); } @@ -44,6 +44,25 @@ AbstractBasePtr InferImplStringEqual(const AnalysisEnginePtr &, const PrimitiveP return std::make_shared(ret); } +AbstractBasePtr InferImplStringConcat(const AnalysisEnginePtr &, const PrimitivePtr &primitive, + const AbstractBasePtrList &args_spec_list) { + // Inputs: two scalars whose value is a string. 
+ const std::string op_name = primitive->name(); + CheckArgsSize(op_name, args_spec_list, 2); + AbstractScalarPtr scalar_x = CheckArg(op_name, args_spec_list, 0); + AbstractScalarPtr scalar_y = CheckArg(op_name, args_spec_list, 1); + + ValuePtr value_x = scalar_x->BuildValue(); + ValuePtr value_y = scalar_y->BuildValue(); + if (!value_x->isa() || !value_y->isa()) { + MS_LOG(EXCEPTION) << op_name << " requires 2 parameters are string, but got param0: " << value_x->ToString() + << ", param1: " << value_y->ToString(); + } + + std::string ret = (value_x->cast()->value() + value_y->cast()->value()); + return std::make_shared(ret); +} + AbstractBasePtr InferImplMakeTuple(const AnalysisEnginePtr &, const PrimitivePtr &, const AbstractBasePtrList &args_spec_list) { return std::make_shared(args_spec_list); @@ -64,7 +83,7 @@ AbstractBasePtr InferImplMakeDict(const AnalysisEnginePtr &, const PrimitivePtr size_t keys_size = keys->size(); if (values->size() != keys_size) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator keys' size is not equal with values' size"; + MS_LOG(EXCEPTION) << op_name << " evaluator keys' size is not equal with values' size"; } std::vector key_value; @@ -76,7 +95,7 @@ AbstractBasePtr InferImplMakeDict(const AnalysisEnginePtr &, const PrimitivePtr ValuePtr keyPtr = key->BuildValue(); MS_EXCEPTION_IF_NULL(keyPtr); if (!keyPtr->isa()) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator keys should be string, but got " << keyPtr->ToString(); + MS_LOG(EXCEPTION) << op_name << " evaluator keys should be string, but got " << keyPtr->ToString(); } std::string key_string = GetValue(keyPtr); key_value.emplace_back(key_string, value_list[index]); @@ -93,7 +112,7 @@ AbstractBasePtr InferImplMakeKwarg(const AnalysisEnginePtr &, const PrimitivePtr ValuePtr keyPtr = key->BuildValue(); if (!keyPtr->isa()) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator key should be string, but got " << keyPtr->ToString(); + MS_LOG(EXCEPTION) << op_name << " evaluator key 
should be string, but got " << keyPtr->ToString(); } std::string key_string = GetValue(keyPtr); return std::make_shared(key_string, args_spec_list[1]); @@ -109,14 +128,13 @@ AbstractBasePtr InferImplExtractKwarg(const AnalysisEnginePtr &, const Primitive ValuePtr key_value = key->BuildValue(); if (!key_value->isa()) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator key should be string, but got " << key_value->ToString(); + MS_LOG(EXCEPTION) << op_name << " evaluator key should be string, but got " << key_value->ToString(); } std::string key_input = GetValue(key_value); std::string key_actual = kwarg->get_key(); if (key_actual != key_input) { - MS_LOG(EXCEPTION) << "" << op_name - << " evaluator input key should be same as AbstractKeywordArg' key, but input is " << key_input - << ", AbstractKeywordArg' key is " << key_actual; + MS_LOG(EXCEPTION) << op_name << " evaluator input key should be same as AbstractKeywordArg' key, but input is " + << key_input << ", AbstractKeywordArg' key is " << key_actual; } return kwarg->get_arg(); } @@ -187,13 +205,12 @@ AbstractBasePtr InferTupleOrListGetItem(const std::string &op_name, const Abstra ValuePtr index_value = index->BuildValue(); if (!index_value->isa()) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator index should be an int32 number, but got " - << index_value->ToString(); + MS_LOG(EXCEPTION) << op_name << " evaluator index should be an int32 number, but got " << index_value->ToString(); } int idx_v = GetValue(index_value); std::size_t nelems = queue->elements().size(); if (idx_v >= SizeToInt(nelems) || idx_v < -SizeToInt(nelems)) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator index should be in range[-" << SizeToInt(nelems) << ", " + MS_LOG(EXCEPTION) << op_name << " evaluator index should be in range[-" << SizeToInt(nelems) << ", " << SizeToInt(nelems) << "), but got " << idx_v << "."; } @@ -215,8 +232,7 @@ AbstractBasePtr InferTupleOrListSetItem(const std::string &op_name, const Abstra ValuePtr 
index_value = index->BuildValue(); if (!index_value->isa()) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator index should be an int32 number, but got " - << index_value->ToString(); + MS_LOG(EXCEPTION) << op_name << " evaluator index should be an int32 number, but got " << index_value->ToString(); } int idx_v = GetValue(index_value); if (idx_v < 0) { @@ -227,8 +243,7 @@ AbstractBasePtr InferTupleOrListSetItem(const std::string &op_name, const Abstra AbstractBasePtrList elements = queue->elements(); std::size_t nelems = elements.size(); if (uidx_v >= nelems) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator the index: " << uidx_v << " to set out of range: " << nelems - 1 - << "."; + MS_LOG(EXCEPTION) << op_name << " evaluator the index: " << uidx_v << " to set out of range: " << nelems - 1 << "."; } elements[uidx_v] = args_spec_list[2]; return std::make_shared(elements); @@ -264,12 +279,12 @@ AbstractBasePtr InferImplDictGetItem(const AnalysisEnginePtr &, const PrimitiveP ValuePtr key_value = key->BuildValue(); if (!key_value->isa()) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator key should be string, but got " << key_value->ToString(); + MS_LOG(EXCEPTION) << op_name << " evaluator key should be string, but got " << key_value->ToString(); } - std::string key_str = GetValue(key_value); + auto key_str = GetValue(key_value); std::vector dict_elems = dict->elements(); auto it = std::find_if(dict_elems.begin(), dict_elems.end(), - [key_str](AbstractAttribute &item) { return item.first == key_str; }); + [key_str](const AbstractAttribute &item) { return item.first == key_str; }); if (it == dict_elems.end()) { MS_LOG(EXCEPTION) << "The key " << key_str << " does not exist in the dict:" << args_spec_list[0]->ToString(); @@ -287,7 +302,7 @@ AbstractBasePtr InferImplDictSetItem(const AnalysisEnginePtr &, const PrimitiveP ValuePtr key_value = key->BuildValue(); if (!key_value->isa()) { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator key should be string, but 
got " << key_value->ToString(); + MS_LOG(EXCEPTION) << op_name << " evaluator key should be string, but got " << key_value->ToString(); } std::string key_str = GetValue(key_value); std::vector dict_elems = dict->elements(); @@ -446,27 +461,27 @@ AbstractBasePtr InferImplReduceShape(const AnalysisEnginePtr &, const PrimitiveP auto x_shp_value = shape_x->BuildValue(); if (x_shp_value->isa()) { - MS_LOG(EXCEPTION) << "" << op_name + MS_LOG(EXCEPTION) << op_name << " evaluator shape's data field can't be anything: " << args_spec_list[1]->ToString(); } // Axis can be scalar, tuple or None AbstractTuplePtr axis = nullptr; if (args_spec_list[1]->isa()) { - MS_LOG(DEBUG) << "" << op_name << " evaluator second parameter is scalar"; + MS_LOG(DEBUG) << op_name << " evaluator second parameter is scalar"; AbstractBasePtrList axis_list = {dyn_cast(args_spec_list[1])}; axis = std::make_shared(axis_list); } else if (args_spec_list[1]->isa()) { - MS_LOG(DEBUG) << "" << op_name << " evaluator second parameter is tuple"; + MS_LOG(DEBUG) << op_name << " evaluator second parameter is tuple"; axis = args_spec_list[1]->cast(); } else { - MS_LOG(EXCEPTION) << "" << op_name << " evaluator second parameter should be a scalar or tuple, but got " + MS_LOG(EXCEPTION) << op_name << " evaluator second parameter should be a scalar or tuple, but got " << args_spec_list[1]->ToString(); } auto axis_value = axis->BuildValue(); if (axis_value->isa()) { - MS_LOG(EXCEPTION) << "" << op_name + MS_LOG(EXCEPTION) << op_name << " evaluator shape's data field can't be anything: " << args_spec_list[1]->ToString(); } auto axis_value_ptr = axis_value->cast(); diff --git a/mindspore/ccsrc/operator/prim_to_function.cc b/mindspore/ccsrc/operator/prim_to_function.cc index 234c829d44..bdfe48157c 100644 --- a/mindspore/ccsrc/operator/prim_to_function.cc +++ b/mindspore/ccsrc/operator/prim_to_function.cc @@ -24,36 +24,35 @@ namespace mindspore { namespace prim { PrimToFunction::PrimToFunction() - : 
prim_func_type_map_({ - // ONE_ARG prim - {"bool_not", kPrimTypeOneArg}, - {"scalar_cos", kPrimTypeOneArg}, - {"scalar_exp", kPrimTypeOneArg}, - {"scalar_floor", kPrimTypeOneArg}, - {"scalar_log", kPrimTypeOneArg}, - {"scalar_sin", kPrimTypeOneArg}, - {"scalar_tan", kPrimTypeOneArg}, - {"scalar_trunc", kPrimTypeOneArg}, - {"typeof", kPrimTypeOneArg}, - {"scalar_uadd", kPrimTypeOneArg}, - {"scalar_usub", kPrimTypeOneArg}, - // TWO_ARGS prim - {"scalar_add", kPrimTypeTwoArgs}, - {"bool_and", kPrimTypeTwoArgs}, - {"bool_eq", kPrimTypeTwoArgs}, - {"bool_or", kPrimTypeTwoArgs}, - {"scalar_div", kPrimTypeTwoArgs}, - {"scalar_eq", kPrimTypeTwoArgs}, - {"scalar_ge", kPrimTypeTwoArgs}, - {"scalar_gt", kPrimTypeTwoArgs}, - {"scalar_le", kPrimTypeTwoArgs}, - {"scalar_lt", kPrimTypeTwoArgs}, - {"scalar_ne", kPrimTypeTwoArgs}, - {"scalar_mod", kPrimTypeTwoArgs}, - {"scalar_mul", kPrimTypeTwoArgs}, - {"scalar_pow", kPrimTypeTwoArgs}, - {"scalar_sub", kPrimTypeTwoArgs}, - }) {} + : prim_func_type_map_({// ONE_ARG prim + {"bool_not", kPrimTypeOneArg}, + {"scalar_cos", kPrimTypeOneArg}, + {"scalar_exp", kPrimTypeOneArg}, + {"scalar_floor", kPrimTypeOneArg}, + {"scalar_log", kPrimTypeOneArg}, + {"scalar_sin", kPrimTypeOneArg}, + {"scalar_tan", kPrimTypeOneArg}, + {"scalar_trunc", kPrimTypeOneArg}, + {"typeof", kPrimTypeOneArg}, + {"scalar_uadd", kPrimTypeOneArg}, + {"scalar_usub", kPrimTypeOneArg}, + // TWO_ARGS prim + {"scalar_add", kPrimTypeTwoArgs}, + {"bool_and", kPrimTypeTwoArgs}, + {"bool_eq", kPrimTypeTwoArgs}, + {"bool_or", kPrimTypeTwoArgs}, + {"scalar_div", kPrimTypeTwoArgs}, + {"scalar_eq", kPrimTypeTwoArgs}, + {"scalar_ge", kPrimTypeTwoArgs}, + {"scalar_gt", kPrimTypeTwoArgs}, + {"scalar_le", kPrimTypeTwoArgs}, + {"scalar_lt", kPrimTypeTwoArgs}, + {"scalar_ne", kPrimTypeTwoArgs}, + {"scalar_mod", kPrimTypeTwoArgs}, + {"scalar_mul", kPrimTypeTwoArgs}, + {"scalar_pow", kPrimTypeTwoArgs}, + {"scalar_sub", kPrimTypeTwoArgs}, + {"scalar_floordiv", kPrimTypeTwoArgs}}) {} bool 
PrimToFunction::GetFunction(const PrimitivePtr& prim, FunctionPtr* const func) const { bool result = false; diff --git a/mindspore/ccsrc/pipeline/static_analysis/prim.cc b/mindspore/ccsrc/pipeline/static_analysis/prim.cc index 403bbdf433..1512596cb4 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/prim.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/prim.cc @@ -52,6 +52,8 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() { {prim::kPrimSwitch, {InferImplSwitch, true}}, {prim::kPrimIs_, {InferImplIs_, true}}, {prim::kPrimIsNot, {InferImplIsNot, true}}, + {prim::kPrimInDict, {InferImplInDict, true}}, + {prim::kPrimNotInDict, {InferImplNotInDict, true}}, // Maths {prim::kPrimMaximumGrad, {InferImplMinOrMaxGrad, true}}, {prim::kPrimMinimumGrad, {InferImplMinOrMaxGrad, true}}, @@ -91,6 +93,7 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() { {prim::kPrimMakeRange, {InferImplMakeRange, false}}, {prim::kPrimStopGradient, {InferImplStopGradient, false}}, {prim::kPrimStringEqual, {InferImplStringEqual, false}}, + {prim::kPrimStringConcat, {InferImplStringConcat, false}}, {prim::kPrimDictLen, {InferImplDictLen, false}}, // NN {prim::kPrimPooling, {InferImplPooling, true}}, @@ -988,6 +991,8 @@ PrimitiveToImplMap &GetUniformPrimitiveToImplMap() { {prim::kPrimScalarMul, {prim::ScalarMul, true, nullptr, true}}, {prim::kPrimScalarDiv, {prim::ScalarDiv, true, nullptr, true}}, {prim::kPrimScalarMod, {prim::ScalarMod, true, nullptr, true}}, + {prim::kPrimScalarPow, {prim::ScalarPow, true, nullptr, true}}, + {prim::kPrimScalarFloordiv, {prim::ScalarFloordiv, true, nullptr, true}}, {prim::kPrimScalarUadd, {prim::ScalarUAdd, true, nullptr, true}}, {prim::kPrimScalarUsub, {prim::ScalarUSub, true, nullptr, true}}, {prim::kPrimScalarLog, {prim::ScalarLog, true, nullptr, true}}, diff --git a/mindspore/ccsrc/pipeline/static_analysis/prim.h b/mindspore/ccsrc/pipeline/static_analysis/prim.h index e154473dbb..be71f3200a 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/prim.h 
+++ b/mindspore/ccsrc/pipeline/static_analysis/prim.h @@ -178,6 +178,10 @@ AbstractBasePtr InferImplIs_(const AnalysisEnginePtr &, const PrimitivePtr &, const AbstractBasePtrList &args_spec_list); AbstractBasePtr InferImplIsNot(const AnalysisEnginePtr &, const PrimitivePtr &, const AbstractBasePtrList &args_spec_list); +AbstractBasePtr InferImplInDict(const AnalysisEnginePtr &, const PrimitivePtr &, + const AbstractBasePtrList &args_spec_list); +AbstractBasePtr InferImplNotInDict(const AnalysisEnginePtr &, const PrimitivePtr &, + const AbstractBasePtrList &args_spec_list); AbstractBasePtr InferImplPooling(const AnalysisEnginePtr &, const PrimitivePtr &primitive, const AbstractBasePtrList &args_spec_list); AbstractBasePtr InferImplPoolingGrad(const AnalysisEnginePtr &, const PrimitivePtr &primitive, @@ -287,6 +291,8 @@ AbstractBasePtr InferImplStopGradient(const AnalysisEnginePtr &, const Primitive const AbstractBasePtrList &args_spec_list); AbstractBasePtr InferImplStringEqual(const AnalysisEnginePtr &, const PrimitivePtr &primitive, const AbstractBasePtrList &args_spec_list); +AbstractBasePtr InferImplStringConcat(const AnalysisEnginePtr &, const PrimitivePtr &primitive, + const AbstractBasePtrList &args_spec_list); AbstractBasePtr InferImplDictLen(const AnalysisEnginePtr &, const PrimitivePtr &primitive, const AbstractBasePtrList &args_spec_list); diff --git a/mindspore/ops/composite/multitype_ops/__init__.py b/mindspore/ops/composite/multitype_ops/__init__.py index db28b1b5f6..40bf71d49a 100644 --- a/mindspore/ops/composite/multitype_ops/__init__.py +++ b/mindspore/ops/composite/multitype_ops/__init__.py @@ -19,6 +19,9 @@ from .add_impl import add from .sub_impl import sub from .mul_impl import mul from .div_impl import div +from .pow_impl import pow_ +from .floordiv_impl import floordiv +from .mod_impl import mod from .getitem_impl import getitem from .zeros_like_impl import zeros_like from .ones_like_impl import ones_like @@ -38,6 +41,9 @@ __all__ = [ 'sub', 
'mul', 'div', + 'pow_', + 'floordiv', + 'mod', 'uadd', 'zeros_like', 'ones_like', diff --git a/mindspore/ops/composite/multitype_ops/add_impl.py b/mindspore/ops/composite/multitype_ops/add_impl.py index 2b1f83679e..2ad81bfc93 100644 --- a/mindspore/ops/composite/multitype_ops/add_impl.py +++ b/mindspore/ops/composite/multitype_ops/add_impl.py @@ -69,6 +69,21 @@ def _scalar_add_scalar(x, y): return F.scalar_add(x, y) +@add.register("String", "String") +def _string_concat_string(x, y): + """ + Concatenate the string y to the string x. + + Args: + x (str): The first input string. + y (str): the second input string. + + Returns: + str, concatenate the y to the x. + """ + return F.string_concat(x, y) + + @add.register("Number", "Tensor") def _scalar_add_tensor(x, y): """ @@ -81,8 +96,7 @@ def _scalar_add_tensor(x, y): Returns: Tensor, has the same dtype as x. """ - z = F.scalar_to_tensor(x, F.dtype(y)) - return F.tensor_add(z, y) + return F.tensor_add(x, y) @add.register("Tensor", "Number") @@ -97,8 +111,7 @@ def _tensor_add_scalar(x, y): Returns: Tensor, has the same dtype as x. """ - z = F.scalar_to_tensor(y, F.dtype(x)) - return F.tensor_add(x, z) + return F.tensor_add(x, y) @add.register("Tensor", "Tensor") diff --git a/mindspore/ops/composite/multitype_ops/div_impl.py b/mindspore/ops/composite/multitype_ops/div_impl.py index 3edf3c8d9f..c37fcb9c36 100644 --- a/mindspore/ops/composite/multitype_ops/div_impl.py +++ b/mindspore/ops/composite/multitype_ops/div_impl.py @@ -68,8 +68,7 @@ def _scalar_div_tensor(x, y): Returns: Tensor, has the same dtype as x. """ - z = F.scalar_to_tensor(x, F.dtype(y)) - return F.tensor_div(z, y) + return F.tensor_div(x, y) @div.register("Tensor", "Number") @@ -84,5 +83,4 @@ def _tensor_div_scalar(x, y): Returns: Tensor, has the same dtype as x. 
""" - z = F.scalar_to_tensor(y, F.dtype(x)) - return F.tensor_div(x, z) + return F.tensor_div(x, y) diff --git a/mindspore/ops/composite/multitype_ops/floordiv_impl.py b/mindspore/ops/composite/multitype_ops/floordiv_impl.py new file mode 100644 index 0000000000..c1a47f881f --- /dev/null +++ b/mindspore/ops/composite/multitype_ops/floordiv_impl.py @@ -0,0 +1,50 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Implementation for internal polymorphism `floordiv` operations.""" + +from ...composite import base +from ... import functional as F + + +floordiv = base.MultitypeFuncGraph("floordiv") +""" +`floordiv` is a metafuncgraph object which will compute the floordiv of two objects +using ".register" decorator. +""" + + +@floordiv.register("Number", "Number") +def _floordiv_scalar(x, y): + """Returns x // y where x and y are all scalars.""" + return F.scalar_floordiv(x, y) + + +@floordiv.register("Tensor", "Tensor") +def _floordiv_tensor(x, y): + """Returns x // y where x and y are all tensors and have save dtype.""" + return F.tensor_floordiv(x, y) + + +@floordiv.register("Tensor", "Number") +def _tensor_floordiv_scalar(x, y): + """Returns x // y where x is a tensor and y is a scalar. 
x and y should have same dtype.""" + return F.tensor_floordiv(x, y) + + +@floordiv.register("Number", "Tensor") +def _scalar_floordiv_tensor(x, y): + """Returns x // y where x is a scalar and y is a tensor. x and y should have same dtype.""" + return F.tensor_floordiv(x, y) diff --git a/mindspore/ops/composite/multitype_ops/mod_impl.py b/mindspore/ops/composite/multitype_ops/mod_impl.py new file mode 100644 index 0000000000..e9947677ac --- /dev/null +++ b/mindspore/ops/composite/multitype_ops/mod_impl.py @@ -0,0 +1,50 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Implementation for internal polymorphism `mod` operations.""" + +from ...composite import base +from ... import functional as F + + +mod = base.MultitypeFuncGraph("mod") +""" +`mod` is a metafuncgraph object which will compute the mod of two objects +using ".register" decorator. +""" + + +@mod.register("Number", "Number") +def _mod_scalar(x, y): + """Returns x % y where x and y are all scalars.""" + return F.scalar_mod(x, y) + + +@mod.register("Tensor", "Tensor") +def _mod_tensor(x, y): + """Returns x % y where x and y are all tensors and have save dtype.""" + return F.tensor_mod(x, y) + + +@mod.register("Tensor", "Number") +def _tensor_mod_scalar(x, y): + """Returns x % y where x is a tensor and y is a scalar. 
x and y should have same dtype.""" + return F.tensor_mod(x, y) + + +@mod.register("Number", "Tensor") +def _scalar_mod_tensor(x, y): + """Returns x % y where x is a scalar and y is a tensor. x and y should have same dtype.""" + return F.tensor_mod(x, y) diff --git a/mindspore/ops/composite/multitype_ops/mul_impl.py b/mindspore/ops/composite/multitype_ops/mul_impl.py index 1d4733a46b..ce9ec391af 100644 --- a/mindspore/ops/composite/multitype_ops/mul_impl.py +++ b/mindspore/ops/composite/multitype_ops/mul_impl.py @@ -56,8 +56,7 @@ def _scalar_mul_tensor(x, y): Outputs: Tensor, has the same dtype as x. """ - z = F.scalar_to_tensor(x, F.dtype(y)) - return F.tensor_mul(z, y) + return F.tensor_mul(x, y) @mul.register("Tensor", "Number") @@ -68,5 +67,4 @@ def _tensor_mul_scalar(x, y): Outputs: Tensor, has the same dtype as x. """ - z = F.scalar_to_tensor(y, F.dtype(x)) - return F.tensor_mul(x, z) + return F.tensor_mul(x, y) diff --git a/mindspore/ops/composite/multitype_ops/pow_impl.py b/mindspore/ops/composite/multitype_ops/pow_impl.py new file mode 100644 index 0000000000..8d73335c98 --- /dev/null +++ b/mindspore/ops/composite/multitype_ops/pow_impl.py @@ -0,0 +1,50 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Implementation for internal polymorphism `pow` operations.""" + +from ...composite import base +from ... 
import functional as F + + +pow_ = base.MultitypeFuncGraph("pow") +""" +`pow` is a metafuncgraph object which will compute the pow of two objects +using ".register" decorator. +""" + + +@pow_.register("Number", "Number") +def _pow_scalar(x, y): + """Returns x ** y where x and y are all scalars.""" + return F.scalar_pow(x, y) + + +@pow_.register("Tensor", "Tensor") +def _pow_tensor(x, y): + """Returns x ** y where x and y are all tensors and have save dtype.""" + return F.tensor_pow(x, y) + + +@pow_.register("Tensor", "Number") +def _tensor_pow_scalar(x, y): + """Returns x ** y where x is a tensor and y is a scalar. x and y should have same dtype.""" + return F.tensor_pow(x, y) + + +@pow_.register("Number", "Tensor") +def _scalar_pow_tensor(x, y): + """Returns x ** y where x is a scalar and y is a tensor. x and y should have same dtype.""" + return F.tensor_pow(x, y) diff --git a/mindspore/ops/composite/multitype_ops/sub_impl.py b/mindspore/ops/composite/multitype_ops/sub_impl.py index 4a3224a859..431a58b991 100644 --- a/mindspore/ops/composite/multitype_ops/sub_impl.py +++ b/mindspore/ops/composite/multitype_ops/sub_impl.py @@ -41,12 +41,10 @@ def _sub_tensor(x, y): @sub.register("Number", "Tensor") def _scalar_sub_tensor(x, y): """Returns x - y where x is a scalar and y is a tensor. x and y should have same dtype.""" - z = F.scalar_to_tensor(x, F.dtype(y)) - return F.tensor_sub(z, y) + return F.tensor_sub(x, y) @sub.register("Tensor", "Number") def _tensor_sub_scalar(x, y): """Returns x - y where x is a tensor and y is a scalar. 
x and y should have same dtype.""" - z = F.scalar_to_tensor(y, F.dtype(x)) - return F.tensor_sub(x, z) + return F.tensor_sub(x, y) diff --git a/mindspore/ops/functional.py b/mindspore/ops/functional.py index 4da725145f..611c569553 100644 --- a/mindspore/ops/functional.py +++ b/mindspore/ops/functional.py @@ -48,6 +48,9 @@ tensor_ge = P.GreaterEqual() tensor_sub = P.Sub() tensor_mul = P.Mul() tensor_div = P.RealDiv() +tensor_floordiv = P.FloorDiv() +tensor_pow = P.Pow() +tensor_mod = P.FloorMod() strided_slice = P.StridedSlice() same_type_shape = P.SameTypeShape() equal = P.Equal() @@ -83,6 +86,7 @@ scalar_add = Primitive('scalar_add') scalar_mul = Primitive('scalar_mul') scalar_sub = Primitive('scalar_sub') scalar_div = Primitive('scalar_div') +scalar_floordiv = Primitive('scalar_floordiv') scalar_log = Primitive('scalar_log') scalar_pow = Primitive('scalar_pow') scalar_gt = Primitive('scalar_gt') @@ -95,6 +99,7 @@ scalar_uadd = Primitive('scalar_uadd') scalar_usub = Primitive('scalar_usub') scalar_mod = Primitive('scalar_mod') string_eq = Primitive('string_equal') +string_concat = Primitive('string_concat') bool_not = Primitive("bool_not") bool_or = Primitive("bool_or") bool_and = Primitive("bool_and") @@ -104,7 +109,8 @@ logical_not = P.LogicalNot() array_to_scalar = Primitive('array_to_scalar') is_ = Primitive("is_") is_not = Primitive("is_not") - +in_dict = Primitive("in_dict") +not_in_dict = Primitive("not_in_dict") broadcast_gradient_args = Primitive('BroadcastGradientArgs') dot = Primitive('dot') array_reduce = Primitive('array_reduce') diff --git a/mindspore/ops/operations/math_ops.py b/mindspore/ops/operations/math_ops.py index e390b6b589..ce3449a8b7 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -667,8 +667,8 @@ class AddN(PrimitiveWithInfer): >>> return self.addN(z) >>> >>> net = NetAddN() - >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32) - >>> input_y = Tensor(np.array([4, 5, 6]), 
mindspore.int32) + >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32) + >>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32) >>> net(input_x, input_y, input_x, input_y) Tensor([10, 14, 18], shape=(3,), dtype=mindspore.int32) """ diff --git a/tests/ut/python/pipeline/parse/test_operator.py b/tests/ut/python/pipeline/parse/test_operator.py index a3412a6f8f..a3c5f7e422 100644 --- a/tests/ut/python/pipeline/parse/test_operator.py +++ b/tests/ut/python/pipeline/parse/test_operator.py @@ -131,3 +131,72 @@ def test_ME_arithmetic_operator_0070(): def test_ME_logical_operator_0020(): """ test_ME_logical_operator_0020 """ logical_operator_base('or') + + +def test_ops(): + class OpsNet(Cell): + """ OpsNet definition """ + + def __init__(self, x, y): + super(OpsNet, self).__init__() + self.x = x + self.y = y + self.int = 4 + self.float = 3.2 + self.str_a = "hello" + self.str_b = "world" + + def construct(self, x, y): + h = x // y + m = x ** y + n = x % y + r = self.x // self.y + s = self.x ** self.y + t = self.x % self.y + p = h + m + n + q = r + s + t + ret_pow = p ** q + q ** p + ret_mod = p % q + q % p + ret_floor = p // q + q // p + ret = ret_pow + ret_mod + ret_floor + if self.int > self.float: + if self.str_a + self.str_b == "helloworld": + return ret + return x + + net = OpsNet(9, 2) + x = Tensor(np.random.randint(low=1, high=10, size=(2, 3, 4), dtype=np.int32)) + y = Tensor(np.random.randint(low=10, high=20, size=(2, 3, 4), dtype=np.int32)) + context.set_context(mode=context.GRAPH_MODE, save_graphs=True) + net(x, y) + + +def test_in_dict(): + class InDictNet(Cell): + """ InDictNet definition """ + + def __init__(self, key_in, key_not_in): + super(InDictNet, self).__init__() + self.key_in = key_in + self.key_not_in = key_not_in + + def construct(self, x, y, z): + d = {"a": x, "b": y} + ret_in = 1 + ret_not_in = 2 + if self.key_in in d: + ret_in = d[self.key_in] + if self.key_not_in not in d: + ret_not_in = z + ret = ret_in + ret_not_in + return ret + + 
net = InDictNet("a", "c") + x = Tensor(np.random.randint(low=1, high=10, size=(2, 3, 4), dtype=np.int32)) + y = Tensor(np.random.randint(low=10, high=20, size=(2, 3, 4), dtype=np.int32)) + z = Tensor(np.random.randint(low=20, high=30, size=(2, 3, 4), dtype=np.int32)) + context.set_context(mode=context.GRAPH_MODE) + net(x, y, z) + + + From 4ba6f7884d3eaa144f8a896674f409f5a836d9ea Mon Sep 17 00:00:00 2001 From: zhangz0911gm Date: Tue, 14 Apr 2020 21:58:04 -0400 Subject: [PATCH 288/367] Fixing problem issues including class slice example cannot run, adding an example for class SigmoidCrossEntropyWithLogits etc. --- mindspore/nn/optim/optimizer.py | 14 ++++++++++++++ mindspore/nn/optim/sgd.py | 12 +++++++++++- mindspore/ops/operations/array_ops.py | 5 ++++- mindspore/ops/operations/nn_ops.py | 5 +++-- 4 files changed, 32 insertions(+), 4 deletions(-) diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py index bfbde78fff..6c6d14ed7a 100755 --- a/mindspore/nn/optim/optimizer.py +++ b/mindspore/nn/optim/optimizer.py @@ -81,8 +81,22 @@ class Optimizer(Cell): else: raise TypeError("Learning rate should be float, Tensor or Iterable.") + if isinstance(weight_decay, int): + weight_decay = float(weight_decay) + + if not isinstance(weight_decay, float): + raise TypeError("weight_decay should be a float number!") + + if isinstance(loss_scale, int): + loss_scale = float(loss_scale) + + if not isinstance(loss_scale, float): + raise TypeError("loss_scale should be a float number!") + if loss_scale <= 0.0: raise ValueError("Loss scale should be greater than 0, but got {}".format(loss_scale)) + self.loss_scale = loss_scale + if weight_decay < 0.0: raise ValueError("Weight decay should be equal or greater than 0, but got {}".format(weight_decay)) diff --git a/mindspore/nn/optim/sgd.py b/mindspore/nn/optim/sgd.py index a18adb8184..983be4bf80 100755 --- a/mindspore/nn/optim/sgd.py +++ b/mindspore/nn/optim/sgd.py @@ -61,7 +61,8 @@ class SGD(Optimizer): dampening 
(float): A floating point value of dampening for momentum. Default: 0. weight_decay (float): Weight decay (L2 penalty). Default: 0. nesterov (bool): Enables the Nesterov momentum. Default: False. - loss_scale (float): A floating point value for the loss scale. Default: 1.0. + loss_scale (float): A floating point value for the loss scale, which should be larger + than 0.0. Default: 1.0. Inputs: - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`. @@ -83,9 +84,18 @@ class SGD(Optimizer): super(SGD, self).__init__(learning_rate, params, weight_decay, loss_scale) + if not isinstance(momentum, float): + raise TypeError("momentum should be float number!") + if isinstance(momentum, float) and momentum < 0.0: raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum)) + if not isinstance(dampening, float): + raise TypeError("dampening should be float number") + + if isinstance(dampening, int): + dampening = float(dampening) + if dampening < 0.0: raise ValueError("dampening should be at least 0.0, but got dampening {}".format(dampening)) self.dampening = dampening diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index a7c3f50440..499f5d4f57 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -1008,6 +1008,7 @@ class Argmax(PrimitiveWithInfer): def infer_dtype(self, x_dtype): validator.check_subclass("input_x", x_dtype, mstype.tensor) + validator.check_typename('input_x', x_dtype, [mstype.float32, mstype.float16]) return mstype.tensor_type(self.output_type) @@ -1500,7 +1501,9 @@ class Slice(PrimitiveWithInfer): Tensor. 
Examples: - >>> data = Tensor(np.array([3,2,3]).astype(np.int32)) + >>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]], + >>> [[3, 3, 3], [4, 4, 4]], + >>> [[5, 5, 5], [6, 6, 6]]]).astype(np.int32)) >>> type = P.Slice()(data, (1, 0, 0), (1, 1, 3)) """ diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index f5037882f1..180e4cfe33 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -1436,9 +1436,9 @@ class SGD(PrimitiveWithInfer): nesterov (bool): Enable Nesterov momentum. Default: False. Inputs: - - **parameters** (Tensor) - Parameters to be updated. + - **parameters** (Tensor) - Parameters to be updated. Their data type can be list or tuple. - **gradient** (Tensor) - Gradients. - - **learning_rate** (Tensor) - Learning rate. e.g. Tensor(0.1, mindspore.float32). + - **learning_rate** (Tensor) - Learning rate. Must be float value. e.g. Tensor(0.1, mindspore.float32). - **accum** (Tensor) - Accum(velocity) to be updated. - **momentum** (Tensor) - Momentum. e.g. Tensor(0.1, mindspore.float32). - **stat** (Tensor) - States to be updated with the same shape as gradient. 
@@ -1449,6 +1449,7 @@ class SGD(PrimitiveWithInfer): @prim_attr_register def __init__(self, dampening=0.0, weight_decay=0.0, nesterov=False): + validator.check_type("nesterov", nesterov, [bool]) self.init_prim_io_names(inputs=['parameters', 'gradient', 'learning_rate', 'accum', 'momentum', 'stat'], outputs=['output']) From bce5f577526e68e0d8852d8934a33aa8917f05e1 Mon Sep 17 00:00:00 2001 From: caifubi Date: Thu, 16 Apr 2020 16:52:27 +0800 Subject: [PATCH 289/367] use GraphId as key of DavinciModel in ascend_kernel_runtime.cc --- .../device/ascend/ascend_kernel_runtime.cc | 69 ++++++++++--------- .../device/ascend/ascend_kernel_runtime.h | 8 +-- 2 files changed, 40 insertions(+), 37 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc index 6d849bd2a5..935e694636 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.cc @@ -54,9 +54,9 @@ static const size_t PRAMATER_OUTPUT_INDEX = 0; AscendKernelRuntime::~AscendKernelRuntime() { graph_model_map_.clear(); } void AscendKernelRuntime::ClearGraphModelMap() { - for (auto &iter : graph_model_id_map_) { - MS_LOG(INFO) << "Ge UnloadModel " << iter.second; - auto ret = ge::model_runner::ModelRunner::Instance().UnloadModel(iter.second); + for (auto &iter : graph_model_map_) { + MS_LOG(INFO) << "Ge UnloadModel " << iter.first; + auto ret = ge::model_runner::ModelRunner::Instance().UnloadModel(iter.first); if (!ret) { MS_LOG(ERROR) << "UnloadModel failed"; } @@ -249,6 +249,10 @@ DeviceAddressPtr AscendKernelRuntime::CreateDeviceAddress(void *device_ptr, size } bool AscendKernelRuntime::GenTask(const session::KernelGraph *graph) { + if (graph == nullptr) { + MS_EXCEPTION(NotExistsError) << "session::KernelGraph is NULL!"; + } + MS_LOG(INFO) << "GenTask start. 
GraphId:" << graph->graph_id(); auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); bool is_task_sink = context_ptr->enable_task_sink(); @@ -261,19 +265,15 @@ bool AscendKernelRuntime::GenTask(const session::KernelGraph *graph) { mindspore::memreuse::MemReuseChecker::GetInstance().CheckNormalIR(graph); } #endif - if (graph == nullptr) { - MS_EXCEPTION(NotExistsError) << "session::KernelGraph is NULL!"; - } vector> task_info_list; auto anf_node_list = graph->execution_order(); TaskGenerator::GenTasks(anf_node_list, &task_info_list, graph->graph_id()); // Store the task_info_list - auto iter = task_map_.find(graph); - if (iter != task_map_.end()) { - MS_LOG(EXCEPTION) << "graph TaskInfo list already exist"; + auto insert_ret = task_map_.insert(std::make_pair(graph->graph_id(), task_info_list)); + if (!insert_ret.second) { + MS_LOG(EXCEPTION) << "Duplicate GraphId! Please check in ascend_session."; } - task_map_[graph] = task_info_list; // Graph may have no compute node, such TensorAddGrad. if (task_info_list.empty()) { @@ -296,25 +296,19 @@ bool AscendKernelRuntime::GenTask(const session::KernelGraph *graph) { task_info_list, empty_list, empty_list, empty_list, empty_list, wait_active_stream_list, force_copy_stream_list, 0, 0, 0, 0, 0, 0, assign_instance.GetTotalStreamNum(), 1, assign_instance.GetTotalEventNum(), 0); - graph_model_map_[graph] = model; - graph_model_id_map_[graph] = graph->graph_id(); + auto ret = graph_model_map_.insert(std::make_pair(graph->graph_id(), model)); + if (!ret.second) { + MS_LOG(EXCEPTION) << "Duplicate GraphId! 
Please check in ascend_session."; + } MS_LOG(INFO) << "TaskGenerator GetTaskInfo end..."; return true; } -uint32_t AscendKernelRuntime::GetGraphModelId(const session::KernelGraph *kernel_graph) { - MS_EXCEPTION_IF_NULL(kernel_graph); - auto iter = graph_model_id_map_.find(kernel_graph); - if (iter == graph_model_id_map_.end()) { - MS_LOG(EXCEPTION) << "graph not in the map"; - } - return iter->second; -} - bool AscendKernelRuntime::LoadTask(const session::KernelGraph *graph) { if (graph == nullptr) { MS_EXCEPTION(NotExistsError) << "Null pointer graph, LoadTask failed. "; } + MS_LOG(INFO) << "LoadTask start. GraphId:" << graph->graph_id(); auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); bool is_task_sink = context_ptr->enable_task_sink(); @@ -327,23 +321,22 @@ bool AscendKernelRuntime::LoadTask(const session::KernelGraph *graph) { return true; } - auto task_iter = graph_model_map_.find(graph); - if (task_iter == graph_model_map_.end()) { - MS_LOG(ERROR) << "task not exist"; + auto model_iter = graph_model_map_.find(graph->graph_id()); + if (model_iter == graph_model_map_.end()) { + MS_LOG(ERROR) << "GraphId:" << graph->graph_id() << " Invalid! 
Graph LoadTask without GenTask."; return false; } - auto model_id = GetGraphModelId(graph); std::shared_ptr listener; - MS_LOG(INFO) << "LoadDavinciModel mode_id:" << model_id; - bool status = - ge::model_runner::ModelRunner::Instance().LoadDavinciModel(device_id_, 0, model_id, task_iter->second, listener); + MS_LOG(INFO) << "LoadDavinciModel mode_id:" << model_iter->first; + bool status = ge::model_runner::ModelRunner::Instance().LoadDavinciModel(device_id_, 0, model_iter->first, + model_iter->second, listener); if (!status) { - MS_LOG(INFO) << "load task failed"; + MS_LOG(ERROR) << "load task failed"; return false; } if (ProfilingManager::GetInstance().IsProfiling()) { - std::vector task_ids = ge::model_runner::ModelRunner::Instance().GetTaskIdList(model_id); + std::vector task_ids = ge::model_runner::ModelRunner::Instance().GetTaskIdList(model_iter->first); ProfilingUtils::ReportProfilingData(graph->graph_id(), task_ids); } return true; @@ -351,6 +344,8 @@ bool AscendKernelRuntime::LoadTask(const session::KernelGraph *graph) { bool AscendKernelRuntime::RunTask(const session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); + MS_LOG(INFO) << "RunTask start. GraphId:" << graph->graph_id(); + auto context_ptr = MsContext::GetInstance(); MS_EXCEPTION_IF_NULL(context_ptr); ge::InputData input_tensors = ge::InputData(); @@ -360,8 +355,12 @@ bool AscendKernelRuntime::RunTask(const session::KernelGraph *graph) { return true; } - auto model_id = GetGraphModelId(graph); - bool status = ge::model_runner::ModelRunner::Instance().RunModel(model_id, input_tensors, output_tensors); + if (!CheckGraphIdValid(graph->graph_id())) { + MS_LOG(ERROR) << "GraphId:" << graph->graph_id() << " Invalid! 
Graph RunTask without GenTask."; + return false; + } + + bool status = ge::model_runner::ModelRunner::Instance().RunModel(graph->graph_id(), input_tensors, output_tensors); if (!status) { MS_LOG(INFO) << "run task failed"; return false; @@ -497,12 +496,16 @@ bool AscendKernelRuntime::DestroyHccl() { } bool AscendKernelRuntime::GraphWithEmptyTaskList(const session::KernelGraph *graph) const { - auto iter = task_map_.find(graph); + auto iter = task_map_.find(graph->graph_id()); if (iter == task_map_.end()) { MS_LOG(EXCEPTION) << "Unknown graph ptr"; } return iter->second.empty(); } + +bool AscendKernelRuntime::CheckGraphIdValid(GraphId graph_id) const { + return task_map_.find(graph_id) != task_map_.end() && graph_model_map_.find(graph_id) != graph_model_map_.end(); +} } // namespace ascend } // namespace device } // namespace mindspore diff --git a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h index 547228d32f..5d0f61d0a6 100644 --- a/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h +++ b/mindspore/ccsrc/device/ascend/ascend_kernel_runtime.h @@ -23,6 +23,7 @@ #include "runtime/context.h" #include "framework/ge_runtime/davinci_model.h" #include "device/kernel_runtime_manager.h" +#include "session/session_basic.h" using ge::model_runner::TaskInfo; using std::unordered_map; @@ -54,14 +55,13 @@ class AscendKernelRuntime : public KernelRuntime { void ClearGraphModelMap(); void ReleaseDeviceRes() override; - uint32_t GetGraphModelId(const session::KernelGraph *kernel_graph); bool GraphWithEmptyTaskList(const session::KernelGraph *graph) const; + bool CheckGraphIdValid(GraphId graph_id) const; rtContext_t rt_context_{nullptr}; bool initialized_{false}; - unordered_map>> task_map_; - unordered_map> graph_model_map_; - unordered_map graph_model_id_map_; + unordered_map>> task_map_; + unordered_map> graph_model_map_; }; MS_REG_KERNEL_RUNTIME(kAscendDevice, AscendKernelRuntime); From 
a6146a6d2f227458823b342d661e7b72a55eac56 Mon Sep 17 00:00:00 2001 From: leonwanghui Date: Thu, 16 Apr 2020 17:34:53 +0800 Subject: [PATCH 290/367] Update mindspore-gpu:devel docker image --- docker/mindspore-gpu/devel/Dockerfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/mindspore-gpu/devel/Dockerfile b/docker/mindspore-gpu/devel/Dockerfile index 113bed5b06..fe88bd9a2c 100644 --- a/docker/mindspore-gpu/devel/Dockerfile +++ b/docker/mindspore-gpu/devel/Dockerfile @@ -6,7 +6,6 @@ MAINTAINER leonwanghui ENV PYTHON_ROOT_PATH /usr/local/python-3.7.5 ENV CMAKE_ROOT_PATH /usr/local/cmake-3.14.1 ENV PATH ${CMAKE_ROOT_PATH}/bin:/usr/local/bin:$PATH -ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH # Install base tools RUN apt update \ @@ -37,6 +36,9 @@ RUN DEBIAN_FRONTEND=noninteractive apt install -y \ automake \ flex +# Configure cuDNN (v7.6.5) +RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.5 /usr/local/cuda/lib64/libcudnn.so + # Set bash RUN echo "dash dash/sh boolean false" | debconf-set-selections RUN DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash From 51de42bc050f44acedf3cd08717b8f81cb327f4e Mon Sep 17 00:00:00 2001 From: simson <526422051@qq.com> Date: Thu, 16 Apr 2020 16:53:58 +0800 Subject: [PATCH 291/367] modify examples of ApplyMomentum/ArgMax --- mindspore/ops/operations/array_ops.py | 3 +-- mindspore/ops/operations/nn_ops.py | 6 +----- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index a7c3f50440..0af284b947 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -983,8 +983,7 @@ class Argmax(PrimitiveWithInfer): Examples: >>> input_x = Tensor(np.array([2.0, 3.1, 1.2])) - >>> index = P.Argmax()(input_x) - >>> assert index == Tensor(1, mindspore.int64) + >>> index = P.Argmax(output_type=mindspore.int32)(input_x) """ @prim_attr_register diff --git 
a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index f5037882f1..42ae394289 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -1345,11 +1345,7 @@ class ApplyMomentum(PrimitiveWithInfer): Tensor, parameters to be updated. Examples: - >>> net = ResNet50() - >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> opt = P.ApplyMomentum(Tensor(np.array([0.001])), Tensor(np.array([0.9])), - filter(lambda x: x.requires_grad, net.get_parameters())) - >>> model = Model(net, loss, opt) + Please refer to the usage in nn.ApplyMomentum. """ __mindspore_signature__ = ( ('variable', sig_rw.RW_WRITE, sig_kind.KIND_POSITIONAL_KEYWORD), From 3684ec4967f977d8c56c901e99c9447354f12e54 Mon Sep 17 00:00:00 2001 From: buxue Date: Thu, 16 Apr 2020 19:28:42 +0800 Subject: [PATCH 292/367] fix example of ControlDepend to ensure pass in gpu and cpu --- mindspore/ops/operations/control_ops.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/mindspore/ops/operations/control_ops.py b/mindspore/ops/operations/control_ops.py index 30f1e25a34..ca161cfad0 100644 --- a/mindspore/ops/operations/control_ops.py +++ b/mindspore/ops/operations/control_ops.py @@ -51,16 +51,18 @@ class ControlDepend(Primitive): >>> class Net(nn.Cell): >>> def __init__(self): >>> super(Net, self).__init__() - >>> self.global_step = mindspore.Parameter(initializer(0, [1]), name="global_step") - >>> self.rate = 0.2 >>> self.control_depend = P.ControlDepend() + >>> self.softmax = P.Softmax() >>> - >>> def construct(self, x): - >>> data = self.rate * self.global_step + x - >>> added_global_step = self.global_step + 1 - >>> self.global_step = added_global_step - >>> self.control_depend(data, added_global_step) - >>> return data + >>> def construct(self, x, y): + >>> mul = x * y + >>> softmax = self.softmax(x) + >>> ret = self.control_depend(mul, softmax) + >>> return ret + >>> x = Tensor(np.ones([4, 5]), dtype=mindspore.float32) + 
>>> y = Tensor(np.ones([4, 5]), dtype=mindspore.float32) + >>> net = Net() + >>> output = net(x, y) """ @prim_attr_register From 85205ea80be9cbe0905c530adfb51e246b9d002f Mon Sep 17 00:00:00 2001 From: panyifeng Date: Thu, 16 Apr 2020 19:58:37 +0800 Subject: [PATCH 293/367] fix document example for insert_grad_of --- mindspore/ops/operations/debug_ops.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py index a69dcc2df1..e4467f5ce1 100644 --- a/mindspore/ops/operations/debug_ops.py +++ b/mindspore/ops/operations/debug_ops.py @@ -123,6 +123,7 @@ class InsertGradientOf(PrimitiveWithInfer): >>> return ret >>> >>> clip = P.InsertGradientOf(clip_gradient) + >>> grad_all = C.GradOperation('get_all', get_all=True) >>> def InsertGradientOfClipDemo(): >>> def clip_test(x, y): >>> x = clip(x) @@ -135,7 +136,7 @@ class InsertGradientOf(PrimitiveWithInfer): >>> return clip_test(x, y) >>> >>> def fd(x, y): - >>> return C.grad_all(clip_test)(x, y) + >>> return grad_all(clip_test)(x, y) >>> >>> print("forward: ", f(1.1, 0.1)) >>> print("clip_gradient:", fd(1.1, 0.1)) From 37042d5b6782baaab926a194d6f5b48dd2905df4 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 08:08:07 -0400 Subject: [PATCH 294/367] add global batch normalization --- mindspore/nn/layer/__init__.py | 4 +- mindspore/nn/layer/normalization.py | 130 ++++++++++++++++++++++++--- tests/ut/python/nn/test_batchnorm.py | 15 ++++ 3 files changed, 133 insertions(+), 16 deletions(-) diff --git a/mindspore/nn/layer/__init__.py b/mindspore/nn/layer/__init__.py index cf601f03ff..714b517a84 100644 --- a/mindspore/nn/layer/__init__.py +++ b/mindspore/nn/layer/__init__.py @@ -18,7 +18,7 @@ Layer. The high-level components(Cells) used to construct the neural network. 
""" from .activation import Softmax, LogSoftmax, ReLU, ReLU6, Tanh, GELU, ELU, Sigmoid, PReLU, get_activation, LeakyReLU, HSigmoid, HSwish -from .normalization import BatchNorm1d, BatchNorm2d, LayerNorm, GroupNorm +from .normalization import BatchNorm1d, BatchNorm2d, LayerNorm, GroupNorm, GlobalBatchNorm from .container import SequentialCell, CellList from .conv import Conv2d, Conv2dTranspose from .lstm import LSTM @@ -29,7 +29,7 @@ from .image import ImageGradients, SSIM __all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'PReLU', 'get_activation', 'LeakyReLU', 'HSigmoid', 'HSwish', 'ELU', - 'BatchNorm1d', 'BatchNorm2d', 'LayerNorm', 'GroupNorm', + 'BatchNorm1d', 'BatchNorm2d', 'LayerNorm', 'GroupNorm', 'GlobalBatchNorm', 'SequentialCell', 'CellList', 'Conv2d', 'Conv2dTranspose', 'LSTM', diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 4aafaf031e..4bfa222986 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -20,16 +20,28 @@ from mindspore.common.initializer import initializer from mindspore.common.tensor import Tensor import mindspore.common.dtype as mstype import mindspore.context as context -from mindspore._checkparam import check_int_positive, check_bool, check_typename +from mindspore._checkparam import check_bool, check_typename from mindspore._extends import cell_attr_register +from mindspore.communication.management import get_local_rank_size, get_rank +from mindspore.communication import management +from mindspore._checkparam import check_int_positive from ..cell import Cell +class _GlobalBNHelper(Cell): + def __init__(self, group): + super(_GlobalBNHelper, self).__init__() + self.group = group + self.reduce = P.AllReduce(P.ReduceOp.SUM, group=self.group).add_prim_attr('fusion', 1) + def construct(self, x): + x = self.reduce(x) + return x class _BatchNorm(Cell): """Batch Normalization base class.""" @cell_attr_register def __init__(self, 
num_features, + group=1, eps=1e-5, momentum=0.9, affine=True, @@ -56,6 +68,20 @@ class _BatchNorm(Cell): gamma_init, num_features), name="gamma", requires_grad=affine) self.beta = Parameter(initializer( beta_init, num_features), name="beta", requires_grad=affine) + self.group = check_int_positive(group) + self.rank_id = get_rank() + self.rank_size = get_local_rank_size() + self.device_list = [i for i in range(0, self.rank_size)] + self.rank_list = self.list_group(self.device_list, self.group) + self.rank_list_idx = len(self.rank_list) + for i in range(self.rank_list_idx): + if self.rank_id in self.rank_list[i] and self.group != 1: + self.is_global = True + management.create_group('group' + str(i), self.rank_list[i]) + self.all_reduce = _GlobalBNHelper('group' + str(i)) + self.shape = P.Shape() + self.reduce_mean = P.ReduceMean() + self.square = P.Square() if context.get_context("enable_ge"): self.is_ge_backend = True @@ -82,22 +108,52 @@ class _BatchNorm(Cell): def _check_data_dim(self, x): raise NotImplementedError + def list_group(self, world_rank, group_size): + if group_size > get_local_rank_size(): + raise ValueError("group size can not be greater than local rank size, group size is {}, local_rank_size is {}".format(group_size, get_local_rank_size())) + if len(world_rank) % group_size != 0: + raise ValueError("please make your group size correct.") + world_rank_list = zip(*(iter(world_rank),) *group_size) + group_list = [list(i) for i in world_rank_list] + return group_list + def construct(self, x): if self.training and self.use_batch_statistics: if self.is_ge_backend: - y, batch_mean, batch_var, _, _ = \ - self.bn_train(x, - self.gamma, - self.beta, - None, - None) - - mean_sub = self.sub_mean(self.moving_mean, batch_mean) - temp_mean = self.mul_mean(mean_sub, self.momentum) - mean_sub2 = self.sub_var(self.moving_variance, batch_var) - temp_variance = self.mul_var(mean_sub2, self.momentum) - y = F.depend(y, self.assign_sub_mean(self.moving_mean, temp_mean)) - 
y = F.depend(y, self.assign_sub_var(self.moving_variance, temp_variance)) + if self.is_global: + x_mean = self.reduce_mean(x) + x_mean_square = self.reduce_mean(self.square(x)) + global_batch_mean = self.all_reduce(x_mean) / self.group + global_batch_mean_square = self.all_reduce(x_mean_square) / self.group + global_mean = global_batch_mean + global_var = global_batch_mean_square - self.square(global_batch_mean) + y, batch_mean, batch_var, _, _ = \ + self.bn_train(x, + self.gamma, + self.beta, + None, + None) + + mean_sub = self.sub_mean(self.moving_mean, global_mean) + temp_mean = self.mul_mean(mean_sub, self.momentum) + mean_sub2 = self.sub_var(self.moving_variance, global_var) + temp_variance = self.mul_var(mean_sub2, self.momentum) + y = F.depend(y, self.assign_sub_mean(self.moving_mean, temp_mean)) + y = F.depend(y, self.assign_sub_var(self.moving_variance, temp_variance)) + else: + y, batch_mean, batch_var, _, _ = \ + self.bn_train(x, + self.gamma, + self.beta, + None, + None) + + mean_sub = self.sub_mean(self.moving_mean, batch_mean) + temp_mean = self.mul_mean(mean_sub, self.momentum) + mean_sub2 = self.sub_var(self.moving_variance, batch_var) + temp_variance = self.mul_var(mean_sub2, self.momentum) + y = F.depend(y, self.assign_sub_mean(self.moving_mean, temp_mean)) + y = F.depend(y, self.assign_sub_var(self.moving_variance, temp_variance)) else: y = self.bn_train(x, self.gamma, @@ -221,6 +277,52 @@ class BatchNorm2d(_BatchNorm): pass +class GlobalBatchNorm(_BatchNorm): + r""" + Global normalization layer over a N-dimension input. + + Global Normalization is cross device synchronized batch normalization. Batch Normalization implementation + only normalize the data within each device. Global normalization will normalize the input within the group. + It has been described in the paper `Batch Normalization: Accelerating Deep Network Training by + Reducing Internal Covariate Shift `_. 
It rescales and recenters the + feature using a mini-batch of data and the learned parameters which can be described in the following formula. + + .. math:: + y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + Args: + num_features (int): `C` from an expected input of size (N, C, H, W). + group (int): The number of device in each group. + eps (float): A value added to the denominator for numerical stability. Default: 1e-5. + momentum (float): A floating hyperparameter of the momentum for the + running_mean and running_var computation. Default: 0.9. + gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight. + The values of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform', + 'he_uniform', etc. Default: 'ones'. + beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the beta weight. + The values of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform', + 'he_uniform', etc. Default: 'zeros'. + moving_mean_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the moving mean. + The values of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform', + 'he_uniform', etc. Default: 'zeros'. + moving_var_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the moving variance. + The values of str refer to the function `initializer` including 'zeros', 'ones', 'xavier_uniform', + 'he_uniform', etc. Default: 'ones'. + use_batch_statistics (bool): If true, use the mean value and variance value of current batch data, else use + the mean value and variance value of specified value. Default: True. + + Inputs: + - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`. + + Outputs: + Tensor, the normalized, scaled, offset tensor, of shape :math:`(N, C_{out}, H_{out}, W_{out})`. 
+ + Examples: + >>> global_bn_op = nn.GlobalBatchNorm(num_features=3, group=4) + >>> input = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]), mindspore.float32) + >>> global_bn_op(input) + """ + class LayerNorm(Cell): r""" Applies Layer Normalization over a mini-batch of inputs. diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index e73b7ebbf0..23ca79e8e1 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py @@ -19,6 +19,7 @@ import pytest import mindspore.nn as nn from mindspore.common.api import _executor from mindspore import Tensor, Parameter +from mindspore.communication.management import init def test_bn_pars_valid1(): @@ -70,3 +71,17 @@ def test_compile_groupnorm(): net = nn.GroupNorm(16, 64) input_data = Tensor(np.random.rand(1,64,256,256).astype(np.float32)) _executor.compile(net, input_data) + +class GlobalBNNet(nn.Cell): + def __init__(self): + super(GlobalBNNet, self).__init__() + self.bn = nn.GlobalBatchNorm(num_features = 2, group = 4) + def construct(self, x): + return self.bn(x) + +def test_gloabl_bn(): + init("hccl") + net = GlobalBNNet() + input_data = Tensor(np.array([[2.4, 2.1], [3.2, 5.4]], dtype=np.float32)) + net.set_train() + out = net(input_data) From 75fec82b5248f656f0a97701ce32f54cc0c9be79 Mon Sep 17 00:00:00 2001 From: kingfo Date: Tue, 14 Apr 2020 11:52:56 +0800 Subject: [PATCH 295/367] resolve pynative operator issue --- mindspore/_extends/builtin_operations.py | 8 +- mindspore/ccsrc/pipeline/pipeline.cc | 12 +-- mindspore/ccsrc/pynative/pynative_execute.cc | 87 ++++++++++++++----- mindspore/common/parameter.py | 18 ++-- mindspore/common/tensor.py | 43 ++++++--- mindspore/ops/_grad/grad_array_ops.py | 2 +- mindspore/ops/_utils/__init__.py | 4 +- .../ops/_utils/{broadcast.py => utils.py} | 29 ++++++- mindspore/ops/operations/__init__.py | 3 +- mindspore/ops/operations/_grad_ops.py | 28 ++++++ mindspore/ops/operations/array_ops.py | 53 +---------- 
mindspore/ops/operations/other_ops.py | 3 + tests/ut/python/ir/test_tensor.py | 22 +++++ tests/vm_impl/array_ops_vm_impl.py | 2 +- 14 files changed, 208 insertions(+), 106 deletions(-) rename mindspore/ops/_utils/{broadcast.py => utils.py} (62%) diff --git a/mindspore/_extends/builtin_operations.py b/mindspore/_extends/builtin_operations.py index 087b704719..6fea07425e 100644 --- a/mindspore/_extends/builtin_operations.py +++ b/mindspore/_extends/builtin_operations.py @@ -125,7 +125,7 @@ def list_len(x): return len(x) -# only used in PyNative modes +# only used in PyNative mode def partial(*args): """Implement `partial`.""" func = args[0].__call__ @@ -133,10 +133,14 @@ def partial(*args): return partial_func -# only used in PyNative modes +# only used in PyNative mode def depend(value, expr): return value +# only used in PyNative mode +def make_ref(key, value, ref): + return value + def scalar_cast(x, t): """Implement scalar_cast.""" diff --git a/mindspore/ccsrc/pipeline/pipeline.cc b/mindspore/ccsrc/pipeline/pipeline.cc index 003d4c15e9..cd4fe28db9 100644 --- a/mindspore/ccsrc/pipeline/pipeline.cc +++ b/mindspore/ccsrc/pipeline/pipeline.cc @@ -616,17 +616,19 @@ py::object ExecutorPy::Run(const py::tuple& args, const py::object& phase) { return ExecDFGraph(info_, args, phase_s); } #else - if (backend == "ge") { - std::shared_ptr ret_val = std::make_shared(); + if (backend == "ms" || backend == "ge") { + auto ret_val = std::make_shared(); if (info_.count(phase_s) != 0 && info_[phase_s]->func_graph != nullptr) { if (IsGraphOutputValueNodeOrParameter(info_[phase_s]->func_graph->output(), args, ret_val)) { return *ret_val; } } - if (args.size() > 0) { - return args[0]; + if (backend == "ge") { + if (args.size() > 0) { + return args[0]; + } + return args; } - return args; } #endif std::size_t full_arg_size = ArgListSize(phase_s); diff --git a/mindspore/ccsrc/pynative/pynative_execute.cc b/mindspore/ccsrc/pynative/pynative_execute.cc index 4144ad2d6b..5620634bcc 100644 
--- a/mindspore/ccsrc/pynative/pynative_execute.cc +++ b/mindspore/ccsrc/pynative/pynative_execute.cc @@ -20,11 +20,13 @@ #include #include #include +#include #include "utils/any.h" #include "utils/utils.h" #include "utils/context/ms_context.h" #include "operator/ops.h" +#include "operator/composite/do_signature.h" #include "pipeline/parse/data_converter.h" #include "pipeline/static_analysis/prim.h" #include "session/session_factory.h" @@ -50,6 +52,57 @@ inline ValuePtr PyAttrValue(const py::object& obj) { return converted_ret; } +py::tuple ConvertInputs(const PrimitivePyPtr& prim, const py::tuple& py_args) { + auto signature = prim->signatures(); + std::vector dtypes; + (void)std::transform(signature.begin(), signature.end(), std::back_inserter(dtypes), + [](const Signature& sig) { return sig.dtype; }); + int empty_dtype_count = std::count(dtypes.begin(), dtypes.end(), SignatureEnumDType::kDTypeEmptyDefaultValue); + if (dtypes.size() == 0 || static_cast(dtypes.size()) == empty_dtype_count) { + return py_args; + } + std::map> type_indexs; + for (size_t i = 0; i < dtypes.size(); ++i) { + auto it = type_indexs.find(dtypes[i]); + if (it == type_indexs.end()) { + (void)type_indexs.insert(std::make_pair(dtypes[i], std::vector{i})); + } else { + it->second.push_back(i); + } + } + std::map dst_type; + for (auto it = type_indexs.begin(); it != type_indexs.end(); (void)++it) { + auto type = it->first; + auto indexs = it->second; + if (indexs.size() < 2) { + continue; + } + size_t m_index = indexs[0]; + for (size_t i = 1; i < indexs.size(); ++i) { + if (py::isinstance(py_args[indexs[i]])) { + m_index = indexs[i]; + } + } + (void)dst_type.insert(std::make_pair(type, m_index)); + } + py::tuple py_inputs(py_args.size()); + for (size_t i = 0; i < py_args.size(); ++i) { + auto it = dst_type.find(dtypes[i]); + if (it != dst_type.end() && it->second != i && + (py::isinstance(py_args[i]) || py::isinstance(py_args[i]))) { + auto tensor_ptr = py::cast(py_args[it->second]); + if 
(py::isinstance(py_args[i])) { + py_inputs[i] = std::make_shared(py::cast(py_args[i]), tensor_ptr->Dtype()); + } else { + py_inputs[i] = std::make_shared(py::cast(py_args[i]), tensor_ptr->Dtype()); + } + continue; + } + py_inputs[i] = py_args[i]; + } + return py_inputs; +} + void PynativeInfer(const PrimitivePyPtr& prim, const py::tuple& py_args, OpExecInfo* const op_exec_info) { size_t size = py_args.size(); AbstractBasePtrList args_spec_list; @@ -73,30 +126,22 @@ OpExecInfoPtr GenerateOpExecInfo(const py::args& args) { auto op_exec_info = std::make_shared(); MS_EXCEPTION_IF_NULL(op_exec_info); op_exec_info->op_name = py::cast(args[PY_NAME]); - if (py::isinstance(args[PY_PRIM])) { - py::module ops_mod = py::module::import("mindspore.ops.operations"); - py::object py_primitive = ops_mod.attr(op_exec_info->op_name.c_str())(); - op_exec_info->py_primitive = py::cast(py_primitive); - py::dict none_attrs = py::dict(); - op_exec_info->op_attrs = none_attrs; - } else { - PrimitivePyPtr prim = py::cast(args[PY_PRIM]); - auto pyobj = prim->GetPyObj(); - if (pyobj == nullptr) { - MS_LOG(EXCEPTION) << "pyobj is empty"; - } - py::tuple py_args = args[PY_INPUTS]; - // use python infer method - if (ignore_infer_prim.find(op_exec_info->op_name) == ignore_infer_prim.end()) { - PynativeInfer(prim, py_args, op_exec_info.get()); - } - op_exec_info->py_primitive = prim; - op_exec_info->op_attrs = py::getattr(args[PY_PRIM], "attrs"); + auto prim = py::cast(args[PY_PRIM]); + auto pyobj = prim->GetPyObj(); + if (pyobj == nullptr) { + MS_LOG(EXCEPTION) << "pyobj is empty"; + } + py::tuple py_args = ConvertInputs(prim, args[PY_INPUTS]); + // use python infer method + if (ignore_infer_prim.find(op_exec_info->op_name) == ignore_infer_prim.end()) { + PynativeInfer(prim, py_args, op_exec_info.get()); } - op_exec_info->op_inputs = args[PY_INPUTS]; + op_exec_info->py_primitive = prim; + op_exec_info->op_attrs = py::getattr(args[PY_PRIM], "attrs"); + op_exec_info->op_inputs = py_args; 
op_exec_info->inputs_mask = args[PY_INPUT_MASK]; if (op_exec_info->op_inputs.size() != op_exec_info->inputs_mask.size()) { - MS_LOG(ERROR) << "" << op_exec_info->op_name << " op_inputs size not equal op_mask"; + MS_LOG(ERROR) << "op:" << op_exec_info->op_name << " inputs size not equal op_mask"; return nullptr; } return op_exec_info; diff --git a/mindspore/common/parameter.py b/mindspore/common/parameter.py index c8ddf0eac6..c354bcd235 100644 --- a/mindspore/common/parameter.py +++ b/mindspore/common/parameter.py @@ -14,7 +14,7 @@ # ============================================================================ """Parameter for cell.""" -from copy import copy +from copy import copy, deepcopy import numpy as np from .initializer import initializer from .tensor import Tensor @@ -156,16 +156,24 @@ class Parameter: return self.default_input def __add__(self, other): - return self.default_input + other + res = deepcopy(self) + res.default_input = res.default_input + other + return res def __sub__(self, other): - return self.default_input - other + res = deepcopy(self) + res.default_input = res.default_input - other + return res def __mul__(self, other): - return self.default_input * other + res = deepcopy(self) + res.default_input = res.default_input * other + return res def __truediv__(self, other): - return self.default_input / other + res = deepcopy(self) + res.default_input = res.default_input / other + return res def set_parameter_data(self, data): if isinstance(data, (Tensor, list, int, float, diff --git a/mindspore/common/tensor.py b/mindspore/common/tensor.py index 709b2ae280..70b8b169ca 100644 --- a/mindspore/common/tensor.py +++ b/mindspore/common/tensor.py @@ -70,45 +70,60 @@ class Tensor(Tensor_): return str(self.__str__()) def __add__(self, other): - if not isinstance(other, Tensor): - raise TypeError("input_data must be a tensor") + check_type('tensor input_data', other, (Tensor, float, int)) out = tensor_operator_registry.get('__add__')(self, other) return 
out def __mul__(self, other): - if not isinstance(other, Tensor): - raise TypeError("input_data must be a tensor") + check_type('tensor input_data', other, (Tensor, float, int)) out = tensor_operator_registry.get('__mul__')(self, other) return out + def __neg__(self): + return Tensor(-self.asnumpy()) + def __iadd__(self, other): out = self.__add__(other) return out + def __radd__(self, other): + check_type('tensor operation input', other, (Tensor, float, int)) + out = tensor_operator_registry.get('__add__')(other, self) + return out + def __imul__(self, other): out = self.__mul__(other) return out + def __rmul__(self, other): + check_type('tensor operation input', other, (Tensor, float, int)) + out = tensor_operator_registry.get('__mul__')(other, self) + return out + def __truediv__(self, other): - if isinstance(other, (int, float)): - other_tensor = Tensor(other, self.dtype()) - elif isinstance(other, Tensor): - other_tensor = other - else: - raise TypeError("unsupported type for div operation") - out = tensor_operator_registry.get('__div__')(self, other_tensor) + check_type('tensor operation input', other, (Tensor, float, int)) + out = tensor_operator_registry.get('__div__')(self, other) + return out + + def __rtruediv__(self, other): + check_type('tensor operation input', other, (Tensor, float, int)) + out = tensor_operator_registry.get('__div__')(other, self) return out def __sub__(self, other): - if not isinstance(other, Tensor): - raise TypeError("input_data must be a tensor") - out = self.__add__(Tensor(-other.asnumpy())) + check_type('tensor operation input', other, (Tensor, float, int)) + out = self.__add__(-other) return out def __isub__(self, other): out = self.__sub__(other) return out + def __rsub__(self, other): + check_type('tensor operation input', other, (Tensor, float, int)) + out = tensor_operator_registry.get('__add__')(other, Tensor(-self.asnumpy())) + return out + def __str__(self): if self.dtype() == mstype.type_none: return "Unknown Tensor 
type!" diff --git a/mindspore/ops/_grad/grad_array_ops.py b/mindspore/ops/_grad/grad_array_ops.py index abad030ae9..35d37b3ada 100644 --- a/mindspore/ops/_grad/grad_array_ops.py +++ b/mindspore/ops/_grad/grad_array_ops.py @@ -191,7 +191,7 @@ def get_bprop_concat(self): def bprop(x, out, dout): dx = () - out_offset = P.ConcatOffset(F.tuple_len(x), axis)(x) + out_offset = G.ConcatOffset(F.tuple_len(x), axis)(x) for i in range(F.tuple_len(x)): slice_out = P.Slice()(dout, out_offset[i], shape_op(x[i])) dx = dx + (slice_out,) diff --git a/mindspore/ops/_utils/__init__.py b/mindspore/ops/_utils/__init__.py index 00ce07453a..8fe1102968 100644 --- a/mindspore/ops/_utils/__init__.py +++ b/mindspore/ops/_utils/__init__.py @@ -14,6 +14,6 @@ # ============================================================================ """ops utils.""" -from .broadcast import _get_broadcast_shape +from .utils import _get_broadcast_shape, _get_concat_offset -__all__ = ['_get_broadcast_shape'] +__all__ = ['_get_broadcast_shape', '_get_concat_offset'] diff --git a/mindspore/ops/_utils/broadcast.py b/mindspore/ops/_utils/utils.py similarity index 62% rename from mindspore/ops/_utils/broadcast.py rename to mindspore/ops/_utils/utils.py index c71158de57..fbd81c4f0d 100644 --- a/mindspore/ops/_utils/broadcast.py +++ b/mindspore/ops/_utils/utils.py @@ -13,8 +13,11 @@ # limitations under the License. 
# ============================================================================ -"""broadcast""" +"""utils for operator""" +from ..._checkparam import ParamValidator as validator +from ..._checkparam import Rel +from ...common import dtype as mstype def _get_broadcast_shape(x_shape, y_shape, prim_name): """ @@ -57,3 +60,27 @@ def _get_broadcast_shape(x_shape, y_shape, prim_name): broadcast_shape_front = y_shape[0: y_len - length] if length == x_len else x_shape[0: x_len - length] broadcast_shape = broadcast_shape_front + broadcast_shape_back return broadcast_shape + + +def _get_concat_offset(x_shp, x_type, axis): + """for concat and concatoffset check args and compute offset""" + validator.check_type("shape", x_shp, [tuple]) + validator.check_integer("len of input_x shape", len(x_shp), 0, Rel.GT) + validator.check_subclass("shape0", x_type[0], mstype.tensor) + validator.check_integer("len of input_x0 shape", len(x_shp[0]), 0, Rel.GT) + rank_base = len(x_shp[0]) + validator.check_int_range('axis', axis, -rank_base - 1, rank_base, Rel.INC_BOTH) + if axis < 0: + axis = axis + rank_base + all_shp = x_shp[0][axis] + offset = [0,] + for i in range(1, len(x_shp)): + v = x_shp[i] + validator.check('len of x_shp[%d]' % i, len(v), 'len of base', len(x_shp[0])) + validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0]) + for j in range(rank_base): + if j != axis and v[j] != x_shp[0][j]: + raise ValueError("Concat evaluator element %d shape in input can not concat with first element" % i) + offset.append(all_shp) + all_shp += v[axis] + return offset, all_shp, axis diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index 492ebae444..e1dd8e36c5 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -19,7 +19,7 @@ Primitive operator classes. A collection of operators to build nerual networks or computing functions. 
""" -from .array_ops import (Argmax, Argmin, Cast, ConcatOffset, Concat, Pack, Unpack, +from .array_ops import (Argmax, Argmin, Cast, Concat, Pack, Unpack, Diag, DiagPart, DType, ExpandDims, Eye, Fill, GatherNd, GatherV2, InvertPermutation, IsInstance, IsSubClass, ArgMaxWithValue, OnesLike, ZerosLike, @@ -200,7 +200,6 @@ __all__ = [ 'LogicalOr', 'Size', 'DepthwiseConv2dNative', - 'ConcatOffset', 'UnsortedSegmentSum', "AllGather", "AllReduce", diff --git a/mindspore/ops/operations/_grad_ops.py b/mindspore/ops/operations/_grad_ops.py index 6143f9d0a0..48d1a2a89c 100644 --- a/mindspore/ops/operations/_grad_ops.py +++ b/mindspore/ops/operations/_grad_ops.py @@ -20,6 +20,7 @@ from ..._c_expression import signature_kind as sig_kind from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register from ..._checkparam import ParamValidator as validator from ..._checkparam import Rel, check_int_positive, check_bool +from .._utils import _get_concat_offset from ...common import dtype as mstype @@ -107,6 +108,33 @@ class BinaryCrossEntropyGrad(PrimitiveWithInfer): validator.check_two_types_same('x_type', x_type, 'weight_type', weight_type) return x_type +class ConcatOffset(PrimitiveWithInfer): + """primitive for computing Concat's gradient.""" + + @prim_attr_register + def __init__(self, N=2, axis=0): + """init ConcatOffset""" + + def __infer__(self, input_x): + axis = self.axis + x_shp = input_x['shape'] + x_type = input_x['dtype'] + offset, _, axis = _get_concat_offset(x_shp, x_type, axis) + self.add_prim_attr('T', x_type[0].element_type()) + offset_values = [] + for i in range(len(x_shp)): + values = [] + for j in range(len(x_shp[0])): + value = 0 + if j == axis: + value = offset[i] + values.append(value) + offset_values.append(tuple(values)) + out = {'shape': None, + 'dtype': None, + 'value': tuple(offset_values)} + return out + class Conv2DBackpropFilter(PrimitiveWithInfer): """ diff --git a/mindspore/ops/operations/array_ops.py 
b/mindspore/ops/operations/array_ops.py index a7c3f50440..da16a2ab29 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -29,6 +29,7 @@ from ..._checkparam import Rel from ...common import dtype as mstype from ...common.tensor import Tensor from ..operations.math_ops import _infer_shape_reduce +from .._utils import _get_concat_offset from ..primitive import Primitive, PrimitiveWithInfer, prim_attr_register def _check_infer_attr_reduce(axis, keep_dims): @@ -1275,30 +1276,6 @@ class UnsortedSegmentSum(PrimitiveWithInfer): return out -def _get_concat_offset(x_shp, x_type, axis): - """for concat and concatoffset check args and compute offset""" - validator.check_type("shape", x_shp, [tuple]) - validator.check_integer("len of input_x shape", len(x_shp), 0, Rel.GT) - validator.check_subclass("shape0", x_type[0], mstype.tensor) - validator.check_integer("len of input_x0 shape", len(x_shp[0]), 0, Rel.GT) - rank_base = len(x_shp[0]) - validator.check_int_range('axis', axis, -rank_base - 1, rank_base, Rel.INC_BOTH) - if axis < 0: - axis = axis + rank_base - all_shp = x_shp[0][axis] - offset = [0,] - for i in range(1, len(x_shp)): - v = x_shp[i] - validator.check('len of x_shp[%d]' % i, len(v), 'len of base', len(x_shp[0])) - validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0]) - for j in range(rank_base): - if j != axis and v[j] != x_shp[0][j]: - raise ValueError("Concat evaluator element %d shape in input can not concat with first element" % i) - offset.append(all_shp) - all_shp += v[axis] - return offset, all_shp, axis - - class Concat(PrimitiveWithInfer): r""" Concat tensor in specified axis. 
@@ -1531,34 +1508,6 @@ class Slice(PrimitiveWithInfer): 'value': None} -class ConcatOffset(PrimitiveWithInfer): - """primitive for computing Concat's gradient.""" - - @prim_attr_register - def __init__(self, N=2, axis=0): - """init ConcatOffset""" - - def __infer__(self, input_x): - axis = self.axis - x_shp = input_x['shape'] - x_type = input_x['dtype'] - offset, _, axis = _get_concat_offset(x_shp, x_type, axis) - self.add_prim_attr('T', x_type[0].element_type()) - offset_values = [] - for i in range(len(x_shp)): - values = [] - for j in range(len(x_shp[0])): - value = 0 - if j == axis: - value = offset[i] - values.append(value) - offset_values.append(tuple(values)) - out = {'shape': None, - 'dtype': None, - 'value': tuple(offset_values)} - return out - - class Select(PrimitiveWithInfer): r""" diff --git a/mindspore/ops/operations/other_ops.py b/mindspore/ops/operations/other_ops.py index b6182f0476..ff66e80972 100644 --- a/mindspore/ops/operations/other_ops.py +++ b/mindspore/ops/operations/other_ops.py @@ -271,3 +271,6 @@ class MakeRefKey(Primitive): @prim_attr_register def __init__(self, tag): validator.check_type('tag', tag, (str,)) + + def __call__(self): + pass diff --git a/tests/ut/python/ir/test_tensor.py b/tests/ut/python/ir/test_tensor.py index 1757567db5..b7bf1bebf5 100644 --- a/tests/ut/python/ir/test_tensor.py +++ b/tests/ut/python/ir/test_tensor.py @@ -24,6 +24,7 @@ import pytest import mindspore as ms import mindspore.common.api as me import mindspore.nn as nn +from mindspore import Tensor from mindspore.common.parameter import Parameter from mindspore.common.initializer import initializer from ..ut_filter import non_graph_engine @@ -396,3 +397,24 @@ def test_tensor_dtype_fp32_to_bool(): input = ms.Tensor(input) input_me = ms.Tensor(input, dtype=ms.bool_) + +def test_tensor_operation(): + x = Tensor(np.ones((3,3)) * 4) + res = x + 1 + assert np.all(res.asnumpy() == np.ones((3, 3)) * 5) + res = 1 + x + assert np.all(res.asnumpy() == np.ones((3, 3)) * 
5) + res = x - 2 + assert np.all(res.asnumpy() == np.ones((3, 3)) * 2) + res = 6 - x + assert np.all(res.asnumpy() == np.ones((3, 3)) * 2) + res = x * 3 + assert np.all(res.asnumpy() == np.ones((3, 3)) * 12) + res = 3 * x + assert np.all(res.asnumpy() == np.ones((3, 3)) * 12) + res = x / 2 + assert np.all(res.asnumpy() == np.ones((3, 3)) * 2) + res = 8 / x + assert np.all(res.asnumpy() == np.ones((3, 3)) * 2) + with pytest.raises(TypeError): + res = x * (2, 3) diff --git a/tests/vm_impl/array_ops_vm_impl.py b/tests/vm_impl/array_ops_vm_impl.py index 4258dadc62..38c613012e 100644 --- a/tests/vm_impl/array_ops_vm_impl.py +++ b/tests/vm_impl/array_ops_vm_impl.py @@ -190,7 +190,7 @@ def vm_impl_slice(self): return vm_impl -@vm_impl_getters.register(P.ConcatOffset) +@vm_impl_getters.register(P._grad_ops.ConcatOffset) def vm_impl_concatOffset(self): """Generate vm_impl function for ConcatOffset""" def vm_impl(x): From c8da1cf8e97632fdc420a1d003de97f040b400f1 Mon Sep 17 00:00:00 2001 From: chenjianping Date: Thu, 16 Apr 2020 13:44:20 +0000 Subject: [PATCH 296/367] support building on windows|support package --- README.md | 1 + build.bat | 4 ++-- cmake/external_libs/libtiff.cmake | 3 +++ cmake/external_libs/opencv.cmake | 3 +++ cmake/package.cmake | 25 +++++++++++++++++++++++-- mindspore/ccsrc/CMakeLists.txt | 10 +++++++++- mindspore/ops/_op_impl/__init__.py | 6 ++++-- setup.py | 6 ++++++ third_party/securec/CMakeLists.txt | 6 +++--- 9 files changed, 54 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index c9abd82da0..cf6eece1e7 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,7 @@ MindSpore offers build options across multiple backends: | GPU CUDA 9.2 | Ubuntu-x86 | ✔️ | | GPU CUDA 10.1 | Ubuntu-x86 | ✔️ | | CPU | Ubuntu-x86 | ✔️ | +| CPU | Windows-x86 | ✔️ | For installation using `pip`, take `CPU` and `Ubuntu-x86` build version as an example: diff --git a/build.bat b/build.bat index 6ada61cab2..5d423720fd 100644 --- a/build.bat +++ b/build.bat @@ 
-21,9 +21,9 @@ IF NOT %errorlevel% == 0 ( ) IF "%1%" == "" ( - cmake --build . --target all -- -j6 + cmake --build . --target package -- -j6 ) ELSE ( - cmake --build . --target all -- -j%1% + cmake --build . --target package -- -j%1% ) IF NOT %errorlevel% == 0 ( goto run_fail diff --git a/cmake/external_libs/libtiff.cmake b/cmake/external_libs/libtiff.cmake index c9934bfaa1..4086004e33 100644 --- a/cmake/external_libs/libtiff.cmake +++ b/cmake/external_libs/libtiff.cmake @@ -8,6 +8,9 @@ else() -Wno-unused-but-set-variable -fPIC -D_FORTIFY_SOURCE=2 -O2") set(tiff_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -Wno-unused-result \ -Wno-unused-but-set-variable -fPIC -D_FORTIFY_SOURCE=2 -O2") + if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") + set(tiff_CFLAGS "${tiff_CFLAGS} -Wno-int-to-pointer-cast -Wno-implicit-fallthrough -Wno-pointer-to-int-cast") + endif() endif() set(tiff_LDFLAGS "-Wl,-z,relro,-z,now,-z,noexecstack") diff --git a/cmake/external_libs/opencv.cmake b/cmake/external_libs/opencv.cmake index 1c40769f62..b4f8d55a9e 100644 --- a/cmake/external_libs/opencv.cmake +++ b/cmake/external_libs/opencv.cmake @@ -5,6 +5,8 @@ if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") elseif (${CMAKE_SYSTEM_NAME} MATCHES "Windows") set(opencv_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") set(opencv_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") + set(opencv_CXXFLAGS "${opencv_CXXFLAGS} -Wno-attributes -Wno-unknown-pragmas") + set(opencv_CXXFLAGS "${opencv_CXXFLAGS} -Wno-unused-value -Wno-implicit-fallthrough") else() set(opencv_CXXFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") set(opencv_CFLAGS "-fstack-protector-all -Wno-maybe-uninitialized -Wno-unused-parameter -D_FORTIFY_SOURCE=2 -O2") @@ -28,6 +30,7 @@ if (WIN32) -DBUILD_opencv_apps=OFF -DCMAKE_SKIP_RPATH=TRUE 
-DBUILD_opencv_python3=OFF + -DBUILD_opencv_videoio=OFF -DWITH_FFMPEG=OFF -DWITH_TIFF=ON -DBUILD_TIFF=OFF diff --git a/cmake/package.cmake b/cmake/package.cmake index d04efca085..7d1fdc6d8a 100644 --- a/cmake/package.cmake +++ b/cmake/package.cmake @@ -30,9 +30,18 @@ include(CPack) # set install path set(INSTALL_LIB_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Installation directory for libraries") set(INSTALL_PY_DIR ".") -set(INSTALL_LIB_DIR "lib") set(INSTALL_BASE_DIR ".") +if (CMAKE_SYSTEM_NAME MATCHES "Windows") + set(INSTALL_LIB_DIR ".") + set(onednn_LIBPATH ${onednn_LIBPATH}/../bin/) + set(glog_LIBPATH ${glog_LIBPATH}/../bin/) + set(opencv_LIBPATH ${opencv_LIBPATH}/../bin/) + set(jpeg_turbo_LIBPATH ${jpeg_turbo_LIBPATH}/../bin/) +else () + set(INSTALL_LIB_DIR "lib") +endif () + # set package files install( TARGETS _c_expression @@ -80,7 +89,7 @@ if (ENABLE_CPU) elseif (CMAKE_SYSTEM_NAME MATCHES "Darwin") file(GLOB_RECURSE DNNL_LIB_LIST ${onednn_LIBPATH}/libdnnl*${CMAKE_SHARED_LIBRARY_SUFFIX}*) elseif (CMAKE_SYSTEM_NAME MATCHES "Windows") - file(GLOB_RECURSE DNNL_LIB_LIST ${onednn_LIBPATH}/dnnl.lib) + file(GLOB_RECURSE DNNL_LIB_LIST ${onednn_LIBPATH}/dnnl.dll) endif () install( FILES ${DNNL_LIB_LIST} @@ -140,6 +149,18 @@ if (NOT ENABLE_GE) endif () endif () +if (CMAKE_SYSTEM_NAME MATCHES "Windows") + get_filename_component(CXX_DIR ${CMAKE_CXX_COMPILER} PATH) + file(GLOB CXX_LIB_LIST ${CXX_DIR}/*.dll) + file(GLOB JPEG_LIB_LIST ${jpeg_turbo_LIBPATH}/*.dll) + file(GLOB SQLITE_LIB_LIST ${sqlite_LIBPATH}/*.dll) + install( + FILES ${CXX_LIB_LIST} ${JPEG_LIB_LIST} ${SQLITE_LIB_LIST} + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) +endif () + # set python files file(GLOB MS_PY_LIST ${CMAKE_SOURCE_DIR}/mindspore/*.py) install( diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index c99809f90f..77008aee5f 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -533,5 +533,13 @@ endif() if(ENABLE_MINDDATA) 
add_subdirectory(mindrecord) - add_subdirectory(dataset) + if (WIN32) + set(_md_tmp_CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE}) + set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O0 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2") + add_subdirectory(dataset) + set(CMAKE_CXX_FLAGS_RELEASE ${_md_tmp_CMAKE_CXX_FLAGS_RELEASE}) + else() + add_subdirectory(dataset) + endif() endif() diff --git a/mindspore/ops/_op_impl/__init__.py b/mindspore/ops/_op_impl/__init__.py index 76444881cc..65a12cd73c 100644 --- a/mindspore/ops/_op_impl/__init__.py +++ b/mindspore/ops/_op_impl/__init__.py @@ -14,8 +14,10 @@ # ============================================================================ """Operators info register.""" -from .akg.gpu import * -from .tbe import * +import platform from .aicpu import * +if "Windows" not in platform.system(): + from .akg.gpu import * + from .tbe import * __all__ = [] diff --git a/setup.py b/setup.py index 221c6dc4f2..82e6d70fcc 100644 --- a/setup.py +++ b/setup.py @@ -17,6 +17,7 @@ """setup package.""" import os import stat +import platform from setuptools import setup, find_packages from setuptools.command.egg_info import egg_info @@ -97,6 +98,8 @@ required_package = [ package_data = { '': [ '*.so*', + '*.pyd', + '*.dll', 'lib/*.so*', 'lib/*.a', '.commit_id', @@ -111,6 +114,9 @@ def update_permissions(path): Args: path (str): Target directory path. 
""" + if platform.system() == "Windows": + return + for dirpath, dirnames, filenames in os.walk(path): for dirname in dirnames: dir_fullpath = os.path.join(dirpath, dirname) diff --git a/third_party/securec/CMakeLists.txt b/third_party/securec/CMakeLists.txt index b2f29c488b..b195cc9555 100644 --- a/third_party/securec/CMakeLists.txt +++ b/third_party/securec/CMakeLists.txt @@ -1,5 +1,5 @@ SET(CMAKE_BUILD_TYPE "Debug") -if (WIN32) +if (CMAKE_SYSTEM_NAME MATCHES "Windows") SET(CMAKE_C_FLAGS_DEBUG "$ENV{CFLAGS} -fPIC -O0 -Wall -Wno-deprecated-declarations -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer") else() SET(CMAKE_C_FLAGS_DEBUG "$ENV{CFLAGS} -fPIC -O0 -Wall -Wno-deprecated-declarations -g2 -ggdb -fno-inline-functions -fno-omit-frame-pointer -D_LIBCPP_INLINE_VISIBILITY='' -D'_LIBCPP_EXTERN_TEMPLATE(...)='") @@ -8,8 +8,8 @@ SET(CMAKE_C_FLAGS_RELEASE "$ENV{CFLAGS} -fPIC -O3 -Wall -Wno-deprecated-declarat set(CMAKE_EXPORT_COMPILE_COMMANDS ON) #add flags -if (WIN32) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include") +if (CMAKE_SYSTEM_NAME MATCHES "Windows") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include -Wno-attributes") else() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include -Werror") endif() From 4907abbc2448311b6cd75d2660fa435885e4ee63 Mon Sep 17 00:00:00 2001 From: chenfei Date: Thu, 16 Apr 2020 19:41:40 +0800 Subject: [PATCH 297/367] fix bug of real input arg num of set child graph input --- mindspore/ccsrc/session/ascend_session.cc | 83 ++++++++++++++++------- 1 file changed, 58 insertions(+), 25 deletions(-) diff --git a/mindspore/ccsrc/session/ascend_session.cc b/mindspore/ccsrc/session/ascend_session.cc index b0855feb49..ad6c58bc93 100755 --- a/mindspore/ccsrc/session/ascend_session.cc +++ b/mindspore/ccsrc/session/ascend_session.cc @@ -92,6 +92,51 @@ GraphId GetDistinctionLabel(const KernelGraphPtr &graph) { // else use first node of execution order as label return 
AnfAlgo::GetStreamDistinctionLabel(graph->execution_order()[0].get()); } + +std::vector GetRealArgs(const KernelGraphPtr graph, const VectorRef &args) { + MS_EXCEPTION_IF_NULL(graph); + std::vector graph_inputs = graph->inputs(); + auto valid_inputs = graph->ValidInputs(); + size_t real_args_size = 0; + std::vector real_args = {}; + for (size_t i = 0; i < args.size(); i++) { + if (utils::isa(args[i])) { + auto tmp_args = AnfAlgo::GetAllOutput(utils::cast(args[i]), {prim::kPrimTupleGetItem}); + for (auto &real_arg : tmp_args) { + auto anf_node = utils::cast(real_arg); + MS_EXCEPTION_IF_NULL(anf_node); + auto abstract = anf_node->abstract(); + MS_EXCEPTION_IF_NULL(abstract); + // create multiple parameters if is a tuple output real kernel + if (abstract->isa() && + !AnfAlgo::CheckPrimitiveType(anf_node, prim::kPrimTupleGetItem)) { + auto tuple_abstract = abstract->cast(); + real_args_size += tuple_abstract->size(); + continue; + } + real_args_size += 1; + real_args.push_back(real_arg); + } + } else { + real_args_size += 1; + real_args.push_back(args[i]); + } + } + if (graph_inputs.size() != valid_inputs.size()) { + MS_LOG(EXCEPTION) << "graph_inputs.size(): " << graph_inputs.size() + << ", valid_inputs.size(): " << valid_inputs.size() << " not equal"; + } + if (real_args_size != graph_inputs.size()) { + for (size_t j = 0; j < valid_inputs.size(); j++) { + if (valid_inputs[j]) { + MS_LOG(INFO) << "index: " << j << ", nodes: " << graph_inputs[j]->DebugString(); + } + } + MS_LOG(WARNING) << "real_args_size: " << real_args_size << ", graph_inputs.size(): " << graph_inputs.size() + << " not equal"; + } + return real_args; +} } // namespace GraphId AscendSession::CompileGraph(const AnfNodePtrList &lst, const AnfNodePtrList &outputs) { @@ -763,38 +808,26 @@ void AscendSession::SetChildGraphInput(GraphId g, const VectorRef &args) { UpdateGraphOrder(g); std::vector graph_inputs = to_graph->inputs(); auto valid_inputs = to_graph->ValidInputs(); - size_t real_args_size = 0; - 
for (size_t i = 0; i < args.size(); i++) { - real_args_size += AnfAlgo::GetAllOutput(utils::cast(args[i]), {prim::kPrimTupleGetItem}).size(); - } - if (real_args_size != graph_inputs.size()) { - for (size_t j = 0; j < valid_inputs.size(); j++) { - if (valid_inputs[j]) { - MS_LOG(INFO) << "index: " << j << ", nodes: " << graph_inputs[j]->DebugString(); - } - } - MS_LOG(WARNING) << "real_args_size: " << real_args_size << ", graph_inputs.size(): " << graph_inputs.size() - << " not equal"; - } + auto real_args = GetRealArgs(to_graph, args); size_t input_index = 0; - if (graph_inputs.size() != valid_inputs.size()) { - MS_LOG(EXCEPTION) << "graph_inputs.size(): " << graph_inputs.size() - << ", valid_inputs.size(): " << valid_inputs.size() << " not equal"; - } - for (size_t i = 0; i < args.size(); i++) { + for (size_t i = 0; i < real_args.size(); i++) { if (input_index >= graph_inputs.size()) { MS_LOG(EXCEPTION) << "input_index " << input_index << " out of range size " << graph_inputs.size(); } - if (utils::isa(args[i])) { + if (utils::isa(real_args[i])) { // arg is a anf node - for (const auto &real_arg : AnfAlgo::GetAllOutput(utils::cast(args[i]), {prim::kPrimTupleGetItem})) { - if (!valid_inputs[input_index]) { - MS_LOG(DEBUG) << "Invalid input arg" << real_arg->DebugString(); - continue; - } + auto real_arg = utils::cast(real_args[i]); + auto real_arg_output_num = AnfAlgo::GetOutputTensorNum(real_arg); + if (!AnfAlgo::CheckPrimitiveType(real_arg, prim::kPrimTupleGetItem) && real_arg_output_num > 1) { + input_index += real_arg_output_num; + continue; + } + if (valid_inputs[input_index]) { SetChildGraphParameter(real_arg, graph_inputs[input_index]); - input_index++; + } else { + MS_LOG(DEBUG) << "Invalid input arg" << real_arg->DebugString(); } + input_index++; } else if (utils::isa(args[i])) { auto value = utils::cast(args[i]); MS_EXCEPTION_IF_NULL(value); From 27c307684901a9ef7ee1ee8327191f59e1012748 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 
10:00:35 -0400 Subject: [PATCH 298/367] add global batch normalization --- mindspore/nn/layer/normalization.py | 27 ++++++++++++++++----------- tests/ut/python/nn/test_batchnorm.py | 11 +++++++++-- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 4bfa222986..32e4be5998 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -69,16 +69,17 @@ class _BatchNorm(Cell): self.beta = Parameter(initializer( beta_init, num_features), name="beta", requires_grad=affine) self.group = check_int_positive(group) - self.rank_id = get_rank() - self.rank_size = get_local_rank_size() - self.device_list = [i for i in range(0, self.rank_size)] - self.rank_list = self.list_group(self.device_list, self.group) - self.rank_list_idx = len(self.rank_list) - for i in range(self.rank_list_idx): - if self.rank_id in self.rank_list[i] and self.group != 1: - self.is_global = True - management.create_group('group' + str(i), self.rank_list[i]) - self.all_reduce = _GlobalBNHelper('group' + str(i)) + if self.group != 1: + self.rank_id = get_rank() + self.rank_size = get_local_rank_size() + self.device_list = [i for i in range(0, self.rank_size)] + self.rank_list = self.list_group(self.device_list, self.group) + self.rank_list_idx = len(self.rank_list) + for i in range(self.rank_list_idx): + if self.rank_id in self.rank_list[i] and self.group != 1: + self.is_global = True + management.create_group('group' + str(i), self.rank_list[i]) + self.all_reduce = _GlobalBNHelper('group' + str(i)) self.shape = P.Shape() self.reduce_mean = P.ReduceMean() self.square = P.Square() @@ -110,7 +111,8 @@ class _BatchNorm(Cell): def list_group(self, world_rank, group_size): if group_size > get_local_rank_size(): - raise ValueError("group size can not be greater than local rank size, group size is {}, local_rank_size is {}".format(group_size, get_local_rank_size())) + raise ValueError("group 
size can not be greater than local rank size, group size is {}, local_rank_size is {}".format( + group_size, get_local_rank_size())) if len(world_rank) % group_size != 0: raise ValueError("please make your group size correct.") world_rank_list = zip(*(iter(world_rank),) *group_size) @@ -322,6 +324,9 @@ class GlobalBatchNorm(_BatchNorm): >>> input = Tensor(np.random.randint(0, 255, [1, 3, 224, 224]), mindspore.float32) >>> global_bn_op(input) """ + def _check_data_dim(self, x): + if x.dim == 0: + pass class LayerNorm(Cell): r""" diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index 23ca79e8e1..24f0de85f7 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py @@ -20,6 +20,8 @@ import mindspore.nn as nn from mindspore.common.api import _executor from mindspore import Tensor, Parameter from mindspore.communication.management import init +from mindspore import context +from mindspore import ParallelMode def test_bn_pars_valid1(): @@ -75,12 +77,17 @@ def test_compile_groupnorm(): class GlobalBNNet(nn.Cell): def __init__(self): super(GlobalBNNet, self).__init__() - self.bn = nn.GlobalBatchNorm(num_features = 2, group = 4) + self.bn = nn.GlobalBatchNorm(num_features = 2, group = 2) def construct(self, x): return self.bn(x) -def test_gloabl_bn(): +def test_global_bn(): init("hccl") + size = 4 + context.set_context(mode=context.GRAPH_MODE) + context.reset_auto_parallel_context() + context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, + device_num=size, parameter_broadcast=True) net = GlobalBNNet() input_data = Tensor(np.array([[2.4, 2.1], [3.2, 5.4]], dtype=np.float32)) net.set_train() From b5e98042c531650c639c9975b9709c384cfde933 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 10:06:21 -0400 Subject: [PATCH 299/367] add global batch normalization --- mindspore/nn/layer/normalization.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 32e4be5998..dddf32ec48 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -111,8 +111,8 @@ class _BatchNorm(Cell): def list_group(self, world_rank, group_size): if group_size > get_local_rank_size(): - raise ValueError("group size can not be greater than local rank size, group size is {}, local_rank_size is {}".format( - group_size, get_local_rank_size())) + raise ValueError("group size can not be greater than local rank size, group size is {}, " + "local_rank_size is {}".format(group_size, get_local_rank_size())) if len(world_rank) % group_size != 0: raise ValueError("please make your group size correct.") world_rank_list = zip(*(iter(world_rank),) *group_size) From 616b9ea394d4d90b8caf6e5fc2453eb822ead721 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 10:34:41 -0400 Subject: [PATCH 300/367] add global batch normalization --- mindspore/nn/layer/normalization.py | 4 ++-- tests/ut/python/hccl_test/manage/api.py | 8 ++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index dddf32ec48..2b55147cf1 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -22,7 +22,7 @@ import mindspore.common.dtype as mstype import mindspore.context as context from mindspore._checkparam import check_bool, check_typename from mindspore._extends import cell_attr_register -from mindspore.communication.management import get_local_rank_size, get_rank +from mindspore.communication.management import get_group_size, get_rank from mindspore.communication import management from mindspore._checkparam import check_int_positive from ..cell import Cell @@ -71,7 +71,7 @@ class _BatchNorm(Cell): self.group = check_int_positive(group) if self.group != 1: self.rank_id = get_rank() - self.rank_size = get_local_rank_size() + 
self.rank_size = get_group_size() self.device_list = [i for i in range(0, self.rank_size)] self.rank_list = self.list_group(self.device_list, self.group) self.rank_list_idx = len(self.rank_list) diff --git a/tests/ut/python/hccl_test/manage/api.py b/tests/ut/python/hccl_test/manage/api.py index 8dac167a3f..b684df5263 100644 --- a/tests/ut/python/hccl_test/manage/api.py +++ b/tests/ut/python/hccl_test/manage/api.py @@ -65,6 +65,14 @@ def get_rank_size(group=None): return int(group.split("-")[0]) raise ValueError +def get_group_size(group=None): + hccl = Hccl() + if group is None: + return hccl.rank_size + if isinstance(group, str): + return int(group.split("-")[0]) + raise ValueError + # pylint: disable=unused-argument def get_world_rank_from_group_rank(group, group_rank_id): return group_rank_id From c91a58dc55e31e3becf39f038f2fb8db527eedf3 Mon Sep 17 00:00:00 2001 From: hesham Date: Thu, 16 Apr 2020 00:09:42 -0400 Subject: [PATCH 301/367] end() iterator is causing performance problem --- mindspore/ccsrc/dataset/core/tensor.cc | 6 ++++++ mindspore/ccsrc/dataset/core/tensor.h | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/mindspore/ccsrc/dataset/core/tensor.cc b/mindspore/ccsrc/dataset/core/tensor.cc index 8f0eae459a..8b70fe690d 100644 --- a/mindspore/ccsrc/dataset/core/tensor.cc +++ b/mindspore/ccsrc/dataset/core/tensor.cc @@ -85,6 +85,7 @@ Tensor &Tensor::operator=(Tensor &&other) noexcept { shape_ = other.shape(); type_ = other.type(); data_ = other.StartAddr(); + data_end_ = other.data_end_; data_allocator_ = std::move(other.data_allocator_); other.Invalidate(); } @@ -208,11 +209,13 @@ Tensor::~Tensor() { if (data_allocator_ != nullptr) { data_allocator_->deallocate(data_); data_ = nullptr; + data_end_ = nullptr; } else { // If we didn't have an allocator, but data_ is not null then it must // be a stand-alone tensor that used malloc directly. 
       free(data_);
       data_ = nullptr;
+      data_end_ = nullptr;
     }
   }
 }
@@ -338,8 +341,10 @@ unsigned char *Tensor::StartAddr() {
   // on the shape and type and allocate it.
   if (data_allocator_ != nullptr) {
     data_ = data_allocator_->allocate(this->SizeInBytes());
+    data_end_ = data_ + SizeInBytes();
   } else {
     data_ = static_cast(malloc(this->SizeInBytes()));
+    data_end_ = data_ + SizeInBytes();
     if (data_ == nullptr) {
       return nullptr;
     }
@@ -362,6 +367,7 @@ void Tensor::Invalidate() {
   shape_ = TensorShape::CreateUnknownRankShape();
   type_ = DataType(DataType::DE_UNKNOWN);
   data_ = nullptr;
+  data_end_ = nullptr;
   data_allocator_ = nullptr;
 }
diff --git a/mindspore/ccsrc/dataset/core/tensor.h b/mindspore/ccsrc/dataset/core/tensor.h
index 3409354d19..74da40c293 100644
--- a/mindspore/ccsrc/dataset/core/tensor.h
+++ b/mindspore/ccsrc/dataset/core/tensor.h
@@ -363,7 +363,7 @@ class Tensor {
   // @return TensorIterator
   template
   TensorIterator end() {
-    return TensorIterator(data_ + SizeInBytes());
+    return TensorIterator(data_end_);
   }

 protected:
@@ -402,6 +402,8 @@ class Tensor {
   unsigned char *data_;
   // An allocator for data_
   CharAllocPtr data_allocator_;
+  // pointer to the end of the physical data
+  unsigned char *data_end_ = nullptr;
 };
}  // namespace dataset
}  // namespace mindspore
From fb6c7ba2e127be6632ddb8f0497df54bfaedb59a Mon Sep 17 00:00:00 2001
From: hesham
Date: Thu, 16 Apr 2020 00:07:28 -0400
Subject: [PATCH 302/367] Fix two problems when we create multiple instances of
 the same dataset (2 for-loops) -- Iterator list is keeping all created
 iterators without cleaning them up -- alter tree modifies the original.
--- mindspore/ccsrc/dataset/api/python_bindings.cc | 6 ++++-- mindspore/dataset/engine/iterators.py | 12 +++++++++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/mindspore/ccsrc/dataset/api/python_bindings.cc b/mindspore/ccsrc/dataset/api/python_bindings.cc
index e2675ee217..d9e0ccbba8 100644
--- a/mindspore/ccsrc/dataset/api/python_bindings.cc
+++ b/mindspore/ccsrc/dataset/api/python_bindings.cc
@@ -225,11 +225,13 @@ void bindTensor(py::module *m) {
   (void)py::class_(*m, "DataType")
     .def(py::init())
     .def(py::self == py::self)
-    .def("__str__", &DataType::ToString);
+    .def("__str__", &DataType::ToString)
+    .def("__deepcopy__", [](py::object &t, py::dict memo) { return t; });
 }

 void bindTensorOps1(py::module *m) {
-  (void)py::class_>(*m, "TensorOp");
+  (void)py::class_>(*m, "TensorOp")
+    .def("__deepcopy__", [](py::object &t, py::dict memo) { return t; });

   (void)py::class_>(
     *m, "NormalizeOp", "Tensor operation to normalize an image. Takes mean and std.")
diff --git a/mindspore/dataset/engine/iterators.py b/mindspore/dataset/engine/iterators.py
index 268a66c0cf..69dd9ce0a9 100644
--- a/mindspore/dataset/engine/iterators.py
+++ b/mindspore/dataset/engine/iterators.py
@@ -15,6 +15,8 @@
 """Built-in iterators.
 """
 from abc import abstractmethod
+import copy
+import weakref

 from mindspore._c_dataengine import DEPipeline
 from mindspore._c_dataengine import OpName
@@ -27,7 +29,9 @@ ITERATORS_LIST = list()

 def _cleanup():
     for itr in ITERATORS_LIST:
-        itr.release()
+        iter_ref = itr()
+        if iter_ref is not None:
+            iter_ref.release()


 def alter_tree(node):
@@ -73,8 +77,10 @@ class Iterator:
     """

     def __init__(self, dataset):
-        ITERATORS_LIST.append(self)
-        self.dataset = alter_tree(dataset)
+        ITERATORS_LIST.append(weakref.ref(self))
+        # create a copy of tree and work on it.
+ self.dataset = copy.deepcopy(dataset) + self.dataset = alter_tree(self.dataset) if not self.__is_tree(): raise ValueError("The data pipeline is not a tree (i.e., one node has 2 consumers)") self.depipeline = DEPipeline() From d2b04664cad59608eb4754d2df93eaeaf84d7aca Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 20:58:59 -0400 Subject: [PATCH 303/367] add global batch normalization --- mindspore/nn/layer/normalization.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 2b55147cf1..c85b945a0d 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -110,9 +110,9 @@ class _BatchNorm(Cell): raise NotImplementedError def list_group(self, world_rank, group_size): - if group_size > get_local_rank_size(): + if group_size > get_group_size(): raise ValueError("group size can not be greater than local rank size, group size is {}, " - "local_rank_size is {}".format(group_size, get_local_rank_size())) + "local_rank_size is {}".format(group_size, get_group_size())) if len(world_rank) % group_size != 0: raise ValueError("please make your group size correct.") world_rank_list = zip(*(iter(world_rank),) *group_size) From d8bd5a09c4281c467e2576d140056c29b6826867 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Thu, 16 Apr 2020 21:13:45 -0400 Subject: [PATCH 304/367] add global batch normalization --- tests/ut/python/hccl_test/manage/api.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/ut/python/hccl_test/manage/api.py b/tests/ut/python/hccl_test/manage/api.py index b684df5263..04ce7da6d5 100644 --- a/tests/ut/python/hccl_test/manage/api.py +++ b/tests/ut/python/hccl_test/manage/api.py @@ -21,6 +21,7 @@ class Hccl(): _instance = None _rank_id = 0 _rank_size = 1 + _group_size = 4 def __init__(self): pass @@ -47,6 +48,10 @@ class Hccl(): def rank_size(self): return self._rank_size + @property + def 
group_size(self): + return self._group_size + @rank_size.setter def rank_size(self, size): self._rank_size = size @@ -68,7 +73,7 @@ def get_rank_size(group=None): def get_group_size(group=None): hccl = Hccl() if group is None: - return hccl.rank_size + return hccl.group_size if isinstance(group, str): return int(group.split("-")[0]) raise ValueError From aa8fbcc06eb5801811ec008803dc933017ac5273 Mon Sep 17 00:00:00 2001 From: zhaozhenlong Date: Wed, 15 Apr 2020 11:22:21 +0800 Subject: [PATCH 305/367] add cell psnr --- mindspore/nn/layer/__init__.py | 4 +- mindspore/nn/layer/image.py | 82 ++++++++++++++++++++++------ tests/ut/python/nn/test_psnr.py | 61 +++++++++++++++++++++ tests/ut/python/nn/test_ssim.py | 95 +++++++++++++++++++++++++++++++++ 4 files changed, 224 insertions(+), 18 deletions(-) create mode 100644 tests/ut/python/nn/test_psnr.py create mode 100644 tests/ut/python/nn/test_ssim.py diff --git a/mindspore/nn/layer/__init__.py b/mindspore/nn/layer/__init__.py index cf601f03ff..6c1b19a110 100644 --- a/mindspore/nn/layer/__init__.py +++ b/mindspore/nn/layer/__init__.py @@ -25,7 +25,7 @@ from .lstm import LSTM from .basic import Dropout, Flatten, Dense, ClipByNorm, Norm, OneHot, Pad, Unfold from .embedding import Embedding from .pooling import AvgPool2d, MaxPool2d -from .image import ImageGradients, SSIM +from .image import ImageGradients, SSIM, PSNR __all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'PReLU', 'get_activation', 'LeakyReLU', 'HSigmoid', 'HSwish', 'ELU', @@ -36,5 +36,5 @@ __all__ = ['Softmax', 'LogSoftmax', 'ReLU', 'ReLU6', 'Tanh', 'GELU', 'Sigmoid', 'Dropout', 'Flatten', 'Dense', 'ClipByNorm', 'Norm', 'OneHot', 'Embedding', 'AvgPool2d', 'MaxPool2d', 'Pad', 'Unfold', - 'ImageGradients', 'SSIM', + 'ImageGradients', 'SSIM', 'PSNR', ] diff --git a/mindspore/nn/layer/image.py b/mindspore/nn/layer/image.py index 6121776f59..72c4c6d8e2 100644 --- a/mindspore/nn/layer/image.py +++ b/mindspore/nn/layer/image.py @@ -69,6 
+69,18 @@ class ImageGradients(Cell): return dy, dx +def _convert_img_dtype_to_float32(img, max_val): + """convert img dtype to float32""" + # Ususally max_val is 1.0 or 255, we will do the scaling if max_val > 1. + # We will scale img pixel value if max_val > 1. and just cast otherwise. + ret = F.cast(img, mstype.float32) + max_val = F.scalar_cast(max_val, mstype.float32) + if max_val > 1.: + scale = 1. / max_val + ret = ret * scale + return ret + + @constexpr def _gauss_kernel_helper(filter_size): """gauss kernel helper""" @@ -134,9 +146,9 @@ class SSIM(Cell): self.mean = P.DepthwiseConv2dNative(channel_multiplier=1, kernel_size=filter_size) def construct(self, img1, img2): - max_val = self._convert_img_dtype_to_float32(self.max_val, self.max_val) - img1 = self._convert_img_dtype_to_float32(img1, self.max_val) - img2 = self._convert_img_dtype_to_float32(img2, self.max_val) + max_val = _convert_img_dtype_to_float32(self.max_val, self.max_val) + img1 = _convert_img_dtype_to_float32(img1, self.max_val) + img2 = _convert_img_dtype_to_float32(img2, self.max_val) kernel = self._fspecial_gauss(self.filter_size, self.filter_sigma) kernel = P.Tile()(kernel, (1, P.Shape()(img1)[1], 1, 1)) @@ -145,21 +157,10 @@ class SSIM(Cell): return mean_ssim - def _convert_img_dtype_to_float32(self, img, max_val): - """convert img dtype to float32""" - # Ususally max_val is 1.0 or 255, we will do the scaling if max_val > 1. - # We will scale img pixel value if max_val > 1. and just cast otherwise. 
- ret = P.Cast()(img, mstype.float32) - max_val = F.scalar_cast(max_val, mstype.float32) - if max_val > 1.: - scale = 1./max_val - ret = ret*scale - return ret - def _calculate_mean_ssim(self, x, y, kernel, max_val, k1, k2): """calculate mean ssim""" - c1 = (k1*max_val)*(k1*max_val) - c2 = (k2*max_val)*(k2*max_val) + c1 = (k1 * max_val) * (k1 * max_val) + c2 = (k2 * max_val) * (k2 * max_val) # SSIM luminance formula # (2 * mean_{x} * mean_{y} + c1) / (mean_{x}**2 + mean_{y}**2 + c1) @@ -195,3 +196,52 @@ class SSIM(Cell): g = P.Softmax()(g) ret = F.reshape(g, (1, 1, filter_size, filter_size)) return ret + + +class PSNR(Cell): + r""" + Returns Peak Signal-to-Noise Ratio of two image batches. + + It produces a PSNR value for each image in batch. + Assume inputs are :math:`I` and :math:`K`, both with shape :math:`h*w`. + :math:`MAX` represents the dynamic range of pixel values. + + .. math:: + + MSE&=\frac{1}{hw}\sum\limits_{i=0}^{h-1}\sum\limits_{j=0}^{w-1}[I(i,j)-K(i,j)]^2\\ + PSNR&=10*log_{10}(\frac{MAX^2}{MSE}) + + Args: + max_val (Union[int, float]): The dynamic range of the pixel values (255 for 8-bit grayscale images). + Default: 1.0. + + Inputs: + - **img1** (Tensor) - The first image batch with format 'NCHW'. It should be the same shape and dtype as img2. + - **img2** (Tensor) - The second image batch with format 'NCHW'. It should be the same shape and dtype as img1. + + Outputs: + Tensor, with dtype mindspore.float32. It is a 1-D tensor with shape N, where N is the batch num of img1. 
+ + Examples: + >>> net = nn.PSNR() + >>> img1 = Tensor(np.random.random((1,3,16,16))) + >>> img2 = Tensor(np.random.random((1,3,16,16))) + >>> psnr = net(img1, img2) + + """ + def __init__(self, max_val=1.0): + super(PSNR, self).__init__() + validator.check_type('max_val', max_val, [int, float]) + validator.check('max_val', max_val, '', 0.0, Rel.GT) + self.max_val = max_val + + def construct(self, img1, img2): + max_val = _convert_img_dtype_to_float32(self.max_val, self.max_val) + img1 = _convert_img_dtype_to_float32(img1, self.max_val) + img2 = _convert_img_dtype_to_float32(img2, self.max_val) + + mse = P.ReduceMean()(F.square(img1 - img2), (-3, -2, -1)) + # 10*log_10(max_val^2/MSE) + psnr = 10 * P.Log()(F.square(max_val) / mse) / F.scalar_log(10.0) + + return psnr diff --git a/tests/ut/python/nn/test_psnr.py b/tests/ut/python/nn/test_psnr.py new file mode 100644 index 0000000000..5a908b308d --- /dev/null +++ b/tests/ut/python/nn/test_psnr.py @@ -0,0 +1,61 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +""" +test psnr +""" +import numpy as np +import pytest +import mindspore.nn as nn +from mindspore.common.api import _executor +from mindspore import Tensor + + +class PSNRNet(nn.Cell): + def __init__(self, max_val=1.0): + super(PSNRNet, self).__init__() + self.net = nn.PSNR(max_val) + + def construct(self, img1, img2): + return self.net(img1, img2) + + +def test_compile_psnr(): + max_val = 1.0 + net = PSNRNet(max_val) + img1 = Tensor(np.random.random((8, 3, 16, 16))) + img2 = Tensor(np.random.random((8, 3, 16, 16))) + _executor.compile(net, img1, img2) + +def test_compile_psnr_grayscale(): + max_val = 255 + net = PSNRNet(max_val) + img1 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) + img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) + _executor.compile(net, img1, img2) + +def test_psnr_max_val_negative(): + max_val = -1 + with pytest.raises(ValueError): + net = PSNRNet(max_val) + +def test_psnr_max_val_bool(): + max_val = True + with pytest.raises(ValueError): + net = PSNRNet(max_val) + +def test_psnr_max_val_zero(): + max_val = 0 + with pytest.raises(ValueError): + net = PSNRNet(max_val) diff --git a/tests/ut/python/nn/test_ssim.py b/tests/ut/python/nn/test_ssim.py new file mode 100644 index 0000000000..a698b59f69 --- /dev/null +++ b/tests/ut/python/nn/test_ssim.py @@ -0,0 +1,95 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ +""" +test ssim +""" +import numpy as np +import pytest +import mindspore.nn as nn +from mindspore.common.api import _executor +from mindspore import Tensor + + +class SSIMNet(nn.Cell): + def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): + super(SSIMNet, self).__init__() + self.net = nn.SSIM(max_val, filter_size, filter_sigma, k1, k2) + + def construct(self, img1, img2): + return self.net(img1, img2) + + +def test_compile(): + net = SSIMNet() + img1 = Tensor(np.random.random((8, 3, 16, 16))) + img2 = Tensor(np.random.random((8, 3, 16, 16))) + _executor.compile(net, img1, img2) + +def test_compile_grayscale(): + max_val = 255 + net = SSIMNet(max_val = max_val) + img1 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) + img2 = Tensor(np.random.randint(0, 256, (8, 1, 16, 16), np.uint8)) + _executor.compile(net, img1, img2) + +def test_ssim_max_val_negative(): + max_val = -1 + with pytest.raises(ValueError): + net = SSIMNet(max_val) + +def test_ssim_max_val_bool(): + max_val = True + with pytest.raises(ValueError): + net = SSIMNet(max_val) + +def test_ssim_max_val_zero(): + max_val = 0 + with pytest.raises(ValueError): + net = SSIMNet(max_val) + +def test_ssim_filter_size_float(): + with pytest.raises(ValueError): + net = SSIMNet(filter_size=1.1) + +def test_ssim_filter_size_zero(): + with pytest.raises(ValueError): + net = SSIMNet(filter_size=0) + +def test_ssim_filter_sigma_zero(): + with pytest.raises(ValueError): + net = SSIMNet(filter_sigma=0.0) + +def test_ssim_filter_sigma_negative(): + with pytest.raises(ValueError): + net = SSIMNet(filter_sigma=-0.1) + +def test_ssim_k1_k2_wrong_value(): + with pytest.raises(ValueError): + net = SSIMNet(k1=1.1) + with pytest.raises(ValueError): + net = SSIMNet(k1=1.0) + with 
pytest.raises(ValueError): + net = SSIMNet(k1=0.0) + with pytest.raises(ValueError): + net = SSIMNet(k1=-1.0) + + with pytest.raises(ValueError): + net = SSIMNet(k2=1.1) + with pytest.raises(ValueError): + net = SSIMNet(k2=1.0) + with pytest.raises(ValueError): + net = SSIMNet(k2=0.0) + with pytest.raises(ValueError): + net = SSIMNet(k2=-1.0) \ No newline at end of file From 87e71f26cccb9e820ca200d349fb55100994e93a Mon Sep 17 00:00:00 2001 From: YuJianfeng Date: Fri, 17 Apr 2020 09:21:55 +0800 Subject: [PATCH 306/367] Erase the visited attr after transdata split --- .../ccsrc/pre_activate/ascend/ascend_backend_optimization.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc index 394c5ce281..a72fb9dc9a 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc @@ -114,8 +114,8 @@ void AscendDataLayout(const std::shared_ptr &kernel_graph) data_layout_pm->AddPass(std::make_shared()); data_layout_pm->AddPass(std::make_shared()); data_layout_pm->AddPass(std::make_shared()); - data_layout_pm->AddPass(std::make_shared()); data_layout_pm->AddPass(std::make_shared()); + data_layout_pm->AddPass(std::make_shared()); optimizer->AddPassManager(data_layout_pm); (void)optimizer->Optimize(kernel_graph); kernel_graph->SetExecOrderByDefault(); From cce61d462c5d3b1cef5e1c93732548f2506dc318 Mon Sep 17 00:00:00 2001 From: wenkai Date: Sun, 12 Apr 2020 16:32:36 +0800 Subject: [PATCH 307/367] histogram python dev --- mindspore/ccsrc/utils/summary.proto | 25 +++ mindspore/train/summary/_summary_adapter.py | 78 ++++++- mindspore/train/summary/_summary_scheduler.py | 3 + .../ut/python/train/summary/summary_reader.py | 43 ++++ .../train/summary/test_histogram_summary.py | 210 ++++++++++++++++++ 5 files changed, 358 insertions(+), 1 deletion(-) 
create mode 100644 tests/ut/python/train/summary/summary_reader.py create mode 100644 tests/ut/python/train/summary/test_histogram_summary.py diff --git a/mindspore/ccsrc/utils/summary.proto b/mindspore/ccsrc/utils/summary.proto index f7fb733597..6ea6ce08b8 100644 --- a/mindspore/ccsrc/utils/summary.proto +++ b/mindspore/ccsrc/utils/summary.proto @@ -61,6 +61,30 @@ message Summary { required bytes encoded_image = 4; } + message Histogram { + message bucket{ + // Count number of values fallen in [left, left + width). + // For the right most bucket, range is [left, left + width]. + required double left = 1; + required double width = 2; + required int64 count = 3; + } + + repeated bucket buckets = 1; + optional int64 nan_count = 2; + optional int64 pos_inf_count = 3; + optional int64 neg_inf_count = 4; + + // max, min, sum will not take nan and inf into account. + // If there is no valid value in tensor, max will be nan, min will be nan, sum will be 0. + optional double max = 5; + optional double min = 6; + optional double sum = 7; + + // total number of values, including nan and inf + optional int64 count = 8; + } + message Value { // Tag name for the data. required string tag = 1; @@ -70,6 +94,7 @@ message Summary { float scalar_value = 3; Image image = 4; TensorProto tensor = 8; + Histogram histogram = 9; } } diff --git a/mindspore/train/summary/_summary_adapter.py b/mindspore/train/summary/_summary_adapter.py index 29a5774271..7db80de693 100644 --- a/mindspore/train/summary/_summary_adapter.py +++ b/mindspore/train/summary/_summary_adapter.py @@ -71,12 +71,14 @@ class SummaryType(Enum): TENSOR (Number): Summary TENSOR enum. IMAGE (Number): Summary image enum. GRAPH (Number): Summary graph enum. + HISTOGRAM (Number): Summary histogram enum. INVALID (Number): Unknow type. 
""" SCALAR = 1 # Scalar summary TENSOR = 2 # Tensor summary IMAGE = 3 # Image summary GRAPH = 4 # graph + HISTOGRAM = 5 # Histogram Summary INVALID = 0xFF # unknow type @@ -148,7 +150,7 @@ def package_summary_event(data_id, step): """ data_list = get_summary_data(data_id) if data_list is None: - logger.error("The step(%r) does not have record data.", self.step) + logger.error("The step(%r) does not have record data.", step) del_summary_data(data_id) # create the event of summary summary_event = Event() @@ -177,6 +179,12 @@ def package_summary_event(data_id, step): summary_value.tag = tag summary_image = summary_value.image _get_image_summary(tag, data, summary_image, MS_IMAGE_TENSOR_FORMAT) + elif summary_type is SummaryType.HISTOGRAM: + logger.debug("Now process Histogram summary, tag = %r", tag) + summary_value = summary.value.add() + summary_value.tag = tag + summary_histogram = summary_value.histogram + _fill_histogram_summary(tag, data, summary_histogram) else: # The data is invalid ,jump the data logger.error("Summary type is error, tag = %r", tag) @@ -284,6 +292,74 @@ def _get_tensor_summary(tag: str, np_value, summary_tensor): return summary_tensor +def _fill_histogram_summary(tag: str, np_value: np.array, summary_histogram) -> None: + """ + Package the histogram summary. + + Args: + tag (str): Summary tag describe. + np_value (np.array): Summary data. + summary_histogram (summary_pb2.Summary.Histogram): Summary histogram data. + """ + logger.debug("Set(%r) the histogram summary value", tag) + # Default bucket for tensor with no valid data. 
+ default_bucket_left = -0.5 + default_bucket_width = 1.0 + + if np_value.size == 0: + bucket = summary_histogram.buckets.add() + bucket.left = default_bucket_left + bucket.width = default_bucket_width + bucket.count = 0 + + summary_histogram.nan_count = 0 + summary_histogram.pos_inf_count = 0 + summary_histogram.neg_inf_count = 0 + + summary_histogram.max = 0 + summary_histogram.min = 0 + summary_histogram.sum = 0 + + summary_histogram.count = 0 + + return + + summary_histogram.nan_count = np.count_nonzero(np.isnan(np_value)) + summary_histogram.pos_inf_count = np.count_nonzero(np.isposinf(np_value)) + summary_histogram.neg_inf_count = np.count_nonzero(np.isneginf(np_value)) + summary_histogram.count = np_value.size + + masked_value = np.ma.masked_invalid(np_value) + tensor_max = masked_value.max() + tensor_min = masked_value.min() + tensor_sum = masked_value.sum() + + # No valid value in tensor. + if tensor_max is np.ma.masked: + bucket = summary_histogram.buckets.add() + bucket.left = default_bucket_left + bucket.width = default_bucket_width + bucket.count = 0 + + summary_histogram.max = np.nan + summary_histogram.min = np.nan + summary_histogram.sum = 0 + + return + + counts, edges = np.histogram(np_value, bins='auto', range=(tensor_min, tensor_max)) + + for ind, count in enumerate(counts): + bucket = summary_histogram.buckets.add() + bucket.left = edges[ind] + bucket.width = edges[ind + 1] - edges[ind] + bucket.count = count + + summary_histogram.max = tensor_max + summary_histogram.min = tensor_min + summary_histogram.sum = tensor_sum + + def _get_image_summary(tag: str, np_value, summary_image, input_format='NCHW'): """ Package the image summary. 
diff --git a/mindspore/train/summary/_summary_scheduler.py b/mindspore/train/summary/_summary_scheduler.py index fa5a228e6a..3327b02fa7 100644 --- a/mindspore/train/summary/_summary_scheduler.py +++ b/mindspore/train/summary/_summary_scheduler.py @@ -23,6 +23,7 @@ from ._summary_adapter import SummaryType, package_summary_event, save_summary_d FORMAT_SCALAR_STR = "Scalar" FORMAT_TENSOR_STR = "Tensor" FORMAT_IMAGE_STR = "Image" +FORMAT_HISTOGRAM_STR = "Histogram" FORMAT_BEGIN_SLICE = "[:" FORMAT_END_SLICE = "]" @@ -95,6 +96,8 @@ def _parse_tag_format(tag: str): summary_type = SummaryType.TENSOR elif type_str == FORMAT_IMAGE_STR: summary_type = SummaryType.IMAGE + elif type_str == FORMAT_HISTOGRAM_STR: + summary_type = SummaryType.HISTOGRAM else: logger.error("The tag(%s) type is invalid.", tag) summary_type = SummaryType.INVALID diff --git a/tests/ut/python/train/summary/summary_reader.py b/tests/ut/python/train/summary/summary_reader.py new file mode 100644 index 0000000000..647c25f25c --- /dev/null +++ b/tests/ut/python/train/summary/summary_reader.py @@ -0,0 +1,43 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Summary reader.""" +import struct + +import mindspore.train.summary_pb2 as summary_pb2 + +_HEADER_SIZE = 8 +_HEADER_CRC_SIZE = 4 +_DATA_CRC_SIZE = 4 + + +class SummaryReader: + """Read events from summary file.""" + + def __init__(self, file_name): + self._file_name = file_name + self._file_handler = open(self._file_name, "rb") + # skip version event + self.read_event() + + def read_event(self): + """Read next event.""" + file_handler = self._file_handler + header = file_handler.read(_HEADER_SIZE) + data_len = struct.unpack('Q', header)[0] + file_handler.read(_HEADER_CRC_SIZE) + event_str = file_handler.read(data_len) + file_handler.read(_DATA_CRC_SIZE) + summary_event = summary_pb2.Event.FromString(event_str) + return summary_event diff --git a/tests/ut/python/train/summary/test_histogram_summary.py b/tests/ut/python/train/summary/test_histogram_summary.py new file mode 100644 index 0000000000..50204cd757 --- /dev/null +++ b/tests/ut/python/train/summary/test_histogram_summary.py @@ -0,0 +1,210 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +"""Test histogram summary.""" + +import logging +import os +import tempfile + +import numpy as np + +from mindspore.common.tensor import Tensor +from mindspore.train.summary.summary_record import SummaryRecord, _cache_summary_tensor_data +from .summary_reader import SummaryReader + +CUR_DIR = os.getcwd() +SUMMARY_DIR = os.path.join(CUR_DIR, "/test_temp_summary_event_file/") + +LOG = logging.getLogger("test") +LOG.setLevel(level=logging.ERROR) + + +def _wrap_test_data(input_data: Tensor): + """ + Wraps test data to summary format. + + Args: + input_data (Tensor): Input data. + + Returns: + dict, the wrapped data. + """ + + return [{ + "name": "test_data[:Histogram]", + "data": input_data + }] + + +def test_histogram_summary(): + """Test histogram summary.""" + with tempfile.TemporaryDirectory() as tmp_dir: + test_writer = SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") + + test_data = _wrap_test_data(Tensor([[1, 2, 3], [4, 5, 6]])) + _cache_summary_tensor_data(test_data) + test_writer.record(step=1) + test_writer.close() + + file_name = os.path.join(tmp_dir, test_writer.event_file_name) + reader = SummaryReader(file_name) + event = reader.read_event() + assert event.summary.value[0].histogram.count == 6 + + +def test_histogram_multi_summary(): + """Test histogram multiple step.""" + with tempfile.TemporaryDirectory() as tmp_dir: + test_writer = SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") + + rng = np.random.RandomState(10) + size = 50 + num_step = 5 + + for i in range(num_step): + arr = rng.normal(size=size) + + test_data = _wrap_test_data(Tensor(arr)) + _cache_summary_tensor_data(test_data) + test_writer.record(step=i) + + test_writer.close() + + file_name = os.path.join(tmp_dir, test_writer.event_file_name) + reader = SummaryReader(file_name) + for _ in range(num_step): + event = reader.read_event() + assert event.summary.value[0].histogram.count == size + + +def 
test_histogram_summary_scalar_tensor(): + """Test histogram summary, input is a scalar tensor.""" + with tempfile.TemporaryDirectory() as tmp_dir: + test_writer = SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") + + test_data = _wrap_test_data(Tensor(1)) + _cache_summary_tensor_data(test_data) + test_writer.record(step=1) + test_writer.close() + + file_name = os.path.join(tmp_dir, test_writer.event_file_name) + reader = SummaryReader(file_name) + event = reader.read_event() + assert event.summary.value[0].histogram.count == 1 + + +def test_histogram_summary_empty_tensor(): + """Test histogram summary, input is an empty tensor.""" + with tempfile.TemporaryDirectory() as tmp_dir: + test_writer = SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") + + test_data = _wrap_test_data(Tensor([])) + _cache_summary_tensor_data(test_data) + test_writer.record(step=1) + test_writer.close() + + file_name = os.path.join(tmp_dir, test_writer.event_file_name) + reader = SummaryReader(file_name) + event = reader.read_event() + assert event.summary.value[0].histogram.count == 0 + + +def test_histogram_summary_same_value(): + """Test histogram summary, input is an ones tensor.""" + with tempfile.TemporaryDirectory() as tmp_dir: + test_writer = SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") + + dim1 = 100 + dim2 = 100 + + test_data = _wrap_test_data(Tensor(np.ones([dim1, dim2]))) + _cache_summary_tensor_data(test_data) + test_writer.record(step=1) + test_writer.close() + + file_name = os.path.join(tmp_dir, test_writer.event_file_name) + reader = SummaryReader(file_name) + event = reader.read_event() + LOG.debug(event) + + assert len(event.summary.value[0].histogram.buckets) == 1 + + +def test_histogram_summary_high_dims(): + """Test histogram summary, input is a 4-dimension tensor.""" + with tempfile.TemporaryDirectory() as tmp_dir: + test_writer = SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") + dim = 10 + + rng = np.random.RandomState(0) + tensor_data = 
rng.normal(size=[dim, dim, dim, dim]) + test_data = _wrap_test_data(Tensor(tensor_data)) + _cache_summary_tensor_data(test_data) + test_writer.record(step=1) + test_writer.close() + + file_name = os.path.join(tmp_dir, test_writer.event_file_name) + reader = SummaryReader(file_name) + event = reader.read_event() + LOG.debug(event) + + assert event.summary.value[0].histogram.count == tensor_data.size + + +def test_histogram_summary_nan_inf(): + """Test histogram summary, input tensor has nan.""" + with tempfile.TemporaryDirectory() as tmp_dir: + test_writer = SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") + + dim1 = 100 + dim2 = 100 + + arr = np.ones([dim1, dim2]) + arr[0][0] = np.nan + arr[0][1] = np.inf + arr[0][2] = -np.inf + test_data = _wrap_test_data(Tensor(arr)) + + _cache_summary_tensor_data(test_data) + test_writer.record(step=1) + test_writer.close() + + file_name = os.path.join(tmp_dir, test_writer.event_file_name) + reader = SummaryReader(file_name) + event = reader.read_event() + LOG.debug(event) + + assert event.summary.value[0].histogram.nan_count == 1 + + +def test_histogram_summary_all_nan_inf(): + """Test histogram summary, input tensor has no valid number.""" + with tempfile.TemporaryDirectory() as tmp_dir: + test_writer = SummaryRecord(tmp_dir, file_suffix="_MS_HISTOGRAM") + + test_data = _wrap_test_data(Tensor(np.array([np.nan, np.nan, np.nan, np.inf, -np.inf]))) + _cache_summary_tensor_data(test_data) + test_writer.record(step=1) + test_writer.close() + + file_name = os.path.join(tmp_dir, test_writer.event_file_name) + reader = SummaryReader(file_name) + event = reader.read_event() + LOG.debug(event) + + histogram = event.summary.value[0].histogram + assert histogram.nan_count == 3 + assert histogram.pos_inf_count == 1 + assert histogram.neg_inf_count == 1 From c72bafaec9ccf1d1c5560bd1bac3e97189e8a48f Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Fri, 17 Apr 2020 09:50:00 +0800 Subject: [PATCH 308/367] fix compiles fail with cmake version 
greater than 3.17 on windows --- README.md | 2 +- cmake/utils.cmake | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index cf6eece1e7..e465f8e3e1 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ MindSpore offers build options across multiple backends: | GPU CUDA 9.2 | Ubuntu-x86 | ✔️ | | GPU CUDA 10.1 | Ubuntu-x86 | ✔️ | | CPU | Ubuntu-x86 | ✔️ | -| CPU | Windows-x86 | ✔️ | +| | Windows-x86 | ✔️ | For installation using `pip`, take `CPU` and `Ubuntu-x86` build version as an example: diff --git a/cmake/utils.cmake b/cmake/utils.cmake index f0181de5ea..21a766dc8c 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -1,6 +1,10 @@ include(FetchContent) set(FETCHCONTENT_QUIET OFF) +if (CMAKE_SYSTEM_NAME MATCHES "Windows" AND ${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.17.0) + set(CMAKE_FIND_LIBRARY_SUFFIXES .dll ${CMAKE_FIND_LIBRARY_SUFFIXES}) +endif () + function(mindspore_add_submodule_obj des_submodule_objs sub_dir submodule_name_obj) add_subdirectory(${sub_dir}) From 523fde0e25a5ef67d798a362328a85219b100eb1 Mon Sep 17 00:00:00 2001 From: chenjianping Date: Fri, 17 Apr 2020 02:33:37 +0000 Subject: [PATCH 309/367] eliminate securec warning --- cmake/dependency_securec.cmake | 3 +++ third_party/securec/CMakeLists.txt | 2 +- third_party/securec/src/CMakeLists.txt | 7 +++++-- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/cmake/dependency_securec.cmake b/cmake/dependency_securec.cmake index 81714c21d4..7ff5acad06 100644 --- a/cmake/dependency_securec.cmake +++ b/cmake/dependency_securec.cmake @@ -9,6 +9,9 @@ if (NOT TARGET securec) set(_ms_tmp_CMAKE_C_FLAGS ${CMAKE_C_FLAGS}) set(CMAKE_C_FLAGS "${SECURE_CXX_FLAGS}") + if (CMAKE_SYSTEM_NAME MATCHES "Windows") + add_compile_definitions(SECUREC_ONLY_DECLARE_MEMSET) + endif() add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/securec ${CMAKE_BINARY_DIR}/securec) set(CMAKE_POSITION_INDEPENDENT_CODE ${_ms_tmp_CMAKE_POSITION_INDEPENDENT_CODE}) 
set(CMAKE_C_FLAGS ${_ms_tmp_CMAKE_C_FLAGS}) diff --git a/third_party/securec/CMakeLists.txt b/third_party/securec/CMakeLists.txt index b195cc9555..99345c6eb3 100644 --- a/third_party/securec/CMakeLists.txt +++ b/third_party/securec/CMakeLists.txt @@ -9,7 +9,7 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON) #add flags if (CMAKE_SYSTEM_NAME MATCHES "Windows") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include -Wno-attributes") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include") else() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include -Werror") endif() diff --git a/third_party/securec/src/CMakeLists.txt b/third_party/securec/src/CMakeLists.txt index 60ec0a90ee..2af0f764c8 100644 --- a/third_party/securec/src/CMakeLists.txt +++ b/third_party/securec/src/CMakeLists.txt @@ -1,3 +1,6 @@ -aux_source_directory(. SECUREC_SRCS) - +if (CMAKE_SYSTEM_NAME MATCHES "Windows") + list(APPEND SECUREC_SRCS "memset_s.c") +else() + aux_source_directory(. SECUREC_SRCS) +endif() add_library(securec STATIC ${SECUREC_SRCS}) From 10877f077e8f2db69487e4b3eeb5924722c9df5d Mon Sep 17 00:00:00 2001 From: simson <526422051@qq.com> Date: Fri, 17 Apr 2020 10:33:46 +0800 Subject: [PATCH 310/367] modify examples of Cast/NMSWithMask --- mindspore/ops/operations/array_ops.py | 3 +-- mindspore/ops/operations/math_ops.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 0af284b947..57dbfc621d 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -174,10 +174,9 @@ class Cast(PrimitiveWithInfer): Examples: >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32) >>> input_x = Tensor(input_np) - >>> type_dst = mindspore.int32 + >>> type_dst = mindspore.float16 >>> cast = P.Cast() >>> result = cast(input_x, type_dst) - >>> expect = input_np.astype(type_dst) """ @prim_attr_register diff --git a/mindspore/ops/operations/math_ops.py 
b/mindspore/ops/operations/math_ops.py index e390b6b589..3665f3c023 100644 --- a/mindspore/ops/operations/math_ops.py +++ b/mindspore/ops/operations/math_ops.py @@ -1872,7 +1872,7 @@ class NMSWithMask(PrimitiveWithInfer): >>> bbox = np.random.rand(128, 5) >>> bbox[:, 2] += bbox[:, 0] >>> bbox[:, 3] += bbox[:, 1] - >>> inputs = Tensor(bbox) + >>> inputs = Tensor(bbox, mindspore.float32) >>> nms = P.NMSWithMask(0.5) >>> output_boxes, indices, mask = nms(inputs) """ From 36ffb66782175388dbe200e74cc0cb61a1881db1 Mon Sep 17 00:00:00 2001 From: yangzhenzhang <285824651@qq.com> Date: Thu, 16 Apr 2020 21:52:18 +0800 Subject: [PATCH 311/367] add parallel op for square --- mindspore/ccsrc/parallel/dynamic_creator.h | 1 + .../ccsrc/parallel/ops_info/activation_info.h | 8 ++ mindspore/ccsrc/parallel/ops_info/ops_utils.h | 3 +- .../ccsrc/parallel/step_auto_parallel.cc | 7 +- tests/ut/python/parallel/test_square.py | 85 +++++++++++++++++++ 5 files changed, 100 insertions(+), 4 deletions(-) create mode 100644 tests/ut/python/parallel/test_square.py diff --git a/mindspore/ccsrc/parallel/dynamic_creator.h b/mindspore/ccsrc/parallel/dynamic_creator.h index 723c018d7f..bad947687d 100644 --- a/mindspore/ccsrc/parallel/dynamic_creator.h +++ b/mindspore/ccsrc/parallel/dynamic_creator.h @@ -128,6 +128,7 @@ REGISTER(BatchMatMulInfo); REGISTER(ExpandDimsInfo); REGISTER(SqueezeInfo); REGISTER(SigmoidCrossEntropyWithLogitsInfo); +REGISTER(SquareInfo); } // namespace parallel } // namespace mindspore diff --git a/mindspore/ccsrc/parallel/ops_info/activation_info.h b/mindspore/ccsrc/parallel/ops_info/activation_info.h index 8dca036f9e..887be5ea33 100644 --- a/mindspore/ccsrc/parallel/ops_info/activation_info.h +++ b/mindspore/ccsrc/parallel/ops_info/activation_info.h @@ -203,6 +203,14 @@ class SqueezeInfo : public ActivationOther { private: ValueTuplePtr axis_; }; + +class SquareInfo : public ActivationOther { + public: + SquareInfo(const std::string& name, const Shapes& inputs_shape, const 
Shapes& outputs_shape, + const PrimitiveAttrs& attrs) + : ActivationOther(name, inputs_shape, outputs_shape, attrs) {} + ~SquareInfo() override = default; +}; } // namespace parallel } // namespace mindspore #endif // MINDSPORE_CCSRC_PARALLEL_OPS_INFO_ACTIVATION_INFO_H_ diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index 0b52d8e83c..89b174d6b0 100644 --- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -202,9 +202,10 @@ constexpr char SQRT[] = "Sqrt"; constexpr char ASSIGN[] = "Assign"; constexpr char GET_NEXT[] = "GetNext"; constexpr char SQUEEZE[] = "Squeeze"; -constexpr char Neg[] = "Neg"; +constexpr char NEG[] = "Neg"; constexpr char BATCH_MATMUL[] = "BatchMatMul"; constexpr char EXPAND_DIMS[] = "ExpandDims"; +constexpr char SQUARE[] = "Square"; // Parallel don't care constexpr char TUPLE_GETITEM[] = "tuple_getitem"; diff --git a/mindspore/ccsrc/parallel/step_auto_parallel.cc b/mindspore/ccsrc/parallel/step_auto_parallel.cc index 1d52eac82d..81aae04c73 100644 --- a/mindspore/ccsrc/parallel/step_auto_parallel.cc +++ b/mindspore/ccsrc/parallel/step_auto_parallel.cc @@ -104,13 +104,14 @@ std::vector splittable_op_ = {MATMUL, SQRT, GET_NEXT, CAST, - Neg, + NEG, + SQUARE, BATCH_MATMUL, EXPAND_DIMS, SQUEEZE}; -std::vector elementwise_op_ = {ACTIVATION, GELU, TANH, SOFTMAX, LOG_SOFTMAX, RELU, SQRT, - CAST, POW, EXP, LOG, COS, ACOS, LOGICALNOT}; +std::vector elementwise_op_ = {ACTIVATION, GELU, TANH, SOFTMAX, LOG_SOFTMAX, RELU, SQRT, CAST, + POW, EXP, LOG, COS, ACOS, LOGICALNOT, NEG, SQUARE}; bool StepAutoParallel(const FuncGraphPtr &root, const opt::OptimizerPtr &) { MS_EXCEPTION_IF_NULL(root); diff --git a/tests/ut/python/parallel/test_square.py b/tests/ut/python/parallel/test_square.py new file mode 100644 index 0000000000..e9c182a439 --- /dev/null +++ b/tests/ut/python/parallel/test_square.py @@ -0,0 +1,85 @@ +# Copyright 2020 Huawei Technologies Co., 
Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import mindspore as ms +from mindspore import context, Tensor, Parameter +from mindspore.nn import Cell, TrainOneStepCell, Momentum +from mindspore.ops import operations as P +from mindspore.common.api import _executor + + +class Net(Cell): + def __init__(self, mul_weight, strategy1=None, strategy2=None): + super(Net, self).__init__() + self.mul = P.Mul().set_strategy(strategy1) + self.square = P.Square().set_strategy(strategy2) + self.mul2 = P.Mul().set_strategy(strategy1) + self.mul_weight = Parameter(mul_weight, "w1") + + def construct(self, x, b): + out = self.mul(x, self.mul_weight) + out = self.square(out) + out = self.mul2(out, b) + return out + + +_x = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) +_w1 = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) +_b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32) + + +def compile_net(net): + optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9) + train_net = TrainOneStepCell(net, optimizer) + _executor.compile(train_net, _x, _b) + context.reset_auto_parallel_context() + + +def test_square_data_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((16, 1, 1), (16, 1, 1)) + strategy2 = ((16, 1, 1), ) + net = Net(_w1, strategy1, strategy2) + compile_net(net) + + +def test_square_model_parallel(): + 
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((1, 1, 16), (1, 1, 16)) + strategy2 = ((1, 1, 16), ) + net = Net(_w1, strategy1, strategy2) + compile_net(net) + + +def test_square_hybrid_parallel(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4), (2, 2, 4)) + strategy2 = ((2, 2, 4), ) + net = Net(_w1, strategy1, strategy2) + compile_net(net) + + +def test_square_auto_parallel(): + context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0) + net = Net(_w1) + compile_net(net) + + +def test_square_repeat_calc(): + context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0) + strategy1 = ((2, 2, 4), (2, 2, 4)) + strategy2 = ((1, 2, 2), ) + net = Net(_w1, strategy1, strategy2) + compile_net(net) From 79de8f4bdf84005b4bb88907432a7dab349f7e78 Mon Sep 17 00:00:00 2001 From: Xiaoda Zhang Date: Thu, 16 Apr 2020 15:10:06 +0800 Subject: [PATCH 312/367] Adjusting backward communication cost of some operators --- .../auto_parallel/operator_costmodel.cc | 73 +++++++++++++++++-- .../auto_parallel/operator_costmodel.h | 4 +- .../ccsrc/parallel/ops_info/gather_v2_info.cc | 2 +- 3 files changed, 69 insertions(+), 10 deletions(-) diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc index 9ea583293b..0192dce8b8 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.cc @@ -287,6 +287,31 @@ double BatchParallelCost::GetBackwardComputationCost(const std::vector& inputs, const std::vector&, + int32_t stage_id) const { + double result = 0.0; + CheckGlobalDeviceManager(); + MS_EXCEPTION_IF_NULL(g_device_manager); + auto total_device_num = g_device_manager->GetDeviceListByStageId(stage_id).size(); + + for 
(size_t j = 0; j < inputs.size(); ++j) { + if (!is_parameter_[j]) { + continue; + } + TensorInfo input_a_tensor_info = inputs[j]; + Shape input_a_shape = input_a_tensor_info.shape(); + Shape input_a_slice_shape = input_a_tensor_info.slice_shape(); + int32_t used_device_num = 1; + for (size_t i = 0; i < input_a_shape.size(); ++i) { + used_device_num *= input_a_shape[i] / input_a_slice_shape[i]; + } + if (total_device_num != IntToSize(used_device_num)) { + result += ListProduct(input_a_slice_shape) * static_cast(inputs_type_lengths_[0]); + } + } + + return result; +} // return the per device communication cost in the forward phase. double PReLUCost::GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const { // prelu does not need communication in the forward phase @@ -432,8 +457,24 @@ double ReshapeCost::GetForwardCommCost(const std::vector& inputs, co } // return the per device communication cost in the backward phase. -double ReshapeCost::GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const { - return 0.0; +double ReshapeCost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, + int32_t stage_id) const { + double result = 0.0; + if (is_parameter_[0]) { + TensorInfo input1 = inputs[0]; + MS_EXCEPTION_IF_NULL(g_device_manager); + auto total_device_num = g_device_manager->GetDeviceListByStageId(stage_id).size(); + Shape input1_shape = input1.shape(); + Shape input1_slice_shape = input1.slice_shape(); + int32_t used_device_num = 1; + for (size_t i = 0; i < input1_shape.size(); ++i) { + used_device_num *= input1_shape[i] / input1_slice_shape[i]; + } + if (total_device_num != IntToSize(used_device_num)) { + result = ListProduct(input1_slice_shape) * static_cast(inputs_type_lengths_[1]); + } + } + return result; } // Return the per device computation cost in the forward phase. 
The cost is calculated according to the bytes @@ -654,10 +695,30 @@ double GatherV2Cost::GetForwardCommCost(const std::vector&, const st } // return the per device communication cost in the backward phase. -double GatherV2Cost::GetBackwardCommCost(const std::vector&, const std::vector&, - int32_t) const { - // GatherV2Cost does not need communication in the backward phase - return 0.0; +double GatherV2Cost::GetBackwardCommCost(const std::vector& inputs, const std::vector&, + int32_t stage_id) const { + double result = 0.0; + CheckGlobalDeviceManager(); + MS_EXCEPTION_IF_NULL(g_device_manager); + auto total_device_num = g_device_manager->GetDeviceListByStageId(stage_id).size(); + + for (size_t j = 0; j < inputs.size(); ++j) { + if (!is_parameter_[j]) { + continue; + } + TensorInfo input_a_tensor_info = inputs[j]; + Shape input_a_shape = input_a_tensor_info.shape(); + Shape input_a_slice_shape = input_a_tensor_info.slice_shape(); + int32_t used_device_num = 1; + for (size_t i = 0; i < input_a_shape.size(); ++i) { + used_device_num *= input_a_shape[i] / input_a_slice_shape[i]; + } + if (total_device_num != IntToSize(used_device_num)) { + result += ListProduct(input_a_slice_shape) * static_cast(inputs_type_lengths_[0]); + } + } + + return result; } double GatherV2Cost::GetForwardComputationCost(const std::vector& inputs, const std::vector&, diff --git a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h index f16dfa21fc..37b054aa98 100644 --- a/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h +++ b/mindspore/ccsrc/parallel/auto_parallel/operator_costmodel.h @@ -226,9 +226,7 @@ class BatchParallelCost : public OperatorCost { double GetForwardCommCost(const std::vector&, const std::vector&, int32_t) const override { return 0.0; } - double GetBackwardCommCost(const std::vector&, const std::vector&, int32_t) const override { - return 0.0; - } + double GetBackwardCommCost(const std::vector&, 
const std::vector&, int32_t) const override; double GetComputationCost(const std::vector& inputs, const std::vector& outputs, int32_t stage_id) const override { return GetForwardComputationCost(inputs, outputs, stage_id) + GetBackwardComputationCost(inputs, outputs, stage_id); diff --git a/mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc b/mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc index 2010d1ed46..c315991849 100644 --- a/mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc +++ b/mindspore/ccsrc/parallel/ops_info/gather_v2_info.cc @@ -291,7 +291,7 @@ Status GatherV2Info::GenerateStrategies(int32_t stage_id) { } is_auto_parallel_ = true; - Shape input0_split(inputs_shape_[0].size()); + Shape input0_split(inputs_shape_[0].size(), 1); Shapes splittable_inputs = {input0_split}; std::vector sp_vector; From 1d1e085b0b85de13a30e8d850f15e84dbc79d512 Mon Sep 17 00:00:00 2001 From: xiefangqi Date: Fri, 17 Apr 2020 11:47:39 +0800 Subject: [PATCH 313/367] fix mindspore.datasets.apply comments --- mindspore/dataset/engine/datasets.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 1c6ac634f2..d62901d146 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -516,22 +516,14 @@ class Dataset: Dataset, applied by the function. 
Examples: - >>> import numpy as np >>> import mindspore.dataset as ds - >>> # Generate 1d int numpy array from 0 - 6 - >>> def generator_1d(): - >>> for i in range(6): - >>> yield (np.array([i]),) - >>> # 1) get all data from dataset - >>> data = ds.GeneratorDataset(generator_1d, ["data"]) - >>> # 2) declare a apply_func function + >>> # data is an instance of Dataset object + >>> # declare an apply_func function which returns a Dataset object >>> def apply_func(ds): >>> ds = ds.batch(2) >>> return ds - >>> # 3) use apply to call apply_func + >>> # use apply to call apply_func >>> data = data.apply(apply_func) - >>> for item in data.create_dict_iterator(): - >>> print(item["data"]) Raises: TypeError: If apply_func is not a function. From 34298e21358b7eca2995d3643fc55288b709733e Mon Sep 17 00:00:00 2001 From: anzhengqi Date: Fri, 17 Apr 2020 12:04:50 +0800 Subject: [PATCH 314/367] fix commit format error --- mindspore/dataset/engine/datasets.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 1c6ac634f2..4cff9c57b9 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -2597,19 +2597,24 @@ class Schema: Args: columns (dict or list[dict]): dataset attribution information, decoded from schema file. - if list: columns element must be dict, 'name' and 'type' must be in keys, 'shape' optional. - if dict: columns.keys() as name, element in columns.values() is dict, and 'type' inside, 'shape' optional. - example 1) - [{'name': 'image', 'type': 'int8', 'shape': [3, 3]}, - {'name': 'label', 'type': 'int8', 'shape': [1]}] - example 2) - {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}} + + - list[dict], 'name' and 'type' must be in keys, 'shape' optional. + + - dict, columns.keys() as name, columns.values() is dict, and 'type' inside, 'shape' optional. 
Raises: RuntimeError: If failed to parse columns. RuntimeError: If unknown items in columns. RuntimeError: If column's name field is missing. RuntimeError: If column's type field is missing. + + Example: + >>> schema = Schema() + >>> columns1 = [{'name': 'image', 'type': 'int8', 'shape': [3, 3]}, + >>> {'name': 'label', 'type': 'int8', 'shape': [1]}] + >>> schema.parse_columns(columns1) + >>> columns2 = {'image': {'shape': [3, 3], 'type': 'int8'}, 'label': {'shape': [1], 'type': 'int8'}} + >>> schema.parse_columns(columns2) """ self.columns = [] if isinstance(columns, list): From 149901ce2eb19e947492561d755853a05f6a2ed8 Mon Sep 17 00:00:00 2001 From: liyong Date: Thu, 16 Apr 2020 14:29:23 +0800 Subject: [PATCH 315/367] remove validation in write raw data --- mindspore/mindrecord/filewriter.py | 55 ++++-------------------------- 1 file changed, 6 insertions(+), 49 deletions(-) diff --git a/mindspore/mindrecord/filewriter.py b/mindspore/mindrecord/filewriter.py index 4056825ff3..90bca48038 100644 --- a/mindspore/mindrecord/filewriter.py +++ b/mindspore/mindrecord/filewriter.py @@ -26,8 +26,7 @@ from .shardheader import ShardHeader from .shardindexgenerator import ShardIndexGenerator from .shardutils import MIN_SHARD_COUNT, MAX_SHARD_COUNT, VALID_ATTRIBUTES, VALID_ARRAY_ATTRIBUTES, \ check_filename, VALUE_TYPE_MAP -from .common.exceptions import ParamValueError, ParamTypeError, MRMInvalidSchemaError, MRMDefineIndexError, \ - MRMValidateDataError +from .common.exceptions import ParamValueError, ParamTypeError, MRMInvalidSchemaError, MRMDefineIndexError __all__ = ['FileWriter'] @@ -201,52 +200,13 @@ class FileWriter: raw_data.pop(i) logger.warning(v) - def _verify_based_on_blob_fields(self, raw_data): + def write_raw_data(self, raw_data): """ - Verify data according to blob fields which is sub set of schema's fields. - - Raise exception if validation failed. - 1) allowed data type contains: "int32", "int64", "float32", "float64", "string", "bytes". 
- - Args: - raw_data (list[dict]): List of raw data. - - Raises: - MRMValidateDataError: If data does not match blob fields. - """ - schema_content = self._header.schema - for field in schema_content: - for i, v in enumerate(raw_data): - if field not in v: - raise MRMValidateDataError("for schema, {} th data is wrong: "\ - "there is not '{}' object in the raw data.".format(i, field)) - if field in self._header.blob_fields: - field_type = type(v[field]).__name__ - if field_type not in VALUE_TYPE_MAP: - raise MRMValidateDataError("for schema, {} th data is wrong: "\ - "data type for '{}' is not matched.".format(i, field)) - if schema_content[field]["type"] not in VALUE_TYPE_MAP[field_type]: - raise MRMValidateDataError("for schema, {} th data is wrong: "\ - "data type for '{}' is not matched.".format(i, field)) - if field_type == 'ndarray': - if 'shape' not in schema_content[field]: - raise MRMValidateDataError("for schema, {} th data is wrong: " \ - "data type for '{}' is not matched.".format(i, field)) - try: - # tuple or list - np.reshape(v[field], schema_content[field]['shape']) - except ValueError: - raise MRMValidateDataError("for schema, {} th data is wrong: " \ - "data type for '{}' is not matched.".format(i, field)) - - def write_raw_data(self, raw_data, validate=True): - """ - Write raw data and generate sequential pair of MindRecord File. + Write raw data and generate sequential pair of MindRecord File and \ + validate data based on predefined schema by default. Args: raw_data (list[dict]): List of raw data. - validate (bool, optional): Validate data according schema if it equals to True, - or validate data according to blob fields (default=True). Raises: ParamTypeError: If index field is invalid. 
@@ -264,11 +224,8 @@ class FileWriter: for each_raw in raw_data: if not isinstance(each_raw, dict): raise ParamTypeError('raw_data item', 'dict') - if validate is True: - self._verify_based_on_schema(raw_data) - elif validate is False: - self._verify_based_on_blob_fields(raw_data) - return self._writer.write_raw_data(raw_data, validate) + self._verify_based_on_schema(raw_data) + return self._writer.write_raw_data(raw_data, True) def set_header_size(self, header_size): """ From f9ef78609f67569c203290eefd5dbe2f543945e5 Mon Sep 17 00:00:00 2001 From: jjfeing Date: Fri, 17 Apr 2020 11:37:35 +0800 Subject: [PATCH 316/367] add nc1hwc0_c04 format --- mindspore/ccsrc/device/ascend/kernel_select_ascend.cc | 7 ++++--- mindspore/ccsrc/utils/utils.h | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc index dafe958348..f02e677163 100644 --- a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc +++ b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc @@ -45,9 +45,10 @@ enum MatchCountPriority : int { const size_t kMaxCount = 0xffffffff; const int kUnSupportMixedDataTypeIndex = -1; -const std::set kOpFormatList = { - kOpFormat_DEFAULT, kOpFormat_NC1KHKWHWC0, kOpFormat_ND, kOpFormat_NCHW, kOpFormat_NHWC, - kOpFormat_HWCN, kOpFormat_NC1HWC0, kOpFormat_FRAC_Z, kOpFormat_C1HWNCoC0, kOpFormat_FRAC_NZ}; +const std::set kOpFormatList = {kOpFormat_DEFAULT, kOpFormat_NC1KHKWHWC0, kOpFormat_ND, + kOpFormat_NCHW, kOpFormat_NHWC, kOpFormat_HWCN, + kOpFormat_NC1HWC0, kOpFormat_FRAC_Z, kOpFormat_C1HWNCoC0, + kOpFormat_FRAC_NZ, kOpFormat_NC1HWC0_C04}; bool IsShapeMatchFormat(const std::vector &shape, const std::string &format) { // if format is default, it remarkes support all format diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index e346a9e901..4661deed3a 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ 
-183,6 +183,7 @@ constexpr auto kOpFormat_NC1HWC0 = "NC1HWC0"; constexpr auto kOpFormat_FRAC_Z = "FracZ"; constexpr auto kOpFormat_FRAC_NZ = "FRACTAL_NZ"; constexpr auto kOpFormat_C1HWNCoC0 = "C1HWNCoC0"; +constexpr auto kOpFormat_NC1HWC0_C04 = "NC1HWC0_C04"; const std::set k1DSupportFormat = {kOpFormat_DEFAULT, kOpFormat_NCHW, kOpFormat_NHWC, kOpFormat_FRAC_Z, kOpFormat_NC1KHKWHWC0, kOpFormat_NC1HWC0}; const std::set k2DSupportFormat = {kOpFormat_DEFAULT, kOpFormat_NCHW, kOpFormat_NHWC, kOpFormat_FRAC_Z, From 83eeac9310819b1db8bad7051252f95dfcde4bdf Mon Sep 17 00:00:00 2001 From: kswang Date: Wed, 15 Apr 2020 11:12:14 +0800 Subject: [PATCH 317/367] optimize execute order sort --- .../device/ascend/ascend_memory_manager.h | 2 +- .../ccsrc/device/ascend/ascend_memory_pool.h | 8 +- .../ccsrc/session/anf_runtime_algorithm.cc | 8 + .../ccsrc/session/anf_runtime_algorithm.h | 1 + mindspore/ccsrc/session/kernel_graph.cc | 177 +++++++++++------- mindspore/ccsrc/session/kernel_graph.h | 7 +- tests/st/networks/test_gpu_lstm.py | 1 + 7 files changed, 127 insertions(+), 77 deletions(-) diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_manager.h b/mindspore/ccsrc/device/ascend/ascend_memory_manager.h index dea88ac10a..90c8b2dfca 100644 --- a/mindspore/ccsrc/device/ascend/ascend_memory_manager.h +++ b/mindspore/ccsrc/device/ascend/ascend_memory_manager.h @@ -23,7 +23,7 @@ namespace ascend { class AscendMemoryManager : public MemoryManager { public: AscendMemoryManager() = default; - virtual ~AscendMemoryManager() = default; + ~AscendMemoryManager() override = default; void MallocDeviceMemory() override; void FreeDeviceMemory() override; diff --git a/mindspore/ccsrc/device/ascend/ascend_memory_pool.h b/mindspore/ccsrc/device/ascend/ascend_memory_pool.h index c2a29725f4..a02bd453b2 100644 --- a/mindspore/ccsrc/device/ascend/ascend_memory_pool.h +++ b/mindspore/ccsrc/device/ascend/ascend_memory_pool.h @@ -26,6 +26,8 @@ namespace ascend { class AscendMemoryPool : public 
DynamicMemPoolBestFit { public: ~AscendMemoryPool() override = default; + AscendMemoryPool(const AscendMemoryPool&) = delete; + AscendMemoryPool& operator=(const AscendMemoryPool&) = delete; size_t AllocDeviceMem(size_t size, DeviceMemPtr* addr) override; bool FreeDeviceMem(const DeviceMemPtr& addr) override; @@ -51,13 +53,11 @@ class AscendMemoryPool : public DynamicMemPoolBestFit { private: AscendMemoryPool() = default; - AscendMemoryPool(const AscendMemoryPool&) = delete; - AscendMemoryPool& operator=(const AscendMemoryPool&) = delete; bool has_malloc_{false}; uint8_t* device_mem_pool_base_{nullptr}; uint64_t device_mem_pool_size_{0}; - size_t free_mem_size_; - size_t total_mem_size_; + size_t free_mem_size_{0}; + size_t total_mem_size_{0}; }; } // namespace ascend } // namespace device diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/session/anf_runtime_algorithm.cc index 44472a9a6f..4a38c5fa09 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.cc @@ -858,6 +858,14 @@ bool AnfRuntimeAlgorithm::IsCommunicationOp(const AnfNodePtr &node) { return false; } +bool AnfRuntimeAlgorithm::IsAllReduceOp(const AnfNodePtr &node) { + MS_EXCEPTION_IF_NULL(node); + if (node->isa() && AnfAlgo::GetCNodeName(node) == kAllReduceOpName) { + return true; + } + return false; +} + bool AnfRuntimeAlgorithm::IsGetNext(const NotNull &node) { auto kernel_name = AnfAlgo::GetCNodeName(node); return kernel_name == kGetNextOpName; diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.h b/mindspore/ccsrc/session/anf_runtime_algorithm.h index a70a63b678..78359cdd5a 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.h +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.h @@ -176,6 +176,7 @@ class AnfRuntimeAlgorithm { // get real input index for some tbe ops which input order is different between me and tbe impl static size_t GetRealInputIndex(const AnfNodePtr &anf_node, const size_t 
cur_index); static bool IsCommunicationOp(const AnfNodePtr &node); + static bool IsAllReduceOp(const AnfNodePtr &node); static bool IsGetNext(const NotNull &node); }; } // namespace session diff --git a/mindspore/ccsrc/session/kernel_graph.cc b/mindspore/ccsrc/session/kernel_graph.cc index bbcc04e14b..49d482d6d4 100755 --- a/mindspore/ccsrc/session/kernel_graph.cc +++ b/mindspore/ccsrc/session/kernel_graph.cc @@ -50,90 +50,127 @@ std::vector KernelGraph::outputs() const { } void KernelGraph::SetExecOrderByDefault() { - BfsToUpdateNodeOutput(); + std::stack seed_nodes; + UpdateNodeEdgeList(&seed_nodes); execution_order_.clear(); - std::queue allreduce_nodes; - std::queue zero_output_nodes; std::unordered_set visited_nodes; - auto clear_output = [&zero_output_nodes, &allreduce_nodes, &visited_nodes, this](const AnfNodePtr &input) -> void { - if (node_output_num_[input] == 0 && visited_nodes.find(input) == visited_nodes.end()) { - MS_EXCEPTION_IF_NULL(input); - MS_LOG(DEBUG) << "Clear output num:" << input->DebugString(); - (void)visited_nodes.insert(input); - if (input->isa() && AnfAlgo::GetCNodeName(input) == kAllReduceOpName) { - allreduce_nodes.push(input); - } else { - zero_output_nodes.push(input); + std::queue zero_input_nodes; + + auto visit_node_descendant = [&visited_nodes, this](const AnfNodePtr &node, std::queue *visit_queue) { + auto it = node_output_edges_.find(node); + if (it == node_output_edges_.end()) { + // value node and parameter has no input,no need to print log + if (node->isa()) { + MS_LOG(DEBUG) << "Can not find node [" << node->DebugString() << "]"; } + return; + } + + // visit all reduce node first, then other nodes + std::vector active_nodes; + for (const auto &output_edge : it->second) { + auto next_node = output_edge.first; + if (node_input_num_.find(next_node) == node_input_num_.end()) { + MS_EXCEPTION_IF_NULL(next_node); + MS_LOG(EXCEPTION) << "Can't find node[" << next_node->DebugString() << "]"; + } + MS_EXCEPTION_IF_NULL(next_node); 
+ MS_LOG(DEBUG) << "Decrease input:" << next_node->DebugString() << ",node:" << node->DebugString() + << ",num: " << node_input_num_[next_node] << ",decrease num:" << output_edge.second; + if (node_input_num_[next_node] < output_edge.second) { + MS_LOG(EXCEPTION) << "Input node:" << next_node->DebugString() << ",node_output_num" + << node_input_num_[next_node] << ",depend edge:" << output_edge.second; + } + node_input_num_[next_node] = node_input_num_[next_node] - output_edge.second; + // allreduce first + if (node_input_num_[next_node] == 0 && visited_nodes.find(next_node) == visited_nodes.end()) { + (void)visited_nodes.insert(next_node); + if (AnfAlgo::IsAllReduceOp(next_node)) { + MS_LOG(DEBUG) << "visit node:" << next_node->DebugString(); + visit_queue->push(next_node); + } else { + active_nodes.emplace_back(next_node); + } + } + } + + for (auto &node : active_nodes) { + MS_LOG(DEBUG) << "visit node:" << node->DebugString(); + visit_queue->push(node); } }; - zero_output_nodes.emplace(get_return()); - while (!zero_output_nodes.empty() || !allreduce_nodes.empty()) { - AnfNodePtr node; - if (!zero_output_nodes.empty()) { - node = zero_output_nodes.front(); - zero_output_nodes.pop(); + + AnfNodePtr last_allreduce_node = nullptr; + std::queue allreduce_descendants; + while (!seed_nodes.empty() || last_allreduce_node != nullptr) { + // seed nodes first, then visit last all reduce node descendant + if (seed_nodes.empty()) { + visit_node_descendant(last_allreduce_node, &allreduce_descendants); + last_allreduce_node = nullptr; } else { - node = allreduce_nodes.front(); - allreduce_nodes.pop(); - } - MS_EXCEPTION_IF_NULL(node); - if (node->isa() && AnfAlgo::IsRealKernel(node)) { - execution_order_.push_back(node->cast()); + zero_input_nodes.push(seed_nodes.top()); + seed_nodes.pop(); } - auto it = node_input_edges_.find(node); - if (it == node_input_edges_.end()) { - // value node and parameter has no input,no need to print log - if (node->isa()) { - MS_LOG(DEBUG) << 
"Can not find node [" << node->DebugString() << "]"; + + // all reduce node descendant first, then common queue + while (!zero_input_nodes.empty() || !allreduce_descendants.empty()) { + AnfNodePtr node = nullptr; + bool is_allreduce_descendant = false; + if (allreduce_descendants.empty()) { + node = zero_input_nodes.front(); + zero_input_nodes.pop(); + } else { + node = allreduce_descendants.front(); + allreduce_descendants.pop(); + is_allreduce_descendant = true; } - continue; - } - for (const auto &input_edge : it->second) { - if (node_output_num_.find(input_edge.first) == node_output_num_.end()) { - MS_EXCEPTION_IF_NULL(input_edge.first); - MS_LOG(EXCEPTION) << "Can't find node[" << input_edge.first->DebugString() << "]"; + // add execute node + MS_EXCEPTION_IF_NULL(node); + if (node->isa() && AnfAlgo::IsRealKernel(node)) { + execution_order_.push_back(node->cast()); } - MS_EXCEPTION_IF_NULL(input_edge.first); - MS_LOG(DEBUG) << "Decrease input:" << input_edge.first->DebugString() << ",node:" << node->DebugString() - << ",num: " << node_output_num_[input_edge.first] << ",decrease num:" << input_edge.second; - if (node_output_num_[input_edge.first] < input_edge.second) { - MS_LOG(EXCEPTION) << "Input node:" << input_edge.first->DebugString() << ",node_output_num" - << node_output_num_[input_edge.first] << "depend edge:" << input_edge.second; + // for all reduce node, visit last all reduce node descendant + if (AnfAlgo::IsAllReduceOp(node)) { + if (last_allreduce_node != nullptr) { + visit_node_descendant(last_allreduce_node, &allreduce_descendants); + } + last_allreduce_node = node; + } else if (is_allreduce_descendant) { + visit_node_descendant(node, &allreduce_descendants); + } else { + visit_node_descendant(node, &zero_input_nodes); } - node_output_num_[input_edge.first] = node_output_num_[input_edge.first] - input_edge.second; - clear_output(input_edge.first); } } + CheckLoop(); - std::reverse(execution_order_.begin(), execution_order_.end()); } void 
KernelGraph::CheckLoop() { - std::map none_zero_output; - if (node_output_edges_.size() != node_output_num_.size()) { - MS_LOG(EXCEPTION) << "node_output_edges_ size :" << node_output_edges_.size() - << "not equal to node_output_num_ size:" << node_output_num_.size(); + std::map none_zero_nodes; + if (node_input_edges_.size() != node_input_num_.size()) { + MS_LOG(EXCEPTION) << "node_input_edges_ size :" << node_input_edges_.size() + << "not equal to node_input_num_ size:" << node_input_num_.size(); } - for (auto &it : node_output_num_) { + for (auto &it : node_input_num_) { MS_EXCEPTION_IF_NULL(it.first); string str; - auto node_output_it = node_output_edges_.find(it.first); - if (node_output_it == node_output_edges_.end()) { + auto node_input_it = node_input_edges_.find(it.first); + if (node_input_it == node_input_edges_.end()) { MS_LOG(EXCEPTION) << "Can't find node [" << it.first->DebugString() << "]"; } - for (const auto &output_edge : node_output_edges_[it.first]) { - MS_EXCEPTION_IF_NULL(output_edge.first); - str = str.append(output_edge.first->DebugString()).append("|"); + for (const auto &input_edge : node_input_edges_[it.first]) { + MS_EXCEPTION_IF_NULL(input_edge.first); + str = str.append(input_edge.first->DebugString()).append("|"); } if (it.second != 0) { - MS_LOG(WARNING) << "Node:" << it.first->DebugString() << ",outputs:" << str << ",output num:" << it.second; - none_zero_output[it.first] = it.second; + MS_LOG(WARNING) << "Node:" << it.first->DebugString() << ",inputs:" << str << ",input num:" << it.second; + none_zero_nodes[it.first] = it.second; } } // if don't consider control depend and loop exit,a exception will be throw - if (!none_zero_output.empty()) { - MS_LOG(EXCEPTION) << "Nodes have loop, left node num:" << none_zero_output.size(); + if (!none_zero_nodes.empty()) { + MS_LOG(EXCEPTION) << "Nodes have loop, left node num:" << none_zero_nodes.size(); } } @@ -346,12 +383,13 @@ void KernelGraph::AddDependEdge(const AnfNodePtr &node, const 
AnfNodePtr &input, } else { input_it->second.push_back(input_depend_edge); } - // add the depend sum of node - auto depend_it = node_output_num_.find(input); - if (depend_it == node_output_num_.end()) { - node_output_num_[input] = 0; + // add node input depend num + auto depend_it = node_input_num_.find(node); + if (depend_it == node_input_num_.end()) { + node_input_num_[node] = depend_edge_num; + } else { + depend_it->second += depend_edge_num; } - node_output_num_[input] += depend_edge_num; } std::vector KernelGraph::GetOutputNodes(const AnfNodePtr &node) { @@ -429,9 +467,9 @@ bool KernelGraph::HandleControlDependNode(const AnfNodePtr &node, std::queue *seed_nodes) { node_output_edges_.clear(); - node_output_num_.clear(); + node_input_num_.clear(); node_input_edges_.clear(); std::vector control_depends; std::unordered_set visited_nodes; @@ -441,6 +479,11 @@ void KernelGraph::BfsToUpdateNodeOutput() { auto node = que.front(); que.pop(); MS_EXCEPTION_IF_NULL(node); + if (node->isa() || node->isa()) { + seed_nodes->push(node); + continue; + } + if (!node->isa()) { continue; } @@ -454,10 +497,6 @@ void KernelGraph::BfsToUpdateNodeOutput() { control_depends.push_back(input); depend_edge_num = 0; } - // the 2rd input of depend is no depend edge - if (AnfAlgo::CheckPrimitiveType(node, prim::kPrimDepend) && input == cnode->input(kDependAttachNodeIndex)) { - depend_edge_num = 0; - } PushNoVisitedNode(input, &que, &visited_nodes); AddDependEdge(node, input, depend_edge_num); } diff --git a/mindspore/ccsrc/session/kernel_graph.h b/mindspore/ccsrc/session/kernel_graph.h index ff964482bb..54b16014a3 100755 --- a/mindspore/ccsrc/session/kernel_graph.h +++ b/mindspore/ccsrc/session/kernel_graph.h @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include "ir/func_graph.h" @@ -93,8 +94,8 @@ class KernelGraph : public FuncGraph { private: // remove value node form graph bool RemoveValueNodeFromGraph(const ValueNodePtr &value_node); - // BFS to update all 
nodes' output - void BfsToUpdateNodeOutput(); + // update node edge list + void UpdateNodeEdgeList(std::stack *seed_nodes); // add node depend edge by data edge or control depend void AddDependEdge(const AnfNodePtr &node, const AnfNodePtr &input, size_t depend_edge_num); // handle control depend @@ -114,7 +115,7 @@ class KernelGraph : public FuncGraph { std::unordered_map tensor_to_value_node_map_; // include all value nodes std::unordered_set graph_value_nodes_; - std::unordered_map node_output_num_; + std::unordered_map node_input_num_; std::unordered_map>> node_input_edges_; // record map between ref final output anf with index and ref origin input with index std::map ref_out_in_map_; diff --git a/tests/st/networks/test_gpu_lstm.py b/tests/st/networks/test_gpu_lstm.py index 4387179812..e5208ff669 100644 --- a/tests/st/networks/test_gpu_lstm.py +++ b/tests/st/networks/test_gpu_lstm.py @@ -135,4 +135,5 @@ def test_LSTM(): for epoch in range(num_epochs): loss = train_network(train_features, train_labels) losses.append(loss) + print("loss:", loss.asnumpy()) assert(losses[-1].asnumpy() < 0.01) From b9742de3a3f13a8690b7ecf719c37d49fa486094 Mon Sep 17 00:00:00 2001 From: ms_yan <6576637+ms_yan@user.noreply.gitee.com> Date: Fri, 17 Apr 2020 15:24:10 +0800 Subject: [PATCH 318/367] repeair GeneratorDataset to_device problem --- mindspore/dataset/engine/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 4cff9c57b9..77b7dc54c7 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -607,9 +607,9 @@ class Dataset: def get_distribution(output_dataset): dev_id = 0 - if isinstance(output_dataset, (StorageDataset, GeneratorDataset, MindDataset)): + if isinstance(output_dataset, (StorageDataset, MindDataset)): return output_dataset.distribution, dev_id - if isinstance(output_dataset, (Cifar10Dataset, Cifar100Dataset, 
ImageFolderDatasetV2, + if isinstance(output_dataset, (Cifar10Dataset, Cifar100Dataset, GeneratorDataset, ImageFolderDatasetV2, ManifestDataset, MnistDataset, VOCDataset, CelebADataset)): sampler = output_dataset.sampler if isinstance(sampler, samplers.DistributedSampler): From b8210eb685c64aa2cd779aae70a347440fbaa14b Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Fri, 17 Apr 2020 15:19:07 +0800 Subject: [PATCH 319/367] fix no wrap in ge bankend in model.train --- mindspore/train/model.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mindspore/train/model.py b/mindspore/train/model.py index 46e4f421f7..8ce51562fa 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -254,7 +254,8 @@ class Model: """ # remove later to deal with loop sink need_wrap = False - if not hasattr(train_dataset, '__ME_INITED__') and context.get_context("enable_loop_sink"): + if not hasattr(train_dataset, '__ME_INITED__') and context.get_context("enable_loop_sink") \ + and not context.get_context("enable_ge"): need_wrap = True dataset_helper = DatasetHelper(train_dataset) @@ -418,7 +419,8 @@ class Model: # remove later to deal with loop sink need_wrap = False - if not hasattr(valid_dataset, '__ME_INITED__') and context.get_context("enable_loop_sink"): + if not hasattr(valid_dataset, '__ME_INITED__') and context.get_context("enable_loop_sink") \ + and not context.get_context("enable_ge"): need_wrap = True valid_dataset.__loop_size__ = 1 From 9cb665e634b7c7fb53ceaef5cea1d452550dc4e5 Mon Sep 17 00:00:00 2001 From: huangdongrun Date: Fri, 17 Apr 2020 15:16:55 +0800 Subject: [PATCH 320/367] add suport for parameter of const value pass as mixed precision args fix pylint --- mindspore/ccsrc/pipeline/parse/parse.cc | 4 +- mindspore/ops/composite/base.py | 9 ++++ .../parameter_feature/test_parameter.py | 42 ++++++++++++++++++- 3 files changed, 51 insertions(+), 4 deletions(-) diff --git a/mindspore/ccsrc/pipeline/parse/parse.cc 
b/mindspore/ccsrc/pipeline/parse/parse.cc index 231b98ab00..51c4fc17ec 100644 --- a/mindspore/ccsrc/pipeline/parse/parse.cc +++ b/mindspore/ccsrc/pipeline/parse/parse.cc @@ -68,9 +68,7 @@ AnfNodePtr GetMixedPrecisionCastHelp(const FuncGraphPtr &func_graph, const AnfNo return param; } auto cast_helper = prim::GetPythonOps("_mp_cast_helper", "mindspore.ops.composite.base"); - auto partial = - func_graph->NewCNode({NewValueNode(prim::kPrimPartial), NewValueNode(cast_helper), NewValueNode(dst_type)}); - auto cast = func_graph->NewCNode({NewValueNode(prim::kCompositeHyperMap), partial, param}); + auto cast = func_graph->NewCNode({NewValueNode(cast_helper), NewValueNode(dst_type), param}); return cast; } diff --git a/mindspore/ops/composite/base.py b/mindspore/ops/composite/base.py index 8670f4aa7c..4b559d1605 100644 --- a/mindspore/ops/composite/base.py +++ b/mindspore/ops/composite/base.py @@ -307,3 +307,12 @@ def _mixed_precision_cast_helper_2(type_, x): if F.issubclass_(F.dtype(x), mstype.float_): return P.Cast()(x, type_) return x + +@_mp_cast_helper.register("TypeType", "Tuple") +@core +def _mixed_precision_cast_helper_3(type_, x): + """if x is a tuple""" + t = () + for item in x: + t = t + (_mp_cast_helper(type_, item),) + return t diff --git a/tests/ut/python/parameter_feature/test_parameter.py b/tests/ut/python/parameter_feature/test_parameter.py index 696b107f56..1409fef386 100644 --- a/tests/ut/python/parameter_feature/test_parameter.py +++ b/tests/ut/python/parameter_feature/test_parameter.py @@ -19,7 +19,7 @@ from mindspore.nn import Cell from mindspore.ops import operations as P import mindspore.ops.composite as C -context.set_context(mode=context.GRAPH_MODE) +context.set_context(mode=context.GRAPH_MODE, save_graphs=True) def test_parser_three_default_mixed_args_subnet(): @@ -227,3 +227,43 @@ def test_net_vargs_expand(): net.set_train() net(x, y, sens) + + +def test_mixed_precision_const_parameter(): + class NetLoss(Cell): + def __init__(self): + 
super(NetLoss, self).__init__() + self.shape = P.Shape() + self.up_sample1 = P.ResizeBilinear((14, 14)) + self.up_sample2 = P.ResizeBilinear((28, 28)) + self.up_sample3 = P.ResizeBilinear((36, 36)) + def construct(self, x, y, z, *args): + ret = 0 + if args[0] == self.shape(z)[2]: + if args[0] == 14: + ret = self.up_sample1(y) + x + elif args[0] == 28: + ret = self.up_sample2(y) - x + else: + ret = x / y + else: + ret = x * y + ret = ret * z + return ret + class NetMain(Cell): + def __init__(self, loss_fn): + super(NetMain, self).__init__() + self.loss_fn = loss_fn + self.shape = P.Shape() + def construct(self, x, y, z): + size_x = self.shape(x)[2] + size_y = self.shape(y)[2] + ret = self.loss_fn(x, y, z, size_x, size_y) + return ret + loss_fn = NetLoss() + net = NetMain(loss_fn) + net.add_flags_recursive(fp32=True) + x = Tensor(np.ones((1, 3, 28, 28), np.float32)) + y = Tensor(np.ones((1, 3, 14, 14), np.float32)) + z = Tensor(np.ones((1, 3, 28, 28), np.float32)) + out = net(x, y, z) \ No newline at end of file From 00e4306518934c96774e5f7ca7004a23bbd1c13d Mon Sep 17 00:00:00 2001 From: lianliguang Date: Fri, 17 Apr 2020 16:45:50 +0800 Subject: [PATCH 321/367] convert all tuple output to maketuple --- cmake/options.cmake | 2 +- mindspore/ccsrc/common/trans.cc | 4 +- .../pre_activate/ascend/ascend_helper.cc | 16 +--- .../ccsrc/pre_activate/ascend/ascend_helper.h | 2 - .../insert_memcpy_async_for_getnext.cc | 1 + .../common/common_backend_optimization.cc | 2 + mindspore/ccsrc/pre_activate/common/helper.cc | 15 ++++ mindspore/ccsrc/pre_activate/common/helper.h | 2 + .../convert_tuple_input_to_dynamic_input.cc | 9 +-- .../pass/convert_tuple_output_to_maketuple.cc | 79 +++++++++++++++++++ .../pass/convert_tuple_output_to_maketuple.h | 40 ++++++++++ mindspore/ccsrc/session/kernel_graph.cc | 2 +- .../convert_tuple_output_to_maketuple_test.cc | 65 +++++++++++++++ .../convert_tuple_output_to_maketuple_test.py | 54 +++++++++++++ .../insert_memcpy_async_for_getnext.py | 7 +- 
15 files changed, 270 insertions(+), 30 deletions(-) create mode 100644 mindspore/ccsrc/pre_activate/pass/convert_tuple_output_to_maketuple.cc create mode 100644 mindspore/ccsrc/pre_activate/pass/convert_tuple_output_to_maketuple.h create mode 100644 tests/ut/cpp/pre_activate/pass/convert_tuple_output_to_maketuple_test.cc create mode 100644 tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py diff --git a/cmake/options.cmake b/cmake/options.cmake index 3677418a98..3e03ed3339 100644 --- a/cmake/options.cmake +++ b/cmake/options.cmake @@ -45,8 +45,8 @@ endif() if (DEBUG_MODE) set(CMAKE_BUILD_TYPE "Debug") -else() add_compile_definitions(MEM_REUSE_DEBUG) +else() set(CMAKE_BUILD_TYPE "Release") endif() diff --git a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc index a2b9f7ef24..5eb21f09bd 100644 --- a/mindspore/ccsrc/common/trans.cc +++ b/mindspore/ccsrc/common/trans.cc @@ -205,8 +205,8 @@ std::vector GetRuntimePaddingShape(const AnfNodePtr &node, size_t index) { if (tensor == nullptr) { MS_LOG(EXCEPTION) << " the node[ " << node->DebugString() << "]'s cannot convert "; } - shape = tensor->shape(); - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(host_shape), IntToSize); + auto shape_temp = tensor->shape(); + (void)std::transform(shape_temp.begin(), shape_temp.end(), std::back_inserter(host_shape), IntToSize); if (host_shape.empty()) { host_shape.push_back(1); } diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc index 745ed4460f..fbb3e345df 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.cc @@ -18,6 +18,7 @@ #include #include "common/trans.h" #include "common/utils.h" +#include "pre_activate/common/helper.h" #include "utils/utils.h" #include "device/kernel_info.h" #include "kernel/oplib/oplib.h" @@ -346,21 +347,6 @@ CNodePtr 
InsertCastForInput(const FuncGraphPtr &func_graph, const CNodePtr &cnod return new_node; } -AnfNodePtr CreatTupleGetItemNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, size_t output_idx) { - auto idx = NewValueNode(SizeToInt(output_idx)); - MS_EXCEPTION_IF_NULL(idx); - auto imm = std::make_shared(SizeToInt(output_idx)); - auto abstract_scalar = std::make_shared(imm); - idx->set_abstract(abstract_scalar); - AnfNodePtr tuple_getitem = func_graph->NewCNode({NewValueNode(prim::kPrimTupleGetItem), node, idx}); - MS_EXCEPTION_IF_NULL(tuple_getitem); - tuple_getitem->set_scope(node->scope()); - std::vector origin_shape = AnfAlgo::GetOutputInferShape(node, output_idx); - TypeId origin_type = AnfAlgo::GetOutputInferDataType(node, output_idx); - AnfAlgo::SetOutputInferTypeAndShape({origin_type}, {origin_shape}, tuple_getitem.get()); - return tuple_getitem; -} - AnfNodePtr CreateMemcpyAsyncOp(const FuncGraphPtr &graph, const AnfNodePtr &node) { MS_EXCEPTION_IF_NULL(graph); MS_EXCEPTION_IF_NULL(node); diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h index 7f5e86d726..a8fd7dc514 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_helper.h @@ -64,8 +64,6 @@ AnfNodePtr InsertTransOpForOutput(const FuncGraphPtr &func_graph, const AnfNodeP CNodePtr InsertCastForInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode); -AnfNodePtr CreatTupleGetItemNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, size_t output_idx); - AnfNodePtr CreateMemcpyAsyncOp(const FuncGraphPtr &graph, const AnfNodePtr &node); } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc b/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc index 5065bab0f2..fb8b19047c 100644 --- 
a/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc +++ b/mindspore/ccsrc/pre_activate/ascend/enhancer/insert_memcpy_async_for_getnext.cc @@ -17,6 +17,7 @@ #include #include #include "pre_activate/ascend/ascend_helper.h" +#include "pre_activate/common/helper.h" #include "session/anf_runtime_algorithm.h" namespace mindspore { diff --git a/mindspore/ccsrc/pre_activate/common/common_backend_optimization.cc b/mindspore/ccsrc/pre_activate/common/common_backend_optimization.cc index c3fd292aa9..f622f2f06f 100644 --- a/mindspore/ccsrc/pre_activate/common/common_backend_optimization.cc +++ b/mindspore/ccsrc/pre_activate/common/common_backend_optimization.cc @@ -18,6 +18,7 @@ #include #include "pre_activate/common/optimizer.h" #include "pre_activate/pass/convert_const_input_to_attr.h" +#include "pre_activate/pass/convert_tuple_output_to_maketuple.h" #include "pre_activate/pass/convert_const_input_to_tensor_input.h" #include "pre_activate/pass/convert_tuple_input_to_dynamic_input.h" #include "utils/context/ms_context.h" @@ -42,6 +43,7 @@ void BackendCommonOptimization(const std::shared_ptr &kern common_pm->AddPass(std::make_shared()); common_pm->AddPass(std::make_shared()); common_pm->AddPass(std::make_shared()); + common_pm->AddPass(std::make_shared()); optimizer->AddPassManager(common_pm); (void)optimizer->Optimize(kernel_graph); kernel_graph->SetExecOrderByDefault(); diff --git a/mindspore/ccsrc/pre_activate/common/helper.cc b/mindspore/ccsrc/pre_activate/common/helper.cc index 3f157d6a64..de45239268 100644 --- a/mindspore/ccsrc/pre_activate/common/helper.cc +++ b/mindspore/ccsrc/pre_activate/common/helper.cc @@ -407,5 +407,20 @@ bool IsUsedByOthers(const FuncGraphPtr &graph, const AnfNodePtr &node) { } return manager->node_users()[node].size() > 1; } + +AnfNodePtr CreatTupleGetItemNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, size_t output_idx) { + auto idx = NewValueNode(SizeToInt(output_idx)); + MS_EXCEPTION_IF_NULL(idx); + 
auto imm = std::make_shared(SizeToInt(output_idx)); + auto abstract_scalar = std::make_shared(imm); + idx->set_abstract(abstract_scalar); + AnfNodePtr tuple_getitem = func_graph->NewCNode({NewValueNode(prim::kPrimTupleGetItem), node, idx}); + MS_EXCEPTION_IF_NULL(tuple_getitem); + tuple_getitem->set_scope(node->scope()); + std::vector origin_shape = AnfAlgo::GetOutputInferShape(node, output_idx); + TypeId origin_type = AnfAlgo::GetOutputInferDataType(node, output_idx); + AnfAlgo::SetOutputInferTypeAndShape({origin_type}, {origin_shape}, tuple_getitem.get()); + return tuple_getitem; +} } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/common/helper.h b/mindspore/ccsrc/pre_activate/common/helper.h index 8d174a1ad0..2b95bec824 100644 --- a/mindspore/ccsrc/pre_activate/common/helper.h +++ b/mindspore/ccsrc/pre_activate/common/helper.h @@ -146,6 +146,8 @@ void HideNopNode(session::KernelGraph *const graph); void RemoveNopNode(session::KernelGraph *const graph); +AnfNodePtr CreatTupleGetItemNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, size_t output_idx); + bool IsUsedByOthers(const FuncGraphPtr &graph, const AnfNodePtr &node); } // namespace opt } // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/pass/convert_tuple_input_to_dynamic_input.cc b/mindspore/ccsrc/pre_activate/pass/convert_tuple_input_to_dynamic_input.cc index 92579111f6..ccc4fd5265 100644 --- a/mindspore/ccsrc/pre_activate/pass/convert_tuple_input_to_dynamic_input.cc +++ b/mindspore/ccsrc/pre_activate/pass/convert_tuple_input_to_dynamic_input.cc @@ -19,6 +19,7 @@ #include #include "session/anf_runtime_algorithm.h" +#include "pre_activate/common/helper.h" #include "session/kernel_graph.h" namespace mindspore { @@ -40,13 +41,7 @@ void ConvertTupleOuputToPlantInputs(const FuncGraphPtr &graph, const AnfNodePtr convert_inputs = kernel_graph->SplitTupleValueNodeToNodeList(value_node); } else { for (size_t index = 0; index < output_size; ++index) { - 
auto idx = NewValueNode(SizeToInt(index)); - MS_EXCEPTION_IF_NULL(idx); - auto imm = std::make_shared(SizeToInt(index)); - auto abstract_scalar = std::make_shared(imm); - idx->set_abstract(abstract_scalar); - auto tuple_get_item = - graph->NewCNode(std::vector{NewValueNode(prim::kPrimTupleGetItem), input_node, idx}); + auto tuple_get_item = CreatTupleGetItemNode(graph, input_node, index); AnfAlgo::SetOutputInferTypeAndShape({AnfAlgo::GetOutputInferDataType(input_node, index)}, {AnfAlgo::GetOutputInferShape(input_node, index)}, tuple_get_item.get()); convert_inputs.emplace_back(tuple_get_item); diff --git a/mindspore/ccsrc/pre_activate/pass/convert_tuple_output_to_maketuple.cc b/mindspore/ccsrc/pre_activate/pass/convert_tuple_output_to_maketuple.cc new file mode 100644 index 0000000000..3f283e5d24 --- /dev/null +++ b/mindspore/ccsrc/pre_activate/pass/convert_tuple_output_to_maketuple.cc @@ -0,0 +1,79 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "pre_activate/pass/convert_tuple_output_to_maketuple.h" + +#include +#include + +#include "session/anf_runtime_algorithm.h" +#include "pre_activate/common/helper.h" +#include "session/kernel_graph.h" + +namespace mindspore { +namespace opt { +namespace { +CNodePtr ConvertTupleInputToMakeTuple(const FuncGraphPtr &graph, const CNodePtr &cnode_ptr) { + MS_EXCEPTION_IF_NULL(cnode_ptr); + MS_EXCEPTION_IF_NULL(graph); + std::vector convert_inputs = {cnode_ptr->input(0)}; + for (size_t index = 0; index < AnfAlgo::GetInputTensorNum(cnode_ptr); ++index) { + auto input_node = AnfAlgo::GetInputNode(cnode_ptr, index); + if (AnfAlgo::IsTupleOutput(input_node)) { + std::vector types; + std::vector> shapes; + std::vector make_tuple_inputs_list = {NewValueNode(prim::kPrimMakeTuple)}; + for (size_t tuple_out_index = 0; tuple_out_index < AnfAlgo::GetOutputTensorNum(input_node); ++tuple_out_index) { + make_tuple_inputs_list.emplace_back(CreatTupleGetItemNode(graph, input_node, tuple_out_index)); + types.push_back(AnfAlgo::GetOutputInferDataType(input_node, tuple_out_index)); + shapes.emplace_back(AnfAlgo::GetOutputInferShape(input_node, tuple_out_index)); + } + auto make_tuple = graph->NewCNode(make_tuple_inputs_list); + AnfAlgo::SetOutputInferTypeAndShape(types, shapes, make_tuple.get()); + convert_inputs.emplace_back(make_tuple); + } else { + convert_inputs.push_back(input_node); + } + } + cnode_ptr->set_inputs(convert_inputs); + return cnode_ptr; +} +} // namespace + +const BaseRef ConvertTupleOutputToMaketuple::DefinePattern() const { + VarPtr V = std::make_shared(); + VarPtr Xs = std::make_shared(); + return VectorRef({V, Xs}); +} + +const AnfNodePtr ConvertTupleOutputToMaketuple::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node, + const EquivPtr &) const { + if (node == nullptr || !node->isa()) { + return nullptr; + } + auto cnode = node->cast(); + MS_EXCEPTION_IF_NULL(cnode); + if (AnfAlgo::GetCNodeName(cnode) == prim::kPrimTupleGetItem->name()) { 
+ return nullptr; + } + if (std::any_of(cnode->inputs().begin() + 1, cnode->inputs().end(), [](const AnfNodePtr &node) { + return AnfAlgo::IsTupleOutput(node) && AnfAlgo::GetCNodeName(node) != prim::kPrimMakeTuple->name(); + })) { + return ConvertTupleInputToMakeTuple(func_graph, cnode); + } + return nullptr; +} +} // namespace opt +} // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/pass/convert_tuple_output_to_maketuple.h b/mindspore/ccsrc/pre_activate/pass/convert_tuple_output_to_maketuple.h new file mode 100644 index 0000000000..a16ffaf674 --- /dev/null +++ b/mindspore/ccsrc/pre_activate/pass/convert_tuple_output_to_maketuple.h @@ -0,0 +1,40 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CONVERT_TUPLE_OUTPUT_TO_MAKETUPLE_H +#define MINDSPORE_CONVERT_TUPLE_OUTPUT_TO_MAKETUPLE_H +#include +#include + +#include "ir/anf.h" +#include "pre_activate/common/optimizer.h" + +namespace mindspore { +namespace opt { +class ConvertTupleOutputToMaketuple : public PatternProcessPass { + public: + explicit ConvertTupleOutputToMaketuple(bool multigraph = true) + : PatternProcessPass("convert_tuple_output_to_maketuple", multigraph) {} + + ~ConvertTupleOutputToMaketuple() override = default; + + const BaseRef DefinePattern() const override; + + const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; +}; +} // namespace opt +} // namespace mindspore +#endif // MINDSPORE_CONVERT_TUPLE_OUTPUT_TO_MAKETUPLE_H diff --git a/mindspore/ccsrc/session/kernel_graph.cc b/mindspore/ccsrc/session/kernel_graph.cc index bbcc04e14b..3c647bf21d 100755 --- a/mindspore/ccsrc/session/kernel_graph.cc +++ b/mindspore/ccsrc/session/kernel_graph.cc @@ -239,7 +239,7 @@ std::vector KernelGraph::SplitTupleValueNodeToNodeList(const ValueNo AddValueNodeToGraph(new_value_node); convert_inputs.emplace_back(new_value_node); } - if (RemoveValueNodeFromGraph(value_node)) { + if (!RemoveValueNodeFromGraph(value_node)) { MS_LOG(WARNING) << "failed to remove the value_node " << value_node->DebugString(); } return convert_inputs; diff --git a/tests/ut/cpp/pre_activate/pass/convert_tuple_output_to_maketuple_test.cc b/tests/ut/cpp/pre_activate/pass/convert_tuple_output_to_maketuple_test.cc new file mode 100644 index 0000000000..da01a74d76 --- /dev/null +++ b/tests/ut/cpp/pre_activate/pass/convert_tuple_output_to_maketuple_test.cc @@ -0,0 +1,65 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/backend_common_test.h" +#include "ir/anf.h" +#include "ir/meta_tensor.h" +#include "debug/anf_ir_dump.h" +#include "common/py_func_graph_fetcher.h" +#include "session/anf_runtime_algorithm.h" +#include "pre_activate/common/optimizer.h" +#include "pre_activate/common/pass_manager.h" +#include "pre_activate/pass/convert_tuple_output_to_maketuple.h" +#include "utils/utils.h" + +namespace mindspore { +namespace opt { +class TestHWTupleOutputToMakeTuple : public BackendCommon { + public: + TestHWTupleOutputToMakeTuple() + : getPyFun_("gtest_input.pre_activate.convert_tuple_output_to_maketuple_test", true) {} + ~TestHWTupleOutputToMakeTuple() override = default; + + public: + UT::PyFuncGraphFetcher getPyFun_; +}; + +TEST_F(TestHWTupleOutputToMakeTuple, test_convert_tuple_output_to_maketuple) { + FuncGraphPtr g = getPyFun_.CallAndParseRet("test_convert_tuple_output_to_maketuple", "before"); + ASSERT_TRUE(g != nullptr); + std::vector shp_x{5, 2, 10}; + std::vector shp_h{1, 2, 2}; + std::vector shp_c{1, 2, 2}; + std::vector shp_w{112, 1, 1}; + auto x_abstract = std::make_shared(kFloat32, shp_x); + auto h_abstract = std::make_shared(kFloat32, shp_h); + auto c_abstract = std::make_shared(kFloat32, shp_c); + auto w_abstract = std::make_shared(kFloat32, shp_w); + AbstractBasePtrList args_spec_list{x_abstract, h_abstract, c_abstract, w_abstract}; + auto func_graph = GetKernelGraph(g, args_spec_list); + ASSERT_TRUE(func_graph != nullptr); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + 
pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + optimizer->Optimize(func_graph); + + FuncGraphPtr g_after = getPyFun_.CallAndParseRet("test_convert_tuple_output_to_maketuple", "after"); + ASSERT_TRUE(g_after != nullptr); + EXPECT_TRUE(CheckEqualGraph(func_graph, g_after)); +} +} // namespace opt +} // namespace mindspore diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py new file mode 100644 index 0000000000..961f7b6232 --- /dev/null +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/convert_tuple_output_to_maketuple_test.py @@ -0,0 +1,54 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ +from mindspore.ops import operations as P +from mindspore.ops import Primitive +import mindspore as ms +import mindspore.common.dtype as mstype +from mindspore.common.tensor import Tensor +import numpy as np + +make_tuple = Primitive('make_tuple') +tuple_get_item = Primitive("tuple_getitem"); +LSTM = P.LSTM(input_size=10,hidden_size=2,num_layers=1,has_bias=True,bidirectional=False,dropout=0.0) +add = P.TensorAdd() + +class FnDict: + def __init__(self): + self.fnDict = {} + + def __call__(self, fn): + self.fnDict[fn.__name__] = fn + + def __getitem__(self, name): + return self.fnDict[name] + + +def test_convert_tuple_output_to_maketuple(tag): + fns = FnDict() + + @fns + def before(x, h, c, w): + res = LSTM(x, h, c, w) + return res + + @fns + def after(x, h, c, w): + res = LSTM(x, h, c, w) + res = make_tuple( + make_tuple(tuple_get_item(res, 0), tuple_get_item(res, 1), tuple_get_item(res, 2), tuple_get_item(res, 3), + tuple_get_item(res, 4))); + return res + + return fns[tag] diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py index 902fd636de..f0320fef79 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/insert_memcpy_async_for_getnext.py @@ -49,7 +49,10 @@ def test_insert_memcpy_async_for_getnext(tag): label = tuple_getitem(res, 1) memcpy_async_data = memcpy_async(data) memcpy_async_label = memcpy_async(label) - tuple = make_tuple(make_tuple(memcpy_async_data, memcpy_async_label)) - return tuple + bind_tuple = make_tuple(memcpy_async_data, memcpy_async_label) + get_item0 = tuple_getitem(bind_tuple, 0) + get_item1 = tuple_getitem(bind_tuple, 1) + bind_tuple = make_tuple(make_tuple(get_item0, get_item1)) + return bind_tuple return fns[tag] From 
6cc5e87126be19ea4b4115d22286ad89d1dcb6a6 Mon Sep 17 00:00:00 2001 From: zjun Date: Thu, 16 Apr 2020 17:27:33 +0800 Subject: [PATCH 322/367] Fix oplib codexx --- .../ccsrc/kernel/aicpu/aicpu_kernel_build.cc | 13 ++-------- mindspore/ccsrc/kernel/oplib/oplib.cc | 26 +++++++++++-------- mindspore/ccsrc/kernel/oplib/oplib.h | 1 + mindspore/ops/op_info_register.py | 6 ++--- 4 files changed, 21 insertions(+), 25 deletions(-) diff --git a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_build.cc b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_build.cc index cf23779415..808e87edc0 100644 --- a/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_build.cc +++ b/mindspore/ccsrc/kernel/aicpu/aicpu_kernel_build.cc @@ -39,8 +39,6 @@ namespace mindspore { namespace kernel { using FNodeAttrHandle = std::function &anf_node, mindspore::NodeDef *proto)>; -const std::vector local_framework_op_vec = {kInitData, kGetNext, kDropoutGenMask, kPrint}; - bool SetIOIputSize(const std::shared_ptr &anf_node, const size_t &input_num, std::vector *input_size_list) { MS_EXCEPTION_IF_NULL(anf_node); @@ -298,19 +296,12 @@ KernelModPtr AicpuOpBuild(const std::shared_ptr &anf_node) { MS_EXCEPTION_IF_NULL(kernel_mod_ptr); kernel_mod_ptr->SetAnfNode(anf_node); kernel_mod_ptr->SetNodeName(op_name); - auto iter = std::find(local_framework_op_vec.begin(), local_framework_op_vec.end(), op_name); - if (iter != local_framework_op_vec.end()) { - if (!CreateNodeDefBytes(anf_node, kernel_mod_ptr)) { - MS_LOG(EXCEPTION) << "Create nodeDefBytes faild!"; - } - } else { - MS_LOG(EXCEPTION) << "Aicpu don't support node [" << op_name << "]"; + if (!CreateNodeDefBytes(anf_node, kernel_mod_ptr)) { + MS_LOG(EXCEPTION) << "Create nodeDefBytes faild!"; } - if (!SetIOSize(anf_node, kernel_mod_ptr)) { MS_LOG(EXCEPTION) << "Set input output size list failed."; } - return kernel_mod_ptr; } } // namespace kernel diff --git a/mindspore/ccsrc/kernel/oplib/oplib.cc b/mindspore/ccsrc/kernel/oplib/oplib.cc index d2464bce47..c8cc1530ce 100644 --- 
a/mindspore/ccsrc/kernel/oplib/oplib.cc +++ b/mindspore/ccsrc/kernel/oplib/oplib.cc @@ -94,6 +94,20 @@ bool OpLib::RegOp(const std::string& json_string, const std::string& impl_path) return ret; } +void OpLib::DecodeTBESpecificInfo(const nlohmann::json& obj, const std::shared_ptr& op_info) { + op_info->set_async_flag(obj.at(kAsyncFlag)); + op_info->set_binfile_name(obj.at(kBinfileName)); + op_info->set_compute_cost(obj.at(kComputeCost)); + op_info->set_kernel_name(obj.at(kKernelName)); + op_info->set_partial_flag(obj.at(kPartialFlag)); + if (obj.find(kOpPattern) != obj.end()) { + op_info->set_op_pattern(obj.at(kOpPattern)); + } + if (obj.find(kDynamicFormat) != obj.end()) { + op_info->set_dynamic_format(obj.at(kDynamicFormat)); + } +} + bool OpLib::DecodeOpInfo(const nlohmann::json& obj, const mindspore::kernel::OpImplyType imply_type, const std::string& impl_path) { std::shared_ptr op_info = std::make_shared(); @@ -103,17 +117,7 @@ bool OpLib::DecodeOpInfo(const nlohmann::json& obj, const mindspore::kernel::OpI op_info->set_imply_type(imply_type); op_info->set_fusion_type(obj.at(kFusionType)); if (imply_type == kTBE) { - op_info->set_async_flag(obj.at(kAsyncFlag)); - op_info->set_binfile_name(obj.at(kBinfileName)); - op_info->set_compute_cost(obj.at(kComputeCost)); - op_info->set_kernel_name(obj.at(kKernelName)); - op_info->set_partial_flag(obj.at(kPartialFlag)); - if (obj.find(kOpPattern) != obj.end()) { - op_info->set_op_pattern(obj.at(kOpPattern)); - } - if (obj.find(kDynamicFormat) != obj.end()) { - op_info->set_dynamic_format(obj.at(kDynamicFormat)); - } + DecodeTBESpecificInfo(obj, op_info); } auto attrs = obj.at(kAttr); for (const auto& attr : attrs) { diff --git a/mindspore/ccsrc/kernel/oplib/oplib.h b/mindspore/ccsrc/kernel/oplib/oplib.h index a4c5e04bb1..0e11e28d58 100644 --- a/mindspore/ccsrc/kernel/oplib/oplib.h +++ b/mindspore/ccsrc/kernel/oplib/oplib.h @@ -40,6 +40,7 @@ class OpLib { const std::shared_ptr& op_info); static bool 
DecodeDtypeFormat(const nlohmann::json& dtype_format, const std::shared_ptr& op_io, size_t index); + static void DecodeTBESpecificInfo(const nlohmann::json& obj, const std::shared_ptr& op_info); static bool DecodeInputOutput(const nlohmann::json& obj, const OpImplyType imply_type, const OpIOType io_type, const std::shared_ptr& op_info, const nlohmann::json& dtype_format); static bool GetRefInfo(const std::shared_ptr& op_info); diff --git a/mindspore/ops/op_info_register.py b/mindspore/ops/op_info_register.py index 7dd7a9f729..28821b621e 100644 --- a/mindspore/ops/op_info_register.py +++ b/mindspore/ops/op_info_register.py @@ -57,7 +57,7 @@ def op_info_register(op_info): return register_decorator -class RegOp(): +class RegOp: """ Base class for op info register. @@ -483,9 +483,9 @@ class TBERegOp(RegOp): return self -class DataType(): +class DataType: """ - Various combinations of dtype and formatself. + Various combinations of dtype and format. The current list below maybe not completed. If necessary, please add it. 
""" From 7fae76d10dbb9324154a6d0963cc99627152c5f2 Mon Sep 17 00:00:00 2001 From: huangdongrun Date: Thu, 16 Apr 2020 16:57:08 +0800 Subject: [PATCH 323/367] add support for calculation between float and int format code remove test case for fp64 add back scalar manipulation remove save graph --- .../ccsrc/operator/cc_implementations.cc | 8 +++ .../ccsrc/pipeline/static_analysis/prim.cc | 3 + mindspore/ccsrc/utils/convert_utils.h | 1 + tests/ut/python/ops/test_python_operators.py | 59 +++++++++++++++++-- 4 files changed, 66 insertions(+), 5 deletions(-) diff --git a/mindspore/ccsrc/operator/cc_implementations.cc b/mindspore/ccsrc/operator/cc_implementations.cc index 62b23b346f..49dc3ab791 100644 --- a/mindspore/ccsrc/operator/cc_implementations.cc +++ b/mindspore/ccsrc/operator/cc_implementations.cc @@ -201,6 +201,14 @@ bool InnerScalarGe(T x, U y) { int sum = InnerScalar##op_t(GetValue(x), GetValue(y)); \ return MakeValue(sum); \ } \ + if (x->isa() && y->isa()) { \ + float sum = InnerScalar##op_t(IntToFloat(GetValue(x)), GetValue(y)); \ + return MakeValue(sum); \ + } \ + if (x->isa() && y->isa()) { \ + float sum = InnerScalar##op_t(GetValue(x), IntToFloat(GetValue(y))); \ + return MakeValue(sum); \ + } \ MS_LOG(EXCEPTION) << "Unsupported Value for Scalar" << #op_t << ", x: " << x->ToString() \ << ", y: " << y->ToString(); \ } while (0); \ diff --git a/mindspore/ccsrc/pipeline/static_analysis/prim.cc b/mindspore/ccsrc/pipeline/static_analysis/prim.cc index 1512596cb4..b6cb57a9b8 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/prim.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/prim.cc @@ -445,6 +445,9 @@ AbstractBasePtr UniformPrimEvaluator::EvalPrim(const AnalysisEnginePtr &, const } ValuePtr inferred_value = RunImpl(value_list); + if (!(*inferred_value == *kAnyValue)) { + ret_value_type = inferred_value->type(); + } // for comparison primitives , return type shall have be specified to be bool. 
if (specify_out_type_ != nullptr) { ret_value_type = specify_out_type_; diff --git a/mindspore/ccsrc/utils/convert_utils.h b/mindspore/ccsrc/utils/convert_utils.h index fbd4485a3f..55f478d5fe 100644 --- a/mindspore/ccsrc/utils/convert_utils.h +++ b/mindspore/ccsrc/utils/convert_utils.h @@ -81,6 +81,7 @@ inline size_t FloatToSize(float u) { } return static_cast(u); } +inline float IntToFloat(int32_t v) { return static_cast(v); } inline uint32_t IntToUint(int32_t u) { if (u < 0) { diff --git a/tests/ut/python/ops/test_python_operators.py b/tests/ut/python/ops/test_python_operators.py index eb65a7f373..705774068d 100644 --- a/tests/ut/python/ops/test_python_operators.py +++ b/tests/ut/python/ops/test_python_operators.py @@ -25,11 +25,13 @@ from ....mindspore_test_framework.mindspore_test import mindspore_test from ....mindspore_test_framework.pipeline.forward.compile_forward \ import pipeline_for_compile_forward_ge_graph_for_case_by_case_config -context.set_context(mode=context.GRAPH_MODE, save_graphs=True) +context.set_context(mode=context.GRAPH_MODE) + class ComparisonOpsNet(nn.Cell): def __init__(self): super(ComparisonOpsNet, self).__init__() + def construct(self, x, y): a = x <= y b = x <= 1.0 @@ -46,22 +48,60 @@ class ComparisonOpsNet(nn.Cell): m = k != l return a or b or c or d or e or f or g or h or i or j or m + +class MathOpsNet(nn.Cell): + def __init__(self): + super(MathOpsNet, self).__init__() + self.relu = P.ReLU() + + def construct(self, x, y): + x = x - (-1) + return self.relu(x) + + +class ScalarCompareNet(nn.Cell): + def __init__(self): + super(ScalarCompareNet, self).__init__() + self.relu = P.ReLU() + + def construct(self, x, y): + t = 0 + if 3 > 3.2: + t = x + y + else: + t = x - y + if 3.1 <= 5: + t = t - x + else: + t = t + x + a = 32.0 * 12 + b = 12/3.0 + if a > b: + t = t * x + else: + t = t / x + return t + + class LogicalNumberOpsNet(nn.Cell): def __init__(self): super(LogicalNumberOpsNet, self).__init__() self.cond = True self.one = 0 
self.zero = 0.0 + def construct(self, x, y): if self.cond and self.one or self.zero and not self.one: return x + y return x - y + class LogicalTensorOpsNet(nn.Cell): def __init__(self): """""" super(LogicalTensorOpsNet, self).__init__() self.const_true = Tensor(True, dtype=mstype.bool_) + def construct(self, x, y): ret = x and y and (y or self.const_true) and (not self.const_true) return ret @@ -71,20 +111,29 @@ test_case_ops = [ ('CompareOpsNet', { 'block': ComparisonOpsNet(), 'desc_inputs': [Tensor(np.ones([6, 9, 10]), dtype=mstype.float32), - Tensor(np.zeros([6, 9, 10]), dtype=mstype.float32)]}), + Tensor(np.zeros([6, 9, 10]), dtype=mstype.float32)]}), + ('MathOpsNet', { + 'block': MathOpsNet(), + 'desc_inputs': [Tensor(np.ones([6, 9, 10]), dtype=mstype.float32), + Tensor(np.zeros([6, 9, 10]), dtype=mstype.float32)]}), + ('ScalarCompareNet', { + 'block': ScalarCompareNet(), + 'desc_inputs': [Tensor(np.ones([6, 9, 10]), dtype=mstype.float32), + Tensor(np.zeros([6, 9, 10]), dtype=mstype.float32)]}), ('LogicalNumberOps', { 'block': LogicalNumberOpsNet(), 'desc_inputs': [Tensor(np.ones([6, 9, 10]), dtype=mstype.float32), - Tensor(np.zeros([6, 9, 10]), dtype=mstype.float32)]}), + Tensor(np.zeros([6, 9, 10]), dtype=mstype.float32)]}), ('LogicalTensorOps', { 'block': LogicalTensorOpsNet(), 'desc_inputs': [Tensor(np.ones([6, 9, 10]).astype(np.bool_), dtype=mstype.bool_), - Tensor(np.zeros([6, 9, 10]).astype(np.bool_), dtype=mstype.bool_)]}), + Tensor(np.zeros([6, 9, 10]).astype(np.bool_), dtype=mstype.bool_)]}), ] test_case_lists = [test_case_ops] test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) + @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) def test_compile(): - return test_exec_case \ No newline at end of file + return test_exec_case From fc07cd908e85415e1e695fb63e2f8e9612b9f50c Mon Sep 17 00:00:00 2001 From: liubuyu Date: Fri, 17 Apr 2020 18:07:36 +0800 Subject: [PATCH 324/367] add 6d format transfer --- 
mindspore/ccsrc/common/trans.cc | 243 +++++++++++++++--- mindspore/ccsrc/common/trans.h | 2 + .../device/ascend/ascend_device_address.cc | 14 +- mindspore/ccsrc/utils/utils.h | 6 +- 4 files changed, 228 insertions(+), 37 deletions(-) diff --git a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc index a2b9f7ef24..5c982166dd 100644 --- a/mindspore/ccsrc/common/trans.cc +++ b/mindspore/ccsrc/common/trans.cc @@ -231,7 +231,98 @@ std::vector PaddingShapeTo4d(const std::vector &shape, const std return shape_4d; } +namespace { +bool CheckDims(const std::vector &shape) { + if (shape.size() != 4) { + MS_LOG(ERROR) << "Host shape dims shoud be 4"; + return false; + } + return true; +} + +std::vector NchwDeviceShape(const std::vector &shape) { + if (!CheckDims(shape)) { + MS_LOG(EXCEPTION) << "Check dims failed."; + } + return shape; +} + +std::vector NhwcDeviceShape(const std::vector &shape) { + if (!CheckDims(shape)) { + MS_LOG(EXCEPTION) << "Ccheck dims failed."; + } + std::vector device_shape; + device_shape.push_back(shape[0]); + device_shape.push_back(shape[2]); + device_shape.push_back(shape[3]); + device_shape.push_back(shape[1]); + return device_shape; +} + +std::vector HwchDeviceShape(const std::vector &shape) { + if (!CheckDims(shape)) { + MS_LOG(EXCEPTION) << "Check dims failed."; + } + std::vector device_shape; + device_shape.push_back(shape[2]); + device_shape.push_back(shape[3]); + device_shape.push_back(shape[1]); + device_shape.push_back(shape[0]); + return device_shape; +} + +std::vector FracZDeviceShape(const std::vector &shape) { + if (!CheckDims(shape)) { + MS_LOG(EXCEPTION) << "Check dims failed."; + } + std::vector device_shape; + size_t cout16 = ((shape[0] + kCubeSize - 1) / kCubeSize) * kCubeSize; + size_t cin16 = ((shape[1] + kCubeSize - 1) / kCubeSize) * kCubeSize; + device_shape.push_back(shape[2] * shape[3] * cin16 / kCubeSize); + device_shape.push_back(cout16 / kCubeSize); + device_shape.push_back(kCubeSize); + 
device_shape.push_back(kCubeSize); + return device_shape; +} + +std::vector Nc1hwc0DeviceShape(const std::vector &shape) { + if (!CheckDims(shape)) { + MS_LOG(EXCEPTION) << "Check dims failed."; + } + std::vector device_shape; + size_t C1 = (shape[1] + kCubeSize - 1) / kCubeSize; + size_t C0 = kCubeSize; + device_shape.push_back(shape[0]); + device_shape.push_back(C1); + device_shape.push_back(shape[2]); + device_shape.push_back(shape[3]); + device_shape.push_back(C0); + return device_shape; +} + +std::vector C1hwncoc0DeviceShape(const std::vector &shape) { + if (!CheckDims(shape)) { + MS_LOG(EXCEPTION) << "Check dims failed."; + } + std::vector device_shape; + device_shape.push_back((shape[1] - 1) / kCubeSize + 1); + device_shape.push_back(shape[2]); + device_shape.push_back(shape[3]); + device_shape.push_back(shape[0]); + device_shape.push_back(kCubeSize); + device_shape.push_back(kCubeSize); + return device_shape; +} +} // namespace + std::vector TransShapeToDevice(const std::vector &shape, const std::string &format) { + using DeviceShapeTransfer = std::function(const std::vector &)>; + const std::map device_shape_map{ + {kOpFormat_NCHW, NchwDeviceShape}, {kOpFormat_NHWC, NhwcDeviceShape}, + {kOpFormat_HWCN, HwchDeviceShape}, {kOpFormat_FRAC_Z, FracZDeviceShape}, + {kOpFormat_NC1HWC0, Nc1hwc0DeviceShape}, {kOpFormat_C1HWNCoC0, C1hwncoc0DeviceShape}, + }; + if (format == kOpFormat_ND || format == kOpFormat_DEFAULT) { return shape; } @@ -255,37 +346,31 @@ std::vector TransShapeToDevice(const std::vector &shape, const s MS_LOG(WARNING) << "Get Device Shape using a shape size is less than 4 ,should be Padding shape by Default firstly"; temp_shape = PaddingShapeTo4dByDefault(shape); } - if (format == kOpFormat_NC1HWC0) { - size_t C1 = (temp_shape[1] + kCubeSize - 1) / kCubeSize; - size_t C0 = kCubeSize; - device_shape.push_back(temp_shape[0]); - device_shape.push_back(C1); - device_shape.push_back(temp_shape[2]); - device_shape.push_back(temp_shape[3]); - 
device_shape.push_back(C0); - return device_shape; - } else if (format == kOpFormat_FRAC_Z) { - size_t cout16 = ((temp_shape[0] + kCubeSize - 1) / kCubeSize) * kCubeSize; - size_t cin16 = ((temp_shape[1] + kCubeSize - 1) / kCubeSize) * kCubeSize; - device_shape.push_back(temp_shape[2] * temp_shape[3] * cin16 / kCubeSize); - device_shape.push_back(cout16 / kCubeSize); - device_shape.push_back(kCubeSize); - device_shape.push_back(kCubeSize); - return device_shape; - } else if (format == kOpFormat_NHWC) { - device_shape.push_back(temp_shape[0]); - device_shape.push_back(temp_shape[2]); - device_shape.push_back(temp_shape[3]); - device_shape.push_back(temp_shape[1]); - return device_shape; - } else if (format == kOpFormat_HWCN) { - return {temp_shape[2], temp_shape[3], temp_shape[1], temp_shape[0]}; - } else if (format == kOpFormat_NCHW) { - return temp_shape; + auto iter = device_shape_map.find(format); + if (iter != device_shape_map.end()) { + return iter->second(temp_shape); } MS_LOG(EXCEPTION) << "Unexpected format[" << format << "]"; } +bool CheckArgs(const FormatArgs &args, size_t *size, size_t *total_size) { + if (args.host_shape.size() != kNchwDims) { + MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dims:" << kNchwDims; + return false; + } + *size = TypeIdSize(args.src_data_type); + if (*size < 1) { + MS_LOG(ERROR) << "Illegal dtype."; + return false; + } + *total_size = ShapeSize(args.device_shape) * (*size); + if (*total_size != args.device_size) { + MS_LOG(ERROR) << "Illegal total data size, total_size:" << *total_size << ", device_size:" << args.device_size; + return false; + } + return true; +} + bool TransDataType(const TypeIdArgs &args, void *result) { MS_LOG(DEBUG) << "Begin trans datatype from " << TypeIdLabel(args.host_data_type) << " to " << TypeIdLabel(args.device_data_type); @@ -320,13 +405,14 @@ bool TransFormat(const FormatArgs &args, void *result) { MS_LOG(ERROR) << "Invalid datatype.."; return 
false; } - if ((args.host_format == kOpFormat_NCHW || args.host_format == kOpFormat_ND) && - args.device_format == kOpFormat_FRAC_Z) { + if (args.device_format == kOpFormat_FRAC_Z) { return NchwToFracZ(args, result); } else if (args.device_format == kOpFormat_FRAC_NZ) { return NchwToFracNz(args, result); } else if (args.device_format == kOpFormat_NC1HWC0) { return NchwToNc1hwc0(args, result); + } else if (args.device_format == kOpFormat_C1HWNCoC0) { + return NchwToC1hwncoc0(args, result); } return true; } @@ -337,13 +423,14 @@ bool TransFormatFromDeviceToHost(const FormatArgs &args, void *result) { MS_LOG(ERROR) << "Invalid datatype.."; return false; } - if ((args.host_format == kOpFormat_NCHW || args.host_format == kOpFormat_ND) && - args.device_format == kOpFormat_FRAC_Z) { + if (args.device_format == kOpFormat_FRAC_Z) { return FracZToNchw(args, result); } else if (args.device_format == kOpFormat_FRAC_NZ) { return FracNzToNchw(args, result); } else if (args.device_format == kOpFormat_NC1HWC0) { return Nc1hwc0ToNchw(args, result); + } else if (args.device_format == kOpFormat_C1HWNCoC0) { + return C1hwncoc0ToNchw(args, result); } return true; } @@ -801,5 +888,99 @@ bool Nc1hwc0ToNchw(const FormatArgs &args, void *result) { } return true; } + +bool NchwToC1hwncoc0(const FormatArgs &args, void *result) { + // trans nchw to c1hwncoc0 + MS_LOG(DEBUG) << "Trans format from nchw to c1hwncoc0."; + MS_EXCEPTION_IF_NULL(result); + size_t size = 0; + size_t total_size = 0; + if (!CheckArgs(args, &size, &total_size)) { + MS_LOG(ERROR) << "Check args failed."; + return false; + } + auto n = args.host_shape[0]; + auto c = args.host_shape[1]; + auto h = args.host_shape[2]; + auto w = args.host_shape[3]; + auto c1 = args.device_shape[0]; + auto co = args.device_shape[4]; + auto c0 = args.device_shape[5]; + for (size_t c1_i = 0; c1_i < c1; c1_i++) { + for (size_t h_i = 0; h_i < h; h_i++) { + for (size_t w_i = 0; w_i < w; w_i++) { + for (size_t n_i = 0; n_i < n; n_i++) { + for 
(size_t co_i = 0; co_i < co; co_i++) { + for (size_t c0_i = 0; c0_i < c0; c0_i++) { + size_t dst_offset = (c1_i * h * w * n * co * c0 + h_i * w * n * co * c0 + w_i * n * co * c0 + + n_i * co * c0 + co_i * c0 + c0_i) * + size; + size_t protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) + ? total_size - dst_offset + : static_cast(SECUREC_MEM_MAX_LEN); + size_t c_i = c0_i + c1_i * c0; + size_t src_offset = (n_i * c * h * w + c_i * h * w + h_i * w + w_i) * size; + error_t ret; + if (c_i < c && c0_i == co_i) { + ret = memcpy_s(static_cast(result) + dst_offset, protected_size, + static_cast(args.data) + src_offset, size); + } else { + ret = memset_s(static_cast(result) + dst_offset, protected_size, 0, size); + } + if (ret != EOK) { + MS_LOG(ERROR) << "Failed to operate the dst memory, error-code:" << ret; + return false; + } + } + } + } + } + } + } + return true; +} + +bool C1hwncoc0ToNchw(const FormatArgs &args, void *result) { + // trans c1hwncoc0 to nchw + MS_LOG(DEBUG) << "Trans format from c1hwncoc0 to nchw"; + MS_EXCEPTION_IF_NULL(result); + size_t size = 0; + size_t total_size = 0; + if (!CheckArgs(args, &size, &total_size)) { + MS_LOG(ERROR) << "Check args failed."; + return false; + } + auto n = args.host_shape[0]; + auto c = args.host_shape[1]; + auto h = args.host_shape[2]; + auto w = args.host_shape[3]; + auto co = args.device_shape[4]; + auto c0 = args.device_shape[5]; + for (size_t n_i = 0; n_i < n; n_i++) { + for (size_t c_i = 0; c_i < c; c_i++) { + for (size_t h_i = 0; h_i < h; h_i++) { + for (size_t w_i = 0; w_i < w; w_i++) { + size_t dst_offset = (n_i * c * h * w + c_i * h * w + h_i * w + w_i) * size; + size_t c1_i = c_i / kCubeSize; + size_t c0_i = c_i % kCubeSize; + size_t co_i = c0_i; + size_t src_offset = (c1_i * h * w * n * co * c0 + h_i * w * n * co * c0 + w_i * n * co * c0 + n_i * co * c0 + + co_i * c0 + c0_i) * + size; + size_t protected_size = total_size - dst_offset < static_cast(SECUREC_MEM_MAX_LEN) + ? 
total_size - dst_offset + : static_cast(SECUREC_MEM_MAX_LEN); + auto ret = memcpy_s(static_cast(result) + dst_offset, protected_size, + static_cast(args.data) + src_offset, size); + if (ret != EOK) { + MS_LOG(ERROR) << "Failed to operate the dst memory, error-code:" << ret; + return false; + } + } + } + } + } + return true; +} } // namespace trans } // namespace mindspore diff --git a/mindspore/ccsrc/common/trans.h b/mindspore/ccsrc/common/trans.h index 4bebdde814..054fa89a06 100644 --- a/mindspore/ccsrc/common/trans.h +++ b/mindspore/ccsrc/common/trans.h @@ -63,10 +63,12 @@ bool TransFormatFromDeviceToHost(const FormatArgs &args, void *result); bool NchwToFracZ(const FormatArgs &args, void *result); bool NchwToFracNz(const FormatArgs &args, void *result); bool NchwToNc1hwc0(const FormatArgs &args, void *result); +bool NchwToC1hwncoc0(const FormatArgs &args, void *result); // device to host bool FracZToNchw(const FormatArgs &args, void *result); bool FracNzToNchw(const FormatArgs &args, void *result); bool Nc1hwc0ToNchw(const FormatArgs &args, void *result); +bool C1hwncoc0ToNchw(const FormatArgs &args, void *result); } // namespace trans } // namespace mindspore diff --git a/mindspore/ccsrc/device/ascend/ascend_device_address.cc b/mindspore/ccsrc/device/ascend/ascend_device_address.cc index 69d1918163..f0a30e4c42 100644 --- a/mindspore/ccsrc/device/ascend/ascend_device_address.cc +++ b/mindspore/ccsrc/device/ascend/ascend_device_address.cc @@ -114,8 +114,11 @@ bool AscendDeviceAddress::SyncDeviceToHost(const std::vector &shape, size_t return false; } } - } else if (format_ == kOpFormat_NC1HWC0 || format_ == kOpFormat_FRAC_Z || format_ == kOpFormat_FRAC_NZ) { - sync_ok = SyncDeviceToHostAndConvertFormat(shape, size, type, host_ptr); + } else { + auto iter = kNeedTransFormatSet.find(format_); + if (iter != kNeedTransFormatSet.end()) { + sync_ok = ConvertFormatAndSyncHostToDevice(shape, size, type, host_ptr); + } } if (!sync_ok) { MS_LOG(ERROR) << "Not support to 
trans, dev_format:" << format_ << ", dev_type:" << TypeIdLabel(type_id_) @@ -199,8 +202,11 @@ bool AscendDeviceAddress::SyncHostToDevice(const std::vector &shape, size_t } SyncMemory(ptr_, host_tmp.data(), size_, RT_MEMCPY_HOST_TO_DEVICE); } - } else if (format_ == kOpFormat_NC1HWC0 || format_ == kOpFormat_FRAC_Z || format_ == kOpFormat_FRAC_NZ) { - sync_ok = ConvertFormatAndSyncHostToDevice(shape, size, type, host_ptr); + } else { + auto iter = kNeedTransFormatSet.find(format_); + if (iter != kNeedTransFormatSet.end()) { + sync_ok = ConvertFormatAndSyncHostToDevice(shape, size, type, host_ptr); + } } if (!sync_ok) { MS_LOG(ERROR) << "Not support to trans, dev_format:" << format_ << ", dev_type:" << TypeIdLabel(type_id_) diff --git a/mindspore/ccsrc/utils/utils.h b/mindspore/ccsrc/utils/utils.h index 9405d0d334..10ef4abf62 100644 --- a/mindspore/ccsrc/utils/utils.h +++ b/mindspore/ccsrc/utils/utils.h @@ -186,8 +186,10 @@ constexpr auto kOpFormat_FRAC_Z = "FracZ"; constexpr auto kOpFormat_FRAC_NZ = "FRACTAL_NZ"; constexpr auto kOpFormat_C1HWNCoC0 = "C1HWNCoC0"; constexpr auto kOpFormat_NC1HWC0_C04 = "NC1HWC0_C04"; -const std::set k1DSupportFormat = {kOpFormat_DEFAULT, kOpFormat_NCHW, kOpFormat_NHWC, - kOpFormat_FRAC_Z, kOpFormat_NC1KHKWHWC0, kOpFormat_NC1HWC0}; +const std::set k1DSupportFormat = {kOpFormat_DEFAULT, kOpFormat_NCHW, kOpFormat_NHWC, + kOpFormat_FRAC_Z, kOpFormat_NC1KHKWHWC0, kOpFormat_NC1HWC0, + kOpFormat_C1HWNCoC0}; + const std::set k2DSupportFormat = {kOpFormat_DEFAULT, kOpFormat_NCHW, kOpFormat_NHWC, kOpFormat_FRAC_Z, kOpFormat_NC1KHKWHWC0}; const std::set k3DSupportFormat = {kOpFormat_DEFAULT, kOpFormat_NC1KHKWHWC0}; From 3f7054dccbe7e9ab18750a650037007dd81d7b94 Mon Sep 17 00:00:00 2001 From: jzw Date: Thu, 9 Apr 2020 17:13:46 +0800 Subject: [PATCH 325/367] add skip dataset op --- mindspore/ccsrc/dataset/api/de_pipeline.cc | 12 ++ mindspore/ccsrc/dataset/api/de_pipeline.h | 3 + .../ccsrc/dataset/api/python_bindings.cc | 1 + 
mindspore/ccsrc/dataset/core/client.h | 1 + .../dataset/engine/datasetops/CMakeLists.txt | 1 + .../dataset/engine/datasetops/skip_op.cc | 128 +++++++++++++++++ .../ccsrc/dataset/engine/datasetops/skip_op.h | 95 +++++++++++++ mindspore/dataset/engine/datasets.py | 54 +++++++- mindspore/dataset/engine/iterators.py | 2 + .../dataset/engine/serializer_deserializer.py | 3 + mindspore/dataset/engine/validators.py | 14 ++ tests/ut/cpp/dataset/CMakeLists.txt | 1 + tests/ut/cpp/dataset/skip_op_test.cc | 91 ++++++++++++ tests/ut/python/dataset/test_skip.py | 130 ++++++++++++++++++ 14 files changed, 535 insertions(+), 1 deletion(-) create mode 100644 mindspore/ccsrc/dataset/engine/datasetops/skip_op.cc create mode 100644 mindspore/ccsrc/dataset/engine/datasetops/skip_op.h create mode 100644 tests/ut/cpp/dataset/skip_op_test.cc create mode 100644 tests/ut/python/dataset/test_skip.py diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.cc b/mindspore/ccsrc/dataset/api/de_pipeline.cc index b64d40125e..a62994cb51 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.cc +++ b/mindspore/ccsrc/dataset/api/de_pipeline.cc @@ -47,6 +47,7 @@ static std::unordered_map g_parse_op_func_ = {{kStorage, &D {kMap, &DEPipeline::ParseMapOp}, {kBatch, &DEPipeline::ParseBatchOp}, {kRepeat, &DEPipeline::ParseRepeatOp}, + {kSkip, &DEPipeline::ParseSkipOp}, {kZip, &DEPipeline::ParseZipOp}, {kRename, &DEPipeline::ParseRenameOp}, {kDeviceQueue, &DEPipeline::ParseDeviceQueueOp}, @@ -511,6 +512,17 @@ Status DEPipeline::ParseRepeatOp(const py::dict &args, std::shared_ptr *ptr) { + if (args["count"].is_none()) { + std::string err_msg = "Error: count is invalid or not set."; + RETURN_STATUS_UNEXPECTED(err_msg); + } + std::shared_ptr op; + RETURN_IF_NOT_OK(SkipOp::Builder(ToInt(args["count"])).Build(&op)); + *ptr = op; + return Status::OK(); +} + Status DEPipeline::ParseGeneratorOp(const py::dict &args, std::shared_ptr *ptr) { std::shared_ptr builder = std::make_shared(); for (auto arg : args) { diff --git 
a/mindspore/ccsrc/dataset/api/de_pipeline.h b/mindspore/ccsrc/dataset/api/de_pipeline.h index acffc390cc..35276e5b74 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.h +++ b/mindspore/ccsrc/dataset/api/de_pipeline.h @@ -42,6 +42,7 @@ enum OpName { kBatch, kCache, kRepeat, + kSkip, kTake, kZip, kMap, @@ -107,6 +108,8 @@ class DEPipeline { Status ParseRepeatOp(const py::dict &args, std::shared_ptr *ptr); + Status ParseSkipOp(const py::dict &args, std::shared_ptr *ptr); + Status ParseBatchOp(const py::dict &args, std::shared_ptr *ptr); Status ParseGeneratorOp(const py::dict &args, std::shared_ptr *ptr); diff --git a/mindspore/ccsrc/dataset/api/python_bindings.cc b/mindspore/ccsrc/dataset/api/python_bindings.cc index acfe6d9988..076f2ecc36 100644 --- a/mindspore/ccsrc/dataset/api/python_bindings.cc +++ b/mindspore/ccsrc/dataset/api/python_bindings.cc @@ -446,6 +446,7 @@ PYBIND11_MODULE(_c_dataengine, m) { .value("MINDRECORD", OpName::kMindrecord) .value("CACHE", OpName::kCache) .value("REPEAT", OpName::kRepeat) + .value("SKIP", OpName::kSkip) .value("TAKE", OpName::kTake) .value("ZIP", OpName::kZip) .value("MAP", OpName::kMap) diff --git a/mindspore/ccsrc/dataset/core/client.h b/mindspore/ccsrc/dataset/core/client.h index ac289a0e07..b39ba3442b 100644 --- a/mindspore/ccsrc/dataset/core/client.h +++ b/mindspore/ccsrc/dataset/core/client.h @@ -32,6 +32,7 @@ #include "dataset/engine/datasetops/project_op.h" #include "dataset/engine/datasetops/rename_op.h" #include "dataset/engine/datasetops/repeat_op.h" +#include "dataset/engine/datasetops/skip_op.h" #include "dataset/engine/datasetops/shuffle_op.h" #include "dataset/engine/datasetops/source/generator_op.h" #include "dataset/engine/datasetops/source/mindrecord_op.h" diff --git a/mindspore/ccsrc/dataset/engine/datasetops/CMakeLists.txt b/mindspore/ccsrc/dataset/engine/datasetops/CMakeLists.txt index d23d6bccb8..9e511f78f4 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/CMakeLists.txt +++ 
b/mindspore/ccsrc/dataset/engine/datasetops/CMakeLists.txt @@ -11,6 +11,7 @@ add_library(engine-datasetops OBJECT project_op.cc rename_op.cc repeat_op.cc + skip_op.cc shuffle_op.cc zip_op.cc ) diff --git a/mindspore/ccsrc/dataset/engine/datasetops/skip_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/skip_op.cc new file mode 100644 index 0000000000..5b0433b6c8 --- /dev/null +++ b/mindspore/ccsrc/dataset/engine/datasetops/skip_op.cc @@ -0,0 +1,128 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include + +#include "dataset/engine/data_buffer.h" +#include "dataset/engine/datasetops/skip_op.h" +#include "dataset/engine/db_connector.h" +#include "dataset/engine/execution_tree.h" + +#include "utils/log_adapter.h" + +namespace mindspore { +namespace dataset { +// Builder constructor. Creates the builder object. +SkipOp::Builder::Builder(int32_t count) : build_max_skips_(count) {} + +Status SkipOp::Builder::SanityCheck() const { + if (build_max_skips_ < 0) { + std::string err_msg("Skip count must be positive integer or 0."); + RETURN_STATUS_UNEXPECTED(err_msg); + } + return Status::OK(); +} + +// The builder "build" method creates the final object. +Status SkipOp::Builder::Build(std::shared_ptr *ptr) { + RETURN_IF_NOT_OK(SanityCheck()); + *ptr = std::make_shared(build_max_skips_); + return Status::OK(); +} + +// Constructor of the SkipOp. 
+SkipOp::SkipOp(int32_t count) : PipelineOp(0), max_skips_(count), skip_count_(0) {} + +// Destructor +SkipOp::~SkipOp() {} + +// A print method typically used for debugging +void SkipOp::Print(std::ostream &out, bool show_all) const { + // Call base class printer first + PipelineOp::Print(out, show_all); + + // Then display our own stuff + out << "SkipOp:" + << "\nCurrent skip count: " << skip_count_ << "\nMax skip count: " << max_skips_; +} + +// Since the buffer may contain multi rows, this function will drop the rows +// that need to skip in it, and then return the buffer. +Status SkipOp::GetNextBuffer(std::unique_ptr *p_buffer, int32_t worker_id, bool retry_if_eoe) { + if (child_.empty()) { + RETURN_STATUS_UNEXPECTED("SkipOp can't be the leaf node."); + } + + std::unique_ptr buf; + // Drop first max_skips_ rows + while (skip_count_ < max_skips_) { + RETURN_IF_NOT_OK(child_[0]->GetNextBuffer(&buf, worker_id, true)); + if (buf->eoe() || buf->eof()) { + break; + } + + // Consider the rows of buffer more than 1 + TensorRow drop_row; + int row_num = buf->NumRows(); + for (int i = 0; i < row_num; i++) { + RETURN_IF_NOT_OK(buf->PopRow(&drop_row)); + if (++skip_count_ == max_skips_) { + break; + } + } + } + + // If buffer is none or the rows of buffer is 0, + // then get a buffer from child. + if (!buf || buf->NumRows() == 0) { + RETURN_IF_NOT_OK(child_[0]->GetNextBuffer(&buf, worker_id, true)); + } + + // Handling eoe and eof + if (buf->eoe() || buf->eof()) { + RETURN_IF_NOT_OK(EoeReceived(worker_id)); + if (state_ == OpState::kDeOpIdle) { + *p_buffer = std::move(buf); + return Status::OK(); + } + } + + *p_buffer = std::move(buf); + return Status::OK(); +} + +// Base-class override for handling cases when an eoe is received. +Status SkipOp::EoeReceived(int32_t worker_id) { + skip_count_ = 0; + state_ = OpState::kDeOpIdle; + return Status::OK(); +} + +// Class functor operator () override. +// Most dataset ops operate by launching a thread (see ExecutionTree). 
+// However, the SkipOp is defined as a inlined operator, so it is invalid to +// launch the functor since this op runs inlined inside another operator. The +// function is overloaded to ensure that it is not called by mistake (it will +// generate an error). +Status SkipOp::operator()() { RETURN_STATUS_UNEXPECTED("Logic error. SkipOp is an inlined operator."); } + +// Base-class override for handling cases when an eof is received. +Status SkipOp::EofReceived(int32_t worker_id) { + MS_LOG(INFO) << "Skip operator EOF received, do nothing now."; + return Status::OK(); +} +} // namespace dataset +} // namespace mindspore diff --git a/mindspore/ccsrc/dataset/engine/datasetops/skip_op.h b/mindspore/ccsrc/dataset/engine/datasetops/skip_op.h new file mode 100644 index 0000000000..0ae520c3ad --- /dev/null +++ b/mindspore/ccsrc/dataset/engine/datasetops/skip_op.h @@ -0,0 +1,95 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef DATASET_ENGINE_DATASETOPS_SKIP_OP_H_ +#define DATASET_ENGINE_DATASETOPS_SKIP_OP_H_ + +#include +#include +#include +#include "dataset/engine/datasetops/pipeline_op.h" + +namespace mindspore { +namespace dataset { +class SkipOp : public PipelineOp { + public: + class Builder { + public: + // Builder constructor. Creates the builder object. + // @note No default args + // @param count - The number of skip to do + // @return This is a constructor. 
+ explicit Builder(int32_t count); + + // Default destructor + ~Builder() = default; + + // The builder "build" method creates the final object. + // @return shared_ptr to the new StorageOp object + Status Build(std::shared_ptr *); + + private: + int32_t build_max_skips_; + + Status SanityCheck() const; + }; + + // Constructor of the SkipOp. + // @note The builder class should be used to call it + // @param count - The number of skips to do + explicit SkipOp(int32_t count); + + // Destructor + ~SkipOp(); + + // A print method typically used for debugging + // @param out - The output stream to write output to + // @param show_all - A bool to control if you want to show all info or just a summary + void Print(std::ostream &out, bool show_all) const override; + + // Class functor operator () override. + // Most dataset ops operate by launching a thread (see ExecutionTree). + // However, the SkipOp is defined as a inlined operator, so it is invalid to launch the + // functor since this op runs inlined inside another operator. The function is overloaded to + // ensure that it is not called by mistake (it will generate an error). + // @return Status - The error code return + Status operator()() override; + + // This function returns the buffer that is at the top of our output connector. The caller is + // typically our parent node, when the parent is asking us to provide the next buffer of data. + // Since SkipOp is an inlined op, getting a buffer from us will simply bounce you to get + // a buffer from our child. + // @param p_buffer - output pointer to the buffer that it will fetch. + // @param worker_id - The worker id + // @param retry_if_eoe Set this flag to true to allow calling pop() again after the first pop() returns EOE. + // @return Status - The error code return + Status GetNextBuffer(std::unique_ptr *p_buffer, int32_t worker_id, bool retry_if_eoe) override; + + // Base-class override for handling cases when an eoe is received. 
+ // @param worker_id - The worker id + Status EoeReceived(int32_t worker_id) override; + + // Base-class override for handling cases when an eof is received. + // @param worker_id - The worker id + Status EofReceived(int32_t worker_id) override; + + private: + int32_t max_skips_; // The number of skips that the user requested + int32_t skip_count_; // A counter for the current number of executed skips +}; +} // namespace dataset +} // namespace mindspore + +#endif // DATASET_ENGINE_DATASETOPS_SKIP_OP_H_ diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 63b0fa4405..4a41e3f778 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -35,7 +35,7 @@ from mindspore._c_expression import typing from mindspore import log as logger from . import samplers from .iterators import DictIterator, TupleIterator -from .validators import check, check_batch, check_shuffle, check_map, check_repeat, check_zip, check_rename, \ +from .validators import check, check_batch, check_shuffle, check_map, check_repeat, check_skip, check_zip, check_rename, \ check_project, check_imagefolderdatasetv2, check_mnist_cifar_dataset, check_manifestdataset, \ check_tfrecorddataset, check_vocdataset, check_celebadataset, check_minddataset, check_generatordataset, \ check_zip_dataset, check_add_column @@ -423,6 +423,25 @@ class Dataset: return self return RepeatDataset(self, count) + @check_skip + def skip(self, count): + """ + Skip the first N elements of this dataset. + + Args: + count (int): Number of elements the dataset should be skipped. + + Returns: + SkipDataset, dataset skipped. + + Examples: + >>> import mindspore.dataset as ds + >>> # data is an instance of Dataset object. 
+ >>> # creates a dataset which skips first 3 elements from data + >>> data = data.skip(3) + """ + return SkipDataset(self, count) + @check_zip_dataset def zip(self, datasets): """ @@ -1081,6 +1100,39 @@ class RepeatDataset(DatasetOp): """ return self.count +class SkipDataset(DatasetOp): + """ + The result of applying Skip operator to the input Dataset. + + Args: + datasets (tuple): A tuple of datasets to be skipped. + count (int): Number of rows the dataset should be skipped. + """ + + def __init__(self, input_dataset, count): + super().__init__() + self.count = count + self.input.append(input_dataset) + input_dataset.output.append(self) + self._input_indexs = input_dataset.input_indexs + + def get_args(self): + args = super().get_args() + args["count"] = self.count + return args + + def get_dataset_size(self): + """ + Get the number of batches in an epoch. + + Return: + Number, number of batches. + """ + child_size = self.input[0].get_dataset_size() + output_size = 0 + if self.count >= 0 and self.count < child_size: + output_size = child_size - self.count + return output_size class ZipDataset(DatasetOp): """ diff --git a/mindspore/dataset/engine/iterators.py b/mindspore/dataset/engine/iterators.py index 69dd9ce0a9..7227dbb2f0 100644 --- a/mindspore/dataset/engine/iterators.py +++ b/mindspore/dataset/engine/iterators.py @@ -127,6 +127,8 @@ class Iterator: op_type = OpName.MAP elif isinstance(dataset, de.RepeatDataset): op_type = OpName.REPEAT + elif isinstance(dataset, de.SkipDataset): + op_type = OpName.SKIP elif isinstance(dataset, de.StorageDataset): op_type = OpName.STORAGE elif isinstance(dataset, de.ImageFolderDatasetV2): diff --git a/mindspore/dataset/engine/serializer_deserializer.py b/mindspore/dataset/engine/serializer_deserializer.py index d1ed5c47cd..15f430aeb8 100644 --- a/mindspore/dataset/engine/serializer_deserializer.py +++ b/mindspore/dataset/engine/serializer_deserializer.py @@ -297,6 +297,9 @@ def create_node(node): elif dataset_op == 
'RepeatDataset': pyobj = de.Dataset().repeat(node.get('count')) + elif dataset_op == 'SkipDataset': + pyobj = de.Dataset().skip(node.get('count')) + elif dataset_op == 'MapDataset': tensor_ops = construct_tensor_ops(node.get('operations')) pyobj = de.Dataset().map(node.get('input_columns'), tensor_ops, node.get('output_columns'), diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py index 165a160e77..3502cbb204 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -709,6 +709,20 @@ def check_repeat(method): return new_method +def check_skip(method): + """check the input arguments of skip.""" + @wraps(method) + def new_method(*args, **kwargs): + param_dict = make_param_dict(method, args, kwargs) + + count = param_dict.get('count') + check_type(count, 'count', int) + if count < 0: + raise ValueError("Skip count must be positive integer or 0.") + + return method(*args, **kwargs) + + return new_method def check_zip(method): """check the input arguments of zip.""" diff --git a/tests/ut/cpp/dataset/CMakeLists.txt b/tests/ut/cpp/dataset/CMakeLists.txt index 086a67c7d7..db207363a8 100644 --- a/tests/ut/cpp/dataset/CMakeLists.txt +++ b/tests/ut/cpp/dataset/CMakeLists.txt @@ -41,6 +41,7 @@ SET(DE_UT_SRCS random_vertical_flip_op_test.cc rename_op_test.cc repeat_op_test.cc + skip_op_test.cc rescale_op_test.cc resize_bilinear_op_test.cc resize_op_test.cc diff --git a/tests/ut/cpp/dataset/skip_op_test.cc b/tests/ut/cpp/dataset/skip_op_test.cc new file mode 100644 index 0000000000..c2168b24d4 --- /dev/null +++ b/tests/ut/cpp/dataset/skip_op_test.cc @@ -0,0 +1,91 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "dataset/util/circular_pool.h" +#include "dataset/core/client.h" +#include "common/common.h" +#include "gtest/gtest.h" +#include "utils/log_adapter.h" + +using namespace mindspore::dataset; +using mindspore::MsLogLevel::INFO; +using mindspore::ExceptionType::NoExceptionType; +using mindspore::LogStream; + +class MindDataTestSkipOp : public UT::DatasetOpTesting {}; + +TEST_F(MindDataTestSkipOp, TestSkipOpFuntions) { + // Start with an empty execution tree + auto my_tree = std::make_shared(); + + std::string dataset_path; + dataset_path = datasets_root_path_ + "/testTFTestAllTypes/test.data"; + + std::shared_ptr my_tfreader_op; + TFReaderOp::Builder builder; + builder.SetDatasetFilesList({dataset_path}) + .SetRowsPerBuffer(16) + .SetWorkerConnectorSize(16) + .SetNumWorkers(16); + std::unique_ptr schema = std::make_unique(); + schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {}); + builder.SetDataSchema(std::move(schema)); + Status rc = builder.Build(&my_tfreader_op); ASSERT_TRUE(rc.IsOk()); + rc = my_tree->AssociateNode(my_tfreader_op); + ASSERT_TRUE(rc.IsOk()); + + // SkipOp + std::shared_ptr skip_op = std::make_shared(5); + rc = my_tree->AssociateNode(skip_op); + ASSERT_TRUE(rc.IsOk()); + + // Set children/root layout. 
+ rc = skip_op->AddChild(my_tfreader_op); + ASSERT_TRUE(rc.IsOk()); + rc = my_tree->AssignRoot(skip_op); + ASSERT_TRUE(rc.IsOk()); + + MS_LOG(INFO) << "Launching tree and begin iteration."; + rc = my_tree->Prepare(); + + ASSERT_TRUE(rc.IsOk()); + + rc = my_tree->Launch(); + ASSERT_TRUE(rc.IsOk()); + + // Start the loop of reading tensors from our pipeline + DatasetIterator di(my_tree); + TensorRow tensor_list; + rc = di.FetchNextTensorRow(&tensor_list); + ASSERT_TRUE(rc.IsOk()); + + int row_count = 0; + while (!tensor_list.empty()) { + MS_LOG(INFO) << "Row display for row #: " << row_count << "."; + + // Display the tensor by calling the printer on it + for (int i = 0; i < tensor_list.size(); i++) { + std::ostringstream ss; + ss << "(" << tensor_list[i] << "): " << *tensor_list[i] << std::endl; + MS_LOG(INFO) << "Tensor print: " << ss.str() << "."; + } + + rc = di.FetchNextTensorRow(&tensor_list); + ASSERT_TRUE(rc.IsOk()); + row_count++; + } + + ASSERT_EQ(row_count, 7); +} \ No newline at end of file diff --git a/tests/ut/python/dataset/test_skip.py b/tests/ut/python/dataset/test_skip.py new file mode 100644 index 0000000000..bea7db4e05 --- /dev/null +++ b/tests/ut/python/dataset/test_skip.py @@ -0,0 +1,130 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +import numpy as np + +import mindspore.dataset.transforms.vision.c_transforms as vision +import mindspore.dataset as ds +from mindspore import log as logger + +DATA_DIR_TF2 = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] +SCHEMA_DIR_TF2 = "../data/dataset/test_tf_file_3_images/datasetSchema.json" + +def test_tf_skip(): + data1 = ds.TFRecordDataset(DATA_DIR_TF2, SCHEMA_DIR_TF2, shuffle=False) + + resize_height, resize_width = 32, 32 + decode_op = vision.Decode() + resize_op = vision.Resize((resize_height, resize_width), interpolation=ds.transforms.vision.Inter.LINEAR) + data1 = data1.map(input_columns=["image"], operations=decode_op) + data1 = data1.map(input_columns=["image"], operations=resize_op) + data1 = data1.skip(2) + + num_iter = 0 + for item in data1.create_dict_iterator(): + num_iter += 1 + assert num_iter == 1 + +def generator_md(): + # Create a dataset with [0, 1, 2, 3, 4] + for i in range(5): + yield (np.array([i]), ) + +def test_generator_skip(): + ds1 = ds.GeneratorDataset(generator_md, ["data"]) + + # Here ds1 should be [3, 4] + ds1 = ds1.skip(3) + + buf = [] + for data in ds1: + buf.append(data[0][0]) + assert len(buf) == 2 + +def test_skip_1(): + ds1 = ds.GeneratorDataset(generator_md, ["data"]) + + # Here ds1 should be [] + ds1 = ds1.skip(7) + + buf = [] + for data in ds1: + buf.append(data[0][0]) + assert len(buf) == 0 + +def test_skip_2(): + ds1 = ds.GeneratorDataset(generator_md, ["data"]) + + # Here ds1 should be [0, 1, 2, 3, 4] + ds1 = ds1.skip(0) + + buf = [] + for data in ds1: + buf.append(data[0][0]) + assert len(buf) == 5 + +def test_skip_repeat_1(): + ds1 = ds.GeneratorDataset(generator_md, ["data"]) + + # Here ds1 should be [0, 1, 2, 3, 4, 0, 1, 2, 3, 4] + ds1 = ds1.repeat(2) + + # Here ds1 should be [3, 4, 0, 1, 2, 3, 4] + ds1 = ds1.skip(3) + + buf = [] + for data in ds1: + buf.append(data[0][0]) + assert len(buf) == 7 + +def 
test_skip_repeat_2(): + ds1 = ds.GeneratorDataset(generator_md, ["data"]) + + # Here ds1 should be [3, 4] + ds1 = ds1.skip(3) + + # Here ds1 should be [3, 4, 3, 4] + ds1 = ds1.repeat(2) + + buf = [] + for data in ds1: + buf.append(data[0][0]) + assert len(buf) == 4 + +def test_skip_repeat_3(): + ds1 = ds.GeneratorDataset(generator_md, ["data"]) + + # Here ds1 should be [0, 1, 2, 3, 4, 0, 1, 2, 3, 4] + ds1 = ds1.repeat(2) + + # Here ds1 should be [3, 4] + ds1 = ds1.skip(8) + + # Here ds1 should be [3, 4, 3, 4, 3, 4] + ds1 = ds1.repeat(3) + + buf = [] + for data in ds1: + buf.append(data[0][0]) + assert len(buf) == 6 + +if __name__ == "__main__": + test_tf_skip() + test_generator_skip() + test_skip_1() + test_skip_2() + test_skip_repeat_1() + test_skip_repeat_2() + test_skip_repeat_3() \ No newline at end of file From f7872774f3ebb44e345af364bac9230adf6aac32 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Fri, 17 Apr 2020 08:33:40 -0400 Subject: [PATCH 326/367] add global batch normalization --- mindspore/nn/layer/normalization.py | 2 +- tests/ut/python/nn/test_batchnorm.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index c85b945a0d..04de71f71c 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -79,7 +79,7 @@ class _BatchNorm(Cell): if self.rank_id in self.rank_list[i] and self.group != 1: self.is_global = True management.create_group('group' + str(i), self.rank_list[i]) - self.all_reduce = _GlobalBNHelper('group' + str(i)) + self.all_reduce = P.AllReduce(P.ReduceOp.SUM, 'group' + str(i)).add_prim_attr('fusion', 1) self.shape = P.Shape() self.reduce_mean = P.ReduceMean() self.square = P.Square() diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index 24f0de85f7..b6e27e6950 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py @@ -90,5 +90,4 @@ def 
test_global_bn(): device_num=size, parameter_broadcast=True) net = GlobalBNNet() input_data = Tensor(np.array([[2.4, 2.1], [3.2, 5.4]], dtype=np.float32)) - net.set_train() - out = net(input_data) + _executor.compile(net,input_data) From c5120e770caa1e04aeefb287497a9674d2a05127 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Fri, 17 Apr 2020 08:37:08 -0400 Subject: [PATCH 327/367] add global batch normalization --- mindspore/nn/layer/normalization.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/mindspore/nn/layer/normalization.py b/mindspore/nn/layer/normalization.py index 04de71f71c..6456a3603d 100644 --- a/mindspore/nn/layer/normalization.py +++ b/mindspore/nn/layer/normalization.py @@ -27,14 +27,6 @@ from mindspore.communication import management from mindspore._checkparam import check_int_positive from ..cell import Cell -class _GlobalBNHelper(Cell): - def __init__(self, group): - super(_GlobalBNHelper, self).__init__() - self.group = group - self.reduce = P.AllReduce(P.ReduceOp.SUM, group=self.group).add_prim_attr('fusion', 1) - def construct(self, x): - x = self.reduce(x) - return x class _BatchNorm(Cell): """Batch Normalization base class.""" From b1585f862d9730b95e06c73c21f9b4e441cb05ae Mon Sep 17 00:00:00 2001 From: liubuyu Date: Thu, 16 Apr 2020 10:03:58 +0800 Subject: [PATCH 328/367] auto mix precision --- .../device/ascend/kernel_select_ascend.cc | 89 ++++--------- .../ccsrc/kernel/tbe/tbe_kernel_select.cc | 61 ++++++++- .../ascend/ascend_backend_optimization.cc | 2 + .../ir_fusion/parameter_and_transop_fusion.cc | 120 ++++++++++++++++++ .../ir_fusion/parameter_and_transop_fusion.h | 41 ++++++ mindspore/ops/_op_impl/tbe/cast.py | 6 + 6 files changed, 253 insertions(+), 66 deletions(-) create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.cc create mode 100644 mindspore/ccsrc/pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.h diff --git a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc 
b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc index f02e677163..36c622cbc5 100644 --- a/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc +++ b/mindspore/ccsrc/device/ascend/kernel_select_ascend.cc @@ -45,64 +45,6 @@ enum MatchCountPriority : int { const size_t kMaxCount = 0xffffffff; const int kUnSupportMixedDataTypeIndex = -1; -const std::set kOpFormatList = {kOpFormat_DEFAULT, kOpFormat_NC1KHKWHWC0, kOpFormat_ND, - kOpFormat_NCHW, kOpFormat_NHWC, kOpFormat_HWCN, - kOpFormat_NC1HWC0, kOpFormat_FRAC_Z, kOpFormat_C1HWNCoC0, - kOpFormat_FRAC_NZ, kOpFormat_NC1HWC0_C04}; - -bool IsShapeMatchFormat(const std::vector &shape, const std::string &format) { - // if format is default, it remarkes support all format - if (kOpFormatList.find(format) == kOpFormatList.end()) { - MS_LOG(EXCEPTION) << "got the unknown format " << format; - } - if (format == kOpFormat_DEFAULT) { - return true; - } - // if shape size is 0, the shape will be a scalar - if (shape.empty()) { - return true; - } - if (shape.size() > kShapeSupportFormatMap.size()) { - return false; - } - if (format == kOpFormat_FRAC_NZ && shape.size() >= 2) { - return true; - } - return !(kShapeSupportFormatMap[shape.size() - 1].find(format) == kShapeSupportFormatMap[shape.size() - 1].end()); -} - -bool IsValidKernelInfo(const std::shared_ptr &kernel_node, const kernel::KernelBuildInfo &kernel_build_info) { - MS_EXCEPTION_IF_NULL(kernel_node); - auto check_function = [](const std::vector &shape, const std::string &format) -> bool { - if (!IsShapeMatchFormat(shape, format)) { - return false; - } - for (auto shape_value : shape) { - if (shape_value == 0) { - MS_LOG(EXCEPTION) << "dimension size of the tensor shape should be a positive integer, but got " << shape_value; - } - } - return true; - }; - if (AnfAlgo::GetCNodeName(kernel_node) == prim::kPrimCast->name()) { - return AnfAlgo::GetOutputInferDataType(kernel_node, 0) == kernel_build_info.GetOutputDeviceType(0) && - 
AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0) == kernel_build_info.GetInputDeviceType(0); - } - for (size_t index = 0; index < kernel_build_info.GetOutputNum(); ++index) { - auto output_shape = AnfAlgo::GetOutputInferShape(kernel_node, index); - if (!check_function(output_shape, kernel_build_info.GetOutputFormat(index))) { - return false; - } - } - for (size_t index = 0; index < kernel_build_info.GetInputNum(); ++index) { - auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, index); - if (!check_function(input_shape, kernel_build_info.GetInputFormat(index))) { - return false; - } - } - return true; -} - bool MatchInferOutputDataType(const CNodePtr &cnode, const kernel::KernelBuildInfo &kernel_build_info) { MS_EXCEPTION_IF_NULL(cnode); // Check input data type @@ -459,6 +401,29 @@ int PrecisionReduce(const std::vector &node_mix_precision_datatype_index, // raise precision int selected_index = RaiseDataTypePrecisionSelect(node_mix_precision_datatype_index, node_mix_precision_datatype, kernel_support_datatype, kernel_match_datatype_idx); + if (selected_index != -1) { + int max_match = 0; + auto iter = kernel_match_datatype_idx->begin(); + int match_count = 0; + while (iter != kernel_match_datatype_idx->end()) { + auto kernel_datatypes = kernel_support_datatype.find(iter->first); + if (kernel_datatypes == kernel_support_datatype.end()) { + MS_LOG(EXCEPTION) << "Can not find kernel index" << iter->first << "'s datatype."; + } + if (kernel_datatypes->second.size() < node_mix_precision_datatype.size()) { + MS_LOG(EXCEPTION) << "Kernel datatype size is not equal to node datatype size!"; + } + for (size_t i = 0; i < node_mix_precision_datatype.size(); ++i) { + if (node_mix_precision_datatype[i] == kernel_datatypes->second[i]) { + ++match_count; + } + } + if (match_count > max_match) { + selected_index = SizeToInt(iter->first); + } + ++iter; + } + } if (selected_index == -1 && context_ptr->enable_reduce_precision()) { selected_index = 
RaiseOrReduceDataTypePrecisionSelect(node_mix_precision_datatype_index, node_mix_precision_datatype, @@ -507,9 +472,6 @@ void SelectKernelInfo(const CNodePtr &kernel_node) { kernel::KernelQuery(kernel_node, &kernel_info_list); std::vector most_match_counts = {-1, -1, -1, -1}; int selected_index = -1; - auto context_ptr = MsContext::GetInstance(); - MS_EXCEPTION_IF_NULL(context_ptr); - bool auto_mixed_precision = context_ptr->auto_mixed_precision_flag(); std::unordered_map> kernel_match_datatype_idx; std::unordered_map> kernel_support_datatype; std::vector node_mix_precision_datatype_index; @@ -517,16 +479,13 @@ void SelectKernelInfo(const CNodePtr &kernel_node) { for (size_t info_index = 0; info_index < kernel_info_list.size(); ++info_index) { std::vector cur_kernel_info_match_counts = {0, 0, 0, 0}; auto kernel_build_info = *(kernel_info_list[info_index]); - if (!IsValidKernelInfo(kernel_node, kernel_build_info)) { - continue; - } std::vector support_indexes; std::vector support_datatypes; AddNodeAndKernelDataType(kernel_node, kernel_build_info, &support_indexes, &node_mix_precision_datatype, &support_datatypes, &node_mix_precision_datatype_index); kernel_match_datatype_idx[info_index] = support_indexes; kernel_support_datatype[info_index] = support_datatypes; - if (!auto_mixed_precision && !MatchInferOutputDataType(kernel_node, kernel_build_info)) { + if (!MatchInferOutputDataType(kernel_node, kernel_build_info)) { continue; } std::shared_ptr kernel_info_ptr = kernel_info_list[info_index]; diff --git a/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc b/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc index e818f503c0..127451851e 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_kernel_select.cc @@ -19,6 +19,7 @@ #include #include #include +#include #include "session/anf_runtime_algorithm.h" #include "kernel/oplib/oplib.h" @@ -510,6 +511,64 @@ bool ParseMetadata(const CNodePtr &kernel_node, const std::shared_ptr &shape, 
const std::string &format) { + const std::set kOpFormatList = {kOpFormat_DEFAULT, kOpFormat_NC1KHKWHWC0, kOpFormat_ND, + kOpFormat_NCHW, kOpFormat_NHWC, kOpFormat_HWCN, + kOpFormat_NC1HWC0, kOpFormat_FRAC_Z, kOpFormat_C1HWNCoC0, + kOpFormat_FRAC_NZ, kOpFormat_NC1HWC0_C04}; + + // if format is default, it remarkes support all format + if (kOpFormatList.find(format) == kOpFormatList.end()) { + MS_LOG(EXCEPTION) << "Got the unknown format " << format; + } + if (format == kOpFormat_DEFAULT) { + return true; + } + // if shape size is 0, the shape will be a scalar + if (shape.empty()) { + return true; + } + if (shape.size() > kShapeSupportFormatMap.size()) { + return false; + } + if (format == kOpFormat_FRAC_NZ && shape.size() >= 2) { + return true; + } + return !(kShapeSupportFormatMap[shape.size() - 1].find(format) == kShapeSupportFormatMap[shape.size() - 1].end()); +} + +bool IsValidKernelInfo(const std::shared_ptr &kernel_node, const kernel::KernelBuildInfo &kernel_build_info) { + MS_EXCEPTION_IF_NULL(kernel_node); + auto check_function = [](const std::vector &shape, const std::string &format) -> bool { + if (!IsShapeMatchFormat(shape, format)) { + return false; + } + for (auto shape_value : shape) { + if (shape_value == 0) { + MS_LOG(EXCEPTION) << "Dimension size of the tensor shape should be a positive integer, but got " << shape_value; + } + } + return true; + }; + for (size_t index = 0; index < kernel_build_info.GetOutputNum(); ++index) { + auto output_shape = AnfAlgo::GetOutputInferShape(kernel_node, index); + if (!check_function(output_shape, kernel_build_info.GetOutputFormat(index))) { + return false; + } + } + for (size_t index = 0; index < kernel_build_info.GetInputNum(); ++index) { + auto input_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, index); + if (!check_function(input_shape, kernel_build_info.GetInputFormat(index))) { + return false; + } + } + if (AnfAlgo::GetCNodeName(kernel_node) == prim::kPrimCast->name()) { + return 
AnfAlgo::GetOutputInferDataType(kernel_node, 0) == kernel_build_info.GetOutputDeviceType(0) && + AnfAlgo::GetPrevNodeOutputInferDataType(kernel_node, 0) == kernel_build_info.GetInputDeviceType(0); + } + return true; +} + void TbeMetadataInfo(const CNodePtr &kernel_node, std::vector> *kernel_info_list) { MS_EXCEPTION_IF_NULL(kernel_node); MS_EXCEPTION_IF_NULL(kernel_info_list); @@ -534,7 +593,7 @@ void TbeMetadataInfo(const CNodePtr &kernel_node, std::vectorexecution_mode() == kPynativeMode) { kernel_info_list->push_back(parse_info); } else { - if (CheckSupported(kernel_node, parse_info)) { + if (IsValidKernelInfo(kernel_node, *(parse_info)) && CheckSupported(kernel_node, parse_info)) { kernel_info_list->push_back(parse_info); } else { MS_LOG(INFO) << "CheckSupported Failed for TBE op" << op_name << " kernel info."; diff --git a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc index a72fb9dc9a..7a35627e25 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc @@ -37,6 +37,7 @@ #include "pre_activate/ascend/ir_fusion/transpose_reshape_fusion.h" #include "pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h" #include "pre_activate/ascend/ir_fusion/adam_apply_one_with_decay_rule.h" +#include "pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.h" #include "pre_activate/ascend/ir_fusion/transpose_transdata_fusion.h" #include "pre_activate/ascend/ir_fusion/transdata_split.h" #include "pre_activate/ascend/ir_fission/topk_split.h" @@ -243,6 +244,7 @@ void AscendBackendOptimization(const std::shared_ptr &kern auto optimizer = std::make_shared(); auto other_pm = std::make_shared("other_pm"); other_pm->AddPass(std::make_shared()); + other_pm->AddPass(std::make_shared()); other_pm->AddPass(std::make_shared()); other_pm->AddPass(std::make_shared()); other_pm->AddPass(std::make_shared()); 
diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.cc new file mode 100644 index 0000000000..faa1308f8b --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.cc @@ -0,0 +1,120 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.h" +#include +#include "session/anf_runtime_algorithm.h" +#include "utils/utils.h" +#include "operator/ops.h" +#include "device/kernel_info.h" +#include "pre_activate/common/helper.h" +#include "pre_activate/common/optimizer.h" +#include "pre_activate/ascend/ascend_helper.h" + +namespace mindspore { +namespace opt { +const AnfNodePtr ParamTransRoad(const FuncGraphPtr &func_graph, const AnfNodePtr &node, bool first_flag, + std::vector *trans_road) { + if (node == nullptr) { + MS_LOG(ERROR) << "nullptr"; + return nullptr; + } + if (node->isa()) { + auto cnode = node->cast(); + auto op_name = AnfAlgo::GetCNodeName(cnode); + auto manager = func_graph->manager(); + if (manager == nullptr) { + return nullptr; + } + if (op_name == prim::kPrimCast->name() || op_name == prim::kPrimTranspose->name() || + op_name == prim::kPrimReshape->name() || op_name == kTransDataOpName) { + auto users = manager->node_users()[node]; + if (users.size() > 1 && !first_flag) { + 
return nullptr; + } + trans_road->push_back(cnode); + first_flag = false; + auto next_node = AnfAlgo::GetInputNode(cnode, 0); + if (next_node->isa() || next_node->isa()) { + return next_node; + } + return ParamTransRoad(func_graph, next_node, first_flag, trans_road); + } + } else if (node->isa() || node->isa()) { + return node; + } + return nullptr; +} + +bool ParameterTransOpFusion::Run(const FuncGraphPtr &func_graph) { + if (func_graph == nullptr) { + MS_LOG(ERROR) << "Func graph is nullptr"; + return false; + } + auto manager = func_graph->manager(); + if (manager == nullptr) { + return false; + } + std::vector node_list = TopoSort(func_graph->get_return()); + bool changed = false; + for (auto node : node_list) { + if (node == nullptr || !node->isa()) { + continue; + } + auto cnode = node->cast(); + auto node_name = AnfAlgo::GetCNodeName(cnode); + if (node_name == prim::kPrimCast->name() || node_name == prim::kPrimTranspose->name() || + node_name == prim::kPrimReshape->name() || node_name == kTransDataOpName) { + MS_LOG(DEBUG) << "Skip trans op"; + continue; + } + for (size_t input_index = 0; input_index < AnfAlgo::GetInputTensorNum(cnode); input_index++) { + std::vector trans_road; + bool first_flag = true; + auto final_node = ParamTransRoad(func_graph, AnfAlgo::GetInputNode(cnode, input_index), first_flag, &trans_road); + if (final_node != nullptr && trans_road.size() == 3 && AnfAlgo::GetCNodeName(trans_road[0]) == kTransDataOpName && + AnfAlgo::GetCNodeName(trans_road[1]) == prim::kPrimCast->name() && + AnfAlgo::GetCNodeName(trans_road[2]) == kTransDataOpName) { + auto cur_transop = trans_road[0]; + auto format = AnfAlgo::GetOutputFormat(cur_transop, 0); + auto dtype = AnfAlgo::GetOutputDeviceDataType(cur_transop, 0); + auto param_format = AnfAlgo::GetOutputFormat(final_node, 0); + auto param_dtype = AnfAlgo::GetOutputDeviceDataType(final_node, 0); + + auto cast = trans_road[1]; + auto cast_format = AnfAlgo::GetOutputFormat(cast, 0); + auto cast_build_info = 
cast->kernel_info()->select_kernel_build_info(); + kernel::KernelBuildInfo::KernelBuildInfoBuilder builder; + builder.SetOutputsFormat({format}); + builder.SetInputsFormat({format}); + builder.SetInputsDeviceType({param_dtype}); + builder.SetOutputsDeviceType({dtype}); + builder.SetKernelType(cast_build_info->kernel_type()); + builder.SetFusionType(cast_build_info->fusion_type()); + builder.SetProcessor(cast_build_info->processor()); + AnfAlgo::SetSelectKernelBuildInfo(builder.Build(), cast.get()); + if (param_format == format && param_dtype != dtype) { + manager->Replace(trans_road[2], final_node); + manager->Replace(cur_transop, cast); + } + changed = true; + } + } + } + return changed; +} +} // namespace opt +} // namespace mindspore diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.h b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.h new file mode 100644 index 0000000000..823ec083b1 --- /dev/null +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/parameter_and_transop_fusion.h @@ -0,0 +1,41 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_PARAMETER_AND_TRANSOP_FUSION_H_ +#define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_PARAMETER_AND_TRANSOP_FUSION_H_ + +#include +#include +#include +#include +#include "ir/anf.h" +#include "pre_activate/common/pass.h" + +namespace mindspore { +namespace opt { +class ParameterTransOpFusion : public Pass { + public: + explicit ParameterTransOpFusion(size_t groups = 1) : Pass("Parameter_and_transop_fusion"), groups_(groups) {} + ~ParameterTransOpFusion() override = default; + bool Run(const FuncGraphPtr &graph) override; + + private: + size_t groups_ = 1; +}; +} // namespace opt +} // namespace mindspore + +#endif diff --git a/mindspore/ops/_op_impl/tbe/cast.py b/mindspore/ops/_op_impl/tbe/cast.py index a18dcddfbf..07e14139da 100644 --- a/mindspore/ops/_op_impl/tbe/cast.py +++ b/mindspore/ops/_op_impl/tbe/cast.py @@ -44,6 +44,12 @@ cast_op_info = TBERegOp("Cast") \ .dtype_format(DataType.F16_Default, DataType.U8_Default) \ .dtype_format(DataType.F16_Default, DataType.F32_Default) \ .dtype_format(DataType.F16_Default, DataType.I32_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F16_FracNZ, DataType.F32_FracNZ) \ + .dtype_format(DataType.F32_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F32_FracNZ, DataType.F16_FracNZ) \ .dtype_format(DataType.F32_Default, DataType.F16_Default) \ .dtype_format(DataType.F32_Default, DataType.I32_Default) \ .get_op_info() From 3c02c82771ad4075d90875a00619688f0901ca30 Mon Sep 17 00:00:00 2001 From: hesham Date: Fri, 17 Apr 2020 10:12:50 -0400 Subject: [PATCH 329/367] Bug in weak reference. 
Add new test cases --- mindspore/dataset/engine/iterators.py | 6 ++-- tests/ut/python/dataset/test_iterator.py | 40 ++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/mindspore/dataset/engine/iterators.py b/mindspore/dataset/engine/iterators.py index 69dd9ce0a9..d498d57c85 100644 --- a/mindspore/dataset/engine/iterators.py +++ b/mindspore/dataset/engine/iterators.py @@ -28,10 +28,10 @@ ITERATORS_LIST = list() def _cleanup(): - for itr in ITERATORS_LIST: - iter_ref = itr() + for itr_ref in ITERATORS_LIST: + itr = itr_ref() if itr is not None: - iter_ref.release() + itr.release() def alter_tree(node): diff --git a/tests/ut/python/dataset/test_iterator.py b/tests/ut/python/dataset/test_iterator.py index d2518e1119..102fd0eea1 100644 --- a/tests/ut/python/dataset/test_iterator.py +++ b/tests/ut/python/dataset/test_iterator.py @@ -13,8 +13,10 @@ # limitations under the License. # ============================================================================== import numpy as np +import pytest import mindspore.dataset as ds +from mindspore.dataset.engine.iterators import ITERATORS_LIST, _cleanup DATA_DIR = ["../data/dataset/testTFTestAllTypes/test.data"] SCHEMA_DIR = "../data/dataset/testTFTestAllTypes/datasetSchema.json" @@ -41,3 +43,41 @@ def test_case_iterator(): check(COLUMNS[0:7]) check(COLUMNS[7:8]) check(COLUMNS[0:2:8]) + + +def test_iterator_weak_ref(): + ITERATORS_LIST.clear() + data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR) + itr1 = data.create_tuple_iterator() + itr2 = data.create_tuple_iterator() + itr3 = data.create_tuple_iterator() + + assert len(ITERATORS_LIST) == 3 + assert sum(itr() is not None for itr in ITERATORS_LIST) == 3 + + del itr1 + assert len(ITERATORS_LIST) == 3 + assert sum(itr() is not None for itr in ITERATORS_LIST) == 2 + + del itr2 + assert len(ITERATORS_LIST) == 3 + assert sum(itr() is not None for itr in ITERATORS_LIST) == 1 + + del itr3 + assert len(ITERATORS_LIST) == 3 + assert sum(itr() is not None 
for itr in ITERATORS_LIST) == 0 + + itr1 = data.create_tuple_iterator() + itr2 = data.create_tuple_iterator() + itr3 = data.create_tuple_iterator() + + _cleanup() + with pytest.raises(AttributeError) as info: + itr2.get_next() + assert "object has no attribute 'depipeline'" in str(info.value) + + del itr1 + assert len(ITERATORS_LIST) == 6 + assert sum(itr() is not None for itr in ITERATORS_LIST) == 2 + + _cleanup() From ea297c0889596791f6222732f9806bc4a1d3cc34 Mon Sep 17 00:00:00 2001 From: anthonyaje Date: Fri, 17 Apr 2020 13:24:02 -0400 Subject: [PATCH 330/367] Fix dataset serdes for MindDataset --- .../dataset/engine/serializer_deserializer.py | 49 ++++++++++--------- .../ut/python/dataset/test_serdes_dataset.py | 35 ++++++++++++- 2 files changed, 60 insertions(+), 24 deletions(-) diff --git a/mindspore/dataset/engine/serializer_deserializer.py b/mindspore/dataset/engine/serializer_deserializer.py index d1ed5c47cd..4de5f8101e 100644 --- a/mindspore/dataset/engine/serializer_deserializer.py +++ b/mindspore/dataset/engine/serializer_deserializer.py @@ -127,9 +127,12 @@ def serialize_operations(node_repr, key, val): def serialize_sampler(node_repr, val): """Serialize sampler object to dictionary.""" - node_repr['sampler'] = val.__dict__ - node_repr['sampler']['sampler_module'] = type(val).__module__ - node_repr['sampler']['sampler_name'] = type(val).__name__ + if val is None: + node_repr['sampler'] = None + else: + node_repr['sampler'] = val.__dict__ + node_repr['sampler']['sampler_module'] = type(val).__module__ + node_repr['sampler']['sampler_name'] = type(val).__name__ def traverse(node): @@ -253,9 +256,10 @@ def create_node(node): node.get('shuffle'), sampler, node.get('num_shards'), node.get('shard_id')) elif dataset_op == 'MindDataset': - pyobj = pyclass(node['dataset_file'], node.get('column_list'), + sampler = construct_sampler(node.get('sampler')) + pyobj = pyclass(node['dataset_file'], node.get('columns_list'), node.get('num_parallel_workers'), 
node.get('seed'), node.get('num_shards'), - node.get('shard_id'), node.get('block_reader')) + node.get('shard_id'), node.get('block_reader'), sampler) elif dataset_op == 'TFRecordDataset': pyobj = pyclass(node['dataset_files'], node.get('schema'), node.get('column_list'), @@ -341,24 +345,25 @@ def create_node(node): def construct_sampler(in_sampler): """Instantiate Sampler object based on the information from dictionary['sampler']""" - sampler_name = in_sampler['sampler_name'] - sampler_module = in_sampler['sampler_module'] - sampler_class = getattr(sys.modules[sampler_module], sampler_name) sampler = None - if sampler_name == 'DistributedSampler': - sampler = sampler_class(in_sampler['num_shards'], in_sampler['shard_id'], in_sampler.get('shuffle')) - elif sampler_name == 'PKSampler': - sampler = sampler_class(in_sampler['num_val'], in_sampler.get('num_class'), in_sampler('shuffle')) - elif sampler_name == 'RandomSampler': - sampler = sampler_class(in_sampler.get('replacement'), in_sampler.get('num_samples')) - elif sampler_name == 'SequentialSampler': - sampler = sampler_class() - elif sampler_name == 'SubsetRandomSampler': - sampler = sampler_class(in_sampler['indices']) - elif sampler_name == 'WeightedRandomSampler': - sampler = sampler_class(in_sampler['weights'], in_sampler['num_samples'], in_sampler.get('replacement')) - else: - raise ValueError("Sampler type is unknown: " + sampler_name) + if in_sampler is not None: + sampler_name = in_sampler['sampler_name'] + sampler_module = in_sampler['sampler_module'] + sampler_class = getattr(sys.modules[sampler_module], sampler_name) + if sampler_name == 'DistributedSampler': + sampler = sampler_class(in_sampler['num_shards'], in_sampler['shard_id'], in_sampler.get('shuffle')) + elif sampler_name == 'PKSampler': + sampler = sampler_class(in_sampler['num_val'], in_sampler.get('num_class'), in_sampler('shuffle')) + elif sampler_name == 'RandomSampler': + sampler = sampler_class(in_sampler.get('replacement'), 
in_sampler.get('num_samples')) + elif sampler_name == 'SequentialSampler': + sampler = sampler_class() + elif sampler_name == 'SubsetRandomSampler': + sampler = sampler_class(in_sampler['indices']) + elif sampler_name == 'WeightedRandomSampler': + sampler = sampler_class(in_sampler['weights'], in_sampler['num_samples'], in_sampler.get('replacement')) + else: + raise ValueError("Sampler type is unknown: " + sampler_name) return sampler diff --git a/tests/ut/python/dataset/test_serdes_dataset.py b/tests/ut/python/dataset/test_serdes_dataset.py index 2ef93dbcd6..7fdb0f1dde 100644 --- a/tests/ut/python/dataset/test_serdes_dataset.py +++ b/tests/ut/python/dataset/test_serdes_dataset.py @@ -19,7 +19,7 @@ import filecmp import glob import json import os - +import pytest import numpy as np import mindspore.dataset as ds @@ -28,7 +28,6 @@ import mindspore.dataset.transforms.vision.c_transforms as vision from mindspore.dataset.transforms.vision import Inter from mindspore import log as logger - def test_imagefolder(remove_json_files=True): """ Test simulating resnet50 dataset pipeline. 
@@ -217,6 +216,38 @@ def delete_json_files(): except IOError: logger.info("Error while deleting: {}".format(f)) +# Test save load minddataset +from test_minddataset_sampler import add_and_remove_cv_file, get_data, CV_DIR_NAME, CV_FILE_NAME, FILES_NUM, \ + FileWriter, Inter + +def test_minddataset(add_and_remove_cv_file): + """tutorial for cv minderdataset.""" + columns_list = ["data", "file_name", "label"] + num_readers = 4 + indices = [1, 2, 3, 5, 7] + sampler = ds.SubsetRandomSampler(indices) + data_set = ds.MindDataset(CV_FILE_NAME + "0", columns_list, num_readers, + sampler=sampler) + + # Serializing into python dictionary + ds1_dict = ds.serialize(data_set) + # Serializing into json object + ds1_json = json.dumps(ds1_dict, sort_keys=True) + + # Reconstruct dataset pipeline from its serialized form + data_set = ds.deserialize(input_dict=ds1_dict) + ds2_dict = ds.serialize(data_set) + # Serializing into json object + ds2_json = json.dumps(ds2_dict, sort_keys=True) + + assert ds1_json == ds2_json + + data = get_data(CV_DIR_NAME) + assert data_set.get_dataset_size() == 10 + num_iter = 0 + for item in data_set.create_dict_iterator(): + num_iter += 1 + assert num_iter == 5 if __name__ == '__main__': From 36e87c26306eef4361f8a6585ababfb915f506ce Mon Sep 17 00:00:00 2001 From: lianliguang Date: Sat, 18 Apr 2020 09:36:25 +0800 Subject: [PATCH 331/367] fix bug of deal ref trans --- .../ascend/format_type/deal_ref_trans_and_cast.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc b/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc index 2d44bf8f8f..83a44029a7 100644 --- a/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc +++ b/mindspore/ccsrc/pre_activate/ascend/format_type/deal_ref_trans_and_cast.cc @@ -101,9 +101,9 @@ AnfNodePtr AddAdditionalToRefOutput(const FuncGraphPtr &func_graph, const CNodeP auto origin_type = 
AnfAlgo::GetOutputDeviceDataType(origin_pair.first, origin_pair.second); auto cur_format = AnfAlgo::GetOutputFormat(cnode, output_index); auto cur_type = AnfAlgo::GetOutputDeviceDataType(cnode, output_index); - auto cur_shape = AnfAlgo::GetOutputInferShape(cnode, 0); + auto cur_shape = AnfAlgo::GetOutputInferShape(cnode, output_index); // insert trans - if (origin_format != cur_format) { + if (origin_format != cur_format && cur_shape.size() > 1) { auto kernel_select = std::make_shared(); final_node = AddTransOpNodeToGraph(func_graph, final_node, kernel_select, 0, cur_format, origin_format, kTransDataOpName, false); From 780aa6f87b594e80cef0e345f5e9e69aed656782 Mon Sep 17 00:00:00 2001 From: chenjianping Date: Sat, 18 Apr 2020 01:41:25 +0000 Subject: [PATCH 332/367] Add license head in build.bat --- build.bat | 14 ++++++++++++++ third_party/securec/CMakeLists.txt | 7 +------ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/build.bat b/build.bat index 5d423720fd..76d7f19262 100644 --- a/build.bat +++ b/build.bat @@ -1,3 +1,17 @@ +@rem Copyright 2020 Huawei Technologies Co., Ltd +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem ============================================================================ @echo off @title mindspore_build diff --git a/third_party/securec/CMakeLists.txt b/third_party/securec/CMakeLists.txt index 99345c6eb3..f012b05860 100644 --- a/third_party/securec/CMakeLists.txt +++ b/third_party/securec/CMakeLists.txt @@ -8,12 +8,7 @@ SET(CMAKE_C_FLAGS_RELEASE "$ENV{CFLAGS} -fPIC -O3 -Wall -Wno-deprecated-declarat set(CMAKE_EXPORT_COMPILE_COMMANDS ON) #add flags -if (CMAKE_SYSTEM_NAME MATCHES "Windows") - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include") -else() - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include -Werror") -endif() - +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -I/usr/local/include -Werror") include_directories(./include) add_subdirectory(src) From 17e27824c54c163cb11bd1d4b8d3b257b149b123 Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Fri, 17 Apr 2020 21:47:39 -0400 Subject: [PATCH 333/367] add global batch normalization --- tests/ut/python/nn/test_batchnorm.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index b6e27e6950..10b4cb00a1 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py @@ -73,21 +73,3 @@ def test_compile_groupnorm(): net = nn.GroupNorm(16, 64) input_data = Tensor(np.random.rand(1,64,256,256).astype(np.float32)) _executor.compile(net, input_data) - -class GlobalBNNet(nn.Cell): - def __init__(self): - super(GlobalBNNet, self).__init__() - self.bn = nn.GlobalBatchNorm(num_features = 2, group = 2) - def construct(self, x): - return self.bn(x) - -def test_global_bn(): - init("hccl") - size = 4 - context.set_context(mode=context.GRAPH_MODE) - context.reset_auto_parallel_context() - context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, - device_num=size, parameter_broadcast=True) - net = GlobalBNNet() - input_data = Tensor(np.array([[2.4, 2.1], [3.2, 5.4]], 
dtype=np.float32)) - _executor.compile(net,input_data) From 852e61d46cf3995c74af4d1b0b7878daa5a56299 Mon Sep 17 00:00:00 2001 From: liubuyu Date: Sat, 18 Apr 2020 09:49:12 +0800 Subject: [PATCH 334/367] bug fix --- mindspore/ccsrc/device/ascend/ascend_device_address.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ccsrc/device/ascend/ascend_device_address.cc b/mindspore/ccsrc/device/ascend/ascend_device_address.cc index f0a30e4c42..79241df612 100644 --- a/mindspore/ccsrc/device/ascend/ascend_device_address.cc +++ b/mindspore/ccsrc/device/ascend/ascend_device_address.cc @@ -117,7 +117,7 @@ bool AscendDeviceAddress::SyncDeviceToHost(const std::vector &shape, size_t } else { auto iter = kNeedTransFormatSet.find(format_); if (iter != kNeedTransFormatSet.end()) { - sync_ok = ConvertFormatAndSyncHostToDevice(shape, size, type, host_ptr); + sync_ok = SyncDeviceToHostAndConvertFormat(shape, size, type, host_ptr); } } if (!sync_ok) { From 97e250d4f1898000cbc44a2620cd36f6e52abd2f Mon Sep 17 00:00:00 2001 From: zhaojichen Date: Fri, 17 Apr 2020 21:51:45 -0400 Subject: [PATCH 335/367] add global batch normalization --- tests/ut/python/hccl_test/manage/api.py | 13 ------------- tests/ut/python/nn/test_batchnorm.py | 3 --- 2 files changed, 16 deletions(-) diff --git a/tests/ut/python/hccl_test/manage/api.py b/tests/ut/python/hccl_test/manage/api.py index 04ce7da6d5..8dac167a3f 100644 --- a/tests/ut/python/hccl_test/manage/api.py +++ b/tests/ut/python/hccl_test/manage/api.py @@ -21,7 +21,6 @@ class Hccl(): _instance = None _rank_id = 0 _rank_size = 1 - _group_size = 4 def __init__(self): pass @@ -48,10 +47,6 @@ class Hccl(): def rank_size(self): return self._rank_size - @property - def group_size(self): - return self._group_size - @rank_size.setter def rank_size(self, size): self._rank_size = size @@ -70,14 +65,6 @@ def get_rank_size(group=None): return int(group.split("-")[0]) raise ValueError -def get_group_size(group=None): - hccl = Hccl() - if 
group is None: - return hccl.group_size - if isinstance(group, str): - return int(group.split("-")[0]) - raise ValueError - # pylint: disable=unused-argument def get_world_rank_from_group_rank(group, group_rank_id): return group_rank_id diff --git a/tests/ut/python/nn/test_batchnorm.py b/tests/ut/python/nn/test_batchnorm.py index 10b4cb00a1..e73b7ebbf0 100644 --- a/tests/ut/python/nn/test_batchnorm.py +++ b/tests/ut/python/nn/test_batchnorm.py @@ -19,9 +19,6 @@ import pytest import mindspore.nn as nn from mindspore.common.api import _executor from mindspore import Tensor, Parameter -from mindspore.communication.management import init -from mindspore import context -from mindspore import ParallelMode def test_bn_pars_valid1(): From 0ed6d9178e0a411d4f809451665d187482953337 Mon Sep 17 00:00:00 2001 From: ougongchang Date: Mon, 13 Apr 2020 11:24:43 +0800 Subject: [PATCH 336/367] add Histogram summary operator clean clang format errors and cpplint errors add some test cases for histogram summary operator --- mindspore/ccsrc/ir/anf.cc | 11 ++- mindspore/ccsrc/operator/ops.cc | 1 + mindspore/ccsrc/operator/ops.h | 1 + mindspore/ccsrc/operator/prim_debug.cc | 2 +- .../ccsrc/optimizer/irpass/branch_culling.cc | 27 ++----- mindspore/ccsrc/parallel/node_check.cc | 1 + mindspore/ccsrc/parallel/ops_info/ops_utils.h | 1 + .../ccsrc/pipeline/static_analysis/prim.cc | 1 + .../ccsrc/session/anf_runtime_algorithm.cc | 3 +- mindspore/ccsrc/session/session_basic.cc | 4 +- mindspore/ccsrc/transform/convert.cc | 1 + mindspore/ccsrc/utils/callbacks_ge.cc | 2 +- mindspore/ops/_grad/grad_debug_ops.py | 9 +++ mindspore/ops/operations/__init__.py | 3 +- mindspore/ops/operations/debug_ops.py | 27 +++++++ tests/st/summary/test_gpu_summary.py | 57 ++----------- tests/ut/cpp/transform/convert_test.cc | 6 ++ tests/ut/cpp/transform/transform_base_test.cc | 3 +- tests/ut/python/ops/test_nn_ops.py | 17 ++++ tests/ut/python/ops/test_ops.py | 19 +++++ tests/ut/python/train/summary/test_summary.py | 2 
+ .../test_summary_ops_params_valid_check.py | 81 ++++++++++++++----- 22 files changed, 181 insertions(+), 98 deletions(-) diff --git a/mindspore/ccsrc/ir/anf.cc b/mindspore/ccsrc/ir/anf.cc index 924453a7a6..658fb578b7 100644 --- a/mindspore/ccsrc/ir/anf.cc +++ b/mindspore/ccsrc/ir/anf.cc @@ -103,7 +103,8 @@ std::string CNode::fullname_with_scope() { return fullname_with_scope_; } - if (IsApply(prim::kPrimScalarSummary) || IsApply(prim::kPrimTensorSummary) || IsApply(prim::kPrimImageSummary)) { + if (IsApply(prim::kPrimScalarSummary) || IsApply(prim::kPrimTensorSummary) || IsApply(prim::kPrimImageSummary) || + IsApply(prim::kPrimHistogramSummary)) { std::string tag = GetValue(GetValueNode(input(1))); if (tag == "") { MS_LOG(EXCEPTION) << "The tag name is null, should be valid string"; @@ -111,10 +112,12 @@ std::string CNode::fullname_with_scope() { std::string name; if (IsApply(prim::kPrimScalarSummary)) { name = tag + "[:Scalar]"; - } else if (IsApply(prim::kPrimTensorSummary)) { - name = tag + "[:Tensor]"; - } else { + } else if (IsApply(prim::kPrimImageSummary)) { name = tag + "[:Image]"; + } else if (IsApply(prim::kPrimHistogramSummary)) { + name = tag + "[:Histogram]"; + } else { + name = tag + "[:Tensor]"; } fullname_with_scope_ = name; } else { diff --git a/mindspore/ccsrc/operator/ops.cc b/mindspore/ccsrc/operator/ops.cc index e190d7d0b2..b1a8a9b782 100755 --- a/mindspore/ccsrc/operator/ops.cc +++ b/mindspore/ccsrc/operator/ops.cc @@ -235,6 +235,7 @@ const PrimitivePtr kPrimVirtualDataset = std::make_shared("_VirtualDa const PrimitivePtr kPrimScalarSummary = std::make_shared("ScalarSummary"); const PrimitivePtr kPrimImageSummary = std::make_shared("ImageSummary"); const PrimitivePtr kPrimTensorSummary = std::make_shared("TensorSummary"); +const PrimitivePtr kPrimHistogramSummary = std::make_shared("HistogramSummary"); ValuePtr GetPythonOps(const std::string& op_name, const std::string& module_name) { py::object obj = 
parse::python_adapter::GetPyFn(module_name, op_name); diff --git a/mindspore/ccsrc/operator/ops.h b/mindspore/ccsrc/operator/ops.h index 0148e073e0..26c13993e0 100755 --- a/mindspore/ccsrc/operator/ops.h +++ b/mindspore/ccsrc/operator/ops.h @@ -225,6 +225,7 @@ extern const PrimitivePtr kPrimStateSetItem; extern const PrimitivePtr kPrimScalarSummary; extern const PrimitivePtr kPrimImageSummary; extern const PrimitivePtr kPrimTensorSummary; +extern const PrimitivePtr kPrimHistogramSummary; extern const PrimitivePtr kPrimBroadcastGradientArgs; extern const PrimitivePtr kPrimControlDepend; extern const PrimitivePtr kPrimIs_; diff --git a/mindspore/ccsrc/operator/prim_debug.cc b/mindspore/ccsrc/operator/prim_debug.cc index 28f7e92303..c8db775320 100644 --- a/mindspore/ccsrc/operator/prim_debug.cc +++ b/mindspore/ccsrc/operator/prim_debug.cc @@ -69,7 +69,7 @@ AbstractBasePtr InferImplTensorSummary(const AnalysisEnginePtr &, const Primitiv int tensor_rank = SizeToInt(tensor_value->shape()->shape().size()); if (tensor_rank == 0) { - MS_LOG(EXCEPTION) << "Tensor/Image Summary evaluator second arg should be an tensor, but got a scalar"; + MS_LOG(EXCEPTION) << op_name << " summary evaluator second arg should be an tensor, but got a scalar, rank is 0"; } // Reomve the force check to support batch set summary use 'for' loop diff --git a/mindspore/ccsrc/optimizer/irpass/branch_culling.cc b/mindspore/ccsrc/optimizer/irpass/branch_culling.cc index 7c4ada8b3b..d90b2bd44c 100644 --- a/mindspore/ccsrc/optimizer/irpass/branch_culling.cc +++ b/mindspore/ccsrc/optimizer/irpass/branch_culling.cc @@ -51,25 +51,14 @@ bool InConvertWhiteList(const AnfNodePtr &node, size_t index) { // node because it is attribute or ge specific reason. // Example : when convert CNode(kPrimReduceSum, x, axis), node of index 2 in CNode->inputs is axis which should not be // converted to switch guarded. 
- std::vector>> white_list({{prim::kPrimApplyMomentum, {1, 2}}, - {prim::kPrimMomentum, {2, 3}}, - {prim::kPrimStateSetItem, {1}}, - {prim::kPrimEnvGetItem, {1}}, - {prim::kPrimEnvSetItem, {1}}, - {prim::kPrimReduceSum, {2}}, - {prim::kPrimReduceMean, {2}}, - {prim::kPrimReduceAll, {2}}, - {prim::kPrimCast, {2}}, - {prim::kPrimTranspose, {2}}, - {prim::kPrimOneHot, {2}}, - {prim::kPrimGatherV2, {3}}, - {prim::kPrimReshape, {2}}, - {prim::kPrimAssign, {1}}, - {prim::kPrimAssignAdd, {1}}, - {prim::kPrimAssignSub, {1}}, - {prim::kPrimTensorSummary, {1}}, - {prim::kPrimImageSummary, {1}}, - {prim::kPrimScalarSummary, {1}}}); + std::vector>> white_list( + {{prim::kPrimApplyMomentum, {1, 2}}, {prim::kPrimMomentum, {2, 3}}, {prim::kPrimStateSetItem, {1}}, + {prim::kPrimEnvGetItem, {1}}, {prim::kPrimEnvSetItem, {1}}, {prim::kPrimReduceSum, {2}}, + {prim::kPrimReduceMean, {2}}, {prim::kPrimReduceAll, {2}}, {prim::kPrimCast, {2}}, + {prim::kPrimTranspose, {2}}, {prim::kPrimOneHot, {2}}, {prim::kPrimGatherV2, {3}}, + {prim::kPrimReshape, {2}}, {prim::kPrimAssign, {1}}, {prim::kPrimAssignAdd, {1}}, + {prim::kPrimAssignSub, {1}}, {prim::kPrimTensorSummary, {1}}, {prim::kPrimImageSummary, {1}}, + {prim::kPrimScalarSummary, {1}}, {prim::kPrimHistogramSummary, {1}}}); for (auto &item : white_list) { auto matched = std::any_of(item.second.begin(), item.second.end(), [&item, &node, &index](size_t idx) { return IsPrimitiveCNode(node, item.first) && idx == index; diff --git a/mindspore/ccsrc/parallel/node_check.cc b/mindspore/ccsrc/parallel/node_check.cc index ffd40e421d..e43d03c29c 100644 --- a/mindspore/ccsrc/parallel/node_check.cc +++ b/mindspore/ccsrc/parallel/node_check.cc @@ -66,6 +66,7 @@ const std::set BLACK_LIST = {TUPLE_GETITEM, SCALARSUMMARY, IMAGESUMMARY, TENSORSUMMARY, + HISTOGRAMSUMMARY, COL2IMV1, RESOLVE, BROADCASTGRADIENTARGS, diff --git a/mindspore/ccsrc/parallel/ops_info/ops_utils.h b/mindspore/ccsrc/parallel/ops_info/ops_utils.h index 89b174d6b0..bdae87858d 100644 
--- a/mindspore/ccsrc/parallel/ops_info/ops_utils.h +++ b/mindspore/ccsrc/parallel/ops_info/ops_utils.h @@ -246,6 +246,7 @@ constexpr char STATESETITEM[] = "state_setitem"; constexpr char SCALARSUMMARY[] = "ScalarSummary"; constexpr char IMAGESUMMARY[] = "ImageSummary"; constexpr char TENSORSUMMARY[] = "TensorSummary"; +constexpr char HISTOGRAMSUMMARY[] = "HistogramSummary"; constexpr char BROADCASTGRADIENTARGS[] = "BroadcastGradientArgs"; constexpr char INVERTPERMUTATION[] = "InvertPermutation"; constexpr char CONTROLDEPEND[] = "ControlDepend"; diff --git a/mindspore/ccsrc/pipeline/static_analysis/prim.cc b/mindspore/ccsrc/pipeline/static_analysis/prim.cc index 1512596cb4..2a2e90d675 100644 --- a/mindspore/ccsrc/pipeline/static_analysis/prim.cc +++ b/mindspore/ccsrc/pipeline/static_analysis/prim.cc @@ -131,6 +131,7 @@ PrimitiveEvalImplMap &GetPrimitiveToEvalImplMap() { {prim::kPrimScalarSummary, {InferImplScalarSummary, true}}, {prim::kPrimImageSummary, {InferImplTensorSummary, true}}, {prim::kPrimTensorSummary, {InferImplTensorSummary, true}}, + {prim::kPrimHistogramSummary, {InferImplTensorSummary, true}}, }; return prim_eval_implement_map; } diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/session/anf_runtime_algorithm.cc index 44472a9a6f..a11122bbde 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.cc @@ -714,7 +714,8 @@ bool AnfRuntimeAlgorithm::IsRealKernel(const AnfNodePtr &node) { } auto input = cnode->inputs()[0]; bool is_virtual_node = IsPrimitive(input, prim::kPrimImageSummary) || IsPrimitive(input, prim::kPrimScalarSummary) || - IsPrimitive(input, prim::kPrimTensorSummary) || IsPrimitive(input, prim::kPrimMakeTuple) || + IsPrimitive(input, prim::kPrimTensorSummary) || + IsPrimitive(input, prim::kPrimHistogramSummary) || IsPrimitive(input, prim::kPrimMakeTuple) || IsPrimitive(input, prim::kPrimStateSetItem) || IsPrimitive(input, prim::kPrimDepend) || 
IsPrimitive(input, prim::kPrimTupleGetItem) || IsPrimitive(input, prim::kPrimControlDepend) || IsPrimitive(input, prim::kPrimReturn); diff --git a/mindspore/ccsrc/session/session_basic.cc b/mindspore/ccsrc/session/session_basic.cc index 01a836fad1..0ef0ad97ea 100755 --- a/mindspore/ccsrc/session/session_basic.cc +++ b/mindspore/ccsrc/session/session_basic.cc @@ -45,7 +45,7 @@ void GetSummaryNodes(const KernelGraph *graph, std::unordered_mapcast(); MS_EXCEPTION_IF_NULL(cnode); @@ -83,7 +83,7 @@ bool ExistSummaryNode(const KernelGraph *graph) { auto all_nodes = DeepLinkedGraphSearch(ret); for (auto &n : all_nodes) { if (IsPrimitiveCNode(n, prim::kPrimScalarSummary) || IsPrimitiveCNode(n, prim::kPrimTensorSummary) || - IsPrimitiveCNode(n, prim::kPrimImageSummary)) { + IsPrimitiveCNode(n, prim::kPrimImageSummary) || IsPrimitiveCNode(n, prim::kPrimHistogramSummary)) { return true; } } diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index 2daa86b960..417989247e 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -353,6 +353,7 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {prim::kPrimScalarSummary->name(), ADPT_DESC(Summary)}, {prim::kPrimImageSummary->name(), ADPT_DESC(Summary)}, {prim::kPrimTensorSummary->name(), ADPT_DESC(Summary)}, + {prim::kPrimHistogramSummary->name(), ADPT_DESC(Summary)}, {prim::kPrimTensorAdd->name(), std::make_shared(std::make_shared>(ExtraAttr({{"mode", MakeValue(1)}})), std::make_shared>(ExtraAttr({{"mode", MakeValue(1)}})))}, diff --git a/mindspore/ccsrc/utils/callbacks_ge.cc b/mindspore/ccsrc/utils/callbacks_ge.cc index 50fd2f0b11..36bbcbf297 100644 --- a/mindspore/ccsrc/utils/callbacks_ge.cc +++ b/mindspore/ccsrc/utils/callbacks_ge.cc @@ -131,7 +131,7 @@ static TensorPtr GetMeTensorForSummary(const std::string& name, const std::share auto shape = std::vector({ONE_SHAPE}); return TransformUtil::ConvertGeTensor(ge_tensor_ptr, shape); } - if (tname == 
"[:Tensor]") { + if (tname == "[:Tensor]" || tname == "[:Histogram]") { MS_LOG(DEBUG) << "The summary(" << name << ") is Tensor"; // process the tensor summary // Now we can't get the real shape, so we keep same shape with GE diff --git a/mindspore/ops/_grad/grad_debug_ops.py b/mindspore/ops/_grad/grad_debug_ops.py index 431d82192f..1cb756219a 100644 --- a/mindspore/ops/_grad/grad_debug_ops.py +++ b/mindspore/ops/_grad/grad_debug_ops.py @@ -49,6 +49,15 @@ def get_bprop_image_summary(self): return bprop +@bprop_getters.register(P.HistogramSummary) +def get_bprop_histogram_summary(self): + """Generate bprop for HistogramSummary""" + + def bprop(tag, x, out, dout): + return tag, zeros_like(x) + return bprop + + @bprop_getters.register(P.InsertGradientOf) def get_bprop_insert_gradient_of(self): """Generate bprop for InsertGradientOf""" diff --git a/mindspore/ops/operations/__init__.py b/mindspore/ops/operations/__init__.py index e1dd8e36c5..1f0ee8a04d 100644 --- a/mindspore/ops/operations/__init__.py +++ b/mindspore/ops/operations/__init__.py @@ -34,7 +34,7 @@ from .comm_ops import (AllGather, AllReduce, _AlltoAll, ReduceScatter, Broadcast _MirrorOperator, ReduceOp, _VirtualDataset, _VirtualDiv, _GetTensorSlice) from .debug_ops import (ImageSummary, InsertGradientOf, ScalarSummary, - TensorSummary, Print) + TensorSummary, HistogramSummary, Print) from .control_ops import ControlDepend, GeSwitch, Merge from .inner_ops import ScalarCast from .math_ops import (Abs, ACos, AddN, AssignAdd, AssignSub, Atan2, BatchMatMul, @@ -148,6 +148,7 @@ __all__ = [ 'ScalarSummary', 'ImageSummary', 'TensorSummary', + 'HistogramSummary', "Print", 'InsertGradientOf', 'InvertPermutation', diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py index e4467f5ce1..1a3248cb15 100644 --- a/mindspore/ops/operations/debug_ops.py +++ b/mindspore/ops/operations/debug_ops.py @@ -98,6 +98,33 @@ class TensorSummary(Primitive): """init""" +class 
HistogramSummary(Primitive): + """ + Output tensor to protocol buffer through histogram summary operator. + + Inputs: + - **name** (str) - The name of the input variable. + - **value** (Tensor) - The value of tensor, and the rank of tensor should be greater than 0. + + Examples: + >>> class SummaryDemo(nn.Cell): + >>> def __init__(self,): + >>> super(SummaryDemo, self).__init__() + >>> self.summary = P.HistogramSummary() + >>> self.add = P.TensorAdd() + >>> + >>> def construct(self, x, y): + >>> x = self.add(x, y) + >>> name = "x" + >>> self.summary(name, x) + >>> return x + """ + + @prim_attr_register + def __init__(self): + """init""" + + class InsertGradientOf(PrimitiveWithInfer): """ Attach callback to graph node that will be invoked on the node's gradient. diff --git a/tests/st/summary/test_gpu_summary.py b/tests/st/summary/test_gpu_summary.py index ef645fcb2e..c97c08c4e1 100644 --- a/tests/st/summary/test_gpu_summary.py +++ b/tests/st/summary/test_gpu_summary.py @@ -24,17 +24,6 @@ from mindspore.common.tensor import Tensor from mindspore.ops import operations as P from mindspore.train.summary.summary_record import SummaryRecord -''' - This testcase is used for save summary data only. You need install MindData first and uncomment the commented - packages to analyse summary data. - Using "minddata start --datalog='./test_me_summary_event_file/' --host=0.0.0.0" to make data visible. 
-''' -# from minddata.datavisual.data_transform.data_manager import DataManager -# from minddata.datavisual.visual.train_visual.train_task_manager import TrainTaskManager -# from minddata.datavisual.visual.train_visual.scalars_processor import ScalarsProcessor -# from minddata.datavisual.common.enums import PluginNameEnum -# from minddata.datavisual.common.enums import DataManagerStatus - context.set_context(mode=context.GRAPH_MODE, device_target="GPU") @@ -43,6 +32,7 @@ CUR_DIR = os.getcwd() SUMMARY_DIR_ME = CUR_DIR + "/test_me_summary_event_file/" SUMMARY_DIR_ME_TEMP = CUR_DIR + "/test_me_temp_summary_event_file/" + def clean_environment_file(srcDir): if os.path.exists(srcDir): ls = os.listdir(srcDir) @@ -50,6 +40,8 @@ def clean_environment_file(srcDir): filePath = os.path.join(srcDir, line) os.remove(filePath) os.removedirs(srcDir) + + def save_summary_events_file(srcDir, desDir): if not os.path.exists(desDir): print("-- create desDir") @@ -64,12 +56,14 @@ def save_summary_events_file(srcDir, desDir): os.remove(filePath) os.removedirs(srcDir) + class SummaryNet(nn.Cell): def __init__(self, tag_tuple=None, scalar=1): super(SummaryNet, self).__init__() self.summary_s = P.ScalarSummary() self.summary_i = P.ImageSummary() self.summary_t = P.TensorSummary() + self.histogram_summary = P.HistogramSummary() self.add = P.TensorAdd() self.tag_tuple = tag_tuple self.scalar = scalar @@ -79,8 +73,10 @@ class SummaryNet(nn.Cell): self.summary_s("x1", x) z = self.add(x, y) self.summary_t("z1", z) + self.histogram_summary("histogram", z) return z + def train_summary_record_scalar_for_1(test_writer, steps, fwd_x, fwd_y): net = SummaryNet() out_me_dict = {} @@ -93,6 +89,7 @@ def train_summary_record_scalar_for_1(test_writer, steps, fwd_x, fwd_y): out_me_dict[i] = out_put.asnumpy() return out_me_dict + def me_scalar_summary(steps, tag=None, value=None): test_writer = SummaryRecord(SUMMARY_DIR_ME_TEMP) @@ -104,44 +101,6 @@ def me_scalar_summary(steps, tag=None, value=None): 
test_writer.close() return out_me_dict -def print_scalar_data(): - print("============start print_scalar_data\n") - data_manager = DataManager() - data_manager.start_load_data(path=SUMMARY_DIR_ME) - while data_manager.get_status() != DataManagerStatus.DONE: - time.sleep(0.1) - task_manager = TrainTaskManager(data_manager) - train_jobs = task_manager.get_all_train_tasks(PluginNameEnum.scalar) - print(train_jobs) - """ - train_jobs - ['train_jobs': { - 'id': '12-123', - 'name': 'train_job_name', - 'tags': ['x1', 'y1'] - }] - """ - scalar_processor = ScalarsProcessor(data_manager) - metadata = scalar_processor.get_metadata_list(train_job_ids=train_jobs['train_jobs'][0]['id'], tag=train_jobs['train_jobs'][0]['tags'][0]) - print(metadata) - ''' - metadata - { - 'scalars' : [ - { - 'train_job_id' : '12-12', - 'metadatas' : [ - { - 'wall_time' : 0.1, - 'step' : 1, - 'value' : 0.1 - } - ] - } - ] - } - ''' - print("============end print_scalar_data\n") @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training diff --git a/tests/ut/cpp/transform/convert_test.cc b/tests/ut/cpp/transform/convert_test.cc index c7cd394002..4388312592 100644 --- a/tests/ut/cpp/transform/convert_test.cc +++ b/tests/ut/cpp/transform/convert_test.cc @@ -621,6 +621,12 @@ TEST_F(TestConvert, TestTensorSummaryOps) { ASSERT_TRUE(ret); } +TEST_F(TestConvert, TestHistogramSummaryOps) { + auto prim = prim::kPrimHistogramSummary; + bool ret = MakeDfGraph(prim, 2); + ASSERT_TRUE(ret); +} + TEST_F(TestConvert, TestGreaterOps) { auto prim = std::make_shared("Greater"); bool ret = MakeDfGraph(prim, 2); diff --git a/tests/ut/cpp/transform/transform_base_test.cc b/tests/ut/cpp/transform/transform_base_test.cc index 944721ec83..eb083e2dd1 100644 --- a/tests/ut/cpp/transform/transform_base_test.cc +++ b/tests/ut/cpp/transform/transform_base_test.cc @@ -73,7 +73,8 @@ FuncGraphPtr MakeFuncGraph(const PrimitivePtr prim, unsigned int nparam) { std::vector inputs; inputs.push_back(NewValueNode(prim)); for (unsigned 
int i = 0; i < nparam; i++) { - if ((prim->name() == "ScalarSummary" || prim->name() == "TensorSummary" || prim->name() == "ImageSummary") && + if ((prim->name() == "ScalarSummary" || prim->name() == "TensorSummary" || + prim->name() == "ImageSummary" || prim->name() == "HistogramSummary") && i == 0) { auto input = NewValueNode("testSummary"); inputs.push_back(input); diff --git a/tests/ut/python/ops/test_nn_ops.py b/tests/ut/python/ops/test_nn_ops.py index 7364893503..bb2bb3ea9f 100644 --- a/tests/ut/python/ops/test_nn_ops.py +++ b/tests/ut/python/ops/test_nn_ops.py @@ -198,6 +198,19 @@ class ScalarSummaryNet(nn.Cell): return out +class HistogramSummaryNet(nn.Cell): + """HistogramSummaryNet definition""" + + def __init__(self): + super(HistogramSummaryNet, self).__init__() + self.summary = P.HistogramSummary() + + def construct(self, tensor): + string_in = "wight_value" + out = self.summary(string_in, tensor) + return out + + class FusedBatchNormGrad(nn.Cell): """ FusedBatchNormGrad definition """ @@ -443,6 +456,10 @@ test_cases = [ 'block': ScalarSummaryNet(), 'desc_inputs': [2.2], }), + ('HistogramSummary', { + 'block': HistogramSummaryNet(), + 'desc_inputs': [[1,2,3]], + }), ('FusedBatchNormGrad', { 'block': FusedBatchNormGrad(nn.BatchNorm2d(num_features=512, eps=1e-5, momentum=0.1)), 'desc_inputs': [[64, 512, 7, 7], [64, 512, 7, 7]], diff --git a/tests/ut/python/ops/test_ops.py b/tests/ut/python/ops/test_ops.py index 078ada8406..3345f77862 100755 --- a/tests/ut/python/ops/test_ops.py +++ b/tests/ut/python/ops/test_ops.py @@ -160,6 +160,19 @@ class SummaryNet(nn.Cell): return self.add(x, y) +class HistogramSummaryNet(nn.Cell): + def __init__(self,): + super(HistogramSummaryNet, self).__init__() + self.summary = P.HistogramSummary() + self.add = P.TensorAdd() + + def construct(self, x, y): + out = self.add(x, y) + string_in = "out" + self.summary(string_in, out) + return out + + test_case_math_ops = [ ('Neg', { 'block': P.Neg(), @@ -1104,6 +1117,12 @@ 
test_case_other_ops = [ 'desc_inputs': [Tensor(np.array([1.1]).astype(np.float32)), Tensor(np.array([1.2]).astype(np.float32))], 'skip': ['backward']}), + ('HistogramSummary', { + 'block': HistogramSummaryNet(), + 'desc_inputs': [Tensor(np.array([1.1]).astype(np.float32)), + Tensor(np.array([1.2]).astype(np.float32))], + 'skip': ['backward']}), + ] test_case_lists = [test_case_nn_ops, test_case_math_ops, test_case_array_ops, test_case_other_ops] diff --git a/tests/ut/python/train/summary/test_summary.py b/tests/ut/python/train/summary/test_summary.py index cc595ea883..82287c4290 100644 --- a/tests/ut/python/train/summary/test_summary.py +++ b/tests/ut/python/train/summary/test_summary.py @@ -132,6 +132,7 @@ class SummaryDemo(nn.Cell): def __init__(self,): super(SummaryDemo, self).__init__() self.s = P.ScalarSummary() + self.histogram_summary = P.HistogramSummary() self.add = P.TensorAdd() def construct(self, x, y): @@ -139,6 +140,7 @@ class SummaryDemo(nn.Cell): z = self.add(x, y) self.s("z1", z) self.s("y1", y) + self.histogram_summary("histogram", z) return z diff --git a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py index 98dfd6aaef..23c85d398c 100644 --- a/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py +++ b/tests/ut/python/train/summary/test_summary_ops_params_valid_check.py @@ -40,6 +40,7 @@ class SummaryDemoTag(nn.Cell): def __init__(self, tag1, tag2, tag3): super(SummaryDemoTag, self).__init__() self.s = P.ScalarSummary() + self.histogram_summary = P.HistogramSummary() self.add = P.TensorAdd() self.tag1 = tag1 self.tag2 = tag2 @@ -50,6 +51,7 @@ class SummaryDemoTag(nn.Cell): z = self.add(x, y) self.s(self.tag2, z) self.s(self.tag3, y) + self.histogram_summary(self.tag1, x) return z @@ -58,6 +60,7 @@ class SummaryDemoTagForSet(nn.Cell): def __init__(self, tag_tuple): super(SummaryDemoTagForSet, self).__init__() self.s = P.ScalarSummary() + 
self.histogram_summary = P.HistogramSummary() self.add = P.TensorAdd() self.tag_tuple = tag_tuple @@ -65,6 +68,7 @@ class SummaryDemoTagForSet(nn.Cell): z = self.add(x, y) for tag in self.tag_tuple: self.s(tag, x) + self.histogram_summary(tag, x) return z @@ -98,6 +102,19 @@ class SummaryDemoValueForSet(nn.Cell): self.s(tag, self.v) return z + +class HistogramSummaryNet(nn.Cell): + "HistogramSummaryNet definition" + def __init__(self, value): + self.histogram_summary = P.HistogramSummary() + self.add = P.TensorAdd() + self.value = value + + def construct(self, tensors1, tensor2): + self.histogram_summary("value", self.value) + return self.add(tensors1, tensor2) + + def run_case(net): """ run_case """ # step 0: create the thread @@ -121,8 +138,8 @@ def run_case(net): # Test 1: use the repeat tag -def test_scalar_summary_use_repeat_tag(): - log.debug("begin test_scalar_summary_use_repeat_tag") +def test_summary_use_repeat_tag(): + log.debug("begin test_summary_use_repeat_tag") net = SummaryDemoTag("x", "x", "x") try: run_case(net) @@ -130,12 +147,12 @@ def test_scalar_summary_use_repeat_tag(): assert False else: assert True - log.debug("finished test_scalar_summary_use_repeat_tag") + log.debug("finished test_summary_use_repeat_tag") # Test 2: repeat tag use for set summary -def test_scalar_summary_use_repeat_tag_for_set(): - log.debug("begin test_scalar_summary_use_repeat_tag_for_set") +def test_summary_use_repeat_tag_for_set(): + log.debug("begin test_summary_use_repeat_tag_for_set") net = SummaryDemoTagForSet(("x", "x", "x")) try: run_case(net) @@ -143,12 +160,12 @@ def test_scalar_summary_use_repeat_tag_for_set(): assert False else: assert True - log.debug("finished test_scalar_summary_use_repeat_tag_for_set") + log.debug("finished test_summary_use_repeat_tag_for_set") # Test3: test with invalid tag(None, bool, "", int) -def test_scalar_summary_use_invalid_tag_None(): - log.debug("begin test_scalar_summary_use_invalid_tag_None") +def 
test_summary_use_invalid_tag_None(): + log.debug("begin test_summary_use_invalid_tag_None") net = SummaryDemoTag(None, None, None) try: run_case(net) @@ -156,31 +173,31 @@ def test_scalar_summary_use_invalid_tag_None(): assert True else: assert False - log.debug("finished test_scalar_summary_use_invalid_tag_None") + log.debug("finished test_summary_use_invalid_tag_None") # Test4: test with invalid tag(None, bool, "", int) -def test_scalar_summary_use_invalid_tag_Bool(): - log.debug("begin test_scalar_summary_use_invalid_tag_Bool") +def test_summary_use_invalid_tag_Bool(): + log.debug("begin test_summary_use_invalid_tag_Bool") net = SummaryDemoTag(True, True, True) run_case(net) - log.debug("finished test_scalar_summary_use_invalid_tag_Bool") + log.debug("finished test_summary_use_invalid_tag_Bool") # Test5: test with invalid tag(None, bool, "", int) -def test_scalar_summary_use_invalid_tag_null(): - log.debug("begin test_scalar_summary_use_invalid_tag_null") +def test_summary_use_invalid_tag_null(): + log.debug("begin test_summary_use_invalid_tag_null") net = SummaryDemoTag("", "", "") run_case(net) - log.debug("finished test_scalar_summary_use_invalid_tag_null") + log.debug("finished test_summary_use_invalid_tag_null") # Test6: test with invalid tag(None, bool, "", int) -def test_scalar_summary_use_invalid_tag_Int(): - log.debug("begin test_scalar_summary_use_invalid_tag_Int") +def test_summary_use_invalid_tag_Int(): + log.debug("begin test_summary_use_invalid_tag_Int") net = SummaryDemoTag(1, 2, 3) run_case(net) - log.debug("finished test_scalar_summary_use_invalid_tag_Int") + log.debug("finished test_summary_use_invalid_tag_Int") # Test7: test with invalid value(None, "") @@ -196,7 +213,6 @@ def test_scalar_summary_use_invalid_value_None(): log.debug("finished test_scalar_summary_use_invalid_tag_Int") - # Test8: test with invalid value(None, "") def test_scalar_summary_use_invalid_value_None_ForSet(): log.debug("begin 
test_scalar_summary_use_invalid_value_None_ForSet") @@ -221,3 +237,30 @@ def test_scalar_summary_use_invalid_value_null(): else: assert False log.debug("finished test_scalar_summary_use_invalid_value_null") + + +def test_histogram_summary_use_valid_value(): + """Test histogram summary with valid value""" + log.debug("Begin test_histogram_summary_use_valid_value") + try: + net = HistogramSummaryNet(Tensor(np.array([1,2,3]))) + run_case(net) + except: + assert True + else: + assert False + log.debug("Finished test_histogram_summary_use_valid_value") + + +def test_histogram_summary_use_scalar_value(): + """Test histogram summary use scalar value""" + log.debug("Begin test_histogram_summary_use_scalar_value") + try: + scalar = Tensor(1) + net = HistogramSummaryNet(scalar) + run_case(net) + except: + assert True + else: + assert False + log.debug("Finished test_histogram_summary_use_scalar_value") From 7cded1ec32ad0455532b5ba9ae545134d0b7ea7f Mon Sep 17 00:00:00 2001 From: huanghui Date: Fri, 17 Apr 2020 16:50:16 +0800 Subject: [PATCH 337/367] bugfix: confusion_softmax_grad need to be set with axis and keep_dims attr --- .../ir_fusion/confusion_softmax_grad_rule.cc | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_softmax_grad_rule.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_softmax_grad_rule.cc index 1270ae77c1..8078247c2a 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_softmax_grad_rule.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/confusion_softmax_grad_rule.cc @@ -21,9 +21,30 @@ #include "session/anf_runtime_algorithm.h" #include "ir/primitive.h" #include "utils/utils.h" +#include "pre_activate/common/helper.h" namespace mindspore { namespace opt { +namespace { +void SetAttrsForFusionNode(const AnfNodePtr &sub_anf, const AnfNodePtr &fusion_node) { + MS_EXCEPTION_IF_NULL(sub_anf); + MS_EXCEPTION_IF_NULL(fusion_node); + auto sub = 
sub_anf->cast(); + MS_EXCEPTION_IF_NULL(sub); + if (sub->size() != kSubInputNum) { + MS_LOG(EXCEPTION) << "Sub's size is not equal with 3"; + } + auto reduce_sum_anf = sub->input(2); + MS_EXCEPTION_IF_NULL(reduce_sum_anf); + auto reduce_sum = reduce_sum_anf->cast(); + if (reduce_sum == nullptr) { + MS_LOG(EXCEPTION) << "Sub's second input is not a cnode"; + } + AnfAlgo::CopyNodeAttr(kAttrAxis, reduce_sum, fusion_node); + AnfAlgo::CopyNodeAttr(kAttrKeepDims, reduce_sum, fusion_node); +} +} // namespace + const BaseRef ConfusionSoftmaxGradRule::DefinePattern() const { return VectorRef( {prim::kPrimSub, input0_, VectorRef({prim::kPrimReduceSum, VectorRef({prim::kPrimMul, input0_, input1_})})}); @@ -48,6 +69,7 @@ const AnfNodePtr ConfusionSoftmaxGradRule::Process(const FuncGraphPtr &graph, co auto shapes = {AnfAlgo::GetOutputInferShape(node, 0)}; AnfAlgo::SetOutputInferTypeAndShape(types, shapes, confusion_softmax_grad.get()); confusion_softmax_grad->set_scope(node->scope()); + SetAttrsForFusionNode(node, confusion_softmax_grad); return confusion_softmax_grad; } } // namespace opt From c69f3be75873d786465e9884a3651dd2607809ff Mon Sep 17 00:00:00 2001 From: Yanjun Peng Date: Fri, 17 Apr 2020 15:40:16 +0800 Subject: [PATCH 338/367] fix dataset api description --- mindspore/dataset/engine/datasets.py | 6 +++--- mindspore/dataset/engine/samplers.py | 3 +-- mindspore/train/model.py | 4 ++++ 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 1c6ac634f2..df05da7032 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -556,7 +556,7 @@ class Dataset: Note: If device is Ascend, features of data will be transferred one by one. The limitation - of data transferation per time is 256M. + of data transmission per time is 256M. Return: TransferDataset, dataset for transferring. 
@@ -572,7 +572,7 @@ class Dataset: Note: If device is Ascend, features of data will be transferred one by one. The limitation - of data transferation per time is 256M. + of data transmission per time is 256M. Returns: TransferDataset, dataset for transferring. @@ -1897,7 +1897,7 @@ class GeneratorDataset(SourceDataset): >>> for i in range(maxid): >>> yield (np.array([i]), np.array([[i, i + 1], [i + 2, i + 3]])) >>> # create multi_column_generator_dataset with GeneratorMC and column names "col1" and "col2" - >>> multi_column_generator_dataset = de.GeneratorDataset(generator_mc, ["col1, col2"]) + >>> multi_column_generator_dataset = de.GeneratorDataset(generator_mc, ["col1", "col2"]) >>> # 3) Iterable dataset as iterable input >>> class MyIterable(): >>> def __iter__(self): diff --git a/mindspore/dataset/engine/samplers.py b/mindspore/dataset/engine/samplers.py index f9c74f151d..0bba559210 100644 --- a/mindspore/dataset/engine/samplers.py +++ b/mindspore/dataset/engine/samplers.py @@ -112,8 +112,7 @@ class RandomSampler(): Args: replacement (bool, optional): If True, put the sample ID back for the next draw (default=False). - num_samples (int, optional): Number of elements to sample (default=None, all elements). This - argument should be specified only when 'replacement' is "True". + num_samples (int, optional): Number of elements to sample (default=None, all elements). Examples: >>> import mindspore.dataset as ds diff --git a/mindspore/train/model.py b/mindspore/train/model.py index 46e4f421f7..851cd63af9 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -362,6 +362,8 @@ class Model: If dataset_sink_mode is True, epoch of training should be equal to the count of repeat operation in dataset processing. Otherwise, errors could occur since the amount of data is not the amount training requires. + If dataset_sink_mode is True, data will be sent to device. If device is Ascend, features + of data will be transferred one by one. 
The limitation of data transmission per time is 256M. Args: epoch (int): Total number of iterations on the data. @@ -485,6 +487,8 @@ class Model: Note: CPU is not supported when dataset_sink_mode is true. + If dataset_sink_mode is True, data will be sent to device. If device is Ascend, features + of data will be transferred one by one. The limitation of data transmission per time is 256M. Args: valid_dataset (Dataset): Dataset to evaluate the model. From 8f4cf323f84951d8c192f76ae41c98261128d4a5 Mon Sep 17 00:00:00 2001 From: kingfo Date: Fri, 17 Apr 2020 18:39:57 +0800 Subject: [PATCH 339/367] fix cell output issue and vm operator in pynative mode --- mindspore/ccsrc/pynative/pynative_execute.cc | 6 +++--- mindspore/ccsrc/session/anf_runtime_algorithm.cc | 2 +- mindspore/nn/cell.py | 5 ++++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/mindspore/ccsrc/pynative/pynative_execute.cc b/mindspore/ccsrc/pynative/pynative_execute.cc index 5620634bcc..6a1ddf6a7e 100644 --- a/mindspore/ccsrc/pynative/pynative_execute.cc +++ b/mindspore/ccsrc/pynative/pynative_execute.cc @@ -39,7 +39,7 @@ const char SINGLE_OP_GRAPH[] = "single_op_graph"; // primitive unable to infer value for constant input in PyNative mode -const std::unordered_set vm_operators = {"partial", "depend"}; +const std::unordered_set vm_operators = {"partial", "depend", "make_ref"}; namespace mindspore { namespace pynative { @@ -141,7 +141,7 @@ OpExecInfoPtr GenerateOpExecInfo(const py::args& args) { op_exec_info->op_inputs = py_args; op_exec_info->inputs_mask = args[PY_INPUT_MASK]; if (op_exec_info->op_inputs.size() != op_exec_info->inputs_mask.size()) { - MS_LOG(ERROR) << "op:" << op_exec_info->op_name << " inputs size not equal op_mask"; + MS_LOG(ERROR) << "Op:" << op_exec_info->op_name << " inputs size not equal op_mask"; return nullptr; } return op_exec_info; @@ -163,7 +163,7 @@ std::string GetSingleOpGraphInfo(const OpExecInfoPtr& op_exec_info) { // get prim and abstract info 
(void)graph_info.append(std::to_string((uintptr_t)(op_exec_info->py_primitive.get())) + "_" + op_exec_info->abstract->ToString()); - MS_LOG(INFO) << "graph info [" << graph_info << "]"; + MS_LOG(INFO) << "Graph info [" << graph_info << "]"; return graph_info; } diff --git a/mindspore/ccsrc/session/anf_runtime_algorithm.cc b/mindspore/ccsrc/session/anf_runtime_algorithm.cc index 44472a9a6f..0fcb3ce39e 100644 --- a/mindspore/ccsrc/session/anf_runtime_algorithm.cc +++ b/mindspore/ccsrc/session/anf_runtime_algorithm.cc @@ -457,7 +457,7 @@ TypeId AnfRuntimeAlgorithm::GetOutputInferDataType(const AnfNodePtr &node, size_ } else if (tuple_i->isa()) { return tuple_i->type_id(); } else { - MS_LOG(EXCEPTION) << "Not support type " << tuple_i->ToString(); + MS_LOG(WARNING) << "Not support type " << tuple_i->ToString(); return tuple_i->type_id(); } } else if (type_ptr->isa()) { diff --git a/mindspore/nn/cell.py b/mindspore/nn/cell.py index 088f3f3e57..5507d12af8 100755 --- a/mindspore/nn/cell.py +++ b/mindspore/nn/cell.py @@ -140,7 +140,10 @@ class Cell: if context.get_context("mode") == context.GRAPH_MODE: out = self.compile_and_run(*inputs) return out - return self.construct(*inputs) + output = self.construct(*inputs) + if isinstance(output, Parameter): + output = output.data + return output def __setattr__(self, name, value): cells = self.__dict__.get('_cells') From b600991cc32ce713fa8601e8449d0b96a269a6f3 Mon Sep 17 00:00:00 2001 From: buxue Date: Fri, 17 Apr 2020 16:16:10 +0800 Subject: [PATCH 340/367] dock DepthwiseConv2dBackprop DepthwiseConv2dBackpropFilter DepthwiseConv2dBackpropInput --- mindspore/ccsrc/kernel/tbe/tbe_adapter.cc | 3 ++ .../pass/const_input_to_attr_registry.cc | 2 + mindspore/ops/_op_impl/tbe/__init__.py | 3 ++ .../ops/_op_impl/tbe/depthwise_conv2d.py | 44 +++++++++++++++++++ .../tbe/depthwise_conv2d_backprop_filter.py | 41 +++++++++++++++++ .../tbe/depthwise_conv2d_backprop_input.py | 41 +++++++++++++++++ mindspore/ops/operations/nn_ops.py | 1 + 7 
files changed, 135 insertions(+) create mode 100644 mindspore/ops/_op_impl/tbe/depthwise_conv2d.py create mode 100644 mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py create mode 100644 mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc index e11ff7d6e6..9e4553e057 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc @@ -39,6 +39,9 @@ static std::map tbe_func_adapter_map = { {"reduce_min", "reduce_min_d"}, {"conv2d_backprop_filter", "conv2d_backprop_filter_d"}, {"conv2d_backprop_input", "conv2d_backprop_input_d"}, + {"depthwise_conv2d_native", "depthwise_conv2d"}, + {"depthwise_conv2d_native_backprop_filter", "depthwise_conv2d_backprop_filter_d"}, + {"depthwise_conv2d_native_backprop_input", "depthwise_conv2d_backprop_input_d"}, {"top_kv2", "top_k"}, {"scatter_nd", "scatter_nd_d"}, {"tile", "tile_d"}, diff --git a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc index 88edfd3019..c2f96e54c6 100644 --- a/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc +++ b/mindspore/ccsrc/pre_activate/pass/const_input_to_attr_registry.cc @@ -27,6 +27,8 @@ ConstInputToAttrInfoRegistry::ConstInputToAttrInfoRegistry() { Register(prim::kPrimCast->name(), {1}); Register(prim::kPrimConv2DBackpropInput->name(), {2}); Register(prim::kPrimConv2DBackpropFilter->name(), {2}); + Register(prim::kPrimDepthwiseConv2dNativeBackpropFilter->name(), {1}); + Register(prim::kPrimDepthwiseConv2dNativeBackpropInput->name(), {0}); Register(prim::kPrimReshape->name(), {1}); Register(prim::kPrimReduceMax->name(), {1}); Register(prim::kPrimReduceMin->name(), {1}); diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py index 9ec5446165..340cf9efe3 100644 --- a/mindspore/ops/_op_impl/tbe/__init__.py 
+++ b/mindspore/ops/_op_impl/tbe/__init__.py @@ -133,3 +133,6 @@ from .arg_min_with_value import _arg_min_with_value_tbe from .fused_mul_add import _fused_mul_add_tbe from .fused_mul_add_n import _fused_mul_add_n_tbe from .fused_mul_apply_momentum import _fused_mul_apply_momentum_tbe +from .depthwise_conv2d import _depthwise_conv2d_tbe +from .depthwise_conv2d_backprop_filter import _depthwise_conv2d_backprop_filter_tbe +from .depthwise_conv2d_backprop_input import _depthwise_conv2d_backprop_input_tbe diff --git a/mindspore/ops/_op_impl/tbe/depthwise_conv2d.py b/mindspore/ops/_op_impl/tbe/depthwise_conv2d.py new file mode 100644 index 0000000000..fdafcd3fa4 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/depthwise_conv2d.py @@ -0,0 +1,44 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""DepthwiseConv2D op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +depthwise_conv2d_op_info = TBERegOp("DepthwiseConv2dNative") \ + .fusion_type("CONVLUTION") \ + .async_flag(False) \ + .binfile_name("depthwise_conv2d.so") \ + .compute_cost(10) \ + .kernel_name("depthwise_conv2d") \ + .partial_flag(True) \ + .attr("stride", "required", "listInt", "all") \ + .attr("dilation", "required", "listInt", "all") \ + .attr("pads", "required", "listInt", "all") \ + .attr("data_format", "required", "str", "all") \ + .attr("offset_a", "optional", "int", "all") \ + .input(0, "x", False, "required", "all") \ + .input(1, "filter", False, "required", "all") \ + .input(2, "bias", False, "optional", "all") \ + .input(3, "offset_w", False, "optional", "all") \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_C1HWNCoC0, DataType.F16_Default, DataType.F16_Default, + DataType.F16_5HD) \ + .get_op_info() + + +@op_info_register(depthwise_conv2d_op_info) +def _depthwise_conv2d_tbe(): + """DepthwiseConv2D TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py b/mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py new file mode 100644 index 0000000000..c19a311009 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py @@ -0,0 +1,41 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""DepthwiseConv2DBackpropFilter op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +depthwise_conv2d_backprop_filter_op_info = TBERegOp("DepthwiseConv2dNativeBackpropFilter") \ + .fusion_type("CONVLUTION") \ + .async_flag(False) \ + .binfile_name("depthwise_conv2d_backprop_filter_d.so") \ + .compute_cost(10) \ + .kernel_name("depthwise_conv2d_backprop_filter_d") \ + .partial_flag(True) \ + .attr("filter_size", "required", "listInt", "all") \ + .attr("stride", "required", "listInt", "all") \ + .attr("dilation", "required", "listInt", "all") \ + .attr("pads", "required", "str", "all") \ + .attr("data_format", "required", "str", "all") \ + .input(0, "input", False, "required", "all") \ + .input(1, "out_backprop", False, "required", "all") \ + .output(0, "filter_grad", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F32_C1HWNCoC0) \ + .get_op_info() + + +@op_info_register(depthwise_conv2d_backprop_filter_op_info) +def _depthwise_conv2d_backprop_filter_tbe(): + """DepthwiseConv2DBackpropFilter TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py b/mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py new file mode 100644 index 0000000000..9e671f18e2 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py @@ -0,0 +1,41 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""DepthwiseConv2DBackpropInput op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +depthwise_conv2d_backprop_input_op_info = TBERegOp("DepthwiseConv2dNativeBackpropInput") \ + .fusion_type("CONVLUTION") \ + .async_flag(False) \ + .binfile_name("depthwise_conv2d_backprop_input_d.so") \ + .compute_cost(10) \ + .kernel_name("depthwise_conv2d_backprop_input_d") \ + .partial_flag(True) \ + .attr("input_size", "required", "listInt", "all") \ + .attr("stride", "required", "listInt", "all") \ + .attr("dilation", "required", "listInt", "all") \ + .attr("pads", "required", "str", "all") \ + .attr("data_format", "required", "str", "all") \ + .input(0, "filter", False, "required", "all") \ + .input(1, "out_backprop", False, "required", "all") \ + .output(0, "input_grad", False, "required", "all") \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_5HD, DataType.F16_5HD) \ + .get_op_info() + + +@op_info_register(depthwise_conv2d_backprop_input_op_info) +def _depthwise_conv2d_backprop_input_tbe(): + """DepthwiseConv2DBackpropInput TBE register""" + return diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index a4efd5a47b..9827975fd0 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -696,6 +696,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer): dilation=1, group=1): """init DepthwiseConv2dNative""" + self.init_prim_io_names(inputs=['x', 'w'], outputs=['output']) 
validator.check_pad_value_by_mode(self.__class__.__name__, pad_mode, pad) self.kernel_size = validator.check_type('kernel_size', kernel_size, (int, tuple)) if isinstance(kernel_size, int): From 5eb53798894ca2be54cbd33bfe5f0088a04e0875 Mon Sep 17 00:00:00 2001 From: YuJianfeng Date: Fri, 17 Apr 2020 17:06:16 +0800 Subject: [PATCH 341/367] Add AdamApplyOne fusion pass --- .../ascend/ir_fusion/adam_apply_one_fusion.cc | 56 ++++++- .../ascend/ir_fusion/adam_apply_one_fusion.h | 50 +++++- .../ir_fusion/adam_apply_one_fusion_test.cc | 151 ++++++++++++++++++ .../adam_apply_one_fusion_test.py | 72 +++++++++ 4 files changed, 321 insertions(+), 8 deletions(-) diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.cc b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.cc index 3f905fedf9..4645167191 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.cc +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.cc @@ -42,17 +42,69 @@ AnfNodePtr AdamApplyOneFusion::CreateAdamApplyOneNode(const FuncGraphPtr &func_g const BaseRef AdamApplyOneFusion::DefinePattern() const { const auto prim_sqrt = std::make_shared(kSqrtOpName); - const auto prim_deal_div = std::make_shared(kRealDivOpName); + const auto prim_real_div = std::make_shared(kRealDivOpName); VectorRef mul2 = VectorRef({prim::kPrimMul, mul_x_input_vars_[2], input_vars_[1]}); VectorRef mul3 = VectorRef({prim::kPrimMul, mul_x_input_vars_[3], VectorRef({prim::kPrimSquare, input_vars_[0]})}); VectorRef sqrt0 = VectorRef({prim_sqrt, VectorRef({add1_var_, mul2, mul3})}); VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); - VectorRef true_div0 = VectorRef({prim_deal_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})}); + VectorRef true_div0 = 
VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})}); return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, input_vars_[4], true_div0})}); } +const BaseRef AdamApplyOneCond1Fusion::DefinePattern() const { + const auto prim_sqrt = std::make_shared(kSqrtOpName); + const auto prim_real_div = std::make_shared(kRealDivOpName); + VectorRef mul2 = VectorRef({prim::kPrimMul, mul_x_input_vars_[2], input_vars_[1]}); + VectorRef mul3 = VectorRef({prim::kPrimMul, mul_x_input_vars_[3], VectorRef({prim::kPrimSquare, input_vars_[0]})}); + VectorRef sqrt0 = VectorRef({prim_sqrt, VectorRef({add1_var_, mul2, mul3})}); + VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); + VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); + VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); + VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, add2_y_, sqrt0})}); + return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, input_vars_[4], true_div0})}); +} + +const BaseRef AdamApplyOneCond2Fusion::DefinePattern() const { + const auto prim_sqrt = std::make_shared(kSqrtOpName); + const auto prim_real_div = std::make_shared(kRealDivOpName); + VectorRef mul2 = VectorRef({prim::kPrimMul, mul_x_input_vars_[2], input_vars_[1]}); + VectorRef mul3 = VectorRef({prim::kPrimMul, VectorRef({prim::kPrimSquare, input_vars_[0]}), mul_x_input_vars_[3]}); + VectorRef sqrt0 = VectorRef({prim_sqrt, VectorRef({add1_var_, mul2, mul3})}); + VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); + VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); + VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); + VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})}); + return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, 
true_div0, input_vars_[4]})}); +} + +const BaseRef AdamApplyOneCond3Fusion::DefinePattern() const { + const auto prim_sqrt = std::make_shared(kSqrtOpName); + const auto prim_real_div = std::make_shared(kRealDivOpName); + VectorRef mul2 = VectorRef({prim::kPrimMul, mul_x_input_vars_[2], input_vars_[1]}); + VectorRef mul3 = VectorRef({prim::kPrimMul, mul_x_input_vars_[3], VectorRef({prim::kPrimSquare, input_vars_[0]})}); + VectorRef sqrt0 = VectorRef({prim_sqrt, VectorRef({add1_var_, mul2, mul3})}); + VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); + VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); + VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); + VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, sqrt0, add2_y_})}); + return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, true_div0, input_vars_[4]})}); +} + +const BaseRef AdamApplyOneCond4Fusion::DefinePattern() const { + const auto prim_sqrt = std::make_shared(kSqrtOpName); + const auto prim_real_div = std::make_shared(kRealDivOpName); + VectorRef mul2 = VectorRef({prim::kPrimMul, mul_x_input_vars_[2], input_vars_[1]}); + VectorRef mul3 = VectorRef({prim::kPrimMul, mul_x_input_vars_[3], VectorRef({prim::kPrimSquare, input_vars_[0]})}); + VectorRef sqrt0 = VectorRef({prim_sqrt, VectorRef({add1_var_, mul2, mul3})}); + VectorRef mul1 = VectorRef({prim::kPrimMul, mul_x_input_vars_[1], input_vars_[0]}); + VectorRef mul0 = VectorRef({prim::kPrimMul, mul_x_input_vars_[0], input_vars_[2]}); + VectorRef add0 = VectorRef({add0_var_, mul0, mul1}); + VectorRef true_div0 = VectorRef({prim_real_div, add0, VectorRef({prim::kPrimTensorAdd, add2_y_, sqrt0})}); + return VectorRef({prim::kPrimSub, input_vars_[3], VectorRef({prim::kPrimMul, true_div0, input_vars_[4]})}); +} + const AnfNodePtr AdamApplyOneFusion::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node, const EquivPtr &equiv) 
const { MS_EXCEPTION_IF_NULL(func_graph); diff --git a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h index 77f6641463..5ee8a86cfb 100644 --- a/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h +++ b/mindspore/ccsrc/pre_activate/ascend/ir_fusion/adam_apply_one_fusion.h @@ -18,21 +18,23 @@ #include #include +#include #include "pre_activate/common/optimizer.h" #include "utils/utils.h" namespace mindspore { namespace opt { -constexpr size_t kAdamApplyOneInputNum = 5; -constexpr size_t kAdamApplyOneMulInputNum = 4; +constexpr size_t kAdamApplyOneInputVarNum = 5; +constexpr size_t kAdamApplyOneMulInputVarNum = 4; class AdamApplyOneFusion : public PatternProcessPass { public: - explicit AdamApplyOneFusion(bool multigraph = true) : PatternProcessPass("adam_apply_one_fusion", multigraph) { - for (size_t i = 0; i < kAdamApplyOneInputNum; ++i) { + explicit AdamApplyOneFusion(const std::string &name = "adam_apply_one_fusion", bool multigraph = true) + : PatternProcessPass(name, multigraph) { + for (size_t i = 0; i < kAdamApplyOneInputVarNum; ++i) { input_vars_.push_back(std::make_shared()); } - for (size_t i = 0; i < kAdamApplyOneMulInputNum; ++i) { + for (size_t i = 0; i < kAdamApplyOneMulInputVarNum; ++i) { mul_x_input_vars_.push_back(std::make_shared()); } add2_y_ = std::make_shared(); @@ -44,7 +46,7 @@ class AdamApplyOneFusion : public PatternProcessPass { const BaseRef DefinePattern() const override; const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; - private: + protected: AnfNodePtr CreateAdamApplyOneNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv) const; std::vector input_vars_; std::vector mul_x_input_vars_; @@ -52,6 +54,42 @@ class AdamApplyOneFusion : public PatternProcessPass { VarPtr add0_var_; VarPtr add1_var_; }; + +class AdamApplyOneCond1Fusion : public AdamApplyOneFusion { + public: + 
explicit AdamApplyOneCond1Fusion(bool multigraph = true) + : AdamApplyOneFusion("adam_apply_one_cond1_fusion", multigraph) {} + + ~AdamApplyOneCond1Fusion() override = default; + const BaseRef DefinePattern() const override; +}; + +class AdamApplyOneCond2Fusion : public AdamApplyOneFusion { + public: + explicit AdamApplyOneCond2Fusion(bool multigraph = true) + : AdamApplyOneFusion("adam_apply_one_cond2_fusion", multigraph) {} + + ~AdamApplyOneCond2Fusion() override = default; + const BaseRef DefinePattern() const override; +}; + +class AdamApplyOneCond3Fusion : public AdamApplyOneFusion { + public: + explicit AdamApplyOneCond3Fusion(bool multigraph = true) + : AdamApplyOneFusion("adam_apply_one_cond3_fusion", multigraph) {} + + ~AdamApplyOneCond3Fusion() override = default; + const BaseRef DefinePattern() const override; +}; + +class AdamApplyOneCond4Fusion : public AdamApplyOneFusion { + public: + explicit AdamApplyOneCond4Fusion(bool multigraph = true) + : AdamApplyOneFusion("adam_apply_one_cond4_fusion", multigraph) {} + + ~AdamApplyOneCond4Fusion() override = default; + const BaseRef DefinePattern() const override; +}; } // namespace opt } // namespace mindspore #endif // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_ADAM_APPLY_ONE_FUSION_H_ diff --git a/tests/ut/cpp/pre_activate/ascend/ir_fusion/adam_apply_one_fusion_test.cc b/tests/ut/cpp/pre_activate/ascend/ir_fusion/adam_apply_one_fusion_test.cc index f4e418bed1..c2ee7b6519 100644 --- a/tests/ut/cpp/pre_activate/ascend/ir_fusion/adam_apply_one_fusion_test.cc +++ b/tests/ut/cpp/pre_activate/ascend/ir_fusion/adam_apply_one_fusion_test.cc @@ -66,5 +66,156 @@ TEST_F(TestHWAdamApplyOneFusion, test_adam_apply_one_fusion) { EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); } +TEST_F(TestHWAdamApplyOneFusion, test_adam_apply_one_cond1_fusion) { + /* + * def before_cond1(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, add2_y): + * square0 = Square(input0) + * mul1 = Mul(mul1_x, input0) + * 
mul0 = Mul(mul0_x, input2) + * mul2 = Mul(mul2_x, input1) + * mul3 = Mul(mul3_x, square0) + * add0 = Add(mul0, mul1) + * add1 = Add(mul2, mul3) + * sqrt0 = Sqrt(add1) + * add2 = Add(add2_y, sqrt0) + * true_div0 = RealDiv(add0, add2) + * mul4 = Mul(input4, true_div0) + * sub0 = Sub(input3, mul4) + * outputs = make_tuple(add1, add0, sub0) + * output = tuple_getitem(outputs, 0) + * return output + */ + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_adam_apply_one_fusion", "before_cond1"); + std::vector shp{2, 32, 224, 224}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 10; ++i) { + args_spec_list.push_back(x_abstract); + } + auto fg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(fg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_adam_apply_one_fusion", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} + +TEST_F(TestHWAdamApplyOneFusion, test_adam_apply_one_cond2_fusion) { + /* + * def before_cond2(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, add2_y): + * square0 = Square(input0) + * mul1 = Mul(mul1_x, input0) + * mul0 = Mul(mul0_x, input2) + * mul2 = Mul(mul2_x, input1) + * mul3 = Mul(square0, mul3_x) + * add0 = Add(mul0, mul1) + * add1 = Add(mul2, mul3) + * sqrt0 = Sqrt(add1) + * add2 = Add(sqrt0, add2_y) + * true_div0 = RealDiv(add0, add2) + * mul4 = Mul(true_div0, input4) + * sub0 = Sub(input3, mul4) + * outputs = make_tuple(add1, add0, sub0) + * output = tuple_getitem(outputs, 0) + * return output + */ + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_adam_apply_one_fusion", "before_cond2"); + std::vector shp{2, 32, 224, 224}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 10; ++i) { + 
args_spec_list.push_back(x_abstract); + } + auto fg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(fg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_adam_apply_one_fusion", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} + +TEST_F(TestHWAdamApplyOneFusion, test_adam_apply_one_cond3_fusion) { + /* + * def before_cond3(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, add2_y): + * square0 = Square(input0) + * mul1 = Mul(mul1_x, input0) + * mul0 = Mul(mul0_x, input2) + * mul2 = Mul(mul2_x, input1) + * mul3 = Mul(mul3_x, square0) + * add0 = Add(mul0, mul1) + * add1 = Add(mul2, mul3) + * sqrt0 = Sqrt(add1) + * add2 = Add(sqrt0, add2_y) + * true_div0 = RealDiv(add0, add2) + * mul4 = Mul(true_div0, input4) + * sub0 = Sub(input3, mul4) + * outputs = make_tuple(add1, add0, sub0) + * output = tuple_getitem(outputs, 0) + * return output + */ + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_adam_apply_one_fusion", "before_cond3"); + std::vector shp{2, 32, 224, 224}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 10; ++i) { + args_spec_list.push_back(x_abstract); + } + auto fg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(fg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_adam_apply_one_fusion", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} + +TEST_F(TestHWAdamApplyOneFusion, test_adam_apply_one_cond4_fusion) { + /* + * def before_cond4(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, add2_y): + * square0 = Square(input0) + * mul1 = Mul(mul1_x, 
input0) + * mul0 = Mul(mul0_x, input2) + * mul2 = Mul(mul2_x, input1) + * mul3 = Mul(mul3_x, square0) + * add0 = Add(mul0, mul1) + * add1 = Add(mul2, mul3) + * sqrt0 = Sqrt(add1) + * add2 = Add(add2_y, sqrt0) + * true_div0 = RealDiv(add0, add2) + * mul4 = Mul(true_div0, input4) + * sub0 = Sub(input3, mul4) + * outputs = make_tuple(add1, add0, sub0) + * output = tuple_getitem(outputs, 0) + * return output + */ + FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_adam_apply_one_fusion", "before_cond4"); + std::vector shp{2, 32, 224, 224}; + auto x_abstract = std::make_shared(kFloat32, shp); + AbstractBasePtrList args_spec_list; + for (size_t i = 0; i < 10; ++i) { + args_spec_list.push_back(x_abstract); + } + auto fg = GetKernelGraph(g, args_spec_list); + + auto optimizer = std::make_shared(); + auto pm = std::make_shared(); + pm->AddPass(std::make_shared()); + optimizer->AddPassManager(pm); + FuncGraphPtr new_graph = optimizer->Optimize(fg); + + FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_adam_apply_one_fusion", "after"); + EXPECT_TRUE(CheckEqualGraph(g_after, new_graph)); +} } // namespace opt } // namespace mindspore diff --git a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py index b55764b18d..225964ee38 100644 --- a/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py +++ b/tests/ut/cpp/python_input/gtest_input/pre_activate/adam_apply_one_fusion_test.py @@ -58,6 +58,78 @@ def test_adam_apply_one_fusion(tag): output = tuple_getitem(outputs, 0) return output + @fns + def before_cond1(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, add2_y): + square0 = Square(input0) + mul1 = Mul(mul1_x, input0) + mul0 = Mul(mul0_x, input2) + mul2 = Mul(mul2_x, input1) + mul3 = Mul(mul3_x, square0) + add0 = Add(mul0, mul1) + add1 = Add(mul2, mul3) + sqrt0 = Sqrt(add1) + add2 = Add(add2_y, sqrt0) + 
true_div0 = RealDiv(add0, add2) + mul4 = Mul(input4, true_div0) + sub0 = Sub(input3, mul4) + outputs = make_tuple(add1, add0, sub0) + output = tuple_getitem(outputs, 0) + return output + + @fns + def before_cond2(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, add2_y): + square0 = Square(input0) + mul1 = Mul(mul1_x, input0) + mul0 = Mul(mul0_x, input2) + mul2 = Mul(mul2_x, input1) + mul3 = Mul(square0, mul3_x) + add0 = Add(mul0, mul1) + add1 = Add(mul2, mul3) + sqrt0 = Sqrt(add1) + add2 = Add(sqrt0, add2_y) + true_div0 = RealDiv(add0, add2) + mul4 = Mul(true_div0, input4) + sub0 = Sub(input3, mul4) + outputs = make_tuple(add1, add0, sub0) + output = tuple_getitem(outputs, 0) + return output + + @fns + def before_cond3(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, add2_y): + square0 = Square(input0) + mul1 = Mul(mul1_x, input0) + mul0 = Mul(mul0_x, input2) + mul2 = Mul(mul2_x, input1) + mul3 = Mul(mul3_x, square0) + add0 = Add(mul0, mul1) + add1 = Add(mul2, mul3) + sqrt0 = Sqrt(add1) + add2 = Add(sqrt0, add2_y) + true_div0 = RealDiv(add0, add2) + mul4 = Mul(true_div0, input4) + sub0 = Sub(input3, mul4) + outputs = make_tuple(add1, add0, sub0) + output = tuple_getitem(outputs, 0) + return output + + @fns + def before_cond4(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, add2_y): + square0 = Square(input0) + mul1 = Mul(mul1_x, input0) + mul0 = Mul(mul0_x, input2) + mul2 = Mul(mul2_x, input1) + mul3 = Mul(mul3_x, square0) + add0 = Add(mul0, mul1) + add1 = Add(mul2, mul3) + sqrt0 = Sqrt(add1) + add2 = Add(add2_y, sqrt0) + true_div0 = RealDiv(add0, add2) + mul4 = Mul(true_div0, input4) + sub0 = Sub(input3, mul4) + outputs = make_tuple(add1, add0, sub0) + output = tuple_getitem(outputs, 0) + return output + @fns def after(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, mul3_x, add2_y): adam_apply_one = AdamApplyOne(input0, input1, input2, input3, input4, mul0_x, mul1_x, mul2_x, 
mul3_x, add2_y) From c853f985cc8c7e696f9d3e5e23de446d37a6663a Mon Sep 17 00:00:00 2001 From: buxue Date: Sat, 18 Apr 2020 10:03:26 +0800 Subject: [PATCH 342/367] support equal list with none --- .../ccsrc/operator/composite/composite.cc | 2 +- .../ops/composite/multitype_ops/equal_impl.py | 31 +++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/mindspore/ccsrc/operator/composite/composite.cc b/mindspore/ccsrc/operator/composite/composite.cc index 347641829d..9a665e8a30 100644 --- a/mindspore/ccsrc/operator/composite/composite.cc +++ b/mindspore/ccsrc/operator/composite/composite.cc @@ -743,7 +743,7 @@ FuncGraphPtr MultitypeFuncGraph::GenerateFromTypes(const TypePtrList& types) { } oss << ++idx << ". " << item.first << "\n " << trace::GetDebugInfo(func_graph->debug_info()) << "\n"; } - MS_LOG(EXCEPTION) << "Fail to find overload function for `" << name_ << "` with type " << buffer.str() << "\n" + MS_LOG(EXCEPTION) << "The '" << name_ << "' operation does not support the type " << buffer.str() << "\n" << oss.str(); } diff --git a/mindspore/ops/composite/multitype_ops/equal_impl.py b/mindspore/ops/composite/multitype_ops/equal_impl.py index 428cdf4705..ff54c34fad 100644 --- a/mindspore/ops/composite/multitype_ops/equal_impl.py +++ b/mindspore/ops/composite/multitype_ops/equal_impl.py @@ -190,6 +190,7 @@ def _none_equal_tuple(x, y): """ return False + @equal.register("Tensor", "Number") @equal.register("Number", "Tensor") @equal.register("Tensor", "Tensor") @@ -235,3 +236,33 @@ def _none_equal_tensor(x, y): bool, return false. """ return False + + +@equal.register("List", "None") +def _list_equal_none(x, y): + """ + Determine if list equal none. + + Args: + x (list): The first input which is a list. + y (none): The second input which is none. + + Returns: + bool, return false. + """ + return False + + +@equal.register("None", "List") +def _none_equal_list(x, y): + """ + Determine if none equal list. 
+ + Args: + x (none): The first input which is none. + y (list): The second input which is a list. + + Returns: + bool, return false. + """ + return False From d8feaa9c19e7acb89c17186f90ac202756d25f1f Mon Sep 17 00:00:00 2001 From: gengdongjie Date: Sat, 18 Apr 2020 11:44:08 +0800 Subject: [PATCH 343/367] remove list append to avoid memory explosion --- mindspore/train/callback.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/mindspore/train/callback.py b/mindspore/train/callback.py index dcf630342c..b9635acc62 100644 --- a/mindspore/train/callback.py +++ b/mindspore/train/callback.py @@ -686,9 +686,6 @@ class TimeMonitor(Callback): def __init__(self, data_size): super(TimeMonitor, self).__init__() self.data_size = data_size - self.step_time_cost = [] - self.epoch_time_cost = [] - self.per_step_time = [] def epoch_begin(self, run_context): self.epoch_time = time.time() @@ -696,8 +693,6 @@ class TimeMonitor(Callback): def epoch_end(self, run_context): epoch_mseconds = (time.time() - self.epoch_time) * 1000 per_step_mseconds = epoch_mseconds / self.data_size - self.epoch_time_cost.append(epoch_mseconds) - self.per_step_time.append(per_step_mseconds) print("epoch time: {0}, per step time: {1}".format(epoch_mseconds, per_step_mseconds), flush=True) def step_begin(self, run_context): @@ -705,6 +700,5 @@ class TimeMonitor(Callback): def step_end(self, run_context): step_mseconds = (time.time() - self.step_time) * 1000 - self.step_time_cost.append(step_mseconds) print('step time', step_mseconds, flush=True) From f4164169faea1a125428be716b9a4a0dd61b1691 Mon Sep 17 00:00:00 2001 From: dengwentao Date: Mon, 13 Apr 2020 11:22:45 +0800 Subject: [PATCH 344/367] modify gpu tvm cmake --- cmake/external_libs/dmlc_core.cmake | 2 +- cmake/external_libs/tvm_gpu.cmake | 18 +++-- cmake/package.cmake | 12 ++- cmake/utils.cmake | 20 +++-- mindspore/ccsrc/CMakeLists.txt | 109 +--------------------------- 5 files changed, 39 insertions(+), 122 deletions(-) diff --git 
a/cmake/external_libs/dmlc_core.cmake b/cmake/external_libs/dmlc_core.cmake index 386a52429d..e07df83fd6 100644 --- a/cmake/external_libs/dmlc_core.cmake +++ b/cmake/external_libs/dmlc_core.cmake @@ -1,4 +1,4 @@ -mindspore_add_pkg(dmlc_core +mindspore_add_pkg(dmlc-core VER 0.3 HEAD_ONLY ./ URL https://github.com/dmlc/dmlc-core/archive/808f485387f9a03f78fa9f1159f387d0d91b7a28.zip diff --git a/cmake/external_libs/tvm_gpu.cmake b/cmake/external_libs/tvm_gpu.cmake index 57a045cb03..2edec52ee1 100644 --- a/cmake/external_libs/tvm_gpu.cmake +++ b/cmake/external_libs/tvm_gpu.cmake @@ -1,8 +1,16 @@ -set(incubator_tvm_gpu_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2") -set(incubator_tvm_gpu_CFLAGS "-D_FORTIFY_SOURCE=2 -O2") +set(incubator_tvm_gpu_CFLAGS "-pipe -Wall -fPIC -fstack-protector-all -D_FORTIFY_SOURCE=2 -O2") +set(incubator_tvm_gpu_CXXFLAGS "-std=c++11 -pipe -Wall -fPIC -fstack-protector-all -D_FORTIFY_SOURCE=2 -O2") +set(USE_CUDA "ON") mindspore_add_pkg(incubator_tvm_gpu VER 0.6.0 - HEAD_ONLY ./ + LIBS tvm URL https://github.com/apache/incubator-tvm/archive/v0.6.0.tar.gz - MD5 9cbbd32545a776023acabbba270449fe) - + MD5 9cbbd32545a776023acabbba270449fe + SUBMODULES ${dlpack_DIRPATH} ${dmlc-core_DIRPATH} ${rang_DIRPATH} + SOURCEMODULES topi/python/topi python/tvm + PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/incubator-tvm/find_library.patch + ${CMAKE_SOURCE_DIR}/third_party/patch/incubator-tvm/include.patch + ${CMAKE_SOURCE_DIR}/third_party/patch/incubator-tvm/src_pass.patch + CMAKE_OPTION -DBUILD_TESTING=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_SHARED_LIBS=ON) +include_directories(${incubator_tvm_gpu_INC}) +add_library(mindspore::tvm ALIAS incubator_tvm_gpu::tvm) diff --git a/cmake/package.cmake b/cmake/package.cmake index 7d1fdc6d8a..81d58ccb92 100644 --- a/cmake/package.cmake +++ b/cmake/package.cmake @@ -190,11 +190,17 @@ if (ENABLE_GPU) DESTINATION ${INSTALL_PY_DIR}/../ COMPONENT mindspore ) - if (EXISTS ${CMAKE_BINARY_DIR}/incubator-tvm) + if (EXISTS 
${incubator_tvm_gpu_ROOT}) + file(GLOB_RECURSE GLOG_LIB_LIST ${incubator_tvm_gpu_LIBPATH}/lib*) + install( + FILES ${GLOG_LIB_LIST} + DESTINATION ${INSTALL_LIB_DIR} + COMPONENT mindspore + ) install( DIRECTORY - ${CMAKE_BINARY_DIR}/incubator-tvm/topi/python/topi - ${CMAKE_BINARY_DIR}/incubator-tvm/python/tvm + ${incubator_tvm_gpu_ROOT}/topi/python/topi + ${incubator_tvm_gpu_ROOT}/python/tvm DESTINATION ${INSTALL_PY_DIR}/../_akg COMPONENT mindspore ) diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 21a766dc8c..5516fa9543 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -206,7 +206,7 @@ function(mindspore_add_pkg pkg_name ) set(options ) set(oneValueArgs URL MD5 GIT_REPOSITORY GIT_TAG VER EXE DIR HEAD_ONLY CMAKE_PATH RELEASE LIB_PATH) - set(multiValueArgs CMAKE_OPTION LIBS PRE_CONFIGURE_COMMAND CONFIGURE_COMMAND BUILD_OPTION INSTALL_INCS INSTALL_LIBS PATCHES) + set(multiValueArgs CMAKE_OPTION LIBS PRE_CONFIGURE_COMMAND CONFIGURE_COMMAND BUILD_OPTION INSTALL_INCS INSTALL_LIBS PATCHES SUBMODULES SOURCEMODULES) cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) if (NOT PKG_LIB_PATH) @@ -270,11 +270,21 @@ function(mindspore_add_pkg pkg_name ) endif () if (NOT PKG_DIR) - if (PKG_GIT_REPOSITORY) - __download_pkg_with_git(${pkg_name} ${PKG_GIT_REPOSITORY} ${PKG_GIT_TAG} ${PKG_MD5}) - else() + if (PKG_GIT_REPOSITORY) + __download_pkg_with_git(${pkg_name} ${PKG_GIT_REPOSITORY} ${PKG_GIT_TAG} ${PKG_MD5}) + else() __download_pkg(${pkg_name} ${PKG_URL} ${PKG_MD5}) - endif() + endif() + foreach(_SUBMODULE_FILE ${PKG_SUBMODULES}) + STRING( REGEX REPLACE "(.+)_(.+)" "\\1" _SUBMODEPATH ${_SUBMODULE_FILE}) + STRING( REGEX REPLACE "(.+)/(.+)" "\\2" _SUBMODENAME ${_SUBMODEPATH}) + file(GLOB ${pkg_name}_INSTALL_SUBMODULE ${_SUBMODULE_FILE}/*) + file(COPY ${${pkg_name}_INSTALL_SUBMODULE} DESTINATION ${${pkg_name}_SOURCE_DIR}/3rdparty/${_SUBMODENAME}) + endforeach (_SUBMODULE_FILE) + foreach(_SOURCE_DIR ${PKG_SOURCEMODULES}) + file(GLOB 
${pkg_name}_INSTALL_SOURCE ${${pkg_name}_SOURCE_DIR}/${_SOURCE_DIR}/*) + file(COPY ${${pkg_name}_INSTALL_SOURCE} DESTINATION ${${pkg_name}_BASE_DIR}/${_SOURCE_DIR}/) + endforeach (_SUBMODULE_FILE) else() set(${pkg_name}_SOURCE_DIR ${PKG_DIR}) endif () diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index 77008aee5f..e227969162 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -395,114 +395,7 @@ if(USE_GLOG) endif() if(ENABLE_GPU) - execute_process(COMMAND bash ${CMAKE_SOURCE_DIR}/third_party/apply_patches.sh - ${CMAKE_BINARY_DIR} - ${dlpack_DIRPATH} - ${dmlc_core_DIRPATH} - ${rang_DIRPATH} - ${incubator_tvm_gpu_DIRPATH}) - set(TVM_DIR "${CMAKE_BINARY_DIR}/incubator-tvm") - # Utility functions - include(${TVM_DIR}/cmake/util/Util.cmake) - include(${TVM_DIR}/cmake/util/FindCUDA.cmake) - - # include directories - include_directories(AFTER "${TVM_DIR}/include") - include_directories(AFTER "${TVM_DIR}/src") - include_directories(AFTER "${TVM_DIR}") - include_directories(AFTER "${TVM_DIR}/src/schedule") - - include_directories(AFTER "${TVM_DIR}/3rdparty/dmlc-core/include") - include_directories(AFTER "${TVM_DIR}/3rdparty/dlpack/include") - include_directories(AFTER "${TVM_DIR}/3rdparty/compiler-rt") - include_directories(AFTER "${TVM_DIR}/3rdparty/rang/include") - - # lib contain dlopen and dlclose - set(TVM_RUNTIME_LINKER_LIBS ${CMAKE_DL_LIBS}) - - # add source group - file(GLOB_RECURSE GROUP_SOURCE "${TVM_DIR}/src/*.cc" "src/*.cc") - file(GLOB_RECURSE GROUP_INCLUDE "${TVM_DIR}/src/*.h" - "${TVM_DIR}/include/*.h" "src/*.h" "include/*.h") - assign_source_group("Source" ${GROUP_SOURCE}) - assign_source_group("Include" ${GROUP_INCLUDE}) - - file(GLOB COMPILER_SRCS - "pre_activate/gpu/*.cc" - ${TVM_DIR}/src/api/*.cc - ${TVM_DIR}/src/arithmetic/*.cc - ${TVM_DIR}/src/autotvm/*.cc - ${TVM_DIR}/src/codegen/*.cc - ${TVM_DIR}/src/lang/*.cc - ${TVM_DIR}/src/pass/*.cc - ${TVM_DIR}/src/op/*.cc - ${TVM_DIR}/src/node/*.cc 
- ${TVM_DIR}/src/schedule/*.cc - ${TVM_DIR}/src/runtime/*.cc - ${TVM_DIR}/src/runtime/vm/*.cc - ${TVM_DIR}/src/runtime/vm/profiler/*.cc - ${TVM_DIR}/src/codegen/stackvm/*.cc) - - file(GLOB_RECURSE RELAY_SRCS ${TVM_DIR}/src/relay/*.cc) - list(APPEND COMPILER_SRCS ${RELAY_SRCS}) - - file(GLOB DATATYPE_SRCS ${TVM_DIR}/src/codegen/datatype/*.cc) - list(APPEND COMPILER_SRCS ${DATATYPE_SRCS}) - - file(GLOB COMPILER_VERILOG_SRCS ${TVM_DIR}/src/codegen/verilog/*.cc) - list(APPEND COMPILER_SRCS ${COMPILER_VERILOG_SRCS}) - - file(GLOB TOPI_SRCS ${TVM_DIR}/topi/src/*.cc) - - file(GLOB RUNTIME_SRCS - ${TVM_DIR}/src/runtime/*.cc - ${TVM_DIR}/src/runtime/vm/*.cc - ${TVM_DIR}/src/runtime/stub/*.cc - ${TVM_DIR}/src/runtime/stackvm/*.cc) - - - file(GLOB COMPILER_OFF_SRCS - ${TVM_DIR}/src/codegen/opt/build_*_off.cc) - set(USE_CUDA "OFF") - if(ENABLE_GPU) - list(REMOVE_ITEM COMPILER_OFF_SRCS - ${TVM_DIR}/src/codegen/opt/build_cuda_off.cc) - set(USE_CUDA "ON") - endif() - list(APPEND COMPILER_SRCS ${COMPILER_OFF_SRCS}) - # Module rules - include(${TVM_DIR}/cmake/modules/CUDA.cmake) - - set(CMAKE_C_FLAGS_AKG -pipe -Wall -fPIC -fstack-protector-all) - set(CMAKE_C_FLAGS_AKG ${CMAKE_C_FLAGS_AKG} -Wl,-z,relro,-z,now,-z,noexecstack) - - set(CMAKE_CXX_FLAGS_AKG -std=c++11 -pipe -Wall -fPIC -fstack-protector-all) - set(CMAKE_CXX_FLAGS_AKG ${CMAKE_CXX_FLAGS_AKG} -Wl,-z,relro,-z,now,-z,noexecstack) - - if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") - message("-- Build in Debug mode") - set(CMAKE_C_FLAGS_AKG ${CMAKE_C_FLAGS_AKG} -O0 -g -rdynamic) - set(CMAKE_CXX_FLAGS_AKG ${CMAKE_CXX_FLAGS_AKG} -O0 -g -rdynamic) - else() - message("-- Build in Release mode") - set(CMAKE_C_FLAGS_AKG ${CMAKE_C_FLAGS_AKG} -O2 -Werror) - set(CMAKE_CXX_FLAGS_AKG ${CMAKE_CXX_FLAGS_AKG} -O2 -Werror) - endif() - if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION - VERSION_GREATER 7.0) - set(CMAKE_CXX_FLAGS_AKG ${CMAKE_CXX_FLAGS_AKG} -faligned-new) - endif() - - add_library(akg OBJECT ${COMPILER_SRCS} 
${RUNTIME_SRCS} ${TOPI_SRCS}) - - target_link_libraries(akg ${TVM_LINKER_LIBS} ${TVM_RUNTIME_LINKER_LIBS}) - target_compile_options(akg PRIVATE - $<$:${CMAKE_C_FLAGS_AKG}> - $<$:${CMAKE_CXX_FLAGS_AKG}>) - target_include_directories(akg PRIVATE "${TVM_DIR}/topi/include") - - add_dependencies(_c_expression akg) - target_link_libraries(_c_expression PRIVATE akg) + target_link_libraries(_c_expression PRIVATE mindspore::tvm) endif() if(ENABLE_DUMP_PROTO) From f182edfd448ebe478d6bff58113978abeaf5f5aa Mon Sep 17 00:00:00 2001 From: Ziyan Date: Sat, 18 Apr 2020 12:50:47 +0800 Subject: [PATCH 345/367] fix lars base class type --- mindspore/nn/optim/lars.py | 7 +++---- mindspore/nn/optim/optimizer.py | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/mindspore/nn/optim/lars.py b/mindspore/nn/optim/lars.py index c0cb71cfa6..02538aa61a 100755 --- a/mindspore/nn/optim/lars.py +++ b/mindspore/nn/optim/lars.py @@ -21,8 +21,7 @@ from mindspore.common.parameter import Parameter from mindspore.ops import operations as P from mindspore.ops import composite as C from mindspore.ops import functional as F -from mindspore.nn.cell import Cell -from .optimizer import grad_scale +from .optimizer import grad_scale, Optimizer lars_opt = C.MultitypeFuncGraph("lars_opt") @@ -61,7 +60,7 @@ def _tensor_run_opt_v2(lars, weight_decay, learning_rate, gradient, weight, deca return gradient -class LARS(Cell): +class LARS(Optimizer): """ Implements the LARS algorithm with LARSUpdate Operator. 
@@ -98,7 +97,7 @@ class LARS(Cell): def __init__(self, optimizer, epsilon=1e-05, hyperpara=0.001, weight_decay=0.0, use_clip=False, decay_filter=lambda x: 'LayerNorm' not in x.name and 'bias' not in x.name, lars_filter=lambda x: 'LayerNorm' not in x.name and 'bias' not in x.name, loss_scale=1.0): - super(LARS, self).__init__(auto_prefix=False) + super(LARS, self).__init__(0.0, [Parameter(Tensor(0.0), name="trivial")]) self.opt = optimizer self.parameters = optimizer.parameters self.learning_rate = optimizer.learning_rate diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py index 6c6d14ed7a..00d3fd3b7b 100755 --- a/mindspore/nn/optim/optimizer.py +++ b/mindspore/nn/optim/optimizer.py @@ -57,7 +57,7 @@ class Optimizer(Cell): def __init__(self, learning_rate, parameters, weight_decay=0.0, loss_scale=1.0, decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name): - super(Optimizer, self).__init__() + super(Optimizer, self).__init__(auto_prefix=False) if isinstance(learning_rate, float): self.dynamic_lr = False self.gather = None From 9773cf024d39a03dd7bfdf08285b668bf3677850 Mon Sep 17 00:00:00 2001 From: chenjianping Date: Sat, 18 Apr 2020 06:22:23 +0000 Subject: [PATCH 346/367] fix windows building fail --- mindspore/ccsrc/common/trans.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/ccsrc/common/trans.cc b/mindspore/ccsrc/common/trans.cc index e0c70e1b01..b4e02c8fe6 100644 --- a/mindspore/ccsrc/common/trans.cc +++ b/mindspore/ccsrc/common/trans.cc @@ -920,7 +920,7 @@ bool NchwToC1hwncoc0(const FormatArgs &args, void *result) { : static_cast(SECUREC_MEM_MAX_LEN); size_t c_i = c0_i + c1_i * c0; size_t src_offset = (n_i * c * h * w + c_i * h * w + h_i * w + w_i) * size; - error_t ret; + errno_t ret; if (c_i < c && c0_i == co_i) { ret = memcpy_s(static_cast(result) + dst_offset, protected_size, static_cast(args.data) + src_offset, size); From 93e909594ddec13256b16f66e3b9d72553dff09c Mon Sep 17 00:00:00 
2001 From: chang zherui <760161589@qq.com> Date: Fri, 17 Apr 2020 15:50:30 +0800 Subject: [PATCH 347/367] checpoint support customize network --- mindspore/train/serialization.py | 56 +++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/mindspore/train/serialization.py b/mindspore/train/serialization.py index 90d8816094..8ec1b38804 100644 --- a/mindspore/train/serialization.py +++ b/mindspore/train/serialization.py @@ -224,42 +224,60 @@ def load_param_into_net(net, parameter_dict): msg = ("Argument parameter_dict should be a dict, but got {}.".format(type(parameter_dict))) raise TypeError(msg) - logger.info("Execute parameter into net process.") - param_name_net_not_have = [] + logger.info("Execute load parameter into net process.") for name in parameter_dict: - b_par_dict_have_par_of_net = False for _, param in net.parameters_and_names(): - if name == param.name: - b_par_dict_have_par_of_net = True + if name == param.name and param.layerwise_parallel: # layerwise parallel parameter data loaded from checkpoint file, # was a complete(merged) data, need to be splited - if param.layerwise_parallel: - new_param = parameter_dict[param.name] - _load_tensor_for_layerwise(new_param, param) + new_param = parameter_dict[param.name] + _load_tensor_for_layerwise(new_param, param) break - if not b_par_dict_have_par_of_net: - param_name_net_not_have.append(name) - param_name_param_dict_not_have = [] + param_not_load = [] for _, param in net.parameters_and_names(): if param.name in parameter_dict: new_param = parameter_dict[param.name] - if not isinstance(new_param, Parameter): logger.error("Failed to combine the net and the parameters.") msg = ("Argument parameter_dict element should be a Parameter, but got {}.".format(type(new_param))) raise TypeError(msg) _update_param(param, new_param) else: - param_name_param_dict_not_have.append(param.name) + param_not_load.append(param.name) + + if param_not_load: + 
_load_dismatch_prefix_params(net, parameter_dict, param_not_load) logger.debug("Params not matched(in net but not in parameter_dict):") - for paramname in param_name_param_dict_not_have: - logger.debug("%s", paramname) - logger.debug("Params not matched(in parameter_dict but not in net):") - for paramname in param_name_net_not_have: - logger.debug("%s", paramname) - logger.info("Load parameter into net process finish.") + for param_name in param_not_load: + logger.debug("%s", param_name) + + logger.info("Load parameter into net finish, {} parameters has not been loaded.".format(len(param_not_load))) + + +def _load_dismatch_prefix_params(net, parameter_dict, param_not_load): + """When some net parameter did not load, try to continue load.""" + prefix_name = "" + longest_name = param_not_load[0] + while prefix_name != longest_name and param_not_load: + logger.debug("Count: {} parameters has not been loaded, try to load continue.".format(len(param_not_load))) + longest_name = sorted(param_not_load, key=len, reverse=True)[0] + prefix_name = longest_name + for net_param_name in param_not_load: + for dict_name in parameter_dict: + if dict_name.endswith(net_param_name): + tmp_name = dict_name[:-len(net_param_name)] + prefix_name = prefix_name if len(prefix_name) < len(tmp_name) else tmp_name + + if prefix_name != longest_name: + logger.info("Remove parameter prefix name: {}, continue to load.".format(prefix_name)) + for _, param in net.parameters_and_names(): + new_param_name = prefix_name + param.name + if param.name in param_not_load and new_param_name in parameter_dict: + new_param = parameter_dict[new_param_name] + _update_param(param, new_param) + param_not_load.remove(param.name) def _save_graph(network, file_name): From 6775190e480197243de557c3811679072392e1b1 Mon Sep 17 00:00:00 2001 From: kswang Date: Sat, 18 Apr 2020 11:20:26 +0800 Subject: [PATCH 348/367] add cpu one hot --- .../device/cpu/kernel/one_hot_cpu_kernel.cc | 74 +++++++++++++++++ 
.../device/cpu/kernel/one_hot_cpu_kernel.h | 47 +++++++++++ .../device/cpu/kernel/reshape_cpu_kernel.h | 2 + tests/st/ops/cpu/test_one_hot_op.py | 82 +++++++++++++++++++ 4 files changed, 205 insertions(+) create mode 100644 mindspore/ccsrc/device/cpu/kernel/one_hot_cpu_kernel.cc create mode 100644 mindspore/ccsrc/device/cpu/kernel/one_hot_cpu_kernel.h create mode 100644 tests/st/ops/cpu/test_one_hot_op.py diff --git a/mindspore/ccsrc/device/cpu/kernel/one_hot_cpu_kernel.cc b/mindspore/ccsrc/device/cpu/kernel/one_hot_cpu_kernel.cc new file mode 100644 index 0000000000..e4b3f03f58 --- /dev/null +++ b/mindspore/ccsrc/device/cpu/kernel/one_hot_cpu_kernel.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "device/cpu/kernel/one_hot_cpu_kernel.h" +#include "device/cpu/cpu_device_address.h" + +namespace mindspore { +namespace device { +namespace cpu { +void OneHotCPUKernel::InitKernel(const CNodePtr &kernel_node) { + MS_EXCEPTION_IF_NULL(kernel_node); + auto output_shape = AnfAlgo::GetOutputInferShape(kernel_node, 0); + if (output_shape.size() < 2) { + MS_LOG(EXCEPTION) << "invalid output shape size: " << output_shape.size(); + } + int axis = AnfAlgo::GetNodeAttr(kernel_node, AXIS); + if (axis != -1 && IntToSize(axis) >= output_shape.size()) { + MS_LOG(EXCEPTION) << "invalid axis: " << axis; + } + if (axis == -1) { + axis_ = output_shape.size() - 1; + } else { + axis_ = IntToSize(axis); + } + depth_ = output_shape[axis_]; + stride_ = 1; + for (size_t i = axis_ + 1; i < output_shape.size(); ++i) { + stride_ *= output_shape[i]; + } +} + +bool OneHotCPUKernel::Launch(const std::vector &inputs, + const std::vector & /*workspace*/, + const std::vector &outputs) { + if (inputs.size() < 3 || outputs.empty()) { + MS_LOG(EXCEPTION) << "input or output invalid!"; + } + auto indices = reinterpret_cast(inputs[0]->addr); + auto on_value = reinterpret_cast(inputs[1]->addr)[0]; + auto off_value = reinterpret_cast(inputs[2]->addr)[0]; + auto output = reinterpret_cast(outputs[0]->addr); + size_t elem_num = inputs[0]->size / sizeof(int); + + for (size_t i = 0; i < elem_num; i++) { + size_t stride_num = i / stride_; + size_t output_index = stride_num * depth_ * stride_ + i % stride_; + size_t index = IntToSize(indices[i]); + for (size_t j = 0; j < depth_; j++) { + if (index == j) { + output[output_index] = on_value; + } else { + output[output_index] = off_value; + } + output_index += stride_; + } + } + + return true; +} +} // namespace cpu +} // namespace device +} // namespace mindspore diff --git a/mindspore/ccsrc/device/cpu/kernel/one_hot_cpu_kernel.h b/mindspore/ccsrc/device/cpu/kernel/one_hot_cpu_kernel.h new file mode 100644 index 0000000000..f41ac63265 --- 
/dev/null +++ b/mindspore/ccsrc/device/cpu/kernel/one_hot_cpu_kernel.h @@ -0,0 +1,47 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_DEVICE_CPU_ONE_HOT_CPU_KERNEL_H_ +#define MINDSPORE_CCSRC_DEVICE_CPU_ONE_HOT_CPU_KERNEL_H_ +#include +#include +#include "device/cpu/cpu_kernel.h" +#include "device/cpu/cpu_kernel_factory.h" + +namespace mindspore { +namespace device { +namespace cpu { +class OneHotCPUKernel : public CPUKernel { + public: + OneHotCPUKernel() = default; + ~OneHotCPUKernel() override = default; + + void InitKernel(const CNodePtr &kernel_node) override; + + bool Launch(const std::vector &inputs, const std::vector &workspace, + const std::vector &outputs) override; + + private: + size_t depth_; + size_t stride_; + size_t axis_; +}; + +MS_REG_CPU_KERNEL(OneHot, OneHotCPUKernel); +} // namespace cpu +} // namespace device +} // namespace mindspore + +#endif // MINDSPORE_CCSRC_DEVICE_CPU_ONE_HOT_CPU_KERNEL_H_ diff --git a/mindspore/ccsrc/device/cpu/kernel/reshape_cpu_kernel.h b/mindspore/ccsrc/device/cpu/kernel/reshape_cpu_kernel.h index 908c3df2d9..d371e3a7ac 100644 --- a/mindspore/ccsrc/device/cpu/kernel/reshape_cpu_kernel.h +++ b/mindspore/ccsrc/device/cpu/kernel/reshape_cpu_kernel.h @@ -35,6 +35,8 @@ class ReshapeCPUKernel : public CPUKernel { }; MS_REG_CPU_KERNEL(Reshape, ReshapeCPUKernel); +MS_REG_CPU_KERNEL(Flatten, ReshapeCPUKernel); 
+MS_REG_CPU_KERNEL(ExpandDims, ReshapeCPUKernel); } // namespace cpu } // namespace device } // namespace mindspore diff --git a/tests/st/ops/cpu/test_one_hot_op.py b/tests/st/ops/cpu/test_one_hot_op.py new file mode 100644 index 0000000000..3f2c54b3cb --- /dev/null +++ b/tests/st/ops/cpu/test_one_hot_op.py @@ -0,0 +1,82 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import pytest +from mindspore import Tensor +from mindspore.ops import operations as P +import mindspore.nn as nn +from mindspore.common.api import ms_function +import numpy as np +import mindspore.context as context + +context.set_context(device_target='CPU') + + +class NetOneHot(nn.Cell): + def __init__(self): + super(NetOneHot, self).__init__() + self.on_value = 2.0 + self.off_value = 3.0 + + self.depth_1 = 6 + self.one_hot_1 = nn.OneHot(-1, self.depth_1, self.on_value, self.off_value) + + self.depth_2 = 4 + self.one_hot_2 = nn.OneHot(0, self.depth_1, self.on_value, self.off_value) + self.one_hot_3 = nn.OneHot(0, self.depth_2, self.on_value, self.off_value) + self.one_hot_4 = nn.OneHot(1, self.depth_1, self.on_value, self.off_value) + + @ms_function + def construct(self, indices1, indices2, indices3, indices4): + return (self.one_hot_1(indices1), self.one_hot_2(indices2), + self.one_hot_3(indices3), self.one_hot_4(indices4)) + + +@pytest.mark.level0 
+@pytest.mark.platform_x86_gpu_training +@pytest.mark.env_onecard +def test_one_hot(): + one_hot = NetOneHot() + indices1 = Tensor(np.array([[0, 1], [4, 5], [2, 6]]).astype(np.int32)) + indices2 = Tensor(np.array([1, 2, 3]).astype(np.int32)) + indices3 = Tensor(np.array([[0, 1], [1, 0]]).astype(np.int32)) + indices4 = Tensor(np.array([[0, 1], [4, 5], [2, 6]]).astype(np.int32)) + output = one_hot(indices1, indices2, indices3, indices4) + expect_0 = np.array([ + [[2., 3., 3., 3., 3., 3.], [3., 2., 3., 3., 3., 3.]], + [[3., 3., 3., 3., 2., 3.], [3., 3., 3., 3., 3., 2.]], + [[3., 3., 2., 3., 3., 3.], [3., 3., 3., 3., 3., 3.]] + ]).astype(np.float32) + expect_1 = np.array([ + [3., 3., 3.], + [2., 3., 3.], + [3., 2., 3.], + [3., 3., 2.], + [3., 3., 3.], + [3., 3., 3.] + ]).astype(np.float32) + expect_2 = np.array([ + [[2., 3.], [3., 2.]], [[3., 2.], [2., 3.]], [[3., 3.], [3., 3.]], + [[3., 3.], [3., 3.]] + ]).astype(np.float32) + expect_3 = np.array([ + [[2., 3.], [3., 2.], [3., 3.], [3., 3.], [3., 3.], [3., 3.]], + [[3., 3.], [3., 3.], [3., 3.], [3., 3.], [2., 3.], [3., 2.]], + [[3., 3.], [3., 3.], [2., 3.], [3., 3.], [3., 3.], [3., 3.]] + ]).astype(np.float32) + assert (output[0].asnumpy() == expect_0).all() + assert (output[1].asnumpy() == expect_1).all() + assert (output[2].asnumpy() == expect_2).all() + assert (output[3].asnumpy() == expect_3).all() From 638b11909907338fa7acb0a59130c2f6650eb915 Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Fri, 17 Apr 2020 16:05:21 +0800 Subject: [PATCH 349/367] windows sqlite patch --- CMakeLists.txt | 3 + cmake/external_libs/sqlite.cmake | 22 ++- cmake/package.cmake | 1 + cmake/utils.cmake | 10 +- .../patch/sqlite/sqlite.windows.patch001 | 133 ++++++++++++++++++ 5 files changed, 152 insertions(+), 17 deletions(-) create mode 100644 third_party/patch/sqlite/sqlite.windows.patch001 diff --git a/CMakeLists.txt b/CMakeLists.txt index 9c6ee73687..46804c8dde 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -20,6 +20,9 @@ 
set(PYBIND11_CPP_STANDARD -std=c++17) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OPTION_CXX_FLAGS}") find_package(Threads) +find_package(Patch) +message(PATCH_EXECUTABLE = ${Patch_EXECUTABLE}) + include(${CMAKE_SOURCE_DIR}/cmake/mind_expression.cmake) include_directories(${CMAKE_CURRENT_SOURCE_DIR}) include_directories(${CMAKE_CURRENT_SOURCE_DIR}/third_party/flatbuffers/include) diff --git a/cmake/external_libs/sqlite.cmake b/cmake/external_libs/sqlite.cmake index 69692f284d..aa6f236917 100644 --- a/cmake/external_libs/sqlite.cmake +++ b/cmake/external_libs/sqlite.cmake @@ -1,18 +1,12 @@ if (WIN32) - mindspore_add_pkg(sqlite-head - VER 3.31.1 - HEAD_ONLY ./ - URL https://sqlite.org/2020/sqlite-amalgamation-3310100.zip - MD5 2b7bfcdd97dc281903a9aee966213fe4) - include_directories(${sqlite-head_INC}) mindspore_add_pkg(sqlite - VER 3.31.1 - LIBS sqlite3 - LIB_PATH ./ - HEAD_ONLY ./ - RELEASE ON - URL https://sqlite.org/2020/sqlite-dll-win64-x64-3310100.zip - MD5 662c9d2b05467d590ba5c0443e7fd6bd) + VER 3.31.1 + LIBS sqlite3 + URL https://sqlite.org/2020/sqlite-amalgamation-3310100.zip + MD5 2b7bfcdd97dc281903a9aee966213fe4 + PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/sqlite/sqlite.windows.patch001 + CMAKE_OPTION " " + ) else () set(sqlite_USE_STATIC_LIBS ON) @@ -30,7 +24,7 @@ else () MD5 5f4e7b4016c15f4fb5855615279819da PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/sqlite/sqlite.patch001 CONFIGURE_COMMAND ./configure --enable-shared=no --disable-tcl --disable-editline --enable-json1) - include_directories(${sqlite_INC}) endif () +include_directories(${sqlite_INC}) add_library(mindspore::sqlite ALIAS sqlite::sqlite3) diff --git a/cmake/package.cmake b/cmake/package.cmake index 7d1fdc6d8a..531dff29ca 100644 --- a/cmake/package.cmake +++ b/cmake/package.cmake @@ -38,6 +38,7 @@ if (CMAKE_SYSTEM_NAME MATCHES "Windows") set(glog_LIBPATH ${glog_LIBPATH}/../bin/) set(opencv_LIBPATH ${opencv_LIBPATH}/../bin/) set(jpeg_turbo_LIBPATH ${jpeg_turbo_LIBPATH}/../bin/) + 
set(sqlite_LIBPATH ${sqlite_LIBPATH}/../bin/) else () set(INSTALL_LIB_DIR "lib") endif () diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 21a766dc8c..894a0de1b8 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -282,12 +282,16 @@ function(mindspore_add_pkg pkg_name ) message("${pkg_name}_SOURCE_DIR : ${${pkg_name}_SOURCE_DIR}") foreach(_PATCH_FILE ${PKG_PATCHES}) - message("patching ${${pkg_name}_SOURCE_DIR} -p1 < ${_PATCH_FILE}") - execute_process(COMMAND patch -p1 INPUT_FILE ${_PATCH_FILE} + get_filename_component(_PATCH_FILE_NAME ${_PATCH_FILE} NAME) + set(_LF_PATCH_FILE ${CMAKE_BINARY_DIR}/_ms_patch/${_PATCH_FILE_NAME}) + configure_file(${_PATCH_FILE} ${_LF_PATCH_FILE} NEWLINE_STYLE LF) + + message("patching ${${pkg_name}_SOURCE_DIR} -p1 < ${_LF_PATCH_FILE}") + execute_process(COMMAND ${Patch_EXECUTABLE} -p1 INPUT_FILE ${_LF_PATCH_FILE} WORKING_DIRECTORY ${${pkg_name}_SOURCE_DIR} RESULT_VARIABLE Result) if(NOT Result EQUAL "0") - message(FATAL_ERROR "Failed patch: ${_PATCH_FILE}") + message(FATAL_ERROR "Failed patch: ${_LF_PATCH_FILE}") endif() endforeach(_PATCH_FILE) diff --git a/third_party/patch/sqlite/sqlite.windows.patch001 b/third_party/patch/sqlite/sqlite.windows.patch001 new file mode 100644 index 0000000000..f92548e15e --- /dev/null +++ b/third_party/patch/sqlite/sqlite.windows.patch001 @@ -0,0 +1,133 @@ +diff -uprN sqlite-amalgamation-3310100/CMakeLists.txt sqlite-patch/CMakeLists.txt +--- sqlite-amalgamation-3310100/CMakeLists.txt 1970-01-01 08:00:00.000000000 +0800 ++++ sqlite-patch/CMakeLists.txt 2020-04-18 09:16:28.258637600 +0800 +@@ -0,0 +1,6 @@ ++cmake_minimum_required(VERSION 3.14) ++project (Sqlite[C]) ++add_library(sqlite3 SHARED sqlite3.c) ++set_target_properties(sqlite3 PROPERTIES PUBLIC_HEADER "sqlite3.h;sqlite3ext.h") ++include(GNUInstallDirs) ++install(TARGETS sqlite3 PUBLIC_HEADER) +diff -uprN sqlite-amalgamation-3310100/sqlite3.c sqlite-patch/sqlite3.c +--- sqlite-amalgamation-3310100/sqlite3.c 2020-01-28 
03:25:14.000000000 +0800 ++++ sqlite-patch/sqlite3.c 2020-04-17 15:40:21.005440300 +0800 +@@ -1167,7 +1167,7 @@ extern "C" { + */ + #define SQLITE_VERSION "3.31.1" + #define SQLITE_VERSION_NUMBER 3031001 +-#define SQLITE_SOURCE_ID "2020-01-27 19:55:54 3bfa9cc97da10598521b342961df8f5f68c7388fa117345eeb516eaa837bb4d6" ++#define SQLITE_SOURCE_ID "2020-02-17 19:25:07 387240fc85ea3549ff8a6ed060ef07c6184548457fb91cd7c6fc39ddb678alt1" + + /* + ** CAPI3REF: Run-Time Library Version Numbers +@@ -17428,8 +17428,11 @@ struct Table { + */ + #ifndef SQLITE_OMIT_VIRTUALTABLE + # define IsVirtual(X) ((X)->nModuleArg) ++# define ExprIsVtab(X) \ ++ ((X)->op==TK_COLUMN && (X)->y.pTab!=0 && (X)->y.pTab->nModuleArg) + #else + # define IsVirtual(X) 0 ++# define ExprIsVtab(X) 0 + #endif + + /* +@@ -104133,19 +104136,25 @@ static int impliesNotNullRow(Walker *pWa + case TK_LT: + case TK_LE: + case TK_GT: +- case TK_GE: ++ case TK_GE: { ++ Expr *pLeft = pExpr->pLeft; ++ Expr *pRight = pExpr->pRight; + testcase( pExpr->op==TK_EQ ); + testcase( pExpr->op==TK_NE ); + testcase( pExpr->op==TK_LT ); + testcase( pExpr->op==TK_LE ); + testcase( pExpr->op==TK_GT ); + testcase( pExpr->op==TK_GE ); +- if( (pExpr->pLeft->op==TK_COLUMN && IsVirtual(pExpr->pLeft->y.pTab)) +- || (pExpr->pRight->op==TK_COLUMN && IsVirtual(pExpr->pRight->y.pTab)) ++ /* The y.pTab=0 assignment in wherecode.c always happens after the ++ ** impliesNotNullRow() test */ ++ if( (pLeft->op==TK_COLUMN && ALWAYS(pLeft->y.pTab!=0) ++ && IsVirtual(pLeft->y.pTab)) ++ || (pRight->op==TK_COLUMN && ALWAYS(pRight->y.pTab!=0) ++ && IsVirtual(pRight->y.pTab)) + ){ +- return WRC_Prune; ++ return WRC_Prune; + } +- ++ } + default: + return WRC_Continue; + } +@@ -142591,7 +142600,8 @@ static int isAuxiliaryVtabOperator( + ** MATCH(expression,vtab_column) + */ + pCol = pList->a[1].pExpr; +- if( pCol->op==TK_COLUMN && IsVirtual(pCol->y.pTab) ){ ++ testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 ); ++ if( ExprIsVtab(pCol) ){ + for(i=0; 
iu.zToken, aOp[i].zOp)==0 ){ + *peOp2 = aOp[i].eOp2; +@@ -142613,7 +142623,8 @@ static int isAuxiliaryVtabOperator( + ** with function names in an arbitrary case. + */ + pCol = pList->a[0].pExpr; +- if( pCol->op==TK_COLUMN && IsVirtual(pCol->y.pTab) ){ ++ testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 ); ++ if( ExprIsVtab(pCol) ){ + sqlite3_vtab *pVtab; + sqlite3_module *pMod; + void (*xNotUsed)(sqlite3_context*,int,sqlite3_value**); +@@ -142636,10 +142647,12 @@ static int isAuxiliaryVtabOperator( + int res = 0; + Expr *pLeft = pExpr->pLeft; + Expr *pRight = pExpr->pRight; +- if( pLeft->op==TK_COLUMN && IsVirtual(pLeft->y.pTab) ){ ++ testcase( pLeft->op==TK_COLUMN && pLeft->y.pTab==0 ); ++ if( ExprIsVtab(pLeft) ){ + res++; + } +- if( pRight && pRight->op==TK_COLUMN && IsVirtual(pRight->y.pTab) ){ ++ testcase( pRight && pRight->op==TK_COLUMN && pRight->y.pTab==0 ); ++ if( pRight && ExprIsVtab(pRight) ){ + res++; + SWAP(Expr*, pLeft, pRight); + } +@@ -223667,7 +223680,7 @@ static void fts5SourceIdFunc( + ){ + assert( nArg==0 ); + UNUSED_PARAM2(nArg, apUnused); +- sqlite3_result_text(pCtx, "fts5: 2020-01-27 19:55:54 3bfa9cc97da10598521b342961df8f5f68c7388fa117345eeb516eaa837bb4d6", -1, SQLITE_TRANSIENT); ++ sqlite3_result_text(pCtx, "fts5: 2020-02-17 19:25:07 abc473fb8fb999005dc79a360e34f97b3b25429decf1820dd2afa5c19577753d", -1, SQLITE_TRANSIENT); + } + + /* +@@ -228440,9 +228453,9 @@ SQLITE_API int sqlite3_stmt_init( + #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_STMTVTAB) */ + + /************** End of stmt.c ************************************************/ +-#if __LINE__!=228443 ++#if __LINE__!=228456 + #undef SQLITE_SOURCE_ID +-#define SQLITE_SOURCE_ID "2020-01-27 19:55:54 3bfa9cc97da10598521b342961df8f5f68c7388fa117345eeb516eaa837balt2" ++#define SQLITE_SOURCE_ID "2020-02-17 19:25:07 387240fc85ea3549ff8a6ed060ef07c6184548457fb91cd7c6fc39ddb678alt2" + #endif + /* Return the source-id for this library */ + SQLITE_API const char 
*sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; } +diff -uprN sqlite-amalgamation-3310100/sqlite3.h sqlite-patch/sqlite3.h +--- sqlite-amalgamation-3310100/sqlite3.h 2020-01-28 03:25:14.000000000 +0800 ++++ sqlite-patch/sqlite3.h 2020-04-17 15:40:21.005440300 +0800 +@@ -125,7 +125,7 @@ extern "C" { + */ + #define SQLITE_VERSION "3.31.1" + #define SQLITE_VERSION_NUMBER 3031001 +-#define SQLITE_SOURCE_ID "2020-01-27 19:55:54 3bfa9cc97da10598521b342961df8f5f68c7388fa117345eeb516eaa837bb4d6" ++#define SQLITE_SOURCE_ID "2020-02-17 19:25:07 387240fc85ea3549ff8a6ed060ef07c6184548457fb91cd7c6fc39ddb678alt1" + + /* + ** CAPI3REF: Run-Time Library Version Numbers From 5c9791a802507a7a387810845c65a36cd2a1b551 Mon Sep 17 00:00:00 2001 From: liuxiao Date: Wed, 15 Apr 2020 17:50:34 +0800 Subject: [PATCH 350/367] Add Abs\AbsGrad\Sign\SmoothL1Loss\SmoothL1LossGrad and modify TopKV2->TopK for VM --- mindspore/ccsrc/kernel/tbe/tbe_adapter.cc | 1 - mindspore/ops/_op_impl/tbe/__init__.py | 7 ++- mindspore/ops/_op_impl/tbe/abs.py | 41 ++++++++++++++ mindspore/ops/_op_impl/tbe/abs_grad.py | 44 +++++++++++++++ mindspore/ops/_op_impl/tbe/sign.py | 41 ++++++++++++++ mindspore/ops/_op_impl/tbe/smooth_l1_loss.py | 44 +++++++++++++++ .../ops/_op_impl/tbe/smooth_l1_loss_grad.py | 45 +++++++++++++++ .../ops/_op_impl/tbe/{topkv2.py => top_k.py} | 14 ++--- mindspore/ops/op_info_register.py | 1 + .../test_tbe_ops/test_smooth_l1_loss.py | 42 ++++++++++++++ .../test_tbe_ops/test_smooth_l1_loss_grad.py | 55 +++++++++++++++++++ .../{test_topkv2.py => test_topk.py} | 2 +- 12 files changed, 327 insertions(+), 10 deletions(-) create mode 100644 mindspore/ops/_op_impl/tbe/abs.py create mode 100644 mindspore/ops/_op_impl/tbe/abs_grad.py create mode 100644 mindspore/ops/_op_impl/tbe/sign.py create mode 100644 mindspore/ops/_op_impl/tbe/smooth_l1_loss.py create mode 100644 mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py rename mindspore/ops/_op_impl/tbe/{topkv2.py => top_k.py} (86%) create mode 100644 
tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss.py create mode 100644 tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss_grad.py rename tests/st/ops/davinci/test_tbe_ops/{test_topkv2.py => test_topk.py} (97%) diff --git a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc index 9e4553e057..3fda554759 100644 --- a/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc +++ b/mindspore/ccsrc/kernel/tbe/tbe_adapter.cc @@ -42,7 +42,6 @@ static std::map tbe_func_adapter_map = { {"depthwise_conv2d_native", "depthwise_conv2d"}, {"depthwise_conv2d_native_backprop_filter", "depthwise_conv2d_backprop_filter_d"}, {"depthwise_conv2d_native_backprop_input", "depthwise_conv2d_backprop_input_d"}, - {"top_kv2", "top_k"}, {"scatter_nd", "scatter_nd_d"}, {"tile", "tile_d"}, {"gather_v2", "gather_v2_d"}, diff --git a/mindspore/ops/_op_impl/tbe/__init__.py b/mindspore/ops/_op_impl/tbe/__init__.py index 340cf9efe3..2cffc37491 100644 --- a/mindspore/ops/_op_impl/tbe/__init__.py +++ b/mindspore/ops/_op_impl/tbe/__init__.py @@ -14,6 +14,8 @@ # ============================================================================ """tbe ops""" +from .abs import _abs_tbe +from .abs_grad import _abs_grad_tbe from .adam_apply_one_with_decay import _adam_apply_one_with_decay_tbe from .add import _add_tbe from .add_n import _add_n_tbe @@ -49,7 +51,7 @@ from .sigmoid_cross_entropy_with_logits import _sigmoid_cross_entropy_with_logit from .sigmoid_cross_entropy_with_logits_grad import _sigmoid_cross_entropy_with_logits_grad_tbe from .tensor_add import _tensor_add_tbe from .trans_data import _trans_data_tbe -from .topkv2 import _topk_v2_tbe +from .top_k import _top_k_tbe from .matmul import _matmul_tbe from .sub import _sub_tbe from .reduce_mean_d import _reduce_mean_d_tbe @@ -107,6 +109,7 @@ from .minimum_grad import _minimum_grad_tbe from .maximum_grad import _maximum_grad_tbe from .concat import _concat_tbe from .slice import _slice_tbe +from .sign import _sign_tbe from 
.greater import _greater_tbe from .clip_by_norm_no_div_sum import _clip_by_norm_no_div_sum_tbe from .clip_by_value import _clip_by_value_tbe @@ -130,6 +133,8 @@ from .resize_nearest_neighbor_grad_d import _resize_nearest_neighbor_grad_d_tbe from .pad_d import _pad_d_tbe from .arg_max_with_value import _arg_max_with_value_tbe from .arg_min_with_value import _arg_min_with_value_tbe +from .smooth_l1_loss import _smooth_l1_loss_tbe +from .smooth_l1_loss_grad import _smooth_l1_loss_grad_tbe from .fused_mul_add import _fused_mul_add_tbe from .fused_mul_add_n import _fused_mul_add_n_tbe from .fused_mul_apply_momentum import _fused_mul_apply_momentum_tbe diff --git a/mindspore/ops/_op_impl/tbe/abs.py b/mindspore/ops/_op_impl/tbe/abs.py new file mode 100644 index 0000000000..30a75812bd --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/abs.py @@ -0,0 +1,41 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""Abs op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +abs_op_info = TBERegOp("Abs") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("abs.so") \ + .compute_cost(10) \ + .kernel_name("abs") \ + .partial_flag(True) \ + .op_pattern("formatAgnostic") \ + .input(0, "x", None, "required", None) \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .get_op_info() + + +@op_info_register(abs_op_info) +def _abs_tbe(): + """Abs TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/abs_grad.py b/mindspore/ops/_op_impl/tbe/abs_grad.py new file mode 100644 index 0000000000..ba630f6570 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/abs_grad.py @@ -0,0 +1,44 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ + +"""AbsGrad op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +abs_grad_op_info = TBERegOp("AbsGrad") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("abs_grad.so") \ + .compute_cost(10) \ + .kernel_name("abs_grad") \ + .partial_flag(True) \ + .op_pattern("formatAgnostic") \ + .input(0, "y", None, "required", None) \ + .input(1, "dy", None, "required", None) \ + .output(0, "z", False, "required", "all") \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .get_op_info() + + +@op_info_register(abs_grad_op_info) +def _abs_grad_tbe(): + """AbsGrad TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/sign.py b/mindspore/ops/_op_impl/tbe/sign.py new file mode 100644 index 0000000000..823715aa9f --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/sign.py @@ -0,0 +1,41 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""Sign op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +sign_op_info = TBERegOp("Sign") \ + .fusion_type("ELEMWISE") \ + .async_flag(False) \ + .binfile_name("sign.so") \ + .compute_cost(10) \ + .kernel_name("sign") \ + .partial_flag(True) \ + .op_pattern("formatAgnostic") \ + .input(0, "x", None, "required", None) \ + .output(0, "y", True, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.I32_Default, DataType.I32_Default) \ + .dtype_format(DataType.I32_5HD, DataType.I32_5HD) \ + .get_op_info() + + +@op_info_register(sign_op_info) +def _sign_tbe(): + """Sign TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/smooth_l1_loss.py b/mindspore/ops/_op_impl/tbe/smooth_l1_loss.py new file mode 100644 index 0000000000..3723b30c04 --- /dev/null +++ b/mindspore/ops/_op_impl/tbe/smooth_l1_loss.py @@ -0,0 +1,44 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""SmoothL1Loss op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +smooth_l1_loss_op_info = TBERegOp("SmoothL1Loss") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("smooth_l1_loss.so") \ + .compute_cost(10) \ + .kernel_name("smooth_l1_loss") \ + .partial_flag(True) \ + .attr("sigma", "required", "float", "all") \ + .input(0, "predict", False, "required", "all") \ + .input(1, "label", False, "required", "all") \ + .output(0, "loss", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \ + .get_op_info() + + +@op_info_register(smooth_l1_loss_op_info) +def _smooth_l1_loss_tbe(): + """SmoothL1Loss TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py b/mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py new file mode 100644 index 0000000000..fa1ae1ec34 --- /dev/null +++ 
b/mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py @@ -0,0 +1,45 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +"""SmoothL1LossGrad op""" +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + +smooth_l1_loss_grad_op_info = TBERegOp("SmoothL1LossGrad") \ + .fusion_type("OPAQUE") \ + .async_flag(False) \ + .binfile_name("smooth_l1_loss_grad.so") \ + .compute_cost(10) \ + .kernel_name("smooth_l1_loss_grad") \ + .partial_flag(True) \ + .attr("sigma", "required", "float", "all") \ + .input(0, "predict", False, "required", "all") \ + .input(1, "label", False, "required", "all") \ + .input(2, "dout", False, "required", "all") \ + .output(0, "loss", False, "required", "all") \ + .dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \ + .dtype_format(DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD, DataType.F16_5HD) \ + .dtype_format(DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ, DataType.F16_FracZ) \ + .dtype_format(DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0, DataType.F16_C1HWNCoC0) \ + .dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD) \ + .dtype_format(DataType.F32_FracZ, 
DataType.F32_FracZ, DataType.F32_FracZ, DataType.F32_FracZ) \ + .dtype_format(DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0, DataType.F32_C1HWNCoC0) \ + .get_op_info() + + +@op_info_register(smooth_l1_loss_grad_op_info) +def _smooth_l1_loss_grad_tbe(): + """SmoothL1LossGrad TBE register""" + return diff --git a/mindspore/ops/_op_impl/tbe/topkv2.py b/mindspore/ops/_op_impl/tbe/top_k.py similarity index 86% rename from mindspore/ops/_op_impl/tbe/topkv2.py rename to mindspore/ops/_op_impl/tbe/top_k.py index a03871f8b7..92733bbf46 100644 --- a/mindspore/ops/_op_impl/tbe/topkv2.py +++ b/mindspore/ops/_op_impl/tbe/top_k.py @@ -13,15 +13,15 @@ # limitations under the License. # ============================================================================ -"""TopKV2 op""" +"""TopK op""" from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType -top_k_v2_op_info = TBERegOp("TopKV2") \ +top_k_op_info = TBERegOp("TopK") \ .fusion_type("OPAQUE") \ .async_flag(False) \ - .binfile_name("top_k_v2.so") \ + .binfile_name("top_k.so") \ .compute_cost(10) \ - .kernel_name("top_k_v2") \ + .kernel_name("top_k") \ .partial_flag(True) \ .attr("k", "required", "int", "all")\ .attr("sorted", "required", "bool", "all")\ @@ -33,7 +33,7 @@ top_k_v2_op_info = TBERegOp("TopKV2") \ .get_op_info() -@op_info_register(top_k_v2_op_info) -def _topk_v2_tbe(): - """TopKV2 TBE register""" +@op_info_register(top_k_op_info) +def _top_k_tbe(): + """TopK TBE register""" return diff --git a/mindspore/ops/op_info_register.py b/mindspore/ops/op_info_register.py index 28821b621e..e4b0bfdbfe 100644 --- a/mindspore/ops/op_info_register.py +++ b/mindspore/ops/op_info_register.py @@ -599,3 +599,4 @@ class DataType: F32_NCHW = ("float32", "NCHW") F32_NHWC = ("float32", "NHWC") F32_HWCN = ("float32", "HWCN") + \ No newline at end of file diff --git a/tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss.py b/tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss.py new file 
mode 100644 index 0000000000..cc0c0e0fc2 --- /dev/null +++ b/tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss.py @@ -0,0 +1,42 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import numpy as np +import mindspore.nn as nn +import mindspore.context as context +from mindspore import Tensor +from mindspore.ops import operations as P +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +class Net(nn.Cell): + def __init__(self, sigma=1.0): + super(Net, self).__init__() + self.SmoothL1Loss = P.SmoothL1Loss(sigma) + + def construct(self, pred, gt): + return self.SmoothL1Loss(pred, gt) + + +def test_net(): + pred = np.random.randn(2, 4).astype(np.float32) + gt = np.random.randn(2, 4).astype(np.float32) + smooth_l1_loss = Net() + loss = smooth_l1_loss(Tensor(pred), Tensor(gt)) + print("------------- input ---------------") + print("predict:\n", pred) + print("grount truth:\n", gt) + print("------------- output ---------------") + print("loss:\n", loss.asnumpy()) diff --git a/tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss_grad.py b/tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss_grad.py new file mode 100644 index 0000000000..1ab9d998a1 --- /dev/null +++ b/tests/st/ops/davinci/test_tbe_ops/test_smooth_l1_loss_grad.py @@ -0,0 +1,55 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +import numpy as np +import mindspore.nn as nn +import mindspore.context as context +from mindspore.ops.composite import GradOperation +from mindspore import Tensor +from mindspore.ops import operations as P + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +class Net(nn.Cell): + def __init__(self, sigma=1.0): + super(Net, self).__init__() + self.SmoothL1Loss = P.SmoothL1Loss(sigma) + + def construct(self, pred, gt): + return self.SmoothL1Loss(pred, gt) + +class Grad(nn.Cell): + def __init__(self, network): + super(Grad, self).__init__() + self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) + self.network = network + + def construct(self, pred, gt, dout): + return self.grad(self.network)(pred, gt, dout) + + +def test_net(): + pred = np.random.randn(2, 4).astype(np.float32) + gt = np.random.randn(2, 4).astype(np.float32) + dout = np.random.randn(2, 4).astype(np.float32) + smooth_l1_loss_grad = Grad(Net()) + output = smooth_l1_loss_grad(Tensor(pred), Tensor(gt), Tensor(dout)) + print("------------- input ---------------") + print("predict:\n", pred) + print("grount truth:\n", gt) + print("dout:\n", dout) + print("------------- output ---------------") + print("predict grad:\n", output[0].asnumpy()) diff --git a/tests/st/ops/davinci/test_tbe_ops/test_topkv2.py b/tests/st/ops/davinci/test_tbe_ops/test_topk.py similarity index 97% 
rename from tests/st/ops/davinci/test_tbe_ops/test_topkv2.py rename to tests/st/ops/davinci/test_tbe_ops/test_topk.py index a505865637..275ef50038 100644 --- a/tests/st/ops/davinci/test_tbe_ops/test_topkv2.py +++ b/tests/st/ops/davinci/test_tbe_ops/test_topk.py @@ -24,7 +24,7 @@ context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") class Net(nn.Cell): def __init__(self, k): super(Net, self).__init__() - self.topk = P.TopK() + self.topk = P.TopK(True) self.k = k def construct(self, x): From 8a2a1eb310560d5ce1d4ceba5dccf6cf530b3695 Mon Sep 17 00:00:00 2001 From: simson <526422051@qq.com> Date: Sat, 18 Apr 2020 15:31:50 +0800 Subject: [PATCH 351/367] fix the error in example of mindspore.model --- mindspore/train/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mindspore/train/model.py b/mindspore/train/model.py index 46e4f421f7..a9f59207bb 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -71,7 +71,7 @@ class Model: >>> self.bn = nn.BatchNorm2d(64) >>> self.relu = nn.ReLU() >>> self.flatten = nn.Flatten() - >>> self.fc = nn.Dense(64*222*222, 3) # padding=0 + >>> self.fc = nn.Dense(64*224*224, 12) # padding=0 >>> >>> def construct(self, x): >>> x = self.conv(x) From d4822137f36c706ced937b6c4ee5028f77cbe181 Mon Sep 17 00:00:00 2001 From: xiefangqi Date: Sat, 18 Apr 2020 15:21:06 +0800 Subject: [PATCH 352/367] fix segment bug on windows --- mindspore/ccsrc/CMakeLists.txt | 20 +++++------------ mindspore/ccsrc/dataset/CMakeLists.txt | 7 ++++-- .../dataset/engine/datasetops/dataset_op.cc | 4 ++++ .../ccsrc/dataset/engine/datasetops/map_op.cc | 22 +++++++++++++++++++ .../ccsrc/dataset/engine/datasetops/map_op.h | 4 ++++ mindspore/ccsrc/mindrecord/CMakeLists.txt | 2 +- 6 files changed, 42 insertions(+), 17 deletions(-) diff --git a/mindspore/ccsrc/CMakeLists.txt b/mindspore/ccsrc/CMakeLists.txt index e227969162..8c33b9051c 100644 --- a/mindspore/ccsrc/CMakeLists.txt +++ b/mindspore/ccsrc/CMakeLists.txt @@ -156,7 
+156,7 @@ file(GLOB_RECURSE MINDSPORE_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ) if (ENABLE_CPU) list(REMOVE_ITEM MINDSPORE_SRC_LIST "device/gpu/distribution/collective_init.cc") - if (WIN32) + if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") list(REMOVE_ITEM MINDSPORE_SRC_LIST "kernel/kernel_query.cc") endif() endif() @@ -337,19 +337,19 @@ if (ENABLE_D) target_link_libraries(mindspore mindspore::protobuf) endif() -if (WIN32) +if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") target_link_libraries(mindspore ${PYTHON_LIBRARIES} mindspore_gvar) endif() # set c_expression building -if (WIN32) -set(PYTHON_MODULE_SOURCE ${MS_GVAR_SRC_LIST} +if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") + set(PYTHON_MODULE_SOURCE ${MS_GVAR_SRC_LIST} pipeline/init.cc kernel/oplib/oplib.cc ${MINDSPORE_SRC_LIST} ${MS_STEPS_SRC_LIST} ${MS_CCE_SRC_LIST} ${MS_AICPU_SRC_LIST} ${MS_TASKINFO_LIST} ${MS_RT_SRC_LIST} ${GPU_NCCL_LIST} ${MS_HCCL_SRC_LIST} ${MS_PREDICT_SRC_LIST} ${CPU_SRC_LIST} ${MEM_REUSE_SRC_LIST} ${GPU_KERNEL_SRC_LIST}) else() -set(PYTHON_MODULE_SOURCE + set(PYTHON_MODULE_SOURCE pipeline/init.cc kernel/oplib/oplib.cc ${MS_STEPS_SRC_LIST} ${MS_CCE_SRC_LIST} ${MS_AICPU_SRC_LIST} ${MS_TASKINFO_LIST} ${MS_RT_SRC_LIST} @@ -426,13 +426,5 @@ endif() if(ENABLE_MINDDATA) add_subdirectory(mindrecord) - if (WIN32) - set(_md_tmp_CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE}) - set(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O0 -Wl,--allow-shlib-undefined -DHALF_ENABLE_CPP11_USER_LITERALS=0") - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2") - add_subdirectory(dataset) - set(CMAKE_CXX_FLAGS_RELEASE ${_md_tmp_CMAKE_CXX_FLAGS_RELEASE}) - else() - add_subdirectory(dataset) - endif() + add_subdirectory(dataset) endif() diff --git a/mindspore/ccsrc/dataset/CMakeLists.txt b/mindspore/ccsrc/dataset/CMakeLists.txt index b3ac34de70..0bc4065ac9 100644 --- a/mindspore/ccsrc/dataset/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/CMakeLists.txt @@ -12,6 +12,9 @@ endif() 
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-format") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-attributes") +if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,--image-base -Wl,0x10000000") +endif() ############################# Options ################################ if (ENABLE_GPUQUE) add_definitions(-D ENABLE_GPUQUE) @@ -80,7 +83,7 @@ set_target_properties(_c_dataengine PROPERTIES ###################################################################### ################# Link with external libraries ######################## -if (WIN32) +if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") target_link_libraries(_c_dataengine PRIVATE mindspore) target_link_libraries(_c_dataengine PRIVATE mindspore::pybind11_module ${PYTHON_LIBRARIES} mindspore::protobuf ${SECUREC_LIBRARY}) else() @@ -101,7 +104,7 @@ if (ENABLE_TDTQUE) endif () add_dependencies(_c_dataengine _c_mindrecord) -if (WIN32) +if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") set(MINDRECORD_LINK_OBJECT ${CMAKE_BINARY_DIR}/mindspore/ccsrc/mindrecord/CMakeFiles/_c_mindrecord.dir/objects.a) target_link_libraries(_c_dataengine PRIVATE _c_mindrecord ${MINDRECORD_LINK_OBJECT} mindspore::sqlite) else() diff --git a/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc index 7edf1dd288..5e3ea3dc44 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/dataset_op.cc @@ -109,11 +109,15 @@ void DatasetOp::Print(std::ostream &out, bool show_all) const { // Gets the next buffer from the given child Status DatasetOp::GetNextBuffer(std::unique_ptr *p_buffer, int32_t worker_id, bool retry_if_eoe) { +#if defined(_WIN32) || defined(_WIN64) + RETURN_IF_NOT_OK(out_connector_->PopWithRetry(static_cast(worker_id), p_buffer, retry_if_eoe)); +#else std::unique_ptr next_buff; // pop is a blocked call and will throw an interruption if the whole group shuts down. 
RETURN_IF_NOT_OK(out_connector_->PopWithRetry(static_cast(worker_id), &next_buff, retry_if_eoe)); *p_buffer = std::move(next_buff); +#endif return Status::OK(); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/map_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/map_op.cc index b6d603bac9..3f8d70b606 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/map_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/map_op.cc @@ -65,6 +65,9 @@ MapOp::MapOp(const std::vector &in_col_names, const std::vectorGetNextBuffer(&buff, 0)); is_eof = buff->eof(); RETURN_IF_NOT_OK(local_queues_[que_id]->Add(std::move(buff))); +#if defined(_WIN32) || defined(_WIN64) + if (is_eof) { + eof_worker_id_ = que_id; + for (int32_t id = 0; id < num_workers_; id++) { + if (id != eof_worker_id_) { + auto eof_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOF); + RETURN_IF_NOT_OK(local_queues_[id]->Add(std::move(eof_buffer))); + } + } + } +#endif que_id = (que_id + 1) % num_workers_; } } @@ -159,6 +173,14 @@ Status MapOp::WorkerEntry(int32_t worker_id) { continue; } else if (in_buffer->eof()) { // Calling base class EofReceived to forward eof buffer. +#if defined(_WIN32) || defined(_Win64) + if (perf_mode_) { + if (eof_worker_id_ == worker_id) { + RETURN_IF_NOT_OK(EofReceived(worker_id)); + } + break; + } +#endif RETURN_IF_NOT_OK(EofReceived(worker_id)); break; } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/map_op.h b/mindspore/ccsrc/dataset/engine/datasetops/map_op.h index 4c9d27f9c7..5e16bc3fed 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/map_op.h +++ b/mindspore/ccsrc/dataset/engine/datasetops/map_op.h @@ -193,6 +193,10 @@ class MapOp : public ParallelOp { // cause additional blocking because pop calls to Connector from the threads are synchronized to enforce the order. 
bool perf_mode_; +#if defined(_WIN32) || defined(_WIN64) + // EOF worker id is only work on Performance mode, to record the worker id of queue which gets EOF + int32_t eof_worker_id_; +#endif // Private function for worker/thread to loop continuously. It comprises the main // logic of MapOp: getting the data from previous Op, validating user specified column names, // applying a list of TensorOps to each of the data, process the results and then diff --git a/mindspore/ccsrc/mindrecord/CMakeLists.txt b/mindspore/ccsrc/mindrecord/CMakeLists.txt index a2b8897b16..fdd648a50f 100644 --- a/mindspore/ccsrc/mindrecord/CMakeLists.txt +++ b/mindspore/ccsrc/mindrecord/CMakeLists.txt @@ -26,7 +26,7 @@ set_target_properties(_c_mindrecord PROPERTIES ) # add link library -if (WIN32) +if (${CMAKE_SYSTEM_NAME} MATCHES "Windows") target_link_libraries(_c_mindrecord PRIVATE mindspore::sqlite mindspore mindspore::protobuf) else() target_link_libraries(_c_mindrecord PRIVATE mindspore::sqlite ${PYTHON_LIB} ${SECUREC_LIBRARY} mindspore mindspore_gvar mindspore::protobuf) From 8c1939f1b04f642563204dafd1decec9f176c7c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=B8=87=E4=B8=87=E6=B2=A1=E6=83=B3=E5=88=B0?= Date: Sat, 18 Apr 2020 16:09:40 +0800 Subject: [PATCH 353/367] fix examples issues --- mindspore/ops/__init__.py | 2 ++ mindspore/ops/operations/array_ops.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/mindspore/ops/__init__.py b/mindspore/ops/__init__.py index 01ca039862..b73d683284 100644 --- a/mindspore/ops/__init__.py +++ b/mindspore/ops/__init__.py @@ -26,6 +26,8 @@ Note: - The Primitive operators in operations need to be used after instantiation. - The composite operators are pre-defined combination of operator. - The functional operators are the pre-instantiated Primitive operators, which can be used directly like a function. 
+ - For functional operators usage, please refer to + https://gitee.com/mindspore/mindspore/blob/master/mindspore/ops/functional.py """ from .primitive import Primitive, PrimitiveWithInfer, prim_attr_register diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index 7605c2d1e8..4047a19b23 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -795,7 +795,7 @@ class ZerosLike(PrimitiveWithInfer): Examples: >>> zeroslike = P.ZerosLike() - >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32)) + >>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32)) >>> output = zeroslike(x) """ From ea6958c50a02c87350b26ec2080abc6650f1a045 Mon Sep 17 00:00:00 2001 From: Wei Luning Date: Sun, 12 Apr 2020 23:18:04 +0800 Subject: [PATCH 354/367] add pattern AdjustAllReduceMulAdd --- mindspore/ccsrc/operator/ops.cc | 1 + mindspore/ccsrc/operator/ops.h | 1 + mindspore/ccsrc/optimizer/irpass.cc | 2 +- .../optimizer/irpass/arithmetic_simplify.h | 78 +++++++++++++++++++ mindspore/ops/operations/array_ops.py | 2 +- mindspore/ops/operations/nn_ops.py | 2 +- tests/ut/cpp/optimizer/lib_test.cc | 19 +++++ .../gtest_input/optimizer/opt_test.py | 45 ++++++++++- 8 files changed, 145 insertions(+), 5 deletions(-) diff --git a/mindspore/ccsrc/operator/ops.cc b/mindspore/ccsrc/operator/ops.cc index f3053cac7d..8cf2d1290f 100755 --- a/mindspore/ccsrc/operator/ops.cc +++ b/mindspore/ccsrc/operator/ops.cc @@ -226,6 +226,7 @@ const PrimitivePtr kPrimIsNot = std::make_shared("is_not"); const PrimitivePtr kPrimMirror = std::make_shared("_MirrorOperator"); const PrimitivePtr kPrimVirtualDiv = std::make_shared("_VirtualDiv"); const PrimitivePtr kPrimVirtualDataset = std::make_shared("_VirtualDataset"); +const PrimitivePtr kPrimAllReduce = std::make_shared("AllReduce"); // Debug ops const PrimitivePtr kPrimScalarSummary = std::make_shared("ScalarSummary"); diff --git a/mindspore/ccsrc/operator/ops.h 
b/mindspore/ccsrc/operator/ops.h index 2dc7072972..548980bf2d 100755 --- a/mindspore/ccsrc/operator/ops.h +++ b/mindspore/ccsrc/operator/ops.h @@ -231,6 +231,7 @@ extern const PrimitivePtr kPrimMinimumGrad; extern const PrimitivePtr kPrimMaximumGrad; // Comm ops +extern const PrimitivePtr kPrimAllReduce; extern const PrimitivePtr kPrimMirror; extern const PrimitivePtr kPrimVirtualDiv; extern const PrimitivePtr kPrimVirtualDataset; diff --git a/mindspore/ccsrc/optimizer/irpass.cc b/mindspore/ccsrc/optimizer/irpass.cc index 96d88f6e61..d64df33f99 100644 --- a/mindspore/ccsrc/optimizer/irpass.cc +++ b/mindspore/ccsrc/optimizer/irpass.cc @@ -48,7 +48,7 @@ namespace irpass { OptimizeIRPassLib::OptimizeIRPassLib() { arithmetic_simplify_ = MakeSubstitution(ArithmeticSimplify(), "arithmetic_simplify", {prim::kPrimScalarAdd, prim::kPrimScalarMul, prim::kPrimTensorAdd, - prim::kPrimIdentity, prim::kPrimMomentum, prim::kPrimMul}); + prim::kPrimAddN, prim::kPrimIdentity, prim::kPrimMomentum, prim::kPrimMul}); special_op_eliminate_ = MakeSubstitution(SpecialOpEliminater(), "special_op_eliminate", {prim::kPrimInsertGradientOf, prim::kPrimPrintShapeType, prim::kPrimGetRefKey, prim::kPrimMirror, prim::kPrimVirtualDiv}); diff --git a/mindspore/ccsrc/optimizer/irpass/arithmetic_simplify.h b/mindspore/ccsrc/optimizer/irpass/arithmetic_simplify.h index ab191aab20..0d48fc1463 100644 --- a/mindspore/ccsrc/optimizer/irpass/arithmetic_simplify.h +++ b/mindspore/ccsrc/optimizer/irpass/arithmetic_simplify.h @@ -228,6 +228,82 @@ class ConstantDuplicateMul : public AnfVisitor { CNodePtr cnode_; }; +// grad = AllReduce(grad) / worker_number +// grad = grad + weight * decy +// -> +// grad = grad + weight * decy +// grad = AllReduce(grad) / worker_number + +// {prim::kPrimAddN, {prim::kPrimMakeTuple, {prim::kPrimMul, {prim::kPrimAllReduce, X}, Y}, Z}} -> +// {prim::kPrimMul, {prim::kPrimAllReduce, {prim::kPrimAddN,{prim::kPrimMakeTuple, Z, X}}}, Y} +class AdjustAllReduceMulAdd : public 
AnfVisitor { + public: + AnfNodePtr operator()(const OptimizerPtr &, const AnfNodePtr &node) override { + Reset(); + // {prim::kPrimAddN, Zs} + if (!IsPrimitiveCNode(node, prim::kPrimAddN)) { + return nullptr; + } + auto addn = node->cast(); + if (addn->size() != 2) { + return nullptr; + } + + AnfVisitor::Match(prim::kPrimMakeTuple, {IsNode, IsNode})(addn->input(1)); + if (x_ == nullptr || y_ == nullptr || z_ == nullptr) { + return nullptr; + } + + auto fg = node->func_graph(); + AnfNodePtr tuple = NewCNode({NewValueNode(prim::kPrimMakeTuple), z_, x_}, fg); + AnfNodePtr add = NewCNode({NewValueNode(prim::kPrimAddN), tuple}, fg); + AnfNodePtr all_reduce = NewCNode({NewValueNode(prim::kPrimAllReduce), add}, fg); + return NewCNode({NewValueNode(prim::kPrimMul), all_reduce, y_}, fg); + } + + void Visit(const AnfNodePtr &node) override { + if (level_ == 0) { + level_ = 1; + is_reduce_match_ = false; + // {prim::kPrimMul, {prim::kPrimAllReduce, X}, Y} + AnfVisitor::Match(prim::kPrimMul)(node); + level_ = 0; + if (is_reduce_match_) { + y_ = tmp_; + } else { + z_ = node; + } + } + + if (level_ == 1) { + // {prim::kPrimAllReduce, X} + if (IsPrimitiveCNode(node, prim::kPrimAllReduce)) { + auto cnode = node->cast(); + if (cnode->size() > 1) { + x_ = cnode->input(1); + is_reduce_match_ = true; + } + } else { + tmp_ = node; + } + } + } + + void Reset() { + level_ = 0; + is_reduce_match_ = false; + x_ = nullptr; + y_ = nullptr; + z_ = nullptr; + tmp_ = nullptr; + } + + private: + int level_{0}; + bool is_reduce_match_{false}; + AnfNodePtr x_{nullptr}, y_{nullptr}, z_{nullptr}, tmp_{nullptr}; +}; + class ArithmeticSimplify { public: ArithmeticSimplify() @@ -243,6 +319,7 @@ class ArithmeticSimplify { eliminaters_.emplace_back(identity_); eliminaters_.emplace_back(opt_update_zero_tensor_); eliminaters_.emplace_back(constant_duplicate_mul_); + eliminaters_.emplace_back(adjust_allreduce_mul_add_); } ~ArithmeticSimplify() = default; @@ -264,6 +341,7 @@ class ArithmeticSimplify { 
PrimEliminater identity_; OptUpdateZeroTensor opt_update_zero_tensor_; ConstantDuplicateMul constant_duplicate_mul_; + AdjustAllReduceMulAdd adjust_allreduce_mul_add_; std::vector eliminaters_{}; }; } // namespace irpass diff --git a/mindspore/ops/operations/array_ops.py b/mindspore/ops/operations/array_ops.py index a7c3f50440..b348e9a700 100644 --- a/mindspore/ops/operations/array_ops.py +++ b/mindspore/ops/operations/array_ops.py @@ -1235,7 +1235,7 @@ class UnsortedSegmentSum(PrimitiveWithInfer): Tensor, the shape is :math:`(z, x_{N+1}, ..., x_R)`. Examples: - >>> input_x = Tensor([1, 2, 3, 4], mindspore.float) + >>> input_x = Tensor([1, 2, 3, 4], mindspore.float32) >>> segment_ids = Tensor([0, 0, 1, 2], mindspore.int32) >>> num_segments = 4 >>> P.UnsortedSegmentSum()(input_x, segment_ids, num_segments) diff --git a/mindspore/ops/operations/nn_ops.py b/mindspore/ops/operations/nn_ops.py index 62265162a9..3f24996b1b 100644 --- a/mindspore/ops/operations/nn_ops.py +++ b/mindspore/ops/operations/nn_ops.py @@ -1572,7 +1572,7 @@ class LayerNorm(Primitive): `Layer Normalization `_. .. math:: - y = \frac{x - mean]}{\sqrt{variance + \epsilon}} * \gamma + \beta + y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon. 
diff --git a/tests/ut/cpp/optimizer/lib_test.cc b/tests/ut/cpp/optimizer/lib_test.cc index 2d4cf0e78e..8e348c698a 100644 --- a/tests/ut/cpp/optimizer/lib_test.cc +++ b/tests/ut/cpp/optimizer/lib_test.cc @@ -556,5 +556,24 @@ TEST_F(TestOptLib, test_constant_duplicate_mul) { ASSERT_TRUE(CheckOpt(beforerl, after, patterns)); ASSERT_TRUE(CheckOpt(beforerr, after, patterns)); } + +TEST_F(TestOptLib, test_adjust_allreduce_mul_add) { + FuncGraphPtr beforell = getPyFun.CallAndParseRet("test_adjust_allreduce_mul_add", "beforell"); + FuncGraphPtr beforelr = getPyFun.CallAndParseRet("test_adjust_allreduce_mul_add", "beforelr"); + FuncGraphPtr beforerl = getPyFun.CallAndParseRet("test_adjust_allreduce_mul_add", "beforerl"); + FuncGraphPtr beforerr = getPyFun.CallAndParseRet("test_adjust_allreduce_mul_add", "beforerr"); + FuncGraphPtr after1 = getPyFun.CallAndParseRet("test_adjust_allreduce_mul_add", "after1"); + FuncGraphPtr before2r = getPyFun.CallAndParseRet("test_adjust_allreduce_mul_add", "before2r"); + FuncGraphPtr before2l = getPyFun.CallAndParseRet("test_adjust_allreduce_mul_add", "before2l"); + FuncGraphPtr after2 = getPyFun.CallAndParseRet("test_adjust_allreduce_mul_add", "after2"); + auto patterns = std::vector({irpass.arithmetic_simplify_}); + ASSERT_TRUE(CheckOpt(beforell, after1, patterns)); + ASSERT_TRUE(CheckOpt(beforelr, after1, patterns)); + ASSERT_TRUE(CheckOpt(beforerl, after1, patterns)); + ASSERT_TRUE(CheckOpt(beforerr, after1, patterns)); + ASSERT_TRUE(CheckOpt(before2l, after2, patterns)); + ASSERT_TRUE(CheckOpt(before2r, after2, patterns)); +} + } // namespace opt } // namespace mindspore diff --git a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py index d494ad27d3..d74aa15952 100644 --- a/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py +++ b/tests/ut/cpp/python_input/gtest_input/optimizer/opt_test.py @@ -908,8 +908,8 @@ def test_print_tuple_wrapper(tag): def 
test_constant_duplicate_mul(tag): fns = FnDict() - Mul = Primitive('Mul'); - Sqrt = Primitive('Sqrt'); + Mul = Primitive('Mul') + Sqrt = Primitive('Sqrt') x = Tensor(np.array([[2, 2], [2, 3]]).astype('float32')) tensor1 = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]]).astype('float32')) @@ -936,3 +936,44 @@ def test_constant_duplicate_mul(tag): return Mul(Sqrt(x), Mul(tensor1, tensor2)) return fns[tag] + + +def test_adjust_allreduce_mul_add(tag): + fns = FnDict() + Mul = Primitive('Mul') + AddN = Primitive('AddN') + AllReduce = Primitive('AllReduce') + + @fns + def beforell(x, y, z): + return AddN((z, Mul(y, AllReduce(x)))) + + @fns + def beforelr(x, y, z): + return AddN((z, Mul(AllReduce(x), y))) + + @fns + def beforerl(x, y, z): + return AddN((Mul(y, AllReduce(x)), z)) + + @fns + def beforerr(x, y, z): + return AddN((Mul(AllReduce(x), y), z)) + + @fns + def after1(x, y, z): + return Mul(AllReduce(AddN((z, x))), y) + + @fns + def before2r(x, y, z): + return AddN((Mul(AllReduce(x), y), Mul(z, z))) + + @fns + def before2l(x, y, z): + return AddN((Mul(z, z), Mul(AllReduce(x), y))) + + @fns + def after2(x, y, z): + return Mul(AllReduce(AddN((Mul(z, z), x))), y) + + return fns[tag] From 08775a67e7d377cce596bb20e8edd1a24d32dd08 Mon Sep 17 00:00:00 2001 From: guohongzilong <2713219276@qq.com> Date: Sat, 18 Apr 2020 16:50:09 +0800 Subject: [PATCH 355/367] add print comment and fix some example --- mindspore/ops/operations/comm_ops.py | 8 ++++---- mindspore/ops/operations/debug_ops.py | 3 +++ mindspore/ops/operations/other_ops.py | 2 +- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/mindspore/ops/operations/comm_ops.py b/mindspore/ops/operations/comm_ops.py index 441e441c2c..a5a4c9f236 100644 --- a/mindspore/ops/operations/comm_ops.py +++ b/mindspore/ops/operations/comm_ops.py @@ -65,7 +65,7 @@ class AllReduce(PrimitiveWithInfer): The contents depend on the specified operation. 
Examples: - >>> from mindspore.communication.management import init + >>> from mindspore.communication import init >>> import mindspore.ops.operations as P >>> init('nccl') >>> class Net(nn.Cell): @@ -130,7 +130,7 @@ class AllGather(PrimitiveWithInfer): then the shape of output is :math:`(N, x_1, x_2, ..., x_R)`. Examples: - >>> from mindspore.communication.management import init + >>> from mindspore.communication import init >>> import mindspore.ops.operations as P >>> init('nccl') >>> class Net(nn.Cell): @@ -187,7 +187,7 @@ class ReduceScatter(PrimitiveWithInfer): ValueError: If the first dimension of input can not be divided by rank size. Examples: - >>> from mindspore.communication.management import init + >>> from mindspore.communication import init >>> import mindspore.ops.operations as P >>> init('nccl') >>> class Net(nn.Cell): @@ -252,7 +252,7 @@ class Broadcast(PrimitiveWithInfer): TypeError: If root_rank is not a integer or group is not a string. Examples: - >>> from mindspore.communication.management import init + >>> from mindspore.communication import init >>> import mindspore.ops.operations as P >>> init('nccl') >>> class Net(nn.Cell): diff --git a/mindspore/ops/operations/debug_ops.py b/mindspore/ops/operations/debug_ops.py index e4467f5ce1..ec3f10a696 100644 --- a/mindspore/ops/operations/debug_ops.py +++ b/mindspore/ops/operations/debug_ops.py @@ -161,6 +161,9 @@ class Print(PrimitiveWithInfer): """ Output tensor or string to stdout. + Note: + The print operation cannot support float64 and bool types currently. + Inputs: - **input_x** (Union[Tensor, str]) - The graph node to attach to. The input supports multiple strings and tensors which are separated by ','. 
diff --git a/mindspore/ops/operations/other_ops.py b/mindspore/ops/operations/other_ops.py index ff66e80972..2ece6b7088 100644 --- a/mindspore/ops/operations/other_ops.py +++ b/mindspore/ops/operations/other_ops.py @@ -120,7 +120,7 @@ class BoundingBoxDecode(PrimitiveWithInfer): Examples: >>> boundingbox_decode = P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0), - max_shape=(768, 1280), wh_ratio_clip=0.016) + >>> max_shape=(768, 1280), wh_ratio_clip=0.016) >>> bbox = boundingbox_decode(anchor_box, deltas) """ From 5e01b94ccd457e04eb2e82a9729712a0986fde75 Mon Sep 17 00:00:00 2001 From: limingqi107 Date: Fri, 17 Apr 2020 10:41:49 +0800 Subject: [PATCH 356/367] gpu dynamic memory pool suppoerts multi-graph --- .../ccsrc/device/gpu/gpu_kernel_runtime.cc | 24 +++++++++++++++---- .../ccsrc/device/gpu/gpu_kernel_runtime.h | 6 +++-- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc index 584f66eee7..11b8bdc162 100644 --- a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc +++ b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.cc @@ -155,7 +155,8 @@ void GPUKernelRuntime::InitKernelRefCount(const session::KernelGraph *graph) { mem_reuse_util_ptr->SetReuseRefCount(); // Can't free the device address of graph output, so set the reference count of graph output specially. 
mem_reuse_util_ptr->SetGraphOutputRefCount(); - mem_reuse_util_ptr_ = mem_reuse_util_ptr; + auto graph_id = graph->graph_id(); + mem_reuse_util_map_[graph_id] = mem_reuse_util_ptr; } void GPUKernelRuntime::InitKernelOutputAddress(const session::KernelGraph *graph) { @@ -179,6 +180,7 @@ void GPUKernelRuntime::InitKernelOutputAddress(const session::KernelGraph *graph bool GPUKernelRuntime::LaunchKernelDynamic(const session::KernelGraph *graph) { MS_EXCEPTION_IF_NULL(graph); + auto graph_id = graph->graph_id(); // The inputs and outputs memory of communication kernel are special, so separate processing. AllocCommunicationOpDynamicRes(graph); @@ -194,7 +196,7 @@ bool GPUKernelRuntime::LaunchKernelDynamic(const session::KernelGraph *graph) { MS_LOG(ERROR) << "Launch kernel failed."; return false; } - FreeKernelDynamicRes(kernel, kernel_workspaces); + FreeKernelDynamicRes(kernel, kernel_workspaces, graph_id); } if (!SyncStream()) { @@ -341,14 +343,16 @@ void GPUKernelRuntime::AllocCommunicationOpOutputDynamicRes(const mindspore::Anf } void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, - const AddressPtrList &kernel_workspaces) { + const AddressPtrList &kernel_workspaces, uint32_t graph_id) { MS_EXCEPTION_IF_NULL(kernel); MS_EXCEPTION_IF_NULL(mem_manager_); + auto mem_reuse_util_ptr = mem_reuse_util_map_[graph_id]; + MS_EXCEPTION_IF_NULL(mem_reuse_util_ptr); auto cnode = kernel->cast(); MS_EXCEPTION_IF_NULL(cnode); // Free the input of kernel by reference count. for (size_t i = 0; i < AnfAlgo::GetInputTensorNum(kernel); ++i) { - auto kernel_ref_count_ptr = mem_reuse_util_ptr_->GetKernelInputRef(cnode, i); + auto kernel_ref_count_ptr = mem_reuse_util_ptr->GetKernelInputRef(cnode, i); if (kernel_ref_count_ptr == nullptr) { continue; } @@ -361,7 +365,6 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, // Reset the reference count. 
kernel_ref_count_ptr->ref_count_dynamic_use_ = kernel_ref_count_ptr->ref_count_; bool is_communication_op = false; - // The inputs and outputs memory of communication kernel are special, so separate processing. FreeCommunicationOpDynamicRes(kernel, i, &is_communication_op); if (!is_communication_op) { auto device_address = AnfAlgo::GetPrevNodeMutableOutputAddr(kernel, i); @@ -369,6 +372,17 @@ void GPUKernelRuntime::FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, } } } + // Free the output of kernel, if output has no reference. + for (size_t i = 0; i < AnfAlgo::GetOutputTensorNum(kernel); ++i) { + auto kernel_ref_count_ptr = mem_reuse_util_ptr->GetRef(cnode, i); + if (kernel_ref_count_ptr == nullptr) { + continue; + } + if (kernel_ref_count_ptr->ref_count_dynamic_use_ == 0) { + auto device_address = AnfAlgo::GetMutableOutputAddr(kernel, i); + mem_manager_->FreeMemFromMemPool(device_address); + } + } // Free the workspace of kernel. for (size_t i = 0; i < kernel_workspaces.size(); ++i) { auto workspace = kernel_workspaces[i]; diff --git a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.h b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.h index 6f761342d3..e0eb2dc3f1 100644 --- a/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.h +++ b/mindspore/ccsrc/device/gpu/gpu_kernel_runtime.h @@ -21,6 +21,7 @@ #include #include #include +#include #include "device/kernel_runtime.h" #include "device/kernel_runtime_manager.h" @@ -57,11 +58,12 @@ class GPUKernelRuntime : public KernelRuntime { void AllocCommunicationOpDynamicRes(const session::KernelGraph *graph); void AllocCommunicationOpInputDynamicRes(const mindspore::AnfNodePtr &kernel); void AllocCommunicationOpOutputDynamicRes(const mindspore::AnfNodePtr &kernel); - void FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, const AddressPtrList &kernel_workspaces); + void FreeKernelDynamicRes(const mindspore::AnfNodePtr &kernel, const AddressPtrList &kernel_workspaces, + uint32_t graph_id); void 
FreeCommunicationOpDynamicRes(const mindspore::AnfNodePtr &kernel, size_t input_idx, bool *is_communication_op); size_t communication_op_input_ref_count_{0}; size_t communication_op_output_ref_count_{0}; - MemReuseUtilPtr mem_reuse_util_ptr_{nullptr}; + std::unordered_map mem_reuse_util_map_; }; MS_REG_KERNEL_RUNTIME(kGPUDevice, GPUKernelRuntime); } // namespace gpu From 852039b7265cccf975a5d9246ebbe4b305e27a31 Mon Sep 17 00:00:00 2001 From: caojian05 Date: Sat, 18 Apr 2020 18:16:29 +0800 Subject: [PATCH 357/367] fix dataset deepcopy failed while depipline referenced in the iterator --- mindspore/dataset/engine/iterators.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mindspore/dataset/engine/iterators.py b/mindspore/dataset/engine/iterators.py index d670de508c..cb56a435f1 100644 --- a/mindspore/dataset/engine/iterators.py +++ b/mindspore/dataset/engine/iterators.py @@ -231,6 +231,9 @@ class Iterator: def num_classes(self): return self.depipeline.GetNumClasses() + def __deepcopy__(self, memo): + return Iterator(copy.deepcopy(self.dataset, memo)) + class DictIterator(Iterator): """ From f0c07c3fa633f70cab54793ee0edbbc9bd3dba6d Mon Sep 17 00:00:00 2001 From: ms_yan <6576637+ms_yan@user.noreply.gitee.com> Date: Sat, 11 Apr 2020 18:59:37 +0800 Subject: [PATCH 358/367] Realize take op and add ut --- mindspore/ccsrc/dataset/api/de_pipeline.cc | 12 +- mindspore/ccsrc/dataset/api/de_pipeline.h | 2 +- mindspore/ccsrc/dataset/core/client.h | 1 + .../dataset/engine/datasetops/CMakeLists.txt | 2 +- .../dataset/engine/datasetops/skip_op.cc | 4 + .../dataset/engine/datasetops/take_op.cc | 146 ++++++++ .../ccsrc/dataset/engine/datasetops/take_op.h | 107 ++++++ mindspore/dataset/engine/datasets.py | 65 +++- mindspore/dataset/engine/iterators.py | 2 + .../dataset/engine/serializer_deserializer.py | 3 + mindspore/dataset/engine/validators.py | 19 +- tests/ut/cpp/dataset/CMakeLists.txt | 1 + tests/ut/cpp/dataset/take_op_test.cc | 103 ++++++ 
tests/ut/python/dataset/test_take.py | 317 ++++++++++++++++++ 14 files changed, 779 insertions(+), 5 deletions(-) create mode 100644 mindspore/ccsrc/dataset/engine/datasetops/take_op.cc create mode 100644 mindspore/ccsrc/dataset/engine/datasetops/take_op.h create mode 100644 tests/ut/cpp/dataset/take_op_test.cc create mode 100644 tests/ut/python/dataset/test_take.py diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.cc b/mindspore/ccsrc/dataset/api/de_pipeline.cc index a62994cb51..5f61c86f06 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.cc +++ b/mindspore/ccsrc/dataset/api/de_pipeline.cc @@ -54,6 +54,7 @@ static std::unordered_map g_parse_op_func_ = {{kStorage, &D {kGenerator, &DEPipeline::ParseGeneratorOp}, {kTfReader, &DEPipeline::ParseTFReaderOp}, {kProject, &DEPipeline::ParseProjectOp}, + {kTake, &DEPipeline::ParseTakeOp}, {kImageFolder, &DEPipeline::ParseImageFolderOp}, {kMnist, &DEPipeline::ParseMnistOp}, {kManifest, &DEPipeline::ParseManifestOp}, @@ -650,7 +651,16 @@ Status DEPipeline::ParseRenameOp(const py::dict &args, std::shared_ptr *ptr) { + if (args["count"].is_none()) { + std::string err_msg = "Error: count is invalid or not set."; + RETURN_STATUS_UNEXPECTED(err_msg); + } + std::shared_ptr op; + RETURN_IF_NOT_OK(TakeOp::Builder(ToInt(args["count"])).Build(&op)); + *ptr = op; + return Status::OK(); +} Status DEPipeline::ParseZipOp(const py::dict &args, std::shared_ptr *ptr) { std::shared_ptr builder = std::make_shared(); diff --git a/mindspore/ccsrc/dataset/api/de_pipeline.h b/mindspore/ccsrc/dataset/api/de_pipeline.h index 35276e5b74..6ff7bb091c 100644 --- a/mindspore/ccsrc/dataset/api/de_pipeline.h +++ b/mindspore/ccsrc/dataset/api/de_pipeline.h @@ -116,7 +116,7 @@ class DEPipeline { Status ParseRenameOp(const py::dict &args, std::shared_ptr *ptr); - DsOpPtr ParseTakeOp(const py::dict &args) const; + Status ParseTakeOp(const py::dict &args, std::shared_ptr *ptr); Status ParseZipOp(const py::dict &args, std::shared_ptr *ptr); diff --git 
a/mindspore/ccsrc/dataset/core/client.h b/mindspore/ccsrc/dataset/core/client.h index b39ba3442b..b865c54260 100644 --- a/mindspore/ccsrc/dataset/core/client.h +++ b/mindspore/ccsrc/dataset/core/client.h @@ -38,6 +38,7 @@ #include "dataset/engine/datasetops/source/mindrecord_op.h" #include "dataset/engine/datasetops/source/storage_op.h" #include "dataset/engine/datasetops/source/tf_reader_op.h" +#include "dataset/engine/datasetops/take_op.h" #include "dataset/engine/datasetops/zip_op.h" #include "dataset/engine/execution_tree.h" #include "dataset/util/status.h" diff --git a/mindspore/ccsrc/dataset/engine/datasetops/CMakeLists.txt b/mindspore/ccsrc/dataset/engine/datasetops/CMakeLists.txt index 9e511f78f4..655a739ada 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/CMakeLists.txt +++ b/mindspore/ccsrc/dataset/engine/datasetops/CMakeLists.txt @@ -5,13 +5,13 @@ add_library(engine-datasetops OBJECT parallel_op.cc pipeline_op.cc batch_op.cc - batch_op.cc device_queue_op.cc map_op.cc project_op.cc rename_op.cc repeat_op.cc skip_op.cc + take_op.cc shuffle_op.cc zip_op.cc ) diff --git a/mindspore/ccsrc/dataset/engine/datasetops/skip_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/skip_op.cc index 5b0433b6c8..90c160b5bf 100644 --- a/mindspore/ccsrc/dataset/engine/datasetops/skip_op.cc +++ b/mindspore/ccsrc/dataset/engine/datasetops/skip_op.cc @@ -88,6 +88,10 @@ Status SkipOp::GetNextBuffer(std::unique_ptr *p_buffer, int32_t work // If buffer is none or the rows of buffer is 0, // then get a buffer from child. 
if (!buf || buf->NumRows() == 0) { + if (buf && buf->eof()) { + *p_buffer = std::move(buf); + return Status::OK(); + } RETURN_IF_NOT_OK(child_[0]->GetNextBuffer(&buf, worker_id, true)); } diff --git a/mindspore/ccsrc/dataset/engine/datasetops/take_op.cc b/mindspore/ccsrc/dataset/engine/datasetops/take_op.cc new file mode 100644 index 0000000000..d9625b6c26 --- /dev/null +++ b/mindspore/ccsrc/dataset/engine/datasetops/take_op.cc @@ -0,0 +1,146 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include "common/utils.h" +#include "dataset/engine/data_buffer.h" +#include "dataset/engine/datasetops/take_op.h" +#include "dataset/engine/db_connector.h" +#include "dataset/engine/execution_tree.h" + +namespace mindspore { +namespace dataset { +// Builder constructor. Creates the builder object. +TakeOp::Builder::Builder(int32_t count) : build_max_takes_(count) {} + +Status TakeOp::Builder::SanityCheck() const { + if (build_max_takes_ <= 0) { + std::string err_msg("Take count must be greater than 0."); + RETURN_STATUS_UNEXPECTED(err_msg); + } + return Status::OK(); +} + +// The builder "build" method creates the final object. +Status TakeOp::Builder::Build(std::shared_ptr *ptr) { + RETURN_IF_NOT_OK(SanityCheck()); + *ptr = std::make_shared(build_max_takes_); + return Status::OK(); +} + +// Constructor of the TakeOp. 
+TakeOp::TakeOp(int32_t count) : PipelineOp(0), max_takes_(count), take_count_(0) {} + +// A print method typically used for debugging +void TakeOp::Print(std::ostream &out, bool show_all) const { + // Call base class printer first + PipelineOp::Print(out, show_all); + + // Then display our own stuff + out << "TakeOp:" + << "\nCurrent take count: " << take_count_ << "\nMax take count: " << max_takes_; +} + +// This function will be call muti times to returns the buffer, when meet required max take count or meet +// EOF buffer then this will stop. +Status TakeOp::GetNextBuffer(std::unique_ptr *p_buffer, int32_t worker_id, bool retry_if_eoe) { + if (child_.empty()) { + RETURN_STATUS_UNEXPECTED("TakeOp can't be the leaf node."); + } + + std::unique_ptr buf; + + bool last_repeat = !BitTest(op_ctrl_flags_, kDeOpRepeated) || BitTest(op_ctrl_flags_, kDeOpLastRepeat); + if (take_count_ == max_takes_) { + if (state_ == OpState::kDeOpRunning) { + MS_LOG(INFO) << "meet max count and push-back eoe buffer."; + auto eoe_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOE); + *p_buffer = std::move(eoe_buffer); + state_ = OpState::kDeOpIdle; + + // Reset the count and drain + if (!last_repeat) { + take_count_ = 0; + RETURN_IF_NOT_OK(child_[0]->GetNextBuffer(&buf, worker_id, true)); + while (!buf->eoe() && !buf->eof()) { + RETURN_IF_NOT_OK(child_[0]->GetNextBuffer(&buf, worker_id, true)); + } + } + } else { + MS_LOG(INFO) << "meet max count and push-back eof buffer."; + auto eof_buffer = std::make_unique(0, DataBuffer::kDeBFlagEOF); + *p_buffer = std::move(eof_buffer); + take_count_ = 0; + } + return Status::OK(); + } + RETURN_IF_NOT_OK(child_[0]->GetNextBuffer(&buf, worker_id, true)); + // Loop until non EOE is received + if (buf->eoe()) { + take_count_ = 0; + *p_buffer = std::move(buf); + return Status::OK(); + } + + // Check if the last buf is next eof + if (buf->eof()) { + *p_buffer = std::move(buf); + return Status::OK(); + } + + // Get buffer and push back when take_count is 
still small + if (take_count_ < max_takes_) { + RETURN_IF_NOT_OK(FillBuffer(&buf, p_buffer)); + } + return Status::OK(); +} + +// Function FillBuffer mainly prepare the buffer for returning +Status TakeOp::FillBuffer(std::unique_ptr *buffer, std::unique_ptr *data_buffer) { + int32_t buffer_size = (*buffer)->NumRows(); + if (take_count_ + buffer_size < max_takes_) { + *data_buffer = std::move(*buffer); + take_count_ = take_count_ + buffer_size; + } else { + MS_LOG(INFO) << "In last buffer: Push one buffer."; + std::unique_ptr new_tensor_table = std::make_unique(); + while (take_count_ < max_takes_) { + TensorRow new_row; + RETURN_IF_NOT_OK((*buffer)->PopRow(&new_row)); + take_count_++; + new_tensor_table->push_back(new_row); + } + (*buffer)->set_tensor_table(std::move(new_tensor_table)); + *data_buffer = std::move(*buffer); + } + return Status::OK(); +} + +// Class functor operator () override. +// Most dataset ops operate by launching a thread (see ExecutionTree). +// However, the TakeOp is defined as a inlined operator, so it is invalid to launch the +// functor since this op runs inlined inside another operator. The function is overloaded to +// ensure that it is not called by mistake (it will generate an error). +Status TakeOp::operator()() { RETURN_STATUS_UNEXPECTED("Logic error. 
TakeOp is an inlined operator."); } + +Status TakeOp::PrepareNodePostAction() { + RETURN_IF_NOT_OK(PipelineOp::PrepareNodePostAction()); + tree_->AddToRepeatStack(shared_from_this()); + return Status::OK(); +} +} // namespace dataset +} // namespace mindspore diff --git a/mindspore/ccsrc/dataset/engine/datasetops/take_op.h b/mindspore/ccsrc/dataset/engine/datasetops/take_op.h new file mode 100644 index 0000000000..02218cf610 --- /dev/null +++ b/mindspore/ccsrc/dataset/engine/datasetops/take_op.h @@ -0,0 +1,107 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef DATASET_ENGINE_DATASETOPS_TAKE_OP_H_ +#define DATASET_ENGINE_DATASETOPS_TAKE_OP_H_ + +#include +#include +#include +#include "dataset/engine/datasetops/pipeline_op.h" + +namespace mindspore { +namespace dataset { +class TakeOp : public PipelineOp { + public: + // The nested builder class inside of the TakeOp is used to help manage all of the arguments + // for constructing it. This take op is very simple though, so this builder is really just + // provided for a consistent look and feel for creators of Dataset operators overall. + class Builder { + public: + // Builder constructor. Creates the builder object. + // @note No default args + // @param count - The number of takes to do + // @return This is a constructor. 
+ explicit Builder(int32_t count); + + // Default destructor + ~Builder() = default; + + // The builder "build" method creates the final object. + // @return shared_ptr to the new StorageOp object + Status Build(std::shared_ptr *); + + private: + int32_t build_max_takes_; + + Status SanityCheck() const; + }; + + // Constructor of the TakeOp. + // @note The builder class should be used to call it + // @param count - The number of takes to do + explicit TakeOp(int32_t count); + + // Destructor + ~TakeOp() = default; + + // A print method typically used for debugging + // @param out - The output stream to write output to + // @param show_all - A bool to control if you want to show all info or just a summary + void Print(std::ostream &out, bool show_all) const override; + + // << Stream output operator overload + // @notes This allows you to write the debug print info using stream operators + // @param out - reference to the output stream being overloaded + // @param ro - reference to the TakeOp to display + // @return - the output stream must be returned + friend std::ostream &operator<<(std::ostream &out, const TakeOp &ro) { + ro.Print(out, false); + return out; + } + + // Class functor operator () override. + // Most dataset ops operate by launching a thread (see ExecutionTree). + // However, the TakeOp is defined as a inlined operator, so it is invalid to launch the + // functor since this op runs inlined inside another operator. The function is overloaded to + // ensure that it is not called by mistake (it will generate an error). + // @return Status - The error code return + Status operator()() override; + + // Gets a buffer from the child node. The caller is typically our parent node. + // @note This function sets the `retryIfEoe` flag when popping from the child connector. This way, + // this function will retry to pop the connector again and will get the non-EOE buffer if any. + // @param p_buffer - output pointer to the buffer that it will fetch. 
+ // @param worker_id - The worker id + // @param retry_if_eoe Set this flag to true to allow calling pop() again after the first pop() returns EOE. + // @return Status - The error code return + Status GetNextBuffer(std::unique_ptr *p_buffer, int32_t worker_id, bool retry_if_eoe) override; + + // During tree prepare phase, operators may have specific post-operations to perform depending on + // their role. + // @notes Derived versions of this function should always call it's superclass version first + // before providing their own implementations. + Status PrepareNodePostAction() override; + + private: + int32_t max_takes_; // The number of takes that the user requested + int32_t take_count_; // A counter for the current number of executed takes + + Status FillBuffer(std::unique_ptr *buffer, std::unique_ptr *data_buffer); +}; +} // namespace dataset +} // namespace mindspore + +#endif // DATASET_ENGINE_DATASETOPS_TAKE_OP_H_ diff --git a/mindspore/dataset/engine/datasets.py b/mindspore/dataset/engine/datasets.py index 642e2beec8..8de56a6dff 100644 --- a/mindspore/dataset/engine/datasets.py +++ b/mindspore/dataset/engine/datasets.py @@ -36,7 +36,7 @@ from mindspore import log as logger from . 
import samplers from .iterators import DictIterator, TupleIterator from .validators import check, check_batch, check_shuffle, check_map, check_repeat, check_skip, check_zip, check_rename, \ - check_project, check_imagefolderdatasetv2, check_mnist_cifar_dataset, check_manifestdataset, \ + check_take, check_project, check_imagefolderdatasetv2, check_mnist_cifar_dataset, check_manifestdataset, \ check_tfrecorddataset, check_vocdataset, check_celebadataset, check_minddataset, check_generatordataset, \ check_zip_dataset, check_add_column from ..core.datatypes import mstype_to_detype, mstypelist_to_detypelist @@ -442,6 +442,33 @@ class Dataset: """ return SkipDataset(self, count) + @check_take + def take(self, count=-1): + """ + Takes at most given numbers of elements from the dataset. + + Note: + 1. If count is greater than the number of element in dataset or equal to -1, + all the element in dataset will be taken. + 2. The order of using take and batch effects. If take before batch operation, + then taken given number of rows, otherwise take given number of batches. + + Args: + count (int, optional): Number of elements to be taken from the dataset (default=-1). + + Returns: + TakeDataset, dataset taken. + + Examples: + >>> import mindspore.dataset as ds + >>> # data is an instance of Dataset object. + >>> # creates a dataset where the dataset including 50 elements. + >>> data = data.take(50) + """ + if count == -1: + return self + return TakeDataset(self, count) + @check_zip_dataset def zip(self, datasets): """ @@ -1100,6 +1127,7 @@ class RepeatDataset(DatasetOp): """ return self.count + class SkipDataset(DatasetOp): """ The result of applying Skip operator to the input Dataset. @@ -1134,6 +1162,41 @@ class SkipDataset(DatasetOp): output_size = child_size - self.count return output_size + +class TakeDataset(DatasetOp): + """ + The result of applying Take operator to the input Dataset. + + Args: + input_dataset (Dataset): Input Dataset to be taken element from. 
+ count (int): Number of elements to be taken from the dataset. + """ + + def __init__(self, input_dataset, count): + super().__init__() + self.count = count + self.input.append(input_dataset) + input_dataset.output.append(self) + self._input_indexs = input_dataset.input_indexs + + def get_args(self): + args = super().get_args() + args["count"] = self.count + return args + + def get_dataset_size(self): + """ + Get the number of batches in an epoch. + + Return: + Number, number of batches. + """ + child_size = self.input[0].get_dataset_size() + if child_size < self.count: + return child_size + return self.count + + class ZipDataset(DatasetOp): """ The result of applying Zip operator to the input Dataset. diff --git a/mindspore/dataset/engine/iterators.py b/mindspore/dataset/engine/iterators.py index d670de508c..3d6873d04c 100644 --- a/mindspore/dataset/engine/iterators.py +++ b/mindspore/dataset/engine/iterators.py @@ -129,6 +129,8 @@ class Iterator: op_type = OpName.REPEAT elif isinstance(dataset, de.SkipDataset): op_type = OpName.SKIP + elif isinstance(dataset, de.TakeDataset): + op_type = OpName.TAKE elif isinstance(dataset, de.StorageDataset): op_type = OpName.STORAGE elif isinstance(dataset, de.ImageFolderDatasetV2): diff --git a/mindspore/dataset/engine/serializer_deserializer.py b/mindspore/dataset/engine/serializer_deserializer.py index a54a7a6b32..61417e4d52 100644 --- a/mindspore/dataset/engine/serializer_deserializer.py +++ b/mindspore/dataset/engine/serializer_deserializer.py @@ -304,6 +304,9 @@ def create_node(node): elif dataset_op == 'SkipDataset': pyobj = de.Dataset().skip(node.get('count')) + elif dataset_op == 'TakeDataset': + pyobj = de.Dataset().take(node.get('count')) + elif dataset_op == 'MapDataset': tensor_ops = construct_tensor_ops(node.get('operations')) pyobj = de.Dataset().map(node.get('input_columns'), tensor_ops, node.get('output_columns'), diff --git a/mindspore/dataset/engine/validators.py b/mindspore/dataset/engine/validators.py 
index 3502cbb204..b74e913202 100644 --- a/mindspore/dataset/engine/validators.py +++ b/mindspore/dataset/engine/validators.py @@ -602,7 +602,7 @@ def check_batch_size(batch_size): def check_count(count): check_type(count, 'count', int) if (count <= 0 and count != -1) or count > INT32_MAX: - raise ValueError("repeat count should be either -1 or positive integer.") + raise ValueError("count should be either -1 or positive integer.") def check_columns(columns, name): @@ -709,6 +709,7 @@ def check_repeat(method): return new_method + def check_skip(method): """check the input arguments of skip.""" @wraps(method) @@ -724,6 +725,21 @@ def check_skip(method): return new_method + +def check_take(method): + """check the input arguments of take.""" + @wraps(method) + def new_method(*args, **kwargs): + param_dict = make_param_dict(method, args, kwargs) + + count = param_dict.get('count') + check_count(count) + + return method(*args, **kwargs) + + return new_method + + def check_zip(method): """check the input arguments of zip.""" @wraps(method) @@ -759,6 +775,7 @@ def check_zip_dataset(method): return new_method + def check_rename(method): """check the input arguments of rename.""" @wraps(method) diff --git a/tests/ut/cpp/dataset/CMakeLists.txt b/tests/ut/cpp/dataset/CMakeLists.txt index db207363a8..ae9c46e62c 100644 --- a/tests/ut/cpp/dataset/CMakeLists.txt +++ b/tests/ut/cpp/dataset/CMakeLists.txt @@ -64,6 +64,7 @@ SET(DE_UT_SRCS voc_op_test.cc cifar_op_test.cc celeba_op_test.cc + take_op_test.cc ) add_executable(de_ut_tests ${DE_UT_SRCS}) diff --git a/tests/ut/cpp/dataset/take_op_test.cc b/tests/ut/cpp/dataset/take_op_test.cc new file mode 100644 index 0000000000..7f8508de20 --- /dev/null +++ b/tests/ut/cpp/dataset/take_op_test.cc @@ -0,0 +1,103 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include + +#include "common/common.h" +#include "common/utils.h" +#include "dataset/core/client.h" +#include "gtest/gtest.h" +#include "utils/log_adapter.h" + +namespace common = mindspore::common; + +using namespace mindspore::dataset; +using mindspore::MsLogLevel::INFO; +using mindspore::ExceptionType::NoExceptionType; +using mindspore::LogStream; + +class MindDataTestTakeOp : public UT::DatasetOpTesting {}; + +TEST_F(MindDataTestTakeOp, TestTakeProject) { + // Start with an empty execution tree + auto my_tree = std::make_shared(); + + std::string dataset_path; + dataset_path = datasets_root_path_ + "/testTFTestAllTypes/test.data"; + + // TFReaderOp + std::shared_ptr my_tfreader_op; + TFReaderOp::Builder builder; + builder.SetDatasetFilesList({dataset_path}) + .SetRowsPerBuffer(16) + .SetWorkerConnectorSize(16) + .SetNumWorkers(16); + std::unique_ptr schema = std::make_unique(); + schema->LoadSchemaFile(datasets_root_path_ + "/testTFTestAllTypes/datasetSchema.json", {}); + builder.SetDataSchema(std::move(schema)); + Status rc = builder.Build(&my_tfreader_op); + ASSERT_TRUE(rc.IsOk()); + + // TakeOp + std::shared_ptr my_take_op; + TakeOp::Builder builder_take(5); + rc = builder_take.Build(&my_take_op); + ASSERT_TRUE(rc.IsOk()); + + rc = my_tree->AssociateNode(my_tfreader_op); + ASSERT_TRUE(rc.IsOk()); + rc = my_tree->AssociateNode(my_take_op); + ASSERT_TRUE(rc.IsOk()); + + // Set children/root layout. 
+ rc = my_take_op->AddChild(my_tfreader_op); + ASSERT_TRUE(rc.IsOk()); + rc = my_tree->AssignRoot(my_take_op); + ASSERT_TRUE(rc.IsOk()); + + MS_LOG(INFO) << "Launching tree and begin iteration."; + rc = my_tree->Prepare(); + + ASSERT_TRUE(rc.IsOk()); + + rc = my_tree->Launch(); + ASSERT_TRUE(rc.IsOk()); + + // Start the loop of reading tensors from our pipeline + DatasetIterator di(my_tree); + TensorRow tensor_list; + rc = di.FetchNextTensorRow(&tensor_list); + ASSERT_TRUE(rc.IsOk()); + + int row_count = 0; + while (!tensor_list.empty()) { + MS_LOG(INFO) << "Row display for row #: " << row_count << "."; + + // Display the tensor by calling the printer on it + for (int i = 0; i < tensor_list.size(); i++) { + std::ostringstream ss; + ss << "(" << tensor_list[i] << "): " << *tensor_list[i] << std::endl; + MS_LOG(INFO) << "Tensor print: " << ss.str() << "."; + } + + rc = di.FetchNextTensorRow(&tensor_list); + ASSERT_TRUE(rc.IsOk()); + row_count++; + } + + ASSERT_EQ(row_count, 5); +} diff --git a/tests/ut/python/dataset/test_take.py b/tests/ut/python/dataset/test_take.py new file mode 100644 index 0000000000..ed71f67e26 --- /dev/null +++ b/tests/ut/python/dataset/test_take.py @@ -0,0 +1,317 @@ +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +import mindspore.dataset as ds +import mindspore.dataset.transforms.vision.c_transforms as vision +from mindspore import log as logger +import numpy as np + + +# In generator dataset: Number of rows is 3, its value is 0, 1, 2 +def generator(): + for i in range(3): + yield np.array([i]), + + +# In generator dataset: Number of rows is 10, its value is 0, 1, 2 ... 10 +def generator_10(): + for i in range(10): + yield np.array([i]), + + +def test_take_01(): + """ + Test take: origin there are 3 row, and take 1 row, in this case: will not meet eoe and eof + """ + logger.info("test_take_01") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.take(1) + data1 = data1.repeat(2) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert 0 == d[0][0] + + assert sum([1 for _ in data1]) == 2 + + +def test_take_02(): + """ + Test take: origin there are 3 row, and take 2 row, in this case: will meet eoe + """ + logger.info("test_take_02") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.take(2) + data1 = data1.repeat(2) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert i % 2 == d[0][0] + + assert sum([1 for _ in data1]) == 4 + + +def test_take_03(): + """ + Test take: origin there are 3 row, and take 3 row, in this case: will meet eoe and eof + """ + logger.info("test_take_03") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.take(3) + data1 = data1.repeat(2) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data1]) == 6 + + +def test_take_04(): + """ + Test take: origin there are 3 row, and take 4 row, this is more than the total rows + """ + logger.info("test_take_04") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.take(4) + data1 = data1.repeat(2) 
+ + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data1]) == 6 + + +def test_take_05(): + """ + Test take: there is no repeat op + """ + logger.info("test_take_05") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.take(2) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert i == d[0][0] + + assert sum([1 for _ in data1]) == 2 + + +def test_take_06(): + """ + Test take: repeat is before take + """ + logger.info("test_take_06") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.repeat(2) + data1 = data1.take(4) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data1]) == 4 + + +def test_take_07(): + """ + Test take: take is before batch, that mean take(N), N refer to rows num + """ + logger.info("test_take_07") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.take(2) + data1 = data1.batch(2) + assert sum([1 for _ in data1]) == 1 + + +def test_take_08(): + """ + Test take: take is after batch, that mean take(N), N refer to batches num + """ + logger.info("test_take_08") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.batch(2) + data1 = data1.take(2) + assert sum([1 for _ in data1]) == 2 + + +def test_take_09(): + """ + Test take: repeat count is -1, and read the whole dataset, take after repeat + """ + logger.info("test_take_09") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.repeat(2) + data1 = data1.take(-1) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data1]) == 6 + + +def test_take_10(): + """ + Test take: repeat count is -1, and read the whole dataset, take before repeat + """ + logger.info("test_take_10") + data1 = ds.GeneratorDataset(generator, 
["data"]) + + data1 = data1.take(-1) + data1 = data1.repeat(2) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert i % 3 == d[0][0] + + assert sum([1 for _ in data1]) == 6 + + +def test_take_11(): + """ + Test take: batch first, then do repeat and take operation + """ + logger.info("test_take_11") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.batch(2) + data1 = data1.repeat(2) + data1 = data1.take(-1) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert 2 * (i % 2) == d[0][0] + + assert sum([1 for _ in data1]) == 4 + + +def test_take_12(): + """ + Test take: take first, then do batch and repeat operation + """ + logger.info("test_take_12") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.take(2) + data1 = data1.batch(2) + data1 = data1.repeat(2) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert 0 == d[0][0] + + assert sum([1 for _ in data1]) == 2 + + +def test_take_13(): + """ + Test take: skip first, then do take, batch and repeat operation + """ + logger.info("test_take_13") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.skip(2) + data1 = data1.take(-1) + data1 = data1.batch(2) + data1 = data1.repeat(2) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert 2 == d[0][0] + + assert sum([1 for _ in data1]) == 2 + + +def test_take_14(): + """ + Test take: take first, then do batch, skip and repeat operation + """ + logger.info("test_take_14") + data1 = ds.GeneratorDataset(generator, ["data"]) + + data1 = data1.take(-1) + data1 = data1.batch(2) + data1 = data1.skip(1) + data1 = data1.repeat(2) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert 2 == d[0][0] + + assert sum([1 for _ in data1]) == 2 + + +def test_take_15(): + """ + Test take: large amount data, take a part, then do skip 
operation + """ + logger.info("test_take_15") + data1 = ds.GeneratorDataset(generator_10, ["data"]) + + data1 = data1.take(6) + data1 = data1.skip(2) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert (i + 2) == d[0][0] + + assert sum([1 for _ in data1]) == 4 + + +def test_take_16(): + """ + Test take: large amount data, skip a part, then do take operation + """ + logger.info("test_take_16") + data1 = ds.GeneratorDataset(generator_10, ["data"]) + + data1 = data1.skip(3) + data1 = data1.take(5) + + # Here i refers to index, d refers to data element + for i, d in enumerate(data1): + assert (i + 3) == d[0][0] + + assert sum([1 for _ in data1]) == 5 + + +if __name__ == '__main__': + test_take_01() + test_take_02() + test_take_03() + test_take_04() + test_take_05() + test_take_06() + test_take_07() + test_take_08() + test_take_09() + test_take_10() + test_take_11() + test_take_12() + test_take_13() + test_take_14() + test_take_15() + test_take_16() + logger.info('== test take operation finished ==') \ No newline at end of file From c889320f4e1bc014e84eba8d286a2291a731cb4b Mon Sep 17 00:00:00 2001 From: zhoufeng Date: Sun, 19 Apr 2020 12:14:46 +0800 Subject: [PATCH 359/367] fix cmake verbose --- build.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/build.sh b/build.sh index d555188c20..7550d76c8f 100755 --- a/build.sh +++ b/build.sh @@ -314,7 +314,10 @@ build_mindspore() if [[ "X$INC_BUILD" = "Xoff" ]]; then cmake ${CMAKE_ARGS} ../.. fi - cmake --build . --target package ${VERBOSE} -j$THREAD_NUM + if [[ -n "$VERBOSE" ]]; then + CMAKE_VERBOSE="--verbose" + fi + cmake --build . --target package ${CMAKE_VERBOSE} -j$THREAD_NUM echo "success to build mindspore project!" 
} From 7b5a31dc831bbf40578788b387b1383f32f24757 Mon Sep 17 00:00:00 2001 From: yangjie159 Date: Sun, 19 Apr 2020 23:42:40 +0800 Subject: [PATCH 360/367] add comment for export lite model --- mindspore/context.py | 4 ++-- mindspore/train/serialization.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mindspore/context.py b/mindspore/context.py index 2938b87119..ba0ac36b66 100644 --- a/mindspore/context.py +++ b/mindspore/context.py @@ -487,8 +487,8 @@ def set_context(**kwargs): enable_loop_sink (bool): Whether to enable loop sink. Default: False. enable_task_sink (bool): Whether to enable task sink. Default: True. enable_mem_reuse (bool): Whether to enable memory reuse. Default: True. - save_ms_model (bool): Whether to save model converted by graph. Default: False. - save_ms_model_path (str): Path to save converted model. Default: "." + save_ms_model (bool): Whether to save lite model converted by graph. Default: False. + save_ms_model_path (str): Path to save converted lite model. Default: "." enable_gpu_summary (bool): Whether to enable gpu summary. Default: True. save_graphs_path (str): Path to save graphs. Default: "." enable_auto_mixed_precision (bool): Whether to enable auto mixed precision. Default: True. diff --git a/mindspore/train/serialization.py b/mindspore/train/serialization.py index 8ec1b38804..ae17bf8116 100644 --- a/mindspore/train/serialization.py +++ b/mindspore/train/serialization.py @@ -426,7 +426,7 @@ def export(net, *inputs, file_name, file_format='GEIR'): - GEIR: Graph Engine Intermidiate Representation. An intermidiate representation format of Ascend model. - ONNX: Open Neural Network eXchange. An open format built to represent machine learning models. - - LITE: Huawei model format for mobile. + - LITE: Huawei model format for mobile. 
A lite model only for the MindSpore Lite """ logger.info("exporting model file:%s format:%s.", file_name, file_format) check_input_data(*inputs, data_class=Tensor) From 698c29eedfaf7922036130878946b75252410099 Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Mon, 20 Apr 2020 17:47:34 +0800 Subject: [PATCH 361/367] del graph --- graphengine | 1 - 1 file changed, 1 deletion(-) delete mode 160000 graphengine diff --git a/graphengine b/graphengine deleted file mode 160000 index 5869ad8541..0000000000 --- a/graphengine +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 5869ad8541403235113812f872f462bf19ecd11e From aadf06d70bdbde6e409e22520c755ece6c4bcfbf Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Mon, 20 Apr 2020 17:48:43 +0800 Subject: [PATCH 362/367] add graph --- graphengine | 1 + 1 file changed, 1 insertion(+) create mode 160000 graphengine diff --git a/graphengine b/graphengine new file mode 160000 index 0000000000..5869ad8541 --- /dev/null +++ b/graphengine @@ -0,0 +1 @@ +Subproject commit 5869ad8541403235113812f872f462bf19ecd11e From a67ff264a86087070ee11bedabce659a116c7037 Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Mon, 20 Apr 2020 20:15:34 +0800 Subject: [PATCH 363/367] fetch graph --- graphengine | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphengine b/graphengine index 5869ad8541..70bb745b45 160000 --- a/graphengine +++ b/graphengine @@ -1 +1 @@ -Subproject commit 5869ad8541403235113812f872f462bf19ecd11e +Subproject commit 70bb745b459ff9a0e7fc1008d15fe4b510f03da7 From 9dc561b7c68db208c65a99ca116b9eb12c2e67c6 Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Tue, 21 Apr 2020 10:00:45 +0800 Subject: [PATCH 364/367] modify topk --- mindspore/ccsrc/transform/op_declare.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 8b32e16b35..2d15bfa181 100755 
--- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -217,8 +217,8 @@ DECLARE_OP_USE_OUTPUT(Merge) DECLARE_OP_ADAPTER(Switch) DECLARE_OP_USE_OUTPUT(Switch) -DECLARE_OP_ADAPTER(TopKV2) -DECLARE_OP_USE_OUTPUT(TopKV2) +DECLARE_OP_ADAPTER(TopK) +DECLARE_OP_USE_OUTPUT(TopK) DECLARE_OP_ADAPTER(RealDiv) DECLARE_OP_USE_OUTPUT(RealDiv) From ea3fa240b5fdb90e29bf99bb87ed3825f1038fcb Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Tue, 21 Apr 2020 11:26:02 +0800 Subject: [PATCH 365/367] modify LogSoftmaxV2 --- mindspore/ccsrc/transform/op_declare.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 2d15bfa181..6fdb4b5434 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -268,8 +268,8 @@ DECLARE_OP_ADAPTER(Select) DECLARE_OP_USE_OUTPUT(Select) DECLARE_OP_ADAPTER(LessEqual) DECLARE_OP_USE_OUTPUT(LessEqual) -DECLARE_OP_ADAPTER(LogSoftmax) -DECLARE_OP_USE_OUTPUT(LogSoftmax) +DECLARE_OP_ADAPTER(LogSoftmaxV2) +DECLARE_OP_USE_OUTPUT(LogSoftmaxV2) DECLARE_OP_ADAPTER(TruncatedNormal) DECLARE_OP_USE_OUTPUT(TruncatedNormal) DECLARE_OP_ADAPTER(StridedSliceGrad) From fd344cb8f7a843b077fc950b940d981b876d5c3a Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Tue, 21 Apr 2020 11:38:18 +0800 Subject: [PATCH 366/367] add softmaxv2 --- mindspore/ccsrc/transform/op_declare.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 6fdb4b5434..933896a275 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ b/mindspore/ccsrc/transform/op_declare.h @@ -403,8 +403,8 @@ DECLARE_OP_ADAPTER(Sigmoid) DECLARE_OP_USE_OUTPUT(Sigmoid) DECLARE_OP_ADAPTER(SigmoidGrad) DECLARE_OP_USE_OUTPUT(SigmoidGrad) -DECLARE_OP_ADAPTER(Softmax) -DECLARE_OP_USE_OUTPUT(Softmax) 
+DECLARE_OP_ADAPTER(SoftmaxV2) +DECLARE_OP_USE_OUTPUT(SoftmaxV2) DECLARE_OP_ADAPTER(SoftmaxGrad) DECLARE_OP_USE_OUTPUT(SoftmaxGrad) DECLARE_OP_ADAPTER(Greater) @@ -447,6 +447,8 @@ DECLARE_OP_ADAPTER(Round) DECLARE_OP_USE_OUTPUT(Round) DECLARE_OP_ADAPTER(ApplyFtrl) DECLARE_OP_USE_OUTPUT(ApplyFtrl) +DECLARE_OP_ADAPTER(SparseApplyFtrlD) +DECLARE_OP_USE_OUTPUT(SparseApplyFtrlD) DECLARE_OP_ADAPTER(Diag) DECLARE_OP_USE_OUTPUT(Diag) DECLARE_OP_ADAPTER(DiagPart) From e5dc50533c4ba3b9925948a8842ff08960936cc2 Mon Sep 17 00:00:00 2001 From: chang zherui <760161589@qq.com> Date: Tue, 21 Apr 2020 11:50:35 +0800 Subject: [PATCH 367/367] modify op_declare.h --- mindspore/ccsrc/transform/convert.cc | 8 +- mindspore/ccsrc/transform/op_declare.cc | 212 ++++++++++-------------- mindspore/ccsrc/transform/op_declare.h | 6 - 3 files changed, 95 insertions(+), 131 deletions(-) diff --git a/mindspore/ccsrc/transform/convert.cc b/mindspore/ccsrc/transform/convert.cc index 417989247e..46e5586122 100755 --- a/mindspore/ccsrc/transform/convert.cc +++ b/mindspore/ccsrc/transform/convert.cc @@ -176,6 +176,7 @@ const char kNameAbsGrad[] = "AbsGrad"; const char kNameBinaryCrossEntropy[] = "BinaryCrossEntropy"; const char kNameBinaryCrossEntropyGrad[] = "BinaryCrossEntropyGrad"; const char kNameSparseApplyAdagrad[] = "SparseApplyAdagrad"; +const char kNameSparseApplyFtrlD[] = "SparseApplyFtrlD"; const char kNameAcosh[] = "Acosh"; const char kNameFloorMod[] = "FloorMod"; const char kNameSpaceToDepth[] = "SpaceToDepth"; @@ -204,7 +205,7 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameMaxPool), ADPT_DESC(MaxPool)}, {string(kNameAvgPool), ADPT_DESC(AvgPool)}, {string(kNameMaxPoolWithArgmax), ADPT_DESC(MaxPoolWithArgmax)}, - {string(kNameTopK), ADPT_DESC(TopKV2)}, + {string(kNameTopK), ADPT_DESC(TopK)}, {string(kNamePack), ADPT_DESC(Pack)}, {string(kNameUnpack), ADPT_DESC(Unpack)}, {string(kNameSplitD), ADPT_DESC(SplitD)}, @@ -329,7 +330,7 @@ std::unordered_map 
&DfGraphConvertor::get_adpt_ma {prim::kPrimMinimum->name(), ADPT_DESC(Minimum)}, {prim::kPrimSelect->name(), ADPT_DESC(Select)}, {string(kNameLessEqual), ADPT_DESC(LessEqual)}, - {prim::kPrimLogSoftmax->name(), ADPT_DESC(LogSoftmax)}, + {prim::kPrimLogSoftmax->name(), ADPT_DESC(LogSoftmaxV2)}, {string(kNameTruncatedNormal), ADPT_DESC(TruncatedNormal)}, {string(kNameStridedSliceGrad), ADPT_DESC(StridedSliceGrad)}, {prim::kPrimGelu->name(), ADPT_DESC(Gelu)}, @@ -363,7 +364,7 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {prim::kPrimMatMul->name(), ADPT_DESC(MatMul)}, {string(kNameConst), ADPT_DESC(Constant, Const)}, - {string(kNameSoftmax), ADPT_DESC(Softmax)}, + {string(kNameSoftmax), ADPT_DESC(SoftmaxV2)}, {string(kNameSoftmaxGrad), ADPT_DESC(SoftmaxGrad)}, {string(kNameParam), ADPT_DESC(Data)}, {string(kNameROIAlign), ADPT_DESC(ROIAlign)}, @@ -373,6 +374,7 @@ std::unordered_map &DfGraphConvertor::get_adpt_ma {string(kNameBinaryCrossEntropy), ADPT_DESC(BinaryCrossEntropy)}, {string(kNameBinaryCrossEntropyGrad), ADPT_DESC(BinaryCrossEntropyGrad)}, {string(kNameSparseApplyAdagrad), ADPT_DESC(SparseApplyAdagradD)}, + {string(kNameSparseApplyFtrlD), ADPT_DESC(SparseApplyFtrlD)}, {string(kNameAcosh), ADPT_DESC(Acosh)}, {string(kNameFloorMod), ADPT_DESC(FloorMod)}, {string(kNameSpaceToDepth), ADPT_DESC(SpaceToDepth)}, diff --git a/mindspore/ccsrc/transform/op_declare.cc b/mindspore/ccsrc/transform/op_declare.cc index 420edc685a..858b9b6b39 100644 --- a/mindspore/ccsrc/transform/op_declare.cc +++ b/mindspore/ccsrc/transform/op_declare.cc @@ -138,11 +138,10 @@ OUTPUT_MAP(ApplyMomentum) = {{0, OUTPUT_DESC(var)}}; INPUT_MAP(Summary) = {{2, INPUT_DESC(x)}}; ATTR_MAP(Summary) = EMPTY_ATTR_MAP; -// data +// Data INPUT_MAP(Data) = EMPTY_INPUT_MAP; ATTR_MAP(Data) = EMPTY_ATTR_MAP; -// resnet ops in ge // BatchNorm INPUT_MAP(BatchNorm) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(scale)}, @@ -193,10 +192,9 @@ ATTR_MAP(PRelu) = EMPTY_ATTR_MAP; OUTPUT_MAP(PRelu) = {{0, 
OUTPUT_DESC(y)}}; // PReluGrad -INPUT_MAP(PReluGrad) = { - {1, INPUT_DESC(input_gradients)}, {2, INPUT_DESC(input_features)}, {3, INPUT_DESC(input_weights)}}; +INPUT_MAP(PReluGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(features)}, {3, INPUT_DESC(weights)}}; ATTR_MAP(PReluGrad) = EMPTY_ATTR_MAP; -OUTPUT_MAP(PReluGrad) = {{0, OUTPUT_DESC(output_backprops_dx)}, {1, OUTPUT_DESC(output_backprops_da)}}; +OUTPUT_MAP(PReluGrad) = {{0, OUTPUT_DESC(dx)}, {1, OUTPUT_DESC(da)}}; // Sigmoid INPUT_MAP(Sigmoid) = {{1, INPUT_DESC(x)}}; @@ -241,12 +239,12 @@ ATTR_MAP(CumsumD) = {{"exclusive", ATTR_DESC(exclusive, AnyTraits())}, {"reverse", ATTR_DESC(reverse, AnyTraits())}}; OUTPUT_MAP(CumsumD) = {{0, OUTPUT_DESC(y)}}; -// softmax -INPUT_MAP(Softmax) = {{1, INPUT_DESC(x)}}; -ATTR_MAP(Softmax) = { - {"axis", ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}, +// SoftmaxV2 +INPUT_MAP(SoftmaxV2) = {{1, INPUT_DESC(x)}}; +ATTR_MAP(SoftmaxV2) = { + {"axis", ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}, }; -OUTPUT_MAP(Softmax) = {{0, OUTPUT_DESC(y)}}; +OUTPUT_MAP(SoftmaxV2) = {{0, OUTPUT_DESC(y)}}; // SoftmaxGrad INPUT_MAP(SoftmaxGrad) = {{1, INPUT_DESC(softmax)}, {2, INPUT_DESC(grad_softmax)}}; @@ -269,21 +267,21 @@ ATTR_MAP(GatherV2) = EMPTY_ATTR_MAP; OUTPUT_MAP(GatherV2) = {{0, OUTPUT_DESC(y)}}; // ReduceSum -INPUT_MAP(ReduceSum) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axis)}}; +INPUT_MAP(ReduceSum) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axes)}}; ATTR_MAP(ReduceSum) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; OUTPUT_MAP(ReduceSum) = {{0, OUTPUT_DESC(y)}}; // ReduceSumD INPUT_MAP(ReduceSumD) = {{1, INPUT_DESC(x)}}; INPUT_ATTR_MAP(ReduceSumD) = { - {2, ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}}; + {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(ReduceSumD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; OUTPUT_MAP(ReduceSumD) = {{0, OUTPUT_DESC(y)}}; // ReduceProdD INPUT_MAP(ReduceProdD) = {{1, INPUT_DESC(x)}}; INPUT_ATTR_MAP(ReduceProdD) = { - {2, 
ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}}; + {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(ReduceProdD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; OUTPUT_MAP(ReduceProdD) = {{0, OUTPUT_DESC(y)}}; @@ -294,7 +292,7 @@ ATTR_MAP(CumprodD) = {{"exclusive", ATTR_DESC(exclusive, AnyTraits())}, {"reverse", ATTR_DESC(reverse, AnyTraits())}}; OUTPUT_MAP(CumprodD) = {{0, OUTPUT_DESC(y)}}; -// SoftmaxCrossEntropyWithLogits/ +// SoftmaxCrossEntropyWithLogits INPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{1, INPUT_DESC(features)}, {2, INPUT_DESC(labels)}}; ATTR_MAP(SoftmaxCrossEntropyWithLogits) = EMPTY_ATTR_MAP; OUTPUT_MAP(SoftmaxCrossEntropyWithLogits) = {{0, OUTPUT_DESC(loss)}, {1, OUTPUT_DESC(backprop)}}; @@ -306,7 +304,7 @@ INPUT_ATTR_MAP(MeanGrad) = {{2, ATTR_DESC(mean_grad_output_shape_value, kOpForma ATTR_MAP(MeanGrad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; INPUT_MAP(SliceD) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(SliceD) = {{2, ATTR_DESC(begin, AnyTraits(), AnyTraits>())}, +INPUT_ATTR_MAP(SliceD) = {{2, ATTR_DESC(offsets, AnyTraits(), AnyTraits>())}, {3, ATTR_DESC(size, AnyTraits(), AnyTraits>())}}; ATTR_MAP(SliceD) = EMPTY_ATTR_MAP; OUTPUT_MAP(SliceD) = {{0, OUTPUT_DESC(y)}}; @@ -411,42 +409,10 @@ ATTR_MAP(BoundingBoxDecode) = { }; OUTPUT_MAP(BoundingBoxDecode) = {{0, OUTPUT_DESC(bboxes)}}; -#ifdef VALID_CODE - -// Less -INPUT_MAP(Less) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}}; -ATTR_MAP(Less) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Less) = {{0, OUTPUT_DESC(z)}}; - -// Cast -INPUT_MAP(Cast) = {{1, INPUT_DESC(x)}}; -INPUT_ATTR_MAP(Cast) = {{2, ATTR_DESC(dst_type, AnyTraits())}}; -ATTR_MAP(Cast) = {{"Truncate", ATTR_DESC(truncate, AnyTraits())}}; -OUTPUT_MAP(Cast) = {{0, OUTPUT_DESC(y)}}; - -// Minimum -INPUT_MAP(Minimum) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}}; -ATTR_MAP(Minimum) = {{"alpha", ATTR_DESC(alpha, AnyTraits())}, {"beta", ATTR_DESC(beta, AnyTraits())}}; -OUTPUT_MAP(Minimum) = {{0, OUTPUT_DESC(z)}}; - -// Sub -INPUT_MAP(Sub) = {{1, 
INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(Sub) = {{"alpha", ATTR_DESC(alpha, AnyTraits())}, {"beta", ATTR_DESC(beta, AnyTraits())}}; - -#endif - -// TopKV2 -INPUT_MAP(TopKV2) = { - {1, INPUT_DESC(input)}, - {2, INPUT_DESC(k)}, -}; - -ATTR_MAP(TopKV2) = {{"T", ATTR_DESC(T, AnyTraits())}, {"sorted", ATTR_DESC(sorted, AnyTraits())}}; - -OUTPUT_MAP(TopKV2) = { - {0, OUTPUT_DESC(values)}, - {1, OUTPUT_DESC(indices)}, -}; +// TopK +INPUT_MAP(TopK) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(k)}}; +ATTR_MAP(TopK) = {{"sorted", ATTR_DESC(sorted, AnyTraits())}}; +OUTPUT_MAP(TopK) = {{0, OUTPUT_DESC(values)}, {1, OUTPUT_DESC(indices)}}; // Multiply INPUT_MAP(Multiply) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(y)}}; @@ -486,7 +452,7 @@ ATTR_MAP(Iou) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; OUTPUT_MAP(Iou) = {{0, OUTPUT_DESC(overlap)}}; // ResizeNearestNeighborD -INPUT_MAP(ResizeNearestNeighborD) = {{1, INPUT_DESC(images)}}; +INPUT_MAP(ResizeNearestNeighborD) = {{1, INPUT_DESC(x)}}; ATTR_MAP(ResizeNearestNeighborD) = { {"size", ATTR_DESC(size, AnyTraits>(), AnyTraits>())}, {"align_corners", ATTR_DESC(align_corners, AnyTraits())}}; @@ -511,9 +477,9 @@ OUTPUT_MAP(ApplyAdam) = {{0, OUTPUT_DESC(var)}}; #endif // Relu6 -INPUT_MAP(Relu6) = {{1, INPUT_DESC(features)}}; +INPUT_MAP(Relu6) = {{1, INPUT_DESC(x)}}; ATTR_MAP(Relu6) = EMPTY_ATTR_MAP; -OUTPUT_MAP(Relu6) = {{0, OUTPUT_DESC(activations)}}; +OUTPUT_MAP(Relu6) = {{0, OUTPUT_DESC(y)}}; // Relu6Grad INPUT_MAP(Relu6Grad) = {{1, INPUT_DESC(gradients)}, {2, INPUT_DESC(features)}}; @@ -525,8 +491,8 @@ INPUT_MAP(ResizeBilinearGrad) = {{1, INPUT_DESC(grads)}, {2, INPUT_DESC(original ATTR_MAP(ResizeBilinearGrad) = {{"align_corners", ATTR_DESC(align_corners, AnyTraits())}}; OUTPUT_MAP(ResizeBilinearGrad) = {{0, OUTPUT_DESC(y)}}; -// ResizeBilinear -INPUT_MAP(ResizeBilinearD) = {{1, INPUT_DESC(images)}}; +// ResizeBilinearD +INPUT_MAP(ResizeBilinearD) = {{1, INPUT_DESC(x)}}; ATTR_MAP(ResizeBilinearD) = { {"size", ATTR_DESC(size, 
AnyTraits>(), AnyTraits>())}, {"align_corners", ATTR_DESC(align_corners, AnyTraits())}}; @@ -549,9 +515,9 @@ OUTPUT_MAP(NMSWithMask) = { {0, OUTPUT_DESC(selected_boxes)}, {1, OUTPUT_DESC(selected_idx)}, {2, OUTPUT_DESC(selected_mask)}}; // Unpack -INPUT_MAP(Unpack) = {{1, INPUT_DESC(value)}}; +INPUT_MAP(Unpack) = {{1, INPUT_DESC(x)}}; ATTR_MAP(Unpack) = {{"axis", ATTR_DESC(axis, AnyTraits())}, {"num", ATTR_DESC(num, AnyTraits())}}; -DYN_OUTPUT_MAP(Unpack) = {{0, DYN_OUTPUT_DESC(output)}}; +DYN_OUTPUT_MAP(Unpack) = {{0, DYN_OUTPUT_DESC(y)}}; // ScatterNdUpdate INPUT_MAP(ScatterNdUpdate) = {{1, INPUT_DESC(var)}, {2, INPUT_DESC(indices)}, {3, INPUT_DESC(updates)}}; @@ -584,8 +550,8 @@ INPUT_MAP(SigmoidCrossEntropyWithLogitsGrad) = { ATTR_MAP(SigmoidCrossEntropyWithLogitsGrad) = EMPTY_ATTR_MAP; OUTPUT_MAP(SigmoidCrossEntropyWithLogitsGrad) = {{0, OUTPUT_DESC(gradient)}}; -// ScatterNd -INPUT_MAP(ScatterNdD) = {{1, INPUT_DESC(indices)}, {2, INPUT_DESC(updates)}}; +// ScatterNdD +INPUT_MAP(ScatterNdD) = {{1, INPUT_DESC(indices)}, {2, INPUT_DESC(x)}}; INPUT_ATTR_MAP(ScatterNdD) = { {3, ATTR_DESC(shape, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(ScatterNdD) = EMPTY_ATTR_MAP; @@ -596,24 +562,14 @@ INPUT_MAP(PadD) = {{1, INPUT_DESC(x)}}; ATTR_MAP(PadD) = {{"paddings", ATTR_DESC(paddings, AnyTraits>>())}}; OUTPUT_MAP(PadD) = {{0, OUTPUT_DESC(y)}}; -// MirrorPad -INPUT_MAP(MirrorPad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}}; -ATTR_MAP(MirrorPad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; -OUTPUT_MAP(MirrorPad) = {{0, OUTPUT_DESC(y)}}; - -// MirrorPadGrad -INPUT_MAP(MirrorPadGrad) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(paddings)}}; -ATTR_MAP(MirrorPadGrad) = {{"mode", ATTR_DESC(mode, AnyTraits())}}; -OUTPUT_MAP(MirrorPadGrad) = {{0, OUTPUT_DESC(y)}}; - // GatherNd -INPUT_MAP(GatherNd) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; +INPUT_MAP(GatherNd) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(indices)}}; ATTR_MAP(GatherNd) = EMPTY_ATTR_MAP; OUTPUT_MAP(GatherNd) = {{0, 
OUTPUT_DESC(y)}}; // ROIAlign INPUT_MAP(ROIAlign) = {{1, INPUT_DESC(features)}, {2, INPUT_DESC(rois)}}; -OUTPUT_MAP(ROIAlign) = {{0, OUTPUT_DESC(output)}}; +OUTPUT_MAP(ROIAlign) = {{0, OUTPUT_DESC(y)}}; ATTR_MAP(ROIAlign) = {{"pooled_height", ATTR_DESC(pooled_height, AnyTraits())}, {"pooled_width", ATTR_DESC(pooled_width, AnyTraits())}, {"spatial_scale", ATTR_DESC(spatial_scale, AnyTraits())}, @@ -632,13 +588,13 @@ ATTR_MAP(ROIAlignGrad) = { // ArgMaxD INPUT_MAP(ArgMaxD) = {{1, INPUT_DESC(x)}}; ATTR_MAP(ArgMaxD) = {{"axis", ATTR_DESC(dimension, AnyTraits())}, - {"output_type", ATTR_DESC(output_type, AnyTraits())}}; + {"output_type", ATTR_DESC(dtype, AnyTraits())}}; OUTPUT_MAP(ArgMaxD) = {{0, OUTPUT_DESC(y)}}; // ArgMinD INPUT_MAP(ArgMinD) = {{1, INPUT_DESC(x)}}; ATTR_MAP(ArgMinD) = {{"axis", ATTR_DESC(dimension, AnyTraits())}, - {"output_type", ATTR_DESC(output_type, AnyTraits())}}; + {"output_type", ATTR_DESC(dtype, AnyTraits())}}; OUTPUT_MAP(ArgMinD) = {{0, OUTPUT_DESC(y)}}; // ArgMaxWithValue @@ -654,14 +610,14 @@ ATTR_MAP(ArgMinWithValue) = {{"axis", ATTR_DESC(dimension, AnyTraits())}, OUTPUT_MAP(ArgMinWithValue) = {{0, OUTPUT_DESC(indice)}, {1, OUTPUT_DESC(values)}}; // ReduceAll -INPUT_MAP(ReduceAll) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axis)}}; +INPUT_MAP(ReduceAll) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(axes)}}; ATTR_MAP(ReduceAll) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; OUTPUT_MAP(ReduceAll) = {{0, OUTPUT_DESC(y)}}; // ReduceMeanD INPUT_MAP(ReduceMeanD) = {{1, INPUT_DESC(x)}}; INPUT_ATTR_MAP(ReduceMeanD) = { - {2, ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}}; + {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(ReduceMeanD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; OUTPUT_MAP(ReduceMeanD) = {{0, OUTPUT_DESC(y)}}; @@ -728,11 +684,12 @@ INPUT_MAP(BiasAddGrad) = {{1, INPUT_DESC(x)}}; ATTR_MAP(BiasAddGrad) = {{"data_format", ATTR_DESC(data_format, AnyTraits())}}; OUTPUT_MAP(BiasAddGrad) = {{0, OUTPUT_DESC(y)}}; -// 
maxpoolgrad +// MaxPoolGrad INPUT_MAP(MaxPoolGrad) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}, {3, INPUT_DESC(grad)}}; ATTR_MAP(MaxPoolGrad) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, - {"padding", ATTR_DESC(padding, AnyTraits())}}; + {"padding", ATTR_DESC(padding, AnyTraits())}, + {"data_format", ATTR_DESC(data_format, AnyTraits())}}; OUTPUT_MAP(MaxPoolGrad) = {{0, OUTPUT_DESC(y)}}; // avgpoolgrad @@ -751,57 +708,55 @@ ATTR_MAP(MaxPoolWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyT OUTPUT_MAP(MaxPoolWithArgmax) = {{0, OUTPUT_DESC(y)}, {1, OUTPUT_DESC(argmax)}}; // MaxPoolGradWithArgmax -INPUT_MAP(MaxPoolGradWithArgmax) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(grad)}, {3, INPUT_DESC(argmax)}}; +INPUT_MAP(MaxPoolGradWithArgmax) = { + {1, INPUT_DESC(x)}, + {2, INPUT_DESC(grad)}, + {3, INPUT_DESC(argmax)}, +}; ATTR_MAP(MaxPoolGradWithArgmax) = {{"ksize", ATTR_DESC(ksize, AnyTraits(), AnyTraits>())}, {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, {"padding", ATTR_DESC(padding, AnyTraits())}}; OUTPUT_MAP(MaxPoolGradWithArgmax) = {{0, OUTPUT_DESC(y)}}; -// ExtractImagePatches -INPUT_MAP(ExtractImagePatches) = {{1, INPUT_DESC(images)}}; -ATTR_MAP(ExtractImagePatches) = {{"ksizes", ATTR_DESC(ksizes, AnyTraits(), AnyTraits>())}, - {"strides", ATTR_DESC(strides, AnyTraits(), AnyTraits>())}, - {"rates", ATTR_DESC(rates, AnyTraits(), AnyTraits>())}, - {"padding", ATTR_DESC(padding, AnyTraits())}}; -OUTPUT_MAP(ExtractImagePatches) = {{0, OUTPUT_DESC(y)}}; - // Conv2D INPUT_MAP(Conv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}}; -ATTR_MAP(Conv2D) = { - {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, - {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, -}; +ATTR_MAP(Conv2D) = {{"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, + {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, + 
{"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, + {"data_format", ATTR_DESC(data_format, AnyTraits())}, + {"group", ATTR_DESC(groups, AnyTraits())}}; OUTPUT_MAP(Conv2D) = {{0, OUTPUT_DESC(y)}}; // Conv2DBackpropInputD -INPUT_MAP(Conv2DBackpropInputD) = {{1, INPUT_DESC(out_backprop)}, {2, INPUT_DESC(filters)}}; +INPUT_MAP(Conv2DBackpropInputD) = {{1, INPUT_DESC(out_backprop)}, {2, INPUT_DESC(filter)}}; INPUT_ATTR_MAP(Conv2DBackpropInputD) = { - {3, ATTR_DESC(input_sizes, AnyTraits>(), AnyTraits>())}}; + {3, ATTR_DESC(input_size, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(Conv2DBackpropInputD) = { {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, -}; + {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, + {"data_format", ATTR_DESC(data_format, AnyTraits())}, + {"group", ATTR_DESC(groups, AnyTraits())}}; OUTPUT_MAP(Conv2DBackpropInputD) = {{0, OUTPUT_DESC(y)}}; // Conv2DBackpropFilterD INPUT_MAP(Conv2DBackpropFilterD) = {{1, INPUT_DESC(out_backprop)}, {2, INPUT_DESC(x)}}; INPUT_ATTR_MAP(Conv2DBackpropFilterD) = { - {3, ATTR_DESC(filter_sizes, AnyTraits>(), AnyTraits>())}}; + {3, ATTR_DESC(filter_size, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(Conv2DBackpropFilterD) = { {"pad_list", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, -}; + {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, + {"data_format", ATTR_DESC(data_format, AnyTraits())}, + {"group", ATTR_DESC(groups, AnyTraits())}}; OUTPUT_MAP(Conv2DBackpropFilterD) = {{0, OUTPUT_DESC(y)}}; // DepthwiseConv2D INPUT_MAP(DepthwiseConv2D) = {{1, INPUT_DESC(x)}, {2, INPUT_DESC(filter)}}; ATTR_MAP(DepthwiseConv2D) = { - {"stride", 
ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, + {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, {"pads", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, {"data_format", ATTR_DESC(data_format, AnyTraits())}, }; OUTPUT_MAP(DepthwiseConv2D) = {{0, OUTPUT_DESC(y)}}; @@ -811,9 +766,9 @@ INPUT_MAP(DepthwiseConv2DBackpropInputD) = {{2, INPUT_DESC(filter)}, {3, INPUT_D INPUT_ATTR_MAP(DepthwiseConv2DBackpropInputD) = { {1, ATTR_DESC(input_size, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(DepthwiseConv2DBackpropInputD) = { - {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, + {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, {"pads", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, }; OUTPUT_MAP(DepthwiseConv2DBackpropInputD) = {{0, OUTPUT_DESC(input_grad)}}; @@ -822,16 +777,16 @@ INPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{1, INPUT_DESC(input)}, {3, INPUT_D INPUT_ATTR_MAP(DepthwiseConv2DBackpropFilterD) = { {2, ATTR_DESC(filter_size, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(DepthwiseConv2DBackpropFilterD) = { - {"stride", ATTR_DESC(strides, AnyTraits>(), AnyTraits>())}, + {"stride", ATTR_DESC(strides, "pad", AnyTraits>())}, {"pads", ATTR_DESC(pads, AnyTraits>(), AnyTraits>())}, - {"dilation", ATTR_DESC(dilations, AnyTraits>(), AnyTraits>())}, + {"dilation", ATTR_DESC(dilations, "pad", AnyTraits>())}, }; OUTPUT_MAP(DepthwiseConv2DBackpropFilterD) = {{0, OUTPUT_DESC(filter_grad)}}; // MatMul INPUT_MAP(MatMul) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(MatMul) = {{"transpose_a", ATTR_DESC(transpose_a, AnyTraits())}, - {"transpose_b", ATTR_DESC(transpose_b, AnyTraits())}}; +ATTR_MAP(MatMul) = {{"transpose_a", ATTR_DESC(transpose_x1, AnyTraits())}, + {"transpose_b", ATTR_DESC(transpose_x2, AnyTraits())}}; 
OUTPUT_MAP(MatMul) = {{0, OUTPUT_DESC(y)}}; // Merge @@ -878,10 +833,10 @@ ATTR_MAP(Sub) = EMPTY_ATTR_MAP; OUTPUT_MAP(Sub) = {{0, OUTPUT_DESC(y)}}; // SplitD -INPUT_MAP(SplitD) = {{1, INPUT_DESC(value)}}; +INPUT_MAP(SplitD) = {{1, INPUT_DESC(x)}}; ATTR_MAP(SplitD) = {{"axis", ATTR_DESC(split_dim, AnyTraits())}, {"output_num", ATTR_DESC(num_split, AnyTraits())}}; -DYN_OUTPUT_MAP(SplitD) = {{0, DYN_OUTPUT_DESC(output)}}; +DYN_OUTPUT_MAP(SplitD) = {{0, DYN_OUTPUT_DESC(y)}}; // Neg INPUT_MAP(Neg) = {{1, INPUT_DESC(x)}}; @@ -908,12 +863,12 @@ OUTPUT_MAP(Pack) = {{0, OUTPUT_DESC(y)}}; // ConcatD INPUT_MAP(ConcatD) = EMPTY_INPUT_MAP; -DYN_INPUT_MAP(ConcatD) = {{1, DYN_INPUT_DESC(input_values)}}; +DYN_INPUT_MAP(ConcatD) = {{1, DYN_INPUT_DESC(x)}}; ATTR_MAP(ConcatD) = { {"axis", ATTR_DESC(concat_dim, AnyTraits())}, {"inputNums", ATTR_DESC(N, AnyTraits())}, }; -OUTPUT_MAP(ConcatD) = {{0, OUTPUT_DESC(output_data)}}; +OUTPUT_MAP(ConcatD) = {{0, OUTPUT_DESC(y)}}; // Less INPUT_MAP(Less) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; @@ -948,14 +903,14 @@ OUTPUT_MAP(TanhGrad) = {{0, OUTPUT_DESC(z)}}; // ReduceMinD INPUT_MAP(ReduceMinD) = {{1, INPUT_DESC(x)}}; INPUT_ATTR_MAP(ReduceMinD) = { - {2, ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}}; + {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(ReduceMinD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; OUTPUT_MAP(ReduceMinD) = {{0, OUTPUT_DESC(y)}}; // ReduceMaxD INPUT_MAP(ReduceMaxD) = {{1, INPUT_DESC(x)}}; INPUT_ATTR_MAP(ReduceMaxD) = { - {2, ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}}; + {2, ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; ATTR_MAP(ReduceMaxD) = {{"keep_dims", ATTR_DESC(keep_dims, AnyTraits())}}; OUTPUT_MAP(ReduceMaxD) = {{0, OUTPUT_DESC(y)}}; @@ -1040,11 +995,11 @@ INPUT_MAP(LessEqual) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; ATTR_MAP(LessEqual) = EMPTY_ATTR_MAP; OUTPUT_MAP(LessEqual) = {{0, OUTPUT_DESC(y)}}; -// LogSoftmax -INPUT_MAP(LogSoftmax) = {{1, INPUT_DESC(logits)}}; 
-ATTR_MAP(LogSoftmax) = { - {"axis", ATTR_DESC(axis, AnyTraits>(), AnyTraits>())}}; -OUTPUT_MAP(LogSoftmax) = {{0, OUTPUT_DESC(logsoftmax)}}; +// LogSoftmaxV2 +INPUT_MAP(LogSoftmaxV2) = {{1, INPUT_DESC(logits)}}; +ATTR_MAP(LogSoftmaxV2) = { + {"axis", ATTR_DESC(axes, AnyTraits>(), AnyTraits>())}}; +OUTPUT_MAP(LogSoftmaxV2) = {{0, OUTPUT_DESC(logsoftmax)}}; // RandomChoiceWithMask INPUT_MAP(RandomChoiceWithMask) = {{1, INPUT_DESC(x)}}; @@ -1126,8 +1081,8 @@ OUTPUT_MAP(LayerNormGrad) = {{0, OUTPUT_DESC(pd_x)}, {1, OUTPUT_DESC(pd_gamma)}, // BatchMatMul INPUT_MAP(BatchMatMul) = {{1, INPUT_DESC(x1)}, {2, INPUT_DESC(x2)}}; -ATTR_MAP(BatchMatMul) = {{"transpose_x1", ATTR_DESC(adj_x, AnyTraits())}, - {"transpose_x2", ATTR_DESC(adj_y, AnyTraits())}}; +ATTR_MAP(BatchMatMul) = {{"transpose_x1", ATTR_DESC(adj_x1, AnyTraits())}, + {"transpose_x2", ATTR_DESC(adj_x2, AnyTraits())}}; OUTPUT_MAP(BatchMatMul) = {{0, OUTPUT_DESC(y)}}; // DropoutDoMask @@ -1178,6 +1133,19 @@ ATTR_MAP(SparseApplyAdagradD) = {{"lr", ATTR_DESC(lr, AnyTraits())}, {"use_locking", ATTR_DESC(use_locking, AnyTraits())}}; OUTPUT_MAP(SparseApplyAdagradD) = {{0, OUTPUT_DESC(var)}}; +// SparseApplyFtrlD +INPUT_MAP(SparseApplyFtrlD) = {{1, INPUT_DESC(var)}, + {2, INPUT_DESC(accum)}, + {3, INPUT_DESC(linear)}, + {4, INPUT_DESC(grad)}, + {5, INPUT_DESC(indices)}}; +ATTR_MAP(SparseApplyFtrlD) = {{"use_locking", ATTR_DESC(use_locking, AnyTraits())}, + {"lr", ATTR_DESC(lr, AnyTraits())}, + {"l1", ATTR_DESC(l1, AnyTraits())}, + {"l2", ATTR_DESC(l2, AnyTraits())}, + {"lr_power", ATTR_DESC(lr_power, AnyTraits())}}; +OUTPUT_MAP(SparseApplyFtrlD) = {{0, OUTPUT_DESC(var)}}; + // SpaceToDepth INPUT_MAP(SpaceToDepth) = {{1, INPUT_DESC(x)}}; ATTR_MAP(SpaceToDepth) = {{"block_size", ATTR_DESC(block_size, AnyTraits())}}; diff --git a/mindspore/ccsrc/transform/op_declare.h b/mindspore/ccsrc/transform/op_declare.h index 933896a275..9fbc97f3c9 100755 --- a/mindspore/ccsrc/transform/op_declare.h +++ 
b/mindspore/ccsrc/transform/op_declare.h @@ -95,8 +95,6 @@ DECLARE_OP_USE_OUTPUT(MaxPoolGradWithArgmax) DECLARE_OP_ADAPTER(Conv2D) DECLARE_OP_USE_ENUM(Conv2D) DECLARE_OP_USE_OUTPUT(Conv2D) -DECLARE_OP_ADAPTER(ExtractImagePatches) -DECLARE_OP_USE_OUTPUT(ExtractImagePatches) DECLARE_OP_ADAPTER(Conv2DBackpropInputD) DECLARE_OP_USE_ENUM(Conv2DBackpropInputD) DECLARE_OP_USE_INPUT_ATTR(Conv2DBackpropInputD) @@ -157,10 +155,6 @@ DECLARE_OP_USE_INPUT_ATTR(ScatterNdD) DECLARE_OP_USE_OUTPUT(ScatterNdD) DECLARE_OP_ADAPTER(PadD) DECLARE_OP_USE_OUTPUT(PadD) -DECLARE_OP_ADAPTER(MirrorPad) -DECLARE_OP_USE_OUTPUT(MirrorPad) -DECLARE_OP_ADAPTER(MirrorPadGrad) -DECLARE_OP_USE_OUTPUT(MirrorPadGrad) DECLARE_OP_ADAPTER(BoundingBoxEncode) DECLARE_OP_USE_OUTPUT(BoundingBoxEncode) DECLARE_OP_ADAPTER(BoundingBoxDecode)

    zJTYs^SUwYT{w>Z;G`J|+J;8Qu-mpBPJ z5>m@Co%H8^-j2a))!TB@(E#oZuZJkPVmmOpF6hu%&zp$l?zYa%RKAQQ^?pbk>2PS| z=ij6)!*z4#5_2jo*g~os!KC}@oJ+WW%UK9CQl4@3XukswInU%+Al3fAh5Yv|CP*!FvnRQ2&c^!TA?@|EoNU_xvwf&VTglnZPy!F42G88EGL5?*Wr>$MTlFgrQ;2 zHo>{QL#}B-z6b`KmG^yG#8y>92-CZ9wQti2NbJofq*Bn5@Nu}u_QhEE74%6;SAE)# zN84r({SeYj<`ims2j*ZKc&$L&KZAObp7x;(dp8?63r?-sh2Tx^S0fNQ&91qn}-a_LW5H9Y-1Dq4cHc zi)jScUd_`Q9~E_I3;by3Y#~%D&$9GbW~`;{c0tb9bRS!5VjCBFQyjL|Et4VC(Z0iy~LsOIV!>Trt{t2S0!@szlkq^ znZ9WB3#qO4z^Qz5O@LCkAz;~_)~kne_2ZkjJE%BI%lvl7R(g}WZ`seUWf&bwFdlO;mxjODus5+_jW+c2lT>mL&-E;qQJ@N%Urz#yF0$7|&S8$jvkCMNYmHNdS&DOIw|@$*hrjL(3vm^E zBG#J0n}1@@a+7u}?=KI1`C2m0KJu$8zNI{muF&hmZR(bmmX=f5!_*eiGL{;1&!GI0 z*N*c1?ix1beziUSj<277O0F|B!dVm0QVKB#J>1vr9k(2JutDV=Ea3Q;bTD!yDA1FiaS6Z~j-PY0u%h~c> zfV_aR<+q;LVUGvxJAc>FB)O%EL|=j?U5Z4LzI#xHU)zFWevjmI-N#zIc#eNN#xRd^ z^Z~R%cke}QzrvfNXu_dJ?ZaC=IjMUJPJMrB-Wsr<5`Ethd@sVDQF1LM=1jElk%W#z zqj#ljXF%7Hj~1OawTKF+QF1UzU&~ zz^QMw9IB+Qf)gRMmU&NEpU;Vh+;LMYxDh&3Vt$NVQZ_gbLqB{GX$?saLV8e@LoY*P z?$x1Hm-~~C3Tjv$>0GP&E;P`}w-17YTqS-qFgc&Vbtx6-eIvS0E8hR8RG@BQEBicM z+p*M%mXoJXAl3Cp{0sJqeIU8s*kUI!RMKwNPl@|v#PGe9(|)FU+;{D^BA&FI4JnTy z&|Wj~^wK#JSf)_ArX+dbQZuE;y*)q&bK@o58+P8qH*-e zUL~z_u)l~N&N^9FpQN$wlzAj69}XUNB+l5{#n)0|K?(h#Org^+wHRAhKDs-ko4b=G z=b2-UZJFBH3m*Ql$JAeeBLd%cxRxN>cp2juKC}gXP^*08#i&SaMqyz3Kop{e~~=9 z(=tw_ZHqC(@Y3blChEiE$N zojudW)%A?-E#dgEme4RLTuE+h4>>%2gKfNxgPK0%yMHRIP_vhVB2Fdc48HpJm?E^J z>0HIv%DcH_OIl;!%A@p@vv_ECp<+YKx<#Tz@_V&#-lb+ojNl293 z`jo`wF+5f92d?679UVPt1LO7%?SrV<^cMTimnY#1>G>^M#hJ;<4{fu~XnWh};qY>* zlU=1Gt*3-}5$@D`RwXCvQ03}f?}QdzBNQ_d+zsM>QM9=Hr%W!TzHfPKY`>?(_ivLk z1HSUR_YxOnNopL!onzI5x)waE6}8)HNYg#}ayhwVo`-~2E>nC={^hupt3AbGGyNg) zw1PAJ?(SKzJ#XDYQ}v>+2Gyd`m8eaeuAH`f_eQ@02sUBYj0S@3;a=@w$Ml`)JCeg4 z{)Z5MF<-=_Ga=_gz3+joUWB1ajGeEvp4kkIg1;=T5r&1IY$t-Au4Ifto)+ zNzMpcO#L2-cD5w#sQkCBIJuv+O4Alj`K~W83;$urp!%3i(~0>=S&P(z2Oz7oT(x>e zLo;6pJ99myrf#?H?l$FrpY_YU0gi7YM=>Z>&*3!A)m*hkwCFcdTFE7b$R*CI_t_N( zt`9hxQq_9d<5`Eq+m1Ke(X$)6F6qj7is@eyMq4($j7O3>=|bzxoO2yZl_x!W=@lux 
z)8(7r#sfiG^aIh(Z-!<3?Z`o0+drPl5$V*UdK%QZ9a(a}N@cNeZOhEBoI~D`n4rDO)g#UY)cSMgrlX~25pyW{T}1xQQku-Uh`c6| zQeK#Y%ux1c{SKndn!tikV@XS4hF9`UOmU4P64XqVSXtCeD@1ECi+Mvh4Ch;-XRdiN4^#}EJh-3f1}E!>DK zYeLf~dwvF_DTBoFN-&c)u{+L*lhoz*dtH?}8R*@$^Ac>yI%3+_v2xg5C z%1tc+y(U_m_UqcnOZ-*a`Op;FQBB+(=MeH?V7DcDuwt|h3K&40D#W{oH?1w{KzsT` zQmiLTAHI}qE9(pKsKzy6%8>v5sug643a2*n4S`c-mZJz#n%X-hfzriOnU^8H=aEk- z1*re?Xn+j%=w-x2{bUdZaGau<)E@x?J4OLWIWOBfs1$HBM%)_yoJ?F_($gfFDTK5+c-SS{6) zo_{@*KQ*YlH{SGksZ)M05Q?>CaJO1Sg%>L|hI zJfv@7E8okzs}1rwt(BB9DND3gN-1HS>L<0!(Ws8J`SmYzFH8B$d*-WWR_m-v?%G<7 zfeoMKQO~VY8Y}HWar;?EXWB4jt-HPYPc^a77T3Pm@e5cl=3Kv1pe^~zS?<|~Tpc>@ zCb-MLPrA~nC!V0Q+A-*^eF>#nLJL9(Rt*@QhxY!8-l6{p5vB>mkf#oyjTT{Rt-(UO zgK)J^!My=`HQu(z4dkDz#Vkju<56mnCY~J$n!;TVNkNRoaE829BR~Ius5Cf-_j|(Y znCVwgzH(B(Y3?N~<)%iLBHU}|bg*K6{^P^7peYIMz0SrvWE(hFZ*ZUUC&(sR(E#aMBOLC zfyW_d{jYM){C4fM(x*zf)>_Ot&g#5{8t~TRLv#jGD>Zlbxa7*yiRU=h;nZm`_eJn; z=3u|V^+&CdpLu$u@7urf`t~`XvZrWET>_r$+v|TBj;*L)W(MIKkRcU97tpeDx1JsWNQv=iznLt83$oH~lJ4r$Asj_o_vcoVlCUrUMik6E$x_dnr$WiD2C|%tdF6q@dpyfDlNyo|_VcS|Fm8E_9uKhT( zz3i=}kG(EwaohN5<7^vm+Zev7ajTz7`RQ1t1QnCEqP>^YWxjj<<&nIiye*%MQ%dFX z?rhf9z;b#b7D73D5aPUOX;mB5S+KrI-`WBCNTglIQ{1sf4qZVStphFi#b9ZVXumz+ zJZE}1({BVJok`)&Go9mW6JL~9u8$f_n$1Dc1r#j5Y+a+=nY=kdlP2=3ywDIa{j)@w zLX4|1{Fm&*e3w3iFyqSKy(zccXE5<+d`Elqc1(npXTrhLgMPb1hZSmX?#{th^V>u9 zy%7p)C5nPx^bXWV9hBB8}W-^Euj~30ZVLINSss>**0k$}r(^UVtN?a|yEr zl?IO5{eYIbndkYO`qq2Vrd7VI=UpA5oOF~yDU>^hTQm1Z{}q;nd&13X?{$dXRoIH> zlg`;{(m-2;bw^fIkrw{>_UEK-qO=B}o(68G^F>Gs-lt;OIpTBzRV)joQ={B9kT4RWStidlXEj$N%5b&P2kC|YR`_?*=wwkT1%}KF_H?6``;aoHDnyMu)Mt}xGTf#;pQP_ zl+XXq?)EOrn8N0BL)1~O$TP96T~Wr&djs#BkO`j9>I$ZZ!izZ|V>R08^6$DCi}&iNzB(|xQK25+AM1x-K; zoELL&^lKd*a#9*lJ8TV2&gl`_+3n%23WIHSZqubsnWvq%UQky|Nl_uA>vOOZj>o@dsm}{)w~$ z!*8SiN&eYtd^t3_SarN-9J0qf)3lqFMvYiS6KF+!FiO52P=l*^@8nxIt{b>N0fwr4 zai1q=8x0Pqw9Wcq_u1!ll%haNttH;$17A#Vdwl^opnx5E1hQppE3PPtM_m`8lABR#fz15;g4c0oKn 
zLiW*wQ(52$Qv;_M%|4uZ)ipJ{&}t7dg1so>REXnjw6fPG2K`foGRmG!m8KlI68WSYDxV+X;+qB>y*=_^xhhR>Sw@Xk5oUqlkX_YUoe0T_3eI#*h4R>@_PrbpZ)KyNoVzrrex|ydQF`}sk8h{VGiBgsl zs=jr`pR&lAxPOTDl#@Esf>Q|F_K;$>K5Fq+;p$TZJ)VNQ6320%wLBjRT+(M9$HIU? zs#4avvwCwpHF0(Rhijk_j!Lf{5$Ky=9ve?O);y@ws?b@ zI@)$Ibx`Vi3};SdQsvOV&L-eYENVM-VI0c897sbyK$7$a#FIs|RCS!% z{;oL|Pwv~R2SA&p5AU{slH@4t!{l>A<&pg2-wK>X@+4nN{bmg1&(xsC8l@jVJ*6G> zxe~{IXN~ufadx70g}RX(l6QPd3rYETP*{;!<5IF!`Tqa!qY5td^dyu@K4CUQ-1z+2 z@+7fb*6sRO6<5Bg$+QFI(HgW=K$HGY)wjX4+?*&y`86AyDR+NI+dfUGWBksBt?}$0 z=8y0sw-mIC{+c(?pHre$+_TMA(7(16PH^?`h$yQMR4*j;(1Y#2?U7uTVL3Wg*1c>= zV`v2nf$?UOUzPf}%3RLuj%?`2(;beg;bC!nM;YME!UC9DsBh8^kfYIW04wTGZSwxq zSvld+1B!Bfx0=oROeg=vJS&$fSkH$Lwutkg;H}M+?_4)6Fa0)`Q~KMK>l$ip9ruF! z4p8Xr&t9mbL+<511L^MGRo!UW24ZVbP2fBM8{#{BQD3JfIWC^2QohIbSnb;VZP3J2 z@NXT&(M};%&S{r6LN5N5La!j8{hLC_>`1R|>R(YD3w4I2Qfn4%YhTk{GxS7f ziFJ|Bymmet-yZrOVbi#}WB}hQz?j|+Z4`aS1@YzwwK^y2bg1ZN!d8HX?A5D{)LqVq znn_K*LCZNlU`>=I(|M8>lq6aVT35~rs1asMC$VL4Kfk9o**2dF`PZV1j;C#7_$H?RGPDfQ zR+GzH+UqP{7I8@9);V!j#xC#K=gYUsHo2Q#zOy$w1RZiu-gH!#TrRFy_Q+V!9JTaNxfJQ>D33{<6_7&4p zn;Nmhrcg_~OB(yvDJSd|>-%u@y_6k}n|PMe#Jp0c92xf3YM|5_GavPzZDXI?UdU7+ zNs3Rb%>~YFC51FFg0TPnqeC8aJayz&A?MmQxqntH<8M!`9;GGM(`(oPOKlwE636k+ z(Jy1ct${LKjWgqjzoS3;B|;9Rr}_+juxFXCw+GMurQSb5?Lbdp53v6mc>Hjz`forD zN5dNn&=5P}vpgNy{BwH2Q?Ujef#^%Qza!OTW{e#MW&RLIx}n3SqjA1Sy19Jo3U9tf z*&m_3uAM3uuSsz_^6*mBCQfZR}C?XVSh-K21>e zu}AzfWq*@=4`%J;Y;5XXj(7r%el7Om58#EJDB&r51pAZkG9YLP57$dj+1|i<7hI;t z>;J|lx&lsap`KrZUJ9(6Zve-an8Uj>-#wHmY8tIiTYYQ!nervWwhqsf%ITs#mke=t zfx*DHXUvKi*#6BD96@pQmVJt`w2{3OwWt!)wEmIFeu7qDmAii=m#_3&r>8>8C^>$1 zdx(7r`#WhfB?MmGt$7mnJmi`3S<0HD}=W}nWuvH8R$WGIN1R`TJYrPv~fOec25u(QC^m+4Y3{czlk`;_AuW~?avv~aV^J; zIzs;ICuv_gE00gV8?N>fx-yPlv&FQcePI7`@~=q3*Vq}_{?PGQ_k$QAzY%Yt666tN z$r(rdJv+QKBQuUAbSGrrdGPU{nI$oZU0^Om>i(EM@Spj59@od9jmOZTA0yLVqfJ(E zEarFz@8%e^EygO^?i==|DJPSB<^p4{GXwb z^Z0TL6n!ile<91cn&ZePN}~Ivwwurp9+|7L#nPJhpU15OO&+)KKTf4ZwA8W z&zJ!{;NsUKq4s~IHb(=q>u-+XdIDwK2VQRE{8{{a?O-teCN%Iz 
za+^>7XF)q>0R4W{>N#5FcAy_ioBsnC|2>rVcVPJEVDNXqv@cfpPodejD0vcfKc3PT zAeA4Y)bXS|3%vXi%>Ig+^kIk78{n;B(Cycuv_X8CNW3$Fc?=xYk383q;#y!n8tMCG z`UQ8A^Wn6~7~(7kpC5zA$AS0Efd4s4yc{^c3y=Kjh$GrdLo=OWqpGjFGIFm1J$1k zzMkgGI_UFv@cL_DdIflU051L^Gg_X58!m;n?jq*b;hAs4{o{GMhP;J4y)viwb+{!IJ41s4B-l2<`LPxE{)Wq*i-I~&Mfz@zg3lJ`xp|6AI51NeK6 zI<*jU2HN~Q`a%9p;XBaDNXp;B^FG)WAA*w=QOf73!6lTk6EOc1a6AIECy?K9NP+kG zau`&z47l&2wx@yFk?8r|kWiNrXBgBm8IE!tWlw5-19_cHd)x@uOvk2qhwxW`WFzrb zqU-kq=F2GO1>jf%MVvw#K1;10CD)%InPx**{vqIhKymLQRaZjQr$T83t-2R-?Rs$g zGBi4w+B888O9|f-KXYlHmKt;0dxs|7I(%nZUpu!m$Boo_M0l0c+vBd0v#Fn!NR>EU z!lFt`$!kACt|VN0TVLBKo;@;#|0s60^hmA+*ZihuGi}v;_iEXT+Q0jrG152Gz&HI! z^_+VEpKGm*Z<%FI^}N|5Fs=K-p9qFpd488vT2y1BmuxMxyo~9XCbG>Rp&voBqo~EONH>)>FG!o^_6>ig7<2~zX_2oZNLi9LwU&~wIfG0q zx0$>D?{CDVSCw%zAREnQ}>UrRUV z(F=us0qt2!$W<%(tXC+*%|Xm(OnoBWD80+PlL4;$g-)}Fu#FH&OuJ9nJEXm7$xFr3g6(YrsT#m)k67m<21`3<8UCsVg+$k~6U-PAMl&=CuF>1b=&=xe>Da5{?7U_#l_*EPipndN~FFp3r^$AUbKEDJ`{s^u98H{}mIEK?&zlAnW1rv8eP5%HSXCcMZ)6=1& z2hidF421StrXpE(q2*^&Go`_1=t;Wz0%flRo4qONs~pFW{@3Wz7ohe-ka|+qY@qoi zxxEJ6b%CyLr=EXDs^#Q&E%*JQoioUB1Sz%1-sby2sN;P?e*v7UBlIaG^uO|a9eKS1 z-hTzOXeDj?D6l?-E%-;;^%!b$C}q9~_Wp(3?EQ=d z?ibM#mjc_%9Ghw3dx*IYC433GT0wg*2Bu-eSqv;2;I|dzIg;{cfVD4y=f5S@3a~zg z=K)}OF8OFL+Gl-^oIb=dypy)uNGtw|HaZ1t4h2`YQ@->4j;20`V%6PFs_Wps89+3f zn75Gg1EkvzJ#X*u#=zK2-k+dVkEGT|lYSp~%)Z!9!P}3Kf6Aa^qj&OasBtx62axwu zk?YR1MlW*R6jbmQxt{{(tf0Juq47(>m)7fSB*Bm2j-Nok{{lBI<~klsUIx5UQa_+M zg*^HNJhwv$BdOz`Xv4SR)qfsS`oAtUZ1&Ia=QfOxwQ^A_58XRz@k^ma5@JQvDe3FdF1Zr>05d;^_& z7MkE%Y@P?nXLsuLb>zVVl)Nt~b_X;4faFf#+ziC;(w>9B^Gm#GJ^q0b9z;7n0VONp zjsa)=u&d{i`#xBu{|bfu32c50tZSf?YiY3qp@A3CESo9eC1m1C%6<&0IvhS|#v-{9 z=yrzsCXw?8p-J8+_6+(_dxvEB1Fg9P+Mhs+{GKwToMqt9_SSpbA9{WY47UJX3ot3? 
zoT0We>HTAvYwi4_&wZpl4?6iLusf5o&d1_AkM!?Axyz{WAoTo)#CeDG&w-1tAuZ&G zad7KbLZW>G%sd4RjU>-Esi(cx=g`x7XjZ}rcTwuoTn`75VbshWRo_E*T}C_nQ`GWV z+UZ1UeLWB!PoMg1sBabRZr}bN!NA_&j|J0LLl4&TbI82&kjIyjY9+Nh8Q9)|tKOou z?nautgZw^#7W+Ci7>AVnDz#3G52ExRgQHc%dn3ltop16G*j_?tGj({KZ-0ZNz7$GK zOL$w0r9h@=Gk?}GmioAc#r>EaWh>{{?Sk~0L#S5$3$&rOmpj1DpI%H6+G@s zjC0Xt<6}g!l^Rv(9k}kmqZ9l-ApBuBZDlj+!PTPm92~umts$#K${csK57RNG=(AR^ zF;-)9J1fvIN9(-nr}gSzm{!nMdx8%=;$1mix9loT>*E-BD;n6D1N!_o5$5cL{GW+E z0zC!J+sQp})91gG(j2FH2}*Exi(@I{GGuYu=BM*DN2LlEEZ1!{tEZi@tkoc zQ?C6nSBNg;*?l1Wmr9LLXNoOm^k^;E=|U>kQLLb@?yKfMHLAqf5owjj}HFBj&r{EUXjtFUvsM&MS=U-5w(7Q_9 zeW>hps?qIv*ypg#s@&b5F8dTi;az?1U6CwT^ToMK_B$L^)Z+I)tHY3T1v%bK&dvoE z;#uVFtPDqkYVD_EIqp_|8&_>~d!54P>bd_(*}cGjJ>LK04;yBf6qB$P%lVKZ3gtVc zQCX``iDD}iMRaypG~29FF>|OCsjZ{3B&jHy#F8vYDq#p6F@=@f{`bf0rP;pweSg32 z|MtGUPp{YYx~}K>dS2J{yq;I3VhmZ%y=PB$TFU(VcUg_|{AvuYj3(+9pYit_6|iPx z@&Eso)_1*jjU9IVFTV%=^M3iiCTmp#4>D7{n0>j1jILn0vTFKFaecHNBXP0Uu%PYD zeZ0u3&E}C!V^8k(?_gFd{?%J-ZuYCT#Q!>a&XzcTDT$14a{_spt*2PR@13vo20Gu0 z&FxY6JH6<@YWFgSwoW~3+0u*@cB<_~<+iY6Pv}i<(dJII4`=b-WD(cX%T^$5rl#7g z-IH2dpy%vHYUaE0q|bi5kT-Fn3#seNDt1Qc%hbVJY zH)<)duxs>!_iJxfF(T+f12gM6Q=1p-DNke5v)Y)QO^eyyNicX;%XhH%zwrOt+FYfL zi6k>#coQ_*svr6uh%2<4e*FPhB)?ii?=RJVhAXGilv6#Q8PaCtWCiIuO^N11W z!mBw!Eo;@bfL}S72hkhU`{DZ4plz=Q%xv{kPd-ReM&NT-wVs0?<)~$$_FvF?f7EY| zCz)@5g9d~qZ3^G0HY$w-`z5eg1&-QyGD_)H+I^D0&>5!>*WyyxWv+ZAEw2a11+ZD@ zUUio41a+EM#?=j8FQV&Ml@-FMl*4FIe3?l$V5cG0hV8~hbJg~iF*%f?@OQS zabtzrZb8##BrZD{JJO(qde0Sfu`heH-FGE5&vf@W828}ce5psTAP<>W-hwh&{YW%z zcf7larHe1w9Hm;*?oIr@G2lopf$T2Nn#?FT&MZb3ll`8khR&|8^E-Es1@Y-1TZVq| zQz{oeSypAIxY`np&MKs7KP{|KV@8o(NoZfK9)toD)iVnJc9MoF;AoA8u@bXr<`Q&{ zv_A`v#i*NoPFvJJlucfxwXE}>>uyGK6Hs?3*sej#e&i>*dyT6rlwH9SI*;sspoX1f zwjb+K8P}?@*(2~PW518_`gW4uO$*PV_a)%ph(fUi9gNiuA&0+Z%X4y51&gqRTzyHS zYvRIWH2YaW$#?V;i#_!r%fD33*;#%VI*h^RgF%?}wGA{cGTsh#TA=MO)jgj4A5ir7 zb5Qni@Vr#e;~X~SMUd>EeXZHS!+2^9U{T8}GwWT!9Vy;Kle3e3rMiZ>-kr3}(kq=v zR=VR##+1GBo9;YMo{vN6>$T8TOR(B%F^U-RTsg{;0ns%N0cWoY*$KOiHtj9IP( 
zT@zG`p1g?GBS3Kq&i%U=^6$P#{J+df=at#(qdoCGzZ0Rj7kbWhE%AUgifftiA6S%) zKl&lb$ys{w?~@@h-ujvhtf-i| z3EUl3dxz;c$zdNcj0uOc`B`D6jA0r z80cT3ugj4^B8Psej`)B_eRU^Z`!X=(bh=gUCzkeHe2Pce8of&V@6=*ukrS<)Q^S(| z?pRv8Ck;>Tn(SGv#5O$PGwUzOMN!RsZ8guWDxORfN3sW@1&&+YyY;IU0^k4cRYSS4McJ=^Wu#g@J+}B2?&R)&f1QAunX5iu z`w#O*#^^I%U>A(q#7LE@cMj7e=ym zn^=!Ju20d2-%ab!b*~|gOl9d#Ba<@=D>kBNb*Xy`Se&fEwj-Tcwdg{ho1ysIB)vZC zv4ux}BbJ&o<_!ZNNS=b0;6;d(v&_=}``F7AFzT5@Lf zb~Po|aw|JpS1(hC-nZ1oC#>O<+L?+D3(#ST@67tF1wk|QkH`DWMQ%~YNE%-Y4kPsx z=W641_BqiqkFnLS@{QAjB*x?mH2glw#TOl?RA#3adG2%C+Sb*0Wf@&B2iqk6^0$dK0{gkh(zBzE~#7ArF zYE~=K=eE_#4?)ocKlW8~AJ0_iV=|xndz@bfqrTv8Q&4CE{Ga2`9R%0fEc_|OsB8y5 zwZym7_c5rhqJ4?8c^0L+=}E72=MB`#Nbn-q*7eyI&lADXfoGKyB>QM(m1j5WV=H)u zJ;As{e9<`Vy+#ru$3yjHZ^HjJH2+L{Q#~8r&Bd$v>U)07ZXmcHq>0qI7)--S%Htrp4Wx;s zs6)E%z_SBk{DL-`;QmmMWG(7h7{`O`>e?FoOl?Yu|c-XIM_QR+MBI3M&+k*MCTZq#~c*3+uZ>3q+hlaF=uFEhW* zLGwJFizMEp#=S`T0CLp|E#G4uVqIRN`(w0G)8}buc83we;aa|l{BOp~jNb0i%41&V zX(hBt_ON}>vm?0X!KO*!lO@(@fF61g-fkenk^RB=&;U)(qub}O=Bqs27!6)TwL!42 z3Y&LGMK8RFH8_uiFC^!0!|fxuUO*0SryIw?aGDx3Gkjbj6Loz)4A-eBw#2hl3-3EK zET4e-sbV}b5x+;PC%JGA#n%=3?slH5uC7>z9`N{#EN6zJF5bV2dcVZoc_e)iyVRES zw#JL^lJ&}3dK5n|L4m<^@iu%;q|Wtddx{pKucxV_E2`I1HaodSg6twtACE34Dt{z+ z&NMD>rR6~&dzWOLj1~>Z#3H=Ps@MnY>x`oIca-f$`j6Jy?xZF9)mhDTTz!}%J&DUB z3#;%$8XnvBN`YBlkYr|HCyF%jp2J3@hwBNJ`DC;)E|1|mWW8dE5|5$c3*g8(p0m&~ zKK)V@%2>RU9{qJKk3hLc=~w!z)}-=S?Y|D5o$%h_+Ilq0yvY!-?PPOLhGT12YqJb} z@q8W{HYC|2*`LIGEg^sH@nR^7)z|6@7_4Tw){~tcC~_&>(<|MpdD?K#AAP4J|Z z`=e1H5mAi_|0!8_<42YM{@tT0z5R?p_F!F-b0s-BvrjH_w?CrwIrrx{-cG*bTbBE6 z_B6uD%yKdw#ozx4}L;Mwq%U;i2EoUc8xff#BmGV3D`0*>hO@=cP z+4cJISHIHE0d!-6`Wm76YVb8G?wwh{kaF1#klFOR;Zu`cs;1{VhaZ~}Ngbc1&m5n& zS22s9)s=Xj`|$f}x;EA)f0taB>oQmP-n)RNxmqfW;FB3DyH=AOXjdl8z4RFIe+Q~3 zP+#YnoZlRrvybt=+=&l<40@dG+2oZdPi#$zoIIFZJfmF?G!OaA*kRWz*)Z?oy<`>Y z`QmE!kmYXT%f`~DTlp->Tw29Dkw)cy@p2U*@%PyU+DE@~I({YNW==*-yKnit&J)jr zGAm3+<7#pa{?hfs==|PR9dhbNcH^eKjBfL*TQPEK#P((lCFcYD)s@={kNr~;(Hb-d 
zcskiT6SddWGvPtT4f!m`59MfN*LWs74g*uxKzhin1o(xSd>4P#>%7~|gA#fby{Tq;{^-0CQM1uwQJev~x zc$dH;^Q(sU*ssd$x}R(R_t!UA;~_Adg^LHX5#4BWPqaK9mKTC< zFza*!E0tL9?8uE3n!uhsM)%KFx*J~P{GQv)-DD;)mNL;ciA{O{H?#Mkr8|9C^IE)| zb!s1>r#_iI+3wjykpG3Hsl|%@NS&Q{T!UTTX=J-m@7tKAo9^El{cF~IGbTt3LQaxf zrsgl%_(NF#m$cQw|DM{5w>;U?C$n2G@KzeA<05Um1DfG}-v`D%`rk)L^27T1rG;(2 zMjPp=`>^euwelg0T8-BIN?+KW{MPpw8=o99OZiJLsXs9R8_=$o8v4-IM3&XZ^^I_w zs+|u(nwhE=X!K_uNL4ToWo-@y<8-vB(9@g=#;k&V4{S%HKpoz|{o1Y#h9QM#b-CY} zYkLcH8CPU>`e+h6QVTyozr?yHZfJylq(2X0fY-&^JyD&R7mQbuoTSI1Z2F8T{HWnd zbkOc-K3z{V?};88ijl|?wN-(~6lI2^{OhQg)s4hvK8NRtD9MO$t{y#+3|D#gCi~NP zdnbAy1;<_m{AX$VL>l~x_7lfa8NHrB@td?c7p)IP$0Nk-C6fPIFm=%r+)#{%;wi^h zZ3)NKBZ;LJ`silmE$=>VCr;mm6`Cm!O5HgtAyeT+-6G|mI`$68qR@|s<J8TT zu@|CS{%4l|_q5N7*~v8?CmC!QaP${a`W`T$vRf#Cmlvv~~x?$#&Y;g|-}kkX(OwI3QG+D+N1JMV@_k6zR6fW-AR3{)AHr@f4N0V9e9H3wvG7Wy zXYBuGKG-2ok-Q z(aEu-t1H{J5CnIyY8yawySl@HL~^a9ONoJOK++FY>nt>C0lxX9D$z`b;7MyRR5u=u zZ~Yl=?4U#M!N0aA>flHA7H0QE7qyJw!)?&&cz5ggAI(3F6lXqXq&g2Lw~cY@SlC~U zcN^$+Mz>?|;z5wc#w8Xoa@|F#?3L(Ddt)=Isim)bPqW=SaPTv zy%`<73d0$29;&S!qzUZ#c}8Yil*sDEB0O$vCSnrYPKW7I zwDerLCH2jkx4Oxjdfk0aH4 z2c4{B5E=8+}B;w&>)H;`zt@7(pm`@n7kiN1E5X!@-mNVB_=YGv zPMs%#JacEqf&4W!z0am?CowI-kWtU^INJ|(FTk_TIQWI${$b_k(DDgIjA0FJvx2WVn9mV@;H zPxMBcAECklg%|s^2lpLtC40fwSKd-rnR_wk6KDQxUo_0De8v^=@OJ}O=H`E)my1t) zgmwnvOD$Bdj?P&xx&vM0ZqTDQ!o{(E?^>HW+V7^G9*7F5>-oY@P5Zl6q;|oQu|dvm z$vpqAYx$19TaHwASI19_e~t7eS>ed~*@nVfzs~g<>WmNjAig9Q&p7Sn?4{kvz(9Ah zTF`;#pYg#reP;bC*UQ}qmC-dLsbIL+XU0ZXu|(sHR^l-~^wpRm&@@t4>PzOBtPAa_ zRN(xDYj3GJBdLr74pd8a=_Fh0QGREiNTcG(?E7ehTd6ItjO+IL>i3V-n~`I31rPk{ zT3Y*~-=Wf<(IIpyDN_^1$N8Peh~$ZVkVGc-Dr;$(kElw2v%5WGiRqPK(J#iE}+FPl!^=er&% zT$ak1t;Dn(D`q^G5l!A@{9twP*YNz_MxLQoD4thYCOplEq|7B7zTE1S)vlBY=Aqv} zTu5YC;0cGb9&`YFvkI9N$Z~xsd>`m{#&^j%`(wWn@s_J26+ z{RsM(k^g=53hT8xK-m}dk5{sED|rk{w4XJKtop21OCx^RZ2uAiwGJJ+)8ZrDFXuK# z^L4AJF& zpQz$_?Cv)0jYh+qo1JyDH^JWxq=`7XhR^f>TfM{6?LqxGh??qW+Oz7L)seXII(mWZ 
zFn>u+SuGffQfIRUiA{b(T^;ob>rkXO7$>N8on9^b-{*ORmccv0qSxWTJRqjt9AKh4oWaj%s@3I(uk_lykTK5Fmt9YE){MXgfjbD*{7K!HU z0FL%JlHRHvsDGw6IvY0ey0hzZtrjP$WmoU}*P{0xM2ZuG`;xIkGnj4x&tKKOLFw$q zxe_0m^E(d1p_;s+M6wUTm)eCCWc}cLTpd^R&%IEnrYDB76c2%OfPeMzW(w(#k6#zZ zvx6$*pq${4_{i*3z7Xz#`c-Xi;FDy2cp*$5z{!5*5g%4QUcgj!4;(TF}16c`q8eJ1fHySVBVH0YRxvVi{HE;z8eo7y*qLLlb<<6^k*Ar=D=A`?n z|HZ;T&i?bnD~>Aok@bSz(5g8(O59x5lDnemqol4jJcofVD3yFl%gy1{}{D-Kym#uuIu!*+R2V{j?QpvWZ*`0#kM(_XWWQ5G3C2 zJaqq-mbc&gD)BI^*VCu$&AymUKjGhog5r;B_e(rm1kYrJ z$ePG`YOO+^8-UKt4Er@*-#du?Yw0sRMrQfC6mq*9l~#HF4``ls#(3%%qDKWtlAkrX zAu6Lx6E?rQ8WI5->pRrHhxL8g>r(~w&h+~*Sd8?(i`->S>SO#^psfb%%xHXniKXj< z&P_m=Ikb=2s#$!?c%>C9B$2a~!jq+vD4XOo6c3#cE!xw$0gDijOfhimyT zy>4{q4C9Hcemw`e#3PsaIqGZe=U{9NqdIWyi>9@~vZUbt(=;<{#&tYVU8(Hhd>JNk6lD^N{2X5Y4SzBl8LQP*+vmbO z`}pb93F?d zn#?813(`U_Iug`faQtjE>*LNgQqM0UgD;}` z3h*{V(RrS!1A}{DIS;%)07v#&bwm5f%Fm^J*#|gIOGo>czV==%#U8arvGFL_#e0_r zo2b0(P08v~MtldbROh-@-fzTzZiMd@g-@PnzZ&>a=JzXo@weFCKjXnbSCh-T%;%fD ztos{JMI9 ze5PFGVwEZ9C#0=-`{hXBU2>6i!F*;9);(Xr^Ss|#HK_rPL4N;HJLT9m84z=ItgGd@ zg&C)mxaCS-`In4USyMX7|98o8WAv)5<*fJpn0_W&@nEoKtuG^&z>`*!Lo}n9uV3ZW zum6|7GD7?lJD<dLy>TP*jT;7x4cL$oLv!w%7B?Mn*NPd6gtiEli}=bd`D?~tLsbTd(dZ;_IruI@v! 
z|47<$KJwGsIpW)U5lB#M=d%G8+Qb zyK-~|`=($iPvAMIz%{#scI`$gvw7`LpCf_gnqp2fC}R(gE95f~qW7pjv729KEhy!% zoMA|GWw;w@{D`e7N8R1VAR5e)t|ohlrppr#xRUYj zGSvP=-?)wZtkBx0V0?sSdq`yU=Ve;Vy3Xt1I0F?D8JVn584JCxOl3YrSDzd72l13gdAS$GFy5zmsUXj4ihFKKiRW9(|&}UWa4l z;*~kj-_W7uF#C{H+=fnfE7?Lz&y%1>)YBgnL)BW1994tKDE?&!KHp0HWJaagpBCS( z8tilRWOeLN%lXBqAh{0P>P51*pgJ4c0YzH+{9LWm{LWtD8(E61|Mo}QYhc(51%3|t z?et}{J9WWVnRmG_PHd*h@U5DyhuOX?4LY<;y^y5GfQ&lM zBkjj)sRyj?(EHYAK`$h6AHd=xEq?*F#CRQ!kN<#OMi#B$_JkInr*~7-IR^xp1^F1X z+j$VM~`D_aMCo5;hC||&Q>Ndy2%=KsPA#^X6^4Fbp5`TE=8YYUpW-cL-|ix6WapAvuQ~; za6hW_t2FmWux8abF{Itd+Yr(}U9Cgmu_rpWaWy-LM!A~t;Zh|MofirXrKyQ-9jErJ zBc99uJquTo`=SfD-o&ldN_GOrVV-&%-!s}tR8#6qY(Zl8ZsbE;N=kN;xa@+f?)gse z`2i}NrblnWKAxfPJyczXe}+ZL zI^=OUv_kF2quxY({6dY{xmW|`G9&jH{&YvdwFOUqrhL|Hp3$#l-(;e3v%fmobvp7$ zmMWXo`Hdu{HaT4A*FETzwX?*qU9Oh1=+jE|ro|0bEi!xZJEg0`YLefH`OCauqG_AM z@Y=!~PYs*Ub(&Vjd*)GT5X04zt%9<9!-S#HL!pRu4fsVloa zqXoCQvx(d!I$(sJBO}V^$ZFyc`=jh4w0lOKlieK#-`iN94{7$jWbAkpsm5;`!Dmhm znd-D9YwPjRYqN{D;@b`2Y@iRkn$%{TpZaepWHr+CBeI#e#LTzN^nV}Rycq8?pZIHa zj=+nz3jUwYlJqs|{yk}&uHD2gEN8#63OEC#6H)9mFpL9Ltl+a``*E7y3s1VCLw4i- z1+Cum!~v{pKM*EU`1`n*+<_fvz!3D!yxItLUWG2-B!P*%&-`a(;w&vxkb_RH@4)@6 z^o_^M4eH8>e<13vL7!9MyS&ij1IgoOxRH3xWGj7x+%3>sk5D=@5uMb~50Bpj=^%I| z!(-x99xR|4z_wqly@tj2GyIh6q%#{l#K7>PaUqEFV(mW9Ud=ibaikaj-C_9Orpb+@#!43EJL3Wbf+!4 zXP4nOJaZ6T&+Kz8PhaEO%dYi?Z{`dGTh{xgg8aLK`yI!~+lC|Dn6Ca#8d{!7`P7yfoi#XpbVUH<<1GiP1x z{ngb(Q3kdvjamNe>8wiT%Ga-2Xjld8pMm7Zq@@x`%H527f2`HB$kMk8ENgf=E2QNr z=3XegtCrMSj+?KbVY{yG`c9qu)0V6sW-V%mRx*+~lbkhH_v>0}?)Rx^y%;{n(aW3` ze4z4KHOuQ3{m9e4n|kg~?(M4kfBMcyynN-fjbvnNrRJ=%?T)8|+#6Ti&sto@I*D1# zS;jTk%+=(zl~+&r?+v;=lt`|KJxS}+#R|~R`TI5ff6Mz-HkzBDrgnRQ5J`;7gv^DWmh zhA!7+^PLsHa&<4Oc?T4C8?m7&{|Z|X+9%p3vl@+xcb>_7K{DQ@uBNp96~FFOTPuCm zZa6X)1tLwyeMP%Lu9S4m8f@kRGG8&S__b@~m(gEYX4hwp|Mq{^^?&)+qyNjlfBQYd zzuR%(VwNPm>=b?J4{>oi8~+FPD?7EzeWlmA{{(xx#+V>8!O0h!OuE#!@a1wT-7lk3CT=0t>?&Q_eKuj)iHx3j73w9}kT=m(A|?AT`ZX^j!a z-?g(zz43`gd3KB5^KtjuYW+dbZs135XKN?1AKgJb*Q>9R14!A)ddkhTuPf_1g{|$P 
zKWGoTZmiqwU>*mWyV&1$e$`T{K8yA_-#huZwyS#peq}zr33yM0YhSHqBrspu^F1H$ zC%Fj*d0!9C=HQ+J(!__}=t}ZxolcS)un)udPnl=T`pfmT0^Rl@bLTpQx4Zg>bk^?O8|2 znVU;^3Wu_Dhrr@9J!?lCe%*InbUc|ZW%X|i2>NL0GW0tJ1u9tV54D#SqD8LW!XEd~ ze`H?vLp8>4?4tGXCOu+T9z^#4Pe;Y%yvokNTD+z_|27Xfd-xI;*9%PN;Y=O88Vb`_ z^|!0pqs$4^)^`mhMVFz}c(yDn)ooe7Y6XQC=m{tK{WliiA@ya2bphB0!9V#74tM2l z^vig3J?^c5YfpG5ck>+39I4f}QROW1@^={C4&TJH4Ju&l2*#m>91iCd%>`BVa!e*M zS(kiMZGDQ-PJi4@Cd9d5&B@lEsx#+g-M~{=4TlAu98>t@@pDh&-F&FldgS1Ga*!T) zh5uPI9l>)tABGK3cB|RTk-G_FyzFD zsx!f)NpchKGYwv?$!im`I0k<4vQ~qTj=<+j3oT2wifchonb&camTuH5PSZ+j z^<=+lSG{#VwC_fHtD^c=u=NJd6IzTVo1~tMqgv9L%yB1z`v^8^raQ}i^$s+k~W!ybwuZPwS8&P zQiYO_k&({i=LL9Y$6YM@CxqL#o>jptk0sg)fc=;zC-J- zu)CKO1h;7H5;Guy^d6KML4z{lt)i!yLw2j9)cYuNjDJJ4+gp7PlcQFAlI*I;Y+0hy zGnRTC|vSf}K%^Pi^ z7rIwVi~Z_G>YDJFlLI{2J2rs7H@lQv=+TCsk%xHl`@?OqcC+JcG#WHT^Q?7`@LCAA zkKsSp^NE99K~rPvG6Ej0?_8|bd7zw%0=JN~oZvDZeTEej>_onofbZ#yZi9hdvs-b(C z1s$S%=0y&H?S42ApML~9c?61I2J#Q#(FNVd;AV4eoQV%J^^Dm$`Z_E#UaUecvbs5o z1Y`&CabW!`J{?zR>K{?}Bv;4MnQ)-hN;{pp~aP<3d)hl3h0^|6HY> zah^L}+10ol${$VhrlD)*1r}(vwe}J@TcM_mZHCdLWN=AT(1&@xobPiii(gyI$+>?m47Qu99CY~uLB<@o-&a$zSz-G!KigWLb+y`mPj|0e&u8gO}K*UD*DF9Sz8PTw01Zl!;{z?3of6xT}_gI!)3WB%tWIa?eh(IdI?i??AtkJMt73;g7|=+zHp4upL=nP5zmjO5DD{J2N1?Kp4sRy?eX+ z^|W%KZdS`f)6>9|6Qq(AW-RFlU&vPvi^{)T_@9*7U91rP0&^(Hw16YWkd2eRRvqPm9PjD#O z-$!Rw79L?DkXo_`zK&gq$+dk8R5= zcn2jfXU86Ap;nXh2lNa>*^=$rzLdqe-1C`D{TMun@0jD+KCDwRnyuBdG$qYT)VYjg zBr0UP-Xgo75^?h|2~2*g8`+!S(#lhhXfG=+t;x_Co=9xcM)+q&AS0E;N`1k)B`?|O z{w23|R(*D;Au%muP$_fqKlN{+Ume)WMQp=$ELjgWdp3%-Qd@P;r`OAF^w2P4_ufjk zXVuOk^ON;ABlRVX;oOxKUdrcaj3Ud}*wnlXMecTWCd~?zYeAg+Fv&$wA7^i7XL|W} zEDm(hL&SUPggPr(zE-pdyEk8Rj#`o{>y;ORNUPU_SMI_?K1KPf+-L zElxL5cwEit4_C0DE$}wkSUdPmE{YE7sD+Y?w3+NTC+RP@k&q#%l!&YS-CvKNbChhU z@3=r;la-Uxz}48V3HrW_t+GNfOiLew{NjSUSL4M5yO+cM9GAijMrI>zY0c+QKbWZtSRJcA!^is(FP;OtllS6NKberL(*1|^Z?H) zw8%>PIFgh-jK_MvNz%W>pGoSEN4FOYTWPI3OSBokyMr(@3unP@IJza8xu@3aqjAoF 
z>VvZ%vu4Z4WxUB+UdhQ6-{eI#C7$;gceCoBOg@=gSw_8@b^d4a!DkI287c2kF7An)_*QPtRubnOTTr%YI*5$wtwGN1a`xRSWx* zGcI0$-)7ugiJOy@>&ntJM*B%1S&QQtL2p$1)804oYKE($FK_OCRC`73u}awkd?_ud z49+LrdxI>GK+DW$_j9*4-N@SJlXU)Z{7u%}CHRv0r0*CXM&?gfHhEp^!6UnGGhP{` zrL1;UL9=!E)d-cc3oQGymZ53<(Tuc`(e@-Z{2n-G=I2T^O!6<|=Z*!3+p>D=3jXI@ z>(Qj{C74%NQ}#{#o=29|g2c&mX)mP}efgY-dT^_?(X4 zpMvyAJw?`S&mcoT@a&i9|0mF8r}Qc21v7`0-HeHjTF9?R{K#~28we&BG)jDJvh37B zlk6fWS2(BnKNy9Es401;vU-**?4d|Tm0iL8tHSzS<9<#6=>pE2J&-Z-#WvbV zww}!A4Ph6Ob#S4stL3axCN%AV>^E zby{$W`H9rn4%FlD>=2ON4bs8nv?2ee2^&^TFPN;I$zYVd@s*Uw={Y&!E)g-Wx|=ia zHp4ZUWU{JwFKQ)6Q1m@I_mr!p1~oM5=}(4|fj8dBLio*x^C6zd=wmVXv+}zKZMqT9 za|%J$8IxD3GfcAv{VeTyN!xSud2jekpO~oDWN!IP{F6-p()L)H@x1IHM&D|c6})fZ;R z_%=2yvBBfnu8DlvVBHes-Ud%1qfb<46LtSqT?-0LnoDMCqGrx44D9v5-4)ILq9^^_ zukVu0rKpe`Lz(fo(ZB4wxF4T4z_&k+U#6{dX??J;Iohs((UDre zjP5Qk+WQO0B57IANzQ;Cr0dThZO9TYB6I7s)PTI)Zlt}Dv>i*Hlm8;8$9?KfM{@#b(^{^_4+Isq+7>n9YpB15I{s z=G3#~cKJ11lF?1he$I;Fcj@$!qMkB;Y8eH5lxKGHzLo4G_fW>G<3N!dd!byp4{~=sclHYmES{?2Gh_9%FkSnNiuS&S{Xc(YRP`MiwQIe%%%qyN z^BSJb*g0djoRW}dkHeqLk0-)9d4xjKa;#pC*>j%5o<+%=MVB#VUVG7=3HZ~Tw_cY1 z9?jkY%D0r+pO>+ewa#AL_esD2(wUq}`@v}s5|s=Kk1qg=m+T^);qd*O3V zfVdMDIgk4aR%0ybTI^cRX1`U>l(>OOD3KNDeb|)SN)PPXDI7U~Xm zB=d6<%`Rkyd=!m-l8s8-Vq&Fc()KlYnz2W8d@ox38#>&NCzXBr$-dT#O|7Gz7xfuA zpY|!eVJp@sktnCJ$60%Q30FGmSAL_e%UPS8_cBkd=aG&Ich4&HI3txONcEAv6NPuW z-ZFdea#HOA)~+t-vW}QItrtMpnJrzTo#c4VsRlW}V}-Vpry(BNI^}c9)B@Hlx$m=D zyd2eM)6T=l)d{w_%_-{dKwoo8=F$3~-zc+EjTP=KfW!4D_BlMWLYkf1 zFX|cIAt%XIy3wzlg-@4A(VWvYj+EY{zSCIEvwda_ExAFSL4#M|f3p^H3hm+UCfCnZ zsQZL6Yh&Si+G>Onv_-8%ujse z_q+aG3!mQjJjJh8`k>Ze`WxoQ11@E4u#p@Twj5*iQmgg?m>QEtPkAHy*|oUfFz^mzri{uKW{?Ut4YclG+mCf zV@PB?>^I3=x9m)3?t* z>7LGtNV4eE1MzprY=7QnPD6T}d}Y^DL(nuspNY!%6TNU5i9a7+JqkIx2*sDd>?zkS zM5$-}&v;^*wmOpAdG6*Ux9rKvDO}mLewcO>VY>yc8JAQB&3%3+1KBiXbH;39jX!g> zA+5}r#V6oTT{Kt*szGecW%P5063HJl1~n2FxmGJ1>C|?$9E=(jq$?|lTkxSh*tQii z`7qdDpml3O-GU6t=RZg>g)`= ztRmmfr<=^*%mr>>n3%rD4<+~TGwee=Payb9kNd)vnLBt 
zY?wB)FTXEKxf6UDAzx>HHhJ7mQmPIbv{cJWAk3cd#1dupbE5ViVk6GdM&f*iy1E{w zBeWYEf1uB3_994 z+3Yt-F4ZygsUvyrMt82!N}hfUY<1Cn8hxHl>(N}rw zT zVm*2b+~^0MtoUz&VR9$@8FU#(Z}x8%?0%tC)^al!lDt^SGt`lPmPqjVlmzMBQ(D?IxYs@w+hjM{(5!%aTL zl%Il^Ssjkw@O|$aP;jt1+PO0TP47YX?%Mr`caYw6F_@dO8*N~-gB{GO?L1q z*;N*AV;sx80kvz9jCoo+k#t2yMikT>1dE@cD>U{(j z(^2MO?Ie%EDe!m{3@vCy&ZWx!wOK60G+NbCo98R>t|zYMA7tNcddFHu88_&y7qPup z7ar6wS1(1!=A>qgcldQsVO5hcvoX8%2%TI)CK`b1eJ$36`zkd~p(UN*QweuxYBO_2 z$q1Fq_y=hr`jrUpc*lQ-!&a8LvU1rc@;?3h9=a!1VJaK-GE4I~jo*vLJXPp)cX-TH z&zjoX<&tz3Eyd{m9c~ z_6an~9Kb`$w?x}UWHckdlyE#r~&9f>wDI<@cclBy`f8;we6}Pe%gS@{@KmRi}I4iCh zY2K;UyS{=ek;(tMF8UVCQ|ez|v(4Z= z4)6c$UgDd_`o5|7l^Kk(zFiUf<-cSBE@ukL|DnY1*~`i5C?mYD)3aoM_wM*P0|k$07XM~@f-;GFUD3SAEc2S~{U&)7+e9WQC^IASy zE9EM7WAz;b(#&k+l~wXG?mV=tr@hpZcR7m?c}clI6j)MfSDR(K*$E!L{-4*uvtPWF zuCl%V&F42!q$b`Mzz zCx2@4qeovafN$a(Q&;@x3Kn7;8_`O?xr&CJ&hBN@w1U-HqMupEJL${Yv%)zAET?NM z$AkD_pRoj6Sn4J0)4qI407MqtSz zl2x2VtVMD;ZY%6-=FyJ_(Fb6EQy=&j);if=zpb&kucHgwn|Zkshf+nar>p!4^04bn*};Vm;@3Wn`ZWA3uXd zW40?R?pyJ*y%rLWo>Nz*sv&XV$#fpO+Yt0$YA3s?`-1Kwwa-)EJEVWU-mRzhZqyIY zA%~sVutjDia+*gxk3nBv zgJ`(Y-JWo&hS%w3dZAb9%}ilDzO3Y4#QJw4bMoCYDIJr66;o93b7vl2gE!sV(_;YU$%{M4!XZJ*Sp0Li2T6pUu97f>}dcuGM5EP1eZE z>DnCaCn9{IwsHnX_GoPb`Q_>jJ-hJz&IeQ0EYC!T?B*Ow&S&FUokE%-?RRRSLfLq# zUD?GC{O=CZLCU^ByB<(?a^;Lf*;aV=4A`dN{FgqD#_8u#@^PcXy!7&18Qkc+T+uU^J?)&MG7?>zgA; zK&YMQ>u1jllIUbvjOkG?#wtd&2HdJ9nEb|qV*aaOpV(Sv5^N9n=FV_bq_<(2GX z%ns5M$nzA?W$(ceENbQtB9DDp#C^!x9VF**aP>6;nT^hk)t)uB_(d6eCi-?E{Bn-r zv7kycLS_%b?x7oO3k4xRTv5 z$&TKYd{vO<Q85v$8NZIk!OK9Ka}DFs9jW$nUA+usgSFKT zl}3@T>MZE_7VNM58W^gAcTpRc~0 z4m6&hm=k|8TN7@-8ucyua;*R)K&afrt`3Gbvwmy+M zIbAZ51T9Hr`ql6I_b^X!HEJbpXaFoStCKn7ad65k`j>FLnZ~q7$wZqhh4ts8qXH)B zliGpcD!%6oTK#K&a$~q(g@WU>b10d51~1kr9a}SvexE^aTfuPz2}q36&D#DibZ*DC zwlw1H1cum>L&!uQxK9D$49};x`578iE38wZgsvnhucO$DI9?figJ7DG@Hu+)i_z=~ zRJa0_mUtp(TqJ&d96ld|k8MDoGkCH#o9Llso*Rl|O-RXNkRL^Vo<^^n)Z3YVn3;>@ zqH2$Bx2rvCo*m(OiQX<5&@uy%eI^Y^-BS0G6=XC2u`(%L$DUMxB`1Hb2FDIIK0f$p 
zcup*6oh;e4Jv%|m_3*elX-lNW&s;qRT$9+iM959k-cq{xOLTh;oY_}#HEiNxRnclr z-h59hpVE=1U^i8%4?X`j$=MU-7rLGuz#qXP^9Lhf@|;n@D)N;%sVZodd{P5E7h1-e zC*q(R8}mn$4<_mR4u$X2I530k=!gFY!6Ea0Rp`?b`Qa>=){62 z$5i$~)dX2ixLKpb1at_b@8ftC)*<`ZK7i>R-pQ$T42zt7%{f~vv!X}CHX528-OF$` zbFXbsFnj6x<7ye%^9Z;THP)BBMt-Z1sXrEdW1B*6)2FOcW-TassHbXC<^y%+q~1x| ztsr-&gK#@u z(4X+syQpxKUg9LOwHDmd@#GIExDez&^uz>EW}nFQYJMGk6G3ts+SP(z_6c6%`*GjL z(x}WH9c^^j8vjSr%sasO0a~pCQFb~0mNlJ(8q3+1`KUYA)dz86B_1T&J(&x$wtqgz z4`b<$24!~g#ebNjp7o%7mZT(K{d06O^DEn2O|FM$Na}W8)+h91JlbWJqN`_4hUc67 zowjOi4Xez|J`b{F;~u2;@G)nAKY@0$ai>0N9q0Z}J$D$NG^6k;IFj|`WW}1|SNXb< z&U~S7eUPPimmW2u^EnB#25m`1drIx?`Eop(@p(pMS&OcsOx99UqKp^MuSBbt*YYoO z0vU7X%Kdtqznf3mfC|6&KXnFz&i`L{@1@OTQ2w#s?|ScgW$c*WnLjw! z`=`aIKJX24byD%(9k(-M(We;MH7e?8>`G=DGUCfSb$niY=2u$S9}H#Pxsul&ph-K8 z{LXwr#{q=P5e}C5NX}qdL9wH$dwUTkb z?%K;*be^e!ALo+pcS*p0EKFumTCm9(&p!|A?C$Q(U&slgb3Hj)iHFpbb9ufX(SIsx ze#h18dC-+{ArTs7-1*rk{k-2-vhy{-u}{IR#LuUdMrsHT%Jun!P<0@9o#F07d9(O2+4daxq{luG`t$?i;3$4>m*!iRj- zy`5ws=SN=6!h9EuS3^z1tS!-;Q9*|0DGPXR(dhch`|+ z&EBTdU~)DK-c4&+m(ObW{ruY#*}o-VYVFRaTAof`621KfyPw^$S+{-%mG5&ud$n^G zLjx8&=iW^O+ifiVYEpDR+Psa%%jn2&!I_-GIi+GYxYPG$&bPhs%K+5q3AQJ2;R1J( z6)Zb3d+9GK7dCtm?LM@i#%u1*_1q{uMlC+Z)#x&h=C)?Xv*$UxDI2oQnaTJq8h%2y z(~o8yZm}n? 
zM-oQnyI&If|ehV-wC+5gM`#UrTDj*J=nyqXIJVmdhwhE_)8l5 z8tG4#)AeB8q#sW{y=8o+DqbhUd#5`?$X2pNoTs&{PGnW{ZWONPJqd)_SCCQIVSXjU z#O)`GQK&I(WVS)~2QO&9MvFzLkd9R~J=+RBPl#-2;@p(UALLn6EQpA%)K`}G7{ z))dT#fFL;myZe>(vFy*w3Up+20&8-u&jsK~cCem!I6&EhQ8qhlmU?YRn`F?-Dr@%s z&IMr~nw}NQoYC?inMoe$%!%~%z7$_;<9K3};`?NM^c-@S+}fSMbw4hI&LhxqCt3ND zP0gxV_AcgJ&x~uEv+-H~sij|i&y!uWoJ@Vg==+uAop9PaW_dgd%@L zlMSd8J5q-f_AaDn0IVPLY<=&{AY@1RGVsr5f0Ltp89R6yUU%_-B(JU~{KAEfSH?msaN-?xCib}<`N__o>gd;4YdJltBiT&^@i?`d zt6cVih8~&KO2*2>vW%oNS?#J@!1Xac57YM~6We-R&6qShEb7wkCt#kP)ML=0sZxF6 zpIph8vZRNreKU-jf$JT<*g`rr$n~6(x>f1q5WE*|UB8+!FV9jtA1v46aYki7Lb)HI z!ne_Ol~!)k)*@}BKgo*Y?@3M0zRk>87bSD9LU;0(ID&8E`%y~Y0{`q|dlFpXcVZK| zk-cbNB49?5{-$)~MmoE)(3tPj?a3@{J$CaRS0<=whFXtdHyXK`nfMP-Vl#OosB(CZ~j zUQSB;dogp8U15oS`5n1j3bX8ZPX_N~uD=LAgA3pAVKuKcn{y_3Lht7bySLo$XmUoN zFXQWrcs-c*Bz~)Xp`qvVJ)@tKJ#!@q=8~$h4W|E6o#uq$08U;4P z=@;}lnYJ>I5p0r|FA)&WcqWlbnGZRY9qvl%`g>|d!Kc5#I$60V8V#I}^S^{iGk#EJ zZ<`sLHPy@h1mC(^TlfoT--Yv;Z1tZ(crBkLqrSITibO1(Kt7f#vzBb;%($j%ZSH@@ z%`3@f_K_co7V&C62km&YNG#noo*W0mk+ixEjf&sWn+#u!LfOZ*2K~E%A-z^*9_b?G zv!fxWUtFi1oUxmoTw`cYRuf`LGmH5GswXSMa+KSz=nEIBC;1qH^R0B`bWc|!am|%! 
zh$4xiI0Ic)8clQtbssYRyeE=Xe2c65qGD!9Yw;)t>qFYmr?fSg9XbVEwP@Kbpv`{3 z%4%8$hU{2c1;vs3QgIOoR)F_XT;7+iJV_2y|CzMxIkd=%e=;qs_G^ne5|6*a zzj(ay;IbCqlO`QY<6;$>!}J33I+ZPNt|w^5(&gNSMADyK*v(mm4qwPJk7p-`;cx2c zjfcsP&_~@1;NH`d?~}G8;FZ%_lIuKUrua3PAAA%%+t6T!{yMg4CmFjQUu%M;51881 z&YD^}+<1Q*zAvOr*?T|CXB)kCC)iIg)`?$Ti)BnU{6s9(Lhr%!=+`7JInb~64qdyz zF22JHT3?~6d$e|ocs9y2x(#hV{vCYND_ga@q1p4 zwA#pc?H-@y8fYV}W-oHNV*2kR?C%%PWaO81(fy1aa}sQx*#E1sVCuZn`&9oYk%FiF zPIP$2W49Wu)kKpTuI)+syXrME;;ux-_ffhMJqi@%t8Al$BXQ#yYo7kkySvtNca{F{ z-@z{{zG=N&pWhWRo*GL`GiuLzaQ2Uvk?D>7D`ye12Az2HtTmVQWL)3KlNlfXZ(f-T z`G%6;C_ex7iS@Xdtm>bs`A8P3Im=Y$Hc!+-CD%`-dD&N<)2Qkeyej7nlCk}pu4J#p z6=Wc*3poiYPd8_?PVpcBdPee@4`@n)e(v5EbUXX@v&;Qk z>a0%gv-5wflHo_*2YQ!z-jh)<`!h1W-p8+u?vKXNfpj}_HF-^DbCM753UIb4PE-Y17!S}K1>cFNIuM)yO!%2b*i<+tT&*Y|KUI)SUG=l9d{ww;salNu*SAY35wq z@r4Juj2%jjvExZ_wZiIbCEX{ZZY)p@R_i%E`lWO=u}V{5HjDOuPybL){n?2$oyKSH zc6Q3f!t}@4ZR}dxqJ|TcPZr5!*SHD1ZTw%z+Equl_X|rfjfWQ7n7viUv1(gkl-SmD z@Fw}Px9SJ7LoMrbId!xOYqr+&oyo?_Ajn)sQ`R>#8}WFj<3_T{CC~Q)cyC3S^E~$) zie%<;p|RH+G+= z7FLjsWD7}j;(7Yx?|GsF%l`wl-Gx#!(En^woD7}SVbI6jhA7!X`y=%g7lZ9_IA<09 z!@`44PP8Gox!IGKfUzYA)}lsTm}M-S`0&4=$y6=3gnc}@@Tom-WhVNsN8@wAl~LT; zD4l55Kca3jPp90guy27nb7|vV=-d{sGuQi=x)U$(BQlblrx|}_X1$!nd4#3AgLDl5 ze@?!5fX$w*hg^c%UFpU^m}P!rjjK6p=R*DAqxy?v4_M;4iTsrp-8lhPwF@|U_;)Cs zTVfU&?j%*G&H^4vsqDkU2ir670v+d zYvj5oiZrp4amDREr4lgaFF zT%@N=ly6qN%QLjvlb55lQU#@k(}~1`*TUVbhU`rPrn4U1@#$Lp%qn?u5BJbQVo;ZX zX(w*qgI`%uolXB|qH3a^D`;6(JeTq;_w;0c7|+487O1?{{p=&@=DV8ulIiynILvhS zSh6>O_OIhPgdPj=@D#S`+vpYFIpf9#>f8X*>@iO)WJ6CSpI=4;o4u;B5^HJbY3j)g zUUCpk#@|6&ZbB+6Sn&JVrOao2LbI}t@+Ro3vRug>ev!WGW|$|({v3C*)_a{YRkiny z-eQv4Ps8)Rbn{0bxe0ugQ9AKWIl&-V+OCG{U@hMS)|~R4^GD}^IQixeMVC$BOuqA) zBxxv0tWm0uHad{APvJb&@B92)47XXNu?H+OrfO;wkafV!FI>ehJ(YI%bS?7-$#>BY zOgSYXnVFJ7vM=bXYW3S>`$^aziop}l^h__$PRF@z@X6@tdcTq>x(Z61L(|_cd0wj*}Yv8XYR(K@8U^DT7N`~OL2M%D)a_xPp#~O3yHR#;pxF- zJ8>_YVfq`~T1N5{1HVX3S7>jl@+;Joy%p!-No#OsMY5?9qfliTOYtI(WC#4Y>PkO8 
zOAXCQ)-&pfUE9gm$=-pN)OLm1>X43!WGdr;>4FV6+;gzOBv7jb@emBodeT{+@b|3s|E><8@NzG;JoX zsfXXk;ny@WmlgG4zV|gonaWa4BnkIwp$+Z0KwTe$Y9pvmb1gd#6E_y!ct8)Bh=N7F z_X0zD>~E5gE$T}imt&Oero<%BRip3c;pb4cA~R`^fay2xBoljg_T~}z9m?t?!{iWN zLDqb0`IVLTOGsCIfav8aQjrLhoFNz=D>G~7<56-o4uEs!3+t*QE0&jl^LlqWYw7Z@ zG;a>L9^jcIo5$%ek8EdFsY~(N&DETL9y)aaduJGp)ra1|Vt?)kf$TBf2+1t02d|gErG9THRwHyt$fl7A9uPrRb!D#ymS*ppB-GeT* zK`{*`9a+w&-0gxY!&s9VXf&I&WNmphE{Af56xMiO^uJDxgV5+u{YFjl^R!xTM)9Aa zYxH!a_Ud>xF(HSt9KUw;nYl~?$4K&?dAj&Fi5}TOzcYh673GpM zr4H?HNrJwq{_O8rg&LU?-ROzw?tVeOnt!NQSd~OpoZ)(AXMT*&Bl$58<7gi= zJ{89mse@tm(|g zEl|@aW2mF?FsE{5-o3MT^4}*mY61;ksNLiSyadN)qf;Uqy5e}5si6XuGS`^c#he|M zbIxKVl0hiF+>J^fi&A&^eL&GmCjUd`zuyN*_TQxsnog=C$v?sUL0}Ab9`!j0|IUTO zI-cEy#(7gg-kJT&=^8mNBv(Xt2i3+X4a<1`*pUuCjVk{R}{{?%5d6;JF$ zw8`ite%yih(@5V?)y!+gaM|Bqt~QtdzX6+!>$68-APS^RRZx`Uz04`>dgUDH{LYBy z*XqbgZ=9=<*$_c@7FRC&$#{_JhPDWUQariOdg_xhl#?fRrooO6G}Ob2ypN3D1A5_8<6F zRbT(K?_4|g!b*Dd#4c3$m)0Y3nRjZW<>XMzOip$Xr2PN%Dk0rPiT|loc|E)I5{ulK zZcjwPtY~Ic_E_Kj^?~(h#wgg<)dO8dxBj7*Td7pm&ad*B5&I}LWPCIl@6LDaF(nhr zIo;EwKu@AWy)5twTUj8Py4e|JX-z}tz;}dN}Ef_$LILF9j29O{#KSdPY$8$ z$=bcn)g>(10Q@XREZJp0iLAtvuBE-7@=I2MqO>3v>Q9=}^4sn+7`Myn+ zZQ;ocY+=SY*?m?UTsdtkqpb88*#~?Pd*23~27$eayU7}uJym6Vc_-38OFi+i6EDAA zZ~MGj|KRD_Y&^6VV@T2Q{LN?8xd9%%Jvk8WnK{0eB!!PTVe=a8-)2nH znT#ywS=7UktfjS8^9uE3FZ@Gj{upg|md>_a=XRzI#=J3 zGf+a!c#K(V-%J9Z(Sszjerm~%#vkByA_^j}a-Aq3v%~ zp7DO3XReH6;b>#9k3!xnK@oRvjCGlbJh^Y@X^J>Tmui89tPh8cbnQ;Ph5W9C)|pMR zHDl6pC^mwbRUfFS|D-S6cJBWgvzsk&>ID67fsei6Lo;+iZM{levyN?~migmK^2{}f znY7xVhn~;&%yNIm%kR-@cl6yK>cW%rz~=s?=hg3{P3@y6*Ry~=KbqeSyv=+4Hqf}A zYrY60I|@U^*3{FgHHMmrNXJ58GxpX~1GD+@q--0Y7>8VYD#Pk;wqRscbgr0w+L;~%50%9amML!jl-$H$$1$&{%KV z40=hRL@Kiwv*zj7Hs5@EAT-|X2$om?r^>$Z8oF727&Cm`>y3}fy{n~o(_kScSB`!NO+s4Q392i*`(6fX?ppDG1izw zSJ?xQu7%L&AMjm2Q3Rv(TzV}FJgtDP7voz?C^V3@RWfrxGxh#~W^}_Q=)dj3d$R@f zp>=h~oN%k)?qBe$8S|11W<~?xs8&fImwvMZu<>5^`p~i{Hsv?|4y4aGbSR#>+rg_Z zFuMXT2;KWpvno`59cvK@1c!io5ma*oYzn;I&GtIboCfQ!13BGKAB(}(X!#ZB!BX^~ 
z4gYz5u^PBdq*rhCHJ)iWGUZKY$9Uo?aQ%wj**p}h{|U0Pj$Y~k*F*53A6Q&q%#MWv z3*kWqWg;k(f#wzh`$(X2R=FOm&9E24nEM?iB4~9Vuo|~l2F{G9{W4$`iIi6a^EJ$B z+&MSX+j8m{{dEdU+K{L24SNFT1Qycho{zAgJD`|3RrF}KrQUXM^QI3YWXuWC7>RiV z+11d8TSUN)59e=X|CPtjNQ9|Y!>gw0m){t(swlt z`pu!lt59<@ZFpu-?{xw2(-L~t%Cm`mfFqT5(t?$qXVa>Ihp{1rp{?gMJgw*1_#NPL z9DKgQTAC%nXoKiLH;t;wO_>Z<$TpBKVgVS-KodOmGv0i2QrUthM>fKP`DrF*&j22+n0t$HF z(`!(_8nkm>^A^yWb7CvC^1+`oKoZL=s~UQ=ly;ZWlKH19GgCD_)$FHpgZ8zN%NeUi zK@p=y@}q(MsksLV7y(%c3*ebLJ@4m{foQCBB(tQwKw+#*XLwzP@Sr%>_~dcHV;{Y3ZzH;R1g&!C?Jp?t4$r)0;P#-zQmC8_WsKfv!TNInTJD*4<1}`Hw`aH; zP<|^MSp-(4`3YBao_d&rh?&*6khBrYxg%!!g*y_sE zz4EY}fA8U)_d=ZFTW%!w7vA3*tVU}=$@{>?yZln&kn8nCevjaPYlc5LaD@L&51xkk z?JQ+h#YF7UGFmmmzHtxwFI<1u`TK9}04B35evO=&hrk$!MC9xgzaM4p_!yXkaUb>) zW;-asH&^(H{0)Ep(>aImO-?jQPD$yM$mbV6cl=umybspsp-;m#zo$?vSuyzT|KX4K zRs92d7mGwc4IYP)zq_FBlW5&h^xsk?;GuUi)W&MNt6LxL{}8E)fBLa(_%l8D`>+3Vo`-*P!z*WKFJa3!L#wk; zXf^;?1&5rA9*1YnS+cRB=D~AUNAKuq>`ZAbCxi9Di`WArVHcz4e=vG^2X}eKnTzy# z1q-zU8{qxN!x&RPqm`D}s%zMT5!l$u@Xs~I1US=#I)kv{&9Hn~yzRoLJ?}@el3EXy zw__)U(bq*RMml(3Pd!n;3t*nLz)qx6rvKaSFV=k!le9GV8o6+WUd)12ErCkKtX@=ACH^gl!70diU1-_@dtK;s z4O{yG)I31_BGr-gI(3^42l zXDUI}l8lIA6%*|Cm~&t=d^Lh=A9^|kSj>vzE^8I)wnw_m1APYkdx6ynxHuQ>xj=bm zjLy^PJ2GKB|Q`ZmKVTkAU$S6n;5w2e&j4{ zQ!6-9HK-Ynop)U@g<8Xb_5ge}_WwB27scmJMwfE5u$MlfsG$|wja)|4nkRdPAuluF z+~Q!)1`aEjEj)mp^=6D1juov$YY)I}^=Ar_I|#}+JCDTvd0x09 z<3s`!Uk|3<-q;)3_M*-k@a9c;V?GJ9yj7wNW5$%K?|^A4d|Ai*pb{;%#OfK(&=puV zLXkpXxC~6zLrpcs+a9kmvWy4v3|hUD+CM?XtJIVNgP>@8xg>9s_mjLM472%_!-8ax68`!*y*E5di(e5qCZ)NH&0tfwNj$?1YjX0=a{+ZHH+^D(;F#i?{ zb{Xm#^`4CWt*3k;DBc?RHM30Uwe{}9QE24HKr({bx6-bA%_MTR#b zHI>jaV{4v50*u2}Tc1S+2BXPsp^0ZEoj5qH(W7^mK8q~o!(!-@pAQ!QfQGJ9&Gl%W zmWlA9D|*@xDthaI<4QHSCC}FfW2hOE2eS`U5*#yWdlb~@$ln=gV-p~Mm!~Dcvkees zLf;}h`GvZ-V{!8G*X)j&j8*qAB6?HjH%O=Vh`a?RibqV0RYeH7)r$Fl@5KMplL2l5Vm{EkGF zK<2K{Y7BF{h4eiIis_MbFULD>he1_i-EK#^w0Zi2Uq!Aa!K3cXsh`5u?1RQebQmM7 z9g~ZWv-6M@?|wDHw;gh2aPhn>5L)5HE590#uH=CN7 z(uy%YrqbtTMwT^L)a%IFcKE!UR+m#okBqk~1AkZ+59b+wWI;kPna81F9p1#P+=JDMBb 
zm*CyA!TUMEZ%aNKJm~?Ung}wZ5KO6o-iK4XkA)`_*c;yL*Fha$fAY7pvOHd3L%-S>u`n?9Gx4`|< zz)%Y=?&0s9j2l@%=-H24@LzhCf_I_c<1p50D?RK*`x^4D5oHR)<1&HY;1=v=3Hqvt zwcSU(3zSZT^JdM|=W>Y}Mp)d5e9QudhXbz91(HPG-iA)+M2h6X4(Pe^)&>0yW_Wab zk|tr!jJx?(l`^)g|0ay7uud3p;a{Iu0YA@?C~5Nf7Cvtx6C?QR9Tz?ezxzAAV+HiRuYfLNsW%(Vzm7dIF1!mmsDHvy&i1~W*Rf;zXm!ojq$M4AZ2jZ|Ru4nhry1Po}dG1ZzX^hgxu*U=G%{HzV%*@U1wrC*y&Spov*nleR$Dn0GbcnS0>nfWh32 zC$NY+`Ry3h9Zs2@FgF(Ob09XNYah@rrjBD3P;d)xrZLtVCtQee!Yn&xu#ZPuE;C#AcH`>2HUGfN zU{rt>_5;^>Xf`6?sgZR?wz+@hPG^4LI0E0B!(Gn@bp(pV)HaJ^CUpOp7FS~rjf30; zlpCO}Ywb=*)n#Bdo8@ZwybD@eCYjM{CKmG~BY`uvY}%a5$k`AoBw_0ZFrrjqPB(~= z!(D^swC9>C3aL2`tPOzk5oo-G7URJ+1K6C+Jp~5g>4h<1U5qmQpksaDoQXUd^K1r& zw168kgFd}ABnwGijoh0}bU$*`1CG_gKJ)|%qiMc_77ZyEj|EAFJB@;tJp(Wgj!k0D zG8BmCF_St?>sip{Rk-7NzB}|i!DwlIJ)^Fx0=<#XWx@Iy?Ux7kPr>C3v~G%Y9YTI* zgXf(z)$Oh55ZwGRJHWM-7fDByf;22lSRv=iPP=UMfK!EOLlJW8FuXxkO)#j=uG!}AU= z+`)+MoOBrQbYM&|*2@;Z4g7!6wA!9|lw2DHOAkV-W8S)=okm;U2Jar`n|w3&%Ls(iNQSqn8<*6Y zdHO!6QUE(!iuppmz$fHgAFaU2TOy3$C`|dQ@UI!VUIi*Y&vWxxc+UF>Fh2<;jdC$E z)je0ELHY&yZ-nuawCO!L@r)Io)j0#i1$iF_WbabbJ^EPa`~YdeE&k@EB9(-2NdTC<{ypNQp5z-_XMwwDC5*Bm+}f zU`;^=Jh7dD1UA41IA7ex%>M`AG6sD${nP^6D%hE~kzvQ3LDY$7o~*CIc*<7j>jPlw z*#%cmJ^p2?ocj}d2{+sfvh>k=cj zt0C!0$mBtK*vUGo9WpjNknF}#rXRGgf*mUi#`&0u8r#;D7T%|QPjY_Fy3d>#-n5$$ zK+_0*I38sn*Qb%g($Hukme$BAX9BLu|732WT{rr188UQ|mC!hNG$!C`B+z-^k83#Z zhG>KCwgf+Kd)Q0=9fMJ>J0t%p#*ntaoJBvyc<1iMQtH&iLOH|oKEG2yGM|yiI8r@o z)v=e)(?)0d_lC|apl!@rp%ipV#u`tCm#%2c+Zh3lwV0g^L!QeZ3q|3uUQoTkec(hj z+DU?rXW+Fb>QbQ9C-6bfj5VHRM%n`0mms;lu?|MnT}AIo!{v_9LFp-jWQl2hu$;lS z&TxDut#qQr(6iWyme)i3(Ujc>-*eM*6UtY`N;&^Day){O=274=CPfQ23HkQ6kCBv` z4Mb-4Fhig6-V*)`cMsMq-e01xaT;_qCiG`|HMgp#;*#Kt@iEK6#@lJHQ2RIJDYX67 zpt&&__n{Bk3+E)`8Q_T(mP!3)qm z0_cp0c?%id1s($!yNum4SNR`U1NYp_D`lQ=XJ=z*(yr1O~u-riVrJ(I8G{C*Ic4(S#>G|1BnZsBNqagEvqq%$MBP~vX$<}9FMJu=>Wz&==0*WSRq)q)IgkE} z!^_6J9|6tGY^?V=1NaiLF`k^#=i$9gTE*wkD%W5=1O4g2`*bAf0Cg${I;40119)mg zNdh&WgHqj*+Mke{H+i}aT1V2Fx39S-t4ir^Xj6C&ZAk#rKjDaZW0r%tIbRw>$>YJB 
z7JNI0G&=j!C;TNk@)+g9w_#l3j9_N501V9Dc?p{Bg?{R$8A3NO4jIQ2182_zdvomR z0owE2kEdkzLZ6D%IYgTW;O;JFV){1BV)8lCbO0(`1Sa><;^6bW)J@0Y)}_VXl=S8m z*V<9&jd?V#GV?IQWlo$&Zs-`r3RvI9qtq#YbUulVdzha|{FjGTW0Aoa#-4kT**62d zXu^EpcWlkS-*@PZlzK}1B>rF4(5|fQT^~{jcpH`%vb=%E=at~w2549Y34a&(e?emY zf&LbN>UrU{tL)o=ZCcQ`xgd)papP#o75*vgqvzR;d|88S?THK?!lLAX3bkqbhQPw) z0<#S~`xJ@QkL4S4R3!2@{4uh_6T-&jgdVR|0o@X5xdd?N>G;>rhrxIKS&4r??eh0u zPrKCq|4K?BYfTC~IezoDj;hetGbZ|M^w@Y>C^uHB5w^#hMAOg`@7i=VUK^_41Z>j1 z3zq$TY=^Pcx!{aYWP-b1^ZSt$<7q;FS{mjcW;zv5eXp33|j7lJooeiLQKGf2wQbZ9f3wx=T{B`D+n4+j*SgQVto zgY@d;oQDc;*r*LOmucVIKE0*WewBzY!pVA`Kg!A1>bp98LQhgoR@Jc1zkUmS*iwkc zzux@UZ|*t|fzF-L-)!vOdHPC0=3EyV^Bs%4Z^r(3d$p@>ZB$d%9p~W71gzEgK&Lw} ze|Z~?tONDuU@?zVcPCot46`34{zC4agznxi5ski|!P1)du?F&44IMwgIB^b(YxHz9 zWh(M)Cp{;?!{^X%cZ0nTYb$!^3U~olyfQTFg2mmA478xMHz;192ea9A!G<*imP5gO zD*)B z-3!b-!8(K9od0{@;uy-018dJ=JDNA6^bw@1ITpeAoHfYX$)N3)aAX)%-w3}GkT{`? z!wNo3iI!NV@4&1PlIiNw6B)Pj*PSrWCtSk@q|!1wDU9f^K%Z^Q{Q6LH5AWUiUdEFG zv|wI}H1O#_+c(m?`4!^=KjC?(9R-d9(HlK5y#np10sikYht?aH2_~L9=?F|IeAJmG z$dfk@7~8JK?L%A4Q|a#1a46|*sO}f;0=xZaoO^aff;r||_*D+5OToP!$g$K}0(^Jz zv;yTkH?|tB9RvKX3!IPC2Q#D4!$|1G%m|Fqa3(eh9{z|eISnl;2Q!QmIBIPESg7_H zZFt+v8J>O4tj>LZv+=$Kry4^4j#$m_=s%hFp6!{0%y&i;hQr|njE5(Is4qN?MLNdA zE!Q}YP~O{CCxM&yX`F;>eUXgaNZDt6`x7d6WFA)u>6i@k@1f0R`Z$d&oX4(s+J6z0 zz6_SF(bi;i!EA%BTN**tFTi>&{2#?L?;4Wwz zv;nyIRrO~ zqs3ufmpb6KmFEqS?Q@hg%Ey?)5n#HLwx@ zPN3H{kiliZpqzPHq9j-qX6CY$mcI;i#8t*J+Sg{ApS>Qf_{~!w<>7=j&Kug5xyD$> z0<`5WzU@3s3*KB=p1$?u2<@M6O%L6C#@WTtsREqRI=KF=!aKFE2QcbmbG6$OsdEKn zzE00K6o4krQ0oGcPy+hvMVW)$bu>80jN%jM9qyHydwMgrZ41)Xff}CfPQ#WM8E^dE z2{f!NwCjpQr-Ff}5L}h|dl@j86Tr+SnY8Ghs<(y>=lf1@&{JkMQzhGb3(Z@&AN~4? 
z(w@OM9ndsC^@l-^M*Nt2cO<2}aX6bc=JIAYKV~SN41S(Y^`w*0O0S{==17W1rX0E4 zZ#fGUrv{^qaZV|4XAs=o1Wk(sBi0!xF%J##4%B@-i9rsGn$hDHgA}RV`Jr4jp4W%N zYoLgcKd!ety;d9w#89shyl@;U30!8i^yH9x@$>Pn?Lofh!jDbVG0w0cWxO9S9vZl_ zQ55Qz0;9X=VY(A~0Tx9n+g&OoeBewAp>Y>IC@2 zK==7z<(b?&pg?#o==s>$02DQDCqYh&{V7j5WndATe^_b^6OdQ*K5Zy}`&F z+7+OM9?7coGZt#pf?JEg!o70W>BXSA@jPa8&~FnB7M>kDfxTT0j*-yB=s5QgoPRZe z6Ru1>8>_!z3?-$wUJj$28p03n$xK7;CL#-FEDU$DJ41(g==&77s)f+!+7a6~gi+Ev za^y+^Rx*V?J>m2RypDi+rGaJu5Wh@$bMVf<(!U3P_44iEzc@Y z-pnYdXkHC({A>aZlKDRdICk-^8K1AA>tdcvq~`lzcM7Pt!>bc~HzF^M|IAQT6*)71 zs99TF^_fNG3+$a49-4!pUh+)n&=fA+0r!e98;|12WNeiF|84ZJo_FmS(K`lK$a8Sn z%%6>1iNlh|K{I`}i_uu~Eg6wKlQK(y#5+30C=*RSiFGr(qJAi2vR&)-hng=TEsok| z?R^ax4)d-8u+O4*Z-4z6Dn1O}T8Z^Y{C?!aoHpg?HIlVz3RtWIrxEmWiqE@1GL6-X zx3qkY++?s~`3jxrjTXHKCi;B_qg&3}oHO2rT>k{+>au!nLodell|lbX^4|M8jak(r z`4jw~kH%hvBl#&k4>{Zdt&CNRh1$F6(HwL7tfJ`Am@03ktIu+9pb%V*gnnwqC{{Tum=_s`Xx|&bL*J@5kD3#BI?}1%*AdP;NQNU{=0?o| ziw-p4NMwH__C0ob#M{Gvvavi52FS)W&a+VD1YW6_Mt zs)k;9zN<6x;hiC0vet;k3VS9l1&fnTOYX%w57FDb9w_@l;|0Lg393g!t-Y+Zi(;LB zqy2dBOvbVuhki-4?yA63``%!61-&w#wC4-^qxbFtucD93XwqU}HZSQd*z|r_X|uS7 zx0Oc1*R_-!59CcL|2aBXfxl|n&$N0UqmU=?TJV#~({5l@nO=?}areXT;=pu-S<*>p zW#;hlXvq;^>`9;IDROKb4c=y8iiICtz+2z^uV7gSi|_gM9rWyN1=(10qwcox%-QQ1 zs8|yoyLWFabW@%=_qjmt?SOI%GUL7O_tUFcf~Or*%F-pHo<+ z$wRZKU1dzXiY4=W@4d`tGFhE%rItC%%)PK3-SL!`Ys_c)=IXQ;dYTKon^3k2kj3z= z8?(zegTA`C#CAxI~|2_#WnJ{U267IMg3$6u0o!)ZEbNL;ib`kNyYG2F;~?pZTsTvQ``3 z>5-rQouN^Y?0#{b6t+>7oQU2};~7|r6@tbg#P0OdafYP}eG zU-TA*caEBS#?noYArn4TLmrX=!SNQ&~Gr^|W>fDuE_O=aPh#-NiPbS$$Gcjl zQ%d^B(wp9`b6Ad0*7PPRkxFNnaW|CE#=LJFP(AeGxR)UP{E?DUJ*0g2JA4|x|L?!` zBmGZ5g|IZPb)pz;Bd~0qATTT9b*$zNEaeMOzbWsdkq`IR+^Kq*(aOBcY0N4*U{97{ zBbo#w#*N^!nP+C}U(Op(nlHxEc-!14bkR&6zw+iYKEtq4&H;KLNlnr7Q?%Eb`O@k@ z<5@j1qNgwmtO)l9VV^#O%AcY+-laGgDfpgVJW)SBnA@DA>;b42`U@UKdydfmZ;Xb` zp}BkDO=u?`y-fusZ$ljg=ChENF^qSP`OXH+Et1Aaqm_>fp<$6#THv6`-g&D)5rb2r>O9lB9s6z^U8Uc)Y$OT=5uJiVC0>|g`!$5SJY*}rqt zF7UnsqvLL95eY3Ghc-sME`#2l_ZZIiuC!PYC`Z8AbnNv7DEKd7qwhGd}8lYJ#kPi5w)r1$U-)L51Sf 
zGh=$VL*YuI4K+to)_s>f)X2||Q34yG@jNJU6p1iqdNUl?o2LvFMB~gsX7uYmIM5xv ze+eZ5S*vH?s_L;Vc! zbFP~YN*ttBJ$Q3~v3f9vcnoToD<>U#8E0f}8%Id@Ee4>Y3&7<^zVF6fTQdtuoJ>u# z8k>PbADgic=NPv;pd(Gd(p&|eyT5^NlUaEV=j~AND+XVUgfMSsU+^*aLM0#{Mhib< zsSYCtJD34`mZdYjy~Ol&QX=jSQ#QgA&VG!OleDag|VIFuK- z-lo)9W==K0VK%%-Wi*YZUK(M?&vrXs!hG6F`Xf^m=M*e@J9q*yMj;zn5CB1q1dE>19W?IYw0`pmT z-mE&ZIFxqxz&&TB)tM0;V;*Fz^nCC*0bh#2SEEndOLo8eEk>wO)Jq4)KD_m=`B6Yq z7VG{F@U(}PhnVvXV8mL5b@iOHQ5Y>5360!%0u7i2r?hdOL-EU$(!XVVaTQwXME%W3 zm1lQ6!)z8NXSWBjvE6{em@)Sydjao$aJ&&HoF7G@o95@OLyK#8{~E36>wT0~!mJ#* zdDc9T{s`Ru5xVuGigW-f|`;v>LgBkk)0 zxel#7$5WSHoN4N9SqJ6zfJqeGjD)Y{z*gOt@jA2l1wZt;j`B+Sv~T z!;l8^S?TrP!u;bHT-7&O18O#bzfFN_b?UY)e#Y>bjN$GZ)(heVHUnh@m~G>kJH9iqI-XVQM=SnRh0;5iUwB4i7jSCTKBd1s zl=cR#MbNf5c-P@G4?Y;>aUBbwUA5FedfEy-`|&O_@X`E8Uk!Ns2b5P5XV8M#x1+(o z6_UM)S)x9@pTRy8$lVKGK+hLxA&dTYp$+?>`CG_zCRmoD?BxI&PrZDIwx`1TzStro zWEaxHL0Z*o`z(|+g0um8o<2EBasec z46b2Ox&m`5(2r&&6pOuZ4DejGyTbdZp9ZyGWkmOO?E_%ApZ@nTCu?et7E4 ze;Q+z`w?1#Em*-vpkWqo+abqGg1v!vq2W5T#pt=-{Om{i2GXzhc{HTWOd$FaS?R^- z`8_>Nhbs|KuLQU(v=$Xk;?^WjM=KxdLtsu7TOECRYJq6@ID=! 
zt0IN_k%=s5d5lrw0lW~6p+plX8HJr~g-u<^O3*k}&+i$Bd=(h8z$*s49)q9RP+=^s zIiIr3Oj^pOM>A8^Lvl}Gc}Bsl{cvs|?Hj3Wa{2}z@-Rq+$y%0$M0Ll?Ob1(iIB*`eBOxhcV z9jJ?kZCgNrG@v;Y%$jbeXX6kTV?DJcBj_^$dA}F>dV1P9n74wB1&jNE#8XXkc{hf+ zk=e7uOwNOWI~seiK9D4HlDrEBXVA~S;M572$J3kp8Fgu87?eJR_K&3PS(I5zYwz)_ z0rIQ==zi#?e18nb`+(z3Q1KELry8w0N1IEXm%u**j(g(R(@4?Gh1xOZd<+MYStC4( z^hWVKBH-j@-j=1`WLoZw)R-~U-7imn=@Hd$s;ykaZ0-}ZwGf#0g>v3`wjSJ8QnM2L zejN$+X5><6Vl8Mi8d|mFCk=`nLCXH5$3o0|3t_p`kP5WmE}l0n8uMBUt8osvzo$QE z4+W7zeKPTZp6;S{etL;V59MehAe%=EU6EzS^Wl7sKn?LvpycCl&0Bp^f!NsBH?dpJ z3w-|&HN7F^4E2qp_2hur3Zv1LlEBme?6TI8*;L-J10gR>4jY_3{dK|I1JYF482ymO9iEaY`o^p3uIA@sp+1b;m=zXOeVFOP#^ zUF7K~l&Oa|LC@(ZdfJctFM}q|k~{P9T-OZf^E>jqnbOWd^dSksEU5S*u(`MH-le{r zs^IMEp&qm<0!7W$tCw#+IBC_i_Q^a+KzcfXyn&ZI8i=?$p&@ z^g~09HS9pW4E}n9$1J|(LOMR?^AR)4n#g^9q{^6?BfN2MFct3Q1&W6J?}~f_qn;TS zaxpTOrDhl%@;|M+!*}Lb)$`&i{THx)5nXwS|IKSN3ETB7GpQTtwF-Y zu#WKFp|5GhEIFStf<4Uo!u(kcXk|3*-Uny(3jG7zT?yo2L|+G0>j%*EL<02a90ull zkWzhDzUGV=p1 zn`!4>;1Ne@65bdrjSE21gvk+htW_^Q2H?35i7k=`lCWGoVP`F zK*sc`RiUrD;L;R+o`nnDp^tmSn(mUnfFsjQKGb69wRY{b`_^!7zS<_`6vgLI>Gat*rc4N|Nb{e4sFAh{_D?;P|lOz?HRL-LT!#^ zFdkws_B0zAJdKVSDg6hdU_I#Ahq=`jEZ9&usJGxHwDfso`8%|B6!KM=w!9DM(g{5 za_-+x%YjpAC}2d$NNi|48tLw5Dj4hoGqW&U#i|uy^b;FrZDv7qW{?R6U(&Mq(d=(V zFe>ZEx)bWm08g`X=~F5R_I;Q|??Q^N1mmBXFrVk?RQfm(v=|8_xsl#V^tmaRyVM7o zU5p%kurghs*HWl(2+F1MeF2&=n31{&qmcROMgmbgY`S|Z2bnbtfEHHjhA`Y^LJcj(O$^e1Ka0+Tz{&hq5eV_@wGqozFH#Tc*zYc`Ww z)qs8sbv}VM=b`%&TCWa|TR{(F%gz8vDQwkh#zAwj8V@@eTpT^CQp;$zpP|Qn(8_ao zW_3Hl{9rt_%|+0iHpej9#PQU-qKbo+>w-$O?F{laXdX|0`mUVyouif3KsFN?s{u>3 zUEB~NdqWI4v|fqa}>|A=pO5&dq`;rfNY@;&)hlrf z8CZ@5=uHdWx8dEjM|nF6>>tDGo`!}S>BSrP9)-%wkVV%gWhwg@bD>l8?p;s4>0u$? 
z+Cr@qc+nUt-%m@e1N|}1TOIIL=_OdAk@R*JzGT5uZ!R>}*fSod(c&TCdz>E4m7-;6 z0bWT+`uo6tZ_tXn&W)kO&(zS1)EC@((6e5bLCB+-l&8_+y}TWZRo(^Ho`)u$7?_Cc z{gd~V;MGNRAr%<61?%PCsJ9fV8EIw?=x88vKJ#lJSFT5w(?@?~dNdR?W9TOOKS~|v zuqoK@n;3h?0gb2s&Fa|?X)sICYPgz-g&l}wYr)2XuO}$HSGgIzpF-vz1`oZ2?U0E2 zz?}=)nAd7HbUMnqVl5WX`_%MmG^Yn=2(IjncliJ+4Mb}9fwkwMw_x$i06T@2yaVq5 z?d}0PXS4c0FM|CAX4rSr+VvN{V?lSC*7Pypu#DM* zk({q%qtEjs1qn~ZHtB~l(~7xul;V9ZM7xvzRFcHCH-@D?#oU3%8885HuQtEZrx zKNf;M)seQL;P_J@`Eso&Pn^LvqJ)-PPfs3ZliPsF%)uWr-#r2)o6x_WPS4VI2IqW8 z;YDgzfg>&9^adcB48HC~Z$U5F12L0Xz4@x5&E8%WaFdB+B#lwG354VM{qk#A;{#tVY zmSH)t?ML6e(XAF1=TG3f6N>kvU2p7Yh*nhrrz=p}G4L`if5U2^JFTT4Jy~dN1T_ZI zQfc0KO2rrgGjf@Q#dxK?@W^O~%RsI?CxFo)sIi~g-rrS&zkAVRqs!*enlZ2Dxz-vt zM+%JaRxkP?CG&ykL27n^1|4a`9sEtyx`y588>|r*(Bls3uZL4@nVTBf`7W(oVHH^o zY)=4%=X6#My$DlT>!o3@p}O>v6L1( z3Edkx*IHFT^Mq^^YmeuVLG$bzO=3ReFz=|*)2Z-!Jo>a33=1HKdMxz{ds@2}{hh&P z45yU$%Rh~UDuh%yD=-eiSPi``RRgY?W$Rb?)Dcd43rio$t)PV-KvJ5~ZZkSytiWI_ zZ@9m912Z>uwK81nM!Wi!7D6l6v2p0pPT=lDA3LG7r;Ln+TuIB_==m0Ovnv!jL_N>5 zPJ!!7vEcpT!0X7HCyPBZ+zxx`9r4B+8%yU&JonwQ!SHmTDYfYB13;O27SV=gBi5kv+#bxmPoZ4px@L8bz@Rj!0);I z^;}{zT8KvGw4`cWY2-N_s(uJ(x^N-LHJ>wo`qobC{|+*9N15*#_t%DIthF1UBJ)N zwmVK$1K2l!Wd@M$2Fj;_x)(6@MpxFdnlLkQBP?wc?G1;98PLobT5PcH@ElkCcFgte(vx(GNKDV+U@6n;oE1USw-C&+8+3 z-tm+V3!Da2t6S3=N#d4RzywlziHG?wx zzg_>&LnAk#t@_s0%N9^if87qK_B87;*UR3f*9mEE#Oz`!^i8JLQd;i`CCboe&T1$L z8BasQnqf^^(83z@!qbGFHYrTIukyAOZ_OAv13kG2XFaoAl6vOI*+JP6ym2+x1s%}e zI*NDh!;gnrQS@J)I{TqvSE$kjZRiWX%=Kwb53{m)!+K z3hah4+TpEq;rwYc*1?nUU9oqru*w48;Gpy|bn+tW!(+&cCqwoDqq<&=?-%&K7p~Qz zO?|13#b1JbdHU6^dj7o&up52Zof7Afghur3$>>zDHE&UVey20zn1)21qtsbiPrycR zptUp5r7!;#rL|P3XOyFU!vDJB{(t#zM&M0Iq~cK|8k>~dFbH_>Isj2qeR~1feM*W=PXw1N7}oQ*`7YH+p$@C zEzI%sJNh+;)^;{dSD))#0|hBVK zJuJQNZ-zE`;i%6&zv{DWXml8RbOSWM$p8Bo^D6WEPJWCVng%4sL7Ca@6x#k1wT=3i z3kSTL-Zs3Iy)S=j1SQ8|qr9c66SPPKudmUIL@ZMs@JpnxU%~hyEqdR>gFy5q94Z%l zUqdfl0_e;pl}PyyflS}gt#DxuZLEVDQe5nesLP9{gs0#l0%-Hok`Rr6=PRgHoZq?7 zq=$jycjz9T4txr{szMdLd++o8MoJ$KJYaeR^(X0bR37zM85flo4%bEk!&oaJ^1NM6 
z#FRb$>*qf|{nz_kSgW7@t$fI*e}5l(N{(XpN&&l>f!AO$zr(VcU(x)6RglHiScG~& z{sT53lQJi;GH+uIzU1RclrI?%w&Pp)lsWx&sHR=a4D8o1u=e)6qSVjBtX1#6`$Sq2 zBa(JwHH>hN#iE;OOn?1a?4|cVuE&O_@^%eeHNL|P@VSwqKJ?fC8*UDh<&<@I!<}a1 zCXB;!4u2VmXh;1Yn-?> z22pAZZ59NcXzZTRv|XrS#>s>9+Ln>SEK%Nwc#^uFhH%90z}R;KYh-W#a!odX-^*#U z2~-TDf!x>WMN27+TXp&7oZPth#jb=it#gtD1^K*=dF~H>-LkeC7!Y1IdkJii@`$+J{3|hv% zm=&-fBYIKBi&*9`*O9W0@XK8u&ouNyp1y1gF15$us2U5_I+N+_g`2zHf z)kP{s!of<+^Su9a0yH)TxdY`r!!(4Jyg}eJb-%sgCmBigWZW?%*t|ydXvv7|5y+Ek;?HQc1yBzJ z($ciD32B*w+-Ae~IgC@Wlr;;a9?Xbf=DLHKq|xu*v($wC{y_3iG2hJ&zSY4J{YHr$ zJn2o(QT$9qH}%GO_DGMx&x}>Zygx=eM*bL~bCCLLp}+R298h>S?Na!nB`Ze@MwPE) zd|6K$2Lfp}(^gT&gN@);2RZ1*?@i3Qj8oahPb~d^OwY;4MOSF~BXr(}<=@HgH1PM9 z@hM<;KX1)J;J*EQsQm?Ys0olMb)LEPG?Q7WE+TJr&}w(e_2x_lC+`y~37vn0_Wi)j zEUtRh(vjc?m`QsJYkRcMIn;zepFAbC5>Ib?q~`?mUxXgcWY%YfeEouY%T6IP-mEajT$bqwbAP z`3YIwN_$47b)&r{e43$wX|!?{oQ$u(6Pl+WbdD>tc0qD)2?2 zZ87xm7_0qR^sdeOg)+wbq_b+c6Mahs&Z}4kXRSul^#hX;fqhHCqUw3>3+43JGzEKi z_z$8vSz!7h4Ng@N*cvo=2aanMkI5C8Rci_tDtUL_%9{UcCVyu$D``rP z`eE|Zw>g#zf?+0e$8*TR3h;Hs*^8eRXh27-hCbj)&@Kbax1$5sC^Z{e#nAsc#^F`W zb-jV674x=lkYmrXc$eIAr1)OsemKy*4VRb0r@g$}h_0pql{gqNJO!?;M*n63K`eA_ ziNv-+;ys=8Ewiv}Brp!UzY_YHF=-W0Z-U!P;gPo-q|n-Apf&4|d5^b2sr>=X$MFAe zNZVj&)}5ODfGPvNZU%NERXi!*98RQBK7w|Pe=Les@J0yN>}D`dMj{uWac1-B1S}KOo&>FnjLc)k`w;U>ZyP!k zSdF*f{vV9I?)O9@rH#>6M_(nqBXlf9d(Sa8X2SjISkH;j@zA<0GvKT{2}aKCV>PVb@F3Ky%9AL{kA}ims51kM zJqh?KV^ca(p&fo6FF+44?iNt#DjIYzQqcigCV<;yaGeNE&APOQ8JIJ#byi$G!Kp30#uv9HXrzTo7`HkodGqvf%kA|+5^0% z(Lz7uwJbDS0~Is*z8i}=5;~c8p%}6qgN0fVNW+of_fTMUv@x%ekvONpYc^$nVvO}h zH>2V@!)bHl{Q&JN^Tuo|pFo|FNSv|wp3|BP1b@+AMKoYB{Wk+&_YagJql(-=Yl~fW zHNT4IC+X#WU*n z$w&7Iy3n6@Xef+yco?^d3fUuF|O#nI3uN!Ax+BQwR%r!VI#-JrYY1eKS% z=vhg4rdRoMs8|ka8p$;gsy~43@56uIacm5x5!@dm5lPs#_KXomz(yh)&taF0pjm-7 zniH)Sqva{uK1ca7Na`EZHZNjlKAwZ}uCgWcRueq+cGiU!+vs;2<=p$)0R{A@USY1X z5gPZ$;#35Vc9urEVHsu219udetB=;0fAMWRC9zYz~5_NVhpnFnE}hY5NZQq>7cFWpt&cU zeQUmy!_<#PYQ6zK=Y{Q=OFL^k%4acf45!}&q)dtMw9Rt3RtWyO%Xx%PLolp}RJDiu 
z&q7z{n9XU^D6_qcfA>?{zW#!1+tCPDgyt+UYghvF%?Qd~LGsTb?OE`wBjfgd-dV@U z!&Xq$n{+%Q)rVe;YBgSV2&3+7wBc!Zdk|Y1ft{(wo3lvRzxxexzK2meW(~@T?+Nn< z=@-*yG@V+WN6Q24JX!Z4G}EKyS+U;GV60 zfZV`wGd`Zftb|8X&X)4RoA4ak%jl79gubQ%=yq<#CZ8BrWDA~B(*stA9>1b?cvHCV zjl41jt^g%UBI!2*Ssi53{my8#KM%C^zJt6_*1eS*DSay=xPGoO*o@j>+nvue+AYLf z@;Q1Oh}IW@nzuktTaIAW7seSGb96R<-V=i%RfIxcTn#X5L~U`-`3cV>4hdk)g^l)f z;xD|t0Se93Dse^!mSt^!U^abe&nbj)lw5@PBRUZeSgo3u*t57WUESYhYIt zD_a7Hjg0#Meq7<*E69l6uL0QTao|~kcD|=)TkekBC(zoh^rjcnR*N7%Il0BkA+@*h z)X1`Dp`)Dmh>x*?dZf;xnO|U)Z|1jnC(^p!L~YJ=D5<~HGl;jLFSqc&?{e~o%g@}{ z6Jx$c@FtN`4`R?s#Mrzf903GS6Z+p?{10!8$jgZ$ z`>*x>Jtpi%B-eYB%Q2#Ff_j(Hpfl*Gw>GJHMhlza9!q6bjd56gV>YTF^KF=q zY(~JR9WE@B>EKwA8>1AXl3ad~A?J$zS8MNoB z+z~aVbPjzqLf@NGwm0LibKDu&rRi|bn|~)`CCrV~m!6t4*YHNM%E*rKC##uBHl&AV zXm=X6D~?_-Qhy3klgaSK&2!-MhAsJOP+?6u5h^&M!%yxD99 zwu&5uXwygVrU1{KZ+mlvCs_0zg#HfwejSjP*t zbxc8P+A&u21Gdxf+7qj-u@Gy3E(#c($1DZg4%EotV?HXQSU!OBjo`Dnb@VHDK}&xH z_wRw&c#`EjZO_cd+&BJjWZ5bBA~ig*HVLV3kGwXbPBheQ3Z&+5x;d!-2Q7X{|Hh1Z zQm_MICHMtYGv~TH-=!&Cg?=|P zhiwZD&9zq-zD-6ZvXEJ4Hk+|to1op1zxmaRG1=1!Mw;&7-FIN{JUumnCuQNJx28E8 zGVhtQ?T+*{41FbsbhDAMalTLiY-e!Vmowjx_p8&mv!DZ*B~o+W=cSHp;wl z-sYSN4ZO$k53HuX3#INMu>1*$bmt)oh#S*pN7iJ!Xv32+DR9PHjXcBf6ng(NEmo(# z`!gMp^uEZq*)fbNt4jG#pzj+^6f}h3)b_DlSc+w833NtCVUfT~HjOy-zb$uPmZ2{|f(9*jU zjm7R1P;(

RL2to;x!d$d_80M| zx13%??V?<1x6uo{mA>wtobA$^nWanKb*|FePaivd*bi9O+gh(KX!;3_Tk2Osp8NpX zpBdls>^1Xxm3YW=yph_P)4gQg9!2LL;={h74drM;PXF9mP}HLErO9Paw(TLb;{|c% zhklje>__;O^U4l&oz>5us%bA8c$lZ9`SEXII>i0#o%9M0?E}xi%1&u*)tfQ3oSxeo z=9w=p)e_oH;Ineh?0Ix!Fp9o&FEhdi#Z8}k z6HdI0>WA_64wB8zP}$iwa@w1>NdGgtTqTyjf)6W$<33ga`7{sn9gJqtpX6f91n=kkMR^)@A04c(OgXxelkPTX>C4m6#yFQ1@YS?~$x6;D zn)ku%MSrTHE9WZr@mI!QWwr0VAU4vnoI&SvYTRig+K>DXgUeQRl;dLtt0x)xS>cv3 zhu@I%PqckGt?E0a_J#W)Ji|WTkAz0>8duWgKWWjckwzc-{xAz)N^eqnGFfC4rZz5j zqwVEM;(VHs-2IIrsKiPix!Z+}#S4`7!^#*+MZSCjd;KqU^kPrzwS77Dq8dD(FoN_S zYOjSG1NFnp(2|pf%RJlrhyTHUX~DG-?q-%gBPHEno~wWV?8U>>cPCE{FKi^e@_+wi z93VSl{CIC)mYVsAyjR*EF1pr|PL|QZMWD$4X9LM7xt%SIV1-@T+Wl;!eX);Y@p6(t`8qnw!^x!=HXA#~G=lQPW5yvZc7%k4Z z2I;@0EgQ@(meZb^L z@Ajs5kH)K~`2I~wzbwW}ym2Yp&%D};VB{p-$-YbHbJW!qHp2gm5r2vEndwSzp`8|` zs@l)uyB~y2KXqkw)CbzHSJ2)Hgw|^5<4JQKKlxgjFJFP~%&~u^m1~Bxv(=p#IP0vd zu-+rsQ}U~Og4#mq&q!b;yKCe#jL!AY7A3Fj1(ZFjy~`@py4s>GH0*j@Tm_$T+T3&T zqel@ftYH^NllvyM%z=G-wmpWA$T@iJ3h33 ztEwmaD_sfu9-fUt^JtRIob`L`Vg-oH{60-vnRUZW)snNpvbWZ6NGcF=vT=GE87Dc7 zwJ#K3%tLP{BmRW$TJ3bG_Svi=wwE+d* zEx|n&-Ph8U9VF2M7uVrUMp-K3)JWg$@pB+bPEp?>w4ot8t%k$T`CS(^-BB@Jtrt-UPoV;>u@#a5pF1^~E#C#sT`Gn+i{>F>EMe-!F^FSi0 zp`^Qj?p}>^TToRxt>Ya}zay>D>|;XFx62Cdc#UN2X2tT{LhG}-A~XHnMFu(XG;@Zh zf!$8s$+$ldm9g@Mc$4!vdV(8yZR0(fdMCZ*!L!XtYZXNR`O+X3jLu{MQF43pcZOb7duxIhwtd z+p4n-I6Ya+hbVvB@9gPzI_t^`*8wystAMMr=b7%d_U2~ts=_C28$TUZ+D~Q!%I4qy=q-;=v?*Jw2F%XLI?p{rIe#aWy+Ke50n@{8jhs4_fqF zm3oMV9bi;0YXEZk(OUfa5HDA|R|DS3R>`V>WLDIM#S(ZY%ReKn84t|)BeC_Tak7pQ zS(oyX_p{?rde)<`|X?k3jEjQP|2XigTp8uW|E zKf7Gbz>zQ1KTql81ALC2_Ii13)RR7LMoE9>?KX6&9-UaEObK33n6;!4j~3dUQRR$! 
zW-p~^LsrX-RPVcJtjlh)r&=rgZ4cAd;Lb!_vMv6M?soWDn}k}!bpULB1DhtK`35-2 z;K@EuS+Ug=jAvmvPTSRwSLvq38Asbkp?Wht%jvEeJN_Q7*>|9gci!}Fawon}B5Q?` zPNGO3N-!(9^od>8uKMtJDw9mvSKj1t5t)4&I`?nl7GSV z9pJypLN=qYI(h$GnXFguBTns!gRMov$Ef8ZkSciAg-mACyzA84fY&+`j>pp8%4)e( zi8eat61AgS z@qcy5;%)Nl$v&cut5KTPXFHiCFE8idWLM3;a?Q7kI@f{l5=d9Tt%Lff!6NH0GJf&^ z8vDD-E-|-yG8CTK8|^pjXEdl8#m`8@L{KuDpHs&k=jnchJEICbm(%2&{xlJc0X*P` zOJ^Lo1M{ zdb=Gd-Gtsw!txXkAv;H-%MI7`^55F+CDi5Pu#--xSXu z;=kWxnJsZ=3_exytF%@-`v@koh@IbtH=F3st9bB%{`Z^SNbce9)K(AwZ%1o(n(3?u z(F?WNjcgapGajFlyq2=(sWkLb(s=`iv)9<3=ssuj)&HTNQ0fmS^EDZ;nSahc$vJN& z>!`EW?oV)HDV`m~IzJL8WPbWy5=mBedNCF0@Hzk8>o|x_X78{P{5}1Q^xaRST|d)X z3KmPnJ8RT&0c**L0%c(4e)=I5{bldwy>RUfPe!;-HgNWX$SiB=lV1Mi>di^rwP;r- zw*M2p=^vt>Cg5j}<>ov^`k`Buy`6TZ_rDZZvv2eG zo}Lbm3$=;Ar$yN{IO9ES$UZ&DeYJGec&iF=IjLz48JO%6UIM)h}68IX-D2 zs)nc`8Jt-woqesh@GfP1-G87t`2t&cqRfJ9B(($lZ-9nwFn=1B!$e7$NzM7QWwi8T zUTKHBcfc%}N6FyMyzS}Aoq->R!X{bM$>8ZJx=9{wm(7jeccr{30GEC(HhXWxS&7@uW~+P01F>O8V2`nKj|%S<|Cz zBE9j`i}HtvZOTxV74F$>`4}*bOlZ;j@H1K2Uy*10$ov1q;XNZv_0Ulchxh1Esb5`i(^dS)Uf_LW7_u?&I@GGM!(Y9E{yZ%>%+h*VC z_wK`bzW2Pj7WZ{fPvpZYlHx>~yD!Y|g!k(O9T{)wK-cSgE8`E@t>7f`$;e4C$rw#e zM@p9d~=X}ZrekS{gUB(7B;l~KiI-_DK-#$!< zTH1icyyp@&G=Qgi%CCVmDt+jUV)A6iWsOQztzP!t`O4jI)YOtr^hHx?h0#6a{TeUT z4*qSFNH)=Q9O$D>IE$TCLsdox8;J~d6?&X(o*QxCul!d|O*z+_)5txm!7{dZ4V>qJ zzmmpfBr% zz75t5++R=njd`%_X_MKj86dobXIJ5A7hKxtJ99V(v)oTnwgK$@*+g{^5`$$AzKrpY zFWUcPa<(Fq>_nO~QHGQEg>>K~x^^ZhOh#Ax!d^03-y8?(sHrK+v&K0-^mcG&liKsV zYi4j4swX2qIcpxs%?du8W}x08FyQ`&RFGK zI+yH&%x|?8)m+MFWDcP1gzd8{v^Gh-yZSoA2h zCDu&;J?E*nXWut?mgu;R@)wgy_KzLo+4CfqJ)p|m%?{+hqCd%`ea-W;P@CC}j5RED zeYw(^om&9!dZhbH^zYO%O-9Q+ZA13|%le44xSzO_wR(qnBk}7B{EIFmw>zf}{S{A} zx_+%_1Aj_mCirB9=v;cSkt}j5(HbQafi~rhuZCUv@Dtowj-zwf=jVQ%Lr0G$@Z{wZa$FJ#sf48g~Yt^LI)ME25p8r>h`d)mG^YRqm&vDLT8>1h5wrB9*PLFF9nLI9XZA?grFE*oDzmS3=JMxwUY#5!7M|=r za$T$J6g-ZT$>c+$BBC}ra(7p*QA$jWm$ySmHl03Dn zYN;fCX`oypflK(SLHy2*B-VtEe}~TWrIPvbKHYqlb!SJr@-!qT54Fb=xl~&8te|-W zpH78$J7wp>u!|DeDKgmt$$hO~Jg39!E|SUV0a;rXea$$^BTApm6E=k3 
z6xf_dud?E)kNS=!(={YK8&z2oIt`ef0@6@*~VG> zkSw8$m}R$*zv651!Lz?c<|qf!>*-?rp)l%%wk@o19Sk!1I)%m^uSb{BhIZ&*2(KZ0 zRL-ou1dRDCJ0nEN$;?Sa58-EaRA`1@IhQ(p`j0_RixaQE)brO0Pn13CMx$$@I?qMv zaVY9RigU$hBmEi#+W*Jaoq&BkrhUMVvG429ARaSfNDWPr7R?OfF{!L2Ef}g9rIhwW zXc#jpLt3=x$&$3Grd?$jHA;mxX|ttimx_uy-p}{<=Xu`uINswv?)(1#f4^%v_iH=P z^BPMY%V_s5RycVqPQ&kotj9`H8ejO_UYtw?8R;6+(f5r1{j`xh9|M(qAHCN2$r*L- zs*BCKS$prIVij%X9^>9-NV$`u61pTheY-X~lg0athZX(Lebk-cJqb?F=&u^;B?hD{ z`Y%xGIc?6+QUztQUvwUtbVjwFWHEUL6N$7A4QA^xSv;~M7axs5^saP%brs4kMEBfx z`T!Vv;6o?0Zcf&oFdNSK?1x%tV?;aNcsviMCX%LWQT{@57jM911+Pv|CKWpktI>3@ zv!8gO_X0yC=QO<dy{-kHg6Y%l%$DLMKQ?So5Vm|LMwM&kzT zac)hVg1^I!#H&G@-LGUs8KA`%l)MNH#%kj~@c0GE7=&N_;V^@A)IrVjaQQ=aZjW!X zXlhkft}fVf1KW5dFC}Zu%!1E?=MCuAq}Y=?Ru8Z0I}z{k&acRZK5I;S8Bc!dZ}#0c znSW)(?uhdDYd?`#-1A~*5i8=$Y;*bsmDVPq`3c-MeA8QDK40Ue4l_7?nnOYPNkZH5tOocHmR zh#l>$r)0ZKrh{afOWej-{n;;tSTXSy3k<{C9d! zKIc2sd6`^f?O7Fu@p@mcMi=GI@ig~2W)&N+?g9FZ$9K-;k5KZjKl~={InlT!58|QOh#9dkOt%`-t@1Kythc~!?;yXJ15iPgS|^`pX|ZEq4qZI#zXxhve}9p zJp=mehTKF-mMZK=}ohx`13{u3dbeTYP;AC8i56!qd!S;KgrTzMbW zIogQ+71=w2OxA#9^+KxmTGP%b*1X@7)a+dR9i4mP&OTV?Ubto`db>{tsqqTfcB4&!Ka*lBm&`B*rwgDd>`Rm_EqN5o7meUwEhTt7azbiXup^ie2o*y z`jvbPKWCj=!S*M5J(o34BzJrnUo`rxRQF{xe9jyt^QW>T;wcinxR@Cva<>P{)nhO3 zWgn~KOEPobuFbIpubxysyNt70&CU1|Pq}8sip;eRfW8OK$?c4ZkzLNxZ}v8KFlC+G zgH+y0)+@8qnWw}vt+Td|W7XT}FaC$0nhPw|b8=vRhMvjSxekXD0h}8Sd8n@xSw`$+Hh=2o?K>1D4r;>(=u$`9zJCkq(w+?=jH z3c5~O$_@AHNLzBqk5cOx_9}7Nb;!?mV4d&Nv&zInJ^LM5^W5ok_Ha7lOYW$Nm-{u^ zy3g#jEE$P)9;($#aO+JalPMrMyskr)kucZ@`-b|z-jg<@VhvlE+%y}(Hifs%L{F1f zHv0}6^bqg$ZRGJzkYw$44_Yr$CiB$g%4`Gu8*EB)PA@88{ZJ3HwU)Esu}1tv)o!2- zIpMEm)pr|8)FTHOn{!)CN4k@}s&MWiTAP`Bq7}|&2m5-S6@R>zH>r^vU$-gOo!s^& zGk25CtjDvu9}Am7WW61@zGZzHlY)_jq-MXOBbeiZ-%YvP5tmit=k#JPYwl?xUJzk6eQ&s@+&nZPI{tOk8 zt^9QGz6-P5>2@AGXM!h@H+}pjBK9j%vc~_Pkdxd&JAlTIQgSe=CJx=NYK60G+4f;xYR81jTmhc{~iyV2xVqp}&^e zk&XM!vg_&lX1K*GB>PIqMK~7rxkvC-^4Z8N_Y%;|CMk(!SOmYupw7v1X+P&-_2My< z)3BUCCpRv2= z8TUq^Nbb#gkrrpqDJOZQ9P?A~;3Dl0@@5MdCMwq!FOvJcJ_xRZRXcn+gI2cyWpBO3 
zPvADNwFhZ^w0cOLhd^G1{)Jx2w^W&2wDx=>*-ea9O)$1)(RYxbd9c|7j>%d`W`V3m zvT}=`#9Vm9gFP#O->_meKr~6o`%x@+;Jt#L1HgAJ3-BcxR4d?}NP==hLvEyf2DUB9 zO7f{MLGS-23#;`MUxkZ}ip$kaL_+pr`>~zLoiRr%_pw+z^p;b-GteVi@EyB4gj~fk z4I*`wQU7Cn%4u>>^w>b&lRq-I3M9AYRvPp=UAv;dxvF;hd%FxI<;-KUV_g}YuBC5T zqh}1LU%CsIq>F2Yox#s6=8X)2?{!Z--|yyn52->`FJ!6^GCsoQ~uIc-hP$>p{J?AiPMT#HYl)ZNOqDE{X@Wo5z}=R_*Q4!+;CIHQ?8a0u8(ReG>}l;#t2h2{U`uo7{6(M$7q_v9 z2XH5O)F&45Ia=-brmj}1wr3U00zX6ZM4PlGx5}U?@jjcYrH>2BwfF2zI=D@*@8MDQ z5*B%vlaKjYn+TSE;9ty^jba<>=q-Lm*-QQgt1arE%bpJb)t%_^ zwi3DD`8Jq8gl@OOps#jD(#f~USbdT{26dNf?*;n-lR%sO*^9t9+~+;$ejQk*7zN`u zQ5lCCdzw4Y6LGXo*-^%lr(ibBZ&kIj7f{70kQ)VZOW9~tzJ_G7>=9-o0#EW&D15dX0)>`L<5KSr(xv*gKAGmI>BLhspZN;U6tms?{0 z=HhmJ_I4-^e}l@87On4PFLEmQ_LMWK-w;6rXG zZf$J&jBYLNoH2J zrG12+T1y0d*82H>yH#&;)@A*Z)mc_NiMlPVv$EdL8Fls&4$@0@8ip6^r_;!6PFrjL zP%^8h)G2upWR-qS@jWNES)*;BG4adKUPuMC;sJ4}`CvD#mD-5ELst0NHAx(M*4HD^0qZ{IXaw`X0o?reS4YyvLgV>bZ4}7>5ODD=hJQ__DCglHARee1Um>2bhzmAh8<5ap)Xcx?X>OGn3jtj^9u+yAfqPDMS8hoAK9BGJfUH zqvlOYgc|?vpPZgw@ApR^`BU{>n{1^_B^YPdAnW?&HqAk zE_-(y>y$fX&!_);(Yq^%CWB=>dovEK<@7WegfN1p8@V}FEmylI!QU;og<-^oJmpGmf)f0}D9K-Zkbu3|wF zH=H=t-K==#{X5Wej5hyL$Vg`8OHg7SDPPRGB?Eb42;1u|)V|o~rEK0%^Ox+6C(ltI zFbqM5WCqW@r;)WY_498MyPb_&2mgCuP|C4+x)LWDpT~MyPwf$8^*sH5qwUO=CZl8* zc6u%SNc32L&!^MRdL*PRT>j45<_xZ>`j7d|&DzP8oD5^>xvajDb$24z;zjv$d>ciJ zcN+6@m)N&3NaW#}=(Y*BYoXU`S}up0D?rvp+c|M;398&WvX5-%e0L7J_qdU%M$zAI zLHv@wzfmqZRC6Qn;G+NkQmQeS@3b~a+}CC5eh!jE8sAG2AH@IdU`;Ncz397NsUE0x zG^wgYCgP*t!Y;>9xb=tWQ^kxm{std{W&rpWsgoQvSs6YCp2R?u{2!9@JXuhh7-OpA zdM6ajp7=1lYRw|n(cWz1-dJ=$9c{~KFERLaeLfm(l7am@lpab0zVIYb;#JhooTxi~ zb|W!g;>IKe+*~Kip>1 z=T%1Y$BZyp=Y6K`KS3Dp)Q;M3fd}!Yy~+5s0p*t|m3@w^KIZ$DotH##meu#^#?!=| z4A5R#)cXWg&&0R-g`{_(tD8_G^7brfCcrbijrm=R`RaZUX5N2W zp@-9PF1KSx5;GScpoQFUnb~bacB7Qpcov?oSK=-Fe41RuPq?d^RmkOXP`#(ulTj<4 z9Gl2@?swjS#vQ@94^)|rWKCAXn|QqZ-rIHf-3!%5vJJ@_HqGaXW+Fqhn_aP-l-3|O ze}wzjsIV2Cp7uR!?2F*q-)i$Zl(}Dx@kX1)YAwg}c&OGO2YrpOxzQ$()bWAHJ=6EX zDzl{lXf&EG?bb_2`ZiddMyT?AVXan!XdTPE0G^3<{SyhQsK0n~pQoj}tm5Lu6)CJr 
ztNw-Guak!}yxYvyH8%QW6_pk6B`oIxPjcT?PHFGeOJdzir=@Ms;S+SOKpKX!K8vj6 zKDCyZ3hLMKYI+pomqDC|)Yo^cPpSap*$;@1%Vb+aZl$ocU7RBdy73T(xx_+)DQ~hx}`$4O~6m2a> z5<=@2NqO?b=XT9xiJn8RPNEA}z~D@ny-ByCF^S<#rp&~_ehq@$?yws-_ZJfP4w`2K zT4`2zZGmwj*pgQ!UTIz6@Hi?wgCes?S#~PE#pOSsz#nigyPNCbJ_FseQ@uq$$y1lH zC|U9w=`*LdE7Z?P_!L%gF6^RfPtoA){UrZJ@Eb|)AHe+qY*5w!Lwriy$omC+tF)R3 z&P1JcqY*1Xa5jAJHTO9Nce2_Z$u=dofAGnE#%^OycB2}SgJi834TJWyWT0|qsFC~I zb3fj(TKx*`zX$Epq%M2T$;)3B9aggrv6JzuxF5%RgX0%Udr*E!3;5m-q{P;{Cnk ze>3Wh*289!Fct^u!6?xw%e4|2y%@|Nk=LbUU_I#_3cfY^>4-8@N&XU$j|9zSB=`^H zAom1cPA=M!%m?(=M{CX5kNzlMhm=i+!8qS;rC-y`ajwLv#PDUGI%~3S=$_eDGBJMZ zTPb#}3mQ)(UCY2R16P_UksBi~@$5-*8C*-b16Q)=*@qke%Va&NsFj=XAbyIyK~oz0 zJE`9XbnV#Cc=RtK_ti-0Y7#VAxpmgV^T8E3#qJTB zR+NA1>vHt32a=m$Um2#klP8%zGD1!;drxdeZlIiuU&-Y2B-=6{?f-_Ft6-MQcF9t_ z4YuvAiI?MaymfM?zz`7rLa(x3+Z~w09t6ViWa&~m(2g8jOmeHhWgxh}(8K5Spp$y% zk)Ov=`W7;jy|qLKpN_8?b=zt$kv`{RIwgyg2(@|;+!SVR9K5WiX5mfPL`-N6=x8f?0j6kW}xt@G~@m3q8m2dqv z)c;%h|F_C2SPJ=cv%KqB;d!Sp^Wj_ktBMS%sdK81V_LjB|(NM@eu zbfvk{n{jxnzsaX647wGLeT(v@jIqSAc#eS?K`x&UA4T ziHLXHy=b)AxcxYe=A`RP6iU9PR`@#r7fN{v2ZH!jTge4Rm>sWPvfM7v zMq+Lj=zR<<<15{o4DHn7eiB#H>>tE?p^Y`p5#Y`F@gX!O?|xj7{`fy}0omQiYPRG{ zF+y9(KUn%b$B3RilAK&G_BT1&lKr5xgHhYJ>;2{@xdyUA4#f9s?Z;uLzQ-BqinDsw0q*@bSU z$nvN3ojsFybR_n^8tS}8l9tf4%RzKz!K<%~#Mv7Ojn1K=xvecZ9&+naPxbqNJQ?@D zf<+y2mED8~eU8UQJT(Gec6oAk`4E{nnngJZ?8#kl6AP3K4O>a~{}?S!(aPf_=x8)K zw7@O1;o*AEZcNVbA1_+`tC2r6${tinl0uhKU+FJDB|n9ud`i8Zu#T@o-ej++#P@vr z2dWK6>DPQtmWs$h{9(e)!;K_+y({fBmH0cDcHD%q%W^1Mox*bdhMs00C0Q%8 z0kUppP}7}0m$4Y(J1xtdz|%5MVgmg+tcaP4P@pl znqOb-%A~$Oom<0F{oUUSw9}jZO*H~{X7lE^Xht?Fdw7S^LCJ jc25!^nmB=(db>&ecnc)MQSN-5 zU?iGlJWM2EJM;QY>fJ_0e#(Ak#Lf*jxzQyV$dXelQTAKOO`uJdoN*w_$b2>JSb;7x z)#^yTMuRU|n7?7?s-RP27Ch%L|1r+IOwz`&JF#l(e}ium?N6p%$y%`6 zi1#TBb8fc@z29W5=aH?guy|gHNXgF&s`WO`WvzIxzsVCc&FbkGv>oa1HnY&?VB8F% z*4k+Ws%lytqy5Bo?DLb;-a-DpO2^C6^=$=R;$@Mw-+FWyNq1YDmm~^g6v_|x8()l> zFt2CqO&*8bSF^!bnf&w>Nk|)9&kab6^*7VF6VIxwQon}F4e+m_^^@^_rddWebH)u| zn#nTUV`NS)-S|Am)36%NX$XSeWM>{5K1%6eqgQfWj8wZZC`aOA 
zJW4t!@jC6=Mb4kp>zSaNs`X{4up4%5wHI%p2k6XMFdT?RtI#_u`$YC1OLnrelC1X& z@OzFv=M~)VOsCJ#XS^}u$D2%scba2OwYHsE%-S=D&+bWQP{g}GzA%$Xa&=M}Do@j1 z3%D(3?Os5w`Y_uF&pLE-8wtGJ`{^{}1rnZY_>18DI1On*my(C-B(!^-g-%RH#--d% zkh{tOISh2k zdG-Vy%YEsCwA0<+X4*K&>@ptYhoJKsTwcg>^hfW^*Ai*c#q%>kP*0uQTe%k9hk)^U z(*w2apE;k%4=aPyq2L;D>&tLfZZgg zAE`eNlr3R$KRj{=SnhD%t!yGUI^bF|q&0zWJd64$A9-(q<;qqDRa?q(=I6;0ZZhBrMKgrl#aa6DPs zfo?21EYngv<0lk$xSiJaDW9AKUy``oif{!vpQ_|e@|Y2NRKfQ|w*Cc`E5TzJ%JkEI zJ8*n&t{=aRN;G0E_`2asqVSrK<+-R_p_p03@9c0?-vsvk-fRNtTrk9Ue?0kk8|+<- z86}>LCt*1cj%W7s+SrEL$$F4Dw~vhlQ(^LeUf*DWv)|g!^PIp|C(UnzuDzd0Y77z|SQ#tuef2fjizw z3$@_QoIrOO(W^@nwmW&YNDEQxhwT<&o}7tJ|(i-bQ{{9Nj{Rtw+U)>^Dg^YpQCKfop%@d zk^TP#c=n-Eho}(`$jrKuLHt&lbfY!NPqekgh?Kpw_*`5Krr5nEB;q=j?Ko7vl{Cf2 zWQ((eZ^`^+ENU~-&=OQta3C3FR_b>YN<~6*n{{r8=?(U4QRf-3U8w(fKQ|+F*OTxU zd>W<2wrpx66nj_si}3$c^lpHbP0g0pve>iOrf={$o_Y_0b&xgDT=i$to7_a)(A%X( z^u>DnGs$WWzwDMIvbHli)FGEGNa2Z~9EWz*QD_Rdv)4HkPBYMS5SfUVWpYI(r_Y1n z7{&f9MT_JDj-Gvur&HL0T7DBhQVlL0Nn$th_A!bdARouTe5M&wZuFU~l?Q2EMVwiJ z>-F^*Khf;P*C=>=Hr$(#gtzf$KPn{iQhZgHY9VJ1@pQ|GpIZj!Ssnj^MQTQWZX*rZ zuPCh-|8M`{$M4B$kf*iv)ANT?xj8b9i=-B568Jb`1a%4xYScT9J0DDMfXoo{&;muYv}B*d|a$zvky`F`L_7pT8-Mu z#=juB9uCI88l<&^M|LWbiSU=CFTNmIXD3(2k8ND&Q!?>?<$sT2-JI-id0T>FHhW!$ zG##Se1F-#*Hp-*xqo|qPkj-kIkF!~&$6w=Y{LC)GZ1|vyPTKKV- zd=4Lv@Y|zkA^9Sw`IeZG_|f&mk0a3QX5(dUQWycFP_qXa`5POYUh^itONYZHdn>sy z;V3pHkuE1#dmKWq>abwhi~U-ASDN>)K>t6`yc^*$g$2l{JA$2xm9EJe)m0-tZ?`Br z3Z-*GUr&o2!1p)`KF$v2w%go&HiZRhNOJ}nT_)1kI&}9LZ8n0*dL=iK&zy{I1;@uE z-~~0}N8FjDRP!vpfbZ%zH{u=1ZY9gnUH;Bsi&xN%xv*_wJ(%pe2cjauCq7eVUTsjdfLBM&)Y!PnOvQ!oz|@W z{aVdw(Jx^3vGKipp%uCHpasfZ%hn&yHopwl#i-Gl#jflp@uOAAW((S}0{s&WFx9ue zuzk%ybPNnS8%5rRWAe9sh(_n*)O1v7X>_cnRClc==iN-yPW<=ttnI~CIX^GPx^3FK zNQ+Zxo&0Pd2gxx1TTtgz zIy01b$yCLa?&wyDHOQ@Txi|M+I8F!WcoMT-ix+`3`NJ|6T+0gQWV#al$cn!Q+cKn> z+2ls2yY=1}SK@E_0r;O+W-=R*IZ72=pUQIFK_1#GlL*8bN+wt9ICI`Vljj*|o72<# z!T6+>bLU8I9LNkkImf4HE&EW(K%a=kOW1=%yUzt_ePhZdcqLC};CssWGzlIZVV2qL zNV1*Vg3@1AwxY5YUWLzWmMr@(@ya-s#Yr4WymOPcVzN=Sm3r~RP7HMBOUWbB1gx*1 
z@IDX^f=lKH+x3^Im3S&7_uK7c;PPU>Y6M;{0ZUmtuB!Gfy$ql^XXaEBgl+VgTY#(3x6S_NCWxGeuZ8#j1OM!NCO){Db`sS$NNdUZksN<-!}u_CI-R6F zj&pnIMrA#eZYLk*NqnppC_7!<_X^IRrvG8&ArZ6xK%YTsY(mT22C*6kF7uoAvb%FR zI#gnR;#twhn-wH4(J{HX`ZzMS2TupGMaeIF7)+l5(dA%mO!j8ubaFYw7w;y!H^a@x zwv&UZ;0aga7m?_r>^a2C>s6Fn!)j!;IIpk@$?UoS{V&10Xw<)~ZQ^TD1*{JkM>f-x zS)^m5a^;Px4Md&EvyYaa#8OZ5dZYW4p&}Q?$57o%+djktydEWc} zsFOXkL`C#tH$Q`ZxkQ;KGBtY>)1|SkannIuJvsHWHl<_2SMdQHH*^{h0|Be3140yKo<7H6`MdFbWFPe3QYx*N9(V(w^@O|OG9$n zpDcrq754T)c;*h9`6x4uJmw^GLP7OaXwn`Xa~tVmoSRIeccM!-WA!w8&>1YZ8!HBa zf}tutWwh-@qHCku0JKTY%x{%htoQ8iPsQ<4Tu;0a*X#Rf7UL*f zO?>AnEhUp@*6#6>OxBGi@TmsYMDMosbU%8RGiv3`;Q^oOdo~27htbt}DAj_7bk=vW zX*M8R7lA#VQ`s9#mdMp?;L$#1Px;sA_`Gp;8SAl+R-D0pwME%SU^^cFTZ1Y$nYKjB zc51$@#cxRAer-hJ=j!POy^hjb@>k`i+Dmb8QVx z=2R^#Y4+@wnA(N-xI3Eu52i%}Ks>h4o}ZAZMIcNLmSkoMG*8gmiKsZ0Y=1z)GGECl*KOdL zqxL@1oEs=}%hPE_g;MV6M3lVc%}vD^Ho)w5iX%~^QF??Df3g9{}=i`fnJ@>QKL`ishLa* zIT0!6Z+3-;7MA)X>zd~H@i#Qd4NEKad!N#?V0XSbbuD!7QP|sL;hezM{ukOrT5e$3 z7PD8Q^%cLuB?W(z&1yQSXk>o95xtXNB6|au)OpK@zDKK_(Xt;3WC!(bbjWUdPE)Fo z=2-2q>c_t&H%wlPwkyGxv2s3)+Imt~xu5B+D(>9_*SBdx#4O>USo z$-oK{G8G*&2TDxb4m~{T`Fr^Bh1PQ4?_5xKQGPg$=xaF2lX}Mdm!PD(&tA`c^QnN!kzKT@6-YD;jJxZEwX!&b$@ZJv!r$>;V*QhXJB-=_z; zUof{yBs${=t<}{_Ph;m_X-E73j%CeDdmjg(b9N_2fG^nwY8U+^182!cptiN*q26aj z8jpbdWMx|pg@-HkwN-3jioefoo+r0L>G%Kq3*rTGPEoVudywyU6yM{^ad5%;c$v)b zEbHL>9w1Z6cJNo0_G9gq(PM3YOFJ56e0tDt_CU&kdMLT5WwyQ_Oo`RV`>xu~eS3%5 zWhj;ZotgjfIm%Xj)S%ca>g@-2=GE!T+r5%it zS5NkgN}qn@V|vc56z3HG!-1Zt@G1@_R-~ur;cb47&|7JrqQrrR>Bk#CygQAB{Rvsl z&Qao#{_XFdV3a7HQ0^J?z!6%DT;$G($j=Fd3_k?FoVcI(1Dv(BGl4b_Mz`dg*^Z|} zX-FbZhSA4*o}AC-H)U_n$DfAubrntOU_@#{537^F(w%I%zov~nl4Nb#P8&0BHaF`? ztVAN<;_beWEoj5ybOC=(`rg6Y4^i|>@7Lkatssx*bS02irp31z<0`Tg&6N0uQ7e8O z`$@n9#@D*V>}LfF)r53RrRDLXy97nE3h%D{M5rg<<@BOtZ{n|O8P2#>1MHWP+sFJ}XVl2f>dT<5 zWxm}Rw2%5Ugk?!Kf<_?wdoezbVzJL>4<9F0ZCTP@pwGyj=;_4v#n1N40`hN+Y=c?u z#>Uwl#@D%OT&A@uZ0$gD_=vs}mpK7sBaJ5;^mjU(GFIoF;6Ku|wVub{a2$xHp~#gm z-l>oFbRwKw$PRAv?JjV(0NJg0nEW2eNSnD(ZkPL>9jj%O%6|VIV_)dm5{JKLf09!? 
zUOc}v{!H}08hBdZOmlEQX+|-VjwE~icW9EBlga3xeTtSOKE9|Y!)H85l5?rE@^|4; z_Cr1+$;pBL9{ZGxgrkip^YLj3eoaP)f%x$Xi+(m;byjIaFL@Cp`&!mBFDdtToSFjL z>q*b0+UX6x_#N*eMTrH77s^#|Sq0luNL%(%sunBduZ?MY&@CP{b!fmGEk&+3v)`LY z<6rT%fz}pkBk==?1kQbp^KfyQ5>3!1nV1ucun~n?qiSw{z0Qn0v%@yVt7JaPsr~`2 zwt`n*ST7+*EQzscJRNKAj9;=Pf$=wu$<2cLmj%gU|~DrY6M2d`SOWWOq?cn1B5mSh$9 zu(@UC+llXwM^19=Hq&FWCa%J-+>_Y>Zn1FL+bd6UvI458-FU4gDl0w_Zl&r0n5Jvi43X2-BfpW$mV z-DMBpPiU|a{o-4my@snnoD3xKM4k-Nd93EU`i<9q_HN3e>{k7jMXC7JB`5HDTJ#P`(x3IoBZ}xL?x*N$l12s-ZnNdnV>uF*Qd#abLjBCh1IU{Z8ofwj2 zN_bq)bBu30^)i}$en+p_0ZyjTQA!Mg?^hto-b-EUfQ2|y3-6m~BR2LO>jFW zT&+=kivNi(E3HD~kN+--%N?sN(R`238(|aOcoz4Q!QnqflIBK$+vg3J}0x8$6SZfowbo{K#|U@jn=cglZ-!YaVAo^&9j{OCcpn?f2ZQanY1vr zEpy@cde%epco95?9+9cGXf#r9cY|~!sN%^HJ9`uvsSmo=O4ThSA(1stg7Zo?XaETN zlEY+th>v@2*NGocDbr6aE$o1IJYQC!!ytO{8LrLO{$cu0#)sSkaH@Lu;@tr4OfIk* zhNBrJ&%>PqRv@__B0C^m;F2i(RV>b_Fq%SEt_S-s>H826BxA{!;6D;~Dl5OLkj_y! zdoJm}iFJ5~p5{iw_+MpbCb?wd6_&`U8Su@XVRNz=D;kfmajeE#Kk*NFP`N~sjrTTp z=^TzK(_o)n!`b@E9((d5wt>}DR_biB5U=2P5M`%iCYa7f#pQ7Aq~>-{s-i+C)OfI% z(=>wHM`-YnzHh|kxpXA)R4ZAW%A_f`Mdn`LoD0Oay&Zb4!0C>lj<@EGWFfgjl9A6>tp}_7C!<#zP;7(Cc9Jrm6f`4W z8Ho>|SQ{fqG6TOzniFx;P`z1TsAHacHBD`aFGJYI@V6qq{F9Zfqt+()C2PY}z4k-1 zj<~V|KN6|gT@M#%JDE}rm}@1vGV_Jb^X5WZQAO*=`~7Wl1HdJw5k#?LYMk-NLhA!Ifsm=p9c&+g;G1M!q2<~EmYSH~g8yX7`D&o zTB650l7{Ry9HNzEdzk6{U%Y)7)v`yCjGoC(`3R|aMu}tf9KXG)aF~e~PZYh4^SvPq zlJ_XP&2ON}M7+C(3@5Uu4Qd_)!ekcyhAb{%aV|iq&bWH4adnd?i&6S0zoU&Nqgamk z#B_FsmKzIl`$yuOSAw+_-cDfGHfb|{7TIr33~+62)={soQRry0`55iL7UXB)PEOaq z@i%k2H;aC=pYR&E8mRFFxN@5Jv9{)rkO$$i)S2KI-^y#@C7l0_&kc+egU~E~Rz1?hVNPNrVqC^7(}SeR4C2%ag#8-%@tetdO(EayXup z@%N3QO!j`Vb8>hAK~?gQ?}>IM3-ea@_;&tR-X^Y{~|$uLjMs3T*2|`A4-jaq%2)A{*Id$X-Kgg@(_Q!?c)tJjz%rC93KZ^vaILaDPibUG$i> z>ZiVEhwiuRM{ah>+xUFs|J7P;tyXfSyy5+y3oaybEqP+%OOY&dx6-%D#kcsJ{tpeS zLDzF)^|3kr3+!$($Tc<=Y|`IG+PsYgddg_|Dw{Er)qR(?Y@?k+>A};+!}xq9qfdO$ zVsmqgck)z!j^+5c=f>}xUS5aJKLypladNLu_xQOQN1oC0Oq95Y z?%hug=KA{p4eUtUUof-Sre-_+Rw4&>Y~T( 
zsG7Tmb6?IL|Ese4PZ{Ypuo20E*Ua4DGOaZM@1@|$O-iq`m%GVsZe&Xgbv?ai-m=~Q z(V$9p>&om+eBB$faa&P0nHcwKb2NICRd<}H_xPKyWxMbHt&Jg zyP!&Th0Y*PG<8PN%m?E6-5$4lz`3D5lS{0gb~83y0jhf1=x7w(N(Y9LuVtvvkM7oD zJ#re8bG_vtm|_&akyIoj(^lB8Q}7hBedf|BX z9Y&GZAvoL;Wu7xGXFnh()0feYj5B=-4xgjfVW^f{yc=sZRGmv^`@l2)zq!dXw~n7~ z_HAJNYAxeSMr<72Ac>yfsK=Fs)^_qbMa^74_fbg*ZU(ha1mrcaH{ zsrGx9yFH&H6%F;dMBnX{*$Dc}z_pgdB->(c)Y=Tb<>WRi@qd^PO)Tui1TvJpo@QvD zyc(5BO74}fjlRiMa-+7A1195dXLMSF_Fu6}x8uVs_;o|8uYErbEt0!;wBF{x@Ci6h z@hw@zbBkzl=C5Tt9`i1z_PtP`4+vuouEm8jwcLpWPtt01FkTD0!I3?eN;K`RLjE$d zN`{8d=|^W&x`b89>E|q5im&l9GPjSk9Kieg$@6<~SOdSzWuJukU$v9ntRqQN3)0vU zP506IOHp@)CzWX266JQVgqdr!@$D~W2iJoAFzx1k&RMKVb~v&E>&6l`*IPsMTdaO! z&#EXpLjBM2sH`FOsw%q7&uV*il;>>0L<0nx|H>w}M;AX~v>ZI@kki_FPXFj>1 zr9Mh;)n*fI$M5Ygc#~UYzeLB}Njp=kW7ST!x17Y5Wq%`ai$PTt)#INtPrDb>tlT;r zOSZy@lYO?_FjVrrU8kLo(6BjLtkCmW#^(4q*Cm~=fIfM}M;B6+JglAk^o4C%)}e~` zi(r@ebal2Rc^7wTDK>Q-uH%&KsBUFc>PwpHkhz?OCUeJUbU&wV$=h%eD;;m8p?Fl2q-4Kg z2AQf2&fITtpKq(l(Ouquiw5_zS`|s-9OdH8lkC4)`A^jAdtlG*`V#Hu)SwYdeH|;m z1(XeNId=109M4H$NA~h`wKvg{oWU+a&)oU@H=lCSum=}A;&x8yPDY2E8??~kY&aiG z?&A@*hGj}TOLo$0;CwPCt)g=kl*{Orv(H3$ysrLnFwQ-I&B0d{e-m|)U4rCChzG;~ z)+IMECC;ibSoe{!oIWSBc(N>QLcz~S+DaBVS*FIZX~VUb@vbgd?nk4^bYmsSyHDSL z^xIL}@4zQ_ZoW+SXVa{iMzCaMiiYJ*onc_@gofFhO_bhuTD)A3xj!(wF^Q(%huTkS zH+dFDp?SQ(k~=#4t;uURom^KcDBRZ=x&favg0}+cZ_p$o(bM41zV5gFPe;3{+W1i4 zgVbE*&9#Nx)HFKIL&;g*egpbss2ayQCQoxB&T`Z2OjLLmUdcbWug*v#dbGb2o>x$M z7Rk%FvlT6t7CI5n`B^Bi0}RO>@g6$l?waIG9t!8=nu`C_cW}NE-5)}Uo~StqrrmL; zG$Lf*sve6xNS~L2U>sY~1V0Cm%?5a#-L)BNmV?h{TD_E1Cz>+3wa%hD*W%eYmaIAq zmXf+1+K(qn{H{8IrI+@*o6jYOQSPY?M-pinZ=|ff@5GHn#^zqH%e5D+dK#}Yk1SAiao=C1cM3*d{(B-qC~fQwNXhn)~L)(FJ(E zNzIJ>8^Cn0|6|npnxrNNc4mNc*s5K4R1;j2wez-KtI>qER>D_+_ihyE1H$=eo85>m zK7~_d+34)+#TNaX1jGlfEvOQ4kUKtysyEQ{*vZ_laiw|56rYn}HL^Yyblr=VzJlu% zdRh@AnYrAG|G$PyqJthLzc+&{QMbu9JOlj`8+sB|1sukbU6|8o&a2bv~3f{ixm3dt} z6mlPMCzwYMyQr6#>b5j_4INu(}*m3>IoOtN+qnClw_a#!O=tocGVDtilK zNx>^P(j7#0I?qfa*|uNvthL`N)=R(FPhIrA2?R^E7>}pwWO)D%kHe+SG%7o!-Mxz@41#IS 
zY~%}r8oBIGr6IFSenopy} zS$J|UPOSE10BSchkLXJJ7LdL!%ASEAxnC&rg8qeWB`NX>$|l@Ngy>c{oNYe(KWs&EkTpPq_i(kAQ6hfoIRTi3GTAjt(O$szNV9f{}>aS;y^EuA5yG}b1P-?8k|F~PC>P-Q9mKIv4>d`zYeNy z;P}RqWDw0-`<3GV1tdQiGato^>^Kbf^a$@BQ6@1kS^36Sqq;J0k;ZDps`z+4{g_)M z?U!Zc(8H)4f63eo!v!3vnz0#Hj)b~H=KmNpOKHa?`zX1?C1(dC@ah~GXZItLTid(rsZ=jy zqYT|D?W#m_l8GZcNtyR)T(aZ_-xK}z^z3{3RNC80UZ3m&4=c)++WcOLQ`o{&je#>* zwCwdaMZ0ExlB+nmp$3zwdujJfwEPtQbxG+C&oVbm^xC&HtsLpE#-3HDVg1?U17s*s zVK=e!xlQk%Ecriu@2`)K*{*}x&fHd(D2~zeF=s$E>CGWNcf^&YDBOZxFW2@>Y+-+v zH@8KN_y1CSh%Zk(>fZ~V@q-VR? z@XTpXV-1sQyCLqr0oT6Z+2?r;wk5a1Z!%)F@;3JfX6L4<|2KG_GoAh*&B`b@P3*;i z*Gb7xHLGfKnlbftEoGi~fGtT@soayW2sP)jx4AF54+}iiOeUw|tJt*>B>8nUPXNP0 ze9Ap$$y4??%C;tJ>x(%?b3G1%OESQHUW}}(NJ&T3Xrr}0Y*RgS+wJ>C{7NKHJ3Dt} z(euyfeX@EB&>@~0&#|x%D3zR>_v3rw(}%&joccLQPUh7vEOcX>9}Ax`#?|`ZjPG~m zu4~XJCsNBu#~(@mh2VJ-412ZLQoa6qU5|qCz-gtITS1oi@Z6V{^>|LQau4GRpq^|l z{S~g~T>0Vx_r$zkT=4Io#`Uc@o|#cb;yR?fA*mTqV4u6M{|(!^=Ft_jSjt?HTw8DJ zC%0bb>~FK#z|r`gIp6bSGkXTd;nrr9OD?MIEKN@NDxmWLJUpK!jP*88x*C0Q7koTh z9*0ZTD&6U2@kFAflBp^0gJU3e~sMGF;TzO$xuVz zbC#L&oIj9_crormy#plSF6G~a_dPHu#X%>Iq`I14;z0|z=8SF(NysgH^HA?f67|0K z6^$ABX|}qYJq&T9^NZB5U;N}W>}Y?M`p?M)t{!F1c08lT zqGvKjU4mDOJW2kLZ^_J37AH3WR3W!_d6pHzV57!%?FaJ>_&f$Ii5JMaKi>FLl-Xi# z7Ju;WaEpaMl6{;9#^lcV0QS2{-Uim;Z7Y&{!F3S{s1KXFNnOrP#-Tyh$H`YW(M;<> zoXQSseG-$C$HnBWig(G8+6U!^ds0i!wLmiybUABpj8^ec`iItY3e#Tsdd^0(CYykx z4T`x*V(|}Uy%wNM_9g53pV*vutS3&YDT?M){vwv+Ui@r}&coQm_$K7sI+6IvVY$es z=IF2o%)8(>L0`eC9De1_*Po)!CR92N64^Hm`Q?P>dM`frR@S;w~5_iEBU$*2-vggx}@3ObRzH@Pu4J`U4xIOn#> zhy66E%WXT~{BSOqJTv22|7zrTno?PbEWxQ*qn0Q<67Mq~PHx3!J}sd!ON`6Ox;q6t zO;Nmqav6Qv8Q02dzcFk1DVo+m=f?2PXmq@hqXWKeqREMJ&rE$53`UtfuY&DbJX?uJ zqqH8q{t#AQl8wHou~SP|keS>Jv6dwKQ*Rpz2%f_6l#NI7CeXApc23dr$YS1_ReL8; zB^ysmKci9mS8!Q?pU>*8Jm|mBdv2AfLfTU=`=ob*t+8)S*ylv}jiJBEeer9M^<_^& zm)Z3DBBiI`z~4OY2i6nWkSkC)d3c+^JvoV&;=-}C?ju?kkA+ouRK=cQGX2azr9?U8 z{_eC9p5Lb52kt2x zM#&v4QL=rnz~l182tUsE9cDN8pS=PW}o9-I=KeT7ohrRpK?+hpRXk} z@bRa_mx#|M)uN4OB 
z3mjsHCWB@LEBZ6K77xRfpj@T@@6kT@e|(F6@z2Uhz+$tvE3|xxwi1o{DLI^7%rlyj z*5^s<r_C{r^!Z@p=Z8)(~U%l||a!%{5j|4`?UjU*a^^JK-Jd^T~y^&7R5cwD<+dPLBVs zICldsH#Kk1`NBvu`~~FlB=z6Z(wl7HeC?dam^{7(j>H1j_P;OMPC@%} zFq}quGV{FHv+v;_57pnJOnG?Dg@1P9vhQ6>e`~?Mh;}q(G0LOUWw88I>G--&p(F8+ ztYrkrT>$48Sz?R+YIc@LrB6xLIxt;Jf9j&}8`klo!JllziFfa2jaZKbdJA`x2{Y@; zM&OxGj@m2nOE}$PJ{MnzWGlF(7_C>KcP}Lq1(dqknHdL~)^v0&YZgzxMtC~ky!B&P z3^J=|iidSsrfsN|TWwe3^_#|!$#{|Y?^fD4h0G4pV?|iJL{ghuDdx83#5KO`Zv{^W zpkwyJ9@Fx(=(LGteV(KyUbzlFbt9?uwf7#rC9b_8PVDkKM$PO3HdSV8!Moe(+$vgG zO{r*BXO#G!<|Mn{B_#R|`xLp|BRMwyNi#a3!3Z+)2mfbUH(hVsKL`DuDpt6s`ZmF* z>>Fh7Rh~d&$bbeg0d~W?Ic&;c>pj8|8T4rum=z4*45DmiVCjxYyH`Mn2{F??8JL zX$hvE;==Lz&W_N%YJ9{32i~-uHcMGxuh-&_nJdzFPk6TW-i;(i^xB@(!~N{)Y-n^4xDV2L;$BkzYr2!17KhRI=V1FTp7ktfFgU^Tj|Fn6W9^;F07V1p) zxx}NNkiBQgT<*sC1Vp=higbj!zq786biC%tM@5Yj=)u^+W@M!}z^8ra{3VU~kfbHs zTs3-nlIO{3ojm97v6k1-gshR1q3;jM^rcl7>2D|vs#Hk+t*pv_;5&!yPA2bf=*J3p zFGj&+3q9HU-0J>0NaI=l&tjdITdcDVe-hs_SI_C=W9;ukEL>M5=aI!<(fG<*yORu8 zQnwN5D36PFc6>_)qNZ9njTA2QTi9as{zzNf^d29JEx0;F>z|XHzYdwd+0;XoqvpVe~iJI&2|iTAmYPF6C5$*#|S z5Zy>_&nzT0CuzB3J`$)@|9R?j!PJ_gj| ztg(OZU2e_IO*=zLRq&`vE*7wRAE|$lQ6#xQGHUc=sk0(l3AzE=sYq6K<7R7Z^)X_U zQ*$jCk{PU_=PN;TjQW$*8i~ruquE{Gx!pDq|CtG{(ARUc=@OdJp^*0MLaYMyRItS- zU?F`?PQ}|m*#Txt(K0(H9gJ3uP_PO<wDMI{K zx`TF=xzL@c*Q4O-MUmF4rH?zb^dt z`1@znNp`bm%{+Re;1wXwYV#GXCV$4a@aPKDsxP1*%J~E|0?_Sl@k3y@O(j)n?QX!E13O|mgt)7f~UbV z(KTh+gTxEJg_>1R;XhzYhSeJ6`+c(4t&o@anQYQiY+qs@y1{UW_Hx=Bj0R~rZ|gWY zNiNwfXg>oNlUXNcu8YCfR2%VZi;qBm^MxUFH*prz$<8peJKorI0Bn1TIm%%CYXzS9 z;7J@`^6$o%`?=zjw!J>DheOT}lL=rg13 zeFr?vy7w6{ey@%7WH|RWjltW*GjGt!LVRzb^;qwDcz-u43>O@jgz9&gu zRrt>5tNZWI^m*$k4^;le<)}W9wf>%jC6A zPnWX`@sM7qx06w0tvUG;nDnPVSwlSwkE~4Ef$&S3@HrThx34d35+O987)Qrz<#X*$ z2j!{WB?e?CIIF?!Vmi4(oqC{tizTVxTkgJHMefV%f2Y2$2g@QL-A&{5j*Ga@qntxo*YBBhx(scct(se`bh+0 zK2J59e;R+gD;=vj6)&2g$m=ZayPno%XUBLNFRf&r&8hcV??3f4Hz-d<%^BqB=kRNa zTGfnwlR!6)rP_uH>p^uVEaw)oeHK}bFI_is+mR+OK#@goi08us7Uwv!zY?yi3ko$w 
zneMbQC(6nFo2+4v(!WHgbk<}1l-iNOFO_-7ytf^`O@&K5AYvh}1w|`xbwRbRu-r(> zyTLOzi)4qax4C9~*9W84SzxHIW@j4nOZ2=|iK?V%1wLHFW*vn?P4QtbOqzTCuVMsD zTwD0Rwb1a)gvaCKC*Vt*$^)!cGErt-zY?{QJ^3P-9EKkmv#vzHs^F`l&*Yt5gu3w; zy}Yoz8Oi>sW*;<5CX^w#*OgsPwANA*d7)?L8DpNsmr*QS?ynvIlH`9M3c{YCh!a{OW9xgl>9f@VHo7uTV+cNZ!or}8-8J|g?c z;!uI)4~1{6PWIw+Q_B{!+wA-_)LOER_eSB0;GDwVyi?3++S+T1FYC{|PXu~-n64l_ zw`spA${pl;Jaih-|A*9_L#A>{@TfXn$kOR-Maoqn-%Dvm_9M%Y!EK;_imv_wH9FDA z_@dRvox8y`8x<4nmH72o$Ye)PBx*7oUm;Q3Jjo4Hd(a{o?|Yc>&Lm+Uk>r=bmgt+S z{XY?vo`dsDC9A8s7~T#1#3OAgO8rGYtI>CY(dB6E=RS|@ki39H+0Ph@-&tKgV-%|m z;tj0KMEwpVU){_)W@#%vX)6mmnJDQ%pPVSI@T`j7m(ZE%T3D}CCtRz5vY}%Wedb>4 zzrbsN+R6L$D$X58FMFWx04=wqX%Eyn(ukfBd>@I5%v2&*uPeU+Pp$x0PM&gG)fn>7 zP79f*<*tT{K%RR<5_R!^zgaCOH%Rsa4n?6yiv5TS=>Go{rIWj(RQ@Bh$-c$kNKCJS zK70Q6U1BPZRr^9X#m8p^UZ=&6Q1N;;;6FHgFuj=JdDh)Wpz$Y0p9j>)uGPQvR`L9)(FFa5-iBR9aDi;vIX)GKh@f)+i|ex%RmumEoqe(Ge(-oviF79jvXMJ{{q=Yr)4;@V9iMaqjjly$b}9g@ee@&F~7( z%CNO>_*;e@-9?5{^8bEH{R~&~6S~{8sC^~)&MDwd3uVZBy#M}+B0bp3RxH+J{7MFm z6ScXMHhkuLYjV|28#Pd-DVf}kZlloY_qbA?Ep39fEsV5@gkQ{7+^?@HM)VOV+6dn( z(D}+NV@BAuWPB<8|IXXoV|GBF*$Wwf51aHg67GkQpenRJz96|RZ#irDHO|D_bAV5o zZ{}URF0NqxUe-(E%$6$mjHik1-$af+B*o_&K@(4r{9Qi-`wi^ba?fvd%KEAP+puJ# z*wC!}b3S$gZMs3<9ku-e`>_C3l3n6&W-i~dxH(nK>F*qxmJAS?v+URQU1TKlfo`xm zm)yi#{ARVw(vSy@wwW_*QDUZ_SJ|j}q-m@2Em`LBtZmNT8+clijvmP(CGY?1^dK1} zm!f`h9ArOy2COdfw5cbRwEt%kSqFvJ`#h!CbBe7S=QrNDGe8;tfQ)$qJv~jyufZNq zjzlFa#?@p3o#u02+&zE-tw}|8;opK=GMy%COjfM7fPZH(Pu^q&Gn9_iS2t1SH!A-N zdh!hUt%{nZ7^b^GFjX(v2^wyu)sb9GBsI%Hm37sVT95y8rDC;P!E7>nX?@Z2C2&oG6#oVhHFQs?}=eF6biQ}ig2^FT|!Du+V zOUqk>eV(5Cp;m4R$mm>ym5dZ#j?yRE!7Fbx$j-??@|1N!V!7LE`vK)U!*ZxM$-6q8 z9PMYvukj{%T&wx*=65S<_EKUY`@F>e+*H3BJriNm6ogyUxQ<+>?X0z{lKM)dB_-lH zJXk*`;oM#_e<;YBdv-JkW_bQGst(78^&}>d^K(GDgPi1UiR5YQ?0Z(rIX_Mgy=1oQ zOFR0L(7sCL#`&G*|964y7Sy>--S}laNZPg-i~q^uT&LZajUuO`Ltm!t>sW{VM)Y@#gNb|_Q%KEt^hp-m zo3#It{t{2}C|ow--LYEy7yjf9)a*sxg_6l*8XhEvLk%?FUC3lS*OqJJKI^Hb1*OX3 zWOh*&;7@;gvY4i?Fy@4-iCnA2R^F-g*>v(n(40y;?<8YuQ2Iu+%c)`;7-x0+YxZ)2 
zQQ{KRT*`7}Z5MBo+*s6{d_?owkjMD!HPqklwO!8~|57k_P^h9a^-FMU99~|I+wu8279W~xEglGo&y0so{N=LC{3j46vLNwAleN&5ykza2-1^B0 zSx3*wzLUM!M8Cy@EnXnG`E)w%98dFSfibswCU;cq)MPWNYw7eHrQ`E=u~swl8-ji_ zNW^+NoV#e^<6c>b+;n_8ZO^?QPvG$ilxWD>ha$f;8qGK2_JT)tpX;$*$^Ac%j^*}~ zsc5mUu=eX{#S&H}Ypv&vINATJfEJ7KB{w?k)Z+qpeW!=Shh@Ez_NMc3Fdsg=a_g=i%tv zp56}fp?d0r?=77L+@r1B-I4nt5;^iciO-2ag~G5jnmKhi z)%Rq+8IM}OhHpG8xS&tE->MI2o^y}4~K|5mM>t$jn6a}Ksc{SjcRsJB>=#Uyl^`jN)$uzju60^`90 zX36(>QlS{n&SuXWdehrDyAJek`Mn<(5<6GFu#ijXZSrkJug8J-9{nuWesV;_)2u1} zCzo+^m}X2$wyZ=`UQX6;VY{Xq2e#3Z%yHrmmD`io`+bg`nWa`m6pi0n=6ahwUqKQp zkkw?_$W5#@(WgHN%P27n44>1s^+w0Ukaq>ifI?2j8;6qvt_GTYPO5q-ACIAG%6^3J zQ?#Afu zf*+`N4*70{E~!zb&TB^W9t{jA>a_<;M7lEJE@+j)h4t<6utOvtAofKslT>%FP%J&@Fo;xx3`=Y{n?E zv(k*CHJxaqokvl4Cdy1F9sS_`06cz60-$>b&w_h8ZgFFq!Dt>3}D@u)G% z%w!U-&c?&J=0Y3jQYSX#RQ%k}3U4Aq-N3$Cojy4Gp-;WpqxbcF9muj~Jq%yU!#AEy z@d>!VZz3L&P4hlBA$jZmZw*=cbPz0RvtL`_nJ1T#i9|pA)ta;v4U$`Ak2CVtW@V2> zm#lD)QsxYFDdkUXzye%vWt5r9zlt^JL@UreYHiT(?Z&5M#Qjw1tp&BSx-H#I`wuJK z+}@ELfB&0(muEMCcoSJlevflKy-@9*Xz(x%>xr|ck?Z8XC{f}o^n2rn&s+5P$q%2)pnLM1 z+^GK1EKtAVM0PQ{9#H-(rA~ldO`m^V)H)tKxsNy0xCS*2XHU{YEz)}eNy|>gYV@j) z8rQQw=_^@Vb5rlD{-*5lw7;kSr>I*?%eB4xiuT;$=~19a)K0uRhWkAWhKG=?TS;c} z)W*N7wx6_IH=wLbs~MT$5qM7E?J~2(S+=lhe{RL>V}0i%2*Mr z@icsAlm4mfSk5Z{!M@%^&c9?y;vYwv zBSY!Kc}BA2bXd;X525!PS&WI=O76v8D8H4a-fI-d&2-7N-=LUDjbN1$rS}0#d<+@L zogJsEmt0}z)A;pfo==j;WCr@uv&>aagxSq3`v1e&oyUDS=KsSFV;}p{WQj3oXq%WQ zl^A1bQkg>1HY6c!(oV8W%uvl&lvHXgC0RyI>(yd}s1(Z5E=kfVDn%Z@=j*)2cjo@x z_aFD;dR*7_`JCr@9LIYu)|-$P#ZU-aI51%Fobe6B+wLPdl{R#crLX zWe?oU*=o+`Q{gVLOKVYhx>;!$I&TD1tWVB~uCt~{-oPF9Tj#S*bI~nTNY17KBjG)c zTs*7)KlDl*iW&AZ3*4RAaWy8X%jg`R!~ z$9tetd|{TsEcFAE^>8R%y3GCMXtD{c*#kC&&y|G)l%B4hW*w6V_N<8W{yVu`x9BCO z&h<%7_A>DS&iklj=E$D9Ejrw2l;+x9fY*m>TMuVu>wg?xXTLIsG$o?t1@!I$l9uMD z#GdEW<64+x_g?~w`~1y0TRcivk+N?<9v}Ba(Il>*kGM7p_gy{>G5J(SD8%SW#oG9Z8dJ*=WHOcH<63dDtM9B z_3q@YSMoPz#nn>V!%(*e8eLHT2{U@~z2)5PR#f>GMb99ApZbB-7K*#DQZ$+OV)A66JO)^vWbOM>(Q|LuA@WbFy`=#LJxglxF?i 
z1cu4*I2Z@Zklh)m7*G0SP8#W%@ARB_{_I!A(&3Ky&>!8C?P03Ecd{R$aaFVo-%`~# zGL~vR8^GL!ejJ928{oa(^)`NLpxleJY?;v(uoS1FZQd0o!g__#nxo|u+^=HP8ooV7 zBJ)PD1H9Uz+&FXaG+33sove#Lc{`N~zo}h$h^HIT&_{e8uE#~BC3}!R>R}ve=ly&9 zz~ldrS`NvN(bs-%8cnRRpb%b79!#-XtRjf|(_9uQK?eusD-RX@=@%>8u;PLvs83iX77Ux(k zpT)hZX1(O{Z^g3x6aDJ3c>O@M2?kHPUm0$REc-i4u@ugEgBCA=dD?6RS3J9tKWQnf zcahYqQDvl_8q$wGs5-!iuWP>)e9!6YIJE5OPHK3yE~IZhe4ZjfsWcKlz`Dk4Q(P$r z(p6R!FBUTRHT;szzb*R^uI7z#>ILi%jv=(HrnN|F886a)3tJa|_Ou^c$XZT-Cl~Y9 zKv#MgJ6hAmv-68RMe-$dB8{0hJCdt*G=D5?F30t+wM;b9SgXMJ_#}=j`C)V3`U$%d zjm`OCXq`y@HY6^&D4U~uJR)|}xe4yf@m$VTs*#|MEWl00XkTf$l-#@xpG3W;mto*L z4fS%49nZ_;80g4uZ_(4&`pND$=YFZ8UJERnX~#+6NKNReq~dZCd_LYCV*9Cf+iVDd!bca3%vQOW5qCd!! zrFSWsB%&%6W8=GZu5Z1`TK4d%!<=)%oM)w)?oxeJFFa7EvODz){q1PhPOj7P`rV9E z@yjaB7GEBvx>}2!K`R!xmb|E`0JqRc9ndjQ-;VFE>-7iJ-iVR|Narp%rmoLje(JzI z*?F=j`#DJd0gsN_S2yZysF-@(E%lpdqST|wdafL5u0q|;_|e{sb&Vbl18-I)AJU6Q zNZ@K$HtMUYXUl5a77w$B97v11;#^x6GSPmCTq)7^K(>7szHMV&=9AEt#!ci=Wl!h) zIUY61DxApEn#TAu>$->1QN*WWlXO8!to8)Z6GtZeAjpUC3F^xwes>%(Th< z6hFaK*HGVJjuAJNb?;Gw}3I3)Z)XFIq*fFCb9Yb^qVNfNK_Le=Z(cCR7(WnS~B_( zOBBlHy?@SwQ*HJ@ni0ReHC9=V;7@n!kwlD@rMG{Fr!cKd@6{w zBt?};_*#5_4|h|?uQ^fcasUZ>pVSP+mnz`OX-Q(CQYTv`IVL7(Jqr}EJ<0g}F59>TpEtuG5rxz6A~`v>8@s-~@-`yx$g^TN%MK-L zs5kVISk7d@-eLy$5f@&9`DOZk83)!`NymGqA{@6Ca+(O6K4#q`Lbb^xY9`yS!7~|S<67?L(O^jvECKGL$ zbBE+>p5&~f1DkR^DR_a+o}zWK2~;rC9c(rHkRA1N*2kx!bWgjJ(p}l5T4vAkF}=+m zYk&HYNQkBS`yFhL(0|@7=M7%gyX^~(T#Rzpn{`^+yDh`{>5%C!ldoQC`6PD&*}j^Y7oBD_m&Y_yoj@ zD*KkX=y8kh2kZBEP%ZWUQ+?#UeBKr(HvRysgNF353~Qegnj`6K8NKd?%>nE{vbwBv zr4=iac$LyqoA}=JHC8I$TtySMxN{MHCiiY*kiF-*!;C$d`7kC|lXF2J42L*B>ajB;|<7DJ3-Uhc7v?|TYnp23w?AMoqsunyGuk(OwdkUK8 zt?5AD5=(N5R;jsjMA3RA+tC69pL)JDW6Hmb9dC=A2Q2bAUMPu)e9V63*15Q;Ce6M%HpF5f71Y^8*$#u|6x{^IMcp1WYRR9l+in%Sv4V>zrbh z{`_}-lw3)U*Z)2jDaGTbr*r31y0-+*p+apOERU**tO@NC1ydH}d2S~@ruTgd$@r;< zt37$KXZPS-Kdo~@GXpnD?;l$5`B3^gnf=Rdd#u&hF1l2m)yj;N8Ge}6(=rtK1)3%M zW-6QHY+yKvcoDD4vvDOT@lLVoxQ?c0jXblkFv&cdH&4m$oEU@N?kr{v;_q;?d84^r 
zR=Ga}7qfr7$x7lTIA^Ujn0;#NdvaAgZluI1T!;QyZB}MMz92uzKfTfj=___8Z!fbW zPZg{)wRpp-XBeE?(5YnCe-dVo6z3L|t=gySJC(bxApJF9I>x%EEzA=4Q4OSxS?dYL zIGQ|P50ktRScqd4=~5%qxf>7W`xYPdM5LygQR>jlGwwVzYY56lAh za-IqtsXW$(#b`>Bdywr*ixq8l9od5<2Jc%IdjNVRUsvK8eqr3(^thEq{8fLWjMfXL z$*L4T$ zzA^EjnooW@8Fv7VCBN?5=A)cUZO|_3k)^n?L@zgxfb3PefprKJcv`XrY?GY*cf|Yk*sZZ`8@{Yjr8;iPTY+S@oY)%<~Hblm+@{V9jX5_7wpNF z&=NI!>+53@&=L-pY1xQfSpbvdXQ>T~v51 zC*fKhJCsN0#Z*^PZ()j0528eCl$dB;oinPMIF`&WwS2x2{o-j*mUZgt)75k{IRU;b z?CSzH=}4ok_qzn`;@upN?8^8Tnu^=mRbfW$n8dvEzoo`Tc?N*<)_0!!GS!@;;7 zyhHK72i;o-hQatS?i3yt{7S!hnc67P{3iNlPZxVo$?lT-CYyt@)) zqxAYcc}aBKTu-Oo_z$2z3|tSQdU6CN&tu*-4>Q^qxR+C)#7ZQ0Um_hZ!0~uUzX{_p zxIJ9UWJ4GMo}sMox2_EV(?T3h97wWWO~L!b;^g#Xse9e=Be zj?YK~Epn=s-A^wvm5eys;Mkt!?m*Vpx;qz6$s1e;v?tt+}Wvi*TuNSHgQkp@)_9)tO|y59(yq>O@OY_2M=1 zl$z$LzY~w{#I{vqVb;^5o_KO8o~MdpHI&UxEb&0=a6GlPlN%>d9%DU|-EpGIQc1Ok zCqB@7H#%8Sn^v%zz~T%pR)VwfGxeF*`Art}F(k1HN~GG>FI>IH_2kTnhfgbgZ^MIB zM-R7iHu)8*pN}p}{MMpNIkP+oBv0W%Ef(bqeA@+{uW>h#Nb$+fiS+eox=P=jtcza9 zvvy|l(i}PK!1t;VGiuHx{sFG!Ce5kujKUhUHR?i~iFb9f*VTsSdNA!mpTr!@z>m$^ z%)qt8oy1;k-9fTQPgj%Iov8gVsco#?A~SmKB$GkTpx(lduTilzd-`g@$L~=; zIha$)uNjNf$XU&L7P%%VC?UL@idsS@6B8P(p+MV$V=p` zp;br)yo~qMBsP4kR_l$tj2y&o<_P@`C!gQa*s-AcQqRfL+EPDx552kIZ0zAc`qkCS zFWIl(&>}P4SlZbUWy7yWeZIu!E;xP?8ZR>12+}hXSFh9CW_WBODUV{ja7azt_W;R)Ul8Cu65h^-XO2lW0G~ zxK|c)WuEOq|IXCg^(13FxDydFj-5<4t|8X9o}7u}x*5*V;Z!xAhziT- z)MywirOByGv6|KWt~fCnh5rq$2ilqWm*V17xF&~SC2h9SzTc6*0?4$30 z7S`+~@E)w~T=J33ie=I2P%uqI@#>yAngsSi@!{kyYpc|+Ej=TT@7L!j`h(|YqRR)K zxY!80$=hgsF7k9)649BQ&BXWdICVXXI^BAzx92;eK^K%AMmlz(R4p>{Jn5S3-g_up zmQ*)JrDRcmo$P17lsJLTsGPhUpO`_)pjPU3#Ft-O^-pq%B>LqhZAXLXYV_*IZr_74@!+U~1N+dN z+U~^v;b64Bo9vXq#~%ucmVwu+q;nsd{7YCStLi3e)zG9b+H4}B*<&9I_HAhQ8pzwg z=+|g_2>d^HFL_53Yn(HJGI0Al8pQvkK_R(0sW<`UTcA=2JO+dI1@iVlakfwaSNi?? 
zlxZKmCl;+d%GcKSueCoOO{VD~J{w&@vme`$Xzf(_ITdYcYkOZI?FW#9{lK^n*^7ru z8JPa9X!%>crmja~UPc-to+y)x`-xi#-A-he%Fy#|pbkElcxp7sdk-~o8r8$KRH;40 z&wZYF-v8v{h&M**xkJ7kS$xi?x?s=sR0&R&)5MG99OPblbrs%)+ef;3vEC0Mi^(}s znvwRtg3skiQ>qh{@hwoKbtE!Bqy47t@RZ`oL|K*ge>g2q49z~p6S?+#&!mD@-cAlB z35m)4hW$u9_GfhPEcapo*O>!TXLAY4Wd${oUJN!PB(^GhfXUAAN6_Y;q-O}tPqnYV zc=l$pF@U8?9Lz8hS5mA*E@0>DIWJscUbx8^&ywFnM*q<$i?!d%s$|bJ7j$>x;sx$r z$I|80y_1!}_j*s}t;Tr&bJn*T%lCU4*v+~svHvA>unEaOqp&5}PxhkmAA`FZ+q;SW zCvtF}-+^ZI%>ECVe-h17-kotQ$>&xLv&=HJtS0`9^NDbI(w&a*Y;H}Q%xZZfF&}L0 zP-UAdiDo_3=Q-xbcTuk#O=)MR%ejjyu261%26U^B)!iHxq$b$1N{tuxk0>6P?v~{wQy|KQ+z_f8z}tznBME$)S8B+UYNKN}qt$J67Evfwq^=?a0bhdVCw+y<&{S z^6alw&Q!8LdkDXCb`qY-Ev37fXJykqr_dZjz2w%)lyS-D^{`#8h%q)kMlva(bLI|*ow6o zfp6d7%#~)3Ka$AT(enYEZbiO+0p@#Py~XoA$xkYsCYm}q8hVn|xn_@cdftxC@q)-3 z;K63Eo_O3AgujPbeI0IXay?mfs)9FpJLn|Lwdj*G)rQVi&SV$vL7&7}%_Rvx=}zJU%ChB&Oqgb*ZlJ!#9KHhG ze)Q}@JWZzj%BVCEPP2@ZT(d1mK;9RPgGGO1)HC{{wCW>I=1p~c0v{wVpMfym$yq-p zA|xZ!!Oc{BOof6LD3e@&Z_&f+Q1&LYjK5dbpozR#1m<2adfAopj95YwHlxkmIFK5H zhZ-@JcRR2fIh79G^GvYEX}V=A#MropN15R1H@ zaZj;I>yPVSc`7z(i_zmf)`R9eXoSQtWmcH(iNsY6MU_|lZ$`40n;Uzv@73_96^ifG z>dQjc_xit+W+neda*~wB%+GgXbOUNl22BY$E$`DgdaVoJc4W39I>np5BDlJvSMtcd zMXoD@Z3GBbvLdJBXks?<&VCAxRD@G$KFG{6J?Q94P^bD)YO_q!|4y`N{9A{w6Rn|)zBkH!2YyjOa<8wozCejPJD=+DAtJij2jkB=b`| zPrtRJXoi+A23lU!J{?U$01BGnq=%keLIo;TI4Nz%!5D;Zcjg63VAs59@` zAJDX$5hl7{4_;ru1NLu1)oAaV%ksg3zE$Qr6C>)E?0){0dvW&a&gnb8WiG_Yn zZ>QsY{1LyfLSBoilW0^jFLZ~+WuWW9a^~c+9IhwZ?s)V#UHhyoe}i_Vx!aGSz5g=S zA86~(jkg6ylC9!>IE9oyN9Z1oGMZ|`9BYRPS#V-;JcHS zk*@Z3uc6r{k-cTXHqFi`CvX$NP?}FB`BUnX+524Y0sHx$8N;HCVV$Rw=c{~gfrlk< ztLg9d;?(0NeJ#MtV^F%2SvlF5KcipIc_P(H<6+d1zTQs0Un0Tr_h<~`5@XbG_lshz zHSXsGVS-Up+bzDLv8OqQYh)Je2eR2lnx#!E`=>W((fxRk*qY18>Gkd;k|g`kugK;E z>x!&Xnu7cRT-@kN1vI-9uH&q)#=&}jR5=eMx09pH9mzA(35OQ2{_9CYBH^lnCUpWf z>Ul5R_gcH`GLt4Ex{cmv!#Zd6_u^(Htt=$Dd~i>+?wu~kEj zGy|2pkkn1?*MrlO+IMQ3CD`>#JXj0!?Tgynv+VX>I!{@# z|9?Za+JbJeZ>gf*nk2_!m+qxrF)PQ5Z5PQ;R+FLVHV)lJpwKe?C);}>sS{&yzcouL 
zlwRTQ5*m?{gJj(7j(6)&r8Ug=qF6V)$WAuz(OaVWXf(XkEY!~2RtD`;^(Z^=WWspc zN+$J4Tf=O&+3`};ia$tansN&&BsWKC_0)Yy%Af4<%NR4i`?#Ncm%GXTYh*7mdmYL2 zzrZuRcw%_5k8Y*?pV0Lw7+>Yt3E(=&=lJ%l(Ekyrk;sgep2&$uyb%8J?{T*1=XuX= z(K2tm4x~-}T>A#xi90!j75*N6i6Kb^v$I_*Puh;dkr62MC%ENAXGBf(ni7Z zY!n*h`I!YhZ_&doWd2Qa-D7lgEo$WSJ#ib!mYR0~PkZJ_l*>;3ZW6qY5qcC@{-Ez; ztW)}tu-8acB1L}YsZ_z~1HyaZcZzmpP$+x(GIS~)bvv~mt)B_-zR0IW1@&{@QkoT` z2P);9>AQco`horHM{5r#?C=pZ_Fk0w(%pFH?1Q_L^-u;~w`o}w28o8xnZQ;0|G+b$ za;h1}@8hSRRObEZb9{b&`dnI%=b++ydF^xkaIKHf@BjSoQ@NV{_kne&eymT&dhS#I zKZj>&F4ANo$%#nmk9;s@^oxAUIm>9gExn%8lYUmB8>~a#rcbF;G1N)K39cl=@l{8dwN%A;D-}D%4SGFCo7JQ! z6%69H7=Pc(^*j(9S;-~Bbq{O~F_Xk2^Cf+zQfYe{)0gaK&Tq}qrA|m`ZqA&gp2@ne zckYs`MH5kC89SH!m8nBJfQHAvY%+@UM3MMxzM(}tT1Hz_>~LiiIb7u0vqnuu_hbqy zN0!@zCy{}voRRl~PipgmK4!Bc&znnMW*=)=4Q7Y<5(r1*dO6ZO$;|zfQR0Q4{D0-p zX)3&4GFD@B?gOi##(P3f9n4+JjdLQpwzrn(1%lVfMqfQ#k4`_J-Ux8M?Awzh=3lH+ zqBxT+YB?z8f+Kn1SK!Q*;ID_~8{mBhSZ9!g?9SpVGF`jPXto$WsRPv2ihqUc$KY00 z>G3D{4%d&wySIGHd!A{YY^hD&ANN4j8f?KV-?AsY0L*6@Wr{O_x4^a<)#4KnU+gJ# zDCejrxN|N%V%NjXxpaLX`IxECRF}zH*W|Ign;s^sN>+w%!N2tEEPj2zL(7}ZqE!lB z#787^MlZCU0<&-J2$R2|G{GLii`PlLw)?^B5zuCj^IOlw+aa0m@*cM|Iz4mz(fW-Q zegKcxm?e99B4-Ej1=?(cGuYK%qr^?dS_!9Q59o}_YjA8hUcTu0DSAjgxCe}K7Oo{) zDl4N@&S>rVZJy10;5+pgDLTuu$=9>c=(~*esQcB)X9d=wIvARxL~~bn;$JuYO*G5* zfJs$W@kFy+{8UR&CGl2u*zjb-?5Y2r)(G$8SYno@kn&}E$b08@`mBk*e+6xKzd7A$ z3BJ6kpKrv9U`P)9;dW7DP=6?WN>t4E{->H|{G7&lvX`DZna%%X#`(balStIHAbX|Q z1CH}tVh;wgADp9{} zF8_{xJ_FBF;QuhlXVbD-B;#mpb8^rG|E92$n~hoCif4&gsVN8^#>@0E3%6rw4%04? 
zKzmt%nWVLZW`2u<*@bik-O2bo3Be8(c@53KNLj4rk8trU45TccJfq9{lp8U zBATB~FIwREqs58C&td(oUZ&$-H!_zUcV{;GM_i9rZ8B>l0_`8LP6p;v*qyF;9dEki zC(ilmGOOGYoc}rcRfk6j%Tl^un?ie+lc~Hb?5V{)u=?C{3qhKww03B666l^o!>|2M z&B5=$UW3$R?f)k|r^?A%{5zbyuGinkXz(RU#2=&wNvsaD9(a*+r_+slH=RhHz|Cwz zMY@$Y?VI6Mg(W-#Z+fs9C!OMUs7U3aR}&j!p&Z956&*ryG5vr=mAC z(W9-uIh*ZDmY+bGRk+v!lzrh-f`)mkK1`eVhz+Bq%Z=PlZ=*o*E^cLQ7=O5VY)#HP zrq}OpMsMNXd!SDI%1J2pvL~nEeJlH($z(XS#J7O)O_1lbV-HzL#gq5|rh-WLwh~;& zxt5yjP4$vD^0(=)tkLU&F1{l1G)T>`i71vG`2xBaU#z;;$FG>h+MrQRTh|rR@r}0e zs!NUGZYX;`?)L@7DAp_M@;-Q04vq3gJm>b;!S*igL)mBOXKN!Rb8c1zGeHzjrDP=9 z$qFRXV_kA~JPa1%!>z`gi~9YIk~o21q0yh+FOMd3LA{F}KJ2NP^kptey^AaLS*j)Q zFTsg#{O{noY2b~|Omb^SPLt0s{3-R=OLX%FI43tqLo$~P3*$X=Z6O))fJ_upWqtS8 z>mpj7h`A&6nNy?E9FWN&yqXM+z>#C%mQ1$0(e45ebRyT$i3VuXlO%1_e{9|Rr0^*t z<-Br;abrQFBUufdMr@e1hq|drSEAp)u)8^7yq)B{SE{sGxb)IF{`5MV@YvG6puf{$E0_qp32dT+^qd;3 z#qmwXc}CyMj4>W8r=wqc*o=3*3_8RQY7uHzGg~LZHfLLn3!AzZ?#YW%+xj&1efz?_ zyWdGD`8as5wC2AIBok@Q@G=8>z;@XFqD8tGrlu1`me8*wPoHq2~1O>?Skbv zZT8jsyLwsy^VA1Q=JvUT9r+u{_!i~kNg13IwSF5a_5s1k@LGU2lhE^X(z(!6EBxgA zy&2tj#OGwsKFgiPc5QhFvy)^chtnW*&8fkYsJ%bkY6<7$le^gS13@qXX5&z5zV+N} z{Xd6dfAD`dp11LP8@Lk7(HeHiFFKx0OeL5^lqMQC_O5|_RT;cV3}vF!x_kb3H0W47 zbBj-%Kz6TtS(A1#1HWLjczyp3Of%WlczGSEpHzMwY;GCN8s4stWGQTm5~q>mHE8j7 zyT7Y(X$JhR#MxWne6eu?@mwS{oD}mN_VIC!TCVau^lZ_@nsu~ z9&)WTRwj8u;>A(=rass{^zZwBEuMcBjLCnpFD(BIlhWO9S?i(|+N3V(rzkPV=L1o< zum7brMNaamlOFa%hrS?>f65UwCo9jyw44Z!eyr|vyt}S|VLdp?qx3`Qc#$zvFZf=c z+rVw46+juZNd1r#{rwU=52E4zdfTR-sxg$&Nlb+2Dc2@4>ZD6>`fxK_Q0)$ zl~3LsEdgixPR`LwJyDlj1^?gaA?GbQ3o2u@k#zq+vXKm}50cJJ1vR>R{#5sS=`rUZ zW6gN47W7TuWA&WVhSZ)p7T!7OC_UGT^*T`xqtGL~PSxx~Jae8g{!~2mrYHXdo_Ogb zOUV9yerK!$!4+SQey$}eM|#gsX}nMA=l}a}Y2J`{kX!_(pYn*L&x?xR!i%-+%Yl6UL>vT#^B$jF~Vp+OK#fxFyq0p8px^@dU|AXQq&ej;uta;Z;^R z*#S4|Wgr?h0!7xC3s~X!QA}ZvP6uZbpR+bv%$BvH!8elGJ!a4LxKx)-dkm-UV)vq- zTWR}+?C(}qtr3eih>eMt*fx@wc+anW4ixRlMxqV+7<&?1_YRx1m<%Qc>^m@Jx0z^) z(u@@GAH5AM@lZ(~^*u&TMui?QU(O=D3F6^qs=Vh4rV~9`87&@WiE?t#kcP!mG&}U$ 
z^_iRu&zVh{Tg^YkUM>XJ>2SV+^_}T%a#hU*-9OzONmnPrt&X|5v{GK;0|se3j$U5x ze%>nNd?i^$zd-qT4V;T=L%^`Cm_M>RKc}GHYLbu|J5PXOF8FKXPqO6oW#hKPq8{0- zfo3(#eh;A8Sbs}!?ju}^zeOzjM@CJANlv1h=;I+WQ3tgf>Ma%HR-w*p`<(S~ngN@`1Ji&}pM(YwaXnMW$hXKysm@2{*KlX-T8dG=*3&tg@x z;!XbW=3p;d?7JQ_Uqlj<-zAYNi6n^+@lJft8}oUfOHKN(S?yQ!bEtNyl>RUYT#fz% z*_?1JXA`Yauyn65m9$l)*~w}!k<{hvWtQawQqiN@Lu(yT`wFyrTha%0`x>?5FOde8&>RTFJLoaJVRUb`Sp;B)W^o{vFzW9%j>#W(%C%>SiOz$ALILN;!eAfj6l(kh72%{qF&?xz6s62TwZ~MmNS8y%)~s zjI+H@ThQn>u%t3d8+~N2a=)J5!Ryr3?nu_A>#G&JlDDn#Oj>3|H9!w%k>^)Y|6e#U zK|kl?OLkD3NO;~irY1x3BQ{3+RF)m)wV#9WMYj{ ztu$v>_rNUGAV-0D35q`GxeL(r9ldP8^Yh5XufS6Y9Or>&BuK{=^4^l(Ek*Tto~{AY zx<*NcogMhs$vm02uH`+M9B0phx2v(|qjI~#4xWZn8_8FH<4*Odmp{%sV_kPt6|^5Z{7pvEIPjG$%&C%I?1k-wR0ka zt~W;NZe6CgoXY2%b0YajZh`o@r`pU0&(73aa);in?-3|7ht9P$(@h8epY1VnT9)X8 z5*a$zwRr`Xc@aIWY?NQsd`ftybb^M{Se8x~ZRw;4s!o zxi)H)M$)}v{M%tM5e?!CP|@GyJ4qGl#JQYYX!ZnDP7eMmB<3XCTZLZ9{nE&spKJ%u zv32n;Nj;u{#*S}YH<~p8J~=gy|7LaA*D}uI#!t0^8K`m|T}(EfIU%7Uja}%si=ZQHzA9noCb3Cu2=c;Aa<>Wjp@GlO%PEGGp8b$F{gT z#nc|47_f9K@JPMNt!P*Y{ZE2lazkH7Hmh5+tj6n~p~-NP74Lx5-c2;h3Rrez2XeZQ z6Z1syl;C%=B_}KF`DE+YU|a#J8F>9qlp2Sg6+tr){o3J26YJS&>|_tRlXX=*nmXzC zA^#H@)}9s5&b$G-Ty0@|LWrr(^AseIqkk-en}O>S|D3=Wj#$GL0rDigY~b zc>s56lI3`W#LKH8?(_uRHgcW{bEm;J>#a59dX;%SewZV)S?Yi4cg`rJswJ8YAS=l? z`wa|}m!|_Rjxzdsdey@Uus6&eK<^R~+7r~N!8ZziS^urIH+{i;9*@8^M(u>=!?iud z4E>pJsR)xA_1SB7As5N{+l91_L&a6}E`F`UtvzDH4hF>ltAm=Zz6FM?$CB${quw92 z-b((A)Xj;<<}59Gqs&Zl*^6Wz4C+n(KgHIb1lRLmbGANff;+YA)*1g*<0S9PaCH5h zE6J{Nsb?yhdnV~W-rQA5=PomDve>R52Y?gJ) zYq5W6m*}abxSDh69W2a==o9accKCjaJ=`+a`+07(-N!=kW>@?`v1jW{tCA({e*4US zl7ZW4`YLnWt*rcqpuNkLnPe;#X0m7eJB@y%{&q8Z@~2#CoXKYOmiqf8IgF=IKO8%j zeQZpgKPFMBigOJP?FX;xz`v6HZ-slQo|9TosWGz$zN6vUlNKkd$(?@BL;1cWrWr1E z!sEmiClci&>(FyS-G`*ih0g=Ut~t>l^@UH_4j_W<1XEs=IYsgpVscL zAYKRJpLy;mw97vBKR?-3A7VFC`oG`5|0ilA&%~#oH7FAm@^`XTrf6NBj@(BMYOq&z zjBy{H?(1GECX{9n9jnbq6dMhfeaPP@IQ2OSO+?$oi{!MW8~L~om6PXnpq`SK;!<|# zFK8S7%}0Z?*qCHVjpxR9s2#tK3k!J9^gTIRMx#W$NPbHv*1)jzTx1`dew(E^)%Xt? 
z?<}_BNI%KNl9QFPzE{;c{uuGD*oGrHA2<#jchiH!Z)I=)sV8chOWro)w*$kj;>_T7 z+POa~`DB5~KlBifm42R2JWUxK%cr`e^#Jnr8Vt_TQ_f*Fxqqse^*wFMqFUOP@hxXk zIYs!SI9)gvMLK&venC^|`&6xRPLtD_P%n9E5}_6^lSCjTR^|WSPy9&!`^n&(?>Q}r zN6jI|iGNS<_^zNyIDTO9OliBj;gVVsN0QDubR$=jujZ#yy1)CLGp`%0M3SLyHXC{) z*`9-+U-;G&=ih)^Gn6ku)7~s^EjB6dWkwYC=UR4Yj=Q_H`-Jp9!MeWW{zMv@H(JS* z_ljPlzcWZfQ#0n}uH@}(yqA+FY7c(YB_A(%wiY;M(A-We*jiec2#OAR%sav4Iercv z$v++c;UmmU$#j2#**;R4Ty~vkQasL+!E^#!lq~u=J@3SBUdY}j=R-qK#?B}IRq|M_ z0zr27XR!#=jGHW#IcdHF9s^NgJUC7ySC4C#)m1t2alGq;_4qLPUqw>>itmYm9ph&$ zxlXn0O`cEGb9OiH<3jufe=q~R1EOf#bXGkT=i?DEiN#EHtW9Y4B-@q@Ce8Fxf}?MH zzM8(~>#G$zpZ8A{wQFzojlbe4o_m=UyAy3{ph!;0uhn9Sc_10;m#}WBiZGcCOng&T zREy30M5|rqHLNm2!562p0zBJo;E4}8; zM0-%gm-RdIMpc%*JDZq_vs{aLKt6AD9*1i>unKxXYfh09% zvu5c?D*C{04p{rs{=@ax(fuK2!QMuD4zJ=to|vDUdc?mi?`;xidK?Intzn+K@96sr zW0#|kcY5|28q*6cQ<3q{Wab527!Uhol!;$_D%vKtC^=};z8UV^<>WY-^ezC)OXxch zKjY2*r6-naHNm$H;Lhr|mEPvUxG`R5Rg#=-iLKv)8$H2Y-YlD_?S-CCZoA~gPd1vI z5q(aI;xioojl|#_N^g6jOLf|mcV+RDIL4<3Y4kKBbY&5;oBEtgj3Zwk!LtV&llZ9` z^ymjRDH+wy)a!*{*bJ*gqHV<8cyW(J@rtDNL=crHS>ISAwIQ#`5EH-sWnf!s{Ny;8 z#{R!ecCt^(yR*vL#OvcPaEzaFvZAlDrrZUmA+UQ7oV|@P#oSYqRf_NNT%+8s*HpYs zd~hT4d0jkC^x0T=U*NfTrYweebt9}aqxUdcB~+MEjMmWFp$8aCvpScv=J^D#Q`aK7 ze1i8QsPG9$Ue-(YY`egnGoI<5xzY%UZR_lNeb2-y)&$FO56?Kv^z%sCzI$ssJ78_smJsbIv;A} z5>O@{=Q;4r1#>%`+dz5~e{%;&_Q3aWuvWx@9{NbcS$wA_f^8$6c>#{=!90zIWfy*j z-_(&gN*~F`F&!n^lDK4g%Ff~&xcwFdauS(0mrr^AXrr{ZR{I2g$zYur`kc0X53O=K1fetT%M{VsB(|9Kt-tJm51;hs@J}WzEe0*yf z>aVf8i7l#6YX+0sE+|$<)GdS0ghbdNV1IX$St^W#(LB>51f7_Aafu649w}#bN z1=NTq<5vBruGlCv+^F6AWHZ@#XJ~%~-luYRO*7a)(z%=!cm=$Ffmexs8lXk8qdrXI zJF#)owHgbS_&wDIO^LozF?lenm>oqYpPzx>X1%V&-*_?I0P|!dx{+0AgZ}Lci}?afjE`UI<6`!6GRnqlq6e&}xwg32_kHADfc8EBT!Lzzgn=MW;mctg)6DW_C-RrVq$Q9T4U1L_6{yy-%Iy_<3$)<>Nt^2&h!r zUSix=Q8t-yJ_3F604#)WPWYZkvYscS?*_CDACi$_qNkF_<|xpA zRIFH%X&^OkQdvEohcDy8i~c5dEIHAV12N}Kp?P9WQm17-e9tvTcIJuW%nWoLnk4^e ziJ5Z(Np9(GFVv1VK~806Y1_Ks*ht)HZ58$!3D3Nm6-uHdyTkihHn}~HS75d71J4eU 
zxQe7@M>QUdISJpX->*q^vOXujM{0a$KJMsFGGVRtDersgkhZyKnkdLz%e?fi{(Irh zQ}7?+`VgPj;LBiIvD0p0E@;k$?>FS)Ms2f7Nvz6ZoS$IE90RZRMocc&2kF8nda+S& z*$cl6``P+Gh%CleWjZUn!WbVLJ7>oW_4qE_uOQhU;^VFQO$MR(>|_<146AuJ-yX&D z2BM7}@e(pS3n!8vD3L?+K-P=aryB4ul+F3qI&yT4JMk$TN!K6rdkx6m!2RhccC!99 z>S+;s_6kk=tsdV6*%VyaX3suEk9*u%OkSoNFIis3(zSRn4Ij#=W`-4XzaG3Qcxo`Y z`czLZ8TmzUUa8#(*RLz^i0{=H{CVA#CM35v&P^dTS%Gc=cl=Olfc#A|`ZZk{#x7TK zWeZ4?<-Y@Hr=ZZ;EdF^kWDkn1C}=Xj;K+q&m7Ei?UCqEU$ZvO&6OH~I+t>$OpOWxo z`c6%RPvDq3K#{k^J-*CtHiT8)^L|#G>+~-4bhi-`nR26@_Nn%FU0j8m z+nf~4)>}t?Cb#h#R-+GDO`fGx1^z8-|B#mNX_2g&&)UaSz`xr_+(YhHLZ4Aa*ssva zbHTTdGlpmNm>MBx8=(@erT*SQtbStPo&Zi!ol!0A z%bVH%VXcx8P9^P$CQ1dJV7ArOBeh8$hO@Q&fbC5^m9YhOSCO_sY}pZ7CXdM_^zst2 z^a`3K&ul-~f9~#UEZJ`DB4>x-=Kgf-09SsAHv6J@PIo#P@htj~n4WEHN2!0!N$%`N znsWYfI7nual0Ig?er$T9;ckSnxB4D3l$FzoWbPMi zNHWczpzrM3>zNDp;#4x1E%E#)*S;r{N07hFADh6I96c++b++Go(KZ#D5Au6jac*`t znD_mEa5!gd|L>opQEVIwkh)bJjQS+oHP{-aG!m#EPW;s7dHs#{`*#>dB2(e0R1#CK zXoMCy>&ee1c4a4RJOGDB^-`w91?)m>8IX;X=_NR9r!8ehhpR*z@aXJ2; z(@DmPVzydG^CpnKaje!3_b*3@F6fXPsfme?4?#5;RJXdkMw`t>ok&-g`4o@RoOM^H zp9|UfRI*Hr=eK$q;_tUO_*Y|PSK0!XW@s~qG-kJ68VQj1p2_cgt5w)aI&_fMiT6%j z#CY81{plhjS1a!3U42`v$C+6Z8~Pxv9;1gAtVm+g){~9tY)p2<$p^gvZZlZwX=pQu z4VVJD`^nJH^|PLiCnl+p`7aUA$yacb*>H<~ll}2Y)H;?-)z|+o+)KUDG4@S2>2DC5 z6wm5pDBgtwsnq*~dE*UzRRF<_=sp>w-ALyGkfyTy%YH_%UWuql^`94v^nw4Uo7pQF z;ahlj)GqJxx1iS|RIYAT%WAG3YTrtp;z!#U{=f14DkCI}$CE!ikB`;}- zmCm_7U27&j8!gM47rK$1)JvFQgi8Ki4#UM};?%gvO7I4&keTetl^|`)=2X+ZHp%_~ zRXd^CVi-3B_js5lyYWzWXM^i*<1d8sD!ly_&QFI;Y9v3QpK9pQ*XW5l*+x2Z;`j!x z*MnoEd=CrL#CjtYPwL=SGFP@>Pjj03F6t-q<{r>J0oMoFzN+Yv3<5pysj|NF9(WL5 zWF?h68Ux7lG3ZkTRAWh27h~NC=S85-ZaT3|Pm%Va#q4qu>yj9j{zm9;g>xJ##b0e3 z899sXPX&Qyo_GY5<=~Jwy?8R+jB3xY?V}0{o$Sgv&r0_EVYs-#-Pq&Vpp1`IyaWbCkNAss? 
z`a)bg7X@xG2gKu}H=RvH!mm)ZEed82mAI*KMwx^6Z9S99JKd~ivoj30r)ayFlx3D3 zs7=mKr=m*s%Tug#B0Jyu{#EhBUiY6t)vsN-+yCqfccWBAR{C|%<@~I~d>H@omDk2eW0fTxo(&$um-kH9VX}*n;QDSlUFJ0WfWk zmZh-1L!+|m--9!Wc1S+q#Hnwm!yBv$ns~A`SO>B*Ip57m^*U{{7(Q&?+ zVnjii!>uHNX3LQvvMmP9;4AX%uTXG_=1@|B5*$1fA7M!c_ZV#K7Y8~+;O8&%0 zX;y0NuGji*QvM;kbS9YMC)1S--GyEoNc2!V4IkrKT8jo;U9?KX;5DeX2(R+Spf8*@ zgQU98*TORARmmZo`Jx%z2gB!E-*Ot))A#D+ED;$wk&ZR(<@@*gi~ms%y7eZB>J6vb z?nJ^@pv2DN{s$nBj>Vtg&yb3x`kQwb))3uFp+Cn;bv|f2;Sr_9!VbB!R=imcO-3x z;bgLfXV=`uOf-s|#0O_Gye@;mXuYRm{ETAlaWh)v)Ho4#pU~BAMw6*9FuRnJKDbk-km6vD5_-P z>J7i2!K1S0yTC7}Fzc<>LeEIU0u(QeqpYfz_sHyL`1*hu{ZUrzS2XQ-Ftqk`BC(TQ zES11hoxGy9i@{KjB`)=|d5MIa492D)*a7C`XU%)3N@j{!mago|6-LQBnDP3qO%AJh zD!wASX-($;Kfts%So>(-6E(`C{i|?jj<4~VS_y{u+wC?^ORM~+{KOA;3EWZ*_Gh>^ zk@e52b_V{hBvU_!^Km#GAH3voe$!J!@iW|_K{2N$PW4(iMe9jWwe}|o{xTa>N6S0;%}bZHF@`O2CT+_c@SCt8!Rtn`$|aicB`pzU`^ir zbBx*9oRJvUtbtdc>oomeiax<7?@dx$bGI?xW_RoONj>UKW|J;xeKS6tY1E};Jn!He z!1Z|6YXo-&A23L5~0Ben+%Uba^9V{!GuQIk&)*iRgPB zuMUOD>1> z9Oy?27Q^^O{HaQ(`or%>F7JJ!bbWJSKUa%xyCpib-GeXWH;>Fb( zZ<3i}k5(6h<|6$cNNTFk@%YR1C-H&xzpKcWX7J6`?4ELN@-MLdlT`FX&$r3TZvE^} zN8@8t72KbK_;4IO)wkXSbcx%!8%+H`-Lp8MI2Dv-LH#KRa;lTKvHtjx+`A8w>`UnG zr{GAI-h;?#d@_!OT}QnPaOGqa_`!4gvB%GV;t=}L1jTN0k~&A5Xz?tV9^u<-o=?wT zx*Lg2?weaY-_QT|NoE-|f6w1U$>c2N6us|f{KK_REKV}1K8+%0;N;h&{Ne(>_}G=` ze>o}cPTTe|Vy&XZ)z+-XqG)O#r9x2rO#XAuk+`SVTr2%K54JfqJEVXtX)bA3y)Y_GW z>|k9woSgNkaq$C+CvrH|cvGk4CH*Wjk2Ykd@+LTIt}W)0tjd#PH5tH?jXxga?-}Q2 zv(8X6`VO;uc023oU*4D`UZE?z#*&h(Kw6T&Z&|W$&5)y5h;g7?PM*r?HY8I{UxfjDJ;@iY*G`t z-b>qL{>Yg@Vg}AYyGLBR3m4+`^`!YCJ}5`Cme-QRMCp%k{Q{p(Vxw1*`#1f)4Hqtk zQ;E6dRo9Y{v^6ddh4XwZ?zAe-TjmPnwvBsv!&kvfQxP?5`Zkj7TnOS{u@Cnc>m^jb z6&*izzqKA08SPBA^JzBqO3-Ds8t?Uk%{J5U=1p*wuzxv+NDhF->_VdXvx={z&n9@& zQeW{gspR=ptkTn3u7~}(X2X-vb}jy%YW|QiqKm?F=AQh?AWpZ-stEXRCvz0^P_v?vqihIS$9_Wk>uO-5mtK$W|Y1 zhP#t|KQH6z&E%slo+YoxWb%~?b*Z=T3%orYj)#IVULlRpD89nkedbN}a$|G?(J@9_ z1^PDT=y(}@gXa&U$7=VkH+p&PC!qE{tjd14I?jr^oR)ce{3X2Ol|S7K(?&m`d9s-t 
zK|9xxwho@EtKaQ#${V`Wr28AHHb%z{IPn?ToC=zb=yfN2SJS!;Y+BxIr`B>JYHEozMlGFJCaIA*y25@Je+K1KY4YK%uXSb1+>3Z~vmD#Dk zwQ!t{|I^sqE9hZa{VgPq55r&wydsmyFBt!TKk6yj786M@Oxwdyv<*4RIbovE+R)oY z)~h)S=nbZL@-`z4Pw4SDBPZHF6_wY5<$2dz!Xt01XR~T8$V1LaT5I19*2%p3lK&UE zo|uyL+RrfFyZD#9rt!)6*!5CBfhJ_46?#=6le5t)CxHt=o(#ShpvO?3zW013v>WV+ zvz;3ZL!sp4eiApj`F0MO|AuULquce^jpK1X}AFgc$X3D2LKcZRv1sKr!h zPF&nfSe*mgU9=<_h>p=?Ggr&g{H-k5$E5taf+LBYt3!^j)az4b-FOhhtNn6TBw4E$ zS^>_3VfMGFbb201d(~``^X?cyM>{}X?Nce6n<3M;P4@3!QY8rnq-o_{Xbn`{(DP@Mc@zHu~g@xdg}R zu{QHz_c6K2dGB5vOr7of-0x4zb{Q)(?s)$jk*v zgFCbLYWh&Bp_|EU&fS-jkTN(v+1=zyOPozhtx_8#ISI}+a^CCJ!Hf9ueq_|V$xfdB z7M>sA>WzA-YK5FEOizO^5vrTeaV>2~49HM$%m!!9CvGE)r|UEIqgENe3R*l%ZsMVq zSgy$h7xS(p`>J?LC6Z^n{u|<4>e1!IWgY0>aKEyeSi z)$@yab0rOL>-*E>G4^g1Op_~WFzy})-pMRcD($5@d$Qkuh!d%Jom|?fiT)OPeB_DL zHo08iZSm&>?K0y{F#a9(xO?c>Ds)Jd@H_Cb63Naxr%*Lj`^+Ir(WZ;hclkVs9PDT3oBDRO^p~niCwl(Qf~R?ZpYy0j zg|*F`I-%hITJO&nk-w!ZaaELz&ucOgzXZp~Le5f4PtoUt;Q{iK8cStytrCcb!|)ED zYr<`l8EK7HYqeU=R>k|~fAp8A*J*U*O%yxPw|?l9d?tyY&nhsU;5n7=PHUQhChOxz z$k8`iB!6t`3nbsq^Keb zi>90agT&juUpzmNyp1Gr+x0w=y?c)3=!yEN0dg+-K1CW@Sv$5ysY~>~5#-50ToYa9 zv(5X^_t@J%n1>p&oadrn&hfT^cZktnb0sm#&*IGj^u5ri&9$3Mhg;h7oM$DMeC!>` z-f%qK!J53Q=R|_UyZ$_w$M<+2dbI|ZGfG>~T?Ct)Th%k#Y8I>$oD#G8kWrhF)9=7> zzV%Qd?WV!6J2_g&+9YyqiT}gE)~e9g=AM{`f4?%;d~Ig335)c$0Vk4kr=`{86mpTQ zGdUlvhMSM+J*RjNYI8@i%g(9P2W(;TuEpbLC7hbEKUu$b07vQ>K4#>MG{tOsh4xRH znR|iZMt4f^VFh~|fATkARo`Abb&r2t>|ftOm3ij71L$kwd~5r?p5D9--&BlF9L-1n zyC2;j-}+iZ9*0X;+52Q~R}P00Nguk$>*54GF4QJ5Mu~1nF5DBzTI#8%0zdud|a4ViYse<%KL4nlN8I86llcuALmK}9+e;nWhr3dUg z>gy12Heo4p?r{oww$$^%?2|4TCw6yt)YDjZsf$JrgchbUXMiH#CK;T z7#{QF?~Sn^tD4OEpZMGiRc;{>ITiS|XOfAvH1em8Zv$Z&&zGsR?#SXC>QH)A+mktI zO7vMO*N$Lwwy~k916dcpPBr!|p2)dFzvBN-Ib&0k=w6T?L?`xhVxOFmpVIln=fuY* z6@}vqb86A5Crq}xmipZN;84blo)zM2?&nk^J|Ty~q%>dcRqWM2&>+4k@$fIDbfCy- zP3iyq4gLT7d;DlFLWTc4G5X)1OTBM$F7lu&;oe0o!*R6uHQ&RH&(R|1LuZ31eoO}i z%76ci^*y{Q9kF&{;cDaDB(jpd>9s!3rfZAXwdm?OxR#0z$*s^FU8=Bdtvr#p^2yA% znDy<%UcE~0r 
z$))}t+n%$U@t{q8t9RI-rB+?_ihi@NoQ?wVUWpI=EuM=N1oomla(mb27Ob7Cu`N}Ecu9Hb+*pV?hN#gPf%7?$rYDXNF(y}nD57e zrVi+nOX61eG$Z4AFI8ImG^Y>q!JRs0rLhfj%}}#hyxph~f8Zsob)ts%)qh)5or)II za4IXLR3xnf_Ut4Qznk86pwgG{yc{)Z{XHGtxSA7hEd+E4J6&Sf@HFGspS=Eu7XPO)k?(Nm2fO?wmFVOw)J^`eIzI6 z(Xd{P&J)0VCk;Fr4Oe(Nd(>4Z-2lur^`0uWb4XVM&^_Q<@?lmaN452l$~FziODpXY z>-H5&B|l^80Br?D>d$tgxp~8Ua-oY|%}H0I(@K2LySl{NM7uIq&O^&Y;6KIU&hSJ- zE0XBgE&98ZELH_e{0yeR@niH!{*x}M4GW}(hGR>|eqiN|qm9Y`jj{L7ww%35HS zzZasyCK8f-{yCxe5H>q~pMZORgY`G0<0d#YN9!Xz-I5ec*JC`rQx9Yl`E7(^i8orO z-xo+|9enO#yak@l$?C!AGoQ}9Ra`#`WXZCUJVX5pn(ip9QgShU$g(EO_h9$d>NV9) zaytDWIlG)q91G%;wU6)WN>DzBoAc;gO)|Yhn|LqnbZRr#{M_388D^g$G`y!iI{F_e z=u}vb*!C&me(eCIrQCJZhD{oOPz)uJN-lL}p*fZ93H>5m`D(hwh( z;ULOhd91eguw_|4l`D3@<>8z<3W+<|9~I6dpV{Ab{x{FJplcKGe+jdvX#S68;tH;` zV5f%AqUHYg^6Xu(-mCXlie8)3hrE;q;;2UM2~7^O{`IGf^y=*PEbZ7d-k7 zW)<1d>~X(_Vd7pA9hvGYwb`B8=-ttGkrrn3y9=pF)q-`NO(beVwk4JzCk1(%{0%+c zUSQb_Zpph>4lQobOZLEtR7j4V4mh7vg^uK}D_j#zn@q{chjEX;t>N{uF@HnCrWH^u z^S_;^H?ZOjalF*_r%p}0$d}vsWCh=Yq+Q7N^dgOW*|fZ$j&Dd} z@C-|m%u#{-4i+N2;@+NJi4L#(loOnuWNkHBZbRbw;d*!8r0T2Xvk6>ypfu-qzV`FJd^5?9C{kdTH7zR1jeF06!f#^l8Z_`WGjq$7L z#!k#Zzovx*G&D}mKNA0#{PPczfE}KRFIMWicOkiniRny^nt*yGdG2j>ot&DB+2)(= zNSbK5O8b-b8=u(3F|DB=eNd`SVWFS4Bh941$~8r$&)>+yKne5l!|Ax^n(J$p84X zruM;Ma7!k z1dEBZY><&xpw}ee@A8g;uDL?0G}by18B#;#$^eWsCEe7r@dJ zH4-271=>GKP6mv0ZzQf~*WZ?_5NO|;pizRu~$-uil zHHY&O`c%iL$;CgNHP}EeKhfX5sCqqESF(s@3JT_(+!Qz^KkL%MLZy27&E|#axRB~t zzs0R^Cg<&W3z{>V7ieC5qks-NsL-ev#h;$dQnr;)(B@HLr5CgMbVR4xMF3r4L@K0XEI zeAIe}&crJJ1;=}$z$5TG)1L2ilpRmrH>2BOBFu~lleHOIeozXJnNsT zYR4F{5;)(%pU&hZCpz)7ON`t-s5aAx+3lW7AMdjc9ZB+UA`g4vnfk#WvG~cpai)8n zSdL_cd77ptvL=}=@|NwNdYDN{j>V_*K=Y7xW#PWbD5(pa_>$w{n`%gx;Lo2}<%e9) zKJh5uE+8F=gRViAFTegHgjVwKpR(w`FJUrL&jkLGL~IJ>~O52_A_^=;%M(MtED ze|*bQgJ>w}*#{R=_vI?1=FBAi5Wlozy2{T*?q1FzukSx4*TUa~jUlb_>HPrnzj80%2$Y4l26sO|6^ ztFL2m>r@W?xKJ$Iycp7XvqiP@j-zT@L^t}x{`+BZVA!*xaT0bKvZsyqH zyyb)f+Wo8@4l-t$f^zY+2`0f}LxEeoPlDe*bn_FRvVY3cKg9vfLYu_7tiby+G^d|2 
z<1=?PxckAqw70|&{eS<2!~cEf|Nfo~!=>>(@l7es-I82A|B1l~EPsp?_{}-afv8v3 znk1Y%5Z%&y{A}_wk>pf02dszN4+x*U{du*^B8YoA-KoFI&|N)EH-W<4R8E z2a@x4h285#Lq0G{ViMw6awhKH>X`{Nt91U&Tem4JK;EkzqvaOXbO(-PFK~@fk`c7D zre89j#IGgq^mdWeoPf8Yukm_M49ua%%5J+Go$kVVr{ZgQ8hqLtG4YZRzk-P;H ztgKSSF1xv!)(JPG%VJMmU-bT#D?jM}eZ2XV_N&o(8oQJlOUXDEug*GPuL#e(eNP1M zg$1v&S4(!t4mg!uVvmsgYUZAK)^hQUt>(F=#Y`2d9}m;S9WBt?IkYgR3m3w53hApu zbJ~%DC&2X_C|i-VtZ3pHoeZ)a(D71mCk8oZCNuSPFo_t?QVxLUt>}>5P2SeUf3rDW zC;RtGPtJ#3&M;Qcibt#*D!5Z$n~o?H_#e^tar!yWQ)7(&gkEkjhb5k5CN6)5+R5FU z%pV7kk7Ov_fJ3Rp+`zL#==l(KeJ~4>2$OMONnXpudH-nrb}dOwzMk*!`Yqh6h0hnj zwjBDDfb~f-oh+YuyZR=3kjkp@a*t>5WG$K)by8KVAC_fFGGyo8IrW9Tr(Jx3L$L_NfbpX+OrQ@hA8cgrI2b8?PZ;!RHGDC9q3I0t zOfIT*?*8CzBa}PE_^=q{$$XMXhF0)t#y%zvC0Pe{<5+U9{0&y2c``;efZJPm9bc6d zMLV-$cRf3qI&=ngB!py}w>SwGhnBSb5AuFy{6#W#}H zUG)4ul5!FIn_adQv}QFpvaV{+V)Ot}Hxjyr?j`rfBAj1lp41WN+k-R~=S?!7y}PXc zvI9^ZXA*0F7ig1NJz0q>fp@GonxWm9_}h$SEbEEn5L#CF*5wQ=aTGZz*hoejqy5h) z+7O>6(B!|vvNNjZ{5n1bvr#>xX6C##%(e&Vr7`OrFU2dYQF?*?L^JkDco$EN!Nmyk zBnj!k!cBFpE2#Q;zYneH>?fLS<7?7{vgzfA|+OCFgq@XQ{~cdkw#gV~u%++rntY)6&8 z1uhN9a5-8WOK}S6$@xw8UK@cpd5Qazrg-dpiC5*#kCKPvT=-{2UKU=dKk%9BdyKey z^j?*9%baAVev@76EV5F~x9l(-qP3S<)I)ubUtMYu#*;FhyXikwoRd*Cej#7zp+EUq zp@)lMn4Qv8CypOftZ{aMj&!aZf5@|NzpS29ySj_}t*e&qNP6?Y2QTEbG_N0%)LMoCVM9lvQ=;>xuwr$hVk2NCy%Rwik?#7QF9`29-%saz`Y7=4jNqm_3`X{nS2~N+Wo_^Dy{aq8!&C*P)5y^AsNWsTTeX}S?vG&02~cxX zIRW08v6j|fV{){wJGuWp_{x)&vRX|P)En*&#^LJzE=B#+KTI{d_{=x3M-|_?CVDMJ zH`dxInTYyh>3UYS&*^t?!OKK1H#73AcJ~D|ndk5E-WsLdRCCIX-w0HXclvUWZvgXs zupEchiAG;by4Tcs(1>1I5A!^CC#ofP?-D$ESgR}0GVzC_H?-D3e}h1ACLGRT*|7XJ;lAYLh>z zg-tcuFTvZ7FlS zzx#HE&)4by7`sR3kmYwSx>`mb*){0uUfzta%xnH8^Tr^( zW}hPZkoo#*l&azSJi2wNdG(QgKY>%SksJonZ*aP*p68I@BVhR)j_d?`Nww$^@J|M( z?5})FGu|-Zx(vuV*PxJhCh*)WAJoey0p50F#I_cHNOV)+l5Y- zWJ}4e+kx(Vr`_aOsqXLRTIp$=4!y3>M{Q4>=5un`WY4c;C!%Cu;}jCTukk56Rk?m+ z@%J=PB{NL+FH!-!jQb^T{a>$dlta;K}aQapW)+m`_KA>}~zky^{T{-;mqdS}3VSR1!xNZpDjWI*RluMzdY4 
z&v2IDreY?Q9f$0bucXzu%vtv?GL}(zHEO&?mPgUVpY-{yyRF%mrs@w3ZpU~zqw>K-qo8uw;S&9{okpb*7%+* z2jbvHhUy}@+~ie~TWC$h4URNY2DR{Uo+dk8ITfEHCe(E<%W zCKpw_cbb;xgS$KFNG;@6?v8}#8~W*`?=SH(zFw)bI*qKxK9z=H6_&MC@vS_%#ZTxA z7B-mxtKiZKau9!=721x+!uVpZV;1h0F`_4$=>j^Fwhq?Eb-A2nO_Y2?axj_}55=K#>B=JGaw_yB%ijQe=t~b%QR_YJbT)!#ZL*z&9SVzC z z++#fv3co_$E}&b>*r^S8mi%ei>r9MF>J#+W(^n{v>RHLDlhfkaZ1E1QMB?L()JdD! zqkRcI-zUe5QQ%?@d*m(_AV^*oZjI}{yD&XVinbv;}No-@#Qi}_4;%$mbA zXV>u!+@r;};Zlj*q-tzU*u^WU3L7!Z+k3#V27K$xeiC~$fUYD0s++buqr+3U5^E5f zburya*5z8hCu?s;iKn~~AEiXb4liuR@xC>qt-pXZS(bCk5g*cJs2@L?Dj=xrnZA1J zM!z42{dp+e5GN*Rt&X1Sle7DM`wVqbiz?KDY$>|GR1mh5$4spZXhT>Z`1mG#t8I@*u*++Ijs>gPtgA|=UBeJ(AI ze|hrZwDwkV+NPe$rQVFj!~^4IbB*WR8&5{dkbs;PG=g6zyxRn>#N8YMzi87cP-HJF z8lG6Y_fVlF`Al49YRY9Lp4{#&%`1|}`EuHylhW0CiXYr&y1dic@C7#bUQ|q`j{g2` z^HjV?+JNl`&?U3SayZQ<={?-dPEN8{WN-Ir@Sg8~;%JkvHG88O@s}48JllLb8G&lh zu4h0unI#-Y1~=kf6;Q;ol|kp9y&K&a3h%7fYk2ZNw5Us}N+O)%S&%c%>_M+B_!sYr zU$l}_fVudR9hM=W8Vjcb=tpvQB&*0V_36cUhr%BJInji%w~QIiub|y5biI~#&UfwNBiI5OH;kfF?PqVr!LO7@y59* z(;LoB^jV$FPiCp*q~v8|({h>;>ogayO2hjABYpNIo(FkjGsD63aB)GUsw~FqMxm?x z{kG7?RjkA|mM<}yiLTrao;gRTj)vJs$%vA=j050v0$X(jJCH+ZAJRMJf$%$z5Hi^#}(NnHQA_kL!{otF}prgH84z-dc z`a5vHJ)jXoZ*Qu-9u z3&D_Rm}Jad>h6yyShDy2iTPdPH{(;2O31N^^Q^I&vKq-+^A$&h3_iz7G45 zEbQGu_#D|@L$BxSbw7|KKW-`u-^tEAp|3V{WVSg-GUt_?E6l~qrtD0rRGdVAAM(WC zP;X(OYd@KV+{9XBSNm%)RlaKdkb8^((NRs}5!{t#Yk>xdE{9HlJcqy(xt3BlH3M>Eb=-nz$XSEaG zm`J;c1>Rq2VGGO9hn0DsWFNvlWW7|1t*DQ0iKF?ecW)zqZ-eJ-@Lg?u`~vM} zf+R6MGfB}uz4;>w&-V23X0ZFv)bV;dlRk7Tc3c`+1x@j6_BzheYAjs*H~t&Gi77da zwM`C}P-|aLeO){mZ;AMp+~eDy%zBPR#eF3WSV4_*anyXL1N-@ z)7?|o`1=9l!413WF&dlANSs``%A~7b>K~QkYn|?h?LFrc?8}DaN{Thpu`~UMN8B>n@ zUsrPXSQ2!#|M6nU_usQgYmK&Vl7w8zj?7QRr_*ra7&29o#WyuOdb+#=yuEdAbWXK*&PFUxYiPYM<0WGs*l03EX8Qp3`t4=SI(Jj4vukXw> zcA1kThI5x|EAizLy*x)gGFLs5G_KZeAJ27T`KIIjXnoAPV7Xokly3XIkdDM*amniv&`h<8|b;Lgw}!Uax#<}ssrd$D(57B)?IWW zHI+U>ui0$WOK6eI6CcyCL@vc6WCy9t{z3L{nj06}Xk{l@hM1G3wogXbs&L5Z(HfM? 
zX?x=Cx557nEo{+4?zcv#RDwTSU)j03gx#o%3h`l$C;u88kGIWScaJjbXyl#2xR{!h zi}icH@#+^>XVZZR?xZSmvpUO+=*e^1j7>WMjaH)aW_`uudIbtigYV05O0>lQ*pH?? zsoa>|u06i}°-2~FV`&z$?ijp(W!FZL-teIElbhEY|-z=>2T$ZAq zk>q}FM&>GlDX|r0(XJcIS6&-S{LS3BzqiJtdE(uxz@assvJ(bJ`mf}wZPxpn%WAv}7(WIg8L$v%dtd255`~_uOm{EMI z_3U|NcV(g07J&0clJOG$MGEH7uGHe#7gr-8sSudmy;O6Gt!@bGhG>^;gI{>6skRfF z-x)@!ur!I}uhhe(Y*&4DVhJk7k0YKilYJWItz=E9%tjnjQ0#j!BwOK)?k;vOpBsAX z0jq$HB(*-fmb0s&BqZ`@=-mOVDbIRCXUwz%XmQLjSy28tmNtGTztwrGV@FDSv8_WV5 zX(jPAt3k1e^_cB@`$F21L2@lknhfesz?)d+%!L=BU}~^j0mn=9|Cr~021%lIs?h55 z!TAj=UiEzHN{*vN_n=A>7P4!xT1hpuM6qlJV>J}ph8yu;OZLaCw?~4fwfiL%PP@Bz z5L&#C=h{DsHq0Pru{xD-Y6U3X@N{2LG}QJC@Rie7I~2{X z&>k(M7Q{H*f7`VIuJk20JzRg0%xoZY1C00GaAkmNRf_+~j57oti;NWWJ^Le!&SkY1 zX=kH$y6CBr?^z>foiNVmoxB}sV`D+rWH;_Yx;lU_TAh8&to&Zo`(Nl{C@_&VI2V?w zuRKX>9Z)IzN!{Td3%nU6#9;ta2+(T}i=@%re5a@lWaO2Sh^_am5`Ral^nI8zpFvqsztkK}>w zkG`d8L1i3CZj0~0nM!NPyOW#;$#|VT#vb5Y2~;B1^aKo)3@m52ikU-HxpG-!`!|(${vm; z)$y<%N+l9Ee5|Ow(I6b**(G{j&JsOi7B$!BAMpGv+P2Z(YjHJu=++}{`***do$L!UmD%S(3+|IjeV6s&^+q{uot?H(&o2xCYH~Lem<3;+ZwW zd-31AN1J=W6c4Ca@W}5PxMnXqS)hi%wF`(#e4=}JC#PN~lF~;}>R~ov4J}NL@U67( zaBpUOtP1Kj_%hhf2)O?lf7)9c^07q;I}e-Y32#uk|Eye=E6ZO2U?r;F{*j*>8G6E8|&_+VnK_2?x{uRNpPp z=E*3Q+$(j-ZmJ>l2j$)L<}=*x$d1GtF%wQ;VJ zH`>8&rSFyfe}z=8WP8Ya4W&2vJ};gw2zb6 z)~r}hT5=iK4nv8z^zo9lbh0QX=k5qC=4`dPb?(9LjG(32^}38kKZW<%VL2Yxr<1NX zeM&v=tMKI$tMrUX*IUi@EiA=gy+38V-Gj>U15Xa4%pDh@R5h~t3OLTzb{`nTBcv4Q z{>aLuhg23kK>O$5+djT0U(L6yOllD=gnisjHQaV&px<+r~j!mydR8G zwLOt)*$sLDFFy5uKQzhiQg#8(@wb9Evt#w7wg#Dx^#1sBkuO4@6_b^;aHsb8d`~bR=9gEa8N-J*_clP=_O8?Qj zSA9ML<+FpydFOLX|o>O;xBp_+HU~gZwpRbUG!Pj{XZJ{egxkscEM_RH}EBY%{j*0 zzUcOtmTQpnv>wQBL!BeQah>)mz#;xnsYKlqXHF|VlI2GFZ^?K@fcyN9$IMRc-VE~@WVk@~PrNktJB$*^)R5VC2lPxGk+E#XVI=Hz81EpP ziCyZ%9^6FNZy~=E3fuB0_WL|gJdI=h>1`Jlv8tK*dQu&16K~MLyU(*#&!Iv)mNB_} zGnSpl(#6A|E!&;*=3T5>Q+>|^*Dg=>p=;~dwS{b7Y1~cCm#1iRA0tZqk7CbaO|w=X z2i7{qh?liAiya%xo{R-`@}%zu|483bBQh0_x1h)ry-d-{b`<#DI~{xrZu_A?GNmPl z*)VP7eruSXVRm^kx?YW=@ha+WZuq=*KVW@w-v1Q5x4E*+x*`!L9gP+r6%_cpw{D^x 
z$w${kizn)9Dov?K1IE#-J(G3epSq0X58!nF0#uhyK0~}h@gPafk3tf_LV?CJX0El6;bR#eKg0!0xr6^%GtF2=>RbY8TUq;owV*X=_xz8^$>;t_r66=xaQ? zM&n68ZM=#Om2frw=cz@#ndIeMt{;h8iuP}qp?(O{c)!=vN)1rG<=%Y1J>b8|IJ}0Y zzTt_i^DE)VO0DfFFiyV5WO#W7f0O6uc9htGYh$#yUTZlW`3GKRe|EI-KiMEMM~pu~ zA|z73qEay%Tw%1CYW#`+%QHr&RwVFf^WwJb!433ii;-ao+}jiyupJ)T3JChJ5}Q#Y znS}qWy|+AjrSBuPQ0YJOrF~KC7w|2kQQy0k*-&4Ui9cO(`DIt=eo&t5-V|6Qi`wWy zkK%JW+Ee$TWa=5WL-ECArz~AeeW3Q%iCOLc0^7H>*o!PI^j!84XRzYQ!1Iz%$ps$Y z!|ZQNf=_ZVyzTBP@{=kjIWgOf{$>59dTC+`E0ef;;FcJ$2Bi2BP!A(<8@v%qQMDM~ zlF=iPAEW41DkYWkMm!aJ;CB3=Q+I15-Ak_8p)fm3zthOy9&HT+$voreP7)DrVWP`*v#!dWrsGtlRu+MCDcj18l%8kvhF%PM&~j--}NVxTT($JT-@eu`Z^y^)k9Vj^p}C3;HD&@1$k zSxu_fMIt5@T*(UhaBrn9?hN*{jW>RVY4-Y8dh<{x#wUf%4 z>p-)MwSF3o@lKh^Hhe`l8{$OrU9Qx2@&nun>rP;f1xcpzkK!dcQ%6E0+MMoZw{HP0=Ko`Nv@>qHuQzbm!$A@?Tp5$1?FVQnUbiBQK-KG&WUme z^%EUD4=kxBmss6MY8hAW)b1#pZp${Ugi&{I*Vozwo{Xg#U`ExK^u{kCxsltuQj-lJ+pfia<&3G4dEO>ZKjd5dVphYm7l}&@(t4lbDICwPj$|P_b7Rn?4Cp=} zQ5(>%9=Se-#zqQnCasCsT7m};;7<0VKO{|wxjf60$)Nj~zuidr+aT+v&6D(gpHEHc zX!uZLz!Ej;GIG1Zx-Y-oT0T~uNQ+OHb2oBQ_Yy-;knqow&buc zXx4l8eK;m^qNTom0R4NQh<#3e;Rp2d2AVa1!#6Z-5IM|wQGGqkhkt4G?F+V-v~mj0 zUx2dNt!O}w5*xEw4>_|><+?~ie2jn8(UF@+#Di?oMjt%b`+shkv-O|(>v=uJ8IXE&siW%hMo9k&PBOYQk;$QRd6ZUKT8tpa%N1oJv}Y%^Hb>9sv7 z%r+C>PH$4}sFmK@c`x;cE9mul@Wn$c`H@<(F*#!izAeyd4qVF8mca$>liBA)cayWP zj?Zty=S2UzS=A;>#4AZW;dg?xX$yby^zr&yLdlqbv1f!q~T4; z+4Cg2EFLt*$G>|%{=Qw@Z-)bKy4SIQBp#w8JiEKFYj={7Q%J$JU`?)xL{MiZDf#@S zdMA}N*23w(jTdiuE9YbPfo_iH)}ZnL`jjd|L-n)Vd)>@^nv$IwBtL#)LveJND{r7_ ze^w=RzmgrW0WEEA40sAg2eE21&ALuuIV!+7u(TnS*^f>IiY_2YpOv(bU57+CmeXG7 z)<+K+Q<`|@Z-u4mh7+k!9?!!6HXC^k6~DF?+lfMhTyLW9F2=H>K|a(9;32p^LK8Na zG1k#ibr5H#x*Qoe7M^XbB>SQ38RRaKa0q_%C++bGTdVKCz^@amQ>%0wc#@;)G_VcB zxm4I#LpO3ddOM6>VYj9m_pV39#2_W>+kPOaNO#`S(F{V&E<<0c zq0>#Q&{X=krf9t(ELZ5|7~}awxU@FDok5Q4+3Os{awf+1K2S}xPQQS(RsrP-Jg#g8 z+}{1qU8x9~1!!8&-pByoE@ngb*Un5>)q)96*%3cmt4hokPT>|%Cg65+JOxAxxO-5r69RTC7D}}0&6^UX2N8z&n0nVmw6^JX$M>L-3ZnPNoCH=2ivXrT<`IVdW>DZ 
z$#2dEzhLK*kGTR|fAL=OBAwv+Uhk$3)t4xA7no94Wju;Jjo+Ky`ES3!FCh5|Y+b$i z3oV?2QYAYf$Dw@ovu-Z-(-N=e)Z)42K~3SLsoBD#{M{m^ctmiM4#Dw~z14`bkSiGHe(($FfW!HHcHVALbZKx<;&u8iI-Hamh3>~Q+@*FZ&5s%f=XKXxA#&@ zdvCE*mB@zld?Yx2eW&Dq?c#eoT+8WpBK$654-!-L8m?YXDlTLVr~2EPK4hjJujt*y zsJMp?#asJq_NA$JvRl5FBn~Ctvv57N-Fl<;cKRGYlHqhQCp&j*E9dkd8a19{?~+yG z8STc~e^fCBr1JH9#_hz0ucBCD2sXj^5Rz~PicBIORke{SkMY17 zN;*?{AwDOGu*n=KCjei7G4U)vfhN0YzqnT4?=IJ}THHkA0&o1VGdulG-+NH1@occu?MGK3yAMc)KOZn2v@eIp%&q zmBWk_S(%1{1Km%x+2dfcK<}-5{{t>nMU}QF@I1|b&ifC7A(pct$lDsxQm=X$y|FV0_3)UUuaEK=u;h8=s?UddkY>BwBDI znca@hV_}x+HJ5@i=bKaE(;gjvL76W|!1w0riG8YPWpF0kn`=2yFCVZ{*|n=jg0rKV zNcmK1cm-CQ^cnw*cE*one%%GzDrog*)UD5Meq@YXp{MLD4nx!SG^nfR5`T0dinqh% z7w|FjjUzlcNPEd`lKMo+9+FkeSuE=W6k7n#4*H7iyb`>}d!hv$SP%C-G_j3uJy_SZ zaOleNi=Q!~w4Ua&wUE_AefZVab1xVrBBCxD zXTA9;=u;yoC+Xv~-W^@5lJcwsKVaj!_>{UP3tTUwrNlfZS79GebO1}@TK16oFX1%UC7cU4^-k^;?GcnHJs?J?xbij_{ZvLIzFw# zjUDh_`eT)j=`7IxLpU@%x%}AS#r&u3)@KbB&{bNC=^T9 z=;7`slC3N0jDPw(^ckp)*`P^wqNO0uZfQrdm;CVy;rV)DgD%FmSG|>J$yBmfLaM^o z37$x_SII2>Pp-d6M{2>jp|<+dvt;W!LaXu8I2VkGc1=zCYuWY=H0D+miDsQ=Ub~a_ zWmj(vINNyYHZ*z-1exD9^4y7JJ+Yk|U{?_h){xcfJ)g6HOIWleT1lL5OPp-$nHy0y zHg1=1y-{E+8l)1~9Q=rPX$!ri=4?y)F&;Has{dvGY!A)LzGg=6q zMwH~zZ;Kz$lBf43-?JxoELxl>-%ne{@JIE{w9ZfZ2LHrF0ZXb!X<-L{5`Yg9YN0% zr<4&gl_4{p&0rh1;r1H%Y+(y;LD~2R)xo_-!BLitIhTbhOD4ZC;>ChyHJ0kK$r|yP zR+2eswr8vO{xawmn#s+=#q7LXPF|9GeF>d7-FR}H9+ROj-f~0DV5)*A=b3G^xrt@D zqOhe+z!@*AWC)CJ>L0!N4cSlL*`;`!c!JoyMIdkPnT)K9Q0acx78aB{LyxginSqSQ z$DA2tkG`6IlB>TNTKqzfk_-PsPma=KvI^(4pcjg*)ZbO88fh+z%1^o4&r|Ue&blHQ z(ryOrOWMp1^g{E4cyZruwEG$byP?iqXggmIqtJD}F(#D^Q?ntNc^7*#yvP{Yf_~3u zq1NMiHFS@!NxYUnHE-MKZlZF3)OxCWr)t#g@l0sv^GC&8teXBq z$uBY zaYwFF!^rd_nK8oXLQ>Rvj*-zQ|S7wT1+OH32c5NvU8fH?WX-Q zo``2sDo)l~rM~kJ4 z{fT%@FVNc$o*2SbEGej)%2x3PPCf1?Ts<6Da?WxZ*k)^aE=#ckMrHN;lFzmD-INSv zE_b3-#wE&2Z=mvQD9mdhu)@tpVNzYfqtw1 z2Wd?#Z{|dg(S;|B`I)Un(mJE?Yh>?Ukf(0>?Idm@>AIJ09%W2g?v0z!yE|)=npCTc z*;^zsRl_g9xj_67n2z&wc9;9X;${$Z(c8=DvjeC1#n0rU%Ffk7(wHox|JLF}_=R#U 
zwb~Q+wxQ%mbkB@?0!y8$-ShC~YxL+x&XSEMJ*D>3V&D3ctTREJTDrHBlw0UScB7I{ zGMTAzwv;+USrgPm`;A)M>fLz4bt2_CwM;d&7U-2c2j7sqC-nCP{91Y|^~Z9eoQVI_ zxPH&`ssHp4IA5lH@mBr4S^ul_Bbl-G@#zOw_W{-ZWb1PM{1&GBYA5IFwO}!XW|vst zXK~>R48$D*kjJ&3n=5K3cR7OYwJqPeAGWw2{g|Z~ML4r&Jh<@5hrq zCyUK>t~4mt(D9*3m7*^}vdaC$yVWjsTvC(hGSAiW#0&ItwK>Z^Y}I|__fOhNhUMC1 zGSz>wu5U}~2Z24MGxcSavXV!zse&8gn1(E?hmg$ zu3t!BdlpY8rt3PerCxd>zAkpRCTsH{n$HB&K_K|8o_|5V_-O_6QmAl;@1sC9!kwO| z^dOseFx}mwr{9p<{b@s8eEbbs##1TKq;gaAv#Ym~g?SCh4>r-_{k1UyO|qjl)Y@zW zx}~rD1pk+`aT*OszW!0ZXV>KtTH3?a-Q+1A4Ikp~8K|AzlIYt*q_BR$wGw}-#@;&+ zt+Su9k2kmbmRQIKP;y@~kv)=lY9$X*DG-FS!8V!De**8@B-|gpiwkCNcQaL7(qHWDJ zSG%9e(kJWTC0hIdU8;p@D+&$EdGy;fehC|Mig)T7yHa;28F#wl+EYfghGch^@h??g z;-{0n>Arej#%5hoSlaex{>7;B=7v z0>4+uTSdGG5NTACbJso)h)hGE*xXcTYDO4>EKlka*J5pYu!3!j5N+K17|gHZc4Q#2c~d=k77cq9|KeerObF+q z_aRzZidq-JGV_LJbZ$I8<=k+6My`1@NaAmkQbI#w0d<}(nJiscG*7odO{E?GEGEpn>W6Ifee0vw8 z>fuJd_h9|87Uz0DtAgP)dJmZxucevrIuzgLz&!pj=YxI@$a50C32pk3wL0ju%gCBM zqFL*$!^hqrix5@tgiZ>%Y^_ChbkrYw~5)218D5%fnEyclLxTkoar-#4q|BbZJG8 z7K5ZeDG%?O(y{o*H8%%J-Hcvvn$ALIZI;u_SM~8I3TB1Xm&Q#XC%Ztf)Sc|hWPhBoCuJeC~wBg+Fw z>gRAu-p`-WAv+OO&?FI#&3#@_=*UD+Zu2?yiyj5j`Fczw!w`0^ssD)@oCoLB9lFO1 zXS=(PyE=v?S;-#U0gCL*Cb}~fh~gc62H8$liAwBc6L*qTJ@oBOg1VZ)T><(;N+nxP zU;NI_RbmQn&~~EHI%%aQE0H-u9a?oGY_{Q9Q@H*^e?QaGW!g=Rmbq+DA_Wtt7Vf`Y z*tos0y;6(u8)?MqE~aErq8=rtWJJ_YCfWGeNGYLM(E;7hE}eE9V?+LR*! 
z$t`|EVHbyjF1a5Qk#!`RH>XwA{XJXT@yQ!wlzFUXxtCp-UX9p>|}Wo zn6-IHw9#j-v@3XD7j2T&=|ym*2KINLI~7+yfY1H78h^sne@LFNJ#0;4uHV&G8CJ4| z_u?m)eafurOU?pr(qsE#mUI`m6Djf}I1=-@5mg46y$$tza>$H^#hIjfm(gN0d(s+p zXL$BU`&P+$8^4xRWlARMhGaE0gb!zjk^?mPHj{^amN94tI=o)2s#luX8EwOX`0{>JjL7p{ds7R^CGmM-jYJgGA3G-Kg9d+Qx7CqA{e{!`a$C245|s$_;a1-Fu$q8Bbt zAy1LyOHlA&@3e%^GqB6-W3k>^qt_I2+7bp0!JE^9L<#N&Z@fw)8>4CG>7K8t{cWgN z1yADLyWALhiSLOY8c%y7*~v;Wl?3$x!@H#CWuI#5za5%wr)58ZtdIMt5}CD4GRi;U zdoq3;N(;`1TYMv{najL|29r@M{&Mf4Mk+qW&!H`goGfCA_&&f>nJ+(&W@Ev>2V|)j z)&^eLIganvT;uClP+ms+O6zATn!Sv&FMw$p?qtM!+PrNCs9Msd4d~O{b8WTK3>?Xn zd!_kR{F+|@PdRujG%HMPg{*u#qr~TCwPUrO7_MjWs5J_f!{5ZPO=7iIlcq#BUsXs! z9}?XJ#rwgnKFCGz8qt#zJA2nZnL%y_OLLT4f;LsW_o?0wK%IEl#X^0CiX)9EgUuN} zCF%b%7thZ1Dc%`Jm*WfAkFFg}<`Y|by_WjJ`Eu_+r~S!ryjaWkpunr7dnav-Pt*PQ zG6fYk8?$!eYJ6c*F)1^ulgLE!5-m4w&85@v2WyMA**lCs;xi>2PHy$R@4k&R!ht$)+d?Cd0eS+ktAH(Uq;%)v_IDW?0R-_J#q8%Xhbp&Ek&zq$-|jo8l=@3cJm&^ z?c|H8V)j2oTd5wDTr+iXYq?S77_^#;iWN|6Gl&l*Eja^y4USX5k^TFt$is65Y)VSL!^z0{4QQOo0#ECq9obCo;rO@DN9FUqw}>Pq<5XYtZHccdwSOg=B%5k> z;Nr>gA!rjBRn47ue4pe04QMn4%r)Th6K;KD>`0EvU-W)C&OJkVuPdls(F&;_yE4>F zD<0bG(f@4v^D_NTp2sREvlBHYnK4zs<62;k9@H=w`i*|iWG73r07K9?dloOlBJq5c zQF=epbu`U+8H7DaN(=W({L?=${!P~2I`>m2^3EH*_lgj zzN?Iq@ptTGJ^BKx*%5X3AvJC3TLbo~HhY^|8}xaPwmc2y#9N%M&%~#W&||z(G6HACn;b9M!N^LpmKnf-B&iiH^z_!J ztkHGFoiE6JV%$y?99^AMAx73eA$YYcQDhN4+og{14nHrL9RcDtjwa z$lm^B?{ie&ThO_utB?DB7>m}CeM&_8sW9IH`XfkGY(jQVg4OYUmxHmRHoo&|k0(Ou zl9hhR{>2OyVn0%xcj6oJ57s7?r{3{aAgl_f3BbX{mpk`wpjksFO()7hd`>`7M7sdSNQz~%MuiFLtX z?Is&V13EBFtBI^%MatH?_hvB*N#4GkvDF}F@jK|Cr%`OyyxspxY%%H|X}+=@r_I&CC^FuAi@k)b_=AaM&15sRE5?xCd-GQ^dddq;{G;o^g)q<#jeXxBlXWL z*E~IC%wGt{lX1KadHVzEZPxMyIKF_i#}6e@QA5zRztM5M=c=K7{AruBG(Y-&4qd7Q z@AzVU>ZvK-`K)L&exd)R)yxF00MBS#NF09VpWX0s8~UVTQS#sByte~tW$hi0ijE{M zqx>kkHP*Lx@VzsPK10cGaN{YwpF`%Wz-cA9O^xgt+I!zqD@fB+cH{`XrgFk+ttJoi zC7zB?MYQcgcaobSxn^#}sd)MR-KRt!rLOK_-uX(u$v>NDqwkFU>y1H)jL0ri88Xq- 
z{cq4X{=cV!Ai19}(%yJ&Y@~zRJT*k0i|KIU20nv#ayP^ye3KrkFItWOhc{bJZ>A>edcFAn37TBWfVxwQkgYFP@1^~ZtvKBKTX5wOGPb6W+qQTZuk5GL>~pZSqOp7R)5Gr#s2i{E zMyNO)^p)sMq6_My-7Zu~WO>ejHi32un35y2jXo!n_2*D&xHkHuW(^p{6Xz#5W}d$U zO;V$`3TZf|pmk17H^R510^XryKao*w$^DtEYFCig)zTh@6aSe#nEC9X4GcOA8rEQm8v^m#qsOZEnPHeMi8L6p_@8zglb%HN^K z6^sWzTVbZ8~` zeW0%|jZvu+yo;@V7nPo;>sOJe$>^87W%0~v1+r97%4yoAXuXnj)`eX%DK4aSKZE)( z(v$jhAJd%V)f}R)RWz}u=ii4(S{M$NNdF})(>8SLU|hb4G|x4wNmZc1pdN_lSv4mY zZf|yNzNaz+P43qv)*Xp0S_h_Z(}#_*J>qH9Bj4`PmyZqmkPJ3+4E<;CC}D+Qqa`h#1nMJ z^Ve}bS?J<>ncP!}XTAbuQbD+;-UhOw(YuD8eGj!V-h4v}lX;;JZe)MpieeS{Ft`$_ z(9k@j5`JC-l1cXSCh0vZ+^PEB45ClLm%3|NV-BkGx)Hs(IraW1^9%~i(MGZ>UFw~w ztWoGj$LW&#aP=CA3V zp9H$(z{)xP(gN4>Nmp;zE76=oNojMv=Hw*41Bn1mrk6!%(w4qF?9+wruR!hEtkgEp zrY`3oupI#RnI!iwV5>!srlZX=(sG{n;#<50{okacnL{;1$HZX9(HcVT56Fg*fIIX(i*63|`BR*@pQTuiNB5&sB~N|o z`5(0tUzOKTZ<#(5sa{9Vi3rNc&k%IV=#jbC?QG4v{x;B7ReU-Fo`Z4WY`xaf%GG3i z1$bA2aT(Z?r#UAL+1JPl`2%pJ&-LV|A3jWg{W{cs6P&5cIMv(nDlJC?>KIQCaitTi zv$L8L{uhil$(-EV3cS0!CHp#)S=?xJGW(>?Yirt38bw>8=1SOS?V0_C2A(*?`-x!t z-dOb;xWy|ecJOcJIDf(6-@|2`C;kERV~xY95cCOaQieXndm%9bsS@>&H!i1B@ylzB zcSo{B$%2{eC=cj$80{)Y3%a;+3{F2~6wbNlSkiJWK2Jye(QI!gcI!r#AcI>@+suVKgeMg^{GctEWTH?f4QO zg+HTKXqHv{KJfqA-&#i4$mu?`u_s&i348P|=nh2FWV$H@kCHury~XFO{Esbo5D$|r z1)MMZXWyU{9@K{Ke#QN?d6(Z`^Tg!)|Nhypkdgm=?f>sv$xgz-r0)oQ=gI%l!Zqj{ zZlznul4yyK(h;qwG91?T1Q>NLyX>On&21 zG;IxeXz6-8IvA@|%}nh8_B4LMnddKc_i@^tOr@ztG7#-!{o)fE|KlBKntWu9a4(** zquJ4QuBMvuBr=m#^ow-t5l?0Ox$-|FdKnfnXTE0^Pb>tV%@dzJu9B9sS`k+D!~K5HlNK-PM94eWty>evv%r%DcLD1884Cn_9(4p zZ!vW|&Lr)#d`qs8cniI!pVQ%Dx zJD!t^TR|TWi8DOk5lz0)MylkcX60r0yq#^|rt9AvHI#reCvOv(P8im(F5KQWZ37zw>af5qOgeC)EX7v8Cm`naaO8eUFEC zeB0-nc@8!o9H_Td>}EBPjbY)c!t!qc|hWPfaA7Li;A$%L8ch~r7a zYiM5)Z4xWf4!)o3ZKnRe)7qb5Tu+;;Je85@To4Xq!TR{0Y|B^aCG*4f-bb%Ugv){8NbbOuMNhq4n@?vd!Y97Z=Yg|{o}XjwK817QX)2K0)Z2;Y z%#~>TDG6Om2G{C4Rd5nz`o4av;@RV#-Gv?*!E2IJ zZ|b85c^a+H2B`HgtTwumoD&ULlePLv4TwJ4jwg8PAdQ8|CO9UiP<3s7gCnE$m)OKb zev;kx0$8Qm@}aEAWMfNJqiB4(llh_^3O?uFbP^GM&eURmE$v`iK7da=>lbUYKMjq? 
zWqb&h!D%84+K|vw%{G#iqau#=(_U897vb$zcWbgK3)~qA`ZtZiz1q5+wGBlZ z@nL(5MhpgFb{3ulQ9Kn>O(e7W_~FOL<}C7-jB*FVI=MU(Q=ic{bDg}Kkv_5ib8)7; zr%U#EvfJ{L5&Ia|o<*JH3Ybc3<4ZdLWtYJrIc!7U!|)(}tkeBJ+U)i=pAsqcFaP6r zU(Fkd*7#O`%gm?N;AiR?4y6;(>cp6oMbXagePe8!3--If-V`5hgwI%`Qwx2^qE92+ zsr(y@@;o@!lJ7rwyIY|h$$FB#m?2;oqTh^3Z+JQ@wp1bbnglk5>va7LK&^NOBzhzM z1Q+A-Rj@kB6WdXGjpuTfH{0(>Pu0M;k+`xEM%U5Xx$O97pjb>UpK6ociQ8*{7 z)wN%Z+$?qFRa72kd`Wy?MvQ9a-PKXIIxA4q^9|@&6)i5pkE5+z78JV(*;CJ1^Y!}6 zjB}_pde+}>89#TD>vzG{oW!P*!cTaf8c8`_NcQUCC^OrL`VOkThEMIac@!uUt8kXS z-T~`CGWZc{_d%<2Y;aBihOr;@+4$sVNS^vcw$3$EJpukdvqs4%8ZFJ<`!>{SVO(en zzu7*WOzTsLXd)Y!9hU5rX5>lk$w9Oteo50jd5y7k2-qHVrxSYjg6&l(p8dBL=$pNk z6`sm2XS5)3u7|=b*;dMeKXIJ#LrtV)W-3bxtfqKk0Qv^%z?_^sm!ri%zwOa$Gb*g1 zhu@)cO++Bw6N3L zEzu#K9?3)B0}ge-Seeu&vN18hkNCSA#S>?h(Jqn3smzxOG<{*6ywSITccaf2Yde(= zo-b(Igq9xyf<%v;f^%niH_^gx7k0R(>m7=9URPM3hdv*n?g(;H)ty+;REdo2rb=(l z57wjY7Wc-}`|KlR5A{6Myvca55ycm>uc?Ul2R$63&G)skmyKzHk{8jVWYS7j?wVF1 zf1uZIqf5M8%9|r3yLp#Qv0@f*K=L^V0qSKy0&sADE309Rs zlKekYz_}YG{!Mz9qTnhmSH!W>1y0F+nmQ44(KB%?gWL}O zle_7)Dl3`%#U)uNP6AVHvih}=;M_>pQo!*KW>7>!`>-YQ0g`^-|`G(&EyV)(jj*OJ2F(W)N2Y0sk)D@L# zk^MiRe^0rJl=W z1)f{He*kLqMF09^;wh4Kl>67C)z$*Dx6RnAdOtOmhSQ#RJv|D&|LSwRa<^EUmSCE^ zv6qm%IR&;~)53Sq^hOdgmuwwKst?DdQnW2uGWIL@ak=(dfiTdY3xdQKWDeKK^Pdh6JpCyA?XQa|XJO#JYvuDiPDx%&dI(9coev7UdNfS%)f$<^LBOYN7 z?)21|dODS6ETGFd5Bl7EYc^O{vJ$By5`7qAJV>_1v0z`s>aKUKozdY+v#0VfNZj#6 zysS&Y-lm%a^mVr<&+*m;AYSgdc$03Y1@C%uqM2i&wz`7s4$!tBy|IPa`S>0AT5qgK zO}W|RF8RA|0B0kXGLZq9DSZl`W~erYw2z{ZeO*19oy)rAUY0ICj*ZY_l{Z$QVmzQy z?=@%dSu>wXl2ZY1ta0Q5kj9rInb32h`wgga8hj&7O$~$8c{m4;PQ=O0>{V-4DHSGC z#jO{}_86&8HrL_f2L<RfnVgcp5am%JStVgEF44by3I(MljX#mXm3~utQ-p<+#W;^GAa~S&HMCM-hsShsJ zHgab*n%XWENZ^~`xWM&P#rWCWG!Y@mg8DdG{Rozcpjb;L=A%gBSq??H>@U1XB9mXZ zBzMSS@IRo{%uEt#UYDMB@bte);?H>4l5A}Q!5DwMXkPsN$H4Is{Z)kdY*yt;qf>I;{YCp@wU)@lQ#}8+p2zDewJC?NmC1p-6Mpdt zzKs-ShkGwN%!XBaP~8ctoS}b3!{W7k3RvPhJ&-;oLf|J_lXcM{c=!=rx|@vMgmTF- zoEYm*y_@VSWl`>Yvey+pKeOqnkeO_EXX(E`O@G`o3(4;^y;b%`XmAB+#%b$MxZ1c_ 
zZU3mnyV3Fzt*pR_PAHZ=l8va4++L4?C3ENTU{5y0lj;6?7CN)r^Wl`*%vo3Vf>q*o z<0&$d{Qr$ME=0L_pSA_%-R`#mO=1NO(MswR&!E|NfugbgvVKU`l&{&*_#Qk@dMeYv zoTj(d=0!Mi1N)Y&5Dj3|oHcx=m_uYmH%EW{y&eDAiP}vT&UFRncayNJ=x=3n;>DBN zT1#1y>ZsmGTessv^4DBNyZgIyFD$=i3zJv$G8#Tl+w0*s(9_e=abG@DZvlU@x0vfHl`7W4G<&k~_}z$F{j_$Cw^On73i>bxW{H1D z&DG@8N!|Olu(+<^`2+5K<>{N~&?KW{GSyWi0bRgww05$WkjlI{>$*crU$6^Ddpi4L z@ojq_pMKDD@}Q)uN@E&On#877Yb}_r($_)WpK7EUiXMrL-;MqmMN)M&>#>hqL=lco`O0|;YgGmmCz1muVHywSB zHK;^3D(LfS{bz0co^~J8)?TmYke&YchlBnzexPoGWRx-$^l?q4v%=_ zm1P&Q-pp=5HSniK#Y#NwT*&Q>ByN@Kw;N+ddp4`{?4l=oV^-78!2WJh-;2zwEXK`g z-iw#W)%waAT6{6GJK34URLA|rq~K9Ko~xzAAh(2DYF<4|7Fu{Wqs}_eEYMq`ss-m$3vlC~WGVTe)`GDyCr{F(bzsY9thQB%KU4wQz$xrr@ zrg%5w^)wvXrN@~dde)tI&b*5w*%wT%lCRJ$JB&Bb*hpFSHG0FmK3RNEI~Tc^9o*E> zIK0^P`i@@hGLj8Ooo9{Zi(%EnTmPh4%}83fG@a&kqRZdtbr1=7PwO9pXPVC|j12vZ zCgWJc%I3PMj+B$uWLnMm7Jr*heJfwszZKeVX*7)7oIz6OnhPhdO?=iQb$G}Hn?ZcJY&VJ-fDwZ+iB$zaO{9zd>T8N@AadP=a^+oL$3yCle`%N$=X`)zK-fy z)ukGE>P;-hubgR*D(2R0K=KWl$=ZE5D#YuolP6M%I`ISrXr5k+$Po?JMg@@H^v1HVMt zWS``IZ|z4;rg-xtus1TVN?rOfY{Hdfc(g0Y{avGgtu|`Kj>P^i(fj*om1xT7=2dht zBUCm0x1cfIe4339$!!)-i$nGMZ`j0h_BMQoe_*0eZZoQuBcWfB(dFP9Lh@7j?-Ekn z9PI{xB73OGr94Bgjm`E4u*r?|kea<4(J7T&Z-@0-ve%idsSeZBCEQP2OVKxZ9hMo_ zr;y*jvacT*!&3EdG(Cx*$xQmx2>%kpoJ>zy;hqnoyZynehmk$4Bd5Fx(%{ULi@MrBbhaSU5%B(=Lz`yij~!Fea!Ly zTRi)oR+Ow(vzDq2min|c^_P=-F_G}uHTc|}>#buqf~*sJmvhejtu2#DYJYbdvwQJb zo5+g1V6Ksq;jGuwd*U0eE!L6;u(4PBR@3hZ;626@Bhd8E?k3lHDyyW|>{cWKAbAT; zN2QUSPUS&<4sG4ykZH*_6Ait^kaUQKd z)!pO(NVI=W>mO$ClPmTe@WwZ{lyyQPFMjgGyLxC$YkI=%d*6Gy^MhyZ@IPJ`*|9mz zyV+$pmE49>`N`@!HI!fRQ*tHi>VMLLl5g3~`Stq${GV#f;rjo)md~jlP`kJqKZ3LW zbANBq`hjf3C|C2`{m1S;ACJ@@zJ#0}r=Jtp%6;+YX>>Y(J$r^OCx^nBo=)^mvR2*6 z9(BTj3jUYX@*=%`VJ;Uxtz@D~+*mx~<8_pkPvW~%WuY9Y+oh$Qc-YSSS#PI0;4p2Z zO76zO8rB3y?CCJjre;cVIo?aN_i8s@1M@+1G?_b$j3jeI&Nt#qkeU8xbfPhsvqDN- z-AZH914ivt;5x~h$x2YeT;xoj&ZUEyEma^TSJ8w6NaM|H=*1xFr={eh|DGKfY225se;-eX_+w0*phXwoqzPEe-DgSd)AJ6YpT${>{CEHtzf)|Ms 
zPJR1zuu28jR3J}W1}kUq1IA4BtAK9z?XG%VpmTE@%?yyEBd8^$1{3PluBah+u}+h<$LM*eRp%-Re?r# z@`j$}<#IH*MytDFov8CveM@Y4YPdARm7lfz7)f7=zBzI1N;8v% zBOGas`Xh}cOFb8Fuq(CrBbybSN|b!O_-l}i%yPGo>s>HS9)x-3sDr_hoaN7&;tG|w3BnY)6pv#?XLBHIe!~# zZy^dL)54pDv?V8e6IZ(7)S3EQ!#39Oc+{%`v{pzR=K7mJ*UjA{ca+|x8c;s zg)DTytts^6A$NMfxhom#fOqi_h!&;p!1RLV@1cKAvlENn5Typ=)h2DsLYet6n*`#M z$ml8(T?22+k&NW9%+5_4v|LY4rqQ5Y#`q1f zT&exJ_<7oCV*%5M%`VgGYCKKDxG*w(yz@|KFl2I@b zf77+MQt#Qr8K=)_;2e)C{m|}abFXB~Z4JJ}2PM9xmzGmSVH#Z=O2TU5UsH3Mj14Ed zyB$X>(}?VaCzD>uS?&R&zZqUFGGCKjn86Ze zM`S80Cg<B+JF%yNqlck3MTr_XK4_sqFrp zq1H|_!DM)EOxkmgF%f8!ndb+bY6;e4yvyCLxgfinWX4NjHmmp=jwgRYywGPD|s*))LKcCYQ;+JWC(BSM&giH={NGLWNy=UE=AhFbh64!FVe^Bm(#~ zpiXSg><_Lb73H*>d^o$bo6Ig%!IlxWotod%x?_~R4hQ@QzXPS{S@nZU*UhcbnTIly7;Ft!}6`*;(fS@+4j%Od{qg_ts7t_$p zJ1ST$=S;T)7_TP<^R;s}_)b8P$I15&Gv%jPfG=p=VWg%Co;PDTYO#nTaJrXvemW<* zjwa^bU(SP%L9bF^P2B9<)?aFtdZS(vQKYqz;av1M8zg%@9YX4_hEqJJV^NZ$;XY&c z1a!$dI9UtYXzf00j%sA_cjV;^5?=|$BIUQBWb$0kqp|T=9l^GAS8k@!u@}0nhgtR@ zzaT3gXgfRgJ7IFGHgBW_1Igu1RBDJC18GmZxSt}m8;!V`4;+nVdq{q=D_p_GjYpZR z-*)?(Rac&8EuS?~GC&N(p@HBU$dbkn`v%-z=l}jyeDy*MFbQ z+D4yVgY}Q7-O%%YE4`hJ&!l_*_n&s!vN&bv#w#T62Nb_jtIx7rALDvU??UCOEJIe- zAEV8M%EnvhJnxP#Rspx+(RU~_Qrkbm{|Mic>opc6G`t;^`oKQk4M)KucP`=~wnY2yqQq?sQ3d7AZcybM~<^&yQ{7}3Ai>X#&Fg*i@QRG&;18XFZZ zRsRir=4M@@?#$C;)?%m9pIZ3&3^`65+Uyu#MElQURrYFgHvRh+WF5)P7?5QbITtZw%%d$6x*c{q#J6O=iJ8_j|r%9bR8+6RTDx`F&G1%FOjzX1D(=$o4N zd-6W39PhXIiqC|1Jb71;g)iyyzfkH6vzQ&^I#~kS=w}A2vJB3dAIDcZcK!wSGo$5v zwmTV<6NTVI7N|VQ+^qfh+kQr0_ktp^{m#=vVu~fE%>`%@uk8&!$L}ujm{w{r_snL) z;CWQ5t?hm&p9pNV&~L0yFX%lP0@i{0Gx|QoII+c(+_O!z>^^4mm!V-!cE2G>gN?t5 zAoMmK<)m*OoX08qE{!-#tBDoU5`DJ9v1ESz5Ndy-oeAjO0FOU2#~umF&rvv;&$kye zJQlu*gnSE2w*obeQZ6_2+u=~|LbpPdLuqkN%UZzmWmc<^_qpAl?CaUz&N%%Eo^00B z1QzFdbm$K5WGkymGW+_e>H89NPY&X#`gn)U2oGz3eh7+eH=6cCi*2mTO-7IwAkOLj zD74R7yro%7R%W?hnOJuFaW)Z|vjb31?Ju=COsTr;`xGrr(9e1ptwoKto^=GtAnoQH z=qpxqvp%nb&pF11v%Jr|Y6mS#e!j&<&e|+VVh22;#vS;P9H}?z^A39b11X!z?mh{Y zdbpIGutc!SiT9)6_zj8AU6Sir$VdEa2Kz1+wGW*8v*bIhQP*kj3H109mG6P|Nc9uX 
zH8C|#pefmZpN32GNMyW7`moub>S4QjL;O9A{ATr)2wLl5nFvqG-nfjd$rsY8Ru_BvP7e z5}%Raqwqf-Iy=c>d}(r~l;~rLBsv|u=aRLYPt7My@e6rPn`4SOe>WvQQ6jnBvbH`M zcOD{_*Bg;@dn6M2C(tI-OKbgafM4!me1#tC3c0z5Chq`Cb{P6-w+Btk{@sbb^#X4>b~Iebi1{w`#E^ zUXP)L(T%LI;yE&1jfY`6f|RvG-JG-C&*skXbPa6YRI@pYnFs~d(6=valf`i_yFLVV zLuo?dCq}O8`2QM7dWz-DUet8Dmh6`E@F=l}LjP&3`Cx4gMWN&{ZADwM)1NyLlZ>G` z9qES-iNO2`3s6Vvb(QK5ipWy-YU_YJF{oCc?8_*zT5pSKLr!$g@*9rU^{tQ5Y$O;H zK{~gNv#XF?gkAmpNV&uV+39^U`YrW+Fb&HMj4|eT@tn`;^c?)^Oj;L#H@g?7d2VAIJ`5Hv34~i%MLA>&^9g!!#qwm9U&Z z3YNe=(UEtWAAJS8$#C3&f7z>9ix>0Eoswz$&oufdy0QjEze9t_-t}ZIx!Dg#@8|Ut zACp9^NS=m!@#Yq~oy>yq@Xz{d2W~bt8>)!5-=N|qns&XnrAhh_y)MJKYs^O$vhm^k z7JpabUQHO?h0ZrC{a5_{NWJ)C-3HdC^tpqXNk+T{C|}dt2he{LYQ&GCA}(K8j7&eH z6B(nj=YAr1TA{^YJjn<%heXErHnP9E9{(Q#&qlP$O5j5LnT2x6!IDTgiEDkV zw%^CUZY*Ifyv)AoFOG#oObwtz`RwA+DBi%49&&YPps#dJA$Bp!v|*LX0R zg(-`RiMH?&`u~NjbjAAzwEQaDa4XF?0hhXf=NEd<+4FQVe~kA3;7y`CBr@z&)Ovvg zuOwrQQF^}COQU}F7IiUjQ>nMZ=+H5UCWzbC47ta7q)8Q4kQ>ac+)nt?9@(}yf(JTd3dg|Cer zQ$0`aqKB+cm(rQx`pH>a4f64KxL3EHC{1#ws5#!xXj*vyEgN5~QIq#>HE4g~S#tEn zm%3cR^TxhU@alIl(NYOx8GOpg}{W$`@41>FX&<{KoqdZ{=oitd8o( zfGDfm+~Y|*?}I4u&k{3iC3}B0Zhecp^^K(a{LLz39|_Ez>9X*81AK=omuT}Zd)gdL zF7m8Cp69Opy|}jpBy~}wJdH~Px{KMgKcnBZsM=fYMtb`#{KlEN4pd?!%1_XKe#iSQ zS&`+IdNRf4%s4mgOBdK=HJT?`-EH^uaDCe!q;wHXc?#N%nE+;>3_B$$o zH+P~FN265H;#WoNxK*G^n;#eS$eHjjN$MOJZY-XaCb4gL8d@i_ME)i=U1@Kddz0Lh z!~G=sVos@(xiNRobGlt}XZ{fFJ>&Cuy79VM_A&5(7#umF&gpM592{NL8K_LSI{`d3YdX7{iUqeX_#1mPBgGoo3;_3gE&F7Mpc=CGY z+tuo|Alt{%t>Z{{e0+P+k=_38C!xo&E7e$p+=g6j*4RdkdvQ1s2meJ&E1DB!#eb#o z|4R06mjCD3%{Z2>EGM&Xqt?ql#}j!lm|myXx#Jiw=Fv2)WKS|{$z}S<&TCGDbMxg> z&?gI0qG9&&ehyol70hxael*&(W(OXmiFdGrbJ>=h^5r&Tc`}zfNr{Wo10`1YUF%IE z#v}qpdAw^6jx&r>Ze1DCll?a9;TmM@S^xi`=QhTHdTeIqAgjSM54SHf`&dRI4%b8c zC=(0xG9$6zwg*|frI?2$YjF=xZlX`^*w@Xz&1XfbvX+T_ z(@Ni0!LKPEb*6`jDf@#l>3b5rSYOFmJq+fFI=etG3t8!^>SdpEySgXpskWAC`W;7q zJL}<9x|;cOW?0$z8^^l+KvLJzx=~6z2GYa_j1T_h*J?U?nxtYS5jA9sm!BuXK^F>$X)Vg zxRc%F_|aVNf6l~e;QcK=$LIGm?GDsu{Jd79!$kDhrJW1ev1A}m6y~wyy_<0)Gsuic 
z$B^1DL7Ch($yQ$t=82wmr2jd;->CnbLu8Dtt(9|q8>juuYcuoc3Wv355^C19774~# zXFm_F1#ECTyv>OIjlL(sIT87r>LEUVSp&w0wW$_&l8t3*e9ltOR`PR@{+p~fW^dym z7q9JMp4G&W7tpXd-1nfzn}uwyMzz~~Yfmq$`8<=nE+gTYC*?#lvzyttm-Cja$|r#< z=i1kx&qOW7t9>K-WUUkL&8*ogfgvm2UT|GR+j3)}O(CKC^pp7Ar^7t+nq)tisQ=;K z@50Z-k-N~d>LjRvu{Sa3lYe|;A;+tW(J<#M9nmjdsZX#0t?|CI5?gV#5=v)<5f6hz z?n&(Vcw=-YA-Tu4A2#ji=rppNTlZgkpIKF6Oszo2rsgNv!TM3nf5G5n)@d0F_%Ip_ z(8sUH?^-m-9n%#c&Z&1#(wR6{iPRrI+x_s{L|cZ_(#|yG2GmKc{ab0}MpiQ?OVjW) zvi3CSa;En=YHT9SmAp&zguv7d1v(mO+WH^eOf2|ne*2pv{7zDb+%j%%yd?RfiWxA_fk%SK*9k=Q$ z_hq{J{5P7H8@{J1G2Ga*fqa(SL3oHPT%^p`${q;QcTu#Jl3UR6Y^@DI=S0ytUAwJ8 z`jBzsQ@FH3f$K=|7!W?Bm&8?G2hzd5WyiLLS{JaaStIS%))U%Ftocx zG%@iqe=KBZH+m-0c`G#Ct&cB_1hqgN|FDC=GLa-C;=*$D?9Dn3X?&9ry`M6}V4R!= ziS+dzuB|jWW$$7Lio8s6ZqZ-vhL0hAoqW61njrB;|EO*))ObSwQ%L1B*z{tH27@56 zxZ~lQoH4iSdm`Mskd0<^D72~ph7~M)yrFXQ|9w1(XUa@tRL-R{&c8?2JF$yL8BcS| z=~*zYQU7~<+65nq-8SWw#|au{z;Ga;e29A~Dk1 z*f;2f(=%XpyLJ-8HPP|27Tae&+W}m!!|p5)CYHxGtz`CIM!7a>)I#S6=|<}0UfQ?X z&Msekx) zM2*CO&n=~RKEFzub1QfmTM^5!n>O8vHwWNhBNV8wy{>fmP?SjAtakW%85n1)|2oRY z$KxY($*$nF^r{xAzK0}a=9~C?yGdEjb04O~zlC-Dz7iF!3`%ClG8xwzsg?Wo>$G&X z9^w^rGi^A;*b^VKmMqy0urz>QG8q1fG!JLDGXu$4Lpi1QnSG4$ZZvo{vi>jQ*>6bc zG~>y&Fzlh1A?R_l8f~@KmYn9)JAM%H^vQnzt>Dg{;QQdL5B|yUoo~Fl9W}lnd6D7x z%*BgzQPFD78Wyu&xrKkW7JBP{Jlb5R+;jTatk!xObum110(^!Q<0dkFCJRstk59$P z#;npo#dzAq7}rS6hsa7JZxV6*9@3i=>EyKOWJT~SuKr1X*P=jtlj`fUGfiI()9j~I zqguF&b|%O8geqiDUL8O2*?lF*JS$ z{`@UX)9-`xMABIbMSg)^FXKv1?{9!_)^_EzG*BNUYw&|utK`jVprx$;PS$!Ql;5n6 zKjK=}c1IX{ZY)+_Kdr>F)=EydL>eol^_-x-NP6B-_i8m0tv$Z@iPrwL9<#Q1OFP4r z$ocL7wjw_1@f!Tt|HPp?-lvb?^%e>wb75;Gn!@;FoKK|r#K0WsS^VDu@g@*F>;K0n z(upoq0r_OlZf9@K#E(SjFAb80YA1_g&fYh}bA_i3*_%?uPtLis2Qh|L9RR}IV?LSw zzwG&TE#%z1yYiuIylO5+mD`kArSC5KK9V*aL`J5wOyya=Z)xCL_!b|ID!wNh_7y%o z0HR)?dqYcKgJ!({!Msty+4xTZ%hBq{OY%9Uzn@B83D$Rf8}IqIS}f~(y!3y1pIwC#4~VCVX9vOMMv@XK8PDcD zL=WTjIszZ(8(+R*?arq=x&L(&sY$-^PUbUf=}BL@e>wTcEwS9i%!yM^tA?%Mo2k@G ze$O*14zlXrh!(j=oA`Sr!j-HbDA`zPRvbc57bvXY0 
zmZrwWB@$al5ckl^I5Vd^1_RwmlyGHoC4Stk;BH@ka-5qBo3Z{^u_ zdd|%GbMvkBjawVhKcwM9jmDqS{hsV}z~aMuZA1+cqVqs(CZ~#^U^+iV^&F{FB!w<~2Xi@nrcpi+rsB-*9jy{!A~Q4l({En(R1w zmgqG9!i#a}ccC7hM8mZ(yHB}e(clKqKS-}`HVRZHlhsh{d9#((#hBCpT*=~_v*pe3 zS%y=$NOY>|soJ%t(ERdS7W}6b;)_P$PL~ zl9BcaEj{mDGK}T^+h$wTgi z&S!<1YWV~3UPmk9WBj;Yb3V`mbiLHh+UOnH)RO$}#mmg_pTW7_+Mc6){?{|^#_zWl zxw}PMxz~BRH+>6i8iPFE27_>FCKyMcW_Pl*!JMq3cY8fcWT2ciR7RC|$!Bx$C)0eg zWwnISZgP;~UNkxpc?Cs~ksz9%bmGCIaL z#Is^X@jd674;!lzF{v`LscM7qYH>L3Sk^ zj%RlsVjJG0Q;YQSENb7QtzInT1!$QlK%e`5j+X8)TZ^o;LEm44tvxJ$jw6ZT{$ep_ zOpb~+xcENGJ*hq>A>wE4tL=|%%yW51F^^hbRT z^E@67FQETzbhS6Bj9C8%)k1Fg|5dx6pj5I)=T!f|LT}%}pZ;h%M*G>DK1&-p zWnWfE_+wzsz0#Xt@&I}#o_ZCu%VX#BSnBaO zyoBV$!=NdgTKjZ1?wrG-#zVP{dC4EtNiMG`=-CA~_TYCSk>&j696H~M?mS!e~ zShTej%O78;_i!f>?;7KL1zgz37QI1R5-UAEVE21B7L9Q*)M*t7&^55Ppvr zxiiw+lY#hsuXf)BL3H>4Bf?_;+p{QBjL2CdtYd+zpwq1^N=q}Ba%v|#TVki@+%<6q za^9Diamm~9BS|@)Ty}+j;_LTU=0g_!F;C(F(jGq5ik5S}+YWv$*oB;fzv6TDtCBe+ znF2cKC;r&g(Jxt79@JvipE&_MUaO-)eTWt>WMMMiWXG^Je4irOJ2-podghv4+B z-jmyKAj!;r>?vqjT%Qb&I{LkgKD_7K zD*E;V4&|0_R+mq+(21;hRk6zHX8g_Fh+**Q3d)fntmy4Dw7HEY?|@5AUwe^-%*B3W zjSdA(Wo_RyphQ*Ev195kmRH^DR~Lw#UZ2Zgg|IZF@Mn}@ezzcThUS0^L;fg~{*(HEiN z`?PK-8un5qdz%-N`w>3<(zS(Pw^ONgukWqkejbF@bjPE9}UxuFc}VubJ6fm`pNFxdNMbR zE?uLa!QOoU!;VT;ri;0ARi7QbQ|~txtAMA_tdG(Y)%{4BH6-I}b>r!I07?8vzqx0d z3}nfG_9XdB4)Cl2PQm|J+eSsrvfftH(jHjuS7s-wxl3E^E|hPzbA!>Kz=8^u8jL%d3Uhh%EL5qxN|@CKx0lfJsd`6 zb9(s$c}(1dvYzCmvy8uqBhlag&ftC*r0F9$3-0o?2I@Ct6DQzjQ<54C4kI0jLzH~# zio7$3+h`yRjQhUidoMqQd!K7%dV zuI+Es?~YF|71;Df*{l?{z#{R1Zh=c8w3fofP2kCHVpe_M!!CG-5+y&$)0Y)vqEZH$ zs`yxvfjq0aBk1<~=vNZ+EBWs8tOT-N#iuiAWCikd3L75Ymh1}T4sjy0r;ld%RbR_F zkI(m#Q{8;}-=ClS*GrzY{eQj79pNhqsVUhj=z|^&|5GAs#H@h-`{aLHE%}@n?)z}( zW7wTXV&8&MZV%L#vL3sy3-pZI^R)N7(aEjI?7#9FLp z#}CzeFVN={Dd(<_lhP+Wzne81;n~kw#-VyRib=NCFl4yBhN{sFrQ|d3r=F=lZUts z+mjR1?597AGZk6hQK;RPMkQ`SvbQ&4j{{etk5pu}ax-v(KIf2!kF>biSol19ks0I? 
zPsjSX7(QpCZ5Nou4$emFvbd8OPCT7*%GN{Oc--ce&Diuv?1AUcY$OmtG5frV~r(sl}qHXTS-mw zdi;)j_Js52%EmMLX|N^2ZTx3)|E-H>S9tMfpZC@v?G*gj2qyFun(>v*_n@G(6dKtH9+bbA#N+9#hPb+o19o6j)1QYLSetB=%ChMsw?G zag(U09i%Ft0I&yw27QME9KWolih{-^gU6`y-yBF}E?(Gs#R)SwV^2UN> z2beRLo1~4*&ZfY(ChY(1dt#ke@O}?oC+Ac@Jf47KYqXXb-#Mf#p0f+oOV;|wNcA-& zWE6aIOE1}Y(pF*=B~o&2bjaIoV5kYN>*2H(EIZAK<`*_-vAIVFGV(6C29bmKR_5fi z9+```dR_9iOO4Pll2HSP9zfHaD`yp$)6DEdP9sf;IDaKrGw;f3da*Z){3csr^3lBk z>O|O^i<0p@ZH!8Z9F<&&zfvMG`@g`kHG2D!POT*$p=^4)!K|?zT24c+7jZfB#C_Vy zo?RkKpT|yZq!qo$z;XURO#bJPi3ilGq0jDqzVJNz1-n?J+?C%za}#gqCa^CtT4lUl z4Wq0c-m{iyg)-}L=TY?f(#kRWfRoL267wzhDObb2i4x0M(hc~wntUAy-X+RiV|+@6 z>=mTAJWgazQ$t&`&4)hJN)Ir1_bsPR`@BswqI>CF?q}xI@J8I|LqcbQ?MqsnEMI5P zrkvalRU^^j9>Liwz}Q~hi}l-4%~+;tELeB__6AvQg1%0+lI`Ph_~tzE!2+UJa67j+ z6F(vz?zuaZS#d8|tJvOZ~#CW(P@KRMci zUJFUw8WhjkHgk&+r08xI{zD}fqwGR@zXEMXY5hc!HXxI~AfLOD!9+j088nIWH9)z=s1>i9a^$iN zDh;Bs$wH7^7?}~)!r}EGs6euBFrw$2?q?`}JZfd`|0c*Uq=6@B={Wer7kMi@Mi!&e zZOT0Wr_N-)I+zb6hdIxElJ;jcmiVXrL3KE&rl|E8DlJxKq4&A>+Jsy!B!5{MB=d3& z67&rjDW}xiddujy6^<_|JDM%K0Cb6M`Id6s!B>k;zKi<@vV0@)eHPqvPc`?_I^jb2 zd?Xr{LG?olOI6zcy7;jjETut{8TSIS+u`VWDm<^$ckb@JL5EIIH>Xg4G#kk&=0D9+ z#_8o0pH^vS8U0Rv%{uTO1;4#C;B`8hTa?4d&DCaO&Gef4biadLZVMg^mp{Pe0B=VZ z>xo_L$kVtziMHg9+F>k8PWIwuzEAsO!I&Js^-%|pjG4b9oz0X;UaWY)<|fb}dUOmv z{tebybtT7i?)85R{+v@K0(AxL-{}8I`1=gpn!&f39BJJ>Mu+-ne}P>HZiL_o!3F9R3Myy^K2Xm`+}yoc?{U>=CTS57s!z<`iFs(QH^* z^c(on0lhFGL4(EGG>71F?^Yq|Ch9Xb@ZKY&QeQ?pw0qRCt6S1f=C|8(4{|*kr zNO|sk_8}ej<6~$%${3M-;#FiPo(ug+&?hL9cbmxY73@HKk75ZHkcoxxNqp%G%-Vib zcdMC3JmMZ!B9Ur8U{5pqoUD(`KhI@FZuX`#{93aH`$_VeqV?+Zd$L*0O48O+KZ&Q< zT+L+1xqw|>j`zuf`8KGA;lYcjzMovJhR+mb6A?7`S}z4zpuJSP%gl}Tvo?QWSJvr! 
z9?LpYnLD(*jRj21mdQ#zq*OQZexbHkvu=xguCI^G4L(Bs_gJCi$ehOd{gO`n5$}hy zrHSa>gKgZ6UL(j><^>&Dzs!wWlJ4%l{gX|;m9_d3{VVD#GlyhTOk9XTbfl3dyUkb< z`KC9T+{d1ep#jVFdNWFvw^~S~mnmx1MdSF0CPqvX<#T%cu8}D|&f#YRxOPxFdz&r% zJPYb%6Fa1!$Xt-N#oPGD+@i*OdOr#;o@T?cYVNCk;%QF8yEVp|>>_0Md^Si=CX?|` zxfcw{3Y*yU$-?ASd?cz3lzxwncH?>B7X6zxV1HmmWY z();QE|FIPt(D+k5pQ)XFsFr>3c<%gFo7s6@kFwv8?Ax{Wh!H*Kc=7S=O&)SSmN?d( z&BwCCJi_Sp4-i&@<@Kmg8y^yts7b+x#K@dT3%)UKZ7C>{9M2EHvoo9c0cx+pt9W=O znnVLyyaUBoz-JeGb-giW3p%et<2Im*CuLXI3^Hd(ZlRobCqK>r<4PhaO(VOBn)WIP zKGk+@u&(qwO@GO)b}gNXoIi>#iOX~jDLD@x8YtTqedZcFuLNIw$R9-MNhDzvsN=yK zKh~TSWp*@$<=#W)X3^C6M<$x(N^_|%NqcLy=0Q-F^X@C7_H)|kfx7WLPaK`^=~;emwfsfrP}E2Ns#?h zpVQH%Ke%H}b2}}OiI*8?;-~uwxruMYR`xY_>Em~`M(I{seuy6ACT^>ON9TBw++Sl^ ziA@DR2I(=kjvA1hD`?ena) zlg&xR3O2jb=aP(P*zLqN%b9idJZgb)qxNzi>v8m)PabatacvmomSk4ExdX5aG>Nsg z-+VPOT$d~RjyFGQ{VHd#K%(C5;Yz>yVAQ{LuCf4bD;J$zciO`6S@%#M;tdcwEQxs1|w?|2QX56)K zHG7(MaVPOJ6R+oNvK!mKku6K6sQCZowC^^yr;719S&B|4FQ<{4`}7_w@e0m71hek! z?F#l{D7hI8o?Rq&ogS94N*lq_6NbrKc0Ie(s#|d&Yq!4^)X2^0E8x5Vl<`pd z1|=4G{|=ZR#<%!P=eA|ms^j1|4=o1P(cobGSd4<*s_k`*ksR&uU51D%uAEi;W|UU<+tU4m z)~B=Gt#KpSKyEYJZlqrJIu|N&u(BI{%K808#_I#<;|fpaqs8qg)!wKz4^8H1C-)2L zqT98sPCOlQ8a5E7m+N7#wkFfDoAglLh`t)u$^mZ+NU5gtR>R}i@B^E>@>#s-9J^7y#&u$+Y zc7Wku)y_)(05bR->P^v4b9G1Ha6C{m)Bla%_{Owh5tH>Wv901sToo`C;foR2mubn^`F^Y#pq(%Z9~aK{685=j{VzMRiCK*y43);TRq9E^sZCdN`v zGs6b-=SSc3WCl9-^C@wk2J1bi(uaVdG~GH~y=Pd1U!YoB(qF2;V7&ji$6E>p+i`3L zElFhNoC6<(1~0S8RbUhg`XcJ)1a&+qh{x(+y;UXM4Oq23*0p8vc%U&l`g{P2{ll}I zVl~IJc%c8F@2tFXTAbDAFczmWyepCSyU_1c?UrH7cYx!MEI?ME=c$vuj!-cXrq1vz z>zLqMvTjct;E2lJ0zCxMorQE~r zK4m0GPR7!nr`7{S$&}81OKvO2r#au_34Jnn;^&<2`8nK(ni$tVegC;tU@2B85x;X+ zc?npLQGS4Sc1hXN#V$ZsTAl$6WfPb4KA|0P*D2E60g#P8Iq zT2SpAZ!^xdC;K_?N+z$&p=;2-XhBuhEC0`8Me;x1FXyoA{n@|M$nFoQy4B~zZi(f) znM^fiiINNBGn$?lPq%9I1~T(zvHI8#uc2z@2G$R>VWAe*=sQtYa=YzE|I^cK`dZTZ zv;N;;L_F7Tydh_6XH`*V7*1xFeT~uJWNpQd;VXLa1`IF4|L=_;qrj0FPJeULE#$lr z91>SKbK?ct{RYHu={2Wcxra8Ke0{E5W972vu$5g{;?0F5EIF(u>ZguTH>Z}lfq4Z9 
z|IGUyp5FkDaUf|yPvXPak)_{GZl7f56OVEgs~oARi~^lNow2efh>l0?POMsY?f)JZ zrm{#K3hQ|xt8yL+u0gHG(Wqq2QpUUaT3yKMb}({34el3=AE&dux!L<>(cin?H-_CO zEa(z%OQJ|kHtx?f4oos`JffXMnEAWU+2h;hXAz8&O=y9dCuwO446+_y#R60(4T)>s z2lf+bW;HsW6<{)sBxYdZm^9aVqMT+ABw4^}vCzFh_&7doz~`JDen(QiV=JCS-`=c9 zvYLE=&YP8=?P({YNIwu~b(mH9v|=nsyx&t%w3j(b&bWHR`U8?Xl8riv-HSi%d@|Px zwPqBwjW=Lse~GS{oHf;qW^G|`opB)1n@?7AH~c=cl9+F-y9L%cmwMZ$@hH&-M=#aJ zPVcfZ%uZ5P_nE7{4a#cZA3(xp7qjtrXvcfDixGVhOOw5t&rqzHPi=6Zj&`y_IgKa<-ST ztG(wrlR3@bv)F(kN+oC5R`jh|P#}?4=78jTbiRn3BvNl6N>0+mD#{(#WM-XFV+_7i;pMF*R|U+tBEElMXa=l+3oFa z)XM7ZEHIx6lAN>NLVjl#R&ExE?$PEJ5X?3k3RQA;zK%_ekHpn_tI5hG-`^HgtVa40 zzxGS*wkNlHNqBC^Rb_1|g8N%qm8cEjS2Dk@Cz0_cA4yv>4?2j<#v48Bp2VNu0o(Xe z^hf}G@BGyV=CbC;vtp9;BsjHErP$J0sJXuNGp z|JI?}&&(!9f-6}KMxt;7IE0tkAKywgo-61T|C4SYxt1g?WG!;q__Q9MV`Ux#$qUNI z@1+NdXD2ba?f2rt9|~PQ8@vNpjkWMtTkMsbfQmT_jGuS*6;Jm)5qD#Ib`&$wM2%}l zlCy%Ysg+w%;u$YzUg*Q<6hsk=BxBp|q!cNJ~pF^-`;Rl8{)a}HC%uaUhO6OMC3>Y*gcZp~;7wk_KcKFZawI*3; z3a^S}F%hy8S)c-{$E&`UJ{FRbbI4+HY9^}tckEYuuM(-df;vOt(HH$s1Jl1y<^|TX zJ;|w}|5_v__l;Wnl$CvM4<_p99ypBf=5Djp+%b$UmNNUe+PAyC%Lq}M?YIKWx1dEb zjMV_$m84*sHgjJr>zQdr%MN-^rlj`Zxd*KhPcp%H+0jg!~otR;x$o&<5P9TG|^}F7Ddj)J>VE6l>W)+sF zDT#eunfTx>YkZgy{Z;+!F^fK(-Zu4aGfc7$%enfYT0ek%{gyS!t&OA9S*V5Cr1f%I z6)(t@sFv6Ym%=vjbZ#eq$&L9kc>04XvywJipX=?#^fl|H#8Ey2&h^l$i~m`P{z8v+ zwK@&wv$K$M{|275(fVu7nyV^(Ci-k6quW8c3}lJ4@QS~Wu{F6Lm%F)(@#c0?pL6nU z#>CYqb*2`s#J9F2;6Au+#*OU2cK}5VV|x|7HKXClJyZd$9$<;@(&Hu)dbzioJ!^(8 zOUP13bZAQ(25av_oNDh)VkwXJleOM`R@3pd9RR+Z-{zj{3>3`TWr7kvCx3}o)QG(v zO><&lR~FJf-l&sx&6jY>S^qsnjf2%6?^B{O<=k{2$a3R9>(xeZ&$)B<=w2ibN73lb zAnZ!^6SHX`__7-Qqi=8d&7I#P&Af8DkVr^hi(nXtE^91d1rN5 zAJ$JPnbmn}CMxrIH2qugE|IqbUG@XA#?Mdg?*4QdUGgM!{um?&7w;4OJI}LP4s<0y zWmv1+4{nCPOZD(`bV`p;`JF)4KQ5l-l)kbj*_BAWBT+E1zW;lcU6NBf?+zjp>7^OH z%DzS!a=zJE_HVPP+evWlE+n2>6|~-nq8Fj=+jMF&TQ?K`I@6c|eA6Ns7a7Gehx^>8 zE3{h&Z!#MhfTp+8j2E?^X!d7$pP15jfbRyH@;77Fh1$!kE_cd4*Zwvdx}VicHkHJ6 zO#6#z@jU#juI<`ro*7f_uZ^QMUlk?3(bFnD3@^U@iXH8v{jMxja@)@&YgsGpWm`Am 
zVq^V%r{{Q(eXfQ3jWi;wfI&u*3veW-`d8uGNSgMXp1-y>%UC_uo8KE(tC5Tf zYR@)fc+dB1>Gq+@~H7#sR?vK@8 zX7=06)8ZMNdyqL9tj%97F#LK22XX^C zK9ae+{ud+bAJH=>HaFu;CH*~vp4&XBtNq-Xx~f<+)PlzfbbXuUm`F;K7b}@6Pl9W_ z>1JrDpC0ia3GHD*n6dmy*%5;yBlsFkyX>G-?{yfcmHWeOgCqQ6dRH}R$wey_*L7fED% zwwF(_ZI4$#D=bcaX&D8Mt3hqgDG0Qoz`E1I?K>^I5@hJpTwq2CerM6 zOjY&*xSUPH9*1S_!Y4A;TGBU*OeK0iyq1#{sUv%{P(O{?xUQsPj~?Smc#?YYdX41G zFEGzttEy<%gT{oO?~;$c=rJ4h6ALPtj<b(xDj7}$ z`7XGTxXC$7+CU#0pkd-9f2qIh`88+X{*IoteX0w$d9WIXqT|T-NVNV=&sVD1y@2M= z#f-43df#h(Gk9(WOCmDuASF51o2RE$r2cO#K_xnntYs@eoV|qXU7o1qLEs&MyNMy4 zxLmj4&zHuK*v{-C+>bepJS3}b@)+gYG+G@G zic`T(;2mzg8X`L#moN{7~gEB@gA#|JEOU~zX*(( z7bh!RJUnt{Jq$cw;&Ed5=bXDAEIx(nZtYbBEydP-pnS6O?q_}DA(cB3V{s_= zTCE~*2Bnrd_D1A3fvwqEb;uCnUWVdr`Vi@}O zq=#8Oe^W?%O?(?n!n68FRFVtLwR285Lyym*M{^q35@vm2yBS?C(?VjX*C_aQmN7is zsiD8s`i`|6hrfx3m6gYu!on`45Ao&eU}Txd)(yph&#f7nXe)cLjcHv*irMh&LSjpU zB{@(Mvtc5dXJ;WdNgney+P9Yel{dE=0>kWR-oY|HspPFid*ch;*kE4J6|MH+Ku$4o zVxN_IE%ZG|-6wFiEy#zV`L$$aJDkooR$T(3+&)XLw?o;mM@dQ*6s$oe&m|d?^_8e_ zXOZ(r)F3k0lRkdtQ*ik`ZpZ%D0OgZ~&00bOPF1!SN}i;CZFI;OwgX4s@@A-ZtNS|v zg?FIaC8Y5o-1~sdiY~@R{tO=;Muk(2v8_Rw+X~qY>Y&v`s5nQ@Z~i2*f_MA+yGrsPq1 z9c`xgo9H8>%x#XR!K;nvqsUb~5?Z!c%jZtWU-Xq2+-I`{$*|KDZ;xe@7MiQ%H2iF> zjAHe|(;8&!bo0lyo zW?9GkZNyR?k5{#gFPEsBY~vGfY6<)DyxGoQjX1whs^r}C0Gg27#)%@Gv-N>AJm=*N zm1qI0(kQ!~mj0hl9m!lr5cY!aKvYW(=sS!F_h~EVp;;B40gn=|^ohpmpONvzcuM}D zaz?hCX~yHYB+AyoFfH+1F8Ny@Uh)4){ke3p0<5x{jJH%Nu;gUBC5(EqAorteL(giX zRAL)9qZRXUZ6Ik%2Bss}r+#eykNRu?=J?Sc;aj3!#dErq&u_DCFQI%({3)ZYoRR&V zeEx=>4cpR-na*`gTm-qc$ z%_5#?Bs?E1$*&ZT?t}Ey3}sVyANvsh{qW^y?dlZ)z&qI6=ImDGx#dAu1uMukQ+sN}Z%|G%$>MfnnHC1QExD?2}z z`&wV&VnZex(pDJkrlF$^BK535u@&?+5m4V{Jw9PG zwiu~?O=g#AWq&dIO{?C+X&a#bUJ1TiUPR}Q6WgDn+M|1)BlPRe+ ztzSiA;^TWiESFmmUIf2*3H46a-~Up^eZd&Wa+-gOtlHaC*9hQ3IRE=G(Gjbz!2Ut%20dBakz<(}_QpLQEV zpER1rr|*~Oa3>gEfc-vrxAAm4>cnd{af#Q`#sQwzX2C|G-^-+;oYuP<1vcq5@n{>; z-f>#Yo_TUW#NX%|H8cCEr^jVQTO)BJd(nyYToJ#|!NoyjU?uwH_RVZ_g7_G}M^2K@ zs4HmNvy?qptZ7EGy0}nHFW<12i6QkVO^f%#9W*<$@)7zUrOu-uU8~*OK^wVgje>~= 
z`=K)D`~D3})`i80H(q-%9^M>tJeTQhkkTJ${g48Kx}>wbnm^Oyh45{R zuAiV|H{(n-aJTdOYgEXY{3vC|(8%;Z+=%%Np1)3h%i@1xm?yhJZ+yJL*qYPg3&8$4 ziZ#&xLYO}cw`5*QY_i-R&E8>KRQr%-tY^XF=RaS2Bh~AHbNz56drM8oSYz$Js@n?S%<1n5$|s6$@@NkuJrlKGO*>bkVKr3v*nB7V4)&7O z%wTiMxr|n2hO`G1(@0H4Y4llSJ^MV9;r1!XI}y~KPp7y8_7pG(snuwrl8<=u%5~uCKlFAC0c_b=Rf7h zM@#i8!YUT;a&P~R`uk9~75Pm@*TkP)tLLMAP9)i5$ak`Y<&I>1l2y$NVH@hkLw^qr z<__^xFm2Rs?mwM?dkbm87213kPqujTBALl5c7vII^1W}?N3b}V{2fk8$Dqq~g-y;m zO-8i~S&d6kx2cj3(WlWMiO=T(u*DA}C(rd!_(XJ$r^Aydze*4D(Jr@NFNJGYxNIPC zGrV6$`{#k-IBmthDC5g-QTkyN7{p37W>+VxJspHYS*jz@=3`Qjn9bq91`sSnqb;!d zr*`*(ct2^q9~{XC)`e^&F7_O7Uaj;H?Is7^?t*KpmCqfKpRrO?w6s$zxnndLkA|Rb zeOj8;K_8UNZHC;~42xU&az zEwy|(s^|8_vt&FJIsxBq0#`$LJ&F#q_0-F3$HE_2x2(@1@1^W6o;^(scbU3yF`JrKMc^LFT z$>p@-Y}BfT4#{wtJ+C1o>@WJcp9CfrOYWS#2+mP>cOf~s7lq?N)K5EyDmj`im!h8^ z<7*;aO~cQzWakeoQ%}9V6ndDn_Zu)$BeRxl@lzs!#yYUTk)e8 z8HqRk*(ekZn2yI+q1{Esidy7)46e8F_cK(er%rPBv;gUeTKLxYr&-qQ$(Lbc5`iSq zOLODrc<)=VC|}}A&a5YTx5WH4ei)TdY78DO2X7T}-b&lK4LF>}$K!05Udo`)cDRkV zHpq>~r&x>Rzb!*zGCTi*#VD`ErJyKo>O|0fKjp%0-Gx=I*p1iJELw|`&E$C0~gd}3~ z)#Tz3bWSwo>>1aC+r?z|3DlcOR&s{;0^QmKg8D4j7CQh(>N_ij#GuN$B5Tq&=;CXh zC0b9S73YR^1y;Qw$^_doC~_BQo<@VaKyr%q&qTR+8^>4qpW5o@H{P=cd$P+c`V2GO zO`crK4t(s}izIfuGFg3p0iT}6ova;myZbI&EsvIW`rC<2yoG0@!TCGb?$q12zvRjXQm}9yizoOt{K)#1>9K~W<6C;4+Hf+Q#8>)Lo*?@%qWCH7EXAMc+U|a!WdAwk?b{ufQ}jKz6Sb`sYo_Np=30{N&s^b<%d8{C8D)gf??xlvQZFD+d0j z#!uej|81emR@3rCg2{e+)&jYG`w?Aw z3|JwuvQw>pfc{@3G5&VntJ{}0_e6*7;GRg6a=)X6b{diOWMn@|?FYe7 zOTQnXe9mNJ4dV@%j7lwy1&1oLm@F?eGEHK~_L*_?RV&em8WkpM_Z?Je$tK=H&W}~11-;&YO6@?CHBx=! 
zLUtY#+bt&{@xS^^f61ixE1Da>$IMbP&LvJyPRjZl!B(L6=l(7M$uiF~i_2MIyw9s> zs~fK7o>%q?LPL=M-zrxgh?MWcI@M`JSy;Gw0d&U*bFMjB`OYJW7&USO2XeaG{)|2i(kk2R;{@%4`P=xn@9 zM7JHde>#k2p<_-mN1|2kX~r-8Gnz0O&o0n^R!TEq87cY!RM{yw5w~uD!!|HIPs3-T zZ~VS{f-XMOyZmOQkqDBl$@ta2C%S#2H?2a!_}A{!;_jlqDrOk#3!d&rxgL7|0~@dh ze7O_XizeMfMlK^~$HF)<;3k7&8Hr51-ZmsQ`%1(0ml!y`SgI0iOVfbE;F}$jHTa(R zRei|AA4vaPl2#8?*%z9s?c7RF9PQjV8%%Q3c5aO&vRX;@?c|MZ>gik~+#>xY)5Rj# zWEV3Y*V$$IKu@0SxUB+oVxs-F7+Wx6O~OYvWt{#U1(e47tAuM3_`nyGBj?l?qg#E{JemZ+mw_^N>T0yBMyEexc@yEd z7MUKxk{ydm6Va%f`Al^60~S83lLf4K?jkoZ{#942m(~;EXbdQ_Kk_CR63O{c&^@T7 zw(yP2Mt*Yw84mVDqm>{`oauiTR%?^FaN-xt)!QV06M1@vB%$$4BXRDJh$!>|Lmw7o*fi#iOyRopzX}cY7 zZt_2|?r(tWY;gS>Esv#Lhl28W9M9d|cus9a#dWm#W)LQlVpV;PFErptG#sN;a`z4~ zg1@S64?PYo#(~?kkTr2mpSPl6d+^)~w%m;#2>-^oa1!g$%$PmZtf@38>J|ET0gb3d zUpHGdj?#M*^vOMl(EB!;lo+sY=`sE&xrvvP{^7LkMil)ENE>)kojv?q&o62v^R$QH z_X>KHfnB0tJx9LA7FfhP`~rGa;!RVzSS>bJr#@N#n1r{c=X*%ny)-wmUH)L)ji}#JVbAaiZ&iicj8L2H+=x>c#h|WZ)-Ho8CD~Ik0dj9D|44;iKlv}dgqYewP1Ku zFOT9zP8}}CiPC!bJ?Xrb{{993lf}0YT!z8$F)b&;%5~=5UFmTJ?F}ZCEsPt7SykqS z=RlYrYen|5mBj&8tJTyx(bN0L#yEK1ht5k#-f+@Bo?N_#UO%!q8w#1N>}_SMmy_XA zg=D=wQbI=Pb3@htDVrej1m1VKNIOt&WlTfEo;WCiVsoyYR|I9$r>pcg0jj= zWU-U!XnEMgf8!UZm6cC?c<%vCPLcbNwnO0i1}J(P+sA7?=eUW-HXH{^73=S6Bqr;o zv(WGr8uu*>v%YJDihoeEMuE-W*noj7-XyJl4Eh=Rdl9}nK@uR`+J6~72g4$KOU$m@e9k_@#q2~w zGJQ9#$$B_z*xcLxncAhTe?Ink4+u&Xl6*e;9tPGge6Pd;b<*-cpYv1ltmO1K`y}~^ z|8~vWvZvRQ%{#rI_h!19d1CT}9)a_l&4KEWrbHV`KG?*CN)C{5sF}!M zgGqV(^7i_?nQSlf6R*;D=v@V6$CA~5faGwi`#&g^jKF)%j2_0zsx0LtYRxJvQhZ6r zuqb86}9IMAP_#~X3!F`xg>_CQA9^-d3%7z~S@Cp}R?FehN1i7- zZgMtF*Y{?e--=q(%`p?pas*x{CRAJ_6Y|Q08D~`u-R5am6v#Q^7I??N^!r{nz=pAuK)MtZtUKiyf*xvXDT zvT&J^tPhSg^XV^SBGH#~Q|wK=&b(}iC)XJrPQ#^mSDr(vyO6}+_>DjID}L*1E%%9^ zfX@hXw!~W<;#qQhZ6HyH!!WzniO2e_cIu$@7j&))_;Oy6s8-3vyo`LW1Vu7N#^-gO zXE_NTgR||7DJOW=7yNaN0Ml@yI=R>ln-TcYSB_-s3puh64Dg*bCc$3*>d=PW0 z)Qi37fR4GF-5JDhD^U)<3&7S2&dK*#&p5f!o6K&zd!8u4nGq$P_PH!%X1h6Y?g8!( zXlLdj@$`<@^lptVuvcR%Rmax!!?Iy{fs8?m$FYP(%Qp~Q~4o>gmX 
z?wfo3%ao5~CgyPV*%CW3G`@nhI^L6N_nttp{SDf6aMj`~iu7|41zNMe7 zW@u?OvHbEWyY~agR$VwGpLZmADLXL?=d-(V5f0zPRt=$*%av?I%Ce3>Rf&>wfXwe7 z)5F=YI|6@}DRGU~TWKvnXKOLHQAe^<3s7j5)_URMJK7nd&+M{(gD*LI4$L2sghgcO zdRB1=s#Z7tCH7Gp^i7QK72fRu!FS59M8AKSZTAE7zqEQ1&ddOByi^*ne%WPPhpved zb}rcr=0p8W41=|L>1~v*ubqqV=&xX1PhWd#Yb8AQk&$Ha&28dFtaJQ`rh*{0{@1Af zEBslCa*6ym2sE!1cKNTciGNLJHvc1abGJ0na#!dpYxn^qV41Q5LHBzy5gQr_ukQ0K z(w_(o(XI>0Wp1S$>giBzW#x2^c5(-5uC}g+&5K1}xjpGyg(juyZ$D;&0caM(Cx?T+3et*sJ#p?Ge&)HUJ$R3lWE{PFu4*0@v#W5 z*WhGk1M|(|67zhfx*37u^V(F4pMmXiBVP6+KE<7C>I_Ah+<$4M_ufAD*XAft)kV|C zd@sp3)!uxj2Dp0_C6o1F88~xFoKv4BAY2KDt@|fM-j0VrcHSyq9viI%ua-vd+@%RE;wGKUVhyQ+3*weU`8^b+NFp{x= zY_#>0bI*xjyvXzH4BtTu`oe4y*?iL5V`+YL)!kkg%)U|oc9e;=s0f@v$~hDZ81DoSt~v5O>#!vh4#0QmZMR# ztXWiU$v#=Eoo15Fo#3jfhtI1E4Qk11N?3D znqa>CA*+6|mU72-0=yr9c?Yo7BE>yXB$;4dGW(o|H}M?57_au>z%^m?!| zSLh}FwMT>VAd)fwPIt1iIdz+WE>~zJXE=w0C2Q1@o4Scqo!CT&!6*5a8scHet={jI zNGyau`@7WJlC?x2Z0K`NXtPE+rLZ;M(!!kYy${NquVocd%1;>_yv>RycZ_p?_%3j) z1;OD34}L?2vliV1qROP;J#FUPIg1|JdjKVjt`5H4M|rOtHCeLiOVTno?Uqy z_6M_AiBR?pxZ=ssn>1WR3tsX+>#O=Gk?haw*uIa*ZyBRZa!>vg(fMxnrIeEG(K)AM z192i*hI6*QlMG&oCKLP~YlieFPBkTqqgknV-A22PrhUl{97rF~TWC2R7UM}=YxOC-kmJO zos8a}6q4}+Da#$S#GO8wrf;Apxet__#anUk9d8C1<8y1TEjyKrHQB2lq^;~6_w#H3 z9o}2m+81%6BzIDLm}Y3XA6fg175zxPAGCV~jqT4iWY=vrJ9`le_rBV>F`Tw&+%oc%8+H{zcm=DH8$g-2HYbJgFP_d;%roBC^)@TbAz*Ih=PP=dm0Ek2 zbtl=`=Tl#`PS8vIw{rU_(O%=@67Qck^s*JaIn8*B2A_oz*(bh;6-k8O{b+D7uC?>& z4kJir_4}3X>upYj&e2aiRqLpG4Jm$Hogt|IF%09wF_JAgntW!T^99nBGq3mwW`BDo ze&=3FPLPlH8&A=CtnmtOPiM9EupaYiL?chnWv7$R=X6w@XN0}07_G|Dwtu5hA{jP8 z$z&ov4vd$hcOn)JqAlxL#rTi3^!6X#?e}f3*-me8|K054I~2(7ePUfyCnslvBsZU` z;PTj5SbP#(V|Tu1{EQR7L$(m^SyuLo%t{C@gC3dJkRfU-`8^P*LGg# zxt{$=)T=$bxgIyOzrKx}l;Vah(N1!69>)&nB&{!A)&l(_T5Cc&TB%tV=EG@NqMGH5 zJ=shS(#O>>8-^m?@#T4Rjj!y(sIe48z4i5oUY;=8bcWRea3l^*yyK>L`xpHuYI*Vr zOw-Cqde0twGHN9+*gshPHu!#w)-p%SY`T_j-SPR4Bzy_^ISPcIgJCWTz2og7eC(zC z8QRXB<@lMeD4=)<&h7BCI}Gc2`#6fM)9?MHC4SJg(YBTnEkO1dUbMFpoZW#Rtiy5w 
zlwJ3qKzdKH@7Eqb65C{d6c~xdBUqI9nN0>|m0~WK)l)p7A|tce#M-!>s0UYRDKTp9 z!;5JkYeEB0MT6WCkB3fTT0B4kvs;qeslCYkNKfz6>d`28g+6vG==cjA-3iURpv!9{ zJ-g01%L7>X;|wPW-o|?lc_x8s(FTDE9*D>ES#*j8%Ik{wKe0XRY-l^Eq)o zQU8fJ*OUI`=4wy2;5^*S*+pWZU!lw%7gb&S(DCHzG{Ig?(}C z5xmKb?8FPonMpOHePX}l2JI|;WNnlimxIV=Bqtdck0r&O$jc@p+-5wBZ{aLY+p(LO z^^Zc4objIv*PL@?HTyoUN^FWz(`A&22~f7Gnw zQ&;U=0kXxg9^+|FYvMO}r#3Iv<|q<(5*}oycOkjX$@xv%-d~M+^dd2s-$bbyAc$Y# zk1!j>PBx~;`{?0m7!1>Avh8FQQVoZi>m@m%hojD7B?iEJ5sGDAeI7bYQl>Uc2f^b8 zda@D>xn*)S$v=sGdzj3|U$L?hCxB$LzH*Bo7{pusT{@E+A9b~vXm{}<&R#<->zbD9 zjOe?8_CfSW%6WL&Lh!>ph0rRMO)_M?-urFI*HD0>$6B&_P|b7vK=~{M?Rk_=)Q+>>MVf4AW-s3ql%lBK^po>8_jnpSz z|J^8a9&RMhZO*cf!1Wj4csg3|0JF$?GCFRy!aa~YEob3!yR`v2CMs8M+a@MvO(mOX zXSH|j!Po!>LqWZf<-3}s#a|$q6?6M7ciazT?ViDnHpH z_N;a8Y#)tV@dRzBj~-yHLhCN@t{iA;nUk%uj$Vs1$(EauBReX+(X@>gGrGoetv-D? zUWq9n`$@g$aAzDC+9@4hj(8Mo&}QP0HPOrG%KgehegfwKpvnH=J!o;I{;PVI*cuB+ z)<*RzdOv`rdYOODjCd>}xXW?kUPMxOjKjT_I z80_n(Rk8ADPHuK4AFb$1U9yukax&V)m$xR~=O)}Z{hVU0b3R;?u_w3ga_XAcxbX_k zxomdO5;?G@u|6}GL8$O|E$vzK)eI#|E32%QUL_S-g`Z32Tk0=w_VB+fJx>Ja9=`A8 zw-jr*3_I|qlF14kkEQr1^kma+Db{Zfkb?>!xLo-IQDb)!zl!`{>L<59lgIer0@j+K z`aLWY-UU?U$BxAgZw|a6>mqCeU#pis_O8QEC-lJG=4OI3t z-?NI$-ohA^y;13S7gr(yJ9wXn;8`i}%+g%%|4v0KrMB`nC#ie0~zRb-ESrp7^02 zf&V{%@_HJ6F0B|vHed91A}w4^0_L$hojkjhP0JbV65nTOwF|jj1CCmF+KDV>9hDVk za+ObkU3r$Sr#5qoZV_A6ou(%nXwJprH(ATPA@}LOMbq3k%}vR^dOg+OpV{79%nGhB z{v=PrH)YJ8qhdo^^0O9x2dj44sll?;0O@6*_=xR%7B_O= z<#@WfmbIAyqLcKIxCyQNBr;B4R{9h?n`mC0*jqL9zS#2-#gh}Yco@jCTAk$ii`q@> z%(lj_>_+^quzu0$AJorWcD7bKqS-qnIrn3ddnnNvo8wzNgcE~vnAVatB2k4aq1Qb8 z?WTUB1P;Tic4RI$Oz%S7^Vy6l8Rk4Re!man%@=T5gjR|E_nN=)Ac%zhYV=*mG90ROAJW#D^k$Ya-<!`%TuY!M7$aMCe;}P~2C_e>#UG&+a#nH58cW?Xa_gHjU2-Z{Kx}3F&2kw_>S5e*M zpG@?wVP+M85x|-AtBb)ERtQ`qDs<2!ilgap9--Cn6Vvx0BPQoXUx}3=Uin`~) zdj{UL(ndS&CThmZtjpV4-AcpO(DRz`nhvAf@H);8L-cq>BEebyC} zquHldL6>;qvp`nL$-4+IF44y2>`>0-zk^@)v(8m_6sewt_wl@FiT3d+h;LQ0bEHfQ zaOGtC5q&YmThA2HU-~(1&-Ggv!SdI=AqXoWuYU08j!Vf-{3NZ-?dA@8PMnlw%3S7u)_ZNc4N_1FNn 
zFDu~9De3`ul?e7X;6)k#-}m@LYdy|dv8>p2l^&@Or+SY;CmjH zEpRK@*Dv=xdBKuNDVdZLNhy5Hu3~O!Cr??t_FvOOZjmJ}cDyCBeo18LhDt?}dizwm z)9^MvtWk4m!GkN*NsgbyP~J?2H)#De{jNa&rR+-X=qGzfG6$@sBe_qQU5#PhbzsZp zJH^O}&6lWrJsXxWBQx^M#|MFa82ryir^HCFK}M5@b+o>6=Kj35`k@XEt2pg?=K6_B4V`U<+@7`w!?F z&-?6F=4RO2o-~4gB)>MSK0uM2o#o^+mb|w*Ic1xQe*^Vy^h*9u;$OQFB5UA zpa0oc=?j-chnqn6<4a$r=q?Zp<>3ZgF3MD#xQhM(ZwG%w6{643B+ER_SnS zqW|t`Z@&4)r8KFD`fs4-#p;YejgRrF z6a06okEJj1_dS}9HbehT+Ma@Y%|N#R_1mD!{pfcFTE%mFXK$B#auF*06Srp2 z==dN`CTT73p`t#&199TuhL;(A8-r;EUL2yAQl_F-X0JD!+3!i$zxL*1T)x-3?p~1X zXO?vc8`p>ooQ7&o<5hpq)Fyqo6PnYn{ZModoNn$(Grc_EebxkHw0|-UJs*TG;AHY@ zj6Es2 zR!kQaE2Q|>|L9F6Gqb~C`+N15|Eo+j)Z0P7xuvyQKUv+xAGe&6Wx+AH7|YL8Vk3!; zN6TO`kQ2wOQL`)1hTfMexSKwA0b^q{-r4h6<~|jC9s;xEKg$08U&v4Vl*?%=r)O^# zJtZPsyjo8}o7O0mdw9?3r88TYTe=&yu?oFQtHv^9ZI0Gfs?iJ$;)^4L-*Y5-4PU()cYDtQ{a*NvY}S+ zc~80YHUK19&t~mh@&VpM4=GjK?-T#}KStR|(SQCNpq=gC*ZEYs@w@%W|LcGDDz^V@ zKe^3%_$HFEi{D_K_c@u~UT*vUzp^W1V4o5-@PBYoVm94KbPQDBD*E4WmY}$dh5-mCnDd6VC@K>cnTyla@Ks0fiIa#AvOW7oXAP*nmz)WMnHMp0|Ku@B6P1MY- z%tOK15616;dnp>sX3^p`no%baaFVrUAddb4+`Y9GpRd&{;s_K-oR8c&eMX5EB)#s7i+f&v{#VdoJNhNBk|G%M|OYH-ea5PBb8=q)Gxmh?9{Sw`*7a2;%r2eEk_em#{kDQn$tI9GQ zPQ2=5_4o~p@5PO#=4kQUoUGnm^fq}blT$u-<&xdI7HlpyE6Cl^N4>dQE6L}TlZX$< zU3^~Sne|89&z{4@aG1;XPX%3N<&%m0XJx-84Tr1S1m&jaCHANjIY<`W$Y6Q0+YSvA z?K}H8KNu;#)>C$prsKdYpAOW1@)UfhMAi$57(2;4W2Txp%?Y*R*HT&o=Av-)3w_VU;a#{wt! z;{L3$N=Qt)_&eqdrnNFTjmpZllQH*hGk`>eN#xb!o=wEB9}3$y(9_iXj zy0U7W$cFqCcAZF5B1GS)%mAFt*~s%Gx+*#k0P)i(GzlHDgRvVMp7WWNxSO-yWbW92 zdLI;h*JTBFR3o`lPb}6vYgpNa^kD-y2Z1y^OfIr|Xp^j!W6?%xJ9n*<3n-_-`{QxW znm*T4WBhAND^Eu6+^fjyYG*Bdq;5_2479r+pf=7na?Z!U4OTBVkj&hS{w;bhhwDT7P9BlVNm)muhG{9Uo@fAj)Uq+8%VCdk@QJ_7K&AHm! 
z#;BG%nO`Xt|LQx`x)S^u$qymJiA|sAMXwk&;+54yJM-{=D2a{dK;m>HzUuDyo3qr; zFsWlyOr-r8e&(5JP9^Ed@H!bcyVLrXtm+arIC(8H>!<_n6}0^b)OiV?vg@)teaiiw zFTwt~R*s~#iGY2mda?Vtwf-?qCc63FT6|957aB+3_AF7QZv|8C)qPAdhM>#`@Jg=U z_$770qe-k&@-p3v&Wo)UmZ1CfaOkC-+~)q49m|@fxp#}n&=2T!CtfXPi*w67Cs<3Z zXs)GK7n!+rX9<#1XraF5m>F(`?;WUcH;zmJ^Rwjj1JaO~nmMs=qRyjAMDt$sH@URi zntycEdd@R*a=O_}vA@;87;+TNt_z-=^!&-Q=g{gp|HsppQ#?&{;y)Nu$NGt{Yhrgz z#HXs-tibLktMqT-aT6?#(r)PlDKqYzHtwsXoH6aEeCFgi$xN2sKdD)s%w!EVLq9o% z%ZNr^tT~$8jRLnY+)suw#Ak+r8+Nd| zK8y6`1pgKDw!_((ct@P1)@1GF<%uWj_v9}bThdHU*k4{h|q zy_~RLt4!hrhs!_eFE<|ttGSbUSxGfDip8fl@g{SMe0K3{FdQ~|bEj{W*tBM9t}5Oo z(r$XF=_fplpK(^rIbY66ZQ6VSJcIo&trBl9p5^)WpB-`IqT*YkApQI4&HwT3-@d=w z-v_eoKlz+f_nla(2SIQ)2(q`Z{r$iH?^!&n0HVNkAgp#H&DlxH{?Z@8zXzL}9I8(l z>#`G=s7CWyo_Go6zQBWQMB)b3W%qNx|9W&vG@*DzC0|HpKBtn;J(Rze&3hUbI{K6w z|6SF6jii1~O2?C_PicE^@{rg*PlNe=T6_s^Wc2v~pPr-V_u*RZbTrfJqeg~tMuydF zQ+z{muj2vMF?U`s)nf9SB%bR6{5zd}OE$U7lzfuiNFKGeS~>xZ4g+fsRq&eG}yxePH(hnsPKmR_KY8Bgef%fCaIfHCfQF^8^uomb((fT4fa+$gN z40gSbzXy}#>=Wl~^KyDOoz-}_knY4JeAif#=(?F{Udy)KK)#aCCjL-~;d35*h8J_R z3sCJveYZ!CCE7X|3^U>T6`e}7j|DK;)$?QUF0;rBPip$&ZC>)=T4*xLryjaX2j?lv6sF-{OchJvgj4wGcd(Nk&W(9r7 z!D@JS1xMD)hoE2skgg{`53}Ei7@D}x_krzEyq*i<_))h5V{)Oyhw5dtPNd7d)Lvv< znq}7A8-DTTd=f<#>p9sMhximv)ideZb#!z%>efZ+j9R(dy%-+JIIx>B<6d%kuD7}S zx<3g{6p;z$E#uT*OM@G-dGEmbXYl0KR`UBKrex@Ln@^b^Y+^;S0z3hhAL?}^3je63 zxuht!MiS3!IGS`*dlc@+pKdidnGe=Ar0xv$E`#YBC93G7y%u*zqi;}iKD|hkou;HL z8Ju!vvH@j&3-|bw$G`dpn)na3vg1F3HBN+?b4Xgeh)OfB#BRC=$G`AgOkeerf1^I? 
z=al7RmU1_hTRbUyY);HuhI|u(%rZ4_4Fb~_?SQj1XhC7LIQwv@GV&?K5 zyjPO1Wcy6Kp%o-#S0#@k*~yA_A*kk~QSz!JL;L{W78hf8&cCiiu|)bg1Ex9qTt|N1 zBrS=$)|SlWeEvtYN?hP%qPUv<-_e-Vve4PbN!MaLJR4o=sGadUh&%`n3Ah!I)F!74+~fSS-f9 z*?K(H=ortE#1Bs%<}cMugs6!H4{|3wHt$n>SU>`*;KL0pYa;5ruSU+M9>9xdP$u*~ zP;1F#l5_vFz`8*zjaZeN%-HYNMs8_$DrUuNz%fT$0*Vg>+l4s!2I)8w zRXXZ@9+~Tf&+(~!T+7KF6p#4C5K26XQ)$s^W$$M3SJ0sL{%7sGH)%{X*C(|19lCTt zyM;jNFK!JAN4A!-N<(0ABEGz9#Ebvjw!%6m9`-z%x0>v4qbF^& zGL@WlQ?EWL{Fn{OO~McL7zx`1{=}I}6qKpTW>q-b*x3md&+6|x?PrfI@#F^h{;pcP zlIui9Zcirf@;Ba(mBH7CF6N|lh_%qf$SMSztVzx3aGO0 z_c}S*f&T8tZgrw_^U))>L2o2S{nR?djOljr{D^Ox^>Z-I%00rIhFoPfe290I&4ovx z%Pb@M5IqBxf^ASj8{UwKd*E>yO6u{-~T8)<&PF_|y&W2Woe{{@=u#74-iT zcJnw`HLy1Lg+{hx9qSu6+Z!+Hp>Ic+K7#7&K%Y1i8})Ilr-^SoicHVJ#Q}PaPA7uh zIUvpn#brL7NxIszg^Tg<7nq*H5+z1VPG1(1+Lu|=DWrZpdRHKy6O1>H>7#+U-V-SJ zLNTw-I%OBFkEWMHtWFEpmY`cQGCn=lrtV(d} zh*nP)v;P4&`j*+)R<&|3{2<)Us->Dz@m6fAir&gKoSJFyWB7rM;oyu7^CH7A|GLttpCvz1^%M$&! z!N06gc2Fm=U(fdcR-;5Sc<+cZhikJU%GE@vtTo~zc|3@R(T(IFi8spMNqFMt4$;er z!Da#Hkh#Q0s*F#GA^bgB9EASy#cd7uMCaYWe#C$FO~2ou$d2aS zpBL++-Cf%Yx^#12h&J_RW)2s`ZfS@M1$c^5^ z$zDp8%8v1A7rYF+{jwy;<;*zUxedCmW}M(9`Q`Y;$bpj92&k3X?^@$X!s^aggPGpSln!?O34doF$G z%VB6bnGH&t$(OW)nj2}x1xDzOxN*BuCmR>y?YXml{t44$FNlxUSUMcLoD-wYbZRI) z%!elHIU2eZGu#&x7Fs;Fzt>w`LK?l9}XhpF)@4G2$hrNp9CAdgwFk#{=4Y zfh0X=oS0V3o~9YQ*Rvjzlt?t*{b>GXSWi_mKA|1K@f`{LqZ+NqUB=SfjGsnxSK|4h zM%Wuje9q{qpvuk0lV{+$tNQbeW9N{8e(b|6pZl}2-;vPVsc6Yg=A0(G>gUjmc%aS4 zlLq8B_Xz5vO>&L)RPQroa~^rS-?W$!W0Drn1jl3O(-+0Flaw2z9pLr?IxVE%qs_6t z#lytpTd4JQb}_ zeJxC~i*+e1cSeB`xPJ!Bb5A%c^;z1v3K!R+)^G9uYNawxld3sC6j%0C2l%!SYdc7x!HBlesW~-N5P%UWAg+_?^ zerKz9C;Vy{(Q|()UTry9-4CVeYJVBcYN%8d5Zs`Zan_1i;U!w*6}XxxJ=5TJtWSOD z+BtaD5hdE9OcQVq!oR6z8K;A10tz?9nQK^yYNT?x*?D~TlEeHvw)-y9`zz|?9`B2| zus6E)Bcq86QooRzdFWSJTU+!!f|S-EnbY)D*9;>23h~40#LCByt*w@lbs~K~k3u78 z(p-3-i7LmT!*#62-%%=lyVXIObztHFoq#Wyf2~1}wIJ$YMBLzM?mWDVdJ_w}^#Rjk zpi7jgMdU1Vkt$lAN&+^ib2|HwJLVP9@(Gq_D=x3mXJ&+bNaFL}EHeMeY2y|Yn?)z? 
z@a8Hdw!)>IZ=rC1Sl&${8lrkK(A34pqi9ZiP%~#w*3=`koYmAB1r6JgzvkMFSH^R= zmN+z<*~S*6`hNV0S6h7VJL@wR^)B@$le5IK+8-U_J-UGnOh)CSl=%b%Xe=4Nb`&L{6*kL-Tdy;coI%hn&YNI1<&^4%)jYbQyU#ie_hZJzA~g zZOJ)E6*WhLv@^=*rq~jr^MmAU6R6?`z5%tbG^5CwbRzVg;B#ia7va+PU|3IvJ^|%u znz5VOQ}s4ltH0v-U+B#!kXJ>A8)*AOC|M6hy5iLh#^L=Za+1i%3%!}7_EP@!$!nW?WwpT5 zLEl4snn>ysBc=l^exq(D(t0VKcn0?;dGjt=J{5g-B^w{1QSJ+0>+c`P!Aw10OzYdx zl41T{Zu}TWqECbMxA>H^vYYX91-i{(oz|ks6+YdpPV&S=rv6H&-$0QsiZQ3Ec1{QN zeZGyL+qv~{Fbi~r-`qienHDB~_6RbRn>y1#Qi>^e4=OIDsimE`+r0mf-KnajkBt(G za5MW>-Sqi}CyA(ASMP}+7Vp&Qcz<_c4YLzIjEr7_?=Pa$dKkv{a}&5{!X)d3Lp+&I z_OAdzJ(8UWtdG*A3UvAuxXrfa$lmcvbBFz5dal($8i=jwZc5`4J2PhkrP*0E|91!d4o0z4Soio3W*0cH-43GX zw2@h1cKCnwX@mOlC45f{Hx)C>j!Z#ci$!&#}8nM zYO7ySTQ`&Xv|JHQpI75q@)t{WgPsSY$&Afk2wiSjl>Tstxyv=HE=91RvI<6 zofx3od-(6I?^vw7_Jm6!!KVLktRyMBz;d}({|4p>=x*gEX9JEKEM-O=uR6z21J3H8TDfSCIcN)EmGuPFL@J zZ9Z&dEX9dx&8B~=xA>VPzR0oUAs#mqQ8$^U*Uwu_^t$&yLYhaLtENVz*4v?FJ8ZZ4V-i{|#8O@TLINmZDRp;YRZi+u>az+PaJ=hL23jc)7Ft{aPUB#d7}mB(1l4Xi_M`dz`Ybp_KgXx0*x`;+Z4po(X} zK$?&SO++}w5JX(J4^TY6qU*Dmu!i&XN@-ry=u|Dx9oosZf zq{VpqPa%QV;p_h1$KUBDobN?Ga--@R(7dF?gQ)WbO69!rTR1dF(Zp{L#irwB=56t+ zNp86(;Xcxv`ZRR_swLKJFPN_c<7DrDATjX_OHAz(^wkpG8p0$ec<119a?~VpL*i9+ z#)Yox#Y-~2n8UPmg}K5=dYl-ZnSsq`j~ByxALH;^QW&qUE46Sdc(#!6taKA2uOI0- z!RI5bLgJTlBzvD-%Wy3FJHhx!_$7u$q9p#2J?n`o*?aA&-CK<1J?KPkx&GO2{49Q} z@9YWOkACZ6d^pau1I1ia$t{`3(6gPslRKoYQTj4A;R~4eK!MXyyCLd6qtD#o`wF#( zvw?F_xC)LW7C?8{ey`u0KW#vPn)>UfpZG%kfR@>}{y=}7y?a;r-un2XvEuE5p7C^T zuXgqpnt0b3#g?E<)~4B2i(g=3<$a-*#B{BP12=)@P_pwcP$e6?qxOthmbIk-#j>rgkj;!^t<*v8B6c=L^T_9h?BY!)u->33O) z2MTEF;AC|2EquzF;S3seES<<*mlYsM)QSW3aG7yFxneTgx?i8A*e}D__{@$o<4x?^ zH;jil0nGaJW#x}2L9=jvI=Off=Mqg~Hf|k4f*0aQYnr-A?PR*i-PG*-FCZ=1`AX#8 zYw`DKc*R31E3}a)x|&>`g;JMjxv`nhcyK3z=&zuw;oBBe9c^T3fNHtbv#a@4JbRA< zVe)J=RI;(wRu?m=hUzZC`z~arCoXl@`glG57DZRGZJ|u|V6J8Rb1T0Ccs`+HISu@h zd|l3-Bud8#B&j|+|IND(@F+ZPP4iA9X{Uf`1AEb!G;cwfMERJ7io4RQ9Z71=jazFy zC%9jNw1dAt(~)xMIShraWNBvWyGOBlc>s;pqv#-|8fYu=Q;q{qOOUK>d4m!CSCm>q 
z8WXEBSz=DdiN+u*k6O7Un6tR-4U9sQtP?)NmBgxP1%lgIfDPVuRO&=}@QQiOV}&f- zK|6BCdLat*(`s_hJ#IEx4W{wR?+T-r^iv<#kHY=jk$s0&{GO#LhrWxoURB%imAaIc zUf}o{1|M3s0;pKLYI0~K!~yEn1C9eq0lTzjEZGMZ-=unkm8Nyj-#v_`K)x=hZf z>}xGXkq@l^Y8STjXmD3T{rJH*(M~(xj|Oi$cCxnbx5ND^G&@uqgFVX%B5RGrbV#(0 z%$wu2RL_WaPBE`d?t_a_wH$do2-a)#(?*>#TDsKVSxV-VV-(mDQ{%Y5&oAp2i7{n8(4CV;YCU`MX8*bTw2hm#HX5gz5UkE7_H?uXKOi~ zD1$3I75_^#`-><4^C#=1|Nb;9)BFVXJqmc^7ngmbL7*$kf(d8Ml3yQ}veKPLdiN!zFKHI}lMD~&*- z%+}(yaFS8vE>>btVO5v0)tl*XMwpF$ma#aA!jY5n{>m*ytyhc@BaAHFaWi8|JUr&$ zQD=OQH$n|O&3^F!eLTdPpF=LD;(emG^fLmyZM18x-77(Go$>JpJnX8*=SGfrt0rGn zE%nX=WhHBg_@U2$QD@Q;D?U)azx97uLD8&g-i3J^?axHzGRFGOO0P7xxD3X}(V;}P ze**+pvJ(e*m%MVvdw#9=yO5gsEJWh!XAM6RzvjUxxA(K_-V)A3P-l^TX43c9X-<6g z)}YH|dm~q}CiB%x6#C=@TF>&$B&ofMzQ%i++fUut-b8b{j0L$vTREjlMCRN~@2`Ac z(*L;{mGEjYn@|`0y=hq@GOcB&zfu2LlH3X=9Z>!R`nds&@xZ>oe$E2rE<*dns_2cQ z*}>|JYVnaD1c$|N$j!5O?!+5zsn7S&>aTG6ayprHWP5a%5zvS}%jfL!=XU*SH28rY z#~f?ODvRC*oydswOwV`E=wJ zv#Z>LUQD*SfIQK`zky5QrTnPv)!;l%*7wN41|p--qD+ zg+0pL0vQkL(oXxuO1_34t#P#>?LGo8HlgcR`WcOO&yu-`IG<=}ufleNH;Kc!#+$66 zzaV3M;IYaG^`kwHUqO?&L4!aR`X!e}H}pE*x*@qpd$Sb-SjMOD_9r#61N|L|+o-RV zpv`=#pHHWw?*pDCAJtT);;;P;P0Ssww#s)#y>?{uK{QQ#k>%{dN8reb+y*$jO%fX` zk)4=s`uxt=w~Nm!Q8ZB$CL34(0`sh2`?5%fXs4$(d&2%(Q13_*6WuYshKcjH#JoMR z*SpfiFU)r)dl#yWQ!jA~zh$#N#JU)Awxh^;fne{tnss*nk)9)Lv#!^%mIWF30o6 zuFd3hh@MAzlFSD4aDQjzb4HdZEPo>pqs_vO^FO{$xluI$j7?yW`F7TdSCWp(xSux-0lc>_5CE!1*;xr z4qw2fEL^tWcp?uaYU`2SHNb;wQ8;m`j#91?N?)Z`BG@E*Y&;F_L+#|I`G!;_L*r(n zYR)_tq2NpARd?b*Gqbqvpcum@=G-`Uf4_rsq7!|DA{V1zO;UFyP9(}?b=)6JLc)>c z+?izVG>U}OMuD?Q<9)?m#7^*-OS_L!GI`&x*XwLh=U!}VKt`x_%JgJmHt8#?w?ufk z60W&Nbf|Av!YMwBxh2(zR6d0MQ#~CF`k^E*`8cmJ+uM__odL=&rX1i(x`@TPkgm0N@sR7&DtnC>_@B79n6(+dmlZ#P0!+kKh&%!aqcfuVg?yojLwNy zkkv-!%(sIq5h4Dpy?oyd{9{0ud3gL{zM^@D;B0xl{=z!#fHt`elt=}~qy6Ea?1b;} zs`{niW}^L-Vx_kR%jKZ{oBz#WlBl4!qs|rD&aIZL6>;nL8>Q+h)GXm8;|B4xx)_%s>jdoC;BhlICFJ=46 z&HCKnsZ1KSfoChJyhxu9vd|S-?s!b)+#zQ+XOrVfYTu}>_>&!`jqDSZqVDD9$os5q zAkFNzthwm{#>i*!_YqGX^)3+#DuF+9Gc)FDp 
zD?@fJ0zu+Zgf1TxpAPaiH-~nonWYF1S!Wzwl(+)NlT9IK40~8{WPkH!@^A!wdkzd` zz+1{)mVWY6j*K3TcKJNT?}_v`{LL<8p5-(pCvE@#pYnS6-%6c}He-xiiCGxA+!HNt zMA@7fVaAPF-?1$oC78X5L@wj4in2)Lp=xWCP8g#3#5n#A7eY|~K z+ntS>58`U9-akoleHN&;cZu!3pqN4ZnMNh|buvb7qRUU<{Bm;tDXDMCT8yAo4O#F0 zEZ@^~Y=}|sN!Dv$edXqKKNdK#RoX0ib_C0{JK1iay>?)E0iCZ2G<&mg#6Z3J|1rT!75F0;)AC{f4Ql)aA30Zui4Sx7gtE58k& zGCR+`?cAnoftQ1gS?xW|ZJ=ewjq~-@S>JEqd(OQQn?0k@b#!j4RuYF{va#he)S9l; zO!(%;!ECVR&coHp#`E|vwX-V9Uh!moub|zH&?C{A6JzLM(B@r3WqaZ02PEf5{l0*^ zXQ6B&d)>yi=(hv;OQ3oduWqWG6mw z@$q^D4_mUN@es+`)oI4cwc5&>u$OX`P;8X3Egs;>QQny>%ty=kHTNV>2h!Z!sL##) zN9n{Yw&!yt+go?{@nkEF$<6LWx=nuF@2pO*K=a1hyo#RnC8^ne|3;Y(tn|xDuQYcb z=KWIbW*(YVRw4|%50>ZQegh7C0;*lnzb$KemCr3a`C83xu+N!CJjEJ z?$}(d#IGcLjW!Z1BGC|@H-2Qs|B?P?z$DW4629cNd}3j*AY-SKnM8AmUsr2M{$ew4-*T<*aaJiLC9{{^4tZQbVcDmt{6N$6@pF;B9 zApP?}k-1|(vb2;WWEWr)Jx+YTY9L8gh$cAl1^(WxjpNW^oH^;sbT6k>e^xp%v$A76 z5^l-Rwn0mYuC^rg1Z(tUwBDxW_zNbR;wV)4owAvCKc?Jz zwGt02cR#)bPZK{M;b!b@yp`iwHyb?!&`v$0*HOjlE%CRGR4!4){s5jJ16_0WeG$9ctC)v( z$G4U+zJoMGI%=T!Utt`XPJI34FrA8q@8j7r`ckcs>#pW%o$%rS&{TzEJNR5m*H2Qj z6}*qb^$YQAn32C4{O@3k29V%nKKK@Oc85Xyb-&T_5hNxz#d>%bKhmCb`bA^bT=2a_ zcajtATAyRzKE%VRAZr2l6G-Z0HaPxUi5)itA6~%U9Wc`4vIt~>FOtUU2>N7EbL!KqeNS3hF`ho z_^me`L0<_JgY-NDzR{zK{?906DPD_-+SOAp*OL7B(j{(OH}6M~hgwFEozSX2Dj#Nb z-cj$j`<6&yJFvycRo4veXVbWy;kY*{jq|yI9&;iwM~#xl(|+hRg#;=k9Locrg4a#a^ASlObC-x#M ziS?|1DT8iip%;>%9qC6_m>>8%n|z$6|08Hz?vjj!Q*H#5qP}GpI%|+bD9dhh?lNSr zINo35@n>IfB~cv2^yAqJH+_&;Hk^co!vRcm*R`Zs{KcmeNYDI$b#bU!`i0 z-NCfxN#7H}@^V`B1Gs-wFJ}R{FBMy{L-B2h*7G!3tKakgaI(?LxSe}2mGo7Wd<`~k zbi%is5{)5y_rkEb-;>b!BJ~?7w-cC_z$bB#{-o{F+NK6O6aT1Lu)4>y`gA+b<|uiO z*;aXWcM!YKOv_nm^`&{4l_zpRX4#2@5ST*A?LUb;a+7ak)J#8>{OqZPbA4-7K%Ex; zx1Y#Ee%>$I{?8I6PpCG<+r&!mb8l%M1IUy5=~QLhXwIR?)@@cu_5!xZ25WA8KK z>_-k~u?AO}e?91IY6WFlR$V#G8K0wD3qc~ROZ#e!ux%$|Mb-vN#2=Y0V6`hCc9n}3Nog1sI z^H{ta+2)}jNYs|UvJk)VEa&&h-hCNr-Aqb`c=i+6J_g-*qs0h3yPWO4kaQ)gYO;IP zN1aa#O}hfMP6PQJ+HGUZ=?VY#M)d4F9s%ct`W(f^^ng*Ku3bp(GtXWPpXlpCa3xk_ zEtt_J~)b+A4TAfc5iA 
zbkoKwIM~M+*B9LDXv8#a&NpWFH-kJ~|H&2AwXm=EgZ@x3W@Yjo_^#DPe6MpeVJR+8 z_hzW^G!cRC#{b+GNQRH8ddkjg4;sAM_ouDG#r!*C1=`NO=0(Pn53E4<2lX~IdKV@B;#p#1WTsF9T*(HPczaiaYA?8!^7PK48MzUZwQYTNAaS1d#@~6`%06oHeSe2K zH9)vtyOq&y4tz?n6I*~}9E*||_phSXo#^u;I#mPD>7FkD`&N?p0B94t>t(Q>ggVPm zaRGP|{Vyxb`0>;>v)zhYi7{{{sTq#4UFpm~GSbNV%vBR{`_vF?Dk=M?j$WH304 zB}gW==A^JX>Rdo(8>0NA!a_~<=}tH;U|n+lHUY+OSOsUz^(2~gLdE?-{t>xLB-};* zF7hPtJ$h?5k#|4SVs2P8($|q_7TPCv(T8Mlfw#E@me}PDl}pr*SHKv5tfO%836hdE z##TBw#FP8bsyB+Bh$dNIPsP)$;Svonv+?(|*~thKXr^ho1G?rUxW5@uZ_@HIdEece z)}C%99cPlN&1`E8(w|65yW;N^N}T|j14!jr;JDMsk{sH}ppdo*=_xskWC*4lt6)+rI)s*t{9y>18l zSoUP6IKkU~pxp+S1|)5bdBm0E}P z@ib})k`MIyJnc_BzM=Fnr_IaIdbsvx(x2S4_yIk$C$ogEgj-qf9FKx)*zDNueaO;K zQa6$IZw1}aw0IpY`Cg5W@Z&7)?`75+Z=d*-ykP!vftgq{(oqL=$7p{o8JVfAXO*qM z^2Nt7-XiaVrxtlhe4SEenaRe#^=NY+dL%dES9;4mr{tHq&lnfYd;e0HA4umY=;%F3X* zzvr5Eqxa0iI&ErR5lzDMx$G7-B+i%V@awoaAcR2wWgA1iM$7pv< zQ6gD>Hu_do*|K_iRoO?tnOSlCrfU?l*Rtq-F?dQnoK=jCGf3ZHHZi-@nb(FM$$Zlc z#F=R)?_utoy-6=iyW?4@#8bH zMb~e%m55rW6f2A|tX%2*s0`_xOkxI;+8^j;IZ*V}=FUaCxjnE$G5^mB;CZ9fPuhQz zWc>x^LyEcc4)iP8lCP#?4;Sm(oaOu-gjb>D9=LT=!Gm{5Ud|SB2fi|C>+5OhUe7h2 z@8~D=FHhD(fkeK_na1(vgv(JhYogGo{J-YwIkU<4_=VLjBrBg2d#$Fw@he+}niu*O zZVkrca9~GmrCjNLNJ*;l{lETXExP?lX+4r}<+Yc{ZY3|O-;l99%_&3f_0M4^=lI;* zxbrg!Z{>3(zmcK*l&~v(&flzk6XhXq(&jxVn{&q86s*9yWyPCFaZiCY>+zC=WUccC zyLyLN#R@It{JSUVON6{+Mrq2<#7}>{mb$So@hWNLQ&uzCy-)s{`$_1Nt;q4^oL(li|DgLOrDY4b3X}FX7pz~SAaj=d*eC~Q> z|D`RN+7HYhnV~g6|GoA50BwDYh01yDRQ)Bk&#TrzZ_}c?lxYHvI(kfm#m=llZbH2a zx7^+BhM$cKTl0Z3`)MIFyhP_6!1jI2ik(WSbO+<>9NZj+>WSRbQk}Et!9WyR3x;^`y@}uBK;9lKnRO3<^KjVb9{(9WU#(tp z`fb!#MSZU%!ykeqajULpkE*d8$-$5~0ZnPu^}gkP?`UJrIR8Iae~7ooYCTaTtLv!= zOcMnwr!jqeUWS58$z^ZakQjJP(B>bY?1~oGDDx|tCrWd&OdM%cJ`QK&VT5u6Z+<^N;3VmhjshHLM=9TS&qhuE1kWBXY}?C z_^z=(atsKLMEOSQWPk2Stu7{ypX>J-l!_NpV#ZE{cd`bx18@AVXTtbKmNPpG(aDnw zEA$zfKB0%|ApP7=6O^li*LBgjgWeJe>aL>i2R!+Q_NRjBS9aidvebPQF{HfY)NFMH_Wr;OdvnV$~ z?U^JcnFfA?x}VXaVQ6=cv2!8(zXSR8wEqv}d=nV1CL_y8V0N+kvbp0??r_>VMXRT2 
zDQ5^RVEGjIUuJtwp*1Vt}?~xhzP>uxexVp4^ko$W>!?*Rn^8Sg$H*_!&7`N-JJbGP*X% zw~qLD6N&j)t51;Fb!a=4>@{L-%dtlHkn)K};5XH|PumZJ?lb-5j=}wSwuj4Xp95i;-So_RzGrcAyXBvxDlvJpi|F=0MAzCrd)gCzp=rLy54|GV<^1_c-!Cnm zj`6+^T9$4sOwrcUp6`Xi@d;T@v&)mi8;vVzBXO*<|9%!c#`vEIQo-;|B@#RKRTlm) zV2)=~?&)@dd-ldZ)kbEj-L$x`KH_jYHko?@~*v-@5ki;cERb~$Yanx69i6n!M_Z&~^ftP?S7CnLbE1;OGyOX*4AD*aR_VBCWZxS#GvE|LYPG(S&?_zuOdEqC?P=4cT7p;*)9G-Z1$ z|J(ogEmkgYuYl{%DciItUyfD!l-+&W{3OwUh8i(Dk*~vynrl4i%PwWNB)J8Wi~ebL z@P6a;FgkUZn$N({-wL$1E$e?7%{@h#$HAYtN|k7Fd!JtS|8I05x3T_UJgTUN zeOb?;M)cgBSZR!F%=VW9%kNpaNa0g#d1dw`=TCe1l+*h!SfkOb*l}bbzT_AC9>0Y@ z;&o2gd(yz<%19owH0~S(8Hq=k z)9CnP2is2MuCbBg3)=oWmisjLXTSSYGHQNb%;zWKYa-%a43lZ}qdLqdXz4XHikHVWwk<2*wMMVaxOkINozb)lZCy+k zQg#uj+iUZEwQDK8j9zx7GfS{I6Y@#*+dy@&m@LZKIoow=cN6<8A=G`n^~z_6>*@s)}GC&t}CPqXTL zi|ilIx=d#Q>YEw&QtlzoZ)1zrvY5lPcm{}jsgtr>w!ZlDmbU-y-FzIIK?_&XoF3@-0Y1g^dU7$d znQny0Ey+?O))#z!7_|GK!OluAgmWVM)@Bj1Gj=Zc_6FZ6K39h219Y?_T2%&VBRCC! zTRc4<1=|#Im0S|p>HQ~*c`crHM8DiB`%>u!C@{qcoUDIu>ft85dsOS=l{+3p-Qj$R zxj{12FCdHY!#UlUnA832?Iv>Q(MF-YwfQr-I#WN5^p;c7EqF2#T|dD8%noR%(@Z5*V zeDgx~>%yX3DOynSNzBn}O?5J7&U__tNz0=89G^1B`IHTe|9IvAx5F>+#rI^4zn`{j zYD7=DYUXg_m2$?U39F%OGx`q#c?zMC~~=4H7tzKJcr$asGZn#?Y6NL<*};MvQ1B6~<# z2W4&;o;C9+Ssu@Z+X;m)N@OKJf7RaEcvkwh2d%7x(rwA;@hnzTw)+LMjvDMjW^8X3 z<$95$#1y&-ck^@(nulXce7?x@o7s}=1An1>GAO3)WlCjt`FWp@@q0ia0fX6wKYO0N z`f$G4|NXnWXU%;Jt+En0%in{+n%_JLg>O`TJLGwineo4W{&P0Ey`8j~Opb&9qg=Q= zSWEF`4-C28@$U$w|86fQ1X(>aBj<^~v|N8p|FxA{Ee>QI>J;PSk!)jpRI=}N42zjDU?Zy6@_UmK zIjiZy#$*n0t)FCuTc`aGJga2 zL(ve}i$>Gs#A=L(@(b|4g|5}7hw+sdVnvnRrG|QV&AY@rT?pr$aUc=a zkF}Orh=bj={}HQphHuI5kaPb(c(xwZ+JGe5n>&L2JsASl`?PQciVuMO(cu0DHP;(i z6LmHj5}(3@V z%+Yo&v`^N9M3*Yd5^ts(Ia}In2GGRVbS7zucm7QMZZkI2pi#+2eL4ww9u!v)`{G8x&Z&5CeK z!~cbO1q2A3LRgFR^O0_ zPAGB_O7_CZwQ!xSGCX*=Kly_L(HYnieK;lVRnz5`F5 zpmkN1D9Z-q?EE>&hP@|;utnnkJX9Y3etjD)qx(BDvVgmHO2m~Zx`G%MN#Ov#&-T+`dn zna@G3#4}G0r0k^4rW0x9Y0&WaNdq{(eGMP)1q6QAqZ(2$Y<5ToK&*vH_ zmVKnmp_juwdy~1L*#uO{P_=_~NcJ8}5nXNr!TEF`bF-cPMX6?}d63%MQ0qPI#;f`| 
z<&H$tM3FxTZ)X&|{+0FVfllHUlIq;^iv%Ql+8OLfX@*+Uy7oQwg6B;}pgr`HlY_H; zORlz1Feg}LN#dW_mBhcy`SxI=+IaJe+%?P|bv&S(`8%ih{IXB6Hl?Wc?~&UZY1Eve z#mui?@+m)=qg5nfR~7WD$ToK(T|2>NFbfm9Qvr_Z%~e%`7q==mR{>4sk=a^mwumKfc_enr9pv<;kb|POhv4xSAUk zxy{rBbh)RsfaN&`opSf}X?mHMzwufe$%1!A)jD)9)vt#nw}2KUGVe0><}DWQ z95Qhih-14?VVSaXotW_P>i7hn@vmCS`kcu+C35#y+UUSm^w3W7f+k~qJeDs|FFQT= z(wQY}SJsxRw6FtQlHV|%bt~b1yk|#}nC@!7rXV z&j(jMRxc+ai?wq;x&6xMHi2FI7**o$`?VTd!7+_39s=HaW=y#sIKk+%mwDzt*ye|e z8P^)L$Qa!Pb>d%hAI(|8)_;b&cPXE7=~p;TLYvI7GD?odgSI4R3iuOqj?X=tvgBO$JjayBhQ}(4@L|PpT99 zx&vuQT)1&4HOB1aVPoE6wKp2^6X|bZLE(+0dY-mhf`6`2E&fzvP$4^Ym#_+#X!|M_ z;|1f)&1%l@J+aUdziSRkCu;I1-kuJh+%8K-lq=|BEt=fY-^Word0Lx$I<@HODdS0U zi62J)XW?Nbl5`|~WN&ne=dZzPP(j5`D0I9hIU{+^?Bi3qn7y9dN~^6@;{5GjQ1)09 zo<}#@!z=zDiR0J`&5nY1qNA^%vyuBb`1dNAsB4@b3hK8|p(AL<;AuVY8yepeJ#T~> z3u*bcc-fjfBo0mXNvjyqU!?)rM{5m|tS*zUW)=(a8BI*~f|gpIjtj@pt7Cmj{K0-o zlxE>g(d!_ZJQPjpdKXWKczjPM8IPglMA}=WXCK(UzCC#}_`?4B$6!YgbMu+*Nr4N~C#?IUi);gX%<5LGxv#OX!=KgXUl&?UC zZc{pUBG=NKabzlMmc(Pc4Ggorn~5&B`F<+8#fv9*dyYf#6>x5XNA;A6wnz8+7#%jD z^C4t+49L6Eg6t|!A{mKv(_Nca!uO~`7W)>`bUPd>qW5{MK^I)lzR}HkIFTkLI^0q% z*GAK5*3;^J=1EU5p2pH;R7)meseWGk5m6Ll<}mivN!q}CG8Zxg-E9_v69 z9;n3Ybm=GkpQrp)+J2Rs&%vL>I%;pe@wFa1li0d6@+ozC;q|V#m^_MOJ-wA>&P}Sy z&SwVW@Jte4hel5&SM~AyYqY(OHmpFC4tmdt<-s&G(UiupnTZfwo4jN{;wo=`L&El? 
zt5xCJ5Z%uJ^))nb1l#>7`O9o9-cs=j9YL$M;?O>JFxwa}b~B54p9MIG^i4;f_}kyC zr>gY$RlU>&Wi#JO`$_S9YzFeoi|%E6{|2^rC}l_UCUD1ZZH%5jgX3pDzhYff!TU3d z9jL?MSCLdFujXyaK8}CynFZa*jtwSN%W*Vbof(UBmwg2+;`h~z)u}~xUd88(&O?lA zhrqv${%89ff6l{E;P>8TU+{K+<71jpI`g;EZdFPh0lLi763b#xflFyuDqeX9={s|Q z+mlp{A`csj);8-g zcl+X5*;+59C~E6a<~W}#_@9;gIDf19_8fjBHeU_YOFX-(#a#R?G*0x%=5*p@Z3L=B z)EhvC_d?s8-X4T^&D4p%ZlZl8D@`n7%4Rkd3jJO?&D1Z;24wd!@dl$;r9H$`=;8|8 zzlUw_2!~tALG}%Ec9>kG7x_FGOxZ)tOz2mwXI_|j@JrfSPHRr{CeL%yom1x2U#`@f zp1);olv!BjU4Lhv0$KcMlX)Q?mAN~YUAOQlw-9qzFT9U5?5V^&G|B9-Oz|5zOSI^e z&CjDg@A-efZGW2or5)4%d{Xiw*bA4N6?3(`%RRdIW)Jo`vEE9Ol=)|R{NH~D|40Av z_s!1l0MZtD%T2xI1^&&noy=4Njdq!_)KqI1dEX~_P+iPW#L@hb#B?LN zs8qZzJ(LnD70ryogIr4y6>>RD7cnO3LXGOElqQ!mG7{?l|9u33t`S_T~N(9h;jx+1(_oY)w{cP2fki1d97+}=g)&9O0;fCKI8 zXVB=);bdVny4g2KbLaI>294(};*ViZ3}!rh3l>LUZ+uN2@AQ~T4<;eeJazONYJ8D; zJ_LWBIPwN8_i5D!&y9e!h#riGhDPz-ODpG+az5BLO2Fxq_!Bj`^VH}7`QR~!?SJ_=D@T8>o?#yS-JDl2< zz)jxaVI=tT!P+0;pY!PdpD4E-cvDcm8+G3lYMV;hM0jyNKE03W(=*h38d851Joh;e z?LaDcTIdz^7s6vpp#-TDbNCaO2)FcN=u4H^#Nw$cTL&JbT9J*HCvlHF<+r zC1}(GE7i#5t$CL)GFl)>2U4rP!(mW#IkmZuWikA;3$5!)c?)cYVMygqDYYNIugbXa zMrtMeN!Sw0Y5OQRO@OmyVB%(a*%k?NJ!5?yIi975y?82KuY`w&0nIz`$XoP!0>74F zXEX(}D=4Am^cTi!8*pczM$-F}sLObIXTrt$Hs7PfG_bC>=~v{tl;6f(*apswOkADO zwU&DH;d;Bw0Jy}NS$!xogS^wgZ$H{wizN0Ge_3cb3;uS8bzLaflDbZ&?|%nx-k#PR zOlX07XR56>qSSWus^0q&Qu47P|3FLbpKbxIT*Y?RuIt>+jh*l1Q$}6)cX0DP5I3T< z_lB*fSNqTkKLGDiEVB)8?+#$NfKT^*EF}5E^r?us`J|}0pS~An&79rD8ay?Vq_Ac^W zK(CsDNoU5c#d(5l9&Nj0&>I%l0=-&U%OcIEYld0|dVR_U`=|#t&SYvjh>WWSCTEi4 zK1OMIQjL79ZCeg`lR1kb9j#*I6(;d@boZp=ndq6(AI%NZ)DUy!I)kVEUB_UVWi}i!OHfo%yMwL zG3EO4TWj(=cyuK77#%v&sXC)$6L5 zNY4qB)7tMukL|ay=DmT`og0>n)~oiV|Nh^vGEmXF^+m_r`U~Af>8G#i0RsKcNq(7X zRJGXJf3E*XC>J5IAKsGVeAEcyof!Mtw4N8zqn!gCoD07ME{ox|XbPq(5a2BVKXlemrFAHmH#kqU1kiyoz(Z)s%-HP%9#yUOBT71yawp#8;2 z9PdV(1b3Z8+eS-wmgf#Z_k8X~yL^KjaF5pI(Ecj0@&Z&opSmA}C%m`KGwE6dtC4p% zVMA=?)7Zgf;Gikg`W@rSXlm-mUtxDRw|)(oT$>W#f(!2;_ui9v@ahW6{fIRDlF|N8 
z;CYJCH5*=>0~E&T^v;+|fY{aAL0GA2r1=%})*booW^aV9az!?mvE*9VbJUxSG#`l$ z`WWf5i#i%1+aCskop7&vA-2+om$A8TVf|IlcRG1{UWU=)DE1JVzg2qs`j1Js%yC z2Zr5$HkKa#3J!P@*|?Y*UPaF|p|@hyXuTf*{j=x>qZm&H%X{IVZyC=<#XW$p;tg{R$HHNlJQmV;airWYqtOp1(r7lacc7rg9JdQ)ofsdff^434yT? z&Y|t+>Gg}0F}jj*hbmF_2sYV9DEen4+G_e{Y`2>9$+fO{BI9~$Y7UR?r2OJA9zDb5 z^OxX>JAvRzWc6eyS(SPZB13K?pOL0Z;JmNFv%6VW!VNdjJEK&650-`_1w7mFHf22F zz5#r8g;L(f=qzO^w0atDX-pgLWXT0zjnT4N8h63}OM!AQv_27jxgB0wN}0FOCMUwd zufbQ}(!X<{U{fIM#VGO2viGd5fZn&msdv!oo%Bf?y(92;qP$Uj6ct2S?L*{RoUBuy^!p4?~K3VxcEoa}VkFg1^t8jj`&!=HEzQ*W>ixVCH$g zS3--0!9F$ef%{6IhezvD!>wSw9@sP@w-)Z{z^q>N%!BjBuB2+6Pa*%wv^y63KLxif z!8Y9&=+~0b`75NyIZ${5+}#K&88!JVdgRUn*G)fwpDu$QuaV~IoXtSClo56-<7gq0 z{|%%|7WHa54+7pv^u`-EdSM3`%fMKT+emu}9DfY8&Y+(k!e=|c=;L6!fRvBl3gY@&yl$b_|V(bEA3w;teuL+~k^BjAr*XZF%Y~vgG zx0)V!OWH+@&Ln)(2$);Y<1WB!l*$#*L5+R}wR)kLA!Hu4r{mAGvz5R|)wV$33-+5p|t(!|}tU)yTk3olSX7nYHiAJ$4 z19aMZDe!w6c7-vbA7>JKTlF6~UpD)%){%zV*Jo1Ev#jm+QC_z_P2so4}YhbeiXnGAcZUyZ~D7 zM#68SX3vzn^CX@tQHQssw4V65<4Zr07#M_%h}Oon!0Rff&-?Oh9KYNHuFTPDaHZQ7 zc-O^SArG}8jsm5#q{BdZD!pp~mJ(1vZQhp__4Zzmym$tToQvf3j7NDmG(lQZXt?k-_^m8GFCjfCr@s_lzOuQ4zl>pyzb#w^uK0O;7n8Q1smpcQ%x`JFIp9$X+*J4ErN}Kdy!T`rIc`9jxB_35I$MLq%}6k# z3#Y+N2QY6GWqko@S!d>+`CN6z_JC@ZONBihwdDf`h; zuY#XD;NyKrz|OR>7)g6ukgVQ4{yS1##q5q$Hm+?ycxNX$&SrK|iyqv>NMDV-8^_4& zPm4yqdK|ilm0OY9ZIC2-Zk7Y>A^4*+FxQ6;<0<(kaJ~QvJcn+57T7)m7sf0|T;vc}HbN_wd$lF@A*xWa$maEUxn~{-ELaEc> z5@$!QEH5Q2r0_!AaoWd&1qtdlC}wJTU9+IA_5*y=m<*-`k<`%k+H>aJt81Fj&$$e2w0G zghc!kx?>I){{Y-I;)!QW>R_!s3D0?F`#NOqTzIJ|n)VuSv4s5N&|F^ufpf?K@N*IT zuaDyfaP%7}V?8=jeXjf-a7`GGsdoik0wLONpSTRN*jr$8@Y`yGaD@SrjFAocNXw| z02H4>xhpBx5zNnrZjT@#AEk_|!;|Qb(S~QyPuumRs5Xf=ExPlsBUGLb{?<^N(OW(9 zW5ku^{H_(?D?#tyfxbN%ym&Ikdl4>yYF_~FA!LVs_I*&t*1RjN5zo9MZ8iU10@tT9 zE-q%YJOQtM2;|R@_DA4;5EzW@@g|(|0;6I#_;=Ox3aEcGW9Tfna4miB2sS)<+ZaCa z7QcP;y)|5M9yn1d4WM=G#onxC_2;I zl|WIC@t=()bv9hof}X$5uL7W%zvfC*h0_7_;7l zpnt@u_|C<~(W-Gfjgn|=PVKKj;9U=hyB}+i_Bv1-$#N_Z^`*sLq*o`+bWxuqjx0eZwh`Z 
z%V^eXW&94;%(QP@OZ=3lb)d&7$o~2CX&L1`4KN&x$SeAPT2k*AMw1??aqyCNG+zfK z1*GY7(H|bQo$X|N210IK_Ele!XCWE_+n8WEx}*CRc&CW*_$BEH`d3ViJ-|XeXw{3~ zTj7W_SR4}Oah^f_nz7)0a~EQri+k4t)4>Oz)#JIyj@VEso@G&cF;LwH6h=$Y`=a;n zBl4)4Jizgqswvm=4Gy|+0TYPbYw>(ORu4ULcecGqKkmp+TX zv?^nCqVl0{nVoKWM6^kdz!%%VM^A8c87WJVHCnYPB;+)DqJ8R~8E=Aa#h8ASr=J1Q z0c3a%+%z0M@eJA*Sg5@iMJ^})&m_7dy9FGzl3CIhNV{*4u+Jht zHZmUn3;yt~4)3{{4A)=EXH!PuhtTy6#@kEeR;$)Pj?PA-R|6l$dpw!X5AYlr-Ru>{ zgj&{HaE+$YhCIf3xQSXj!+V}Uy^A`vYt-W_sqK7p&@AfRjtr2_1CS$gku|H3y$9*- z5MUk+9wxz;KVd=iK)>!n4mX2i24Eqm(H1abzl5*OW8`>q%H6c>$*K3ypnG`g9q-;d z=L(!VvTmWZ!OU2%p`GX8*}3%T25OJDg-!wQu8Q`<8m~`p+>w17IR}A-ozyjpy3PQ$ zfpB(hB=coZ!BayEsp)#;-~ygL2W&>SZVmo#orF0dP;OcJN*D| zo}y3g&+)9?0!H2|;7lzwo&I&CM{}X#Ti~oFno9qcG3_RTnIqtL3a!3JjVs`xGr{jN z;PIxLZ1QU%z66B5(4(FXyNEtriVVnNT)MN&n1K(`L-($?rv>i=I|Rg+@XLDnH1kMHmXC->(ZYFOD?hQ?vz&*8S_fGobTGw+xet=v{vE4e- zV|T>70Y1B;XA;1%fK<;IYE!KOhO$61moZ?ZnigQT0?_XT7A=T&=L-z`y&zVKv6(3cMNKqy0V< zeFSLlrj7f6>=JC4hp+<5L0flto`tn_3iawma}ML#Hsf?Z1ymKmg{xG?F&RO=&!{60 zNWTGN&FI%7)G!}Qr7Zn+ZOl7MJ)ippIC&Lor80RY(4Kp3I`i~xdN7Cnc>3J%sYEuWK&LkBHDSN4MLo&=~5hxLG~tI=Dd|0olT^6Z(-(NHB#n}?A5YKQss zJ%?Vpwo!og_XZyIhdzoJTV0}Hvr{>;8Um?1A%+;*8_uor?bHAQCgTr zYr9En0KU{TW$4X1YUu!XxSp5|cepRao63iVw!AOs&&ag|5dHw9@s`cC=n(A(?Fy~E z1Z}!|ts1y-t;JF24JOXDM>6W`BgxLf;yaVPC-Hnd*clD%`VjX*fiX~KH4sPt#USYa zIk2{;+(9ft_fOed_r++J^?_pdf~|41`96JEMsJOs-X8urNI#OK?LvkW(~{nZ{$R(G ztJ}d-5ovms+&dxEt~-j8_SCCZH&RU&JgP))3I>NmJ$DRvR`hM;q-$Fb0_%l5?F-Zm zp#3U(?QFObwYygR190eh)H8T3=|&Fu7z`LkX+0x#6XSms7QAOI`%*TWdZn82UkcD$ zTBzD@dRCglgKOxGHm$qnU01UQt&t{w1>XH3m;X)GwrEDnbD@lOTiKABMXj-nUe5l3 zv(lvK_k4h!C;9D3WV!8lPm%lEc7>9b&b(qvnSa_*(Q1vKrn_=jopgOdWf>>#^2>+b z8M{diFMaE4idjgu&Gz9IB;lL=-}zab?8P<=(y|cFYsQ*rg9WI z4u#VAIG!V}iTu3=?%2gB{9Dj>ze9G6fH%K_){~KP-c;f}jn9CoousyfKfNo)J^zy! 
z9c|EB{g8vpDgOrWOh$5UL<(FJr1?%@><8BxiP$*9v(QMcDBj9wI6yt;AUg*_*C=V{ z&=)O^vq|d=N54r9_uFg=vVLcY||!abhYpwps+i+{=rzNPcJ-YP>DXdbLRnQ=nWZHLZ1`C=ugNuqmlOp zhhNjy0ywA_?Vb%hM}ffzw_Tx5Z={4$!+lxKlox~T<YYQ)Mvw4Jh4JGqM@MdC+`E=-bVE;OzQA+6o$j#x8*-%psup3ZWPv^J2XQx_Q6XF_ zHWKneat>rPErxT>pr`KOejDmtM?Hg>JDd#%Tmf&r3g0vZ9@jaHQ|3v*djh}q1uor1L?;d z@RRFQx07m=zs2zX2w-19+xy_D6xe78yko%LOnO+Ko?cF0^fQk`7HDHEgEJZdg;83( z_5KO)VziHsfZV(3ra|TFu}j3*NGSX=xJ<%z*TF+C(=+}170}m<7%9ezJ&pWDNJ9JJ zjw)|S?*$wcfZ!5Z*B;0R&Kc-3?^$^XsJsEb88Fso3`|0HxtqB_R(_QsYJN zLS!CJ-c&-s8lSmtF?i6jgA89dM(GduTL&G89asd3= z6z(e|T?@?J;f3ffqkm;nUN2=1wQG+}#Mha(SXQ$p!*FE%o4wQ=w-w zq~@BSx%AR(q0I!)Y$8vZ(pq-=fx=x+p73;k)EKnG5^z=oZjH@h1P>|N66usgsvdxr zt~fZ?9>f2^aN;6T-07rV($|?wse6zZ-bd0YXi#H|EC@XG!J4mQD1a<8LK=_I}W3e1+XCL=hP<5^Fhx!z)wx`x1^{oyIc zd0=<~zrKX#+o7Pn*Jg1&?Eq3{MVKW|q@5R$HzmMdf%LoJyN^RJuLZ~D!9X!}tb(r! zDNzMJeux@xhGy>1pA>3!uU3pwT%IxE?kc0BHKRuDk-Fq`XS#NjbJe%`<=*^!Fr;N& z8LB=?D>Yvv{%!-#*^1Rx|ZhINONWO zFIh74a?gD9B4tv3`RxuVU-^`Gj^HhfnH=$KLKYk%SNZaW7a`C}>CPyV&*k=ie`!}` zQjNKt`uU%*YCmSdVP!+QYlH6Q+7C88oA3QH4S3x^T3_hC5h=Hi{LPUVE07jV;0ai;=LM~vJ#2#KHBzka(lw+I=J;PQp}TFPs4F2V-HJ zg8Z_cYiLEg=4(Tlx`qF%83&KR z*f19KYH(uIkbd;TNYi&v!vRXa7Hqgy$V~hs_kvY#wmJt5TF5y3lo8$^ES^lSUjX;-GZH;He<8Zh6?#u4 z=+RexHbZWDw(JsQ-%MKi26&sp1;0W+y#`mEOD%I4g(uSg7ofdoL|=e9F9Ss`H1u|W zuaT+VIB_qn|2H(RMM{dk8M(q4qbDiH)53XRqz*DduU0J}@D!XU)vf~~_Zoe{lcuzA zH7%!+sGd{x)-hL2y@~5B?2ww+xJFAjn>KExWM#Pf8S4Fl{0re`BicK+d;)&)hGTDI z>%)ln3R-C;o`j@%7J7^ZvsZ%c5#)6LnsU7^wdw_QjqD!q`Z^donZ7NA7Ed$ol2A=8 z;r*k=6LL*m-@$C2c{T~69uc;zd4cQJW~AWOXoAd9-|L!)e{+6girhepSHnNe!EY{F?R_|W1T)Zb)Nvj*@l}*Fig#z~UJL|!9;Sea zIgHR>q34X+y^NF%$ zKSY-_heOSoA8d{^v>3>!@&ujUjc<6C!_(;dFe1N^ray*Pw!l&O*c?5e)nR0yCqs-u z@*oy#H#lktE$HQN4s#lizZ`6h6X4Qav~VdB&D)Q9LzjtgmTQD+IOF#>gO58y$7g`s z^~ME^7yImezZcV{=aZ9QW^#aSAbm)N9(0FZT2}fwv`M0#Z4HJB_}7lw#f>p!wNbo5 zZ3}$T96jWGwj-Qk#IHhl;yKdXb)z+KKKZJ^SO26AZy1=u?7)~$WoTzRxEVo;=h&(t zu}8pR5717JphqVHMQiA-*D6{h^6ot7t#-DK+@ML^9Xx~@3gD|g^t1?#Fc&BelOsVL 
zv+$PPNM3I)Ffy5Ns8Q9H*T9wT@YVa^$vw4AA4ZC+H+S;ZqtTP^t-)rig+x@x4Fk9C z)_3>wslkSDP073bJ}2c~@>D|-de%$*?RiM;qayg&bD`P5-ww&L2B@ks%D#Zco{Mfu z-?cW@BBiFoO-Ctv1c`PxzjC0_YtU^Bl$YyIp%q82v)HygNl>~SBhQ-zs=#A4N&g43 zZZuNvAyTy(4lr(wDbSu@uQ3kh@wpSu>H#*k!6%+o{F3sXnyVf@M<7#NnR7pVE1>=f zs+PgBaoy@v{(0gua>`aDg;Gnav4Gm5&Q8<1D}K94HM&+7V=?Zf9usnu0ZZo88>olT zbvrGBO2|>^&=45Cl_RctX#vH0|MyQJ%RJSl%KX;S=t#{OTVy|Nrl>jcwbU!ksW(xG zVnmN=P%q=#Db14&h93(yk#Lu$`%4+vozYzz%G+LzfXirwZi5ev8y0WqFn0r_gsXcE zp+-||iOrPo_8M2Ub|Q)XhD>M;-lDJOE1u1OzVGtg1eq}now0|fPaqxK1JH;O>K#bV z{GLXydnhTF<)~fA%FLnzQ*c-~rEooP~~7|7t%KAz#wee_g=$Q%J23(6=8! zjjfD^m%&bZAolFP_LUm%dgQS?Y+M0zr%o66>Jj+92hZJCdNEw;F1dwhoNj1|6XE6O z(2`BTrXG!WL)8dm#%o|jZ8ip+)dypG3UiSG714X|0O9YDqVBhT3YfByZk_oYKrO#Q z4!SeoTxhc%IvTCmmgZp5xE}K~dZQwJdI?!{E;h;l@UjC;Xj>c&awd)btp&X9$m;;E zjFb5^xbI5oJ#bwUV80less{^czX^2IF1r95VK%+p&M4Ea+zNlp1>+Ynnl}RFRkZXV zqy7nWwRSq{!8A{UJ8 z-<{gersRTP2aF9|Q4f1-87-@$_5=-cB|WbOcdr8CK}d_*aKMvjt0O?Y0W0oqxI!FV z0Q5$1_r`#C;om7>b02;4Ec|Bh9JOozz!OGO*v2#W^3;buo`hP3)-&#fbq?U&7W=6i z{52K6I|qtwf&Qz({#;-_!gxDOfAj}U00-yuyB{<#>d95e)_gd4AP_YVJU$!y#}l5$ ze(OOmYf?_H!^Lpr<@B^a-b}5nE5J%`a8nE3GopMipqdKDHb=%~^S>K8w*vRM)Uyh> zJr6h%nskK=-siV1XmOMSC)>z*542o?1er?OkziGffo{WrtSfp|Pl{)$YB3Jl!oh_| z)PvAnKZ`s6mI8?ucZBsS%Da~}0j->i8~sMT;;P|nxMB-q(bM<2@Ql0gvVqo!ngbZq z&7l7%ayg>3?8@@$K;OLg^fPii%-TwvvA8FGD;AsanobSlXB4GIfW0T-UDqtWdinK(djqU- z4d&3oM08V(P#*Vqbce2Oz;g$rrBMn~P$f;x7xG&_N>gxNo}Rnb-JhE5wc5eRr)7~# z`bV@)+;_Yce2Iw@;qjqhU0B?8b2C)P0_(3bj*>t!g8sB8#Tl==*c(IdI9qlExkix# zr>y5YiyG8h#bCt!-9w;XWuB-LwVDd3`7Hi>^6OP}_gmn82Yu-Y&vylTF&@(=^tuPq zt`)i7VzhZ1sUCs*z+4Wa=@XuG30Uh!4?L%TKL0(dwv@K#LlNgat&k8U!0{|kjK_H| z|K3B}EQF>zkjF>i2JO!>Xr!+6(^V*a37Z)ii)pO{oEnC$IUN zqE)hJZ5nl`S&h`;n-qb3H6t{#P4!>PNaSq~64p+J2sr^lP`vx23QMNai~{LXyd^7DT^6QwWxK$(~Po<;(t1D(cE>uBiW4xOCP<21aP$tyQ{ z@9{5v9GSJDxK^bt+XHD-i*I`qU(p`T{PVjledz?&+(9lEwS%+X=vozx?hIu-G&?@G zboMUden`8faEo_HdS{Y*^z~JA!3y{T)b(VfpXRe0nD~JDx?n-S z4qtl<$s~C9bo59Icz!eE&$F&)@T)v!J!d!|$a#0m`~{73C;pyKsbe9nl*fK(j!boL 
z^xJU6b=2UAXZIz$DqoxSa>887?{mrdD&?O*=I9~yrr3|*dU@3d841c9VZuS4wB2~bbH`8Z{?do23^&$NtPE6_&PUhYr6t;g!=2Q=Cs#stu2@U)k9gy*}z zf&cO4kzfDe1X_2W%q;r$J9?Q2R;_!+_S2F#zxAh7eYi|ZE1o`zHbV#69R;^E0e2mk zp=|^D1;D?YcHBRj#Dj7bvUM|7r)w?VH~uE0JxOieZ9j+7&3JjLXA1U5#Q?=cv{XRb z&OvX2FK2|=fwjz}oHBbZPcDWl^~7}J{|fBs=E#asaMhDw!kfH2vsw}Ow^5ffy}|Ir zZZPr?{OFES^|4yq6T$i$mcw7Ok+L&090X6iGB{&t5aih!#Rmc=aiEejdj9BJS#(2=Aa0H&&p+lJI7 z40>Bis4ofjoCQ~7EWA%Cy$!K;cYh3_Eq5@uU&fOldMQq!U*?+!e4~N6F;;dl7|iH_ z=*>4GxijkZ)Ubx%_AxS-eboeibEvt|`gt6yA!NieG2-P{j&yx(ILwY##loblcf z{yqi#rGRudaH#v%@HE~JRg9hT7GrifeP0GAYO{O3dmoe=g{prSf%KDc^;-_77ici-m0&%a^hw*=n0^sqvpN0OXzf2%uiESUpO>7S|zOxjQL z7~_wUrvxl#QOl{cslDKOM+v`NS#-6c6+QHyo)32h7x6ELG6$%o0LrMrle|0eS%ccw zQ`chhOSO!SK3;E&do9~YgLpbB{x1EL0+G{P#gXQj+^&d}MWY*G>I<|-3Wz)LPlz+G z>|nnLRoq$Pjt!r=|GU((i2q}m44hiiRmm0iti)&5uHUC4Ju zH}h#+0{23dg&KO#yoY=^()=4_;STuk*T|*XaN3(Z-Gt|$37nHe4?PPP4j`=$9CrjA zdOIavL@Vxr&PS2I|G=I(NJ?3xM?c^=7s>4{du{Qzlw+)P#m6=tPU=IA2jCJtWXiF+ zSSg<8ETN4(j8bp4U5&I}jeOfgPWLPxptTP0;~T(p9-Q(tb$K^t2`~uXjR8(?>J{QP zwC{6o>TU%u{uR#H2%imSYW2#39hWb?+JX5@7q>8^psA@b(88~g z4_m0?UVeQ@=~vNz?w9eldiUB)K{Cz*mhE8AozfrC+gV_+4VuWfjVr)JKJ&5ku@fwVpWO?oLK{Y+#>Uvk!i@&lo)Yf(Mu z!4*NP-$?H^B15&%7GW2D0A?@30(7VB2|#imkX}NL)6mmbVfQr;(A*9e%m)7-@_#op zokd&5H*)Xk8Z^!>Y|ss~HXZtSC+T45zX17VZ`@CzsISzG5*%@+@5(ubw0XA>!Sj)(_2>#@jDMW5p(<0^B&&9 zttZ*?-rwxqBmPzs`4&sI+?o94p^*Cr`-ZaKX4U|yq*VytsUTJe8tl;X`ZAhr&Sd(n|WUPUHj)UnNu|JSq^Q) zUwO#F_*-8_8+5pSC4D;Xvo>O6R}QS10hSm&sE^F*S4F zU5fhLRU&7#M~k#YlX^m=Z-BWPy7mK}{ulh%0z0q}snC{DQ6CO@18L(OylbGlx4n8= z;CZB)w=W+?7wy2_J^=T%4?NWw+4%w5e+^@34Km!(^bHzm3Gy?}Ugx3>YBIB0$xP({ zspp3Dxk&hp=n>Dyxzp-VaQshX!HbmM#~kD!67C?{H=ogJL?h#t#(GwxM|aVpw#qIv z+G=2#jbwTZiT*jfyB@gK0O@da_+#*CTQGNkGXH?G#>Z$2*4I#WIIVviEP!@k;Vk-f zfD-2dVH-ZT0oe>F`~!Mp7J70qoE@zgz0Hf!=DVq(JJ0LF72d9AbQ^ckI`BK%6!3lI&?k) z=5C;_wzR(+tHJX{_d$ur(4?`3VrtP3--i~iqm0zI?8!h^2VLci;8Uo07&t~yPMfSP zRB}J$Tl~KmOuZ5`eO>4zH+aU&JygcY_y<(&K*=`zms)K?4Q;96G@ebPjmpTn3DmWQ z*6V_WhmWPZn{0k~BIW8*&q%a%A9U&W$iltYN8JLyz5-o#23g~M+EpmghHr13mg6ph 
zAOFl;W*7QUYW1f5M|d)XwT-m+Ku_c0v& zJ4Q||>a0QyPhp7|D|-*TRR@~h45q5VV_#9rEXrRC4n1qo;aJ+k;QT(y?VwiAGLHZo z((DLhcN=AvQIqT8&r|<11O5?awm zp}jhdv+)^^}Qd!#wqeKSwoC7MScpF(<9Bai2O^${2FR7*mS4SYHbqT(VM+J5xxce46#WKz_JMc3W9$p4vk;nOQMManPn)R&z4!r* znG21Y(~8mVoQLfPs_1|4PPsRb$L?h4LaKJbpx~cKgPF&Hq!T^M0y=#pWob8$6xZdC z0)H)jt8q(cYZA4u10%cO0?!0DhWp)}oPqu#YIq+gjDPnq`So^AL`u1q(t);fnB}-C zbQ<>R7XCfNQ+*-&BNl+mc|5xpJ4$`#2_m1k+hP~r{_>u%jK#8sgl=*!4^3_N-!_$#P0gg!+Le<3_s9<26YWE$P>e){L?gtL9It*6Vr8xz?5TDi!< z35<#&(zFQG;yyQ~O#G!S;wuf7JkiuC@QJ)q5K8E8h+YPBC3tJwX}$fYS3E^Cj%U?= z_Iv1QDa~udMC;5$`kGqyxM!ug{AIt)d%QpgYJ=WxBlKnP|+pH83#V~q_&qK#$#bBNY{BCQ+}cLID-0B1_2OOa5&BGqVl z&LzJ_3Vg}4s*J!~xb$WuOdd2Fh&(cvt*ZeNwCtU~o_Q-9WZDF*6y?yn=#+W^Hf2X+ za_C>z@2yn4hBUpbdfaou_k8&A*T|svkh7zZFP;>O`>jVK=aoT5)-;-|`!?MV=!&_! z${&aCS5tdG`1>N{u%6i7;KSA&;pWN*mnVad!PHcb90|sqF%MRd*FC7t50&KZ%il;I z<*ciZ^TB8b#*`5YT;p~|wwk()yI{1o5!fLmz_2~w`95T}8e%QwYBDa@lJ1U^x6$Y~ zQ&YcC`ghc>E*OgRGLm*^^EI(Kz$tj%HcduPqgiwFNKCyeZ%w{s6Vbv*c;Liv$s zFmL*FE#Y41<$grZKVAr3jCSh&(-zQB>7=25`k1{<#-@Z3v2eX}d*oi||iLi2hy zQ|TLTnfrnMv;sdl@Jn5EnzOj0$8_lf!E=yHz4eji-Wj_cjj;zzR}FH~Rca$FO8@9{ zY)t#Ex603YbDe`I^%KD248tf8$|Ogy^X|;rw{xa!aHv=C3R1_@k|!k`iH<{aI##P< zTiD0rU#sYuetgIN7Ho=Q+Ixsm)rg)Cq^H%ei7Jsg6>8N4nx@p?x^LS+vC2rzedKO{ z?E5Y6ZRA-4E)!sDb%3)acyC7EhXG9<81)S2L$s!6-F_H}X zQO`7GboYlFojvRMpBU__Jo2RiHe8cG{$ypQCiza-D~7rwFRJCErDmI>dHFPcM>vj$ zL=MYFKKbb`p;AenX+yuc^WJEgm}=kkAavusH%ONR*7jIh^R|z*NHL?609)|7$9n1` zqqEV?nOe0~#C+zJ2;)SFn}aps9{3~Go9*npFf zC9a#AFRle!#(geI+TP$+S>HL-?yefon&=^01H4IWvpHbgbHQ2IVzWu<5}=#F|G(0k zk-;~5G9z~pxx62uBRy4`8|_oc;o0vp!22M5vR|%7mcg?2v|={Uy$>AKXkTxmdxoYU zLw5sB1*~SZl6Ls%w6C|y7S!wNy!J?WR}+^p*HbpHqwN$@Ef-nrjm+BG&aomUvVy$R z`Yg4?)vHop%O{(jS<6@0P|Bqjfd9gfJ_{Lbin9do)%A{qGebR|^ezf5dNwQidCJiv zy==CsPfM;E9r#c`qwRPqU2l!;=||I=*ADS_O;Yt1xgMJF#-(`Xt3Pe$A%R{A^+ZW& znX16r8Y-oLt}Ps94;zy1jS~$Rr;Y-9R*b~k5NfIfUdAG4iUW-BBI9`jxf%fLbb6=8 zaj$m=D6|PUTG68S@EU=;R?vB6kx|Z$jbx$aUJT6cm(lh$&h#)Win2(rc5qBClH_9~ zt#^30goeZLrR}55&RCL-kcefFu^&MvCG2!;gi&a!B67S5e+ln6G<*@|-=Usbw4*22 
zs4NZ8XKCO(0O!1hUUb)(-`CTAUHWh@aO)qeKtGImusu+|7yRQ4WiA@c*gOej;4JFf z4X@Q>Z7|+6oCL~SfUGajZws@i;ZVfU{Xe|lhOf8clk}#ZrS#bo1(8O|<|1nJpPpoQ zK*iO$iXm+THlM4{_aWEcra$EwHMf8H|G_p0K!))N0U7 ze)q}fpUY-)9$!ZmIa1`%s#I2`LeG!SF)ln+UOT&xKjg)!Xyl2=qT+z-FX09;>KxD* z>kFV(ckr(FU9MK{N4%ESobh1B`C%_w(Q@`gNeb+w7*ToyEah0NhdfzI8=b<4t$`l4 z#&P7X2yN?;e{vYN4fs792&*$j2ZOz*;7INHE#$tLkz#ZT?NOh56Z?DgNr_qrzdLqw zX;rD74wBfl1MNcNr@4Ex3+-jnK4GrteBP5E&f#ysM$M;{xP#JJxjT7;MCqdD@*Lma z$?5D|OVfxPk1-bAZ!rl>R)ZT~#Iu+VquUidOFG-tn$~_-dYw#b6+^$B75GG3%(rzq z)~qi@ikxapcWrFFE-B=+yPUK-o#74!TCwOm%5ps@4JoB1C_N(IX<_)!IkE59T9k~D zo{lb8-isOYH$s0cD78|OmsXipW{UUd0K*tsaoxtqu*qW@H;)nKEl4S9_dXR@-M=Ew zRP1@q8;O+i2%$p{D8NXhaF)892N56h#N5p)#f&a{GnQVw(C7V#nP?&eM=*JL#u8 zQR+ZjPvI7lzK=R42YU1cKKEw!fybnzK7tv@6)nMbQ1mX~^VEl)3h&`-%BX6OWNJhY z-Bs*x9bCRj%THdJMNT_)qrX`w3$m^-0x8W z^hd$*X7Hm$sgJ>(g$HPJ7xd^sf98Nu_lzFp$@zS@gHqCR3q2^L9e1di8KDn_{rtd8OVyb*V4G##z~H{FY=~7I(gchrT&fh?D$WmPbp7p z%l0`=MyulnP{(s%P-)XKjIJbY7gO(kWPzGh`@uT3>LY(1--7Zt3+jyHyCI)ST}PiI z^xt!rmQ07*96kP$!llpRuf(ww?*m>7MEkKg-2GzYjBM~c3M*<96wo8_TCj=_g&9CS zIN#kT8S5w37I%ci-JJ$pW~=4 z#S^9aqwwP#*4PW7NGH6vMet@0qiHfQ7=i!bvDTBw#-)s#i%6|Toyy=zjB?khqt~N1 zP$Yof-C%m8m9@6%h}H5gLtdqu_VNZ~!bJMvjW$}!o*pcVrrH6-yMy;Q0gOg6j$T^j zU9?Lo(o5}s_lB!e*W^Ft25596Wut?)9pGjJTe@^g&7+ zi*z$^y5jaXxIY)pG3KuPZcP90`R@wsdDNp{-jjQtuFDBC-|^7eJw^KaqTE~wHascN zh&Il`!;^w$2Z4MCIPC=9>hM27{R_eNW29+cPlZB%2ZOGbZ>7AEyE{{#G3@q}+cn{0 zpg#oXx8~cu1w{dm&Pk^tt?vbU+L?vW!?iu7Y;W=>gN2pFvzGL+H<~jq=*_CE-MF*u z651V!XD$mJkY>fpy;(}S&!DgI#&=Nj2K1jXnLIhJ9M!wwntv8_H~^GCQnxe9F9Y>l zq4*J9Ttr=JPj+p>`j{8c7s@Dwu)CA(3=m5TDT zK1pe;52+JlC&B-wj0b06op|dnmExJFN@ic`KyCXJ zpYB!jlyXtXuMHIEXem;C9$&Ff@pn(sj5wgCa7SIVu+)DgKj*LW^RG+|(RQ+Dz6y?e zUK08!Z`fv8(>Ht6x1Q?-^%nQMOjZ52>{*2K7fYr@z{u+S1N#Eu)mwKW>^>n*jf6Rn`j< zSKHZngmz@4a^2t`$q&-v>;Sc1TBFeEjlPPycGKfZK~wd^*7FRG_Z?-^l0N^c=$UJ= z`gT*F*2Z?8#Xi|r{aB;1h_a4JYWYfAR-bZ;C-PC$vdZiz&+mo%mB`TsHr>@d<*FW* zBKmYQ9vD5yo>4ZsmyxBSFT);3UMbJWx|`e;sLA;KTAQ)#a9UOBd-v!+fW@=K^?1!g 
z%G{4NcnGV?^o(xVitqL+M(WqWg7}VF>eA|LYE`%1%t-T8;9*+WiKIM@@@0^hli?P% zySGKb1z(dy&}M)b4}b$lif5yndqg*5LF9 zF!mNW{tQg*A+0>Ra5xw*10P98CA$9oy5Kg&XTQK%(O+E%mKxHMy{Q63?wr$VJP`c* zL&3`h$omw%GM9FL6-w*feUn^TxK-#wMyBMF?rwKqW$B&XhIzb)Lz}pQD^6wxU88-I ztZwRim#-vsjFgQP3LrWv!tvWnW6YWO3 z)P%aRe$#&Win2VDrfgN3`dyZqGU@;RZI5D(<28k&r8KXwyXRbwC;L5)2>eOR12{pEx*9K6;jWU@40t#h?gj>osF-+ z`mDy&1oTp_=kwGXWFAHmoQ33cEmE8L70Q+Rp7UtKx<-Wd^o~02ESW|s&BDKTH$6-s z8}xLlL5yZGmw$5P4o0PLv(AS`SEg?!M=PMJ2QT!2|MZLNvo4Et+(6DaPcr7X_|g9N z)4TtacVJvK>RGQSG$VKCC+>LZTfY;Eo{}ICj;KCO4(n#puKkYtHIesoU zs6ZdTrLH5;q!2ze9@pJSyKBj(#ZU!)@NB$RxDhY(0DB@;53nQZ&d@vWVVMk1HbBb# zm9gmQ$W_SG6eR`(>bgqU9?RM2qsIQNf%KS34Rz_m9kiun^M2>Oq)g=BUiy>E^KsOv z4sc#phWfm}$atBbfb+PU!?Eazn9;no-;DnBL!kjDG@7|`jj?_z#Ul$;&+!Hnm%A5h5-qqF_SndMTo)guda0YdIM_;rul`Eb~ zw!KMw_Cb2R052IyM7{4$kT^E#0&kT-%_^+Zi$Qnc458eOv|S7ow*kRaa=8+C8Tj|U z$>`~-%Ls5rHVJC+=akq%_R*T;9(d!&*vbKMH>37YP^jQX*nS72#eTrJ}Geh1l z;3LOmcPKQEX9?bopmmCGrJm!dB>eV&=B3PtlEXbgnbiOEt(RDTPNsyjqL?oozDM({ zw4AX+^v^vG9d7tJy#RVXgoUym3Z?>YS|ZBBtN=-r*4p(2VA3%iJw^HuMlsH_0@j`L zl(wZuG%NUlGOI_*`D5#fz^werVf<*rDS7oK=rxL71|_c0x}KPY^l^2|y>G5Pos0G6 z3G@>f$1wut_vn7@^8e=ZGI|{4UW$^w&SX79{dY0;*j2RE0QsXY-W?hnfWi~o&JXoh zq_EYBu~7QZo<8BZ(62Q#C<^leWBTkyhG^eM8~Z~>xxVWxU>Z{HpayFdm|M{}g9T2OmbTHBHu;#%sCT)Br7?fNXn z@clq~DZhkw81DjFan`C9D5hKjnNh@OFC?ccD@l5kBwzX%pOgWMRe)5S<^#7L&t>QX zrNAIyj`5CDl&C{$nzF6ovHsw-gxcLDw<5Iq0i*X@>PjH-+_V1}T)ZF4-n(SQh`rXg za+qIf-rBQz!3d~fFP*W zmwu*c54tx-F8qJJDl(#%F?u?(>vfn(8OrGkat&T>v5U{DP`!xS z(jm?CXpyOJA{|XJwoy!%KlNUU=Zvgzrk4(V%FsSa+D>3tt2tWE(faK~z3y4k_c;;j z_T<;zAjQN~^gGuJG9Y>v;yk=DE!@wkLQh{f8r?@b6)trbu%odLy>%@;!Y`!8tnUfO z=b*<@Azk}NN_|01p9k50Hxe`6#oN)c zs;BfK#&(U@(Kq~V4X3sXzl1A3)n0BIU>p&+Ii~*dQ-Alr>Yq%GXea0i843rt=i4Zy zjt56w4)*YBU@OB+u_hkR!2#>Wp7(@^b9DD$eTQ6gCH`?fm4`c_YaQtSG*aUr{j7-G zGOk-a`uPlF^%+KYBUSoNY$jM6!RVBxLU{*$^`y)qXx^H73xUma zSCN#<&VUclZi;%0Dd9ZV)h_pzM1FmUs0;2jRzjc3sO>|)^c1M4Juy8Dy5^803(cRz 
zf?PqZ+EaJ&&mF1}vq!13AAR%e>&AeoY`EN=*E6Z3Cok{3b1$+h%||HPlM)NDFG<_GpAqx);Gam{{rMkX2}-GNl|GL36z>GjW`^gkQpI_Xx=rmrmhUJ{ zv~`llYLSDL%TYSUnw1U8-cHBzm?}p6E;ueNmJy$ez{+Im{69Rs5RAA_-CgWzPpz%E z!_1MDU(ffdK^RO)lLllmvkXViGH4(-Qrl$pu1azO`Vv)4l}a`Kp@ zi>u`GfzA?Ij!JPQVL`yN-k3~p+EXfeabt?!2ECb{K3L2%Wvtf2dRl8tEkl^eMz5?g zAp!N>``(cErAPzkz70cr?)#cVNo_#a(JO)d6~H?fjvh>o)5AFjvCtSg$|>sJxU(U) z?~Y#MP~;+8?X&#fgR$@p(yrQl>ypuS_OJpIvzx`!GjLj@(EyDJt^auHZ@yvLWCHOV`!k{*WYIe}uO zc72qyDe^cLw58NY^LRVA7Ea}?Cl;@zy#_pYFMWRKs~G7CbVbxTkKe8n)d$vGu&Bn< z_As|{ssS@H*Vv7G6mL*H13GkPbPhsRH3kbmA*;Q4#yH{5_4Vs%DLW22@HCsbvbO6e z`s=ukHfA=sQflfEycUSHv&3tPJYT|1uC*7TSBC(BGybyJe`%z-Gc3p5e(>kMhk5kK zwPA5}4WDYOq44(%ey^l{?H~P$o@LR`W6ef)oJo#hjE36$kKWP>NVqgFXRGsQHy@ci z18C(>W1wxOUk#8`?#wY}n3nB$YAqzU@iFwQ8|iXifUg+Mt=x|%LAukXcNq2!mf|kj zIK9%it==L7uoPw-wke0vqeW*(uay zzt+PEMhRDeZ7Mn&u2po4}+4*tm7tiEY@#cuOOb-0yz6UWf8X9+n5`$>do&D)B9{a%)+H}qVouSID z+UaWZeY~wjzsIG(m<-=y!Iyh1(m>+8)zb`(slOMPHGM(wtUCK%i}gJi9un(PL7kBZ z@9yN)OVE?Hl2FyV5cC~nB500^dOK1_0$SY-fA0mydOwX3d?J#(JN$2)tJS&PQ4zC=)dg7_bKGc z0m8%797kdbj`@gMmDbK5jSqPz`Nxo^-iqE2*Oeor-Zt<_fXfv*@3$AvQRBB{Cs@(3 zd@1;$Fz6QPtZsErlyY)g$f0LI>bmDcdGD;Q6Ie>2XQKrYTX%1P`*I79aY8*ft0Mi7 z^G?P3*9S2KIpiF)3i_j%ywlN2amHgTQE%$_j=sdF-XhR6i zvdoO%=b2;4UE#*8@s{ykq*O-MKgwq_eoMjg;di~w+sLIC+bI3b=&5?vImD#0BgHS}P#Wy1IbA0k zg@iR8Vga>O4<%*;;k&dn4_MtlsD!x;iKM4lyVJhLmu)CVlgI(j5DKNT*ja{AZmROx zKJ1rpULDIQN3^1~-<2Z9iB*UE>#KlV37$5h_5$k4*u61dX69$TN+oC1L%`}dD6k-S zn)H(iS**p~F7C5g28ACFve8&4?$BugN34eW?iBWfxL&pO@Yk1=>=a77Qy}`PjK%!` zuumkH&%})qum|gjkCW$2aJdUu_1B8E7I0w=^8Z9X(⪼`59j+z8!_yTv4YUg@g29 z?hT{HRBc|zm-`Ey)s#?slnd5To}EVb@GTtGfil*6{HJM-q+Z62fccrL6Aa;_tgd!DBrTG_LI zaX&(AduZU2Qav!A9%Zch1d!RD{28IuC!&Tl!oG1vjc%s=$zhkz99mA3Qo{J!4VKnY zk8^F;*=hpWRJ?XtL&iGL5^Y9r*MqT2VFcBHBfcbU71-5QJQ%d?Za7m<@=AKtlu{kR ze2TO>YNw2#?>r2t`nnh8gqdZ&xcc=vN%d@dp4Z6J}@Qv z%}~$%s|Ea4#=gi{GS=zunJxD|6`&1yCKw4dP z9k>r%z8(ygj3MsY_!08C%C3}-Iz)S_4sRvCTw9j5ATFF+EbILvn4pL z3;*d!Hjc?VV8;9Zl5pxeFyJcNPjJa%YCnT|+{r!{OHK(Sw-iz8PVzXvI1gQ6Toe7k 
zN2$LCrM&?%2`uKw+;J=gj5nA-pE;*}i>ECaW$s@Q>LFNMky6gL^5Bh`^xPDGy-%5D zU}`4+eQKSwTGQ#>uA^faJy-k>xcj)dGqPvh7( z4xzj7s|T2rl?VClzNgD*DS>t4JtdcdL46IU0FyFnF3=?ygT&xQqkoCST1T4uY7gND z?aveEev2vNp7bOo4w5z!+__roJg6F8`uw2P-@~#=G437lN*C92ronag`3cGyal!ML zMWh=arW^cTL`(Mp!6Zufa@LZWuS7j3_TsbBPq{b+OsVk1*vsRnXD2vm7O-9sC|*Er z7?EiWx~>b)M*&pQTnXI^`)&h$ zRM%etA4mCXS)(5v1lRga8q#NJ>AiN7utZ;{R!`+cnV%qc9&dGELq_8-HB6js*ynLP zx6htg^?vg{aM}_2<(}cu^sop_x(?(XLRSD^g=>wVvX+0BKqF&T)C+s~KO&_QPk-W> zGbUqoj-!rGgIC}mjB?lPT&;dEc$f;Hq&IvHf@-Jm-Iu5Gu-NHDdq&;9pSp@kix$FE z>T}QbW5ATn++MgNX4S&}+Cp0$fi}()o%tK*pl{&xES^i99^}pFD|djFLmH}q+n^tc zcs`Q1YfyVAwH_>t1G-BXQ7eE`jjP9^4SlVPW^-?mcVro@!11r&#Zs;?>DO|ubd*`= zQu^XeI-a2RtfBI_9?z8uM%k>$3`8l=951pnxGHdWfADi2w5t<1*fBl|t!z&{RqhSe zr%=W@kWpu^2BspQ&?@ar+tb0%5MCc5f8sfl8rTRg(37vx2Nz;td*1sCB&<3`Iopcz zZRpQC;MH}%>0qG(_L}>?>H@!a;W%EM8+{eHvwyH))f?_St^?&aqs6txhGLyLGyN2a zVqCnqrnV`3XVKn%EGt)(rvdwspsBT=>oBrULtlAz*O(@HnZ0%UNpy#MxT^6>3f+e^ zuT0)28HvJnTJXr-5_%%;i-Fu*P0oiRj;iR7aQ^50BgWJGJN-}7hq__)K(5Onm)4V~ zHQaY5x1>U9T#b|Cb_6bq9*hflK9lFpP@=cf694*A!s>%v|MQd4v>UzF6&aseACA)b zLBE!kPC`|!k(km9Y$&;Ava9B8){ULzxzjn~OlrFeJ0Fae!zO4hOTtr>+1Nr# z=ho0ld-7vGQ@o8=YWh+B%?1bFzR?33Fw>)EZ;bJby=PMYNy$d^>qKZjm=byd)yB>e zKO=7`Ui{_xnXK?GO^>Hla`2NZI+nW~Says)04L837=N6a+?!CIG1H1%mmBvre3Aay9ub~C2VMdH3psy8I+i}t>x-vVPH`1vig z&8OuZU{3xWOP|yeQDR2;qc6_g%2VmXo#4gv$&{D(I`dy|WfD%1J01Y?bfB+VAw_#X zpz{tx4Wb#N(T5=cjbylnvJdm51o}sbDegyrgS~Ld*Fl~&=4l1!JupzT6%Z^6oVgY$ zGZ84|mtCQ>{(B=O_rQj8HX!eNvRF^eEaa2(^(-K`Kj?!>z&Q#y`iFY8|J>J_wWV{WLh$eEouI1pgMyxRj_f4f@|t^j!I!g#dWP#@vT@QlWL=#wHwS8eE}3{Fz}pGheX zP7m^S7s$16g?6U)Rf1g6<}3q`>G9O7;C{MQjCmt0t2^!wBdCy(y@qk3_cFzmp0!+90UYT3I!DD_-3 zUCYQ(wx}B{Q=RvP;4scAO|dR{#{9~jBrTdRmsI7q-fJa_ez!QYvxF1v|p||%wEHv*E zFN6}GBHc&Rd*jncG0)IX1pje}?T#r`TcA{S6NISp18ya;EEu_Mym`_o=`Qppm zncw}6e>T)L6_>(tS96Pu%Xr1hw*EKZ*8#FwCtRJEE>o&02k6jXYkTa-*oyI45r8} z(NC)#t;NxWIy_yc<$4mD2Xq;SLM=pg-@7&fX79Mu(LT^hArlG|%IEc4WhCJS=z0+9t>@{3$bW6vdGvlLeI109 zoq=T74F+3*7yV3HB~r@ 
zW*ItDn_wpGYDwD%^@ZPhM%8Ee%&(kzbYhHer|u*<$Meews)zVpOs%{5-IDLhSY3L%^{;(_Mp*(D>Orqo zL4$aLdMl|1$f=gpW_c7G^#z`0^k*CV=nXbwVdlm?kIOGN>eT3Oxp$ETP^b#Jg!%`VZee`vK(+9y;d$S;!}y8$Bu+l zfO05a)eZbw#f)qOd@r17a+L@F;-diSyHXd|Oy%Ots}wg;q7_iHV^S&Ehd!o$&gqJv z^){rP#>Y~%H{H)-TiNJQu@RqD;@_AsUerMmkMW5$#MUE>QClkK^b%E|-6Z2kt8yme zUcIUHHkHvR9*lqgEm9`#7B1%hI`~XDl!cceW%7{R>XIaK!PS@U@Ib_XHLIyoaIf(- z+f!;h+|~;kG=ZPn;GOU`@%Q2N^?Vlv`0ZK4;PZ^UlA@H*PNBB4)LkW5M4lg=M|vKw znLlGnFQ!~5#f8Ch9R4Ov|i?DRk`nBPvA|X7<=xjDVnGVob3dr5@gCJ zP*9(Q_Gy~BEY~y8(5Tn$lF+JZ&lq=JW~@4Ijy@0SD8<~pHW6+cj>T6tj1~2u`$Wg_ zbR2!F0$hdUx`SU6DWAvOswlv$WbB0wR3mf;;#9!se58UN(^ru{-b0+GwTqEryWw=l zULIvn0plrn$CzZUb#DbGORGcWaP#!%i5@mBAiaH~fhk3Qi-As`mKe3{Uy=81;J{a@ zZyPePGum$uFqoq`@?%o3>k2Z&K;HFSljLaTq&1$0r&)k7r%(uG$ zTO*16WiMlzG}Ip?78>$f+_+n@fE<4hIp@)XvEZv7ZAIU!BQ}n9Yf`@Xoq7H1f1k?z z&Ulp%@^9Ri6?YED)j@S)^h@YX^ahhGMyz{9?7>8Et7MC>`;eVIphb$*qJYaoJgEY1 zl&qe~Q}?*bBA%>qWk7CsXQ(HFJ!7;p$YW)!{<=60)08iUKb=+DTU(2kY1VO)I6*%= zr_v5Cm)^>ZMgbqneX*F)I+pik+v=KMzM{qNx^I#?Trnyl$1U{3d`i4j_$5s4`?(pc z)!?NMKF*Du4LWO&?MfYg%lCdO+tusJZg*YkRkfr~as#9f(SufSXNq2WX3O5n3+e^6 ze;zo<$a8gG0*RD45tbrH4kOZa{v4o zgjwfiQpUl*Mv5AWMsLS|E#DNjT~F$CRnJ*i7Sz?E9SF8o@zi;d(kKBg^NWw2hjEPh68_9=GtDpgQd?h(zaC|zfqtu(!}|PA_}P_VqgU6$ zwzY&3Ln+{zhjkqaxb;qk6#ujE-M-9|kx*3r7OORZW(cyPFEFk}@;fHtsB!;@e4w;7 zYD0A-`81@_1EdvVZ@dNW-U_~w$RD0sj$=Isi6AY9(N-wV8YeksJDRHXzrX|L4$_C*0g&Xc@h? 
zS;zo&wWq35A=M|PFUOM4GcR`5ne)I(RGJ4+-s<^{kEVz5pTc+2eIm%$yU&mATB|4{5 zvwZ~iybW3TS``{ae7W!3apvsXCwleWZz+xvv^W=<=z&xRxKH0XYl3S za;81ye5ql|^X)3}@jX^1#r+5w`J~K<5;&HWDp6{x74$--sL6gf16WF4>1>acj778{ zO?Od?<2Vuc$(r1oq-@d>_rG45NICz-RvcOO+7&KkwEOR)wbF^a%FNZY;?15nBW3Di zcgfG~;J;z~N>jSbFSUmA%pQo*z%>?&vMaepVap{b zBPHA~Tm~sxoAR!gL@ZsrQZin~^GQJ-B&i`y|CDGA;2zHwMh}R$$GbK;8ZHpy{lQNf7@ehE1q5RK5HeFg zrR(XQfm%;5r&DhkD5br2HYMGotanWxlhVpNt-Xzg&OZCoKu@kA})dm5>pD_)C) zG2-H;@Jw&+|E29+!0al^gYB8gB$*`SKoSEDG3>+uL!{Bt7)j-@Cm|X@AV~a+6_OD_ zg9eE}5eSEsA%Qel894|BgiQn){HhM;Llq_zB+v@M^3_1G4jLr1ND?@D}A zXZ|)}TJ@fdvjX_cg%7&QR*si?-=6}fAM>_?l#bvJfPhb~{BcdHZE)vI6L0F2YLo6c zyE!n`b1C&vt2_5o$0F9e`EBLNcnw@@?RTf{Ag?(479%YbI>K}L|4{!I;j5?MIr}T> zcnRJlH(v;MbA|a{V0tImxj9DQHtKzYGW-_o?t%EwX@B%ZZy z{{pDaS~IjUhmh~CYgd0t>$OHbYk+|=LOsXR_fH8=w(Boi&_Tr;*V9wfBFN-k4y~IiRUim)oUfCg3iKP}NM2A5W(*u5f;lRD|uHCV&2f4J*18Xe#=omsB2}e^8V?TFyxti7)2<7mfQmcCsT>0WW=L*t! z{_3MZxkBH+12t6OvjutdGpetp20A^;QPSJjPeU#W?Mc zM$;1|wZL^Zv36q~@=x!w-nQ}ZufeouSfr^1uV|viQtnGHo-yOO|Eu8|sR4TqgS*&U z;Lc-ci|22R51N@k*bI2>X4>Jb%ollb#%d95u^cVmDr2vQ`w8lvgMGCR7=1qU+6tx@ z;MCzjXEB_~84dTQ{tAqcTY0X^RBD>Y`_E&1?t_~=3r6bsR3iT+FgyZ2=FIONq0>0W zx+HANp-{hHi++rh`%{WLqGsSR(5G@SIW8x^Fn8YN4EkB2-J_^iNb6H{1ZTy+Cu3H{PUEYp}_ZCa9QudNI3CN0|O3(riMcASJPW(1ggOFD?oTf z@IQUH+SyX$W?HMZqcy0<<~8p7P=@QFU!-h#>Q=C{pR|4OSp6@lO{n9@HC-vt&6E0^ z>p6v5InQwh5|UEethPPQG`_p0&*xlwV?V0#{xbhbds?=Y z2rHk~MO=ou&QE;*I5jH|-5spYQ6KqJa1i}i_VslBYeYp&Gz^uzlrV@LCr+)#er z49;bYGbv|4DY;9`b=Os5o~`rL3ePouYvNass@UF^%-{IW(&d!$k#7f9xI0T3^$HN! 
zK$#Rol{5vv=yeq!Q}2& z;A?ax=BGK!<}0-C?}5a1c(>1>_G{oC`WKYHek=XsU{^CC`!S#3c?0M2YlBxP=SIO* zb^*@L{(dE_13eC1jKBlIBi!+JJ2*dtQN9h$MZSIlka>mH4~-OJcpn&24NN%&KJD2A z>d#f+B?R-_k+T>j<=qu%5I>=eLDYCLrS3)P6S<$kdmr$SKj?8#a$Q2-6RP_5^*TuL z?%lqdHf-YVs^c1R!PV%>Lw6td#|WJR^nZtxccqFaH+%tW(3qEzzS~I`$I5o9W zd&#@H<`K~YsXY0PXJTnN6ogC-DQ z$=jjUU66fpqVxF=%n8Kp&nG zSdtVXyEvZlTVDXhlo($o)4fz-)Q8ryU*) zZ+jwotA}q6<>bn_N=Rrip$4|}M7a0#73@#m#FOE~R3epa%O}69o}Uwqj)M(Ide z`6Z~O1uVY~KjwR5WM*M&I=+70vkw?JAMH*N?Q6XQT7sT2?7Ey^5b76GmQcodxh31B~yl$lVOrJDfg$37^7NTJs%jMg5Q8B=#I+%z2bLj}%f|o+&;W9n+H) z`?0QGBj>Nsuhgdg4Zf@n<#%SapgnD-gor(MeZmtlcF(~z)V;049^_p1YSPGUf5wQq zhIA2pu0VQxA4+_Se#~Ij?q$;4PMdduBYuO}9|!l>OXfGH)vxaZXPm3iTj=`tB#duPf?m)k28243Wrwv(JSVB zu{)QV>BmU0Zv(trjnCPrlo-n?&Aq=VVQTyBmpay%xK{_f9Yw6E<*#x**E*?Z)@bXx;3oS} zqx2zdN}UDq_Aq(1z73zt>(uQ}Yj|1%25LF?1f|NCs*H#;6(zqf^EEiCz0Wpvka`~4 z$UmX8$mz%985|C+I0JPb7}Q6s`)R(xS*Fr$Ka{U6;O>Eb^2+O*>g_J;+3{YXKNZrv z{YvSj{3`f$1*CfdGnI*sb2ZvHkG8nivY!$xXDHO~*USrQJ{n3b;6Z*}s+)Aq_*eK> zzGyX-HixNmO@KO7H#x5Zd;2KOlLED~w17Oz`f+GdADydfI-%~B(C@#Hw>vaER|>ll zq6M5e0Z6#7(l3!!!B6eBHtJR@k`oCf>s?Gu$70Jnu2Wfkcr=i81;Kp&9g#eVH??DP zmRGU;N%+=ybN=ZWYEsWEfXzsF+kD>E!YlXSCuUXQOfORR6xwqMd0ZRnzRoVB=4(8u zzkBYJ-^?99r&;DTeVUw0Us`W`s%URbEwn8R7u;hIl%bvIG! 
zsazAu(*jp^Jp^V^DlKLOs{-rotl&=_D0^A^%Oj{-;!BAF$~X!bbWlb$ewHyB9F*VWe1q#R^sKl(U8Barggb~NG(DA({#C7ET62=C*f0iosOjcZKPd9+;}8^ z8!+#H#;T-o-fIlB_9*&(mCvCFdFex=@QsN+_luH=|I z_J164?cl7owjLkb{Q&K71(Rd0)U=G@r0R-Mb!MX)=@v$eu{qTMf0R@Y9-TXJMerVN|Ecymoq2c>!jd|9Q*LC!ZnG;?C z{#;7?J!kAIl z$7rnV!+`HS=%8DeE836I^xMz>9OI#8t>t?M|Mq1i^Y`~t;xKr}b->>-`Uw2437IgL zT0Teb7NE(U1f}MF871j)$VKa^1np7OQ~-z+MNEPeHl*3$((Y4T!13_C|X>$8ZBr+TL2tYJ%oWT$Ap~ z=kx-&H?OlEt1g%noBE2}PhKm;mE$AP!JUgpDv(cV{kqP?)w*g$`ZtfFyf)HEE#6H% zos>1MBHpttwE@o+o-HkN`Ic9P`7GVl_(@9{XUMwxvwIJ#@w}s6nkoH%_kg<7LLN_B z9q(L4lwK;S{GG2naeNA+pKWwrPw&9>@OFLS2S7U?f`M^to z_eBkY9_2#vBc;XjaNE_4R}ospo7A9Ys#coos$6qpYI&@dV-qrcFdWJg(>C`$NI&%$%-&K~2oWhl*e0v%6kwsiTn80>l8e?{&e z$H;3%JG!o2=>XzY?*4L(k#{YRlu>HBQ_*v_5_ScAs|yRbpruNS`y=J4@oye=IOkbB zF~vHU@idb@xelp6;wlmE>HLmB?&r9PxL1%C(w$nNUraa!nzS$D2zwvc;rV->>g{*C z`)K!J^zet+xV_x}9d6r&mGcOk(v?j^@P}3jTTc2d^!Q6~9ryY?43s=gwV<|Z(R}X! zaxLK8f5PFMn|~91a!F`jqgVz1Rm%7=Jm!v&V1Bb#je82?{9VT0o?pyU%EraCULLSH z+U%~evBAGK(b8uKRZ4w_e?8Ub6RX!`A!$4l#qSY}Cf$pnfgTM6)aCqkfqVbgGt0Dr z(m&6r$~A?FvsjP8KjmYleu$Ae8{BY({IOt-wzsl>JpWpHzoqxfsN*t9@_PnSfEcM< zUS@i<(GIk}rl@xgBi{#>deY3Ez|3&hhvt{F{Gvgo%^e)hxQX5J zb5AdCqqnB7)KhtKhI~Esv!tQSFD|COm+uQ{S9-LX8I6*!s8bh9rYd*O{?Oj|0=;YHyrTyQTf$GvqY2NeCAGo@ z^z*)j_jkvV=hfq#lkvA>Z22ZM8dq8nO0EoZ73<%JoOH!@jj`1V8HCNP=9}l6no@6u zZ5E1S0=k~rlRCKa%~4d#a}QfP|7DG0lQS*iuC%L9cM@1!g)6_CIQQE)63d{lnbfx# z%XBN+_dS#$f7O>F?Nz}dS0-=eO&PD%G!^>jjXcLu`kE+R&!=^dA=N!-Z%SmXqVxwD z;}@sIvl2TRYZ_duFDB1|+gGd}2N4*%>?cBkKzhx{iw{^<77-D+}Biu1@(2#>3e!WrzIdVg6^+uk}2A zn*Tg^=vZoc7>Ii2mEJwaMvQ%k`qz-ZlacWJ{|?&zJoJBA)bk0rlo+kGy$<+zF81NH zsesR132fwIp1JMp?p8Rqr{;Ka*72mcpO(uJwJZOF_Q?k((Tje@Hv8tDr4oZYqtI_j zrsm-a%x&;d*D_YX!|SL?-E%0q@(_BV@FzcG~WsY=3GOg}U%@&^Anj~U2)IS=ABwER<{o3iBt@5=6G z%G1-RHLXTz4gb!+{LZ!LXE2sI=P^8FnWxp{n(@@swCCgj@=tlHwzYSErYP%6Tvyg4 zPbi-n;blrWva#j$_V+UUzrX9}t496W&dV5^>h=;FIZNI2CSc@VQ|Y7$c>0BjH|WzG z-fH~LukQ%10t@5+2I|SZCVMhQdSx~cZcNG#^Su2kpjP$7c7EziuI0!ya^JkB5T-OJ 
zB|_?5Wvug^_DYDPrl@`E+^zdKw06}$-T$n7cMbGts=MmBYr+JKAZAf%AT1RqZV6YOv^$#O}s3*1zqa{fjAEH%1p%%}#b3W;D_@?_J zTsL(Se5Rnx(NOJm{IrAh_^IE|rtA-)A1UklDa*Nq{yI-~6xM=+%4Ky|F>Du}^qUs6 zxQ6Dtikt^huQTcPsDW=%eG|CbgC^_>gtqWxZpPj z(hFBCD(ke_egn1nb?onfP2-^boC|b5?nOMT&O%6uass%ZnM3Hgr}w+d@d5tzvRsRw)wP-DQqCE`##urA?KcCr_wm#Q zCb}kTFZy>IEs-zE!QBz5Z_bmJroeUe5MF?`dO=|7A~=zj`aO(~-wZj8zAj^=r;&3p zEjy36TWE2O(DaejC`YTR#)%Ru(0>=uH+?#KgWtsOxiowW&a|Ek=dF->J+bcG?C0*; zta7d0q7~Zrt}k$Q*b#HBt~;?Rz;1W?=iK=wLhp*4XA+)vh1lUb1oZ`XEL8$OrO?!s zrbK!@(BB}1waT$~=a=#~wMPB#Td7$)-%-fR7C#G|r53LWzu~lUGuV|JE9atUP!e_aQccCD6P#e;VumJr0G|zhbJVnhf>G+)-tpMfKMPTpC zp`B?>Pl%Q~U#egGIBJ*rw-965`l54&x)$$-l~>7;Y>9S#d8L#X1BN!_iZ+qb>Wt`# zBP~UhIZ6&X66_QLesx~2wQa~%qfaohYJcutY69zXCZ^z7-sy?Q4L)22c8}6`Ph|DG zFB9Muu5_xN?o0To-%g?q z=XIS4_%75Z*VKb9KP_S8JtbG}eRGx7If6$D52YOkXK@CpgBsrAeHyhpN3{`%s2i(&Uc}pE@>QuvuIi2(dvggb*bDyq z4A4*u)cVvn==X5-pWMc1xW3bs1L}mzFlqdJWaUSQ8%GK64XNrJgj#qD_TNxsfqc`` zV-^Mn6Kanm5&jeIr{`Rq_+?t~B9iV+YVJqROez=!Y_>8=eWZAm5;tQlAHxI;D1~pD64lMwQ?mk^* zs3Wh($~931rRTcOa%R$a`RpV0``Pt-5xh`(Dfe%gPXAfrigmjBMxUUXse6c&hfRc5 z_*WX5LmX-FNZ)q6hVw2js>c70aVZO=QNzV%*R>b%_V$$j{kxivDb|o%XR!B)|R;9KspmcDP*X*J+Ke;>|f{x#vPgVhVUOhA=hVWz@PM zp0(**EhUH0cdT~g*oj*SA#DNMx8pcfBDW(|p?uFo{uH%TscmlP2iBuC;Qh^LuQQP= zs436Hy_UP@9z9CyUFErryt9D8g|y%UP>kA(bIX0SX&cn57f||FBU5|yt5MEby85M; z*7?0l%XxveTBoDhL77#a8hh zZht7P(Oclz9G=Uo#-mMFVx>ExU!}F~EnLsc>|aq!FWlFXbH~tR=+!e@lrbAA`7E$v zTeKx>cSoZ$IO^1LEG=fe44t9NsPWDTd~v<9-}`E!{-)ptqk~T^hqvrTjpqSzPZ|9d zz1$hNde+qyjP!i);YNJqE8$EZB&-R&?ppY^r&2%3sPqx9q?b>MYsw$zOXW|^@XLPM z(m}gMf=llQ+vRh9^U4zdorCKmu=|ELH4TGD)<_IY-YIAR?`;268{ zrpMv_xGP1(EH%S|5b@QorB9)CxsuY9!6dNuXQjVyl*DIK}8G%;e}C9T@}lzpBcUXa@}CX28U)auoZ+|hh3 z=^V);D6b9M;xhcgbHJrv(8pPUgO+kNqkb0l-M_LHO&#Dhd*8Q3a$hx-|&v@(1kWaWX{!TQp61MixcM|_fYnbsajrx!)|?|JLw=RB&CRF6u9 zU+zuVN^T*vGrUs$L#gF&6JcxkHGzQz>DA?HJl6;@cMUv|M`p`i$C__`cX51p=ggb< zskPCLCgXeBJ4!>fM!!C{fl^k``el^Rk47yG^#;~CYUyXW2@dAY)@8KpT2_$!2(ILr zv#$PHL>tr=awUZDABsembGxfa>ZtKczk=AF)FE#ZhulG!YZh9O6Yelj&w40&AY@&~ 
zrVJPjKQm4!RcWDmNWloc8g}UAr1A8S3U|-+lY5G1FNHQZlJzsLiV>35mNrH|?P{V# z|GT(`GTt3<^iK2?^j;k>lZhN9NH88 z?E>-?l$M&BeOtrxCb(w}?sYXK+!CjAdTzTsOK*sK30BaL-H}-vfv#(Bw(&oN@@^n( zIJozpyo<@-LaTh3ndcSs#I-=K!|CPCo%pV_&z7hu>jNoh_k)ZjggdL=XE2RYoR{aI}TM5XzJ*$vJBgZ;W1aM^O$fcR84Q zc`f8gKH)xv|DrX?(V7{_y+RU{a4XY$AJ0#IQr%w4z}WwrXT1gTZ8ee_yelp7F5ZhM zTaGobytI-GN%@jzso0WME>|midd_O7d@4O2j-2bEDH>0tV+{y#Ug z=p)heet|5lf(0v)_nVM)p0DWn1tQjQ&O%PLLcv9 ziW12wK}e0?nWWRIa1C4&vQwVnE){hewL(WF zxrsZCItZ6TI>&1X^4DD1k?r7J8WFNW(EDq8A-XatCAlD>&58vpfvZ*x%WbiwF7IofQFyPE|b5z+I?8Swb3?t zvKmp16xq8%-wlXNq!jm%{{t{N0eV#z zs=#f;2}eV&G>-6dfYdlf-l52q{FHhM0eAvkPCHjEr z0YK~m^z-k9-{weg{MW$=(}SYc{qd9w`sx=jnj=#PeuqMNV-UeSU=$?hbzd?(9ie8|afb;~J`m z8E;qjh+SQHS6-z>6)rVKb+KPjyXU|Ef_sfrT4bY$Z>nE%K_yakdx$aFi-tBNfAXtH znM#e#rxyP211`zWOd$!E|V1XPHEmTEauVQF$Q=R z;GZ76Tos&rw;J{PRqyI{ej%@Dl;M{xUap5I+2!dA2=Tk@uG?;*jhO$XUdn)R&|-4nBclAC&=RXaWD^ik4}2L) z(px4qI2Z2Pxtn7QZHed4ID_GS^Yu}i-j*s-$8%kk1*xG{>D6DLac3($Q{h9Drzh|Y z?&@Lc2+oh_lN2Ky+iGxK@l>i!UyWSIFD9BoJEsXr@Boyz3SBGVQUpAv0vT6gxkgZ_ z*aMHfk=CAt9^`6pziE37Gen0{f@}W&p1WLTA)4!p@aD9p#nUmAC$w)5ZSGUF^9^wC z2y`b`Q+Lu%t=jA0o=4J;`}pYr85vw!50>9(>jw)xo558mdIs`LX?>uc=y5k?o(_L& zV?6X@9ROZ_3@UgISYHop{X*x;kPu_2_ouA5`8M)nZ~E^kzs>~zntDbs0-oS{2Y!p6 zQ;s`qT3N&HH^VQX6-TqOvY@69!tMH?hJw;PKYKkr5mrj}r=iMu;J>G}xxVO4xZ1zb ze=U~1V%}G8%40yH3mo=z6aCkFQ(plLucWMg;=jwt&ZdTiSO9COV*=L4 zlaEt!&P(1IF!4JkmZ@GM@ArE$P2ifGqrwal!ZP;Pk<<|aS zW5!4m>I*M1(t2U1)a9CAD|QFP{gk&DsXrLb@DvdAB-O1@!xnt|@1>7b?1gz?fM10< z0(xIeZQ7EVcM4;uh1A3h!V$<;wPI;OkK^3X*0rQ6?3eJo)?b4G?phv3+#|FoR}N=S zGu{894Wnsk?yeSE@1h0Dt_A!W*I3#x6rPc@{LbmhiEkpe8i;aw5V*WKawvn^;K|y| zuFSkK#_3Q(QuF))e898voQu+al?NOhC03~U8^BMC%P&ZjV=i{v7X60mNf(3F?poO& zthD}=G@j044z+l(Lj8RMPj^8rM}p5Cgt!V*TSzE(P(!J0>xs+W%9~tK>1j=lvV7vZ zK=^W=lt9y|`F+@!($1f;+PRZZcj2w0X4mpdjeCNPqmk&&G$kbcf_fhqaYjJzMoRK4!q<{VZDx0RH~~yPADVH7ufs1)+|++JM}98~j?Uwx3YptzGk5 zc2+(_Xb;av^E3twmA)?`*4Fm%pQ~*@gqC?bt=3cf60rXY{hLgm%{3SZd>WqM*?xW( zR9X9Xfi-H^Bk1A5K+rF`o&#?zz?&aO?-v90y}*dY%(48O(fJW87SDq(xbyu!;C(%9 
zUIZ)-q$m2-ZTZXm7V!KyQ2bXU!#TjU6PP`Y7xP%2PbdBx)cqRNBqzU=5gY;(|AsM| zM;j(l`u((YF6G}zpDtsU`3UgcGlqXjDemld*Q?%K{TBL6FQBBSPCV|5JGb-TAxJi4 z3?lQxfcwKdD_)!8?z2z7r*TbGV!9C=#x;tcPJ>YZcofCDk!;*yyFy44`JceGD`@u~ z0P$|Z{(=t~96-ghx$lRds=|big)#Vv$G@Z-A0%`E6KF^AwiqP%EC}*hz;rkcg0n!Q z6BvYJF%CY#0_`qD?5`08M}u6aqM8X~ z+q^4vHLka}wR|tzoOz~3pUWN!4=+hC@0rW7YoUcxqP-ooZYu4|cK;q_r3vgZM?z9K z=A!MlP?&y%z&RFZ*FbuO&f$lB8D_R0Pq)Bcbic~h?}w09!H~MC=XGL!39h||o_2!o zP88@^x&)@-!TGjnD81gu+g2vrCqOWYmXCp*`;z7^@akmhn*kG81B1$qP3h?JfPtA% z!YZ7;!@%*qU;*tgl0K+H2U`~w1?N-Mxc zQ(O}&mBRuY+dxQ-(pQrA3`)xBMDxxd)xb+5!}t7m3&4^c;w`fPbE=HY2%NMxviaB# z4_->^Pe3G=H4Et)Ewro(Ja2lvl!UgU;$oHx7^2CiF<+zPY21z3@e)5?54<;AOiY(TWDdI{Ba0lxy5@WqIgZu;sH zxuGZ-8gXwSg!&mJp|_NNxbW>M@KM+HYz&lZDO>l{BCuRVr^gl|845gfX0MIK#2$;hAMW*Mz|JB# zh)d4QyDM!PjJY!njQCeL)K-M;3Iz9+)aPOGgWyl2p~`09xs*#6)p}%NFXfzok^3O< zE#QDoQ654`5AwT>cKY$>vzfjW{$HR6%PH+1xb){}!G+YfjJj`zV^6~OTCv9Iv8b3m#)9-sX_VJK}rt-);w`Jj;VK@BpKpT9iKECj{=Gdz-Oly4Cx{FYJN4|lhjeulOm?$y_zwb)`B6mQ>-thif>{5wcb8O?({6eBY0MpH2ny)^30EC z_&=3gj<<&fA4=U>ucOe5!flDUe9pW4%_)ka9-d)pDotT%%Cqc$4_e&gjM`&_9~ZPN zTvm`*2_l4>>Z4xe>0YqBtIjXK!n;@~x76%uLt;-S{qT6R?5wlInS4v7*aag}d0s9y z)g(E%&KZ+;s*MU=`PxGMrCMouLP!C5P7xtlh7!0OZ9{bK-yM>^jko3e{N&?Zff;x5 z_7tP11E?dgUz|CLXAfbTjtuwc&t>%ErO@r{K=v{qtIJ_lj9wSioyT{( z5Gt`tr@xCv5mU%?7riZ?*B_5JjjU?$!98hdl`v~=4te3- z4L9m>nr#o##v$O&MU02jk-bsEAfPdc_ojNDv7|}td!Dif)4vLo-3u(8=t^9cio|f2 zBc_|dY28UdpE(I?OXaKfJ9P@*XTe1>MvgUt+`f(Cryf${Cr^=<4bM--c&-IiNF4(&@vGF3goloeD{1*)N_`+8TBGky63L@`=+%XAZtcb{;MPuA1)-Lh zJT!Tn?=vGbr?|dN$?fDh0jzS7{UPA;AeK9BB+XJRmMZsw8m#%Jg5}8(TI(el>Vi}0 zTby=8rqafJXkUdI#MIGX@Pdd_S4#MqKeZQIE%$0B#^naBaJa8+ml>vW{8GcRd@pgM zoPbepGgb1bGDU7|^)1IpiQ@;YlrlROpR{u^pLOXY4I$0MKk>5ex8Rxs})kUAXP4>cUM!} zbF|mPPLhK=*=60Mk%Q~8`1;^cMYKfh z?*OpdBW{-Qro0{lgdELdX!io{BjHD_aGLRleH2K&0gqH?5##0SHTd#wJdX#`i?CEr zMlYSqDCww{Z?uF)x0SX$K&p1iJ0bAovmEBT6Iz0wi`YP@*I?kgk^h%@)?(QmS^p0) zj&hoAe%ft%1HQoB9FF|^Xy1X*k9xg2*%;c?#h7S4*wdTg`8v!~!|=V2@Jo4@U&%kz zG|G`q?kFZ!2`w*WWyxsz6V%22?Du7WsGEUW!WrZy8XodgYE+g;G`l 
z-@^%CL)+EIu0=YUN^9SVRa+({AI1B^2t5k8e;g}Ar|RobzOqJZqtuGb?;>xln=gY2 zuk$`DMtnI>QrS`DS2Nm$7R+I6I)Leq=)+3J^%Fq&Zu+lvU~09Oo#2_}xEZEKnM+^o z?<@TBQr@^|U}L-&+r!n{@=cpXz9c`{3g(zEt%PYjZA9K@NpkC(gGM&fx`U~)FJekK zm#KuAi)?&vrL8-Tr2H`_MsKCOvDm^`v~~+A++~4z+#}$Z zy>R3#M{UZbJs$%<`gwLlU5x0~J}0BR66@6{^%%TPEq)&8hwvNW{xtGk-q#82Hqwq< z3M)tJ=V#59*+uz|w(YX_mAEI3+G0l`$X zT;rdeRdR*5NYhFK^sYZ7K_p#9OEu>W5>_ko% z?==(N&C?*L*-FxtYIs^Zya9c>QpDl3D)r^-q#TTfdld@)E*}Vzxn?vn?4h@f^&x5eX_d~mUFI-_by2upx zkG5wG>h=ih0@^zb>^g#St|j%U+&>8yDu95Kntm!}DZFnP;ckvlj=S{IUT?w%?gPeG zLTgiirMB2U=&^^8+Ye7&LoGLf)5n2X_tIKDW?RU&kh_Zm&&OXk3HwkSy_)bjgx$qB z%H`jG*k@vNr!iu8Vr4pR>V1RIu@4I>X+}%8O&%?w)+NO>u9mQj6F-IO^NdCfDK(xn zO56NUQ1S-75lXfB!HOyH_il^*j_pRDmLk5I}8nS<%QwY(m*>!G#U zEG^(bCGNRksVu?Pq&?gSX*@MBA)g$bf!Cbvv@Vs26W07?#9B&Hg zD<78vmC&alh5EQl9UEzlBeV^CQZE?;2Hg#0{mi!dgB++83>`#YJ8l()^mwFae(sJB=>W(%PU z$l(E%(nrZ7&qeO4BQ&F%+x7>kxiOYs2je#rvXq{_E2NblKKo7Ze_P?4+|asob8hOl zyt~-Vu^0@+NkKgk=7`9d(;Jj6cW$p5Va`K;4r#DE|7~H@dhp3~;%){9X4CR&aLK{6 z^v|KiYVZMll$JDvR;$x=+Pp0#(l(ohtSbL=6!IgnQ*9)L2(D8&H9Mq$%JC1tmCd zF&vBbzrcdyDBby+aqy1i!QUC98 z+!8oBEy`=;m5&tluy*eGoKI>>15>{JdC%Zj*6wG_lQ#%4b5(%!Y9Q!BuC=@iFJ+7n z(!bIutCcZ5iLv#_g++|HcBYhg3s~UzTnd)@Iq9L`d&0Wz(ZwS+u4 z!%rC}6&SZ*K#KHG3VXfqYD*hwvD~(mTJ`yCB=r(x*uhcK1llhzx5k^H8b5O>b!VN* z<$T&viz~xyZE~$#g6na{u6}5u=JdhaIu`X{mCy1={-*Y5om6|93ztr9=<+JJFN<;J2;#Gc+f0ZxH3B4De7r zKcw_0=!<^#QfhR;=LwWNJS zJ(4F64t0Z?Y1YH-4xt6=8GE8_4aQdNg3^ye*K9?z8VkpGBwEl0jgNy5RYT(%3TLSV zCpen^DKqy$>*=IimxDUXDy{#TnvV`kaRRyBe9;T1bvdT~3y-V+ETQuFq3~t5xDTO4 z@_^+*AJ^gk^U%G8*!o|?xAr3OD}hEA^}P&qQ#ZH~o_RHSv{D`6NwivN|3>)p+Uh)M z5HN0{b=B=OJ%)F^5kgJjpe*r!RS^4A|uG5=HtwoIFcApmf$_xAYNu@sr%TPYJ@eO{FH;e zm1Cy&LfZOcR}163k$P{0r`M=?I-}8pzcDvm^}j7&8}I3pk(Lf2eG{oyfo;xq%wxPt zI2-S5u?H1Z;fY<*w?->U>(3CWrwy!nA*r9&W2-W+i#!}iJgTaAA z_{nK=8-Oywyke>Uj7Hr?7cPF7sx!ik%S&u8;yEFwj5eQh-ae();|8l5HE-bOoYyj9 z{1~1el0CK#8KIM9HbZk4Q|gCFQ=vtF3%%q@g42-xTYy|MTtyqzPXblzxr$haBfq?X z+FYUWIIS&08*T;~es;4j+OH4yn2z49#RA@$0!6)$tIAgM|Uc+Rb 
zTd27?=I~yG1NwPbKdta{sIY@NoE3e4j7<|5JB`xxQfPr|<&U5R&bO!uf0LfA;Qz!( z(}q1Zi(T!*j;T{Fh2`)ZK-ov6V`X8Q!daLk`V~BC~ZYr46M~#<) zCC+42fr5S@4@cMYxHFvdSzvw~oV%aU`SefjHHqKe^xzIk|2dGUgnwlb`R#)33__n8sbN+0auW?HGfFqkpWBU1~VPx^Nr z*_jn<_DUa*cu?ZImZ5l%@s56ZNpU`xcYQM+-*hznRM%)BuH;9~Dax11>+J1M8B*|` ze8xNlWtJ&@UoqgV5neu-wta^bY8_I4C;bv;d1)i4!&JA2s1JN?`$ z?R%845I$i(*NS95Ybqj7u2ImpZu!!%W6~FI2}yM#YczfurDQsBR*ll}R9Di^EDW{$ z(wdyZ%f+w6~q+W8)H+UTu0G$J8J7$Lar+@ zoozCQBd=`Nw$$@GI6Sb9!QN;cee_}ktsDk^XWh9bE7w?TrllTfxHaswI|E+Q{FOjv zm%2V>ua-pl*0DbtgR;zdvz#3<)Jq*ntw1gGZtf{Ni-;>V(_t)puLRc5!jAh1ar*_d zXG1-68GE^ddV@Z5w<>i}$_m=j%xa780VfarS%j27lBX&?+emubrfmN{diDrlmHMbs z_9}886+CP*)OSGSc$u88mgtK*-2B`M{T>=i>aQWkt<0>=rv+c+*G=8i(7N`42c6D1 zT!(+m+FWCLJ@*Ar_An@MR`7x+2wxgp?NsEz&Ome}_hImpKHBvV9-8?;rWHQ8jHhRr zBm7%>?3M=QGb;-K8B~L6W^HYq<)5Fxth0JyH>qe_S_q5w*KQE^)n;5qgx|?ZD=9l(8{jYY9tfomi3FPMvWjz0Q!*av4mY%aF7N zeao4CdAFR++B*0h49%7J+sIQ&^hSu-T?J!o^F&6b5wka;mLNq|BHX9{=}PU<-pZAf zciD=o;PQre%?qtR`S+rL$0xSyDn+FDI%84c>1*`q4DRa~U31E_pJZe^fQ_?AZIoW} z0cqc#JjRvCeWWk>t-L`>Sr?_`ii~V&aw^NtwNq|j9T}8lIUZE7k-BoUUm=w|q?>*j zH=I!}@OB=7C9{ASEeh#5`>E{l1Lv0Qvvy5+*-jy&w;;oY@>~hNbEfw*IL7radV;ckP?gm~@DnoXs*@SZ^%Zs&dsspRa!PRguC8{Gio z2ZTLZLW{VKvb760hrGUvo_JjISol`rhdQHvkRi14P3~ErBY6!aI*&dL7-awMr6fIr z9%wcVS`=T`FarAIr_;ZEXvJ6HB>m9#3s_etVjEZCG~IZ+*0XACB{??%`OiV&--4fZ zqC@36BjHev)>P=?OiI$uxs>*viT31Sqt19vV=VfB@|WuE^r%^3;u^o(JA# zpq`1q;6#42Xsyut9Xa0*C#zAe>(rd1_z-$!0X}J?%%-giLzni*pekjpCA6P(TD{|W zeu}*5IT1?piTN~r^HVbVS9^ij3i_EabQOcMl20)XHTu1l6&#h21@_D_6pt?nyb;SR zv6MKCF>)fu-NQ*d$i9>3<3cl1qF)ch#t@&MJTQ+Q7cKmSkG_LVaiK=NljWTuJDtBl{-b;^#VCk+HvxGxoh&(OR%6!<0`F<(Ai;dz^Jrrqv97647%B-~jda3k5cqmBn&pJhRt~{NlWu^lOd1)Y@ysyK>0wpT@(DOnfH@;T8M@y4xK}sN344cMwPb8SAI;N zmcp%+iq6lfqkR`HGKjENXu(aL_OT1P_Lz;;jHSL2Dg6e<-_;pa;?L*bO|QyFcM>Tv zzCaD`e(9vQ+N@(p(T6n1Q4oK88pG(jR!1I|SerhlB|uQUS_-(A|LLJq3V-Q7u}P~Y zOA|t_n$H81v|{C##>+|l`RsaBJ;Q1(atUh^cg6YitJNY#Nc;48e)*)Wp7dzWk`JY? 
zrG-*9MZVI$GR=Q~N%M`Xy8SsVm_|%Ki97m))r&evyMUf(6$lkmB#n&a=@_)*4cwjA zQm#Aau_=6z!pxN80}4&(Szm=R9W^29RnA4}*Aha`RH;+d0yb%T6cJVfDoXt6~g?Gi@oe_v(sMtZYtjc{Jv<)u$*1I) zw~+fol$~07g)sd*sq;GzsEk&cii@T?iMF)D{hs9C`SmJcdA|!t zp49tX{rVEv=!}?{@1;fZWhkV#{+^*~0pE(+UayC{5A%3ZUjrZh6#e)oxR#!bEAXf- zfhXQV&-;14ks5@uJ13kC{uxj2C8m||L!sbB_Y7r;5SQFj&GL0hFeb?Jf@c2RcjDf%eyf5Nl&>bGgj zn{cNqX@?*9`XRijO6%WG4*883Jsci2h7?Kl!mbIfpnl~V>Kbo;Lf|hWTwS)C%MZv3 zMaL;Gk9zcJV*IO_`lgPhbUv9DkH?;_5K{2;24O{{*tzcSG_??!&u>k{*g_?syf3{* zXOJ`3RjAQ?i*}exzOgXuHhWazZ5{Wi}ReM{AiuhhZCZ$BW)Di zovpOwl%dnfD;3$_uDIJS%a&R*o&C(GKm1R=l2n*dUQe?=13BG-$fqwT{Xw>{Ts>l0 z>Lck#*h;(1FP++#taBl0{Mjo%f|PR;`|&)OpNF*PN~V2@FRyIHvBYPH`PHk`cRM3Y z=_FLvQi}Rf$(tMF?MIp&u{=idkjSMb=1NEPSZ&NIt?#78Wxd`FUq=t7M!#}&)R2U3 zjmw(LORa4X`E7}sn1@Y#H8dXAOI4tSb@)-fPAqGLkm&mb2ASKHWFExc3C*nqlMA>` zJ3f&Ol&|JGgq2af{G0NLz;wCG0)Eb;TSD@#I14 z_CorR^C&ZdLrkFcqu?ILqeJ|Td}l*%9%(Zh>Gd)C`3ySA#c;CQ(Jnp$R`ikEpD;d= z+Aj&u`YUKmhsElPlc;kS%FN?H$574{;O(Eo-<&^nH`i)fwi;hn52Lz>k@z;$dp0rq z@>YdEN%gbPVh&>@DugTFRsajvGyDYF+k?ERrPkHL}L z^jBAb{*}Zg9>|}h6-W7U`d^E3Qd5^l45x?w8a`O#TBupeJMS5%kJfT3{9H#fo|@bX zYkTBX#w0HjKJDZ+wRc0+n{wZxT8mVC6hA{!O53|8<}5c7mi#68gYD0EpN>ZE6p_-@ z<&}ENSmfAW)2farIpgl$4*z`LDQLJLr#h-0pHK4aylb^ynx_q<6)K+?6YaTx|K##& zR>{ZYTym2#pXDb%@+sHkBn6jHF??QNOs4o3F!lI=S#mC6R*=R$A64$+p6_0r^}F?{ z$+?c+aUE0lotP&1Ql=Lo9(;6Yq5*A!5A6@NweU2TUQC21 zJVN^UK?|-}Dm^KVfLhWhXj0x=W6Wxl`j2>u_T_GgGx-wneMtqyoXu;DlOSgm`RihpE z0bf@-Tm!eSlHzvw?j~Bfi4>Etz7C}~$JWPp34BZn??5()wS(!|@o06U7-Qwbi;UAp zsPQ(uUOUrDj}Wi%F21-&$ImiIzm{BJ56BxvfPZNVFQL7iJXdMuHd?8qTR|T;()$`M zm_*N-;J}?x;spNnl6{|Xw+8XBn}3g)-V*os)AAanETw<*=%4;(wYL+g!Spk@C&#Hp ztK9K5o$*DY%4|IfwIPoi%=zEx(x>&qBJ?6xEuXsnHtgBHxYF;0{yQ^y=~4@yfCc zOCIB)X+4axFf0OEKKbjRZhP(71D&KVy*cKUPE+G^l~+Qc3_pSQ%OXc>=usaJWHmtO-s_n~bC zshe4iDSur^tagez+#alVQ~oa`{eK`0QZ8oev~7|a)O6%NJ^aKrso8N)UQs|V1#hlu zGF-fu;!^L*vdVv9&;kxhllr}r|D~pMT+mI{r6gZRTFY9))tKqB9qq)HB}&%?Z?>(x zYW1hoILc7(N$pj}uk_zkc^V9smZ{!NuYJ}llxrfj)GDFJn_KP2G1m+J_k^o0bRwD 
zCs%{If^Ks>zeTQ{kma4cIqJj6Ex%dE`wi5(H`KNZ6w?MprnEAh%kw#L2QA1B;H56?$85)Q5}a0T`rjL6lr zbu2meAgxkf9m?~24giPdgQ>qHUP}Hj68&_-Z-vL4M@vUSDO%R2l4mLLgTNAJ)xJz` z)Hbg}FO%kLRPfLNv2ExmAb5f76v=!%s+*}A;b0nnp8^Jnt7{^Dv zHAb6N{Y9Pa6F{LeXn#6zu7KIw08w#|>4>jAV}JwndB*cw}?)5 z>lWxkw8B&3yWB&6B=>GT)3yb1w9% z3|@`iCYK%rHaZLQIyC1jpC=9}1zcxS1@fyS^-%tW_svjcL5h=j(>9X>x*9NR>F3QW z_mK??p02*0I$-+YE{zl|F+b;y@0-FxTTKaV)FC!x8P@JcV0-KDIUDXeYpLlu+ByN+ zyo5Hm8}B9ZuYsrS2e%te3$^~V@V-{h?X0}^tz&v2RHQ^}g~qiHOt~w1#|-Gj_1~_H z@#xddP^+iC6r}hJq0gbCeHbio4Z?GTwjc%cXFq{Idn5C6PXU`3u@1+BOaBO`(aYl5 zFLJ1_dLU5{=H&?-?1?RlS z1mLPQCdH>O%<~zx# z1K+LDveFA?%+B=0yJcx>4lb4)8d~wXg&Kh-1=S@zkCtrNFANibpRG*bv zle6TG3jrT&oiXsi|I(eMq`g;`S0!Yp z2sc)aU(ouDbw)Dxo+`UMvQ{gh4-BnQe;XxT9#HT@3@?HaUj;%7koql*T??fii!_`- zSweg+6muffp#F9oAz4F9$Y-DD2QTw!IC;mx-<}Q|y^35jbQIL?eKPfQQ@-Ak0t(A{ zpYDj;$iD}~A4>~UPtM)@DJR7cW&K{@=Ida^U^s&38+PGySr~fG0r1alczM*EKf!$9 zrNC0n>M1Dccl3V4b{kR}-5p2_2ky7h2hVo6of18RA+baYcQ$I3(Wd?h4%kQ8uaf6r>T-|G61eF{s98PLmAt=! zUi2IZ9Z#w9{7xmqwb1)6v_Kz%>(&c$RY-FxyhJ#N2~VQ4y+zH^{u9jRX#wcDoB<5o zcjJ8eos{7itS7D?d~z?r--pEQA^oR_a|Y0lzW$gxE&|Hq;qwJK^4c8Fj+lN&C9538 zOMLR~O2_G7k~=HXUX!+zm+JDiCt>QYM`_%-1##Tn$FC5tY_SB(ItGk;E;yZU1Fv!} z-t|pdiB)hw-N$!l94eGpM2t`5(22 z8aTL&_bGVEx?<$4S8Zqp*gKPUG}Tv~m2HxSQz2n3wJhMIdZObD(WHDo?ZygjPMg~LgX`eW87=L zkT&P(vI#j?1v%fDJa+{4(5-l)d+TM*3a&SwH+hKoUxRv9K~pdDbR(s!Z7&E9;)v+2 zFx&{;=y~YpS$}?oyAX42>~zMtLJGB?Ufx_I=N_hB#!9R9`=UMcsbg@PC(^yY{o9qe;yv5`^@x@dD8E2yx)glNxp)NabxIzLs`WxAw<1|KTq@~u%*8JoPp(>%(Myj!bJ!qrx)8I^bGE-yo7#&dF7;ovnk z{*|uT?|HP_)uP>$Q>~}Tlw~~|33X=?b`cUi{VlG|Q(qiW=QMhS^j6vT&A@Nl_L7`? 
zb+I&(-z^0E9b~6~= z1S~Efj~JYqi|YpqsNqEFA4M)@gx0&-w`1d}aVfc^W?Lu@52mJ-P=^%VPOCl{W88*DIEb@}mocJWC*Dtt9gQ8|hrYFn z`aGRuBJ@|~{bIP6_}Plps5jt1Y8`}bR-sL&@$YJG`N9HXR*-HZ`3inrlv1I`+NIy5 zWh;nxz5H3Udm9q&ZwdPqaJw15Q9qpUOh)A@xT5@A3AlkaTC3d3nKR|(Y)0}CpmPj& z*Q6c^XY>TaOQL@JJcIF95_(d=7TPuk&%d*+hcS2FN4&fFEm!IMGCxmaljAffFgdk6 zK|WFe1HMBEu3haRrK8t_?7eo zDQ|1EsJ&iNrYp<;@9t$Q2J$OsPVTOkROU^8s+i|#_R{#lJwa#eX$Vq$7Q+1oO= zm{{^+c}M0ev36oeocTz-lKZ!jUhBg7zAVFBbJ1qxB^&E-6lKQnYk8g>%iM7cd|xTSsGimCR1jgs)Sde^pDs1 zO38=hJf_p*tX$2Xr9Bb&WXxBg)9Fa#CwTH*h&H@IuS7r9h2(y$ZsOMg*BuF*>}#&n zaz%U-Uft74JscU`hX&CFz8il4wb{N>8Z-qKwtz(~_%@y+JWD<=(wNSxHE>&Nd;_hU zL5UTpLF$OfJ z-$F~Y*1b0J_Ht;p(&i`8ENg_`Ob%iC3>uVttwPQ%@CPC6EZj51y5mlqYh!-D0*@39 zy`jY}!XjKoidl@FoT{7LGr}w4Xnz)7DbKl^HhOMEFL1vZAJ>sQ%?!+Nq->=h*yV_@*3yW%=30` zQI=%;y=+?<_CMdXZ7Te__*u&upe7BPv*bCc@d*3k9ZIg$!=_R~uFT3>bFGWdZvgc% zk!nXybuwJ0J9P9R!8O#n#$eU=AqmwX$H8w zDaJ_N;mUA*ZQ8XTik6sGjyRv6mwWa80lag~OfT<`FuKD?c?-Rt$eVKcSCpVkcOLaP zbbu!b4gl}=Lpk|%BCW^(vR(VL-)v*0^^K({k6@*+wW_T8jbV{k{VTr$t?R6`E>+30@> zY&(&%J@M$8us+Ov2)b8}nAo-~B#DyUlOc+TDXBVB4gB{*-VOPedci_Ml)x$5ni$ty zCGOlwjkumD)4Ig>HT7Qu^9Pon&(+BBhv6Brqt~=d)u8f0Se<=Qg8I9slUc84b z>5-`NzK|HL1oO)Q8a%8u(wF7t-4RwpD)<+Ji^!LhWi1E9su^kAmE6s6hFkGBW@_`g z4rLg%d;naMqqNYT4a8ew)*#kR3n&Up*JNtB)atRv)bFPC94jpwFD;XN@;^N^S!U|5 z8C%<)My@NQa!fMzguq{USZW&X!)(H@Sf%tP@No}t-VX&gah*-eY*%84>swv~Yt(I= zM{~ct*2T)8vL#@m?JRY^QZlWi_T5;?QVHHP9en&Wsd}ISSKV}nuf^K@hD<{rlhb~Z zS+ECbjWE8Ieq0~=ge@0(p3dKdhxANvSsay0sSjF8kw8}j>##71VcJ5>v4__S3{XV$a2--Fn8?KvvtGzr$i##t; zK72Gep92z}=9#*u{9ayWdzEEU(N8Gplf-LP&5GHsPf&wr#!FkSu~2Ifj`R6d!5L3n zS27oY5q%L?Y8GnI#y%8vnk%o=2FkZTewSKO$JG~@+Dl5^g7!A@88dJXf4P2KeR0R@ z|4x@NuN=U8ckp0s3xCe>r*)y;P>mFFnoaT41U8vx;3cP!LpuB2MSKwylDh=UbjwL$ z>CVTwzVmNF=47lG@jCIY7I6f%H@v4e#=G<&WylA|^J`dYW%=4PLavB>87d!J1HI`?<{#F_SCZ>qqG0U*D_zFJY=KH5uA^51eW`UhL%gCERV{5`H<(Vz#U0 z(pSEbaNFO@zuw$6l&A&SMf-ah-4h9yf7;F|P@Sn$o)l0`uE*35vJ5IxIA63uw?Z;rV2)$P**WK-%(CXh&rypX!jdEN)GM0I|o6vjYikFh^gZ!UBsk_la 
z*GUPZKWFC3b~|?{ZhxQHKO<&AKxH@jseWkBOBf29@z_E7R-7AuKa}ssZXLV1(7blj z2wHRqPia|POo;1~N?K7PodQHYgFf>ht)798DZdku^;Du)rPn{h{4cpgxkK4;edkNL zrJO1+Ew1ueOpv$LfR$s_!_yjKcjO?Ze!K2bY%ilM%Nc>jvrl8{{9#AFq8*v(l&575 z=LyQ^@~$*e?l{NmoU)p3ZF{POXw!fY^J#~aX)2L6wF2YvO6VRQ>pq-AXsN@)Gf$IS zxkfg1BzFMnOLqlZl`#7x4eyLa;5?MNoj#dT=V+D=v|{+YzJX1MD~)UKDKAdRE=r#yLjlxwV;IU6-wrZuMQ zwgr}n4vIv-n)`bqr#>eo#2di1JK|?XTgF1Ejdd>02o@2$Ex48^Kh)w~9jOgZuX0CQ zXjP1j9LoI%spCyV8xe-uMcO|d(Bq}d-Fp6`Ya{*c+;Kd$VZ|)P)1r^9+<_>}w7WXd z7IHqMEvV30Ysc?*cvw2QkDy&IQI@-KToG6$Ecdl^Qs?1Vbn;5^&s8v=Cti!jUH)3M zQmnebY>WV8URy-3^4zgpVj!{V2Oz5_5ACBf{5dUzD05a)m#0aLqO56@o8S2? z0y9@qce$soIdsJ)TJ$`9)V}LSA5ix{JVxz|D7A?YH7GCV#qz1~UnA7F9;BatC4G@%*YL|30PR0Z6yvMJ$u@#DkpceU0yde}}(CqMPwrs%JBlHApD)y3?y zZSiYv>jT;wDD}I9C4 zL1yt*i}(sYa}qmE>0w!M%4^m1f~R?ustZiUwTK3p4XZXCB6LH!G%diNKf zi2NReM1L+Qq(VBq2F{-Rk`li_xsFypA;&~3T{ZVKe*1OI>kLEoo<)9ry+c^fQ~IQa zqN!@n+(t-Vt}wMn+IZVod7v)k8Pa}%^&EQjIk?wK>bjoT`H|9myU~hG0ckxkPh)4v zm9OW2G2A52`Cm%R17M!B5ZxmDQ()!Zn=> zOWv@75M|Dp@GIq}nw#+7pPsu*=wKi*lXhPp(%g|fpY%0S?0D%zFEMY&nDR}GD(GGL zUf$LCh6S|cCd%21!G(6<)W>_ynL5(zX@zHo{sXw9^D$UgxbYGcID z0bkC8D*9P%{}D$1TWFVSfykbWz!$OOPp8FS=Jx?0e-|?7AS~qHz{!uqYqXV|zeLOJ z;)I2pV%64t%oqF=D2_x+I1*`d5_#Xt`0R?j*^|6~jqh&=dgll5;a>|iJOF1{flT`{ zShARNg_TsDV8ye@!%M)Q3mCK02>%NBbO|LtiXHnT_5KCz{x3A(pTpsPh&1~vDCoV> zf^H(SpfP+)MQ1d(`q9WUu924n2M(B<)kQ(~w=M{b5=-4j$)M zh<*a)o{rV`Gw|UD%uDV?-xs4{nD&Rj`UlLC`21_E|G%Tphd{rhsrv;Wa~|b9K#vzw z_d)dNy?B3j2lGw;3+$&e&}bedJ0GUS z2eGF2CUX21kMge>#pg(UbCmgY(z&YdB-(pQggit$zCtVCg%rMvKAlE=d%!bZr?)3V zxo4r@*qW=riUSyn2dUvM;CnvQ>5lV-5qk%|oa3R!pYZ<>>CQtJIv>bAiA?_{J^4$@ z|7Y5MA^O0rjNnY#_aJyVf%ML=zteit)Q87Vx0iD8AgD}w^$gh6P_sKjoiQ=Klb?ID z!Z$GTaE;yOI)){5ByUB0}ck$W+cSH=@u8XSOW_@azwRqM- z(lcm{YKA8U?{oF|x4|o0_(^=*T0m#>uT1k_|3zwidMn+zswJz&=ohQh3R45ph8O}~ zIo?yDrz_!DPt&sQDB%n^^D|MNWtzA2YG~QL0j$+%(xMyq?szu=vx^M4pf0*{mm%N*$`oU1=gmt^qga)pv%!^bt3im2t*>=Mv&&T;6SUVp>`ohL$HGD}6prOEYEKLXF(wc*>EMXh+7B z7~V+tfA-Y#|1x$bU|)`D{P1ZsW1W#CyCx)&ByC2-6j5nXX(5@Ki9%^Z))-5hZ76MM 
zqEaEGX%HG)CS^-S_9YaCvW$}6_w&8)9?blY|M9-Z?|6RC{k!k$I)&q?j||Of#5y68Cq9fu&w&Bkgj~!@%1`%Wu!wLcs5#L(rU*{Y(g#DI3-&vf@49)F_<_iUln2e_)nVsk#(gaT@HF#C$N z-)nt7jK`w)Kwhqm5>-X%kJQMCo<|g=KLE3i$et728spw_e9YVACWxQoVbcjEC-Vhg zYvXOYHxF(HfP5NH-4Jyh&_79w1LRTTwRb^*#~oy_np8H(bNjOSxk{GNq7(WaMb$!d zec4eIva-*<2t)8Bk)luxQJ|D6x}+rl6`RgA(DDV9B$86Cwg)* z4VaF9$K%Nj{=NY*_(pO+h@knuLhbs6hJ0DX@NamT%ayE88$O25(PG>N-y87m9(aBa z?ov`%j;>BP*Fn3h_?nERqa_+c#DVoB)fr+4}yT-b_E2iG|}}^6lmlRY-B2 zR>RvVv~3a?(UxxdE^p!GZ8*BI(B|3de#;lvA=}P)QO~nYcr{Lo7s&x0g~dr?$~w?5 z@nj}_{*s6J2G=HmaXZ~xN7~i+t+x6$Pm0e~X;3}cMQrITImfTj(+S4M+uf5BZT_xC zAC#@u;;CfWk@S}0S`YqezIMB_l5cRn4-L6f?aSdX3G_|wbv7d2R-3!9&U-<i#gH)gG~#OKxcut}{Ad`{|oOn<*+IqlJZE1jS2N$7Y*t$npu!ISTm z9#i1igQd;G!|5QO3-Zyhn4so9D4c{>Iq&0gxzeXFo1p&i;(G)N_oJz2lEfn<+quw= zAJkq$!)_qIdY-k0TL&fI;uq%PMEG+g*f-0TfiwNS3Oh2{*+EUEyqN+KuH~~zp0PAU~^>82xQ*#-j1!cz~f^; zX^)FJ@8vp@IR{q)CA(CI)5M)%lD$P4-K-e3-kp_a#I3u0BKb-xwR`^Y3zbBzCPksZV z!*M5TVXkKnnW-t)cO@Sz$4HVb{mEZB+L=Ca4f#bz;w#aRKq*i5%zCF#z1a68wpmNm zI#Rie0EDVP6%tMcmh3or#XxqosmOOfpvA)qDx1-%efXqf`Lsr;`=@f>`@0c0ej}S6 zSd40ZMBn;oXQ&pE(GT;THT+lL^TT3dIR+EX>pn-By{+u|3k))&cVyA)NKT(sr0Kn^Lr+dkU&~ijF0#bT z6>aTr7>jw3*UxI)I;2p;oy<7wh}` zlFcYvW+2P4;Ex6D_fKZ#0`33&{P)VL^7-a@*+eE`IGt zzvk#yzgqnIE3eua)(2{Pp_M0p;Z29~dmY53&-jJg{MxAJaXz>+)G0H<6Tu$~|CwUh z(=xi}Vfg{w=|l>xSwP-M{sKMh3{Df;G=@H&sMX%;+(vT}rH8PX75XjxQ8OPc@9-Qm z`T1Mu^;GV73kG+?c-qyO*pdpT%(!S8uC zH-$fM0l(g4@QElnn}#mZ@?YTfjFK0DzNdcsvoKDE*B=g%+k7pqVwWe8#qI76l4YDn zYF}v~+4qYeHV{*CGJD$VF7~xA_&FFJv+%8+R^JA@sov>gJX**K`ic_Glv{?^+4Z}T zr&|X82_oUaO1%MgLl(C$7|rB2Yd{|Y<2i7xD&9V=%&|1yRU&rAYG-RL{ z`w_Ue!stvKzFCW(dA>}|C&;3k|20YQ6ZB`KYd+1c>S_?JeVSAotG9$VXhE(A)B6kI z`yrn62lW!z4X0n}B~@iNAM+wF;P6s3PgDC@a?4vwd(+)#!Ji2CCSV={PFvXQ0qd+F ze34f6hU+5S-452H@cTUfBeUIU&n!69MDKDIFx%6nC^(GH?E&w4q*)UV-D!SDc|Zq{ zJNjFV>z#PO-DM{CsyRhgx)IL5^}imuy-%z1rrgg-a1Bnzz8=Aq<8i7ReY#D}_2hXK zj@*IH2T1aFG`<1(FUNzojdWbfp6_5A_u^H~E_j5VpNj6s=ZQ zq3Jr@d0OqSVc!r28OwN?6b`_Jp#{ujT+_t1^GI^4k}tp_-fImxcY)hSbhTO0?s|Bg 
zs*R;E{E94Ak;E6|+6zCg$K!o){W9?H6`wMe*$li-WKnsy;51U`#oF88%xHCc`kX;; zaxO&13bvxB@wj=X$FSn6hVPxUUuko{zZ|PLl1`<3_B^CqW+i_9IYzEhF%MJTugJg3 zTcnz(USy9HvGXGdY$p74_7=J78=6fGBjP%KjcB=xja;3T54?E^pb+Z1RYLuQhN!?=;*`o>4JNnO0jC5j`VzyR)!A6{9XW>1jVUHcSiI z$(8pR?8p}LUaZRMXRhcX7Ia3Dc_p`c*Zt%`uaU(#`aPCDABz`Rk-Ilv@>|m19tJtd zIIBIf;w58JzsHH+@JBgWu}Bo_^+kWR&@*lYE|Ky;%X% z9BrAE{?4z#Fg{N!?*%Nmi4htzPVCH#`rX023CPraE; zYqOaL*h94Cby!4-JJW-!@a9dml0ygYWMAbPw~WrcTfoWv)W`_%6{3GhVebC)sf6dv zfA+iLN%{dh7x&XI*&c+f5c|{rl>E6?RMf2apS5Q%{j7Y2ZYDFFLdJD|e3miJidxaZ z$yyrb|0G)cTT;p%jl)E_BY4h3*nbcHyF2NBk9!07$!28q9X>T+L+iw`jDCEGXN~xs zhO&bCn?3M!-em?avqp(}o~%&geV+QUA||Z|Wdm3}_~1=^@16Ya65i*0INF_KuRzif_w#$8VILtK=i#%tptHV$qv)d5Q6- zn`zEin7pn2zFKa|AO91^dx)YBh}(7P+5~uxVwbNg*NY_H_kD}jeqe9Qv~m!7TA;BZ z$m7J9Ua0yG^hd?{S9#W3Y3QRgYZ<+I0i0_ReQ3DdMV&z(b9<0!OjFRIV>|6Dx2OwH|J&>Orj z#F_ahZehIWOgVa0(d|hsKI7grdbBH;4;1wp;6X!iqXBv@1ND8+GX60h_mkPrAlsZ_ zJObwjkxhLg-^Yo9cY|jB&XbPX`zxD#$erYWy=YipGRSq_1YNNks`Woq6 zf`XQ!{lD?Fkut4tZyF74fDhNm6*9K*Ym(~e`fmKVgq*(RaVCSDwUbTJ+mm*m=6+S$ zH5T6eNw+GFd?l{tO=C;w_jaLhY$Gf3fFG%znSFj#Q$-4`ES3*;lLa=l=G3#)>=1o-2j6b z1-*lCY8(w)k1x@y3&4IzMpX;1&wy`t*v{3$}4na8Apm)g^+b{d73kXxA4h0K{L7hu;N!nU^17PPdQREO04WfV|Ln{ZEeAeRBpZ1 zkA3vR`TwlJN*-D9DdXrbwE6Dld&vsQn7;*|l=;o%e(hNHF#i(;Gx8ALq+C9`(xd7{ zsdDXNqG-jK!LFVk%FZ&@bcXL_l$m2|;d%o&pWsHu`@RM5ZhH4mkXq5}jE!YqayXXN zAsG$KyJJ2pGKQ?1AL3bdqWwxQq9H$W8@hwd3F^J;|JO=iNLvOe5ngBh_!*K*_K>V6 z+{=j|+wjrZ?|P^l<~&b_xjxU+1LQATvb&Z_f2N&b`2Sv^V;NoPQH(=9s-=u7*7kP* zEy@Vk6w=$0#T-L^!KX40Fi|W#mM)E??^#)QENJKBL8P3K;K;HSC>aq-oy;_rGSBr{ z{*pf?f66E0K*@~$Q2g2vepeKD!np1g+y!}l5rB!t0LY}V{*{A$69(@Q8`5sPp_kSa~Z{S~s z@HVSZJe!aDftNU%*H{MAwKTgbO*%o)ZoO?*n>vItdNc@*r5XfyG(uAZssX;K>}ma0#v2 z1f%!0@P#J7!@83A^0UDnvuwE>#cEf>WQjN6#Jig@W zMVho!)aamQb$mS>MKxjfvd;nhOcgj@=Klw%`6D?FS2H7Tz437^44>j_W)`ukCZF*F zE}X5E7kK-nFkQ>?{)+mXVAT|NyDIx15FggkNH~sCdnIX{!kX{$U57_qfkS7YWigCu zie}s6#;KqjK&sbixr6vOSR|S$?qt;M9{hPgnQcgDFwMHm_ea{?U-^s)O_Yyr#DmPb zG+;pu@ol5rG?~W}+V5BJ{a()}(ZLP$_7v3R+=t}K53BWe6ellTOWRtZwIfcRQ{)?; 
zyE*_DvcKYL#2!x@E-bROoLfH{b#H<5B)PPL=dS45Rh(S{m!sf$ou?1O_7glk7vEO$8uf6v zC+_?n&tJu%tH9_E=Dx)k(MUe2m68XO(e7}02c%jgS`!9Cl$}ZXd%~;=+@3_?D!e>O z&b$CO{s?XdcwbEG+PZ$0Hjc&nkLgbK9rVJDPw8<(RE=dv$zGRgs|C7xgc z)a|dWc!+&y>7&Z*>27_H8y2@A8y+w-Q zaHtKFoL=!RYI25UMGjWJEk5HvwPp3?+O3*kSL`+`@!5{2@1l>E2-ya_oZGU1K4<@A z*05(TD`!?#tXJNJeTBk|ApPKZxi+m)ksoAa;-#Wa2|Kx1)(K^Ht%kqDebka>O-f(`V&e#oh9Ma=hl7 z?|6?LS#(}OCTV}eN!9rSVC!Vt-zf>0& z8~Amx?4Y|An((8Ui|9rEHBj|Ayy`9v>IUWN0Yar#kwXd^D&L%+K75Q|ap0vMm7>KnXX$mjP4`yHIl$j~kD zo}tG3EaW6uwA9y_E2iBi8sFghCjPOFl6Mx7@F>3TO)!@fV=x!sdgiZ|u*ZyzysYi< zU{8eQnV_!3rJmXu>3h5qFDSQI3&|*-B*``+N9Gf!D4%(guW&2v@StCci5Y455{}2ge>A-Jp~EvteS5Ol8MjuF z#8J4Po_r&1F2jwQ;QgKLTrckSkm+THFMGhxS1M;Fk7l=_Y_91457G59HOKNew}Vt) z^nC;+`^i}9@;$r3`0uE{MZI;ftL9k?-t$@I$Dw0d5%+J`Pr8~uPeWaJTaAZ#9)DKS zfygJb$*ozyLL9uFT-(DuGjtgjy9Nj9;oeM|vp;$>_Bcy6bG%%ACJ8sC>wQr6M>Bvk zaOfY}ode6vbp1}*dHRnN=*%kml^M(<=x-A|Z;RiD!n-qk(+9|WQ7tt-qdS+w;vhLe zEqZvG5>3cw5ZZS{>pXlJjypGi&;iY};I#nzwZJ=9>s`=#2ig`EaC`F}*OF6my@rLAoy(S|!u(B=Odp_XVbu@QsRiEA@(GCiC*wC6<6tMOV-qVg?m%%va8@%()JpQG&uWoC;C%C1K918Oy3 zOG9b$L|2F6?u}}_;rcSX=nCWA$>LZxHI;ln;Q5jdX6E`(k}c;rvzC4aUS(y~^M&Nc z(1=cS;W(N#M6NI!FCJ09se6N!8xN}~^z&}`4IqK`a9+j-o>=5^e}(U89Josh(d`*H z7-`I>3xCFsID6LIZxJ`EAve+C zrDQnM_j@2O<#{$1^sMDuKSJ~Iq+ISH*%x=>HJ%X@zX9tB+Vm1HvAEFuaPlh>or~@z z@R>jc-=X+DUhI>35dC>>3-gPms!q0 zO|DR4ly&FX|8+YJZZ1a}ijvTkHR&1sn&VeS#>zVRu0{g46u(PIM~MX=DwSX5zSSnh zb#lwhYvnA7>=D_5RXyjsVs-yn+DnEM4sIg?gvOCy}(lG60jCm`RHut?jRLncH(+_xETp3ohy9Jod zH(zbxf5n+Uxej(^uVHfWD|foVDfrhe?&K<2>#*W~ETMUEm08)zo3B!0ioeO~&GSE2 zkj!`sIF*`}>;FHWQfC7xIc?gVT-3|Xfih#w7;e_W)bz<1N!DleBAr`E_b{2m3S7-R z&8}i|3-;fGbiU#>veNdZ!ov(EsdlvOadEOU57LkP*YOdn`Qsb#_#2-UeBK6nAFsAT zM7$lf*V4_!?hXWbg{#-;Xx8b^<>_7|fya5tn^^Z6n7pFyxWX&-p=q~+)``y?%B$Z; z%b#%7jgNescUt03W@5&OQj5gCLyMd@dC@?T;WWOjkJ8D0KY?N1Q+xsaYRqr-FCxz^ zO5DqPPVn@7e?wuni0uEyt1W{~Yb{mvyq}n07lVl2fG3=VmY%F;79QL~mm3-x?tz!> z`R}9rKSDOpS6o=ZF7h^vWIgT0^wlE7QLcuH;Wf3pga+>`UTsol3><5zH6Q)S4C~{^ zLpc39`peULtGh+ 
zg5)8$YOgWs7SjFXIDM77m{kuZ%NJ#I=PTP#@p983qE)$(qrTnXFuc(E(lTabgCYy#c-b;4lLGli>dc@W;U?<3#I<$Qr(#1V%<8 zGmm+qdgDNeoOabh=72Zh{8-sTX44kq*7mSjikkU6&=K0Ij}J$ga9VzGRz$1*NJCEYR&TKU!o4epiH*L05<3DKcgj>CNzsw0f$y0BGzMSx| z4II;RiS1Pe^-N<(yZe6S2Pd<@FBDq7mSxt0-4&jPoApR$8J>Q?Z!H!BlIsir=}ymYMpw>u$^O(6 zKpm=7PL_QX=TC?6JL)XtQ%(qPjv-t zm>Pfe_bAI+g1WxA@HuSrj>|E{?=JK|!7m+s%mN}nW=2ILd(THi=WW;AXm z;reLH$Y*w<{u;#@k$(-|uhQ{pr1>cxUO+}o@ZwN7Wi{P{o-{*sJvcuKVnbz?a=Id3q#!1w`JP4{~ZoowVgyJ6mj{e49qyNSmd zT5$Ydh2L4l2Mr;a?1jr(xQlp>-Eev>?_LYN7qf&<@bC_QCu!p@Ho6@3?}56H`1~<^ z=hBXmxV8iNRi{}!v_D0LeFTs6wkOf2qfj|r>5KHPXP{#$t!(SwnPR{voAJr~#H)o? z{oDW5Fv=`(GkmxLU+(~SDjmtLujprQx;0*lr+HolmzU#eb2uMLn|?)lIZY`0k26lQ z!;g{qr|4Ra?3HJE{B*axDo3BL{4sJ5E8>6oEMrLJxh`dt&nl*j8> zowCL@bZaY|xkX;o6Bi~IIQ@LYD(A=TsKj5`*b?3`@5IX3T}~Cdf&}~WLicMYZRMSg zIX5V?a@kLNC3||QXf3OgE_9XA&yxCWf zd%3fY#azM99m5hb9=$J_vj|4fnTlx;az`+Hy?Yg41awM=VD3wL@&qywLUDllCT?VDp%KZsBo8lTZ^OmQ~ zp}*!YJ*hMNPi~jppu=!1s~UG^t=T`YD<8Rz&wLKNj8T^FxXhVaEqS8m`abE=b-nTRuvzG``Hh57hYd4#!Zyx{J^JO$~P^Rt(?Wq z4`KP2fsu9c<9%ip{q~VGva<-8G0oX5f1?PU^<0ne201Cc0grVbNoVEz*Cg|*C~_H3 zGK5{dLV9{z~2f?n5j4^Gp}OudR5Roj<+6&bwgesXw?Fn`p|9 zo{7e+^BJ$bK_c;9Fir;WnBSj?7Rh(p@y&g3<2KJ4@5GRf60;@?F@C)P;y}MLf^vj%o%xuo8VnaR%Uf0a?MeHpxT@;eHRw28z3l&- zTI7?N_d6et8l&MLZ4E_r_Oc!W(|!1v>=?|bct+gAuWR(4gvj+So9P5Glthar@^~W^wA$JZ1`xzG2n6@4XyX^|eK8^BF`X$TC?$}i%G!6w# z#kP;NoUy?de6dkKJU<#2DgIOr(k!2%TT}O(f+6LC%V2NnD6c8k2F<@oWF>HD9R?oH1QXEk&2s2SWl;X`ZuT|zJF()aYrUPD25w7rSH-Pm|T z+Ex>`S+&vw-IuVvJ6S=p{EVD$U-0rpR{9*7R4bs~4)5NDzCMZD-Q8VmtZOA6XH8~3 z5o(C`4rN(;;r^TO{Il{4Y11^?F-!RYbmajNeG?u01n(A-S!?m-_#!sU1A7*3JdKij zK)4=^^!ZxT@yrRG!tPcpF^P?3w`=;ki}?Ppd5NzI&Q`_af79wR?>&)TAE4Z;`2RO0 zYT?6Zxb%{*b;XZGEU=z7zk%;jq}rIj8?5c`$RwvBWWW9+Xv&WKtS9)JJ1e#K4jI3} zikqmp9{h55N!DEsCa0`p?t`137aBZ{wLC`JbL1Mai<|?M+~-ExTN|%`iq#owOD2$b zoE2*Sor_kK%Zm4NmC49s*8T0Mbe?VPx-65d&nLRXytpk^SotIBIA%1Zw(BzP>`qP9QEaL zuyPdw8_OEN)&5$z&KO%V;a}2+=0)9?@GyIuZ=|=a z#DsEA_jVjjly0eB&hSbedY-@J&AGFSzvPG61N=mr;L3u>RqJIhZtW&JSIwQ+>{e`LpN{(^Wl9xKm|Kx{d&mt$t 
zwaaU&>o&@$!%2QECL|=d{++?_c9X-s?9ImRA?g%Kk!S_I9k_ z_bXG!|C~hETI;XzN8hTGY#^uPoQtln>FyLZaF&*q@*{a`YV$&e()#4hQ1jlxD^OBq zrop28MDwx=B)>8~oD*a2hf~G#ic-m5bDjF-CvTItBG#|uozcqla-yeMFVg(Sa%Jgc z?G>M#hgWf3{x5UZatBLVh)$PTZcdrbi1HtBqO^@@LLc^iCJxr;<@aHsoBYlkPjc0d zS$8+y;Um_Zb&lCf*GnYGENv~e-H+AR;}@2Te90GYmk(ygLuOe1&7T~s^{T8V<70>M zWGncR{-Cbsv+mVmR$#rT$lGSGEn|}`0NF2{>-zc#wW0=Xp zXW+n{^s6J7yYO} zJ(*3<{-#t1EzJ|JavoYnWtVF6G#TkS_&p4E1DIyT+8kWy$cv7_weOWX2xa96=t)K1 zJVjfHrmNNb6ebPeKZzaZWRzXlVrMi@<%_bLIdg@zK$!08Hhr?J!OANBtd$I&C-EdB zS)^W z0({!|SVeve^=aiIF@I7k?hm-RX-YolOrhu9;*qR_`cfugF?}~R{i(=XFnz8AA zIFcU0k#z8PKF8C@>@-^9&Log~@;%S2CC4FuaHIFOMjhHPM$A9_f4(ysk!el=J{Uz4HI~ zHNpQ!K=~A3-^2f8Vae{!#I;!@_a}1iLk}(xRdP1kaI)G}`F@4&e}m#VA1h}o!~xR z%{_~(`CC*}#`(KYUY{p@3HNF%d5E@eC6(z~%ZyM9IQ$b$Z|a+5B)Jp6k$qm7tLch^ zvw4pCsM-sy^I`TfIX#3gtLR1xR{bujvo`1`65R{E-HXim9{kF=MT7bK-uz%@!M?$n zZ(-QI;MxYQpQg1DdP-S4vxKCxZYyKwC*#K{{Prs(wHxioT8Jemy^y|SKi*~dn-hEb zsQD1hYsP=%w1*z(c;0nRjOvcNS;^2JR#V}Ule~_QgM6plTRcKm;M_z0m&!f9cjq^> zFKZ-nV$CMcf2YMpc=sjF&!&^pik!Qnw)TPL58Cfs^xvPwuk0JY)p&eXHN1z;X$2m8 z6_GjX>DuE_Uzp~6rmiTy2HuNlUsu$u`kzdoBG3Klz2X0zDOBViSvOmejV1>vvy+Uc zRlMUcnRi(?_p&Exlt}gyw9Ls?k`ruAdc)N3_+xIfpA4gE0WmvmGfSSgnZK*_{nd zq%;QL>6uRQrZD{<3r_!k>0cRihkJDKlqj-IT$&F-NKwH?aS3n=Nj+^=DxL?>;1 z#%FAcca?CWqc+pWNKgBGWty|IoN1JI&6Vp&tHGubd@`D!wI7p3mh8K#;8yv6vZqSK;)X4wlKmID$>4wX%{dljU zWcqb^pHXGFr=OVpQf1FEW8uxwQj5LbCZg}BWae$^iZs>TKN(M=pR0J#^s*Ow)|MPz zKy~IXXR!4iG-4EuNW_>&h9C3NiK-iThGp)*!s3&W59INh@%Z<++kkI9iKjRj+-V|V zQ+)nNi87zQ!2k80cNdc<(%fa_^f$EEh10FFu9+}7pSOCB)@60qRB{+Vk{xBZZL~jv zo@QlCV}9jAxPDn=6WfRoIXi7RFMT#|lU4iK2imLfPKj5w@c30;uPXd{DA|WE+ROd% zv~dvXzN625J=?j6RQrf1cfda0JExFcO!qUkGg0Ju6ZZS_lXZ(MZb_lr@y1^&m#p>e z!k=c$?Rc5~AlQC{s!9A~#-oqtd$MQfRV5FW4>Um0Xs|1A>&F*-4tggZaxn_eSGOjO z%G%7FDHFbp!Huk1U4_ai{QP^cdkqFFN&jA){!Uy^wpyPgu2Fw9*;Qc$uYsFc>a5;; zMOJh@&6rB&6P20a*KTlqfkY;tEo*!~*6u{HV+mZdqrM@0u3_m}3(`$XJCkm8{LW6Y zksxF@c~!KHLuvLU{SLfK+2E5jZW4N0$W6*|-NoY5rDD$t)c1sKGc?VhF&Bv$;rt?5 
z!+Y>}0-i5{a=F$%@GB#5^9|E1EswQ#Mrd(wxzXZAWz&s4H1pRx^PF9-aJcnj^AGO! zWhEJr-_g^BTFdU6u6}<2&t<4M9^G%C`+U8+mSn#eUG3F-Up!BCI10SvTutGWvDy(Z z8^y-A_B;D~hT`@hS@uE@f9)zOPBLQs444C8zH?zw_3`OYP^!!L&(%`)dne!9K$0u? znerU&oSpsvP0Hy>S=(?PUHKC(*OY@cWa|&X{6#+HXxMy%U+JT+6D@Y33)4t&A~~K0 zmz>s}_4aLKb~VYZ7Hu4l+Lip(4tGO?YpF;Yc*hr*}^( zvl0$D?|U@*TB7w_H3#}NhmO3DqTamJ8Lpm$_ez-MRM5`i?ZJL$&hr>L7e8{f2)Yw` zN24M0F$a*qU+L&kGS0hM$!GAri=X<8^~?h|>nidlom!;QoxJwL&8(5#PmAAaeSo^{ zQ8$a$v;zAuJnPP8a&pl`67H>D`n?Ou=^+y6LsPRielR}#1Lbd%$0EP8a{fDYzjJRf z&Wr=^AaclwQA_FDR=6=1{&)DSf>CCT-cYKy&mvx+I|}CORj<)XPUT2OoO8Q!D&!#X zeLNj)f#*}Te}q0#&Lx{9hIXM(gS9)Fj$RDHt8}p^%%0_`m$T(*^7dW*`U2)T#c~E* zM$m*i@T3mkv7dgy-Z-8W(IfG>J$=293}=(@TX4D;WnI91j9!l@zU#yGZBg=-qSyQ{ zQd$by4LF_MY>%?xrLKE}u#Uvz-7nDIC$PShT<)MDH_)|e>?!Af&XHqgtU03=*^iM~ z(nd<=?BC19!t_1H!FyIQmh~~cc|k;%ti zMcHH^88*&!FYv%++1S?oV)YCLfA-^yecT(e5_S9tp&`>Mox$B5Tkq2w+j zrj=>NbFe(t{b8Pm%H;4l@9+w>7ZeyAgZ_$hqLXa}=SE;;1UMY-Ldw~*HjorUQ})(h z0Gqo-{7`TVO!MZ_^YJYs(_0u7${K;~;82BkYKgYo{SE!72FIbU^X`#>%H60&6JGx` zPe0K{c5P);{ntF%UtHCJ{hstGqx*SM_H?%pMOUhmeP5O6ctbEV+Mm5xpYa^&%j6w5 z-=lni5+#SecK2%6)%laGQAElEZ=?h-*qZ}m1j3r{QCL-yVVSR z*&UZGe6s7DB{jv>-7uW&b36P$_jePI(37PeNW+@3-+RU9-C1{E{^olWE%zkn+$93{ zSK@LWCfQ4N-JMS29Ywy*EcAUAK}Snu)1X5S*1IO^fGq%AG*+4E0=)q48M^4ygv_{*{o`EkjHq?rhMx2 zT5Kab>L$zXFRs*6YX;3|L{F}zPKjvAsT1qcrSi*q|)c(#yTEw5vU{F z9{{V2{56EnM6LBxD(9~4s9bW!>;|6e$$9Sg*KYFT!9@%3)s-16G33m-l>_ zZ@iqQJzn6t4nHzJdmqcnj+o>j*NSmBz+aU=C#_MN@k*>iOwoEyNt zn^M`8o;G@+Dp~U1$^0g9YK6OX#k{dHnLCZ`97oFi!MGm(YlGhty}RIUMq$q-iMvpD zFU|j$_nsi)zQvZaes(;*p6dT`O7)h9p*)7 zrw73JJrJkSltj5_;INUMJOf@%+shi2@?3!-;Gf4A@4}m$=Kge;zCyG2C5?GFa)KIH zfN%=gUX7+cw6YI;hvV1wq}5l+wMD$m{B(5|+68{y>1ld7ANiG)=AZkOGZhB#Njar` zt*d`17ah!*Ia!SnJ?TmkC*n}*u7t-OxSsVp83i2(W=216N8{^iXH@n~@UxbpG2Kg^ zpHt#8&N>0l>dPuuEAu<{k{N+yefi67m|i6R3|V9?^AaT<)Y<`Xsl@tDRywObqTk=i zkFpveYfeX?d6N1^v&8z^&m6){@Hz#q8OZS@^e-`W0x2f< z&M6Yd(!S9&wLeQ)O`k5||7wa&d!n+1h<-82p8=O6WGXpR`~;Hj#)<~RAnUO|R{CMJ 
z2BG3bI`)*S-OzN3mfzCOa-7WS@8&r7tSHipE@tih5V7no_4;bniU>&Tr25Nl3FJ2Eq&M&G(-_Il0ukfTN+sZ2WztY5;^iT%)Tc_RA zz`K~w`z09v67lA+(p6eo3E%YN7QruXM#@fs@BFT>cGfVxjY~PtGrg#8WZ%N~pU7zr z-qb>4uAg=PF@B~FY;$7K4(c?e@%_kfAX@juhlS){UE80ccN;P~SU=<+>}xTOy@BVA zTxGoX_(IFyGoEl8-!c{5841`)D>;2SGlX?XGHVwa(1p|REIU7Q!t5@dy`=TK#I~%P zZHS)5yz4F`)ey}W^HfkR*E$Yu;rmo;NqN#6_)j-nyI zfoWFipNHn7WDh67eU{(LaBHHn`;$lZ)jjTN4Y|xFm&|Td!Mp3(^f`2P7G2IrZ{qxH z-s~RwSW62X;T{j38L|CwEvv!0(%kHfd>>X_aPSLS{DqpklTTk*cY@uk@axEz?xo!| z_<4_#+4($8PBnxkHB~3Rdkj6!$!uBSbvC-Q^EWe1?a@Dr{bU#Q8q~c<_dfw|16gkC zIuUGV(z=_CCl|iUy@f0#r?#Bo&foaf?1$Oe?-BSq2;9{;u!pOPh(4^iDo^Upy{*}F zB6xW}8OQ>6w`2$T%edE1t1f>U$*Tg_V`LV|)qeUUkIwm%Ssj0&@0W_aVQX^!1z5>H zX8KM`6`AQCo*YNwXW&duZ>&S^)8uf+^48hkmD7VJE1$e0IbHcluJ;QTlu^D`Ki)0Z zlq93f+U4dfWwLtP(t@;^?_@IB*EZb$tQZ*PyAu8UW0C(>^=rDjmGFB<7!HX(Bw<`g8TXZU-`@tRq{@sR(yZXA3IsOj8Xnm`x#e%Nu8|fp0PQ@KAzXf zx`zdDt60PG){j!{C>9Olc`CDlclGeHTVeq?S>rngl&sR-3cOnE;$S$1>lum8>f`ss zoxDMJM-oalz8|WxzB#$_cJyPWmakJQCmSr(MsnEa3LGl&BWYs`626MpNETV144XH( zXGKg6Jjh$7YxquPS;==z_j96o`Vr+ULGF}r^4iQ>r7?DPTL%`9)#yP?a-syMlU} zn3S?lz&^Rsnab=fOE}xJwLHk<{<2D@6Rh&imAO3RI`YjK2Q7Jqv1;|_75m_H_WM1J zwwvLZ-EPOhJ?9>NM|08-T8iHdQ9jsr_NHYwen$QBj^D_Aci0aggY2r>R}9JO>fOb^ zrL?~VIgX@T$pF7)bDc=BDvoA`^CQ~S)V)Qda3ZX$gOY4<3H{n#OLbt-hQ`ij^UrJZ zE4r`&SEi%vCh_HRR-1VCIjDEi!|bG53eU4pyHdotPCE??Yz{)fee|veT|FCTMriv5 z@G?GnD<}_ZZ?rZV!D<;@y#_~*O(C`8QFT6`L6~&8bRnlR{Q` zc7pk#ppGo~vL`(|g!M0Ei5WQ_1A~mYt>B5@lhMp1`$0vWjOsabUA+bAa zT(Z{Z9Praa$ZFr@$KUXJ9o0_1;Ck@a;@!E0E)NGed!pW=>mRs3T>Ho4X4b6!hyPAw z&)Wm886(7>!l&^=ugBG|c`iCo6p~p=CRuV}jfyvjQjj>qdu8 z-sf7jJU`B^Woc1{+iz7DJVTk`;$oPU^aCH>&VFlmx1^YJWe+{ zlCjc$=%4S`8F=;>TN(rM2jbld<1Rzo-za+Tgwn&@JreeB!+9f|GM+acy;eP57%Cvx6Zi2(jKqY%LC}4nuOO`f{KdH>8om8YojXY~ySI;q)h$|VsP##_aX6Mer0dnn zDXaU_nve0n52<&iO$+gEFVe^v71iLf2-kYx=}k&^@pmGNNiTi`FY+o$?$38;q$q2q zvyHsyQEbpWt6|F3S|xdArrw@{x=pUq)wcSE3ud7b>wQpZ1k! 
z>E+&u%irSoGP2EyMW5qA*41ScYK8cg6VmPg3e4kvFvD!H3b@WX&J?D?fR&Pq}Ke;!ZO4dXx?g*9zL&Axtm8<{pFYPF z`C@js)x@RD_rIpqGUrdOT7$jcMwh0$KcdJ9t0?qqA##<>{ zX4Bh}-cN1iH0Q&?w0c5B&zTC%!4Fm01-APGigY`FT%NM#q!YHm6rD3d_owvaPl9 zTkv0^ao5nl_B>m;%6xAya?;r|;N)%c>3`f0!u?A0^J@!wnA6XOx|jDzr#F)|l3Cqy z8eQFj?Ih35yRVO>VHwZOhc$LrCglo~4@hma_HZ+&iBByunkQr0fj7A*U%0;SV~(>sod`o-fG>`q%IS z8KIib8)jzk5il~woq4wbxPQ&U1G;J-v-rnu^|J zmw%Eq%Ig_%Gmi5m^_32?5b{} z<-=+7EZN`5>gN4JT}iMU`8ol_oIP-x`&l!b5#-NFZ?!0vIMG3T`Zs>O1%sCG$!O#b zIFs4ztF$_wHasjMHHA+V()}&k@59aB_|+4SzS7d4&%ycvszrs8(waKL1JP~$oz-eIg>LxhgPw)@&t;E3g*oI zobH}8qwb_rIjQyzQaKL?k}GFzc-9!CPuX9rsjhxCbXHNa4qCHnD5v?(t}oAM{9E?RCaFWM8(-4UxVJ9L!5P;!DL;L``nK&S()+zKIMIt z1N1)z(%Qpu>^?f2to$(6mvcxi5tXm!d#(Yw4||xad`?rznCZ=GJd0a7fpVz!8=`5Q z|9jGr-e}n#FW$urjOE4Cc|YrYyG&348%k zPEh#|P0t#n{{Cl=UG@m%JyJFO?JGx0Cik?q+S1u~S#;*ubGE@8d}~88+4=eci|%QJ z_8=I>9xp;kdXhEi;bIV8#^p}1TPV{U=l^0YKkPew-u3?X;K6r<_Y%~fTX>bv3thMg zw!b8erQp@VgMlEv&%bBA#__oDGOVWHOx6kn%bfoE4yl~Llg&`_4Or!UPU#t=n<>F}S~spY_$b*QWuE?60l0WRu;!*~9n}obx8CYm|RdD;Yb@ zDv^wLw?}br*qljYCc3W2Pvnj6O^X~fbq>J!lW284uqUCiAh}zX{#X>ie5`gol*-1jiQZ;k~WByFjjod#9@t z81vna)n?wH1Ne9HYU|*a)lH}2$-7#=i-z2;?hLKhg4Gu!o5=JgJkC-+J%}Z^RtvsW zaN#ce&$^Av;Bh^@$Q;Z%@G?{M9@@r|YtCk?haWjjG3z^e)Bkt1GlKrV58qa7GOIEU z*G6Nw{YOsoH~0=B$!hd3dm1{EO5W-@hJ-t_;;&HHAN-7;R3*nxa3v$!gK5T#g+IB5 z_GV<{GLpYQTX`?pdN4;QaS5IH3phCq`w;v%2xVFIUI~Zd_48huliWEJCBJjsU2Dna zyRpjoG%Y70SL5%lhR-OzryC0IMN3URr0js~z`ylyC*JH8e(5BhqBAVA?bo(%Z=zx5MdUecX9P$-Pl_nHDoP^RNZ$UB$2^Kf~q zS$+Pj5^KFmM)o&heioC1 zC+o|ZY7d~T706l5l6{bqeP>5y<^uxfezuqODVbS#MQfG8-2x^1^Dm#^M|0fCsTkGB zu!pv{H16~(o;2@VOZIxGRx+ZWo=6Slg7I2hX;)B}K6g&P>!Nl!w!S4j&uD*e-QB(9 z$qUqa%U=_JgWWv@tQ+ZNb6S9(cSbijuS_-;wKzx?ODd%qi@4O%Ku0=d@Db@ z{CJh$_iz3+Z1ey0j542G$EO^-&pMZsEaj7{ice_&=~K>6ROaz>%18G2Y=!sfrEG!M zr}9oWz^pPGy~UlQ_|_VHc23nk81=8PwpBbuPF71)cucFmXX&rf&z$#gC_j^o@?*Bu zSWJ0Z*_<{xL4NZhIS=r>ow$;b;Jk0TKHoKlXUW@=vp4Q?QG2PFdKj4;4a0h3cus1| zZueO)vn9iWZ6c+r=a%&zwOyytFw(rR%dZ|}R844>v#*71lL@5=t_ z*9$mDM 
zw@N*u?ryx?YM89WiP!kWIdILc_N*@)4sN1I^26*cTPdPfheNRKtcB2&H%erF^$yyY zGfDpe!<>M02y4qZ2g$9^=7YPlm`})JcU+m{>0V@#U6ZFMGf~@XP?NRZm+;_q$afiC z?BY77iX0`ToGx0VuhAagPsO25%p z-&eks=c8Sn>27w-PvDUcC&h>Gc@;@!hvR6Hz7n)sU@$^U9Z)-oe|e70>?ML-4R+p& zIUi;xdeQ-v8SBe_%i3y8#M7*z&A4k$gzBfgHMq42pS}hoGbVW>!)HZ|%;@&>^0L)1 z%=-C^g*THo-03@KB3$BXEG`Y;r`xjI<+LzkNKHsDW7~PJXig%pCd#Crk+m>&d99qP zR|OwV=U?(hfxNf;9I%shU0m>Bsaje2lC$lit1lOk?R-$mlkfH-#jJsP4p%cj-d4%c zT3Lx>IZvPgeoX@Z1({4!QpgU|sVp;l1#2FKXGn_pC!RorOiJUo{ zQ{C3WVx=cffYMW28DY%ZL~8o4N8FI?m zGgCx}tnSVj{8Ue;(c)j>R^EwMADzj5JMr9E!8HyibKXSWp_jM)-R9XcExw8RhhU%8 zH90@FC$G|9=8`w})?i&t3tx2sC?(}1VARCZziRs>99YbDPJmH;_`c}g!R{naZz;Y` zfd2%t&ur#XO8gn0TG4>>SWko3YNSKAI|o7ySzSUD!;*6s*0RPX#W%(DYM+Xb)~U(vpaAZNl(PblSwdR zCQFQp++N@up5H{T7OOkL=TX!zR66?^-$Yk-H{M5ThqBU~Jy)Lad;uErp1GTS=WTlN z{r@7bW667K-#gR#zW5OOk5V_Y#y#bQ2lDIL<8la@w-6t8A)VoHn(avgIYL&%G@yIe z;Yw$kunP^GfLl+}+PrV29*h>s%y#Gf-uG)t!HK$2zOSayt8$B(ehjjYXgndEt#Dl6MS$r*&R@H403uOX!~MTYD3ZQg@fdI%@;csZjb zv$0uWmtOpZMcjN-E5A`^lT6@iezPLqzm4BWZ{clN)dMf@mTQmmc|Ueeo9!&Oc$lmv zkX_D0C}#mWz@!zL@`jc~lDE`LKPf8`v%_=0A}=bxlOZ)Pe*cs!l~>L1x?;>fXFz7v z!QJd`SW!N^ADh$Jjv{)wl03Dt;-d@yp4ASMH$%-kI?lk+Vak=OzH?SYR<~qjTgIqU zvpRd<8SQ!V?erpd{C0Ewtfr{p`lpkJ%e{7GKOw6~a?WwyH_-yDoSB`hvw2bbZmm3l z-)SLlqANe;8t9Jlf-QK1VLs^-J&D?V==2bnRF*I0441rLtvQ^UE76eleyr>cy!9b+ zv~libJb4GD7x@1R(s-G6l>LRYv>hEem~P~0)_Z4Fdlm7p5^YPp%x`?{N!FVDiZ9xz zy_}()8JCOPnMaECc&p*G>-eJOtT#%pDQ`X7kydS8%vz*Rk!*KM6eWlM7RQ_Wb)Gf? 
zEoYnjP+T46?vAKjpzoMb{nKERmG9?+m^pxoJUZ)=@($cHl*_)(^gAjmoApDJ3vZUP zmBoe~+2PeRJQD1KmfG6Nc{y9KhrGWnW44()`KSM>lU~|L?WRODFaIO0jP@rNPrJi> zZ{^d32M;&@P~6Y6l>JZfKlGOO%d6^qP>teAdUnAkTDb$fqbI4^y!g&l`ANU4yxJ0e zt#I_W{M8|-=!qj2s{bLI`b-}vl%{vrOR1bTxeQg4+!@Bd)?p>JU2n%ucV=rt!OD5R z*NZ&Oc$U*aoJTW~9cIPyI#!w)|9HXqBFQSx_VyVH?h!b*4&-Hcp0#_)R*z!SIW2!8 z9zTNf^V$2$d|)SbmR%{G!8$^m$&*7JcjCX&ncO0M*)JFe#Bt0tmmPAL3>3?D#kGV<+w zA1qqE==XPI*hO?ZkB*EIJzk}Q6U40t;c*!#kK=89J~$^cOkv%p((Qv-%jZR2y&X)G z6U|0n#)O7yG4Ga{N{ z7ZawELmBnzcs>U&E+UhR27azyC|d>Q91{60-D&Fg;kdF0|DWPJS7?2y=jXD6yvd~# z3A`^NKI3jq*B(wLH<8C`T77cSKWPNQ$+RRg$ZmnEXwItWjat1EjWy`aKCsU2yvphg z$FZC(cOWP;$+{_t&j3BEZNF0cT0G6}$INO^hs_E&Tn_WBQ}4pA@-F4po)3p*R`+BD z^2;EmubnmO&1rizb%yf%J88cY$jgeH^zcH4PoZ~d(K~t*jVoX^o?qVvolnaReoJ3Q z!Q?;MTjOa*b#mUzXQE~uQs15p?+50EdV@Kaa3J1fEx<^>mea4;Qv>|}!guBdUZG#R z@`WdRT2t0=35`3L&R)g?+#vcL4cqj#GrD_$c9W%M&3a$C*t;;v>ZA8$ZnII?SzflK zT6yox129Xr_z3KuKu1nd%v$?PM2z*I=j6t7WjX2b=e;;@(ZKV;`hsjmE1x|TO|(CX zw5q`HD)nzuZf@%~B6|9ykCACJmY&ha%w612^#3xyF-KPXFr0I)e0IOgrMG|KAzISC z@vzO#(C=yDYoI@;R2#Vbj@GOtqyFTb_YT&Rj~$FJ8+rJgtA2u*pVdISqko-pc`tJ{ z9IUHsGTf|5OAgkC49-G%c2IQkKmDMpAmkK_oHesjw9k1azZ2tvWkxIN(10eaav595 zy776+wNxhOHoUFO<>VBKbN+XHM8;sQ0xj#cdMmLlJ7WUjpYF@b7ItGbh}3DD*mK*k=wUBNppv^UXZr5*nS6n4GEEmX_oM ztUrR4`P=Lqe3|smgzoRZo!WJkTMvHD+Mhw188^>vgX}Vi9rVG$ zuXw!0c#)B*d8Be2U)xuym)TxUpURt{CxMmsWIfCmWL@HHt!*HYbCvkelX`eq1$QUW zuBZ8m20Zs@)=(AHob27xrzxoE12n+%Ye?*af|m!gz7BBAo|bQmUPfD*G6NTK>gYx- z?E=Cc^!TrI@cNP+9ssM}=%~qG<`mHHz{`k1=7q1&Lj3#3{%55~dhY#Y_SMmPGJVXB z^&7Q*zEY>l)VG0ScBVwCtyu8wG-sOZVFrj9pLzoAO=v{kzH84$&n$E)qXDa3_bK2^QKl>Hoq+e-f^vhq8+fW$aWpI0{w>00 zY%*gwnNNCwoNrb?-ttzw%vp|WKwC|E>5pHC+XLZ#fx2IyZ!dBAUi4+ zFk|4Uak0NjBv&zlUBmCDG`$QIH|xRhmA+D$+P?4B4@qCGqRss8qE6WZ`1#%czAm|x9^KvkCKp_-D6ywI<)_>& zQeJ27;XFBM4YK+d|1gaI%SkcmSHH%etVi3s!Hsuc>zq zs&}S!`@(w~i>k-=_u>nt@)tQdeD~ICMD$L)SZ4OicROc1y$vZfVlA6!!r5YfR%T@O z{WPVrUN$EwcG5=1I(MYO-@BKbE$1HOEb@KX{44CgHYrYpXZj+UZRif>V2j`AdL11is`JD&Dsol@3nS8+9w-JT5dzC8I|%3Q5~a2hY3_Zp8v@1f+q 
zhYam(acjKtdH2m2*j2^PH6qMHeCXtJgqk@WC##_j?L~Ak%f4BHH2B)r< zBj&91jC!@Bk4wq9MG?46r)#pmoWEDjNE#{r%9mMP7VAu<0&#;vl_?XjahRQ9n=4m&0q~D*~ zBeauwgq*r}qIQPMqif+z_9<_JtFsC{I1-=tMeR5e>Lj~53x6{+Gf0`Wu*@p8x%gL` z-5#sy5Wv??gc=ydk= zWSn-QGSTE@agEh~lxB3){((4|bCu3u6(=awf_?m&b!XBZ^=VZbojG=u*B9nNR zty$F3^gA=6>qxk=b}u7`ef7JKChaQP7^=<;SAW9qWjtv5j9H2GI?Wyq%4oH>|5_;9N4c{KPE?Hvh=_C@CS742z^52NwCB~D~dYA?LK zM~Rc*k~#g?VVrr#ocFyyYEPg&-;v$@aLe1gw#46@kku4F(tF6-v7GMyBB`g3ozs6Z zvY)-&<5-aqa=7+L?li_upI_a@K1fN0VaWyI04M=Xtm=iidp>R@=yIvlD(jxUsuK z{rVlRzY0dDz~Xh(HS)KCJRYNw@rbYL!{yZfoGF}@n_0I%g@^hE?04|sFNNL@ht)~^ z-y=AevxwiO3AMDngQvTpXDBYu6Yc*F^3^osK0WIZ@T!70E0z3=AM46nJj~ln_DWGw3JuM^Slht)^)Ggh2aOy44<&kpO-z=`akJ!xkz!oSgxUChtXgGbPRygKjW=LlF{&f7Mo$+6k&+FORI)#UWP z>o;Y5qhZ@YiJYI@37_*;`NPQNA-@jC^=oNTU$`uRTSf!xD%X+hYU0Q@uInp*5gMD} zOwNnSjM)2dodJ5@Pk%ZZ@;=7TMTEbo(LuJD9pDZ4!8_=39XQ9Ec4C29-S(Eu@JkZB z!`Rple8<-`;ybcTpZ*J+$hprSySkNRGujZpw;NtB{xMHWpCH+0MwWm6NtTx^v;2IC zFT?z$Cz7@9KjnQ{QD45hyQTlld7{fR)1Yax zhNhmeWKCv}HDpAxD`Xj^vL$P_$uhDHk&sA|Y}vAetc4JzY!yYX|NDL4=6Rm~_xJt( zf3MH$Gjo6L``qVT+quql&bhAZ#6uNxI6CJr|Bbcv35S%X(N)HZyuY zTqXB;=vx%}$D3I(0=)IqxBk-nO@W5)n8XM7j7A8bS#xdbG=|z4#~$v#!Z$Mb>x|01 zjvx5w>l1-AI^&3^K4)kP`Gy~@Wv?>q+Gu7*&b5cpH#})^5o4pX8FSdYjPwa_R;Yi4 zt5Haum^{zD97yX1ewlH2Dq6#Mg!15B9@#U?&^YFhdyuMy=#IVElCN{OI_32>8jmI@ z$jGN%E)N>;v= zNS-!h7AX4|cq%iC!D$=+-OUbnL?WpFZd!8R!fzVz$kmLG`y`prFB!l8zy5n#Xf$8v zoMo|Pyb5b84?X_RxALL&SAth&G(sh4^dg!o8T(&?{!3*9DsB4k?4WEa}y2H2hHAux|86ndAzQ0 z&uPPb=NssxKIoGY=$SaQVtzDV10cW5v&)PPPw?AF z3Nx(u<|{~LGvIcA-b{7IQtD;!4zhL_tuz1(%AlQ%P0$}T652#k_Fwd{`A{Go2ueV! zHbAcT#sK<{el=I9zdJM)gILDu-i$A`1A2|(olm&?DkbNEZEgOV|1t@98_+w9_I#Pq z)1054?)EeM=)|~ik{X;H_F-f)d&7t5zCl=&1(<`4g?c*}Pu76VYxL$_=*x88zY3qs zVf{GN*XA=KxD1e)v;A-8K6!cPAI6VlVCal2>(xDomcn(v`=OID4ZXmA3%IPJm1|(? 
z*^C1iD{2ANJHTHC{bo+@SHX4{G7`pwC)4T<-Znb69PJI`Np3i%FZX7&%^BZs^29Ti3PJre#iLb)u{qSZ!st?ik;?N`&tQKXgUVy5xhY&1 z>RaHF01R5wdZp`w-kFxV07W{`G=z%Hpu|9~Ij@Nk-TSUPI+u4B=tp|wO*F_fuD+$@ z94sO;OBvmgOiNejiF*7L0B*CjwuWx6&{8D5Zy=mbyKV0ym8RNP;Tn6!?}qMS=Zp?&pPvQ;bR!u0;5}e%u@iYWuMUCyBN!cRsC>9R6llcPe#!$)_ zYEL!Q)-n%bDz)AZ7xJJn+{^4iFPelds88=N2_-&eWNZxW_2u~zy7huCe*n!<#*`z# ze1cj>(0A0b^O4c=NQLKgW&v+~VHQx^Oze-l>7lP9^*;eyIpk*_Tnp`a<6`eZt0#hW z4yfkNQdxR^0-wS3`s?(!m1vM8ATEaFD5H8;CerpVKyw1h_oi0kd!5Os*&9N&9dLLB z(CM}G9PodM+~`Yp57-%Nu^O3i|5RJ75Oi_Ib04SF4CvAdE~wM`Fh)$~ zc~fdQ&Ipwkntp)vPlRhm%GoQ7Wy?!#jTmK}pPMn%)vWDM);{VP0GWP2SmJ57}0hRD=p>}yah|%~xD5JMwICtm>hi`*B?knS#Z{Gcp3F<0-}qQOa#Bu$btFq8q#M}6pgTI2K z+xom{{r+;>wvB*BrI1D~(*ekod5vC#(oZthWQVtTc%om?^Fghlb#JtTeoZsDpAS7` zmiG*6R{PVqXLIEI2f9pHh$&D^ELvdu7vQ}&`JarYivh|M?$728@$H7RxtFkkI=WC- zeJGg&Z8!<4*JJcH>!tOCpNyDa&wL6`JQa82*`2Mt&b!*k`a)@e`(76Mse5u|8P)T^ zE2B0V^V=*3VeCsSB*7V*^Bi|y9zu`A!$nV|Ey|2{Hu&Aih#pPP$P6uJqDlJm##+im z@GBGCxr5rO@=I&E8(O0d^5dxlp3`A2DWl+xgegMldGusw7w!Nx0>3KsoEM>YZhDbX zKAC~TnMh&E8!Iv%E%Q9}W}@ebkND;Q|BrYpGcC44dqwaGJ<*)2c)EaISn;>Ls-n2_ zdeFWz&C>MrIXrV7k)4t6ORmM;H>HCoPi}yoV(j-mXqko5&RnAT&JN01_J8|?nf9G! 
zx(nl3U(VD1w~`TPl^1|(7$tu|8gnvI{Xj`${gaX5OwjO6=0;*4^5~Us(F>>119Q=oW$5Q-r*zfZ*ab(y7W9D&@WuUQPXU?<4Nvmc z6y9D;4>$L+7Ng!cpQGvX(9e_6AgQ#v75>zRbDjxyj4@{;y`mT8y&KMMJU{1abkSL) z<|4Jt=bM)?VmCUh2fE`pR%TJ;@F@DTC6c(Eap)v}&87S^TB|Q_g_$M$0I&PzjnKnw zdBa@$C()2!@a`mNW{&Pr+&zuH84o=o(M#c3hWV*?3s4wOXf_y6L&^@c+o7`lz|NxX zr>!Gs=fl)!hG%n9+zd3V=bs#;CZjgpVV{ns`;PAxv|SUl>TLM`_`l6mqe^)`kQTc}|wnx__=83;zN!u1u0HC zlk-q?KCSkm7k5P(_Jhw}FfpHx**YGi&#y*uoYf71VmZ-G3!!uu$~K^cv%0yI?ty&i z$6ti{^K%u;`@aQqtC{q5=QOQ>!JP`%?es`{7y3+uE_%9jr1jd!nG!n+i7*0s9#XCx z%%cCv^Ha2#z!7Dy{034Lk*eR`E=@ti`csz5ih$*zY3`udrhtSU2qX9=g^Z4ywKQM4=6=s@2q z19i^R&yUf+8&Ik=GO`32Hy%C`Nv(!;YrnOB#0GZPUZ}Bzvad33i~zDy!0|qBnny>^ z_f7Ph0boCa`rW%}1V>u{%QWuTLt<&a745ql;7W%674Ck;A!|pVopN6RSuzK)X9fSm zEbTIw&BYdVHzSNyh{Nt}&d1ZI%wgn7YZWNh61g`L!=0;eH>?+&8H^MpA$>!kT@PAn zNI&|6dcOfW_i^;NEQqyy2tCrD-)7DAtf3~9^em@IXqbB3n+4A+0oy5h!)16_pBj!Z zW^aV+Zve|K_&E+9?178==MM*#D!`QmO=}MHa$suY@srTb+-B4G)fp{X5~(ob&k*EZ z`*R_jNk=Ds!D!JMtF0roOvC<+g3_&M%M;D|z{L(+^@V@?kx?_nj-XfNfcEY^tw!4O z!uj+-mpYT^!`$QuT1F3(<%}uE;iRXqb)kHigU1~!V?&&46sEPS^uV#fs5FzBx>2?O z6f+Ove)?luBx?-vW1QMH`ni!>_PsZNqXyqy=%V7#_9PhPrDc1X=gp3;Uz`zLyptK_ zC!lk_XFPw6G4M?^@kXS=y+tY52YL7as%#2m&`g`giEjYA>p*B0F?Y(EL9bEBoM%G3 zPkVj1pPzEBfxSnYEuofk&>6rULk%Ay5B2DCyP)j~e!$lLu_ zUYCagm6826yyc0|H=*0#l!&CIyvTw3KU=|cGZ^L!~Ns_5DB?V-!S*w1n6RZx2)7`N#}bFf$S;n9tl?QzQKxVguR&MmqlOo${rh12Ys)<|*O*7C8kEZo z6rOwd5B>jB{yqV<%;K31Es=_Z90H47K;zD`J7?x*T~BS7z`77p-4dFlLIwT5&eB87 zCsq|KMxssT^Q1r2FawuS_QtQdhVC58y%ar6Cqm^!>VKc|P0>@^;nAOgw%7mHGdrB6 z>4~)k$)C-4Efg#R$MsrR0L4!tm$w)V^d)rcA4pHz$F=+Idx5qS9GFX;##x?X9NUE? 
zJb*3Ri~2n|`d!KyB~%k>Fs9192>Yo27QNvV{r?oSbdS`tnAHpB!9GJDxd5%|FhVy* zmo?@8OnO36(2F|q?i}i950!@U+})I0@U|rsUlB;Pb0YI&{R!8!2Fz8`mv`MM))(3N z%zbEJqcOA;!>j*k(>ZtW;~vQ~XtsiYHlLz_Eva|Yp4(6K^9i#A z#sB-xleaBXv)$yRj@ihM^|;62sR~|O<4msg1ImEWGhk;2En6bv(=E8l#kkyoy5#b` z$WLJ;tP@fj+Q50xx&x53CwW?wQd*y$ePKjJCS*PS-?e4b>i_DL>JR1IPsqdPFUqP< zJ_=wc2z0hxj#Bp^0n6c8L+tuCNXf6XFdkW(4V52-;zl6oy|jYs%#^lv^B2sfbSZ2x zvp9{TuiS+UAEOm##O>&b`=~nxcn2fX5nPq#?VM045+FBh>R898clD?dE`rVRV9- zL{`AFr{U3lY$x|hv_lF(Eq9;?VfUE#e*ik<9dwWOMKyHTB6Rys^yeXT$QtxVxR>G{ z!cF+^Xc-IbH>1(UGZL3YXYAwGK{T2>`4brxlDS_LX!VTHLs(B{Pbyjt1)2auEhO`O z-YkjC>49XHnDyu~b-iasn`d(>Bao}eL-_4GpHh1zzGKkMeRyiLigCq8JMTkZsn37n zyBD~=V2)xut^2m;0=pp@KF>s+^|Nvnb{^Q4;dw1I&}i!N?qz;g=8dh;>mS?-%7^>@g&mRQC{j|{@PW{Z)KiGEdpj~aWe_O__&W{0HvmF+#q?uLKxQAnhET4jBSZVR z-y2%XHBVd~50#Dc*W%IZ*}O<`(8IiE=Xu{VaNP$puT)9u9gQ^Tv2&VsUV$$9H*3+l z>!l^-31x}YcNS~L6CvZi9lA=&8RT;bv^!0t{cUg3*Bc zuD0sa?ri5H`p_1j>JNQpQrcOExOvuG7h2NG+Y_qR1Ia*SM6XRf49h{MUT6u|LOnO2 zBa~hPoN>Usl;6hwc&={WVB9XtRWoF@1sD#b-Oyr*LvHed)AvBM1&)n`Hf#AVW_9HT zHRgfxiNJ5hFXvT7Xm>1>nTG6_fv)Yavzh`!Gw5I@Uw1ByCtyD#f!CVD>eVUIl8lhMq&=-bi#qRxo;xaz?tvp>NWIx;G-z`uv=v-7Rpn6|~vT zsNr5@Nhq04zjkEuB-vxoDITjtt5hE74Qo{Xc4o+3_&9U>1lq4dL$u}j87v{KrQ^Ig zADy`aY!kstZ~R5z(i0iJN~^!2*-OEh;{o-%LWy)ntrkG@E~BZd;XR<2{!E@KlNZ_e z3aQ%!&vysw4&R~&HXh=`2@4#7KzllI`mX<2Or-jV7oa>pT%k}=rl=iHOPCR=I*fv7hXxj9QiKToe zqAlJ<;+$Je2a8;W4D9tlGtwoMe$bt#F1b&Vcm_E$6Pj-{`-D&pBy;f|g;6JCX?#{8K{&lL^NB}_(> zFQRs{D(I7}ufb)Y%S3H^k%&5Avxpv`l~EcTzT>UUP|tj-jnQ;Tz@G?|2cXFfG($)9 zb0YofFWT{hP4fa*qu%A<>?xH?;oel*T0^NmjPkX(pM*@6hbot$=mI{ znDPU8U-(|+>#U&z{lyH06=C2GH>`EcwHBW28X8fkI%p>i4oZouJyyLiHx0x zb|cYKHIS=A$Y=t*i2)N=UGme$LF#g4#CdjA=rf;R`jmF&-nT$~H*fWTd+F5j8kBYq zUtQXoKHLQdd5flJLUE- z_d{Z+TMxIa(7`-C5zwq9G#gIs@%++fdX;~Djw zXVs$49@xw&NY4aDrZoDL^T9m-p1r()-F%qd|2dNEoY07j2chs%>M%R5^FO_pq+TOx ze?Gw0$eo*{y&Uk;(@16qU;XJk!_$wxQqDQYp#`&`bIkSSEG7y(!Wpl7Y-Yc9&ukiaJ4cnC z;ht3v?q30*%~{zf(A}PXG0g z6i{l%=AoW=r63=-9d9%y`ly6_W9I7?{`SN3AFYUMnKc6|y9<1`SOBW4G^eFrcU 
z2d)F~(Dz(X8A}VX=toz9nj)P$(20NX{uA6Y=VDcK@l~W@J{(())SHv`BDVK2I3>Nb zbJlXt{r}PQ5m(crfZmKZH)yjP)Nf4t&YNyx9sU}uo_YFeFW#O7m5%{$e>9zuE-_qB zrsYfYk<0W(qsyD4L!M=Pc#V2;0p(dR*Vo8Q>t?2U6DmX^IU~^&dS*S1R9a?wzymYN zl%xhn)9vuZlX8bcuXMCSZ(uQ3Paa@c&M0vN>O9B)uE^wSXzk9yyHMnAD3%x8e?}f> z!fhj!oqOxGqX$o4w3xp=Ghr*AGT^K?xOtssVEuha|B9n;eGlF4gZs;V{dRa4{l^IuBSl*{W6TRaSr`CkdKD5X8bbV zN}r?i+<%J_<?V zov6*2)BOA$L-}v89i6oqSF?cr@*H>de%JfLlL!_8%~yjktEN^Iu6NWN8p;WZ%gGzs}L1F5Xe{ldt~6z-Up z%XpB^@M$VgzJN`73aa;p=OwXd)&W-sV4OzJpUC90s`7s}?JlM@XAR}Ba1MZp+RG@OB3M5YxvO{HVOmMz-DK{$n_*1C zzkqf%`tEL?dQL`lbmj`MI|Tmk0$~NT?F@Kq?utZ8)P+yqLcfYoa}&SKU;K11^D@@I zH8b!^Xw}-(w2;<~t~S>|NhIQFepf_lmZPU)f;#k#xdbiG0@YMtF^~BhjLT~6skGh& zj;A0kM!tJ)kmqXr0;UzIGmci=Ihz1|3vjiCIttTow}9(?jEfJ$CG!>(q9^YRWO*8+ zdM4z+%n7yV_lM!~Y&7~GKxXDBv;7#yG!9GA9EAGy?*u3Fj=MYGH?V)#g7pY+*bQt& zv2b_6(^UTV=lLo)_%bm4!pPK>H;m_T-`dl@&B6LDy1*3;*Z7U#Xh`301-I;@uJ620 z-=7CZ&B^3>6YHsC9ulK()M-ZetLPrj^l+Czdr(hQeSFoK&K9a*VK+juuF-Srfb%ur zG7|CzJ;imb6-c!Cc6)&DK=AvWZ%z8tozV3IF!8KyS0=6^f5sK*8`%=*9aWs|dV*0G zew76RXZNQmZFJUJ?u=q2TSx7mLce67xlZl2ISUCajz%zYP&lu{`?ElBlO8C~<9YHD z?KDC!)kQ`>$L7q$i1{k@9s~+c%K4QMWimazCva%jU#8wy8Pm-5YqUglO0EQoFur0Z z(l-g+cAQoh&>PHoI~}SmKv%v5bq>Ml9%#ntjBm!JRfBU~;b#%>iA7@0!?P`vbM2=U zqmFTw<-n^u{XqY$qiEj0cq zj;rokyJ0MbYtiAV^qp|zKPA1&_FuoXGxe658rZb%cRdaDI&s&mzPb798{7CU46gj8 z)n4iLFLU4;Q&NE*VN^y8W5@gS0JEn`6D`fL{EbKF>NjSYcKm-Eonst`5oB7%;XG(9 z8hZo3v@l(Rh!6G`;-JkR+{uf^+5lEr7#Y6<>M(F2&<0Q8jko#h+Iu0S**%iK=_j_IiFeIargxINGk4IpvYLRh?3|u%;i{{nA(MET>qr$5b~(4z=6`WOnm z!+V~%)|pYMBjuBk?P=(tjzF`Ik)aKJy(=8L3@u}k+9S})yeDRQeGu#2>`BHEcLeXI z^uQ@#=Q#m!P(j#Ez{NP=T18uW5tRYsuiiyLu=g&58Or4x;B=!A=Ir3{$prw3GdedqB2l^ zDs4YNP3wWC7+fArFEe*c=&>>pi{$~@`h+^1lTAf}O95dUAaTWfGY}Mj)9J{qzBis3 zxg2>?Ce2f@42@%7UBO#ovJU9XJnKsB?{LN4yIN=?Pl?n*j>UFJrthz#Pu0XGa$G(F zr#52=w4k<5KsJ_|TuEGnT%;r6%jgTvJ{CgV9mr@)>i(YgcF>zVD_z^ZBr@8W{mY$$hV;IiUFZH(E z2=z9By(d+Urc?u@sVbZZy&?+%i&4l=(!N}7sbHH8I~;@ zL+JWjd+r*y;g~xMEZxayKu;L~&GREgZGmGE*#3g-L;~v{aJMhNyMuEt`j;oJm|vk3 
ztriFO9Mo*TXv%$~_8&xFG-iC62yC5rzZY0lMCUobo*77eUB(-I(Ru*yEZ~m>`({|U znW6eccsK%0+M8$E(+N;92A)3`jCf|aZbhlnv^|D$2f%L-wO^tA$y|4bs%GTf1Pw-k z#e90(H+-Gj?Z?s|2*=~lJ_E2Lzo#F0`jYEbCz+`nXP9n9N+hZub+x!sic=sev zn^4;>B=9(N*Xzbz4mIu@NJrD4#b@Ea9so@!9Y-GPd!UM}adht+_+F6bW%uPbma06`lNP$4N969+w=FTqUAnA?#zwi{Gu`Pc`sK+YP3dn zheBbk(BFby-41-7pf1<3jPc(DoUV{KM{I+{_CVi{4y=ufP|55y)zSY=sCQw|`x{^z zc_!iSXwTn~;}783A~@6pnR=Bzz8M+#pC2tdZMVk)>on`_=UL$KN4KB+$Fd7&0e^$V zY-n5qnuaU`w}`{ozlQ zX%jD`+(P(dwj-}|@QtQ)6#rWUbBl%C)AkIlZvCRn4je|R`2RO^p>mr)3A#sLY2qpRpo*{af$XXGT$6S4SN{1XVZc8TVe0ca7(Mk+KibdIw4jT=&0eRj9_XNG z$`=QR?-*e!@upcPp5pr@Pwr%l84T7Rfp2T{*lRp@x6u`yJY2O#8|%T^9Db)$cYR<= zqMx6ley#C!^vvIZZxs69+^I*AJH7p{g2hqlJcFJc8+_kEmYSns%%AuU`bmG1Q)s6e z(8WAEZ}a<4+IWEx<#%fSks8$P*C@3aed8G+6VZvr^_iK(%-m^6P%?153~&3Q>x}$3 zf?Tcwt25}I@1fhzj0ru^Ci;x*2Ez_OVXV3Pt@9ZN&8ho2TE*Uee%ApMUL zsHrq<-9?YC4Q2IQ)fXU^8r==u2j!2zkGIi);TcoTG0hfZ?EKLHD?NuAP~S-E*c4c* zEzz!rsok9wwdWl`pifp~>hnD2In+IxvgYN#NVyKsv^})D0)2WQ6DxVUGxM7Il<7?G ze-18dNtI)SIDw`$=Gy&o*LC$(dWp8p>Qsd>pbzzY$9wt&nYpGjR67K}QlXZSuP^gP z6CnE#8Ei?rE5J2|^2}BF&H>_)w5z?Y!I z6>5or=6S$vD{WUmgZ1J0KLP%|&~4kGoL-4qW@eW$T3{=f^v9C_94&c2lD3uFi}K!f zv}b;x)LY8EC4DybBf0glIua?>oVH!()I(tqdSN_&4+FUwVA}xQ0wil1bu6KTQRjIn z+ZC#&(drm*&I^sTGDo4cz#T8 ztfvXc!zm=81v=qjsPhOgzd$RS!Q(65xesVpK;Z;9?@YcEtt>{T=yB?47!9-*!5l-f#d>k-_^CC|MG%n^7ATLMD8<9TuFN}IF4!`b2 z50B=p>hzq zP!j%yo};d{#KRL~x6dH^o(S_NeMB$vXOO-6(C!NDZ33f<)ci5`24Ka1jbuzi2WJMV zYUq5UBFlQf->(xmuGm> zQ+)rS{wlyy7VQ*G>*wIdi?lzETFoI|1)gfhR0Phv{L;g3G?w^cI5!Ch|6!iy`lS0Y z>uJr*(b{WbuC;$Jly)t)Cv^W4N|nT3E(~OcpyMv=gWr&nBRtu`=MU;V%M;I+I0bd% z(3AB8`TP(nRz+_a<#mJ>r$LL$NNfR0U4imT>5pY-*%L{cAm2V81qZ95%aoH=;IkBp z>_>)DfN~3WJP)J*&&olQ5{xX>sLQO4ji62=#p)jYNtc;mxhs2y2kr(bW71)Tn^=bx)={a_NcRdYPGd-zeev$eC70 z_zBlWwT=D#Bf2Lgu*pjp;t=qtQ*u$=PD(zn#y90M8nD6!81>qay>T_6wZsC zpvU)c?r4RE?U?1qH~gg?TmGHuTR4%V+E>Wc-Rio$0Kjf z3bglg0aZ$-N))|GS_>%kKe4rL^4WsPqw6CSs4Q#b;^%XEiE(J+W;j;*= ze&CMwuPyk=2}eiqEIahuz}>&m@|n=kJ~KAqF}@|?K@v6hMsgzHgV6<5dEzN>Pg3LE 
zw4NQ<%q+W?w(f;iWr3zVy2VqcU!(r7z-uP|&E@(Rc-G=~DI{=N08t9hLU`72Cz|J1 zsBauv<#{l@2dyA2QlP%}rdi2u1m(3yGb0NrL5VuFf023{>+U0 z^Waf{5nYb|3w<#QdDBzLa~6y9{|BT;zP}B0Iq7+xG5#dauF)G) zc@hQ0%A45DM2ocJ?V(7-Tw0uk4lrAcJv|Ej=SlOG=+TkrUQd*8HLpI_^gv2`PSx3< zr<>J$0T!F*Ebl}oOoeKmo-qxra0CtA59^~b{o9-gkD+bNQ#+sfE^}QK{Bxoim+{^~ z{=d&vBL90)W*noXaR+H=zEkM`O^jpg8X8THC-cf|K~7IlDj944Wn|mjv_%-x z`Z4O9!rqAirbp42o`&K2Q6H$k6X_mJ-#&qi7%`oLK4e62GVt|bB%R5Vrx<;kf^T!M z{v0g21eV5oP~-r*FAd!P!9LJ~UO#W+4UMWi00%14(h^3F9*kW25?@80`@pHo^uLi{ z@A-U3;g8W2`n=pi&-Xzm^a0CsAWnuxuDfY#9i@gOw8ToN?s~S?YC3J71%KxS=4h-4 zJe`3wiLs#xqs3%6T@;9#P|sATUY?JBS9*Hs3A+RgPXnphrmoN@@20-f$eOXrdLH!S zj@bkCZrzDK_cU_PHt^(Qt>dB4KuO%k{R51c`{3nPG|xn2qb`tdq2(pOs%_g0nbK## zHL&{N(vaDpS%>40$YqTF^Wf-UYE5G#T!vhj#owqsSKxO;@y*c7h?QI5WpDw=siy8W4_z z3QGC`xV@V`(*PO!35x8b*6UEjEHLg4jbx0S$n$r=Zwm5lmJ(Nzds63M>MQ`X(`dyE zJ(sb`zTmy<{BDIDZ$};qGQP*crApL%28q}X2Rw7E6x<(6-8GPhBs9S^%1s8QB8;J3 zz@#B~6lKN4vyq>ruN%Kpg?5Kfljq~5gNG3utKz87yag@j2lde%_kvMlsF4gs z?S1C(TnVPpXcFT{U5~F1yeHsb1$5~MtN_nD`vu)-Zs$etU>%y>IcO@fau@X}r=AAA z59+w0w+3FdqyL`dSxKnkn#6}d_91Vni}KSSivj0kEE@g4JwMPHavaj8S9dMmE=$`7 zpie#O`~=RP1^KV6mei`q{D~2TPT3D12A0=`keAw7CiK>H~V$*z_zIa zwy#51PpBIKwVa3Sgu?o6Zv=qZM2e1r zsaEl)JZ%e?CnIYyw6_Y(lF?Jz>xU_66!t1)&b?emj0s3|B6RHsTtz8)30$?U&8b`p zX)H*o`B>xYz|&olueq;9`wVh$8ZBo2p9#phu^8rwDhU;K!ngK7XRPlo#z*rZZKP&* zehxBLwt&hBP&fu{I*s<~p+h{?!sy+x$fu_odRF*-)O81Sjta(2Pm(o%v--w(l6ka! 
zfpO^?{c|Hd=yj;(X~^y{4WLAI#^?m7rKR1Em7|+968-w{P zXo279GvCl_D!~7j;hU#gxEtm^x;Xe+X-EN3p9~Y|EHm#am%Zq&^S0&1&(cp z7klZmPon82@q8QH%7Ps2XmbMmr-PN*W*>r5t{hBZBv{Oy zdAxHSUYKibF254tx!4S$9d~HmQE=_-G8A+^P!IU?qrIjeA8O1|jEaL8bKAhdS?KTU zNRlgY-=Jwcaq52HH}kmu|L1}1406{W`kbPdm~An`G72qS?a_>ne&w$BRX`H7#=`rd z?X4{~hdY)J*UG(Pi5$pISaK9ryYqmoNa+Ub!fuSUDg3$yoMCjXR<|b+q|hJYx&Inc zkphQ)#Des!11(bPO5tlsuU#V!tvD^IkGM0A?~`1+4`75rbtFJ5uUlZxF60ffY-Ox9 zhv!U(5V{(h6&s;0oVJaVynh85{fHW(Xk|9uG^fB&4)7Ccb z8Zn&>`S^)@x!}b%T4>4WWFD(X-d}zj!;?JE3U{Bx_Psl(%M&o=fEIB)QYhYLc8Z7h zX~?egC9U!z^xP;oP!`$APOYIY&m79RzOJXfwQNMXeCC`=IdVs&K6j+EaTw9JIi8Jv z7M__N(pkPI{97KqCwDVG#%Ot6ny+)RoWX1~wDx~RB3+AlJ=llHj%GF6(OzJ$!Br;u zO&F_Cgb`vkINQ?S{4Ii3HPh5MuyaP^4#p(@W}L2_1!``=nlW2yWk#*8NZb&#X%oim z0cg?p!D|-1@q0>~!#Z6~sppU+V}o6xtIzM5(61NN=*{dzAJ_`?Q{#a3AI-~%a|lf` zABabwHKw9(=FywJ0G7V|ugX(T{#=7*RC~Wkn?<3aKFqG`G)KC^*q#%N62>GC0)rA@ ztCz|ds8NeI+@*d6cz*($XOX~>)Rqcu%}m??%gBh-cJxy7vKSq8fic!>LVCcTM+&0Q zYdg_AZD`9pOk1({c0;G#XhS`cghxLDy%$afW1RNCIm#PC-}`~Q2Ug8Pl-fxBA0pGP z!XE~*a_Fm_yk)ixeLdVK-W1Th3OHA$LXVvja9Qtc_f$I~qjP}G-4gu@w70v{ znz0u3nWgG)^a5CfQN2dAnF+=-&OG6`dmt@l0F^4n$S*=V5~*hw&&@wN7RhK!|1d|! 
zJ%M#&{?)O(q1RYdcz6qV`oV#Gz}^Zzdv4(sB)S=r;_BQc`hh%bN(py>oDF>rU#BA_ zM#Nu1@_Iq*S;(=TLdU^<8T4OF>A~n+XV1>uE>ZFTwe*4KGvL^p$nMhm0~paq@N6qM zErmWYl<9&*H2^wu2Q>liRn(;iyqO>K0k!C+Ya}ENeNzp+?_QLrc|1k=29&)*Ke|o}PodL3Li#%a`EdBP7Wq2?cg>Y%496-c z`7soB$G-)9>q`61Et433PSf5pWU~nTc85;fJfqdwthwn6pcki8k5Q`a;Kw%Lsz*&r zks#^&KDG1&!#{wiEq9&ko7>6!d|E8mX|o0+O*$Mm$B^d|-v#`}Bzy=>mH^8d=%vrr z7tlnjQ@o9P_0&GIQ*MMyE2(uO@9N32g8tA2U0)teksWF31gA#x$xOf1=c_n%7)3vl zS;TX^S&RCO2wh6avOuaoomrW8fX{KLbscHB9<=%=&lf=bM(!=K$N`3;Z&6wngObw{tRgNBvWyO*Fd|+tC+vmW| zOmT7WJprkGnO`p;$+@7IlI1L91X4K*3=@!`K9p?&99rGx>``m3X5@LAndEDfeHf^U zBZcYIKOGs+>#iH+oWnK*qMgugA{al1#4ez|8`PwQUkGTXb6t{m%ff-*(RKRDd)~%3 zP%Me}jr1LYoEiu3?(+)Dq*3dyyzSY2KVr#$3?z}%a2N91A09ZDTt&|_*N>UdBH`wj zjI-u(?+ONwAg`|O7?a%)`{V=ukB5uT0cqxd){Xd86ZtL$xd7`t zw5E@n*~Z+*>I~(~%sdiFXwS78IT9)L+{U88$J0t0(uThNdKsF9%nTR(9u&>R`LE0^k=<5vJZmS zAjUm2Nwh%z##7F0;c-xD3iK+5oc;<74Uo}3{9Oz#%7P`_mGi{w#nk&m!24N1@e{wL z=dZx+2``_a{hkPVhB=yMA-#E--5iC>o+4TioUTKaCG_o|0ttT_3AKNfVsv(8$P-y! z1&4OXvvZq*Q28s$Dx(Ye?U`QBAcdhN^$h>zkM?pVT5vTuKQ>wTR9o9K8H`!gcgBzQ zTFUL~2=uh~y7Oi{l-Nd}$pNL*a^6p*?mR&73;j?3>Ri!Rh@L%As<$vIw`FtiK@z#?MOcH~Y7$MT<-#J4B<-@i-U#t*P z^E~CXnuV+tZQE`(Fp|dFzRnjOr9U>Ox%gv@xVLUwT7z&P8JkC|FKQ13I05t*6E z>Cf-%>nCJx7*f=eeyfEl{F#x&ub^&zATlaI9}riobMsbDXm&^NeV%q>xEp@z29Si; z@yPHyv^|=yp8i>aGMT`A`@eZyfcn#DX*P7%X4l$w1-%q~?=CRNhh`WVy!Sd(*he3b zHf3+u<@p_+reL&k3fkPA!%El|djg7Pa5?bjVnok?(f``t|MyS0%kvZ*yaUU?9cK3^ zJX5_a^7srgGaGB#dd$-+Wg38gF8bm``27GBD^9Us`sP!p~%M!z1Sg#Ogsu_F@D2@Bv)ezeCK`rk|>*$6D7Zl|Fmx0wr*Tzy#V`M1@2F;PZL+GXZk=OIc*l46R ziBaPoN#&9PT<|nlIe7s|ILS$1|gbi&2=0jx2k4tkm?gT; z(hE9HgbKZ(q32#N=KghbXdlKPS7(ftFf-*>)SSpTdJ>2VL&Y9I-j7<6_~^$onl~PX zgPW`7@jG?_l)sPGoB z)j>-C0G7U3FNdJld1{}F^}P;QcX9U!uo}m!r`G*Qr>8fJg1Vlwrl0eL+vC3@QdO`z zhbH|C{=7qL&M7@-POtoPQ2P+=m^-#qU^AKF&{%EH5~=~ddVCq177MLQfNOaq-ds>- zH17zPjrMa-C=IH=O3702c@r&o8lq8o?lmb39pSQGM0=5#8PpjIJ%>WuM*OWpUokJ? 
zDn>QeoL$-U+@l6?r!{rBTa`+kzXOf&Fy@*vW9$V+%ofPnQfM)qeliufKIG%-WK}3M z0O-a;y~=2w(NOI-U^_?;=uDk@))obu6;S;v+WwwCJwD*x3aFu%*h*mVToV1KQ+aYP zcMf1pY(kpctJ3>v0Gg*H^}HX{^94LKua)~D(}3tDu8df01{d77DM6nxX1!d{-}JRJ zlDq+w7(?p`)ZdtPE5XMbU}hGk(r{Yu7$dWX(ueh@_LTY~NXSe$F^Zbk(=QGImsx8c z=jm)9Du8~O4s9aQ7tf*Z8bWz@M;6d;oJ$m8{4rkj6-pcB+X-rnpw*WdWjsZo6)-7J z72xh>Y8Z)Z)C96r`h$6DJoRoSZMynA6>1!Wzo}q$18iTU-@c1{pW?mg;QASQ)Lp2* z8Fh_eG-s{eW#fPKx-rjrT_|k9r9`)sXwg z85_^hXJ#US^TEb-*Cp`B{I0$E@2;bnzw$%(PpKmo-8UKN&8Oc9+NYpdAECxs(4=?( znI|4hhK8Ha_vR)z2t-+_+r6_wz!aX~-kvAF!Lj~GwpLhmSJX{8qu<38121C{#Gh-vhK@K9hIR8Rx;nvkg~5kIPW^ zD6K4@&TDY0CVV%Zz}=ll`pija<1W^E`r>X%^rI)6r6Lh3ouSU!U^@&tbOAzhRC@;Z zW;E6#$Zju6?c!at3Em3kWn-a28)V>9Muo9ZyDgA@8o=QxM3otvjN~^Dm6>ao(vJI* zW;<$ww1{zcbk8Cn*o8FJqrLt}lAZ!hq1j)w=$z>Uqu4sM&=_iTjUX>QazDDph&xw| z%2Vz-aC&x+67M+%jb7+IxvHK8`w_5gny3SKum^-|B#( zKrsZ_UxGI4h`jIO*95rcj?Tl3c8=xd1!;$ly2blL(b;BvtqyLX-%?-bFdx2ZN4tmc zI(;#mr?lbf3|~)bGYegg+jACeRIS?ow~eW?2l z(*%i5;lFFgKf_U6JQee?*wd>_G3jS>Z#!T?jQzql-om%VH zu%DdUxJMBWrEGZ)x*$2I%{g#9GM@|m6hTkYlcfsd$Sm$=f;&$K7`U@j0tueP^T)wO zkFCA@EdWe+AS+LUfqNz9V@+XPdW5##0H#{}+6c76;i3@;DM&|lBqk5sh=8h_Y3mL! 
zb&e!mI|5S+uE+89&JX;J0SYN%l<-sZ4Lzld2@n(aQOq8CocdGXR#EtWJ;1UEzYFum zHd^@`&HWS9+y+$I_wnfE;=C&aPs7Cs-ur>NX9s*S0z<#E>Cn-LiS6L&3@)A;GVx>s z<@CkB2dxv&lfQv3Yaj>rajztD;9OJ=8X4@Kl@Z6rFgrVR2TLEdP!5y?=NuWozT^6T z`(*gXh0p3B!R|U4y=@$54k+vCfZ5SSVK&()dcQE&#j^G+yztx*BgrE8z6>VTRP6TL zz9rXt(5KSa+^2dL>Ct%$o8t&{azwp}oSZk!~FmzMtI=j8TJUi=>glLY zP`ehL*^$gj`4TPapPd(Od9?_Lhk&t>X7vI{%L0L;%6p8M#!H_=FZD&AmJ6UM0+n`y zZFk;Y30AeKu`j(te}K8X_craNLz{2mj(H}{lI zVpkb^rU%#>OE3vtn+!fR!GAZ;Ml)7aU_3Unr8C4d-tB?BU8Pois!tcW zvOXVP00e?>oEC$?N7}0-&S_k24Lm=~v z-qT2inP<&jsYSaD>39NcP5_@i%${mih$pW5kAUKhs4orZ-61f0mA<@lkh`sD`srwb z3s7z#y) zP+X>kSXxX*_fDfdJp}YHHsZo_O7tx;kNzpBS&b*II8H+{4+4p^wMNw2mgmw^FB^T2 zjB_1{l$uGT1J~!l<`9_u299TtQ%_Nt52fbAZ_giJNvoc3mk<0Nf=c?lnH~58zV4jJ zspiyUv`HVz)db@%)L8>9@+>u&0d@><#=yB_yni=vo1OF-DCWr*S|ke-H!0T zDOzYZmY%W1M+5z1ZltML_N{>_k+=E)T>>puM%J28TX=SekuQ2*s3VjupldARSK0sw4=|@ZMnoWl$`RV(c;OD7e_7Mrs(vWM<^X-hb zn2J=6M{iGtYG&RY&NKI=^x<%Ncoi_=OstN~A2 zG}^r-*v*2Y?h%+pViWKVgpL(}cs$>i!O8Qw^+sy~B;$}JvnXz(AB~3JJHW&2$CIIx zImTRJIu7sjUR5U_fz}(ai;R!E%>N<*)yzMBmEPp+rW3T9npmoz*l=P2lUWmVLP=?gfA_?^FCnEyX;M*VHo8G0rOq-K%=~R zpqc&!P8*TU%E;D9AlgsKvA|%KqgKG?*{XV5x>}Hn`+1qA>sMC{o$HQk8Wfnz2-utV z8&P9eekyiQZ}2jTaT2}ZA`~mZh&z{FV{YI5z_=oKIvI!(&_hR|U|q0x1ep&Hn<960 zkeWtl^g-ZI8o8?jMM}~)dLUtoxxXKLvOuHq0k7tBZx-~Lht3-cmCVrAnDw!J!P9r( zXfixWqfU1(`=SNX;j^dXsb>$<1BTM)jJh!|ZX8fY!9_C~{Dr0&4G$he>w30BPfFG0 z?PxxW_!U}rpVHf1S$7w~Pnb0|3$pG0%R`Ll8_*EigGNFa-{MThXjRWD7)G1rv8FBI z=S6;nGo81vU(M!Rl{-1$&ZgTXLQB>;OjaPh5!}s+oJIh#Uw4M0gS*BRg!>A514Red zIgyQUhe?<-O6Is-vMAE|3-#tfLl~W#1=+}jEExf8Ja2aXz7FnvdEyKzoOfl^6a$ry zao4lD3)7>=(W+-#y|1flL;b_g=1i!3Aw$G-r1h_hXemT^wZf$fpNahdhUY9 zZ_o$7L_+VNPsGrFw8M>;eiE7d1+AjjQZz8sVeQKdky+6`Z-aF>D{xjSYEl`?Pu5-N(6;kJ+wE(ZBDc*9yB{0EdD02lSD9f9`G>dtsDA$bqbIH#geV*+U)1 z{Vhmp3ay!K+?bKCkp2Xuumx{s(Aqgw3UH)QhqIcG0^W=YYRtpm()?Z+{PM4RN@4Ve z?;BsU9IW*N^?q2Vu>9XXrQxeos)uZZ^}ACwlV0cS>Kb2ZWiGLhF0;Agj%9YvyHcAc^JvN63xu8!;rjI={(cV3diqaCa*WWv0B-Ka)P+8~8Pf)E zeTiNagO-Zp$uuN?20f-C6rYd0u7GxWP|pIQ9Mo%e)<398FV)9r-BZV1*}e*%zfyl= 
zYBh3wGP0``*$h4RE3|b)F9H|trA9fsm2xxa&->1F9HMCx}4ukq3mTe*FZR}mC_SBjpb@h0EgV& zMg29fFoy8|34Z-W+h5VnBHr;_q04Y)3crjCDF!^o8@2$G;o$NO_Tz(K@9xfvj7BY? zjgiwoql?{r>IS|3g66F#qc_ouJTseWZsh%a^w$ui(TJh*$nt3FxGYn5gG9#hWAaE-{?Q4N>FOt0jsGfxe&ii{qqaINH9a?G) z^m^{?2>MSwAZpLZSOq!KQ_j5icfm4bP`Sj|E8EA!^N!?y66%H>hJN@2vp} zXLVcAo2`*A^B|RAMo||y&QPa01}@W5Kd7S>?Wu8jk)^8C83&zKz|ST~%MeQZ4)ppl zDS2t=2V?Q|EV)9<=Ggogj=7&O2ueJNtQFv%c^r>Y^CloY3dC)&;RZ0Go4K5tY78F}f#gyk5i9vs2zt6_X+Hc{kb>jX_y&Yc;)9|00iljw>*(`V-&Mk2l8E&;iF zQ}00y_eouEsRoaX3uqHa(Jz!39E?5_p=5QAa`P*`x$CKgY^Cg7B|s*`H}BwV7D4=ah;mYJ7j(^v$*X>4_pAM52$%6 zFdOr{1o>{jofEXKRGE|bEpYTaHT_l9LiuUgwIz9E&dRCu?YE%85XyXt1PeE&+8;%DJvu0}5nE#~Nj!9NZ7+y!aCueJPb3GPbfCC2XKv_2Af>&WO- zkbcyJI!4jz1GLow8QTXR3PF`CP*lItt=t>STaP38Mrn@#k}He{=fGKQGmxJ5IMPuT ziE*`0|BgqY+Ji`mXVG@xUJMd-FV|y`8T|@9`EF`JnF~Bu>ouj;Tl{|q=}{u9&<{L~ z;R)bcMQh(f&2>nG(blda=3%C1yywq6GlxnkWTOt6a4z-4@^KfpIek0KaNdx5%G0`j zXYW93Euq#}R%Uc{AH{DX-;7TtBq^R>VXXM4!PQnI#z=~se1C$%Md9|H)YKgw>WSdq z#7Ush-^SBIBDj|bSHcx@_aDNw{t^6N$T#DY6}nkEp0Yn6oqLef!PNc%e1AJArOjx3 zue&RH78OG-e7iNhVKy*%ZH;Nr%*>*CA!%(}dIQ$u9?ECTcfxgaZBuJ|leQZ}-(1uZ zPmkJ&UeI%`4$l@*+YP9uHUA?ctY-#zKK(1mxs)+K)Gbyg#__8bPoISkbCD18j{d}N zW6-ouFHyE6x+oK{ehMVZnMI@mYfkihZ{`atk+0G4JDit=u^&cwm_aEwI%5vCL;$53 zUu#g-Ju~^GN81m`!B8}iB_e>`m>#1_HUNb&<<6yia*}T@=;|zPFjSQyv%x?QDo=Pw zMl$p+^PDS+|f@{!>;i=cQ+)@ZZ_4GuyCy_;>by{rc|=0^!r> ze?P4PT+S{t1NTeR?;OTR!7%p3Tq*7v{lVB#2_2AyRw~f{U2Sq#r!Kt90SBYt-3Gp2 zqyHZY>Q<`#Wa4{{(Kj7AG6v{0+N>8N$y}_Brs&GSSfG2M$Y)qYJ>c9KI9R!UV@CAG zyw`@FYy|QOB<*o}tKK25JU8TP{<~)A%dgP9LusKbl3r{3_*IdF-9V{o;7xX`t=mo%GD0qjxIh;&$__qaS5r&jfQB6+|Oi;)=$-J zaOM>HiSefe&?gVjpyCWAM{7c*GPG{i@y*D8Y5sR+bkh&RlXP9Te;LWH zk4!b8p6S&409@(@#T!A%BG91>@HC>Axgz)saIWULXKyv&Z&Tnk-ovw=N}|7=@qGey zuL0*xo*hNoyEfSrc{ieDD`QAspqNJeKSQ@w)N%w0y^T#Xk5OMAwlYvyiVUVEZNq`Q z_ZV-bp_%nHz85asK<}SoY`1Kf!@E1}T&DhVNI)w0=JRA7A9vl$z?TC+r9}0EV?Q(A zIaf6c`WMhUKN9jFwQYiCE5N{t#Airv^mqj(pAQZXrynjIb3J{HJH)P zrw+3iU4b&@`5T0G)5gDobPb_3Pak*1zXs!ntKsgMOb27neQ%D&D+3iKBjb7m`~+97 
z!SQc-YYXL?BDpJ(?iz5{mG0@ZI~s^QtFJCK6@w-((Y|9<9b_>Tc=ciE0vE1f9TbFa zt-#$#oDRIJr=~XXP+)usDR5tBD)fj!0v_W1L%f+Aez^1PxeT5ZD%KApw+E@YD-f55 zUUFstZB4?`l23ZWx5Nf=*0T~?nB%knyj_756{MEm=&{Xc`%$=e77e}$y<3Z4-_pj% zNW@NLVGs4qrC$9pDgnX8!1g=O+l!&8YnK;!+Z`0`5dF`bV{bZ= z-uWz=HIY(U>v|?+p~P44?Fuk;rbExHOsf%=BCe z>FA8yE<_$)1VUqd9))@h;n`!@eGwZK(z&IH!+i$j-0GOFV$we z_QV%G7oXue6&qnCR4h&X4=~0}gj;u`Z{4-11P{vdI~`3I4IUS1={Qnv9FwzqeKgAg zwewy*I3~dDywot1@n$PHrUUt1$e$U|h6g>eKP97(YZNTsll9dXq<}L9& z)OxfW1t*Ou%?zbhQD+L=J4Wp%kfu~>@LVfB^~@-80BB<2Q*W-4km)ASPw$s@wEY49 z&D>!R^5izPa0BY!#NW-l{R6nZjMmyj|ELA!+?l=)sE?sfJa=;gZJ1YX8xq+8z4QdF z*XQfJuP?AgQDZIil+p3#wc7_@Ya;`X&}*-ui`T=WfxPLN*6tg%rS0`dw)tsaU_?(v zvt>bgTEIc0;s#*ntpK)PY1tY&Q_~;N%KVHY;N=Ol)q7y;tlqqf-_iaSS{RH@Gf(V4 zT;0u+_FT_|0+qQ_5-D%aM=SI%U~qKW2t8bN4=s}LZwa)M7PWSwHoo3N<{QZs3~TFHsXnAwV5rO1Vb@C>hEz#7)K7pm#o((~W7TC<oOiedY)I zoLUxAsxi{~C7gG!Bsc#XLDL-YPG3sb#IwMy3vgL0R6SmjtAey-1j8bFUM47aopC`g zr%za8cL&2AkiU^mvojgp{vZ&%!(Ba`N&&?NMuSXn?tGxvF43~sj)z)#kkvw5*Mhot z(zi=-*VvzEpmq;1foEFJ+QXT-WAsN1J^6Jw6u~FVR+MZ9YWL z(x!NxcIv?|SKi(G>&biZNP!l=zDn=#{%e7J#PIt*B;ipc&`hy=fXYaW`QYHbNH%)8 z&?oXYg8nBA;m4f}Ke_0g)}b$**8EdkrSQ!PWqs}(#QBQ8fc|wZh{cb=inQb6ZEnx5A)gPx@iTMlG#TF`n5^o{0?6y{8s(SGVg`7IZR2J~`n z<_y!>l+=zv2b$w10r@hsqB`w&q-QmFZRctkG}2es(e`cbzl!YYAGsgxHkf{Y0608- z%Gk$#jANer`7_sN8Lhh`RqldU<@W-5M?G3xMNc-%L=$?HCs!WgzvtGP!Nz_4FL*M5 z){O5RN=-&Cw?V_@rGGjb8AkuR%-4~#6&RjFijx^x)?*#HW*kX<$H3(GV0~j567K#~ zHMH>rq}=ln`q9rl8D|9cm8W8QMwR*S)Xcl-Kb}D6-opj%!~vgPN?gh4a!797)hJB$?Eow8H?Pc_s ze)h)tc+SU5aL~wev)z=Zha|wA%lvN2s1!yvPJ{a`p|5AtdOnptN2zegc$A&!`E@)k z24D5S@l@i;lxq$R76XN66S-!s7l^Y6&l2)nj#z5>1kJRX*35S*N45gZTqMKH?CIdR z3)wqE*`?5SA73rkL&%JA0eOOvZy;@Iv6)qK6`b;2^MiIo?>W10mOB%A_NASU+;R1_ z6x`4k`4%ng!-oEt+WJ7FRy=>$0K7&6 z{mp?UY|cOL~d?&AoGs=$^@8PN8RkV=vO{tkyH#%sD#` z7@foPrZ*f2_>}}#%kV}WXxJxc-Lnq{!b9Uql^8Smwx`W=NYy>)#irmh7RZ`IcQZFN zptV<#kNI%UtWeL;q87wW=xxvI2K?p*^281EGj*lkccj$MK%O5A`alPB({%>Z%E(9@ zlJf_2>k96!f$EXa1Ub*o=Vj=cn_hGZ{WbzE{uMBlW=#4RY^p;^_qvKQR@MRk?p*x> 
zz1yPOS3}#>Ks!AL1lsN0c*|T`Md6Cxq-v3}(DV$nAIgY(4Es;$2cf+akS23znZrwe zrOVu13~Y(iZse6K3Hps0iQxU-aI-OWxaSn+sJ#I;3*o49zH-3R3);FL<9Vp&>GzCB z*KU)bd}rp$*MP{q(NylWhfdX?!^6Y&^hB+h5ujLGOG?MlfJevuZ z7V+c*#;8%i;e5ahTaO~~`ghc%yiq{TCE5Uw8JY7@Vn1}Q$Ee`BCFcKd25}Bf$1%2A z&s8{{&i{V`NZNz*0x0cip9R3eIE)|B?q`BocrrXH&ZyiL$<)5tPAji~?+~DVG0>4i zkldE=up%R^r{;YSj1Q~e&zsmP&FGKi=!22;ync*9TX}v4s+|M&C~Pn@d>Ngkaf~B+Mh!#i#vuj2BJG}zlNSm% zL@&O{y)jVOh^8#`nlf<8Y|@9JTq>Mi0%g|#fiVkL!0dA*@=bL5IjE|gRgaRM4^FjM#|O?EWIpH&urP_n0I@&@Hezx6iS=>SI$Hu z6D5L`+{18mBO`T5bk%sSjM;qz`n(4fOJLcTV@!{PF6l^gQD~M9Ci-_p@=XMPbKk}y zG3AjzJ^8lK*Ru2WQYbSNI@F;RSA~WkUvDGJ4{&7!N;+*dLu-2KOaf55lWaDPA@Hpj z^)#crb6?NOa@V#DG&&Fb&5)qsK@X_JyN#)-5gO_*B<&Y^m*;V;pr1Z~6z+rXdaSx0 zxC@CaM7hP3EF6px%3?n-Xu}wO4vwsV)Bm7l76Y~UB3%_*4<+<~)JLom(qeSPY+x`R z+vx6a?Y%hipNwqW0{oL8k|ei<*v4T_J9|z zR%fN9QSc!2DbXq`3J0Q)K659}1Dgo=Is$vF6g?@GUqyJT|I*)->x@2*LJH0y1r_Ki z#pz??&{KY9BAI_s|6cg<7;iL23qFk`dUbD_BqA$)k=Wi)X(n^(JwV|bcT!_A@-H6wXt6f`6Y1%$QJYu6 zvxGgx%zY5gxoCl0M+12@HDy?&`tI6qYrq||a=nKw{x`7Jq{JhPD4rUd69^kq?n$_J z1FGt^`2%vEKwmTi?mg($vuLpmyz?5k&4%Cq4_$Wx_w|(ikH5v9F_KC&B&noD6OuJl zN{Mz$g%Cw5WjEZ6Xp^06)s(a;OG2_VDNAK5jiN{?LcYo}Gs)7*ev_HL4*6Y!UKi4VL&^2`{PJkT;pi3LJ|{7fAM92oe*!^E zKcCUUtdZxoxw@pYrM7+qbw$|BgU4OQ>bCl7$HM2%uS7}5D-Xu2(JiMLDk_y5=Q@Kt zdmq1nVF5ng3yRFsW@$BhO6AD;FP>CDu?gy(gM(>nzIL+Xl6xKZAq`KH&+=q?Ex5~} z-wsCfSJ}DTvayWzJm^zyqu8&2HW{X}OVXMYx7BYMefzsMLZNd0mlgl3fblAy4)q%; z3(h%>vex$-jm3xKW=@V|rZNrww|HN%m;q!KS_^m6T7LE`p5`v{)Bf`;GLgA@G8*J* zK4&iU`zeyti~jug=l6Yx(ymsXXO}H=;c~2XOHhQyBlMh|nI`!2JpH(Z&8v-`@51U2 z+Fz@5?)J-UIk(x&Vc*J;;7_%X*6I|0lOL?ir=4la%wcPL@tdB%b70_NRuiQT;46 zbKbbF67SH_+{&6Ae*~MXyWN66>&M{OS;_fp$OYHh#sGRejSE5Tda{V4CyJ;=H_%m?0 z9DJ{_wIgw0ESP$ekbXFHFnZ0VWn*aVGpKQ~-j0Ovbo~2@J+2JPldP_up;m4{pQq+N zpvY}%x$|rq?&XY8*7#;@Mwc4D({Sz&C-*WK6K>~dts z<7m`tW;AGyYPnSYcg;#bbk^?z+u>FPP|3H?{Q#z5-i8JX8g8?v%jBQn_ z4O#34*Z9WWwYI-HZ=iT`p*~GYx8mTJq&ufZ4%Bme$6a9@O4S8lUvOoHnw`3yXqTVd zP5mWmyawO1+2Ol!yQzA^Q0;#0=iLc7GR)JfP$PF_OsDyWfxaym8aYQ?sL0kMD86z9!=O 
zpYY%yWs*xFaTb;FcOE#0YNE4sEqYo95zNr^3muC&6P*V(u5e+jy;ROb_ZXKp~hlWZpH>LdR@g8dxz6JvLT zF?DZtCLC{J#OuqhCQ9lOmS+I%OeETQ{F&rH8p5MUo`8DByaU1CQ0ecC4KvVmCEdHX zkbq+y_(bofn;jhNH@866N7F9&UKiG9@C33K(Tv_@M|C<{B+Ky-BWHiO zU5Mt}>0=!Tk`s1}H_5i~F-Kk&?K4pRR5tc+;J#aX3t6PG zq<1vgPBdt;`<2$z-=bH^8ru!#xli)}{U(=n@=;CJ(;WE5N53AH&*9rZBWG1@oCDSw zFw7bI)4>qQe_gFauAXQ_AED1F@EuAg4_Kk&m1a2)pPjsCcpUKpVvn=0kn0qcy30QoISdh26n{BI&8?v zBs8aqdVBvK8@C1Za#!GBvT+#un^W}gHyT@)c@K~SzLM|R*i*i>>ZVs=l z_7oKVoIJj&o!?OY6_neAH{)S8Ngo|qo$h3*F?C=2nP0>$kCOiEjI{N6C)~OfmhqWNKV8*J4vBb8mlw}c zww3y&tV;2eTWc}dxZ+pt&Kl;PnPg)9(1_6#cXnhW+Id!{{hamK18m!o+Y>xrM*?>A zE~mD0ZZ7NKITPQTR%CU4Cr?uA4{X#WBydO8VVo9z1bbHgPg8PtFbpFrbwSciuX}@G zPc~o(Y5JZt_l0+E;;m0M&qu+`+xjUv$p6;L<-Eg>crhJ?ZqwV1g~ptu?gUn65X@Vc z0o{Rvr)j5^|9>oCdyk#|1N-)d`pIM(P3=Q^4lMlVbHT7P>1l_;S#6)7rAt5@4>xnf z=g>bZ?$3cg@t3(bZG-l1V#kk0fkX{vSL}T4tkXv_YaLG7dZI`&tr~ z*^9iXkb&xKPBL6nATw=oVH?rH(=ltx9mixn`k-jccC-^6m@NHTzkFVJ?_(tjN{Nx_*Cq4bq z=PP~7JY^j%EyKquICYMmx3UDKw%fw&Ciu=T=oLPfHxBOv#u+rVv}3ZP(qH*po+M6$ zXXXsqTS^PpfFpe*VlAgEE&_jGdeRKx3wA0onJF>B)6&jkPG!{c8;mnQPcQ%d$=v7v z{gb)MMSc@umw9FC-vsvDo08dS3%w_kMs70vjBS~ZhG{Jv+Gdnm3#w%W$8+*$4UUu* z@BiCleHI}x_?=j>tfoB7kI1RMWxgH8mVbb+pR#oIjN1>O)O2*t+RG=b>L4C)EKGJB zo-1biSsl3??XCxFR>TLhA;aiLU-qK~{fh56f;Fm3ezxG_6W-_C;_Kux`M!4Ww1Ia^ zNbXl)7^lbe-p$iqR-ZGE8L0HJMvt7_sOL>ixhJ33k?Q@(>U~&9OK;Nf8!34Xc88(F z3ZwgZT3w1mxq~g|fpc~xwOzkF!U~G&&0oHIz;M``jNf z3*B;?@hW9vsW0SflNRBgc`T9v+213I;>`2a{s~-RQVs+ zy{3)iUOL71;iNIAuFr)1FT9|vJ`OaZH%0BCD11Fg-zvPr>=GRZ)1wNAXBw%GhEvx9 z-}n_B)ZP!o-@!Z?jIyh>QJu^cH=%IW(+A`DRU|tjNpgYx6K7-3qjmMsDtlIS(P4&` zGjmIX+FSbTt)0)cn#i+wR}1mrHT*aL_GcOIUj^?{Qr90|$*4R59qK660gT7M?SF8z z6v=k3{vL!y;?f4QcnA8M-aCV-1rY%(X_f%Lxxi&oxN2U9N)CqHs8F!VOACD&L^lPp51 z-0_`x@?9i(rdF;Y+t>4R-qz+!wUf>L1F(Ke^G{YXUe&L7bOu^XgzI!Pd_YTy$eKgW zI+BOqNZvR$JrUlE@Sz$>>#Eh%d7zN5lW}nkT>67C^TNy&j`A%x<~Bp|$51J&2S>@ajiT&qR~V2dnC@A-^c|p`11QGw2@x#cEb+0~nHZtpO>@n*7ha!-;H2 z@*nT5_A>typ6tQ!ya4AS<6XV~GuzdTG!A0hI-t`Lps2?CxdN6=&^C5$D_PGv 
zU}bM|$~pNGR>8ZE*2DYjw3u^8ok&x%>K*FyMDpAf?=NSkP9@hTgDo+FN73=y(K)BU zvX7a$(|S;RPMS|(jgz%0XKYqzDf>#f194}%e-~S~o$@)c)ZIuvU7v@-HonvbQl0az z&nw@MCSHK%Uy`{a)k~ayH(H+6tI;TuQ&S5-x1#vJ9~z%dE|Pg8`S2FQc0EX5G7eUw z6FHZd_@&3m>tgUsBDkkoK5nja1{@o+Yj+u)E1>eT`W#74tAICi(vS65MtW9yeuo}M(&svS zluj^8CW3gmo%Nh-1>4ct+G=JmCVOq6X{2x*`5TGDu}HE1o3*oo2e|vDPL7n99&fZIMAm!}epF#a12s6qz;aBX7FAvkz zMtVEk`(&+eNLG?x?m1W|GIFEOL;aowu3d5b1afjPj=im~$GpjnfEViFY7h-T$7hQt z-IUn`kK+n!-vp$i^>r(ImA(Gs)XeF*RdD0(31-a^T>#^Ri~jyIWol+tOAtQ247c@wls8^3!+@zHYgLRNM%4;fL^O6lOf zHyV}0^E+S|5BgVDJh4>Siz)3>#OK?H%Ddpv0+#I!W5Y^i5Ayt4^gf1k*JHga(Yg=Y zS2v<>Z%%ZC7P6zYBgl6_pNcH<1avrp>lbU3S-`CayMUe{y%`8H}*XAvv;?P+Xz(bIUj zbqWcN$Gts!y1={az`W=Egn~|q^11?#S{AshHIwNHrfHyEtgSWLXbHBep55txqL=>3 zPrcDh=21Mz$%ZGv@hq7+mG$dD+8Ux~75&Cj&I#uKa_C7q@F;1mj;g7<4_I>NNcIfQ z_rE7PUa$4|<8{bo`0;mbl-FuYbjZ1wt^SVkekF`DAK4M5lZ*5=&|U}1I|@%eTrH=i za%OM$7gpd1Wq&N5mS!E9Zw^7_zmk&7V*^F$>_>J!@;N_)^j%hb$}Y_AKIJ6J1A6`m z9;G*X|GURSQ0*MrP#1L~5%qC-ILyu|>X#>PX)!-Lf^(%YeZJ3!n9-l3+v zUa?2fLaWV{dySnMq4XnK ziib7Vw?3?QGC6*){|00znf@*#nOS$ZiavD1iw^9_70N%#Pe~s4tKg9p>g#FmP3*)+ z-gGmX48X4|mFR0soC3PT)yuf_i1F+dBg+~Rn5^hm=qYE%bGy?6Bq(=(w?T_UZL~*? 
z%sC!~ad$M_10~1c(QVqvtmke1!lkT3ZCbkz=pXWI1G|4cnqFr_YJ#F|;FGLAPw~Nz zW0@x@ah!HurR_f`xtFneH5>jfI-FcqGf}G}n#_UqdwR(!@|=F_qpt_~Q(4QLLQ9Tj zYi8o{hcInKqxzuCZLEDFp+85f*S*VrP#1P0zSrJ#?iC|hayC88|IDbd1%7`nyp9*p zd?To`FR)BoTR`?UE_W=(0tbGVyfrGIKJx2`+Zj#$<5L zM5WAglIizDntChwngpt&{*86~qIW`8DZ0n9fK3kA!=&Tm1!uiGt3(>CMP-Yi~aW$Fa&MYkqAu zGIyF+hka92jsKg7_F1GbnM$|Plci|!CRwRQ)}H0<IOVG0qjP^p2 znRt5&P1vjb14i^dJfdzz?f6rNk@KAF?ZZxQAl=u(>InEG(_gf6HQFxL#ym9dtybct znt9U0+w3kbE%f43)OdtF$~lnS^Su!zGH>e6_QxmP9Zdqob=p~l`g{0R!{6MI{f5uk z+1ws)a?esE`(V_{{3My(Uo~zdtMD&m{XTs8P`#{d)}=kU6(bR23;oUo!QJX@&|`9r zU83%#sP;Z<5g01?Uxk*{BR_A#yn~UktG;rJ-wWuNjDA0pg6w#10as2CAEu3*^GhsD zJMHJT%Ukdv`-Bbk{gvKI_;gYFI!_OT^+KF%2=m-Ep1YP3ANqHF#}3^{X1by4{rc`t zV|Uf-CZ9h>oy!YKRa8Fv<%vpLi%Kt(hKA(eSa@G&>=>c{3*etM*<18C7q+<@<#P4L z8k3$z>-J!{vY=#5bw6dj_P~!y>|$MX-NP*6V)(CR=gekiL?^I(@shG{ zn>@fHNdHo>oJ1KWIIXDf)=PO;4waZO%C+e>pD5|4C_T(PmYwbY>7Fh4O z3s#8{YD-!ocUg@%lvMN~HCs`56K=&nz8B}munN=Im>ODLg+5Q9NlxhW0LzcQ_f_w5 zrS{kIE3C;ZIIJR1lUUNMNoFp*3WX=qw_CtClZUV2 zyyJOp(Ynk!%MNIlh^)lFWG0gT`K>^HuJb9gl(WH<7_fGD9-ll}IBrz?3*Uw-QPbNU zNWn;UsLbaI_>}#c8+B5x=k(otuDbhx$I~-~FZ6+<8^sn=AP3+v7;?9cfH! 
z5}z{yhCgZ}ccz?(##xn5=7pA|YBkE`-px|xqx7D2^UJh8Ly7EP9Kd?zwwG@_KZzXW ztjh-d?To+2Yd1T&yU?4OI609lS0bg8)y&RKIku?GcvCv5QIBsk+_RP>E`O6j_SuM5k9{C0a{_)> zG@B3i(`bI~a68BM!Q^3kt!HJkWifBM1P+Vw`0N6b z+jD#??|#s6=*<7I&vaqS|Kev z;6v$IDSmJ-)ZHB(bxHJxDAg7{vhQ$=+J7VYe`j5C{wF7icGpAp?$^?=(+WHJE_$?p zOZM2xJUbCg!$9-~m=Xc^BsmP7U+{KsEd{%G{Vh-Kf@^+Cv!+P&o{QSOq zvOUUH)?P|y9{Ioj%7k z8t(~@b!>o>c}Da*3W}$d|Mr+${(dmZ)IhH_IK8N_`neY{^Mg<5dNQ3Pf7-sd{}G8k zm4(WUfIYQbn;d5i>sS1WS9vzumYGL=zEnUuSR6AL- zy$dBvC;lgcH9rk$#bD(Vzc!Qao->;#(&gMblpL{j$^IKg+1#Iyc(U6bFu&9ihf!fKl`FnyuQSc zJ%i4dgZ*B$DwFn)*{?(gREAN`mt28|$%mVJRpQ_5NAq${qz8L*J^lZxu=&X&Rk{=5 zE@iLLP7S!#^E43>iOKk<)~4(0OSoqB`Zw5&N5Qj5)$1spJ5oN->lQH0Ar)7uUt5_` z#>MrZ%em?MLGq~f5@mK9nay2a2db6Wn*+^Yl8Yy&m1m<;{K36ZWh%=ull3}Odo$2F zc>}T+c7-u9bAgy9;bG$QuQX~*q{$DX-8_A(aeT${thRz>(LKIF2>s`>QrX& zs?epJ@ITJ@b}?#X7L*o~w;>sgaz47Rv1eztAu;m@dHyZw$~~{K4>_-Y86GYI`~99} zSL{qwT!w}h!YR3bAAs*0V0##^c2FYc)si49_eO(6VNL;K=Byu}P;SAjY^)8P-Z66At;aK2vf6yX?3v`;Omlc-XYpIL z{$z|g7OshG%*^+2cvi&cI%GV4;#q3vluP!kcGXXJ(pA@+Gu7{4yxYU)1N7KWo0*R$ zA7M8bEz!;j&vJ+B31~8h%zS7B9HD$}_-u;a@8jLeWcoqcm%Bbok$BZe=T1IvC9}!S zT^CG?)yw(LfAKoDgHa85R{?ozmNJ>V&L>ZkP_7@XIbYA~$W?9)O%Z`6P6a5q&dl>Wk9n z<9t>!|AxA`rzO#^nT4E1&vLg>GuEh@(R7wK$xoRKF@5moUwCl6@A-Bq7(Ov7^n%R} zO76^FeT{Qf&^9(KD|gw;ZAIGOhHq|D&yLzRU`~ciJNqQzHGNK^FGEmy1=$%5i%{(w zv_F!@k1M{dgTuFMP_pIZUdw&d8P2L+sg);Sx(I|bw0b`}CyKBEX-G!L@9{RPHl=$T zlh^Mn8hd`R@|4qh2aupOH1tKhjQ7%9yGu!R?lO5y>89Ro)_M=$>f%J{uG_2}p2uQm zo*LgQkzct#FA`88``@D_B4)DGrs<#K>&1#;@A8A~y+DCIDSM5NQJGgH^?ax?*^f0LC zzZz*t%uG+ei5x3=o3qJFZX!92Re9L6GE{5nTj_Mob|7l6ol^=Av~;>Av$U*|Wxf_4 z^)l2*PSst=?)$!H{(QZ5^64o5bBZDNBxX-0F)1H=-iwTER%(7R`%7-A_}-b>rQV;d z?oXga`J59~jkOUhH~5=qp!Kw&4AfhT66q%zoSmPk>|nCr{sA=Q$#mwI*-6c5j^|ON z3{SSA@T26nj{m>%qE?|#$s6Ctlb?L9N*{LyZEm+pp5N{NEr)?{4%^a~y~_+FxrGnr zhtvYw&AvruzJ_7;cWRT6-OzStHuxdj?ydLa%ss&9ax6FkS@sEbLxam$hGR&`BvOza zfoi_*L@&!5VU9me$M0{%Z|`nMuGnHVG6B!Pal)fKNjUG8WGO#M*G$( znf<(EUv3AwqtwZK^l0DrM)k_drM1$z`KqktR9yH24(9~Z-NhWGGKs3szU4%HGJLnj 
zucOrXBMK)1ZCChA0B^XQU60Zkms&-ue$=6N}hnEk=?>CU(4Jb?z6{io00p=pM?u}7QOh%GoVjxHz4{RY@w?dduc zn#<1okFo9-Je|ct$5Jk(_d|@@+rTlFmL`^JxY}Fs`g@c;k9=H)Q`^Y!Px#oJEqjEt zW;T$BuH5jr%vkiMcezR8tinIaP10YIsV;iCPss=9%bRpPGn=p2s;=PKfhEjY%x?ak zi4P;e`Ks^R$jcm>(vHm?2by@wPt*R7j3dvgpIe(h1;I1y^_lqHhW&`&xsLR%!sqAM zgJdwDt^&aWOStiy=h;)wS^gtYF85{p#hdIsyj#pYs(^6?%RiNF zWTZb2471sUM%wzBk8u^d-3MNosU>65%f^V@obVmqjWv=d{`Dd@;q8Je$;CAp*YDGR zDcUGAn+2%32W}^FW)zs-;bkSKT3xNrraybZwGn$e5oVQPutB*;z?f4m51~{Q7PE== z#-l^Hm)SseGwOl!5uD$}+gCk{C!aH^*XTX_2)QZyCy>5bz}85O6Je1v`Nm`wbH^U2(pD4rf*{TmE_z{B?o2%gc>9J2ipz32?iy+GEt{Z~fx z?ldcR3h#=WowRaJfkPW3Q`Y|zk@PH$AI}c&f)0`7y_EhK_gd&Nkp%;BHv5EQ$?OrR z6%RVM4Cf|^+wk&MBXi=9=90mj8_8+hyZvtg(k9xz%J*$J)CL@h%%A5?7w;>xXOrQQ z`}+1Li-S?URLz_4W*q!S zfFtJ_F45|q_>`Gh=0aDipGdKPYH2#23`c<}o*YuZzXo(G)a*#x-v>+1l{`gSBf}$T z_qlp)fWO)C%c-fOwN9eP&l(qGVu4QO~Ko0u71YZmTH zrT;{a!m&g>Jf&o)F&W3BE5nVHnWN_XPgefd(3*S5U1MW&Z8Z22O_q?qMtJfpYW2bA zoY@$quO6sB8Mkjm*}k49>+=kw%0zXulROY6`+#U9D|Eg3$plzKpMN4x^GIcUn)|?* zv)warHM^^WV3kayBlVIfzd`I%?r3d+Pq9t8-QaHRR3mAPL6CK!+=&wFxPeEIvtWP2 zlSB=D0q#doa$O-Ii`bA;^*w~uKeF&q=YcD?@r;9|F=ev$Y&)Z=whYfTh?Vq zm+~k&qU>sKa)N(4>yrJNnmE)=pH<0TC%rD$-w+hYDAWw~rn3}@Q7x@~<;20>ByC_} zudCr*vPNC3r&fh@T!kjfJWV#b+`9Fd_IHNY>|&+jQ?=V^HFKgnjY%8fcrUo$AqVTV z5sg0wJk5=NW8v_)Zx6HXCmPG2Mx#1Ki6`);wN|r>`Y1V_jL(T>oKHe>Lhl~W7yC&T zjqbdUGtqWmRC<8T&P{EJ{yGaS6SLK~pxD7${zyM{S(;=vd{0|t_;@j_SD{KWe;$RN z@qqq-TA3xZ*3L7F?o5TD= z`f(#TbK-mzt5gmb69G1#Jcp_+3SZ}a{Ut^r_vmIlGv`@4@(vF}+Y4yk3i##Ln`AC- z2BT>pX%FlD;hsIK16Ya0@M{jkHHAJ;LXU4qRWkkVLx*nFQ*w?y!lSyNkor?`q&axr zC)E$(*&bwmuqSQR>A>3=gUWBSE7NH50`-pK1x@8wyoctuvPIv6=V!9}HF@mBW{g1d z(#;_``*klXbR-+`Ha>^v%h;bENzOJjeT8K^S|6{Hyt!JPiyBi|-uPXiY)?L8ZnCIF z4$BqP4&AZ}JxqJ?-rCa8L`psFSt8(9^Gq|N&FS!d+Agj9#Ct65W=y8(**nQPbm??V z?v|TRMiUjaJ2|~WyXAb3cX>SC<;44*e9H@qnuCgYL9%X@qq*@1(^6&-6JTC}6?oFO ztcmAzKrh_xO4sA{->+;|?Bkmb0$-V@7x_)3QX_4DMw%~T8*@gxtEY3d^ad+57Y)xx z#d7FZ9uL#wO8@KO)L@WxRPP*G{07T&4mn()tVGVR=U0|Eh zx)RB|0i}~?D5o5LrC+;{=le2?1CD{ 
z;_eV5<`>@m##SYhN7g(pLXl$&>s6x|7i+RMIXilWvOgQSb5~V8W790F3%O-!9Xp?# zKiO4DhW|P6sfp`%`$u}Tg27PphC>;`W_iR423#kg3Bh9%2U zaw5J%#y~Xc=sxFz|T#UDg9vQ%nXB{hFU{Qn7K!;!G*M;; zJ>b+vEBoTW41arrD4B7qqUGrYoPUDNL-eKruEyJH&Fjzl;sG$p+I%uy<*v$!yyC-+ zPpeTZIaUUNrkgSABT~~vuerN0aXVT0%KFtu?EB-|eFV+AgE;YM!Dh44hw(U*r|1^A zb%1YT0k>#zhEZs>C&|NlwUOm>yl#(|&B)u~Bq=k&3rXx!+SQm&?STT>Bin!juNIy{ z;=xN1Oo@W-41#UsE4ka6>8FpMOUUcxtYppzoTZg!s1~~X48PndQ(x)*JYPhPx1#GJ zai6BZf?+p;s4;b$Yd zxEOZHRFkX?Lvj2)5G1zmQ}9#;UtO}-js&zZj+Ekps;cohUC3S1(^=Si=tO6H*~_Qo zA^soKc#wqL;_r0!C^?*BwO>N%oHm_Ddj{+6Cp>z_9MYZo>x-S|>^WwK$f|IYK&*Lb45G;?thm*Az>B|1t0M>3fjZXB-XE4iZL_NG2 zQc!FhTm7__y0T-5m&}gMRVbJo(Yxs*Ssa#wrx^{4O}P}bdngt8N@n0k(J}J$xxNOe zGm8$LM>b!>xnx)#z#B|{vY)-pD#r=lBr9L;3C~HQzMgJkTUMg<&*(H451SZAc7cBv zW9IAV)(93^oo(G!g98#4^HKj#Z78u|6(`&*D?ksPt|?;Pu`Ek@J`O7`S3mx z|3mRnZ0#IvX74Sl&C^Ne3n0Ci@AbKF$=&xe^*V=)$KL(cexVV)q24bA zcMmjw&EFAdlH3jP@DqF0$oD#W&Ac|bkw3+s|qr?_fA)T<(jC~2wDrt3hFwD~KsVvecu+%7Ob>@xl0*f>BkgQ`{LGvE@ zU#zc_;QT(B_!I_{)lHu0)^H!g!i@7|2z+{hHZf_*0d$)BQ$RBszpo}gYuUwGFdb_= ztp9#&Yu9gA~NxEF<${i;-lNzth;U{^+|IHl>`& z+thiFy-UW4jAKKMsr`9)XOQIKWIn6xQwl$B1c{qRQj<&Y3os1CkGBfxZ-V>rsyCtL zaN4?w1Z+0)L~mYa(MS4!92+$rO_H}Nk-X_^44aeau8%DEm@R!b1l{D$zQsjM1A62 z$@j_WgsjqMrz;Ukr{Vmbep~WICj7h1?;cue?4-4vT{x1|lva|ffNoDVFmsVa?LS6x zPbmDacxhemwhz3MscoVbOA%DL?Je_*tS^3nqnTS|9#J2ybJKcy%DzYsu;z0qlC75i zS(i=(*A$wcou4n2OB`5ju-~liDJWGA1rtG;h{n>q=p~%Z9AlYx=l|OmF6E!ynY46J z^E(h9LB20#OImtzh4MLro?DV`R`YAVOB-@nz0maXB)o<9TlM}ezO++v0#3~%=WpRc zIdYe?6Wi19P_(+zP1v`1xNAL|Q23}%8l^H<$t@!LqIc$ObI9syR9LFk7odKboP3Vr z&Dp@|N}t49=Dw8e;JAe?%8aTP$!txwBT4_n%QH#OFtFXBpX41rn5Oqbx%74nn4a;! 
zCOe${jq)VAvrn1zXMUPpmWt&1N#D-UYvvyBYO$rK4|t#XNyVa<FYa`tBdIBvqRcm3amY?f&)61KLOcb*3GTE1W4Nke?h>?k`)fpI!0 zGCRnQ=XP33-;RiC%`7rD!M zp|&@$eK}v9oL8sn`(H|R#*5rYopxG~!aj6;1^ND&jZ550;+b-uAY$|g@X6durSY7an#p+@=KaoE78&Dvegy7We^7jSO0o?H2O4ObKSovgT> ziZ!1##;-mwI|P4zHNwPu8;u*u`SB7|nF3>K7>#jVrlS%Rldi1=JEfHg# z_%NC29Z9>|k+0gMFDDizp?DkcY=dv^qsi)e6+XZ%{D{m&hOqyif^|>ja*zCNsGFJ5 zY;&qZ)tHWw!@bL_tuiWh0M%nG`xsKQ5k5B=Uq)y>J0Q1v+DDzU;JiP0f2DCBqrnU# z(CaAku`wk1yw>|krqEG*r{wyaj#nSUX-{&WTTxc?tIudM)3VsgP?{Jh#JlbCD<22IqonBUfX$`!a zYGV!R_k~CHe7k_|Ht;3VKXbozN`DH@KK>6k4rh+`9-ds{=|&ho#vh2MHim3{jt2|X zT7*_tkhp2=)+jJ_Q}21YlKfYRL~DnaJ^j6zMEnS|9l+I(BvgSx1N6xqdsRF;0wv$j z!tSV9(%AWUda3@h+miDtndfD8l6l4ByuPh^ybSJL)XohqxgGwgf>X%_k=z^u(CRU; zya=D0!SSAPGbdnjH+BQkF-rfV^cElXHFEwoI_3875yqnnQ2JB)p8HPf<3qfjtgIyN zE}4hst8=_kPqPmV$XZ`iK0`0tqthhR9YnWI^3cc zTh1djMU_|i$UBjo+#FZQ-(=5dgysX;&;9k7e4jfTziJyfbJN0;h1DBQN2?mKtHPif zA7Ky*T@Rmyup9=Hv(%f;@~zZI_9358t1Esc3)wR8*V5BCaySf~W|GxAw41z{iQ!6) zsqK`SfF{x9Y3Q3My__}Axs9{In>ko=ZdE4T1L#L=SG?GJ$XaK0jxcIJOny(-+SSE6 z)*$aoqtbHLW+=JX>VKkEFD1S0aq3$V+ELF#_5GA~vrh9A`G~wE2Yx*x;BmAq-atnD z1KEs=Xly*o2aRCgqWzCN#hjdapAV9q`kdS71>)R{@`@2Zv8=Igx5Fkg=j4n^X1<4g z9!}SDFGw;k)k3cqJxLtS?w%Z|PJGlWVOa8*5&@KW{V6Dyb>o((^xxC#RSN&?cdsV@ zPgc523ln^f7nyjulDE}GpV<|N4;rsCyB_6@=)15Fqi|pX&#AOxyqG5KgxlGbm;jQU z(0MJJ(^A>H$Yf6YWCb~;d+DKS@mmQkYHBk#i{1p!^I7MT@0XLx|I}aB$#=%N?6(~b z+cNL}$k*QoZt;C{`^8P9=y+Pc6D+SPXfjUed)ShxI5U@Y{X@O9T+6qWS{y-pGZRft z=c_#bGwU!OY`cK;anI+2J@*YxB$qGZ@bjq9#*@c9JCQ_g$3o^@`}cT!3Tern!w#h7 z7QSrqI3KUg7U1~|+{tS>AO6SV=)2(Bmv`_rI%Q^XKAC8*?8T%h=l1u<-((7J%Uj59 z*CA|G&TO;>MGftKPorDV%u4)(L&;#t{|!DRk3D-Q$DqJ~0)oZx$%_0r1+B6lklRUi z0L|aYUigtXx9oO=!|%fTd*5pKtqiK2U>Uhc9)ZiWnLU_9AD3bwckyn5w*&BTB}ne^ z|5)Q}D^$r@qVr)Ind@2nE$u8N({P?8*Wr$MaXNT%OJsN&AHRZUhyMF>=3XZj{{B30 zY!B1qgw9O!c(PNDZTs(RAUCo`YSYU9_LI5A5!RP-cVbTPR079u%9l>I*V1P>9@6KdwNou>Hha~Wo_lQyyOCW(Kyn99Zr^{FL^*&-_3g1Wi0e^ zv`Ng<#qezg%g%*&mi#G~l7`$Pw1rJfhSOxa>PtQop_B-u7Fu13QePEb)lBvzIcJjB zDUm&&=(`8YF^0AbBA-8iV1Y6CJEi-0w-lv5W@CTTO1$f8Y+n47tUX?=m7I6ZO;=M2 
z+kP7hmwdCov0^i|o6{k=f$Rkk_cqFW2(rQGu@tYBycE-J8_Y{YNQBIJxnH*$Lz zT(4CkvyhK@A&E9=rsre;K18Xv*w=p;Ymz`y9V>A8@|U4rO&cYm+&-k~0F|`^|`W3b=A3a5cRr z_iN?_BhYvpdo~1grxH^%oNGEbeB1 z<4d;qah&PPmi4BkxtnaDa>-(leTwgV&Pm`i^?xy5%<-lNc}kSU4P>R8-)%IhikivL zo~*k$)0G?BHfVjQxAEhe7d*%f4qHL{9!yUnQ{zcfP33pT+uYbO&FGg~e-j0h7`Zhx zA-qdu)@-ew1;)lKVY2Z~1=%{*BofsPg&#ue@I7&Rt>Dnu_k-Y_bGiTUHs^JY0spaJ z>87>hJ5HqFkzg4K|HElYb`oAi=fjL{<3YV1ZTIwlZ!@dJw7jH^MdU8`DBh*TWOzz^ zTOw3)5_JY%Kckn~WFaGWdy+hw-?grgtZtFOXOms4S3z+#-UYt~AU;Mv zLr^2pa6hTJ7;M8yNX|heTki)j&7C(ni=TO3PC?vfM1R4%Jw40a8L>dgACYLj#OQP- zZ?~#-skUCgxvk``a^a!A0D||76l1~jEuB091(U;VE=+gAh1bd5r$)h?EE%NC{@Tb* z>idH_c@thipPW8?N$t#P-!T>~(o+vyTZ{v_DWM^Y@Pg8}!ZB;Bud@;zU{)KJ9eKPt zC3G!`xLf_@=n>2&!74M`7u6U{nseeJcdDeH=lNt0q3q|R=_hY9r~j}hI~zY1fPEvK zIgHjl1-i_AhHL#0Z9a?+ojsWg|HP+EK&Qw|E59Q=o1>*R-uFex+>_COe5?h3*4o?P zR(7JE((YyGbvj9DMAC0Yt$(lz=b`_DZ2YpqdfejsZOV4#caG5i&A7G)%toN_Ev)9N zxKdp|v*tEs22`rhXZ^fREp(TH`Q5jeZBIlq$ChT&T43;wsjvIiJ;Mz@CK@E9DL zhwGcQzQEXgG@Nc$Be$!6XjHiw-f#GuED>jr@b&bpmeyaypB3o*3=Y(QLCy$0V}@~+ zev(6NfAUsZDNIKDoMjmg_pEwmExCqItJKb|6UkGRXol@b)f-0R!P-i`@590PhIzm+ zR;9e&XVTbOAbDE-Nw9qbjguRzj{o)9)J~pd{OzmnM+&c^3MtKL^tU~^P`%bl<&;mJ zC6C*K>`CI@#=vg6Z^>Y`QG2aWc@xX^9O`^T@_H-Z8c+M<+!E53eb9mEHWgHhLE200 z(wa*0K{8i) zJ_4nV|2LGIV73`QVqbDzyt#I=|6da{-SwYaD#sQhZn88CXR*ex6Mne` zmgMVvwjpcME6gM^SFVrmxj}K6PunSfla}`Tk#}WU61%T zribHXdGq*tl}V=BOUUMS;JL;7A7Rjw9F`e1+ZW?RR_KSJd*-s+P$Dbp*}I4@oYU&h zqu?Fdi$}ZG|LfVcc*>_MU&gf62a^*s;@# zSyJTRrVaRmywnK>{>+PCiwk-{aoVBDm^|6=ba1A-pOuP0B`0> zv6XFEgZakeL0Zqv%ZX#kO3F9hm*zxiKX9c+s2{D!oqGkQJAD?Og3z54lGCP}Poa(|h?8AcG zPa5JEB|7O?HhrLHAFvL|i#e1w_9KsD_=~@=P}A6lL;^1=Jhgc7Pw{@|lb{|(l*HD& zj1C(>@f(Y@h6OJ9L><6ggRJHD!lrzmuh5}39&Rp1gEQ0^4bFwES#Aiwop;gEsF%2m zwaO%}sT*t`Mah@pxF6|APN>K9pR?`1;Pp6fm#Up?^vQI%H!95ZsVT_}opKvjywa{P zXw2h2Ri78Y`Dyam3eDn&=S1)6p2dzHjUw4A_?!0PL%hYlo$hZkcrGk@e1J|TQhO6k zkJq2^VFkE%FqR)d%iECprwjbo7$xeW_Bpt95L}kiyhJ$UJbF&T45pj+!DNPVmG#q2 zpL6t+ES_)roe1W4^mP%)awEk&++Ji1PGnVUI3>eKvK;<~Lo?BASN_okn4YQK?1r?W 
zWewS-cnO}l$zd{08%@4@sVo!X(ax92XHH&ZpXy^eIa~Y5O_p1|n!_S-GPyVMEBwuT@K*MCVKhkFnNu{!ubkkh zqW|1mcsowT%TMmBL<=N)_|LFC37q@FEWTlKe`amrOzoCtBso*O5cSVyCz5BYJwBg_ z<716ECt2Z1R7FisY8Kl18aiD8$5|+w8F=(4=VB)plngZ!<5h~ZnMJOK7|9dyIEGL0 z0lq&?{}OATTfTGBAg9rjOKBdMbB1Li8DB_Fdno%N+C70^y+}{w`%~>44EJrMuNNLS z@_o5h5`CCi_9dwPxzF*#6KQo~(MEDsEz@FRzvCC4it9Pg^pTM|TA#Z&GH*SXwXSPy zxs&D3t)gceXLFZ8ZS5{qCub$@#PjKTN>2HM^m`r(&q48)K4n#58tGiFzivFSV_4YS zYd(?&)L=!2s5yZ2C#&y+djC?{zWVzd?QTQ=>129WoIaE7N>t|!u>RW9yFqbuQLDMu zVgc$Hv0ED{$AUMbSUK2yViuU2K8|1ws_HkVC|^)&6F%oA+W6}q!zTH#vg239Xp*dk zSu@{&D+jX+%kUzx&y#pUxpybGr+mX|Z=nz=XQ`wecKN74M8{`?zN5M$pUr@D6$TbJQlm7cIK&7 z^xTGC#HQu!b>ix0!+tWKC^=?^!L|-OFY@dT@->JRp6_#Q64n6xOY~Knem$+Xt?(>^ z+d7|P%X5eDwLGu#`bpH+5O8Go>dj(?RaRJ&MC^s`*=5Y!uoKFz@i*t!G8@YcsSklF z`%^g$mMqFS2c3L~-MvYS*>o~DTlr)ldy2;NgVjZJC;0~klYnIG_zkSd$vCmV@;z|9 z#xHrnx17LgV?R3&Ngf_6dQ8}avqqExeCzdB%EtO8 zo1S>9#-zFf3o(j)ejBAurSUJ|PVCg6LQkIZ^c#|q_3Rh%^8~!iO{5ROrZrrWRrz$( zZq0{#lXPAPrtB9cy8LUN!+GRxE_u(qBkRb={o9jWsjcK-%FH%1ful)S8HjVLuxml9 z?cj9+JGeW1GDE7WOa-uIjuVgYCNTYt4}J(8%Umfl=SSJ=oc!#jg?PtxNJ@5W67TeT z6xTi^mf7$`xK-Zc5w^zNIWzr5w0zwcQJRr_iwM zI%KD%jD(bVek0vnODpzZ9lwV4+a$R*DJrL*%;}SxX%IYSE0_KKWaRxD{{GGTw2|ob zwe(>$+fxa)w}2%%v~$KGcNb>2v9CcEb|i zR2eKeyAvJS)3;KMVEDiN z`uaT>N9HO2jnS89miALtmR}Uj*s}D zejgx1vq)d!enu$W2Nx5me!d=Ndj703U#q{T(Ka)ptV<8~JYMTYG*1?zjQq)Tx&$m= z;mruGoe0ju>GoOV`|ZLvUJA1K_VKs-f#)dgU$3WW?C`w}gF6fYZ%vvLY$gJdS)5HHiKpc+Vw@do@D0~{}ZW{b27P&`3903eK{0VJz(C1_qYM%uN1PK zOarATrn}ouF{0;&_(O1ImU_w0JY7qV;M;RbFVSv){p2Q-exUD&l38W^S?hzz*KP1i z-q#Lj`nW!F{@`{n|!Es-?W8E=vmH*2%2eOm8L?nBF|+stXYk-JSKH#c}4 zhHHm<+X039EB76_$}ZY!y>%$O{`+D3D2V49EjqwIaqbPl*-$&j($rz3?QwF}0!-aW zZ?Jp7^F^qeyA?lS9|smXd#U#g;BqedWUYOSzqwJP5gMMXg=DM98Q3*Ak*JUe{H?G)IX5(pJXKZWJfq4qRwH{hKj|Sc#JBN| zE@sikp}-yBdlMEjJtLNZ!ENLsSdfjeMHiXkQ%{Uq^{;YPV)FHqz$? 
zc$TP)CC1rzNKPWzX23SNerIW858oRW^NF0om~I@sk%c=+FNt4D*4B2uKdGN;BxMoo zvkLe*92bzz3yQJ-0Z$sjsjKlM=je0GXD8Hn2EG-1tEG)(^ZX}!e79$P$XR6`aBj^$ z-1s<=K4kA^6!+vgl7Jc~pZNWX1+%8wMvCp~bDR+ilM7F=j zg@fqxIOY1&`^sp%k_076;1s{V;!9?g)0A0(+sC3@va;9HR-y_v@&M1IYsqXjhOKJF zf|TxGX^A)O3y&*aTUR*dHjmmcuD~aYj}yQ6P*x&7es-$k`#i7AVLZ~DF}Re4Nd~4< z)K4y?9r3CI3f4l$oS5#ZrJup~B$$7OSx!H!0DJaEa-L^0`;b_v+>Muvxi9d(Kl8Z? zy5`LD5T18dg-_Mu6|`<4To$1H8=y>7e$GyWyN8pQuhnZ;NN#ZW1Qi1HjiBzzzK!sA zDW2>JkH7P4MwvhC!e)J|RCY)ALGSHI#~{4E%DVhf-j*<`N6x+_A!Yc#6^6CIxSPH| zgHL(A-lX@eJMRd~JqwtU?KkslMgB zVv7#I_p}&KX*VS@YskISxks=e8+T&C`S5)h8?jCO&g9}V9z!BJ{;JpIN;Wc$%1Yu@CzwZLHC0X&#e=H>OU9$Rx zve|FR+^jT{$O-Z?-;%v~iMQwAT?_v!z^$d0awp-0qL;w(`>pBW^OT}qo@I8>TIo*e zmiMi6>&O3j{-RX3BKnp-9{H(V%dLHUiV#yZqF`m^&Lp8Ue zLb5X@<~|OE6SHXlx8CP=l! zKyNZ)=kAUZ=~Q-Ql3{tdQD%wSk@4K8m5j0RbZ+&3px&~#_JjX@VS77C$xhtk`dL91 zSK&!K@!W}$$eq4qFt?&i!t4HYCFdb?qNNu}e?lMY_!KYsv^xv*o7Se0w8YZntbFDd z-{Ep{43}b=<0UOuVngvX*^!T+ryuC?D^ha>I@QO!<@Dh@lt~QK6wtqo%h!_TJw5w? zelJljbIe2tt^h~wlj(&@iK_UQQU5FYzAsFl!|m_yO&fq1vC}+zw421<3$%vz|63TUp`= zv)bQ93w5=ZHO^&VNE~kiZOlTyXxN9WRTWk%_ctdVCFiM=v!RDlu{jTutL%~{=4d#H zd=JlO6x13-wvwUh8GOxZZL%U|@9;joXGict()}+MF`0Cdy|BG9yBY^>@IJ91@yIXW z74$LE{|Q$!pGd@iX5_Duh3wcQ_d##(cLs0r&^?9vZ^JinEzPx-yF|L->g9U)nnb>e zvdNixo;o?pv{7FJztueSYmJTl(KPwGz6060 z-v6ZjF6!Jv8ynN(e}OzPg~{C761JUTGnRZUCTWXk&2!{$p}zXz%$h<6a_dVs@*jG> z2Isk+&SF36=`ZUMOW-&e77x*xk>I+|vqOzj$%J%}(I%06b8%)X-T0Vin}~^KWONR? 
zGXh(kUBx@HK7aq`Oebjja zwzcp&KKe6$-k_m1(Qlb{kH_8YsFabZ_@+CPnguLhH?Vw)H}T=Nf^ZU8>ad~hNq1&m z53ru+6y9%QQ{Sg`&9uCi-!sww7`1b%D(Bpq(uH`KxdCSiTW}Ssj7FhOII}ZZKUXhz zf#N*hGGm$xo;mt>Sx>D<+A*XdSrdQZ33T^9F#@r+-Du0##+LE8Jw*Nf=v|u~TuQ%& zlE`NIZKj2@&@HzBWhLMME!Xty1??_@+tDol<@}42)IN`W`O2u5Q9h@DFN5=aG%za- zJy?j0WlyjIi5$tw&OG}4o4&H@m!0Bd1CMXfLT`yuZHqtG!gMA)bK}Hl5_mj*4$@y@ zGfF;8_EQsanw^TZ|9KvH$m;BkN@sOHClKN_=YM|VHN`XfTq`++8;>(jx{}AQ(IU71 zosH%>!=6>)+<@1Foy+-zPk9|VU6;G2)8|~=e;$VtX}lfW-t|AXNKa*H61kO3x?S+P zKB?SJ?^zj+H@CO{8}Q>)Slyw-GLn|L>II}Q*~sFf2Fq(;pY#4-gQS}IMb_SHkfn!| z`nzWp(K1-%jKSAxBzsY!X=ahByGUuGk8(Afl6Ga%)_4D441vtPM+W^w3+DHwInatXQlaC<77)uUowN4p!`PPGHL~?5u|EY z5SQZiGv`ShQ_jgG((5+5b`FZoFm^Zgb0co$tnx+59Sghf;ZVlnW!o!o&7tqs*uLB*HBJ%PUuUlGTjr z_*za&nW^L_vx(!?TIJpErAlW^_R#0=Ptw-$C{$PLIV*M=E++R`?g-f6Z*N+W`(Jh{ zYMl0e)GPBjF$#!!$9p8^ne+_B5 zpS3&57_%FDFpH%>lHJIu(&XOA`fFBU!|l__=I!iaqNuI~{dh1YmSmbw+teRpl>7nS z$wzk{tI}0Z-?O?og^-iM%e8o&x*0W-!F~z5mYXo%q2W`FC|i`hhL7+Ns4E+l8l%HD zxa5q;RU|&yNpt%12(_L7X)`*zSWhpKgh$w`#Ik&&m1KYIj&s9V_mSxPn^F53kmuyU z6Z)B>)yzsVuQ?8lw$R)}&1_L_vXz~u^pSP`Zws1Tj2_8Im3Y_2%4c=^2DtWN?XIAg zQ&8ZH0+&P8i+7y(l*i$DHY(>d#R?jo*x~pmhp?Qh^*qg36YutVw&ZefKImI+plIk> zvd-SXhBrsoRUrEuWJ~>Q)X#KfpYi!fmboXoUW!Il^^iPl*;AhehndQMg%8i8S3L7( z@ass&6aV#A`({S;nKUP>`#CR?IQRajJ{rUyn6LH;1z9%h9HYA1V%e}ZvV7n4zJDE%wtlg`@W zN9<-_6wAD*qfg0Fkn;(h3mwW{(|NGWY^0g8IUUu6M7~WPuGdl`(+)0ti%;?Q6BOG@ zu5!kIGdo?%`kNak-U8XB?D=xC+l_SX=G$F#c?H`4tjC;E$olF~b-qEnW0pHvDtmWx)Gi3LDf%C zw>cW*j;!p|bs$xVQc4Wk+gi&Zlir^T7Gl>dG&Zkd#1t%2pR}ZaEAVE*Tc^EzWfF>qFz9zr!UKALr{19Gy zMe=%}dN~KNhv!RZXK&Iyoy5IB*4vZ&>}Sj=<_n#`dkBjD6dZYI_>78hg@}N zM{?yRmu|d-r|=?k*$44)EGd1Jp6>zTCt1VfGPsjG_!TH z!1ZJzZiB{GlBND+Vh#yu3d@;zJG8Li*$a7`UCGUlOUcDr)VkZe@njsX&Kk`^q1cpU z986U5X40LR#1g+-$xTN(-w&TUp-M0O9>Rj$58JEA$`!`HWuz~2hU`S=zR7E7@_POM z#=>On|iI}@#K2o1eeE$;M zE7Uqo-`}#2H=^LvDEI~%E+mV)D3@C>J(bBx{CGcEi_XlVmNFB_a8_A& z@-6$rQ^7I6sF68GYL?eeX-zi~SFezOex9}RH1S6*JxjzxAD=3au6!%=n_Bt&`%k>H z?3+AUyc-3EJ7`oBy5G~MUY`Dq_rD7DLZ273{SaE-4X;t8p%+=oUe_J|??VpCv|o<2 
z4)Qh`J&%D$aQ~iUl~?OcpGs@+pRw1+DW4VhL`>)AzpUx!)bgchIj&fB|B?a@4Y40jmo{tDBY#nZ&GmR8_PyDz!Z?zn=Zm5X`Q4(xO!D(7pihEMi~cI983 z%POVU9q{Eda93cP+rr}}&;GzR+(cp{R8=LBk$_-c z${ZGnNif|MIl; z9$Nkxnh#{RqUG1q?on*kE3EG=BxoR9&teO9V;5(k)R*|xk%SE*$^T%pwvnziG^Yt| zYQZ`*fmKJAu^t=tpx^8-e1IyKf^>#D$FkWo^pV^k*=uObdUP=cjA5T{;O#w#(}%K+ z%SqL8eP>PdaQ*L1V}CZnZ7Fo?Ly%r<^hxYOGV@;zqKTl}q?LE*Xk}2%V=d;exTDZy zh__D|y#}+1y_GnLjO6U^hvfEP_|0HBzcD7B#d?fm0drQYzme=$p2Qk>XWwKt+f%v~ z;ydtWM$kyD>;=5dw@e(>X1(q1a}Oi$bWpAI`?&f|P~lm)@6R4LfbVov&Hl$>`cI_T zm7dh2WzQOA_F|di3+AqYW;A5B_LHmWY}odp{nvX}TYJ~?Yc_x}Ypl;Gy$z068mp28 zG?AIzQR_$ESx)2iW!FQ^?B&Fx%e)o8KN$fYLgjbR=3+ES#LPVUUJWI_!H4AW zO4hAp-%b4$;M;&MM;jR~hiPT7CA#(?_&36b7f~T`@K>mHDtyQLb~avTm*QokR_^>e z)%VM_I2XpfV0V@>y4Vd>EIM_Wqy<>kN)5a|C{S0H%TtThvp=D5?FJxybnlMpzq0e|FqiQqv7)?b}>8B z0{5GvN*j8W43yp3(G$t_a?Eh=Y{OzJK)Ue&#^3i zX0>0k^~vMg7jDV@oKvM)=Z`PH5sss^)v2J-N+nLELuaei1D0dS&u?&>r@e~`J+GsO zR|_w53odm>_v{T_sl;vCs)N@r8bNbk_PMN4ZWPH~lS|dwh$jw_&Q!p^Qloy%dHmF~n;wAAZ7(w4bfQ@^98NNdhWB(^X&MdpgbpTmn7#8DT&Ks|QeO9vv z`P2iJwPAaSGHu9Avj2SriwS2yeq<7Mm zWTw1d|H)R^x9I;VRCx>an}GK=c&^vd5|;C3aDRilPvCXV$-INYi2yqRwTH3ZrMch` zknN9pxubT3_C|qtsros?za3f*Ck@llEc<4+>M18Av(J*~n5@%`QY*VbKd~Q&kkHO* zlrk!O0^cq0>B>$XXheI<3}Y(%|3AL&1pLcs{TF|w_C_1ck*OV0NlB3?vUg>cCXy&2 zN=g(WeC-`dW>P6hQ7U8#C4>eVkj%4`S(MDfbzbl1qaWw|&-q`U>oYyi8t!@BYd!b6 zSJosCRxZ0OA1drqdlHj;K%Kposa1uZ{s!0V=NL@p)~c0fZq27LPac3}^cBAPzo$p1|{l#}RkS9iCvU|x3mAOFt z|IGYe;%S8srIp$2Q>cn-xl&Gx*Zb^Rl+XV}v*%eu&Mz16-=FQEOV8k^fZ~wWwj?WsO;G7i?64%T8XAQ2q$xH;hnS~ zr<~Q*ay;+MI2IKiZ3S=)cO_3ww(*@49@Z2PZty!R@28Wq#9CF;Ui|LONo)4QCr3hE zS0;PcMwhhusn3nx*)5&tPI=1GpB6q(mvjFEpC!*bnUONz3vLzA?1AEU{{LKH9-b!N zGd=f4^M$ZX#N+)1|1ux?^L0ErlokK``wZPO6WJ6ORGQ6BF5vvLPI;PA`geiv@}wl6 zyOV~tz9;K7&zmaq)-MNtdD@bbR?^m=ZT$cLYCv)_U%iKoP6X97<6+KzYe+9XV?U~} zzi+eoOIWazXl_$ubk-kVAw`GN_ruB0t7J17Ym(>e3NjWwZE8dx$PcK;vw4AbCW3p?BujrHj`|x%vd;Juu2jXPFW9(`+5Mr$l;J$mYN5^<;^TKtyWCgM5qmGPg(!D=9X zBnzR$xRilAtImlq z%MQ~%xRsseIl*;lf!kZIosGMRO37~TL41a+P~XB1oq`V8sh6DSiA1>@P1+fOo1x`M 
za?wnU`PyA+lt@m>M24M(6X8?xn`TvNCi;ymBq90#6N53TfFq~EHHUd3RrVoUi9K0P zLreQhYvOR`TJ?F4i?#che%=T1O4vuLI=cEinq9z8TWJ;%ElG~PWC|Kd2I_$(dBJCb z^kMjXS$LYsI5OP7hfySvP)8e~qBl9=p^CaU;!$!G4ApOabw6kC>Y-dB-+pBef2S>p zFYAUXUy%2l(s7*j`YN>p8F-uSXMH-*BvvXrl5OEFspeHc$R^64(U%MT0dOLx%W?!qva@{ zQ(kh;U^~BN=`m*F{WxEe<$tn8; zANWob?Ti{Dy++|qeRO%1B*oUur?F{&Cv@llzgqB0O!*0{!oTtCVxz`ruxzjW=-3-9 zz(+JMXW_NbN>A2gqhE*NbzRo>Dp=&iokR>z)ZbPxIiKZw40UIq=kxG>)B9GhkJNhz zm6O5xSui9tOICscEk$S>)67F+6bmjbm(MM_`#h| zQQ!x#Og9#N2F`Oy%5pS}ms_c@x{FY7H&n zIln6>foGok6?#-P68xg2%(9kgW1;IE$;m^se=l-05&zC}FPJ|STPl z-RPM0u2TH&wXn!eqQ^ln$>)3!uO}z5Hp$1okrmJU=~1?342pI}y**GVKKa9>y#+dy zGJv+zZgzc^*4^8qQWI@F?z@s+Cy?WuCm6r1T~T{DYFDCthk`WwHk+uMRk_Tt5`!|( zJnv%qcBvM&#L0EI`VM_dr2P&>$;#^I9H=dMV!ykdrv#PBSyp5x`}d6c4U}wx(pkya zO1~pJB{~0Jwh}!V zXNDDRR8?nlQl4n{7ky^+wk6$INf$OVDwWzz=8&JX)RU~1wArseu1Cg;wLM6sNVnbq%{-d#u8?(A!4RNNU9IYnd|nMqXJ zr)1za^pC78&~tWMZ$>_@1nXs_eJecA4)#lF-$HlB<5&FXoFBa{9zR=9$= z%P#2rI-TBz1KHu6sG(%ms;6vb5Bba*etEjq)~_nKm?sE-BOhracmI41_{;BHjeL~u zX4bXoN;j>Adw+ek_Ae4v^6YcxV({mz^Ncu~(XpjC_6Cj1y^>eow)p+mR{p=g8>7S4 zI5!J#er7kDv6Nf0E9+6UBkngPgTEAB-*EOVQ3Ai?%T7jz>~$Q6>&YYfAwDIS>r+}e zmL@KP&B^?R*?)K+W7+eEjK+ItEvHNEkETsvFu(}fs_+OeC3kr)kQ2pzD6CTA5W0cq z0J@wAn?z7QMK%Ic)_e8`OMehdaODN~_kjC)<=z8Fe-eHkem7 zUd~DG?q@5&g-62;FX*@ ziF!#~^?UgLGALH*dp0debVg3D%NYuN*^rZzN!Hn{c4iON6u2eN!gDZq7`4({b66$2 zYtFjPTxI|-CEi**%jtSuM%S`mqQBZ3jbh1^c%WLtV6&Lb>7o5s)%-W8aw^6Ke5nna zUyS$f(a`0n@NfOjE4;S&f7!AAs@}VRH+vzE(|2Y)nNuYv)FnKVp&+PcT**mpdB%UW zc9Y@h5)>R`6t0IF)nS*_zJ5wi0aH$^`xtbqU2X4oo*CuY{=MGIc@E2Axi2k0$U8eT zvhUy$upSHN#GN!l^Y}a~NL%7cazauk)V>zXN8`f~EyS;1fgAPp-wMwUK%cC~ma=X= zLn;%~nf(m=pxNj`{$8c2Z?RfGvhioadaTcdq)XNibEYP=F7vrXaLCijRjk!)I`yj3 z@!*q3=`NNfZ4PIjUuP8u`A(F2GXG>|d!{>!jiA4PDt>fh{AjPtT2!0?x|hJ+9&Hl+ zpV{mS+PH^SBxB4Ih30(+@<(a&+ql&O4Q8kjDrSZC0y&LlXR*Zz~*r-F9^4&JJrHCnmfsFYvZ=wlTgEz{$N zMut25n}tT%&%YWqAE4Q%tACpj<>lCf+S&PARNH(uQ0e@;Jmkfa=fSDndUUotfkJj3;P z2W(22W!92|?0HT6#>wzH(w!dgk4+qJL`X}u_y##+c>$;zDDfuxC$Ca-nlh;vC7;EQ 
z<%N#zN)oE!dSw>#W+iiSSaYrPV1??z;%T}WFZ?p{bF)64!|DBTrY7!Yr~e&bOmzK? zXx2!pBaDTaMZAe7dl-v%*ZN85y&OCv-MNvJRrBvYa8@@`cUHcp>viGRmK93$OwI@R zg%($5sU9N9fqEOV(g0=)aAOL(XNSq%#=tkhl;`B}8!M_Gk1Ho3%s~IoSeraQ`G6ix z#Lq5h*PdKPJAYRa)7e<~wvPLH3bzm(crYaL;gli%ZAA7Fi=ly$TQ>EB3F z4spFN`X+b4Z>X8I-FLJz#Qn*7zL#ISl&&ve0SAz;?tIEELG==vWLN$BXq4FIQk?zE ze&r0~WJw)}rq98(15YQB>j#4=K5KGxd_pEF(Cs`eD6J5W$C0+^wUQ0Exxjq{dT*)T z0^U?RoNcXzL>7F=f{sz^Ae4Iy9naBcZ5s3AN$-;-F9CuzXb>Nj$= zp}U*b=*z)=W8oi_qB{~D@s0n3^;i*}Zz+E#=(jCk%5(l?Lr-6kt7LGVti)s^ch2Xy ztMElD6}29qwPzG`%Nd0G@fYfojI3(62K6+s)P_-AwXeXNaEJHcNlp~Zl6=MURnch1^?^>ibp&e-ie1Vr_^QI z4xG4Kd>0xZXwOAEGVsteW9hB6h#S!wux>aaLeGYw(cIoM5`xsF1U3Z)c;17Cr87 zE|YvN@htlo4X$P7M#I04D|tFN&sdRjWj|xLX5!IF?9nRnT+^r!?{6o*?=SfOwg1cY z(80PwGbLuL@w0w*B3T{OJe@r~sDL*+dp=ZuQ2}{v7>>Z{JUQQke13uoPvAo$^0Pa% znKrJYwaNJw{7#3>M@Gmzz5WVZgIU9^i+T5WnBBt~rZIX293W%{j6r60qcJ3MI*m!DE~ERJ9{V7%ww))g?M%0v7M;z=lIj#q3&}@pSPCx;Up8p>ht* zQ#5%OwCo0=Ni2NU+QPZ>v>N<(0B3fry@L*`Skl$3#ZG9IOyVc-Z?ZQyas3Y}m2;wU z`fyKHEze$)yX1EGKIK|w5ATuSNJC}`jX*yJoFy-|m8;WPhMz!iD{3V3YUXh{hx9-k zJr{)Y@aSidWZVeejrFwxcGshSvM=S#t(@kND3TfEJQ*#X)z@!V1p|N;c%BeGpF7#oCl7}%IU*h^o_G&tsWQ}thFXv(_#AU>y>9h z^FWhX;{apifkwVAXwaO+nC&zBowJAf5%zY5YdN#}UZt9;yANL{nKEwD?y-8^$9HoS z?Mq8vL8WG($@xxCDK&}g9}R;#TA79tS)q6nX35Yx+Q_ssoqm#C?^{SnKi8I^UUp#C zPo+efbW_!e5I1 z|B4LNN5RAk{!Y%jfcIKgvS0Z%61x~@b9(K;erI3JST^hwdViMwvh%$O$gZKWU$NHN z3lN#fUWJ?@U0siLV316NYj8X3$GelG3OJIJ73Sb?Gxg3?ax?hXq8T}_I`I=ZcOjWi zZY7mtcq556%HH@>P`)`@o~ukUNWBZb(V&TcI7-RsZ2hD983q2VbtY!|N4|N^Kg-(d zAd;8Z?V9TE1d8cs`zd^q5$6kiB?|mfva*~fwJ)hiq|?2$DY0bZ!I8Yot+YN$AFDx` zCuhgOAiLydqH<$Y?1ZD;&?yl{Q(zHqJx}mX!27L9N3vTq2Vea0FZJ~>oD-4%toAb3 ze3ks(rR-_sFqvDXD4BVGB3-Z5+m@(&6<>M+`FzuucptuIwJ9raH{)zGHhK_-(lXH!>FTw=jwAj2pY3%H-M#y5u&%d51~Y|$W+$OzO-RK z&<`*|<^0*rl^IMz8i42D=(?OVUCAFfk3Rf}N86djWkowrhYv>ktfZbn(&HH%t7i6s zCTcnm$M+r&%Vb%v0LMLzyw~t!8i8z%wss~V?`!XBW!_>3x{->^kJ^K47bD_l^l=CL z$}BItOnQKEzLw9!zbdpn*xlxS6{UN^vNy>)7oV#0&PJkZAJY1eR+IDLGP2wXZ(92O 
z8d%%<9qSVwK8=pMqr`<|ZIJe=;zS#=d#k?g!qeZhFbA&x*7EOU;tbK{=gUMA|eU80r0OstePIS_NYK+8{ zS}gMYW+#u))wEn5<;rN;R&+5@D#`QpAJiIJ+vF znb5U`Uw>1wZK26&H%~N@!!tQS>*7)}I_*o(;*%$azLB2F$XajkWu5zQ*e4SS*l-L%e_#h%;y>4zpTI{YYwGnB&GX*{S3u(Vq9hl$-R>m zf>QgXnL?cckHr3LPIk@(XIjdx+e9{{pPa=O3TNLzewA9!oT|YezyI3brr#CWv{6Rh zne5xUBw;Pj^*(evghrm)YcXO)Ie636; zBOS?lDWljUp#6mRa~e!H>Lq8!#xqZz_UFLai=FroZt<{_L-BCl{U><(HfqFEuFhJ= zV>y#nWUcyIp675B`h*;1Kklu@s_dP8m`xZ+-g3^@$Doh*IgQ0yq=kd{AqVJns&O~@ zH?9TeDpwDq2b1*j9-rZHTKO?6nVkjoc)efg_d^<&n3K;~uEe)=H=c9=f6lR5g0|IJ zr)yci%u6!gm}*>{&NsW3tiEFO%zF1$%GO~GTH;DF-Ze$tUMO{=Um0We0paJcn2lqt zQ1@-4VRj)8g5_hd*$mGvGzR{T)``0xM?0s`?5t>)^6wvoV}rf32e_rN@JRZ1F0EUS z3iIL6McYT?+1=_TL(VKVAO! zTFGqiCK9t2V~cj%K7)Z+aQQ#NbT1L6FFw)*4105lqiM{~7$1L>YhPTqp=1EeHA zb#j{=3*W>K)MF*S*7o5_XB{$L>|1cj`OC?2bR-#XrQ}@JEZK3!lf9eJbt$ON(#lnE zE6qb5!Lghgm2=1Dk+W_n(}QH~>ph8nH{y46GfL$A?aXesC0QSn#^YSiGlOKg$qu^- zGJV-{| z9ccDmAYY^3t7vm}+Vw??NML*PdXjF>#J`-QlRf$E-CbY!-LVRX;`R$l-{fBA#dA>k zL=yTeZOb#6#9+*W!+4b4fVM43V{+@>3;(}^D|;;dX+)n1s_jU@3>fc9igI%9O0Xq! zW!71%veW0F(DkrNeE+k`-@vUlX+R8M}Ws%q~)|Aqh?XkR>SC z4n#kq`&?4;H?=Z;*V1D<9E}DiI%JF%_b0noPE` z`=*-mZFy>U;r}OQp4oLb8pe^bK80SCvO4EvuaD4t7kpWv?aXkm^PRo@O;Pq_(`oJyoY zX-zp<#oMFiYGt-0Z^@dOIHE*wKZ^Is!;%=etWDM;FOiPST(fpLimrtdEtL8fpJ;N? zX9YFVcT+7^Af*#olc$wl<6k$wd*aJX^0)_StH6FG;^YkPeZf&?{P`4?rO5PO=tx_A zY`~es%3MLBcBbXo&z;%WPhjk&r`l=;?lNuNs*j4e+ZHv-+)K2>a5N|*XYoMeJ1&4t zTk^ODBqh%(H3RdeSE4R5V@UZ#tZ%xWUtQGC>?8YmlW#Nk>cT5ep59?m)}nDDVP0}Q zQRruDdt1N%cMm@2e~o}!Vs$3N_zH4#Dywsrk~e|2B|kpT9dhn()|&6v-gfk<8Tzav z)7f+RW#Nm~QEms+&l-4J7*%sGQQ+lRjk1F8HMDU6K9nMma<9zYbu=|T_5DViWT#1< znM9!{)@LNz9K%X3&~9*yT;;srjAT3Ebvd?wjL&H77ARSvm{Cmjopve~pMQfvdpK^! 
zwl5(k;dAabDn9dM;m>vPMCFgcmDqy6(mUnL740X6JTv;qf7CABDSg)c{{E zI<{!Jkumv$vu>RIBgs~fOnW_8l=Updg`~2-(dI3q?LjQVSg_8<#e0k$zoW$=>L$DH zrL1Qw(wtMDvLgLC=)YzqvY&E1YaZzz&cZz6O3vck4qu*N4QA5TYuLHuJXmMU$?nBu z+sNM2Pw36pWaA`dbEZ$$a&~0x=E3h<5a)?-PULFM4kWK{&cQkdWXJKJrt-V8^QI{t z_b|#;A>n!ET${D}9EU&Edoyr-1p7;jLa!Uil7YHJ#b&H%8@0AUtFK`**6UuhxX|~_ zbSClI7o+IID7XZT7op}(@aT@3ZyHI@09RElKSs9>GiK*x-x(}mEA^YQ3=2V!DBD~8 zx|K!jg2rc|L`__Klgv(`Q=e$76AFxR{~q^eu;`V+|0Ig!w1MA2^fm1`#Q5}}((&FV zqD>;PUw1dws)-Rj*+>wEv~@3>`?xJ zby-42lLcoS47-ExTcsCj{cht{b~#+_UQ4*oA;Z6r&^qqLYtC-6S+0byZH#i$^)!Qw z)pGqdwtoX$5&`vjF*o*3WTduqTLSlCSgOxL)l%wRlU-M8wZUz05D3F-9z5a|YpGa@CJ3{G%sO z=P{npJuvF&-56aHLmn;o$(>j9dlq=d!z)kTx`6dFoM=+CJ)_`ZVpuw$)61Z_*GREe zKcB!qd6yoiSveK97TI``cI}40r}L1qc729A$-sD;_aN_sQ6VEtMxl9X&!&%c*|1Yd zPv&ty(8Bw)a5s%@OHy7hMvR=O)Q!}R$NkaZsDv_4XeXzI?Wcvz`KH13Nc^}Bjkh!I zOmZhVWsbtbxkkB##-aTRp5!FBMDVw8Z7SV44A+xc<}?u90heXQruX?^N1#jAUpt}b zZ`!JiZ(A8{bGmNkwb`k8BTOr^lJWm4;8R1b%miC@YQC&|&H=m8@6oKlS4Q!>QMxny zlC?0=7tM`+m2l-D9P0tjdd7$N)X5ZbWYJ4`8106?16Y-%JPb{AMrBINnKi z=?_u%78WVbHy6_2i{Nz+TnC}YKS^|UtKUthZsnPFK-ZgKn&(}kaO?F#YpWX_a@J!j zWt+I0o&3M)BPVD52O;RGU4QyQowT+i z&*>P~5~)%Th3cxcz`Lp|tx0#Dxc(DQvQB?HIgOw6F5kSjdDSYieILIor(I8&Z~67(m>WCyHT^}a|V8wp;cx!rCd9QXmyG)=@@r6VfsI-sYdj~Q2#=fz&MJ-Er#>6r7B!b?|j_Qd|b3E!2u9V8X}3q_e@cz7OmT0EPIqi|C|mSh|k}#k1Gnv+yy3=v$5x~I&F9Y zLr7yJHs{$Ui`g{g63dmnY1!$xt#(=}HLEB!9Vg$_&l7k%$LN!qSGbq_eed(x?xUBF z!25BM-$EN78hzHX(c{>dpNxM)wb|9Zw_y7TZMj+7&B$^?dO3`yC1*)i!d_snaxV8( zAUTl@tBFE6_a|1j34O~?2Ddf;^;*Rz|X6T9>d4u1>QWC^KC z?q`GI6ncCD%o~9$UTw17G(E;5O z_tT5cW;bmjmh)UWE3;W$ZK%KX{+;Z-pX*<yuhi^1#%?U59!E!We9pbYdiYMYcIZj&?a`z~I;Xu%xNg}g*_9~-DPT*R_ zXFJ55d$k$+G!lICw33+UMxfhW>l?N9nk$Kvxs*m+jbgpvHw`@-DW6+n6C7x^DAKU#m75iTA>SY=l=X9`~kK7yw`hXyrFovR4X4iA|`2wt- zfMXMSl09%|qtpoYIj2bPORuK0DqEs-C%n5|sqE$K#~PiCdnao7Nv$O#>PXx=9qm6r zk>n3q&UTM>Hy++Fw)+ZjCo@mx&i~a@a(moB=awqDP;a%hJdQNop+r;v;xpDQc%PFW zdf@NvEO<}f-zeRbOnt3Xc7rCiFS%=`kn??Dxe!k6(P*LjmxE>iP0krji45t-o-{13 
z4mJWdN0&EASaKoel&GGxs3lrfN40q@arW_^j>2z);4mY4_F6VavwF(@q}Ev1`}_Ag zzOANnS$EDDHI7x!+HPNT$qe9Z_iN$whC=Tu7)x_T%qnd?hablkJnMn$Yk5Bzqn4`w zp#P0%O8nKs;j=UPtwy8lXkUtk+v}x|dDAGgXhuqoH)3}BPwLHW)bgb4sLZKtkrwJZq zU7!s2l8v%I{PHYdJuBIfuTcec>*~1)yRbLAa|;Q2gYD|%-Yw+ed{VKu5?k^r?$CcV z@O^;}|AhAd*Se$fEykttFiCF2%h7xuYRqRPc0$8sZ)r>ZpCNU3>Uj|?vV$razmvW0 zQ4~Gi^(%NGwT%A1kfS$1*%;-zvck!4^8uKyfL}wulhL&)ZORFY86nr}?+u!ED_ed7 z$bKg=c~&r%MkWI7L|2FD?c*ic!PHok7^+N z?sjdH2a-61@~C#`newfzs{!$lx?5)C_}b}u@R)5ntKdIEmt4A6g( zhS;gC@h_{&$q~ONJ&QkgmY(7z9}dc7e@yhvR=84DSh58mtxlhYX(LfNz4W{eUx(A& zQ%PVlpnO?yuPa_86JPcRKI(T)uTA99W~i5y*q@8m%b{at30rwrFk0mdo*D(lS;Ow7 z{seWubpKRXS1=|VL@T#~=h-OuC0(2i+MHyRQ->bMiR4SjzKpYJdiI`{>HRlX$LK%V z4JWxex4`8+n0y73tPih&%@Jhr`htt?wHEKaBAM=@h0@8Wp+q>epBkH9Rf=DU$qCP= z=`VE?Thau_4C%+&>ceeXMdbN0u^XHG8rnuRQ7|3EO_sHG1;cD8?i_nkeJ zH<06Gj{FjI4P5yfh~xF=OxuRwEYs5WyxQ!V%?x5AEC--|8OkM+Dp%h?=ir-P`M+tG zXr9X@&Sj2!+u+TD;%-W8dd(|pbuFlxIG3DmdyIdPzsj_tQ2|p~fn`n+o~&kh{ic5M z#8gD5$;xC^zin}~JQ`>IR<5YIsh>^XIRha%@i+ZXq|9~2c<~fHyNl%I1fTXKvnF^G z0kMSD&u-|9;ZtelC$uAR7}v21iQ?V@9mb>RY*fkat(+r0#Fb^Px1k6a=+^Sty2dN+ifOLoHrbaw~~`x|?lT+QRiZASJ>jCsi~@jLms*+}&m z%hbc@-bb12$xI%PRqXKA#)Ul1u1CJI|FZ?@>kF@B``V482Q+@?3Bon!gIx zQLvl`ySvm$*76BPlk5=cOK!eHk5}|ElZIzCpC>4r8&~u^cH?*8eo4)f9avj{8vkBpxMmvOQ5O=kvbc*B*t` zv_aeW5p!L?5O&Q-UUD=a;@|$@nMS@cn@ObR$EYxj-;$jVdD8H!o?B}rXCT~0s***t zH3`}S#^>N%N4OmV#+J&iLX``Q(Cb))86Zlon>JvciSi%AE2r-rq~7^3D4i=@6IJuv zV-;JJd`aEld$cj>YO=7%uU53DFCGlTr{P){f?f^5@&%igEZ05Z6#w`Xcc&T|H-dBq zs{Tt``++=}GZPuTi+>*(4HMyhJRA2Rtf!!7Rw5gM{%E6PUs`h(-cF&jN8^;F_meSvSo7$mI0Q=}?j0o7s=?G%9h-iDJK* zT=anP5VHK5dzn+u*8fU4l`^u90ppYUxyLHO%XDax*7iXEFG*q|owEZYr*7|POze)j zUy#KQjUbD_w>4_^rxmNT)*bwnad9nd@6vnL-`CKUL%k0t-4CMQk-Um8;L(h)R-aUk z!jEJZ`v3%q^Qg|-Z{kkI!IpZNsoz<&YdbvZz`p#KJdI}y-cj#pBh1xacPdwpPL0z` zo<_x!ZVKP+jl?-4`##bY%dpJY(+zetT$`w5@_8p(EV+4(g!|Lk9rEABQ$r|PJl zXQb8jSJUUeV7@*7ELqZyB^5*Xdh@i<2E-%Kpf5l69JpV^C;mm5n^2)I2p?y|^W7O% z50Zo}S<_wNw>PRkU<9nH_1(dDw;Bf;sggJHM07lW23Li_JGgujPi8qf)~xW 
ziFO<4S2Hz_1ZTW~DQpYT|@@lh3T0?t)$eedeUYJiEy`-PdZnrk)d%dnl@8&&!u+6r2AcjcW<# zrsOhrqn$kqjjuv$wuNI0yr_&mnIpAAm)_v);QuuA*@^sT9sLeGNM_NTEz=I)r=r+b zbRy?rbTMirKTCUdEN5}uhw@qJj}Q41O8taJ;mU9}tSYXJMZe_Vy#(*Gt0Z3Kk=ox5 zue-swH%c_dm#ye&a$YRplT4*44ai^Df*R#`9`X8;OC+96d1d2IjYjEb_!#jkv)A${ zSO2D$-O#-@2(v;u$M>$j`y3^8MC^Z^~<(w^sCC1{L1oLUBeDqrM49Z0tb%g&ig7J`{+>KHAqm;}eyX->qwke`+dXl+51i)mKeo_9Vlp%5yI30^ zDAf-xKZ2_wEzUV*=YZ}`^g2gd`zc#meLuwigUu^Ou;KR~(ZGnFx%c&8P1H^olC%H~ z6Hi}PFBQnatLV`gN3!!Y@5%b#9mm$eF&<()*UGe!J=ck`IhA~T?P{{%R3L8^Xv!3P zsH8-mx_qwhtx$ge8E>qo8^tp4Iq77WFQ!u12-2@yDw@482#7@N#6dxAFco|H|~29EN9tr!G#nF%sl?!vGjR z=YJ%&B*&RIRt9G|_++;HJsqy1_du1HqC9gb(?c1Ub1mmWW&cX%1A#m5aOr;9ROa9I zv^(E_zT!8x)q90vR+HydiQ=!|Gc(3JQ7C(QR=T|pzRLHld)>vZE% zvO(Rh&qSRp(%!ZH<&5;{q%@xBLE6cg%I}c=v)td_7?}u~foORY-aHJ}U1(p{eG?z? zKHd5UXcAX_CL1}3gkL(n;>;8Gm9qlEpBw8JU-4x6~H+S0CJo2ARWS${CPL z&^hh3VY_F;WP2^1Pev2LnOR$RmU~B3TtttOJtF7UpP}aC?7$1!${O1Y_VIQ#PvmjL z18lBD&b&>w^W^2|%W{n~+Gk(xx9~WeWW9~{J-rVsJb~BM*{K*8jzq1`aAG_N2eTs+ zj98PRFOJh!f0TF;Y<0l(i81Y37H4lfZ-xKU z@FrPP*0K`un8&~>PYYh;dCkMS9-#hQf64544H~vK(wt2eUespd!xN_xF4aWcU-j6D z-?o^Yc%MD|9dB~}@8Mv+4{V7a+1>x!aIHFdNp|{6^_aY!$MXxulZ52e%zFB>#?Y*T z{y=ku}gu~1t%skSe;Ue;rxbCO#q`4Ouc zt?Phc2HM@Lon(yon#}C#?!~Oc0r;Ms|6Sf>T+GGufvxDA*2#8q$!QO_^-=S-qZy z`#GU78TE2%Y@QVC2iw&sbs6c1Z?~AfBnx*pGI&1jBr4@{b=H!|U2ydQcc!|Q2%EEU zY=2t34g@(tC?3fmcQHf>+M4e8sq6&nD-|iTln`5S?|YcU90bV$ahOPtV6?f zbZ=jlt-aBuno(+y`gPgCfj%?3j-p9fU9Lw1TF~wabUyLoiGw&691iynRn90Jh$5mC9>jo z1x`;u#S_?-p~mKBaH@|&$(etWtE0)qcx@dEucfTZ$2ivC*qA-*E8S~>H_@l1AQ=XV z1B~c-a`+9amQ0QNY5yV~My?l9Mc} zIh{4x7n9ez6z^JeY;C4}dE(z+rI6nNH84P7UOK;7}z$M4{DMNdDBU?(EG5T|(|B zpkpoi`UN--WEb*WcStekpN<{}($`Jnr;v`fX?1C4@QW+S2=jLmP>GCuifV~M%e?0o+_?@+dxH8GlJ_$XBn~E-YECs{ zI9nU#=`d<4;CIfD zy#z*m{r}zd^Kic!tCt9n_IUE9U&*RHfi7f~xEJbPM{X{FW!C-M>gjpo)MR|@iz?;W zn9co5zVs$E@LZV2qpLtV-Yg_MyL$5^ql_P$3@*94JDasgOP{cTIh87T=qBkU`=NKm ztwgKT;7d;cv%FZR_(;?*Vi-UU9tTQGO*V{To!tn)w@W{uxcmllS=5m#LMx-s{GJ`&~H` z{mS9MR`{Bzr_1o{Hh3ihC(jUGG9vB-!t9D@tL1HcZm!qF!9N7v@a@l6>_X{Zi~sHF 
z#=rc#TZ<_(g+78bN%kGbWXZS7UGRCJ_wZa^|}goYq6|3kF=?A zIy*lv<)Oq{#-GYLgn3>tOiR0?!bR#$M1jt1<4Jgz-U7`tMy6z$Si-J%(rV6GO^nPw zuw0AI$=r9OeyhW(zLp;Y*DBJP*}`~U%}pfzZEz&pcJj{61nKt$6f2CytMzf4@0`$> z3=vstpQyFbsCgTXwcwNGyue>kFj?0g;$>t99e=QAhI$7Q3>#@XbEw zVRk0dOWYZQ9!Ii1wTvZsf}0FD$K&l_wE2w8W$;7`88<;bl z&LE3@z50-%_tno1z12proD+2~=nmCdWad$y_o3d-FwT>bJC*qy^#_y2oM@X|4a0nA z2ii1r=?Lc$+WLr$PVrt0t43->HWRlOU#E>;b|m|kpzK2R2cpjYAX!Dqb4uns^0omk zxsq6}<$et#=huQPUPEHAy3@BIWc>~Icf_@)aQ#|(xD3Dc*G8bvDeBF9#*fJW&F zUCH9_u7~ZwpS+SGfzX@yB(=4ibYqV{GUpeRMB-UbrF=lW6 z-)QvxfFDr9_?2wbnX??LzrMW4?9VvasE{b6w+;ClWPJxfO-aS5Yg8yRD?>GOZ`$M=igX((VQzyum?REN|oT!i&ki*>?&G<(Wa&7_%GlgFmh$dN@AX2`KT6@8rtZ0xjM} zvs!H5iF}%a^cWvKx$hE5nbSX8;7238yqNTV%6E=$Q(F7nNsSgt$7jtR)I7r|MPzi> z>&@C|r?sure22gJqnaB?#9^qISm)c6*ut2Vc=ZKjZ6l2S>DQJdW~ICTVH?J(5sqi= zG#+=nqj)q+X+`!ORL6k^NEuotV^%}gUZu|$ z==VH!;SJ^5pj@K0GIPBjHc#VQvJU>>J<6}_t;t$oZFA@jY1?GDjIkJMD2&v80OWnZT)@O^N#jK~Ny>iNx zBsTx^bgA?i8Gey1AIEM^WsefKIGyGXWC=>A6rAP%Afv(0EJt7by@v;p+#);jmZp=N zr&x)Z#=o3-ms8++s<*c>;5*}3oD{gtdg(Nz?VUg%w!X3LQZRp^*l+7lR&We6u3k!`XQJQ~l9w2nZeZ;L@_2*u`CSd*n!Jp8UVl8FVv;iZD0PNf z-)rF`txh*awg%5I@Wf+!4+NirWUz4{PYOPS+kN<)2=|^a%*lOUfHgZHlNl~~4K^0k zco|2UvwwYQL3aM;N&Hc?bkAb-?NXdN*YBZfX62~|j!iN~y#%9%TFf(%SK0P)X!?$J za}G>0!mR>D;&=81!D-5tB3SC9aXkF}z}rtN*~9swR_=jaJN+-gqYYl`aqUGoc7;h! 
zUd-vP*+1I4pwA%M`V*Kl%Ueqm*Yd+=813&MVOc~Gy= zd)970dmksh1~=bU33`*o{#Y|BItmHG7+ zJ+HwswgulB5RV}dznWMV4IS3miOOad$sjsjv+&MmxtG}f({O7R+w;2q zvL|*N>K+FF|H3$t`eS`30_+Jh!5v_*jN~NaQ3D!y9q#wnPflMrlhi+oQ|-XHRDW5$ z-;<|R0oBhp0z9dQ2g&p);IB%0_a+O^D-$374KlO{JP-Ld0ezEs`a{qK=7){wYspC^ zSCV0|Dk;g+x6)c>P6?`r6Qw=9^;okDwYQZq;0B}VV!v;8|1d4aBdUkCIf?X6TF?*| zlM%f-+0INo^Ue(@@C_^~7>9S#YA3wU`3M()D$=&V=$BQ>BbCjm#4GUiH1PZrM^@wL zp;}Cqx$5+G8`$(APj%V6M5ryq>1o;@tl#8@^lXfaD>p7Ukn}{mI^p(9gcY|zJ;S0U2 z-ClI94vu_J`i83Yt-EL7T=J0ohXqZZ(AxO@5)AioEvI}n;7J~iYNc1nPmbr2Ja=u2 ztMP2($7gjhvCqkEmvbdc9@lAbPSiuwg0JQETamQ2(?fjy-sl^Rt*Ay#vc7||lWFUl z1&>O7CK@L(?-NP+zPyjbF&qwqb4blR@IMFl)^I(T$DBxzeIJgcjOlBMSaO~F#ZokWPVg-5*d#7P{ihs3aCuSm`XEj>5L3}Lk2(`kQs zmi1riZ6Ha=J z_5WX_E4gex(B|3TXo)6W3+xY4e=QH^My0M-KD$>hAZgDV+13`VeL~xgBzeziVI-@( zfyGY-;&u3w9is2LzBwHn;m!_Z>>|>%6PhK*WX?5vkcF;9PBULRQcsB*`q?r@`TWwfC+ISo&KRAXe($4nNwGkHv%tjZtiGjS@1(AdlouAnLLi}T9<*ebpg zJ2qU4iMl_-=sE&b9|cABiMIWtY^lbdpC$kQr9bYLV(vHH?MhN>YA>1Vv$OVJxRv;o zQd>3kSz5!7Y*%IJe_~=^OKDTvnOEm`WAE(?=u3Bw`J+VQ zak5r_4CwR3yu8ozaPU%m9>QCyO1m#%@$W4B$eJjcxbd%O$*=COCL6FthsnTk4jc9E=@ z$D`jW^}pxUy-7!QhWU3&^)NErr0=Zn)I{%4Ga2Su!@8%EuhFKZpt{YKw^^KIufE!d zJ`mS#^UeuMc~U(?iw$7hj7+QrdD?zS@iEc!|4n!J3=_H^Vt+&$fi+A{1+( zWJmNE2%?qbbU0W>YqeW33(R@iXQBR`ZNyYdPDhPr>b}>UX8vzo6}l>`YC4W>$9qxNlMO3T2ZWG-m~L0`p1| z6&W5*V(xOKJv?iJ`d(6Yp_27c=oT`yN5QSr*^Bk$D&B2Jmis3%^*!n9PMY7*YjS6P zYCKF#(OdX9&g)lJbOUdrsun`U=CDs*ks1YsGTZpv$exp{4l%R34F|tP;r)%#vti#v z*}kB<4!w8wzat)G-{@#u%PzZn3v3tDxE znguo>1&JJ52dXn+mrT)5v3hUe&$#`eem~8 za*^j&=d*p?d5PBLcKZmxQkK!qp%(CG^ z^5t|zfj!v1b4lcW7vTWyXk~^tY}4mVsfUaq+%CG&k8eM!@$cZJvea zu~6}ecSV~IU^Pmwx8quClutxkb{ifB$EVSxIqoIaBl))zX;Yuv{f936gN)9B*~je1 zdpMCZxvHW;@*yRn_5_^&1?M`G&+%|rO7oLNvoTCsX{8hGzn7-}Kx&fZV+IM!KA&)H z5%{hmLp9ycN^K&g`+zk28es5QwuYVH{pOYE0b%h7T1Sj!k?OmK8}!dG`G9BphFO0B`y=0FB$5PhIA2cBhqhE&EZE zS$ZLRn6WO=gvV&(VxGrjFux7k?3_#H(+|`hh?ZCBX&L{d9{ot(_>;Y=kcvu1_;>Ls z8I;QDzd6}hjh8v!IGKU+Wcxf4--E7y^S5fY0|FXIr zPcdsvJ@g$vzn9vtz&~@!8u+``XquUAG9gUC%i1&|equ8mc-Zx?Xmql0<#gcF!IW6< 
ztR3wOuH-p-(AAdU`wlldXmN8TPr-vFYPTV4Klpzz%{U1?d&4(TPuqzGaQW!{LVGk~7mlfMbu(v#UjJhmr5HxV~Nxn4B93#lvl@5lJwsMln>tVg0Vqo`YG%w2x32E!q~FTjDE z1bZ2-B|H34e|UV?qSlFeP2^9a#X^-ftnun2b~2)m!NW_n-H&E2W{9F z5;yam(RC-|!l!!po*!S;l?CiviFaG;uZHWDz`oeE_{+&;U&$qLS7|S6J}KJ@)X~c904YP$ zL=4@Ja&LgQBB{vUh&5jOqVe{iY-%KaQ_aM}Rbjgl$8vws!)EA^*rl@KU%5X%Gat?G z|N9z!V94IxWB4O&$-%8!9f%6?<;xYGX7=qiF#0~vvScsCmW2gtU&uvWT(}$bH|;O^J&;72-+5H1=o@%pX=F+e6X5{8LQ%dEO??Be+9=UBxedLT!RW#Q1^G*yoa7{ zx89n}>TTG;50$u-mQ5g6LumgcMy6BPk$tq#h?Z=Pzj+qhlLg!8Jw(m5=zkSm$=M7o zNqZvTAH|Ul%3p)NlgQ<}Y|xLqfM3m|_tDd%u+FM@qKmVqYDc;|jed%mQycMj{B+%`uPD7>QDZXlaT7lndX=u(n$jmr?>`$;yHo&auH-qfOKS#ZfIf?2o8h>*3@=5S2D5%*p1ue}TDHe|_u&r(v!>p!7sN?y8OCeH@4b zJxM}5-|WV%1Ha6p!tcvjp54?sSDz2zZWr_ToaWXVb&_@EM!xvVD3ly8?}OlhLauJ2 z9XS^^yChGgC*}21pXHu!CXswsH?X0J#jnW9UCysg)|2f>d7_MS`o(_wAB_g>jDi^- zcO%myVN#h*Imdi?7qo8$yQf{BsqX#Ym;ttHaN`!zl33g~+0wIc;%SsSROt`U<#K(l zExf1AwNusq?B}fHbv5``fj==qiOtVyd7dXFE6)u0?u;8dxc&)g>5F!i^z)Muy^T?( zBKuyU;O^hl+ZTTihHK6`%}&d+i}9x$DkWa&Z!GZ*;2T9Q8jH!Pmt-rvJQ1H`QFErn z-hLm2Mqj~jGzy-l+%lt1m%_St*3ORXc2)A2(|YF8j-jaZsk&JgC=Y^ZMx~$G)w@{4 z>$KQHnOLpLsgSwl~Tj(Mn>sE(9M z*2b4n@M2t^N~Zrw0uJOM_S5GSk`r&NDk^8i`$Km#PH%;N@mx#q$-aA{P02S%)b!0b zk@5Qw?Yso4zitVAU=?#0@6BsG=jQ)?y4 zaO+pJiiehXid@O_`io((!SBhwLzSOVqU6tZp>dOOtQ^XefqiNrAEjuHUasc3OEo%u zp*|Ap-U$@t!LR^U+0#%Lh0e!=rudeVhO$@pC~betQcPf@zSe&7NDd`q$tZq18OhGq zcJw0AhP#3HKkP|kWm;-Ee(yR~v7Fxr(W&H&FGmC8dp6TgR>G6zIuYktyS~W#LK2WC z7k{S@Wv*`YE_s@NqpO$UK~|~T!g~kMW=BEx@BYp%#Oo_p$V^T#jcGGrG--`;VhqN@j+hxSXuK-|4&b457^R#Li{6`4NR2mm>q;f;2I9chKp78yE5< z@jnHwX?1eZZs_`ddsR^HKl;ByKc)MfSejR0*OeW40SB{3F?-p*LfNy7G!NnSCA^y0 znIDQZzC_d}dt2fjXOV~1F@21g&FR_abHQD7k@d>9FX~_`L&Zydvl->-s=KdwFr7cX-N@Dq9 z(caNh^3h&MI)@l{lNn~FGFjE?$!fn2(^^LV7r=Zv*k^+LmBN#n4(9skSciqpsXMRW z*8DrYOodx=mpp7l_@wZm#-nHQ(R{^*CJ#vRnC;+mF&q9ADy&57 z{z{C)fxYSHaj1Mo;eB4Ae!RHlu2p9*JM$C=^ZkzV`jy?v$dI#^#*?}8lxvP!@f$|N ze+A9{R1fvZ=*L<;Rk@z*%zFI#SSvZ3G|wCsYjqYH{@dt~C!r_vZj(PT`((4`ml*sR 
zbf|-ta>hqZe2y)ASM85LmALs(=NzL>XZ;T)NqK*cU-8RtU}smM#~4^uQVRVN3?P<+9ywc{LFLkEAxWQ^d_#si2fkX?~j|^)O<^urJRqgVKfT7%h>q% zYwgMQnQZByC^V4#CCkWXpow4pk&>}s$u*zVzdJ$R9rfyyh>ECxJZK;BJDJFHj^t~g z$O&mVLvdFvJiLc9^OWrf zf^uryh@(3hKgJr@zrmm8Y|_Jcohacvb?)xFD$d?b`dh14O%JU=x{|C+<3%oXZ7~i0 zOwD8==m(QzN`4vC50d7EYW#+m&yd=B_>){Ji&1JIsNYh0JQ-VtM~QdNKGtO9YmQ=v zl9Xgm&K|q(`J2_BQQG?ykBjfnNc(@g~G8$)hS5EHP3!O4w&sp=y*}j$D zmy?Y=j~c}NDh9CmirB}>$mtmW=S*%ox^XLL>`qpjhzAKu)o#8~$D0}wP)=QnUY zgr<3xaxED-5u~S)`AfaOWr+^)D{psTgb%|AiWy=-L)LMSPu4Q z`xRWWj#k5K9%*hwV)sI^_@`NEyayIpzigo07qqrJyR{E}T|-tk$KkexFO=BXUhqmT z&8+h7q5l`~G?DMg-IkF*Pp0F4eelQFo!pZ z;#!^}FK|6OA&=5);`+ve^?CT-jskUP&MB-|6Eumw?M7oNnAsjeYetckr$K(dE5D+~ z0rV)R=(K=IW;px7zP4Ty4IfTdU|-)LXF2UAYoC$cjx=~Q%WyM`k%*G5=-&udAv2V$ zeDA>rdI!Y@lZhj=o;gfKR7pJcNbWcv4B~BF2!SSW|WCUMZ(IXcdiGf#B}8I&sRoYpH~stTcWG_E z671udhC zJ9**~nJq@mpSG+wMTWIp!yg^qUN%r8temUv$&w)hv9@ zqR;s=dCEVIbj&78PvXxPT6)%ZvaKCSH|?G`=0^9c83S|j`!ZU69~#$!*K%5ylZr?1 znTF7}gV6bQqxd8isGt95kmIbxt!KO9TiwiRoU4sh<-4wf>x+meeUyYE|TX%s6ss?g3nKfO~MIotg!8lRJrw>DzD zZ=4=Tx^nK#A$+mqv*=(PukQX@&~!o17in@%rdbWoM7qvEhsC77A?RA+-9ezw**)E8 zR(m7QDEf7ulG%fLEf}|Prw#4cNE;GYa=R8X-tLL_Ic+x?+Y(Xw6#maZnLg+<5hmBc zt0_GlqMyCAG*-Esv6d`oZuX7^k4LC5} zSk(^A2b1HA&}Rr-zG9VgD${=W5_(?k>bI^ZYWP?%zvpuX+`orkyt71N<uQCr^BG(oSfW4DWgB-x8;CUQ8p`vO_ng!4J^Va<4T7jaGv$Sq?X9DLc@kpVQHM z2@d`jpOQT|r=q_`$J?_w@qssjFz2o(Ut`A8#5{b5(uu429aqmU{MSTST%p$+jCuW0 z?MRUBsm&2MJdzw%LzS+enCJJ+XwhHU4JbK|jO7%$wWu%}?7M+}0$Y1Kxw{xFiP*kG z$((TWFZAxCrM~|EFS_moZp$(K8+a=vOADnmBbB6yBqU-UrihBtLWCwtB^70xEKf}+ zHHkvWl90+)D$1Iar3HxzgJ~>bEJH(M|GeMdbx(gY@Be*2&*y&b>%OjYIrj6o&ht2r zGqLZl3;U8h4?VGxPE~3Ccd!0i-GKEU`ez zS#t^wrsT=E*A(yXgjr6e&FO^u8_znSRL-Ex?#NbZS0hO|pKNb$Aju%|fim}@_&eI^ zhmNJn!{;6 z?q$ZPk-ANd44MD?9)Bj{R5@+Vsr0vyt*eZ?WqPjay)m)!*}1<3Jc)1KqR-6uj$)g; zqe=~R4yAR;GF=7M^I&@=i`1HwP9Wtwx!wtE>(HQqQrYz}8=pSba3qt4N!zY0|C^mJH=;M9v3+T9W!$?{sn+hdGE!A0Nn3GtrMfL>$Y^(8!HFl(za6e5 zk|O8HU5|?uT_v+j+aJnK!^$|YoF=4|>~6c%cOz1p-}O*6 
zIj_U*+sMqLMz&g{J>Fd-^uM{V85Ka1_1yTzd$Kt%>i-OS+0>Yl9jq@FZwDWFz78$w z!Zzd4Mm<%cQCI3AIli((Fz4eYD|b#r80KmPy~v4MiPz34wWG<>oBCM6PAvC4Cs~fg z(d>aML$eo^-Ih7J1&P^$?#cF^3?Dg>z9Je`BHy3-E+cV?;&_4FL>_k_znS0t5bv(@ zG?6cjwAP=!&0POh`dpbE$vS>=&L@f~JN2)EW4w!FmHazBiN{=(4jzD?%kl6b_ZG4| zhp@m8YQGAuKdZNjV69x()lKduS4bs2T!B_qL47NlhW_ywa~|hGxSCAadAkaKrlH9_ zppJ);caTPE{ixPWo>tRt^=jtz<%ubu+xZ4?gkD%p|VCY{+&EMGb-?J;3N$*A{ z-q24Yz2{voZ)T~TXol=zD2Ky~sFXLrt;Lhe*_rIH&0prgbCyu)S0wjJJYVqd-=(|x zUy`-t$|>DTzsZ7l0jdPI(%tNbtwh@LzL@ud6~!mH2piFXsbnVW0?TPxGD<~~6XU-N z?GLP#inpxz*8lBIzw+30YB>KLQ$8<7IC-P_u zjtyabZpM=*jrMUpJT7z*UsZ=f2!pktnF|zu*gXJClDQ|WPR9w37bEI?kV-tb8<8<)AD3CF_|9{ z50U+9!<7G-SCSo?i5*Bz-I1h!8NQsRW?L3{0r_si5+o}_Vv^U8LuB`tCGe?S)7Z}Ya;)CCs)Z2HxQ8d20*STflUS=S{%0zrHT<`(~e~L6^vm>{jdVSz@^U0_U!t z>;b}kz_Ekp8{K(Nn;)b1p|DNIHH*+9F)3@&Ydo9zh|wnd zJ>wrIo5>}(m>unVXt@Q+&D>Q3)JTj*O%~}1^lk0xd4K1j#Y2A82kCh@7rT8UJ!}T{ zX*keKzw`8aC##Wtk7twE>iX}e#ZQ!-PRer*Xn~D=Ty)kdB^KkKg zwUS|^vG%hwa};{Ftq&$gi91l()v`(dTJu;qzq z7zW>*{hNKaS@Bz`<>Z@9Ccm?3z*X=a2g;F!ulg+G1eods*MvYHV%}>&&o!xQOTISgXb%a zRsFTQ1a`^#@ugP!uqu0Ne;NyZ5i8q>yzisU{n_frwOEy2$6n;!ryf2GH4^@W4t3Gt z3KY4S50Z>`U(mS6@L(AEI~rV#X~o_wWJUDJ-0yr6bd=wTNXc2~O<3$v=&+XTe52P- z3q6=Ys%9B8ma}s4a5HyRI>j}9`}v?~g$_Hj2jlUwwRS&7+urJb?e}LaWp8yFqv7>_ z?dm$20$P&XCyZkY@o6bJtf`mm*6Yt!Y=m_uFx^A~k0ulA$=?KW_Og-SV|4wN-1Q=b z4aw&knvy*lkHTO*edytOHXC&?Xpl%>k>UUjrw(?Bg@L5l~lL;hQhHJW?8M|$>bf>VliR!&YKV4Db2EVfhE_Zj+O_%)5S zo~#RR_}vOc;#V(cFUyU6d7D{Y^c=r^G7KYS74-MKmaD1tR}#DqT~E|+6;vAy=5qEl z8JEiRc8E5f!PTauKCxSy*wtogY$gTE)y~PlD{=TmTpvp|a$?&b>CzNB))*K642q_O z_8yPh;o6N_&KtlW5?+zS&UY`qSu%0Nvwy*L&ixI3n@M>YDu3;b@pG70QaURV@q-d2 zlll2dXi%nw)TydWHSOfRqh{d|RU_RE)p!VWIVJO0bUFZ?9zop$i$0R;`2x^nwLfva zbx3Rc*mgcUvTd_`zd|q4SKc9RhEej@C6jb5lF@`zmrk!K?YI4b9+aa}&Q~he<~3Rf z2R}f)v)oDkyUg5o;6E&5kxFgt0rG3LpNNO75u|2wu>79n>@5KLB@J2Wp$2WoP zW4f`*^X%4(e3ty#3hHih9XW_>maZZnSxe9V(%-fj=F+civF+77jkmi$7|*5U@kZnA zrc@#(x}jpSj_35ADWIMSf@f*bHSntMUO5Z2oMkJcKf9qra?dtF{SoL8k1^RnO1lYn zb=?u)^0u0jZ_51sF320rpT 
zE8g9WMqAjz{b}U+eov?AOO?(U)|VF&(bv;A3;DR(`0*+$+27~)EOSrXcnFWy!zfXM zSHdkhMKUk?EqY&w_C5F@zcHe`&C=b$%4BDJc4qZe>uz?vE4!Fz*Arlo^U>?j=SxV( zrF15H-(F%To&x11?k9U}BO`jE!*crHYBG8exs9LFh)yi!GtV|tG65FH^EQ5_5qF{d z3$&~aY{t;E*~-7~`8A~DPb}(pr2S$V6Z@9kOXuUn=X9wh%-Yl1%Vnjn6^T@^Xg_QKtMpgz| zf_@ica4_ChdaKQDo=i@D21z0~l6yP5_gmvh@@2Fo0g3EQJirVz zNfwNGo+XQPPNuKUtB7xz{hHaUm-BzJEA((9PS!KxYo5t^BwI+%F3e8(L-dy&eAzd4 z5h-}x@AvV&jW%-Hcf6e%xX=RZ$p?^GsP+0fOv%K><~;Hx%1k$2x2N|xy*_y*Z-xID zHugQdUCB?_ou_#pdCxlasi01-jd*U^gZiEKxgl&@JoC)Fqy!?e4l=#Io{9;Bie7^k#+S% ziGHrHkCdE+Hm%7^Mxv#t`V*-;L65hQ{vTobr1m~Sug_4h7p}gfzu$P;4W-@#bu=@1 zJ+hnsKJX>l>L#$92J(UI&2`4)#IEE-%lE+d3C{k7ZiMX)=Gd_MO$~=2}l?i65?@`KTN{BIPk}i*VUr6nyACt&7R)Lr-B0;? z#*gJ_^bbdS@5ZhZyP8u> zGQ*u{jCc%nKrjm~Clx*BY@3QGUkN9&Ui~-!^Tsk8N8>RL2SK13M7kD${tOtFqt6X~ zX9Zw$Mq$tA`CpIp=e)qYv0jgEhr9EP`iUAIN!mx~<*)8%Pw`hE$Qpkua{VlAuSoxI z#+mbs=oe};=T0TYE;;3Tkm08O=M>AQ*pt>;>h5{=I94UU7n9?$ti)uc7HIF!Fif;< zN8g*tXL6>VhMPBf7j6uv5d{XLK%Gp+S17X>ugc(@x1t?TbOKCn(sstU*!sLfeecs9 zMNgvjO|*3q?2gk`VyTy_{~^oTnNIBlzY5B{r^SzWM#;&3wozo0yE%oUJAMu#J0Fpq zw7IkXm(z(Cl}!wHL%O>NgpJ(Gs!UF5i3iypjXnTT1*3GLa`q&(+2}dA74V_hQsYCXm38Y=p@!ooIt|h zFWI?9g@t(r{}S#YHj*`Zoi)hH(#338P8d7_#o|4jugq<@ zb%gKNj5b#qcY2VL>s)3l{2h2C|VtxyP`TWD22a}wo zITP_vIAo7r_Sz=fba$_$Dx3(m_$8akV$SqBk!8tw;ZMS% zE6m7Fbp)w&5;@I&FWsJ)O_&WOw)#wzALnFyi#pgoMZt^6v%OSksWXo z&}M+|@i6V{e`fsS|MVfDImNuQ)~15)5YOfqvz~-iGju-)Z4*hlK`Wcty1~4L#5SK# z!%9&cIcw)ia&-&6PEP*Q;rf7jIjwtxxrn|b@kUp@c&d{?HU!mIqF*w^Wo^8T`Z+1% zYV!Lr=<=@hIY0VB7$k4OTCm-!cJ?H$^DO@7I(=+W_ASu2P%;^Wld14SkhRg~H_C4T zOJBXc57LcBhLt4yYIGlmkH^x|4ccpfYF)J70}LZsshsfgI~o-TJJXnBVf#rTi#bnu z3>@c>g_pEDNBQJz|CO?t7s(7`=>*7u@VXsuYopv+Rye2HCSTYl649T0X0CM;o+Vpl zJo}n#LQZ?=gYq-bFzfC8)Xr|6dtulC4F6)elha{TL9_i(FXxWr)YAIKiI=p}m-Hr% zGghz{I*-%$UyNDF=g^ZEcD51V6ne0jtk1^Jwc4u&f>mH22#^@GnH7)BF8tLtH1T^ z#8HJOIK$oMo#z_So05sV&8@#~6O(K&Qvb#Ie_g==rOGwbI@Hrl(YNGJz>Si}r zHGMZFO+84?uDI}fy*|QDT??~LG;=Ho?MizWDS0273^oQdA{*I_RSSOE^K+B`HPoEp 
zJF)Y5t4>y*S|A<*=0#vgrr~OASt2MIc$AL#cCmT2MiOFY_h;uWpcQk~dtM9KDU&QMPs6toiJ7lN z2Y2ENWp*_)XRH06OD@a(u85ZX@uY=&`IV@YjPs@Owlv>&1j=Oup36?oD%z??UZ#Pi zGcA9Q>}P(fL-8q{S5lGu--bK0%-GFB>*Put<4$$*kcidqv{u?ZpZT#bz?50)`1e=& zOFo)eV0hS_KPcCMY-Rm?2UhSH_*6iCz$+0SProaD=|jN z{6E8EnI$0l)qN${C zSM}o4mnnHXz9uJOPWQ>2Y&mOES-%INWI0`lES0eb9bM(GG$UQQj{lnsAD!F}^xOU- zfmuP=_WPIb(we|8ze`uUvftOxpY~`ny{MH@=^3NZ-$8yExT^BgQa8Lv9*ncu+xPI| zPPo)Xi!#ruf+4vab|Qtzca(R&@&fzx5-&CHSJ@j^skkon-LjC-?fhTNMkTw?QawJ+ zl8-V1e~53JN$nXd{Ut`WlgZw6R_$k;{5$RZnPd;}JD#=NgRVcs=gE%kD+|kzEUn3)`-8jhqk0!|pE;o9^vx+-$B^&r@sE@*arb?a zu+XTPGY;F+jz@8DA8o8xCVuf+?cPsEj#Dz zoCJamq;H^}`{Vao*e1Gga}kSh9_rUbl?}#&cmtUYnFq&H`ST6+^^zVIDL+Tu>;W7H zr;e=bFcdhGb{)i9+s{al^}a(va7kf_PekL(X>KhrpG9Za(Z37vauMhf$&uZG&!PDu z?fupJa((tMQOy(hRq;=flk{O)+D5x4gS1&8Mahpb1gEE|lk>kv`ZXB$-T?ir>JI{8 zGBwK7^(DR{sYpHy#~B)r)9k_PU>fUp>K*+0P~Dy92JR2g3urjKl`?gWp)6 zHvE90T1fu3S%oi`vuF|>+}YhBV9BnIWX(xNiK(>mS?zv9^3UQ^4s~Y`2nHze8JvFY zGl5l}NNYRkIT1w@VU+zn_v`60@GOFT&Q#AX#Qka0>3H;#{+>Xmy5uJ-)5(^eQ#Z$< z+d^1ffWQ4=lt|)aUzlOse8&HEIDIM!JcnL=4#$b`IgjMut+(V$cne*3#=mdWPu9D{ zSZ7CN&g@KvgZ(TWC{A`*!m3@Dh&EE?}Z@{I!$i~iKXrcdm zm8wj`r?Lj?;E_Df$(~R}+impzni7p+6B;C!LC$80*H!~Ic_ZzD=KHd@lTfZM$rYeQPFtVLfY6rr44h)V)_tm_p>S&S((a=153zv}AG3fPo7@kDl zL#H8NKAII3sRFJG+)F%cBXxe%c5P55w&POea%SLS{no&v%Kf;6e7eLBBs;-DU|7w%HPTxJI8;|EGcrrjE_*F=TJ*pC zPds5}>pB}>W~#l_*gqM}RQvE5k{v3$G|nVP`|?jFv4~^bdl^+u^Sl)Day$xm4drtJIRKS*)?aUUnVk@%t`-Fam(!J!l%IS0QK;zjwMXnOVSsImV z(17IKDb074#_qXF<>}M(v=W*`mzVf%aR`icfI8o8Q?}mN=E<;o1q#6I~_8!w)`h zX}vud%h4)xs)-25>3jR2ZDLaP@JR$h12R&EA~n@c)XU#Jt-wP|97Md{ZGV6GpR2^S zg!>EpU+RmW}4xY|weR5j}or-hi{?EnaEld%34`{RR5m$$LfKH3M@a|F``e zLn@MS>7Rw4^*sJ1wstTs+{{l^#-LC^bWXQChGxs$vDop=J-ubmdQ>Obv{MxS^cZFnO&n>aaA6VEn*|Cbh$;rT?k zX9wEBENnYt&>T;)ik+4GANUo%ItnYQnV@(+@w z>u@ONP=p@I(vVg&i<+&Z zt}(9ePoCG3%2qgc7=7$b({2MzGD;SZ=Mp4JJZV+L7nN*){gj@?P$onf3g@yW`yEHo_{q(30LlgUw2w9T&b z7eO;$uPZ=w6<*9!C+Fpz4Z38CTcVGw)Mo5Wq-jRNtbiv<_k3RUTcB;D^imYhy6L0F 
z;M?>)69toDryK<<>#HyO*aa_gj#4s>9fmcOsD%bI*{pMDNg}gv#nbG1N{n4rTDGE7&eto` z@^ftA{v@Rd|EvkV_J(5_OR|`Db~f5%*8B$jCg;_@$|jO&Wx=gv|H}+Z?Lrf~p;;B) z%uubg2E`)OIMLI;;{U-gIgiYbB%{gp_Z4ciA=Pc+oO#?Qaqd-CHs=SNqTiJyGf_it z_!WQhL)d4xMsnGo#^UBIuQzGiBSwKQ3ki4`)pOR&6f!#=?Ab?n6ASe$Exq12`X1U$ z(oQt2o;$zB!3~97ZA%~bVuLp0%J<;;(KwmxhL6E95nD$ny@{+OPv2|m9)x$ZKZgC3S_Ub(Re1hXVxt7W!>n;zAV@GxcmaV7O`+uS(BkO z=akMXjOdvs%s#8bak-wW0r)u_Ehgi~I=x;>KIemK6}~i8cSn-c68yuozP#{?G7np( zzxLX?6AyQGok)e{?sroo{y;m|ne)ruoe}QjuOg1FfPW*`CI2CFtJ&*Q3muPgHzz~S z@_%_zq5>Yq+bnT9e#Ri5WF~$?8>Mz~;#+k!{-vEwG`<2~r{o_e67_u7sqvx~t7vJd zr|CVJg?H4`M7-=qq6T=LIP!SiJ?YgOBxVAx^(5_y;a`ql*Wf@kkoF{70}9Ffo7S?E z?0R@+tUehI*`sw3DT+5-162F#BWDMmM=PeX7Rzv|6D?_~rA~axz*>p4yyEH)B(E*_ zTG7`ZKp5ChL$SV|?!e#84$5Tv+s=F0d*EMAW?mwLIrFMCzuAfR7M)4n>FnprJm@fV zi7%PmSrrRv&ne_CzS|s{nmOlCHfN4xrZREv$qCZl-TWnMTD;snVL1@gWrcmN?f#wU zc&EFM`JXza@)b#UuBwxZ$U@?~;%9$NhjvryNKzJ`b|uP}qG~IW_bEl4hGe+{`j=YD zsduqyc}Mt9AGow~P*bTy$&||Gf5~s&<}YVPICsxL{X@`h4ojBE>*a9xHSJrDRyE1= z7&OT2^(>eq68svPU)2ckEk36HET2*@q25@(N)a`A-G7+@#O@?Txr}{bf?y$l)8sR$Gap$7YvBr;c zKy?_Jd~Q5_gq>YPK2BxDuhDyUm9H@}_GPF4#D2}=YaXVLzr*1zcT0Qf61nycExwH; z?}svLY2i0?aWMS*Yd3jJ|Ahms)Y})Guh7C))cYQ%x*FRO!FxDb#~a(I|71_j4!E6A z^b-0#o4xoHh2DchPGm?n_EFkyhFUAZFz%Ihl*Mi9hKJnpnN;Q%eqy?Bma_k#F^ME?73BY*v*s8(kL_vS;@ZHgAo- zvZL))aO}(;odSYQ{(r<9ITh~bkhYC5Prjv`2r~gK{!!p{s`2Mjcn(4PF6J?p>8T6e z_5)vHhZE6w4A`~Ra;yS#35J!KaM-Q+--?-bqXgN8}laJ$Da{NItHfL_N z8l4(V0;j<(`;c3ax<~c!DV}8SVMA7^ALw$z;Oat`;$_tGcLHhr8=Cb-xhJ($M_a4( zH3&A@b6KBc|3+))gYHn+wZgHA?%V;^%;_J`ANZH&m#~S+l(!iVR)Op!WAN?tZ6%&{ zWC@09zn!P=vhkfsz+gI&^|4-jxS#Zwcg&WcTgNWl;#t;l;%A>j+On?F$mo|h)nPok z>8xUHBl;XVe;i2rYyCvforsIKvwllReG5+$$CP=#N5D1{-A?ti3X78Sd8*@APWf6u z%eEMyR`95*v;ECMyc&c(VAnzY(?Hgf)$Fa8Ritzs`5y%C4uub2dLMp^?j*M2Xk4kn z-t33lr@QKHgcw!GUEWuE!(av|d*XB=h9$6_3A z!Dl<)h&zs4UP;fB|247CiMC${$ILrityH|gwXn*{aZYOMpsgYFXPLU)J*|zhC+fWl z+EsM#4Yd3lEHjEHI=L>Y-mb-p;5Y@%rn8UP*>f5!W{{<i;699>mKbN z55`sWEV~rfs&SgP#{KXtdt`rOUM{oQx5KY=&dLBXvKxQ1zg|`pJgI?0iE>H~j}5M} 
zuPiH2m*d`^TK$AvzoeDC=QKf+EA>%22X!=BpNh73gXKUvIR!P!)a~p}yoj80a60)d zr-vD1&(KE3c$+{yiieX_r(0gIejx#loRtm**RW<~gqxUvA}Iu-5b%(GGk zhn8@7M9HNnk&^?jf_rroi4Xh}+^#p~r-kF`-loEDNkrrV_-CJBIr}*a9PyhIYni*5 z?F%d=k8)42WESi;_{2X+JW$7?L_C6W9BX8}UR~f&OS_d}zCfM$Ngo!zZz)&SHeVxa z)rkkFqW45f|G6lcJ!(x!Q91jQY;6_TyJW39SIbv>mW(-73W=Bv!}pA0HQ}8-`lCIq zN8b-9D3cwK7x`U==5OOub|frUDzhcOF8ralV6%nfBriu@EoGnNFgVP{r@16M8NBmO znm5rEdP!W{O1$3{T{mg5vtP*?@V5TCfgmeuS%FPl=?iRWr9ujxriIzNexo{dwDb`R z`6Ig*DcFt#W#?*L)O^iYG6KAb(jGyxrjWP?Xl0qP;XtnIZf0|nk6xlF6aDcixbU1(4R)KA27 zO;WInaby7Q+(E_@>5%?wg5)JNegbh~PI9M`9=rLUNbT*6Or<&9Mktyy6CdEemyxbF z3cj2LmJ%ML|NUKxjmf?JQFbZHm7f3q{*TdG=~-#zAlDU1Qf4n#kj?NQ>nXouCz6M! z3Vaha^R>@jdaq&5Z+}`)u6*E6H1ze_Ol#SFS64To#%WY#blt`bMtmu&1zj=HFy(Qi-ElXJ@sHS$knpDx$>Wh~V1 z`NVTQ`PrRM)z3R|vif9Cdq=G&mw$ZGWcJ&v?UiKa2{z|Kc&Fu=__>mO$`0@`?%%?{ zco9U|S$`~U<_zbT*zIH$Mjf81D=A7M}ol4owJHx0miuHILJ(9^c`HA-hOJeWxD}K>i>}R{q?;FvxX8ffV z-ZnOt_TcpLJTbI=!Iumv^Wd|R1WiZ1*TH>J0a?zh9s-BV2u`Adx3czgw3Jhc5>;Bu zTjRUN?VRYD)8yuWVi>FNA&N9`_W@o)H2DG4e^?Uj6usp8 zL{KF=!I^AN;$*L)NhcR|CZNrio*k;63sATg3Hc*`F8LUGYWqajk$|K19A9TJpWrRj zxrIOcl{<;jo$r2DtmYVl=CG()<=&69+(<(5&XU|qIqBkEa`BlK25ReA+LP=tGqrPw zUJ~iJ!v71*2H;v4>bIs|_4xT21$7tnp?)bGv`?pw&Hw#?PBHicW;$8Y*g#w9n zO_WD=z3)q3N2)UsJYD!qx09gVwUC`x1NE49ltW2Vi{f1%r^65B1-wNY2H?jL@Vl|l zki*%oh02|VzLV)zKbrlpx`}X4&gZQAv>-zrK$OUXHel)Lx)utbhp*WgpIFc2Ow6wT ztW`Aid?2gy0NnES`K|f`+06kUItV0(;p>Mud^VYT5DilAopUSNEB7i1N#u0mIM%aTE$Gr^ z{E0cJ@Gm^b2@IcUVHtX!fO3hbT8(!PYvnxc->%OxG?@g(ulyPaf>vZY=T_!axfPzo z-Wc-9;dHc zl^JeasRf=vwC!3WVRX44-X`~X8+JRVb7T$gF=OU$y;tV^&Q;{>DZKlJ?&e&UL162l zje+PoUVmkJ56q8(DjCySqEE8xCu`Fz`0tB$o#4^Yox%7SSX+|rV{vpmIM$;~6_#&> zkt-(?Wd^zrOEHlK<&CPJQrR1M8+pD8992;ML6SIvR~>~Z0JB5J*sFcvyM5>?HV<*kAED_ z1P&uRpNqdd#mAXypj zW~|CA;SEOUWRKcZ)Ji;KJm-?Pk@??pze_Eq)p!6O7vH5{@%l?;lOrH&=JB9QbI4Ul zM!7yZ<898=O3PJXz5^_}k+rW#z=y2VRG3si$xRg5CzS-@m0Ywk|YA=#A=pQwjOU!)?luYn+$ zn;YYEI1~A~?{2o;sC={qX{`=V17^6^G7 zySunQPk7V>!qT(<|ui)y@qpB-V>6^yHB^i3>oLzJqZ^h~{E z#=MbV`P;)M-f#BsmF`5Ny17pN#6Xh>=~@4NmJ*vtQn~MzR 
zJgrBTE}=CYd7PJ%v1BgK2?Q4!MU#oEzOn1CN_`8f!${j;wI4A4WR<_6K3_7vF4lV@ z4$fCEGX}MI6N&0Q(x}h{7Kx$D*-JNRb16Dxt~n=VZPMCven0MavL_vkJJ~U`g15gJ zU#Fw_GItK+E4|N$Pd?Z-{$H$}5xBXhmY*Vtdy|6+AnXOAWUJT*FOoMk`%c=R%?3I@ z*^|R)?3TilyVLXRH#!s*64RKR?K6xZf5fdlm1{-L-=a4;M<(UoLZb`y5zi}m;pWrU zCt!F8s@%p~z7!|I|I2A$vU{ANOl{wPMv25OFXive@$?9-tb%8E^vFpfkCT{zwdA5e6N2{8_)f7v(Q-_gt+#u7y2~VqdDU0`1$Od&aTT$xj#X zPA4@FDKn?Aq}k1Qo;t}`)&_^0Dw$q7DfglAHM88$vQDr2pPl#*!0ts_Kb5@Pk23wy z=QLi+Y@gq0p`3I*q>Zo0Phv}RDn~ozrlC*P?UUX0+(JrPliLcon`r60=lrC{?dVCe zQ(g#{g>>UmRO=1;2hgSi3VlfK5)pDU4(AOm^V6r1;Tm|qj7`5#|B2X6Hm+fX|J>Ho zSo~#R$w^)HaAGa!+iI&89eGb1iA@{{r`fLC@`+yrZ?d5;AwzXo*sgqqM51KwxH_M` zM^U#9YMz19D^aEwD7wJ-R#N&Z%CAMQ!%!0pb$`e*#F!!~CQagD2WB()^k}?$POX*t>PZ`x>HPyxj8kG9nXieWqgeL~wABIL_4vVc z@U*@0^ig`0jO^EFy912YqQU*}NW@qfid?UL<_B6CcUS1+efA|g#qPw1Pe5}Ic+25@ zBbxM9>TEUY!2d;UEk>ndjB01=f4Fjai(d(f)x4M8*@DJwRCDmRrT@pkG;>M|ap-6i zKNgHNL3BFagDyVc2$tYInE&7n^I>DqRirdoHo(&qgL9xd%E%` z(Vh5xRZ%&az&g60*`8{oZ7)z%h0VWEXMeJsv+J@i?$^aAo{=xUV)mkCZ z`J96mcZ0DbO>RQQ?$X0ZdYOD#6+wR^8YC7rE8W|Cf#x92s_Khq)Qx4#uB#JCOlH+` z?m@iN{9TDJiA^m1%KwUN(zZS6ulSuagHKn#w(Gk8Q?3>mvf`UHhr7@vHS3a#7l(+7MoeI7n>42kM`z_8c@xNxXWPcx>{fW<3-nNl6%V$jX?d)xL<7W=O@`hl(Xyj5 zN3n@7DU;oW<*1dchv8oSLb0@-sD(r_CsRm%AL~t`lszs{2Z?4##6?fg4Dt!&`P<%X zNWL3_vvjBQ|LlMNioaRrllbCj#Wrs=SBZkj|M0u~KjblI&~yd$4j>wf7uhqjzdoZo z-?Q_HN@_OaqZMs|P=MS-vB&%HdKMB9cdKcM(NV0s46zM_dMwN=-XWCqL0Z?yxyFc=47)3E@GvFUpy3|I@XV$B4TRxTvQG|1sn9K+SbUptW(9kZ ziVb=@lXNz9wX5r7Gf%YhR`&2xnstZXrlH5Ht`}=(HVk_i2OmZ6WKjrgi7MF&g4aEH z!2M^*-GMCB31~kYO(&pSW@np`mQZWB@h|he4RE^^iYzL;%MR$%yTCJhzH{!>I<}`B zSoY-`=M5xd$91UMr5HbRvejco=Z)^pBf%%2X#DtOSk4-H_Kzjs(^fh&3}1(`cGuJ5 z?3YW1$>B!yW!j6??hTg>q;$Q026}T(?9yKB|7BY33xbU7*T6OMd<=`8d|>;5?H^iy zp5#Ubv%-+Kn8X>Z*Tdh{ibO{?UN%Y$Q)e;Gttq5p87lQKqUF4cI(*?|F#Qwz-JSM# zP$w&zcaw?Iy1{9vHUVvqWm%uc&#R5=$x*jPUmHLedpg+&cshKOBdP%jJ&}yXGu=>d z;2JnRt#sz;Hq)^C$VJY@t&g_J%T^Dq!jp4xD!KP|(|^mtx0*|<+G_J2ykBE%T&2J9 zJnCdvU84OD@pXT&v;cqd_4g;wjZxr5Qa@X*%=fNPGnqgh=0hdh@aJUoK3q#q!Vi_~ 
z<1aQp5sTTKF$I^ZqR4WiNal4D`7jJO4xp`T@h*|EE$GxxqkMMtKL(Nwpc)UKk*Icj zLFKG3CnoL{)TySwU(t~H>|HZ78UW5#w4*M%+~#*8h_j!sOk2r^c{KWdsr{DZAUR!+ zB-54YOtR-*3Xgg|cag~KHC+kE&tab#B_ahyrk%EJYH`XK6+MR_y|-4L_UvPNv4wpf;C|-!YiZ|s`p_TDJy9#OQI$wZ z8A*#b5bGH167yZl|5BV_zBA{(6fN&or?iHjbGYs%86EI0v5(c!x{QB2Mg8RVdIM$Z zvJA7-OuLPI=E6K<>Rb|?JN4*ia~LhavBa%hfp5)eK|3w6x-^vRBr+yf@t%^!DSP`5p@S=3#k<~4%^j6_4y;YtlX%cq`cz_1K0@_b?7|9i zHVsUfRo+h9;n8-evXmT7Q?D8sxt8`NhjVo4S{Q8fyeq5snU*%{_Y9PH1GN_7etgF8 zd*8wWZiQVncIQztxq;Mufb#2=-JWDVN`H4^$v5$M&sMXst59*7+Sh@)x&Gcnn>Ap{ zF6_LWynwDN3NGyrqJwa?h4wP-{jhEq34{JkzBBM zu;P{2wX0w_McsQz?Q(Ln9a@ZWJ=JL4T)AjRqHe~K{meclv)2tI>_ETDeXeC~^CX$X zqEqep9(g-Tj_l->O}^yP-*)u0%yo9h#g{Jaeaq?8+upe&z)&*|BIn?`z2>kyV6O zjOhP&Uy1xK-TQBNGJ735T6C~ zueh9!EFnvo+e~EckV2Nf!ncWJt|v+?(OzO7YoXAmRL62g1j9>@j>1OwyD!jt%zRNsZTeiHp@g!Muv!CG` z80Ji~Z)x@JAm5K%?``D#HCVTEFWLQ*vv#vPIgP2S(LE=YeXn+F_CKRSGB#ud@E<(s zCTR9gI-Sgd$bH?$#WG5$t-pBj? z8f;5YWEyTI14Pcs>*ez}FRmr}Z&0o=jcf*k16+?ptx+i13w~F?HnXNL)0%^gkGsI^ z7&XoS{{nS)Mwe4jej;d3K&yw~{v&TIyxah`LzNgp>jseHm$f<_Urxn`>^r*-E`MPE zK45v?=Q$mP4#^L6fHv0h7si2L0179zV!Z#qheP%$Mox30a59lUh+Y?XT zNb*MaBMq&m_s8($R&suBA%%(BdY3G(M3qO-a3rqZ1=h^N*7rNPLK7wVEM3^sSaAdj z#;;GNxb`4B7nX^}Y=CK(>6XwuZoYA67CaM7QzR(D#0YH9wm!)h?)Y zAo*CNY)&8hy?c#OYmnMg$yg$B@}7H#_vK}vFYON+;`eIhliOl89h(T3oKw9!4cLwp z3^!Knr|vsNjXvaRSH8}^;7$IyWhkCAE>^&_CaiCyLo4wd&Tu;`P(9N8} zcrU7-g1*V<@vS>I;cDWP^OhQ`kTd0yS$PC34#v5h1^yUJlL_xqBinp9JV;JnV1*wt zCbg$=d8>Nb=yeA?lcPU#?%7Wqzx{Y+l6|(Hmbce(EppmRPucA;pSJHs?ssFw-{8Hx z<$Dae)X-0DcBu#GJEC!-QxhFAQHwKZK!1Gwz_X)C>Oc~gsJcaTI`4QB-8oO6y}?{d zE8n5qL{_#th+4v+728k_w%xROA!sWqvs8%=sP-C4Rq#nfe><@5PmjCkvo7csd;SfL zzJ=ypLkd5H`5G7>ukJd(a;D&WFg#q(yTPivmf}NJ^?kHEiL%UE{=WQz()|zEmQr4> zF<{HucJ{?gMze*a;|A@Q!+NY%;|FAhwmG^T1%}D!^8-3(#WrVNl*8*0PqIp#GXmpV zE%3R>lUej6YnByBY0gjD!Dk$KYNY0_Wb8%H;@vNY=a)r4S#w`$j%|722UKRq5?gwY zwvzWM85Jt|Tn@5iB+d*}=D{+_M>RcOSjQ-#%I!FZ*4c-sHTotY??g@CL<*o{VReJ>8BZR6vWC;4eiPWft-UTiu2%-YlwB)1cqm3#6c*w%YC29*DRS8|daM<%lCej1s5%d_V2&r0)Ndhbu# 
z2hg9y>n00wJ6KdDUyVq0_Dl}KyQ<{lV)uV0j~BTAX<-ktc6TIB{tS++#m)dt*46jK z--@oLkc>#qx#$;Ol=Cl6(ZdwwPDjy}C^im`rT3n#Bre%eW|QkfLEDX;NM5jW_1#GQ zacDD(Jai*liSg)$9)Dx~lUa99ShY~5MZv4H)tKd~F}UiY*+Z@}i#ix?Zy75dW|v+- zue@7CVjGg-qf;IR}uflp0em4MRR(NwZ zeOi6Z^9t}wEbzAYmKtO&cXN6~{-STC@6sN!(%tA|BljDFeW>r9*z;2UhD(iiIeFnh zBi=*$I)in(ksMx2njQw-M&sZBws^EpRwNUlm#qGaTz`+hf5PR%$lxRrG=ha~h2G8a zEZJ5bWjU76sr%e(K?d(5r9;(g4Xd?AzH?Z!yrC!NaS*A?;<_^nNDPe@*|S) zH}cn5shf={&Abgx()K*PCHFz{7-ZkfIHeNL>da`BRQS)RFCd=wprEb?| zqH15p*Sq{31n2A+YvX=$BP3QdvHTAgzTiZXbSsOJ`NOCAI-h`Mj#Ba6g7-AGxDI?; zYw;L1>wI_}X1wXij=zgjBaL)Tv~a(6&sXy)CEJ7Q6y>uwZ6w`l0H5q`9koV-j=`j(e!wXz4SM=do&fxVUZ6ely% z4TfQh!uy+0@O&`Z+=hy6)%eEKPw@Q&*7_@!b{%O*6!!*veu`#fPVZUm}8nmKQ-7W=luXxxbf&q-nrYp=neWav>NX#ZxcDwxx2sN1y+5v_NMDb z?BW0;%Szwpq4D0vgl??oKj?Sj5VB7D7@2BnT)IK&nfg3i+k@EKPf)go-&x6+L3?h* zgJ9akSpO2atcLDA=+xEZqFn9c;oS%o7c2DyIoO`JFclTXxO*q**#MHONk}~Sm1z8o z`cJUady>KA_&rCBW$fZHuBLjL2$u$=D&Bp6(Dc-PvSHfUVMOl?x>s3>=+V`lCT2b* zr?FXCCAkdcQg$v+C#M)^ec@boA}v&ab2Sn%7i{%W<8=K+-j5;OiQ#(^t^efd38ZT% zsI%fW3?##GxfAKFp_Q3rsXHu_-|BAG=mOsZ@%3#Ssa{CslO#BADLwHk(ZNmhdNPU6 zsTEm$Z-Tb5&RGfCQLRLuUchcA-Z^h0`xr^asChNJU4x|EXv7<=Rt=Ujdjhj_?Rn#E z{N74z$N`14=glQ=29xN^AN=KPjqD6~RGrp%*@rAQC7H<`vjW$j)Iw&5vUZ<*o|l4T zIjPL)sPUlVgWg9{GN%~dH2&Klbj-LrhGo41Kg&RM72f5vgRHO5(&G+f;!Y*1nt{57 z%(ca^{?b37SK3Nn8<8{F}fDw5ef=~hlS&aaBD{#kgt`JLIR zI&f`)*4dK~O{)ll%Sr84PbV44uA_|$XkTcx2fht;m2+Y<>-jjop96;NNK<8RUO8*5 zk{RY2Bq;m&2Nl-;3*+llrTU_KMf$xbE)0U@KiHGJHQcJFJ>Z`6VD?7Sydh@aXtJ+v zgi)C`SE5T*b+Yb~_?6M5@eEuWq3-2cctM|cd?dN>ANiHyw|*U2x? 
zO}W9Suq!C{aCgUo&ab2Wg>W0K<*b_?tLIAkznVqN8{-c&@dH%96P*&bkvY*R+DJx> z(qFPs#J~OTzr1@)|DSuMS=VhPBD4R$zesT>yc|nkclMV!iNstd*16oLIV-imy_^Iv zrhumvUAO^lzw}uSlX$ZKyY-6d{O8@g)8!o9^8dJ-GlZ+NZJ)6J+56NVr|%#MPm!#z zaeon>jpm6Qz#H8Nj-29?vkMoq_F2EVnpRv0njcuKM9I&@<-l)jU_&V1NL z7OOWJ-%Ib}$!s7YbIAKWWZ^?rE%`q0(%vLA&zkl;IvI~Fb0t}wzKq3vf}9RecMiBl z;!*sop=$ksULRwO{gnTdJi6DiX!Eu79;k0*6CTj(7@Bv6eoDK*6XAF=nH#3%A7R}} z%Y(_xWcD;WVYm4GGEA1~^K_CE&-<&wvw0H4^Fi=kF`gyA*D1=Z0zq}2!zAsk!>i9x zAoI96^Qr^+8Ug$6>I~L;;w(1PoI^--Z)4)&-U!-(t)FL4)6l#fEJpX%I8l{s9{{)h zXnkB^5ntENC8)ebnG1_i>wR^ufys5~nDb(v0>}AA*zCibj(aDe$e)yHt==+l<$R`` zmD9{PvpZ;WN<-#GlZ9Zuk+mI)#K(wdeYhTSCf7P6dSVI_ojC#anPEvB>JP9?E}&2J za~)p1PMa&Q#RyvJf@xB3=XuGDr$lH^wHul2X}I}&_9`a7J4{~cF9 zLxs2TG^@DB($a%r|Aw9)_xoABtI60;*^btF)7T3ilE*VN%-LHx zQKyA*@O1d*bma~3?}cK$(W)IN;u(!4F+0-0E_&)%@cop+N6Buz%Zm4(yw|)y3Xjm| zAT-QL4bzl=-T1pNY;qdv&Bl+vxfc&O+3#lKdTrGG1k}mcxLUb~P%fvc9Z6m$8H2`= ztdsHZuZ4yTfywwnjvD%Y$=pVw_A>)e4g7Ct?Rz~8<$3gHbz0-dIe789wr9e;7g{AI z@E~+Z-nnFfORliwf1C-E*R|D${v2NT0jC>H#)G*ds9O{=lHHs+iSE?`mT1=(q~K*% zXg`vY)rY*v<)os{AnB;B?dW)0^R+En^Di_%37@hmw~+3}`Yuo^ z?`O%=I$ZlJl;4Tu)gsl2_?v)U@4%@E9uI{3-TH5>&&+(j$zBc9LKSZtBWZIZc)bgP zYiZ>SSRSn1HZ-yte&2-8IZ-wFm0#2IKH7W}Ox4h@KFTDD_)PMVvl$LYu?tz*6MUaw z9K8eA{(ysr!=sm;k{PNc+Vs-aI(}N#IdVU_1zLmVd01psyA~PlfQreNHI_G!dBf*O z?&G+!yRo4*3EqUhu`tWQ*@5-SylLXB`ty1BrRmc^S)0$go;)W0rxh-}N*}uL4Sq)_ zt~Fj}elD|&nVG7rc4AGF{p2FrbiW#RyUvO4m1t}nLq7m)DM{-p}25|3{#ohkFzTuDhyR6clsfVhh zzKo1l#P^?-IR}o3uPIZu0&SZGo}qBcnW~wKYRX={Q1nvU?}jMzYt+aor`s!acLBwD zbTqZIcAAWuBYd)x{w4S%BJyWk?V!c0X@3PzwqABY&B)FLU0GjjxlSpIqC2fqP5#Osl7N16gbT*ovi8sH(wd7Yh z7yOr!nZ(Zj0Lz@o`#B1YclCE=>!V;JPiBDj5ilQvE?*S<>}K2v9dpu74ZK?p`)>GI z%FUa(&dRKJ-c@hJt-PyM#lgt)2zpS;$D8$_D#qrybTR&KB1|ffnIWuHa+Zz2qwnd& zOwU^Ac`kWKyzx$OO9sCIp53nP@a$DJv#)TF?^w*eeb@3m8Fb@ZSAz9jr0ECc_w&8U zo#)Um^ZrMGBlDBL?L4cPbzkcG6raS4WKHiNR~PvFZ{M~8d@{RPO>4m*b^c#}HQlZ4 zQw3LZ7H{U@2eL<%=vO85&RMO=08$fTx<_Gq zo=4N+T1c$yA82vTO85|VuaT!jOpfxsluUk4Vw30mema&ppVfN%f;@~Thtug+_H$(x 
zVmNB##G;%RntWc#0PsG|JA`#F`Leykj%Pnq7gS5s;#`($1Bpr7P1u4-C{f9?ms#=+@Y=#2 z-3{AB0!&7&$*g1Yp;pC%oa&vdP~qkbFlT>HSD3G*S&M07;t2C(JiY8ok{fCDSiFvB zlxW8%aX0TN|6ncpkesae=PcTs!26k!i6KkOMISKSPt&V_s16ys6%IS2!0sr#rRcvc z&CSWfiOrq{uD0l&_5MRi^iwFC(}wDxO-^X;PI_0f(TDI~Zt$L!dBo(4ScERm((goq zF5w@IfX6$iaILZnYliQ;Q>!^+{slOS5*J%npf6X-y8Z`#!Oi$)WWE>h4QAr;~|AppC!3 z07S{2c_$sH3FDlGeFY59R{mC_XEIY}wreiRcE#y|D7cU$oJp#_B2n+Edmvm(XDg)F z>Bh0@ta?j#SNol{j!Q}SQO1wVRi9;~Y{+I*)lNS&f7tuf6Z+n(u)&Xjd~bChL7hj` zoKEKMg28DtHc{hK@aN@13bwj;4r-plf@a)2t-$|q5|e#LN9!RwO8<%r(_uai4-bRf6j|3E50{fkR-&M9qX{cX`i<@m)KW>K&u5)aL%~`kXPv&*qfZBzd=7@J zbKI%!V({P0R{qI|)xqrX?qspd^Zh+bgvSaaVAkx9^*^KFTym3~_8KP8`+<-gOVrUZ+3@Y584IF%qFKRicu>gZQ9GYJ%iM&N4q`1n$wD_!f**`+Y>M9 zqg#BB%tU2&D1Ju%K0=}Fa(}UNB_sMzWNC$7k}YnLb{mu2E=pB2c2s~@Bl1`Yyw%m4 zs)y{!sl#^eqihXwb+b?6l9v{JbYh>%*pffei0#>(?6<5A>&zKvr&gj7GV1*rjG2qP zn$0>vFFC_1x#&(J!R7As^ZAh6?LbnJ=k-tUUrxjN(%og~oIFm4qI@|Iq)R;Ur2lP=VYw@;~SAHbK9$UB9V(X3Hnw<7Y~rtA%6=+1<0<$VUV1Vm{&KK(L5DgxlgRMD=&diCaV-Aqg0c%} z>TzuPU8MFgH4g$^3w#+*24<7C1?<{R^5tcHNn#j zC3a$AU-3UD?`5X#DsbhzlZxz1C-pLC+|AWiV^-#Ir-Cin_o{)SoOiTDncDoAXhucy zl$p(Lo@Rxk3OYxFs*v`vN>5cL(Ex!VGG7%WQ*h^Z?grlYi~Y!Eyxq!5<<7R1`YTcT z?V?=vW0n`sqGjKdE1$#jcRN}?j`HdDmF3uNpiN`I9FG(dEfiX9*3MDlbkDwS-p;x z$PCaHj|OGO*4O0bQRC8j5}MWD4Wu%&UF}%QN3~xEEzZzV-l4yz!-tX7PFm=J z-1p-+6lLiaOUT(?^-V zvVQl`tjs)BFqY-4*dtKwSepF^tC~zvSMvHYuiKm^X8k#tF2|zVxdji7(bp8k}AcZKhLe4RO{p7r(L7PhuATksXEo&f74G-->A&G4(6x?7aa=@_y52e^8Pd_Jbn zeK)4KjTcT~EPN%}- zLzXl9*(xjF--vpzKF&5a&m*}j$wcNab2?~tx~vDsDm^sT&W-qbJbV1MQUlnqreGbW zx0BeC(jJe*h^&U`z4U7{D#p{^4Ey0Q9!O7e`f9QnJxc<1#;ab&$7*`p7w>Xz^Je=epbI{V|sSLA1S=gW-fON}CZ==D%7B$H3GVjge|jB~zL8+>Tz-d5!% z(!t|tTs)j!boT?Y^$L1T_3NK7OjhMSF!~YSo@d4OQSZ;J+FT{G&uEnv^4_zE4E#tg z+G{(fj^&h~fpp+4lsj0PuY$M_D)qqE-8^fD2FI~*rF>k;c`_DewaDTFaEvWXWMMqQ z#C~22lH?Q)Z?XmxpT9d;a@JZ-shz9zDxA3;9d5wGxv27ha*4m)N`FV=Ko#7%0ZjAo z`k$oeVG_}oCfB8B3-O_^Ue}>u*8ZE}KvqM)_oV&3UJpgUH+6LomomBXwzD zG9)Lv@P7<4&t@ByRUsoKq+&x^p3zV=csG)*4sydjN8k9xe(n^EvwHIqp;JEoFD 
zAUlJTN35aJJAw5~RyM0xIictWGI=k2Gm|`m&NJa*LW<2hl1A?yZw&fir#;A`P$q1Y^k;HrSDlFbgRy+Gl z;%`T;?qy?6MU8vF(xC7YZeh<7q4BIbue!4yj0ceIw{Rsp#1gfWeR}m#V43!-c^jB( zjQk^wNzBP9uvIvhWd<$#v&YoiE(w% zDj3(m<4thOxy>ni9R8f(&P9diH3Ael`8T)lmmM+51U4BBvSTXPW%t-ta=w$kchXC8a11qmmV!-$9{P4_=Uq)8;A@j*Hn@m!JK$P-d zfik{eBXr5>v-44|J>9>E{oV;BUZ6>*Xsa_?9jb0+vNZ~KMzb$Fk;q|cosI7Y7=NE8 zBbSqy1xBv=uo>XlMes=8i^oXMesDc`Q@96} zuJ?2}D#V`8@_!kvdPVJN{;J|~m7>mW;JcNqCo5y-^-qJ>aJ|G&Ka-@EatE~X{+YAY zrm*#i&bnPcb$t^3(8<`IyxUbtU$P^Y&P4vbvG`Q&%=Nc}vSnIZ$R>3I-`#M3O^sYv z1!wYFl`_00vNrD?@yg%u{cD&;wsSu_2a~(vI#}jj1@%kck^TStwe9Zz{?6=iR^%(; zXCrOB;8!%fJIEp(k+WCGMmdeo8^;vC;{gu>?SHoxx<$)>`O9ibe&sD7u_R^MyvJ4i z|H#*uwEpibS$x^Vn*Emc?ZJ|tM>a|)lV$cjJ7x|hwY#tsqj)LJ_zowV={>@oEo5;E z>ld#z^9CNb3EI9U-xyL`*~j5d0phn8(`|(`&F#%6w1GfC8+@i9|6ndjcef1yJEj* zVu9=dbs$nOBfeP_^gIjU$OhuGNt9WRZaKCUjmTC&Zq2W0Z0-IYvmxn`9c^?6gL>2) zi*GA+(E1hBvY3eGEq=cWl^+H-$DyBu8|Gf^ioR)mMuMl;z5?^_k(;UfpN^&Xbcb|m zGq2a3;BGAI?s;Kw9{po{ohvn$<7wsr$ujP~i0obft484dHg<|=FH$(9?E&V6dUs$@aKh+u32`Dyf1Y76G`tz zng3G5CE_qMuYC{v$Dn-`>~#X|J_HO$C{v#{%qHa8qcupm^W01D3GOEQ7%45rv#y3* zj(gA$M!G?v!$9|J_`L_IbeDcJ;8;SLVch>3KhhO#>Q7s9sMoma zY1;5q=6^%`pMm&cC~an4*D!wohHnFzW0fngapS1@Q)FfhS5_bm$DrUGxM}Q}Ok18l zH8DWr8td)QVFy>+<0;H!{2nyPg`{-GueitYE9`~q)4!+Y4#>|8tWbZ--U_#*kgXj+ zQ%}=+eM*?GWd%?6!16Dm4(IWE5mmUN;g%3Nc^0n`?>OY)0DKpEW6ga?`dHe_OZlyE zJ1-ophE+O7-5&wHuuMcY^aX2>uai9G==fx!y6W(AFtM9$^`zdO#1Nh){46kfGV|L| z>~H=rMn?`%dIpqp->WN^ri04__~6;A1E6sh`0NSsjzW9pN>#i^%Mh8m8h$Nh^I=EK zi{A?#KS>PX%#Cd);xk4dhh`Z*1{LRGja*%m4DOxKhDOlrDl$DC4atvGzJYuU#1@#N z*d3AX6g-A>=;6(a`yFN1K>H)mr7Z1EMho1#WH#0~s9gch`Mm4uKgXKnKuQy^9f!E* z-bPpT^oG-&u`*_IUjpSFr=JWKuJ2vOuc1igd7gD`?vKQ_o>;t|dsl(gJrHWzLAbvR z`4`H5@YFTcWAUuLXzvB6@5o0v{%wSYPhnxdfC_I@t0Tu|lXjQaB)C5os+K{o$6#|3 z!7P)O@>9xHr|w^!5o#!pEQnn@V5*7r*u+0avf}Wdd3Z*9lashACvjR5Tu+0?r@+pg zDUQ9*1RGcLS?`^ccb8ln{7@S8{eu5H4K%~J?+!9Ew0454i_qmnY`{XWH8XQxpmgW- zNy@}S#W*lGw(S%AtKKq&Yxb}7wZb+HInj^CF+ZQl{nE()8Kl#gz?th4eBKF^uGn<^ 
zu^89lk-qK7S}AyY4{*K?=E<~Dl(L>CK8LHW1$JHRNI2Gxx*7soPM+1`d(QMJuvDuQ)EMUT|auB`*FzSi}dsofytdv8-U%|VhFUF z1;ri&lS9N%8Bpmk+=)@oMED+$?7R9kGyHcv*4=RfxZ^05GmzV`t&Si525p7pb3Shj zJ&;d{A3vd{>R7JdxcVS8_9O&Xvzk$L7uU>$<;u9-)bTWJy^dbxM4pFZXL$b46JJoL`wpjK zYsLbza{wQr?Pd=%qRc~0?#gcttvyX49w~RnV>@KkQGYojZyfua$WkjdY5udE3Q9t{o8X5Qqc!vXft12zdj()yD9Q{Cv_j^dKchd#5)E>UWUp( zA#_omvOfIEhs?UCS9)5HcYXiAFZW_5fn9E>uB1ohUHxAW3tNZCa5S_%2d!SE{$GIN zX*l*W_uXOWIZ36tHyG>aSaD~rJ%MCj;;H4tyzYH32tAz-t;+S*(BS%dzmZs^?#QU) zq38H)g;pAOUO~c_hDgHIa9u}iolbx42Qot)r-m96ySM8-XuJfSx|tdV;+Ny0&k0(77q3(UO>|v@-qzkmeJtd9q;?Zm z-DkfBeiQ|wj?_>JZ=?@>7yI%PbuR}x*Hk}%e=Ch9orXFmpkWHSQwd1t5wo<%zIb}` zt2`aiZ5!17k($zhs|VN@g+?ubU;|$8cWm6Z_~X@J-HPYGp>_A@I}v z6{)iW&^l^X$?_*xKM zcCBj$`2EH+|EB$-K>R8ixB!Z@!)jLM)0m(s5m`BOFAwdxR;&?H^dOSz+-G0#{VyDK zH(*0_)U^T5e)prznZWM)^K#g1Pl{_tYpc+$FF)aULUlrevnr`j$9c5~HTL>CHDz60gU14k+{wKDL|>~ZFBe1W=$ z!#7tmPQ{`R<0*G@x{|YYQq-Qa&R-t$W=e`9JWN?kWo`bNrtT5AFvN^Vn9x zhk8QPI(SfpC(DAdS@|o1iJ7O$6O(y@)o*a$aclRe)rZbzraOrQJML$c<~a<5s97EU zoHiarCaMF)N;r{0yOVj+k^W3zab1qz7Om9grW#^wjJ( z(atJhwi{@6^ZaToL?+KqfU_%+m?GFG_oO}yz89&<^%|~Z>D_o19(^EGG~@FTa5oQ{ zW7;LSx)TaC;lCq2<_$JagwgUYEMgvL{2ChJ$w?h(Wiry{Y|9@|U?i5L6Zg`g$_ON| z1ob`x-j(69y<%7RWe2A=K%WJfa0KaoEd3__B~r^z+*^edH$d7ldG*AO>ml4T;|$^? 
zKHF2K9=h&ag)^m>@U}PeZcEg45WlIvi06(i#3TFW>+A>KC*fNGI5-Mv9fy2mfmU0g zNiC@KGFVOGo^6dl%kzMnW3N~FK8s|!qA(uFb3)5Q{Oe3jw&FRZErC{l=ZtJ3RQ8vz zL%dQb=crXK;*Lys*9BW`myW;X@eeF%4-qjt|2nnxQIz_xeb zfa@GPLDdFeSqUj>MA<87zbkFJfs?WLyZr9RsGhs48sSO*i$sipucd+qbLRRC*tm{i zDfO@Bl^WoULtDkEA#fbP`rHd2_i@EpTu%}uQA%A=y}DU+oj-(`Ckjcg#=(O%x6ce$Rwn&3i6Z)K1Y$) z1;Fa*lw*-(vmBq~>C#Z|IJ`H{fcxGY2Y()3YvpGlIUTULuJ~{zr}K74fyaE%_DsyR zm%)fs3Z+WHnMctUB_p8iW4?y8lp7k^UIIL5j+ShuR4#CM4Y^8!YMJ0_-d|6ybu~*9^!6w= zZ8&wt=refExKFxXTR7}RCTDK8|SJGW5N@$0B25qmkIwpN6;0-|FmiQ z(W-E*jAvAv3*sMW(gIEiVJvPS6g8sUOgpy$f$_v7Y~n#A%rmD~VLgwdSG%y##yH<$ zg*(xscroZwIVd}dUdle~!~!h+`{?&1H1K0Ar=xfF!rk4Hft{I(M%{)+@51vqqUEUo zS9uRdo?PoNfl;(uk+rvop3JA(m?zwqm5$VXLJQ7be~g{{nOdDAdj~$w$H%&3t2(yV z{p|mN$4hx~I5nLFn%{{Y2I3dm!rgQDUU#KcM;aFsR~X^!2gcJ_gDJe+;a?vOy9vEF ze)<5r@-QCHQ>Q#%tUPdbz;nGz%T=L>84@Z{?mcR02A#~3;Aqpm_zGA185y{mQe7QN zJ?>(viM7g!hiHPu@s!cmp@MrtqOpg5#M(b`)e+3HKz|F;P=KdBIj0!qJQ1J@(%p|M zSJBXZ(9H4auc1^EC|?xYGZlVi;8CP#6S!C#y=)2f-o*dD%D+XFI}6=zfCekzQYW}y z9DDo+Sa_mSXDq?@VA6v-y@BTsaN3V1$^CO+xr`RpAz$V&G9zUvIQb>*#Y0b58uURD z7l7|`@NF~v$ctv3Ku0}cX(AMR2FW)AXbkAhSn(7!Jc9Sh;BBUnk?^P=P@2)#DD)Yu z^D{vEGx(H1-}eE%8AA(Fhof;7!PGMyi((;@LLc3Ed{@q_p*7=3v$<{q|DWK9yVD*4 zI%D(o=)JSTp1EsweD}?)hI5|T){(b+mR1BQ^AyouydHoS7irIZS|^Fc76QjJ)UXr` zYr?l`*jD%WEJq4FZ`4_2Pwi`uJ#);a7rIiCS}TQksx7#W4gX$))3brzn7TjwyPMe6 z>{yf0EJtcRtHSFwfya~P&5BTir!OLx+t3xCI&b7uWhh$AR#u z5$&1HsT*8d4pqB=*CXKNDxKZ%BoP{U=FYEBvjM-HfpCVVCh~O~^~{4Jo`v8J|JK;* zkFYgI;8s~Au_@Rb!Upf=s@kyxD4&6QYq1Pw7I5EW(cs^{LXKUld^l*#ZRlJo`eIJl zFR0s0Wb=@4ckNGvN3H_d!!<`KjoZe<`6c|ikNT@~y)rWQ9&L=_sr<-{ne1Ldx6J69 zpVHFSTlxtKy2cZTKd7f5H zuHFV^%}mh(nRdlNZ78<|id5lOF?2hLm+Kzh=UNdkC=SG)@!6JG(mekQ@d2B7#`Do$ z!k_;D|NcVOlHiH609&9%1uWaA;B**y@@(2A(6=&hd$xuB22Xvtf&cgNoc}r?m0uy@ z1G#b*j@(AO3(({Tk(IN=um9po9_X_J?J@iH>rl_^PGjL&49WMT*2#F-|I$idbmlZ1 zor8z^3)Ujk%_;gOV8}}VGxt%B7<39x$9*5dpfS?Xg4#%6e!XoYnTr~Q-ou?kgJ|=Rl zFHgC$P!1c1*5`W}6xq&Q^BwrV1$;S7Tw^3M8S0p!V+%IP^^wsy@U7T8u_*$aci=0{ 
z$d$t9kI1#V5*<@p#JAr33U6Up1Rqj>{1u=y;urzko>G*b7(E60zkpV`;@Zq$jv~DP z#4YePA0jsgfHjTQ#5Nl;GXa{XK$#di^&J{%PazL--J0)s=vxB~Fw@L$LF2n1bFOG~ z=3p?t95*e3ZF22K3b2*|FGsNI(sF5PG^dj|l?Jy%KsgvmcT{x%S2DRilKPz=PeTHZ z!V}lyq~JMwp(lSsvF-5oWlFpZ{I11KL^8(!!x?IGhG8=$&Cg)8^$@agBi7_OWP3PMpVEnZ zyAHTAUiUhDec*>$^*Z`c9E$JZ&a`WHv%;rn7TfGiH=M4Xd<~yVc=yRd`Xcac=tP|3Sv2nz|(YoGB3Z z>$PA1{^v^6D|IzTMDK{6N5d1YtFWZ=Ao=-K80qzlz|O?9wXm*wDJ^UZG_nNJWjyN1 zN7eCI=ds3rprMcBYn}@6)CqY1C3a~klIPhQN8#F4tohs6%{h-$&bKoUegiGy-F1yg$R|n)k94T5~##mX^SVErDk52FjEmmiY_$GCRYo zP^}d>7=IbhK1nO?v2YLPeQ1|+yrpzyITz3JQyg)(cT)Qs}8!kg)4iJ`&~fju8fC)`8WQ}g10|G z*Zn{_ftGqfpVh=DN3gTb;+l_Z7}P!joxZ`!%ta1v<(b}GS;N0x(6%Y{y7sdm6s$|R zGf?LOc#p&mq{3y#GZ*5)-Usp<;mLVQI6FHO+6|)Id48XT)+g}N<|8$G?R!{@UBFp} z*31ig7n;%@={W>-+_(13p6`r=nc1ikT($=_8d;wPPhRAi=D>RrE3*^$ zlv>YHs*Iib5J?=2XGulMJoVl+z@GTo14ukW@d+fya~PHbi)X{#3T@t`rfx{dRd6<2 zKs)$a24A=o?l;7a=Y#W}9dtJ|wGXJD8AZ=$36`iAdSz^Pe~_QAfO8Jk$c!9|!DB9z zxHI$t(kS^Ia@>{nzXzTfXp^Hub!gF*9v>q?o=cdWx*a!c5S}*MmFFaF1Ga)ZkxDK0 zL>nL-$xx{vd`Ljk2gCD^iA+lYkvk9Fd$=53H+#P;UhEUu_wrogw~?l|@Mn!EIR#23 zfY~CT`VLyC_2;lr=aHRSaPk8va}JJ7!A?!Y{@jhV--uM0O*kESnSc}z<#PcVZSMCk zLrsJ59z%iFvzVKL)sOVC3WYhH##q5-z|YcXLBUYcD1cIhfOW3*P!3Wbq?f z-3QHXg!4B8tqTDdBTZ)C~hqhZ=YarScxI77LwPQ1p15aYf5A8Mv z>eh#QpK;CA?!BPeg}{+V;MD@?mJYqm`{n8s&tZ#4PF*qBh;mJk`RzdN?s3mRJPSnb zgdB+c-G^-ULThQgIx< ztbxw?XumaDHHoKsqW_+&;>YY&?S-HWgawnFjO-qYaig94;P06 zX&bO`ePIbc2T*ei2#nv1h>S!W%;&0^(wdog0VJGknq zgX7RA72Oz#=Q0AE%%`J*!ENb-28qQaO<|&n0j? 
z`9Yp(%Kr@Pw=>8CkQH-f)Z<@ixO_i#$HS>cKywftxkB|SaN2Wd&pqS8tWY=;2!5ob znB2>?yzt-x621n?)PjcHDgPW&uo8=YAjn=C8a0cO3xND}G&l}RJ&ah`ytmUSc?b-R z{qLrpE=WTkVw%B}@-!~<))`Zj;JaWxJXX^AEw;B8|k4GrYYcXXC(z5F=)PbTwx?HWEf)#abKqY879?5$E{->c; zjj;#r+=}!icCE!|{IwDPZlz2N8B=QSf_G5~;i}5ptObtk+IzDXX|@7e@pNSMw*jBN z^YJ^1OMUWxzgA%piiRuI!!zC+4y}*TUK47M&X*DY*lRJ z8h7`3*XSPQoi#|ASInQdjBj&I$N2W;`jzOHkQ%SI3I7WtrQ3OSH`;CH>IT@4YH<4p zUTxs-P58D)h-==&BN`VqA+{)i?L2`G`wUIqgY7q)Z5ec=Eu2aQveVS&7)Vt#w>)xW z^so_Yc^fuq3eVI-|D9iUrR7BUy$zWwh({bk9j?^tgVl9x_81f}8eU6F#=>*CG8E7E zE|M?;&uBDY1oAdA=t^F9)j1aPC^e3v)FWV14QsWWdX2=lp;OJl=TT@l5j!sAU+`Ya zbFMzv4Bb6}P|Vese*yn|qI&Ji-^2+uXv?*f#et{_Qt}j@=MD7T{Tfs85k;uEA0<5F z>{nt*_XC=7Wj9pX2)!JapTM(^17)6|am{GmJZa9w55c~BQhNdPc`fjyQ@3j__9K<; zpvT8}m!?3broN5Eu$_}UpOc=7c__E?Zy@x42LAV^H!_b}-I-zzw(?Ne69$doJ=u07 zTEDn>_H&y{B-w877t0iiFkiRaO}g0?i&bbtR$WbBvF+90HP5%`?}j+M~DCLU}*m zhgX3M$-Ti2jxaSVap~$AE zuIf{x-s(o|%WTTN3RmoBBmr|dDCBOs?)*X90B#>-?^BushKe90!TCGMJ%A>y@@%+7%$>jG)Xy$B+ zXMF#FKm8DjxISYv|2$Q3J~E*+CWRivJ$To>)Mc;bZz%3*)6Q>T;TLYK=1CN%#i8;{i6Y9Pf`fo$0dO@v| z)Ug2E98>lL)F#xv517rH_&)r38~k&EOJQK?4~L(j_I1P)6@b?DA<3S1GQCx8`7z zD)&OeMo4-Nq;5746+pjSzq}QjnjfjZkyg!SxDx$-hiGy$Qalh|yV}h0ok!5s`LwJi zn+c-{p3ybmu3o+iPxU6T$xiM+&;60a{_jEW4wM{D?X!_$$M4MBF@lR>SWDeXSXdVyNb=qDjLS@8e)p{nasTwBnS zxX2t0j?_nIMD9UST&Fw|&GkgiX>h>P!-hkHc-?msn?@T*Ag zH0tfc6FcyJ8Nl}j@p56V=0zV|VNwB(|A~!w6c}=VpSid_fwutqWL5=XP3HISK;b&y zWL~ZrDMY(tdHO6|DTEw9j#R#bgiRpMG|y*sYBKt3ME#p-F9qzJ`|uQsAHi)k+Le!M zx1kYPDD?uhd1_;7(1EpBl_l`bcziQ=)9@t+Lj=14nXSwdjyBo{S`U?6jk^*#+XyXP zOVbe?=0WE(;A$qoQP8F}`e0@mBjS5_x%cfR;B@VWt3sS7aYVQ*DgD4L7pRu}q9s7@-3hla@Xhn$;x2tAKa_uQJG=`+E zqRoFYT=>g#C~5{>vX<+4^sLz|b@rLFhk85{RvX|QM-wuko4;&-6SOgJq8Vn|V}nz` zD=Oz_8E}iChyHervY-C8C-Kkg=4RcO2D;w|x&U24MQ(e6`8VTw|WcCJHG8O(#MrNI3U4aMPLhtYd(q9cOxvT7V zsCgfJ9!-5s;p2K*{s68vpj;#R5uWkz2hV$AR~PEsN!zdSt2xk?fF^B$rWEj9fvRtD z-5qr1F=&efEdZOJ=E|8cg$9`23!WA zYw@(@`5@Jx;*VfH2Rk#2XU&y1h?1_fGbfX4#XYOb^YzvMdut&1nP*(BX3Rbxn*U9S 
z|3CrrjvfxtOer*CGPc0DEI+kufhO6p2t(-+xw`aes56y_Y!g(Ni$!o0z6tfE28b_0 z+lxew%cG67Hr;XVCeYiSqo>O_ z+fa(So}lkg3<>fSYIm}`65u>ivm0$XPOYW5yM&tN1N&%f!&12R4bi|AqKOBf`%b7d z8$70B1D*v7_eC}W*W=J|G~cc~wcj(5nkIwAC}j6HU~$CI{@-cfxdN8E@lb8ii2+Ez z=WDEm?#b}G1QvHRH5Nuf4x|59q31d3*#Z_bkb%y~wJVPvq2{6?LYhjA_HEZg!-bUG z2+WhYdkZqxoGVq3aQoxmqfy>hHvhnKjplc6>dC~j*=O@ymgexI5*kzs42`~SCN60n ze&qpbvt8Tkyp_`RdA2-HH>G@EVEh2t_>MY%;BAk`_~r~)og>iuJceu4ecGS z9OLQATzd$r*jh`lzY0CC;4`1#*E}FKtE{~Q*FoO~XG&rFM*`mwq@Xr3FdDoc#=?9W zYIq%uTmxUer*r~XIo@5H=bFKTOF(rAnSK$Bex=P**dp_O|HSpq=#g`8t{pkYx2Hj# z$8reO2&8WUS6g%K7*Bo4x7Sx90e)Y`*8K==^V8y0sQnjm_YhExMn)X{p3haIyb{pq zDiX7Y8m=PKT|#Ub!}gqmGLNEH1$m|goHpOEH9JymPD|IU=I7pvVA2TQ-3VTn@Nr*U z{Lu>FqLy8_HL zk*}$t_uw4J-Ed+zIv1Uw^A)X4fs3vHY7X6>pyd7l!Q-KgWl-){pe%=t7>vBur=EV$ z#62wc!LiX;lGmaBC20Etlqtj&=P%6EEj-Jy6kE}A^9F22ss@4QXf$aG^tlRr>#6Su zSnQ_E2Tt&QyjzZ{w{jR~*lIA5Wi#x^;kbB3OuD zB`9hpkVuP8z=2Uv?g=dC#PFP};f91?Hv`Y*Fna4*ouAV~DS#}QLue#+)qF=wppT<| zQ=w*6;2ne=*uzN0jYwcY{*}U~dj-TeOzS`~x$JA)=q#6K;@?>r2R#d#I_aE_S4IUQ@=k43WQ zx&|uT3>@2l$Qha(Dd%xO@?d;O=KFPq8Y5aC3C>pWt z2o~GnSQ65(1RoYf%E`o!Mrf~5^ACI{!2h9m+UJ1XUV&&?v|E^y!8nY~1JA(_jmE58pSQ-`5Q5?1mDpdSlO zol(g~9Y*X6kOmYhJ7Hj)B znvejD znHoMs&duqv2>WG^B^y^7Pmk7xV=_ z-(4~z@g~m3$qV-&?SP{@s6P#j zNvG5v@I1&JXKYrXA&w-J3Dk|ab(woLc*j9g=S$pAot3)Xm10Io?}&x+ATEXYjQU(= zxUUs(ym|(Eks0{qDVRrtB`XO1 zMqvBPVfFSQYi7rI1TAWaZ2pDLErX`U@;?%P>_h5CL8q7bJsWBNFSdFl-g!BCFqNmy z!)tp!Um?4-u6i@Y2gcL{22K-={>TA;;n=K>ymKle8C%$#u5oF)^Ye-k3$w(wV~ z3%{Pnr^ews&l9&8Dcy?K_ys@N?^=!PvFE=cvHgM9KK0ksG!B{i6km7}Z|Kl=z64KuUuUTCP2!_r!1)8QP*d()0wdR8UEuR~ zJepVfg8AR@?Q1N6Jzm#CJLbO&n9T^;1X*~J>vuw@+>~F5Ot-}1xVrEiV5}2l$$eM% zQ^zG}|2QU9C{hR=nMmyEc*%bFw~`Vk;OJAqhkgYGzM{PAtlIL_dvH}; zwo~$L{(ntB=@InlX(FtN(93lwrD^dUJjH12nmI)~VMiC@MS)nnAKf?AKzb|-Yr zlJ5EY0{iDq?2Fhbcd$pjksZ*_63c*~IdZ##7FP45XEyzWWY2-RKfqmAaJp;MOiEQL zR|T8*ukfVVrq1!K`{~~V$05*YGxVs=^IrqYRV3>XUS>4%9B%vBt%#UhMd~izp;(n| zP{rB1UnrZzZ&w652eXFXAHlmHk=@sMx-C|~+)K`EnB!(M*l&f_D}x41=8Ey2*Tzccve?Zw(DvIP 
zo2|qDg2;)-Kc#n&$%oAEY_(dxG&+??L7SxlsETRN2vWSb#wr})=1@H zDD??eZzEJ_L%Zfjy&c$oru4mBX~O4KO1mO0$kTnc>x+u38t~R4 zYBJmDLa64RpKtIXTBR#+zGRQRw<^0;hRcEoWJ_hq7Y>UiR1qU=wQNRkMJ99yI79EOJiz zz-{3CBy{l$?&U**^z0dM-7{u|H!n7LC7R$mvzf?W9@;2|G-cxZCLjSB=wTM*|1g-u zQ;!_F2bplkXI7+OB{tMvlslPyjiR-9D4I$Ax%h7+orHdx=_UpK--!%+{=r0ADg&2( z<=L{xl{w$f^E(eP{|KzPDE}w=+YhKcBhzdLWogqhZq4G@23j^o+meyZ`gpHVz%>Ti z&!&Y`aG8%JI-}{{1J~4ShaS6VEeCav;;9eOGMl@)PyR(-+`K0yr$b+7p{7FHoLo2azGoG?zbgi%>%r;5NT=ta zyJJwE%|rS+VI?!6)z`>jZ7f6qxZ!!fj`nS*#v#D(?v_NZL{LVbd;YI~=B2m47VX(? zN~)_$a={TIEH;HUqcdrL$XV&T5MBKJsbjWW^O`??8y%%zYT z?%6+eEFcFj=~E5~h@lCo*q|8gL@hbfmXj9mAd+g!uNFb0g*0EN&3GnSPpHKbu43R4 z;dmbw`v6q(+`KMCI6dH%`OaO_uGTk4t~~2{JKw|k*8nNI6)SQKkJtbWbWh0@o-~V% zW0bD%@Qk%rv8$eC-4*Nk2~Ruo^DnOELK0genUU{ye&2bN_o;azxa~%h9d}y@ZNA5T zUI7-fs?R0L_#P>J5!pF{6?y|nAAuA&3SAIsyoe0HLtTz{9SruD}0< zdWM04KK&%HEupnL@faJim464w-8b0=4L3_l30jC?+=*srwvjq76hR*w;ymNJ9041j zqD@z~Jc{LVKk$dZy^v_)S*(#)|AcPy(XfZ$*?!7D1l-2HOVP?I@aiWd_+jWc9X?b+ zOCJZCw}8j*2$=IIK$dJ2t+#fLz=`@#jZCWUdB#hyAAL0V9|>DwO*babrA_$0C!#O>B{z# z;M*tQ-<|tQX~l@@Ur70Oe0)o6$|uAYuD)+h4$0m|g;g z2JoQ_R5qu+`1L{-_tENnWTzQk$x}xP!3}q+d`@jmpon=^eJ&4lGb_$#l)nn5XW;Z+ zVvnzZ$c%pb;EN}fP64x}z;F^9ZLfSM_zi^ut-$hIC|!wo$(`p}z+nJ)n{w6c51!oV z^-@6LdXSr;&yUDJZ79D0dv+0s76b7)p0P|cGd-UuzYuhE)z%~2J%&D=feZa<$J|j{ z;qDaL`4*0q_1rbv^dXRT2KOYOstJ@EuJNWxz|r%LM`B|NoZ@zrF-DoulSun$VHDvo;B;!`_x2I$bzWW+2ZUgF>aAF8GY=PbrssA1BzmFBTip2a% z+n)bX2pKPo40>vG39N{@kv>7Iu8$~nf&Q-pw`c1*-+DVZ7XhXXVCvesLX`F_)!)%| zC2?t>$8UVP|K}nYnSE;#(3<)D3jA6}%Zu?=$ygZU*&0aS6{PPuBzrBk*j=;k(l~@= z%?~BdV!is~7h}|rP9MXSP-EcaR@$%+(UT_}cg+JW_h3KvL*asaPXdBu?zIP_1H=^# zsi76uSK)#0A`UWR@EGdK0t9<$Jq2(74ds()cRhY^0M^8lE{yyyPFt@zky%Z8~ zp5Nt&1IrO)moDdf9`<1* zw#jvIOZj$>QhqpPPTcB2m)x`V9JVYw^?GWqWBpZ;&p0r3m&77)9Se5uFEQ`1d7HK) zQ_Y3(4t-JNwQ==B_IpeLnfq7CmtGjpxC z3(I>qP?(u>8&4$$%Tggc^&k>?fEtDo%fz5YFQOMSBi%v_ z-k$5O%{mA@n$Z)=3B|WzpK}D*;-PB-a@!5gG{7FX-o!j{|3>zT1p1pXb0c1J63@GG zt|}6pfrO=CVPZ&xv0@#rrSR=|zHzZLX~wkCY>n6R@!SaLniYsF*_XSGsWAqQQMCCO 
zzw?IQLy+WRXy_O$;WWxRlVYF4Rj7H8-RHr^{bX_Y4836&;CTQn%~)2DQf9&}3kt&~}#?c(TFdFoAog61+U1jSRaLf&+G3q&J*RLtbH$c zcO3=ame;O&+NH2M^K&P1o&aw*!&7B58^88I%ir-Z-JwHCDAyS|96&2C(HqXh;<-P5 zB4vvrY3X>q4%qM!!9tk->ks_yV?^hjh<*1!Q`f#fi;cgE+?c!bB)WPxw$y&8+1uSY z=)ToTKyisX&iy}%3?8A3wP|?{VOu;kaxXISH}!R*?Ie;o>MfAb{`@pGLO^b5H&tQyl&=k$4FcA#B`onLd(Xg&JMU%<6HD%JQRBz3@#$0 zZ-PS`{u$rz1)_mm{f_4jgOM;^CeFAW{j)DHnp({LX+$)dvgUU%n}%z8^7D+RviFCx z?$&k{*-WfcJEAa8GT#DKOHy-huyA+KEkL0RoWw6qN4A?n$IE!u7<6)Xkh^Uk!IC@P zT?NXSZz&nPmcXTtX{i|AqZD5FW7;o@Wto8#rc>6_b&Q8RQTht7HVF|_8K808+05U| z@z+u_7v+Bg5+n50v~Ddw^XyJIY}U#oq+?Qu2YW-6t;p5WaAz@fNZZw5k`AYz$7)r? zd%VMUbM)FCjXP&|L)9nX&}lIDOs|P}EZ5rLFPqdbjg9@O8;mEVEM zjJD2hJ`F#WH&1DLEBrbQ|J=vnDH73q=V#P<9}wID9isI%lc1bA0+!(G{zdD5BQuRD z<-Yh!p=Gt;FSy`7uVb|QE-l{-m8()}HxPdr+A2!vj>w`piJn2m--2$h(|#?!f2Pbh zDC8J`I~QI<8ovR1=Q8#|<5^I9J{+mdl|`Ypn~}Z}wAl=*{s0Xg=fC|bbNXh2?_Ofh z;ovc!dZL*f&o1f3?}0s>4zUw(E}c4dAuXHuu1Kj7X!C9?R%0Tr zz1Y-MP%afc84Av`z-172qZf5mBxce6{YlAxQKkwq7oA@)mr{-4%4+aXCX=DlYT!)g zUly!?N2F&EK42xZu7-Urfd0LY_WJa9aKvm!?%*1Um%Wv5XPL57`Z4U&Tr{H_&#!>K z1Mp%K&{g+Y$M7-(@Ni?f?r8)?sbN3*XXe(K+;h+TTfpzhP9^#8DGBCOkLT$O;;HQL z}Ph12i~+yGv*<8R#$buN}|0%Eqjh<}7nRP7b7NF(s0qT}SM$r)9ZD zCkt0ID4h%*_U~un(I3U)r-8A4XA~ah3J|^u7URG>jyhdMx0SNVaN|7foJ2eNai<;c z!az3x=mvx7FMKwDV#ew1xbrRk`cjCU65*O7`SZcV9RFviOWAS7Q%AUFZlgPB*-_kS zSoYtjsSI+EgX@llWWYO5!!^5>>sjjZZa}+5DE$D`G-Fmav_%?sM4nUOw&zU6L%&mj zGWPc}s68LO#5T0k6kN>LIv)Hg0dFpztjd#~KC}|Jjkjt+txxd}_BwJ<{|agv3YCqO z%~g6HUT#2R%oBE=8h%0IVmvn%DVYIB-OI5K=*(=A06(4#SdZpO*SVSTb~Nwo_@IJ# z(sZ7?hv$u}T-B3}GPm%%8o%~K9rKMk%29|qpFlz%!#175UasNw4w{+`40B>jCR5Jy zMz^75z3~^GhddH(TuU6|C|xU{j3-)q5E{gw>TzTwlc;MLQj}~=uZNXwq z=XW;v|2Y^rVptG+Py`w{ir^}|t8gnj{M|vTi-5ZjQfxHsnX0z}eSe;I?@mW(zZ|&w zAV2P^G2claYMV{3A{ia~6d5!hP6h5v$NJO&g8lGkD%Qw+X(OTY84Ln-NEKQIn0clVF&3x)ldbl$n))Q4!Qk*5&a2Z{1S z=K%uGgXqnuV-j`6ph;<@*xB)?Y4HH{jv(^yjpcI9VHZlJaorxA_4=8Jj5>?n9bdQy z|L`PuUBv<#Ek4WDS=2fVPq7(4;i*UGknnpu~a%kzDQ1l4&igrP)4ROy|{+YqwIf|dqP0z#C^LGvL 
zTyCCijGUfBQ|my(pWxInXnq&o)*S-ha$h*zD|&|Z-Lvo}ExA|SI!*%L3}6{V>~joj z`zBY7K%Id<4`ri!#uDG)h4P1zE8xpiB5e0z9Jv-3HOF7|L(inm&a|ODS9P@kzVgWG2hja>>TFJZ=V|BiwKC)I z)HS*C7Ibs1mE($apg^Ax+4q6dr;(^vpi4n$<{lzf^}R!Fzk`i2u(=SfQtGK7gXQoY zM(_63`k<=?un2!q;wX2W(f9&y@+^2SL$f#0;%F@J6GWGu#AURaMonL0y(jbhV&q5q z+=wJU*|;MfeLcE0f>^2vvNDjG{^a@dSZ`Od8Lv0xsZ(H>pPEi{&sn^NgL*{ z>I7V#BXdT)=`byopzS4C1v3P>$GkZ(_T#k`zIHs8$=z9WJ>?5{CrZ*FzKM57i@^m3E{tt5PYL>~|cTapLY>!Z#4gK(<#5diLxf;}X zCm6V=brn6P-Skn+oc0nn!_^+A!N(Y+8oigkNc}kc;0$DLAh_;nTy;vAr7M+x>)@Z~-1Ni`b*IcO zuKdisXR$y(!o?>jzX;ly?8=dcX3jN}YhLcT!eA98{8bTb-$QD$ z)1GGzWTMNS%jL{%D)CewBqs(2u26Xj9%Kb}<6Gy{=J8y=5WNrPeqlJgpAw1q>Y`Y1 z&ti4$$t=obLaA5xMK!)*Lk#{&C>935q8DXhZ_gVxzgtuiC>6Gn(wKwKp1pM#P?l@{H8J-%5W@T_M;5LLrXgYk?Y>?2hI`Lxu@`y*`a<8sNM+9HN?tohi0z5aeUT%0`FnX+`;fXPYLt$ z$XP+SlMBkbf8N!zm#MiqWl92*W4=T9XTGdlKxrmc$MARZsU9Wr%wC?HP2KKQaTMG0 zM9SeiTnAx}19Psq+P^FMUx|9$6O{pt)RI+L?*UNBb@?9v+gR?-gTlg=4Z0?y39d6a z1zo*nBl-o$Xae>#KQemWRrl%HMbIrflrSsg_ zYu`%fV5HsX);KVc`V!H{>%YB|1eZN&Eho6=28#OB=PrZ>@H_>H83&$o@u+pF_Y(KC zDr1ma+lYF&3EXjX(jLV5KtD4t9KeQ7qs&q6M0*Szsdp__j88Eh0Nm!=I7+PH&}0D6zJnb%XXh$@zeU`!3;pbk z?3e@WW=0WyhbINF5HF$?MYwm2wuitYch5~mM|u(Ql!046Ah)(+#2>v2^RE$B<|(wi zJrbQjG&&7yaVyx>=l^@$tqql4=6);uvF8ih2YQ)b^^miN(O^eTT;snEf4_uQ?+0f` z$`|1SyCTguAbEFzabvCv`7SgjnOLPD+==#kNB+(;qTYZ5dR#|vE9y{ZOY=sQ(+R?;<3-3e?*Hypc~Vk5uVX8^aYxNX%d- z4(-C#-Ej0}{FN`E-T%X<^kL3*HWS;#Dn(yY0v4;Qqaiu?!drNrc7`({j z?p(Mb_6Ml{-vN`m@Zt8D3-k0IpmVHBzkLUkI?SDXj8GMV|7O5̊LP@*W5{|32q zJbWxT^@VFI@yXiwRov-@o$W{|PcT@4^sGa3_0eA=iv@z-jYE1q0B_M?T7L z=T^rep|gl0v+;QVJ@7>6hS)oO?pXY_XE@uA=LP8thagv;M>K-BbJfbFD+$b&JO%Cb ze7xjfUwd$6F7;J}dYb!6Ol=MnzL5E&Aqq=x^4(oUQV?Gi_a(> zkN-);;yI`4>E~unaZTFoa866&2v#bsyPq!`-oYO6MoPIV&0f0JC_dQvIJo~2|B|>@ z9BkvkWHR>A2*>r4OVKFpNgUjAuF4hUpCVH?QLpE;JImaMXVm+lP~Mr;b-^$YK zBVOx$JYEKMyPrKfGMoe7>B?w5Z)N=C3hqtBXKm-6d)PiCMz{xCIfIrC(Bc-Nb8}_< zfGn8>?Gkj%hE+R&T-i5Efk#J?y4+Zl;yjbV)8=>@j}F!)#yrbYn}8_+u6U}qJNfiB z6_B?Kt``FK8qo6~wYV}?%;wRW`3~y=%WPgH!N)u$KLWK`mYiig4wZ61f8+L}P%TF3 
zj?md$g9p)p4~gj-)8=4W+>8XzLX*vReF4n=rghiy-GO(VLe08|vg5Kya+KaWyT+!N~SpD~`a0KF~8cqWZ^ zI-ib$=>^MBo4GgL)7T1FoeOq#wz=HZJ?WVeV3Z?CP{6$Qu9Y4i^x_7-<;8#iwV0ej z%Uyf6Jz!^SbP=6thJBU?+tIiTB=aAr)kjyHMpry{J^@`4D(}cG*QxXbvZ~k02y=|z z%KCI#T!Y4Cp$$F0t3Ml}#buDQGoe~qlavRwIv`t=Xl@q7hnCp#F7*RO!Fgq4)sUDmahuF#9mq}tdPB% zu0cBOc@{-yKSgT#QT7HbyWZP5Wh@axwr4w1yJ7kfDS4PFI9@3gbFz z%+05;l&5S~Xz4D(b-c`}x(hqDhG+FjxzNLYP}=qSW=(mI-|czods@{4xjW2kP1ci0 znevo${>NV3TU_%@-9psX4jpQX6^p~yXVT6Y?&PJ0>|Avv{{Vc1qj8?$lf-vlXgC0z z4&skjqdWO1U7XtAqtqFmdI_2J)YX+(gU-O}DvyImO#*E-;yaotXa;5NBdOhvvR~r5 zJwQ*Lh(k7v1C=aauK#Gp^Ge-oVYFimPjut{B(8W~vNj+;Z}$*6_n}tQf&v?nc-P$z z4s~0f`c$89Tk#AJ>(2$>gew=2u7YX@Xv@C=K3_nM7%2)~u}8!4leaYp4Jv?~qfuGYUBUL3{#ZU?qIXxTG3+@-Vw3p5_u zyP{T&zkp=99^3J==8QqT0G&7UuL5OHbN^R3avz*d!5*}Ohn|O)1qym2dVoOB=Tw`a(soh+)U0W{@Jg~E7&JwBh_QM*& zv88y8!PJ++wc$XbH^=~jRahraseOt5VlyZ)0X=RHr)~{#$`qc;fQIJNb$`T8U^L_K zLb&}p{oaXaLkzxnf>2&;%TeleyxQF$W~euNZ$7U5<4^v#H_=PTSD*1$9PRA9_AC1D zr#>YP{y0J@Kg~iNm5s_g4plEB7k!{jR=j))jv=gqz(T#wX!-PT-4onZsi6&}lF_2RP_8Eu-3WM2W7k_^FA7t_K37-jcRVpK&pQ8H49#pz+dF94 z?2o6pt3QZ(H2cw$R5aaw!(wE7552^m=+Hj!u7V~#NEDqF9Gxvci=NwKuy+)VCYm|G zX9r+Ktgk#TE%*S+m{IpGYTxz0uEfKQL1@@|DDf74FhAvwBgKp1ll`Ru^bkDRB@e$B zQBPa0I3H|ZVkPvcf+aCuroPS5^{*H~O9SsGD3yn}`xJb$$5a?t_e1rgKxbBX*C1|% zIvLPp9c`wf$+h_(%~tOuqV{}~JV>W~Z%x{RxgZHsCh=PG2Uh*bQO-0C`I*A>(B-B zY3B>JF^SL5(AnHrt(-u)4D217JPe*UBh{{;O(04=6y&fNR?&U0g^&;TpA_e{gXcCe zlI+UMj=**jP0oU)IEqaE9cbMxSgf&uCr=V1D3{JJZ{oLFV@^V6iSGr@7JD%S|;EdTaoH=keMRTG9BY@fP+8NJ0SXgaEJ!XaqBV}2!a5<^JE?A4xbYLmQ z|6XvkEWAGm{5h$`oExq%j?w3ESMV%2@hG(wrmc3AUr6~PyjSv!=bFUfRY&rxCQmd2 z_M^b+-s?x8%AK_0n(>e5spP^Nrc)*hJ|hu28gH)yQ~Pzbpze8UDh}S-Ohx+yMZna{yYYE7SNWfvt4~*7Q#k&{Nua|P}?%-o(N2hiHGY2-uEWzbLFIM zSokjyp6 zlQQn^g~DLG6U*O}v7ncrLo_xRe-QB|bX_ zJUyQ_!ZauPCf?%U9>@rr$Sc)@PWnAxQBNr`{FYwPRr^rBvmuvF!D4KXjc9yv5@m{C zE2o{wMr1UO)?K5U4xJVNfn%>bv3>Sl9Ya|K$ER`CdF}6zzf@u`*P*NLW1*0%FjBAx z>jR|DdYYr80UE!D?*+7ej7at1wN@QxOrfSSVDa$(z?{ziB;NJ~9dkEYTt*vyr_;)H 
zKe&hA6QOZ;>WFdQoj@r>Q&L0?NDSQZ`SehGNg#Kx!u`}U3#gl*FGu0cU0C`=T5J;F zxD)Bk1s09aR!`W@fF?DuA#>4|chQM5P+}iCY4524wPnKwY^A=f_#96OpH5G;8Qd$t z?~d4pu9RF0_q(DG8N@fvHaV;9x!PONii$w#Dxtl=+zvg`UpZ1(9SnByoPMi%uyO8z zRx`J9wHlDUOv|1`9gJZZ8f{Q+|(X+fOxb#&WN)T&sQ6h3Fb2{R_E z*`BGif>xbJDS~}5%A5gVR+BkYZwSfFrDDo7R%N-uBF0BdX8Bp{B{QD50w+D8 zGWJFGA|sAITn4TdaMp7i?Yqo{_A%;DL$YIF-JJi<>p4o~n3-p!rf_8$^_=17wcFOCuQ^Ql+3>~x?Rb9?BzFxGToSom4Q(uu0=||PBYSjW%gno^lx<}qI`gG(k30ZgE z$rS<};9e7+xhwEBT1nvUvtG#dc*@^^Z5{}I>u7Nva5+wML*o~Nz1P3>LuQKd^i$BT zDAr*eINr<%y1l#pK-LJVG)AADEz!@|A1VwiGXs6ov4TC3-fq;=7zr*<3D-|}s;50O zp~@0`Tq3_$QTq-2?-n?4Ai$Ito85wvJ>h7c!10E(wH3}8Z)Jn(d4ili4;3~cnP%a6 z4eE}djcwSeOj_;?RbPbKcW^y9c&rJ)Sr?0FkMuHLIvcj=KBBgY@Sr`Brv0oB1CNRY zZG9R~Sq=zXy||7t4T$4Q149ee9_7Z`Yr~6E%LhPj|1$^N_B^iX&?E+x?ng_t%h{=S zAkt8r5ttP`nUCid0AU`i&mnZQKXLYXc-RPB%zshA{?=`?C} z*S7hvUq+{mw)NbpfwIsp#10fqCAtAh2}%yX-exdb@&3G{GIs4Sd_f@e=kIsL4vea?z8u^%E@jL9oWo@=44z3rkAPdD$-*Q!rg_RH+}(+9s|zkRf20N z|MSmlo*1PMiiN9@zcOa`o)VP;-EM@Q8^Uv0z%&l&bB5<^xMJ3ns1H>WSOz0oDd3Ps z`D~0+6~O`+8x;<Uj0fjdtvQJZ(H@Q0qjqixyp^Y&0%VSN z+kR{8o0h_OBp1(2;`%*c?Hoc!u1`XuV?cfi=#*VwiPyd(uT}$mq`?@TN#l1aclAgI z!P{9b*QoTNmlcia8OdCKKJt|@o;g98XRt?SpigDIZ9^>PYsjP&aSYSA%9ee(YRYd< zGW-;4)C4L^ch7HdUh8|xCV-bz=!-3DgD&-?mLA}fliHKPX#($u@mE#Axjc2*>v@y! 
z?ckBdy+lgv3Es$lm7``I&>GhkdFJQ>@T-fzKR|8uc|JzndZF=XU;_WYr@V77h0uIw zOL`*VIceQlu4o3tIUDEG^3z&T>deDvOnKtVx9tp1I6F*D|>)qbPKH3wv1% zId}@}_2k++=cpbbUbZ(imb<@VAB;uw@a$yja%W9N`icqI!*OVYD-zt3>soqyLSR&LSVlDF2{f`k+$;Vy~;AH z(Vv(5+nfn=&A98XCgXYcVil~>_4{?9)>N>5j_6{wR3flxJT+3bgZ&^3(;s{cV5BQ%>9|b2L~9=Y))ph1y^C9J26^c7#M% zy?zRg4yJ_tnLWs4bjRLGBEP-kiihgq+xUhV}wHFVy-{ z0+;NA`A>=SoK?#v;ukXEtNYP2!xi&@=?C>Y&Y$T$CIR~@c%_xP3ZGqf`WY=t(`<0I z7T?0`o)ky#V&O9l+o2t8h@P2O%QYEhA*&wft!L^9pW?4^>PC3olK1sKoAim+Q@F<9 zD`>w8IZnjF+y8M7nSRW1L`Mu0d8hGqOfHUBH_E%>w*Y$SZt5*?PG4b+)f>33P}0#n zdrO{aSqiLLqMHZM3$u8*qP;&B&XG3D*mrTpdj-*Av|=GU*~ms(Y5C$X_Ne28JqMP zGUe?4Wd7URc4v<#^-Aw(*2VpaUD2;u$cXVx9MnmqWDMSkZ9RU?g@3MA%LJOS!0vck zS$JWzXwOugceS(lI(}*FnF4frV^@?o|6L0R9Z!3fTB-&PC@*n5Z?u?&CmQkj02q4p zKeX{auzI~NFyzIX8WB|jvLql%M~}o}Fn8BtU!3EV`}SK?upj0in+7&U`mTgn3|$_j zym_QNle7;teT&y_3db&^v#wL`4y5a`AzRVMU%+KQ_S(Hp<&nQ*aL8W$Jy6xzMJ?-7 z=>PXv)GTnIH2z}<^_kDI6LR!opof%~Divt6I&ci(Ni96Ja|6xc`Lk%`SfJ}h9Z}u~`DbKOhL#ps3 z|1IA!W36v_d1TO--m82dWaj z4C;0-m5g{w=nA-5nf5;azW;_4mGPEJ5bvcto&P1LD-SF9cYgMaD8ZE|#}V=ozhsSh zDz2WF#(2-5w0dg!c?s-12;;vEG{-=h7eLW3W3Lq9>Pvw4o0MYDM9Eu-n8q$6G>w&5 zUuQk7BSsosAH!2Czd=yCbE2K&Glh^JA|oHh*3de>k=h;%#kqg6Cn7qYubg!BK&$RS zcrmS|PAiaHo7>12_b5{8O8c(3(=VgUv>&*Dbfch&a&mbT>Z?Uo{s4Zui}r=Lk!EG)kz?BI zJ1FD4u#e1t9Jm<8{X!(cWkA1zmU#!sR|pI%MVxO_M)-8-xHlTNHasMbr2P(QD@RvS z4`=w_5iPzJ42&au2$I*e%^6nnIOL1H)G1)gRX)8b>vrNy2j9+t+C*;G()uHyrxxNm z=iyCwrGmEjEAc!J3}!)LD-Zw#E)3t4jmIXTWe z3JtAx9Y+bCAyOo58Evce?_NWH0ek8_tzLbJ?^2rvB#^dW5s0-1&mf0#^0T*C!L<*e zYP#-OKUxdQ$_nV(UijlK8M*BOOKa3de?4$%80dI^TZ}+1+m(Y2%p86*X9yPsI<4IoGamsF=26^f_5Mnf?e=9 z{d{*-kQ)z0lIkhF1^QIim_DUW+S=5HIp(ePU{7xX_)0TwTG5nJZ8N_WNMKLwdXsN@ z?E7LN+e6Z_yA8h1!z+0x^hNTTLM_|qVY@2fHF9=#k-ivrV=DsZ*G1WQ;W?v8@Q}-TBV>ozXs}a)xiujyS8WP=L{sr&mCnv5qVvpm2alb11aN_h zWJyDi-~-6hnXg*Po=l;1{XK1TFVjyR`zjA zYVr{eTB3&#l0lo=tCq1CzEZ-EA;0ISdlRjFDPLVDa{@R$5BRP}E)4m$~*>&=}T+5#%cnsKMu_~?-@1eP0Ack{>q?;b0}Zg zWuHC^c~Svh+*d(<^t{+F1YNnymA+wp1Vh2m#c1oLd_S4oZI5rD@7+7!xq>^uRtK8R 
zG8XdN9m^r(-hK58ylP$b2YNNM-}}>w#_0zw6e%sm?~J>ne%jvBO(XyIV6`@l!%h)s zbms?O7BM_6S!w0Gn6`+aKmBk0%PHR^Da)`L9Pw^Kb{{|<3#n&;d@F%2tq~X&In0gx z^^GLhmQu$N;Mm!n!$Uex52<$v@OcuOd!^h=8+{4Co#(sMf}`e(!BQI2b(O{L(3vAUn~=xu zt(PZHPh@&Ms8efHYS%lUWF611NKMA`WziX|Wv7MuBh6WsBx;wQ!YOT-|(idp;*DeT*-T^;+k6Z?#0iO)2e;NBi zkMSh*dk-kk5nyRFrB2d2&zpJZ0dwBVYINQxX!vqC*WRW!ZdJfg)g_hd33a5^lZ^L6 za=WJCIAkd{aW6%A(dXqR!mS8iE$L1tXDn@ zeV)Sq{$JfD*3_i8QLb}X#HZ^)-KTI9Uvfd=jsR+gdZ-^ln(hZaF5-DPU-Z08 zCiHA*yagFo@~*rPmaFxB8S6|w_z2p22YHyIw#k!Ns;QD-D!<9(l`TpM_FHgbOus*cHS17}sj;lr=lrGQlY9J5K6nCb#n)Xg|E&zdj*IDQgb+oeo z?y!<}M~l1#+`S?yGgj%FRlx$?@_N1JWw_6JHJ;`S2aU!x^I!>~`8vJ_po%@B%BV*!g|6QNYFy5jW+1k&`XSG=iFE~(Z~|6B1}@*D4!85w zR?GA6&smYt%r@({HcIBsqGz6~81zq$ZO(9Cw zFMK{w9unIdsJ)LQ^j2P?anS7IWo<%JCyQv>+&CE~iS~sB?gf3IQFXqbuP`)#^ z^u{~8!Ts7M5Vwh+vzU}-1Ff_Seg8Jl*Fi(K zQHHWd3F}Ct-r~XZa^25Rn^?UwiTI7A|Co@3LD~|Hp3<4No|0zrQ&!unuOe?DDy`L( z?)9{FU)If&z_nJ)Ravv0|0BTj5K41*31?baw}G_oX~@1Jl(!gp`zqlB&@pO|+xb73 zKI$BJKpWY0jo(6_Oee;#$+s5zuLLv8f$7NLg?_ZlI!gZ)c2qm}D+pgoxt+B4t;F39 zH#j1fCzb1EHw5fDnkHmc*Zg?9k=!N)@_R#ZPhsG_!>2+u`XHe^J(CgM0 zN$QCA^W?DrnKg!*Jc?zqk$Qa(d4DlfHlI+}`ebn39mMOwOxKcB=x!IeRDN)u0C}bw z&8!r4wwxMsAn}8J{F98z_$X>caq9WJ5Wm68{b7gxTqcJtd4Uo zK(C0Ae0U`YrbbvU{!3xniYaaiN%IICO)AB(_mHGu&yYc1ME{gg4{2NdWDZ(KNwa&J)d%rgSwEKhaSs(9)Q+FAa3;f|uc^wi7*SJ#lfNbgdQs&kf1aUl5B z$2px;t;l?-xj<=0@|1QO>muLGM}f61!S^JtG>>va!u?@e1hP7Zx& zFMSW+BYuH$Tm^h5->aZAXP&l1Yn5u9?&_r+r8v?xJ|Ijrv!!ypCo4=YJeyk1CZuGI ztHDY?ce!+57DvFhhIDa`*S@5f2UeAvchP*W)}x?$^Q(2cieY^6Ue~B&z*p)Xe-A!Pp5n}m@R%8e%=M(G{v^>F%t;Nunb>k#HSqi&gU)7 zx^=wbD)K*`l=4RUd+67^J~Yh&VrS6D26}XZ3B8k)N|u-5(>hwr za+O=p@{`K-lxmMCC)9`T1mmi)ZxE}R5HO+8|bbKrL^Z`Q%yRv8qL<1QDJ zp}`$#S;mvx=jHm`(}}ZB#GOi6j*|$blyQ z^@ptteiRo=dAC>IE&Y7X^HU-fxa*&gdy|~hN|yJOMCrTnOIIPFD!qOFK9)AFCZ^nC z`rOV`&du)wY#qRJU`UJ;sB<-PYIjOmO+7qG`gmyTM$**mOs7qidWnwq3w5%lXNT_4 zcGHsDO4>=ZMJ@LN^rxn?#~_EL9S?+aFh4I>msZkZ(qy7bL$&_~fX5%9vvQ17S(2Qz z4XotA{8o6uzJapI@dv%c&fRg0?>Kno8p?GwsB`1BlG?Bj7D5e|kgke)SCZBh4Qspd zO=I*%oH?0MaP4IjTb-EU;6~WGH*kaCjTuJN#*4YHw 
z!&Bc*U}R_-CFs@JOiE95uLdV_O*t~b`LUkt;d*d&!v(;V@l;0mCvoo;+~s&rIwQqz z?pJdKmYN(??6RVkkzxxWo-k;P#JBQ8N}+bTk2sDHqM=fwcD{a~_1=YpIXY$lj`JFl`+r@=t-x8Lf+gJ3d0X z_5!yrljHZ0`i?#*AGVRM5H@n^#cz!81hLY58#(@Tm$2u6?J+dc0fDEHA(c~XEyP6; zQ!4ltrKjF0qmk4`{M)m2me!(>y86uwTTH#SQHw0zgh_6*>>}wr;lnrKa2JGWJnLaF zETxyKls_5mpNr?z@6%H=84TDjIy9uLyKxVK3S8a$Rj}ra#eS5&owTjsw+Njy@y(Sm zFT#f%NZh}`LGG|EWw}$|6zZ*y-}4MSOH=NsjozwUptcYiuXUgM6)4Y8dpTN+_SEII zLWHIiyZ&1rQ;gmDJ&QUf8{PFD&V4eUlxi%}-?;7_r+#q$%w*v43|+BPm5(V=K4cX$6TT5}e$F9Y{QtdACAzJi6RPEI}oEjzj28ryTJ;mQ2c z<%(Wc0Xj482c(vI`vQ;in)c#dYb^D?J(oUkqf-G^3Ax2dNf-kLQ5;Irwh?mV&s`ubU6}t?xXZ)O41eBaZ@idrwM! z!_uYwX;?nOt#=xiQ=`?e8Y-I$lj1uPp!2ozo9HbdH?sVA7m9*b*h`m+w0)F47f7>ZT&P`a~4=qH{?ilFcQMOw)~#1 zHtuJEzf#}M)b2-I`rZ*wNr%YQk5OAsTWusNh8M zQc=dc9@EjdS+tN8Sx1g(l}2f{Y>rUVY$vaj*1m~j3`3$M=~3^CmpVp{k=mW@@fdl# zSJlhltP)%ERy2$~$+hToDd;d{h5Xq{9?ksjg?E)z--HV*X_uqWB2r7bs^CAp-tMf@ z5*ByqOZQ#rAU0D^C%?9- z@-J1SQ7-GBrxg3gDX0CQ(y2W6jvQu%FXS`E38o|A9anQ6kK=_ka6ubwwE=B)TFB|X zT~b-f*#vj$?aJ}hnMY-`ik6wP;g5kjYQw5|7#Tc=PP(@#hkRefbMX%Km;_(EKs_BDPCnIlgA?qNEXERb7SuaDzl{uVuQ69s=#zE+>tCQM zJ>f-uzlNWki8wq;&@b8$J@JyxOq#FT?_c6SWo)`E-M1uhlkJ~Y^Nc6dUW)Ms1^0_< z^-LR~IdoJWD!ha8^{`HedYey<_>1v676|W0%e_G>?E_TyWc0_n3iJYU)+1A-CYOiZ zaxkgR2~DK6{8O}+ep%&x4cOGGu8E#Z2iSQIyE2a~QL`)|c3(KyaoShG_P(U}3;C{y z*7+gWcQ=?>XfDr_xRdfdC3t^Ib2h;d-1Bg?cvW{#=iL(&-0i%A+}r2;nC;Y~jJ~+uwE-~(RZD(7pn?(6GRsMO1iva|VIw)70?n!S4u_(Sg~B_b z)DH-;d@cS{po%Xd-&YZLHSs>>#T-i?0OfnC_K%2{quzZZ zwR0&`o}TyZK104a|3TT|rHyLuyAmkwjF4(-rA+qP9J;0&ty<)*8kif2Y459<-)#}+ zSma+BRHIkNwPK!){0I>3K|3|ZyQkUy8zJtFnJ1TLh_4^hzP+c~e*u})7YPv{ghQNr(i6P= z1PZp4jnvv*bA>?7TT7Yhr&?NDTJTrfrF(LGnZr--s4^@MuFm86v%vccSfViT}Zwxqu#Y-Q*xJ=fm zwRgv-WEvMa!~eKSkx z1w76WSOIP-@ZKF7p{_@K3OhDOd2eCEXVAPO0*&;1j|xue1mo_-T@3j*l#-oKkc*y~ zu$$*daNkNw=kiKDqrjsxk(_(rc`IeKlymvE5ZZ*yya5QeBJUiz{wn?l*J8gP^G!z6 z-pi56O5Lx9#;itm4xr7O$zcjTlXq#63R+34ZX0cQKKNV=_S0T!bCAXpkU3Lm5$EW+ zBCZ`7?7GDbNWw9sbMJ_6BiqI?ujzV9c`@2?B5i#;aVycE7odAupr{OQ&SiB{B#FOB)!1qP*?a>m4*qihMz_(|X0p)$>@+#Yev)m3Qi)O{rN 
z%}Cclm?OCPu=|`TwVs;Vw@9s<=dK;7pUqLwAJI#lhGxu&RRFtx1gn*yi7M!6G|~Qg zG8Y5sNI2&m(zy%e_nS&hS=xn&P$3@ zJ15F3N!*lH%chf`W3FkO*3thO;CH06M@X*3vq=V($}F`+^0%&_78$u{>nsE(j(@oG zjC(-4=ZiSLILe<*yg@;CjuSlzRo3!#HZrw0n9TEB7TP17Woln9dBU|P%8)hM(1WpW z9^>x(zPaT59(nwVHgaZ-tGgcn*V@aC{Om_Bhl6Z6+bZqluI1`TbrBv-Ht8h3E+e0L z$aVJz{g_nl)_52+y_xh{9>`uadfL0%8G_D1f1H0cOSdL^ly{}IRO@{V_J(^Z=)ZFv zYevf3u}equR!7c{Q?onZCD()vCbXIrk>8~zzXvDBAU6h6aszd9rB%X@C2DIXJN2Yk zw~$eq^tU*$XKhoDN_pojhC_oJ?kBXgf<`Z+>$r2^>r(D3!i(n`t0bvir8SEZUBjy9 zvLn)(-b)(3g5Id*IAi1~(qD-bH~`MRm2@w_OaBrvrvSmHNM_fJ+VgOS8`sA>vR43e zjy`WB%>IgcNn6V~O?hHJ4UEb`Phu~^QI7UbCf3xThAQx;_4Y%g+JnSAi=KT({axpf zWX2N8r~$5un1jCv8ax-Ph%g^1g2)yO0B!LcQpv$Lh$BgYnXRIrWI z_V(-pYNO3W=O_<`LYMC7miR+xvPdD9Crl=P>lEI+w7vgK`FXzg7BMVE*cOwQ-qjxmpBQ#AtthSJ!F+QU?74_7Wji)+taK@d^6{MHavEnk zgfeQ#c>xk&5t_wQh*Iy=zRD&jA*C@t$G}Uu*)n;4C7+jVmxuSSfzFlC73lRdp;E_3 zj|9Utw0bG0l$Sow8C^9@tEcotr@2DT*-Rc(<;gE7Ra?W6UT4}rLA$k*tMtAv^zL3$ z?#1BzjDH1s_k?cjYDY(qH^C9k`E-v`;n(w_4Xs6@rvFOFz1?Z?Ht6IH>O2L=ucN%{ zC}%b0xtH~)ygf+#RZ#aLQn)hSQSu|;&1cE!%RCPy{ZXXNfW$eSS|gU2#!H5UB$v{g z!BGL5(ltKHylTSw5aQkV%6Tz*7ytTGXM*`gxJf%;55CO@D@AaX`V>9zlPnkKCC4#L{$yO+o8lzosZeQEEfC`%dhA~;Vp{~l=u0>c^P;%)#n@U*h$kNjNm z`7!rjlFvo7+pMlq67T8p)b7b4$C`n8cIbKMyZspKE~C7Tu%A8x;zMGNrM+EGhHS(J zba!J*5L5bKdyu}!vnSjaxa*T}tg<~M(j!p(b%gdpUY3)3J*{0t+I>p9*~<2^wO^kg zCw)h*I~+tg_YrF#R`zQ0>IZMWN;zBUZL|YX5Bv|S$Z8x4rBho< z)`8MP3B5O17*0E6k-{0L8DWP9?Bs?YSp_;?aaXeGYQ;KlLp!q3d+j*CICIzKgo}=y+a256g?WfP-F0`1It|RTzqx~>7 zLrXk6w(QIxF>mf_SpWNm7qb~X_yPEH9?EUl)Z>Xc4yw&ae+qBaK)Z<)TKGBYW+$y?Cv{dn$@jn2(EDiV_^Y!FgHKe|0<2XAhrP7wO ze#s6}>Y0P@&S6ZlcRviO0Ps_Jn(7U)Wt<;@?*VcH}BQN&v z=HHQrD)LJsC1;ik1DL@uJ^LQlPfs(!Jc}U{FHGk<1X!t zqQxAAS;2FX(0yoMPczs+y_Ke(D)t_BZY!|ff<)Z{l_*tz&-XLnHCGh5M@By6n7Gid z-bziBXX2|TlBy9ds0Y>=q&Nvsqqj_-co*~cEax-~b0u)MUwXL&|-h!4L&;JP=a{)PxB8IK=bMXES z@$=EQtp`WrgM(b%sb$lPd_Fn{jPb{pK9qh=S;E^}y$9JJGm zGCn1(eKB>b^J3p6hcV=(Z_nA$(*3=(RoeAqGA-!IZLW^^G4X!Dn|pcai&KL*2DDq0 zom#_xSE)PFvlLAKoDw^E>)@w_I5e!5!4YQ5=Lxf{lC5hmU@_8c3-sl>iFBTkm{i}c 
zr>!85D%qX@k|Ou&p!-6+xev)y!pbSFgD*GI zTAlFi)(F!l;|$%5tAcXhMSjdelP7EJSm1O;=@Y~~4k~b-mt%1g@ffs_m;F#F-u`X{ zEmB6v{=h7(w?iE(sp)&<^$4xdiewxc_*7Qqh&vUIRcCqmUf}1B9@X5{AXz+#!Sp0I z=mp*$@yei@lr}@lIVPXh1SKgonQzJ;F_(ILVxT~pjCY}(1m2Xg#&e&69N&$rHhTcA zq#aY%b#*^A>2oaw&WZ@Nhm?m7uOY@s)J=Jp5#ml2jpWvW6#RgiYV);#ar;yC zP*#R?IuMDQkxqH4FfQ&-OY8@pT;(@EjVUlR6IyD|!@PA3%Df zk>yF$Q~#0eHIJOz!PEDE*g2c7fvluu#nTm7rhlU4^thf)h;)+t1JzKxUQTEF3xl@S zFZuWM^+ekj@#YAUz0VH9ay(fJ=W8|qR|jd`N7?>G`Z-SQEa2v78Ef;O;8=h43R0HZ z^CRTn4tU^j=<_6UwbitHzN&JAzRTMcX!mXghahCsU+ zwN@v*A9^oGja{SXj#sYZ&-j)HcIVaRc@~-8 z8w6gYy9{2t4P4igCUJp!!^_-%&{fj4(Jnb)Z3_(R!_#)Poy4Em(|6|iZfi-azr#Lq zcd5#+(womEjM_IhT>ED{l;z&ed((5BMA%*Ac{JhD^6ao?)PcsH!_S&+CLi}IaqO!m zAdo6nMjZWi#l<KBp*=u(;4 zho8_nhUNSd_tBT4lJpW|uJ$md7V22&s|g&cXWh9%J-VE@w)fGzIp^Q?(5+}mcT4#a zlzAR$UZvEpLQOe(;^UDG=IXT+j-E)ndFq5Ec=EAp#fFkgTG6VVn7oB@#dh*^MbF#F ztdFUwW0{7`4#_rx7$e9nLuX#eztYH4<8tu$0$Rkiz`GHCAx}$z{AgsOvw%JQ^sVp; z7DzFSCwJXe()Qq6FJSWg3jHg;Lf*;4_UGQ?yFP_EJlPM|xAYh4r_U_m*zZdsmQP8J zN?&a|dE3z4(dZq26DtEt&UYFc`k)ZGNPW&kn8r^p+iWOf2X)t?l-D-$e-=_`GObnM zcOUXRPh2etSGVs;OC15^^C({^(uxiD&XONDb+v2-wS6%#kah9iw`gZ=+D2-5b;Wwr5L$jd@^j)sCC2B)v_uKy&RY1=Ixy>udG~b8EyRujLn#YUuFgdTIH5O2tV}Rs(`1#DR7@XnW7n)rI zwW@2bMN<70K5-wrIe6E!D1~$xKl_pHu|JkFa=a@aX8=5S8}NVONxxTnYaz^???Vw!?nNas;p%vV>Sya0UP z#|}F-?0fedwq@1&pQ78HF}V-WH$u(xz}-*C$uWi^n!kk_c~;Y5VIMeq<}>VW$1BgK z7TeI@MM@n&3602V{S3}!uO}>n{!awzzU1*kuy!r-VJ-Fej2NEZqXu-}mTkPr=gzWr z_usA1R*`(&E9o+Do1?U+V~p!t;AAy>ayAlIZaWiChIlDZ<0a8E^wgd-IwC}E^ssZp zI1y^PDLfA!(bi40jMBRwZ6sbZesbSpQno~hp;E3iqYp$&XfhaU0uK#@eiYx0<9yH} zFuf4H?+wghTL2As8t!J~+9{!NA0k)h8QC9lr+_Tzs3f_b0U+T>%Z1M(KrEjBD3xAMMGP1}uaE z?U(GMrSt=e;8d-c`il0;l$0ycgvleJU)6D*966jvEUEQVooB@W^wg&|* zh|y1Ct{_r z(tpLySn32X<)k)fb3io=UGGk-&XSr!`&1$)HpNdn&wX6QrHL?|`51bR6{g3bFJw-3nx* z(GsVV>)oVqmV&2NI102McR6%6dg4304+5|AXpcH#o(Q#R4}Xk(v6k}o0>T@>>r2=t zgRy5ldA1Bas4x07O1p?B_k8GpqU4{11H;^{&sOxg2AQc1Gl%E$;KsCTr*k=zm7snv zz)wGfuIizF=Y^bwJexwkcVl7Q1dMIe@Fil{BDGPj zqUoPE)3?yKMq+<8j6mvBd)n`qqz 
zYMk-@5>Lt7q&`nx1^3R7GS1f=>Vb*`uT^E-jl$u0D9%UqjT-V`H1|L zvyNViG5aiXr2XDx^OQ!C{VM$(6?f&4RO7COZ4r_@tH#I!ZHn!_^@+08=JxW3(-uR( zgXbwJ?NUF}+3o*CZq7L?M;7aaZUocEq3w!YakPGwRL&48`KzQo*PuB{r;O27?Ssa? z9ZskS|FXVK=~*1w|9TBwr@Sa0szr=kE{G?23Lb~f?@pRF# zNFqH^(#)Yqk#~V{GnDXegsF2zAh+tEwGG5{2Hkpcb0*>Zs8<;+aB|pU`W1!*mYiL3 z0QnC@J8z79t+C@fW#nKxG$B(uL*}?PW&yPE9loW@^^xgG-;kK|O*&4kRf<=V_TBHZ zX`LI1HH)|tLOzP?9C?TXt@qI}S20KZoQ>hkkAtY!1aj9-agK+)>L^f-yO(W|0iC0v zzU$$04es#hkB_4+k06I?&_b)pZ6Df4i%KuI*PO0=@;uAw%2jDB)hWdhGWY4Rca!!` z(MysiRPL1DGg`=VbfnRdKG4GqcK(;-`_nJzqVV{|W5c z?w=9&H0q~ssgeIniF-DbZ~|{X1-d${@gc-@MaskQ`#{n=W;G1jviH#x<3p}Ro5auE zhxH+eqZWQIkz2yoZ00@ykTdK3S*g1a;a>P)cdzE$UkB&nf|dx6Uiu(SqC_hGnR@~)QXuz> z$a?%3DOwBuNKwGa?Fn&9~@2|M<|1|f&iTj~G(c)yr`^Xl$R4ni{&B1GTAnPA)z zmv{JAqiL^Qh6H&nICTLyNxi^Qu0I8mM&hpH*Ui7CbHN%=SB`|!gg<_{m%qL z6(OaPpL`(N(p4Fr9HDNR!&6z*u$>wt$<&*t6KUVh=pndLdJRvm;cr3~Z%4}IDYbW3 z-R^-e7gC0^ji*7`6}0PV;BOGHXg3X|JoQr(X=agUM#=@y)j;h2@d3dR#Cw3++Ug_G zGjF4Lw(@*Dl;*hb7-Z;opxGwo=S)Ys6v5m^zN_(1;I7S>R-CTD4zt(&Fc?*Sc{v7_ zo_76Dv;31!vwCODU1OA$}RzAqzW6 zF&H>=K^5xqQjYm}N^(?c@%*VgmvT>!jF#llV8(OmM@37@2TCEOOFdUraA+$$Y@bjc zn|aIK{gHA$d#U@B3*x}96X3k}pou8zb09NMX=hkgN18%a5q`?bg);h7xt+@Jo0vzJ#8;@Q0V z!?*EYM{O2S$~>s&Y2Lq!_n?Eaw#1m1C$X%B%RIqQNdE#SO(dTekd>>WB=^u07Nxq> zlL7s)^eCNQ=?;TM=)H{EIoro`fU_>!vG4|TqsjWLq%?!+F z`Shc%!>PG-Dg&;KP~3~;V$BzaUa}uJgl`$pXfcXa&z*M`)V~8`8I&T9_vP9Ae@30( z!v1l`ydnIgNHO>_7 zx4jZi#%;8hy|n+Lp1pzAQK>YeG_{&7XYZ*%x&qH$o>5n%Y@h7`dv`~Kx$*QJIP3%< zHgp0!Dn#}H?+Yz889aENXwpo!!2aate8`jGR?~Pn^Q#fw|0TTS`+i7VSL+UpHhO_C zPZ7F~l%5n;N_8Bv#qqt=s><gp*q&aY_Ydll3pJ=*sy<>kmk zbEMFFs7;pqta{TDHQ9bO#F3^ZauuM%ZM4f~>{@3&xu;AH`1S=ehk=`M*a|05Z&ObS zu9yogI~UisKNPyZ9=e`He$G%D&D?-I(0QUz6M20Ml%6!`xu1^GNefRCvJRThMcdos z^L$7x;FGZAdNLbFD9W)jTz8b`S@|?4LVAPIbEs2aT2iVi5cgks{*3$4U0B!8?|6;> z(wBFs;()FvZB=dZ)}8MuhF)iByu$L)=qc2)#dgwY-j_jvO1~OvZNILPR&}&=E49|9 z84sl@9o#uFt)R>C#W?^^fp2FQ+bYTzPjB2CDv~1{?RfwkxITMn)Xnmb;H&-UzY?vSoIkIIgVQHR9(t3^-36XVu0Qw&bT-M4tQlJ(?1+==w>0H#;KjhK4`X_*mfuibqr 
zAH}jPho)W(|I1L~UBY+Q9Hy~nTl6YYD(eSA32o%O5gd6Yon@wzkZOUhH}vYsaYcCb z4XoN3P?+a6S(85ajDAYKjzq5~#B=b(*eOW+Q=#=W0q#gj&ual>v z1$oL9OXqy?mC$(A~Iq3Cz zjFgUX9}6Gop_xPZ&3qM(X7uBK1DSIBZR(mv1bUHUIkN=&p#fY=XL-5!--ozriL0YF zwzIQQUAIt9-&A|62ETPaG>GkH$}~^ZHU1ldrsOqc@^`4QR*Jpi6}&qtQSuIb6x4VH z&^Hp+1AbVv6C-=@-cIPxh_7U9<|o{}px$Glfn>k_g!Xgi5FwDm?<20<;Mk74Yv|8i zM>{C7m831CeHYBz`nUD7tv|<%(uh6tBA8UES5?j{4?#A$M>ME;;xckhmZB@ zPK$9YcYk)j8T$e2(K}~T>fNmL?HT^F%dxGtM2jbxb18Idz11JCb#Q#swO&eyldvey zfr5)*!+v2OzFY#&Xai`et>d1~wCKazNPgx~x;E~7GQ|9~i)z7lf&aqizNR#P8>aMA z{^<*^2+JYSO`b40;-A4no~P8SvCjJkj9GA;o+rN!0Z-OIuIsG#4*tA{4Op z)cASuuRk>pwdhf;0gK9STS@LtMyGfeOLYqf$i$mY_*+LoUN z)caD3YgBFmvRd@de+CEt7gGCSQs?;E5<0If+F}A>>X!of&7r;Xyg9ow!8{x7T#RRT zo*sxq+=CFMjU%MTb033kdREZhLzG#BwjQGulm0{gaw`(&`s7#dw2<2vB?iP1{DNwoa))OJ3xX46JX zDfbRqDrs+N()k@nQs>l zWf{+jPSvh^f^A1KE+(Hm^fNasho?gBThlT$o7SgztbqT%M##&^8?}e>Q!hjw8g(Y? zz^Hdhr3Cv7ylG3O5WS(|V+X%MNS@EI*=%EXo=#M+owb5|oKt6Sz;uJD^F_fE`mmkV zp9_wt1tPUb4!G=%Ya__V55N->xhn@R1Xr`bLB_j%cvr=L9SX})C+l++vP*pDky>dk zf?fj0ww@*BJgC}!ux;Y#vL|l6g-$*I3{}N`smk;Hq2(*VxBlHDNzp>w^O2I}(8m?z zoX$0OrddB)qHkzs#~u3vg?5}4)ECjIwo*H-koJImh`yJSO#PzQ#F(AYznVe`+7nvu zUiJ&@PwBr%A^-P(bIge8@AFrmo$r=hhE4Y@|F&4e&M#Vij*QC(u1n1GG?ibXC)-RO z_uLTMVkOkN9?Q&8(iOa$F4-TcZ|yGVwv}AxDsnyg?!=UZ94Ym^4+W2-ps_z_)X;EYN@-(v9!OldvUhHoz7Qw zS7BvMe_B34<{tbNa8d(O?a$!RoV6E}D|vEx4?a5&$)N`J?5qMkQ1?C60Tg* z1AR1f{8Pe&+e@$QBf+C$*D-3Pv-(pBQBS&AL81Cr`tzpLun*!Kjfs>qBXTh}=M8y= zr9S1`$iq>(0yuC~U?LX!*|gh!VHJDM%{<`D(=L~T>pA@L)N?v9rHN|t@iee~f)cfo zu8r{*CB4%5GGZS{85^Odqj}#Ge9q(UwHG)p$sb1tYw-+U1pYQg&fn@PA;sumH16Bf z$oYixxvLq{Jd>%!^JJ`yvRo7Q7Su2p>qq&bJ){Q7^S^}Og3z{7cnz)A&Rq)ajkS<* zDM8b2_=|Z~Q>jgz`C;lDbY)M-G3TE^p_W=kt(}9>H)@~cNV28YQIAG)Yaxy^l%)f` zSYJY4ssTJ(LwHUMnRil1O(nl$!S=})#MjpX9zE7tnMwow$imgYe-#wFGOk{vvWH#` zeout%Dv@EX9Qc^0kv!>{{+JYAn+RJ?Pv$GMnoGWFbg??rt|xwy(!``xu4!vk~n%apveU^vMqDMtMz&q;^Uvg3aD zN4U~)AaDE8@6)mx3yuZ>OA$JCW}34G7m{ZGXf5Y>I8ttITA#M08e=^76(Kp?#jBj2 
zry9gKB^fH@&QScME7ULQTzhRfo-Zf9GISw#<)t%JQ~#zna0!84gt8<8~RSW`B1Pq1Ss;<*Aq!k1e!X2hXs|^5#kK9li&wW6_`$Gli;T`i)sw{ zC_U||x@(KpgDV8yCB0|0h^vWERUSC*1k!`}{W~;u2j$6C{|*d!o;r!w7fM@2?)BvJ z2k!R5mD0ta9-Vj74+(WBk~AN2 z{sHegD2Yju<=uclI*uX8+brqssLr?`$# zIkZ45cNTDFU`ej)&rz>sk;d6><}@D4JO=oa)N^f>+_jb#t%Az7fYSzgFM4$yDZ2p5 zwTD(mD>O!WXT@pzZVnq$O}vekxPrEIU5_=^lOg7-p^@bCZ3-#yFnD)IwmdN!h~e6B zXY(!U(%GIyTLxau;U8bpwk68#(r%LZ|0r&1_XWhr5pOlz>khYNT{tjB8eQ?JlUTw# zpRzrbM@gj(>D9>Z*YU6Xui?$u`S_M%*u(7s72g`)4Eu;&T=}kaxsrUHgVZ0oXvBXZ z&CKDKE^U_7%hAfv)~JQsUyX8irC;@& zvr8|D{hS=*$pziA<fn9MekhG`+>l5m1n5SV|b#DMp}wdImWSErI{EiAQhC( z?zHbn-2TM!YmWa6EZ&No8c*E0yxl{MO=FMF@$Dh>s?{Oq5lc!x13YM#t__?@t9^m1 zg?w)XJErW5+-)bWYll+%%$T>{v}8DK%<`w zuH3!G{@ERb_K)^{915BMWwkOY`xqnYXF*}k=Ta`4R<131xwTW=F~D_+o5}5}*dxIa zxGi8qU0Mq+k{-~mQ^Q`)zqusLh@Ev{L7b`;Jhx3-%)Yz)VSh+T>euF=r!p{Vztw#a z8VUc~%hh-F!0frLrfR3Hq>c`L^7df*mT3-B6_$%oTm`n=W6Yj+cW%ls=Md=$8jtq7 zg3_G-ohObmL98aZsg00YbY0k!=Aq51^hlR|Pd`%-9lC~y|}t}ST)NelQw z{#&^3PL6sDGHAoTL>c#!`S(imx?BgPE!@!+n&k0D-Ui^)%DO@;d~WxA>Zr?rG- z)ICve>6@JHbD~^vUrlRiGZ?>&+H2pJXkIJK^4fv$;lN9tccq-tr~+z}`n-zJUq-9~ zUy~IjhLYbV#Yj>rJvUgZ!MFXXp0~c~S?H`&qOahN_mzaa1;uP5*J0QX+6Bw$Wf#fU-7NBiTdNu*qdvdn z(NeMsV?Rp#4zWg2U+2xA3r%KS^ygkKUx20tp}U>a=;|EjUQZ*RrRc&@$k8*8LA5_af{&rb&gfM0?jX)5q@NB{%4ScLUx2l&4na%j#cu_R+VZR(i~nk8L4w^fMHQkwK06MU`KtQ>q#z`PLnq%M;Vf-A@~UY9iPy z28Y|~B`Nqs^o4!9g|N?}G*1n+U!^^eJb>m>;JtJyyZkvD`Vv|Voke-t=bn(*1Qz;% z>ogjbC)Fd^(H-1N)@$E4c#JE-LhYFUL8{frcUrhT&bz7$(Z zY;DHQaaP1q+MpbNko}w8qbIHX{W5ySe&Z3~tqCmdi_BaFH#Sg{Z@@EpeOrK4{&)X% z*Lhj`F~nT}&$eQ>sG$tG2zWEn9v%=l1F1;3djl&w^lNDe)gl*iK;n5ANk^}wzOHw4 z9eNc~OW%U&reUAG#M2J&_LV-$4tlqOX?&SA(&WJuToo z!rj?=JKo^+d_51DvX+|6L&7|a%-%+6^GG30JPPiTH}5_2yND8$u%`l5A7I%^oje`& zMx?BIGXvvV=;~X0koIMheWD$bQTBaEAy@ZIMTXkua-Z$t{100qts!_1+HggW7{8dG zt9;a8sYh=A)j2=L*E8vr~Spz=@=5$J~ZK5368!wmT?wXgnK8*vH;4yXqt?Ejz<07td>i4v_vQ8?N8+Xk^ z$+GMF-#<%#hKf0P+Z^*eE1~{TZ_seBvHV=&T2C7{1C?IA^u>QMH6~g=wTNNmNR-{_ z_qgL)P4IdpcxXVws~^t^d&PJ!!+YKjf#&t-i4|A;7HDO$WpCd0P9wV7^R5&7by%Lx 
zm2pn>_Mn?H8KK$$mz!H9beH3v`k+!v@=4kIOjfS35`APmCC%>LR%?k6-%3}4Lko8i z5a%NA;l%3^Jt%t@uV70&8}QZuqkFQ=i+qI8U5z_QI{`1uGoa_)pn>j~=tZXTod&FDgX0q)4VEVxlmr86K_LMvkEIbs|S z-qMI{5y@mpM}n!euU>jHf%U zK}Ne{)%D2D$B@`tf!{rw9jm_(YS(K~qN^SLVl;r8A&PM+t zI$#x8EhjhEWj>0mS%TH6?a%{!Nu}+;`(1K%2cj+HHUzwQwsSes_or}=df`#9dk164 z?x9jnD-?mqHq?`*Ct1&;dnOw8B}%xO|7R(A3$*$Jo>u_V^~haUPfE+SKyN}1poBbM z4n_jp0L8jGlI73?L6mMkXaxP5Ya{~bKL z<90fQLkt>T;I5Q#WrSf9p@7wtsm}A1Ps8k+*%KVWlM+5%N-M`0<-$o|X|nk!Puv0Ik=&995V-t2%g-$e-xgqw?VSCx6)X$?=5 zv1^#md2l3_Oj{QD>Cw`o=eR{FcAodrn|NuUe6g5PnNFX-GGuvpEJ1x7_XYp*`h748fHEyWkRU~@IGt8~uf z`^UhvJ?NzaY&aflf5i3>cT1shpH}kA@oVLmuD|ym82{;=-%~8Tb&fi|!oOaJQp;EH ztiRb<_0;l4x;=c#V=AI-c)|>prw8u1RTf0--(~{gqc>4-d6s<3sr51FkvcD zYKcQj(kIy}p8TZG&K$gye!E`Is8;i(bq&f-*UY6DuFCPr9=3O9KRGk?h>(wcp@S4J z{mVbfs1h9Vi)DZQNFDD$D7zJ$u3j&2V)tz7J2^sm8eJmN}hJNKdd)% zKIy9hZ>fyWr!i;G7%|kHpycgBSH9J7e<`+_U{2SYk|z1EZwKq9(6_sV(kw||cqvtq zZhCUn+*gZf#nN^Aa0XXvFgLQ>kL%MeG7U16W3nj`i~M|}u;!Z8=|rk?TN)gMJ- zCc98tREu>N-n=+#(hTE*5! z`(-Iqe>0r!9)V8;w$}085{HtPX&t595%4Q-%D~!sO6`rbQxYzRC-esWm~ZxYYUv|> z$n(Bf#(xANbNUG-INQY?d$f_>K#v_9df1+q){L?zhaOiBIX>|-;yUMdD0ZiObrh1~ z8Srv2p`$2GFKUtJ9muQq$@4il&Gn#*i8~T&IS(c)LKo~t-j$qQ_XKxGew}mW9uc3C z-d&_$B3=hDt)c##vFdsttHyzqTBLK16lrDTLL|a!7`of$$pS$4GR7J@s>tPq$Kvr;(f%e0e*v`4Rf3?l?M?y4oupN&fR92kEH? 
z66n|Tf8BfRLTb4gdDl$;)syJ!!OST@;jEPZMa>)5$6t?@dnw4oY?IuvZQ=o!E_zLOc*0 zWZWsdwQ=v@=jEO&U+;RhzDjLdRIOf3NsY9|BB)k-O=;^aeC-cyh6Z3>16||;9|b~A z=ZmW%^I&`{U-P7MbyK#B73OJUsch*ri?=kUpfpgsmcA+vgu?Tu^1*(UD>`Bs2pMX4R*>{>?eK5b-?b~F7|1Z zhW4XuO|_tBNza4^o&*oVsSK^=%ZEH2f$db}?E%_qDG+VO(o@$~gO@UXYTt=aiSkJA z)dX1UXd{otol87t8hPG}rzunc)wgMDG5kKYITaqa@B1Sn86E@$)-23zMa;9Ak(0GEPbIhNm<*tnS^Z}~d$;I)nxp3Whq{aXw)Ex+&Y(IU9(W$eu&Ia21lq0vG(MN%Y z?X;4;iUXTxhmr=la7Sd=NUIu0LS19w?&}`%IeYhvT zKJ78+kOzRvHMDx#9PwBI1swr(e4XCNVA9_K#s88v`2hSm;(HW0NIGLGJ|cDgdDL$y z^mPxk3bl^PdcNi%)pLW8z1IO~PiC+g9 z)IgGl1H|Dxmo}JGbonbI&hPklo2VMV-wovH;;-S}kDn9yYiPJVA^lLAD({0) z@=t_)8Bh+Pn$x&X1u1!|bv)w2!yvv6UchAu@m&|y4DSE$um1!4*`=xGdIM%Df^3KR6c__|>Jk6{ z2LX^K6C|t}r-WZIyX)n#Uv7wA1)27urlvd;%%*ne!MAid-0Wq1r~cVWrW^nXei5Ro z1TW`;&65#MZq8SW`{+j0+(YC$m{xHJ>SYL|j`r9F{!W3N{)^UKMtdnRJYf9cuJRqu zDP6n5`uXQneyOh3KGjjEJ|DC6X8Y-KbO^hivR)_NZp5~(;?LSR6-5VzLa{Vh?p7D( zp>t06-hJ^_%JuViH-W!8SS&{y6}n>Tpi(Wib1zM@Ns~f*L92Zb0ZWPfG~b>85*;ue z!Byb>J>D*cj#IbFjkntPX1)&~Y_1L!qGGt75W6Jnz^D+d3B|YvoKi(gT#kBig3D6A zObrz>nI}z=gShv@Kx&9DI`2|_+|Krkl>a0&<^h%G0RMUfk@fJ1;OF@+C0qqI_aXJM z;Ksv(%7Ne%;CY*nV^RE#;KL!~W_W2HY1>IL838;K$aMtYLYk+*fSbcDLJ8ae?jC>w zE67#zU>fkg8becGM)(~;%Z$KWa)MST4iSewJ^bSgxIvS}iA7TLJzuSiof~3Kvw0HbIZ>;bYZ)!mlh(hQ-wI+l)!5@TCIIUl z)I(=r59*jv#yP~!(GuoA67KmV^48IL5%Bc@c4@@&6~VRSS%PE~NR0A; zz)w!dw8Rhi^$4DDYdoE^PLpqjW=50pYe?D);7d~M+>GoLze=(vVo;$9+Q`tIza?BF zd;$5^1EbrV*+OYr#Uh@j^85e4UM6p`Abh2}TTC1BTVT-TAl6+dy#NVQ1B^O&I^x~Q z3DWaMeyP(T?cU0p3-ONTZr$A`%_k24zmvQ5v1Lk~yo^>6Kv;iNo^4k&Y>DZ;aV`N?d~W_%8II?6u?n67Nc#AJW2g$gx{! 
z3Aag-PjoH1e5f-drjrZA`!KNi08pI=93C?FEkb&R+5J9};w+?Nj>!r+zI}!fCwIsR z=RzT8Kz$XMly3c5LEZ(@>K-3PT;1dE@jL=!Zg1MH7Wr}$)R-Z+F9VVeAl?B*jD<4Y zDD6#haG+b$xgMI!Vcfgz$0~C5_?)riT}PQtVby(i4R0UuG!QyniGeA%t|OgJ>I$A# z(>56xR1RK^C9r}g-3m@`v27maTNAk0gHqZk@l@#6!IKkk%;eF@cKTK#p*@nuu6i2G zQXaUy>LNy|>0 zEzzsb^{aAMf8KQ!4MI*_0i7yk<^2Q6VK1~+k(_;h6)N8cSuh8C!|r+w z=(=aK_Jx2?^19T5!Upt|AEwnWU zDWr|_85H5+@op-=iu?!B7jm)5|B(BA(Ciz0ok3ZzQ0r-6Z8R32({jgxlUcOS7@&QE zvOfmTgRoDXu&zr)SUfsjdVD`*+7GcJ9XeIUJq5%Wc?`f!;euQj*}h2HcD~$0IfJ2+ zm*6drvs($*jimI!)Z!?x)q-YXcoU8OWN^n1X(_k2Zr&-QoIq_KDMt()+VZ}e9}NZz z((oRkA#}KnrFE4&mj?7wi%Y_!g)AU<5_c2uUkYvrHPQc@~KQl1W#e~i}2X!8PaSxzcT*b^e$rZffm%PU-yDRcg!vKW%w)%Yn@w^RG5(=HJP7_9>EH%Xn756(Vgq?%Re_99;oCSG6oz-t?PBgYQsJ{%WS2W94oeGk0m;d!E>Tf;*1T)LOulWr+Jryi$oxu6koVxQ9k z6|aVdE07WP3f=5NN_ASiu{}n^-d;v3dsEVqvb2J>tOVD!d~>6vi@{2s@HdF5TXh!C zyVGWFNTSQV6Bk?yFuM89RoFeY=4|3z$9KI6Pf!ylR@xR#3@fc85A9le9>ut)Vbx2B z-!ocm1TAB4LK>Fu)`Js!+UgY#AR9*eaCRj0_pCACQl>vKG2Ij?Tf(ft4P)&u-Xqll{~ce z-5@}$&LzIMI1_m8r=&s9>v3Rn6>aK-+RLH1k>sH~b?Z~9aww8j8gn}6&Ez*5`?mw= z95lZHS9DtX+d-|I{4@`+G=WgQBKejJ1zh>Y&1uku*_B%pu^rMdZf&uH6dceC;S|eO3ze>@4$;DoD4$kUXw!ucyMT9EC?nm0;rRRa!%@*ED`w zZ#|$Hr#2V5u(5=EooG7KaYr(HaqaOk-UrLDly^yA`^*HL$-7 z`B?)WT*KS&t|?FDwAm`sjDTtv1J9#8jo^I_+PxPre?Mrc$d^v+nmh}1dO#)O!EG%* zC8o<*E0Dmm;5L_3{)V_Ol74UCc`zWZ2dXM+oHnQ)4t#f$*1`2o;vPzUJ4vgBB!#7kBCI zi^#NBiRY5xjF3I~3GJcWJNO;Q(=2p-1=2DLPoC+@;op6rjmcoXi2SOcPv*fjgQ6ar zkVE!XmJ;(&LWTnA-O%LOP>KVpHH1G!Dy`}%#M}YwzoQNw2rwOgRU@$I*Qi2!{g#ji ziNBik+P|aW*rj}}p^VY!%PLy&ODv~&0{l$osR&MlCM|s_N4}p+na=~;fwaUvq;xR$ zd@z}(HkU#hozS8(cO<$<&Ho|wzZq%YK-eh4>ft4+(9MW?fYqTuvx@vjU`;&%9CriX z;pCsZrBmrgH~(p*ICofh2*Za$&L###>^+xeR8 z5&O+vMScVMX(?q9lES4@W&HgS!@Rq#pHd8`iE7hmv1&^vTYd?+)$TJ^d0;R{nZ9Mj zRsWbqshQSxIYkk)X7wyl^Y2^f88xj{W_1&a^u&ZLJwC`^Q*`s50C$jx(V?TFmo39 zwDD|Rh2S`P)T6Pe^@dl#qgP|a>_z_5q1BAgA4l)>5U6G*F_i5t`SEZ9r$IQB`(fCN zWt3W^97lAX$4Yqt?!TJ&2~$qf?2J~?H{+9wCXYkfDMOTLw%I`Jx_X{Hxcs-Yd&#Cw zQ#(^mF9hAC=k&ic$|#j&*rn9mdnJ6(K?y>h>;|oOM}MRfeWU|{qaB$d?bt6#BN=08 
zAFcSwXw6i4xL(73XoH`?q(=%*h1_@*9E=3K|L zE+Z_swzh;=P8 z_Z9|NW6|ZbUE-+cXQhnn|1L z)#=1%wT;jm6!HdIMK9fPNC)e95|+LHZnu6Y^m`9joW}J(&~UPq91oD5cc-LY&}!0q zb<}YOGY&6>U8{> z3u*2o&Bx$-5MM4qZ|Nh?gKK$v8hueW-_;tZgah>bZAMFe8!q@Ou{J`Hd7gg_TqAjQ zbDs|4&j%xF-1_i)y@>s^8ESWI@_=Yzz28dXVccC#SjY1s%G1)V0&4k0|3De=wZxkt zZZGDxy91)FwPezzMDr=x$t6Ex>FYgcd41gZh%-Cx8)%i%cHd=4Stkz`Xq}9E7P42) z^!aMul008UtPzoiJY34XbXVgkBeiP`(_i}%cSpJMJbNUM-e;}Y zKFg2F<-uV+iK_zP1A*{NaG2-mXn0P~VhwSnaD7j?kXd{2^ljp}3A_F%?Qci8EMa|@ zT$urE|4ys5(I)yi&LS`E%y#sDJ0&O!j|Ff42FwSL&%4-sb$IohB(5Q z=?xY2@8pJo%uIU=m8~P&SCLFHF;Owc#8grv^L%>0-*4UXIAHd@``i24*R`+bdQR(E zhkM=YKCOEl9wq0Kshvh6pEF{(k?1VyvWh&G0O20wumsCU8_*5#9RFJdK0bu@T(J5G zd}nV`%-EY#x9Hh^9!}kxnwFURpfxS^gNV&#j-M69TZ+FV^x7~@8z^@6K}R^O_GU* zdf3}NnpBf{cInCixmecXH z9fUf{5R*$g;Y*M=r3$rRKAU3*km(0Inx=*V_)PD z=&KJo`ja9@)HcwnSCaFk)Z$8@d>RP!BiA7VC(-^L(KGFVM1RAP$k`%v=ThBeK(;@% zz6~7O-&={E`Vb23Lm7?qDt-l?Z-Oc>Ce?IO*;o7qbf@k-r&Y?>4>^&t9IuIytBkUy zkb}Qbs6^h4gnsp#JAbE0t*WWV7C1$oaZG6z>Gl9q4`Q{5{e6Lc6+ukx<%Tng-g9CkjLGa@=5H^+gKa`Lxd zUrkt7>?%i{{Uz%}!dy&wHED|EP#|>&=0QpE>VHQjCq+LEjc%Bfr__>`W~C4%;7FdT zfx!~pmPuZ74uIScl$N!|4Zb>yH$5l$sWt7hvyGe(>+c%XyM@F9O)0JYf>L^lpVO8R zOFoXTWjy(|nrnHAB^4X0m)8-mVtNyzmQ+7+5r-oTJ{5u8zJ~S7(mlRA78_3u+~tq_ z&Cz7N4o%R6+;;?&LWqVIGSnn1s$ zy+-|6uS5GQ2=7h0qu`7E37MMYkfR+#u%ewHe;s^c%P1LlqhzPeI|pHTq9^69mX(Ln z#@?M0!oK_AiU0lmi1PJT$`3E)Wzha%Kq162kzZnB`I@EhQjBJ?aq4@_Fpzwt6D8u=Qs&3rb%skY`qpdOLD7+ZN80X8Rt zHz~;<2cAkk8J1r=IKh#{)ktC~tuLv+L+d`nb4Mhda=A#|2N6%p@KMULE_+e;3UcU( z3~nLEr@^z9YiwOniF61b%_gMISX%o9Lb~8Tm9H{NHJ5#o9`FazZb;y-kn}dcibSx7 zF$##xagSuLYJ<0sZX}W}1A6n{iX~Z#bdS-gjif&a?5&4Nj|MOIAcx!r;fqK_Yv9tc zkH~8Qw)-G(;dJ=%NReU0Y(YMcC(jK?$5*kLe}$#6KX9*rBJTqWKcPK3)Ar7`dkGBf zPt4imw1xU=*|z}wCf*)_8fz)b(J4KTZnV>t5PM>J2ELeV-7?^~nH&ewI@-4CVSgc` z_fC3`K4f1obsG7{*z6YYSOy&WJ6|Q&W+eNoDYve8DC9lqXA012!#lzx6njCH7ZaKR z^2JQB`w%u*bG$(Mn( zExf7mlo#roO60FSIm^$FkUa6`4`#ww;=42cD!I#LS&~P)@#F^Ger+JEk^5TkIWC3R zFZIlGc6NZz5vCQIe~EiYVCCuz+Os0zr9>R1e(_Ur^OPAanDMQ5;-@?`?#dY_+;1k1 
z+YDAAMaLzNX;_I}ll(EYW1}Vb+|9_9!pbr8Jp+kbi?`MdHvEm_^W?J$$n44V1BZ^O z#O$yk+{KND+qjEyX-)5hJbE_V_YBmt9=X~JnN$H+>ErUpU*_7C_N%A8Gs2XUU6IMQ z;w&hkCE2ulK)>6dyrXGHy^y1zoHFdk5>lowrAv_q0DFlXFQlfSA)Y6{@mSFE)^xOq zKbCV$-xB6?l_H!i;~Bg$8F<_y z-q;$Xzy*Zr0on(cdsEL^+OvqgyqIGXb(w+Rcnkg%Z4O75wQ4$pWp(nS$kS_(rH6v; zmlBPQpuCl-$G0yMWgRtJiOnJnD9^Q=oqt}Uw7x*>*rt@Jg=BA38Q{Dc`xyF#0?W#} zj8q%=9Jxi9_5(26lC?ZWr9 zglMn$6Rjf8Z*a~gW-sc0QKCv|Vj>iwjMqTYn7q}{;U&W5ABfT7;m03(*?HgrpIZmcgxfw_ZsC-rJl+H zWn}#JW)h{Ty+ZPbZt=TtHFNEio+HGi!O~-V6(cR=TTQvu(4P5rh4R#V+Nt4@mVV5q z6R~Vz=X~^s&K%3F2mVXo8E0V*B2Jbpl6{g*3;R{PpT{v{rxG(|O0v zS>6-RM?2v1w>W>~X*hX1YwBWVEZ>e!c46O2XyYZ|nFz1hXFncz?PYCE5^@x{aFn$J zy}L$A{4c(}4NTg2O;D&mshJD`6BI*Kg_B3$|xU+b_v9sr}^4HmyQ zC2WgdB~4JC(s%}bq#F7srYnPYcW`x7!kH2Up4Ilw#_CUcTj{10L+e;dxFT6FrdY!J zB+j|$Pv>Xkl0S4z^qlkwDOXo;8uvTEZlBkgt}Bpj9kGu}g!kpU(x56mM@e7C(s~Lh zy%h;F742~)t>T98*Ah>Up?SMR_%2F!^NFcoXg<%Asmsgo^DDrAAT%`x%k%@tmi~IQ$6siVmU)XWgOSsnsPV6n9G!r55K^Q_8FJhWP|}rzNlBJBm)=5m zq@y1Dk7(!DlizqgaBT;7`#@j!fkk~1&Ixr!^LVtzQbJs6C;tyeYL_MHdpOohf!J$l z6=ke`cE|bD{Pvfe+m^vK#@FW8ag@;L&WpZ~{92%AH{VcZ zpU3K(nWS(ol}q4V52Jrg@99$R?Ko?|jJcl48Lh6w&}wq5A$ro$%kw7_TEX_4R`9hh zZ6vpYT%?AW|60bAI!?ZqzRo3Fiqu~*6uWZsA|%=Hbfj$QS$@f zZ@tv*S}Aq`Jv@85QdL+bN-1SSmU0hoxAhP|ao6(E2Vi}+rLf`Y>IjA7R}st6FQHqQ z-jz+!>k=LP67#l-UfPVt&iBNz0h0x%;z{dC;9B z_02$2Ak1;2m@(Cj_UZ)Og#_U~Nax#l+6KjF-P_l0;cky}1o=1;wF7C-0lhvKBmkQsrz4&7ittSwNb8V?fE$B<>9Po?Y>&1$Gvde zo|LqA`Z9)V^dlptGv{~J^7bs<*1J7CoV8*g!X^S$HQ^J0Aftu@Q;RPrzX>USZCYoY z+Sk%bixQgCUG5zR?&qf%@>Z6@M{{*%Qv-SRrWRvK=edkrZH1uhE!?+I`>n*C#<#G1 zY|G|W8g@+B*fr2ZmarnszT3xf_LOgO#GAC{p-@M7^qxw3?lfpXneKO6crGn@1NmGA zch14ux(|(XBX?)Y%?7gt`pX|BiL?w&GYGm-|5U*#u1+}yS$rR|(rr8LghJM%ot4%? 
z_$R351LXTHGjs`k^vuncf zc%+;@&nd*PFP&i{stL4Y9B*(=P?yB5&S2h@T2m`yI58djx7R24otvJV6WtNqlwsd& z1uvoHq&Q`Y^RN!#t7C3TsFsx52A;PAdkd+;ej3JI5B?jJ`)yi7KJ;jp;yCV+0aG!B zDM3ahxP}A4+9drJKpTz)>s69AD&U$gk){){%_IDAsKj~fZUYgr&GB-7yC@|df`4HP zX`V;(xh~*c;yJS>WQRZTbSA(l*r{vqt2a?Azj^4@Y)`7na0x| z(zpuR*~ph7>D=V@G`K*zbL?Iy{H507PhhoOrxDtKtS%(I)C;H|g>r797xPi_2iQ7e z$WM#pv?K)!+{3%tOq#pl4Qb0wnH(j)iaK?Mzoof^z)=Rg=b~2)TTr>mNvd+SR3V+QZ3S&izWFsk(%0Hc`w-pdnT#wwhV3uyhpN2 zcS9z)7S>S%>+JZQ)LNnh^?W_Y?qFd)xK>9xX604fWn5!S_C#9N*?9I99F1Df7riYR z5=hPVEpl9x?3^Jy_2%o7K)jI>Lwnobvd6m*U-i#rP=_P*NtIbh66Mf+z}J!b-bBpt@bc??FQ8Xe^RAyje}O+2d4n9q z=|uRgCpq5;jLv&;zJjB-8# zg_2C0sC}BBCw03j-SW;&{t4%Tx1==ko%AnF$1h%$v`WDXksk%}a;Dws(CB#TRZOFt z&URJHcZCK!0q1v##)x-4-CcAt{~KnS692)1jv;0XR4|)^BAI`gbL=@9ob+Y!x z0*?}S2v*5ZN<9F4>x~Kji2V!mb!@LAlBkfhO?zN-mE!yaLy>Ryb4~_YbxIuD(z80L zD^gUCMKjm1h_rRzqJ;NI?+=GghqHe~j`~d7qXqq?U2~F0T6^8Gr#1qaecUGUxCXe3 z)ZJSD3Oivwv!iT-Z&Q0$W=P9!pyT}UUt^&y2eR!@bUl)K8~CVzBL4uzoQVv0kJyvJ zu5-~{`R022W^nK_QpT~1+EnvCSROafCe7eOfDko0{5^d=`we^l;AmK3sN(fw89rw=U?hnMB z!RCxa;f`@GpFEUrN^&_{+o_VUdP3rBevh}X1wzvM^8o!7&YP4bge!PyR0?q>uwS%) zqJ=`voJ31oFX!1_4z&$rr0q!37724d7yV6**bD1{H4ZhLK8~Bgqd%j%3fnbi4K|SX z{!o>kv_Fx4IpO+VNAm34{S!&4|LY&vO0N<29Ztu3TKG0Lwfb0Sv6$f513Tyrbm|gf z+yt)FB=#%ShdqdU7TkAOYUNY0Zlu$N(1QCsO~lrC1V}C>p6jojQPY9iJ99_Ras_!5 z$mv!}dYdoK)DvUYS9rv@zf5#=y&clg{U>Y{TQEy8jHM*~0URK;EJL>YL*mZ(E}POf zt>>~LM8Ji0IPtT}~M&~}Hyn^M9PSgEJd z4k3fJj7O5cGGj3HZ033;vd+2Y-NEj=JZ(r6b`UsNi)X$)^z{>T*Lj312cBZqm$NP2iYUx)guP+eA?g8bgM$SCYpWwJq_y*j^<7Hw>4Hyw-dHWuAb&IsadVDZXSivp$Yo_gE6Q9I|vy#y}Gf$Uo7LAy_i)gMrT z=6h173hH($^(sTwitEvE#SCKV1@#xLHv_NM<6!bxo&4?g=;snM4)-H-hD5(lLUthS zLsB|==PwVhriLx#tSqRZrmj4(L^V|n`S&FMRpea`_Ud_e%wrWWtWEmEne#QImFC=n zuO0lN7AjJIcLC9FBP?#%@61ATP#Z~o7p9gR!n@YAEt|tViKiP9*UM?zJ+6eRBUB38 z6OBHP=PfC|ZKHlNhql9+cFyrN#QAmcD%a5NR$`lug3+o*u24Kvo2Y=2pX3H_ji2^=50^Zc@bP zR?2Y}lHLSqaxV4%h~Y~}z;Y-`DYcH4RF?Nm)~1}|=JCoMXSez7Hu@+XiL0k# zDNMkk0{1X# z?b`2Gp`Tgs{b`i>D_T=uuD&>V;3l9D>dxf-GPuxc_#Ede&O(kdl(4@hfvwd@-sojrPObD*UPricV0E%2m842u*ZD=S 
z`yQUVf)7{s704yyUF*|%X^x*?%=5|c)VS2@qZ4g8x~bheJM{{kBRqVy%4m#I#7n+^|I#i3hAkVT&!(JVia4y zjY)U;OWhMt4&c5Uw4i+=ooqy}x|hyA)S)rSRIRA}(U9h=&uT-m=Ni)6>!~%BeD3Y1 zo^TAf2YRFmnc+yCJ_UOdi<1@ZE^b=~44 z8*HK7^v@K@$&sK}!K_$~IXJE}bA$8RB+b<2j)ru=UhYnBA$-6wcg7LOGFqqvRXI9k zdufd}a<1lDPpy7Qc!9TOa$m_TFF7rZ|De&&<-5M0B4K-SN+lEMw>6XgIlSQ`xU2v5 zrr8^vNgZlwDLsp75a+0yL(Fp)(*5`zw(=(44AC-CL%AZx9adwJOZ@9B9p$BZ zHuq*8O*_c@Eq32?E`0gl5`5cz6 zKQ?q7m#dBYB);s0g#1Q=?`_&`DjU52h}5p%SWUYf2sJ!_Ecys;R#)qbI)?9OLwoiV zoZq$(Uqw6meBUF@jR~uLkd?wcA1U(&<>=SAkawX_D|7^F(aMJ;tmrKn55(=Eokg^m zvnt)MLz`z9r}o)wWNa;|!aLVHwS;z3ff^kLC7llp&hJzLI8P!ltZiEd?J0|uxQ04L zuXb}Sk~^9yTi)gRUV4snU6V~4_N;rttOPI4IhWFEsl(IM#95v8{j@jL?xUc8b-(k> ziad2mEpL9EX({ay^+cOBF%isaQ5=#qzFaMh#Xe?ohLF7n9nMTRSVf6r$-(-$hHxHt zSMQlak5r!RIEz%4Ei_);gKRFC@hVLW1Bx=Rp`>dD56XJug$J>gyLjrH>bZcYapYD- zEj{BoB&*hs`15U#l$TuB=4FUHhw?=@&obA-uFb7Uqp+$Z&&cs zNcd?1uhyMG~YBl8M{D)@FpYXju^n3-9#J#YT8v_}Ox{nyr!dOCelb<3!`IE-~j(NmZDGQK~; zsot_*?UT}ZG-c__h#eMONj5WmGjCz?=Um&6OR>InZ|dN@jf>!#GI(EoDU@p8BDAg@ zJC>FbPr)^|wb0qi8%jR2$Uk~-o8U`%R-4Uxz$9FIAa%6K*6`e$)0{ko-5C$Vcv3cp zMlrl^!iG;u(=6SuXTQzAqHC9-J^baZT3#))2`T3o!boZ@Z4?RD3v>Z-z5p(#q<-kl z$dyy!SUr+U8QYsp9vL-zF-f8BK;g*EY$V~mw8?eA8GB1y3-v9g*EWzGU59rsbma+4Nc&HoT7i{35;*Nc$4)nnEl8oGQ=1jQmVSvX z*aL2DzXh6hCeR;%t0qYq_W^kW46Q)YXUJdew%tgp{o>r%vv_xRA%E_AH8#pY@SAuW z1U#-wRr=by>Be}icv1)GcM`)=S}l1fVWa}***Qz2Yxu10@Ep4cW zt3^J5C#>nIiSHu3h4z%nl!aQVebB?3z=*b?I=(JZQy)0+T*6{qQi<5kgIj}5?w&|h zv{px83~rLEHWR~sc$A@aS<(7zKPI3NulXDY{V`H)P_O;#2Hrw4nn(ECBDegS?;xGO z$6emDEjOflzUm{7W4|n(i|IZXJiCfTE8AHgg%nx@w@QEsd>crM7icXl_wNy|lq~_7 zGgPlk*3U@N909cL(U7xftGZO%f#g_?yvx9a^8~t(n{y!@C7MsJ#;=2hU*_GBT6>rJ zanvo_k_6DhHjFknEqR)q;j#z(l+T*edr-@qqT|Wcbpg^&Gp+DInycaLrZ>oI2b}0i zuQl{z+`)1G#HX8}u)Wa=vuLwJN$0f#oZxD!Yk60aHgKN>1?L*TF>f(9FIfyBbpn&_ z!=-;fkM4!oOq-D%a#;`TnvvYQC#_<8D6{O*=&5zC`a@9L{lJw0sdlz&u)EVDuD=q( zSTVjgbrHssd5>Sj9*%|X_}(~rSHkdf%2i&q@GP!#?K+U>7dYDvXBx>r_94Z#eyd{Z zcXg>TwSIr*(2&EjNM##9jrYkgC%>M6)RO<(972x+SYNyV058p3<-RpSL 
zhqH#e`(tiLrZgjKTsh`9GY~7LSh%*@5ut>N+aMw0>-Axaqe1W<~n{Ff4=p-R?{?GAf zFCc%8yQ7kh(3Q{v8T^$&hZ%7O5mVoQJXHK7O$F3dgf9)ZH^02K^(H15iqv8^zFYyE zmE7IM$$m!6Y*Bvdm7Yt!wyl=-x#ZC|VZDWVtb#+G2kz*r{2;V?tfZaAiPu7Ui(##V z#iVwXf@4_jZQ6<;wvcZbbyP|!J--AW`Ci4d#JC?ZKMx2+) zUE60jPWwpywDu`5XD;3NCKU+FKD65?>eWJSld&As1+zIOL2*%AN5t&E`3DR=Dpx_l zbCX(sLgqxU&@sL5d=V7JsU9zBhkM~^3iPXi`rY`Q+n zw{m)~Gq|ryvhq5(YdyA4>_n}Mb9SYN9-}heJ0&?BAN~QWoe!mN+tI?ab@z~>!$%OMhwX03j_)r6 z8(C{iV_ysmu4Ht?Inw9X*vrZ{Z6<%GHUi3w+%`kKv%o|xU){&_CqU`HFP2fV|BvWF z4SPa+1JH}grLC0yIqpK?NT;(`PNe;{;xFgzYV0mYuapL7lY>8Gv*bIFt0iJhLk4f) zi~2~aYawS>y3OWYpXVyd3i?@)YWE^kkl_)y3N2kt8}_Hg-TTIQx8q17X3OE(ysq+q z_HP^&v~4vq(($m2u(00T|Q^@d0w8mN4fzDaWH`tCRlMj}-fDMJta)-+Ktq7aXhSFaNlR<&vY#>dw`@0xR((}|DtkP{W^kgFC=OxBh;VhX}g5h)qkROI*Yh!Nax(% z2bFaK*RK@;Ir@T9jkY(K6o5gl?4^H==>@Q{rHS~iE{`od`ZHjeINUF z%EXxC7G>^yjrFc#&axZG(=vt9{JobaDvvZd)U{YWdK4;1rB&@rPoWoMwxRUtjB6>x z7I0?6RPf!EcKYJufE70=(B)lCKP)JVqwUP>?v*+RNLwK7C=avDjdl9Ms@e5Y zotn7G9*;APW2DEKd?iw4K-iaBs!!}wJF-_Kl_Q*%tmINdMo-&5ptia@`o*Z)$kuqq znwnB*i8Mpe&b?5YUqPBj_~I^I_MZBn$J}+@pOiW_rhP6{H)7YVqSf_ZbZ3;kXQIIK z;KcEy^jGY)iSG7;2KE74ei--5%6NH!x~zbMPv`42!iS|e(#Kp<%TsdSeC~2g+mYLP za+CfnHKlXVs#6@>NsQK{aG#YuvCo5V?LXmLQ@+RaRG#q0qU>_~GTmHJXwIqla>+_?!3CBN8<-?u7h#EiCtG^ed^YwAtb zLWw$ByRz1!w)O6u6=#j!nR06)zdXig-PL@?P}&Y`jj2~C;+_RHwWZSPc20)*XHap5 zl~o`tR+n|7HcctEW9i#?a;Bs`KWFF70`K;4>!Iv^Xt+GrvCz;`@TMI;7(DKP+r)x8 zbQxF2g6$O#A}2>Tu1;5XGd5@EzSbZKu0onQq3>Nkb@`4iN6$k6XW&lOmy+Uiop^{GWi@Er6b zEriY-PDp5SSMKNEwQ=3u++BG~DO8Nd?!|M^+Rhr70T((_(gP?;l(rvG4@Em@(Yo$? 
zH|(h=uzBj?W4x+kdq$?0A(DRBhA37v+PVUibWvPmCEk+ZVH>FH=Iy#=C*9v5$cL#47M{7BxMS z`st(XNL${NtkJ==WpC1)14f;VE$dZX6n*EA$0eZA zPiOibv~rtXZ_CXm*DN%HgC=795=xR6?NuF2Juipm+`(lcWol(QN+7otiDRq+Sg7SG z#?35gIIZiq;E!2pl}ROU%MvBol9$mY&4eiL9OL>D60#$0(*S=sE8X_77o=CR7myE6 z5=jkn3GuY=TqUrUr&8;gQ>8V|5^e$l>Gk|XcfJdS@@zn|J+)CUNqIF9?)k3{<)z-U z0x4FJqxpQ+;mFBtd>O#$AMVzYt2$)^@1_~TnQ?|6c`z20GlH*&J{^B3B5AaD9Wije z{iEE^f*#N1zJQU+YVJxY@zxm{*WM~{+7^4k72}a4u5WRlxl*z)SHLmXQSxkJtpU3Y z*gKCyi+=}ur$BMqZcp*;CDMK;K~_PV-^%+2u;!jm4?(?pOsn9p_Q3Re!kx8wG*C~0 z9vmC3B8TIW?w=R@z*+&M7XhO)FxL{JOR`>@d1~`RhRn46w5Aq+Rv^{A>)XzWgy+MB+My|JP67>^=fcO8_ng><(>W#aIn zWLLKkrZzEuZDoBHqo|cDoMP457_e64tM;@yE%*H10DSfyT-CmJ8Y8hp|H2qM9tpEE zs;&DbAlH{uf%WA$lH>T!*tM-gIy%~K%o^}y55TvuIL*iU*=rUK-|YcE-pWJUX{Xnr zJ8j%P33C&eW%)AiufshLBbPdX#c7<*OwniOzgRXBV$XXNUhW;_<~|J>c4lwVyaQcy9#2Ie8$(@Vo|LU9uXZNfws!sBW@=CcR=cM8!SU~5@mozi z?XGI{nb@cX0_*cxN37Ke4g1gE+wa1Ka=jcj>ywV<_;=e=p^h(roIx2>9p4-5rlA!^t+anzW{)bW@Wl1?2y~NQmwRIasXt@cGhhs0h(`R)g_;oa+ zaA$-|4;8KUi1J6s+-<~kuBR^%=GaInMA7n=e&ec>q+Cdet*7JA=?gTCDjH>UI+iUZ<-Q9+k7cCaNoW5aC+o6D&=TR zV;)7+cLb@GSJtqK)b8!sf~QNH%>8MdMOT*iLT<5s)x2x@>(jGM9FuVe?+KKuheBy& zj6y}J5#0#E8O=K^&h2Yna8%i1`@IPDS0S^nSK+zc55 z+-0QN!1)kdR>?e*;c%WD5&k#7+9z{gy&u!Uoroci)N*u2YbnM3JFdHqWYlXC8r0dQ zD=Fkjm)zJNo) zimiGovPSx`MbzC@)Lsbf`FT1w$6lPZQ{s95^pRUppAMG-%vVY4H=iObk13tTvoS5V zn9}4u6W99dVQ5NUl@{W0FtscZ;yCGc;O`0idYEl{xiZEF)OWU8ctCprefUcbN_knD zf5yww+gz^QX#@Mn>K19p-7;nYjT%ocdIo3C2SRB=dtdzNBQDWyJ{8jS>~w8_Bi5v+ zBT2i2k%WoN{f$xUMc6bOv6!S5rM+tfcS~`#s%D`N2PCMp?}SdP%^6zSv-YyXqZ1hM zi$_=>@hleduhz%Ie@iSPx0r(zVGGh1>*L7I0w~s;vrnLECR`}ZBj!X!y7150H#Npy z=*fu*`|{v$__IK$R?861j2vpIV~J;T*}+wbH;3M2|5UObh$5EJO;E9GP&3jU3D-If zsD0UoQf!^M@PVVt8R^YwC4M6RxZx@d=iRz-j#Z={?|5q7|V(Ka!NBk$pPC{-KwsQ zKV7Zk-P+neu=Rp|ZN&m@)Q&!hx*&QsEl`>{yW+}TivQ=(Q|kWFhHnGTkxA}~-zySa z(Oc}{Qf^H^86CrYv9#GE`SZgL+PCSiM`t(mK)*u6u zb$rz;8Tq$EqqhKQk@r23Nc*9+-JQ!B=#DObMC_-jgSOZc@Qu=J1#eQ1vr0}T{32RS z-7t{&YPmzP8Jw%+yzoj&dJO!YLP!f`ySvvY+Su`SdGtiE>Rjp;Jb&7&@`d%VcVio5 
zDNI-kym@Wn8k}mVaKwG*tMKmpo{d5qvhDLD??!PUvl z)M#z8=aU>~q04XL`yJ5WMkAC-Ih; z)gE*1fU7gw>aL%tf!Mad05~y5;FL$!;R4Q`k9-A&wp>oxJNxj8V4)eDxi6VItSzm2 zDl~LhY0kTgJ*=f>tqQNsC~{X0ZU1;HL!!v9mgZa@?cm#xDWhra`%^j9JdXf-_WQN+ z{U2q4UTQTV*K-}i-Q6_R(6`a1jYvH4YmWUA_qex~zLo}Jc0{jd+(S=|=8M)$9eF#y zYgt;QJ`LQehu@>G+>CwTJaFfSYflbH7THI45leVR<`RToFMwfq)_p5zP z_g5O4-n2K=o?>%7G4vQYQfHrT0Wi*`mHkI;2GqhW54mfZ{Bt0d*jwB?(C5}>Rj-th z*AA|;fK@40ms&^ow1n31ejE8IL9#?2!J)$CUxi#LGafn<(!H^)3fxPa2f)!~;Ise? z!lH!op9uw?hbD<7bkd|V?y70s0#D7!hBohEoZ?)lEm1o@KZf%(3yh1$y!X0|=eMcB zL2PlkUVBwlAI zPnf8$G11(+FOa zf;-Ua%ZRnLHKw}P7W^@B-6?W07Q62o!2fk{o}TYT#0~3Oso9yCx7UjFg!RLU%-CBC{?7aEkkdyoAPbcRdK>s$<^fb~t<3`-rcYlL> z9Il?z3iU1H=@q^;Br37@+GYz&Ss}Z%7F^w>?96k?^U4T`ybS4`zDHU6;-x(%>CAfQ zVo2gMDPM28_GO8y+^pn{+~TUHv-V4pL>o%HB5|6*P=CVZBlmL4*Mg}B4FU6LbVu+m zr!61Ht2~1ib%xL9z)Jj^MK6%!a`p?PO?yj4zI8}?yNWg&Ltd_{DnZ%q2(GTOudeoU z7w$oXHu5|lo4qr1;U6LnB`kbm{`a?vudZ|KNbMK$Y)>aVZ5>;&8Q8JMqYgNdI8xd? z;?*O?TuZx%v!1Zq6UXZxbktj0RjcZhWJRfct*`4n-62FtFq#@$17(Gp%)e^Nr{cAi zn2!+Vs$>72=YQq(Yw8&_MsIBV3hpzQ$JGZgP1rB3aZ z5}{4tMh~_=S9~UXb$={= zJSym2>%XhS@)AaEo<~XP-}Tl~PgmDC=eaYOxeK_jr5q*TKZr4hl;`6;xi6K|kJ{Fz z*?6w2kDkB%?1|LT)vQxFqjY!P(-!E2#*^YcEphg0r7E?!bKLFm2edms7(x3ZzI|i#&Z_7Qw@?{jM4?*HchXYf7{!Q94TsL$& z4nno^EK84N#9G#B7sPx`_p!AmwsA&ll(9-#-{EE`-PK@cCrRo2h%8aEewCZRhc@$v zU-`!4U!n%e=SM-rMPcmt~ZTA45_USkwb$a4R3=@Tz>9y{(si5*AIO}>@LKM(g_ zyUSbK7gLMr|J7lwgJ$-jUgFUC9EHU9rt{t2S0Ux%e-mE-GdO@LCk zGQqMttycl(>c=;4cTjPbmig_6t@I{$-=d$j73tNTQDk|x9a5#Sl}7B$O@hN6 zsQrWGH=t#)@H`wgowD48pf|8RL6{{uZ$vHb+(^?F$oqYGU%qIdg$g`*xN~X`cuh@s zBNYEA{jgX?a1E(XqO3A3_bPfaKY%y19d1kM&f=<1Qg5O8_?}arPwt=94(lvvT_249 ztu2uH7`}doaQ$V%V?Ww8^zQh638C(-^*59{CKPQ;`tYR=J;ybKRW#^bDo003-69`wU47a++}aRsyF3|O2?mvNXbj-0=hmHNui+|U*N29MU>Z3uqm~nR((B<&e)kyaMY|RW z^M78>TyPZA)l+M^HuG&a-t}Y6CFD>jQ~$s7m>zD$NjGd=V=Bj;>!dWR1os7~#J>7yB_L9_1VRQD@WQ3+fA^h%?ds~;8OSxq=mv6iKIJ;JVjVfSe{4PU 
z8Mt@u%0bk>9Wgclg*zMjce?|De?Z#Dr;--&pRCK0U4IGKcMjHlQ1LwM2)k@UcarVrUrQ1vr9k(2JutDV=Aa3Q;bTD!yDpD9D`S6VcuyRD@S7PIBs19=8z%WqwaePbYLV8odK$I|I6me6;AaxgB|S zJn<^>GhZdHrMY{&5~G3V2C(Aacr#*X5ASM`^d;QYb3ba8_d+1H>iwFL*CUi%o?Z9BCYfzVV++#tR)7PjS_@ZE5{xh^-QTU(V%M zNSA-M0>bwzbrr#7G3B8*ILerqLd1%&>$?aUXN*th{LdxC2yp0IEr%khi{L~Et!3Op z*2i<;A$Hu<3a)|V z75hMPy|KkkU?`{EsGkydbBf`6GpGGb^|R8L=5LPUI_e3$#>vV?EvXoX{H~= zKMDC4r*Y8WjATzur3K$kyy+}}k;H$B@|FCqC%7U}gj!T++*P5Jo_q_7%y(zcv~YDjqkBs@KCC4) z2ntt{8{0z;58q%5Z=<265Bct&3JcWiaOJHooZ}vv2A5_@a$bPTix<;=pie-8fnwrj9;b ziTe<~dn)aHZ%py5$Bx#gC_BGa5jTI%@6q1A*=EwSyCVsSl3SmWxIBTU0{*~N+^wUd zM=fC7-l2UEHJjdI|M{{RzL1{ZqE(!kto+b6D@WVgMh_(~r#jhHO452rm>1zry=O&o zvJOSA-t|ss(N#e)L&04??iZyNm;aQ>xzzV9j;*ctkof*>vO2++fA^l_A}>jeW4Lpy zI#bt-XSJesTM21;0ADUAm&o(T8nAtXmllNEvGA|E#JM-uK0sd*i72{gmX4uqD)QV`^th(vFIM z+lrI>NUJn$;*{@t0<-WRjSQ+tvuWBfA1P`Pdhjr0m6oek&v0nwi^TtJFM2#1ka)ZC zW;=RzMArpfIZrYCYr<&DhL>?Vse>-G-po1Hu@rgIvlm{G+&i7W`E4u^ghk&M?fho4 zjK7<5P}lZOPx**+=utfl>fEtl)Y-|g*V}_gs~xSn*wQB?cmhJ<+4;yrZAUF9J!5Jo zJt%4nb*MH=SW1Q(&qLdC=QxqVug;R3o?6a$<{20;KXcEIii9(KuhS=9pya?bcRAl;iXuSP=m08f{<)^0Ge+L`T-5b7UZwSX?=6ifSH z$-Ba`5B2GYH^tQ>&IQ!^bLOU_rRNZHAo*QH{?1aG$XQQb6G$m9%tB@;`=fq`(`L0` z!L?S}c&^XagYtWDSdqpn?9Db~X}ChD9L}_bgv?n9()L8SNr`$Etr@e~{du&wBNS@G<*;LbhBVL+0z^-24OwKpU^TSFzA<0Tu2V~NU5EdjkITAcRlTF6WM z6+#Tl-@O3-KtX zYrqsC|NT`f$P@)mZRQ&iPL)}XB1ma!?~nva7f)qgg!mpuJcSg@!`kk-l(Yvp9XRFX zoh=7vdgjtzp3X;_q>Z**{`J!y;JWDQt%kSflWf=ouV}?ifC`+o z;!ZP<(squ*M?Y{DR6h`X?mX!h(mliTU3ibzKsD{?;Yh0|Lv`+WI~Qpc@cf9jS3kP{ z)L`yS@JV0Fc7Mn}@Wqu`9a4Wlo#>8ip-p$LO%Zx1Py=P<6i}gZ-3|>QYKCA(iZn52K9t!D}<&1wt>z(g^S4NJ6G_dYCG= zKp%oHT_=&_Bw`fOY%INP&Usw}T=r?5zoP7NUEUf}2ys1ETX}t=t`bjj`4H;k-+O#-I0?MD9sJt8B`%$~T z+#6HA&UjGXX?r^7#?`F$mX#aN5w50{7Q~IPIr0{3L84z0^{s#t+Ck6iJbQcofixq2 zZb8|G_2>{=em**(ncSBpj*0Yod3|cLw1W4mNLM7>d3t4(;By?(x3J~!`Q6n9@f_Al z$e55NS}VDfFi!E4+Qn#8TiW>g&$;KN{PR8X)ibMgRwQ?At*V3#pXE`{ol_bs>_Tz- zSw}f-7_!#gUj3(NkzOgr(fj z=u(7x?VJTx%+G&(xE3@dp}p4{yhFBub45;tZ}s}v$MO%aL-5%=l5Ep$(B~7R{5AGc 
z7i_7c!9>_{wz0ioy*|3(h>D_nonoboeIkMMo_ zS6<&f4^;LPZK+GZlYM*rFN3ia^~+2nd+60 zjR(5e$x=^Gkt2FG+WcWBR8WStRO~o)9Ah2QmOCBWcP#NHe)A)C>MwrsuIEkaa@G|s zm-^`L^_(F`o&TeBb!WJsSLcA{`(XNi=T_UXI!V|8oUJ4+vXUDD#N@x#X1 zHQufdtj!6_;fbh&a`q?0 zdC}6UHmb8=eUrYm1N4zdyN;*0V~-r#NE)pJE%+s1Y5&xI`@?z8^l+x%5JJjH;m$MV z>1!=tlvl2g>Q9>OiJ~(oSbo{LMzJ$_LkbO=h_CWOWs2#aCGr$vT#ezsWFO+Y^dW>1 zSN`rsx%obWfj{Fr+M~B)JhWU52Tx7(+X*@>PU3-}!&SF72#jNWhqvTYOT z*I~(MLySinxy$CWv_mbj=mK!K4La7-BaW0|!sEOEM?U8gW(z6}9JTuqEp;=`3pn+y zcco2>eA&diIz&0?D1%Zcb`G~@?vMU3mW6x5&1mg)h}~7#iWiX1*=o{2OA70RtSBNa z{PXRvNnJ~6l|VfO+)m|-kYv11#v`UWvs(rY>i@rrjP_JqqyIBH>eyFPJOgxSN5L(0gX|L@)HUFI=`&F9Kg zN4X--z_xZp9y9KhylXe^eC)=lk;wvu!<5g8?QYTG{v1^$Gbl+~8v4DPpizK*iO8(v2HyMvv7(sKQs zy!`_CUBtbLGR#R$JrN$O05_fCoe_NRNuSCdV^{Lkw_rQ<$G$lm%!Re*J}(QfN2Pzq z^X=_*L_(^0_ouJ(2(2Q0Df{e|m3VUS>Om$C8rpT zKAd{hH8p$FY7a1iU7z9_P>9X z+Wx;d+Ioa)N&P=zSLR;pkkh&J-Wh`8XTW2RR88}Ky4%Jki6m9o~I)f>`NEm!A% zxCS~SrQfB}dgN%kG_K}Irv9uYe76VS{yvL&c5c#}#1_w8z|OeT2KqJjf@8F!2GJW( zM!HT%FSc|y4C`e8&>WWX*#IV;uM}(a{0qIjZ#xEUK8bb~9<5OS8Q>1W73Afp#zzT@ zElKwEq4(BI37rX(!=*Vj@}XEO;@Xzo43xizYrmcPyG3HRz{P&kE9&8&4(x~RNgvz! 
zI*yp?6LW8dzol_K|5D(tOU91B_j!LP`FA!Xn`b@{*QQx*w#6IN)X}z!se@AAV=!|n zgDM9m>}&(hz@oNOd&Z&s%YiiXBP2<0V0?@CHLTFBp{M%+P?d3N`@7~?Jh^YL9sq5Y z9=zKEN|NKS4};GQl}F->e=Ben$!5Oh`pp>1pUH_DOO$>X^^|th=Sm#=oh9Cfrn4Qb zE7Vow5WM4CSV+px!;=*mHO?hlk?;TiK8oN{PftLpzs8*b>j~VZM1^sKw-~?9>4@qV9fa-;$0(!9hcRhm3A}mIy^1A0OX$-AkAuwJI`4y>; ztIXxhPRNF~Jl)}_m^>_w?a#B-vbK0{m~0`bjZEj zryiBqyb($&~N0J65}Pe+x7*8T?xZakN*GDhpes zjgX6frO+!#cxB_2c>g95Qj^kaoBCH2$3mT9snnW5+uGN3*9<+;QDRxjXMQW6jcpD6 zN3v;LUDAi|1z-$shc=47MW?}X2Kf5L-gv^M(QqSL{(FhZ_#p&4_Fgr z$yA=C1tp0VgVvR^0!oD05=sGUX)kI3c@=n56RuAsXxCgyeo73>Glkx@&@D~eqh}Cf z3ch(nE9P&dl&(J2MC&#W|9^iEZL(`V1@bSYGTNSYjp3V^{^!sOtW18@Hpu)V>xNWQTXjfl3ZLwOxOb!V^%NJu z$3A=L>2-zEvh<}ry&LAqVoy!t{sdC53aXw)d9F0^&jR;w){*LMyd`2{0~)a%xTs6i z?w*M9y*YQ|tboJT!qMx2<6>%iR)VR4t3H||@s~Tx!f{mlIu&>v+NZez(z!Ok63*y% z6u1vhZ*uS=N{j#9)FX|wfG$n^zm!oZ^=Cf%q3&?buojFt$?IiFB4n-i2#xE(2}4zU zIT?=KlRC{vE%g#L{D^O^2efze8(?(pzNy|w*Z612P-^pia<5?o-WkpA+84XHHxo;$ zvQJqepSkI6A+>OR!G|f2Ip9t@vJEHk+=+N%GQ0s*gj|#8#hTbx3{P!n#5S8kE%7dC z>|ckRuve__!_oI#b~tY0nM)J%3Y{`N*;}iDQfth7)PJ^(eQtXplYt~CKCm_yICqs4 z(!dbH{+}Oh@}TXhEw=(Wmv+hhvtk*4cWU(rExCza!yZ^_qZyYtiGPkBj|H~|%5*i( zv}3`7Q^7rsUyz2`W1XSnq<%^mzTB_(U7wN65w>%9q*P94t+_;qy9@LOz5~*%h`z1g zEWr^JS8v&;7)2Y|OHqp|F-_|qnd~QM1s1vcM{@Z}zjb&jw2Xq|Yg$9>OW5BDn<*gh z>Tb;wxW^&SwM`91Egc?wpT zy)Pw@d#X8C?Gc_m-X-TUzG^MH#$L|V%8P!GP;BND6D@qRX659sRTg;b19s0O&e)WX zHk($7dj{2UpPO`b6V%`=&?0f{&k1`GnOcuz7Q6Pdv~ArV&)xd{GkBKrchvJcsokyN z0%Ci#P-E}zE;$&=i8rk!Lu19dR!4@VZBOV(Lbq_b7n18#om&*L%~1wZ)N(xrJG?-6 z3zRt zYvt%x-ixH$%2#uab-a%QM`J24O4Wwg4f@|k9AkT!@3z+G2x+_KV@7Qu|N2SVm(KFz z)9;3>{e&)$qt|Q+t!N+EznuIIN%$H&L)#xZ8SB0uBjh*YEmVSRM3$U=?7y?aO9L|F zL_+sM_MHzO|BYD^{n!QOLZt4`=>z|bujg}p6xw(c9r`gc?KRqD6~_{eckphGL|Yz5 ztN#-Dd^{G{n@GUPshq!)T~9@%vQ ztuZqlE2+s3p@%h4++oPVSBUSD&ReAVE-L?H9?!FBC(IBzxZ=-a8knV_o?9pwBdem z@knwrp8qo0NIugzujA{-VAVb7=77~^O8*mh{t)b`$qoYAZP3V9!NXv1`ZMtK9wjb^ z7W7IlK-V4)#;)Z!o9`!5+d1Srhmy{tl)po7OQD8kyse|GYpBn!Xu-S5e+)8VI;DRD zy>Aov2XPb1Ffp~R{1%eBzvBiIrPfnf?Io&;uo2VPI2 
zMmHc6O*0;9{~5KJ3Cym)If3gblyNV3xsmf%>ECNdfbln>fxnR30`flx+BqBO5204i z(kgcV{V3Y}r(pa!DDUfF_}5_YXhL3S7Ly{P_m7 z%5ot18`xOR^;XW!)bX3N<&SxjZWnN##2lTE!Pg(b>JqSX1{iOkE&l=rUj#cd6P53q zxb%o*Ic4`^WNh4S6r8W-F-Q736(4;qOE3rvTv}5`A7no8Lj| zo|N}DDEX^kvV~mkBLCIoxe|H$b1?R6PJIwv!P(24zo+hBhr;|f>J4y)viwb+{zm(} z1r~ow$*Z8BCwabyvOh$^oeShI;L*7s$@?bQ|0C_Z1^hirotlU_4Q+lt{UHCQ@IB~c zDCO_q`C#md55Y-eD&-5*;1WvN3z&Za92DexX&j)rQM1NU9jb`F>wik{yW z33VxP20_2BkpXmldAsf8Mr5qoiNE_w$O#VzU=nVcjDPKpBq9ko&By_XSc*m(lCj(v5laLZM$kd)5+S)k-|;6^d|k z5c3gJ-;LvNVxu$nZmCwbw>!_&5dM6U+5x{;YaNGvnf|E`+TecC4!5`SzdnS0XFK+@ zV+&E6B^ytsig43J3raUng0)6u*<@^(KR_i5k&|lXOQ0gxvYw1S{XdX_ZvpXP*aKgH zrluo3oO$za=&2WxC`!#QLS5?b>1gHE>Tx=Cn}VGE586#VGar57+Z1F-*sPC(omKZKvJq3-uX*N;;3)r2;o(at3H zeY8kVbi%{wSd8|)8@>1#SP+i`p!JpLpc@IDKyDMk(lzMD$2he1{)$%JPI-p`&*R|v z=kVXF@X+~C#$~+!k=Cr(b3nN-aIHlveh3YnM4peM`}GM;fj++i zPW}R|{tb+M6F3IbT7QH#XM>5mp{Ab#$vH?d_4HKe=zetge*mGqmdQw#y=nOxYNj;U z4n0X%U!?4nV6z(~eS_l!(*GV^`U2E`BvMbxssWnclG|(0U3=*I4(j=JQmr7rYq=i^ z?VL@HLrAGb_BP-9LLKiD`WxU}okE{LLjR8E>&WXB@cuB+jzzxx1G#P~{|Ti`fjgFi zhl9Y@Xt4Df(7r4dB8q{dPrH5$*Dc~`w$$wM2!DInN48iw{X^P z$#n%Sw34=c1X!QI7W@nCdIB{$iZWjWd;dvp_I^eI_lszWOM&fWj_tJYI$|D7315M( z8fniZz%+$L_+@N>B1R_OOX;l?Fg$AZbrfLBWD1vF=pN3R6W9ZiAdM@NIbYXQVad z<8bc`YV0^*BM|+TryqbF_q}?SHn-34UFtCx`F%W)J_TpIOwMOuSzL#VxR*Fv$nyi@ z?M=*EXyfi+V>9$N6D*zw<*x+uw@|kqCj5L0oq7(M;96{+2gqkX>U9S4;C@OTL5lsr zOfMk06F9d6@w>EVKk)n#Z(5Ikri2I3j*mmhO1KliSugDBx#WH@R_X7cu)l)MkAZa! zbaE{%HU=7a5zVrl5?(?kuB7Zop{irxg9a>;8-cDn)Hi{gKS-M7ePU0eA9YZY41cCI zmqPpFXp!eAL&{kW4sCC}x4og~C%|wM&@};*a?TlQ-AV5s!(40UAARm6?fKBjFTid! 
zWnF;9cRuOgfpV8q<9_J*4~g>*>7NA`-$Yu-52NAMuO*50Eim&0G&Gbv-=v=QTAxKv z>!Dc*C)`D;PjWp5NCr_ecT{~3-E|r5@QYN-XK1HWsrB_hcrtzJbD_Rfw7Y%#pMimc zz{z>g>Cegk9NOhI@_i8q-X@0$(8lqUy@Iw9()E;eG_~kWoIA;5EN%A`G0%mf^xZ#0 zjZXyAS3?ih^Yh5O^O47wl4>QjI~~~Gfveu4weCimyo3Bcj21hC8jMCteuG*E#`{tF z&%x0u;=Pf^(VcJdAlP0?XajY4oo`=7QeO%sh9$hK#gZXYw3$Ea7)5nfWUAOEiPV3_scrzN~{b-IJvD@Ov^eh%n1O5W{)#*9nOr%`V_`x`Fi!=_Bsmg?7$P z?`2S8Gc|IhO1s1@t{oB57*Vt5pwB;}M4@+;xcgAq>r|uL^RUlhn-#gcKV9@G2Ex1g z+&dsyuI7t#m+W^qs;I^9e^v(}xch_WFlK%OgNR0_RO$07`6C=`9d%u`qwP*3ae^`2cSo}KKSx$wZ#eA9>aBKbUv zEVZZb5xM=Ic&{VJ)}l==pv;fR{l`eW=a7i6z?UVAM!JRMA*jQsl=95Ro3ZAb3vBmAF~-3$EJJh{qK+0OS65y zzwht&{omfV_v!VzUf1Ob9;lWEGyp3e+vGjg(wbe*C^Um82KurXZpeNz>7>@969L#t)5I=t{|j#JBOwaw#K4&*`f0`)$) zel=*@=>aoaJ=v2Fk(6Qh+(oTt<3~AanXmnqwB8r>o8w95+ux!Ap-G#!h5!@+(D zELMP{4xWrqdWCkM;xBZ<=|i=+7LCE9}UUrh2$w-d4IfY2!{Dy;msUWU+Kxq{BMHtgSD1^uO7RxolGo4 zvy3HbvPF}$ay`iyuKp?HKWinAqG}yiavD*)!dtr46Yr9N^`xW;sB@;)jY>BMNoM+A z0Qq_}X@Ex4ad|0t7m>(BM7{-L1J|uh3f7 z`Ok4Tqq%XYI~Z)&pyl=CC%Su$tIL#K#uGZ1?0=+&9c1=;)}=D8RcEt@;aA3fpW^i$ zB)zK^UO?|lz`q`aVhh?Es~toRf6tcZSLCF zv6{29{19{)h0h0qFzajUXkKLeV$^AYw!c>QSn|Jr(cjNP*(bpBN4n(YXzHb_!n8K0lQ7E zy$rsbYw`*n9Iuvs#`&4`8VmQa;zQ;E zcKDxF)opN{Sn#Pr9~b>wi;~+|tmBGumyypmVN=Dk>$P<#Zmd^sFpTa%i^lBOQmqeU zb(Vtt2`w)LUqf2mktCO6=*&`N6n0i&ua2cHfng1M`#L(mM3Yu}rW#oqh_{<)Zynz| zKz34r$FD*C1bV;W$)^iheTh_0Ly^nS?kj#kMrawcTnV}+s1`kW8Lfwb;v}5=PcP&@ zeUbQonU&5fv)4y@;`@FlLU9lDob6iT0c#f5GUMOBC>wwDW0I4z^y1$qLt?!3zqpoj z4H6}iI&Irvs#I5c+zLq zUy_TWy7}7bo?B5onJA8A4?11XUSgk2iIrfREZKoQYXI*Q-`^qM`Ts4gXRiOpti(xr zwY~hF!1wtzxxV;oJd$lP6wiK&i~-_7XHRG)QuR}Ia|r&_R3^3M4A>H%j4O7<&Fn93 z>fSLR9qj%@W58b*JGME>kTf8Qynl95#Q%H%}s-|0Cr zhWdw+yMQS%VmFbg4aI08e*AZ|9q&HR)bx%-4)Usma=UsbC-}})ri?4&`DbtN1iwa@ zImnYGw*5WxO%z(WT2+fq2H$5qbC-9yQgM!JS&zz2m$b2`9zA2eV4q0FWBdvnx4C!Q z*DVCT|J|#ma-)l~-~7r*wS0PX@hjZP-T(SJ4mC4ZeV+Co;f;*aXS~EN9Hr;&N|Jh` z=)G*h+wQJnar)4!#`>s9DEg`QKUt-T?9JWE^wTd4XX`ew9(7%xqz}J`)}P~ELmZjR z(w#ykrx#XiSkdZY_vW!US%bZpbY|6}GktD`;_s65`mDz$9@U-tz$%_@ONu|yS55(0 
z))j8?d_DFwwy!hGxQ3i(u4I|(r|HLECgrnn_fyi6Gpo0$DY2HD+0lA>nY#48r8Yie z4WH7^WOSH^4wHOm)@L;cnyG&*-e)dylRAde_}Xw7uBSLh8*j4DiI#bst$v+voE{`G zCSRiA4^S??=oqCkJH5bjU(nXJuEs0N=z1yGCh(WPqkMdm6+TC?%Prm6q2}qXUCItl zW2awN%bn<(_=)iBEWO*IK2OE5foOCmo6=tY(+b7AvQH1Gqb|$w9IlSX!^~(Uir}-t z*E&}1-Ic8jqq{x%J=ng4-#G_%7=G`gd_DEef?G#ET4PtUT9H1ttyX>viYEB6x0-u< zra~W+`P@I^{2Cbb0e_otujZ=neRjACI#u_*6g}Ii>l!%CA} zs{pIDx=xJ)z3$OkV#H>Ma##bVNlMky_q73Ke3y=FLU-6t(sDfV$6d`VXkGXv+NBl> zEWqnIxP2u%)c_1xyD8WB9@EZE_|p%+C&2er`u08wz7F1xwVZQ)TEMP`Qi-(Kh)Pw+ z=KlED7pxD1U@Ao>ZB-~o^(Qs*Kt4J9d0faG?NCYGWu>An-s4uJ7X+Gv9NgF%wD zsOMoE53-AEtMD_iy>m!pb3RsWCF*E7XWPv!C@_`>G6sz=_6j!!pzTZAo2uj*GSdWP zZ9u#pJrigAHD3 zX&8)B`=R4p&_7M0dbzq@>!Dc>t2U?dJ%33)*3iGq{5A*8i*zoMc(WS!AnE!Z{dU zm+8AN_FN5h#X59{$LC}@GaU8s{&m#*HSW$K=?mDUwxqW;UVM+NSJu*F_<0Em45W*< z<8vZ)Za~|Uv=DtgMIBvG{WN8>lWRE0E(G`Q82cfG#a>hRg7f_ox5WF_Kg^3a{d`kp&y z(}CC7nv0d_5qhB1Z9d!c@nT7Cpi|??>bOE0o8!(1^f*b&b3Ks}SO?S@<$D3DyaS^R zu4kqA09Yk*t`~?G@(la=&Z^fx{Z9YgQC*Ya_b!RL-}P9kapY|*?%hvvdXvlEBs4RD z8&GAN7T1%V^WZuU)PeYUl>a@sh-Yv%nC_)L2YIdup0sj*Bnl)Vs!`!TCF^edsPf-` zdQ_#ipApDztV?pPBqwL~$z|^LC$v82{v5;G$#;Cqa=*>)MmU*SPR677`}-C5l4*YD zibb-dR4P2g-;lWM_1xKVcEXf3X1f*k{HLJ739$c{lGRt57c%SF?vKcN1SWnm*meXG!ML zD&C1SD))<*s|bm|&o0p3`ju1hD;YO)GGf|&$LDpPcoCFYVLB35lXLLbt{+0@_q6Je zQ$MmBH|=F~n_pdvky9hKH)|+4AK-7U++KL>TS-J~(CqK&WbaJWUQ^G62N^fyvm8H^ zqmiBCnd~?WOj!f@DNH)yWU>mB`=(MV^o{iOES|6K|NC0X%2u*bgc7Me-<8cfJ_*?rOiFi@>p120J?#!(~YcDV!g8?H&$pId-6El zKTGMZc#-paZZ~(6nZ#JiMB5}b=|SAg-h-Cz^k&U#^K#axeVCs5MD}ExXA?pGSC*zW zEA|t0cI0slbbW`B?Rvd$W0r2Jf2;JbS@X@9ATbCzNp6XnzhdJLV*OvyRtx`oXfNLK zL{Fc{ZoR}?X`qe^wecQkhWdR!7<=n~A0^3;=;s#~w)q-uq^Iu9ws+FX$1G}fTK5}$ zVK?$y-)C%ma>y*^FTJAv#00EEyPj(3Oy5J_c!Ks#>7YvpkThU>?lc z90KNiP=je|FvN1tS7j!7!So$j<4Dhjw?yhfwos{h$oZ7CZNg; zY+dxcoV9%w&WTBR1NRaS@RITS1lZ5>Ox@yjfO-xE(+%XHH@L26^B(|5A2eO5pI-sz zL>_e2R$_t@$+Vc)@EdKOsN52=khrdT_>~CfKNjA_U4FI2%@z96W$5&wf471st7h@~ z#*m$E`sn4_dl0wYEhsjMR=%J_Vr_Q-p&D72c5`%j}*V_J4DD3*b#HJZgU8%&1l 
z!*g$vI{^J2fkkh8T2biHNYs5+>12w`T4+`-h7~^VVD#&RHs#Ks&uQNew3E?Z3vyAD zB=<#|>U{FONZDjQNHq`*)83C^H=Bke(lI_|`TuBmCDJqYe^Q zm8)F2iH+I^^uPB^Z15Z-vrS55^qNZne$L#ttWEQd3W`HsUO&fY~0KWkK?5uqs(Q>PEh6x zc$Dj|b<|U#{iUSjd45P_eS_cG?cCnDH#w6w6xQQ#_1;M*tEgiZ&FRd;$ja$sO2iV5 zAk`O=$jh|3nof*mT^5t&Bk;7cdvjgQl~2^&o@8{xubh99c(zN?YCMT}q!_i%re!Pq zdJN{{$m=k4Z|t4X-VvTm7WM43xD$-of4o&atH7MhVOdA7udN5zh3}%)9=P7v-K?jQ7tF_c{m9RZzqwXfmSg-Dtf{>VDF;T^&t3C z`(tsfJGdgzi^yf>!77x$so-LFvU??%deXb)g&*G#g~zD#1dwO$>==;0p{5Vnv~47& zB^Wa5ITmNHN8R)BtP>7?skeVb`B}7lToGefQ(HNcvO8`jmMt?NB;_8I9Yq3TJ=)QNwys{n4&Moy*o~!Neb5uV(B>zouz%sje&fOI2d-o< z_~yzx>MC>9i4W6Ge|)Kp>NU_g>qU2>i`)%* z^hUTi+V7ofQ%CyU)YJV@A$7f2_^D}s=Ze%$STZ)q*)5sp-+3+H@psFS%C748dGW82 z-Xtp=SwCA>cUQ?a%VIRVmPmgd9FujS-IWTQzjEyzHD@H1alip;$u6B_ zYdym6>=S8JJehqTjc_Zq<&|;W9$)|diFz|~Os?SmUtddWfA%|6dKMi*r;;+YV0?_< ziHt~|*oR1DVz086midUP^f$ZPGnTjkuEDUxIb-QW5GGT}DpHu;DMJbhpRV@Pymp=l z{=O%UU`Kl?b(UT&YleBf<(-jlD>Ws$?RInQJF}uxWtYoR8MBp`mSe??=Q5(nyNn;K z0sfkv-_yu5)C$G(D$9hY8IhE^WW$%+yt3MrGQm9b>yHbG3=2HraMpwNhi_IPvjSPJ z4~6gj{m%Fq`=6oT#Il zX!N@*Z~WQE$^9DE^ld&scD$WNkK+#=3XgEHiT?^fW2jvr0Jcr%gqP>x5m~*qUZuU0# zyMig(; zUSSQ2^aA5JwXV^tW&isePuAl@v{Fk}PL|Q~_y~#0dY5KTgUMJN7>c`_z}JOUoT8RL z@`gJ4d;pF5YUK=G;bNFxub!+dUge2ha3t|7*+=~VsJqeU+Tfk8L{^p({d1N3 z)*+esKGVA_M4x0r8K>6WLH0TxCpQ00^>pP|WS>Q%IopGy9gd{8x){{I&>Nivn|R&X z^|@M$RzXGcH>-$56$@YrVrxe_2v;DQ9fS4WOWZ9MZ2QJ(R%XcIJQaYiN@lKQThj@A$s#FoZGO7 zM}g!8zG*y`F zF=91Xz06-P0!207*>Qh93S{)w*^`OQS_`WCz?@w>1GH6DZA10w_tAl@ggk?;iKH8e zm+!F&waHx87_u6;3 z@5^4FDyVmc--p0rxcA-UE^|_!;>SE~HDG5(;`=KsU2k-50>aFpeacqN;9JHc9Rsq& zM4n7%Ci^~+)NLXSD?vV!G;A%Peh}wolbowiEPNlTD&FE&Q<$$!uh-Ru^rb1M}?fYo&Z@xIlk1xsdL<=rx^3 zm5eC^(ez^SI7gZDQ2IkQGwZ8~s2ghBISMXw*@WcqIM~%>E=gXH7JAX)pze(0XQ5ed z_patye9RNOSpBO|dlYPci>}R4A!|-;LDW!PXVamZ@nDu((z_p{#mt(g-^uQ~dkfh+ zTrGb?#Shh$xuIsPM&?vwnBcJI2_IQS#o3ugDq2|V`2}R~WmI1V-exE|$1`wRgGkdV^{TiMZ{m!aMO>hkG`_I}b$F|9Un5&~*Ezd2?IHklbSMtigWUR`X(h>f@ zPmUX-S7j|{z3->=Gtr6%f;DS>8My?Ww3-~E8O40_DzAR?zxV>O?5>D~4tLw)FGq6Xg~C4*hvi)8x{XWVJZE6(|l`>;xH 
z!t|%?MJqnTZ3QG%)tdHpElTf7e|F+}j5@;O+wt*d1=n`w=nD2t!BU>UQ?0-?yM%V` zMk=#;?Mt5{f#sTFPBSQD505G2GZCWqsz0%t-()Q)<*=M#NOWbm8)^K6ttm&{UGcw$ zfBnCffpRVIYXABxnKq6po*CpbGL-NCeAWEd|IteS=e_^kzsy!IWdAS3x##qhiB^7{ z&CPggDC!PF#ShqkU!n7t=$C!%vq3WkG|99wnbqFU^>JjgLOomQdoTQL<@u~ft>-07 zW(U{vjytno$9P}GF5JlGXLou-Egym_ zZKC(HE^t2I;C?>IYkG&7tYr4Ewp40Atz@liHG6xynllFYn`g7GwXEpjvRC1BmOuL$ zviIUaR)32ge3M?NqZ)(xSd!j^t-p^iH3Os>r9Mh(TA=A-wVsady-}|kEVI7UkbPdF z#jNYR369fIA(4^E3YD?Yo61z?Q*`mUPJa+jd4%UHKrsMUH|if}sQpTG&R*p8THB=U zP5QH}!Tk_Sfpe5=jrBW;rpwr38}Fm9y5rF&`s+qnoCBBKJ$og+zw7mv~JyGD7px;JcHo8*}e3f~Zd*j4Lnq2Ne z$(;5u&|HU$-ND}$o#T_&(au5~J{zWe)w&6lv#!+(^%KK)7WlqEu_4MOMk@Q6Q23H9TxNg688}- zKGE`*U`veGq4@Yu*kxqV3T{to@kM$!Nu9GmkXevVLA#9yF_yI+qn*qNEK<)fb98+{ zl9SISp?|V+W{mO`T;9N|U{arlksZ}p35h(fByX|L7ihU5+*)b3hbsell*!Z8PRZd1P>kq|AT%{^IS$P8?@RUEk8xSWWqQN?boYe4IGl$s7TS=d zrFe2QnrCIG58IxZlRk9mcR1Ep&0}yr8B#|TEe&?%c`Z!rHn3 zAKJ)FN#;2^z@V~PGfT6&;6M}F_yTMjxtle(`0~%OC|QR*28WiZ{aDl+kB?ufF*_G) zqFiR=KF6PKD7d=d=`WPen$2_imF$~LG;a1+C%aAu9?4>5vpT~wh%Sk* zNJiXiX>6pwmut07Gp3*nKqwnbhYN#&91%$ns!{Q3n8db7H+>oZz#yE_}mO`-#a={Yi@e1WVc z4zVxFE`I1?qAXB15fPFx?T^$WD5Tf_mVrX zJq;LyzL{4Wrp~L-<=Z4Mk@uPZj7*%Vg$i=e(e>@PpOwC`c)3np8S(c=-BsvwGJKa7 zdVBzR{2VtDubFJ6Z;`usdh20IXC|Vf8m`CVw?R4pUdiy7IF*MAX!^13S8F-@)OLek z_V9M`J(HfE>-j{UoaFzXJegR|Ss+c+Np!Ips*d%6CWqlGN;N~-MC5EBsU3N*r7xRP zgeT!tBXteZSMREh3A894@QKRAC*G6pKI3kp)v~8Cp6*R3Sk{nnS<07rA!g!gYUfAF{<845Q!4&>{OFVDX^^R>8y~JtC)MB@XlINYdLPdf`;w9y7N1A z?n_&;ewekW?OMr5<_vPySlw@Gskz@LqxC}g978X2TJQnNXVomPTlFK)_-^XCzqq%v z?*HjKBk}T;(>9Wkt(BUy%C;+>4sdTwaX)Ku8S5lwF=rXqWHVQi*H&IV;J+v6c2gp` zB6cUO&lD>_zvS=N^8X$0-}s#@4>`T3%n1^>vr_j<^<`!CkL3E{0_J2z%pR`qkkQd- zP)B{`sBzbV<3Ifujo$I(&iK;I5M1o!9^ETaW&4|Ni6mH2?0vfs0s@^s6n8c24WS>?UVf;fo8`K-0XoP1s={=utudUV} z0_{3}&iaFPu<)r&Vbj9LHr9;zHfzca>u8eJ02*j&lvTS#P5;%t(;bt{bCW0BLd_k&+w z7_El&0>73h`6g=h^=gU6D^Mv>u=~7nzMuDKWmM%lTqfnrN#r{}(Ss_~B>aFZ?cl}4^Wj|J9{Kn2&4{y>VcHu!} z|Nm4}OwOz946My-%Jc8?kh6y`adADtbS}=+#jC+EeO-UMl0C|tKplP8U{Z7$N{wa9 
zvQpia^{ZY`c%Ghcyx)Ik0UlOgR#@kOZ22UeRn&WiQ7>5|eewx7F6W813}M-DE<)Cy$?d0`KNywVp-}ZXgHgftUH8HPc}{r}JRg0A(k;ei|AK zQ07yz7!PSM{@+Jx&cN*t@cdW$@yv5{Be`$#lSZ;3nN3LUk+~%HD%|bOqe;x^P;xw& zzw$d0^>m?wr?|J2Z5*QZCals0;0=#@gJUxuJ_?4M7*UnxKIq+$d}cO%796LlaU3YJ zPTjPCx*uw1$5(Pf#LHQR7_KRvzhMo%C; zx8Z)YGkF}6`R*`$zO>M?WUIIq1eJLmXKLvtz2X$Dv{p~{t9H>_Uyt@(X>V0j-wd{1 z;CWJuv1Aj}lW|l_I+Ho>L~tL*ChgGDy?nHu>OYtCRVS&9w3PLtP3V?*w!K-X&&cEB z+6#>$x2K?aH?p)3P0t$Ybanp|KQBc2kF=dN(RTE)73yW(=Lcw+S^2DrUhKOkNE1Un zj8^;>O?%*O<`tK-C|My~q1*!Uwuz)oBw-!U`F(9)TC`N5%%l#R- zJ?BL%@N8oS60NFcozWroq9 zjCiZ)X=ah#8YuN4${gk2Ano>2-y`Ix6`v%#Dl%J^==6-G-URtA>dyY9acuWwvO5+{ zk{{+l@U16n$<^5$e$&aoC^+7TF3s_FG`}pFV@C2uo9Kn^)6zn}x{|skJm%y8Pxg*= z;P1sQB^P?M;TPl~p8URWTd3XacpHfZP0>7S-NU@*gY8rJ&-Q%cV3*O<*t(2>N9sEl zs&x)1C!@fvBrPYnj76Uz1qC~j??oUz$Y;*2n8yMp`%@D=S0`S@V7*F9_$7|ImG^rj zq%vfDDW7?fgJ8Q44#ejl#!eoF z;+KK^V|a8%_ffdnTpMTL!*o4kc8H)IGtK z2h`9H=9A!kgZhpzFPj=Ne!Lf~D~jIh60Ox%dXf7TuKXMx13;14sbA8hmpz#t@=Di+ zp~d&yS?QT`z+^=ZEVY0H2#ZnIX9sjE8hbS(e68q zVxL3NuRQwzDs4fL>b$ofnh$8@=}x$im8)de&B;Gksb`GmPE~d#E{F0*(!43?nt6eF zT5YYpM9x;IDPx- zzZM2t&@+1*!mo@MFQSFBQ8AGg*>~H5%&bD?2R)q}Bk@Lhk&gG=e~K1$FRV?@u!=n$ zi>HU->LsA-sr}?rKS3Y1Loc|}uju7S&+n_f?(m-J`s*+s29{(ixq!u~z?IiniiI@e zF7)Y*mMw8*uv!z(loOJ+^F!m^Bm>Q7g&&kTz=7zR2>tJqwT$npE7`2DeVLzatZv@S7Q0CmPKHA=<#i}S!jbtxE*MDgFKJ?7Wb>d}T zQqvfv{zPsD721}a49(D`8G0o@W@6D2A3GRsAGn^}37H8V2GXqTe~iZGYvG?{`8+uN z1RZPoHBRl(s5d~6y&9+C+%QiktJ-um%wca%)=S+(MyI24b~0sV?KqIG$KBDMe+TZ- zr_(`mrBTI;`lug(sXxw@({PS!ketPWki{n z%l-6(`}y7#?BjhOhL(TRLe_1w+V_dt5|z7`cU~F$W~7%k^2*w2P4E6+|1ZZN1C$x0 z&WynB@>#A+?q;Ny_0>jR8B1l4;m%mqta!cyg4yn*uCo4&jw=1zt6T#vuk2ho&FW>~ zD97o0qQPzSuP2x?=APtw31hI!D`U+6{z{IPZ_WZg;9sR`u$leUkWt)dzcYGE%;}A+ zcSe0VhbsF@%aLVOHYPY{xBj=#{QuO>(qf*rHIDZ$?svz-(SH3B?Q;&?5Bxj8@8pg= ziADHxaW8km^(Jgb)>o5%CZ`g&EAY+?NKX((GXCh^u6{kET&SDX^3e1YFy#cPWQ7?` zI>MJSN8^R;c6p*~R@0Ngz2sQN#N}FWC0F(@?q$bpGye|q&MRZymhNOXV>la1rXJ%#09Xgho@iQ)O=~~9B$oujQ01_ndOB?mP4{Mi)~u#dUKy2>(O(vNG;Dk!YX9$G;5Ad zW%HRHxEJ4JxpwcMr)R6T4al9#h%Pq0ucN&16&hQVyf 
zHf>+Z;#}_e%%*+{p2T;|@@#L`DH+XH>sgwT=0)mULNXE+vQ2N1-A{?Qd4vQeztv6b zO>k-DsYkV!6_?gz=yXpcHfcTlGb50ZN@AtHWZjaN>{S1fTRW>h+trYmmQkpbx%i*^ zH{Y-JY~=#B;X0PAJDWWd#agMYhUe4kWjA_gn6Y~=rQ5M;XOj7e`kUeUlE!fE!U`|u zb2LVgC2VYJUV_L~#*ms?24AXG}k)xPep#m`wvw$yi=udm6<$tmD!?AJJbU&dBh zp%|j2PeFcB!QHFzVw~Quj(WPnK0Zv=Q%}`vuVcse#>)p`w1za)RabJ%-;XEBSe>=Y zTH457tFiEVfP9`#-jabOBfzR?pHOTIi(zz21|_ z644EvvidL?99Ph?v{MI-5`(xLv|D*!&+{U)H}?dRFvv4ov{~l=9txsl!c4~Ntj1pj zqlKu_9zRwU@--hdYQkupkz-cxlPRPJypkblGf29FX9ZehrF{%Z${xm}z27G3U*XRL z^~a;z1BR`%){Q0Fh~M2nn3;t$VK)@r63yI0>-Et%XF&DF*-u%sCFC;RWNokHWQuR{ zvYHal`<%O3^-m_B%&jaTjk9ojDcFaDZg;xY3McO)qdiE+rC?bGpV)-;aLHb~(7Oiz zGx^}NhLDVu_bQkAx)!iUBC=CC@sh{jbaH>(#rs$amd9~0*(mOz0fP!kHvmD-OUnxW zAJF@1r6bMPYj1bYX7rg^h-AzDP+Q4H(Va(~U87YC`;#*+UV`67++2>E6O`-1(lkc< z2_RXG;~7D(SNk*GH}PtQs-q8Y?g3PLP3^Ht*#mqjEvXF7r`&ssEDuA=%x7QkZXLRj zwausK{1f<_thtNuCG$!986QUGPgOQ~T~C8YcHd^aGD1sP?W%%iYw)WPDrFZ~_Gc|Y z)A*wqX(glW32gYiaL&xnm1>yaU&hZJ3J$kr_0|;p&$-qkN!=?juc4;woA@J-EUU?% zjXX8|67j6XmaMW8x zqh4mmvj4c2o+O!o$EYzmq>`UMc%QGXC7#*FlD6?V6~DKF^l&{z)@@HGLqGKFSLpv2 z&}FCeN#+GJhn3xoiH@4juSopJRB{^#CKfbGd~LGq)J2o*A}Cilr}#e*g$Ai9d8e{^ zmMrX{NJf=i!2O%T`d#CGP5|i)&YV4vK5Ynj%-U1dkKYCTpV8oAdXTxy}ils$@Vw2^E*na>-duwSp zX#1%Sv!-#D0|~8DUs82a>89AVqSMQXWnguYck1XRq;O5N{*oDdvxw; zS4$0QXw=h}3?&0^ypj3vn+xZIJdx4ILhxthcQ@K}6Q1W3f~+$puTm$NW)1pz+VhIG zXX*3a@|iv{QLV}8xxy_-zDA2;THW%zJz2XZS2{>PWaUYb* z4xh}14c31gS8(AXSiVz0w2(gKM2myqmh)>8Kakn*+wdXL(m5fcC+HJ%&`Hf%fykQv zAMp6^aLHOl@Jp0aatM41ugGz791rzg@0D4{*s3*ZE;DSc!?CP$XD`cU^v+EBEhvyV zmqax8L&0k7$y4A-)`kkDlHVcg35S4V6xx-$D_83avtxV<8rq`582aaa_yI^#)Di*Y~mg$SMGz{RnMLM z0{x4pYWmDrJuOVtexstjZ(#rLzcQ-Yk4EiW?=3T_X6?MDXES!r*e$0dlO}ZAkmb2M! 
z(=#P*U;;{H1$r;GrQB=Unv7(e`V-}H!bBraWIWu`_dmV!s_C8R;5|WmPH|;{b}AKR z%aL-qBK%`e<+;R0XAOD(0`}C9QaQ;W@)J6SkGYHnCo@AWTe6gp*P zIPy2zoyg^0en-;&eGVhf=U!_4|NHtbjl7GkxQd0klO4(Y+<3DKnIRuRqn~1<61SLG zsp+(R6`p475gp%y7XOY8U(b`uKK*20YsIG4RnN=%jGRyVwBE24Ym`WoQ`qCIy}p7g z9rP=|Q`hCJP0o9nqtN9EQA!KVj-(s`YjR^JV z%gE*=^|z<5IVJN*ebDceS+2$k_vXRj1{C`Oo>?Ky&h3}=4DXSXe-ikJhQSdz4_bLng0(+Zmr`bc95n@(19xvu* z`=HX*=y4$)bn#Ti+u50!oc0T0n;7t~$i+?I%IIni{w2ci*5b}=nB7N0vPUs_1ZJ?_ zccb>-aN+@_n!_u5gfd!7{AvwPC-+6X&ty%?Ds1K_KJoi~|E`5kFMOWlS1Wx`YcTyC zcfLpV2YLPyl91EFvc@q0U(V9%ZAza2{#LkFN1It=NDOq=059~8C1?hloUuBJyv8@m z+<#UhbNc7wsFWFr^pLAjU{7~EQ9q@Dl|ht|>bYo^J;OO!ZV^pL%<3Cp*_Ui&%yX)zvm%l#I;VknKQh~wx0%zB zo*-Y@_0$kF&CqAO^4E)AxQxV~2e0mh99@XwOJMf2YZsu@^ZsW%F-2P)$n6|=bCO&3 zWaSjD>{>rWyNR&f1lNp9Dud>JzmtJ%in2LlHnGN^yV{Ue=FH;b@TVRcECJO3w&pVW zxm}6m4;qCUiHlsVmGyLLn_3P;jSAA0mBdZ>&<<={3YmNa>@U%})u3)c25v?5cwmVQ zOT1(QGL<|r)6pRj=h-EGHb{DcDE|1no*S&>qx`0Ev|zgSZs9BM3zDj!tO|ofRsP<{ zG?sP}&U|JR_<(UyGoICZ1wU$QA@g@vknmP){0en;f?ZaTAK=qX4ivAlcVP+sRQVQhv6@K7W@)Q3sP<7E`}lPgu4M(33sCDi?dF_?<*Y;YsbuzKK8g*|X7=UxVJUZjFC*mZ%+Dr| z+X+h5MT3@Vc?E>o6P{S2%zlp7{=;mg8aZL;Nc3IIAG#l0(XS&(;;Gk+bS-z%J^nua#sL&no{zFug{dBf$~tQ<*M21KzHnT%i0#Ak1iQ7;NTyy1HH^ zd4Ial-`J6h)b~2gAF7_yc%6@+?}cc8kQ!USEKv`;`FD{Xy#;Pu51y>}Z-8NPCp-(f zjH5UDHv@LRQYve?nF~o?tmGN$z&}eQ_*`<*Px%S>e;R1_r&pOr`dD2v$VT$Gt|K!~ zDE%_%9^sKauin6)d4=&P6OSb_P?;7bZa!zaR8~*cL^pt>2fULLw<&BFYpD(zJqovA zQx9!kLV@%nfAd`C4>P`-0plw?`!uTD4)ToJf6T*8KE;%ugqK+zj^FSD?;BBYpgJyg zryrW$i|*aD`w8zLz3DQ+$?0g7hXcSv5_Y)nK3f9=}mCu^ac`QBJCy&NB3* z&xe6%0Xm%Tse4iTb7da`d2$+bf%kTOLRUCfcI^dpejlFqc~3)`F)YK)FgYGq2Ez4p zCFd2^pl(q*XItK()^lk>ZIr$rZI+_*kH|pm;dJ#r3X7>I^N4nm$KWJ*JO+jqv?AwH zW&hd?7GesmYN^fhlz870SMv|D?>4<-ZKI4E_0|j6-m41_YKW_sqGNMXv&uXCs#aLl zWXx>LZaqpT7mr zGFMhE`$RsZf8R&<#41c?qh4ico}lr2(3qzSo$dyY>FQZk+|N91V&6X?SxpQ1t*O1u z?*3kx>|ERIem`_i3|me&K3iW~3)g3n^S{L5PJKZ`M->{ae%Rrs6yQyHUW1|X}#jf%33{5~1KDxuDG>dF1=+ReE8=YBWG z$*g%+B`0O%@r$lr4ew8UXQtvd_F{ndcj@PUj}6X>Yet%PsrBx!;YwukzpsnF1M`&n 
z_gA@-xxA9G_P^iDcyj0JZ0fFr9vR*c>av=v-7(V2@NH39?VWkH1aF?u#%5Cba_$BM{1>9#cr&=Y9P(bMqXJZFXPTb z%hR-%dh#x35h5=s7l;B&O6_d3Y&Sc>!`J`wI(YVrm(o?X_rLl4Hj30@`!3e6W_Gd- zDSDqJJ{w0~rCrn5n@zOwFqSUkxcC5PvL4CCdMNwS!qds0n*8X|*Yn|<_{P)~Ke~d2 z*uqA%(r>PyVW+Zt88t0qbr$Jo*6>bxuo3f=$mlgA{OraV$-hes-6$nKDJ(^L?m)1O z4Z2s4zQpe}c$8?T!MdB)q-no-;HTFI<%P65m58cXpYKG^3h!6ue^5&N_^-y`R1 zG^NWe*yYjwB~JZn{I09cEiB6Y`q~|8&z{*6wVn}JvWR3AX8~)GT#j1``(wwLF5`cu@LIKn>^c6klTPGoi7gF!OzCL*sbyZ<4`bLP}W)-n6VhQRY< z-xs2EvOuIqs!%2~EQhhh4LzMago#+s`Cb{>C&R}tV9}WE%8L7D{A{O%#G~ibmC0&I zTzE2_$L=-+{a4z_Zt6auyHM?O)b}3gpR0H4p}m{*!?Va?M>cGMS&5wH5ziwL1UIls z&**C|MbFFi;CE=RGw2hmloLZLaAZ2`meteNsGc!$`0)@LE_b&FoT}q>dYPW+m3lK% z7>_S2xfinj9m!mM8l1geIX^8c)j9t%aa04fKTDh0x%nmxG73&^j*D?^X~Czn*x2Ny ziJw~GyPBHn7wt?!m0oDJ6bvi8Gm|h%Z`)QIEAjcCpzeuUcaWS7YF>`dSs|&Tzs)|b z?B`4tj6;-9PO)UmZ-(~u^%;M`$=66tMzCk%&atSFG3cRcO|Fas$Z7iC#G2$3@Fi+X z{+-(TxSP=D5OmL}$TR?uf zdPC37e82O+lr_sU&>_1y2b1%ecviQNrbzo;TBuMqo@y6%@gx7cfpmbfFVU_C)ty{9 z!%?;so;?S)NjU$N&m(dAMU;G5sm$tTJ|HoZS+Q=$5@cVZ`K~$;^f>Z733S-L0j6i0hb7Qq>1KB`rU5rX2$X5*(^hJ=yyM7E*IcFhz zsUzvxVew<-muY*r7G@Ynw!zzsQKsTS&Q^LHY#Hfi*I-Y0p6`7FU+!w$Og61$>Pvh? z_9N5*cMpABZ2oqZxG$`evF19`unq@W8MRDA-RvVtUlUDv#P7rP=AXOU4)lTfEVG_} z(8na}PzQXP38u{C)Ww7B$ImQt1!^QOQWg50lj6@)Urq-a%TLUSKbftGHYOiVJvHad zir2_wEI~Zmw_`#f*v%k0lK9w_U$$9<}S&FSsq)twkOe8@|Qkj1B2mU?6Q(TE! zi5uz%i_Gd|&Ug%*GK>Bd9B-j9?NBn&CW~SH1?i}ON&2LVL2wn{a~iGwEkC(2+^<5x zG1@tpOg)DeYm|}4FU488#{%}DrcJ^Dpxb_FV2fl7-!kuxq5zdi<^kHW_` zpwAgRS({Dt&=SuL#<3=(WFg3ppg+%`S5E5f#6Qf;MRHNKL$^EBo;A-7aJ@utmkel` z0mwd+2BdDWd&vs2k^fkkl&)b^hzur{&Y|xCUuU zq{c5?JsVsT*tkTEV3GNQVK8~Ys9**8%A8acv`Rjyex3_0W6cwB(3Oq(Gs*{(^nC}z_Zb|R zM)q=s&BOS$J9=D?|JC4-dA}<3=}GclnWakXan@fFo1a#Pq4#6T#}jFehuIslSpAtt zJks~KVVl{+sc145g=2RXz&`r+B#2gn<5KpeIeK(t!INVu`=DxpEGOKoQeqrB1kw+2 zyb9}({cIn>^iJ>O)H;eq&c5cHEtXl)BVij2&5rIRIGefGHYk|AbbWEPjO=+7+=&|N zLtZ1lRmjwzi@vc;FY*0^@1to{W{-|EI&6*qBWdQH;QR=! 
z)_^Fx9DmQ6PC$*NY|C8K9qsBvII$cL678PMg<0D_59EiibVq_RJNe>2Oi<5SP(Dvm zlCS;+I+^*EEv_cl!*e8c8!ziK`Y{&mGE33LGbh6HZT?PMwYG*;W@cXmS+a2tP<()ddC_eKmE$j=1vhG~TYd6rOoko6VJ|W}U)RX^ZOm@bf<(Tr{ zzsqcE<=C{WDRuqaQ~&vTrue@v>-7v?RU!|Oko8*0IAB-pWi2|-)WnZ-Nca0BU>_DH zvnVat^YRb7hUy|s*6g9u+>J2>T%D9jSjWX{1ER=rH z?3NGYSqCxIuWj%Z; zF&Lv%t%65s_Yg4GBH6e39j@hxL|g>l{sk@a9d72ktS{x#LcX)kpWpp`ALYvb^xE@( zyz}3m*iB6X$#pH%`=@><`}W$PyMRHD7GS}wJ&dmI+XK}}(+ZNiM9LIB6 zorxs*Q5G`0A^Wf-v-EqJCEmem-=U6&*ok?1uAKYajnx^iwqtPVM!IkaPvIvlL05OT zu_hg1nOzrFO!er?_#fTbk$y@gf7e8Jrl?~Fes1DJzV6-*vXJv5uV!Js2ga;s^kqA{ z=>xI{cN2R~PGf0yA-R2Wd#w;$#|YrPc@aVA%^$#sU7-zomM~_|=hw6(GEp{cnY{$*b}LEAW{X z7lS@~;gjtq^ZprmK1zyLlgSl$n7HDZp8UT5ALtEVz{Bj#$UehGdXwHD|45yyjWEmT zl8l+_!YOkY?cuqm_;tLRJ^}4xxS6Q+=fQh7d@{zkR-K(l%WLSHeF}l12J4c2)amc$ zlfj&{ITNQZH4~nb!X?5$nL-7zrLKWaT)Gkss6*sOT5GS zINOQ_m3Jzm$i?izyUP8xpnha0S={GnXBoO?@8=M_--@Qg*n-U0KZA<#sxrrN3rbDb z+ciPUPsr~$T-;7V>Y`Ho+sqzpVAr!N^(eh~&I0^3jeUdkC(G$tux`+gC!gLDK2sI1 z6XCtXok3(P*&@!>T2?2rs(B9zpXNOQgxOb+QP?4VCBwuWpiS0-)mqL9Pxe`){m4yc z@47JQ$nqTm_W9b%id4p)i}9f)nO{XBJNus#Wv2S|BwN-L%m;xWIRU%*mG!ag&&vvQ zWON*Ba;?vK;7N9{9(dSK*#l8FJ8TwvZ9|)6(90@o_WsTWVQ-qA70aB_@(`Ix9_h@9 z^zptFU+ds_VwB?hWPS8(a+uuO9l`YgE`-j*&~XP@`HD@=s#*3f=3LK=Yn!w2S^uf6 zU;V(7owb}yeM9K`mE`0m&t|Vdd-cRazk!v@F7?E_=QNu8&^%h28Jz{Z*(;U0p3Yv5 z1G%5v(Vvrm*Nrfjs%e8~x1oJ)R2#sPYongJ;ID=ve@2sas1!R=mlXCYq^BROANOp1 z@5~@%NBI))&t-p;qkIWFcnV&3_J269t_S?Wg%0H6N%xasIb;2b!p># z%bZunLd$UCJ#{Ab`C{^uoj*0uud&v0dQ=Cpn+W1DYB@)_>;(-yGOLx0m5F5;PG_>( zRj+{SQ+ytx?@1=MwYZuwX?9rDquo!!JUgjJp+i%p`oKTAk}qXR4^{g{7&Qaedwj9^ zbZUU>IVE+o(#ausAKbcpJ!4*;rFI@zuEpbw%6@`!KS71>qU{Q;+^($!+DLzr6~{l4 znw))`nX}GH=3IqtBvoVc6p&O zKcL$aS=!Us&3j!Lr>1FYJ&N6E~ICHmzL#w3lcB&86i)C;4v#w1i7qN^ld3GcUY=qOV=yNh{Wga8gBrjhgAfEF~ zB9$^9axy#Ih1B)+)U<+6e}#3ja*sC}I1lH44U=a4pv>MjGd63gm;D*Ob+NYaSJ1u> z=QG&qXF+%^pCzNdcUX!UnyMiITR%IUP0_C%#A*Wwlr=6U!o1I*vXiruX zVo5WL`4XxpE5lNh+o$LY=c_0A7=rU{bmUY|S0{1Jm1&3~iJ~|iU6vb7bOLp6GXA0` zl2v?@t9zqjW=L!ECl*{z_>e!$9VS^|dbSXu$gT~X{K?Ilui 
z0SK0X_flNmo31=X4paXbwCn}6$cle5Ev)owlR6TQzs$dQyz$_&7T<#=9Zln66`I5J zeDXS(EpM(TXvWgz+=fKbpHgk1t$&b)m-Sgny!;>G9w!`3+ z(^``2JY%N#HJKlL3_M%VV4D6qwrK|$y8&Nofu%Q?+R@HhS~}Ete+$0Pr%l;=Kg4Gn zy>>^~k2BVZUtODJOg8*PEY(Kuf%NFNBrZA7ul5dIJHsx%!%JFUrtO2&*o;ldzVMsq z+XV9d5!f=?Uyq*2Bk&3h{~K=S)V-Va7cD{C0>6gRw9DD_z9GWi@&$vDrUh>Qy^rw%qKLqQ`ID1oZXB$Woft~StUX8Tc$aw8upXC~8BdumHa=Bvq zpCjxa6whSjmvzy7j2&|lY@XQn>#<XIql6=I;~8t7`meh?*K>E4{;%J`FDt%jy^-^i00AOCM&nG5-rlHV#m|NV)zxS6c#pR4(B z7OFYRROU8M)IufKPo{aJ_{y=M9pv{oAf&uf-K)Agc>G2`W!FXR}W7E71?h zF0+@uIwSPYz_FaZf8u&Z@|h24N`ijr-j{Sc`}VWT{X6QcLGH8jf3%X}N8Sf`mwDck zQ84>6GQQr+uZ-@G#L@nAJ99O8O=NSD5AOucGV3(=`L>v1?MHArtgh}V-yspQhkxwvBs4{`}RlpJHnlHlrv)!9tC zPek2Vpqi}K3wrcR>1tw?Cc$h5?f<_1;WYJUC(=|JpS|1JDH{vZ7iYJyYi)}fj#EBa zB$Hj^X7IN0e?Dth1KmC>EWs2WT5MzXRvp8tZH7@|ThGRuW{zgiS{i2 z57l-zN=-xmvq*6=bk=}DZ+9D_WOwZk*H>HwwkP16Rrrq!4?a232I1yLPhJAXmLOP- z8ueh7v2fzU|B5D)wcHZ+@#Mm%cD$A8=)V??&jwdUac7})qFMipy2(79a<9X_1?tSE zjeDSTTfEL(@8jxDyueS$NOGQL{E?aUau(-NmhMi{)ernR`Qkw~d!`<85o&j#8~tIH z`HfYs=B%9y^oNh>FOofAk>|$qS6+7KI9Syw;OOq(!E|!9Cz2(p+{r#345QW3g(PeM z`J3o^ov~WR_?cs)ttfVdaVZ$y=P#gmQn zJjr%d3;g4i%ec0RE2BW4ymeF1^bXHvMd?kw*)&u*9kg$d>mDfDfbW>Oh_0T`?%R{e zQiXf>D%%CNi{Z5azry21d&dUV65YwgpT>_>ZxP32vWkq!+{hyAiiF&S} zWm)lD%(L9xlYL=43(s1h@@Dt5kEE;b>gr3T-%H>y-QA8LpMd0M@Kr|X z#5d&xgJfyD8mc%6HBmodx3Ln?D#`Hh?qv&TEmR!6>mxsovUXPtLZ&d9dI2 z`?nBoGe~21SY}Ms)F>e9fSF&oieGv%?e5`P<`0tZ;(9RUl!RnvN(RY3ps%Xc?~?7O zV1G1Tt{~r8C(JogiO@U)u8)HJ4G?GKm6J3QbCP_(f6{tYTu5~2?^vuil=#>)Jv}=W z=eEEnqoW)AN~Y*4C~-DT|FDpwHAVT}#t=`FpjgOyBw{Uj`B=R-v1toflP12ihd%45 z%Zt`tLFN6>=_yn>%yZ#eM#9NckzEg&;d~#iiR1W4ndDmAN>5JL>R9}{566@Jz76_j z9FZ0IoM^h#)5&}GPyAWQGGxDIcD^QWSVmi{5k4MKR!bZ%}LgC>WN+3!Pm*&fmhUa zh1%+pj`3tF*I{P-JrPZU^XvfIErdvh*=aYoo1Ks^kEpP=Iy6s?RhiBxK(hB;u` z2*+Tw5~aSY&CHEvmHPw|m-+r4dXMv2qeSC%ROS?ICa$Tw-^bwB6f&0;^&!6ZHb$Au zQjI4G_i3RG?KodupMq*Vs84Y%I}Q^!7TtJI515F81-|zHLwf9QlaNj7OCFb_lGckUz;dpGvxQTQFq>LkPDAYMV%d~5ramG?_XSA2lz(A4-pFEa#gl=>XnveA+(F=5h(Di_h1xhi9j-ZlavIIKm?Ry{ 
z1};(iG~YQv{!#RqMsIS4LQZbViN7Cc?FYQs#4RL?QJ~1G!awkTmfq|toXW^DF)P{I zx0rlgMHVt2*_yQ+3AX-9cEYbsEXILo`x;rQ#gg5NE_FaL1tuL>&Zph&j4DG|lbUEW zleA=Qc_l7~at9UGcyIK-PK^W5=wSUuE%Nh>T5mz|U!ZIBbh!5FdNwg32eTZ%b@kgQ zGX!t0#gjx7bkOSV#r(%`@}7CR_&13j*-pPRgE|@Ik~5_)?QcnfzODZ3?^%HwnG;>_ ziK*^>NxqtZ;C4?;^W-Ki*1+{ucwe3n_ZT`KRMgoU<#V1z*4+P2VlrP)p}kYdSh9O2 zCqr^AEM?n!(t>&!h%*M@A(+FdzBk?e&a%JAVlXvpp zCpKyv4WF;wVkMG6D81ZGN*|3@YV;`yyQGFpzGofDJG*@1r+b!SYv z6?H1H9<|Kn9D{=6Tzenaeh#C|lDy$=UYVOHYc2o(=VuwGp3}L$c_p6VHu_cdU)K^n z@Jq0NQ>Ofta{u*OM&cQF|M%gARzm9_J$d}hQxLJb)ojl{e2Tzg!}#BxscIfZ^^1t_P!WOs7L7=sE; ze491?fcAZ$7kON3$wZU0nzOTZrMh#f&vCRQYXLbC_d$Aoxx2~u+EDEcaBPXP6>M#y zi{{e)>=cid|4b_xi;vLeBJ%MC{%(V5WtzX4<<66X=z6ktuW@w|OV$rR%MnX<*-s!V z@uX{O@8|rI6`&|B$OZb7=Cu3{eS9-dzNek5S+PVqZ(>U~sqf+_3CH^M#plh5)@bGXcK_cb*9Eljf$dKLvS$;6v1!jJ1$+T&BQ|6H%Pe~ztxoK?&Ym*#xmrpmVP7SvV_}Gb;-=?>HQLTUS^h~xpD=*oNzuEO> zdbm-f=ve;d^Xgm&k6xba5BJO*UrUn0$DFWvjrMOhrs+gRmhvo4!;!3|wN>*n^<*#n z!)X3EZFru}v~cfWrQgB%e!e$pKR)*uzF=~@#q&Bx-;py=Ld|%LS!>@&0-w`^B(r{M z$&SV!;&mblE3}dma_52dAE>a|?9ScfN1iR`f!tw~-uEe$k$y4_jW0%l z*}`s6F9Q6o*aAQv^JJLzf8}sDRgRD;ZM%;|8RETY#1d&NNac` zUOV{)FN5h#df>I%%kJPu@of(^4nm8}2S%?Zc_$*}RJ8qwTx2gvvLj4F^=CaZ9rs@J zJ7?vzCliS!z5oQlEoTEnr~g03-UB?VYFpF>M7Iq!0xC@qwjf0%h9E@*w*f2^k&Tj(G*TR;7)>NaYIX?@5SM_+bpQ)}TDR$fS2~kZr-I2z0B4bS2YD1`v7{ z)5!Enft9gz9cXzF*>!$w7W-GAm$SU)l=ob#u|8d>y&I|~fQK2k(xJPy<}GAZF64!h zZGo~NeN9FJg~c6c*YSP$X#$O-X{9|{HV|m_0jz*>3FyRq(AG2F&+*KaaV#8d0`^hJ zdle|+?v1f7Gmt0u?L18p$LLZcu#gSlu#v9askeaNwb44WNw#53S_Z|2GqdUgHT9qL zh1<^kUuAZ)8BU#`|IP5RH+*Q0E~u?nscY7;4b(D!Txp)UCNYy%TlCQL*`8T`i}CV% zwAvkg_lM$navs>+zx2HNy|k%)^yGRr&=*AWyMecPuipk5_jAn`VPr>9sMv;jTD2xn zGYRQf0BpwIdTL-6Kc19r3lw9KYfoi(KFBN=oxn{Bx2DG9v=vGDyI4QG2_)^1rDgOu z9X;~Q`Ce#K1er*On@Y6#o)^>euaroneNR9P!VblQc@6qE@Bc`wedjC7#3r z>_+>H*{VgK-lDn|sXB@MFNsu61X?|IQAl_)Bl;#N(*_#rZJSOniIhlV7Gu^t{o3Z6 z?*N3xyB)z2?1ejf!7ClfJqRA}0Fl}M5AGq1 zN5dnLxOlkN4$EWwk9S4yMRq4rUN1~VplldOlMhcj)ObD0vrDHiv|_c^V~9g_%tneVwM4PZ(p3NpzLnAL&{Eef|#L^%F%f 
zO3$I!^1#y?=z1}}wSqzeSX-qq2Q*Xf?`TFhY=ZvV9=tbOKp$FHcgzX567K#9znU{I z$z*2KAC793^l|AoO9UJ5b#DMIi(^xM)mXx0nKT!{yLD;{q(U2Y>k#*jvg#Q58Cpd=ND^$+XQ;`R$t?p zh9OhlbaspfW#?a86($jqdpf3%Ub}nNvlNW;^O_2RCo}Fha(h5KWMnhmqaQ%w$S4GQ0%jA5rpq z@I4%G^)6&|4V-arpe8+Bf|KUjz0TOZ8^~tzek_n|0xNx2Q=#8%O1uI!H_?V?2K8PS z0zWOGXRSP&*atY$XeT{b>3KG-26z}7QWV;HPQ%lBo{irDKF7i5E3Bni5{x#84s_F~ zs=SoRWEC6vd*{x* zD|J0Q%drb}f!JIDm665>e5D_w|5ee9dXz3{w7 zA6pY&f8>UqPTUdr###>vpz zEX!r#qBl4l2inPCQ5fr0o^NV!2cX^!6|Pby0?B9zhAp9h_dUG|^=m*o=QVEvtvM&Q zP^$p^IRhlI%(7~rM@wjT2`!m_swy*8<5SIkIwxpf8@Y_JY9tgfYNQ|<_!c$yKmj8l zt6%{W+sJTefCm5yYVv==ChmFWU6%n)0Oa;br>b5=GPp5=qHb0~3|r)E7fOY&q& zo?`qo|3@Y~eg}IWt~orL)e)K*D|{O4BH@2$Y+Em8OJ>x10~$=Fc1i4`x9x2}7muJ- z?!5g8CEnqgw+!4Clvo0lbD)gT8!cIX&PU5V({7x`PVn{&cSFi=fg=mSsw_X@iq2CH zvypt)>$%bG)BbulgueAUglF2T@XbAa@5S&Yh^Me3c^F$=nYvdVmh-=zoC6VdU>l zsQUz3cNG1%R4I7qoeXub+V1K$fcrlLZ@OSZ^-L5*%KnB_H4ggt9=vZNZo`bXMn7B+ z=<^}t(w(edbNvlT!8fBfay>TQb}$XE&NOP(fk)w~7JUt#Y>;OafWx~9%&?&M=T_{w ze~rxeiof0>@XcRuhQMnv_vFLPK$pb#FiS*Xbm=bS`AM)g%U@n#twF6~NTb*)3HENp zMk)SI=QE8qgzJC$v26G=E%^Jd|8t**fAhjCXJ{{C%Qr!*vruRiba(w<44C#Jy%CHu z=DB+ZTeqJPOP_xNJgE(boQocZXU#Xf zUBqI!PiHjY-Pou{u>>cB^}!3+10!J2)08>`vEt3KeA&E>=hK1rqgY9;gUZ{n6GQ3iA{HY9ysz@M z8oaqPXki@o^fK7lT0E9V8qLFA8HH90*)kvEbu8y;VCjtQG>4n{n3hp48Ejkd-JE`| ziP|$WY(c$tw5i=GjlI2zHb22y8hc{ibNvvRyi3Hg?4p4WZ2d;YA8#hm z|LZMu?n#^iFK?t82*1oa?~3+5;Q9c}x6@;I*R*+O+CiaLp-5Zw#oJDgg0uT^QyC{B zsOS7~9XS?HTndj=MLyU97!n5`?iex-U;9yQYU7u`Z2QU{}OPmg!iqm*=Bq; z8`*LAd;orYKs`O@nMmdR^yfV@hxxQbFVf-q6j~RWcEDZ_I$guoegHKOP(K-Z8Kx|p;;i2~=dT&EEvjI=gn76__c4m`^rPY<-;ktf2yW07vI%$MnB7F2c@zbP}Hj?A1|0JGk?M9S(jtO94;hfHNnF$FJ^=R7L{r!fTC zz`p@%%>%BQ&|)x9dL!0ED13?^JxNt)QEVF_6W(dz+&vQvyTO?%P_;B8qFBWQyFKO{ z*aTmV;M#|tP6igUqPWXijk+C>F7rU20smfLbpkHVL3=Jx-Wj8_^fE|cVan@onN532 z-aPm=o&V!llP{poih$Fek8MQ{MS$gbuo^&*S(6V{j#IrN5<9=txvkNY{*DqI=(MK9j3m^}! 
zPn(0oa%Kznqi4MtBZgr`tI*p0a9jPEjN}f4GS1E;v45Ty?!-8e2*uZdskb-whPJ(^ z^E$kF1KyZV!Ypr9Xv3H>rRqCingUFp4;6X2@% z=G+ahjm^G_HSGcw3IUxf)I!MmBlIwrQNepTT!F_x-Fi?<2`m7=T$P&FLI00?%O`o4 z%*QPE=5cg4IvKuP1VTND4V`>ucEz+@UtPk8-XQ6 z1)+)l5B;j0fYouUFccg^@5Y(GL_gE9;LG5UImL`&@kDqGJuRV(SryHt=N{%6X2C;( zRX|&`%Q(pf$a7ideFflJ46Qwe6)q0N8Zb)eF+2jqBZ0Um+M>@)uS#Y3=xFy1G{^xq z@8b20<9W1uGxA%NdJDlpf0^Uh>u@6uDwuz!EEG4YE&|NI#e!Xix<mIZF(ymMO$QsXKe`Ap04Mo4Sz60PKhtwtV zyB=C_9=c^CMW?{N8jyIVFbgca*WQd1Q-G{A|F>m@`w)~+L;BFZISuy%{{zTU2kcQP zq{1EcG0Y`5VBvZ|i+%Ly9j4D9iv_S4`sC+<#owW!>r`_+nx|y~yy%LaHiC-Ydf>QH z18&Llb-@^F#^gcl1C<8HEZQCkH9GNkI@;J2$lv8@Y4B_dL|M?c7*Bqt?rm6{{QNb$ zV-{o8-HeFd)cFn4={+KEg2_VW#0}u6o(u0eo(>*ofOik=6-5S5LV?zJhPxtJdajG3 zU+#pPKesS6^t_dGh&O5bZg3w-dGGNo1Rju@N1{2y4gWqT}pbWW~E*jqq)c zTp9VOFDVCT!W_;OfcQ!H)C11;pj;DLYXS6*`p);fW6*r=PXJ?KtZrN6sUI}SME769 zhIwkL8tv{t#-|`<@d2M#QqEXPwQ2&Gw&uU{z`O*GWxyrlQ#J+s{Q^spP9KfvYd;cM z5l;05_j0uU6gV%2ipHOX-hxRiQ)z&t8XO$*d`FN1}f~N?r*@8gr#J z;}gx?e;8|_WmtrT(8%+HuJp}Ex;n6Dy(q6PS3w(`U2Fu-XQ96tt=$|^G zFXsd+uYuT=##r7kqQW`nEOgWpg~n5uS^E$*pQMMef%cnC%}i+}7$4K?m#|f0>eWA*XIC95^rxsr*k7k@?Zz_TzTt){suEVIzCC0FlWYHe5+0w+tq&) z##C4*jJWWx&#Qr-=SY+^`Ft~5J0&;Q<6_JI`YFZt zX@Okd8a!3P|MSmnK>jiM;o5I7_VXh1m=oxldG`)u3reGd1+fDMu$#NEg}t!?`remA zmoe0vh2~$!o){M%j}GdeaFnyXFR`DlTaWQP7LD|DdOd1;uks$WB!`*C4EnEyj!b2I zEJ2AWwCz6XF2;d4?27BzeYEbb(s`bH6L&hJ^fB!50D3cz^zDr2qbac;Sl@^9ZGfsZ zeS3zi8TQSH`#yXt$?VB^;3H^aHrAva&^6&*EqLZ0cm-fEcjE~x;!b`$Msvt*up+QY5cvh#h!@k3`|d9f#+hM zogcc79)+yd4QAu6+?{0^NpI%qWjvh^)GdHUzt4Ik=Mh>m_xegk;6t>y2F`dILSO%s zV20xwvjr5~%$uo<^~MPoVVp3_jv4F|(3Z>0*1g@hCU4C@@DdmmqJ{mybsm}x4|r;1 zosn(sU%As+5IBy&_ZD#1^Ff_}ViC2?qL>BUKc>Z1*hAwWcLC*kXzNm%JjqDlj4g*Y=P+_Mf(psl`u>b4RhZKaWaMzypat!@riwypjst5$ z;CvVwFQ&x=aLoiZXLC=1L3ny$G+39QOh4$@061qLkH$QkfgwHM#*Cm(Zw<*tl2;-3 zW)t0y9QA->wXqL9fx>8-@1R8^$|YbyQs7SGpk>bh%!OkUnX?Q5;*_#n!2T(7eod(REDnX-U zAlya2Rp>8_x~|CR_a}1%^HUB-D@sG51mt)o_9Z;y?|J^I*sSB=;0a1S7YCW+c-qOE z0nXB2GSF>C`@J`A7tc3>!)0vlbg*(S-d(x~S{#pNd-v4$=!>%f{f#;FYIcSJv{jP6 
z_69Wg2t4}m^b{1WO}pNj*8=JIi81hgu*{cy-=g2o{t(#phl)q3(-&>KLcLg4Qmc91@%h^s(VdeH1)h$KDaLx) z;-u1G2 zQ~GP*VJy8jwtdW7SF}@U%iG}H!+e8p#(o)ra2m<*R(0c&+AvSw2UQATXUi~OC=mFB zyz8SiIC)Ei5gbJ+e--{UN7t)C<>z>AJ`2xz9|7hkprlbQMy9&wYBWf{K>v*}eu6f= zCntfi!m~POfVeR4TWWW>KnMhzmY=HB{ zt<3y?04`(DSJ6*xu&sujc^esa+!;un1m?;58jPoGjlSLwrk-7Jg{D=FfK$b=R6DRe zdN#Zj?kY6bUT&i0C~&L@Yy+?dnamqcK(lsO#;MeO2`H=6e;RmAW)^UOG8cINJSC#( zPybFfoLNF|W_ZcrX9*J01P<+C<>i^|8T=S==DDpL-nlL@V!H;Co{UT$q=%iXquL{5 z!ve`}0%iI^`)b&+qF`KrnW(XCU1{Nc+V>>q=dAn8iQ!GVnE^D7;fLc@Q-XKyZY-hBlUOKcSl;J%3P|QL5*bITN3ACI@;Tb*LjT^-nGLi} zSSyr)PAOR9N$}DYjd?pGz_B*7v!TdyIb@+Y9M%h}H@FX+s6ji)(D4ks_C#GOwE6@- z=$Wy`Gt5X^g8O15w>Q?osJg4@U0Jx?2|6e}<&Z2fEeMv=`PK!F@1&K^v>198JJa$y zXg`Xw``~+CdTvVj>R2h~pGJ;HFw#5%JjSGG!6qW#-u5wqQnP@_%pPXwQ{G#_f8p-I zn#KD|^fgX}j>d%kM6c#n^;BFkd@(*|8Q6F`?G3p~)bF=xyE2sbMggNJib7HSGkV5LgY`er zX>)eyVY-X?!mo@?g^`)(l6o(@?uNV}Z8;Djhjk7f-gv z82y9u!O}ZpvylMrf}MZ_pQOD?jOc9wZ86(V1knG66!k(wy%p5)xF&rrhC`o2N#n4F zp-V>nrlVy&;EQ`fo?RMEzxoSbg0^~NBayk0z)&6h^s3GatPGcz0Oea-~F zBy5Z)r}TMvZS+vSESkFLzdhk92NjgBCs(}va-Twfd8c~u+&1a!hcck`5C2YkSv+R0`gP<%KMN;PH7m__iJzmP5kd1^%Crn7^aHg`j$Vcova*Nim)#^rzJYiX>WDPf2|+b+7`S%*hI<68f0Jn3g;79M^^a-t?i==zl51eDf>5 zh3Bcv&v7Am6XrKjc07ZmJWYc((`b7dQc{XC{{K)w!P!V^t~W@pPVRZA@P>^#Ky#V) zz3tOmI_+192qT=V=lP@De67B#%O~_C-_7t(1$IBc>L?lfBojJ^I+)Q1^vyz z?wzNvRAkO|p)udF$onSjkGEI5>efazW8HBMzKq9OjSFN8LICXcT zh0ZYhQQ}YJ?g{Ab{SwjW`xz{)c^_*bpEc0&1B?^ru((D~M^mOU&vw#tB0PK+{dPCl z`>?j4cdmfvW5ugNvv@4-c4VL>rM*G%0zH_`E*=}!2v`mU^Q}ar={zv4!d5iH-c6>( z7L0r|=x-?+wGLTxm2?trpP}^^upHNbOHV=?Ps}=LE`ir5Z;WmW@M_CwX2z}Yj1=yA zdEz|>o%;zp9|yN%84b@t)kkR4X!aJ^sABYK?EkM&r#a={#}bSJ#y5agzn75=y`cM9 z@bostd%7h(UGVW*7pp8+k*&>RQfjoj!1uU7!8tL$E2<_Xs6^yd8E`xZx2 zek@pf4%^YZIi-&vT`jN(#^w}K->q51~+oruH3O68-@EJmKZdBE6qHEti;VxCHO zr-ngEZ$ovza2MF^N8{YHD;CT#*TAm|KwSpz^+1lL&SK!Zlc$v^=ee;}Xzgg=cU|Co zqyd;2g&syiFJeYul!i00iSTe6w&XOls2a>LQsJnv`D38kXSCsMGiP}AIkP(V{msVv zCY)*n{X1bbzoY*Y-g~xZA~N3vO&A7;=QAFj1fst1G#2R?2e(|~JWP3STb&4Q-luUA 
zuJuJSb|Yn<@$C<&+=+Qy6{KSl(7%T^o9W{;vTz=|;%WbdQ2H`hwn1A{&;_#%x^8I< zRlfl1HSm8V&%BG!cq#YnzNZyex{H_(L;{mOy2Vg@H?$kgDxwFF6a!o3Z!elzlfSO- zl!6!})H+R(g@eezcJN7~pJeEof^=65u(8fKq@oc|{s4}u)YdyO7W~hEsZnu8!33bay(o#K?H#?@pj$?Vw#(BsvWYJcZz@ z)Za^i!JGhQHp!wz_f)+#Y#85nf`gtivzaQ{-dkwi!u{yikCgTd#_52j1*tz2dNk(8 z+`A(v<&DERv@wS_yZJFg@g(r`bgCzvj8=LD9WX~y0y5>u<$lXqs5m7UZH#kDg*yY` z?nY=@EEuuQK#93%h|GOP#}hS zjp2pkP-);YtEDH0+>4)wcWn>yJqLblq>gchg(>6xfCp)cb#4Wnj6n!HV6GS(O}`(u@l(aW#AYIO^l9nAHn%oQ#j$u)U&br8%9%7itFVt z%Bd0j@SeC+QVzr*VY zs8<$f`UCMxls5)5X zV}N59-HptK59@f>o)NuMV1+yfm&5$o$dx!Oc^ov;XS)cE zHQ$mE$ulUk7)ZRMQ;f3E0>fe6RRZ>z^zQAi zKS9NZz*{S^4vF87e3;Xw0=-7ER!s$qwcs?IUQY3O7f7bEdhwQ)&ykxmSG1Q>hLr+JDT zTStMn8JJ?>M?84zoBstYi(v6RzrKT>y{#YzYi`utR-QR~Jp&b=gvajP8w=fxXU=^t z(0hBJ+>Fe4uls%UYF7Efyz_)tBz&1csbWyS3)pF$PtfONR%!CkENWL76R%>)Jl}f{ z^O-DGXIrRc&N6c^Y)5xIrR5s)8NRtX?S-D^0q>@itp;QJfD=tU(cyL0C(@_?`qyZgKj<$cpfg%XF0xyxmy2;j%Oh=mNomdIfV!Du0L43 z&0jN(<$-JeU_3E9M?AL1_@dXL(ATuAlkK@f@v{`&QQu`1y?l*Y0 zhfi&u-GFT@jvw9^{bT7(@76giM<{E0 zlaxrMGt9UfN@x?_Hwma7dU4!KkbeG1NvR%EKKva%4d4Iw-};gMr=KEN8rM2ejJ6S2 zHct?k74bS&a|f34d8prv_tD6Q`)lr0y~Jo`UgmUW6&ek==mwyYr}kLRiN>#o*2Jzn!xqroJ@BTqlYri)0h70( zjs)|WNXux(JI8!y1LhVH~yLfTcSWew@BuqF(s`rhh>FOz?BATL4NNq*Xn5vw^W@ zFo$>)YM3i019}-}WNsTrNcS!Jqoeb|WgFjjW3R25jU-N@rdf^6z@d-L*oSkB+a1x7 zreJBV0?*ywz_&@PJcsdi2>6wNuSP6nGi9UI3bh{>W71X*?Xt4_t3k>MS#-THr7XUZgRa zMpG{xIXVu7j5lyEO%IYe6U}ezim)qk@+I%q!TVLftEbKL4_zodgE^7U^la`%&z#w2 z@P8w)a~8A;d@m#aztWEPP+mvYXV8+~yn?)OR{u9z%mxDUS$N*8CbBq$cK5(NXQefn z5glV5WUTZ&@HhcqO2Aj6Puxp(zxz!_sFBpm0LMPO^{)AmKvN#;{tobTfR=}t^Yv%M zT8VY_oU>6Ftr!W7+;|)fm-#0UXRcrMI9QJZTWf zs*xT>o_o{kB&fCo={ZA5eTS}B^_gYBBlkJI!(5+oX>gv8&X{khCo=vp+};M=`cd)> z?>g{oBP$9mtT|Ka(vB9-b?f`+q&L^T!Cx~MMMLr7;IV=B^?_W6R-WUjM=#Da^|q{q za(loe3T{Ti*9u^(?n`;2CR!np>A>JF>Rcdy15QeJeI4bg6V3Rf_s$%g*XT(vGyw7DQ-_zcEy_YLa@ z@dBHGG6KxD^2{CI8CV_9s`aB4f2u?29n3F0qp=G(wQ8Tz-yTYPgVsW5TN1qM@|g=C zjPkgS1<RtzD9$CCkq^v3d2*+>nhhda5oz2yhs}{ zjA$>=`sF}2*MU)WU~LMu_S1s9)#Kq_Je)R4#QQOfCfW~Ao%v5?ta3j>ORyO$_%Jlg 
z=52fAcyX{d@Gdl5i?$d&*PEaHNZ$bZ^*)bAw3!7&Um`2L7(KtIr)h8{0_rv8T|QtM zMStG;JP?Z2LteU4V;*ujAB{0vm0E8^q}f7yLANSsST){ffOB=Ea6dAU4K0r`O5BeZ zq6w5}3MHekv#qhI3s?ynr|S7VWojfY+n&GY2Y+p*81ImYG3IIrL~|s`^Oo z2`tY@xV0b74WNA^wT;~KCKvai%p{Uc4~`oD7`1@SQBPr5m49| zYUy($PwuB?OZr(GP$&hO=10Dbp;M;y3ZH?-QP89l^tl^L5f2UDz(NcG;y7UNrHZ=rrUr->dzlcPu{&glF|!+^mm{f&NKJozd@3W^2wsTvDkrnc-XcE6i5e}L&2=+ zHhMM=aS_&2TQZzJ6Os3Ops%N=or8HR*ch<54@f-KG>3PinH!lsJIv%f2)LuM2kQb! zGAGHqU~mTg>!)&m#g(Ugm9i`c0wb zE=Y|TL*4!I^p_q{{ifQ=h0NwYL0gM}X=eSHJF<-EZ64^q<`Le5a%I9g8* zn5{4xT`3Jr4Z$v(zJ3YhC;~}HL-QNZnD_EH7}i6cjzXFGcoX!Tj-;pk z$p2Dk;w-rfAJ277hd#d{&zmUi97G?I5X^*%F94f+>+W6Z%c%~|t{&<`t71^pT)lev z=7Ez|O>3XRlSHJaGdR9P3%#Jq889{KwFyvqr_nA-+<-+|6wuTYsMVm%Qd-%UV!$KZL=JM1GqeK3ub$9sA z9IJX>T&4dE)-RwdFY>>6Z6;#7o?#|+BfVCGmp1@S6ml~nfUGP$sQ?sBdGZ0@lfbJq zFho#VU(5^e!R*+zXesRL258h2IQ2_BiY!d!`={udo~y5b>2vX1T%o}3=0RqDpk*`d+ygw~C{4l}gT*lx z8v0u=l-X;fsu_>-(1+MfTyaRhC5=V7fOFr=!NsPsE){(KDBD}bthb!%+E7$ zp*!?(FMc_;Bh0n?6(zsojdzpRL6XAUK`#Sw9r*PEts0XiuKtAgE$J(Ik@wQuj7oZ^ z{2NAf8DnPT)q6a-8HkPedV}7+2&f$Ttdt0OV7%9K@V%MQSI?95(=VmRPM@AI3Pb4k z3o{?(!csSQo*SE#`}E)c=E|XM*wTOfxe>~F^1B0LmQkoJunfjS48oq~AcLpTF(aja zXB4at9s4l1+KdGo0tfXLyoi=Qhb(`G){aEJiqe*MggnfXmT0wi_m%{^Bk;;|mu5Ge zi6!$?tP#%jpvyt@KQ9~@4XwRvW*4$sA0GFI53Z8kRlQ6v?qV8QeH*sQ+*Ubww>;(6 zoPJz2&S&o8Zl!VEOOT4QaNSsm%RtnJ(MHSD9*Y}8ze8z#KTyv3>uCjWDhmaS2pNG5 zO+X{v{Y(RcePCu5hO1b$VvK%bJe@)xCxR9ufg~@|TZKM126LANK(mXHqYqXl9(pZ-3WuOv z8sF!m8G{(9i!lnBpKb&Ywa2Erw{nnKLw{(I&X`n!HA^|<$-Tcb^mLNn*P!@6;gkL! zHKYX#-?4fUyR!)ChKvKgSx4fL1wBD1k9{ z2CWu>VjH1=xq&^aZ!DI%6j$){7D`rN+#bNkx%D=Ds|)F`E7XqxmWf!A`uxQ6#5j6$ z;k-mo<{5Q8kU`J-yUg9-ogLrNdoOU-50DCfx*%P%!B(%tF=SvF7N9pRc;AM1*B<5V zNU(ntt9u$6ZlD)$;ClorFGUtzpOmNUqs)a)(Ytp&^`?gfd}{}_QsG4tsC*wSwF&gc zIB#{pTcsCciAK=dS@@C-PrbR&SYyw4oJNZWgYR*AG*^n2p(S`FBkAu0|2;t~?m9Pt z5p{TI?i}&z$3|4s;Tzd|hcw%4zviDEkSAka-(S{cl#! 
zen^8^idMnZEG+B*BwGtM27En1;l0Yu>HQQk{}6cSC2WsGGyv{A(8j!0v!K&a))i~8 zfZnI3SEB_zI74t{Z@kL~P-y^CyAP~A54{*g2ck|9KJYFEGQt zi`K3`{~aTGBGw`UjTlC|o}F}kG#sm8){6CLu(zg<28X509*pFC4I6!)C#gty8n#J4 zl$ln{t)mo|M(Z8{hOzwr6S$gjU^KF?$H3dN!u8^GzGX7&HF7Kx?mj}Rr|Ib#+AU9; z8?m?N`9Buznn0h;fy#Z=Td?EC+Jv`=dFs-$cFm!XCtW=S<@~V-^r?xo4FSg=1Id?b z#d+ckwlO8N+licwhK5HKngEXvl<*}1*g{o(IoJ7 zFM2aFIusne4SsK6SG;3OZ}nQ9t)it&VDk>}ezdj&c`~o_JZ$(rXrm|l0W@BJP!yca zfF7HWyVvRWPIyoj*;oO0n}N{~v?&4pdwRGXd>skIU-8$P{jm(ofNeke?u~A>u{eJK z-yKlAAMJW$Mv8DQUN9_#9O|*uE9`0QUi5bcn=y=1-Y@?TEL0Js%2|PN z5XNfgZK)n`)ht`Tz^6`d(py;iP;NOb^Z=5wjCPyQ0b>OQVR^&-wHuhZsjF4tVmI2= zx3mCSxsHuPk9GogXZqL)tvzLAEaVDW?nciyqnll!&>`x1o^>)@UxEdH3l6-7ym_+N zGsEq%m);R?ys@!#p2TzCEe8xw2bxlw-kzt-uhfYFo7h0cqu|eQw9K`tw>ldM*n=@) zB0AB4nyJ)Dr2Rg8zZ?B(#XRm4*0|@WaSwcq#PaD4DUJM`hr;!s$%k+y4hY*JDYsK2 z2AHP7?N(4Nj-OAV>Q<;d3qEY2*XHzpirTM3d(UazkM!+^cF9o4JW1Y7Zk*L3dR~PF zWq{{7TBr*QDQM6q>^hi1%RLM4Cv1gOs{#5=jZilxbvgW=!(Y!OHm8MXWKK(}#+5~$ zGob2+aHczyjOMpvv36t#6m*Y5c^Uu&v4PALV1$fEmbD3mp`g_Fbo)J6=<&h$Yqs55 zjP{kiKmR^#0ScxKk9uPD_EO`L$mZ^g>P z+f0_Dt9!v_7#uV+&fARaeV~Rj1U<#IkTP!pt3o;F=Ub4G7WD6Y!dY9(;D#2#M(e>rQ5)uCxsxYd^ymjRVo@6xF4$UXyoYy@4t zhCarT=)XUJEOkXks=&p6(EmWNGNX#VQg>))V9ktyn@pcRiQ`>!w8`i+^F*%T-9~g& z>llH4*m^Kuz=OGStR-aXF=@1O>na#RLYNRZQ^+YB+omZ3SfcLfvPpSQ5s88 zkM?%(q&d3ZiTS~C=uisW>p~ar?8u@{Lu7LlR>OO|N<%SYpJN%(2eRhUvlYn-*{C?@ zTBBIiddt;Fbg>1R_#3e6?Q~o*VtE2~-K<#dQkZwzTcV~@M*p|#|G8-7MzmGmx_a3X z>gliB0oDG&dd&5*x9N38nj15_m;!xMsI`RFdqRnF^qIRFN=C-h(Xi%Nla{ow8olr| zp{GrX((Ws~EyG(gMovdhF2Y&QESIL9IdXPTb~tZb&Bdbw`ddfx&VBfCP%Dc5D^h1a zH0%mh;?ah_@XK7E=JYTtn>P%2Q}%Cg(39xKna;uvL}FK5%Phxk7^5BDN*B(bHenq+ z8Q&Fq=L)Ml@C^z|A44ZEvOYY9ym&HXA26!xHTZsk?|b1|UE0)_>R9|G*jJ=q?W*VB zKvuc(t08`dOfY3fi8XduQ;uxK|P}! 
z^%MTr75D$kf71hRLJ~D^4r;%JCRp!odbkxH_Tl>t^z;>0W-K0;BqXR9zdd>O4zRf9 zH~ZMFXiP;$C4CYv^ZP9A<%iy$Un|XwOaEB~EP&nx&+y+0zAO1|EX@~`c$c>&`R?sS z`S~v|I+>3izM!7)=r>B@O+KiQ1$EA1wYJgTjm-A+dEJK1(raOkr{B=8*|fHsdBr69 z%8xAQ1N#DLjm0*Mowv2XW*TFIekbAk6I<*#vi(5u40w0uyFdAeJ1WIm ztQ6mi^3elxKP7Ge4l@(!uc)VjPs2-AAEVc;ErkBd9+~pQG}q$I7^<{BXDq5*Wr>36baRawDef@n1jx`RTvj=fPV2 z_;2MyKK=Xq&{J|0yH^I-%?!L6i}@Xv)%=R)7p#UXuEHYJ2l5}V0a=teft7h1Yw#r> zPojLuc(5Jc!l%sXw?j4UVpd?khJv-X=M|@ZK4z_Y_uVJbk{FS+8>?Z2dn^{+Ok?`% z*I+Nb|8X5QJdL-j;i~Z+W`NI&6!oFUhS+d(m@K2LyBqE_8#iGbmUH;aNJM+;H>Q-k zzWcEF-bMQmw(&{Y-in^A2&_y`q)!i%H&9h#EL+0}dY$q8N1$=P_qza!DbyQI|+BjzK?t8a}qRGIU2EN9LTVo)lM$=|t;EBfW8BH5c z4Kq$2q}O(g9A=5~M#PiU^)!Sdc1Om(8(1TI`n@yo&7!BmUPA^(YW!$RA zH|ONW#V>XA6)3a{P93G~i?rl@U?-u{AK2;XSUt~K?4=*M=_QkA#g&EO{GhW0phq;cF zb%I~+@_449AM*4aoSs6TE77DgtntiO>b}fscz+T3>5eWMtL%R4De$TWl^z4`htMcJ z4-0|TF=h<&Z|9n3JMo@i;}|^KV6IyIJ&!<c` zRhNL0<)N1OXub9GCTO+^T3vy*neg}&@?st$=S<$l@thO9TE7_`V$|%}sESzwY6dWgd_U1Ury|R@7bvWz83$Z>%0tF#--&VV>vxpW~si zF~}V$?-{1SwB!u}r>SH1)Y{;88Yo&}LFZu~oiB&^&dO82e=yqTAb&qWM}7E#u2N+BgtMvzfMvGahUJ zx4OtdH-2wq)@7W^R(@jX|6_ViK`y#N%WcqkAC`Y7zth3rTgE4Y-F>_@2Z8(c^Pu(@ z*rBFCrqp@n*3(R8rMif`)kUk_Dc74b37ou7s5Erm2JQQSnOR)*s%0R-_cN3B7S;}E zpL3}3fj)UkY6YI&4oJ@l=)Vv>oWZQm4Eg#6^_HDNX1rOuKXS2}+G~JpCV!t~K5!mt z>rZNd^>Me-h^8L&-V}@z>Gxw`bk+D6&%Jfd)5ooYo{hRUI^{=XbqnnomDY{+7V~M2 z2By=>S#UDG{tjrKiqtue_tvsgP&FFNx*?CAjqyy^0VuW@u5E;_OTnis&&}v$q*ny= zG=9LlgC+$WpNA}G0Q)i8?oTaa_D{prAE3XUhGqd>S3>v3Se}oOiO1p8UA*ZERG*=3 z1JUFO{O79RBjEo#6!heWd!qL2$Y-wGec+o;Z?22g!70EOg|@}e$D^$FXVSYi?`O&w z?~}o*;STgI4LGl28Jx8mP1g@hh6na76^p9pxi6H{U(*ci-Qhoo=46BEhm1$t!MY?R zy1>t&@OmzNdS)Vp@>P)9MBYbZLqDhP6)0=|uNnNE#jK*f z%aG!Gko#dk_cmN!2A}rwZUefS4pib`#PDRewhH~52?VjwwG|TE7K!(C(zndQa*)6{ z?EVVqXU3$JK)n%eFM&tia*#@ElYrK&L*_l+3Z?c3G#}0Xzanjepjmfn_5-R+___($ zja2cZd2z~T(g_OI0cEEkH(qJr!%mW29EErI)ejBnGwjfwDt8n4zV}f%ig^;W9x^hQ8SjJ4FTHK(P+&FQg!{iU^19y>iIg@$ zTOECs^iI&R4DCJ3*q8sVYyoKF^qOff@B2@O~MXD)RA+p)u6n z)NQmx6s=#vE>DNE?j#sFw~y7Ze#3)MuR2enC_f4cU!l%)F!m(iD~wGUNQHLzIlKTp 
zz_?pLrK@PrJxE1IXqgCZlfZQXG&Sqe9%f+9yw*aq<Imp$-h~p#b_^D3c_0l(g5N`c)zQYhN=D+G2CrF^{gE-&8{LeG>jJ0EjrRkzugV*< zt$YG?Mj&y<;(JbO5)k}Jf0faIMfBeseBD1#ii|39|EwK$+130?o}Z+b`;h0`poTkG zz3BZCG@F9V?S<<*p!`<2z7^f{{yR^g|3Yipj>goV1!l+LvnLTySTog#cEK?oO;Ko`5U}A zf+dZEUki}l-=OLt>J_BjcUTWyz#5q;eja@}U+D(jJtwHV)I-lo!!y0gpF_n8P}4}R z2~hohY=0mA^NwRP*6wsl~PFakCb*?1PaWCYD}w9%YswHYl>(e^pYmqSut zr?zz?GX9V6q zn1E(@0`5I%okCAW4i2Ghcf40Y)l4W-jg_ajP23I~anwu*@XCghztV1DBrt~7#)7l^ zhEY&;GjlOBNqq#R-H|E|jZOip=Sp`Xy&Wjmo?b@-rM@a}GJiQAWp}%A|wR#{mUr~QNke3G5*BCRhfpZgOt3iV#%1&iuHBUh;WMdJ~N7B|2 z_~1R3yQ$L&%kDYug77?xH_`O2R$N3*)VGe{SpwcDO+~0-o?G*?dZU4R;jaL3Z~jk! zdpY!Ujwhbu><-l&;R^Bh8kiV^Yse^-31{D$FXb@xqmi0#z|VPM z2j1RTTYHxVgQB0O!g46YS{zwUA#;nN5VDdPs16DAJ2TR+k>XqIP&wxbKa;G6t>?B}yadHv(B* zWYYc4XtX~cwDrD&{7}}tl^ZF23nRFGu5#FnI$+zK&s5qi!d&uMdK`e(7lWENLr+_d zVAU7K85whQHh|s}gCSLfLSI}hFl$V0anAh-&m#^FV9bM!_H^RUyuASqd!o_Uwp<(@ zL;LjlzlZer)C_c8ShaYAwRjf#VR1RxAN4;vS*;9ocM^3v4VP} z&Z3!LV3q&IZ}Co|b-jt&oM})}f2n5>Z$)2j=6~Pi<`0*jd9f$Pd=2MK5~UtMDhl)6 z+tG~FstpBV>B;|%N-P9FbF)<40^G{M|NkfSzrFAu-WZXW8%6eC>-~F7*o{c8_a;|h zMBfPYE~7zb&{1z~QuB-!Hp9sgbn-lw%B&h=vHHerR72+5G9TH36uFDglXb3l>U!hP zF0}AQY*IT$l@r+EEVRiuklI+HDD2Wp$V?Pgp%~g>B!M$%&sDi2YE0>D`e=;4H=}HC z#$V^S)3Hm_;GQ@CPQpr<8>ufnwP3E{jbc@i9pg_{F_UaW4^PwXRBTrqywbxFy7Qh8Bc}<r3bISSFHkKj!qo;%<6 z<_b@+=sgJi9s2z`A}_PRq!e#DKv(BF<~!a76oa6W(JA^y%#hj?4LlEpHv)|rX}SeA z)_oiA2%Uu_rU9L|Rl16t1g=&Q6mqs!6}q|i>?*ziBhPAJJ4yTDSrhM2-GOZF z1+FXDezRVAzlEpK^h^Kx8Ua> z;5UnlC)&CPvX=mDHv&~z`1dfgoA8n($iCBH+xgIIKG1!OtfqsHEuRMdU!bP`2{Uy}Mr+zLR`dh5)9~68 zt8K6ltAQ>G7@fx~0o#t$$mC-_Dx+9Ffb)&vv$=KjE61ayzkvJqKx{n8GM;u|=40*~ z|2MMi6nv2yo>-fR)OSE$8&fA5>NW#X^Edr1sQ)`Heo6nvjCxYABbda)u{Wu$*Log2 zH)_-qdJVzZ+nLv4Evf?DZghA!{kanS0;-vF-JS2Ul&(gcyDsX$2!q_wepJFnErh8pD(FaMD}ToDG@x%-MD)`WlM97`b*P z&_Gx0=D58H7_RYNJyv2CV=)&6dhCkW``T-RVH{H94nbWwU6m2C7;F94x?1xMkB{t8DLeWhoSFS9o{fzEA#Edv{6-Z*b_PJ;&CAOL+h{ji_mrWzv-Z$$JNOkt?@s{pWoY4UimQ&Y zlv#-#8i2fbYNi|QnoH3$9$Cz8yi2hSHC-?LNR1WH>^0=w*h5bgr30rPEV&sEtjz+M 
zUJDMx11R;Dj0bML(F-Y88S0gxuMq*=d(*;6`fJQg;tzfvfexeS&*;B`^iqsgJ@vel zr)A)$v4NeyJq@0;N2o|E=6OpI|tOpa@q>4 zJr$RRR5Sp>ro7pRMsMNwPV~WfwYL`(L_QY5xn8tXl(kxCtb-Yeia}H3pf4jC-!h(r zF*?=JZ0WF)zA}LLb-0;|+zdsw;-RCuSc1An7yW{5@+@Zpt(&326}cm03h;QFto{OX zE_$k7$<{aLT~)=9j!rRQO)4@8QNV*tqPRt z&m4a-_znb{gS6>++DK@#iMA@jk5;t$IOEW6r0Fuxy%E&0+sIFMNX`JmB5*N6z6H8b zjUJsD=VRR1!*e;S4|*JO?cHV_fYYcj{U%b|Gvz%PJ2wDzJ*3N9%EKATFTC4KFIm`y zEarfY?)L%Lad@rGxCrgEE8CD(DKv?nj*PZ#(cA`bW*P57e_T`gyaZ&KCY*PzRz)M^oM!TU!#0_$Sx zH%C_D;ofp2stb#DBbcbJ}|m8nSC!$3F`nNFs+e)QXtZpCJLM+4E z!0xW;2duyDhSs;!rfcxu;I3nGb$IQH*jxt_DW{+2Za8B0#80rY70_lS$6a(YcZ~(p zg>XC%&!VA76?R~rroH!(4Nr|f1{IAk9>+J&=Az>`LJ7Nx=cw_95W0oHCQ3 z-vVe*6g#jF2#mINcBfZLiMmEB?U1;y_&kHOG{QoT0?&7ljb=#tN_gOT^Mdr94!q~V zyDT)Eh`c?_`+tIg9su+3IU_SiVmjZ=TxfK|N#yJvW;9=b&nD!j5^`M(+Ki{w`;pwr z^gjV@Jd5q~Ug;=!YHXKTt*Rg|1^F@K%3w zX}Ld;fnS(oHiP1`>HRXCDh;e+VPCBX&;dY3=^}=bouC{41cOaTD9%jS+J0VRZ+F zP1p-Z7BdEq2Ws;(C-b*Ay|x3kv;61D_21|x7LI9UE7OOOEbh8_=TTqAxD=%N0ys2= zYOZ9Xkgn?RW&|a-@b@&@k&3NY4$s}YGv36zYCLuIIAxlkH9633Grc%ko2&6S<%@&+ z3T)_do@t3vDRmtB$51K*7$<_MCmj}pzV+L-HS$%o7s-=?=OFVMe=#S1gm8}X1tO=GVfLdFs)V|I=_P-_J;c9`+b zoX5#LX+-~B`8xvGzd>eSr4RFkX!FX`N*-FR1O4>r`S}pt|DFGfK$D%!PVc3bGrC-; zrqM?Dy%hEIapt}W|4tA5Ru2VFLob){hk1}dquc5+4=}q{K47TM#|W=jpmpUukG6V1 zD{ZZ}drZX>_av=70jJhuEsWg>^P-t`&V2KED67po#HTz`JoGR6Jj4GH*yFrle-|>U z53&(jbYnoxpLuIuvG4}PH~zxlI=+QFd8$WGOw9Eb`Wub&3300pbbh-Xn%7~Z5`(av zK8zbffV%=R_a62?JXv}JZ4`%lN%a0Sx+oU6BZD1j-Mb2>fVcZa@xX3Gq_LQJu_r%M zUkbj1rmd%JS!Av!xIK%F`4np80n+0@ke5~uqYq|S%MU&s;8`i2hR`;l^_P)3|1&Bt z%u?2nk;j@(1#f(-cUXV&U5o~L5{q{yK#3fBRkMzm+S3E`}%X?qCe|p0`R^f^=~AmfTWE^rk=> zKEfgxQ*aGwY6NwhdwR2&(HGA7=LObiBNFCZHwQi4i%sf|P93AA_RQAnv+m2`srx+6 zOO3)SM9t0EDEDD9uu>UVCFdkzWW#lA$qb&k`u7CZAZnaM1Jke$=D^>B^-sYj?qLkj z>b;89tOE4TZ@f=C2I{&$F$j3gVziz25$MDY@aTeF=)r$R3Ux-mJ<++0`F|LfttD(g zJ)_871+KwjFQ=^8S0-UeFM+f7INcB5jh{)T<}PZ~3)Tw8N$Ra{LJ8x4%qr|i>2AAO zldmF;-b&UJ-F2jhM$WxG-Z|GMYDJ)(gQ3e3#tTm-dCRg9^&c>A*b0^02lj-~DdwU2 z_w+9uVf=A_tk(0Pa!y%FGJcIHuxCuUt 
zyghCV7QhVfp8MF%Q&%<%X!im(@J(8;1#H(DwVFb=n#>&LQ`X!-ZD?yAT+t8pB-(4{ ztvF@|-Z!=f>Doq}w&1i8trGSDNQvtV%RP!zc3_0|tf!e@)97n6yp4l~Sx{#g&ss2N z(9^$-UW}gX3bovu>j*bDg3SzS8eN}>{5+4W6=Ec<#f;dqgRTi&!=B;0QBFpSc0jsc zfU`#XH-vY`dF$Pn-yq{DQ0*~vTMhLrUYHKf74UivUbq4)fuy*PtPi3K@3N`81gyL#brTdWgfyE`zdo>;VWt+`sSOuB zY1HQVj%mQBL!-W`2%;qzTHr@xu4U8&nq5deKGTZriXe;HLQN~;; z7x@f^#yL>*B$~B7fXiK2PwYe^)ir_I+>G;Se;M?Pr|p7#dyw*>)mTCMlX=pc`q5zG zIhlTZhEu~Fn-h_&qVRY=+%$(_0+=fqdwKFE*cT7Rx$97+F&cf2`Zb`;$CPM6Z=XY9 zV_jduhP1${q{DA-zFP}+O|ZXfkwj0?O{Xv8M$dxJETqS%ALA(oL9c`G*(`el;g0t| zG~}lZ?KGja2eIx?!BbDinm;`Tx@>|{cTi(B+%mJhvD{_R!BMo=ml=%r19+EU44UCR zY3cCVRrwDAU*dtbe;|p;(B)omFxJ=n;B}#=W1m@_y$jrP|DHBXMa%RH7;}^jzLBgD zyc@(g(-P3RKk{@C98)Rv04-hQo#Uw<^e1_$Rc^>^;wrNePePkt+1y#;;+gGr(6%Pu zuRzTv^x~bDuA@4lKcB-lqj95nyA+)p4vyuZekwD-y-58UB*+N3H0)Y^N_@`8JKlap zuR6m^Zv#vKre6GiJ66?<$$!$8*{6C)SFdJav&ptMjO5?6aPNiO577 zxD`VQGcaAn4%BC!)B|br?pkkIz8MMr1ncOIX)FGklgZIDygQ&dT-UETfqwO^n%7Ei zWi-}c3sAlVAC@vg9Az{;3ViX|n=dXCR0>@9OR~^_M2iB#)WG>!p*W?~=}L&38!oPG>xoeLeFLl*wYSWy#c)^D4ScSe;RMrPfc`U9=s08RTt zZ%?lo&%Xyd@ggO>5pNNnXe>%2B+J|cMw)9evY3zU2iHU*ZXOP}-j4CixXrfoHirH) z;9*&$`4at_U%M;X>h4J&=#UOS2J+uq$kr=?bs9j=nb2t$yk3QkYzZuV;eQOhdk2Om zI6ucO>vb@S?LJzqgMMa$8S!TyGj4d$j1qV7*ojj*n!{9|67-pMQp$V^SxE3< z^hN(vCEAiN<$?GTaG5*AoD?&VkJjM#ci{hscX8N*yu9m4k3RtS80P8in0JLIQ}zIN z29i*KryJp8B{cCAtILnj`suVE$1~#r`?03nhQ{55EPAKcNXqFM83$a~>2Wk|kEN9q zv|%ThCxbzL;JE-@777x9o;Pm&2FLn7Vvuokj=sheFpVA!P8D? 
zsd2@fu&pU*Qg6!kq>taJ^(7E{m%4MQw$QjYT6PROor5&~K`%y`_d{RxDC>8MUGDR^q3F4Hc3eZU^-dY7?B3@bG`Sj**Ob=1uV5%|dZ9b+nnr*{BdBPe(Ms?n5?uB( zvl>hr{`Ve$~x6O>M+82B&GBz z)}U9tNylh)AuSc-cS!TnP^LM0a|+t6M6*@|61^5W+=nKw1l#Ip^;~RBH|m=&*AwW* zmdt{$%aEh`*o)RsC6Iw-c;_Pfcj20h+%eu&^89 zt23Ku<|p1jIt(dvg&E#2;e5Rv{4vJjcj)#N+-wMs7V$*?rjlI(dD#b!bAiwkOlEV` z({m7gZ^yh~EB3B3`uZ_=^rVd~j8tKC^nJjT3fG45JB_jYDil5qB<6Gd1u7;YSvlZo zP8fN80!n#bf}ZmlSP=Aw^(dI39a0+4X z{q4EYTG;5SXl4xBn-}f!F2^Rc=aY%SZ+$VL@1r{Ny!>eXCFUghQuK$dr`3ljS%$yn zX4CWM=_avmfJMmx-<$ZI6tr;@_3omav4Ah2-Fcu&9;lrMYw6AB`U}lhX|8*7_1dp> zJb&r;_23)&>ZS$%mtvePP3fN03s1Qk8Rxs%;Oxzbo^qItetB~DXC&TM2JzfzIKMwi zFWxtH1_?3NsUXnYi{$C|8O3{jVs+@nQ>oqy8G6dv!N(*@7#pH*EhQK~D>MF@2W>X; zoe%l6#0~WCj3e~B-HuiDhK6x}wX5&Tyn46t*PpwfL-p(k~C zUyyGv@Yj1O_aS@ElFj~Bn9`SlJr7X5N|_t5Szqy=H#NTpSEfPBUwER=?k)J^xoSOk zZ}U8c7VZpie3EB*kR5$}hw0DzEcZd*eL!bdoM&DB-&U` zIadeBLZ6)&7UqfB2uktpAzJq{JNRB24!j%CVhGTMKE2TMXKf`WY{@u5VU7gT~C+YJo^t3QiyNL0?o8irw>xrS)kf3JRJ99HF3v5MaDC`Y` z8?Xc3Oz+-V59nY*r+5L<2P(YuNxfZG;2kYT`8kF=zUK`e#+8%e?hGLO~ zvjjHFle5lzI$;a6f0Kds5_RWL=W!&<+q9e$uE4tg$p}$|5yBPMR;0-|IA@JdQ*I?z z&+$qxj`z^t353zun;!6V9rQ?`+)Bp5pBM|=iH-p}@4n2YR9$SNXKC7_9iCB4q@~X( z<+;LCTDXI8$UA#qqMgfF1oH#U$LjTg_S=BxBgVfjV0D!hMM*~M^TEt#1Qc`^*L(Z6 zFlv2CZ&TpJG4M1x;{WjVCSX2S?E^O|QCfr|DN(Y7#*!^eWqWFD%?!y>Ot!%YgAkz} z4?>H`GL}XLLnGNTgFIP7q%oGp*ms7?3`Jb;=kxvL`v0%@eXsvD*Kg)G-|t!O{hV{3 z``l+1I`%X==WK2u+;CT~4mFLuGS2cGC2s|D=!d{&Fp$g!`j5f247d(}cRvG7ALxG`#{v7aq2R&2Z%^N_soKe$^az$w? 
z5_~f0ZO}h7T10DKLp^WNGCNQhlz0mXXvlcjF@S6w{OrsK{52Sw^*0Ha^i(cIs!s=V zn4#d`j`q!bG&opI8iDj266gKXo<&u5qrj;HHN5}aEFPY?sSi%BNQ_bNl%uyUWFaYk z0O<+dn!DDuP93P<6Fkh$eI0w}p58fn%VNgvofqaR%7Jbj;lW2x%{yc}1nY#RaQ|_h zT?xj1v!yI!=KC^OOk!*`zx#Sx+6sji!WYkWRl;JqYB%c4d#ueA-VI2+VfHjMt<5JJ zD2${qhE%W3eaO*jo_b$}XClm!qudRGGG@lENPCBn0?)m;YV{0iEZov3sQ2~_w6rJl zS2F-7(|;;`M9^nKsG zq^9SASJUSu)&l0aH~y_IcHBEw^nrGPD>KmNKahzk%$|hb(>_u#mX*<7Xk(6EqtlG> ztcOM$%bN`Kz0>Y8dhS^f<4*SS%vi%cw0HuEaJD>?=N+MeQATC}Jc|qymBWZ`%>EE+ zv6{t~;9J z&5>uI!4a%tHdt;)Lqn&TlNp0IV{ca?ugk&03<~CaC=9h{!aZ+f`iZACkoU3B(S58$ z=%%-@F?G#Vcm~>(p>?C6KSxKWLFw&q!PD)Huwkuve*u_dpjJB6@ywvNCa-}`bx;k(*+aJpA zhhE0in}t+AzBvf>LvF)ncmwCN&~6YIuYrHB&`Mo+P>^=a4Dvph6%4Fj7chKIvuRCj z?;f;u5gjm3T3PVhMvGnUT3OR-+axp$DOUBvgD6{nN{Tk& zwqR^}6ddyLt~afiwIGZq=P6g56-Yt2I-6OAHg7ELRKz|%L(gwBHZFupw7>OnZ zCH2H^1G5^)fP2Pfj4Oh+y%yNP8T3;azGNVw)#-T#mP)C87aU&ViQeGFSc+-%;~t6; zb`$CCZJunSokx)~^C)`bK`medD@Jr}v^rzFw(F>CP+$vkp8+19QQJJJeX08l5YB-^ z=B*C-3SR<8o6ZyChy{T{ui*C6Dp0r?%wr=QpN|ofSh}t+0zp> zfUhvWn=l{#28@kZn*gn~_z8^E{@n*}+rj=gKR0V!QiYbogLya1yWSj7YIx-VtC)1(Zv%76;rV-?Makpld!uoRPNKMQWO1P5nb z-e}zg%5Mv-ihC=^f%!wUNne*UIr;M_f6bw47ESLS_Y7H4;Cu?qJn7^K>JOpXiNH#| zh!p$_Tj?3>oA9tBb2c+RG=x^W;c6fLYXI-h(Mtqy2&Xp=dE(J~COmm$?z&az{bl%6 zHJC%1FLMn&G(|eb(^6Mv2+P67%;%nLao5S)RtHc!9z2Y4`5bF70cq?79hB4~Kr;dg z?O+~tmY&j}veC_rq1`O-eg~Nx2m_K#;k70buO}I zw8;)|H5YpdGU#m&>dggcb~kO^LXXVeGL%t#C9qwCPkN!NApvhNdbkthx!J~0Y7g?` zxO9PjV`*nFGgIdszRj{ z%}9QiV8vPpI_o>rPiJmUW8aM9(x2y9OFdhqq1dOC9M2560IiJWZzx{tD6RZYR8%!e zR)9_&c<)^#dfd7~Yuoa64Nq#e1E)s(4X2*|8GR|Oz}t34qR*Zk4SDPI7?$Q~X}DYx zOZOSy$|D17gR*&Odu~w2djs+XembN0!h_MEG<_cB?Vmx}4&e0|@;^5yWlwJf&+iB% zEu7j>SjvMuGkb&?1hkxQ(cf>t^DLY;AHn0i{|$KvMGBfzZX|lzhu=ohd1AFT?d0&? z`!t3Ci9W4XSOvYMo=R>_2~W+w03{>9M33J40gm3IYDAo|X5m2Yy#_~V)96P}5pO`R zLQ!TXvB^dnN-0m26@b?@;HkKI*H20KS&kBQSS!AT*1a4^L}?(+W(571cJsg?GxpU$ z!+Z007@z#qyqhw&&@1nT_z!I~g5$$z;{}$G6BzOo7J$mG{q;S!qG#I*`SUy-9HRRDa6|br*LWkog*?f^A38g1jZlWZ#`KVv z(jgz<|C}qf#Q*j4zkPs4&3m)<1p0Eu@4X0Z(AAHyAYIYhPtoyco?qo>J+h>B#xN!) 
zQOjAjkzkL*oBNp=)yA@40;l3ou{BbtUwA)uY68}x5B7FE_RommO~KrDEjFSLqlEEq zgRu=0u{HALZ$QHIv_lV#ApTesJuzv#`#>r zir2$(|G>x)3B3A^E@L0hQacW*j09$Hb1a6Py3Y9Sj=}(7H@3bXy^Y2CbwzH>Lv4h# zSy9HkI*t+Db5@Z^)62APz8YgsdSR8N*-T*4zu*bh_xa{&5l=9io2MP4;&80vRQemn zPdYS-pw0*2xgW{#1YIsRtRjC`frokb8$t>5Yk8i=6|yIit}xE|R6SKvR-Yr0xn zi=8+IOc6-e2za{^c`z%*_lyv!)N_`&1xh`Nye>1#uY2l&t1th2#=GPr+A4et$EM-QK1YpTNkX!^{Cj?M$tqn{7JvmBn9pJxws zGok4U+Uf!Ka_Gx5OwDQC*>xx2Yz;>5GomHZlRl01%rT6Df1G+R!#~fH8WX0(n7_xh zxtUGOx;GTszE2%JpDC1y1@fLqgE!&2esdP2r=cvPk0+D=!oD3w-n@Ze9#Ve^8dL}3 z1WLOz>6+5B;_39{9=0R9yNnZ&h(pMut9x@A&*EttWb_!)Fo-&J;ddHPxUXkCRu&ef z7Nc$?Ty-^Sr2QB4V|K}s@Z~k&iw*iNO6~P{%`(8o$V)x%&Olq!`%m;L*2ke+1L*JV za*Ju#xIF!)E5LOw+{$4!xt!nL&1qzVYg?mFydf%uH|Nl)n^2`K<=y`;2en^@cSe5d z^~?aP!vP(4U^g1`zmoJiR5=Zg4j{3v9K4&pFuL?AxTgZsURu(3n_`vg&v|I*^Tow168A!==W+Hwb8|(E2dyx)<(!C!1K) z3GP& zUchl+S1!=qUzi1@U>7`bG#fsDK#Pa?@l0P0uxidzBLY4D6$^(wTUG|14xx@`KK0lN zy*sbwHQxU(~23s0W}>NwPS~qBNKhILIX2x}R3-Ng<)tdS-^rdIb zeHioQRfpaqupfz(_4I5s+&42+7PUMzy`0vJQ!uCYPXXTjXm$L{={fMo4rZL^M^y?O6`a9YE>b0?!>Yg#P~eAlR{kL3)# zo&>wEkgeL((QkVPRz=B+Vyxx9aqMANFsz5gNnrdnK5IL8CIZ25Fnl}U!#Mh=1kDN~ z0Rw2U4db_w2}S{zL(7b>zK^G#F6fvg8lvnjWp&oSP89dKeI5VfV& zEU-NYFSj6%iRja#aMB2!I>^yfwDTPN)Pr0Y9cmQF^BC;suhcUKW7z<=BH;T2v^W2v z9^ax^r$OMK%Fj^BtY-dkn%P(p_<^q@L9ayUW9)7#cxK+H8EBkiZ((3G4~72WFQ{<|csvhT z5J{{L1C@69k;mM~`WpTWyD$m1S3@?Uz_2ib~J-o)U_-8u3*tam@gUK{ha9qqY7 z-=R5CFX~QV)?y~QFxGgnSP##{CIrw;MS{D4^9dyA1K`>M-3lRl`ev#D+i}K6PpF(^ z-tZ2#+o;`edg%}LZ?lf_#OGvS3Io2o=-HjOKX{WLnt0RvBHoMvLaFQQYc06_2b?MO7q@b1ixEgRtQ~AK=&F#g*`~S=NVhjg1a5&{xNd#6t&{9dQF(eU4eT| z=*1`kXBn=d3Q|+=<%^WkFPRCg=b`W3#o`WyC$*Q+o{|)cB^b$b{R~}@r`|}gx5>L_ z_9<4NIdhg>{20#_4V($sGJUKw;6w}~MP)c{PT#snt+_ef7oAN{OQ2OG^f^V(VYKJn zG5Kh}9Qt$$jN=1+{tydQ8D8m~Gq;>0z(@2u9UCzO*z}fv17(fyy#`%7LXikCGj6#( z+U-7(-`4KXfQ?C2)AnZkHzw$UW$`2;lfT&$TCiUZLZ`ui4LeFnW=Luv1Q zDMK3{K_8>Qy<^sMTdw!g1L>HEUDyIt`?23~l-Lf3X9O@$XQapvcIILBq;)99y9`f` zqF;Kcj3SfM#vD|l&2xNz7^}P;ER0|t7f5#ma&srr>W;VPT;o}<_hCNpJydgdz8}v{ 
zK=rm*ek0F{@pKw+pGDJaLJ=ciw}Q)j=$!z^j)Rfy8Kpm!|I8sf2VHiDsXv@Ihlx9~ zOR-)x@dp%RmM{am?5XI9)X4yU@6$PrXTq#hImk`ueAxu(_d9f&ja09vrL#QuK0xo$ znhi(#;s0>uHkJ0yLSG}?t7C2EVkhPTm0qLw==XDU{4T7hXYXD>rtd}WjezRP$Nc*b zz>PgT{{-Gw3%nEFN#ttZm>&0-iXbm}fUh-AEuojhEOwzgHq9Uw@;ez_HIkC9Gj9LR zi~POAe+4M*TR-`Nzy7vd=tsYXA7hg$A{C(sv{2;H{lS0UM6UVu5<|3&Z4A@SkR z{w>Pg4;MdyZ`+_{Gjz?H553>>6uQ?O*&2m?GsAFr;O|*LjaQJ&WU#%H5#2~DPxs!9 zCHJ-j@$+WaD*SDTZcheEW7RBWRug0MJmc&A7UTKo0WtEWI$^i!G$d}f}1Fv__X_Gvry&D-tlCyD-2kW*J9 zk5hjK@VCSQ$70JC@EJ>6FJgh5x461haZAKX0Oor?q(0U|TbMFPhZ?CMH%Ybthh9k4#n4C4@1^D$(c=`j(Q-M9QA7`tcFj^_a~ z8P^BG&oN*Z+FM;i+mEt#D9U*95%pX_o0%bw+I_IT=fVCczulep_O;)!+o@>F0w`w& zgo?ltP3^&q7RFgP&+iZP8Ns^OJW4CkQe%sc0E4oX4HWL&Zbot+qlY+38{c+}_uiXw16nkub>pPk zQrmd`cgXFlHH@CGb@3qWurvktG z+!=7$l|dHvYb@F}nWs0XX||6&aCsxWq)@H_SjEF<^G5eX+q~th6VGSB>Bkv0?gobb za7+JEJET7jemU3lWY%u{Rc(RUliguJ@H}*yh7`BtXDF?mr}b08oFWFRnnN$IgMiQaaN(ReCP-QG|ZwVyQ%rj;9)*QVu z7IPUi>O(tiDYXY0^}(8?Vn4mx;cIB<4y9RzzXRIC^kbxhC&9gc%P8s_^fD0p6hq(9 zNX(Or#B$Ay^+lP*tYWsdhTeAYb_95gg_CFa{~R^7srC5p6DZjcIHGCEtSh~dFVC38 zBN0Qq!OVX%)~IAz^OpRIu6vO;^#+O{A|LwCfJVvw*iU za@&-4Y9R$ipDdt{`EW1{YBYvN>v?MSdUL~E1>#VK{IS4kX4YN-mfkyH7O3OM?K$vr z#bsueBs8fTpEA&=0WF+^58fQU0t%Y9d=KAF0c|q;GD7GUl&(gtlay*mxob#P3pli& zUd(r=pCkk6I0WUhu?~*c>SsgrDi)nEx}pVKzYdkXwbVF(P4sJa!Z(G401-b@Uf|9;`6W%_+MO*xiXv zfzR%u^gtt8gX!@grZgK4ITC#i9rY|bk{e}`h9*{mW*31kl78H^=!U#5L!RE_S$DWK z99?LRTDPnuFnI9NOmi>PeYosMb9gA@$N;hTcn{>NIM3!$OEJu2e=t zsRdB_Yg+K0mA`4h__oDB;SE63fYMX%MuptI+cFc#Qs9lTrEdU>w~%De%BRq}BoxS@ zu6GsH26uD(D`z8+E92$vKu!iQyD9<}O4W2)=o!p%-3wa?xw9I-iN^p&mtQWl89`zhK_%L|6peSp1jtJa1DwY{W2Oj^ubN@wmoZRVnS0d-?nSiJ5Lq&gVLM#Zi&PUi^8WR& zXw`dO57CbMUjJfrD2ALp&r>6nmP5Zf;QA}{+XNTAMbaB&qmUBMxp@|DCUT~4&kPq4 zV2}+562U(ntJer_Wg^+bp~6pShTf-**k5-rYQS4_L3sak??BTpKo|KD15V~fFoOFN zN~O_X407PjkC%XAVqhJ5^3;=V&U+Tn_6Z=3fun1H#&wF+P6eXtyf_h>;p&$7Pyls9i7i;wWmoxd(n zV;60wAsfbjSEGgM$WaSgPlQ7;j5zm#bxX>207Wk0B_&H5RPeuNq^NZUrNOJ2&fz*Ck>TgQ+>Pb}))9FM*gfd}p; zx@%&twp_k>!ZC~TW)OK1NnJ`Sg~3(%@cgEJTsi42q6NU+y`OKPVQt2TBfxnN^XH$D 
zvLwoUN8QKhy$j{4F`MwFgvPWV1^>gqe-TtK`D$fGbo0)ogVRXh)*t#6{gvQJA$aHP zVJ)1$h19t&@vQL~EOlpERe$~s3`Qfl-?4(R>nWc7K!2`9j>GHrz~a8`%TVYruzS*c zFj{d4iHQK-w&?n^)EP!ALm6osVp&@9F|VUD*)3>mjbOY9WoFe=*`GEZL5ns(@V)Ts74at1&A^ic-}a)l z<~Vc2&OnbBqqRxsL=JY^NUELGiw4Rtbfr7^XaT-PrxylNV8pL6vp3N8NzmCF+@>RS z+0eKxTvAfTVxwB1RX1qobw*QXuV#O6m$y6pc{;#sIG%&r2VLf1Exv$ryMb*Xzi+~& zC-4wNVdS7n;UjdCa?nQ4xXw|Q;La+DjC!xry zJa98lAp4&LZ$fb~5wsSnWxKX{yZtg)@lVheZ>YQ-2@@)L`?s(eDO44zTlY~$jPm?n zg3nPtdUXDRE7zF8>6Os8rsw4?K8_wnclp$N8p83u6kv^zADd==`Ln!hg_bO%MWceM z(8n<7;%%(q$YVD?ec<&>?8u$Szo#-s0>R(G_j|Ca@AAykg~r~Vr-%RYJ>>738$ehc z*$~EiX?Zqn)J9+LLKgJS=!+|We3-*d+Wr?l+=C9?_V5Vr-IRKp*6#pbaWMnlG35D2 z`rCk{m`nL7FpwT<-TlZ(ZN^3KEv-gNYr%0Ea`+V`^gNYBzP?0H=Awg-Q_480*}?zb z-C6)UYL*vsqvZ|c!)QQH&euS{M^fWO`uL4^d4VFFpT7d1RV43zr7cgu>ZQ~tHNjs7xH%;8e%M)ellaI zX7*G1^lR$FlCpXEuB3WSHslWs-M4CsM)rmN{(pb#%L;kHeCMu76RgA?*tRK* z@Xj$;Go$q8#h>ZVv)Er?|8f}}cLT8*LL5chE7^dKufX;sVQY*cZjFr-r}KdY?M6$X zT=Cn3U4oCO(H|@4%GC4NZ7DY&yAj$Ej-Y>cK=Wf^R$;5pVvl+uSw=)`q5NUW??rMi z2W#O|^kaP20^l>kRSWVIaHYYGBE5k*OlVeDQPIb3bq@ z67~@=8JV#LJJS*^{EBuOVNtv}%#-$Q(VEAxTt**ws!V^yLB=IdQyK?m9?>vHeoy3k zUdNH_4Dz)R$l5aYXJGBN0Q*YxtPpLO3+6JI>)E-?+rnVB2`kr`Uf#!ojerK;GU+)e zJ!{7qN6Zjrgjid2#j~!i9@{Yfx^8hjlZK6zM;+jxtEbKZ|56zvZ!in;Ch2{&l1OWF z=p&r=yy0p!ntc{Z#vuc-w9=W;&5Y>g(hi*zjfAQLnDH15^ad?>&c?X8&DdklhfL(D z{wz7L4=P7PW#g(nGnt8=41vF%Xx$4|-qe~6e=0)T!;C<0P;UV|F~5smo@g-hq+n0* z`~~`64o1y0$UuK;o`sqnnL&ByMn`5Ce?q}F{B1xl-p*<+AEW5bQ_IK%J&p6=W-=rC zO|&nGGBu&Z6eQ*v*x#V88N$6?q&L(X4y4Ao|Y!iLIY~;hRXHfw6V+4P;UZt zYGJWD)6R9|A`KdOro!=TJJfPtsyA=C@}JqX&I3g!N|>LeCjBQe@@9Z_C{wo{>z-Jy zNHCwn2)qu#K4MQuoi^y4V`Y?yvuV9zMOtA>1|3LnaFb+6_@aDcwlt`ye1QKBM zzW2EArq0*U#j}_>fkbu!s&q6v9=rZKE!V{ooq&@1C)R;wDsUQ4))(o20$E#%>^aBD z1*$PnMepEB+SkWm^hz7*FJOk;oi>taI|CXoLMMwNQTip#Id_ORsr(^Q!+a zZsxKgSWOAfleiMA0KE^;m%i4%@YmC!{dwxCy+kN$2AK}jY7cF-Gu5#)6#`xTl@iAI ztC8iH84ZKyZTVzE;qTyrd)MZ;G3q{(wmmPJ4V~&BkvqWgJ)kv8y(Hy2q5BnhX7;wO zVDEXKXOX1Yz@=}h0`idreb2ygzY{}*uW0BbG9Z@_0h=g)xMbNxl=`AX1g4pd6R_Q<6wyj@T07lEdHFcNsj_GBdS 
zCbI*%;pws+&}<*vn?pV4w2mx)!|TgX&hy?~!R9&~UJOj?Rti-21YBLTM=wJ+@O8%Q zX*+YCRHj@u_M>RV+N2Jq2+yPCD7)n zK%j@J9$NVvx>E@&sGXaJ#1^7d81LI)Tg*MsgtPCq{bB1aE!F=i!>?iK<{b{{a3~NT(SlLpPnzqm439dmlwg38U& zDz!;({CcGO2(&+p&0Ei`N4_QUcS}&BGW>j-y57*_oina;JiDyFzdIw)Pn}NV~p4z_xo#(+v zK}UHW2Mx2pP}^>7j5}av>hbQmu~5e>MtZvI^W#0ztC1XS%L3jW!Q!vsyZaJ1fv+U6 zEv8i?PD&xQyU>%W*h}~5ics1Z@_NjYPg2I-9)#y*_VP{`YO%xa}q_BUG9XK4n(`Pe2SJv=47 z5MDWls0G%kNLD&fc+awVRHwgskP&?q7B&LD-v<|sCY!+%Iq80ATl9DW{Z;{cBQzXW zzW|EE@Wec_M#dW%5e*)9A{|qp?_KGp~OOu(j>S=>%NW^OB_$@V}X!TWS zu!@f}+f=l!0k+#aSxNz$=Wgd>my5u)AK=hz^wQf;o`FiyU|W$l-c5TaWy7FYD4xnY zBc>vyW(hrj4vd3p&5$u;=zCD>JlO7mx=%w@J(Zr^{sEZ7fx;7>g`mMCRzb?3`B*b) zX(hOROHE^QyP^5^K7wbjfZ-~rRhsYmR+FgTg89c9+A{;bnIH9=xXW9gRjPYR?mfIf zt^L@A;{5*@>zy!qiA0~h7pF08H3#c+TIK`j z!_$<}0!qkzLOk~t(oM`^jJ;V@)G)=>lA4k~0I{Ec$B+&Ym})2bZQG7y5U{^4%x_1mVChT|?P`fvf15~G}w+qUC!05FC43>aHXa2TE1{=Y_r{{B8V5jpj zi!*D@5G>Hs$khm>$GeC0Ai87m5PCBm9SwuS`WWj$p_X8Ij-UNl#`%=n%)BF)kMR)Z z9@v7UY{U+?gfH$^MN!k6^D>Z$6X;PFW`+0A`@OVLAI`7G;(rB<$xw6xT6ch%)d|Ww z(|HZ4$)SYNEi<4|Hmy`;Hgpjf&8Id4s~QSekQxeP_M>D$m>Vw{hD z62BMY_w7iY7lSv&;PxnJvJD)eMy>FfmS_t(PF&jF@c$f!0AC2DK1&y~OIb*OH-(#a`cR@iIK%^Mes~fGUPo!IKQi+tRM*$;J*TA^dFKYyeTNbY!Kyf8%`GUifiijz zU!tc`fn7PoUnx=xZ`+4(%pFL*Q|1$s4H!UUxyo`hjfdyEYYp(!Jnd4#1JnWeU%wx92Nwd-1~g7LuO-J4@Qpe@ZFS zQotY2H1tEm&0TgFd-DaW*iKj%BL<93HV2H}4QJ!E(OvIqR)5bjrk?;p{bio1*v*s0 zjCy&o9^Tb55y><9;t{0J`vxKy6(6O2XTp`hViOi61&iqVWg2w(h`x@)>!nzfC+OSg zw5!;JOK`p-^;RRHu1X`YKAyuFf}}Rbo?M5k$@~n2Zogv}8`Ay(VA88K8>#pS`~4U; zZa>=Jk&pX+3j)8$^UPRUV!0~w)SWsbK6>yi9%}E#5+`T38d7FlfH7k{8pgbY&%4R(};T;3xX<09iz8`P3o`=ME7Qvb2O7z$D zd=j`#2ma0QFAXiN0%k|xw(H?FwClXABO`;k$z3O$0{2k1q54pGD%h?^8j|S$CR7_p ziGFCw4rIlXIS=zj?}6tG%p-S&(#ybR8*LAw(6xLDH*(m(c_x|l$W6O0DAC@ zQLkXc@f4P)Zr1?aL?rHgF#3^RjVH+EyV>a1GGbf>*SfUrsni3&tRHw9T6+Td><%7j zXi6>+31Kd9orZHu;A%VYcmz4gf%3E9q3iEDQ1uO97!Mx9k(49U@|#|Ee9CQ20Bz83ARCUe{x;U%x3FT8U=W2HJFN^)A|WrDNv% zqR99PWT+zZxL#1Q7W~p0#6y=*uKAY0lZ@t=OTl=yE%Yh8#rU{yw;3v3fpVqbNhW2h 
z!b4AcmSd*jxq)ZVgAbs?8tO^2EXqe>E9TM98YIP4vDq%7dG9*tIC5k*z)rM(n4Zn7 zzaNP+>s({#VxI7Rw6PUlm4*(N0{o38)4%9RyJpO<1|!GHlPeCh813M>Ir;V1cb@b}facfG zhDvCv=gb!a#dX>(g05Dg7tdXG0rPp3T+3UtTUSC4Jx{$39xnth^C|U#W0{l~27E?d zEnr6GNsdv#R|l+D0DT)qYwr{M3TgQoJheIIAaZ@;8ICG}t<8ca(ZKFqYYngiQn4Ph zwTG#7kvT*=peX}ICo$j2*1#`N4X!9B)dR=<=o;2@}_Ebd* z{dyaAoq(bb15XsKMgy5~nywm*wKPBFR@xkl^YK9#!FI^T=?j4`sA;~F7i+;i|;B-&J`@{Rvw|=A9$jDN9 zy&GubsDG8e=5LFpwOFX3jMoRQQj{|$>KgN&kD$q7Y`?ig+tbDtTF?%j1+SBQY5?;P z<~{w9u`I^1%RE^LOb^3L^?5#4UH!4 zjO7+!kIZq>6B%^eo(1&(h6?B5tG3iwgOorsLc8)CdEzOPH0s0wss3W)(Y@v9I(VIf zlV!oh8#y-uuOo^#D{95uX*fntj{#*}WYMUF6U;Z_7<~#O5gADJ7D}0E{yOrIPRqs~ zuf!^tK|s!Jhnr_9?>xqQgN2z7>_E@XqlMlf?|rv#K-(K=%5UgfGTc1}eE)_Pm60ZM z4j3so4lA<`?j)ln=0KPU6dj?cC-2P()dzX)XlolI*JfzF%V>+-ZD+R%&gd(j5(=zbJ=aaK@< zveogEUPPL`DP%ObyPNH9oz$%X#moa>e4LS~xwIY!%--i})|*b0ssd%T;AU>{4B|fY zVIgoCF*pEt%TX&5XzMUAoOVMlynbD)EJxXymkp^%=l4#4L98`t<;K!@}zT>}4y>DN;X%Yh^nc+9q8 z{7odSjYf-{!97Ns$LVuDrN>hLI8yT(Wv(z!?@2p}NPu^2pTLfm1SUuI7EsJvi@iNE zo_5UekV`+2!0Ee&XoQw!4Qrap^gIA*u8U0Sd(vCk6i8O0=@WQsHrAfVd1HRB0TaiT z3&?AAw9*KE_t3S)3Ft};{;PScEGE&WSsu(6Y`%9-oO$EXEbx7W8asLGeMt)$J&bUB z77QjLCp#%){6T-pc}q!eXy77LK|F-Suz-VTCS z1E>>43ucj+!;_ATaC4DJZ%|r@Rz8ir=?Et7A>IcZd#Ufe^(Sas|JYeLJq>%k6SzWq zNFRfp`5E*m8zVb7@Pc_;{Y|V#8r16vuRQ5Jjw&u5vrAtO7@~U+%z)R}7@an6|H=+8(gV51jgZq+=!Q zVk#QEllE=^$!@5x2XGAKJuBlrdO@VexW;qvp&cb-fXaQ@snlEuPbUD^Oy(X-@JGeK zg~`tX-}vj5ISEs1Q7y#Q0kq4@FIQB&YN~ zze5{^u^``rQ(Yu}0=oMoSU9d60WR+_FOTf%8LNvXdUy5!#^MIdoV?S<5$tVf;_26X z^kJTzV{p!!%#x_nf)Y{4>J{YtCNkn)U=;FZUd`!PZSQfoPR&mA=={N5ua(H;0Pt}a zVis*K<9mHtbmz(a6r<^S0+Ts9ry=Q=p#GykGXV*4O&p8vxHIxR(0mImr9~0!Mb}_f zunAbs0?QrzHXry+%I$^sNl<Wtmt{Z z>ye9~P4Ah~pAd;A-h<8XWcX+})(B}dbEaOq>c~kItcJdm(onJ_`gSi6-V;d6DJ0x@ zrXomfQQEZiJjlQX#+DcO_9gvC@%Mk?m%LkZ6z|QNl@CZgKdPUl0I(#`;}X8l4SaIe zivm-jC<|phvwb_JDk{+WkXNQ4I(gd{^S}Q_(X!=@zj}}o=FfT%ctSoV|M#R}IG>t) zyel*>QrQoF_UBszXsU1EE-YsSYMI6B8h9Q-6Yc_oI&icoRCaXqk@t4bK1C{eh%@wCN<8|_xIM_Nt?#d-4k$rB!BhM{RgSC)rvtoA^)wdxOe9B{AkV@ 
zMg`;VlAwqlLVbm$`8>)0p}Ov>R19EEKyKsFo)@s5?lgRW&c;ydb~)>p=Rf;?lE0xA z^b#7M8T!w%!VrQi^xyyfEl2gXjKlVP8LZ=*V0T(T!|qrZqkN3gO69Hhz8J0h5L(*{ zYhezMrx}k!kysO$*Sri}jjd{neLRVmVL#F}2R@t8$Nm3ljBsse$$8Es-u6Kzk251x zK7K-XTyM|BI;10|b-{N$WqyHUW=eiF7%SX;cfP!jTITP4inkMyF!S5DhgO4tCk45@ zfYvAQaV@`#HoccEjuyKImbeKRO$8cHKP598pU=B%VAK&f^k#XwG8(E>0nfISa2?VG zY3z>0O9vC9B--%T3>Rxa>0fFnu49M+s)8=;YLM8+Q0 z1rBF`=lJ~xEzE=Z$^X3l8D2W$&JOyR%JUnP@&>)xwEHZ&^b9?x2X9{i7Vk3C5A-FH zpajgIuD5P`m!Te(EPBvaRFmhvbp3-$pxmYgbQND*|tBu|oM6+~a`O zEG@=Yn!j!a)OZ64{FU~Ut)7gLt`?1OGQQC$pLxh#dwMpy?0)|DyrcKRbp#6|{mr;O zoF`+UQ#@@OnSPOZTsJ77JS?HN1m0eOL$T0&J$yB?cQ(CVLe_qQQ@=2ZT?Y$uP#L-D z-8imfvY>@GTpEvG87{p>8_`he4=kLy-&O#Z^6>?f_7+n8ShcVsMnrYt*)Q0HZ2_%a z%NC)GHwD*4N<3YWipQk_aCpM{dwT8|$Ym0<@XP?Zo=8DD|9Nujad24x^f~C>6SU|_ zt66+|9Z1dQUkcmYlK%Dt6!0ulb*$}HEPV`hF0hulMDK&3-X(bKu5SiUhx24J{LDcb zoomQFcOvI8CK)%W|9LMkq=5f$sE|!LeeHKM>N-F4tev@kuhQ2aj4kE?6L00=W2|{> zAgB7U{|mmK1V3k4!P?bL9iTU|%Tru>qfaAwH-UN+bI(7Jkk-hB-qbnF zzLl~}%5>tbk+=03fxWrl0uXeevNUJ9vk7@JlG$?gZ%u7i!_nS|sE>FelmJhJi$_V_Av zx)ID#!_es`k*JQmF+BF!NtjByV2|DUYi9;S%!kwDvz*pd#g2ir&UK~QE*1(Hd za3>tPd1}}5gXTgA*`Zm;yJsdhgMq8{d08_$UtC)oz9^5T!l#9o_q z&)dX2F?|Aiv=h1v<4uF__OK!5uIjWh4=P`)X(UI!xYX>&xb0pyKUThBZkgY33NKrNzITlrfs*agc&^2#17)xzEQrDsGo77zk z#LqDTIEHxN@vuPRUxmjFz;-e9%-#1pGP4ty+weS%Szj_7{1Zvh$2%*q0Wb1AnYWG| z1Cgp!bmlQ2ufyn;L5&17bt6wRpqsh(PSdXWWx}w|?vi*8-4#gaoRc@tdgFu{|L$e< zN(5qKe5XOpWaOt0l=A%wFglIigh5d=`*;s-e{8BTZoL9MT1k&}(Nc3l8nsp%-8Y_L zJaVt)9R&A`j@C!p4=l~xc7=W$A)i|UJneTR8W;tCnlS2^N6b^$o*L5|*B0CwLqoF- zb)$y9zHhPgW;$&ScAm8MMn~@r_!SJs@n#`ZTLGlT+%~6Qb7XrvWO<~wF%qJET?NKZz3C%)lHa*#VI~kigXO-;D5QsCEVLaCCmVuU1u!(L#U3!+&09~C z>;Us@WU4>hTSx7x^cxk71H}XTa2D9qByW~*4699T_w9^xHOjaku)fa9S^u_YG zo}gZgBwwVhkcV^3eE5_&50y}RuDT2nP!JYHSrr9$u!<+Z$M{B14XB8Ck z%v2;gR1&(61G{Ulc40&>Ly318Z;ief49_Eg=L9`^N<(koTC}$wv~#6C2RfaBKVA9X zUEoB@7v@=CX1B(?*9W5s$h7N$rBL+@BT{~-hZ`fq{lsD#X~0s{AErb7k2 zcUyq&D|oDQe@MNF%!a-QR;)%nYzM=VP-Y&s#LRb}q8p2$%0w)fF`e#;D4A=4G#ZHY 
z9hfa_CLhNSZ%{4?UCdu-T>biBo@k{1D{#QvCG~(~3{t4y>>(`31X|q>M6RydA&F07 z1s;L+d#P`%uo0}wkf*`4c^JH2fR4^K`$B1bd+vSKhSo}aCooI{H{)^ikhg$BW8rxM zI&4hvBrIxYG`s}uHidrMXwN?SfMW$DYDO?({fqKL(Xr5e#nbekNPAaU6;+_dIMyZ0 zk1W!hK^-FAYvcks4>W*;;6*-QO3)al8ar}=h~kEfP5&_Xo$d+KC7Etsdl zTnnF~CD)Mp)^Pm>HLlZR2}-=f%1*D_88{J(^$B^bm$EvYjMP*_k{_m(-K^-gBP}mt z1H9SS+;iS@`Yh6(hn^$gZ53p!5EN;Mtkhx^+5yO4M|R)g$r8BnBmLb;tEaFmHIa_I z(CT66FcKW=!rPa4V!YJ>TJ*j{CHP~wUY_#dQ1t@5@MiIqSc7RuN-Es&?t<#rV$b}V z;qnr^JP%*IA^u^W3}SZRC}(`{v-~arq!EGa4hEJtpp$WGp0ce(pPtFl|2_?EcnZvo z5uAetE&-ED^i-JN-%|fdz}Z1 z?dQSI-B5oQExdw!jDvpeMw*Rs9xdw`_2yzTj`pVJFk1WxOl+$b6b^?blhBNGFl_?= z76RWk@R^9+cpaPS?Uu8VAy-*Oz^Z}wASs*a+Y|DYfy3;>=7Svu{^x*vM!=&E*dlK) zHP`hIj3H*@$wm|23#?c>^d=qYTM4Cdnv&@ z(pb6-=1fDup%}cI4}ML+(Cn?o(~JNzvl^QB*7%gu^t}%~yMV;F$FdWgEq6Ue%=IY)NB}1?#ZEZj4Pg&^$b8?XtNNhHcPl;?>MMzHYo38T0sx4 zXW!;OqvCG=L?XxeDE~Gx(4W?gl-my9Dx!G>(WV!f@g@LwX{hgR-s^B^JodaG^j1Ff z&Q+ma1e(;0Z#SUJXGr{9c=jp$dK;@YfN$05F%h2LMaeC+T@w6%hGXR@@9t_9@HFDI zJaSco(-qy4R?2ozlgC z;|0o|XC_?`8@m)7KH^z#%5A{Hc{`}5YDY8Tm4@a$cyl-Z)d#ZtJTCzK^_=;0gc04i zI=`D;CLFqtrG825lD8TpQua>zG8>)uGknD-6}xo^%=P581M+If@Ngjc3rl&E+T)SD zJjj~Tc>xHd${r-3J{J2VIJ`q`J#?kvWF>gmlRiSwOvG-sLO#94I+FJVfV~`CT*k;| zbX)~E@&I~c>1x#bgumWqT9y{gXI7APQ0R>8dtmSqQYmfqn|jY=IBkadxd$$5t&N<# z1*ZB{^#~g$7ao)e`C#?w<_X$V&!)p`^YdB4n+!Zjye;srHs+aa7!6v7HHqWpc3+IfJ{SkwPILeIAR$qW7B!TTL_XE@gAGFo??@$*OKpc}Cp z?gz}ovQ|c#%-toyJ}p72gaqr9~~6$|2-l#$F9Jj;~~&-4#C({eP(WqjDr&m4Nl<%y6T zq3vpLN3Vmoa%W&)r(@4N^LLo>_Xt+x0@hPs#UqSU#$6Ahwx`LS#kRYT8As2t!OZDF z+VKqJ8aNm=dGn?>N2aFc{KGqEu^>5 zj!Xt+rJg%Kg=uj)bWVrz$-&Gr1KZ{ah*C)YB<%fe-sw5V}0aeDE9LNblry>bVt75U>q?!YbIk!7BtO*()rG~HZ0)fqoq(?Si_3*(UScxH_4 zp}eOVlfk4SA9H#P14?t##=!G+K-&XeF2M4HLGKap=RAB_MGa?{Ga0MR;_QuvcVda2 zVD56AHK!+S%$%k_WG52$98@=Bh4=0m_coB0jmb&}H+@=lcru*YXL;HIN_sQ9eg!jX zdH104Am+YGMX$^#aRZ(t^4+NS=zx<(sU3h1=8iMVzFDXWLRaswnS<_Dg*wI?7^js6 zeVYavq7OI>o0Wxr?LuBWC!Pybb)a8iW>535>_+XCg=+2{q{Ac6$=yW0^m!ZWxeiKs zchoK@>bm7i@IMH2d*M@2sL>00rqbK1;OH90Q)%j5b?oq;;AbYHWME7Nb3L?M8C8q{ 
zsYiRQ!8sZn^u>G%XE&k;TK_Uo-@oS8i~C0tFC)oDewjUM79XYBQ~zeXc29Z&*sXvf zPe57Yqs%>Bf^z0m^7fPnpo+nIj)+(JW*Qpz%8PcOK50DAldFcILk_gYepN)q0j5c+H&ee0i8Gh?d11f=)D1%N~ec@ zP(BVkhw#prQe#KX1n}na%vimpP~F>Yw;>VksFdq)P7^!H0zPNJvj86yt?|>xDgZ9Q#diS+nvg52my`S_5JvBsP4}hVT(-^(I zaHb`tjYHTB^^=jDKHydgDpdmJrqJwr+I@vydhoUvc=n_XV?WLBW_DZW`nBMnF)K&W zy}Ha5t3a_m^y7IwZw4L$UX9^d8c@4S(AEMVT#H@23g^^;)2!@HqnQr|)Rgn)Kl9{q zceL18LT~@51cz!`XRq&!N4J!e)R=<5_zs;%-Vw%vsqKZe543 zuBB^1%i)3bybQJ}@TU%R7>AYfl=xEKJ&nG4t3?ayx+m|hiFvuaC)OxfclSI$>5Yhy zz+{AC0z8U>4#pPNMAnKyLw%UuK&j2_2G7hwa)Oza@onZA+zGvmdG~a6VMC zqPC}F(}24Uv@s)=p>mls|Y-A0gOAq%G-jxv7s=Qt`anLE)>NK#Is0W0DB4E?m?Th&J*FC zk?5CzMeo~bMp)1FJ_XLFc;@(02M!p$=Im$}wfE4Xnsyg*=e<@d(WFgi@CqzN{lGTA z#2b0j1Nk2S-kw)YgTC@aAKWzH8N$0J;57kidHcj@IHZ;;iRM0t!LC<_*4`;v44S54 z%Z6a%rh)k;EUGbN=fG(RbgIeRdpo$qvnsnm-OI>mZT@qoN4eNR&j|q)20*`oz+epI zU-VuYS}rt}bxgc!4Uik70VU`?P^aronWy#_Fy#RgS`u4Z#|ZqXZ<^3g*#$mR|j2c zPs`ch<}Ok$I_dn{h%96Jx&i<9%<`+Dk6(d9aiC3O&S-{-b<`LL{q&F&fj-`Rs$a1% zeR%Vr(Vr)3j{x7tDnV|xR?GVi%Fh<`$-D&-^z4Xw z6sh}$r+Qk9w$Drd51~ul!8x3g#$r^bb+ZKyq*NJxj9)absu8+p=(P?bqBoxWfPQR1 z{)|b@!A3t#$?B}+?hGU(4jdLBsVk9`3uwAptEb3Vzx+r-0yB;Mj8KD+;gDBaufMru ziz4qw=!LU+jOgAMIhC4vG5!lLyggz&w9Q9L<>)abSQBLNUnS;~&L10E$cn zqb|^618wC;kC*T+0sJZ;f#I|ihQH%mV3+PiPn*68Ft~)$*PUi^dE$+SGO$54Jgy)Iaf+DokkNLtk$Zu#4 zJd_e|GDZ}oS21*EWoANmt5$)(>od;^uR^N5@oGM;dQE$mtdJC+IQtvuC;0+g>nB8mz!bj-MmCXR)H8)RFN`@i#W_mW_JQ-bC4ad%a zOL=M^L?XQpW)D&@9_e_B@!PoA3G_D{8jMC-^-&mOrhmIM^ve_c@3YWnRN%Gn_Rza1 z?X4}L$fmhG{}KLdgKwTZG*g%wSrE9lvuer1>}d^ejeh+AiF^_HxF7s0Q0Eb#C`}2Y zA-`o@(a*Glapncy7pJYaq1H3NU4?Ru(8GM#oY|C&fMR!3zZkqTe$42$`@v)vb@Ur< z1E!m_t2bs5Jyc|j653(FCC- zGL~*uKmkkoqc8LMf4+NCvoz&L^Zz}JlMf>KBjIILo`!r+o>&dVkiE-abK4oUbrjy- zg=98o4(5HD#^=5arQ(6gJ-pJ4wF{xmkI-`@(9WgYcreWiAIuC_6HeyEs>D&oQwe_p z=OeUvC+`c;&+~k*3C|y()Cz3+5a4`^xAyKWq>(^y3z~bv^a7` zAa6o3hSJp&wMBt69LRmznO5~rhVF_D#m7B~%^3+@{m+lLoa&z~&BrLWQGq8=Su!G9 zI&FaCAy4i@KrPE{{K6= z?slpF>+ikr=xi{~c3MnB2A)06j} zxcGwS?)keTU^HGd?Ri(jAB=Nv^4xq<=ddZ=v198P56mxShN|({bEA4Zw`NSzQslD^ 
zmSK5d$I>YK04+a^{5xw23oY@+;p+D*%cxEC==Iojc z?wg@UALP3Xb*CX49jLK~y32qy63jbL(wlsY(pwG2N3cM3fGds>=2vX#U`nRb>N0GP z{;O-e_s*(i&|?+P^J6WHv2-q%1Z-WP)mC6}rT#wDw@r6lJ_4Sn>9JG*?NW4M9+cI? zXzr4w%wW2J>sII!PoItWHV=uO!B0h~(wx7>otq!s+;RG0JdrvaI{iZ3gY=QWh<+1l zdK&mJ)UQwfuD%z8*(vC&&(71NC$SRm(Y`0Fnu2FHXy?A|G+^F{{ku-hn&7O*&`kG# zGg58fSsXabrCc)eJLiDzr<)=5Rp>bbow~`mdjyEif&Dt@?);$?FhoF?EHu}+8)NE= zfb+xjH3J#Upcf-@gu%5=DaHoRdK{ro*H%-gy(Oqq6Iipc0s8Tb7~4dD(-?mup>P_r z?z7OxOzW-;9IK7=tIr#~c^Bx>eP!d@mmwXFwoQ?iyBQ;md-FE%n>^8169tDeXvdr# zpFvA+q%iu!nXRYGJ=rw@eq;h+C?;?Sa(EpXYfo=+@W{K@W}u5kk|v|;E8%PcvKW#(C=+R7at-&=JDf8}>RXqO!JD!2Pp9d6rjz2RA7bpVIWym_A z7Y)a@g3$!vehpirAK*OrMe)=eLFedk1vFm8w*}B=9&qIZ^jJsxE#Xg5@HFd{xq83j z^D4c%k~DL0EifI3JT#}(uNjA2n|%t6MzDrz#rP5nkNQBzSTy4_nlgdDeuWn6z|Q+Y zR#I;PP#vaBCuBJj{Tv2{X69Us<~T>*0skAoO{4Yt0^tgvn*d$3Ugv?UB5!9xyGmeN z7%Daaqb^t!^QxReO3MS4`vUqSJx}JnV3|;Q4wB%$NMj%>M~~n0-q^GqKzfThg_-lc z!QWxf;9;oS4GFIRrwg+iIu&dqu_l?|vYZhl12ei1`|C&YQeMM=h>s=K>n|w zZQlOs%DxBR$IyN}y#I+=n0xHTP`*JQdfU9S!W<2c@zni{IasAs=#>Ng+aebk=x!Ri zQV|lsJ<62#e;KodNOF*K9GDcQz4^#!H=gTv zbfxhkZ9l<#_d{%HDQ$c2LNYyg?)M^)HU+lB@Z5Z%y#pyQZ>@LmR;7gtz&M-Q#x-VO z*SBM7x-tvxgXGRYo6Et4nPA);3eG@^cJSYPFue#A=EFS)JCC!yP&Ek;>~0A zChZ)Ay5^Ab#O!IFy9YV|_;vt?v$AML^l?z42(psQCl<|gug6&Cp99Txr)4LwRYqSu z-7yb5R?vDWAUAVT4BB4<%wMIwebgKW1>>QMH_j|aI@^NVWb|<*c4a#9X_Uz&X5s?_ zSs9E(8wvF_(47p{)lI0^n36_BT>#&5aBwY8>H^JF=wdEGrMEp}!W&RyJmYOKTJf~E zR=WjK(2N!21*Fu|9Miz#9$GKUQ!~?=iMLM|LiOrG6#P7F+_7HOSooc)gA` z*V5+?=vD_-QQpHjhu;Ix(*$_pU73Z!*a(-dz-r92zP)C67YfnuQm9~t{0V$-g#Gvi z&0j_NGVsi}>0a=@2J%#gcFyp(Fi;qiKMd-+M$4C=Uj`QF z43sdFqrTm_NMZx{ZH5}Ho>U71jxT}jb$IzKV@x-m7;SIv#NJ?JHq1sq6p3sY;lB<_ z)uFe)p{9EUPob0MYv~46{{^=f0fAA)j^E}@D8skX)LFr^O|;pBZs$XI%GHtsCu2k0Ti3ISI~pz988oh#Qv zW)?tCBYju%W_Lh^Fm!S+9CjYj2ABrX)`vVZYCRVi%q?<(?^(Qe9pJ|(uOETcGsDl( z&lO56$09~hb`5@&cC=x(4^Km{gQpeXlKa)M^qB{a#ABIu(~{3$Ls!kNxdMIh#jvD#-U+Yl>%GLzlGPL1cV6)KO;^5}} zPqWaNv1pvT8+y`@(MnHbWE!<=Q+hRgz5(RrXepc)h6DdWaD6VI%=jx;NeuX;{i={XON}6E$Q0a=2*L!H=P? 
zdob=>E`fR<1Hoc?EeEA?fnX85*#Yb;p~hw?>4}THfn0bCSS<3U@3}4Z#Cw~bfdbor zco|agBV5W2d~17w;3@RX^N)Ia5|J9Clf8LkDsr-yQWKe(7J)9>Gb6CVq1>m?W+aeA z(Tle(1lche(L?8L!zq^sYi}#&1Z)WXj|R^g^Y<8|O)+r22_?;?`3sq zU53>dhLtv=QcvbHK)QkcVxZMgblfw-d-=Z>x^R`5PXQn;kCiTft@@d^YXi;mV5o%T zL8iTps1ReoXgHXP^c%NmKF0jC9TCh7@*Z_-ZAPj7&?PTehw41U-$hX5A!zXgql-_K=)Zz*p6%Al9l9r^HsAG^dFJ`{$6b!w z=gmVc%JYziJoLXe-s;1RqE};9mAcRyqhR$;-~K=J^#A|2-ruTN1fz@G=U)KcyRZ&Z zuoSM6%0apRFza<~Y_6s>q-zCbPom-Dd16eTXA5p(bB*zKe|9}qHU(a)c}<{y8?1_P zA1$yTLuu8!Btqw?hQVWVfJ7hzo_JdW4AUc!ZWCEG7_gTV_3l4!k8`h=*FD%fqKWQ?5f zbclI?!?14p5W|4f(@=|`T1n<5IsDvYL>x{V?!Sg+e)nKq<^cB;c=R!JGRopFDCs(_ zJ9Df1>18#w0j88T>cV(g>hy2+dN#?-Cq1OH1>SvElT$ zka8n}zMWS`g0nd!-DNBr*vwMs*9o{^krqcI8=l+E#Y(*Zh2-99-u0rN2E6m`)pGq?Ym>S3tZfzHSg~&c+nP_+6RT*r4OUkII!6b-sWa( z4po1JmcEI+yQJO%r1?6FaWp8=cq&uVdtBL6*UwFH)AHMm6tlAXlJu#Q%u0`pQ9gT&Y=mvLVb5}JxO+q_RQ~`&W|4I36wNq@?EsK6&UJOKf;r9%-7y#6w=!h zgw#wefM5_t=ZSwbIm!IlIiZV6~)Xu$>5t&W}eRT0(L&MC;T<*puJcj z^)L*me6u}D58pDnt2cFMEO;MeeDlM8fD@ruCi6437ZrK-J81VlPs{|Xen9gnzn;80 znI6YsH$4&A2;N%A|AWA99(ogNs7DG)f={PK#{^Argxi#&-fpX?&n-glCnK<}Y z1C8`F8~rhR;EnV7??`ejs8y=>-i`8wTGY^=7vsy@AbiaJy;~deb42d3p`eMGRDn! 
z(~sbg6<2*X)i!y)qcycX-$F}vg4uf`GGXj`77|#EHai%5H~6Lkt#u1rGg~*$S_p%Y zUeBoMie!(57je*YA-u6JS8+5%pQ+i%W=9?eMx$)XGK!u~&r_O056%htJ3X(n0=PW` zHpb6*jWM+-xNS*#~&u@sO@5_M{in}MZ zm{ENRoHnzn(@*n0Pesuxh9`dHjVukCc{0GPKnLg2lczlmV@0qZYZMN=Mx}P9h51m6 zkhr($aSiseCDhX6XzuhyXi$T8N4XlsGoH9%Rk&ERLeFG*=&xtqv%nUCwH{s1*)n!~ z0etSuH+K#`<6X2RT4_v244)bxcV;QCNB+%ZSF_@fsP)XGm!PqcADQs#E~BQ<*A1vp z1n#c{Mq|MDB7cqG>{On$vRn?7{)20Y;8KwBKgY7Jhb{@g;Lf1&!o9H^Szv4hmwWDYm=*dPt2BKD7m>{dyb(%)PV};aPtSv^Ij0+FH3l9url;xj zSqmzwRc-his%`o_JuhbdbQjUM!RSEqieP!o5ID-n#{cOtsLj1SeD?%YPZ!O_C(o_X zW51I%$XWXS4rtuR3dhDx!Vc`E*9uT?B=}{+8BYr?&#X`o$ju*_i@n;;v!Pmv)-zmNQ%6t#$!X2b?J}~GRjD-rZ{HPK7C(KJI9#F}cKu_GLO>dq7y8}%w zO0R99cMei#hSr5(=J^);Bfo^dJ3mJlsRfp>0i5UuZtkW&4{xn-HvqZzRGLw|E&T{w zMq2ho;-cZjC;U$5${zks=SeNI_R@HzbjI5Qhvo` zLUX;g6L~5b-D?3yGa1L6;RAv1NFI6CLTTDxKpt-L+XQNUg$4`FV(humhC=Ml^*~kw z(MWv>?oOk>I7Xg{RnZIWzNWP{_W)@xDB1>zPvO&(jAiUtM`&nf^M9CE)!O1f)0Z`V z1GHJ|VRnTPpU>?tS0`5UZ zPeKOu4&49>ZJ}#Uh!_m&JDlRgZj{7De$d8Vt&MDaFp?P zF{2LvlEZM=n5|#ov8PtFN5lRJ#9#8>XY}<4U~4YsK(IAaJ_QOTLz5J=uq(1<4M%rB z*TZRZpY?V>4Xk6ZS)LXfg++f1I#)&djLz$Vb@r6;b=>icjGA22zv)h1FM2eC;SwvO z+&sI0D`RLoiB>CUyOh;TB6cDU3V7bGwF#}=XvEd?+}HEd2kjUId`8)y!YUZ^uFku^ zr#DusS`RwpW5h_L(Oeqi9S31!-s10Z_@g#S@deQNIGo%J40`dBSikn=nkP(XG1kGQ zN0EtqT(1iq|AvkmE4CZ?w4!Gw_pNZU9&836$DT&KhOzDT$++7Ao%x-j`?cR=xxb zt-srcaW-*x6uNm6{qq-t~u7rq3#rPJ_-%8(v7&f6L}a~*Q5U^Mo0&@63hiwcl-)(pY`9%(RT*3 zW(rqQ18p-iLf`8bP^S;04@U>KA~V@QW6in4%p_y@=?%X!f#6lB{Su>g1~&P51g)I~ zmRhg-`TGiZny=y$VaVljBp{rTzJLeQ=q(F7@Hn`I(|=y1q8&YyfL?Dy&n&R+P2c^| z#7L}}tAv_-H>SA(T&j!~sQbo!>#M8HQw4z5H`2vWqz}^nHlK|aeG;h5MEL=kn!%u+cZRHd(p!BN>;CxtDTT#>znA49sysXu!0#_{v5P*9`aY2zGH!GrAqS9Gvose$QNJa|duWU)bDJ&z&s<&#Z#+GFML1^9rtd z{>MrF3*Am6q%~S(1@3=ycQbu#r_c4kFap11Bv5(!Z5oyW;|aVZP^*b z1-=R{KcR_>;N}{*-ju$rd1Q`nAwCbI*Yill5_(*Vw%6f%3+V27O~0ZwGvK}zKaVlq zW>(S#=-r=?w=14c`WC|(Zy2)LgJ-S8sL#vX;EIf^7t-wC5G6W5kET5BSxD`GH6J_+ z`MXBom#h@DGm5el>yCJMY-V&p;ExAFqh1TqZ+^a6hcX}18-tcMr?+~{g>`{@Fa5gn 
zk&98fv#O1tttY4CM+=7WU0>rcc=ia^Xebyp;JNzr^mqPR0rq=rVlH}{!Dqd-)|+@5 z3Kinpzmb7>_^Ma09`Yvkp41YLWQp^)jQK?%+o!nFl6xnC?GWR;=1&Wx=TZ2Qi`IE~ z%GgLx8F@YEwIA20%gSBjAPexs5};qm{Yk+{3mMs{O#SS6_@pP<`iZ#%nG_PGYzud- z&AFL2DM-_cT-8%q0m<-eW20x6(0@6up)1VjjuwH9`)|}{s4w@oLQpE@#%0O@ulV#5 zMp?w?czP`mKofe(zj~?v??0ZOx)%71Od1X+H=)Vvur8jaF$zsh15Ve}?l)OgY&}0# zYnY1;?g;E{6DaGc30Ae)ghqeH{A=!77Syy>^$m1;IFPto*_fH$n6DK6uB4Cm&^+JF z?cFu>IPgEi9J7N_%>D{T?zKyi^k_w-Bk+GdJr1oysmez zJ97OEvKk9CR@|D*&pY67llE1ZYw9ASeF95=5guOxiV9G76gJb7XOHugbvJr}`va_6 zJS(I%*ILq_dxgFqwZkr$%WVW!43=vvtBn|_P!CJ1rS1R~t6)KI0hau>`Q3=K_WP@iY(q;noJDgGav)VKgCkpL<9q4NC^E;ZG0~7~oeVh@hGtN}t z>wp!mfkgSjLCXd5*)*d8V=NocBP*PmqO=>AeP=b0;wjY&v13%xKscVD2ehqxrOq zS$-A0d1mMH%ooOmXVcca@7IAn8XWpE!xrSe+o|{mkGqP`< zcPThn8Cf>x^l{+yU85mmOQ&DJAr*b|jO*@jL6rCf8bTtC4-Td9$?=Ds`qAg`wr?^9ddBMk+JZ{?D~1wydvo4BD8Tj zJ?}$4_VzzgyKeH=RkEmFCm}R7_N9)(OAaQ@7WtTz6bYZjBXV7-;oLR z=@Rnb%D)K~eL5rFhK@$|HsZd~Y}U3eg`9cL$7HCzff+CgX*C8h6`tRrT^Q782c?b7 zxX6rK8~V(qwSHTx<+-~22`J|Rp)r*a+&#>EsAcgi?8QilE8<`2XIHRK@(8_VKm*S) zSd5&U2HQDU1*4@8K&zhvj%p$G5&Z^sKOlL&m3#K{7@%`+p$l}F9jZF|6Dt8LRcFEbA@Iqn7Oseeuoh5fa`k&45rQNjHD1g;;{>eP=gcbBEGY|A z&Cz6cihc}WlhP>xXSy=Bb=t-uw}+6Z71#{3vppf*U7mJO^af)&&S>=H4^|^%gC|{u zyYHS1ZeziBHj?)bs5lb4*#`WKs2qeaRL>9b9hH*F@S1tr3=qU~>T*u`?J&kA6qsPlU4LpiDZ? z-UJ@AA0p_f7V_ZCegXMbMx24q(EC`t3#n*oNu*#YbC`Q%Er5AHvek|DzhgDthK5$` z=ma;0Gfo5Oc>!rlfy&+CU<9#XQ1-j>3pGd#N- z-uSk58wyw}qaQN&D)OXMz0Z@@nKi~KKM+}`#M*1G(mob>-3?8IgU-Luanh z!jta%Vu#Hic#!|@);tC5p4>DF$iIgZVT|s%z-hp21yENod*N9TII{*jYZcTx$l8El zjbKFRXf(}P+{|<9_;%xprwOY#S83b~vtcPWq1DAdTu4JyzRhb72h|bfT|&%JWZz+*xt{xzXF$@73<2*qP>+4??Ij} z0g>mOwV}09qC23<%d~UntS;JarC;mxxUV8kU$KU_cAxq4k-Qy^M?3XlnlWp1lo`C8 z0zH`p=iDXv70$;ayH*cv2sIiq&Pm3r4z;_2vsu4YxooSmm9fEMgNoGh}ngGfG`(2))h&t2}PbjavL&EE9BBu*dJJ~ zAz*L*NDR;xMl*KMkCj;5@p>6}2Y{a$!5gGMloV;B+xtx1L=FD~Bn^h}w^LZt9LP;;hQ&ykWe_9Wju8(X! z#Jp1;88AvNlD@3I>G@ap0>vdX@da$LGtZ-Fml5AT!V&*QfXlt;`Z}KR4N&iL20WgR z7oZT3T6?8AvX;Zs`YOLe4y<=`KNk08@Y7Q5;F+pOi|ae3(U9Qjc0Jsjx904(EFY-W3Aw!F*? 
zwOA?GXGiqwEo5g1^NcZwV}Mx?*)HJmJmin~wPM6;V5SF8?@K&#dzEjl^71jW7X>ms z<}(=0%6Pfya|Znv!6(&(5sC-bJ%wXofL$mePM@2epuF_`W^gA3St$<|%^Be|G8T{J z8p*S1=#sg>)qyx3Ouq(RGYUfIjf@7uo7l5*v@+vL@1{NEf)@88;akArZ)jc$R532O z8FJH>cZr&e<{UJG_GVb?JKG-c|1mh_>3{iw<+Z>YmI4<>(Bc(-e`Q{ahts*~-8F*M z9`A6)GYcMK%v}7GLf1?3e=GeJL&j!6qm#7OcXtiSUE*nX5%SZo8PAUfQ0ND=R$xcI z-H-k)1in!2`wE`+924&=v!3i8^0NJkRy5_aQGt3Do1=+(9K~JkKFJ7H#atNt`ux4X z9_nW=@-#EJvM{(3x_SlMw3+Ynx#|CR-Fts|<^KDr3fDqs7JIE6&rIPb)b{`NuQpw; zfSE(Lv6$~d`EkgiQG&&=vP;oq>uCMJx}ywMVKj5dE&dNh8@r()d9ZqaKyl+gTq93q zp3;vq2X4N?TxH(o1~l9#!b*W8=U^fG)5j!a_dL8X>*QnRs^WbAg`Q8)qcIu26HH;Q z*RNvzq`Sz_8P-n+uuSgTl||<3V(Hz*-UOUEXw?qj8jRE(fd8%l#$h`KpeO0Te0B}(xxvN_%w+{)5Ez}g3F z62W!!pQ}oDQ6_`^3|e^>%29048D;=$61PK=j5R9@#O5Ztx4Md1>>_Yh0qT`pHJ)Yz z--o~nIkS!bH;|RBNcB%(bQBr(Y=qrVx&>5<A-oM z*+V`VFVrMhUEYF%m!N$(y|_BJf?6+Z^dV^d0IPcQB=mrp0eJ%H=|oRwkO0qjm3CI| z@T7|lVD}XB*iBZ3XBpAC%aeiA_I1eV$=tJ6>)$N%XH?cBCC|Q3! zl_w&Y%eMyoZi4Qek!8KUpU|>6khTb9&a8T4S|5NzXXt%35`2wTMtiiva=gTScRPI- zUxK}H_c#*#+ksCZa5Rthd8lw3jxI$Hln`aa9jAAnd7T+OlNFem*G6EjM?MmuM+xj( zGQBLNZ)0$*-R{|BH_@9Pfnz<;`%Y8`{#Qpsm(ebgen;_?Z`7{DlAw=KwR4b6^UQo( zGLLUKw7G%odpfDKFAk+J29Tb|dKy)+1ZtS! 
zl+B#)Zu)Pu+z-5F;x_~&hLJBuLqr0zxl1u{>LRrH4sPs2&*i)4LSG5=MLxQ^i$Kel zBN=OHu^jsm7w|^d+cVO?=p1KbLEL;E_=;?a|?q^nRW(_2Jirmo1RU za4fs^4yJ&|6na^Y7G?mq=dpTv!9_Ut7E+Q41Tpl|9v*r!R4p(n29B>{jor_^4b_X$ zrW?JRlX(V6Lg)98kT2-p%&X4Gz(}MroY|rw(9NfRvs09Y?O^U(M?*02UBTUa z$6LWFs23F8Ll3KhH=Z_Vbx)vgi?ulmMxBs}rL>ribR1;d*JxLbaXJP1Y7SCoG)Ldn zYtVN$u$V7YjaJ5pw?o#yl~?300~?1tV^Au#;g(nP)S;%8IH6 zPnor!4i${aw33yvwR@4f3?$VPsgCmeex5RNvJvv-PSQH4l>;Of;D+yf>-n{py6{`h zj>5*9FX%hWEM}t%fe*vjHseB;!?$JhWh7V@y>8{xdSr1WH1*WA3|drRobill?b0Z; z*~k#j`m$=M=f7%IUxvTG@%&HlO|M`-nGm=2kLOeDw^hjUqddeN|+B`bp&_ocv%(1>H&M9{uwM?64%@Du3&wHQfN_S;2H&7 z^%?CBnxluhEz)2#)M$P;p>bw+t_3$&J!k06c)1hU;Uvag&4^>s)yqiLW$0F(@$&=q zZYbbxXcAU#I}l4X{i_9$!3|hYCD*8q%IHy-0H+LkibZ!ypp)HL^JhT0kAX#~tlyP@ z>}x&bXiKc5Z?*3*@-5y!HX#A+X?GQ>cjMC_G-@%|%<(=Gyq~THDsA77(0n7bH9y1E zo@=waXrr}ljP1)r$BaPAf*a{by}n#`CDT}C>8sn0TzLAcQA?hEItD5^pLu5DJIpQC z!hIA76#W3?U=3QeA9E{<2DEt}%)Jf2?KYSjK z&FPQc6l8=1?hoKeGhwZecNm(K2bXEQ6}e~V8~G0ABK5Z-tB*uJDH(dziz1_0@X9)` zjj^ouk%w_^gNGS}tow3P(iW&z^V#~DdjE}e z8^zk$Is;wd(p2=VKi1W1-pv{1O>Dv@dJgAZp;thUy}XeOg>UBrD(vO`>~pvljU>!s ztPI*(4XHf1MDW+@94})vt%r9BNwqrNb7++HK1%S6RcPlUtqp)Yi7{I;JCxupVkeew z2J>(v7J3$ZE(#~xKrM6HJQ2Y5@3JP_i?n;XwI?>^VXU*fbsj~(tpEEMI<=7Aw?a!J zsGgw5Xx0~n;Gr|AXT9uZUa^MaAmrwEDB+oVt)Si8@S!dgaR#i#TKx{=T0ded)cGi& zhH<7}(u=hsJ&)P*bMt{i98&!+V9W=Htt4Pg9;?po08X>U^=g_s)DHewDR4VaKMf@L zfXZ{2&9c@TsctRCT2|m^G!${=V2xe<-d`h0P`6D z1FyqSrne zjb=tNUp5-KSO>*R!n;#Ys~0^~0?(Du*J#Q?XmkCb|Jlriv!KXcFc<6DKz}H}LMs#n zf43vA#syviUVX0pkyF>5*5EW7V-K{e4g~JU8^4#0E{$Wv;jG*SAaz#V$YRuN`r3ko zodWw0Xg8BFbcPb+(f4iNqV z{3FoHdB9=pmVT^!w8;aP@-jjtsPP7Ug(#&zDK{KALvKxKH6ytG8}G5@m{~tTLvo`j z@lYZIDVPcDiCifKZ;ZLl167`c?#yRAr#~_tm>%O?_TJTca4rK zMjzI@JOtz?8A0FIu|Vpgc;k7O&v~GzRU3smH~7EK-*Q-?g*>bh8-%Yj||42<2&JImPLVxhyC z&8(1c8JVy)Nv+_XnZ8E0%tgzKA@?a*Hcz16z>2}Q;5z7>aV8xZ*(z^UkR>a~d-k@C{{KZA zeHyON4xmw1WsE}OHbOz;F&==2`WMDwm)x`VY`e8snW12F0WBZL8r*thzw?Binp=Ec z4WE5~c5hkyleuo(hgC06ASn}>NzAz`3f#V<>x)apMpd9c-wh+-<5}9ChT^Nap9Kf? 
z0}KFLBl+4xd!y_<@2ws8mSf3(V2;)EI~FU$1lZAB?sd$fUfvVy#$xy~^7B3S9G)+2^5eZLsqUiJ|DI{aMfD z9MWPYr>DYR2SaO;MFPuZ`f$f(5&fldB>~($QO}C`O2aXpyn-w)V>CS?+gR1if__#A z$pa78V{J0%vlIPQqSvkPXCRn{yc{i|h1s*4u;#u$nw8%eyw(NtavXR>^F~yY`F#Tv zaX0Z3bjRwT;n*5$Cg~Sh!hcT#u8o|SV-toJZe_g3!KOX7@)la62Do1F%!k8Z-H)Ek zuQr#c7Hd``T5AJYVV)kyIC?pzB2{ezJ&(g08@;re(dPr@K<2hkPW2vmXVt@KH26Bw zyBzrcfI6KRaUzy}I6XYcOf?kg(C@qxNcF3k_t_4bYMW-FBkSpRC6sn;W*xiGiLnm> zp%qPi*XxDl&_nkeI5nqN`B4k%)Zn`utj0Ct$<{)XpBcm4z;6ONenN|9z)mmjKzf)B zyzeu!ya~5zfk$j0>#4|keK=#aGT&mYtZx=_Z!DR9&7YA4bDrA4-{$bcHIw;?Mlh5B z^Y@Xn#z=yGuLQ6+!pn1>-ElChE#wilPL(HI?58(Ri8kIg8`>D9r#Egi-^|T;1RMM< z-^Oys^Lk&VH!H?C7kiTOFSIl6c`mDYeLWFqm_DHdbTqWio6EBi@H?EJ&djxgfx)_+ z?*+8G1Gcxo9tTDtIq?U6~WyHO5(Xs7(0Vi+IPtJPjcA{S^jN*F5jOq`SRcSLOb7Fe@Jws!epM)Y()>y5w-03e ztH||OdTYQl2f;HqqQg>Xh955&4xa+ zX>XO_6l_>Jt0O(^M$(r7yG_8ch!!V!`^aMC;yj}t@h2#t-~AfWVbx$GFnwd$%BLLK zRG~M0xcayEva->`@CjPo4tdbmm4THQL%ZtWV(j*Z^x1(PtoWJ~Xzp&_Umc+X-!DVk z_N)@)ct+pjS@gdj{rg_riV=%|UpnpF9kI%L3UrG^e%^vgy|DagTp0icb+P2qORPNQ za34?ZLaME>XqH?}@V*Gm_Rxz_nx6K04;)m^q8Z^AtbtXDlEB`0d9#d1aBVPG`+)0q zY=S4n=Fs~xK38Y7xwPvA-&^y2I8wEZ5r)FI2WgYT&r)cZinhiwU+skZn~iI4aK>rd}(wcF4=Y3<2y0()-sa2{9sfztx|G7~Wu zFuT*GpU6m4?Yt4*RxmjTW%A>#odyqen@H#)zDM zK@)v573tXw7;D9uC8tMw2atG@r|16Ug<|?H>cW%CV6vGxBD9xl{SUpO+3+j{+jkNE zy6e83xo$7BsM)kNSxs3bF&-YaWet}L%&a_l8I1J*8?EmdemBtPFnYV6o{Z%?9Q5UC zJptVs4u*r6wQ@r(ckj#tT*K9Y!1EujM*&eu#>)%jRiW}?)+{UeZ#6yV{*}OB9*XhR zyBO6fAon7LGkN|J>l&q|H9aPxyDNY`ioV`uo-F|^`lCBxkIjRQ%O$>0bK_& zhA|1wn-$=95oW_UxLOWsI9K#S=DP8$^|Z|9%m%I@T#djwS%Yv4t>;1Kwc!0DeNBTi zDZ!l2IV(K74M-NCyS@2ek8h2bon6`bj;cTPAkXOKjAIVUg8L(pvJ&)?n|b5hGxeC! 
zlkuD^MbE7Y&;UAE)p`~1?FuBSBGkGA+$*r=Rl#5xqk5Kod-(PQ?L8sA1Xy~0VjHyk zATm%Nne%j+pP@&2o(AG4qF2oRhKT4?~%AjOjU2=dqZ@8CRd8XA>LizZ-ss z1BcP))4@4Ee7_522C(WpM<3ha#{+1V@iLxf_7k?;RZd0xrq-cNgm#OOyClXjIx(8{ zQ~_E?z`d5x$5W%jpnNA_X@=bnXY`XmF$RjbCfEf9^$i*m<)Cbdm^C3ow=dOMChVFVIjqx%raTnJX24nu^bI}J>4jhGA|I=wGY2~*6dNR!r z?Fp4n)06w)!x*V89 zfcv2{bc@0%;{?ZYrz8C|<$4(Ha{>AL0WER^(F}g>L6?ex>1pnn0sLa{-*a7jyR~A$ z3ZyRN|I80%(%_dhuor@vp5o@{vDF3d1NWWq%0BFA1}#d{^HjKZAN+T9J^)NSdF5;R zKE{|&0Yfbycoyw=1N;hL3megQUaaFA{I?(Rf1BCHv!I>=x*XY;Gc(Y zrTJePJaaLl=nGZC^t9<)EdrdL@nSq;LA2^&S`6d*5`N7;el(DhoNG?oPyxYYAQDuY$YbSW5S8rt|FQ$fEBE#>GU^pS4%k0f9Mr8_<@{^fCwC{++QO z=IM0wV;r_jPtzDy4W3_bCD38aPm{rX6DylB{Jq31wSnJL%wuD*sJ>CGpueYq&Fn5~ zo?l>fR0Rn)Q_b9_1ZdM0yi4&Z9XscFB~id(rIhPnwGjyPQMbU>*W&5Jtd6W(VD6K? zgfJ*Jk=bD>ck~&#kK7Y#A7JjvW|m069uMI;_rI+v6%FiG#V!Z+-roqSu=BXW(_ ziCCGl!1H(hcZ5r`=*bADZuDW5w^{Jl(|I%K_ZYLza;R_}t=xr$P6OJGz)%lJdNbA< zxO1D9eSxwb?FT_iDQu1URnXXTklwR-g#ar@-;L(0F2ixhEhD;i>cVdY=D!5Y0-q?&~tzSe5K1^loG9XpOS+$)1NUh;PW1^7zvg`p=J|i(EN-NizIXh_nx%y^y?*z-UEK=mog^Nc#)f2 zOJ-cF@-$)ApGqsE16^f$0_#3zT&o+)`46#a#=aVT)EEj`m$o&Yz`68dj*syHYtWLV z*syU>>|v-im!5p@Hb%81y*&VRT;1=0Qqvi)JR@hrCD*8)d-^$j9trp`i@vQ+Je6^c zaWiNB5k~P8nn?cM0u!^2jn(}foj1qJ8fz_i+I55Tq?uE@c|HfdXaQZtDVcUg8Csq5 z4|-kA6H9?tkLyG@W3@lMB36(7Baph<(7{Na+rVdh<5ZqELu3)qyo61w2%MLohP&If zu{QdGjTO1W`olGMDPU|x?~TDDlhKUBnGMXNxM!@(P59)yo^RY^z`@g}L%ymhSO#Nl zJgK!gblFJHpQCG@?OPohwBm1PIOM4*lcDrg=wY2eqnx(^qpMqYEsW&M02kk!i!-{> z4@R6#0KN=J(63!{IPJF6QWfm@!zXGC=^upEo^(4tib^4}OiPrsJ+ z5;Ir^Fp9w-V3-IjS=fqifM0umDWJC}#uormSJ!9JfKfmeOW)Pu_#k-oA^+P#CvzU# z!@usx-A&-?0bJ(f=i>WDT1@9%A{AZGL%9?h8MAQ~IZLHSv)@NUi&*-;!TM<&mD%G4Zi z)m!iW&gZmfK!2eMieq`s7@bf?&Sb9N4Cp>IpnG>_7ti#&3r?Qot_*w%1!LgAS?G9> zzI$Rl^)pNWDt%yEfnx>|c&AgWMLU2cykTLFd6p?E|d9^>+86w_8|2-_VWQN`wbr@LVcft&8St;<+To z98d3;;Ht6BRveoGmP>=V@E!PUbh&$H`U=8nuV1$fuxWjg;Ip~WuR-tnjB|rHd1{2U}_MJ= zzj>85MUWkLs654a3U6cP@LHGN8lY9NSjt9epxnqx)m)}2;G|Vp%P6bq!Xq{;ZP0rYYKYv!4N-vN6Rm|K0NQNV{C%vKfo z))}ij23WLTp%Z;NAYbl%x1#SXew|a?^}PrM 
zvVHI#o`f8w(04sxJr5_prQHsCdx0zN8b$ETUZ6}snh#^&oe}CmX)8rsgu7*eRd*QP zuIv2dK!qrN8ltP!p}#%oqX}cg4nX02+L7m~0&y?!c?mlA2V?8Wx!&4^6y@Xj`p9W2 zI(0u9und{7O5H^`{~s{k#2b?PP*u@Zt9`x?Y+=9^iRRB_cjgLsJ&fjA&C?Sbvf=VG z%=YbAJLn6vnyQ%$?s8aT<7w#i3oZ8mVFq)DSq;M(yC=446?S12R>rz+?qqdhhP($1 zT0_~RfqlFWsyxgr_a!~`W<;aB4l#4~2fJ@sxf-+lM1V;&&pQ`s@g{KP0yw+tVeLn~ zGqt(<57t$Ucvm!=_Xclh+XHTfL)`&ju7An*mZJC*4+BLuV@$(dX~Cz#pRrKo8lTK_ z(^EGTi*O*2uoaAFp3->cqsefvBwi<1mvO+?8~D=cKZ&1Y#?^zVU++b}e~YGgdcbxh zw<)sl5*S)wits>_AgV`Yp?MPs(Y~<94AnS=3rJ4lADnctgN@noN!>WPiv_lUM zGs-P!l#MO1j!F!iFTl5sXjgBn$cucNg~kp7s-g6ih?MDPEDmk^(91XY#w#OJ#?>c>0g~*PcRtFBWz$ z5}3fxcW}r!_0sTVG4z|ooy=el*fYYOVb;=@aGPuL)IEobjIkRDi=`K1mtDEN2yLxg zFp#zS2=1oQv-@d!V?V|MjAZ;6=-@2mn^-scb0zQdKe3uh3#@esppN7Id*El~8zYK+ zpL~j5Hex3?Fzf5r7=UG3!;{9q{2j?opugKtL_fx}a893$Cv3b8YzyJtJfui3p(l6i zg)vrcVer=&=sd_uF4i)7)qViy+4PnRNqH7K70=(cP{67Qo>yY+#vP1kG@s`!H^q{C z4)>a1hw{Q{uP*{hJ$=P^Ufs{l`^YdTF#yYK>{x!dsb6afWB(hiErc|bU{*c*Cp!sGa#9?X>zp4Mtc9CSg3K z{N}~F#MABpa6ie@hiN^RnX6ggbEp7a@}txGBfBA^rNL@4xWCCWM(GK^d9#baIFs=` zS41D0CpA9{4qov@Oue4DnE6W6vw6_^Z)wmWbVZ+Q1UT7?Tod02o_Uk&p`QNd-}s1=uv>yt>l1E&W+D&YB0Vu+M-;3Nw?@G(FH> zt9r#j(QVkPzQC2AXY~pmg%;Bo&3E@_7-0$cq%dcE2b7+DX$Eu!^zB_%SA&4YweyJJ z>d1gT$#8iMI{GSb;ev>w5?Pq-GzO1L)qzLLJj@thv2 zkk>;CzaA(&Eg}gSGjk&oehdKXSFpB^fZZnW*@i7|4DDPsdv5nl@UZHZtDkHz*}z{b zA=QR@VMvKp5%w_i808U(=IQ+i?Xj5)^c*(l0^HpM1+AK6)oD+u_??-u2Qy!5bi(?9 zBapkg%$uv3KitXkZ0tnn^aJ#|1uakU8;j;2fktEL=`0i&g+6|Wd>K9QRPa7}8A;Q> zI~@w_r3cSxqm>w&O7tY)3+8*XQOYvn zKhUEA!3x&XBBYzqXa~`G;~Ts3l-bK#*Hq-HFI>Ng_T@ma7PMUrRIXQy=WRfL>%piK zl=>ZxDYHi7S4D;n0n^7km4Zz^4jru?dKVa5puY{V5WdxpM%PC0|0wqpfORC=G7QWX zfrIrLeXGfWDzkXHJF}#@s|$eYcUrjeGyY~H<6GacA$+QhY*a?FjD5|57CZPnjVGER zTfLy$duV-Mq&6GwPU8A?qq!Iw1{Cw@6O2OgQxdHCp`oQphcPB&C6$uCSMik z#&Im{3TSR-enTj51fJAIvM-})Ian&A^*trZx(=Q`l!WALLCTCzGh)g2m|u{U(5m<% z9KH|gn}?pr>`)gPp9SJ1uvmpG`SxVq#Xx8lMvE7Ld_H5`3zSwLIKq=@T<;6+W;Ra6 zu4eFLHTtniLu2&wD73KZ&wMn~({_ySZN*%szqLK~(TZ55z<)b9e}z^_)miY>sL=B~ 
z6NT>1MY7s5OWo%B9B}W9EE(nL{?vA)!twxJV_6a(-syu}&El_dL#=_sO0z?e3#0z^ z_W2(9DYP94CZ2J67OB_AkWIS^eD{PGb24LrbZH<9jp5rURx)Y8F@zp6&>zo!ngE3B zkwE# z$o~v9r3w~oJ>yqF;!>f4l^2fky!8r-Arq{V78{_~B&6T0{V*_EiCwXFdlI9x4YY78Zw}u;L(lkK z&GXga%N3rsl1c*Hn7~-m(XZP`hwB>iOv2DBYXP(cmn!tN9llw=%>4f>WNQmn$bF=L zf`_?_HFIXh;{g~ zq02}hG)CuPsB2d0MX*=`C%=ZW+U}*S#-rimYOak!Dl5{m0TTB;`eXgP4cLX(c*gjn z4oKwLpzRNE+9;*B;ZX@B$!O0`%>VE6cO?3%w=E3Xc~bIHe$17sO>e)^&bT9YKc+#6 zmyl~~x@f~+h7CdF995kYm;{Uk)ZeCnY$_G=nADz)k3fIbDJOf zBV2LabsU+t9?X7LGGlnY4(p-%$eh(T@*q34h^tUGPD4li0#(6xIJC|UjLE$HxQZK& zy^M!O=dq7^tvzM)B2v)>Tq;4qWZq0VB31L?^A%{`DUeO;!MY}{hOSmZUOXEki!t>F zxsDsgJ=Zs*;Dp(xGrzG0_lm1hnY;j;|0F57mT4~VfduUq|c$d+GUS55bbHLKteC8O2 zGu9&7ZfD#yhG8(8`Kx6mZwg68O6zc`nMu|m%-!kr7 zSI>yxr&vWPQ1Ftsdx7pVVSZ7p0;@&_b z{$T!f?f){o-wR)y=d=;VnEIx+9{Q|>?p8gRgSC&OU32JWH6N?&O+ym#aA@FKSpn0j-CHEGsG~N-t ze}Z@Gj~LY$=#Frq0z7=0RYL-BCjv<#i zXM(30>>Gej&$hez*7ELxUG#+TRA$2Gz-kV9sNFTbpaN|!g5_>_+yNYpGPCrBAI4ZO zhL#@$bTmUM8>yLuy{`sF$~W zpkFn#=u1_U$yio1bfDbP=pYGpK2n@lK?w0#dw$eG>BxJ!hjFeGTJpWq4B(JAV&$7Apbe*8b!ez>(yWsj zz<85!YS2>eTzL{sh`ZswWp!J)ePU{xsJiN-{7HVrW9i} zS^=6EPk27)_g*Nwj3@twmNudHGHAo!X;lRY%S3LBk8F(QUFXwgcxzU3e){$V8uQML z?7T%Av)?Q5{gHr=Mpb{s+%zMg=o8RsZ@{TsK+~L-Z^6s=prW-7jkK1-o;2N-D@B8G z%JE6+nsFb4?$@WqFrLu=sJHJPB-_7cZ5xw0i)UB!DHK;L#R`B=Qz+%<5WkCKbCl2qbeZc11t5JFRY}SdDNJIVqb=tCb{0&|AUZ|a)azUfbaLko|EOIymiLZktwZf0F zBAbw2&tCJq5bHST--t`z*ReMG1Q~e+$G?c1>mcLC0>?;X!*{ufSi*a;^=~o{-9^uJK;>mHyjj8T8vcg%1g)#s80wvQqM+2A;ZXB!6d>>YH=DpL*7>Se&$jl0L8+ydx%5IeRJD`&ju&)hTT zuon8a9t^^u$96Qo3@yxdX$uyQK#gVS=pf|i%RgIN<A5S{jf+%1vXF)cSUJ_kx=i7(F~@DN zww=Mp7;n9HH?bMpc+OKJjLzwgM33dFK2p#1b4+((Cu2FRdOn!(8q(@JR%1n&sfQyU z=FB8Rh3-HyBCw1`Hin@!t{YcEkL^IZ4C?4N)KlM`xyds~B9Z1hK=>J>#ejp9HY@s8 zpg(uOa4)OB7=BaGtjSPj8vT7rt5Lx4Bs_dDupuL{IL0g81wP|&e0!b_6`CQ@p7%5g z{9H}%iuY|g*P55 zlJ-Vtjz-(A!P^d8*L!;j-d$$)sE>5^gumv&Uqt3lLz`MijFfu@I`#=@kcpM?H2Q_K zo=7kK1G{K+$Z|&R3SI}8KSx7beH-So??s1qn4OeS6)YQVe7KhCDlmOOZHm?gc()po;P9zE@g7p$?z)?i!0bB9PfM 
zq{I4Vdbd4YZ74VyH*J>of1rlBx<;IZGs0y)@1c(zcz%v`*M3IRBE-PO72vDAErUgf z=4oT#OVGC`F29D{z6_>%HpYU@VD4C{tqm5%(@wM_eUT{7)0V2SNb_)T%*5UuL+1J- z+r5#3@ytDXD~(>$3(%By2l#&%ZMp;nr$FB}v=~PF3(QX|fKSVz4@xiEZAOZL){DU4 z0#X`ROeSv)H)ni_UHzWl8|>#%bLrKE>(RS9Y$ZCWBe8L83UcQh!>%fH4Im= zCJ(*Wcpi{dyiTHtL($>|^gbB+&!@i*=tT=C{2|iP9(i~L?tIAoJ3zP}jt*h%(4Teb zlfdyl&pXpv&F)()W=mwIHP}W1U1{zao!>5yPh)1L!0Bv$J*}rbJi5V^K0q1AOuHLA zx`4SmoonHOo`{mHzpRC2)X=a%s`^7i>uPKO+x1X7iIy*5-;N?l`i65O)88_JIkmA+ za4`_2fz>|vZFLRf)T5D$`t&-2R`rmmqQQI7R5WuH{Y`?V((efrv3l+=*o{a$FzLv=-uP%} z;Hl7kuqkH9dG@)M-0b@*SiX6T{C&`;5mp6x!psP3b*)Acd{1w|^@?DuNBRZmW+lFZ zd^Q(zAo`dA^sXv~1If4a>#mu(46b*q{67r~+7g(JvkX;XbAQgY%yQ&R>Fq(Q`{?yN zI`cd7`WRzgr$vA0FcM2O0?AVo??JM@M(Rp&XAy6o5AwS|=y@wLevUVnn#}m#m%cTj zeIgK42G$l>j97G}5xQLhIu%708UnHBVl@Q2A+#I`%$<-+>oR_UU#~B^@&@wxG@P{d zS3J7c7D+wGf8&OY!}l~MV_B?sSc327uX@7ASm;^+{{MoVo5{-XMPTvkf9+CXG|w~VMl(Wd#;gTJ&TvPMYXzPf z0TdO{06k5{w25sN{oca*4uQjF`3ztNaZl80)AIv7D#E!8D1U}F<^!3h;dwvhkQLuZ ztTkt>z1HbjH0lXxX3XzL@XRw;M#HUrKs%Vvo3MR@;lwU9Kb#f=g7vaHPrK?n zlV=i}Q8f~}eg}kK@jr{v=K0js`gRfhEkJ8F!mYwwugzTL z`$9G_dlY`9OfO>t9qcn6;!k`nRE8Kh_+j zu|}!%eGz)CK!VG2#Y*G9K+h#$S_k~ijJ}1W7!P6oRTs4FePqb}v^SBG2spJ2iFpxe z^E|u`^#3hf^Bw;>64RMyYtnNy{`W*qVv&}jNaSDMHGONo~QBQI^^ zu`58&QB|IxFyzs)@cOR}PpN%^7 z>}5yD_2=DqKHZOoF9;|T4^_-u^lB)kQTUX)(2rTiLKx3jp({n9UZ~PxNA7s73Uv4X zUonxk9>xq14-K1g-7}w`4O-YL_Q8? 
zE#c7H%;)Yw-)6*R(bno9o)Pc_y;?JBGI&ITbr^s91Dp9O6L`XG3D0(H#hhU!wF-eH zt3^9An)W~w&s845JDCz31D4m(xdHTCnUz-*KeJg;=+AqFd&XLgMCLqCUjTi&Jx+8_IA|7JKC=4#*HVo<-Ep`+obdKNaX(+uwTj=rZ{}vj*M9Y=yNDxj=~)5w7yGomh^{vPKReMXVbSk5C_5d zEhuP~%s_O|o!Z~Q*7F;_!ESsAZn5yt=)Vm}O9k%aU^|}2dYE565Bj=KXY6hr=8tdT zTr77-AV-(soe^Q-TyFq>)4;`uGb3)_Wv1&7l;%Q4@%;)TwFd?xv`!!i2hrXRaLF|a zd|*bOi>$Sy|0zhHal7tY|H0MD&~ykRJPs^Jfqf%(+1O$=Y#Qr{+Hj#SRLQ}DOaX?2 zjM<6jRv;5c&=WnOS6De^GF#mPMrQ550Nt#YUl%E@2i^5L`M$V}b<|kwnQwW1cQ@1O za7U0!{R(E1l%ki<8Eqo?~_G}N=jBa68_NTwQKyBu*BzZ03v`a) z`7jRv)5mb)PGCpPZ#TN zoyPy!&~glrJw!_@P-t_^V~jutHq(QZJ~}h*aCqySlmu4RJQ&Xt+8MJUUxvzUk=jI{ zE=F(qE++>1q%|@+Fb$p>)2%OaEI8Z*UU#&#TgD7}hJz={j)pc`0V_GG^$|$iB|huP z@J@a3S`LMc#aBv=r!WinI2u-)-lFLvEbxdoK|Vv(?v5ivp|^l6pgl~l=AiWp=(-e| zyAyOAUc>|xH?BAa8G8Y}(#M?w70v>AeQd}C==317b}uAhJeFZKEk1yT8+iTz9Q>Kz z24EyaqXODafr3Tg(FP>FHhBDoz4QcLPtoce*d`<4j6hBcEaraJe$MzgaA7U>BoZC` z2^pQrSRJ6iZ@}-l+{TZ)187Cp#r(Hsdk3EJUH>@T(dWD$i(w@~<3Eprt(6?~x77t> zGg9}`do^@;I8W3Dr&_Gu1~85@XDW6j2VJ+?-XZK=EGt0QL|2itEl9oINYB9R2@ghb zeFD8@(t0T^eP^wYd}kqVRe;x1$YPKRvsB*#6K!W26tU*7)~f~ZT!707!0DcZ^J_L# z^mNW@tPelw%+aI3dI*+B-#}q7@ce-UaL$LEbY#?8^km$V`3c`3 z(Ic@DWvLc+8jbaA*m2O| zWm?Xmy}#dcH7?M?pOI^KzQ&`6nLu)x zYi25M$D++bqQb!0iUV_bwks08<(Y71bXO~%LCQtc7 z)+2YnPJgy%5_syIJ30DB`a)GR8jW|+ve$*<>d`r9Hy7Gh=GrLeT$5{_Z{bPb#DsCyp2Ih@1uP?v}_X+Y(%v3b%vhTLz4mU;WKbEO0*wODbY*Np-Z$4txFa# zg6~$ZU>me%Mn*hO<^BAoH$ zk%~~(Xy4qtL+pZHv!O{7U@!we9sV2vs;xX}EJ0OfAtl#Zf$fpJC@e}pp1#4=aX{WH zSY7#+wH+(r>E=VYW{rLK&dLT7Fq!Y?c=98(!JTt6d96uo-gYF^E)F%1A`wPjbb#*e z%J<{j9$H4ho$63SOLhcZD;03k^Ky*M+X?55O^<{gpF)QcNTnyU9p;lK-z-5}-E01U zF%HnPEM z(-~(7ecXg=#$Wq>E1%kN&y1LH(ECR$R#`OW6XbO%I3*$j<|;e`Udp;Qc|B4!4*HvU z(UG^(wXCliFe_{Y!g5G#HDFo~?ZWuB6RIsiOQ-NV1j*EA<9p~z{u{++o?;n)umf+?%RJhc#qKF=1^C&3_E}ZXD5OR_=R1sNljlLh&C{$;>yv0^ZSLno z{`+|Cl%NVB2m&S6S(!N*+(0$HboY4{Yf|L zI+@j(AKcAv?g{+uc-lIOQc}7Vf>Uqs5_x%pwWU>yY-^G*uv9=Xgi5ERk=S57#A|G{=!_y zb8d9!-_UO!?-fRg>ACd8tp&h*7y3>It~hYJ3>9*aLMw(ph1|K%+Z}0-MNT_onf9X% 
z6R;FUDrj|DBdw1h8ynFitD|`W@kVBjW>DlZ+zG|qsLfX8S%fCMgOzVjKkf)U&C~X% z-E)8KEwueC(1}%he(!*<=ZRG2lRF=0v3!T%#?XM0Z_qAZ&_W!I^RtqoF?sa@jub}L ze*vGOV7P}X&+>UEc$cMBLHbtOiapSi*M->rCRKN5P3otNf} zn+sZnJcedAD;4vDzvcMr&oFv@16j_^v!(f-8!gJu6W%LK?|KC5@Kngt=vmf9konNF zwlZF@ZqVLHJ;(T;Udd3qe6%ad@3Z_g`}0{(YB8JJ=xw6g@B?h9J|f? zrx8|5kdO^rv!2&$z||6-``$T<-#O?uWt^d#uj`6-Si*bq}M9JOyFq0GFz4g;{7~%0Qh^-_6hpmN&A08-vQV?*HR~t zt8vW4x6$MuX!#8o=rO7UMXv#unI0P%WiV@7SAu#Se}g;CXtNDDH$$@(tKDRvHzQ8S zjY_%=HGF?D-#P`WoJwy?>AgFWRz1-CzTjfbpIT^`J8ZT2^&Q7L6mhh9ff=wNFq;?S z&f9T%%LIc2?wYZh5J=o=+AYNTRsx=N@TUqmS<@~R?(4t#HyAtt1h<%dt@oY^T*jNN z0!CMYwc(qdT5BqGX1@142;)V~+4IEYZRoJskY}^h zQ{b<;b3gNGDl5EbY><^yTf&p4Z=@pDPZC9)i|$d5^CFca-aNB-fLTnq$xGAp?1< zjU?#NjsVi4NWJy#hNC6s_INj%v8Qpx3QfvUD*rRlFLNM9ArI~l8&h-`DjAIy$@9Ls zo`;@BYPg@-6u8QOjWKK;fHMUhHRHt<=D=d`8?me|Nr}5bIr{6`#k44=ibkG?sMPw@hpqB^yd4AIfi;+=EB2+!B~TV*|o3L zj65^PHyz)Q4nr-o?dvkuYT)sm-1qy=Kwpgc<#%Rqy|j8-=kxaB+~OYiT(smeR`(UG zOiOz5+~1}=X%y>Lo)|$-r=hZMm81D}f8TfEXOW&X^gE4b_Cj+bR&oKOry)(io<-B+ zTBw%7C>0oe2lQ|c<~;p*{;k>R;m}fvTM0%((Ya@lxU$HTd(ubHgHep-sl#z-g1(=Y zK=2RGz5#90(a0t|=Xse^S=}rJmecfPR{J2h;NDIef7fD7E@KZpq2s|on)PkX1Fulr z+}A-rdUEF?G3SBw0eZK--4WKZ-_e_1&RsnH05T;n!-FTyrZ(pJbu4D6#=~{EF3hWW zBAdVN&4+xuYmfu0Olh6#)2qJD-@!Bz`+Es$*Jd@x?Pfet&BdewoJiinT*^A|zjRbv)Bo;zSPNIcY zwHq5qa5->F;>lm3q_q;FfoKVEyLV8V7DnBUgfrF+Z2}f%Ot=r9&aCtUSV%WxU>8C2 z-q2e+r;oW3V@ySQBhc2`(E0)Hw`TR?8RynmbDiWo(inT7W-RB4GM z!O?dqb@*F)Dv6AJ3zb59`SY>KC9pCXSORuenb9}!oVCbT!7Z!Q4MgYffgT)Qwm_lk_PvI<9Mew(B7JK)mx6U9y~Y9K zCM46`@nu~1gqak0TM25;f|h0?A4Nve`LA!S6`#y~It7=11xwF&=zx?M9aD+FR@c{) z(HNWe8t+7&?xZvy1E!YrVHD@1(ALgFzOcTI^TJNxZw0i?X&nyt zjE2#39?A~<5*V!fwmeEdp4w>jwxRIBc%jL_;>>C_ z5GyiU&15l9rSeP*MvjMu#?U+S_UE~ZV7(QouMd=Cp@*KejX>k6F2+;mAVu>SF$Nj- zY}T$w&v($T3z8YbERf0g`X9}sU&33;1X>ugbdk5db@W#sY5kcwu_Kr+1lLS#L{&c5 zL<^mz&BDwDwP)~L5B94*0m2CSe3hrBpykHV%|xy@@fMT?Bqf2^s*TS>rC6Tt%l|Fl zv_wY`fKp)0L8^LGYTK7+W;k#=C&tcuqf4!LV;9my)&ckYWN`&VlzkpTx1sTg7 zyg`nFKc3oPoLD3hPzs7J17>|QkZP9}~yjyI; 
zk`;$fo;Ios-9X;mA99Dj2eiKz=sexR%8Nz{T1Umz*#)Sd&b{Z*NKb|>i5&I9nq?z( zci`JLqN){rzk$U43Pp=U3)g4vsFsDdUm=D6K<{sm9HD&}E_u$fXLAbiw!pT|4f?TW zLuD{Cv($ZGt0ZP2WyP?G?)tXmDbJNLF7HKP9RQ@(QP*2k7@3G;Ts^coP&agLYbKhq z87y~m%{oA4N?P-*B(S}T-FP0FwuX-GVfnt`neqDX{-EtL=x7wD6>+|RBHG9IfJ2Nk zc|X+KW5)J2ARd4=ctW-(AGBp2D2g>6hMoEWii~GnW2W&-z~r8uk=OsAulfX|0*yA0 z$hk2V9PgmNWsF#!URppOSIzhEDHChCJkZk!B%m@aBAI~>V#Sw0&o`J2AL8yv+L>?M z5?f*on)dY5lO7v@k(D)#72HNY(~-Ur;OjfkpXh+KCyggJX6jk2xMvrxg|Bhw%0h5# z4`0t9EBcVs`90q#|?gl1YfldS~`X1+n@ofMzoixr-h>E#Mq_WCY+PY-SFBi!h?m0M_aG8$Es)XdR_F+WI@~ ztY!Zht<2Ot%`9g`y{n9uS#5>9|DU6yAJD!URQVM9tADIJvY7`NSpk1mAam`I!ROJZ z6ng%KUY~SLvz}R@x5KD6W8D7c z^HC&Y0i&*`#RO)5ccBVl>)fR{h#tlBS&T9QZPeQq%H-6(9K}W$f%iSKXSVkoB*v4- z|HH0IuE0`$2%J{=8x2lvusKG^-3!e|!TGkx{(fY60el!k4`Do?i&;W1~I^#Xl)YDrB-c@F7+1_>$(Mg^g~zR;%_-6FY z(Re|@aZ1lCb}HE6HLcR_H~ zcDc}duau$RM6OyF@l_ybh8*Sx|G^L-{)o@52L94)B;#Cy^7v(Uo422V0|mFPJF)>dB2=cnOcQZRpdD()>L@)B0< zGWVv^t}4>_G1lgFz8llt5sq)6#a1NFctg*Bs?G>Tms&fbW-xOaTk!!_e;?Z5|0BRs z4;glyTp3vY0@{A)`$f3>4Ky!}e60YAqu}5hM>BM)6gJ%a+{@tbBcmCgsE;C?StcEN z9Aw6DHnLXir)Z@fNj=u{nR(n(%!Urz!Mri=O;#RUgFSA}6YhWdF4Gpw{$_RJSw>gU zar2+7FXr4~EZR8kT6IEea2`nv$S|K6LnTwVOJ(+!x#a!d5D63d^{RkxP+_?Vn zL`S`%i^0AoIJd$sU17Fthoo5(L|XZ_txu&r&yNQ?`_pSviMc18J|2KZCDFA9>A5Ws z{01D~0Zkux{UW{SAG4m3yZB~ujNo@1wDVlpuh5K@Sm90dRx_Z_2qXi3HESh$m1D+8)#0wOtoko6@!4|$&qGyf1nHIUjhtLU4o>mZ zYWgs0VhG$Y?yCdycO2Yk4Tl;)kF!8+^usSeVU4v&@D5dl)BY}{*N34;XP#8*)`EX2 zp4dT~2GFA`aJ$A!1DcaiX&?O`33eFGLK_1-ouERf+D9fFwR&R;E%jR%@3{h9xXz3| z3<){TSUrJUU!U*d-}23<)klz+?$CE0QeZagM{vorX*(b(2hb_sF?_4=d=H;n##0Rg z`L@!Sr~MU$W*vaoni3zwM>84qW1AhY4>~uYUt@*Zqor0r>cyOAlu~;v`bk!qR-dz8 ze|_|!6L#}3*qUQm3*9Y2%kK1LMwR~G6Ne`CBJfxXFqU!58@?Ah^+P|FaD5vTcnsJY(5sqpFW<*7>M5S|9HJQP^)t}&CN0)- zbt_jE0NEvExMi?5=t(bTqP_!No|pSVg4=WG)kru`xSI*p z-$kp?I;a>E8;CmeieAnFucGVcIC=$?u z-i!d+8Nl`kZRVm0#xr~Ff@kEMKxQ`}c_*=o-@?%Y$c6gW7`Wc!>J{b)PqTOm3RR}J zrGdoOLaIFr+A5YEfu}uIVK4XU!0i)UvxetR_?nKjstw$iks|AWYNK4wpJsM6K2K}% zCD<;8gWDNR&r2fuI1*_rk5$a%>Je~!42a%E8syUr)~N4;e-a$izC8f8R$~~6bj^ZW 
z4dBOG#!O;tbJ+Ay+@!aj$c{S#Ujw^U<=iWI8cEexW~4$}IIxjfwLki@0-TpYiK6s- zEzpO`w40CAbcNF|(7U#D4BU<5lhjt)p2138=UM_1I}T1OiAU%y3vQPOvkZEw2SjU- zf9vk4InCj^HRXIenFj@2?H5FXPNN&wfv6@=887`dy*`e-jiR;LqYG(w18OeD8hFNp zF~Od*_DMj+%ShnsNd6ASNk>kzz;+*;cpP43!S^EA#(_{b3LM7L%9Ah5p0^HnGM4%V zFdDz`2Rv}+^j$bCot}oOp?bANz_S6iy&F0wKdXUjD6T=z_5I*e4r=vcy#!|eFT#Z0lPS8_(p3%OTcl#{(dggQvI&=iRF_+N&jfnw0$M7y=jOi{Ur(g3S!gX&uR283P_Euu->D!R#yggpfPsA)x^iZV?^-~B=HG) z7zPEeBNro?4~C;veYj#qLOVt^Tg|%5H=+95&^HMx<)BM{K!+wsx|YrTglr^051uCj zMKI#Iz}Bn+x<=619iDW0$l$NOJI{I>Pd_Jt)00j%0L^|z^Yj_tbGFi(6)dbxZ!Kjl zoHou=K;7?Hgoc)YZ?(YOiVOSbUr&8WsHk7i7|U;1o3@8y8ECha3_Q!cC)Vp%u=9N5 zJa|=%^Gu~5>(#xA<gjXbr(Nd6ai&icudcpGsSb{ZHI20u^NYtP8Wv+V~a zV>5kY@5%G|kl$_a-(8s`+Ky-3Ym9ppI8xBUE%4U57Rqm7ti~ANPNEn6+Ov3{x&x?3 zArG!GFTl}cu(lH96g1o&K=T;hU{&xJbdLl_{PoP}4?xTQaAPh#o4I%xivP&y+2G=d zsm4KDCm<4O)RWc~y3gbZ&&pQUzChZnUR;{zT$MSCKY-28L6b9>r}sh&<5Bgnx(C&V zUXJmmX2yfN)JB9Sz{wrZI~m!2ADWo67md~!&3uIw;%Z>I%oY7I)<`fad>pbA&Zjr& z*W79&;Kv5?qc)oL-HI!o9Qg(G)AAYJ)eb&PhKk0uP377-;4ubJUu{S5@MNPqp;Vv1 z4w$=Oh3%(lVMfVd`dLPs&uQsu)mha1xSl)_nkV-mSBn@aHxTzh_SI>1`FSAM@8c;e zRwy%)SIp}I%MGsTVa#Tx_r270%|2RLad8fmaR)YzcSQZD?lNon)-mcrzHh*Kj-vlO zw2KXNxDvSh22`7&c@%GB))Mt}O7n&EaNQ3ER$}~()_alWr&-nLQQU=uRHct`v}}Pa zTic>p;5T(XT>~Xz@hX_ZR1SGJ?xzOS9E$BVBQ-BAr0YgnWMcU}kMjwn=?!`?4>KO> zx-UMLr$>Ih`M?7Mitl z37u$vHsgzXx}ofgh8L%oz<2ji-{R_hystzd-G{jL6qt3O^?megMyo!# zcvc{-fZjT4diz}2Tff@V38H{LjIqjtr{_3?G9}CMtu{YKbp1(N&l>ueex%VO=){7+ z7h?sfuUY$7;5kp-^n43XtI&ru7p~Po9_DcE9`0_3&xd&1nt+8|g2eoZrhJYz=H{vQ zv2uU1Hk*Pxyu?WRk+lTIxykQ(bod4mbrCAujfTeq>i}q}cjjyCglDJU4-D2Q{GJi? 
z0sYJ>fM`mGMNr|`3$*>*G(ss$Y8?pi(X9_ZpOmv&!Y zjq#ECVLh$lC7_Lj^JRcQ4#x0ISNPVDC-px~;jfjdo6uI@j(*$kp;$#Q{F5vG#Ye^YOlrwv_~BF>X}rdxpTX^-p+qp=`r=H z9=f8z|FA&5y=uf=QK)GZJ~`Zk|HkMw4DN*D@otahe>GKZhn{uMAun`m0q6CVbl|&r zBOB3t>%F?7eTw%e&!zAb;#0v)y$c#nWbS$g-jB!b+yLqf=FKebe+Q@PBW*^wjDh3P z%tTAjfHvS)iBKJ4!m_?-nznOHMVbke4J zj;B$B)tN2Q(7P4b=h@gf_d04pzgzI`TX2}o%wTLtI+lARE5Z}>buWDdX+2Ev%08yvGXj7?#<6b$yF3LYW+X*MW)I0mej~Vh)%LRCh7XMCK1q zA6?7{Yp|JLbKSGf#v^m5kQn_}#+)|gTXn3fwXfP^e_d;}=E`O;FALQDk)daKemMAy zp_i4=@>k}URIt*odJ>Hu!-_MKp7gRlr-0rH0H0x(_d&Z| z^!y$&lEkMXP{g`Nh2ZlkEPir;*HcKb^|CghRTqP`-z;eJ9P;rx&{}U}Zh(n(hJIll z3VF;T7~v`Sa+TFV7jV)Ga0_g#KDQV8TeZk~9Q%<9VdO)o~0GYBdf079D`BH!yix!^7C7vhd7UkZbhZlNH8`%q72LqlQD% z#lY$A{1w{FMJDu?l>&M*Ic}k?4NFFS$ikFWugDS z18FLwPT`4-z_kdTJ^%&VL5-gnGn>AK@Kj0Q?FIKo(r0&cpb7F&7%6TCAN~dl-_q(J z2PcraBvy2%;IA3kRyNts-)9+ZAoHxU<4&Oa(8hG+5GjbW5j#xOr3l-s)=DpzF`PrVregd1296`^%6 z?i<}QiT+9kdLkAr`L1Re9aWD}x`W|(Z1-w-w3g95E7bFb#s;`8rAO=R9%25l>Y09M z{c}yA=~g)LB~NLkuAvv#@h2E{dl_q89yom8_Kj*d5@qJ4xys$aek|X9WlcH}dOQw| z+VX^}xRG#Z7Eh%yBW+*=>s#rQ^IYacTAL9)AGr0Bn+uYF&bfM64u1Mp9z@m}18*T_ zLQngshaSw}{mIp%m8`pBUz2z~8tV0f2F7XWt2ZP4Ui$Wpt{imuIT$k%ZMHtC*)Q{u z?yTTVb{6#pj$V33juF0>fU5%dp9kk& zJo5&4?Eo@&S9(IjvB;k5x7|=72bgO?)7gx=A1pE$r2{kC^rRE<)~%gj#)K8Slkg8D@PsE(wE;`(cp6#O z5BO7fYAJ6zo+OZp1f534-C?Q&oJJ*o3zd#>*N7|a@LX1G>Z@zyF7Wp|tkuI{VufyN zM;bHy7&J+N7GFZsK6vGtq6NPrA%$t71v$z*wjUl;0OnNiF{7~#e9Hl!$Dx|`q6MF8 z@RV`%_U(C`M(0^iWCs-2hQAI?thj0Ai%|Zb{s1F4%_cW;$b1Vc2{`}j5kCSfMr1dI z=l#LzCA7L20pV8Kr^ENAQ24)aWHKYP zfM4>`NX1ETz9q7HFFe|c{_17>lv#Wav~2=~n!tN=%#-SCkRjLA|QbI}^C{t5<_&r2{O|v6C}-)(DUm@Wu0D9>M~B zh!s2$NabYgL?wEBm2cLfm<#Q`Wv+MyE?fnQuB->LptN-7as)MU(Lv{p|>6KtZVyeDCMih!Rdn8kqo7_eH&)2V^&EzD@k z(Ho!XLff85U=ID(L#mDNbEdccOe}mngJd;<9-fxv8o-DiYsNXN7Uii3=x4^g(rMk+ z`EafsZz+1RzecY2(z|X#*liB%OSjjJ~ngqn|J&djxdcjzv4lU#rY}^43+XODB3v#r};3+elh|%iT2Ahx#ouumXC} zOTvwX^r`owCidtUm<$I><8F;$E*Q+NCunsQ?QVpX*v4;5`Y3?aehy!o`wWxl+uCWF z(BmGsZHCryz)H<7Co*SBm!^O714vt=wkC#0W5ppjCM=*3&3n)^n@|-TR=I*1XW$Zbe6{^SQh5 
zH!`L!E`y_i1oHqVGm`7tzHrct zhI4%PETpMG<2jsGe2Sz09z0nAX|Xb;Ss%5bL{hNg>4U}&hFdA{p%479X2J+KZv~bD ztUs)rk_H!QgO|JXQ<%|5(ZA8U>Z^4>wNxKMg#@TM6wS7_kMH(IjhX>{7F}F{O>q^M z8yqKtm*)ogF4hg%ei|;iFX8UTHl%1d5XbS}>YC2{%pPE7-m!Cg1>ju^q)mCE1(b=S zuNK@%q4zWNucnj$=955eU8SYSN^4|b7QCrL-?MKVXLkqpqeu0S0_#u2GH?C>O_l;t9_&|ZtdF%zJRfBwz1Byr7Bcopd=p*Py zF)&I6k~#eS1K#EZpG|z4iBwnt&hMdK3*{9X9AfS=rGUo?J? zuE>^|{y!jX4H&Zwt@K7V1QR_c1>uw48Y}KMd z4O)dGLw5)MyOr>0Q-F6v#*p^PRU`1u51gs={UE$42d&KmyA$g$9qO-uR#)Ik7~?#Q zSIm>J!kCj%z;8Ox>EUh$)T4Q`^t@G{-N(4O=xZl<>}IC#%gB0jthGRAY|zd&Uj5^J-18q1qJmyd6y7&R|ARG>HG244jhcOH%LY=r%tgC(jA zB;5nPS!Z2Oq}ksExbhXWGp<}uq@HlIl9fTvH4M>1|G5P|^Zk3*KK$`)7~9_demg(>;oyoAEB+;U(zfsX`~4_X zI?AY1f-#NMeK5GAmp|0g?tlM>o;PAJ6mi*>>vw`#D>z{U;s2Yb9P(_2uIcr>?X%Rg zY4)<2%la}M=QdTW-dTvdSHaz#A$PkIutMf)tb^mHu|{_wot~lKyH7=0j7M)5f{pJP zGq5fXVfk9JR_elA!Y(Yc_5aMpnU6;1!!BA4)-yKSp?fj(UpBCv&mmDGv8e8@9Dq-Y z(I&ITe3NL-2xqZXTd=?GBi7<=cu^qtdx4@cW8DW2tzu=IyK(te(RGG1^L!*Du0V#| zqq63|b+@g%^&TyavUe_@gie0|XDVa=&D=CyJJ#e}HGf&7`I+dZZ*-=KAxh6GF<2p8H4Kq}6FgJ&E1hj4fq~EOPRv^9tz4YYCi*t-ump=7w_CXtSfV>HGbJaN)Jk7-J z%x5#}RzO25^*%xGMqcd#4r7ge#132iY6`fUW8i!ITE;Xh-ra$1j1XG6Gyt|SNT6@b zRui2AZ^EIT-uG!>;YpZ>(ZUJz@iR1?%dd4S*Fx|IB$xYB8jEb33_ z3k|c;zii-f%v2=N6E2H_?^V8!KuXQGoCdWA!av{2&7;<{ZH&nUEb(;Y;0RboqTR+0 znnkNO)l8!<+`GisErDzja94qfp5~AO#zu6h(~n>eZh%EaX#FcVx5JXpL^3>4JC1p9 z9{sLEzU1FSV6r{nhB?h;kvHG2=b~wsp@HXmW~1|S_&b?iN7Bc${PbitR8u_T#rVt< zNVWAwhaowOz|QRBRN&kK?DdhS9klhW{BuSP#cjnxP2XAUH4!=_2J61|fkre14o^3{ z0KLs|_Y4ltF36$(mC(u>=;qs%M}GDI^(^G_bs#n$bbwFRrE7^x}aG>7KessY%E8_1k>Y5GEKcSy4s zYYP;S|NYUgTFg&+8IypiAJ3;VBPosFVs%;~AzkQuFZRb(n$mU^e1@Z|(fAr?@yv4Y zb~bjr1B~HH*i$ABBk4uaLQiZSOp?YPw+Ro&bPpm=u8Z)^$M6h-G~2+(f`VPGRDiOGh>=t(PkC8@Gw^RcVt6< z{yo5G27eJK+XJ0B$o(Sdn(OUzv@+tM5_00aR}otDMu)2)^?GZPxmN}bt$=RtqLr8V zUz7FbI&^6kS}+pcyp9E34pq$FG>$wO3L1@TwXl1kS{hVQUaNrRM8?gaePj6iG0(IH zmT%#|QC361q#L{)97wSKt%Xp-J*1mZ%xH2?eENsJx5Gbco4JcJ4k)6LJI^Bbz0wm} z+!5Nw9CMQP=DS*}a4V8AiqTtvOG)^e&Ry-EbqexAkEUpzRY!k;Q`PCQFjTePq5JR` 
zf&5w4fadhq2cBE-!TKInW;Ei;EYeQ2*Ecd7?J381VOh_6&M{`9P&HlS->mvzb+U=j zZ#;KDWlSqXwPrj|=D;^{1AO(O%VBPYI#hE7s2zs-{drXc0fP%aNJ%tq?0wpR{1_r{_a(V;cf zqvJVBAJgBDL7(nbghRp9KyubWi={}tu>u>>m?x2q708A?dZticDB!B(NkMXeD;pZ!GMN7QK(l(#{Tr~V0p{%jshtAdzu`Hv5Zs?IgUv|XJCR!Vjs8NW zcf*~w@M#VGtUzaPAUz#vcZ??z;jnS`uKk7r_cGo$Uxs?t1bPt2T0y@@p}e(1&LX$& z2xJ1eaZy$XZqIw=d7jl1VZ3TJWK6re6?!M~S-o$Eq%eOX9F1iOjO8~Cp z;CO;7-^0mIkfQQPtohAGARZ26%Kcd5*~}Qp&7FzJ>Ts}~9nised}FCLLg|qIJqc)x zl(HVtF#bOXf6Rtn1;4f;+eSm1>0{o(Oklo<=AA*-W3c7*15JDpD#ZUs(^mQKJ@Yy(Bxtr`y`gOM8LIKNNa!ErZS!xoEw39GTNobdm$}H z(egObs6nk@ZOP~|Rio0w69b7t#-qk~9+rSC4b>?8h z%(|ORpPo$i7-M)Y*f{2#@;qhj!rOkST5xL(*k#bq+enn_Me70>En;@$WTdMGQf*}2 zSmv-9%os*vmZfjs#NJ1L7O}R{+m^|X@ra4E`HAuM@av6t{gcYN;#(lMdf*Kx>H4xY z@>!JclXE$ioCNoS3-hy#JMQpYqjzf!b_6d^sgD8LE3fG*6zoH*^m)=L7t+ty4jAoJSLw{c6`Ljr@XCp^J8&3rs0^N5aBj!l!_tr!8 z1DvonZdb5-lQ9+t^V%HnaUbSCNYgd2TMLbofoTvLkqX^^hQFJb_rC?gr_teUP%oS@ z>mUQy>2EdnJ2T>!Xif*TG74SDLXuhnLrq54PI%I+`e@}s-?&`Yq+=zmLw%Ya%5!fV zKb@hSRg~- z0IzjCcNezi3$$zkUW3npZ!>-ftCqchrW8Y3L$Ows(fzVOd<}b$ihVx>MDNp6F4}so z?T0}5JN6`Z@JtitL|3=^S1KdZ#^X(d#}#?kxRZJQL;g18IpZ!zG7B23a)?%Xi0%YN zJ(T+EJX?1x99xg&(1$gOtMS0O0S<&bMWyNEKD0kSl64;#KLb|A4Zh8c9)?EcLW7N5 z`z7#|m4_#H@vQ>Z+2M>hg6mK4&9h-r!JrhD*OOlE z#`1~zU5s53XbwU73#^TgV|kLX6k0jo$F1Ds>G3JdS-zj92P0YC&nka~Smy}29;>_g zokL$`p@cO6#m6}MA$)4XZ*)LSy<|uE)dN-=Y12>ar#oq*hjb4VehVl=CzRxa&hbDt z9%)+4yfX&5co@3fL@G<8*ZSiIbGHxt7Xm#|;$%Lu`OHR6)`8z8=;ICW3!}f6;JvYq zM!22`<~)1J&05erj(9Y|Gl9M4Paaw*C+pzg+uVH_+j-loct7`F4q7Rthv1U(^#(Rh ze61zBiECf;$((7gh>icHnOxllKZi21-~Rs}=~*VY>ZehRV05JP(~}zdY54E||8mDG zQnktd^+XC!QCge5e+TrJ2cg*I(9?P^{jooLZv>TJ4fMM^T7430dm8R^!g4l4BCPE7 z6!vi%-`6oeY{tfSM;iAs7wkf-R`J&ipr?_p>#Sd1;mOrVl<%zOEqhYN3}k2-IGbH; zCf+~DX9m`y6IQeq@_Lzmj5}$JT{KS1nEic>-5)Dl1-UfZ#nW}&TSy7!ZqM^JL$@N( z%|~MAq8*;g;tq$iz+~*`4tmw6WxX-IWwV)8`qTSbsQM*imO!eF^vOh)tP0dEfFy=* zN9jio&nB>YlM#QUmu@`Q6N!5c?wMm5ixj;M^j2tIh-Gz7@~z!U%|<8a32;x`YK>+g zCW7}7aG1i_S-c7CV@@$bXJRm0Xbr#S^8s3#p=$Jv*}ij`2@cY?o?FkzG~VGauJ%NV 
zOF&g?jCs%XcwO$NAf*RrISU#O0P`5Qkj2c}6Np@)duH(8Q1um_O8{$6lgtIIG0ZZZ zp?C|R-N;WCbaWTs8RTjy6g98XbL?i*;snoRLW2X0V$70xhUSxGqeDs9FXQ2IF=zZk zuY<8Hm4emoe4u!U-p-+g#{3LJQ|s}RJ1`y4WxYzCulpct12c!(0>zKucnf|12FY6p z98$>`9M@$1nD4DKxSu{M!IQd3gB3EZ>OK!V(;40NouK1zXfX#^je)YB;D_9M5veT6 z=no-3zD4M7cXrn+JOpW8OK*c2$;z&-2(zHzTqI{Gcqg$Q+t2DS9IJ2%nXkf{XeA?^ zp}pC|m$7L_ka6Qc^?~~)X3Y@KE**kxs?UmZbb#$;;C&joUV>E21}7_F|Ar16<>?Nz z9SJX8%~+RnF*HBRle6IA4S23c?Ia`r1s)d|XAz^y<=3HWHuP)>rk5DS^KQ;Dc5C|a z1etMM@vTDX(}!qY$|SH)f-6tMCHI)j+}pzVE$DqS8eE?qqnU-RGI#_`Lnpsh3N#}U z3)zj)9%A)zgPyJ3Um}oM-+PUJ(yMre5wqY~G%f9S2OKt!{||7wiCkBP0)u(lx2JtP zzXcwzMu*Nq38OQuN@>KyI@TuUq8g2P5^5x1X)d$U7#7Uvf5NxfP&}I_D)Z|~%SeF? zuFOF1;=0ESquXxj9vV%lA2>>^vdWy%OWftbdim*v64EcU?4a+Q(4ia^wzccYz*t%Y=zueVA){TVK=h7z@*m66VB z)d8RpF7Z@9P6c*TZ>f3R*8WaGT1JCee;{1S8p7zUK0LdW5x248Sp-ERuDT{H0*`8M-!|PapC`e}05TYu5$IKLAHPw^+NTP3Q+tJz23l)F}#w7P8JfjKsw= z{z%}Q2z1idm~h{WtlC)%$USBH0GMg-p9IIFz!Aj^91iV6bs&t@_R}(+c>=l^iLitB z*JyZURT}3I`5nQ}r%<^7GlG_G4Zofc`XrXFB{a~-*bCcu3@seYn1i8e=mflvcz6CK z;MFZ~yFy>(nDgU!LRii*ep@KwPLjUL!SpZ!2&{2w)R!k`T|jD1LnpK3#&T^WD>k*n zxS>Dc@>t+s1D)mqyK~@XFj68zXNiQ<*EU8m>&c4Ldji{*fE*ZKwhMT6fU9R)-GsL1 z(Kn-Sd~?l!zDF533cG5)!>_b&k51@S>`R{`kk5l)vY#iq@mrBQv9$S(w({jL&{u)0 z)&tuP#RtF{--Mq*9z0|F6X3261m^R80CsH|eF0Z$A&(`1Ha~X;!mVOxh1&iFFq>8F zDGoJ(I17oj;@z7(`xBDi35Y#o;xj1G32I(p72+uhd3nlx9(UlDVo|SxjXof~wcD`S z=3MJd+)6)9nYT|ftF;FoE4Ww%b~~%8<4A$?z5c5iflY70z4qu?6p~>ycU`dC2G$#( z{NBKJ&Ek0@v5dzbK%XIc4?@z)@nj^w)^9sP@8-q!rL~d1W~1)|iUz>Z4vDa4n|_Y5 z&~!T%BOLsa==V4@F^hZ)cg*YjhzTOe%zYJk!gI1V!S&fd>Dd&g0{-hw z{tJ5Vg6Bq6T1D#%T4-nW`WabP9lJ7E#++qbueEtyXQX3S27=l9 zXq%M;vcPWxbN%1^=r8;ojjn?9Rt2VCu{f*Y*kH7K2>6(hQ;7M&s_WaJwi%@dp}i+w zM>DE#`L&?1zeicG9HbZ5ohf{Ct@J7Mxd0XJ!dln=*T3-7;WYR$4jI-vXqD7ZOxPPpMon6U1Hm9LNTXjf z-Nylm6=6oeuPbPgb-K*J*XN_nHaUUs+ zYlLI0DpnvN6VceI@Ggv}Q)q1zvvXT4*4(JpXOK7J3#@Z$G~X9sSsAI1f_9ydvHo!1 zeW+ymkK|_pW8UH|dI%Vo0+RzgSBZA9e79#KoQ>}_`e7Nmwt$tHb}k0~MFR5-B+R-9 z+C6P=X+Di$l|P9Q^hD3$NnxG?tOcNM23qryx4+PIcsa5+y&^*thfs+ 
zG8TEK)z-p=X+Uac*cy?9j*86>D8yuMC&p52498G ztI^Og@T4}@qZ{vcuJWu9I~j^xrkz|PyxkEG#1`yB z6)e+a=+g>Wt;L8P!RB%1o?}qzE?QJWcMGF8Ml%{osfXt$WZoR@VOZ1{bl3Q|E3~oZ z;a%`6H#YQRBxM5JT8eZv4Q5LHNBXz)CF&7!4>6t<#NEKBZz!IA-iKqk;q5)}!#aJ| zh<<}_UjfxyXw4nLINO2x8~PXs*6xO8V(q5@No8!|Q^0eEUUQ)>Ptqoq-}}K?50KGq zJ-E{no_q+k3ect&-^`N^?cE(@-mAye@W6j)rKK{AnaDN0on{DM#@@LfV8y+Mk&@D^ zvh*9BMOUpRX)gCU?wFP9Uax!DxzOfe!1xpW-k=v_2+i`%P0#C*04oTlVcj2rKSiLx zaA2zkyq?{Ty{}R}>Lox;dK?^AF%z2r%`q3sr zDO=tRH6KAwzUJDWw5$Z2RhTz&0cA-zV2s8+wBF3;74Y*8Byj}&Z$q~AVO_xYIfuTN z(3e^FM!F~mjggQzs8|u2G)9i&X{#5r1nr&&%8JNn$U~<0wILont2`P9*_OXjwox!c z8=d|foEU&aEZ}Yh@T|)Jhtd40*vN3ORNlhiniUp}hn)(o@?&r6pojVvp9y%A4|}EG za5udhz1WcJS;&FZlMYJ9Gw@>+GLpdc62RMxb)I{O`GMYSaI=e>!GANRiqPK3!+Lx= z%7~#eoI_AFV@=qQD}VAkmS6c&k7dh|hfV6Y`iB;D4#WJKGeiPt|Y5lBT4Q;{~pKg zq`{fTu{G9}TZMjRU=KGz)hg((IXWRxLvA0nUlcRxMZ;IcK&4UOkdq>3u!;_vA@)uG}q+!`eRv|JI;E`cLN4lU0X3f$K0& zjRRlb*;Am|PITb}qg6qEJ?X+)hn@v>2HfVe(obUUNaM+exL*pqt;u|oZ#S8PX0Spq zU&b@(P63_Va>wxl{vQOwbwG9zsB*wy4Kysxx@#Z2cDK%%-%MA1Yi7-PjzuI;O<>OT zjr0bX+=7zYHou<@*2LdHNAu)-*R~#5Kjx8x^lq)k!+iEVcm{7p_aZSnxKO}9UtN;dsxxPU2f>uDk<9xQFmV4YSkTd0?FqDYIs%*wm zf5ZwW3ph^DzA!TK4ifncn7f19f!QtzJDbQ1w}Tnd6Bx|zFzPj%CoaR2N4fty7<>!< zgMeo;(796f6q|O8(jQKFPW5T9xR+=3lIUYLO4PVMtGb&VV6~NDz|e%%kNtndIz_o2 zPCp;8#?wD~2-+76T6IP5CoxOi0^4rba8Di9|1g3ltkNb&y7E*8R9OQA<~=VAaP9@x zlfl{j%1maSN@&6r__B+Ut-UXN^Y}E2RYqr|%Jub&$Y%^sM`8z+-kn@E5-tLYwt)Xu z(Q_ZzSSrue*$Cu%#GFApBZ0=;EMW%80p}AuyB*rQ`kn^GOG2+?q^%pa;08VGF>o(* zG`-Y7+m}Q2lTgo-IP@JD|2)=zryN=**Y|vtUYcl?Q!P>bjSn7+}hECm! 
zgseqUD#EqdK;4ls6QG@M?B;alFp?)?3_%9;{1s-!qz}{jf5y(1qL*&`yaMH|cOUZe zn(4fOwO=Mu;f{#ar=+g2og?V04)Wdv+>Lsjgr@7|cm=v8)6SibNbG7WpwTMUhW?&( z?D>*gfY-W*<7bEMT z_auQ2prdvgF$2GuNkJ<_TTLWCn{uT4~89t4GR=0p{CiLzLK32B<4}3P- zW))+NrPq~UnL{hzZ)YNLYvGqx?O~{U3mw#hy*8l4VI-*oGU(f+J6-jm%1rEn@fmu* zFVgBPEe2rs>e9w@tTUlTJHE|9Q;qL48vlJ{YYtLx&B(3L!ML(GdJDyWjsTK9*g(DK zzaeYIk=S2>$2G+wD3pZEN%<`3eG5uQ@u@KK_y^LZC3z66Zz8#?q0Sa?yd10r^mV#B zuo6qqj^3l8->0AA=+18DTwrFQH8qvw`;0J(}`BItSVDETQ*+ z-YRy*;cN;cxgWWKzq8@#EJicB&D`&%P+=kXZ-&E_!C?yPtQ+tvoYjARxU3Il9;=z( zpy7X+tqL;cTX1?D*zZU8YD2?V;QSIh;a=H9aC#pqCmZ&gXN_m=YQAfrd40acMwt=uGdX=K*;U}a~{OfO*l68Mi} zx!a;8-GFZj{EWuR9YAjvV?(XjTnXqKAPqkTurxrEPC_I7UiE-i4?#X?;hDB(y7mU& zM*=?9hnGt7gHSa$l=uBzPjh=DwlOX8g5Ngudm5{Y_Q<-Gf~o^^FkxJzK$m=K->BOEI%Man~?oC!N)jIPhQ>z%+r9TZor}WjP5@6ci?So<`noU zhJE4H9<*W&ywn%vO#V4oCqs{A^goF;@f56;I(`_co#y`qI9!%BUNPEvqKG?(%L2X{ zeVvW?BS_aNI(V@^{t^SwmKS{-4)#1 z&{ha@uzdQw!UHaqhc;V)G!v=HfD`T|v&GooRP2_u zvAeKJnat?tpsN*C)aB_w7L83B2RFuItK3tV%#}I8o7qZeQHPO=qFKY}rEy?eZXw;f z;CxSP&~-4sggxj5@2B$FjGSmJ*@|FA<2%7*#_IqM+u+k^pm!biGLq~HKLyL6&-NJY z+`D-ax_Rc)B;eI5bz|ggG~!pTMqn?xGgf{$W=;8Mbk}TZs}!^a*M3lc3~SQwfMh4V z_XpEzNK88@@&)!h3CULPF9mad4z$P#q+%K)WCF!9M!f{A6By@N?At*25(UMr%$kS> z&0sWF7)CPv%-cZ^EJAVYf^o)STrbV@rI8M!pc|vv9kHSv`PK_KC!p>Bq4lHCcrjKe z42xV5?&$@4gm=HT&D$`eKf)Xo7R(Pj`Co=H)}zb!z?FHl>5UEhnSKUy#dmj4Q)tYu zxtyWAS5FQ3jXPE|O`-SD8-8W%yk54$K=1~#U_HGFSdJcO=>+WLP~f!BGT6D?NK*?& z+01CMSk(2jo=yvMXe%I5M}uq5h3nCkMf^|3;=F~#?Sw9Qn4cPf!w$5uPS8frM_!=w z%;R`8>=|fR5Gc2Cr2_9Dd4NS9P5qNQ&@eTa21ztoyfF=wp+KYYH-RRAF@H2YOI*oTT7s`pk$WC4$`!78G6WK8G@N0V34>cVQ z#-jxVk@-;M;VraY{Qu`ikDU>Sav~2Dok06|`uLgmPid{>m|^V`LVJk+#u5s}DSlJY zhg|4sGc4%h;A&KWotSuna9#cv{CCU-e7+rncUSP)?~b6}__<;5Nmz;iQ{&*jIoHv^ z63QYr2fuC5_D&=(o>rb~qGb5I(ThG6ipmVd1QvouaXeKF9P=^*v}3(JgO$Kav_7{@o)0uq-F(2u#sEk~^%80{|#VBx_#5`)QUpYn|Nzdl&PK0jNSpitj-}P-- z_?-{PUxT8Bk-*dd2WvNugc3)f=H%c_$-0Ne-Z(qB0<#W-dp+HGB7!+(4G;`LLY6T< z{LbANI$EpfTmXb zs|5bn;PDJ#7$5MX3D5baVm{OcBzG%!XCnirxNpXDDptkn|E@EP0Gx-_euxoTG6ReN 
ze)GuQ0g?x>)@!hQlYqK9^fB-FJXqDoR^Ejr8Nl@jB&j_(8K+PgtNsoBMZvZ4;I0>~ zCS!YMWHwk>4W~Z5pN|BzLi&=x=YIO^%(bPozY4vLwt5Z-^i>#x<9nIWm(h%yj(wDu zHKC0sp&Auut()mcdn`~okG;r9yOGa+tS(pbY$;^+4A(q+NC@Od6TA|kYL;^YNr{1v z)<1Q97mnN;xA+0Dd0tFAC~j6TQBKV0gSqE>VhmT#LASS=nQkIu=EM4Kx{0~RlR4^v zr#Y%>inTsRA$c*}Zw;RBz%@P6=I0roc^s{L42iK8UIO&(7Q8XE;0e9kZwJ;P89j1O z(in$d>2U;iE}=Vh(9p(#q_hSSV_+M=zp`lRw_xs$|7xJ^0!*o3w;f6T8fa!A5mt}7 z4xEF)>;^P{o;8v6#a?CZxfiI{a;GzQJP$Sv?vH{R))W{HhV{5}1pKW`5UMoe=|nT( zeRs4v7S8R4OIMK(t8k8^-xw_ZdGM|ZRZc=1&s2Q{3akYl&l$H$O)~U!m%_Nx9C&2R z$U^~7yKr?X`0B+-4B-C>>1z(x%)`6N%Fc6&^t-tatG~#zY`S7;{@}@KV7>=FpW@j$ zz%&y*a`!Znxm}NGHrf@(wKe?pZLA;gdScRh(0eDeOQ4T6%tDD+nG$g8CeL;WFxv#i zArIU!u=F&(@=)7da^IkiK%0ipNU59(#fxDtvVp>tbja5*2A%wjZ`L9=3%dwB_x$Y1 zV6_D}4}yu7s}6MA5x}e;Y!0(l74B99s$x)e9RHJpx0Xowynqo-qrbcPJDByVr!3c@ z&tz5tU(mV>6fn0uoV#VQ59T0s1ZTZfDg0`e6VT+XaAprH5`81>(1YHAgztj-W|dt= zzIubJ*?~r0l}9qmp^f91W5z)hBhbx}8H%jhqxIOTAqCP;uT3M~8M;BGOt^R#pNFw} zD*|r%IiEp3toC(k& zcn^TDxr_SKf2XI?VEY@G`-WuA(Y4I5<$*JtJ5#YmYr%XXTS~Rb2r0Em)4{=>HN}^oCBukPH2@t%G-^lgLABo;FG- zl=r1BcVpD^Q0irHbzPB&g&fKVem0lO8uuYfP&LqWql62C{S{{CV(P15d94*(N0TDEKF1hcam6S?uePoVLKL?JR{JoBQsm6P}tpiRTV8Vgav_px?vuE#RN0+OCBUiF_XgUG4_zKah~ZK)RM0Ahf&Sxyiom?g8pj*x?vBJ%s1C zu@;^ScGgC_4{P%UV_BhV7<4a;Ok_Z_zv1~f=H5L>+BhWOSE$t=j?_b|>Oe#DK|8`- z-_hs5x3}nb6Sxn;dSugMLFlHWETx|cz?L0o`a!7Nh5o*zZ8f0m2GyKf+&A2V&94cJ znauVN0_ji4{aSi)UHBY-GlI297;iIWp~ZCmdOoz3|2z@Y97xZb{2942I?8CZSZK4E zzO6D~)K3L$_yWp88ET{2ZUKy>q3iu|%)3%)xxE1$E#-}7l=;GDH-R)SGHoHly$3Gg)kyg1l5p!E-oVosV_@McseV4Hd)5#h8* z0Ymq*tT$wom%fIPKyfEhH-_*3phNmrHz3QeAt!o{B59)s*{CYB2$aP{C_4h(i-xy) z^vperL~imjo_oE|*N z6Y04mRze8(AmL50Znv;q*4^C)|IY#Cd@yf@zJ@%bo~ZBxI@t=@T#Aet{o4k~n#{Gy z!1pHOe8_WpGFzZK)*Ltlwyu$lo9afdwZQ5FU|CMf9Z0sgSRb-%U<-`I@|2<8NV2P! 
zVZf80yCZ?{3wns4_f352Pn(*+?EZWr_NN4RS;cV=bTZF#5E3(ls~3=~#lY%{*gVZ$ zP)g68H6V?SOTiA$V%@MC9Hn0iFjtp91e(T-<{GpE6jC=^0`2=~#)sfzeLm?I3r(jY z$$gQkBwA^YeJhKDPgb-v!pd2+9;0l*o>|36zuG?R(`KNl2OhQP(R%dmb~I*HHxTN$ zPmmPINfUTjD;P@~9|<>ec;*oxJBCiJ#OAF=A`W4RhJmNE=4^V%Lb^Ky>lmXnY6FWW zrmf(=m7r!q#rd>##u|nmU0~*}&#XM16^iezM;IpxX*2iu4>)oQ9M6KqK`3k;U?UUW zrr-Yn?gTG04&@N?Z%2`tTO{}6e)6Ay(RxV%`T0!VU0u+m?&9%)$s%_ksNp8c^G znMj4(_wejAxcfI&&o}<|NY{_h+#Zd*d4o^p=T3l^Rt8VtZyA0c=9zf3t~RpTk@jXB zg**dA7(u^+-U2-vR!h^LbRTdTSEsk&5BTbdCO;vUQq)tC?|^>BafP9g6}alYbrI;4 z0F3`|B_Fcx2{++bl-hh!L&JdE2xI*~zwqp9Tq}Ua-T{qDpcy+Ebpm!oPt+G+{2CB= zx|Ju5=*?MAYg;v-heL3%KHu~}>4ynb`DtuI%q27g0m^;V|;LiFJ|(ed0%;Z4en5Pd%R zkfYCOD_`TeKZn2L@lEDtW=f(>S$YmdG$znK^dlUl!0`t()`~;Jkg|qgdkFp*x2N~1 z7CqF4UdDo%AFMCW6N2O6pKp4ffNL|pxi=fnOll7AA^z6HO1=SZ4f&jl7J8`k=aqps zjj%rrc|Man3%DKz<>j|Oj;yCM^i2x;TQca!Yx1}>tMqu*s(QNC2Q5Qqk{F3r5`HXT z- z#%0mg@9C>z(Axg|6c451p_t!&=6}BBhNtoTEatj-#PLu-?%!@1%F4a{y&yW8AG^2> z{MIv87vTP(gOEitSr1~n%45f@oA)sk&1v4A89fcT_ysAR2|um39}Nwx zeAbAyNMT@E99Wi1SS$0HVxh06ANE7$=3`Ytwd;2UZPL&iD~>*mtj%X!<6{={SIZQ~ z6*EFlVh5k+zT>o{k6LKcS)^e}@Mhp1qP4!dpdU>c={-KZP7l`NYR8OSg&rP9lFowf zHD;c>gSVNQ^lW8b>*|za1(Ff?eDsBw7v$XX5wqP*XmS$R{$kbUo@O{ww+&nRHCmUA z{j@q;3i{T8p3S;&Z*4LZO~rbop+>Rn#i;cGmm)UD;`8f}NQt7cG?aWSW3LjS^#Y?b7 z#&=mU)u;#~E3Bi^gYO3z^()5KUzZ3}p*WwVK+*)7oT8;!M8(lrGe64$g>j32am6>s z;q-52=~>40j9$+aG#cM5BQw*AGG<*U@IE%_67606q%l)}1%)QTJ7?aSto^L>u>uJg z$jWFgTq_E#)-uXd$oD#)e;2*hpW6(&>kV4SI`#%V?uUl%(fQ6(4>-+ZxdeosPZNp7 zH`<^Z9P%wR9FCraUZd&#duXnY<|E|7>VQ_48qa(b%X71N&bSDxm_5n3)_*gDtpOuU zfl40%;b16y7<#|V)Aus>EP?U~aKZ|Rm6(-1$KXC@PiwQeJ7-0RR5<6^+-k#K+LlIo zE^&7_96kjt7BlWB=7W;7Gva*=n%9+a#_|7m#@GdZYmv83U=fN^NJmzpx!#y@i$J+J z)=|cP87C4B#e2bvG;B?kfQKRfjF~Bk{LBZ!9gJNR*;#`Y|H$YuJQcx6AF-Bb%$T95 zepkJnXq`%{gFvrO=`!+a)WbBe=)-fygZi$U!nG8>)yDGu20zu`&ya<6@Xh*T#?WjB z8uPkN)5lfdT|p0*us{vaFl)FZqHAU#E}@4CNXpaP&!q1)@L>}c$@);{{U|mK-kxulU&I;BlOKoXala@-N9v&lpJee^Zu3D!ljrl7T&Ygp==iqz| zq{_3Z+anS4kqgf#S^~#Uv0D2TD)eDZFp!>Ig(flXc79$%wxW5u3-qszUid!#7aBH> 
z-wNnpeO4u|*sQH(l!6+rJ)Oy{oWnP>0XIRx2srRAc(`ucg*@~@&dgJ?KBJKv%8EIy z#_hd;f5=>fZb)W0v>cAy6b7aQ=%}>2?vx9wfNLQuiiObeMfflY`w-6aQDAluO6v>B z=BZd<_4JA2JiQMa*$j*aarZNz(l>CLzeYP&Mdo~C){AJwhgo24kvDz5>A^bMsAQv3 zVvvzD^r0Pg|0Nkp4M*PEVv(%RSPfoOXRMVxTbyye0*)c@JCin{TCjKUj3?Gu*WXjE z7Xjx9u$}=HvuN#ZcrUDX6RcEIbl@8EX$Ne_d|-CP?{#61rPqttkz- zBv9sudMQvP5gBQPWvRhT?ZmC0sCHTOr~Noi<%Ie@bq&mQLP5G<8(SgxzLBInxocNl9$pcgZN zX$mcS@r-Zd(ah-k(9+q!`5p8t1Y|3q)_dr|Kwy0k2+P8SDe$N)ni_$=6=1fRk8Nv* zJaz(#^H{$_{O=16g&A)y&lv;Lict!n*IIn5;AhgW^$wjUtb(WiDuULzSV&cA$tQEHOCa$nP|b=Kv3&O&*emEz6Qu7I zzWY9Bo!7cti(uqvdfkr|zlMewY4HKy7J;qr?)8w3@<8Z*lqXl{6PN`?o{JL6bID+F zCAb<1)dtb`szBb{gKP#oYv4#f`myr0{>u?uSMGX4)t6WmlwkFGl2MK0A4ZR(z7*p!cVEvpSCs8uTt2IK5(%G^mg@SzT6I|m&2UX9h;R74(7>~ z1P-Hd=P>t0^5gqvGUIFq{?lkgEZ;pr##(nv=+in1#<5z7U?VhgXD1S>*MmN02pGrq zF+9Hj$2!xqm0sO3GtR|xDq?ubc$ytpLL;F(iA^hLlzK_-Tn)THRp81N=rA0dD?`^> zjN;5^<&xt-Z1#fZqI3bDDrnna)_p6Wk=cgzpuj=k{1Giaf*j3brJT%+WNy9tfzAVzr6CkmAi~Wg(#ORIOXk`FOyIIgF}T$6Sjv=xk1| zys80)t^yXZCXJ^B4t-RD15?n>Qe4{s ztgGPYX0UZn@^41p44&>f|ASP;(DpKPYKsl^%)jcu>^*l-jKQ1_&Ge&IKwh+E_0Xtj zys!PS?U%$?E6NkHf+SmvVYpEwTxzu>wa75BQ#wlg~Ic_1Ikys`&dH2@iVjOViG z^)j|+G7!{)dR4d^2}NF`XJfi|(aRJ#cODH}&6`mTu+=xV3H;1=v)1h~U>ycj&PP?? 
zmuKmEBJ6UwP>A~lfM^c0x@*h&yr~xl_TLz{B9JFCicx>>LLq0xv5Z~{X;YqV^4UK1 z0et~Z<_h~cKYhOnY?)Bhm84nZ&7q5Hf#rdz)BEAV0Q&2XT)hs|Re;}Wl4cY4rU&QmM0ivm>Lr2K zF=(CvJbR(kbe>#_mOFcVhn`ta(6b55D=&<^mPa~=!|6f%R)#lm@Ny#0Bp@R@xu!2` zG$UI(Rj-v*7=A*7Lsb$A0fF(1#_4TFDwVl(xV;02J=5YQ@@6G}cX7XkcIHIvLx0Rz z--yP0vSa>$tJb<0jl`IB-;k%%xH_G0YoSItS_+Z#V_j_0I;7M2vn>3#4u>Ab*BJR1 zY;Xk6%|X(t1(x?m{2wuJ@*=$ReWE{@M=@Xb1}ZT!d!rTn`WCF+Ke4XUaIWd)|B6qZ zQ)z^~ZwuyeMKN=K3kFu7NDbQgZaIm5f1IFKLgjf=>JzMhWO55H?EdOHvFu6k<1 zji2Fz(&SM10L2fi`C0Mn9yNmfg2x&}Y-dG*mo7a56qoS}j2)*7GelpRJNnh>`9H?ig3+`IN~(+YHWx0hzuX zt3Daq)|$`erP)hU=o(L7W;(vhwY&IyiZx9suv*6qVmwu2{x*bPxxnI2+MR_HW;YH5 zZgVTo@po)MnOuyUAGjU>$A+}2#hs&Q*8=_e- zXA7SI5`BJokO0pcZ-B%}qYCtJAOBzDnm)#dgZBDqg~?dAulN)?O=NVyokPKOd-cam z2qWV>5qVA^r_KJokN^A`3%xi^9H8K{{Te0>Xc-|Gl=WE(c+9$@u^QDZ&YVOER|bv3ZFn(66pwP>t2wX6)gxU@hBFXwT*R; zvZ08(o3+q~MDDw*ucs%RPeqvvQ=o^rD`~+TWOcBjKomjC$~@y+;STJD)j^CSu|8cl z?Ek3%o?i5u1_m*-uTQ`CFu%vZd(VgS6wBpMPdKeL)d0&_g%%CqOB{MNl9uM#T%pBk z^zHzCmZ!gI^p%a)8?!hQNX+o4k3NPx2q(e3A2MN$U2_@rA2x?ZQQ+Z;!r9pM2)O9F zTFvB$_bw#cprLP3w^R&lJ;H@X1GgL{(HcaAsNu>TodcTECk4EY@ z^Vw(;{RXZ1e~PDv;WwxQ+(Y3)H1w$j|MXNEt70{yXKCq~BeCE-1Sz<{{e!_Q?0aS_ ztYilImquUx=`jgtc>fLVkL6wq#An7|nDQdX|L7*LWrpS$GfqHT!Nhe_eZ8 zDQ+fkd>7ys1J_Qn&K?65D??e|v3ek_2f20y81{gZkyGzNk0!{-TqrPpNjC3JMHEEnem)cgo3_cXBoL)Mvq z>0Gssf9!-1%9cb?5(Z;mrb2qNHVu`X?E9`1lOj7Mg>2Dl8AY1x$&&45keDn(2w5_e zY*DWN=X0KV-`{opueoOCdA{GX+}pX&Irq8Gedpn+-f|Uz;&o{H7^8b7<5e$AbNU^p zb*;_b2G5C9^kIBfWX7zFBpj!%@#s47$(t*l{BNZd^8=otR&~lmMxSw<+9Lr2fY+OQ zC0LVwLyu;jF(ywxsG&Vd8^_7N$MY3tWOFDRGpG`{Xfta;&GpddC}Yxy-|j)CA#t^k z5>IR$f?j$8(Y;84Hq;~Zn@B5WPgzS}MxD~8e~#VuCd!|qr`>Sf+kB~v z6>l@CJC#rOwc673%4q{t>qDz#*44>idxG{Hono!U)?H_RFdN8kHFVBdFnfZw-C3J& zSn5`1j{XQJK0t12vI08~PRZyQZ4p0H>J~DRpRw@?b@aqD8r&^ro_$IS3E*XpL{Cprz-}6) zu0nhLfbz0B$q;%}O5*v+8+3sl7E_TlW5ZXYTxV*xWPGLt%;RZ6Z~BJl(!xmXetLCI z-GeOujCM+*jqk%cMpK;gP~y}TG2a}d#P{!EPX~# zGKP#ec>!#`1lAu}4@FiBv#G6bk`{;$`ClDv;SFcc?BbA{MCPA28Ihj4YHc|VY-wmE zBgAQE)w84rP@G4%nG>Fz5i;U4ReADxD6(eXjvUG!|t{S_Ak$RN1 
z=~rRoOkj@g2_3w^C*)*W|9Ufeuh0OG69AJHEv<4@_%QqQKix`R7plLTs z?*kt-Nc+Go?>}ur)_#KqjcCCLq}#ztt92cy`zyFy<-0d?*FljK_@hUv_ORhd-@Ei| zR4sGW@1`#;Jl>dW$e!T`WX98r)y(LP>ESa<=!rd+Hmd`Hkq?GYqX4bXL=N=r_CDet zw4mSAfPhU0=wa@H#z^jYezbhAqh~eDgRChoF-lg_hWgW(BtJoKZK8W=O+Q4lu`Pxs zztd|$X!SVHpMz3H`>Kh)(7rN(-aU77hHZ#WGdFZ+M$1>s1vSypm!Y&d%w2UYMTfQK zsWYf@?LC8dVA4wJZP*wfFq=*zR+Y1mJE@}=*FG?Q3H+Dy%sc7>sBbheA)5k@t~j(W z`n)_)oZ)FnsQUx8PJz!_c21X|-2`U(%1EGbUkcFc5A@p|eq4m|k=P9@z^MggGf`VR zk7py=D7EPBf;x$m*W>mAQl>@lCi0XAcwNa=gCecrj1uDdy&E;Xnd*JVR+MZ7?0TyI zn;uGl|5o;X+o6yrkDfYo3p&M_I*zt}<(#jb3gEzSH_v{3OMM^-xlq7mKm=yCHq6K zMPT(1x}+*~R|P$I6)Dvg?Y)>hU_SyH78_wVF-CKv5!WGq?*z`hKns<4Hj0&#UbW_Tcm`=Ox?ux)`+(U|Kb`c9 ztw*3jF?j2JMxr`FXSMH0o;=1(l#Mw@AGYyOzBH?mkw}-GYx8*K4N3jjGBV~8D4``H z5s3cT)4Rj>XE7zyL0|oaR&T|>U=(N_$uo2Sz| zq$gRqUE-S-C|fnM+D@Q$XLbp!cY$#NvN;BQ`v;blM&O`*ULBT~H%;jIBsH&tQzoo9 zK5NUfRCL~OB**w*W&t-JMpnu_26nUf-y2!{^zS~;!Z~eIfADLR|MK9T8CW8jNzM0= z4oYZ~EDxnUJ28rHBsYN(OWzOQ&hxiFeV8Ft%IhcGm@#fX+-zv^SY+7z*BR)iCi1z9 z-vr(cMpG7p3uYjm4X-j&Vkfd!it(@+dW=O6l@4vULz-hLdl*e)<_z-$ci_oX%4=~%|33X=YmS1JO&PNpXr~IFo(WcPV?pV?co-P7LV2zIo_2lB z_}B~QjU(j^gJWp=(u@n^tZ5@N&!#J!g{(??p?y9_yWgRNaXz#qEMjg+Vcl>CP3=vj zCA4}HeV7gzc|6SDdK>L$Y#3XhHDl`|zOUxfDLDHVwa)Xq4%xm)+gs6+eUP3>w4L$! 
zeaz^o^xF+My&<-Nzeb+^6WWgsqiG@fCyue1f*i*1>rTU40)^3NW#PSZ!4=Ay1JLt= ziP#6tG&vPId*31_n$^?nj_f?$O?3l`UO=@A-C6?fn}O(cq)U$@Pgb_0FN|o?lD1u0 zt94yhH>c-bXm3|`dC=XB`K-r?*(r^TF^u`}L|Ab(K`&S0(^{}T&O5U*JOPCxXG7j_ z`kI!!r=!)y3_>-+Y|sG>5)C)>OM4uceq^rH5@$@QI@#r+<;LAg(ZbT!)^2F)QW|JB5MN zc!QZ)KREX-o}Igf*T20BbBNNi4*8f5-sO1Xj-?x=y&-cJjh8|@_2KJQG?vlHjjk~X zS|-6QZ-Hr*Oam@YPG^Lf&6sF&k+F-^jpy?+N)Mx-1JKYr%{9R5Fi-V8>jLKcY2EC} zn|S6ar#C&#J6Hv)>mi``^f-mqJzHD~U2o7@Kd9{8$I7&4jNd1K!OQ_aLxU02_fD7* zgp9l54(wMrJRS_()&IykQ!7?)%6sydkCL^)>MwXv2Px_R7RvM`K6$!thT!|Db1%4h zclT8=@%F_I+C0Y!?K;%Dj{LftP6ZdT!<$egzke?wn7%ea}-|6dGk7G?at4u`k%>z?>Eps&{ub0Z3b-K!WjWC zf2XHo)JO)-MZtdc7_A-;b*3D8NV6-(?aXUG&Vv^3e+}Up!OQ03*1j_qs1&_ zJnZGkS>8HtJP%LJI5;1wcqhgYrk%(;%xY+@Z|lH9{~T{fU*N5~fH`30p3n${H<*Lg zL!Ui-H&db+fYjF;4EZKh76{LA<4v|!}C)|48=ZzA)8QQ04adk;Vx;|=WR&Hcb;PL3<^WGrL#H{?PO zv6pEp5t>hiA7f*h~!60a;T`8V^ z^TMz5jQB(}i+9${SyT(j(F$kWqSn-if$!RHOHw8?wXZNP+arapS9(#VH}tFlruvuY zfjTfqjOQ8JnlnQ+_k+fr7##X|mKqhA$*zFqY4&to7>$jYVJ`EVmA3SrPT{$DH-xs! zfVnjiGMTN)sf*6I{gL?*Qwnb$u0wp3o*KcP%DQwjPC&PO!f<&urxxZ zFEN(%^?D3Bdy`owGG2}$XFb6>9v*%RFT5|A1UGU*mHw2S160lpNl5Y`=oQbJr6)YO zKne36y@RC8goE?IW))P>n${k;wQOsPuK*tA{BahK<=sN~Gm5`P8=4*D^;<^OTa@ks z=H>?(46b7SEIl|nOVgiP@FiLsh>mr%s6Y49;u^+TKd{}+EZG(gR^q$&nT<4Y8~U3y zY%}e3LnjuX##}Hr>r-#auVe%?hhmO}#&Dz*?Pz6g4cuM8sT<{rLha1-SQNctmWqx@ z_+en{!N|%MXjPIiGm#R`?&ew;N6$tj(GKMelpL(AwS=hw2GQGA`ZqSH7O<~??^_@_ z#;Db5RTn5wrRPT*>i*rbTNUu!u9D0L(KV!=LS~qyFZ;X0Z&$@5v{TST#&f*DR>K62tt6by4 z7#G#Z$+f__E|mC)z6<=l#vX#?ccryjU{r&VFgxIQi}tiRI|Fxu%IDF-dXitI*Ku%Z z3A?s5@ae=%@fvlS)1xtoZ?k?_hi+dBohH*_5oj`mQs%PGPtT{B@!aWC@aO zoYN#eeTMvRf%=|&diwqz(qVq!G(Jy7M|qc58=*1DjArHycKza$C~*{yj-c(fjDVSZ zY6R8w6;NY(j_rELlP>Sq4hpii4NA9%f(Lo>2Q8OD-di$LRA;^9IM72YJ2QR{WFrYk z>agPbPnZYIK30lni_nw#nR^pycMK4ngZ>YokH$j}Et!L8{SMDc(_UThtqC5>f#}~* z@*;1PKRH|*OpV3z3p^+S?X-=4gv52>SFgq4td$o-E29Z`YxoD`rvL4xI+2QMZ`#X@1VFqXiEh`{}W+RAYB<`ZEM0Np1fIl~!w!AZII%M5E5^MRccav7Y z%*^-AXdxH0+RW(pKBRd=@*o4nv17f%2sgLU7^v!9w)OO2l>0A`4lOj9;X^;THU;V? 
zK$$#XV3yJ^c&oqIc`)|#3dY^f(9b9c#`AYC-5##y=KXPWe?Dp#3w87b>IIdcJ7I$*p)3)kS}_moUU8q7VA4mmXbmaT7oeg-r8VQRL5 zb4B3BtL$o?0&a81)<4xep$2{=~Kj!wTMvwaMOaxA|8{EUU_LT6ZeFoMB z5r4WEV4F;>`aC^J`$meJhb}QUY9`ZYHMi<=4Q#t$e4+u#;bgegEFUhmjjIM<7pq{brU^&3f5Dg2ZY8e(=J#cNK-Dx$wRy~ zk7+*e&=ag6tIuPU>&xdy_%*kNcNa6DMG^w_q?%rbLU0Utynp*2zRCCJ_+JEVSCg{l z#WmiXb}ivP3Dt^%zqNDnJ_FD7$;t-Q`j-`@mwTx>lTUKQ+vFp8CjYd#<^mr5-L${W zM?+=duhHpzvN*JA%dz2;84CRN|NqD}EwI{7BR`LY|9+Gy_z$Y6R-Lk@uPEV%6|B$`oQG3fieXrFUvr=9%n zfKzGci{*@=6=<9u%t9L&6F;GMi}U^Mplzdhs>iufbb%fsD@5;kE`iEMpMMukb(c|f z4ZXN4Xyjv*T+BLVEV4b29=B3jYvfks-w4&0(2$Lg7HkdmW+$Z3BA0 z^hP($L{t6<6(c#~%ujtUFq&C0a?8O8R7aqM@u@DdkJZ!5(@-O%ze2lhXx}I@$v|rC zw%Jg@I7&*D>w-G06LJEj@lN`~C37pfD=5Y6XuJ=zEWE<6Syc5&aaBKr@cRsHGLq5g1fm3j1*`LxOkwh z!RqHrX!|a5v>i>C0dDH~z70y11ST^Pn&HX2Ro^2WIgt8q71(v;PKX{GOfkby`VN*Z}aK1x|Uu>FdzOKK5vIUJK(~{^q7)Hzi>a9k%rtmEl3BAb3NkzvQ3D)tYZD%wxsb7Wnb2Cy>e;;m;F{8T{ z&cmDA;CT-ckcvd_LZ{nmgMjE!uJYwag@>G=Q}NbKC5EP)IAd1H2oJ-mOGzT#9MJDGIFd@w*Xdu&hpR|;HTqE=hDOS(naJ|NwS1H-04KRjv#)YZe9+&EQgnQ>5NFW5{3Z|?{hLFr{=$;d`#(C7esZv(xz z)-NE1T8&ns2lWK$46GNBg=*-$qtubp9|QAg^s=6hC0QkUBc&qz^Cnvj-c*D;o*{b2 z-ZT5{%;&3tArZ~5AIpQ({1Cn?QPDtm8VdeS`IKO-_e@$DGY9r0<~bw7ccxu;|Ib4S zeFfxEG9`?BWK{A6SpUw^PIX3tz8Tt6`y&Zb{U{ih1o}$!*$fD?fSt1IecUBbB8Jvx zLqGF6tcTym-@XVp?;s)WPRwKHK5Z)X*8*cl|2b6{Q%P9Gw{deV8vts{`HLt{2VPiS@ZG^h%A zt%h#us)q36PiB%a)bECr?hTms;knr|G63y-+L^}pt3YsDRsYAlf%R;!rRd zg>)HLsxiDU+Q|j<+-jaUBSzxie*|VmQ8a(+C_ev=H0%Q|&wRQ;K{KKXzmc-^vU!+( zE3&5Z_R!BrXcBK00M88cpQl-R8F)vsCv>e2#@cJlr;ssd+|%em;|A?TqiaD`KX@v* zo6)-$8fo=&y$}!ooa>U&8;ziFJ=!v7!D33ygzI|od8?@r8dQt9nbwB`j~;8@bi0oo zmY$S*qTS9*xb<7`7cL61iP{J(ATFX*t#~4;d zG|;x#8u--md699m0gq(_Y=)-Zt$qhubOM?rWG)+AFrw6R(6BE2G9yPM)2*i{TiFR! 
zq=YxHv_I?tpZ?H!1=u-qXF;FoXvhw3G`<>H~H9$htQ zX(sJ2gJ<9J^kX1z&bZvqdu=*(SgmbGBfpPM8;CB~zpVo@l?)!wg2zDUQ5U|b6^k>1 zHo|pnZwrBQFT6=Xy3Am|o!y9jRHY~ru>pEBasacf*`j`-x6i2WO6(5O;#e{w*J$wX z&F9`ouHH`Ffq#C`epeYKdxLgAf@Dob_A`U=1xo0N^(J!ex1QAbgB8VE=jq8!36lAspYZceFhrqozGG&a{mdu||(pnWH=5^Lv{osyk%5~@+{X4xw z(2=<i=t9dFZ9Eyh-7p6IbM2N~Lr^iE~$zQc}b6ngp; zl+6U+yE8v^1ddmE{wmZhg9PM83JWuuk5DFx(J&HvXqUeP-Z6a7$L9}t?>V#5SccEu zoYVK`6Eyf^{J4v32d(bmeJ8a3r}UbSx<)BfUuNegEAnL2cQ?!TXaFU1&--!gtHAYq>{LmG)Fvj2+$es_G{zM99c``Co=*<6D9BSn0U zylJ$U=lVB!zbicwn?S2R^RC(fKI!q)0SHc0wj|ulO8p{GORpH?j}3#OMkOo6QzM4| zLQ9QV?R^5?=2l2Y3$ak_4Em!jWAHvOFOQ_VPF@kLHb%|TA7=@&r_a(GjQM;}#9S;l z;9)Ftka-O}6Yd+RZ`RD3;NTtkozy6Yw2k50R{BlPPa6F64w)ElR?1g?7_3X7!%QjYQ`|K8?N_*;{L+ z$VDlwQeOv(>!aeHKOHOjL@1OFEjNqVLOSsL9FG4D{X~iK{}} zT80K7tzD7uG@$$xoW0pL21|w58`FO+JkYM8cb0AZ8YuM~Wo|;jh|NXog?5)}=!C;? zx;$ly2K$XPG2f_GGHoSN#yDQy1Q zH2c$k$!8h)D=qo}Y1L4x3Y4-h-?pRvuhiH?A3jl9b2GxdS*eFq1j{Jo&0Go*xwhOt zKgOGB8hZDJU%P;by@{{c1tP8L0hRz}a;I3R>zMP?yuhP>TKnhwJ@mYZw^>8kSmYv> zPtBl+SzWzpITZ|YAsGW`Av5#e6*%IJ?u@+mu7$D2+*@gh+RNx($lTzmSU294glop? 
z9l@Hl1$&$nblGJvtO(xv7#NMu)0F#|HKO^^(?hHH*g&TP%wk_MVrH_=p251m40O7~ zGczfgyJsJ(3~ek+89Q}Yp%{zd0^_(iI`l59jp=CN?#RaT$hgtGk1=1=0`hHW!7}K) zwPEz_0q(8H+}q>?t;vvw;5w*16dEu4=~b_fg+O8`~mPX zx`er$3V~~P@biXjC*T?k&4yE>KC_k)N0u?O9)osf=4(Yy&Md9MUfGC;%fc#bKeTj) zz5?|9p{lzj&*#oDa{hv*MsRY@d7Wo^MHq#D4)UsZPBrNF3UuuZG~0lA7_it+Nu+WL zd_0UCw?KOLGlL4aQMZcf=P{fsbwt%k@v@_9?_Z8NH_W`iGOHJc$%?5&=v~83WW9&46 z%UUU1eNSV>vH=Y00oyo!%^9{98Cnf>JM+ILtB}Dw`vW?g>n#x~7-76Fd^^W_{rvMA zn9=*w%bW0}IoR%H?Ck~02K?$N^kU$Yr^?@;r>oMckvcr-^?qg-=Eg0+?MmcZB0KCHcdU$%2Lx)(Vo!C z2tDo~+~qWf{$?jE4K8N0HqL<_W;=kTAzaLdwCP#m&7z`UkOi4t2khD%ThqIH=8?QN z=3H^!pXKd(Xt)CUc`xD(-|7FvOIE!H#d(c2Ej!(3{8jf6$ksrnDOvi3g7IposaXn+33hZVK=-u14m z1idOzVlN{}--=7X=BcaqbK`*NC_FUsxH8g$m7h@}ymRv-*v&=q+>5l~?{AcRl(L6t zqa=UNGVbD-H?>qur;pV@;=RH5p_g|i>hfd6@u*`d&2wFMjqM4dvCTC0!uS@lm>tO z*^JVnwS6uy>iu(*a^}i1ii@_|HE=(UnO5(H4R9`rQgi8FAJoCX(g&GZ0Zr#4q5I*T zx0>~abr+$Rl=;g_pm%OS4R?*F(Ojc|v=VfAfN^sQifWhiPIx1v(|e&Sf!Vm-9|48g z;*J7GKQJju>sx_Mi`F%GeiJ;@VP64@F%q^jZW=%VXN5jMJ_Z^58tHrl34aiYGxJp^ zboraXez1=5*#{hF(wkWup8^MCQklVF2pIeb@AMNgy3h(}x*NP#QPybYdPtbt%=x+m z5?X>c-ms43m8;LV*+Lth^M49`unUbU#U5%@w_uq@*LFdNZGRJI5U^% z*E171yrFn0z?T;c&4Y3tDI0{|GxDN3`Z#d9w>MAR0@hwV=;H^V?+gvi0QLbRX#=ep zp=uHo%T6zospDygKE0l0KMhWE&__$?`63VvKptLzFU~|M(60#epNqyFO*!L&l?5Vi z6V?De_vpq}_Kr**_+A^jya~1RnQMcQ3WKg3wRpettS*RlBbcP&9D9iPrd)JmpY$9mAz1YK!C6=^}C>{-=@#T zK%56yljz$y()-aVj4@BHUSd4t2fkHMX*PH-fK&aTd;%r(N;2-!FTffPMk(m!eL0kx<9Z=) z76w`B0sMcmo_!mwycZnJGP)X0Y2*By|5`P-!iTE7c@=oQS-J*E+Q>@aEBLUOZ;^O* ztB{#pXa-}*n}x!&hZET0v`S*@U`98ONmY9DM$t#;m?rSgNPlP1-rltR9cuhS@8$-% z0OdL}_ZVyIVQ|q?vKeiC4Lm==&Em+_Mx;6oY|NS04V`@mt@JZ}#qh~{SjlkpA}hc< zz)}&J*IQigrpoLb%@OkkGsYBTM=h>BqY1P*3cqI}xt$neYESQAdrQ>_`wih*D%5-& zx)g#R`++|W3ADbJ?#F>y`u#@Dm%zX%NuFRDJMl?0oVMI#-suOuEO61h4RxW@GIaMY zM&@hO|BNzPXSB7u0$&D3W{s6b&RD$|OWUDiqac$ffLagj{cz!C;NkQzQ_p48Z$%Rs zA?Q7zEyHN@##1v!n4{A;)bF5?Mxlwa0L7QQEzUUC(vY9B=F#&el>R0!QEm*FYUwb) zXf)5u^2z9ni{XQI4n0w>!q>|{mxa$KX(=nZ_W>x}l<`smDr}H;QQ6ngtn^EMgqQ`)0 
zIZ!{xH>2n1F;tp5BasVZ7wx3B77b(VM0(VA@Bpwjf>!fFe^HE8vkACj@Me{_bt1Q! zjLx2u(G`gll^(dI>chxWPax1fkOyjHVH6p;XbKdY%8xgjPSSIC{y)Tj`F9nq=zXR1 z=r?Z=ous}#8cpHc5K8EIqm@Kl-RDQ~lMW30ujRn|wsUEF6Fi7KGcvb+9T7i@tnl_^ zc%!|;jD17dXU~JC`XNZ=&5VdX@LI|i2_^L>GABs_-*PdcrKfYMJ}gy&O(YlEcQ)@U zfM*nV7R8FThT3nTVWX%U3-ps|Q$7?yviuaWT@>V#mYt@w81;XC{`Xx>o1(?40(tEJ zDWTQp|9+3u&KIO3azCpcCGLfO@=1HjZ9W@Ap&i=wAm2|y%Z$k3Gw7FRphgtB=_lH( zkLA7g+3FyoEP`oyJxHoe9HG2PFj5_zbr@*HxH3l$~w&B+p zr}KG#0|-*++cQkPT)yJ}7U*LXIisI>&m)<&kvFQUFfZzNv5ij~*+&h4u0~Cq!Cqhh zGfNZN8i@4m2h&x+_92)hp?}=@b_5FZy}txK9{^Tw&Fdpt4txhe(GzHHV-TEx3v%a1 zz-lG62cnU8BR>bx&~E@&Mc_$-Cq03?4|>`duKl2|R)76Dj94RtT@}oKqQ_`Z(w-?;d{yJkJc}r!pF}BYUr?RG`pn{qs87l8r}$K4ShZb&aTw%1K%%!&3yI)#`yh(@n*agZ;NcC&1jzGpyWc< zRz0AHEBRWX?ZNc+5o6tT<}hfqhk6I#a$la_rnepNqdFSjypcnZx@VzZJbZV*>WO?2 zq-!nTjm_vC^@I`vKBUIz)r|rN&4OmG_{H?+9Y1%{8=!h)R(T&G$%El|u#a4uo z`zbux1eF)@bOTf#gIs8}YzAc}K+_gL(VW)wf!4SC4y(aFteDJ7*a$p|^S(P+y-bbP zz+s-MocxbRK3<}{tA8Vz^a`?Ylv-xwzQNi{U$~_}tA+SFkWWC?jm5r&)-OSKZAz7p zyNmA6OfG#S@@Ql$gWM zHK^i`cMx_kSKUO0y!EeFT4AWM8xHFcqOEBHyep4f8vFlodYQq`Y_xwNAiqtYdg+ga zmUHR#QzYv)l+;S%nQlL1qctrxK{M3ljlLCY0uMXT%Aeq(Wg!a~?}qyko|zxfIO#`` zDm{kofaxmuQk-^{(1wxLoiVhjo!jyG8>p%d)N*y2aeE({!I(kWSuK@f z&DRYaw!@>0lr_G`F?5$V8t3z71{kb_-u)O+RpGF?>=zV`&2w#)3^N>Ys$Vvw*8yVlM|Vz&FwP*-c*G$ccI}x#@p#Is=d$W>2xC0Kg{ZXFx1Y&_eYSHUEtP`(d&() zT}V|PdK|&Lv=myIU+XyfrVG$(OVTDVj9S_|&O`a$z+vo-J&aDHxwWJ1A-pxenfK%# zhOf?qS{6>zj`X#ZDbb6* z-H(|eO5giljA-vC^k(ifW2tu$Jk?cJ2JzR3Q+gMkLDHuq#f?Ib^?{)=^m?6A;%z2TntMKi9+SO;LA-F7mK9w1LDcsFV zIaelnG@27@0%M~FrQ(paHc)aSG&N$*c%;ff6}IR@U&(A$=Y`_5T8aveIF>@6>PlWbBJ+03>~7S zZ!1z&4Xo18ASIbmoRPe-o5Wbr&%oPVgBV*ad7@vX*)c97KW!P?`nqMH6+IZWr5ID$tjCXp)1Kq!N()1P^i{fd{}|pP@$?8-uA=oE@uq?6bqI zo{Wiycsc+q(_uh9W-Z_0j+ zPBo+NbtKlj1|!(Ln4Pm1+!;-O#%XQ#RSsnU2Cws>!1#VmfThBQDVnli_ah!K~7z=u7 z^rO{mj1oUTMfqoF!RW;8ndQG=2c%8EFQecjm}RHd9qMmFdg}6fk{wZBMp-NVk3`G# zWoMEd=*=!LkMe~n`xgCV7oX|}sH?G)~^O0z1mop-^$I1*uWDI@Q^%jg~n 
z6?;;43a#t_7jNJg?QAjayhZ!&@=k?*Y$-oK_Uc`a$WJ5>TEzF?OzPg}h~uw2REmc{%YkI6X21W<9N~zeI3QS+sKyMFdOn%j+Qbort1Nbww-6dIkF2k zi&Hf4c>gUQI`$YPjfAZ&;57KO1Nuq4(K8?c400kdU6G9|P~o9qHOK`{IpC6Ug^eIk ziTYX;ra+UPJo}#hex~(E?3{?r#ntLgs8JAl{=mCztO(CAmgIxccC>qZ$>%6O52KCr zNW(Dn&Rp8h4UF#Yy>HZnQ85IaaBrZ9J`-;v>yMxGWHJw>#5$kAfFDaPlIc`_VH(PKtCgb{fnwvRJF?k#nFk7*VKNULRDMtL<`Z*cgIcb<_RrvF-uv8IeRty<-S)k_UKJ+$Pido7k*Bj4Tz znmlpP(J|(h+2H*v{hokc9jLR1 zb&GasBS>pksg9g2q)#o?-aPBfPH$BF9KdSYr?6 zyYYBZ(Y)Uy0S`iPZxk$Jtc|9nmB4JiFYohvmNABPc0JZGp1@azvaZ{A(tmeKMuYcW zBrX$cfcM~x=NE&3zCOF7519?!Gj+rVCy6zqaalX@eLbx=0{2PKr8cACI=a1epkpIi zFe2{0M>!f z*2s$b4*du(2E*lVc#=&2S_|H04)D~s9=&@CwSfPvz|>LTPGUHHxDPRVxVJ&-vHI@C ztoABcmZqJv)ZI@F<#GxA3_yda58O9;=J5e*-KVl8wrKid&_myG{=~-WwM#$w3^yE!AF_Yfb@+=M=ygc+~zBN7F zF7wZU(`Za1DpyA8FQ92Y4jQ5&vicx;tVzj*oy*$xBZ$To1sj zDb({eMID|hiHo64XCQKx@Rou4A}hLWs}^u5;;Nq>g&;GPt8nzn;Tc6hb@ z+w}7edTuNut{QwYT0?E*=mVY^ldd2$x)wOIi;v;|?63}N#JJX4W)uteRW*@jJ*Zmo z+|%cCXk9tt?CUyyDdid>i^u7470>Izsg+1cet2!P=4XMlI+XAX`#AV7gIM)eQ3-<2& zTL4`d%05J$5|q;tY{an_DL)RG(vqegoym&gUA{d4Rzr~jXZ@*gR2xh)%C1GuUWE4V z!xMd_^l`YvJAIq3Vd*hvZ8h4?gRHDalWP6-jq$L?0ktP)+Ew%bJcvv?M;p;k9|-Td z{>h3bG6Fi%MpY!x+eUho{RxJ%(W}?tzo!=)&_8O6U2w4(FkRvODY)g0ww9F88n_sI zjQ_j?3HN+w7`k8q^wa)63m$lP&ZxMa?&4#D_VKok zQ4~AS&k$x2u~+Z^azWbKOcoYrNR6N-1q|ej_31IM%QelS%2d~jCXS~ zOauqB+#R5`x=4SkFmKL8YL_ClT^MUVOGamSXYU$MjN1J%oOuupI|01R;qBSvB53x< z-{+1;p_R7%=}34NxcUjy`-D-VCEgXz4k+uH!G&<@_iyMqoRRuZv+t=E^{@o#lyW(Jn4WmWo0}zWc+9e$xJIL z?0nXMllpBG|I4z@ZOG?N=#TV_&uE^G;duc{WDoWa*GdH#Z4<%Q()Hk#w@tRN3NM2k zw5NW0dMHo1^ub=Dl_i0)#t)s3{CH;l8ay)3&?xE~-_`bZ(uX#Qe8A;?+$`YQOv=H< zreQCpjF^SW{>DN^% zgSQMa({jYl5vdvZn+fXG1+%7qx96T$pTk&SOyH?rADe;Lb921|^hoKBT%BZ|91aEc zGIP~Ki%K8QxV1o<3(B>fQO}Db>E^o2#izG;+84;(1)B~1ZJuXhM7Zb7g1qW85IKj> zghtE+MOs7OD71e*O6TMIT_~iD)j`DM+H3ljg%s^DSk9r&>t7A){B_5?Wmku10H_!f!UH>22@d`8)4;>ns)BxX*Zl@?E=JR3KxUhUowA-5QS|Sf zkpl3`TOk>s@~nVEVQA^?p-Yqzqcw~}tq8ZkyI;^vUr>4q{MJsekp7%qtA*0$9-qLc z%(S=&dAA?4BfSMRCn7r?X;&*o7RGcwsNNT9d8^}L^g-m#l~Fe~QCdv>iC9jaL+cwa 
zWDJ}+O1*mm-su>R#dxlTCSqSX7^GurD4h)$wXfNVGrfCLxigh#r-K!t6!l5~LuLL~ z0fQ}!w!W11UJ&>)qu1nHyU>EI+zZ})`ArBfQ$D4f%cvIs9B6MO0U26Wem8*;7}F4=B<~Ctk%ylN0=XdHY?PYtQpM7 zx{Og_{EJ7JlQyGKHX?Ty8H+REh&#eP=zM+LMtLVc#&iQE7TlQw_ zIl>y_8f{+!qFz9)U-6^V?@SHzTeM*I-2slDF&nI5KJCos_2_wXyT|k0Tw4`@=mEy9 z^H4l+y#`)mm_gj;kmh}j2(HAej+z7^bG7x z%Vx3uhS})|^WWTnYe(pGjkeui7~x_l>xq}Cy@WC;%uv5Wy=(O5ia(wo@B8baKZo&U z-ev6^+E~18ZDhHL%#iME(suD#EZ!z#f z897C(57Mfi&2Qt$U|u-40vxvRy&81$Ttx4XpIHM`hfbfNJ;$RbypKBs`lbSxQltH< zI()1HK5MAwPOlj)cvJTlqsvGDMwR}7r~Ub6c6smRrZF#i|J{2utKr~SO639n_S8&- zE>~EsT3(NWi}V!9wmlqeTLs#9d&#xGcL6T|<5IZ12fllb*d3{j#BA$Id!D=g%&Ma@ zEqivilV_eRJqz96;Iq*{1_19Vp0wuMHYjlmj&Fz8KT-2z#>-l8F3UT8^u2}GnLf;h z8I3+G%=_LzltddVSp^nH>bp{_1>?Dmzf?s3)eR#4c_&jAMFbD(9CDyv5|vTFdG|Hz&(=w6j~uV!tZC1 z`5Vw}5B%FtJL{P(+tSi_aFd$->7@_-nBQUp_Ry-EY5axbIRj3*s= z+7{~7MMwS#)ZXPD4As1E;o0snU>N~!b7{FZ>-44U8@{3ibE=eq7M{Et8@dwfi(2S2 zPcujHE)T8g4Y8V%*BHOqpzu+o<8!by)}}FXKZ8?8Xf+L3heLrCSVL05)hIr9;Hy$; zT*z2xmWrL=ad-o+o?X;0m9N0qKcAKcKn@4DQDqsUPyKH$AJF&242aCG%K}K>{9T zZdwfOzNYjAcwQe0>Vx(gwagGTG050f@YlYP2psMq8`9nu-W6(HOS z9oZ^YUp49MhQI$PR>8l=>QV*BogE9Om6{gmixf$4Iq4wQDtT| zZ+KM!`=0dc4NY&VJqBDa^88_1=uZ8Qfk+#39D3KoG4S>(<5&y4aUvaqPoT|`p~O;t+Cl}_>FzQ+uo}8dpUy}^bv z*XZ4I%2e=lzy5D}SpkK$uglFXv{{33J%*mYg0?LgU7iKiX9YGFxynp!V>(QuO{1wN z0fn)lyz^(6EcdQy=0ztXmhk#5wb4)i+!+4zhWT)%4(F#1FTcxF7d zwooD)D*|sebq{@wqwH;P_p~N*?%zF(?gPv)$EmRpn7ShqdKlD%zsrH;1i1VR#s8$g zl8ln;*dhARN^|)45Ok{uoxcUY35?DTjJ~?iRqN%ppcnEm3#?%rmZr`VXyDvv`YnwA zO|+%2!*--?0(#QBA(5x|v)<3n8)L2)qwFbaG(aB>qCM?ndU0rfcoo>(r)FnQU>2~B zQ1t;I&&Cc+%S1*bu{+Rs-e6XqN>J1|pPuFSU=AyQM3kh3p=gkHNNGG}yq8cOZpH97 zJse4(<~+vBW4t>FJ&iMPFPIPJt-BcaI-VRhV>Wt`_lJ4wiTtRrZ!x0dkF-1yJR*A} z{q1+L<0%C0#$J4#9=-`s#eii#^z0rmsZ8&S!~Q6eDMbsyuRytzr`}YVA9iI4p>OXH zM51a$PNU_jFvapT4;tqlVAlt8BsG79M*CSmlxI|W;**=5p}V-8>=B}n%*UXIu`|3i zafC5!L{x8i4TI)cp^0}@v>}K~9%vC8@c#!6EiYo+hIe%tr?E(^yEE@NWMouKh2j;V zruQe-z#(l35qrW=>OBHS-Dyvur&`EPI{Ne8P6p<>FR(`60?GvXKg(#_i++0t-j3tR 
zI@)u8wI3Y3(eWv;u7O77_**<^%c+dDh!;UD(mIcEHy^!jgr8)z%Y5k7j=uF--;Cr( z;#wHDb_XpKW<;3{$zAt+;5AaK@4QE{8BSFK4lN1h=J^iDuY#AlBpuI;>iHedV;R8- zNM#-HZU@GtkO5=c9YfAv;H|5O-=KR1r1(2}SOaFSBg1!~|5J>D4D5J^KR5 zdEgk%sQ8Nho?@gKm$V3V<;Hi6g-!HU6i70ozv9qpF-Ygr(6<gc{;$8kM*;@3bCQrqx?6Dgj~(aPekJHhVN%65ke6u}0C??X2pR}9E*G;_n7u3@OBn{{a(|07!>E>WdvERtgW8nGM-~IpdOu7HZ z%OV0gNhBi995% z1m9vnqqm>gnOD%N(LIkbw;2878Y_Tav~h*i>Ur8Ud*mMUOeA;Pi~#%l=&Y?k?5Zk_ zUR}4G4Q0GTzMOGT1FFxVxAm;Sw;}h@j2J!LlYr2B3*M%m#(e0>aC}&$bVri+pwC@p z4yDu)Aoae4K4OD_`2t$ED%hB7#@V)3c&lGU9DOxq1)#Tu@v4V`Q-9WogIOD-vEM7t z_%ie0BJg}3?W-rwHQF%RK}S~p>5!#2X>B1bbYNF@0jOO^Eu=&jTJR=FN#>ht>~Y=$ zvQOamA$n~DRNhG%$g0$gp~Inp@n$z58^%2v4EGN}C+{zNSJT)o`}l1gG}t(JCVjdw zGkF4C3wbrxmZ#jccvc9W$)S4G*#i7#2Hc6xiUaqEFKRsP>)UQ#&>Z~gW3I2j0=U?P zcY3s+r7cg3<^!d9WLzIky@+EL7eGMQ>3*7f?A9ECHH4(5Wgic8GEG15fn~)ypHAT~<5V$q!#H@q86H8yUYp z<+Y7?)|*Vd@4|R{h4D2Qz9+${y3ALjPID*`jShPhsx)TLGZTzYfNeR}Ey~te+M7r#zcTl&qrNA6S%Aklrul$WD`0K< zQ(G*B=1N)tXfv2H&ES7^) z;CnREuRS&o?~LoC->z{Xiz7j)XvekC^kZ-}I>6VA;_K9Ov>a#lpT)?YjYe`6=b7Ii zzCFrJr;kb+W3M;TH4TjPiWq>@-T|%?jQj@z4DI2t-fIJBZx?+TPk9jBe*`L4qGU9j za|Y;wt}4uk@a%px97v?R`K>z9k34}%c2E{r--Ty#WcTDb?ZX*|3k@kY+VkqT( zg^EZ@639-O+_MvVv)H2GdI(7i?ea#3PNBP-s z+bGE1puC@2i)d#SbC~h762my3h}Jm*wa3v~eWaoRE%!o_pX6;SrHxfti*MelbJi~k zrB{Jf9mdB5xSB|x-ZwB`sy3SUpr`Yz_lX(+jb1d~1W2TxHjFp5UL5jgWLLd9C<^HA8l%>+y}KQF+ZfStTZiu64JeP&*@ z&w)ohq77>Zyx$MiO436E__q$&jow)tyvhKPo+L5w&m74{W*7{X?%~f;R=d`E+OLRS zp92;>c%RG@cQ=(7AKG%Am4_pRU4U={wU@zRwYvAs9;b!jqxdg-|fF$BCl+21I5-&3i{Wp#l%tS6N&04*J z)>49IS7){c-h*g>h3J{$$eNa#uE4BxA3%#5Uu8I045Yu>;5dxgS$m&j)>E17)L4e} zn}N+ZWewq<_nF7T*-ESjw=rt<#5xN{8Z$p;32U`_NYh2Mi(XVl7;YHqc+)s?OQI2V zwS<~uU3;be8GFD$oB9-_YB!Y6jnvBjIlL_i_1;4Fx$E|HKO^u>N4h=nUxB{y_QWQ* z*_Z!!=`8~@ruQH`bGV=OJ3#yLv}q2J9n?sm%sp`N9@f?s*vTXy7nQ)_ML1Ls7}AB& za0@D?1484UXbyLSjZhzXoCVQq4 zbuXM7d<*bBA1!o(rmi3w0^Rh`UV28C5jDIGosV{o(_Sv{HsfwisFN8z8$}tJwXb47o#BPGXs*zJS+el{5BAW8~Pqzmb5x2<0+UYXn%W zg1>#yA?6>N&u1Yn#^0=AoX-oNwOK@Hvw|_Yg=dc%xzK+o-}}Jn 
z6Y$9#Ig5~|JIwFS#osbUYC*5olrD<2z5y>>IUCK`9j^Blo(J0Yi`n78^od((R$8QLzQh0{>o`|2b3=J|xtkN$#Q7nwiK zA^VM~VLpr=jN@v2)+VBNKtU+s&iZS9^)Boi*5Hx+gzlObK#^&b?11EW-cgNtx(FIx zKZ+P+)H5Yxxn&Nk+Gaev0ItTeH{ZrJw1s)wT)V5YPeUbV_&2Gg|EWFw&)|4JT&vGBPv+JGp?k~j zj4HFm#et{!m)kI#?nWQq1*e9<_ccH3!MG)FXQDx>(bwx>8v|TJSnuigas~{ODSHXX zCLvuvP*(olfSQ#VAA0ke2e1d@su0im(#A5Vsm(!8xB>9J4YDwpHGn=o718$h(3Wfc zcz$X?G4Eu4PLB&{VHL9WJ-e-=Ds($3%XN-Y*VYKP%tHo|R+G{d$Jq_;frL^Al`t*A0sc%qq z#_2}jHiP#}C^iyNaU9`zWAWMj~;qcOdDGG^TP&!D|0mRo7J5n92UO@D!%_Rngx zAe}w4O99K(wC7sp08g^On~L;*6e+7gJJX?F1Iiy_y)XpW&2=;!D`*c&XJHJjMtdaXCO^RQz!=>18GOkpBrJH?QTSmcF%Tm0*5zi%*b9B4#qRaw7=cu`B>y9 zm0g%V*!wBb2(C^6HhDp z5_q=!7@YFfc*I{(&-WHk%qWOjKWBmGJ4ni>%$$3evE6OCuhPrP*hc0*&>m&}P;Ys7 zgI@LJzygUttE_93!n1cS(3{}#0W88kA3 zMhjqkkvDp@HDlZXvY1OoMu7PYo`(`z2^?F$ z(}yuTH!}A+cFexDkndOclpWd254^SLcL5r|0emroxbsRpn5QBGdbLg9yS^pf{(2jY zb)9d!PlWoV{nZ_`X=#vosnoH*(^wH3uMY$OONyG zJ-@9;rzh`U@$D2m{+(5#K3dw?Uqmj)(@MlrZRBCcR(IfqGyQcar+-BWW-If%)Z%*@*qJMJ zJ#8g1AKZ%^dgeYKO2-4Oz7u9!xk-;R7(Z{aUcL^DW1-bnT6hBbHK6n?-t3};R3=(~_Q~MrkNS8IfLyv%2% zQ9Jk%X6AqH0!&6Ten_jfRv78sjXvE)?T6T9Y1Ms+UiD@_MV<7aKdlGz;D1&m!$=Fp zQpgOJW=9=^ROhE;JD%MG)a_Z(ZwIfkVAC4?Z7!cLLf`4>uPmz}eHUD}>l2m2ir-y8 zI!3ylPIW__{6H`d{qYIX?(V?-veEVP!0G(#6mEgl*YxZ@z+8@>P}Utp#FyFg)u-8o z>6lZ6V?eYAXV4cD_n)YwqncWXrnz) z7o&v?NOUeZdkkn}c^CN^MZ2Eo$Nl$-yPUp^1|vk6p~Aj%!-?3iBkNDuqVV%9vr7V! zrX3>zh^z3|s02|+i?=lgQ@$;l;W6ayEo7}f+&dNY=zKoUWUf5UUOy|+e+}qTd7BG~ zevWVA>7Bft(4;rA(}y<7A%W&2(Jx%9r0X!@uLq5%!LRi6Kaz2_i5~Sg@MK%dgS?$h zd+q}p!Ik^zC!C%$qZ?^5Bjw%snNy@I-wVSpBX@Z7qc7j;1A}%QGw__}d1mA;mcFJi zv*{;d+(>t@?lk}4Ni3ggVhyMZhu`2?G1@Oi+qwDN9*s7Ll|TaUJcXn^1H4ATyo7F? 
z2P9fh%zkU!fsz4Q_s#Cji=oB70?{l##Z^vh$nL>#uG-)%qWO;8AIVR?XR-}}fBaB`CZ+d7_$^Up7gtEp%~ zt;!40j-J5|V=enWPX!H)zi|w7=^r z*M&L2-}SC%m6hPc3N%A?@O>Hl^i1B)n3)ZxZv)XazSm_>P?vCy}9>Xz||S| zP1+gAJdy_7i9q}x;C&P7K1=NuK=UMXt~4_nUJ`gNK+5!<)IZdd&jP%Eka_2IW?N4c z%oMM8nwj2>iT^U#4ggMbR8560IniNvf#1x%qv$z>dI^EMwV-=EG&CNdnaADRn-R_% zi}|78Z(#L4Elz?j`x*1A`E(qJjGizaE}x^GH${yg9ZlUh{=NuYtEpcHs+&8aG0!}W zeV1~Bc)pmKL#`X~=?*;Y1g1-9*Ru?xi|^x&@ptpnS}n%-amsH)9?aEQmbFuDR%s=n zt@q*G*?RVB7SXzt@;o9LnXkbrs1|fpVvT9J0GigJeRC)3)hm|XM)a12aNpo*A*f)c z(!rE6Cf0oVQXiOW_dVo&JUyEG;VyV}VQyH0bj^o0?pD1!a)Q=+!JS8FV;XwL-I1rJ zuDJeU=1K&^6SQ2CPd$(wZ*Ul8%=-!M0*2A*Y*q}dXs-pD&8z~apxO!gn8oaH8Hrs0 zetDqd4JeqGK2IVGo`+sUmdylsf%*0(8u%cscA@8`l+}x35csD;!4}Z{4lA9>U^$j{ z-Jh!a=K@nxEwvB-{>N^{U&to!cHW+ZbzaQ0{ZG$#8DG~0O~PDVd8 zV)m#FmAsFkAGN1`$<#LYV$Me3uC z3PNGmXN!R{jW-dSS0Ykzinh|2(e-TgKD-vCcuKp&7y>n31IqUF>W!1jte&++UE^I- zq)E?uv)l}U(jPEdmoYNO@84X8DhJLqsV`PNVVFXy-)9A*$p9h}q_tlR^mw5%v>z(Z% zXvec&y{4)oE7#zMaX90kgm*1>f~PU=4g>jAR?hL@)0lb#!TC02y>)N_Uglx$sLsy_ zw7>K_KpjtAE1_Y>Gm?!aXj}@Y_ID zLO*o;K_KD1d^xCatQy8@MC-%b3ygsa|fia2biq@=kegs5$Ml` z(R>F9H~Uei&5S*>UL`;wcjfMCHzD(8zc4dIW_aBGzi-^1w?Z$wTQ0y)7vAUv z|1JBM(acfCoN;%)8Ht@kk9C=^qR}KP!rN9ryo2#$Y*VvG9^>~ky1Eur^j`22aNV2x zdFbEWd<9@?8sLcR?cJI7r7iuopGJGT{x^o*cl7Dm;uhpA9XuUOJMJ%ydUzQ8UJ6j| zf@)7gwX(F|lz#N@8wK96wC5f>L%^yof2T7Jt(AaWr_#zPU?>h>-iOxS5AZJ8J*>~Y zVc_lM_vtSa9A1YsCGg$&h4YZQ%RqG#NRG4fYDZo7WbRC?Jqyk77O-j)@TuMdo`}0s zHvYEXHo@2L>F04iw*yA&ZsM<==kCV5$*#xsHJL%_t(Gf{x7whI(4=>b*c&q z9Zmk}&|`wJjeFiT0x8fYNwF*TsI4KA2f-pBM zJto7Oo#5CrGMzPU4G!CpDjSY#4HS;74)ASE3fH}iqm9n|kLB&%qxA0xUO!qlQyU2JO&}92HRMv(z^gN^_@@#i8vlbL}p7Wu!<2 zY`O2^LbOm4YSAY;o<24j-pB+;%%@YvqiFP}z`$8xaVK^B2pkO}m3yB_AEl(-1NS9> zYyf#2(dpw!h4%@h$D>f@956VK{Kt?`v%$ePv~L!312a;PUwV!e;!Lq;3!e>aE;B$gD>I0~x z$I|p?Wx$JLb0Y#ABRgN-AL(tpdJ|;%O#b7T)_M9GsM6Cui-9N2bwARic~VVH?k&2J zZxve7vGFQC?qA?;2acuV$_;m15c8EhaRq|4X5gfCV6Qr~p)O887v40UV_rz*Y`sw~ z#sb8%E`?QZOM+BeLe6z?d#m6Nr~wo1SD|Ojb3SWmlaVYH@P#rg#t|q1B853(-v*wN 
zymy5}^f;u#zfd)-h0GdF8{=8L&JI>XOSDH44fhx1*MCEweUTeRL#{_3UWt@*hXChj z;=YG}0y8s$j_!!u`3lZF9Zq`-F1Z_Se*md^HGTdeyzf4R?l<%vGT;*=WN+xYCz5?B zPmISJ57t(~iC6G@AsT2Ol1~r(f59E!@OC9sY(!z)-%n4pba6X2z?-CZ2e@aDD~Ea38m;|D zF#iPDK8!8#D0vqlA6lYEZ--Oj{$MvEsb<0#&!QX8hhANW@0s_jf%ez*)b-SS0W}&` zrvJe)#SU=t7_{oCdyc}NL!-M2Tz|n8wAeb)sQpIT{0tNmeJNLii-U}Y*u}l)le`Kq84L0ul-`nZ4pXn=qE~3qjkIkN7J&14mr?#y z;QAeO_cGX63iR&WGnU%UMIO9OON|M3fA1e7``$sa{X0GE9O{428l%XU^0pj!+}(RK z^|_8>2JQWt9{wQoTPylci)j-*>Dd#`-5){g7)dgNa&Crh+ru~ihf-bLY!vmw@cxg0 zVhOgUt0y)MwTs{w8VVt3H?R5a%^KZAljuhDbOTEhjuqZYk4X9JUK61pO{ zr=ZbS)0Y!~<8R>U@AUPh;KuoA@i`b9^KhVvXQ_KL5FY{-Bd@PQQv42k$@Rc;-Fk9+ zO1yLIBjF9BFqZYh2K_z%wtQQ=}VX zv)zYvB7NrBGoORcS4g#;de`%O9C=-bG8!n}q}7)|Tc_wtPo5b8ef=lB_5iQ zKWfo6$#ZJdd@asU!+mJpNAr&qluXue8THJ`%@vK|D zz=ueEg!0`7U_E`b3oF67?pfgTPjGyH>bW$~$r0XPgI>9k-~R-=p8(;VK)E&9?~NbZ6jmz{V%k*^_sxz`(aqaRcz95AIuPz6$DaeaWZL zjeaBd8Feh^y!~yo-1%;!GNyu&m*_^t$e+R$L&CUUD zN3oao!oTmJP4&wd+w&RP!WnVr$eUBE^XInDamZ}?(!DxeMVkZ9xlggNMeTsfb>Z$h z{4{yoOXgitxzfyi-QFbKFX2JYZ<+=qyGipIm}>=W55W=5c;h-Yqe9|ohZlic_kQgI zHuUAbVN0y5UO?q3KdwDC zovY^8KuIf+H@(2sOz?IdE!vIc?bLu3N1Jj zdfy((+>Capn{{r3WEc#*qtPjocz-2$>_=&FJnYVw_2K>bScs0YOV9=G^ZyaKto0tA z#whk1!RS-)l;;wTq$Ee>u2nAqYi*Fsby4u+c-j+q7Vy3UvPWoBq{*X=y(qzvdp+7= z?6mveIm=i=&(+obK6t-0)ZLojR-}0ho3u0b?l!2%QS6ztq&c~rNi5>YcsO(eR{2=U zbvAt~@16ZJs=5Zcdlar)L@wv+8&j6E=e}P;{!-El-$>whj8K8C;>@(xcN-+Pd%?LU znR8whl#&IdcSnj;B2UT~tv09cFQ$anjCC5p5BYG{lVHcW_ID`B^XHsPJdM6;P0d>E zEg2O!!gq!}?gVdM_pWOIT#xX^9rzRQc~!uTGo#MSH>MI;}LSf#oh2uH8hh1Zy8dh zY44B7>Aw5BY43Hsaj(B>+PgZm+1cj~SQ~Noy0z5vD0S~9XCw5pd;C>XMg`E^04-IK zt|fA-g51l1;{)u39?)E0xXwE4QD;++hmpB+)|r6mY{p(j6nqYq{+043gfy$cv%BpX z{qPRIJ!y|?w47_UlxFl@0$93`MsLU*WLX+$*CUJ4X!McrZ-(y>`Xz&8G7fw;J>=*t zjSOAD?^H^2{k^lN?nYi)3hnm-MzQ4xbm(c?!x&Df86C}!B{@oeR zC=JKE8L-rp+@2$l&Hp^GI}97aQFOoc0UC*dfwoi(8^I<`)N=5C;9IHyv7j} z@SmoiGVo9q(kj6y$-NTWK_`2-Gp^qC0k&j1C0`9?kEZ@s^kuhzNoS?wJasapm5Mh&~;zd^(fT+>OGRcUPWd;3{SEJh=8Ssip&CAs8{IBmJ|1xSW)p 
z>^Yc}D@Z3V+6wC^MebC5U%t6w-fXB?iLnv5a*zg|Kl(mYrWZxZ|2d-+XV#A(xnH1_ z`>=bihTne)E%l_#+ek4A4RsIDWUvm^pB<>%c;(-t$NwB`qSw$wzl8>k?EVBzVJQQ! zY_12ZT14-nQye#b&Yk;6J3H>!`%m=nMt(i#*OP_ZvF#c3{3dEx0@pjTe-CYcD^&L} z^57Hj@+z1bgU$RRd@eL6!|$6&yxnM;4QSo3v2U&<^#fq?dMr`*>T=9&jQx1@<||yk z4E|d|>G}&S#cyNe%>zJn5?kob@Z?h>3&KKl;1gzafJ^i8CzX9)6 zd^bTWN6_3Yp_Ma3o@v+xH&Wvyxak9U=|0+&LpiR`igcfM)J9y+m*LRz|u9e z@iu6qAN{Z%T)11_P|`F)!fxb=<=)1VsX)J#{Of_=)dIWG>9_KH6}0*k@ZJ>Y!_k+m zcNM|kXwNCA^IB~3ACvZM%6S$W^Ch@LT6JY|T+{a^6ySWpVaorOmXD!-zb5sC0q6ft zsyVbRAI!9ao^Iy-9D4MZp|4z>;n;6DHr|(%aFkX(4mb1#17D-1_d%DhVncXtgW@4Z@$M|O6TG-H z_7I*J;rlo}Y_x$f8YY4;xQ-sRn^^vKzi`wv=pg!+0T zi9E@E8XDb^?oD7yik^DM{5&q(S20`~i$(#NU!UbymIuxQ#l!JDzPdba*d zdtMJuKY>nui8j6sSYM^zJx$~BKmq3Yzx45MxxbnBo_X>axOL>d29EkIJ@N$*8};CN z8RxWLr9X=((fv05LCwu+gP59&&Ut}85ex1X`G0B6)ueeQ@SQOt=YzK!prh8bel~sO z4y)(WYtx|eA<)beq;xeD=^maRQSv$5KL#G#qk)RQ#=;|Q&}QZ@q^$ovL%4_MXMsIqdt;ohv9?c!yfY|I zuUj0~JVifW2_B7*tQ!Y8z9|DcDe@Z^+#Kq59m_+Mb{UlF3}g;Z9{{fjFzn3c2q>#= z3{{1dF`lQbsmasH+ym12O85S9jOBV?CCesSH8bF{1s0a0ILBDU0pemjBJKkAIHful zSxTSf(c|Y(#t>TE3!Qd0DUCLsN}n46EG%WKZ`}wzJ%N@Ki_R#z^K~zNJ$F72 zddsWLRTf}#Ce3w%j>Te1?c&8~;dsW{LH_lkwc_h8DO)MQmA=KK)@#rWtMh$oF@iUr z)*V3~bVcs14L0d0>e>u9en>C6`nUz$_9fU{0JTq~)wxj1658(xTs6Scl2lq*&dO*P ztVI@WpF62VA5T_Tkz(ueA7^m^B)H&?KtnW4okVtqF2Yl zb+c%PK0dp1>>X^`Po@m#A)UIHi5563)B+vu7wEb0Z*$N0wK-E`Jg0^!Bq+zh>51s}Rcm~}f(wgcLiP73LrJoxTnN>9^@g_P(F<2;_n5rmo} zn-bm2D`L$u-X^E*8W4Wn_xv{UH3J5taSM1_!q@%4%Xw0tXEW#{M|eW(7{Lg1N1;YW zTS^n!>l~-lap7;GO=OgTQtfIggN|5fti( z>0GGO5&m+1J7ZJ1hWT7z?o5r1;h#IPnO7n!tI=`Yxz;-9f-WA4F0P=IN1@i{l)Mz# z{c~jH4y26Q>l!rrU-3T_1N(1Dy$roPkC0FW0(-w<~ox zp6?69N3co$2v`0)bqs}CuIAl-`fnfobra=!#_R~-UPUi?8que;@Npm!FWqUuz0kmH z@No}teF|h_z}2tl;e%LL*4LW)Hq+DZ(t`0|>Rk9;?Ee?kbtlhHfy0Nu&q(NZ4*lpD zy9f-uO3HTh#|F4zT&=}+2C2u>F864|iKQi%6@jfZV5now!q~7SJGMfd7Mm!x-SY*7)BKnbVeh zg_PO@%fgjdZGkKoXtwYzTikMGzKppUAhj*T}GepqFzscbLT33cj}zjCUwln+9xS`Qct_SR6V4|m8F2G zG+14YX~}B%@@wi&AjO}iFOE^-X!PL<=%z7J)Kg2>!^6*lXZI;?j;yT#j&Fh6lcH06 
zvVq||Wal#A*VleOHM-Nsb(E^#{}|<~^`a;84K!f^&}5T;HFTlB-jR&9!Bf<89BjJJ zt&u}Nf<_XgIvYAW#?@Vv>-hP4uG84y`Xp!vxKXOT+JhGU*5mLz2~C# zhiW_}r5h#dRUUy(NP#0)lJz5<>wC2NzT(|au*HmmdxI3t;_VEO=sBsuBI$_qXaP;t z5B;Z=HV;U;^F-TbHuPev-GO8``TNnLru0&RzNkbdj0T79(_2az?!B4{7G=HA_5`_Z zA$|0g4chfKh)@iy+4?zziG?Rip-R#7Ea;#BJCOCMG8wg!!^H+ecW{4JzbCgvi` z`_h{8LZ43q;wS0jNj&cYb$$vbSz_HMk1J!~qD`tlH@>MK`q=$^vM6gCPorHo3pmPo z))`JI4`~~RcE?=eUTe)cj|!d_)Rrc#C!p8iNHA080<~ka>ENOTDVLDX{a0M8seLT> z>dSA#w?3`egDy|=+~46yzXieHUjcsWlfyPxRu)%vcM%?Kt~jcT_r`ZEM~fM4S;=oc zHD=IuIdGSp`cAM9qvaO&a;*n^qmd2S@L@;19w}fq=FnN3!dfcVi`yBUFrvQrO}G&lNe3xjOzMY?bI9VYa!fr z5haVQ6Xfue>^fgX3HVOd<}IwnUN5lYjPV2TnIr!?m|q5}x}C!2)3aM@Yo1KKy`d6e zwp(m7(<0-Q-w&qrPEFXQ522$sG%L5Mx(|p>TCgowi zX=p^Qa%MO3i@Mi28fS#RLT06S=SNBA+S@z$ofz_zgy*gUJrf?-1f@pHHiZt@ij>I) zo~4XQ^RYQcB4e)PY7emO!rFNjIlr6~o(=K>^>qs6_NDB4NL_1F#yL-U37jqdHVml^$4uJEO>qo*GVw(A-|_n#;4S+uVNBy>Q8Bwl%WMB;Br1=v@)bw zAF!9El}X+WN0JSub?S^qfW=7j`n0Sy_s8iUeY{gCxe`nnr`(4&PNLRfc2##Q|0P*<9N=S0h?M>^9lWiP2S^aQxepR4EVhbKN$cT9rr1|#dPqjg&tSEa~T zNzXnQG~tCvyIG_%9$at14zTEY0yUsk#c-bglhj6I-4@ch;yK2v#}nhV|5hViBDBIX z9ceR={|s1gU%c~?i}See#aHNdK)=Rl7livf=s88&!L%wXa9GFC7VR0+4*{y^VUW(W z?NX#uLnLUGakx^|o7CX?2TxL};a=L4d*?xIed&SoX_q^;>A5+9M%PbrfVA?4o{JjN zxQa%smC-(HUQ)}$UaQIKJS1}XDDK?Db``zZ0SRy%S`}7(DFdmqkhgjsy3k5@7f>&c z;C}*Fe&UX3+G@QiZ4pnR2Cb7DNubatlA)c0NPPjN_M@aWJb4jYBEd-0@rWTk?#BO^egHD5HQdl9)Luz{TnUv8N5Z%lz&QLq-FTZ1 zOPk+>PmQQsU0F&_ZQVAE zT803f`#~D@AEmE!BgKyidE5(84I`~a8q%L8UAmV?eV)ZV2K1!r8INh|e)XN#UDu;# z$DpUtUZY^7z1>jsUP>AO42}R>BQ>)56!EqMsQ-afYzW+2Nn0N~<1;8}A-V6Q_Z|lZ zcTKDW7y8AO$Mg8k58O5%tVL*)!j3~eL%Zz+=6PT!jqXxIG^M6w;5&W$>a3>lZ2`~D zC*7q$+&bG&dv(lptn#=ofkA+CwgcX5I9CwV(9-}j<@KeFix{i zHc&zy5OfJ+72`@BUGxQCM!tMSj$T;9;2qogVA8$! 
z#74)U>1uQa_8X zAA?;MM>BoM(H3}$>DjzmFF_A%sm+vgiq!MrThF4E_T#x8jy0;7M-scAQ`%8l>dq%4 z`F_FEUC_+aaJT#u^`(CE8l<0h&Lj;4?^<*j{_WHFt>YilsriP3p;Wlm+L%P?KF<$S zs6KuZ=;S@ugLUV7HoiY&6nX+@LvXnVs&nm>R@F?>>HjDNL*?OLzqi)mb+n=yyj}?P z-^ZIlq>JNp$Ew!vxGM`k(kbJHrRKHlNR-C;Z^^?25sI!b2oKO1R^jtS`^#)VR z$=e1^vw;>kx=^xr23wB1767^XSI5y!2J0gCM4DJ=`5o%}#I zO^{X9z+Nv%!l6i*D`;O^a(k|C8M!6~3U}=JDHQxZa@pupcT4UBb^n4E>EWxSB}Sfl z9$Fz5^=k0G9_lgHqhybCV0HB~pu?2B}huNIvr$=XkWM;t4|a;c>@Mrfm+6yCIp5 zlkx*-%ySr)(?_51_CZ=LcfJSxoD3FOZ?Gy3TL;T88yiB6;<|=j$f0@QQ_lK^x}8<5 zfsVBl)mHuBeeLSClrV!986CWuclAkA41Xy(Mh06gg~W7ET2GGJjwCiFGZUyR3%s|Y z9B0SWLMdv zE;*8M*XtT`8$Ig^#%Njoj&W6n)_EF^_IMtpYWpnZ%1F|)>91Vsjq3qRfbnJCUPE~k zDYZdJGaGrSoYodigT)S5hn$+sF|l60`SfC%5rwl3u11_px*W>hNh{RFO5t?4zBA~JN}eQ0 ztqt%bDfLqqQA!dBV!Hi2P18SfNii3GcfVz|QWY&xBD(`(L$Il4_?BMQvM7h&_fm^E zx{fl_jDX)px>eAkW8^Zev(9Rmy_95IU0<<-cPFveW`#Z;j3(LzN2*O`(wBKavOnk! zVODDmq?A@b>}kf%!cK-G_CUMt5SJjm`)%jYH>UR^Ry%xu=PJ$$pwFXK(&HZ5Aujx; zk(-VfvKZeQukEbma9Yv~c*Y|!CWKnl>Q~y zO-XsQ*;Z#!zi{0}>+`}@D*XB^2{cLC7E_36_kdc&lQM8Ri+cg8gS>e-X-J>MyhvajNO7jh{V)6jK3efJmM zD+?N;w;ID^dSILpGQLKAP@fta2hZCG`nw$;M7(#OX1Zt9C;g%#Q6W8 zLq}ezpUw#Wv#%(tE4}Mk;hq$mfvO5?^+FyTlui4LqKV^>eA>T-zHqGUX|5-NjL#$Q z9n|eva$BGmcNED+GdO>?6v!7M=_}Ej&TDuM>%DN~7~mG$OQ};kzYTqz6Vi?Yf|itf zB~Kj}Ia0}h8}*RhMMpHNxHDR32#~v1fU9ctGqs?f)FO@+w}+mXjg79YSIlp2_)P>^ zHVACB;`tB#q8v|=J4HGrx8t95c<+e8^Xt_zBcZ4=EK)6yll)flZUL#|Ddh5qayHgs zPkZgEX}tq}8XzOKkmqjvA?}-3$?rvQ{6^YU%KfpQ<)P8_W$mYD24OFK1$DSff#>Ah zh)iDz2kBAK6WJ(_`5>iJ6KHQQ{Nm0H`gFI@a}Ni8ca&Hk3aOx0BTh<4UkS~u2SV2c93f8v`ZNmS zA)Y--nH5Mcv9*M>pMi_+Tz3bbY5rXue+yS{@IMonjC1*x`?f%QO|T9bqNi~BR&0^dIj&Yi={L7VG0a<988AiDtlT}poUYODgEtGIHm zrj(lYLJ8f_CIcwB47$|ncm(>m0NxM_>ZVtag*R~3lMoQpuB*A@4`HhJL-*xKI+E9je+hcX`Sb2$5Z!>c!3^XPfOLv zYKbL$)qM7+Uv2hdK(w0wsC_)Cz#8X}`(tQFh)d{a<*t6+rsOM!D%>4VeWT`?1~xPB zqq089SL*h>yEH9K!=X`X#u1bJ)}A^&VbT4MozL||iP&;I8Sad&_2$_CY0CN)^yVJ> zhe)H`FdC^YwV09fa)CvzPw@V3awWKTHb=QD9ccOekVlx+Tgs%)y!+wJY9vt!`0Iq< zMBi;8GSp}$y&0pSkt6);$JBrCe2`!3cO;@mB96SY8MEk}JD|wu;a5($4}voeu6NuA 
zj7@^f&jylvD8aFhr8pDdEV&--a$0^c)Hgp!{l;8xg2Einw+v$z<6{@{^lmV&{Eso0 zD`@d!L1UoY$*9V({4wd%I2>EwxisQeP^!}lDZ9LPb3N^g{D=$#9cUyl!_~O9M?NiNYNfg z`ziYKAXgKRipOcaE4U^@k8_b;6~Sh@jWnL+x*bg43BOK8N;e1FuGO6cZMGybW&kOM z(2JMCC61pm;A>~lNP5+tq%@&S*0wxNo$lzSUY!iz3*8mqE(^Y>kJPV(BE+JxY1x5_ zqqX5KA=+B*c;YO^LT1rBlcFAd>>h$Q!5M|LdldLNmm2gK-pBVzda(gmbhfB9y}E`L zId5*{e|Oq=C(nnH`*Kp~t-BG03p|qfX-Fr|8{674zHZ6iK#!&At^jZ=1IFIG7u&pR}6_VjON^u5d11&BE zqGz$;)W!?Y=%4Z|8_i)9Muz~Q<0Dt-=}o$acNfz~d#G&?ZOXO^A|qA&RO9hOr- zuI|#>AA; zED6*NkRI{8)g12KL3Tc^5)R=JqGh2cy(*`?OMs>gbe2Yg>IXE6A!-uKD(B9Tb#Z9f zee{oO`{d(+LHmfe6zQGwUrZT2xz~c&Oj&D5nF0oN>2YN7%b_&a9j0iVCqZWsxsd{n zf5PwD?F+zBF}?gFBGmLn?d6$XI>%|+bJfAx@4>T-tI>}rl8ZQq*ZTB2v2RB_%)_oDlujBoYd*!R+LbPWJ!_IulBa_@B4|m6S`aJ z87atF)^~!J;9}Y{jvk&%O^u*$ceWZuDb8-|^H8%a#i|mHFX^i>^s10eM_yDwgO~AC zNtzBADWlaTV9aRb*3?m-Hg87?tweU01Z&-K*BUsc2Fg!SejB)AFOqN`sig{OB8NI+ z4EAE~E&!74l;}#IQDA8+vhKWq$#vj)1(vz?{>rezd=FGsAG&>!eAmM}*VBt@>7C}t zL}Q#5(Z;*sa<%<~NSPF{E(N<{{4L6l(L`0G-U8RRgzv_LYrQTdH`-y>N!Q)eTn z^D97Gy@>QbGfduPC0h5S?AS3|Q?c-EF0 z#ERCEbmA=TWS~Ub7OHZ0gVS|P@C9YuPpjqRB@igImk0z55+^Yk)kX?wMk z@)DG1&pc0AN44mFZ{KRhY!%2c zxwsEcr-oFsz@YwA^?M4)wEicfO`1VLm9(dd7zW27j=v|tZ{?xguaJH>z0es=shzw6 z4X0=9dT^10Z!nkArvssS>sm?~fmD78spM&JX)s*?eOL2F>q5ItY+V4=9EbKUgf8{5 ztOgIgxl;b?A5R165U4~C-XSQc19CD&tF?d1=xt9^NC3Z5V;`maT%7sI4bn@mdm6sT zhgXWAgdw5#w7o}hod)tX{902D*LTw&jwU*VXEVacS*(0P{%^uOM?Nqvp|6b{7{%|) zwY4q5;?ZaF(}4d3I5H;Axk_o}zkyKipYWwAuTO1%X}weeRYkaOfZaD8>=^H23yOK0 z1yrR_xzTL;{EghN2UZ618HZ%>NhN8GZc_$JP0xWL@$m4^TA|Th8KK-WW>YLX!&!lJ z%!E3W`=fxs$?Yg`NceS*;b!_TMQ!TFu>k@pU(M@WWE?pRg65-#te9R^ zubRS-a~6&fysrCKHkEMicta~vKgK{{QQ8?@&<%X;0%PvXP|5GplzA6z)Q5jLJrt#~ zB}bde^3*`q>`YLSb}H4K!)b%8Sq@j|nNjLE8(IcsyOTyEs7}i~%ID}QHX>jm<)oqJ zGH|0jk8>L1UlH6H6YFzXF!&N%5ZB%v!B-8bSHdSnB6W3q1T28DekiK}iG+;(ED!`q48cYr@Rs4rJ%RV8{0l zEv`W`_lEMcbxMIjOU0G{)8W)cVHPaOb1AqhB^qbo=r|8<)fc@Ap4?1}JBBn$yY~V% zjSP3k39WHG-OfMi%XMYDmh4ch_z~1o07pE7m(Mvw=^_`&l0#{&$=MBz4g!+d^iVhO z;GFt+aG^J~0?yM?GtO5T^g2A?PVQ;aJKEBl=8Vsc^kjdoot?cZXuh@3<4O9PaoR* 
z+FjE_N&2{*lSo2Qg*@+%u08^k?_=Q3mzJl*@o~;0$DyYM~U9@?6@vD44 z2A4FTH0Mon;i)Omp}RwR)}7;#lW5K&p7fzq{qAky5@#rs+s-+3;jV9>z&v_2&7Jca zZ&H(fN#_UjX2nr~?^56)#b~G*^sODzm(&BGoNE4^qnHa8s(?7|Xc^Z#4yG;tq$eJt zujk?KtELQ3k5Ss+24}87FF5M(jMUG$nj0WBCP0sxYt7~dNl^xbjw?pd`dnI?BCV@q zo&cA6(dCvn2d;MVZU~&3BF|tbW;OSY8Mg$VW(Lenr=-sG>ItCf0Ov+;fD!XIkgjvk zLdHf(jXimrOWJHGTs`Ifokm%vf!jHgw~&Md)TGAyDcspVa8g~|fK=xGntB)HsJY}@ z!Z(}#u@xEcsdb|zB$Ubu?QT zdPOKFi5zmKe*!v2{nZ5vm;23X<$fpr1s~D&IZV*D+E*(kdxN zkiH+(AxBj~4UW{Ff|89xP`5cB-vtTh89>gl*yp9RYBCbMU1;BO>YRdnc3ny_HGW0Q zj9itzwb3HL+kv6mW}7R zvUxeJ{*XS+6^UyepB1p;Mm0VVcjJaIYuDpZ2^eA>Q` zGM+^aJ&diW9CjYS{!a1Sy{z7bLo2cVHt@uFf&Iv*)%3$GO3Q;LJo8wM@FiF{L~qnE zGH^6@fcNfne4XFXhDgvS(wR1dBXN5xhj!iwW}V3>;hoQSQ*$NQ%|gCK zpO%uXX)Qf#4ZEIw7OABp>DW18@q8Y?xybN8(JEtv*FiNYX#Wf>pF82-Gvt8tFTpnK$LU zdV3MFOW%cSy^N4eUJ8Mo*Tqm%31!Wyg=PCl7^O?m*%Az~@GHId-T^6-OA3;`ac9PsLjf+^O6t z0<+4F&Rid(=2hsh@1VxFN!OYlDyQyL;OU=yUuudgrIk`oThtMd9?n?0826*TJ1xK= z{Zs~to<)mVQHy+KTvY}A(32W20EYW1YZ9r{21#hXfV|2}XNc{=67HSx8Al3R5dB`2 zl#%4Cf65u|LjJShKvy$lkQ&p#`MdPYo`9e8!&~P^>S0NAA*Xu(DE+9!bY`@It6aFF zGc8zB+mhHGeGHZ1Njm&yqjloUQ+!|dYkZwjO)gU#I`T0R_XVh=QJ~9I;3(q)Q}q7N zX^Wbo0a9Z=STNTp@~7ZnM|gh%N^NlEcrG69Ezp(cb{3DP7u2&ihWmW_GdU|Cszhim(9Qoqt#5G8s2sef|N{ad1y+kid z=Wzy1s&C8_*K^e4oqJ!$SWFJk`qCC<_D4XU2x;HLnjgirGg6Dl zZ~a=NHTc*nf|1UxcQ}silIwsdcrB|_%9@UfTe3bg261b#}*LZ9EO*O4B%C(x- zZ^F8Ikd_$t@DeQ(k}K)`=YVr`$bTWy$w;L#dRuOmCXG3Gkg^8RmIOG^W>u1I!o%iV z+)dnNNauQ-lTc0}*EvY9eQ=8MRPCT$orY(%=9kfnrL^rruxh-wGf+85<8eWDM?0+r zt-OP$a968l2_85Bf>pqCPz(u9lS&g9b>ra?{>2Z*(X%-1EPs!Jiy=$p`Uy%V_d`8`No?!2kBCg#+nMh1djFGU*@;;Yd0iS9+E{^8evC?(=Bl0XgmwQ02jR|BOeGh zlX46F^bOkg17!97&@b+_lp*JRK?c81?Zw=E#=A6aab1eu#TrIZC2+soyAxkVIEQ5q7!x}W==f$EPsLyNT5CO@_Z8enKYj&PrdEynej016-=qXtqy0S> zxpf+Jy_D1`Aeeys@@$I&B*O$!6aj&Lwt3W@560GWodZ{>GyB6+>!{fo)KUEB2aUFm zn(QMv$B%XSFW1S3j?pJj>J60Gj{2nmN80*u^oLi_2G56W3$FC+Hh@lhfhWfRuBX`! 
z{pg3$zoV_=T-zJmyXtNb*TYHIj=R~x@1@*rMSG;5EVP!Z8jZZVg}yu=`O}0Jcp7dw zbF{sno_VBn+?P-4A%U6)BN3i}PR^q?cNAO|>`2#;KMNPApN&@U2&75!#*>({uqE`G z*lR6;a|5+GF48w-oOd;2&OJZNL8B*WSqUZBvwHr;qjswEupL8*dVTb$4yS%caqc6f zR7j(1l!~r}Gk)oK==*DsKSr}Dot%$$#h#_zNA7dNSo5neay162E0i;n=lUa!4|Wy* zW~eJ49y*s6KOf$;gZiwuFMOn*H3_}j!*4(v^P%Dv!7?%eTbpJzb*%sz_nS3-(_J76 zz|mf?s;;R5k6qw|*4VvW$n9RiaL5^6Q`>Wxpe%frPSldq7upIm@hQEyb zc!W3WflaP^nx1jRf^mfU`lX+;m|D{6HwW#`zhKxfet)Ygj{ zls0WcT}B-l7>tM`R#wY24~gA*$GPw_?{tm)L-?sy}%3b>Y&nn0gT z!`mohJGtE$^5yO}bW;>59l@ME?}f>H*X^j`mETEw|7HXLB}( zoQuInU0IF*)E;r2fd6U}txW&Z-=|X}Xx&$bC!VRK9o(IiN)~&36a1)dm;&DKB;6)* zltD*}u>_3Mb8fOU;NmP&=fZ2zJ5?6`?HTLTMvWg^ltX*Of?svVL9XiFM{VXzx1*8x z)GNQyvPkk&4YHVjM|NZQmO+0NVCxUTKiZck#_`U3qmAR&w;in9Zb z20W9g5JwO!?aY)bo%mG9bO8E8ihFV-C;>eZW$bSw*PRy*d=mhF`8XnkWX{d5ez z_qYy3x!pKm)YDw_hYkR$Twt3H?`dV`BDLCK!zBYo$&4XE$W4-1#0{y#^V4IVoBq0~=Alvm=+FktYX!ucpLOB%D-rBJ}BKC^`uS zHz8f0M zE;udZ>Dy347g`tBJ$4J@g5gMH=kE)#Ow!Qkb~NreU_(vkn4x~C)hLuCSeAlM;lDRlQsoL%xA>HIC{o-dG|2aj%tKPT+Wm2{HE#IG<4%Q z)2NTQlf({WNgPp@ppTrfG{d^xN$S&WsH0{457=yKA0C}JS6 z=JRXWt_zO54kGXZpuMhqm1Kd%fby^|PSf zZj@gVo<)5sZz)kcDLBo)9$fv{jzz+_0F5pj%4{`;r%)#F+5W3cGo3m8m_|uv{y3hv zmn(U>1tZPL^iwY|r-bMa^_Ec66=8f~pS%CMBL+FrkK-pT0ja6Zrgp9*8NSgzRENgY zO8fQTu9Dt~n!)Fu1?vils4eQ#i#w^wNGdH%clT?ETx*Jyan-|UaMh1~$WV%2A@>i9 z8hd7-rF^)+k-oFiYSlrsG z_&R^%Sq0AdJLlaU_??+v9mcSE$fYNs@sH5b?xZ4xIk%J#b*pjq@#G@zj0|)|P=9FA zl8q&g5jjoaykk(RbBk&}_02lcyPr~9D5sh?*8!!nczl>W`vsJI3Y@yX)CI&&xr*1m z)%Tvme?PD_Gt{Eym$UT_Jr8tSDa%vY?EfZ6vKQfJp>Y?$`@q&{Bvl{U=^9o&3T=4i z*i6X2fnq%E-gUV*QrFFVr-#u(b{Hw>f6^0}3?*9X9Q;-4Jw2*MLLB2+J{*#vueJ9Q zJR2PNM=DR#L*+cv;w<1_8g^bx%5-NDwN);Xq9yNJ)8?(=z3Y}8yUeA;xc`yf_fyc! 
zGqll>r8AWsNYMj(V<>q2HSa2em!KOhb$s3u{B#RrV)tP`KTvr&8X(Rvx3+q@Y7hD<@bq(|<=jTKF)j+D{<;cYt zx-mqqC?Q_LyRO7+NgLgJ#psUZ@bB-*;f%+HV8=CfZ=j_;L%@D^2aofiHfdeE+8G)B zVo&n+SG>8O7PxZK$f7iISN&U$mT9FWLmyuZ-6|8^!>lp5)?04$ji=_Sdy-)2e<))V zt$7kS%IKj_&=|%gxdX1zgBzjd)ud@e&nmm)xsskrl_2*?N-mgL#r9^2@i4SwNpKmrW?tzc= zvA9mEnkTMeF?Mnw_gjKg$OJr?Tg*7ysdc!WYfngNL7wiwqqXd6m@H(eayHJ>JKiwo zUHH7z6Ux3+c;eG~-(Ptbt=_8xC9j~aN-)|GX`ybl=We8b26AOQL@tnJ!)HcrD-qiT zyRIkwIGwjy&|w;U#WF3s?i1~BeToqdWn$es?MGu|y%(|qzV>Szr8rZdrP>BgaeoV2 z?Jj@rpytZ*=fT;tXmWdbCp0+^y3vcJ$MbS}^9Wqh5sPC(khUFw_$Vna2<=cpokQ9r z-yc5zI(BgfC~Nd>9BtVC3hpYPBl%Hod>UFbMKb()rO(}{V?9Z!z?sA0zM{|)TUhY3 z-C-WRJ~Sa+ID+&{BWbcZ(m#d1D8(+;*=+*9=*$h9+QmGrQNx{OO{NXo8Y{3qY{ zQN({U?9|1yyabFpYt#(h?ni62{v26PhiW{vEzMi^)W0dvVlkzTfY#mB@pRrq+ol5$ zI#*py%U6>l34cuCojWbGg5IvDt*MZ@9{0rPpVP{m^ydZh=3FDB0aCg#83K%18RUzWq^JslBMhmyH&xW^DK}(m>p< zqI`S!a-JG*e=_vX0qRtO>3cC|Y7MP)f94{1c{J9>QQou+9J!nH)xa?yY-sK2iO%M? zggzZe%BP@tBZxX-m8xTY1^pCb=^YAQkrU9@H>5v8dChCKA&8>jQiTQQCU?!MUddbyb4#M9{L*S0%M-dv1qQwPhbdqFo2peBur^?me^!Slc3B zH<98TpjO^BAcgjLSAKJ-Q~drq;7J|jc)`84)ttt4nMTDHu!B!%9w9VW=N3 zqicyl`dDD2vpfe$-y>ec_q|b-R9!V?pBTEuTn=D18^x z{)&Ku#i7jXfN^D*PyKEThz^`{grN!|7 zBeeW@m|Ik3Wd zU~{y`_9{o>>NQUD)cG{=_`~P4fZ1Z&C3cN4P#;LEMc^Vu+4@ZfP`mOVL{Hb+HFpcfNis zI;HYw^U&zh(loevG552viH*~4h-|K=XBy+DQmbBv)%lyy+ijria#Cv>mQml8U`Q-Y zuN|McW0M}LrhHnG)|l9i^l)q1Jf2ia+JW@0{5K06K(wnCAdSJvG95`TGF0Nz=dbNy8o>9 z_zmFrAlxIjxC@$R)gES4A!YVJ78~oWccTfsF&wQn5ez;Go$LRZP5U1s#c2Tp?y~3l zVOQ|RNVR*zysnW~Ig~b;clrXHLCS+l4srE$&@sEvN5_GFGj@cOn<4e1;Na;X{f%os zMDG^kA=6?Q9w^p5BDH(QQhGD$85R8e`t;Ml-XZi)M<}yBt=%_&NR7_K)n)b-2>;T zZ;cw&QWyrWNiq6Aj4L^Z{Khipkx#kfJc#n4?z52^>aO)yY$Q+e`zK0&?S^T*nZwuh z6FqrS1+=cE)?@Nh+Ha)C>+qR5ogs3!id^0$f>+LXLF-U2rQuYs>pu4PgkTM%zJHb= zUG(wG`H?oG|GRD!;Zwg=VKk?{%7sIc$hy5;zl-fW3w*>C<*x782K7nV##Jd#dSQv& zL4Vv9sKr&Jor8_l3(7XeDG!>GXU*fdS+CAH^rbSP3|XRvHpVQE{|4xrXbC4m`yUN? 
z>|Zr=|Ii!SChkX5%*U}=D$v7za@T!khCUZVtMTV=3}4rKR+3_RpfmN2BhU7=cLQ_*omLI7%lJgn)c&g|BDgQ zS=jRTajir&2BQdQlw(gLr#Esn8;m!k=50{!Ke;zf?{4P#^yWz$p8~OaBW`C z+H5CyW?bkA>fKMw+MNOV@krWk@bYj<`5Sl`$-8oB_A2_N8D+UzOn*)qDO(>%JqP7(pwmx3o0OBG zgEVE{3tc-Zk@}XBQydvF)`ln2vHLmcrBDZdOxf* z-L4rhhWl&Y8d2){y=%eKEtIE^$CF;2A$6yC$As_jOp3UfR%CeNT*z1O%NIO#^g96R znMEEwqs}$C|9=+mq@_Ikx3AGpJLu_(fDLDdwSis#v6%ny^tGt_!aRC29>i;x)cKw-&E@oIRyT zf0MO3j+zgq#X<=d{0y)c<&-}9onX8^5PBYpyX(52ZZ)}*P?#g`i>S|aeq-qy_xk9C zOudnwa%`_(v=~lw_BjbHq(Xh-G=40{XqollKh3Xl-@f&^FpL8>t*7GfB<700J`pG0 z74q?4-;n=OhQ(Kf7QI}*$ucvbzFnNB_ zexMi(UpaSl2i*89Q85`L??K+XLu9WY#kcU=ft;Puw)cb*^-F8*xKErMrDnCJH+j~H zTypE`kSBqx?atl5ejKMLe~lZ`o>DR{4luL|>4t;Jk+t@4%vTt2R1CyDsV(}R-1T3* zt}g8WWa}wo5teizd8Ms!K%ItL4)Zn*{JRT5H@f-kAYI`r#(=l=(a(bJbAb!>xnzuZ$nZoOv4 z={hER1)FCc@7#$^EW3+`Et^eGybt!IA7k6&j%R%+K~IV?nsQYF8GQ!#YvBj2nAMc! zSE7P-QY>b&L%7i{o@)iPuHj-T?P#3 zE6zniw8`yds_!XP|HdMaJtvH+~qP|m%SCVrgpR_0Hl7t4_@MnV{8CJj5!w3L4% zq9Q%l{W_LN@l44sMqC$hE4X$a+hw(DuJx5Ag2eFLhv)IgS934#?}398L7RG#UR{~K ztEBgn;Yo}RSw*hx;KMYd`07P4l{!#<=?txF_oT?Bl#FF%^Ts?slx^DRDG{$q@soVD zw~Vp2weqrFD>=C@?brw;op`2%(|>(4B{);v4}7|sax!@K%*Ulfg}h14?iD_Po|R9l zDWyB3J#9ec)JA&n4(j;VwGBq3*p?*zz%Jxn0-fm%SF;sTht^SSO>F04QdaQYSo_qr zMXBgIy?&&x0G>SoOO9hID5HclMbPnP@H7XFz66P_WO4=5F{GjLP-)(R-qcs+*elwk zaSWu_-qjx!U~D;5F&A9h8hu8>CP#Q?L5k}v=*@pEin!{`Yxf6K(j@tvOxymAv9)oX z`dr%ctKZQSe(Q&O^_jIwgxyQf>l3^a``Mv?)TR1! 
zqRo*T;ImdGgmo)z;-`Ks>}t#%j19X`g6H5AP~Ms7N8|0>Yd_8qi1`gzNfF2H{^oc} z-6&6`p$GT&@XWO;U^7-k+q4oWT{(INn3n4Fy~w*iruUs~-3YA4w`ORQFx~`xno1tM z9X=CM&txwLXC+{?76x#&UyDs-MC_Hc;&0GpHFC3pH0MCm+Q-^a zCux=CttAbDT z9Jv~E7y3iJBQ+l9q_jr-uj1Zux_rMG?#-tsE`i@`XuX#AQ0_eOydJnLC3P9CaHe%E^X8wB z`kP??tO_mD21$h28YSr!aKSXN)E+x`JP?%x%TX$K{?u609i+;mEou17SQ;s#G|1%p zx$}fBqZd3!%2<{-S9C8DJ6ayiLY%bs5!}%J{q6rQdX2^N=&(`IXR-P=|bC{giTE7~Ka12vci^(>%Z_ChE>V{-(TGK1Zqi#~Z`>q2RGG1msZ;V&$ z9kiUIbE9qCxBe8*S_t~TCS&;Ift!HV+MFHY-kde- zL*A!Ja}H(J3%K4-8lx%eK@sZMa-h%#-VZ0#%}_hS6mD$-apiUVybrCJBfi(FNb{y~ ztxl>-<-gvUx>U;HO77&Vx-><|Oh@ImV{WKJsnsU%*LMOl?#I6asj2SR#%RpgY}4pTG3rmQIfi7b zfY+Uu9S(IH&)x~CSqaT)NjREvb|=GEzk&OJ?Lz{O3Zr!Qz}E-m{E+*?4q(@%Li(&N zGNC>)+Og9S>b@ql!r1L>;Mv3T8A$cnNH6W<6g93#=DO;q3mRP+`yi>lMXnf0a{@a4 zh_ADLXM(*A;9)rZ*AWW%j(K!xeJh-sPhFdMzY)*lv)BlpQRMy&#<@EK;VgmJ|Aw#r ztli{Ni#rO}cj-Pu?SoX?$=hf{>G3e;Nndmoa$_hhvX9f0y`DPNex3*5Zdlq$${hFB z8p!`~zJ5Hbq=J;I>77o&qBx1ZwC_vlBU31mj&W~HsXK)g!DH?S<_feyXb3gN$UqIA z3%C^w8)tfgQnVzD#c7Gx-N+o{fLl=G2-^F1?%l!bUfQ@87__F1#Bp{Z7oDbGdJ1(s z$^TsRg;Lpg_P(^j7Rd?jcdF&m1c-d%$DIO{%T?r+w%ikW0snJoq1K)|K2~yXEWJFi zCP+->vAplt+_%!%Mt-a4HSxBPUUz-7@XX?U93P&dM~pTo<%t%As|MPH)>ffC_fVVO zVYzc~h{U`L>8QWfQJ(Q`i^(Hjx=uo9o&PX)O}y*ft`7_qKx*vBC|VXRHg|D~TEjQ- ztx{?|C3%jqkvsYwib+3#Z~Q1r>#o%^@lBUN3(@zI;pyM$;d0U`U$v{WMaI*nH22xq zM^Vcw4craOr+zB=IuFv3=k6MIiu)GuS$z8irG1OdZpkU0sY&XQcWmwZSd%oQW;+!6 zLrJHi18Xq3zWh~RD!eDf{U%fr?&2Z}dH}0LY@T|%#Fqj3dUDcv>lZzCn2L{JO z$K_lVfwNvfz7R^9LaOJX1LGUlFrL$M%(#GgP))^8Qx6@ zeUqVA&O_Qg&XcGOT>oKtSx}uG7DuFVn`;J?9oyg!&&v7NIurP43P_m*uEl&e(DG%Z zDyP1Bw7LWe(pJ(ZS`XOVhtAQZ=Q*z7Upw|A{vCU?;X5;wu1C?a>t1AbiZmtRsj=Kc zxiY1@9lK|}{ypa{s-W{m;BOI}bslN-Nezcb4^!e`FfxboX7E1`Sxv;WmdC+8fO(`Wyn)CWne zB|8E9twzRoqkXHO!F|-x4S07UrFPKnb>uYu#(wn_v<+OX3v$SvS(9MgGk1(tj5~6A zD(qq4%>>*JrIbIAe+iOa3DkpLajl2*eeS+6pLC7k#!28&J8%tnigYj=tjOu@X~!0D z;mW#0f%cUyQ=xbF?m8RDzoSL2I+NpC0-v*f$HItb9@fls=qnB6?uzvaP*el)P;jxH zG+DH80i&`E$P1C?Wpqq1PO{}HmAFFSzFyasPe{gCj+5$?#D4qb8s}3bk6g2hAMxs>S7L~ 
z{$ozZ_@nqrug0NvrzJ_Q>OS4Szp~z*c(#PtG)u{sg&NKwImf6P(F5 zj_@FTYu;jhrJcH&WNRqpYwVpXou%*;UqAXXlsV6XaaVckk^3pI)xCX$w`#fyJ_|#y z52igm=ts4?R2?-zqzAn#?kF4W_*jO&js`5j2rxB{)@g=+BYuqnSb;y@wN?kw9M1I? zz`+OUL+3>*kut`~Y=Mra^PkV%0OlFoSE2=yT3?E#DACgJ!pEdc@kV>A4fk_+|1>R% zy155fDWtcBpiZY9B>hNiyoKbCdo1RW*B#YH(ND+WX73yFT?B3tU~ELN8>(yDDZS4n zZ%_X9GBrRxd9B85NIC8#AbdjP9ObCm|59*m9F7*%Yn0^~^~T4#`ur`P%Bkk?G_%K< zfpQgwEo}(*l#)Id33(FOoe?_8bKk{}JmB8F%lU7^*OMprlK3C6|Sr(+>d0?vues(2#F;Xb{p1X2Cja2GztwT>#yn|Le4Q3oq80p*)No{F2 z(2|G2ojPwQn3+-)2OV0;(5RGS<5bxRwv|XxBOL%pmTLh2C=KSLdRf8+HBb0{*X~ z{i}GI4}@o9>(8ghm3O^(@+4H30_Quyzo(4lkkcJUr6c>M5}Y3k8tFI8gP+fsEXC|a zf8?Wke9WNLAA?ilVn3h<)aXsX?|yh0b!LKw~aQ?$Mk$ex4_GURnmMxEhaHO?EAAPKZXJ%6u1_RID%%sZTj@{=9<|NX z9-U~VCvltv_wIR-13wwrXzC1C<@C)7aPd3J=oPs4La<>3m-ddm5w*OYwq$~6^28^$Aswzq@~Z`CY;_zhC`A*L zU#D;4RlV@kH~Ii#Y?F6MzPYq#Jnf2Swz>+>`~`ta+^MGq9O&mwQR^PwxDSOkigWHl zZW(%;{MF7Z4_ErM3&|DXvk!dlc*i|kkE7Au=|PSc^Klka=;W8ld~KQITQyzub=gi= zC8}8pxVAhsp}%@E_M-7d0F$Zp%#7pLG44RPd+z>*&3k_8FgQm0MN3%CXpQQ4wW^kmUJcjO#-5ZLgxx3~Q?C#I-J#Sp zQYXpXb5LW{2koJ?3@|q1X>8LHB!iH~c^r2xbf;a{KSuqh*VcJ7u`vi~(gVtKEK-4f z(T3Q?@1sb+H2?O~%22v9rqa7{uHDF^w%UeuR!LrGE;`Z^6{Ok1)r0h4Um$WczOl8fh$)u)d$;KpCU5f24Yzl|Mg4c(Q0txEaM6!atbNA7%e-OJ7=?u_&1(OzxHnex9E>h zYN}nGe{j8(R>OE`+7+zMEvVJipd%U$07Nq-tYIT*YcC)l0-Y7{K+kGYb}w^J;Qy>8NAm2X_d`2P0}oR{^Jy&1X}Sd(5>9Y1RH|t~_Vlyb(G_XC?HgcC32E zeNq~LH&f^ht_c4n;n&k1;=F?TE{Jh zASei>>0yesB*3N7wSAy?ZAxcSU9}Qz$C#!=$QiHGwNT~puidTO+Janti#$fk)yeEA zLF?)miJVc^=LdUSId-hJ9Cyj<9;BJ`WQS-`906<(@4Hi8lCSW$gDc#DzJmMsRJgih z!BvJdt_`ce9x=LDD3oWZP;Wo%ANMFW-&oQ_d%!!r`|0p}Bx$23sa3f5ZH)V=SEew;*$=3>L18SY&jt4&)GM&{D4KF7601E)#n8HF49pIZC8BQ?2Kqo;>V z;i?bc=drn7!iJa@sHBo7ouPOotrVk|dM0mbw%c)SS9s3d zF<&9??Qm2xY>{TbTn&ELA*tGtW(|FCF{Nq=<*Ss)+L7j&qe6E(>k%+#d-QMU z5&tfvj4cd09*wR=Y1GI<{-x)2A@G?y9cy8^;#PmA7PW}oRXxF zFR-Wn3DnMCyu!b7u{z*IzH(CO~W{d+zg^JBN{Y~q9`Fs2tHBo{r#%?2uYm(X8yQqIGp~z^L_Q$wQJYj zyLRo`wHX*j(|7JXA4l7Mg1pxw)>Wk6!6QyVi|+)sc2R~qxgC!}yq!K=4Zr^gcqT&6 
zV*)&vP?{@FJ!nHCn60A6z6f&q6Ks6jSIcqu0P$Y#2k?AsxN`*V33=4CqS;;%atV#J4W z!8=At9VJu)X^rtDR9)fOY05}*FJ$o(IqQv0zv8SQ3H3Po{WSpIA*YS0)!k11j5+C- z`nVQ$HE%UNz%mA*^1sP*<(a3~7D175wcIs}x#79i)dJGE zo?aQordp0Yeh3Zl6~>xtVV;w&Ch{%PqMZJ%hlce5xPz31q>yV?GS(Wo-oZ>JOs1X&?7yY?9Bg|rNSNJ^`o}`ZBQ`h2m!{=PF%kbB+5`C21 zduvn?%dX|AJLlgFvyx7Mzi0*Z1M!9jk8==~Bxs8#24)tuHeKTn9c+FD2JqlbU7zy3D z8?rz331erpcy-2Yz_NimV|pE6_2X%Hc zR+=1>prU#xRu9!l=oHRvl;-XRd=ff%5&Ug~>wN}>lhmm!d6d7Ns9KJ0@{gogKrdd3 zMAx%dnd0uTok*o?!N$Ll_f;V5!Z;j-O#6g;ZRPrjy0*FuU8If@9DTos;%9>|HH1dq z)x(|TZpNKhM_=vW*IIDZ9Z1@N8NHIUAQ)eB6x61@C0aHPGEO{4br2B}JJWNXth5V` zaS!_Xe0t$jYV|yBrNw4a908WIfO#6NETC;tt*1G3hqBKGqT+yeSI@O@eM60Y+XXrQ z2O!7~lx8Vv3SWRX?a}uFvwP%n7+>m=^JuC3J^J|6Q~DP0(!i6Bfd{sucdn+a8-T7W zwQk|fU9_oF;E3YQT`%eb&aLDA->p!2hO%90ejsoS=hV_`k~GR-S8J4OcX9twc;=dP zbJAP0>$~noP{(kDE)6MK5~E68GCs+rTs4P*Kq4o#dHTUF?@m_ME*=&UG~>^VQm5+#CTHJeK6%id?N@^YWc)tt2^;7Rh| zu3*GIKN}j0X`_~}_90(sORgi~Ui~sFkrUy*B+NVH<=R|qgB5MzQ99d-CS~Z+ijIj(590Ef^tp=lqUm2xNqc(MDdr?}_vv~&n_Z?ih zdyp$1lkW~jTACUbK+~RS`W)0h2Us^iUmbw<7_G5f_o8}&fv~p)ZpT+GWlA@3XGff8 zr}<^<$0TFnia;M~KVs(qn$8u_sPm5raG_tp(GLLYX}lju9mPR=)pzk}?zdC&?O@$_ z4qCd?q}3B934OULqOZ$6yl-HES_0rTgd==Zwq1Nt*FcI~^s#a&!I z)zejyb@aU);~3b#06WcSzC)=;i*bMGXC_bG;k_CT`y*i7K#j`VgYX#rpPd;UM0ThL zJQw<-JKD(Hz(>>-jBox7*t;M2wNo1X-+fMp!M$TE0c^K1n|U~lFRduc;Ow6ORgyML z07~uA!=Oz)f@VMo`C)w~+9&5xnzpMY&`J%Z?IuqRDbhSM&T0ZWeT_CGfz$Io?xDPP zyln(V|Gp31E(^4_k1{+3>?ky2-0$8dTbP?QgllssQ?H0L zdDURD-zmT?CWS65nc^HkPCK6di~9SCm5i9uzkhDe@=0ST#nvc)Y`33=ut!8GS&IJa zT*{RC3&faAF;(_&DHu$Ke9FeH+rXE!?Nny?mZh;2X1I1fB1x;*QkFq81 z1#kzCl1|xD!e>xi*;a5jS=AJOFZR0)YodbZVJiuJZT~5gBsdDbg zQPMaYaBpKRROKmRa=%Jw-8o!-&>&r*sze^>op>ERnB>l}VjHHxd+Z&3GR4Iv-gxHp zD0tqTywU38&e0K!k;nt}Z0$}8skb#8@^;4OT;Nfje?{A)me7+vm<ILxQKxt&t? 
zP}+91{5z>#A0M?JS9hKF>9e!|o>)YxD#oob#^2{&3&}0C^kSeeM$IXKcNyzrJfER~ zTRhD8-b>F83!2_BAW$Z{V@(<;gNja}oW}$1Jlm`SuC^auU)J>QpTMKivk~a*{{!&C zJwVcqzA*NwtB6bCg?e6ZWL+gen;r+M7l6M9vq1Y-E_GGV)~YB!4-1SjjsK21e-|_r z*Jy__B0K}iwf>?&sh$I&G&l>L;zzW>9h0-rT3nAX z{*7xJvnjPRZ8ZKud6+YwK{>{3+CX0SA?g$5*)~SW>&o>I?a%|y8IJMVvrp zjn2;izU9=riL}lR8u;5B+-14Hmb$ls9_McPxs<5|YdBPFx_RgdY1*!ysyBDvKp#8! zrO&3LEk-U%b00tX^sH~xFx^w{DLfyMY7FhXhuqGi8o*p-&@)UY9~7E&xLUFnsP5#* zW!C3ulPBbmJl52HTus)3(;iN2`B6KsmqM#{QjA%wsJEgw_B5v&$>^*qPKgt1XFr%4W_nr zYD0H5<;%)7b@3=UtS=d??ooG-xlWYVhd$HaCJn~Ak}DIa!uR&2wwc&|_mjLtsxrRX z5Ek>PfDX3PYv+>J`I9Tm`n;+47{7TgG^t;^XS*olHw3CGC7r)|!5Mx-N^5m~kc3ap zLCnLFemg0v=@7l-nK+ua% zT=ncjo()j#dvK?Afrsq`a@Q;pp{1L{-`>=iVLn{}$5FcI1Ej`s9l2XD>aL+SBY`=4 zuOz3mzX=W6RlzR6w4c_BXZ@2_Gal4JjIf_XR{S%6#pu6NljoCZpLCY^IxUeaBtzYk z7^Q!J6O0efI?;>Afn#P^6)7c`oKQd=eE&*7JQ=(Yvju{ZQ!oS2!kWd^*qmL4%C+Zedv zzjM8l8AOl3IZq+4akY$j;W?YG!H;5`JJz*+xjrkEFA19ZKE{JKlzecf{C3sIwjY4c zCXm=QaAsxq*Z{xtrBb-xL1>~At?*PVqeUpOzN9UlJ(gzP_c5(@cl$MP^2xltjdmP| z&P##j9N-vDe{=wXe`R#MM9OcW=dT%&hiKt`Fi=D9WRNS4BP}HzfnJYAPt*7b_%G*) z_WmWj{}%f8WOP@Cl$uv@H5zpEaQYo`+5Pgy^4=Y=Tfn?<0lGs2^*+nI$eDxtU`F#+ z-9HTs337c!>kgoOY5#bcx~1A@>5*Kno#~;aKyXQzS#$}s;2a`{&p=9$VvWV&c|EQT zN#Anwt&|`KRNHrz!;=ax;;$$4Rt9;i#ZnJ>S1{ylakX0;nUs4U++i!2i5@K0qF>kR zz~kzWTBmb6DXk{ZO5}$rqQ*K0^Lz|taROY(sgnFPwu`fv?x9p=pEEO|RSR}?*H)ZH z?KylB;ct|C{`M0+cjc(wt8J@w`!(W)awDem?_bX!+VCt&3g2fn`R??vQT4Q3K1lx+ zKoh;;T3R_ufua`Z(oN+ixw_*yP3xU?`b#}uUYp>~HJY!u`q4%q*V8H_({XB5yeo;NzBCp%Vn7^3&uCz;u;E@w~e88i2WvE`}Q%Hl>! zYfC$NAkn`GcKS|0tDRllzXS?@3vRJHXavGCj~b3s?lEA|53>>&UV|H5%l&8cwqxul z`Gz1B_QL`9L3i%|x($enfqMaPJCD8qe)I}sb{f!S$mJ}&J7of}utvc8ly6FK}LrPb1oN`C9@yJWaV z%Yl5|Xld%SQ^Kgx#?}z1T)*1}-gE{7ed%Fo#<|K?$}9&?bG8MSo4H1FR